summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSergei Golubchik <sergii@pisem.net>2013-07-21 16:39:19 +0200
committerSergei Golubchik <sergii@pisem.net>2013-07-21 16:39:19 +0200
commitb7b5f6f1ab49948b0e15b762266d4640b3d6b7fb (patch)
tree7c302c2025184dbd053aa6135f0ff28c8ce6f359
parent5f6380adde2dac3f32b40339b9b702c0135eb7d6 (diff)
parentc1d6a2d7e194225ccc19a68ea5d0f368632620d0 (diff)
downloadmariadb-git-b7b5f6f1ab49948b0e15b762266d4640b3d6b7fb.tar.gz
10.0-monty merge
includes: * remove some remnants of "Bug#14521864: MYSQL 5.1 TO 5.5 BUGS PARTITIONING" * introduce LOCK_share, now LOCK_ha_data is strictly for engines * rea_create_table() always creates .par file (even in "frm-only" mode) * fix a 5.6 bug, temp file leak on dummy ALTER TABLE
-rwxr-xr-xBUILD/SETUP.sh3
-rw-r--r--CMakeLists.txt9
-rw-r--r--TODO7
-rw-r--r--client/CMakeLists.txt6
-rw-r--r--client/mysql.cc13
-rw-r--r--client/mysql_upgrade.c2
-rw-r--r--client/mysqladmin.cc3
-rw-r--r--client/mysqlbinlog.cc2
-rw-r--r--client/mysqlcheck.c4
-rw-r--r--client/mysqltest.cc4
-rw-r--r--cmake/configure.pl20
-rw-r--r--cmake/cpu_info.cmake30
-rw-r--r--cmake/libevent.cmake89
-rw-r--r--cmake/libutils.cmake25
-rw-r--r--cmake/os/Windows.cmake4
-rw-r--r--cmake/ssl.cmake196
-rw-r--r--dbug/dbug.c4
-rw-r--r--extra/comp_err.c11
-rw-r--r--extra/my_print_defaults.c2
-rw-r--r--include/big_endian.h82
-rw-r--r--include/byte_order_generic.h95
-rw-r--r--include/byte_order_generic_x86.h97
-rw-r--r--include/byte_order_generic_x86_64.h83
-rw-r--r--include/crypt_genhash_impl.h32
-rw-r--r--include/errmsg.h12
-rw-r--r--include/ft_global.h21
-rw-r--r--include/little_endian.h75
-rw-r--r--include/m_ctype.h36
-rw-r--r--include/m_string.h2
-rw-r--r--include/my_base.h58
-rw-r--r--include/my_bitmap.h1
-rw-r--r--include/my_byteorder.h54
-rw-r--r--include/my_default.h50
-rw-r--r--include/my_getopt.h5
-rw-r--r--include/my_global.h304
-rw-r--r--include/my_handler_errors.h4
-rw-r--r--include/my_md5.h95
-rw-r--r--include/my_rnd.h32
-rw-r--r--include/my_sys.h62
-rw-r--r--include/my_time.h4
-rw-r--r--include/myisammrg.h3
-rw-r--r--include/mysql/client_authentication.h13
-rw-r--r--include/mysql/plugin.h11
-rw-r--r--include/mysql/plugin_audit.h.pp9
-rw-r--r--include/mysql/plugin_auth.h.pp9
-rw-r--r--include/mysql/plugin_auth_common.h21
-rw-r--r--include/mysql/plugin_ftparser.h.pp9
-rw-r--r--include/mysql/psi/mysql_file.h249
-rw-r--r--include/mysql/psi/mysql_idle.h4
-rw-r--r--include/mysql/psi/mysql_socket.h178
-rw-r--r--include/mysql/psi/mysql_stage.h4
-rw-r--r--include/mysql/psi/mysql_statement.h33
-rw-r--r--include/mysql/psi/mysql_table.h83
-rw-r--r--include/mysql/psi/mysql_thread.h80
-rw-r--r--include/mysql/psi/psi.h120
-rw-r--r--include/mysql/psi/psi_abi_v1.h.pp28
-rw-r--r--include/mysql/service_debug_sync.h11
-rw-r--r--include/mysql/service_my_plugin_log.h64
-rw-r--r--include/mysql/service_sha1.h57
-rw-r--r--include/mysql/services.h1
-rw-r--r--include/mysql_com.h43
-rw-r--r--include/password.h2
-rw-r--r--include/service_versions.h7
-rw-r--r--include/sha1.h90
-rw-r--r--include/sql_common.h2
-rw-r--r--include/thread_pool_priv.h1
-rw-r--r--libevent/CMakeLists.txt80
-rw-r--r--libevent/ChangeLog190
-rw-r--r--libevent/Doxyfile230
-rw-r--r--libevent/Makefile.am124
-rw-r--r--libevent/README57
-rw-r--r--libevent/WIN32-Code/event-config.h244
-rw-r--r--libevent/WIN32-Code/misc.c93
-rw-r--r--libevent/WIN32-Code/misc.h11
-rw-r--r--libevent/WIN32-Code/tree.h1354
-rw-r--r--libevent/WIN32-Code/win32.c486
-rw-r--r--libevent/WIN32-Prj/libevent.dsw74
-rw-r--r--libevent/WIN32-Prj/libevent.sln53
-rw-r--r--libevent/autogen.sh11
-rw-r--r--libevent/buffer.c451
-rw-r--r--libevent/compat/sys/_time.h163
-rw-r--r--libevent/compat/sys/queue.h488
-rw-r--r--libevent/configure.in387
-rw-r--r--libevent/devpoll.c417
-rw-r--r--libevent/epoll.c373
-rw-r--r--libevent/epoll_sub.c52
-rw-r--r--libevent/evbuffer.c455
-rw-r--r--libevent/evdns.3322
-rw-r--r--libevent/evdns.c3200
-rw-r--r--libevent/evdns.h528
-rw-r--r--libevent/event-internal.h102
-rw-r--r--libevent/event.3624
-rw-r--r--libevent/event.c1025
-rw-r--r--libevent/event.h1175
-rw-r--r--libevent/event_rpcgen.py1417
-rw-r--r--libevent/event_tagging.c443
-rw-r--r--libevent/evhttp.h371
-rw-r--r--libevent/evport.c513
-rw-r--r--libevent/evrpc-internal.h87
-rw-r--r--libevent/evrpc.c661
-rw-r--r--libevent/evrpc.h486
-rw-r--r--libevent/evsignal.h54
-rw-r--r--libevent/evutil.c245
-rw-r--r--libevent/evutil.h185
-rw-r--r--libevent/http-internal.h154
-rw-r--r--libevent/http.c2830
-rw-r--r--libevent/kqueue.c449
-rw-r--r--libevent/log.c187
-rw-r--r--libevent/log.h51
-rw-r--r--libevent/min_heap.h150
-rw-r--r--libevent/poll.c379
-rw-r--r--libevent/sample/Makefile.am14
-rw-r--r--libevent/sample/event-test.c139
-rw-r--r--libevent/sample/signal-test.c63
-rw-r--r--libevent/sample/time-test.c70
-rw-r--r--libevent/select.c356
-rw-r--r--libevent/signal.c357
-rw-r--r--libevent/strlcpy-internal.h23
-rw-r--r--libevent/strlcpy.c76
-rw-r--r--libevent/test/Makefile.am35
-rw-r--r--libevent/test/bench.c188
-rw-r--r--libevent/test/regress.c1703
-rw-r--r--libevent/test/regress.gen.c872
-rw-r--r--libevent/test/regress.gen.h183
-rw-r--r--libevent/test/regress.h45
-rw-r--r--libevent/test/regress.rpc20
-rw-r--r--libevent/test/regress_dns.c376
-rw-r--r--libevent/test/regress_http.c1476
-rw-r--r--libevent/test/regress_rpc.c631
-rw-r--r--libevent/test/test-eof.c82
-rw-r--r--libevent/test/test-init.c33
-rw-r--r--libevent/test/test-time.c82
-rw-r--r--libevent/test/test-weof.c80
-rw-r--r--libevent/test/test.sh91
-rw-r--r--libmysql/CMakeLists.txt5
-rw-r--r--libmysql/errmsg.c2
-rw-r--r--libmysql/libmysql.c10
-rw-r--r--libmysqld/CMakeLists.txt5
-rw-r--r--libmysqld/emb_qcache.cc2
-rw-r--r--libmysqld/lib_sql.cc10
-rw-r--r--libservices/CMakeLists.txt1
-rw-r--r--libservices/my_sha1_service.c18
-rw-r--r--mysql-test/include/commit.inc4
-rw-r--r--mysql-test/include/default_mysqld.cnf21
-rw-r--r--mysql-test/include/have_innodb.combinations6
-rw-r--r--mysql-test/include/have_ipv6.inc20
-rw-r--r--mysql-test/include/mix1.inc31
-rwxr-xr-xmysql-test/mysql-test-run.pl1
-rw-r--r--mysql-test/r/1st.result4
-rw-r--r--mysql-test/r/alter_table.result495
-rw-r--r--mysql-test/r/alter_table_online.result44
-rw-r--r--mysql-test/r/bootstrap.result2
-rw-r--r--mysql-test/r/cast.result12
-rw-r--r--mysql-test/r/commit_1innodb.result4
-rw-r--r--mysql-test/r/connect.result12
-rw-r--r--mysql-test/r/create.result18
-rw-r--r--mysql-test/r/ctype_errors.result4
-rw-r--r--mysql-test/r/ctype_tis620.result2
-rw-r--r--mysql-test/r/ctype_ujis.result4
-rw-r--r--mysql-test/r/ctype_utf8.result2
-rw-r--r--mysql-test/r/ctype_utf8mb4.result6
-rw-r--r--mysql-test/r/ctype_utf8mb4_heap.result2
-rw-r--r--mysql-test/r/ctype_utf8mb4_innodb.result2
-rw-r--r--mysql-test/r/ctype_utf8mb4_myisam.result2
-rw-r--r--mysql-test/r/drop.result10
-rw-r--r--mysql-test/r/dyncol.result4
-rw-r--r--mysql-test/r/error_simulation.result2
-rw-r--r--mysql-test/r/events_restart.result23
-rw-r--r--mysql-test/r/flush_read_lock.result31
-rw-r--r--mysql-test/r/func_analyse.result2
-rw-r--r--mysql-test/r/func_crypt.result2
-rw-r--r--mysql-test/r/func_rollback.result4
-rw-r--r--mysql-test/r/func_sapdb.result4
-rw-r--r--mysql-test/r/gis-rtree.result2
-rw-r--r--mysql-test/r/grant.result5
-rw-r--r--mysql-test/r/handlersocket.result2
-rw-r--r--mysql-test/r/innodb_mysql_sync.result197
-rw-r--r--mysql-test/r/key.result2
-rw-r--r--mysql-test/r/log_slow.result1
-rw-r--r--mysql-test/r/log_state.result6
-rw-r--r--mysql-test/r/log_tables.result56
-rw-r--r--mysql-test/r/log_tables_upgrade.result4
-rw-r--r--mysql-test/r/lowercase_table4.result26
-rw-r--r--mysql-test/r/mdl_sync.result233
-rw-r--r--mysql-test/r/myisam-system.result2
-rw-r--r--mysql-test/r/myisam.result2
-rw-r--r--mysql-test/r/mysql_upgrade.result28
-rw-r--r--mysql-test/r/mysql_upgrade_ssl.result4
-rw-r--r--mysql-test/r/mysqlcheck.result16
-rw-r--r--mysql-test/r/mysqld--help.result104
-rw-r--r--mysql-test/r/mysqldump.result9
-rw-r--r--mysql-test/r/partition.result49
-rw-r--r--mysql-test/r/partition_binlog.result2
-rw-r--r--mysql-test/r/partition_debug_sync.result33
-rw-r--r--mysql-test/r/partition_disabled.result6
-rw-r--r--mysql-test/r/partition_innodb.result13
-rw-r--r--mysql-test/r/partition_innodb_plugin.result16
-rw-r--r--mysql-test/r/partition_mgm_err.result2
-rw-r--r--mysql-test/r/partition_myisam.result129
-rw-r--r--mysql-test/r/partition_not_blackhole.result2
-rw-r--r--mysql-test/r/partition_truncate.result2
-rw-r--r--mysql-test/r/plugin.result12
-rw-r--r--mysql-test/r/profiling.result6
-rw-r--r--mysql-test/r/ps.result44
-rw-r--r--mysql-test/r/ps_1general.result4
-rw-r--r--mysql-test/r/ps_ddl1.result2
-rw-r--r--mysql-test/r/query_cache.result35
-rw-r--r--mysql-test/r/read_only.result4
-rw-r--r--mysql-test/r/rename.result2
-rw-r--r--mysql-test/r/row-checksum-old.result2
-rw-r--r--mysql-test/r/row-checksum.result2
-rw-r--r--mysql-test/r/select.result12
-rw-r--r--mysql-test/r/select_jcl6.result12
-rw-r--r--mysql-test/r/select_pkeycache.result12
-rw-r--r--mysql-test/r/signal.result47
-rw-r--r--mysql-test/r/signal_demo2.result4
-rw-r--r--mysql-test/r/signal_demo3.result20
-rw-r--r--mysql-test/r/sp-big.result2
-rw-r--r--mysql-test/r/sp-bugs.result11
-rw-r--r--mysql-test/r/sp-code.result4
-rw-r--r--mysql-test/r/sp-destruct.result4
-rw-r--r--mysql-test/r/sp-dynamic.result2
-rw-r--r--mysql-test/r/sp-error.result896
-rw-r--r--mysql-test/r/sp-prelocking.result4
-rw-r--r--mysql-test/r/sp-vars.result2
-rw-r--r--mysql-test/r/sp.result183
-rw-r--r--mysql-test/r/sp_trans.result4
-rw-r--r--mysql-test/r/statistics.result28
-rw-r--r--mysql-test/r/strict.result29
-rw-r--r--mysql-test/r/subselect.result2
-rw-r--r--mysql-test/r/subselect2.result2
-rw-r--r--mysql-test/r/subselect4.result2
-rw-r--r--mysql-test/r/subselect_innodb.result4
-rw-r--r--mysql-test/r/subselect_no_mat.result2
-rw-r--r--mysql-test/r/subselect_no_opts.result2
-rw-r--r--mysql-test/r/subselect_no_scache.result2
-rw-r--r--mysql-test/r/subselect_no_semijoin.result2
-rw-r--r--mysql-test/r/system_mysql_db.result10
-rw-r--r--mysql-test/r/system_mysql_db_fix40123.result10
-rw-r--r--mysql-test/r/system_mysql_db_fix50030.result10
-rw-r--r--mysql-test/r/system_mysql_db_fix50117.result10
-rw-r--r--mysql-test/r/temp_table.result22
-rw-r--r--mysql-test/r/truncate_coverage.result6
-rw-r--r--mysql-test/r/type_newdecimal.result2
-rw-r--r--mysql-test/r/upgrade.result4
-rw-r--r--mysql-test/r/view.result4
-rw-r--r--mysql-test/r/view_grant.result8
-rw-r--r--mysql-test/r/warnings.result10
-rw-r--r--mysql-test/suite/archive/archive.result24
-rw-r--r--mysql-test/suite/archive/archive.test15
-rw-r--r--mysql-test/suite/binlog/r/binlog_row_mix_innodb_myisam.result4
-rw-r--r--mysql-test/suite/binlog/r/binlog_stm_mix_innodb_myisam.result4
-rw-r--r--mysql-test/suite/csv/csv.result6
-rw-r--r--mysql-test/suite/federated/federated_server.result20
-rw-r--r--mysql-test/suite/federated/federated_transactions.result4
-rw-r--r--mysql-test/suite/federated/federatedx.result22
-rw-r--r--mysql-test/suite/funcs_1/r/innodb_storedproc_02.result44
-rw-r--r--mysql-test/suite/funcs_1/r/innodb_trig_0102.result2
-rw-r--r--mysql-test/suite/funcs_1/r/is_columns_mysql.result112
-rw-r--r--mysql-test/suite/funcs_1/r/is_key_column_usage.result7
-rw-r--r--mysql-test/suite/funcs_1/r/is_statistics.result5
-rw-r--r--mysql-test/suite/funcs_1/r/is_statistics_mysql.result5
-rw-r--r--mysql-test/suite/funcs_1/r/is_table_constraints.result5
-rw-r--r--mysql-test/suite/funcs_1/r/is_table_constraints_mysql.result5
-rw-r--r--mysql-test/suite/funcs_1/r/is_tables_mysql.result92
-rw-r--r--mysql-test/suite/funcs_1/r/is_user_privileges.result33
-rw-r--r--mysql-test/suite/funcs_1/r/memory_storedproc_02.result44
-rw-r--r--mysql-test/suite/funcs_1/r/memory_trig_0102.result2
-rw-r--r--mysql-test/suite/funcs_1/r/myisam_storedproc_02.result44
-rw-r--r--mysql-test/suite/funcs_1/r/myisam_trig_0102.result2
-rw-r--r--mysql-test/suite/funcs_1/r/storedproc.result174
-rw-r--r--mysql-test/suite/handler/aria.result4
-rw-r--r--mysql-test/suite/handler/handler.inc2
-rw-r--r--mysql-test/suite/handler/heap.result4
-rw-r--r--mysql-test/suite/handler/innodb.result4
-rw-r--r--mysql-test/suite/handler/myisam.result4
-rw-r--r--mysql-test/suite/innodb/include/restart_and_reinit.inc3
-rw-r--r--mysql-test/suite/innodb/r/innodb-autoinc-44030.result4
-rw-r--r--mysql-test/suite/innodb/r/innodb-autoinc-56228.result6
-rw-r--r--mysql-test/suite/innodb/r/innodb-autoinc.result40
-rw-r--r--mysql-test/suite/innodb/r/innodb-create-options.result328
-rw-r--r--mysql-test/suite/innodb/r/innodb-index.result606
-rw-r--r--mysql-test/suite/innodb/r/innodb-zip.result253
-rw-r--r--mysql-test/suite/innodb/r/innodb.result20
-rw-r--r--mysql-test/suite/innodb/r/innodb_bug14007649.result5
-rw-r--r--mysql-test/suite/innodb/r/innodb_bug21704.result68
-rw-r--r--mysql-test/suite/innodb/r/innodb_bug52745.result2
-rw-r--r--mysql-test/suite/innodb/r/innodb_bug53591.result7
-rw-r--r--mysql-test/suite/innodb/r/innodb_bug54044.result12
-rw-r--r--mysql-test/suite/innodb/r/innodb_bug56947.result11
-rw-r--r--mysql-test/suite/innodb/r/innodb_bug60049.result7
-rw-r--r--mysql-test/suite/innodb/r/innodb_corrupt_bit.result1
-rw-r--r--mysql-test/suite/innodb/r/innodb_index_large_prefix.result382
-rw-r--r--mysql-test/suite/innodb/r/innodb_information_schema.result28
-rw-r--r--mysql-test/suite/innodb/r/innodb_information_schema_buffer.result100
-rw-r--r--mysql-test/suite/innodb/r/innodb_mysql.result480
-rw-r--r--mysql-test/suite/innodb/r/innodb_prefix_index_liftedlimit.result53
-rw-r--r--mysql-test/suite/innodb/r/innodb_prefix_index_restart_server.result2
-rw-r--r--mysql-test/suite/innodb/t/innodb-autoinc-44030.test8
-rw-r--r--mysql-test/suite/innodb/t/innodb-create-options.test134
-rw-r--r--mysql-test/suite/innodb/t/innodb-index.test581
-rw-r--r--mysql-test/suite/innodb/t/innodb-zip.test158
-rw-r--r--mysql-test/suite/innodb/t/innodb.test25
-rw-r--r--mysql-test/suite/innodb/t/innodb_bug21704.test55
-rw-r--r--mysql-test/suite/innodb/t/innodb_bug52745.test2
-rw-r--r--mysql-test/suite/innodb/t/innodb_bug53591.test6
-rw-r--r--mysql-test/suite/innodb/t/innodb_bug53592.test2
-rw-r--r--mysql-test/suite/innodb/t/innodb_bug54044.test13
-rw-r--r--mysql-test/suite/innodb/t/innodb_bug56947.test18
-rw-r--r--mysql-test/suite/innodb/t/innodb_bug60049.test8
-rw-r--r--mysql-test/suite/innodb/t/innodb_corrupt_bit.test2
-rw-r--r--mysql-test/suite/innodb/t/innodb_file_format.test4
-rw-r--r--mysql-test/suite/innodb/t/innodb_index_large_prefix.test310
-rw-r--r--mysql-test/suite/innodb/t/innodb_information_schema_buffer.test14
-rw-r--r--mysql-test/suite/innodb/t/innodb_mysql.test513
-rw-r--r--mysql-test/suite/innodb/t/innodb_prefix_index_liftedlimit.test116
-rw-r--r--mysql-test/suite/maria/lock.result4
-rw-r--r--mysql-test/suite/maria/maria-gis-rtree-dynamic.result2
-rw-r--r--mysql-test/suite/maria/maria-gis-rtree-trans.result2
-rw-r--r--mysql-test/suite/maria/maria-gis-rtree.result2
-rw-r--r--mysql-test/suite/maria/maria-page-checksum.result108
-rw-r--r--mysql-test/suite/maria/maria-page-checksum.test108
-rw-r--r--mysql-test/suite/maria/maria-partitioning.result4
-rw-r--r--mysql-test/suite/maria/maria-ucs2.result1
-rw-r--r--mysql-test/suite/maria/maria3.result2
-rw-r--r--mysql-test/suite/maria/maria3.test2
-rw-r--r--mysql-test/suite/maria/max_length.result4
-rw-r--r--mysql-test/suite/maria/small_blocksize.result2
-rw-r--r--mysql-test/suite/optimizer_unfixed_bugs/r/bug41029.result2
-rw-r--r--mysql-test/suite/optimizer_unfixed_bugs/r/bug41996.result2
-rw-r--r--mysql-test/suite/oqgraph/r/basic.result2
-rw-r--r--mysql-test/suite/parts/inc/partition.pre8
-rw-r--r--mysql-test/suite/parts/inc/partition_alter3.inc2
-rw-r--r--mysql-test/suite/parts/inc/partition_crash_exchange.inc29
-rw-r--r--mysql-test/suite/parts/inc/partition_crash_t2.inc12
-rw-r--r--mysql-test/suite/parts/inc/partition_fail_exchange.inc27
-rw-r--r--mysql-test/suite/parts/inc/partition_fail_t2.inc31
-rw-r--r--mysql-test/suite/parts/r/part_ctype_utf32.result2
-rw-r--r--mysql-test/suite/parts/r/partition_alter1_1_2_innodb.result2
-rw-r--r--mysql-test/suite/parts/r/partition_alter1_1_2_myisam.result2
-rw-r--r--mysql-test/suite/parts/r/partition_alter1_1_innodb.result2
-rw-r--r--mysql-test/suite/parts/r/partition_alter1_1_myisam.result2
-rw-r--r--mysql-test/suite/parts/r/partition_alter1_2_innodb.result2
-rw-r--r--mysql-test/suite/parts/r/partition_alter1_2_myisam.result2
-rw-r--r--mysql-test/suite/parts/r/partition_alter2_1_1_innodb.result2
-rw-r--r--mysql-test/suite/parts/r/partition_alter2_1_2_innodb.result2
-rw-r--r--mysql-test/suite/parts/r/partition_alter2_1_maria.result2
-rw-r--r--mysql-test/suite/parts/r/partition_alter2_1_myisam.result2
-rw-r--r--mysql-test/suite/parts/r/partition_alter2_2_1_innodb.result2
-rw-r--r--mysql-test/suite/parts/r/partition_alter2_2_2_innodb.result2
-rw-r--r--mysql-test/suite/parts/r/partition_alter2_2_maria.result2
-rw-r--r--mysql-test/suite/parts/r/partition_alter2_2_myisam.result2
-rw-r--r--mysql-test/suite/parts/r/partition_alter3_innodb.result98
-rw-r--r--mysql-test/suite/parts/r/partition_alter3_myisam.result4
-rw-r--r--mysql-test/suite/parts/r/partition_alter4_innodb.result2
-rw-r--r--mysql-test/suite/parts/r/partition_alter4_myisam.result2
-rw-r--r--mysql-test/suite/parts/r/partition_basic_innodb.result562
-rw-r--r--mysql-test/suite/parts/r/partition_basic_myisam.result2
-rw-r--r--mysql-test/suite/parts/r/partition_basic_symlink_innodb.result122
-rw-r--r--mysql-test/suite/parts/r/partition_basic_symlink_myisam.result2
-rw-r--r--mysql-test/suite/parts/r/partition_debug_innodb.result1918
-rw-r--r--mysql-test/suite/parts/r/partition_debug_myisam.result17
-rw-r--r--mysql-test/suite/parts/r/partition_engine_innodb.result2
-rw-r--r--mysql-test/suite/parts/r/partition_engine_myisam.result2
-rw-r--r--mysql-test/suite/parts/r/partition_mgm_lc0_archive.result10
-rw-r--r--mysql-test/suite/parts/r/partition_mgm_lc0_innodb.result10
-rw-r--r--mysql-test/suite/parts/r/partition_mgm_lc0_memory.result10
-rw-r--r--mysql-test/suite/parts/r/partition_mgm_lc0_myisam.result10
-rw-r--r--mysql-test/suite/parts/r/partition_mgm_lc1_archive.result10
-rw-r--r--mysql-test/suite/parts/r/partition_mgm_lc1_innodb.result10
-rw-r--r--mysql-test/suite/parts/r/partition_mgm_lc1_memory.result10
-rw-r--r--mysql-test/suite/parts/r/partition_mgm_lc1_myisam.result10
-rw-r--r--mysql-test/suite/parts/r/partition_repair_myisam.result24
-rw-r--r--mysql-test/suite/parts/r/partition_syntax_innodb.result2
-rw-r--r--mysql-test/suite/parts/r/partition_syntax_myisam.result2
-rw-r--r--mysql-test/suite/parts/t/partition_basic_innodb.test12
-rw-r--r--mysql-test/suite/parts/t/partition_basic_symlink_innodb.test220
-rw-r--r--mysql-test/suite/parts/t/partition_debug_innodb.test31
-rw-r--r--mysql-test/suite/parts/t/partition_repair_myisam.test24
-rw-r--r--mysql-test/suite/perfschema/disabled.def38
-rw-r--r--mysql-test/suite/perfschema/include/connection_load.inc35
-rw-r--r--mysql-test/suite/perfschema/include/digest_cleanup.inc3
-rw-r--r--mysql-test/suite/perfschema/include/digest_execution.inc54
-rw-r--r--mysql-test/suite/perfschema/include/digest_setup.inc3
-rw-r--r--mysql-test/suite/perfschema/include/event_aggregate_load.inc24
-rw-r--r--mysql-test/suite/perfschema/include/event_aggregate_setup.inc3
-rw-r--r--mysql-test/suite/perfschema/include/hostcache_set_state.inc23
-rw-r--r--mysql-test/suite/perfschema/include/schema.inc2
-rw-r--r--mysql-test/suite/perfschema/include/sizing_auto.inc16
-rw-r--r--mysql-test/suite/perfschema/include/start_server_common.inc10
-rw-r--r--mysql-test/suite/perfschema/include/table_aggregate_load.inc36
-rw-r--r--mysql-test/suite/perfschema/r/binlog_stmt.result16
-rw-r--r--mysql-test/suite/perfschema/r/csv_table_io.result1
-rw-r--r--mysql-test/suite/perfschema/r/ddl_session_account_connect_attrs.result9
-rw-r--r--mysql-test/suite/perfschema/r/ddl_session_connect_attrs.result9
-rw-r--r--mysql-test/suite/perfschema/r/digest_table_full.result60
-rw-r--r--mysql-test/suite/perfschema/r/dml_esms_by_digest.result4
-rw-r--r--mysql-test/suite/perfschema/r/dml_handler.result45
-rw-r--r--mysql-test/suite/perfschema/r/dml_session_account_connect_attrs.result25
-rw-r--r--mysql-test/suite/perfschema/r/dml_session_connect_attrs.result25
-rw-r--r--mysql-test/suite/perfschema/r/dml_setup_instruments.result6
-rw-r--r--mysql-test/suite/perfschema/r/func_file_io.result1
-rw-r--r--mysql-test/suite/perfschema/r/func_mutex.result1
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_again_allow.result23
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_again_deny.result27
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_bad_allow.result23
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_bad_deny.result27
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_good_allow.result23
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_good_deny.result27
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_noname_allow.result23
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_noname_deny.result27
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv4_auth_plugin.result23
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv4_blocked.result21
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv4_format.result27
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv4_max_con.result20
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv4_nameinfo_again_allow.result25
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv4_nameinfo_again_deny.result33
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv4_nameinfo_noname_allow.result23
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv4_nameinfo_noname_deny.result27
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv4_passwd.result22
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv4_ssl.result22
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_again_allow.result23
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_again_deny.result27
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_bad_allow.result23
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_bad_deny.result27
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_good_allow.result23
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_good_deny.result27
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_noname_allow.result23
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_noname_deny.result27
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv6_auth_plugin.result23
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv6_blocked.result21
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv6_max_con.result20
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv6_nameinfo_again_allow.result25
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv6_nameinfo_again_deny.result33
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv6_nameinfo_noname_allow.result23
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv6_nameinfo_noname_deny.result27
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv6_passwd.result22
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_ipv6_ssl.result22
-rw-r--r--mysql-test/suite/perfschema/r/hostcache_peer_addr.result23
-rw-r--r--mysql-test/suite/perfschema/r/indexed_table_io.result1
-rw-r--r--mysql-test/suite/perfschema/r/information_schema.result27
-rw-r--r--mysql-test/suite/perfschema/r/innodb_table_io.result1
-rw-r--r--mysql-test/suite/perfschema/r/memory_table_io.result1
-rw-r--r--mysql-test/suite/perfschema/r/merge_table_io.result1
-rw-r--r--mysql-test/suite/perfschema/r/multi_table_io.result1
-rw-r--r--mysql-test/suite/perfschema/r/myisam_file_io.result1
-rw-r--r--mysql-test/suite/perfschema/r/myisam_table_io.result1
-rw-r--r--mysql-test/suite/perfschema/r/nesting.result1
-rw-r--r--mysql-test/suite/perfschema/r/ortho_iter.result2
-rw-r--r--mysql-test/suite/perfschema/r/part_table_io.result31
-rw-r--r--mysql-test/suite/perfschema/r/pfs_upgrade.result303
-rw-r--r--mysql-test/suite/perfschema/r/pfs_upgrade_event.result63
-rw-r--r--mysql-test/suite/perfschema/r/pfs_upgrade_func.result63
-rw-r--r--mysql-test/suite/perfschema/r/pfs_upgrade_proc.result63
-rw-r--r--mysql-test/suite/perfschema/r/pfs_upgrade_table.result65
-rw-r--r--mysql-test/suite/perfschema/r/pfs_upgrade_view.result65
-rw-r--r--mysql-test/suite/perfschema/r/privilege_table_io.result9
-rw-r--r--mysql-test/suite/perfschema/r/rollback_table_io.result1
-rw-r--r--mysql-test/suite/perfschema/r/schema.result105
-rw-r--r--mysql-test/suite/perfschema/r/short_option_1.result2
-rw-r--r--mysql-test/suite/perfschema/r/socket_summary_by_instance_func_win.result2
-rw-r--r--mysql-test/suite/perfschema/r/stage_mdl_table.result3
-rw-r--r--mysql-test/suite/perfschema/r/start_server_disable_idle.result10
-rw-r--r--mysql-test/suite/perfschema/r/start_server_disable_stages.result10
-rw-r--r--mysql-test/suite/perfschema/r/start_server_disable_statements.result10
-rw-r--r--mysql-test/suite/perfschema/r/start_server_disable_waits.result10
-rw-r--r--mysql-test/suite/perfschema/r/start_server_innodb.result11
-rw-r--r--mysql-test/suite/perfschema/r/start_server_no_account.result10
-rw-r--r--mysql-test/suite/perfschema/r/start_server_no_cond_class.result10
-rw-r--r--mysql-test/suite/perfschema/r/start_server_no_cond_inst.result10
-rw-r--r--mysql-test/suite/perfschema/r/start_server_no_digests.result56
-rw-r--r--mysql-test/suite/perfschema/r/start_server_no_file_class.result10
-rw-r--r--mysql-test/suite/perfschema/r/start_server_no_file_inst.result10
-rw-r--r--mysql-test/suite/perfschema/r/start_server_no_host.result10
-rw-r--r--mysql-test/suite/perfschema/r/start_server_no_mutex_class.result10
-rw-r--r--mysql-test/suite/perfschema/r/start_server_no_mutex_inst.result10
-rw-r--r--mysql-test/suite/perfschema/r/start_server_no_rwlock_class.result10
-rw-r--r--mysql-test/suite/perfschema/r/start_server_no_rwlock_inst.result10
-rw-r--r--mysql-test/suite/perfschema/r/start_server_no_setup_actors.result10
-rw-r--r--mysql-test/suite/perfschema/r/start_server_no_setup_objects.result10
-rw-r--r--mysql-test/suite/perfschema/r/start_server_no_socket_class.result10
-rw-r--r--mysql-test/suite/perfschema/r/start_server_no_socket_inst.result10
-rw-r--r--mysql-test/suite/perfschema/r/start_server_no_stage_class.result10
-rw-r--r--mysql-test/suite/perfschema/r/start_server_no_stages_history.result10
-rw-r--r--mysql-test/suite/perfschema/r/start_server_no_stages_history_long.result10
-rw-r--r--mysql-test/suite/perfschema/r/start_server_no_statement_class.result10
-rw-r--r--mysql-test/suite/perfschema/r/start_server_no_statements_history.result10
-rw-r--r--mysql-test/suite/perfschema/r/start_server_no_statements_history_long.result10
-rw-r--r--mysql-test/suite/perfschema/r/start_server_no_table_hdl.result10
-rw-r--r--mysql-test/suite/perfschema/r/start_server_no_table_inst.result10
-rw-r--r--mysql-test/suite/perfschema/r/start_server_no_thread_class.result10
-rw-r--r--mysql-test/suite/perfschema/r/start_server_no_thread_inst.result10
-rw-r--r--mysql-test/suite/perfschema/r/start_server_no_user.result10
-rw-r--r--mysql-test/suite/perfschema/r/start_server_no_waits_history.result10
-rw-r--r--mysql-test/suite/perfschema/r/start_server_no_waits_history_long.result10
-rw-r--r--mysql-test/suite/perfschema/r/start_server_nothing.result27
-rw-r--r--mysql-test/suite/perfschema/r/start_server_off.result50
-rw-r--r--mysql-test/suite/perfschema/r/start_server_on.result11
-rw-r--r--mysql-test/suite/perfschema/r/statement_digest.result115
-rw-r--r--mysql-test/suite/perfschema/r/statement_digest_consumers.result115
-rw-r--r--mysql-test/suite/perfschema/r/statement_digest_consumers2.result56
-rw-r--r--mysql-test/suite/perfschema/r/statement_digest_long_query.result8
-rw-r--r--mysql-test/suite/perfschema/r/table_aggregate_global_2u_2t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_aggregate_global_2u_3t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_aggregate_global_4u_2t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_aggregate_global_4u_3t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_aggregate_hist_2u_2t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_aggregate_hist_2u_3t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_aggregate_hist_4u_2t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_aggregate_hist_4u_3t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_aggregate_off.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_aggregate_thread_2u_2t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_aggregate_thread_2u_3t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_aggregate_thread_4u_2t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_aggregate_thread_4u_3t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_io_aggregate_global_2u_2t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_io_aggregate_global_2u_3t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_io_aggregate_global_4u_2t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_io_aggregate_global_4u_3t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_io_aggregate_hist_2u_2t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_io_aggregate_hist_2u_3t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_io_aggregate_hist_4u_2t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_io_aggregate_hist_4u_3t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_io_aggregate_thread_2u_2t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_io_aggregate_thread_2u_3t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_io_aggregate_thread_4u_2t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_io_aggregate_thread_4u_3t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_lock_aggregate_global_2u_2t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_lock_aggregate_global_2u_3t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_lock_aggregate_global_4u_2t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_lock_aggregate_global_4u_3t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_lock_aggregate_hist_2u_2t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_lock_aggregate_hist_2u_3t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_lock_aggregate_hist_4u_2t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_lock_aggregate_hist_4u_3t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_lock_aggregate_thread_2u_2t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_lock_aggregate_thread_2u_3t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_lock_aggregate_thread_4u_2t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_lock_aggregate_thread_4u_3t.result19
-rw-r--r--mysql-test/suite/perfschema/r/table_schema.result130
-rw-r--r--mysql-test/suite/perfschema/r/temp_table_io.result1
-rw-r--r--mysql-test/suite/perfschema/r/trigger_table_io.result1
-rw-r--r--mysql-test/suite/perfschema/r/view_table_io.result1
-rw-r--r--mysql-test/suite/perfschema/t/ddl_session_account_connect_attrs.test15
-rw-r--r--mysql-test/suite/perfschema/t/ddl_session_connect_attrs.test15
-rw-r--r--mysql-test/suite/perfschema/t/digest_table_full.test2
-rw-r--r--mysql-test/suite/perfschema/t/disabled.def2
-rw-r--r--mysql-test/suite/perfschema/t/dml_session_account_connect_attrs.test38
-rw-r--r--mysql-test/suite/perfschema/t/dml_session_connect_attrs.test38
-rw-r--r--mysql-test/suite/perfschema/t/dml_setup_instruments.test6
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_again_allow.test10
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_again_deny.test10
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_bad_allow.test10
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_bad_deny.test10
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_good_allow.test10
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_good_deny.test10
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_noname_allow.test10
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_noname_deny.test10
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv4_auth_plugin.test14
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv4_blocked.test13
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv4_format.test10
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv4_max_con-master.opt1
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv4_max_con.test13
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv4_nameinfo_again_allow.test12
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv4_nameinfo_again_deny.test12
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv4_nameinfo_noname_allow.test10
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv4_nameinfo_noname_deny.test10
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv4_passwd.test11
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv4_ssl.test10
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_again_allow.test10
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_again_deny.test10
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_bad_allow.test10
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_bad_deny.test11
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_good_allow.test10
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_good_deny.test10
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_noname_allow.test10
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_noname_deny.test10
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv6_auth_plugin.test15
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv6_blocked.test13
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv6_max_con-master.opt1
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv6_max_con.test13
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv6_nameinfo_again_allow.test12
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv6_nameinfo_again_deny.test12
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv6_nameinfo_noname_allow.test10
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv6_nameinfo_noname_deny.test10
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv6_passwd.test10
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_ipv6_ssl.test10
-rw-r--r--mysql-test/suite/perfschema/t/hostcache_peer_addr.test11
-rw-r--r--mysql-test/suite/perfschema/t/pfs_upgrade.test10
-rw-r--r--mysql-test/suite/perfschema/t/pfs_upgrade_event.test36
-rw-r--r--mysql-test/suite/perfschema/t/pfs_upgrade_func.test36
-rw-r--r--mysql-test/suite/perfschema/t/pfs_upgrade_proc.test36
-rw-r--r--mysql-test/suite/perfschema/t/pfs_upgrade_table.test44
-rw-r--r--mysql-test/suite/perfschema/t/pfs_upgrade_view.test44
-rw-r--r--mysql-test/suite/perfschema/t/privilege_table_io.test6
-rw-r--r--mysql-test/suite/perfschema/t/short_option_1.test2
-rw-r--r--mysql-test/suite/perfschema/t/socket_connect.test2
-rw-r--r--mysql-test/suite/perfschema/t/socket_instances_func.test2
-rw-r--r--mysql-test/suite/perfschema/t/socket_instances_func_win.test2
-rw-r--r--mysql-test/suite/perfschema/t/socket_summary_by_event_name_func.test2
-rw-r--r--mysql-test/suite/perfschema/t/socket_summary_by_instance_func.test4
-rw-r--r--mysql-test/suite/perfschema/t/socket_summary_by_instance_func_win.test4
-rw-r--r--mysql-test/suite/perfschema/t/start_server_no_digests.test2
-rw-r--r--mysql-test/suite/perfschema/t/start_server_nothing-master.opt2
-rw-r--r--mysql-test/suite/perfschema/t/start_server_nothing.test4
-rw-r--r--mysql-test/suite/perfschema/t/start_server_off.test27
-rw-r--r--mysql-test/suite/perfschema/t/statement_digest.test2
-rw-r--r--mysql-test/suite/perfschema/t/statement_digest_consumers.test2
-rw-r--r--mysql-test/suite/perfschema/t/statement_digest_consumers2.test2
-rw-r--r--mysql-test/suite/perfschema/t/statement_digest_long_query.test2
-rw-r--r--mysql-test/suite/rpl/r/rpl_000013.result2
-rw-r--r--mysql-test/suite/rpl/r/rpl_drop.result2
-rw-r--r--mysql-test/suite/rpl/r/rpl_drop_temp.result6
-rw-r--r--mysql-test/suite/rpl/r/rpl_mixed_drop_create_temp_table.result124
-rw-r--r--mysql-test/suite/rpl/r/rpl_mixed_implicit_commit_binlog.result2
-rw-r--r--mysql-test/suite/rpl/r/rpl_multi_update3.result4
-rw-r--r--mysql-test/suite/rpl/r/rpl_password_boundaries.result2
-rw-r--r--mysql-test/suite/rpl/r/rpl_row_colSize.result2
-rw-r--r--mysql-test/suite/rpl/r/rpl_row_drop_create_temp_table.result124
-rw-r--r--mysql-test/suite/rpl/r/rpl_row_implicit_commit_binlog.result2
-rw-r--r--mysql-test/suite/rpl/r/rpl_row_sp005.result2
-rw-r--r--mysql-test/suite/rpl/r/rpl_row_sp006_InnoDB.result2
-rw-r--r--mysql-test/suite/rpl/r/rpl_session_var.result2
-rw-r--r--mysql-test/suite/rpl/r/rpl_stm_drop_create_temp_table.result124
-rw-r--r--mysql-test/suite/rpl/r/rpl_stm_implicit_commit_binlog.result2
-rw-r--r--mysql-test/suite/rpl/r/rpl_stm_no_op.result2
-rw-r--r--mysql-test/suite/rpl/r/rpl_temp_table.result2
-rw-r--r--mysql-test/suite/sys_vars/r/host_cache_size_basic.result37
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_adaptive_flushing_lwm_basic.result96
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_analyze_is_persistent_basic.result103
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_api_bk_commit_interval_basic.result64
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_api_disable_rowlock_basic.result53
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_api_enable_binlog_basic.result53
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_api_enable_mdl_basic.result53
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_api_trx_level_basic.result66
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_autoextend_increment_basic.result4
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_cmp_per_index_enabled_basic.result65
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_compression_failure_threshold_pct_basic.result96
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_compression_level_basic.result73
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_compression_pad_pct_max_basic.result86
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_concurrency_tickets_basic.result8
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_disable_background_merge_basic.result4
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_file_per_table_basic.result4
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_flush_log_at_timeout_basic.result96
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_flush_neighbors_basic.result73
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_flushing_avg_loops_basic.result98
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_force_recovery_crash_basic.result33
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_ft_cache_size_basic.result10
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_ft_enable_diag_print_basic.result14
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_io_capacity_max_basic.result81
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_max_dirty_pages_pct_lwm_basic.result109
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_monitor_disable_basic.result1709
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_monitor_enable_basic.result1709
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_monitor_reset_all_basic.result1709
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_monitor_reset_basic.result1709
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_old_blocks_time_basic.result14
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_online_alter_log_max_size_basic.result64
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_purge_run_now_basic.result27
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_purge_stop_now_basic.result27
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_read_only_basic.result22
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_stats_auto_recalc_basic.result24
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_stats_on_metadata_basic.result14
-rw-r--r--mysql-test/suite/sys_vars/r/innodb_stats_persistent_basic.result24
-rw-r--r--mysql-test/suite/sys_vars/r/max_connect_errors_basic.result8
-rw-r--r--mysql-test/suite/sys_vars/r/metadata_locks_hash_instances_basic.result51
-rw-r--r--mysql-test/suite/sys_vars/r/pfs_session_connect_attrs_size_basic.result23
-rw-r--r--mysql-test/suite/sys_vars/r/slow_query_log_func.result2
-rw-r--r--mysql-test/suite/sys_vars/r/sql_notes_func.result2
-rw-r--r--mysql-test/suite/sys_vars/t/host_cache_size_basic-master.opt1
-rw-r--r--mysql-test/suite/sys_vars/t/host_cache_size_basic.test41
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_adaptive_flushing_lwm_basic.test142
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_analyze_is_persistent_basic.test81
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_api_bk_commit_interval_basic.test58
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_api_disable_rowlock_basic.test102
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_api_enable_binlog_basic.test102
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_api_enable_mdl_basic.test102
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_api_trx_level_basic.test58
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_autoextend_increment_basic.test2
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_cmp_per_index_enabled_basic.test69
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_compression_failure_threshold_pct_basic.test143
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_compression_level_basic.test64
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_compression_pad_pct_max_basic.test136
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_concurrency_tickets_basic.test2
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_disable_background_merge_basic.test12
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_flush_log_at_timeout_basic.test151
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_flush_neighbors_basic.test29
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_flushing_avg_loops_basic.test143
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_force_recovery_crash_basic.test28
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_io_capacity_max_basic.test74
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_max_dirty_pages_pct_lwm_basic.test151
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_monitor_disable_basic.test31
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_monitor_enable_basic.test31
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_monitor_reset_all_basic.test31
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_monitor_reset_basic.test31
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_online_alter_log_max_size_basic.test51
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_purge_run_now_basic.test53
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_purge_stop_now_basic.test53
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_read_only_basic.test20
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_stats_auto_recalc_basic.test31
-rw-r--r--mysql-test/suite/sys_vars/t/innodb_stats_persistent_basic.test31
-rw-r--r--mysql-test/suite/sys_vars/t/metadata_locks_hash_instances_basic.test60
-rw-r--r--mysql-test/suite/sys_vars/t/pfs_session_connect_attrs_size_basic-master.opt2
-rw-r--r--mysql-test/suite/sys_vars/t/pfs_session_connect_attrs_size_basic.test47
-rw-r--r--mysql-test/suite/vcol/t/rpl_vcol.test2
-rw-r--r--mysql-test/suite/vcol/t/vcol_blocked_sql_funcs_innodb.test2
-rw-r--r--mysql-test/suite/vcol/t/vcol_column_def_options_innodb.test2
-rw-r--r--mysql-test/suite/vcol/t/vcol_handler_innodb.test2
-rw-r--r--mysql-test/suite/vcol/t/vcol_ins_upd_innodb.test2
-rw-r--r--mysql-test/suite/vcol/t/vcol_keys_innodb.test2
-rw-r--r--mysql-test/suite/vcol/t/vcol_non_stored_columns_innodb.test2
-rw-r--r--mysql-test/suite/vcol/t/vcol_partition_innodb.test2
-rw-r--r--mysql-test/suite/vcol/t/vcol_select_innodb.test2
-rw-r--r--mysql-test/suite/vcol/t/vcol_supported_sql_funcs_innodb.test2
-rw-r--r--mysql-test/suite/vcol/t/vcol_trigger_sp_innodb.test2
-rw-r--r--mysql-test/suite/vcol/t/vcol_view_innodb.test2
-rw-r--r--mysql-test/t/alter_table.test376
-rw-r--r--mysql-test/t/alter_table_online.test46
-rw-r--r--mysql-test/t/ctype_utf8mb4.test3
-rw-r--r--mysql-test/t/events_restart.test22
-rw-r--r--mysql-test/t/flush_read_lock.test40
-rw-r--r--mysql-test/t/innodb_mysql_sync.test316
-rw-r--r--mysql-test/t/log_state.test4
-rw-r--r--mysql-test/t/log_tables.test14
-rw-r--r--mysql-test/t/lowercase_table4.test4
-rw-r--r--mysql-test/t/mdl_sync.test319
-rw-r--r--mysql-test/t/partition.test43
-rw-r--r--mysql-test/t/partition_binlog.test2
-rw-r--r--mysql-test/t/partition_debug_sync.test57
-rw-r--r--mysql-test/t/partition_innodb.test13
-rw-r--r--mysql-test/t/partition_mgm_err.test2
-rw-r--r--mysql-test/t/partition_myisam.test135
-rw-r--r--mysql-test/t/partition_pruning.test1
-rw-r--r--mysql-test/t/partition_truncate.test2
-rw-r--r--mysql-test/t/query_cache.test22
-rw-r--r--mysql-test/t/signal.test11
-rw-r--r--mysql-test/t/sp-bugs.test10
-rw-r--r--mysql-test/t/sp-error.test970
-rw-r--r--mysql-test/t/strict.test20
-rw-r--r--mysql-test/t/system_mysql_db_fix40123.test2
-rw-r--r--mysql-test/t/system_mysql_db_fix50030.test2
-rw-r--r--mysql-test/t/system_mysql_db_fix50117.test5
-rw-r--r--mysql-test/t/temp_table.test25
-rw-r--r--mysql-test/t/truncate_coverage.test4
-rw-r--r--mysql-test/t/upgrade.test2
-rw-r--r--mysys/CMakeLists.txt7
-rw-r--r--mysys/array.c4
-rw-r--r--mysys/hash.c3
-rw-r--r--mysys/lf_alloc-pin.c2
-rw-r--r--mysys/lf_dynarray.c2
-rw-r--r--mysys/ma_dyncol.c19
-rw-r--r--mysys/md5.c.THIS (renamed from mysys/md5.c)0
-rw-r--r--mysys/mf_dirname.c2
-rw-r--r--mysys/mf_format.c2
-rw-r--r--mysys/mf_iocache.c8
-rw-r--r--mysys/mf_iocache2.c2
-rw-r--r--mysys/my_access.c63
-rw-r--r--mysys/my_aes.c.THIS (renamed from mysys/my_aes.c)0
-rw-r--r--mysys/my_alloc.c2
-rw-r--r--mysys/my_bitmap.c111
-rw-r--r--mysys/my_compare.c4
-rw-r--r--mysys/my_compress.c2
-rw-r--r--mysys/my_conio.c6
-rw-r--r--mysys/my_default.c (renamed from mysys/default.c)7
-rw-r--r--mysys/my_error.c176
-rw-r--r--mysys/my_file.c8
-rw-r--r--mysys/my_getopt.c1
-rw-r--r--mysys/my_rnd.c37
-rw-r--r--mysys/my_thr_init.c14
-rw-r--r--mysys/my_uuid.c3
-rw-r--r--mysys/psi_noop.c49
-rw-r--r--mysys/rijndael.c1379
-rw-r--r--mysys/safemalloc.c6
-rw-r--r--mysys/sha1.c422
-rw-r--r--mysys/stacktrace.c4
-rw-r--r--mysys/string.c74
-rw-r--r--mysys/testhash.c4
-rw-r--r--mysys_ssl/CMakeLists.txt48
-rw-r--r--mysys_ssl/crypt_genhash_impl.cc454
-rw-r--r--mysys_ssl/my_aes.cc278
-rw-r--r--mysys_ssl/my_md5.cc68
-rw-r--r--mysys_ssl/my_rnd.cc103
-rw-r--r--mysys_ssl/my_sha1.cc151
-rw-r--r--mysys_ssl/my_sha2.cc68
-rw-r--r--plugin/feedback/utils.cc5
-rw-r--r--plugin/handler_socket/handlersocket/database.cpp2
-rw-r--r--plugin/qc_info/qc_info.cc2
-rw-r--r--plugin/query_response_time/query_response_time.h4
-rw-r--r--scripts/CMakeLists.txt2
-rw-r--r--scripts/mysql_performance_tables.sql78
-rw-r--r--scripts/mysql_system_tables.sql123
-rw-r--r--scripts/mysql_system_tables_data.sql8
-rw-r--r--scripts/mysql_system_tables_fix.sql26
-rw-r--r--sql-common/client.c2
-rw-r--r--sql-common/client_authentication.cc253
-rw-r--r--sql-common/my_time.c2
-rw-r--r--sql/CMakeLists.txt11
-rw-r--r--sql/create_options.cc4
-rw-r--r--sql/debug_sync.cc37
-rw-r--r--sql/debug_sync.h3
-rw-r--r--sql/derror.cc22
-rw-r--r--sql/discover.cc2
-rw-r--r--sql/discover.h8
-rw-r--r--sql/event_data_objects.cc2
-rw-r--r--sql/event_db_repository.cc8
-rw-r--r--sql/event_parse_data.cc4
-rw-r--r--sql/event_scheduler.cc7
-rw-r--r--sql/events.cc25
-rw-r--r--sql/field.cc259
-rw-r--r--sql/field.h103
-rw-r--r--sql/field_conv.cc28
-rw-r--r--sql/filesort.cc11
-rw-r--r--sql/ha_ndbcluster.cc122
-rw-r--r--sql/ha_ndbcluster_binlog.cc46
-rw-r--r--sql/ha_ndbcluster_cond.cc4
-rw-r--r--sql/ha_partition.cc2774
-rw-r--r--sql/ha_partition.h257
-rw-r--r--sql/handler.cc425
-rw-r--r--sql/handler.h912
-rw-r--r--sql/hash_filo.h93
-rw-r--r--sql/hostname.cc713
-rw-r--r--sql/hostname.h166
-rw-r--r--sql/innodb_priv.h4
-rw-r--r--sql/item.cc77
-rw-r--r--sql/item.h15
-rw-r--r--sql/item_buff.cc4
-rw-r--r--sql/item_cmpfunc.cc50
-rw-r--r--sql/item_create.cc28
-rw-r--r--sql/item_func.cc62
-rw-r--r--sql/item_func.h4
-rw-r--r--sql/item_geofunc.cc2
-rw-r--r--sql/item_strfunc.cc238
-rw-r--r--sql/item_strfunc.h23
-rw-r--r--sql/item_subselect.cc11
-rw-r--r--sql/item_sum.cc18
-rw-r--r--sql/item_timefunc.cc54
-rw-r--r--sql/item_timefunc.h4
-rw-r--r--sql/item_xmlfunc.cc8
-rw-r--r--sql/key.cc34
-rw-r--r--sql/key.h2
-rw-r--r--sql/lex.h10
-rw-r--r--sql/lock.cc24
-rw-r--r--sql/log.cc50
-rw-r--r--sql/log_event.cc249
-rw-r--r--sql/log_event_old.cc16
-rw-r--r--sql/mdl.cc471
-rw-r--r--sql/mdl.h120
-rw-r--r--sql/multi_range_read.cc8
-rw-r--r--sql/my_decimal.cc8
-rw-r--r--sql/mysqld.cc128
-rw-r--r--sql/mysqld.h12
-rw-r--r--sql/net_serv.cc4
-rw-r--r--sql/opt_index_cond_pushdown.cc4
-rw-r--r--sql/opt_range.cc143
-rw-r--r--sql/opt_range.h4
-rw-r--r--sql/opt_range_mrr.cc4
-rw-r--r--sql/opt_subselect.cc19
-rw-r--r--sql/opt_sum.cc2
-rw-r--r--sql/opt_table_elimination.cc6
-rw-r--r--sql/partition_info.cc963
-rw-r--r--sql/partition_info.h85
-rw-r--r--sql/password.c115
-rw-r--r--sql/protocol.cc52
-rw-r--r--sql/rpl_mi.cc28
-rw-r--r--sql/rpl_mi.h2
-rw-r--r--sql/rpl_record.cc6
-rw-r--r--sql/rpl_rli.cc14
-rw-r--r--sql/rpl_utility.cc6
-rw-r--r--sql/rpl_utility.h2
-rw-r--r--sql/set_var.cc6
-rw-r--r--sql/share/errmsg-utf8.txt1211
-rw-r--r--sql/signal_handler.cc2
-rw-r--r--sql/slave.cc59
-rw-r--r--sql/sp.cc26
-rw-r--r--sql/sp_head.cc364
-rw-r--r--sql/sp_head.h74
-rw-r--r--sql/sp_pcontext.cc563
-rw-r--r--sql/sp_pcontext.h813
-rw-r--r--sql/sp_rcontext.cc741
-rw-r--r--sql/sp_rcontext.h589
-rw-r--r--sql/spatial.h4
-rw-r--r--sql/sql_acl.cc137
-rw-r--r--sql/sql_acl.h10
-rw-r--r--sql/sql_admin.cc142
-rw-r--r--sql/sql_admin.h81
-rw-r--r--sql/sql_alter.cc273
-rw-r--r--sql/sql_alter.h395
-rw-r--r--sql/sql_analyse.cc14
-rw-r--r--sql/sql_array.h124
-rw-r--r--sql/sql_audit.h8
-rw-r--r--sql/sql_base.cc821
-rw-r--r--sql/sql_base.h72
-rw-r--r--sql/sql_cache.cc146
-rw-r--r--sql/sql_cache.h6
-rw-r--r--sql/sql_class.cc316
-rw-r--r--sql/sql_class.h191
-rw-r--r--sql/sql_client.cc2
-rw-r--r--sql/sql_cmd.h1
-rw-r--r--sql/sql_connect.cc156
-rw-r--r--sql/sql_const.h12
-rw-r--r--sql/sql_crypt.h2
-rw-r--r--sql/sql_db.cc9
-rw-r--r--sql/sql_derived.cc6
-rw-r--r--sql/sql_error.cc397
-rw-r--r--sql/sql_error.h791
-rw-r--r--sql/sql_get_diagnostics.cc340
-rw-r--r--sql/sql_get_diagnostics.h318
-rw-r--r--sql/sql_handler.cc18
-rw-r--r--sql/sql_insert.cc48
-rw-r--r--sql/sql_join_cache.cc4
-rw-r--r--sql/sql_join_cache.h4
-rw-r--r--sql/sql_lex.cc55
-rw-r--r--sql/sql_lex.h155
-rw-r--r--sql/sql_load.cc44
-rw-r--r--sql/sql_locale.cc2
-rw-r--r--sql/sql_parse.cc277
-rw-r--r--sql/sql_parse.h5
-rw-r--r--sql/sql_partition.cc1052
-rw-r--r--sql/sql_partition.h22
-rw-r--r--sql/sql_partition_admin.cc719
-rw-r--r--sql/sql_partition_admin.h189
-rw-r--r--sql/sql_plist.h30
-rw-r--r--sql/sql_plugin.cc6
-rw-r--r--sql/sql_plugin_services.h6
-rw-r--r--sql/sql_prepare.cc113
-rw-r--r--sql/sql_prepare.h12
-rw-r--r--sql/sql_priv.h2
-rw-r--r--sql/sql_profile.cc2
-rw-r--r--sql/sql_reload.cc4
-rw-r--r--sql/sql_rename.cc6
-rw-r--r--sql/sql_repl.cc36
-rw-r--r--sql/sql_select.cc170
-rw-r--r--sql/sql_servers.cc8
-rw-r--r--sql/sql_show.cc185
-rw-r--r--sql/sql_signal.cc122
-rw-r--r--sql/sql_signal.h76
-rw-r--r--sql/sql_statistics.cc35
-rw-r--r--sql/sql_statistics.h11
-rw-r--r--sql/sql_string.cc71
-rw-r--r--sql/sql_string.h10
-rw-r--r--sql/sql_table.cc5177
-rw-r--r--sql/sql_table.h57
-rw-r--r--sql/sql_tablespace.cc4
-rw-r--r--sql/sql_test.cc2
-rw-r--r--sql/sql_time.cc18
-rw-r--r--sql/sql_time.h7
-rw-r--r--sql/sql_trigger.cc53
-rw-r--r--sql/sql_trigger.h4
-rw-r--r--sql/sql_truncate.cc54
-rw-r--r--sql/sql_truncate.h17
-rw-r--r--sql/sql_union.cc2
-rw-r--r--sql/sql_update.cc8
-rw-r--r--sql/sql_view.cc30
-rw-r--r--sql/sql_view.h1
-rw-r--r--sql/sql_yacc.yy1009
-rw-r--r--sql/strfunc.cc11
-rw-r--r--sql/strfunc.h2
-rw-r--r--sql/structs.h8
-rw-r--r--sql/sys_vars.cc242
-rw-r--r--sql/sys_vars.h2
-rw-r--r--sql/table.cc219
-rw-r--r--sql/table.h58
-rw-r--r--sql/thr_malloc.cc7
-rw-r--r--sql/transaction.cc4
-rw-r--r--sql/tztime.cc13
-rw-r--r--sql/unireg.cc79
-rw-r--r--sql/unireg.h7
-rw-r--r--storage/archive/ha_archive.cc403
-rw-r--r--storage/archive/ha_archive.h44
-rw-r--r--storage/cassandra/ha_cassandra.cc4
-rw-r--r--storage/csv/ha_tina.cc4
-rw-r--r--storage/example/ha_example.cc170
-rw-r--r--storage/example/ha_example.h18
-rw-r--r--storage/federated/ha_federated.cc6
-rw-r--r--storage/federatedx/ha_federatedx.cc16
-rw-r--r--storage/heap/ha_heap.cc12
-rw-r--r--storage/heap/hp_create.c10
-rw-r--r--storage/heap/hp_test2.c4
-rw-r--r--storage/innobase/CMakeLists.txt59
-rw-r--r--storage/innobase/api/api0api.cc3859
-rw-r--r--storage/innobase/api/api0misc.cc206
-rw-r--r--storage/innobase/btr/btr0btr.cc559
-rw-r--r--storage/innobase/btr/btr0cur.cc720
-rw-r--r--storage/innobase/btr/btr0pcur.cc73
-rw-r--r--storage/innobase/btr/btr0sea.cc36
-rw-r--r--storage/innobase/buf/buf0buddy.cc2
-rw-r--r--storage/innobase/buf/buf0buf.cc154
-rw-r--r--storage/innobase/buf/buf0dblwr.cc161
-rw-r--r--storage/innobase/buf/buf0dump.cc11
-rw-r--r--storage/innobase/buf/buf0flu.cc689
-rw-r--r--storage/innobase/buf/buf0lru.cc569
-rw-r--r--storage/innobase/buf/buf0rea.cc35
-rw-r--r--storage/innobase/dict/dict0boot.cc49
-rw-r--r--storage/innobase/dict/dict0crea.cc577
-rw-r--r--storage/innobase/dict/dict0dict.cc1502
-rw-r--r--storage/innobase/dict/dict0load.cc857
-rw-r--r--storage/innobase/dict/dict0mem.cc195
-rw-r--r--storage/innobase/dict/dict0stats.cc2833
-rw-r--r--storage/innobase/dict/dict0stats_bg.cc402
-rw-r--r--storage/innobase/fil/fil0fil.cc2946
-rw-r--r--storage/innobase/fsp/fsp0fsp.cc250
-rw-r--r--storage/innobase/fts/fts0ast.cc98
-rw-r--r--storage/innobase/fts/fts0blex.cc232
-rw-r--r--storage/innobase/fts/fts0blex.l2
-rw-r--r--storage/innobase/fts/fts0config.cc62
-rw-r--r--storage/innobase/fts/fts0fts.cc846
-rw-r--r--storage/innobase/fts/fts0opt.cc305
-rw-r--r--storage/innobase/fts/fts0pars.cc2
-rw-r--r--storage/innobase/fts/fts0que.cc571
-rw-r--r--storage/innobase/fts/fts0sql.cc12
-rw-r--r--storage/innobase/fts/fts0tlex.cc160
-rw-r--r--storage/innobase/fts/fts0tlex.l2
-rw-r--r--storage/innobase/ha/ha0ha.cc16
-rw-r--r--storage/innobase/ha/hash0hash.cc8
-rw-r--r--storage/innobase/handler/ha_innodb.cc4112
-rw-r--r--storage/innobase/handler/ha_innodb.h287
-rw-r--r--storage/innobase/handler/handler0alter.cc5557
-rw-r--r--storage/innobase/handler/i_s.cc1160
-rw-r--r--storage/innobase/handler/i_s.h6
-rw-r--r--storage/innobase/ibuf/ibuf0ibuf.cc469
-rw-r--r--storage/innobase/include/api0api.h1282
-rw-r--r--storage/innobase/include/api0misc.h78
-rw-r--r--storage/innobase/include/btr0btr.h152
-rw-r--r--storage/innobase/include/btr0btr.ic19
-rw-r--r--storage/innobase/include/btr0cur.h137
-rw-r--r--storage/innobase/include/btr0pcur.h32
-rw-r--r--storage/innobase/include/btr0pcur.ic39
-rw-r--r--storage/innobase/include/btr0sea.h12
-rw-r--r--storage/innobase/include/btr0sea.ic2
-rw-r--r--storage/innobase/include/btr0types.h8
-rw-r--r--storage/innobase/include/buf0buf.h67
-rw-r--r--storage/innobase/include/buf0buf.ic10
-rw-r--r--storage/innobase/include/buf0dblwr.h5
-rw-r--r--storage/innobase/include/buf0flu.h96
-rw-r--r--storage/innobase/include/buf0flu.ic7
-rw-r--r--storage/innobase/include/buf0lru.h24
-rw-r--r--storage/innobase/include/buf0types.h29
-rw-r--r--storage/innobase/include/data0data.h142
-rw-r--r--storage/innobase/include/data0data.ic28
-rw-r--r--storage/innobase/include/data0type.h10
-rw-r--r--storage/innobase/include/data0types.h4
-rw-r--r--storage/innobase/include/db0err.h39
-rw-r--r--storage/innobase/include/dict0boot.h70
-rw-r--r--storage/innobase/include/dict0boot.ic23
-rw-r--r--storage/innobase/include/dict0crea.h69
-rw-r--r--storage/innobase/include/dict0dict.h752
-rw-r--r--storage/innobase/include/dict0dict.ic328
-rw-r--r--storage/innobase/include/dict0load.h105
-rw-r--r--storage/innobase/include/dict0mem.h297
-rw-r--r--storage/innobase/include/dict0stats.h146
-rw-r--r--storage/innobase/include/dict0stats.ic250
-rw-r--r--storage/innobase/include/dict0stats_bg.h116
-rw-r--r--storage/innobase/include/dict0types.h25
-rw-r--r--storage/innobase/include/dyn0dyn.h7
-rw-r--r--storage/innobase/include/dyn0dyn.ic4
-rw-r--r--storage/innobase/include/fil0fil.h361
-rw-r--r--storage/innobase/include/fsp0fsp.h86
-rw-r--r--storage/innobase/include/fsp0fsp.ic148
-rw-r--r--storage/innobase/include/fts0ast.h76
-rw-r--r--storage/innobase/include/fts0fts.h183
-rw-r--r--storage/innobase/include/fts0priv.h195
-rw-r--r--storage/innobase/include/fts0priv.ic49
-rw-r--r--storage/innobase/include/fts0types.h52
-rw-r--r--storage/innobase/include/fts0types.ic40
-rw-r--r--storage/innobase/include/ha0ha.h13
-rw-r--r--storage/innobase/include/ha0ha.ic4
-rw-r--r--storage/innobase/include/ha0storage.h2
-rw-r--r--storage/innobase/include/ha0storage.ic6
-rw-r--r--storage/innobase/include/ha_prototypes.h216
-rw-r--r--storage/innobase/include/handler0alter.h86
-rw-r--r--storage/innobase/include/hash0hash.h16
-rw-r--r--storage/innobase/include/hash0hash.ic4
-rw-r--r--storage/innobase/include/ibuf0ibuf.h65
-rw-r--r--storage/innobase/include/ibuf0ibuf.ic31
-rw-r--r--storage/innobase/include/ibuf0types.h2
-rw-r--r--storage/innobase/include/lock0iter.h4
-rw-r--r--storage/innobase/include/lock0lock.h77
-rw-r--r--storage/innobase/include/lock0priv.h10
-rw-r--r--storage/innobase/include/lock0types.h4
-rw-r--r--storage/innobase/include/log0log.h43
-rw-r--r--storage/innobase/include/log0log.ic12
-rw-r--r--storage/innobase/include/log0recv.h41
-rw-r--r--storage/innobase/include/mach0data.h34
-rw-r--r--storage/innobase/include/mach0data.ic99
-rw-r--r--storage/innobase/include/mem0dbg.h2
-rw-r--r--storage/innobase/include/mem0mem.h12
-rw-r--r--storage/innobase/include/mem0mem.ic11
-rw-r--r--storage/innobase/include/mem0pool.h9
-rw-r--r--storage/innobase/include/mtr0log.h21
-rw-r--r--storage/innobase/include/mtr0mtr.h15
-rw-r--r--storage/innobase/include/mtr0mtr.ic4
-rw-r--r--storage/innobase/include/mtr0types.h2
-rw-r--r--storage/innobase/include/os0file.h101
-rw-r--r--storage/innobase/include/os0sync.h74
-rw-r--r--storage/innobase/include/os0sync.ic10
-rw-r--r--storage/innobase/include/page0cur.h32
-rw-r--r--storage/innobase/include/page0cur.ic29
-rw-r--r--storage/innobase/include/page0page.h24
-rw-r--r--storage/innobase/include/page0page.ic37
-rw-r--r--storage/innobase/include/page0types.h40
-rw-r--r--storage/innobase/include/page0zip.h64
-rw-r--r--storage/innobase/include/page0zip.ic73
-rw-r--r--storage/innobase/include/pars0pars.h61
-rw-r--r--storage/innobase/include/pars0sym.h8
-rw-r--r--storage/innobase/include/pars0types.h36
-rw-r--r--storage/innobase/include/que0que.h12
-rw-r--r--storage/innobase/include/que0types.h9
-rw-r--r--storage/innobase/include/read0read.h23
-rw-r--r--storage/innobase/include/read0read.ic93
-rw-r--r--storage/innobase/include/read0types.h4
-rw-r--r--storage/innobase/include/rem0cmp.h27
-rw-r--r--storage/innobase/include/rem0rec.h274
-rw-r--r--storage/innobase/include/rem0rec.ic29
-rw-r--r--storage/innobase/include/rem0types.h11
-rw-r--r--storage/innobase/include/row0ext.h2
-rw-r--r--storage/innobase/include/row0ftsort.h42
-rw-r--r--storage/innobase/include/row0import.h91
-rw-r--r--storage/innobase/include/row0import.ic25
-rw-r--r--storage/innobase/include/row0ins.h129
-rw-r--r--storage/innobase/include/row0log.h241
-rw-r--r--storage/innobase/include/row0log.ic84
-rw-r--r--storage/innobase/include/row0merge.h282
-rw-r--r--storage/innobase/include/row0mysql.h199
-rw-r--r--storage/innobase/include/row0purge.h19
-rw-r--r--storage/innobase/include/row0quiesce.h74
-rw-r--r--storage/innobase/include/row0quiesce.ic26
-rw-r--r--storage/innobase/include/row0row.h92
-rw-r--r--storage/innobase/include/row0row.ic27
-rw-r--r--storage/innobase/include/row0sel.h33
-rw-r--r--storage/innobase/include/row0types.h38
-rw-r--r--storage/innobase/include/row0uins.h8
-rw-r--r--storage/innobase/include/row0umod.h8
-rw-r--r--storage/innobase/include/row0undo.h2
-rw-r--r--storage/innobase/include/row0upd.h61
-rw-r--r--storage/innobase/include/row0upd.ic19
-rw-r--r--storage/innobase/include/row0vers.h18
-rw-r--r--storage/innobase/include/srv0mon.h142
-rw-r--r--storage/innobase/include/srv0srv.h260
-rw-r--r--storage/innobase/include/srv0start.h36
-rw-r--r--storage/innobase/include/sync0arr.h4
-rw-r--r--storage/innobase/include/sync0rw.h124
-rw-r--r--storage/innobase/include/sync0rw.ic118
-rw-r--r--storage/innobase/include/sync0sync.h75
-rw-r--r--storage/innobase/include/sync0sync.ic87
-rw-r--r--storage/innobase/include/sync0types.h5
-rw-r--r--storage/innobase/include/trx0i_s.h48
-rw-r--r--storage/innobase/include/trx0purge.h22
-rw-r--r--storage/innobase/include/trx0rec.h41
-rw-r--r--storage/innobase/include/trx0rec.ic2
-rw-r--r--storage/innobase/include/trx0roll.h44
-rw-r--r--storage/innobase/include/trx0rseg.h8
-rw-r--r--storage/innobase/include/trx0sys.h22
-rw-r--r--storage/innobase/include/trx0trx.h139
-rw-r--r--storage/innobase/include/trx0trx.ic13
-rw-r--r--storage/innobase/include/trx0types.h51
-rw-r--r--storage/innobase/include/trx0undo.h16
-rw-r--r--storage/innobase/include/trx0undo.ic15
-rw-r--r--storage/innobase/include/univ.i52
-rw-r--r--storage/innobase/include/usr0sess.h2
-rw-r--r--storage/innobase/include/usr0types.h2
-rw-r--r--storage/innobase/include/ut0bh.h4
-rw-r--r--storage/innobase/include/ut0counter.h203
-rw-r--r--storage/innobase/include/ut0crc32.h3
-rw-r--r--storage/innobase/include/ut0dbg.h4
-rw-r--r--storage/innobase/include/ut0list.h11
-rw-r--r--storage/innobase/include/ut0lst.h3
-rw-r--r--storage/innobase/include/ut0rbt.h20
-rw-r--r--storage/innobase/include/ut0ut.h38
-rw-r--r--storage/innobase/include/ut0vec.h12
-rw-r--r--storage/innobase/include/ut0vec.ic13
-rw-r--r--storage/innobase/include/ut0wqueue.h6
-rw-r--r--storage/innobase/lock/lock0lock.cc344
-rw-r--r--storage/innobase/lock/lock0wait.cc49
-rw-r--r--storage/innobase/log/log0log.cc270
-rw-r--r--storage/innobase/log/log0recv.cc455
-rw-r--r--storage/innobase/mem/mem0dbg.cc5
-rw-r--r--storage/innobase/mem/mem0pool.cc6
-rw-r--r--storage/innobase/mtr/mtr0log.cc73
-rw-r--r--storage/innobase/mtr/mtr0mtr.cc107
-rw-r--r--storage/innobase/os/os0file.cc1925
-rw-r--r--storage/innobase/os/os0sync.cc45
-rw-r--r--storage/innobase/os/os0thread.cc6
-rw-r--r--storage/innobase/page/page0cur.cc87
-rw-r--r--storage/innobase/page/page0page.cc96
-rw-r--r--storage/innobase/page/page0zip.cc117
-rw-r--r--storage/innobase/pars/lexyy.cc927
-rw-r--r--storage/innobase/pars/pars0lex.l2
-rw-r--r--storage/innobase/pars/pars0opt.cc4
-rw-r--r--storage/innobase/pars/pars0pars.cc86
-rw-r--r--storage/innobase/pars/pars0sym.cc2
-rw-r--r--storage/innobase/que/que0que.cc4
-rw-r--r--storage/innobase/read/read0read.cc76
-rw-r--r--storage/innobase/rem/rem0cmp.cc278
-rw-r--r--storage/innobase/rem/rem0rec.cc110
-rw-r--r--storage/innobase/row/row0ext.cc2
-rw-r--r--storage/innobase/row/row0ftsort.cc216
-rw-r--r--storage/innobase/row/row0import.cc3806
-rw-r--r--storage/innobase/row/row0ins.cc1234
-rw-r--r--storage/innobase/row/row0log.cc3219
-rw-r--r--storage/innobase/row/row0merge.cc2358
-rw-r--r--storage/innobase/row/row0mysql.cc1670
-rw-r--r--storage/innobase/row/row0purge.cc443
-rw-r--r--storage/innobase/row/row0quiesce.cc702
-rw-r--r--storage/innobase/row/row0row.cc199
-rw-r--r--storage/innobase/row/row0sel.cc348
-rw-r--r--storage/innobase/row/row0uins.cc194
-rw-r--r--storage/innobase/row/row0umod.cc508
-rw-r--r--storage/innobase/row/row0undo.cc21
-rw-r--r--storage/innobase/row/row0upd.cc507
-rw-r--r--storage/innobase/row/row0vers.cc88
-rw-r--r--storage/innobase/srv/srv0conc.cc29
-rw-r--r--storage/innobase/srv/srv0mon.cc221
-rw-r--r--storage/innobase/srv/srv0srv.cc585
-rw-r--r--storage/innobase/srv/srv0start.cc1765
-rw-r--r--storage/innobase/sync/sync0arr.cc28
-rw-r--r--storage/innobase/sync/sync0rw.cc186
-rw-r--r--storage/innobase/sync/sync0sync.cc161
-rw-r--r--storage/innobase/trx/trx0i_s.cc20
-rw-r--r--storage/innobase/trx/trx0purge.cc70
-rw-r--r--storage/innobase/trx/trx0rec.cc156
-rw-r--r--storage/innobase/trx/trx0roll.cc64
-rw-r--r--storage/innobase/trx/trx0sys.cc68
-rw-r--r--storage/innobase/trx/trx0trx.cc333
-rw-r--r--storage/innobase/trx/trx0undo.cc28
-rw-r--r--storage/innobase/ut/ut0crc32.cc10
-rw-r--r--storage/innobase/ut/ut0mem.cc7
-rw-r--r--storage/innobase/ut/ut0rbt.cc2
-rw-r--r--storage/innobase/ut/ut0ut.cc98
-rw-r--r--storage/innobase/ut/ut0vec.cc4
-rw-r--r--storage/innobase/ut/ut0wqueue.cc2
-rw-r--r--storage/maria/ha_maria.cc42
-rw-r--r--storage/maria/ma_bitmap.c10
-rw-r--r--storage/maria/ma_blockrec.c8
-rw-r--r--storage/maria/ma_cache.c2
-rw-r--r--storage/maria/ma_check.c13
-rw-r--r--storage/maria/ma_checkpoint.c8
-rw-r--r--storage/maria/ma_close.c33
-rw-r--r--storage/maria/ma_commit.c4
-rw-r--r--storage/maria/ma_create.c16
-rw-r--r--storage/maria/ma_delete.c6
-rw-r--r--storage/maria/ma_dynrec.c2
-rw-r--r--storage/maria/ma_extra.c2
-rw-r--r--storage/maria/ma_ft_boolean_search.c21
-rw-r--r--storage/maria/ma_ft_nlq_search.c25
-rw-r--r--storage/maria/ma_info.c2
-rw-r--r--storage/maria/ma_key_recover.c12
-rw-r--r--storage/maria/ma_loghandler.c6
-rw-r--r--storage/maria/ma_open.c41
-rw-r--r--storage/maria/ma_packrec.c4
-rw-r--r--storage/maria/ma_recovery.c2
-rw-r--r--storage/maria/ma_rt_mbr.c24
-rw-r--r--storage/maria/ma_sort.c16
-rw-r--r--storage/maria/ma_test1.c2
-rw-r--r--storage/maria/ma_test2.c2
-rw-r--r--storage/maria/ma_write.c4
-rw-r--r--storage/maria/maria_def.h1
-rw-r--r--storage/maria/maria_pack.c4
-rw-r--r--storage/maria/trnman.c2
-rwxr-xr-xstorage/maria/unittest/ma_test_all-t4
-rw-r--r--storage/myisam/ha_myisam.cc72
-rw-r--r--storage/myisam/mi_cache.c2
-rw-r--r--storage/myisam/mi_check.c16
-rw-r--r--storage/myisam/mi_close.c9
-rw-r--r--storage/myisam/mi_create.c20
-rw-r--r--storage/myisam/mi_dynrec.c27
-rw-r--r--storage/myisam/mi_extra.c2
-rw-r--r--storage/myisam/mi_open.c45
-rw-r--r--storage/myisam/mi_packrec.c4
-rw-r--r--storage/myisam/mi_test1.c2
-rw-r--r--storage/myisam/mi_test2.c2
-rw-r--r--storage/myisam/myisamchk.c1
-rw-r--r--storage/myisam/myisamlog.c2
-rw-r--r--storage/myisam/myisampack.c7
-rw-r--r--storage/myisam/rt_mbr.c24
-rw-r--r--storage/myisam/sort.c16
-rw-r--r--storage/myisammrg/ha_myisammrg.cc73
-rw-r--r--storage/myisammrg/ha_myisammrg.h6
-rw-r--r--storage/myisammrg/myrg_create.c4
-rw-r--r--storage/myisammrg/myrg_def.h5
-rw-r--r--storage/oqgraph/ha_oqgraph.cc8
-rw-r--r--storage/perfschema/CMakeLists.txt9
-rw-r--r--storage/perfschema/cursor_by_thread_connect_attr.cc71
-rw-r--r--storage/perfschema/cursor_by_thread_connect_attr.h81
-rw-r--r--storage/perfschema/gen_pfs_lex_token.cc4
-rw-r--r--storage/perfschema/ha_perfschema.cc20
-rw-r--r--storage/perfschema/ha_perfschema.h3
-rw-r--r--storage/perfschema/pfs.cc595
-rw-r--r--storage/perfschema/pfs_account.cc5
-rw-r--r--storage/perfschema/pfs_account.h4
-rw-r--r--storage/perfschema/pfs_atomic.h79
-rw-r--r--storage/perfschema/pfs_autosize.cc366
-rw-r--r--storage/perfschema/pfs_digest.cc206
-rw-r--r--storage/perfschema/pfs_digest.h54
-rw-r--r--storage/perfschema/pfs_engine_table.cc281
-rw-r--r--storage/perfschema/pfs_engine_table.h2
-rw-r--r--storage/perfschema/pfs_events.h2
-rw-r--r--storage/perfschema/pfs_events_waits.cc10
-rw-r--r--storage/perfschema/pfs_global.cc72
-rw-r--r--storage/perfschema/pfs_global.h15
-rw-r--r--storage/perfschema/pfs_host.cc5
-rw-r--r--storage/perfschema/pfs_host.h4
-rw-r--r--storage/perfschema/pfs_instr.cc332
-rw-r--r--storage/perfschema/pfs_instr.h98
-rw-r--r--storage/perfschema/pfs_instr_class.cc69
-rw-r--r--storage/perfschema/pfs_instr_class.h64
-rw-r--r--storage/perfschema/pfs_lock.h107
-rw-r--r--storage/perfschema/pfs_server.cc9
-rw-r--r--storage/perfschema/pfs_server.h113
-rw-r--r--storage/perfschema/pfs_setup_actor.cc10
-rw-r--r--storage/perfschema/pfs_setup_actor.h4
-rw-r--r--storage/perfschema/pfs_setup_object.cc12
-rw-r--r--storage/perfschema/pfs_setup_object.h4
-rw-r--r--storage/perfschema/pfs_stat.h167
-rw-r--r--storage/perfschema/pfs_timer.cc71
-rw-r--r--storage/perfschema/pfs_user.cc5
-rw-r--r--storage/perfschema/pfs_user.h4
-rw-r--r--storage/perfschema/pfs_visitor.cc89
-rw-r--r--storage/perfschema/table_esgs_by_thread_by_event_name.cc4
-rw-r--r--storage/perfschema/table_esgs_by_thread_by_event_name.h2
-rw-r--r--storage/perfschema/table_esgs_global_by_event_name.cc3
-rw-r--r--storage/perfschema/table_esms_by_digest.cc24
-rw-r--r--storage/perfschema/table_esms_by_thread_by_event_name.cc4
-rw-r--r--storage/perfschema/table_esms_by_thread_by_event_name.h2
-rw-r--r--storage/perfschema/table_esms_global_by_event_name.cc3
-rw-r--r--storage/perfschema/table_events_stages.cc4
-rw-r--r--storage/perfschema/table_events_stages.h2
-rw-r--r--storage/perfschema/table_events_statements.cc6
-rw-r--r--storage/perfschema/table_events_statements.h2
-rw-r--r--storage/perfschema/table_events_waits.cc7
-rw-r--r--storage/perfschema/table_events_waits.h2
-rw-r--r--storage/perfschema/table_events_waits_summary.cc10
-rw-r--r--storage/perfschema/table_ews_by_thread_by_event_name.cc4
-rw-r--r--storage/perfschema/table_ews_by_thread_by_event_name.h2
-rw-r--r--storage/perfschema/table_ews_global_by_event_name.cc8
-rw-r--r--storage/perfschema/table_helper.cc27
-rw-r--r--storage/perfschema/table_helper.h4
-rw-r--r--storage/perfschema/table_host_cache.cc6
-rw-r--r--storage/perfschema/table_os_global_by_type.cc9
-rw-r--r--storage/perfschema/table_session_account_connect_attrs.cc70
-rw-r--r--storage/perfschema/table_session_account_connect_attrs.h50
-rw-r--r--storage/perfschema/table_session_connect.cc268
-rw-r--r--storage/perfschema/table_session_connect.h77
-rw-r--r--storage/perfschema/table_session_connect_attrs.cc43
-rw-r--r--storage/perfschema/table_session_connect_attrs.h47
-rw-r--r--storage/perfschema/table_setup_actors.cc41
-rw-r--r--storage/perfschema/table_setup_objects.cc39
-rw-r--r--storage/perfschema/table_socket_instances.cc4
-rw-r--r--storage/perfschema/table_socket_instances.h2
-rw-r--r--storage/perfschema/table_sync_instances.cc8
-rw-r--r--storage/perfschema/table_sync_instances.h4
-rw-r--r--storage/perfschema/table_threads.cc41
-rw-r--r--storage/perfschema/table_threads.h6
-rw-r--r--storage/perfschema/table_tiws_by_index_usage.cc12
-rw-r--r--storage/perfschema/unittest/CMakeLists.txt11
-rw-r--r--storage/perfschema/unittest/pfs-t.cc59
-rw-r--r--storage/perfschema/unittest/pfs_account-oom-t.cc1
-rw-r--r--storage/perfschema/unittest/pfs_connect_attr-t.cc345
-rw-r--r--storage/perfschema/unittest/pfs_host-oom-t.cc1
-rw-r--r--storage/perfschema/unittest/pfs_instr-oom-t.cc19
-rw-r--r--storage/perfschema/unittest/pfs_instr-t.cc42
-rw-r--r--storage/perfschema/unittest/pfs_instr_class-t.cc2
-rw-r--r--storage/sphinx/ha_sphinx.cc4
-rw-r--r--strings/ctype-big5.c4
-rw-r--r--strings/ctype-bin.c10
-rw-r--r--strings/ctype-gbk.c4
-rw-r--r--strings/ctype-mb.c6
-rw-r--r--strings/ctype-simple.c8
-rw-r--r--strings/ctype-tis620.c4
-rw-r--r--strings/ctype-uca.c2
-rw-r--r--strings/ctype-ucs2.c10
-rw-r--r--strings/ctype-utf8.c4
-rw-r--r--strings/ctype.c141
-rw-r--r--strings/decimal.c48
-rw-r--r--strings/dtoa.c6
-rw-r--r--strings/my_vsnprintf.c18
-rw-r--r--strings/str2int.c2
-rw-r--r--tests/CMakeLists.txt2
-rw-r--r--tests/mysql_client_fw.c2
-rw-r--r--tests/mysql_client_test.c5
1378 files changed, 121559 insertions, 44549 deletions
diff --git a/BUILD/SETUP.sh b/BUILD/SETUP.sh
index 22f6371b069..bde095b0aa8 100755
--- a/BUILD/SETUP.sh
+++ b/BUILD/SETUP.sh
@@ -165,8 +165,7 @@ valgrind_flags="$valgrind_flags -DMYSQL_SERVER_SUFFIX=-valgrind-max"
valgrind_configs="--with-valgrind"
#
# Used in -debug builds
-debug_cflags="-DUNIV_MUST_NOT_INLINE -DEXTRA_DEBUG"
-debug_cflags="$debug_cflags -DSAFE_MUTEX -DSAFEMALLOC"
+debug_cflags="-DEXTRA_DEBUG -DSAFE_MUTEX -DSAFEMALLOC"
error_inject="--with-error-inject "
#
# Base C++ flags for all builds
diff --git a/CMakeLists.txt b/CMakeLists.txt
index f73150bfc4a..a16679337ca 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -139,7 +139,9 @@ INCLUDE(cpack_rpm)
# Add macros
INCLUDE(character_sets)
+INCLUDE(cpu_info)
INCLUDE(zlib)
+INCLUDE(libevent)
INCLUDE(ssl)
INCLUDE(readline)
INCLUDE(libutils)
@@ -204,7 +206,7 @@ ENDFOREACH()
# Add safemutex for debug configurations, except on Windows
# (safemutex has never worked on Windows)
-IF(NOT WIN32)
+IF(NOT WIN32 AND NOT WITH_INNODB_MEMCACHED)
FOREACH(LANG C CXX)
SET(CMAKE_${LANG}_FLAGS_DEBUG "${CMAKE_${LANG}_FLAGS_DEBUG} -DSAFE_MUTEX")
ENDFOREACH()
@@ -256,10 +258,12 @@ INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR}/include)
# Add bundled or system zlib.
MYSQL_CHECK_ZLIB_WITH_COMPRESS()
-# Optionally add bundled yassl/taocrypt or system openssl.
+# Add bundled yassl/taocrypt or system openssl.
MYSQL_CHECK_SSL()
# Add readline or libedit.
MYSQL_CHECK_READLINE()
+# Add libevent
+MYSQL_CHECK_LIBEVENT()
#
# Setup maintainer mode options. Platform checks are
@@ -297,6 +301,7 @@ ADD_SUBDIRECTORY(strings)
ADD_SUBDIRECTORY(vio)
ADD_SUBDIRECTORY(regex)
ADD_SUBDIRECTORY(mysys)
+ADD_SUBDIRECTORY(mysys_ssl)
ADD_SUBDIRECTORY(libmysql)
ADD_SUBDIRECTORY(client)
ADD_SUBDIRECTORY(extra)
diff --git a/TODO b/TODO
index 827d5a139ab..19c32379a1a 100644
--- a/TODO
+++ b/TODO
@@ -32,12 +32,10 @@ Short time TODO:
- add support for host_error()
- Enable performance_schema.host_cache in scripts/mysql_system_tables.sql
-- Add full support for automatic timestamp.
- (remove timestamp handling from ha_write())
- - Timour is working on this
-
- Add Sys_my_bind_addr(); Needed for perfschema
+- Add support for format_section_buff in unireg.cc and table.cc
+
- mysql_socket_shutdown() was removed from vio/viosocket.cc.
It was replaced with inline function in include/mysql/psi/mysql_socket.h
but this doesn't call DisconnectEx(). We should check if we need to
@@ -60,4 +58,3 @@ Sergei's notes:
rpl_slave.cc
XXX in mysql_client_test
net_serv.cc
-
diff --git a/client/CMakeLists.txt b/client/CMakeLists.txt
index e4507f9c8ba..9fed5b4ea19 100644
--- a/client/CMakeLists.txt
+++ b/client/CMakeLists.txt
@@ -15,6 +15,7 @@
INCLUDE_DIRECTORIES(
${CMAKE_SOURCE_DIR}/include
+ ${CMAKE_SOURCE_DIR}/mysys_ssl
${ZLIB_INCLUDE_DIR}
${SSL_INCLUDE_DIRS}
${CMAKE_SOURCE_DIR}/libmysql
@@ -25,6 +26,9 @@ INCLUDE_DIRECTORIES(
${CMAKE_CURRENT_BINARY_DIR}
)
+## We will need libeay32.dll and ssleay32.dll when running client executables.
+COPY_OPENSSL_DLLS(copy_openssl_client)
+
ADD_DEFINITIONS(${SSL_DEFINES})
MYSQL_ADD_EXECUTABLE(mysql completion_hash.cc mysql.cc readline.cc
${CMAKE_SOURCE_DIR}/sql/sql_string.cc)
@@ -78,7 +82,7 @@ ENDIF(WIN32)
ADD_EXECUTABLE(async_example async_example.c)
TARGET_LINK_LIBRARIES(async_example mysqlclient)
-SET_TARGET_PROPERTIES (mysqlcheck mysqldump mysqlimport mysql_upgrade mysqlshow mysqlslap mysql_plugin
+SET_TARGET_PROPERTIES (mysqlcheck mysqldump mysqlimport mysql_upgrade mysqlshow mysqlslap mysql_plugin async_example
PROPERTIES HAS_CXX TRUE)
ADD_DEFINITIONS(-DHAVE_DLOPEN)
diff --git a/client/mysql.cc b/client/mysql.cc
index 3e9edd94ba6..90310f85e95 100644
--- a/client/mysql.cc
+++ b/client/mysql.cc
@@ -3504,9 +3504,9 @@ print_table_data(MYSQL_RES *result)
{
uint length= column_names ? field->name_length : 0;
if (quick)
- length=max(length,field->length);
+ length= MY_MAX(length,field->length);
else
- length=max(length,field->max_length);
+ length= MY_MAX(length,field->max_length);
if (length < 4 && !IS_NOT_NULL(field->flags))
length=4; // Room for "NULL"
field->max_length=length;
@@ -3526,8 +3526,8 @@ print_table_data(MYSQL_RES *result)
field->name,
field->name + name_length);
uint display_length= field->max_length + name_length - numcells;
- tee_fprintf(PAGER, " %-*s |",(int) min(display_length,
- MAX_COLUMN_LENGTH),
+ tee_fprintf(PAGER, " %-*s |",(int) MY_MIN(display_length,
+ MAX_COLUMN_LENGTH),
field->name);
num_flag[off]= IS_NUM(field->type);
}
@@ -3616,9 +3616,9 @@ static int get_field_disp_length(MYSQL_FIELD *field)
uint length= column_names ? field->name_length : 0;
if (quick)
- length= max(length, field->length);
+ length= MY_MAX(length, field->length);
else
- length= max(length, field->max_length);
+ length= MY_MAX(length, field->max_length);
if (length < 4 && !IS_NOT_NULL(field->flags))
length= 4; /* Room for "NULL" */
@@ -3634,6 +3634,7 @@ static int get_field_disp_length(MYSQL_FIELD *field)
@returns The max number of characters in any row of this result
*/
+
static int get_result_width(MYSQL_RES *result)
{
unsigned int len= 0;
diff --git a/client/mysql_upgrade.c b/client/mysql_upgrade.c
index c1f7d028c0d..8b3f0fdec79 100644
--- a/client/mysql_upgrade.c
+++ b/client/mysql_upgrade.c
@@ -585,7 +585,7 @@ static int extract_variable_from_show(DYNAMIC_STRING* ds, char* value)
if ((value_end= strchr(value_start, '\n')) == NULL)
return 1; /* Unexpected result */
- len= (size_t) min(FN_REFLEN, value_end-value_start);
+ len= (size_t) MY_MIN(FN_REFLEN, value_end-value_start);
strncpy(value, value_start, len);
value[len]= '\0';
return 0;
diff --git a/client/mysqladmin.cc b/client/mysqladmin.cc
index 923539a181f..9a8901435c8 100644
--- a/client/mysqladmin.cc
+++ b/client/mysqladmin.cc
@@ -23,7 +23,8 @@
#include <sys/stat.h>
#include <mysql.h>
#include <sql_common.h>
-#include <welcome_copyright_notice.h> /* ORACLE_WELCOME_COPYRIGHT_NOTICE */
+#include <welcome_copyright_notice.h>
+#include <my_rnd.h>
#define ADMIN_VERSION "9.1"
#define MAX_MYSQL_VAR 512
diff --git a/client/mysqlbinlog.cc b/client/mysqlbinlog.cc
index d15bcca4d0c..31a1d583c4f 100644
--- a/client/mysqlbinlog.cc
+++ b/client/mysqlbinlog.cc
@@ -2345,7 +2345,7 @@ static Exit_status dump_local_log_entries(PRINT_EVENT_INFO *print_event_info,
my_off_t length,tmp;
for (length= start_position_mot ; length > 0 ; length-=tmp)
{
- tmp=min(length,sizeof(buff));
+ tmp= MY_MIN(length,sizeof(buff));
if (my_b_read(file, buff, (uint) tmp))
{
error("Failed reading from file.");
diff --git a/client/mysqlcheck.c b/client/mysqlcheck.c
index e96f855719e..e614d628630 100644
--- a/client/mysqlcheck.c
+++ b/client/mysqlcheck.c
@@ -784,8 +784,8 @@ static int handle_request_for_tables(char *tables, uint length)
org= ptr= strmov(strmov(query, op), " TABLE ");
ptr= fix_table_name(ptr, tables);
- strmake(table_name_buff, org, min((int) sizeof(table_name_buff)-1,
- (int) (ptr - org)));
+ strmake(table_name_buff, org, MY_MIN((int) sizeof(table_name_buff)-1,
+ (int) (ptr - org)));
table_name= table_name_buff;
ptr= strxmov(ptr, " ", options, NullS);
query_length= (uint) (ptr - query);
diff --git a/client/mysqltest.cc b/client/mysqltest.cc
index 2ec8414dbcf..87d6c80917a 100644
--- a/client/mysqltest.cc
+++ b/client/mysqltest.cc
@@ -6485,9 +6485,9 @@ int read_line(char *buf, int size)
}
else if ((c == '{' &&
(!my_strnncoll_simple(charset_info, (const uchar*) "while", 5,
- (uchar*) buf, min(5, p - buf), 0) ||
+ (uchar*) buf, MY_MIN(5, p - buf), 0) ||
!my_strnncoll_simple(charset_info, (const uchar*) "if", 2,
- (uchar*) buf, min(2, p - buf), 0))))
+ (uchar*) buf, MY_MIN(2, p - buf), 0))))
{
/* Only if and while commands can be terminated by { */
*p++= c;
diff --git a/cmake/configure.pl b/cmake/configure.pl
index d5c0b9b061a..a71795a3bc8 100644
--- a/cmake/configure.pl
+++ b/cmake/configure.pl
@@ -150,6 +150,16 @@ foreach my $option (@ARGV)
$cmakeargs = $cmakeargs." -DWITH_ZLIB=system";
next;
}
+ if($option =~ /with-libevent=/)
+ {
+ $cmakeargs = $cmakeargs." -DWITH_LIBEVENT=system";
+ next;
+ }
+ if($option =~ /with-libevent/)
+ {
+ $cmakeargs = $cmakeargs." -DWITH_LIBEVENT=bundled";
+ next;
+ }
if($option =~ /with-ssl=/)
{
$cmakeargs = $cmakeargs." -DWITH_SSL=yes";
@@ -237,6 +247,16 @@ foreach my $option (@ARGV)
print("configure.pl : ignoring $option\n");
next;
}
+ if ($option =~ /with-client-ldflags/)
+ {
+ print("configure.pl : ignoring $option\n");
+ next;
+ }
+ if ($option =~ /with-mysqld-ldflags=/)
+ {
+ print("configure.pl : ignoring $option\n");
+ next;
+ }
$option = uc($option);
$option =~ s/-/_/g;
diff --git a/cmake/cpu_info.cmake b/cmake/cpu_info.cmake
new file mode 100644
index 00000000000..32b98142ace
--- /dev/null
+++ b/cmake/cpu_info.cmake
@@ -0,0 +1,30 @@
+# Copyright (c) 2009, 2011, 2012 Oracle and/or its affiliates. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+# Symbols with information about the CPU.
+
+FIND_PROGRAM(GETCONF getconf)
+MARK_AS_ADVANCED(GETCONF)
+
+IF(GETCONF)
+ EXECUTE_PROCESS(
+ COMMAND ${GETCONF} LEVEL1_DCACHE_LINESIZE
+ OUTPUT_VARIABLE CPU_LEVEL1_DCACHE_LINESIZE
+ )
+ENDIF()
+IF(CPU_LEVEL1_DCACHE_LINESIZE AND CPU_LEVEL1_DCACHE_LINESIZE GREATER 0)
+ELSE()
+ SET(CPU_LEVEL1_DCACHE_LINESIZE 64)
+ENDIF()
diff --git a/cmake/libevent.cmake b/cmake/libevent.cmake
new file mode 100644
index 00000000000..54498e1bb15
--- /dev/null
+++ b/cmake/libevent.cmake
@@ -0,0 +1,89 @@
+# Copyright (C) 2011 Oracle and/or its affiliates. All Rights Reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+MACRO (MYSQL_USE_BUNDLED_LIBEVENT)
+ SET(LIBEVENT_LIBRARY event)
+ SET(LIBEVENT_INCLUDE_DIR ${CMAKE_SOURCE_DIR}/libevent)
+ SET(LIBEVENT_FOUND TRUE)
+ SET(WITH_LIBEVENT "bundled" CACHE STRING "Use bundled libevent")
+ ADD_SUBDIRECTORY(libevent)
+ GET_TARGET_PROPERTY(src libevent SOURCES)
+ FOREACH(file ${src})
+ SET(LIBEVENT_SOURCES ${LIBEVENT_SOURCES} ${CMAKE_SOURCE_DIR}/libevent/${file})
+ ENDFOREACH()
+ENDMACRO()
+
+# MYSQL_CHECK_LIBEVENT
+#
+# Provides the following configure options:
+# WITH_LIBEVENT_BUNDLED
+# If this is set,we use bindled libevent
+# If this is not set,search for system libevent.
+# if system libevent is not found, use bundled copy
+# LIBEVENT_LIBRARIES, LIBEVENT_INCLUDE_DIR and LIBEVENT_SOURCES
+# are set after this macro has run
+
+MACRO (MYSQL_CHECK_LIBEVENT)
+
+ IF (NOT WITH_LIBEVENT)
+ SET(WITH_LIBEVENT "bundled" CACHE STRING "By default use bundled libevent on this platform")
+ ENDIF()
+
+ IF(WITH_LIBEVENT STREQUAL "bundled")
+ MYSQL_USE_BUNDLED_LIBEVENT()
+ ELSEIF(WITH_LIBEVENT STREQUAL "system" OR WITH_LIBEVENT STREQUAL "yes")
+ SET(LIBEVENT_FIND_QUIETLY TRUE)
+
+ IF (NOT LIBEVENT_INCLUDE_PATH)
+ set(LIBEVENT_INCLUDE_PATH /usr/local/include /opt/local/include)
+ ENDIF()
+
+ find_path(LIBEVENT_INCLUDE_DIR event.h PATHS ${LIBEVENT_INCLUDE_PATH})
+
+ if (NOT LIBEVENT_INCLUDE_DIR)
+ MESSAGE(SEND_ERROR "Cannot find appropriate event.h in /usr/local/include or /opt/local/include. Use bundled libevent")
+ endif()
+
+ IF (NOT LIBEVENT_LIB_PATHS)
+ set(LIBEVENT_LIB_PATHS /usr/local/lib /opt/local/lib)
+ ENDIF()
+
+ find_library(LIBEVENT_LIB event PATHS ${LIBEVENT_LIB_PATHS})
+
+ if (NOT LIBEVENT_LIB)
+ MESSAGE(SEND_ERROR "Cannot find appropriate event lib in /usr/local/lib or /opt/local/lib. Use bundled libevent")
+ endif()
+
+ IF (LIBEVENT_LIB AND LIBEVENT_INCLUDE_DIR)
+ set(LIBEVENT_FOUND TRUE)
+ set(LIBEVENT_LIBS ${LIBEVENT_LIB})
+ ELSE()
+ set(LIBEVENT_FOUND FALSE)
+ ENDIF()
+
+ IF(LIBEVENT_FOUND)
+ SET(LIBEVENT_SOURCES "")
+ SET(LIBEVENT_LIBRARIES ${LIBEVENT_LIBS})
+ SET(LIBEVENT_INCLUDE_DIRS ${LIBEVENT_INCLUDE_DIR})
+ SET(LIBEVENT_DEFINES "-DHAVE_LIBEVENT")
+ ELSE()
+ IF(WITH_LIBEVENT STREQUAL "system")
+ MESSAGE(SEND_ERROR "Cannot find appropriate system libraries for libevent. Use bundled libevent")
+ ENDIF()
+ MYSQL_USE_BUNDLED_LIBEVENT()
+ ENDIF()
+
+ ENDIF()
+ENDMACRO()
diff --git a/cmake/libutils.cmake b/cmake/libutils.cmake
index 7c13df05ca4..2da701d39b0 100644
--- a/cmake/libutils.cmake
+++ b/cmake/libutils.cmake
@@ -304,20 +304,22 @@ FUNCTION(GET_DEPENDEND_OS_LIBS target result)
SET(${result} ${ret} PARENT_SCOPE)
ENDFUNCTION()
-MACRO(RESTRICT_SYMBOL_EXPORTS target)
- SET(VISIBILITY_HIDDEN_FLAG)
+SET(VISIBILITY_HIDDEN_FLAG)
- IF(CMAKE_COMPILER_IS_GNUCXX AND UNIX)
- CHECK_C_COMPILER_FLAG("-fvisibility=hidden" HAVE_VISIBILITY_HIDDEN)
- IF(HAVE_VISIBILITY_HIDDEN)
- SET(VISIBILITY_HIDDEN_FLAG "-fvisibility=hidden")
- ENDIF()
+IF(CMAKE_COMPILER_IS_GNUCXX AND UNIX)
+ CHECK_C_COMPILER_FLAG("-fvisibility=hidden" HAVE_VISIBILITY_HIDDEN)
+ IF(HAVE_VISIBILITY_HIDDEN)
+ SET(VISIBILITY_HIDDEN_FLAG "-fvisibility=hidden")
ENDIF()
+ENDIF()
- IF(CMAKE_C_COMPILER_ID MATCHES "SunPro")
- SET(VISIBILITY_HIDDEN_FLAG "-xldscope=hidden")
- ENDIF()
+IF(CMAKE_C_COMPILER_ID MATCHES "SunPro")
+ SET(VISIBILITY_HIDDEN_FLAG "-xldscope=hidden")
+ENDIF()
+# We try to hide the symbols in yassl/zlib to avoid name clashes with
+# other libraries like openssl.
+FUNCTION(RESTRICT_SYMBOL_EXPORTS target)
IF(VISIBILITY_HIDDEN_FLAG)
GET_TARGET_PROPERTY(COMPILE_FLAGS ${target} COMPILE_FLAGS)
IF(NOT COMPILE_FLAGS)
@@ -327,5 +329,4 @@ MACRO(RESTRICT_SYMBOL_EXPORTS target)
SET_TARGET_PROPERTIES(${target} PROPERTIES
COMPILE_FLAGS "${COMPILE_FLAGS} ${VISIBILITY_HIDDEN_FLAG}")
ENDIF()
-
-ENDMACRO()
+ENDFUNCTION()
diff --git a/cmake/os/Windows.cmake b/cmake/os/Windows.cmake
index 42ddb12bf37..254d9f6d946 100644
--- a/cmake/os/Windows.cmake
+++ b/cmake/os/Windows.cmake
@@ -80,10 +80,6 @@ IF(MSVC)
STRING(REPLACE "/MD" "/MT" "${flag}" "${${flag}}")
ENDFOREACH()
- # Remove support for exceptions
- FOREACH(flag CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_INIT)
- STRING(REPLACE "/EHsc" "" "${flag}" "${${flag}}")
- ENDFOREACH()
# Fix CMake's predefined huge stack size
FOREACH(type EXE SHARED MODULE)
diff --git a/cmake/ssl.cmake b/cmake/ssl.cmake
index ca950229129..18ace717965 100644
--- a/cmake/ssl.cmake
+++ b/cmake/ssl.cmake
@@ -13,80 +13,224 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+# We support different versions of SSL:
+# - "bundled" uses source code in <source dir>/extra/yassl
+# - "system" (typically) uses headers/libraries in /usr/lib and /usr/lib64
+# - a custom installation of openssl can be used like this
+# - cmake -DCMAKE_PREFIX_PATH=</path/to/custom/openssl> -DWITH_SSL="system"
+# or
+# - cmake -DWITH_SSL=</path/to/custom/openssl>
+#
+# The default value for WITH_SSL is "bundled"
+# set in cmake/build_configurations/feature_set.cmake
+#
+# For custom build/install of openssl, see the accompanying README and
+# INSTALL* files. When building with gcc, you must build the shared libraries
+# (in addition to the static ones):
+# ./config --prefix=</path/to/custom/openssl> --shared; make; make install
+# On some platforms (mac) you need to choose 32/64 bit architecture.
+# Build/Install of openssl on windows is slightly different: you need to run
+# perl and nmake. You might also need to
+# 'set path=</path/to/custom/openssl>\bin;%PATH%
+# in order to find the .dll files at runtime.
+
+SET(WITH_SSL_DOC "bundled (use yassl)")
+SET(WITH_SSL_DOC
+ "${WITH_SSL_DOC}, yes (prefer os library if present, otherwise use bundled)")
+SET(WITH_SSL_DOC
+ "${WITH_SSL_DOC}, system (use os library)")
+SET(WITH_SSL_DOC
+ "${WITH_SSL_DOC}, </path/to/custom/installation>")
+
MACRO (CHANGE_SSL_SETTINGS string)
- SET(WITH_SSL ${string} CACHE STRING "Options are: no bundled yes(prefer os library if present otherwise use bundled) system(use os library)" FORCE)
+ SET(WITH_SSL ${string} CACHE STRING ${WITH_SSL_DOC} FORCE)
ENDMACRO()
MACRO (MYSQL_USE_BUNDLED_SSL)
SET(INC_DIRS
- ${CMAKE_SOURCE_DIR}/extra/yassl/include
- ${CMAKE_SOURCE_DIR}/extra/yassl/taocrypt/include
+ ${CMAKE_SOURCE_DIR}/extra/yassl/include
+ ${CMAKE_SOURCE_DIR}/extra/yassl/taocrypt/include
)
SET(SSL_LIBRARIES yassl taocrypt)
SET(SSL_INCLUDE_DIRS ${INC_DIRS})
SET(SSL_INTERNAL_INCLUDE_DIRS ${CMAKE_SOURCE_DIR}/extra/yassl/taocrypt/mySTL)
- SET(SSL_DEFINES "-DHAVE_YASSL -DYASSL_PURE_C -DYASSL_PREFIX -DHAVE_OPENSSL -DMULTI_THREADED")
+ SET(SSL_DEFINES "-DHAVE_YASSL -DYASSL_PREFIX -DHAVE_OPENSSL -DMULTI_THREADED")
CHANGE_SSL_SETTINGS("bundled")
- #Remove -fno-implicit-templates
- #(yassl sources cannot be compiled with it)
- SET(SAVE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
- IF(CMAKE_CXX_FLAGS)
- STRING(REPLACE "-fno-implicit-templates" "" CMAKE_CXX_FLAGS
- ${CMAKE_CXX_FLAGS})
- ENDIF()
ADD_SUBDIRECTORY(extra/yassl)
ADD_SUBDIRECTORY(extra/yassl/taocrypt)
- SET(CMAKE_CXX_FLAGS ${SAVE_CXX_FLAGS})
GET_TARGET_PROPERTY(src yassl SOURCES)
FOREACH(file ${src})
SET(SSL_SOURCES ${SSL_SOURCES} ${CMAKE_SOURCE_DIR}/extra/yassl/${file})
ENDFOREACH()
GET_TARGET_PROPERTY(src taocrypt SOURCES)
FOREACH(file ${src})
- SET(SSL_SOURCES ${SSL_SOURCES} ${CMAKE_SOURCE_DIR}/extra/yassl/taocrypt/${file})
+ SET(SSL_SOURCES ${SSL_SOURCES}
+ ${CMAKE_SOURCE_DIR}/extra/yassl/taocrypt/${file})
ENDFOREACH()
ENDMACRO()
# MYSQL_CHECK_SSL
#
# Provides the following configure options:
-# WITH_SSL=[yes|no|bundled]
+# WITH_SSL=[yes|bundled|system|<path/to/custom/installation>]
MACRO (MYSQL_CHECK_SSL)
IF(NOT WITH_SSL)
IF(WIN32)
CHANGE_SSL_SETTINGS("bundled")
ELSE()
- CHANGE_SSL_SETTINGS("no")
+ SET(WITH_SSL "yes")
ENDIF()
ENDIF()
+ # See if WITH_SSL is of the form </path/to/custom/installation>
+ FILE(GLOB WITH_SSL_HEADER ${WITH_SSL}/include/openssl/ssl.h)
+ IF (WITH_SSL_HEADER)
+ SET(WITH_SSL_PATH ${WITH_SSL} CACHE PATH "path to custom SSL installation")
+ ENDIF()
+
IF(WITH_SSL STREQUAL "bundled")
MYSQL_USE_BUNDLED_SSL()
- ELSEIF(WITH_SSL STREQUAL "system" OR WITH_SSL STREQUAL "yes")
- # Check for system library
- SET(OPENSSL_FIND_QUIETLY TRUE)
- INCLUDE(FindOpenSSL)
- FIND_LIBRARY(CRYPTO_LIBRARY crypto)
- MARK_AS_ADVANCED(CRYPTO_LIBRARY)
+ # Reset some variables, in case we switch from /path/to/ssl to "bundled".
+ IF (WITH_SSL_PATH)
+ UNSET(WITH_SSL_PATH)
+ UNSET(WITH_SSL_PATH CACHE)
+ ENDIF()
+ IF (OPENSSL_ROOT_DIR)
+ UNSET(OPENSSL_ROOT_DIR)
+ UNSET(OPENSSL_ROOT_DIR CACHE)
+ ENDIF()
+ IF (OPENSSL_INCLUDE_DIR)
+ UNSET(OPENSSL_INCLUDE_DIR)
+ UNSET(OPENSSL_INCLUDE_DIR CACHE)
+ ENDIF()
+ IF (WIN32 AND OPENSSL_APPLINK_C)
+ UNSET(OPENSSL_APPLINK_C)
+ UNSET(OPENSSL_APPLINK_C CACHE)
+ ENDIF()
+ IF (OPENSSL_LIBRARIES)
+ UNSET(OPENSSL_LIBRARIES)
+ UNSET(OPENSSL_LIBRARIES CACHE)
+ ENDIF()
+ ELSEIF(WITH_SSL STREQUAL "system" OR
+ WITH_SSL STREQUAL "yes" OR
+ WITH_SSL_PATH
+ )
+ # First search in WITH_SSL_PATH.
+ FIND_PATH(OPENSSL_ROOT_DIR
+ NAMES include/openssl/ssl.h
+ NO_CMAKE_PATH
+ NO_CMAKE_ENVIRONMENT_PATH
+ HINTS ${WITH_SSL_PATH}
+ )
+ # Then search in standard places (if not found above).
+ FIND_PATH(OPENSSL_ROOT_DIR
+ NAMES include/openssl/ssl.h
+ )
+
+ FIND_PATH(OPENSSL_INCLUDE_DIR
+ NAMES openssl/ssl.h
+ HINTS ${OPENSSL_ROOT_DIR}/include
+ )
+
+ IF (WIN32)
+ FIND_FILE(OPENSSL_APPLINK_C
+ NAMES openssl/applink.c
+ HINTS ${OPENSSL_ROOT_DIR}/include
+ )
+ MESSAGE(STATUS "OPENSSL_APPLINK_C ${OPENSSL_APPLINK_C}")
+ ENDIF()
+
+ # On mac this list is <.dylib;.so;.a>
+ # We prefer static libraries, so we revert it here.
+ LIST(REVERSE CMAKE_FIND_LIBRARY_SUFFIXES)
+ MESSAGE(STATUS "suffixes <${CMAKE_FIND_LIBRARY_SUFFIXES}>")
+ FIND_LIBRARY(OPENSSL_LIBRARIES
+ NAMES ssl ssleay32 ssleay32MD
+ HINTS ${OPENSSL_ROOT_DIR}/lib)
+ FIND_LIBRARY(CRYPTO_LIBRARY
+ NAMES crypto libeay32
+ HINTS ${OPENSSL_ROOT_DIR}/lib)
+ LIST(REVERSE CMAKE_FIND_LIBRARY_SUFFIXES)
+
+ # Verify version number. Version information looks like:
+ # #define OPENSSL_VERSION_NUMBER 0x1000103fL
+ # Encoded as MNNFFPPS: major minor fix patch status
+ FILE(STRINGS "${OPENSSL_INCLUDE_DIR}/openssl/opensslv.h"
+ OPENSSL_VERSION_NUMBER
+ REGEX "^#define[\t ]+OPENSSL_VERSION_NUMBER[\t ]+0x[0-9].*"
+ )
+ STRING(REGEX REPLACE
+ "^.*OPENSSL_VERSION_NUMBER[\t ]+0x([0-9]).*$" "\\1"
+ OPENSSL_MAJOR_VERSION "${OPENSSL_VERSION_NUMBER}"
+ )
+
+ IF(OPENSSL_INCLUDE_DIR AND
+ OPENSSL_LIBRARIES AND
+ CRYPTO_LIBRARY AND
+ OPENSSL_MAJOR_VERSION STREQUAL "1"
+ )
+ SET(OPENSSL_FOUND TRUE)
+ ELSE()
+ SET(OPENSSL_FOUND FALSE)
+ ENDIF()
+
+ MESSAGE(STATUS "OPENSSL_INCLUDE_DIR = ${OPENSSL_INCLUDE_DIR}")
+ MESSAGE(STATUS "OPENSSL_LIBRARIES = ${OPENSSL_LIBRARIES}")
+ MESSAGE(STATUS "CRYPTO_LIBRARY = ${CRYPTO_LIBRARY}")
+ MESSAGE(STATUS "OPENSSL_MAJOR_VERSION = ${OPENSSL_MAJOR_VERSION}")
+
INCLUDE(CheckSymbolExists)
SET(CMAKE_REQUIRED_INCLUDES ${OPENSSL_INCLUDE_DIR})
CHECK_SYMBOL_EXISTS(SHA512_DIGEST_LENGTH "openssl/sha.h"
HAVE_SHA512_DIGEST_LENGTH)
- SET(CMAKE_REQUIRED_INCLUDES)
- IF(OPENSSL_FOUND AND CRYPTO_LIBRARY AND HAVE_SHA512_DIGEST_LENGTH)
+ IF(OPENSSL_FOUND AND HAVE_SHA512_DIGEST_LENGTH)
SET(SSL_SOURCES "")
SET(SSL_LIBRARIES ${OPENSSL_LIBRARIES} ${CRYPTO_LIBRARY})
+ IF(CMAKE_SYSTEM_NAME MATCHES "SunOS")
+ SET(SSL_LIBRARIES ${SSL_LIBRARIES} ${LIBSOCKET})
+ ENDIF()
+ IF(CMAKE_SYSTEM_NAME MATCHES "Linux")
+ SET(SSL_LIBRARIES ${SSL_LIBRARIES} ${LIBDL})
+ ENDIF()
+ MESSAGE(STATUS "SSL_LIBRARIES = ${SSL_LIBRARIES}")
SET(SSL_INCLUDE_DIRS ${OPENSSL_INCLUDE_DIR})
SET(SSL_INTERNAL_INCLUDE_DIRS "")
SET(SSL_DEFINES "-DHAVE_OPENSSL")
- CHANGE_SSL_SETTINGS("system")
ELSE()
IF(WITH_SSL STREQUAL "system")
MESSAGE(SEND_ERROR "Cannot find appropriate system libraries for SSL. Use WITH_SSL=bundled to enable SSL support")
ENDIF()
MYSQL_USE_BUNDLED_SSL()
ENDIF()
- ELSEIF(NOT WITH_SSL STREQUAL "no")
- MESSAGE(SEND_ERROR "Wrong option for WITH_SSL. Valid values are : yes, no, bundled")
+ ELSE()
+ MESSAGE(SEND_ERROR
+ "Wrong option for WITH_SSL. Valid values are : "${WITH_SSL_DOC})
+ ENDIF()
+ENDMACRO()
+
+
+# Many executables will depend on libeay32.dll and ssleay32.dll at runtime.
+# In order to ensure we find the right version(s), we copy them into
+# the same directory as the executables.
+# NOTE: Using dlls will likely crash in malloc/free,
+# see INSTALL.W32 which comes with the openssl sources.
+# So we should be linking static versions of the libraries.
+MACRO (COPY_OPENSSL_DLLS target_name)
+ IF (WIN32 AND WITH_SSL_PATH)
+ GET_FILENAME_COMPONENT(CRYPTO_NAME "${CRYPTO_LIBRARY}" NAME_WE)
+ GET_FILENAME_COMPONENT(OPENSSL_NAME "${OPENSSL_LIBRARIES}" NAME_WE)
+ FILE(GLOB HAVE_CRYPTO_DLL "${WITH_SSL_PATH}/bin/${CRYPTO_NAME}.dll")
+ FILE(GLOB HAVE_OPENSSL_DLL "${WITH_SSL_PATH}/bin/${OPENSSL_NAME}.dll")
+ IF (HAVE_CRYPTO_DLL AND HAVE_OPENSSL_DLL)
+ ADD_CUSTOM_COMMAND(OUTPUT ${target_name}
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different
+ "${WITH_SSL_PATH}/bin/${CRYPTO_NAME}.dll"
+ "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/${CRYPTO_NAME}.dll"
+ COMMAND ${CMAKE_COMMAND} -E copy_if_different
+ "${WITH_SSL_PATH}/bin/${OPENSSL_NAME}.dll"
+ "${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_CFG_INTDIR}/${OPENSSL_NAME}.dll"
+ )
+ ADD_CUSTOM_TARGET(${target_name} ALL)
+ ENDIF()
ENDIF()
ENDMACRO()
diff --git a/dbug/dbug.c b/dbug/dbug.c
index b285b32fa17..9ec8044eaf1 100644
--- a/dbug/dbug.c
+++ b/dbug/dbug.c
@@ -1332,7 +1332,7 @@ void _db_dump_(uint _line_, const char *keyword,
if (TRACING)
{
Indent(cs, cs->level + 1);
- pos= min(max(cs->level-cs->stack->sub_level,0)*INDENT,80);
+ pos= MY_MIN(MY_MAX(cs->level-cs->stack->sub_level,0)*INDENT,80);
}
else
{
@@ -1737,7 +1737,7 @@ static void Indent(CODE_STATE *cs, int indent)
{
int count;
- indent= max(indent-1-cs->stack->sub_level,0)*INDENT;
+ indent= MY_MAX(indent-1-cs->stack->sub_level,0)*INDENT;
for (count= 0; count < indent ; count++)
{
if ((count % INDENT) == 0)
diff --git a/extra/comp_err.c b/extra/comp_err.c
index fb51377ddc5..bf757122957 100644
--- a/extra/comp_err.c
+++ b/extra/comp_err.c
@@ -33,8 +33,9 @@
#include <assert.h>
#include <my_dir.h>
-#define MAX_ROWS 1000
+#define MAX_ROWS 2000
#define HEADER_LENGTH 32 /* Length of header in errmsg.sys */
+#define ERRMSG_VERSION 3 /* Version number of errmsg.sys */
#define DEFAULT_CHARSET_DIR "../sql/share/charsets"
#define ER_PREFIX "ER_"
#define ER_PREFIX2 "MARIA_ER_"
@@ -50,9 +51,9 @@ static char *default_dbug_option= (char*) "d:t:O,/tmp/comp_err.trace";
#endif
/* Header for errmsg.sys files */
-uchar file_head[]= { 254, 254, 2, 2 };
+uchar file_head[]= { 254, 254, 2, ERRMSG_VERSION };
/* Store positions to each error message row to store in errmsg.sys header */
-uint file_pos[MAX_ROWS];
+uint file_pos[MAX_ROWS+1];
const char *empty_string= ""; /* For empty states */
/*
@@ -379,9 +380,11 @@ static int create_sys_files(struct languages *lang_head,
if (my_fwrite(to, (uchar*) head, HEADER_LENGTH, MYF(MY_WME | MY_FNABP)))
goto err;
+ file_pos[row_count]= (ftell(to) - start_pos);
for (i= 0; i < row_count; i++)
{
- int2store(head, file_pos[i]);
+ /* Store length of each string */
+ int2store(head, file_pos[i+1] - file_pos[i]);
if (my_fwrite(to, (uchar*) head, 2, MYF(MY_WME | MY_FNABP)))
goto err;
}
diff --git a/extra/my_print_defaults.c b/extra/my_print_defaults.c
index 7558d6d00ae..e91163dde1c 100644
--- a/extra/my_print_defaults.c
+++ b/extra/my_print_defaults.c
@@ -26,6 +26,7 @@
#include <my_sys.h>
#include <m_string.h>
#include <my_getopt.h>
+#include <my_default.h>
#include <mysql_version.h>
#define load_default_groups mysqld_groups
@@ -33,6 +34,7 @@
#undef load_default_groups
my_bool opt_mysqld;
+
const char *config_file="my"; /* Default config file */
uint verbose= 0, opt_defaults_file_used= 0;
const char *default_dbug_option="d:t:o,/tmp/my_print_defaults.trace";
diff --git a/include/big_endian.h b/include/big_endian.h
new file mode 100644
index 00000000000..021b6abc383
--- /dev/null
+++ b/include/big_endian.h
@@ -0,0 +1,82 @@
+/* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1307 USA */
+
+/*
+ Data in big-endian format.
+*/
+#define float4store(T,A) do { *(T)= ((uchar *) &A)[3];\
+ *((T)+1)=(char) ((uchar *) &A)[2];\
+ *((T)+2)=(char) ((uchar *) &A)[1];\
+ *((T)+3)=(char) ((uchar *) &A)[0]; } while(0)
+
+#define float4get(V,M) do { float def_temp;\
+ ((uchar*) &def_temp)[0]=(M)[3];\
+ ((uchar*) &def_temp)[1]=(M)[2];\
+ ((uchar*) &def_temp)[2]=(M)[1];\
+ ((uchar*) &def_temp)[3]=(M)[0];\
+ (V)=def_temp; } while(0)
+
+#define float8store(T,V) do { *(T)= ((uchar *) &V)[7];\
+ *((T)+1)=(char) ((uchar *) &V)[6];\
+ *((T)+2)=(char) ((uchar *) &V)[5];\
+ *((T)+3)=(char) ((uchar *) &V)[4];\
+ *((T)+4)=(char) ((uchar *) &V)[3];\
+ *((T)+5)=(char) ((uchar *) &V)[2];\
+ *((T)+6)=(char) ((uchar *) &V)[1];\
+ *((T)+7)=(char) ((uchar *) &V)[0]; } while(0)
+
+#define float8get(V,M) do { double def_temp;\
+ ((uchar*) &def_temp)[0]=(M)[7];\
+ ((uchar*) &def_temp)[1]=(M)[6];\
+ ((uchar*) &def_temp)[2]=(M)[5];\
+ ((uchar*) &def_temp)[3]=(M)[4];\
+ ((uchar*) &def_temp)[4]=(M)[3];\
+ ((uchar*) &def_temp)[5]=(M)[2];\
+ ((uchar*) &def_temp)[6]=(M)[1];\
+ ((uchar*) &def_temp)[7]=(M)[0];\
+ (V) = def_temp; } while(0)
+
+#define ushortget(V,M) do { V = (uint16) (((uint16) ((uchar) (M)[1]))+\
+ ((uint16) ((uint16) (M)[0]) << 8)); } while(0)
+#define shortget(V,M) do { V = (short) (((short) ((uchar) (M)[1]))+\
+ ((short) ((short) (M)[0]) << 8)); } while(0)
+#define longget(V,M) do { int32 def_temp;\
+ ((uchar*) &def_temp)[0]=(M)[0];\
+ ((uchar*) &def_temp)[1]=(M)[1];\
+ ((uchar*) &def_temp)[2]=(M)[2];\
+ ((uchar*) &def_temp)[3]=(M)[3];\
+ (V)=def_temp; } while(0)
+#define ulongget(V,M) do { uint32 def_temp;\
+ ((uchar*) &def_temp)[0]=(M)[0];\
+ ((uchar*) &def_temp)[1]=(M)[1];\
+ ((uchar*) &def_temp)[2]=(M)[2];\
+ ((uchar*) &def_temp)[3]=(M)[3];\
+ (V)=def_temp; } while(0)
+#define shortstore(T,A) do { uint def_temp=(uint) (A) ;\
+ *(((char*)T)+1)=(char)(def_temp); \
+ *(((char*)T)+0)=(char)(def_temp >> 8); } while(0)
+#define longstore(T,A) do { *(((char*)T)+3)=((A));\
+ *(((char*)T)+2)=(((A) >> 8));\
+ *(((char*)T)+1)=(((A) >> 16));\
+ *(((char*)T)+0)=(((A) >> 24)); } while(0)
+
+#define floatget(V,M) memcpy(&V, (M), sizeof(float))
+/* Cast away type qualifiers (necessary as macro takes argument by value). */
+#define floatstore(T,V) memcpy((T), (void*) (&V), sizeof(float))
+#define doubleget(V,M) memcpy(&V, (M), sizeof(double))
+/* Cast away type qualifiers (necessary as macro takes argument by value). */
+#define doublestore(T,V) memcpy((T), (void*) &V, sizeof(double))
+#define longlongget(V,M) memcpy(&V, (M), sizeof(ulonglong))
+#define longlongstore(T,V) memcpy((T), &V, sizeof(ulonglong))
diff --git a/include/byte_order_generic.h b/include/byte_order_generic.h
new file mode 100644
index 00000000000..d4ac27eeb9c
--- /dev/null
+++ b/include/byte_order_generic.h
@@ -0,0 +1,95 @@
+/* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1307 USA */
+
+/*
+ Endianness-independent definitions for architectures other
+ than the x86 architecture.
+*/
+#define sint2korr(A) (int16) (((int16) ((uchar) (A)[0])) +\
+ ((int16) ((int16) (A)[1]) << 8))
+#define sint3korr(A) ((int32) ((((uchar) (A)[2]) & 128) ? \
+ (((uint32) 255L << 24) | \
+ (((uint32) (uchar) (A)[2]) << 16) |\
+ (((uint32) (uchar) (A)[1]) << 8) | \
+ ((uint32) (uchar) (A)[0])) : \
+ (((uint32) (uchar) (A)[2]) << 16) |\
+ (((uint32) (uchar) (A)[1]) << 8) | \
+ ((uint32) (uchar) (A)[0])))
+#define sint4korr(A) (int32) (((int32) ((uchar) (A)[0])) +\
+ (((int32) ((uchar) (A)[1]) << 8)) +\
+ (((int32) ((uchar) (A)[2]) << 16)) +\
+ (((int32) ((int16) (A)[3]) << 24)))
+#define sint8korr(A) (longlong) uint8korr(A)
+#define uint2korr(A) (uint16) (((uint16) ((uchar) (A)[0])) +\
+ ((uint16) ((uchar) (A)[1]) << 8))
+#define uint3korr(A) (uint32) (((uint32) ((uchar) (A)[0])) +\
+ (((uint32) ((uchar) (A)[1])) << 8) +\
+ (((uint32) ((uchar) (A)[2])) << 16))
+#define uint4korr(A) (uint32) (((uint32) ((uchar) (A)[0])) +\
+ (((uint32) ((uchar) (A)[1])) << 8) +\
+ (((uint32) ((uchar) (A)[2])) << 16) +\
+ (((uint32) ((uchar) (A)[3])) << 24))
+#define uint5korr(A) ((ulonglong)(((uint32) ((uchar) (A)[0])) +\
+ (((uint32) ((uchar) (A)[1])) << 8) +\
+ (((uint32) ((uchar) (A)[2])) << 16) +\
+ (((uint32) ((uchar) (A)[3])) << 24)) +\
+ (((ulonglong) ((uchar) (A)[4])) << 32))
+#define uint6korr(A) ((ulonglong)(((uint32) ((uchar) (A)[0])) + \
+ (((uint32) ((uchar) (A)[1])) << 8) + \
+ (((uint32) ((uchar) (A)[2])) << 16) + \
+ (((uint32) ((uchar) (A)[3])) << 24)) + \
+ (((ulonglong) ((uchar) (A)[4])) << 32) + \
+ (((ulonglong) ((uchar) (A)[5])) << 40))
+#define uint8korr(A) ((ulonglong)(((uint32) ((uchar) (A)[0])) +\
+ (((uint32) ((uchar) (A)[1])) << 8) +\
+ (((uint32) ((uchar) (A)[2])) << 16) +\
+ (((uint32) ((uchar) (A)[3])) << 24)) +\
+ (((ulonglong) (((uint32) ((uchar) (A)[4])) +\
+ (((uint32) ((uchar) (A)[5])) << 8) +\
+ (((uint32) ((uchar) (A)[6])) << 16) +\
+ (((uint32) ((uchar) (A)[7])) << 24))) <<\
+ 32))
+#define int2store(T,A) do { uint def_temp= (uint) (A) ;\
+ *((uchar*) (T))= (uchar)(def_temp); \
+ *((uchar*) (T)+1)=(uchar)((def_temp >> 8)); \
+ } while(0)
+#define int3store(T,A) do { /*lint -save -e734 */\
+ *((uchar*)(T))=(uchar) ((A));\
+ *((uchar*) (T)+1)=(uchar) (((A) >> 8));\
+ *((uchar*)(T)+2)=(uchar) (((A) >> 16)); \
+ /*lint -restore */} while(0)
+#define int4store(T,A) do { *((char *)(T))=(char) ((A));\
+ *(((char *)(T))+1)=(char) (((A) >> 8));\
+ *(((char *)(T))+2)=(char) (((A) >> 16));\
+ *(((char *)(T))+3)=(char) (((A) >> 24));\
+ } while(0)
+#define int5store(T,A) do { *((char *)(T))= (char)((A)); \
+ *(((char *)(T))+1)= (char)(((A) >> 8)); \
+ *(((char *)(T))+2)= (char)(((A) >> 16)); \
+ *(((char *)(T))+3)= (char)(((A) >> 24)); \
+ *(((char *)(T))+4)= (char)(((A) >> 32)); \
+ } while(0)
+#define int6store(T,A) do { *((char *)(T))= (char)((A)); \
+ *(((char *)(T))+1)= (char)(((A) >> 8)); \
+ *(((char *)(T))+2)= (char)(((A) >> 16)); \
+ *(((char *)(T))+3)= (char)(((A) >> 24)); \
+ *(((char *)(T))+4)= (char)(((A) >> 32)); \
+ *(((char *)(T))+5)= (char)(((A) >> 40)); \
+ } while(0)
+#define int8store(T,A) do { uint def_temp= (uint) (A), \
+ def_temp2= (uint) ((A) >> 32); \
+ int4store((T),def_temp); \
+ int4store((T+4),def_temp2);\
+ } while(0)
diff --git a/include/byte_order_generic_x86.h b/include/byte_order_generic_x86.h
new file mode 100644
index 00000000000..0a71a17829b
--- /dev/null
+++ b/include/byte_order_generic_x86.h
@@ -0,0 +1,97 @@
+/* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1307 USA */
+
+/*
+ Optimized function-like macros for the x86 architecture (_WIN32 included).
+*/
+#define sint2korr(A) (*((const int16 *) (A)))
+#define sint3korr(A) ((int32) ((((uchar) (A)[2]) & 128) ? \
+ (((uint32) 255L << 24) | \
+ (((uint32) (uchar) (A)[2]) << 16) |\
+ (((uint32) (uchar) (A)[1]) << 8) | \
+ ((uint32) (uchar) (A)[0])) : \
+ (((uint32) (uchar) (A)[2]) << 16) |\
+ (((uint32) (uchar) (A)[1]) << 8) | \
+ ((uint32) (uchar) (A)[0])))
+#define sint4korr(A) (*((const long *) (A)))
+#define uint2korr(A) (*((const uint16 *) (A)))
+
+/*
+ Attention: Please, note, uint3korr reads 4 bytes (not 3)!
+ It means, that you have to provide enough allocated space.
+*/
+#if defined(HAVE_valgrind) && !defined(_WIN32)
+#define uint3korr(A) (uint32) (((uint32) ((uchar) (A)[0])) +\
+ (((uint32) ((uchar) (A)[1])) << 8) +\
+ (((uint32) ((uchar) (A)[2])) << 16))
+#else
+#define uint3korr(A) (long) (*((const unsigned int *) (A)) & 0xFFFFFF)
+#endif
+
+#define uint4korr(A) (*((const uint32 *) (A)))
+#define uint5korr(A) ((ulonglong)(((uint32) ((uchar) (A)[0])) +\
+ (((uint32) ((uchar) (A)[1])) << 8) +\
+ (((uint32) ((uchar) (A)[2])) << 16) +\
+ (((uint32) ((uchar) (A)[3])) << 24)) +\
+ (((ulonglong) ((uchar) (A)[4])) << 32))
+#define uint6korr(A) ((ulonglong)(((uint32) ((uchar) (A)[0])) + \
+ (((uint32) ((uchar) (A)[1])) << 8) + \
+ (((uint32) ((uchar) (A)[2])) << 16) + \
+ (((uint32) ((uchar) (A)[3])) << 24)) + \
+ (((ulonglong) ((uchar) (A)[4])) << 32) + \
+ (((ulonglong) ((uchar) (A)[5])) << 40))
+#define uint8korr(A) (*((const ulonglong *) (A)))
+#define sint8korr(A) (*((const longlong *) (A)))
+
+#define int2store(T,A) *((uint16*) (T))= (uint16) (A)
+#define int3store(T,A) do { *(T)= (uchar) ((A));\
+ *(T+1)=(uchar) (((uint) (A) >> 8));\
+ *(T+2)=(uchar) (((A) >> 16));\
+ } while (0)
+#define int4store(T,A) *((long *) (T))= (long) (A)
+#define int5store(T,A) do { *(T)= (uchar)((A));\
+ *((T)+1)=(uchar) (((A) >> 8));\
+ *((T)+2)=(uchar) (((A) >> 16));\
+ *((T)+3)=(uchar) (((A) >> 24));\
+ *((T)+4)=(uchar) (((A) >> 32));\
+ } while(0)
+#define int6store(T,A) do { *(T)= (uchar)((A)); \
+ *((T)+1)=(uchar) (((A) >> 8)); \
+ *((T)+2)=(uchar) (((A) >> 16)); \
+ *((T)+3)=(uchar) (((A) >> 24)); \
+ *((T)+4)=(uchar) (((A) >> 32)); \
+ *((T)+5)=(uchar) (((A) >> 40)); \
+ } while(0)
+#define int8store(T,A) *((ulonglong *) (T))= (ulonglong) (A)
+typedef union {
+ double v;
+ long m[2];
+} doubleget_union;
+#define doubleget(V,M) \
+do { doubleget_union _tmp; \
+ _tmp.m[0] = *((const long*)(M)); \
+ _tmp.m[1] = *(((const long*) (M))+1); \
+ (V) = _tmp.v; } while(0)
+#define doublestore(T,V) \
+do { *((long *) T) = ((const doubleget_union *)&V)->m[0]; \
+ *(((long *) T)+1) = ((const doubleget_union *)&V)->m[1]; \
+ } while (0)
+#define float4get(V,M) \
+do { *((float *) &(V)) = *((const float*) (M)); } while(0)
+#define float8get(V,M) doubleget((V),(M))
+#define float4store(V,M) memcpy((uchar*)(V), (uchar*)(&M), sizeof(float))
+#define floatstore(T,V) memcpy((uchar*)(T), (uchar*)(&V), sizeof(float))
+#define floatget(V,M) memcpy((uchar*)(&V),(uchar*) (M), sizeof(float))
+#define float8store(V,M) doublestore((V),(M))
diff --git a/include/byte_order_generic_x86_64.h b/include/byte_order_generic_x86_64.h
new file mode 100644
index 00000000000..877c1574dfa
--- /dev/null
+++ b/include/byte_order_generic_x86_64.h
@@ -0,0 +1,83 @@
+/* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1307 USA */
+
+/*
+ Optimized function-like macros for the x86 architecture (_WIN32 included).
+*/
+#define sint2korr(A) (int16) (*((int16 *) (A)))
+#define sint3korr(A) ((int32) ((((uchar) (A)[2]) & 128) ? \
+ (((uint32) 255L << 24) | \
+ (((uint32) (uchar) (A)[2]) << 16) |\
+ (((uint32) (uchar) (A)[1]) << 8) | \
+ ((uint32) (uchar) (A)[0])) : \
+ (((uint32) (uchar) (A)[2]) << 16) |\
+ (((uint32) (uchar) (A)[1]) << 8) | \
+ ((uint32) (uchar) (A)[0])))
+#define sint4korr(A) (int32) (*((int32 *) (A)))
+#define uint2korr(A) (uint16) (*((uint16 *) (A)))
+/*
+ Attention: Please, note, uint3korr reads 4 bytes (not 3)!
+ It means, that you have to provide enough allocated space.
+*/
+#if defined(HAVE_purify) && !defined(_WIN32)
+#define uint3korr(A) (uint32) (((uint32) ((uchar) (A)[0])) +\
+ (((uint32) ((uchar) (A)[1])) << 8) +\
+ (((uint32) ((uchar) (A)[2])) << 16))
+#else
+#define uint3korr(A) (uint32) (*((unsigned int *) (A)) & 0xFFFFFF)
+#endif
+#define uint4korr(A) (uint32) (*((uint32 *) (A)))
+#define uint5korr(A) ((ulonglong)(((uint32) ((uchar) (A)[0])) +\
+ (((uint32) ((uchar) (A)[1])) << 8) +\
+ (((uint32) ((uchar) (A)[2])) << 16) +\
+ (((uint32) ((uchar) (A)[3])) << 24)) +\
+ (((ulonglong) ((uchar) (A)[4])) << 32))
+#define uint6korr(A) ((ulonglong)(((uint32) ((uchar) (A)[0])) + \
+ (((uint32) ((uchar) (A)[1])) << 8) + \
+ (((uint32) ((uchar) (A)[2])) << 16) + \
+ (((uint32) ((uchar) (A)[3])) << 24)) + \
+ (((ulonglong) ((uchar) (A)[4])) << 32) + \
+ (((ulonglong) ((uchar) (A)[5])) << 40))
+#define uint8korr(A) (ulonglong) (*((ulonglong *) (A)))
+#define sint8korr(A) (longlong) (*((longlong *) (A)))
+
+#define int2store(T,A) do { uchar *pT= (uchar*)(T);\
+ *((uint16*)(pT))= (uint16) (A);\
+ } while (0)
+
+#define int3store(T,A) do { *(T)= (uchar) ((A));\
+ *(T+1)=(uchar) (((uint) (A) >> 8));\
+ *(T+2)=(uchar) (((A) >> 16));\
+ } while (0)
+#define int4store(T,A) do { uchar *pT= (uchar*)(T);\
+ *((uint32 *) (pT))= (uint32) (A); \
+ } while (0)
+
+#define int5store(T,A) do { *(T)= (uchar)((A));\
+ *((T)+1)=(uchar) (((A) >> 8));\
+ *((T)+2)=(uchar) (((A) >> 16));\
+ *((T)+3)=(uchar) (((A) >> 24));\
+ *((T)+4)=(uchar) (((A) >> 32));\
+ } while(0)
+#define int6store(T,A) do { *(T)= (uchar)((A)); \
+ *((T)+1)=(uchar) (((A) >> 8)); \
+ *((T)+2)=(uchar) (((A) >> 16)); \
+ *((T)+3)=(uchar) (((A) >> 24)); \
+ *((T)+4)=(uchar) (((A) >> 32)); \
+ *((T)+5)=(uchar) (((A) >> 40)); \
+ } while(0)
+#define int8store(T,A) do { uchar *pT= (uchar*)(T);\
+ *((ulonglong *) (pT))= (ulonglong) (A);\
+ } while(0)
diff --git a/include/crypt_genhash_impl.h b/include/crypt_genhash_impl.h
new file mode 100644
index 00000000000..af5afd23e86
--- /dev/null
+++ b/include/crypt_genhash_impl.h
@@ -0,0 +1,32 @@
+/* defines and prototypes for using crypt_genhash_impl.cc */
+
+#ifndef CRYPT_HASHGEN_IMPL_H
+#define CRYPT_HASHGEN_IMPL_H
+#define ROUNDS_DEFAULT 5000
+#define ROUNDS_MIN 1000
+#define ROUNDS_MAX 999999999
+#define MIXCHARS 32
+#define CRYPT_SALT_LENGTH 20
+#define CRYPT_MAGIC_LENGTH 3
+#define CRYPT_PARAM_LENGTH 13
+#define SHA256_HASH_LENGTH 43
+#define CRYPT_MAX_PASSWORD_SIZE (CRYPT_SALT_LENGTH + \
+ SHA256_HASH_LENGTH + \
+ CRYPT_MAGIC_LENGTH + \
+ CRYPT_PARAM_LENGTH)
+
+int extract_user_salt(char **salt_begin,
+ char **salt_end);
+C_MODE_START
+char *
+my_crypt_genhash(char *ctbuffer,
+ size_t ctbufflen,
+ const char *plaintext,
+ int plaintext_len,
+ const char *switchsalt,
+ const char **params);
+void generate_user_salt(char *buffer, int buffer_len);
+void xor_string(char *to, int to_len, char *pattern, int pattern_len);
+
+C_MODE_END
+#endif
diff --git a/include/errmsg.h b/include/errmsg.h
index 64ec2df395c..b839060a881 100644
--- a/include/errmsg.h
+++ b/include/errmsg.h
@@ -16,8 +16,12 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
-/* Error messages for MySQL clients */
-/* (Error messages for the daemon are in sql/share/errmsg.txt) */
+/*
+ Error messages numbers for MySQL clients.
+ The error messages itself are in libmysql/errmsg.c
+
+ Error messages for the mysqld daemon are in sql/share/errmsg.txt
+*/
#ifdef __cplusplus
extern "C" {
@@ -102,7 +106,9 @@ extern const char *client_errors[]; /* Error messages */
#define CR_NEW_STMT_METADATA 2057
#define CR_ALREADY_CONNECTED 2058
#define CR_AUTH_PLUGIN_CANNOT_LOAD 2059
-#define CR_ERROR_LAST /*Copy last error nr:*/ 2059
+#define CR_DUPLICATE_CONNECTION_ATTR 2060
+#define CR_AUTH_PLUGIN_ERR 2061
+#define CR_ERROR_LAST /*Copy last error nr:*/ 2061
/* Add error numbers before CR_ERROR_LAST and change it accordingly. */
#endif /* ERRMSG_INCLUDED */
diff --git a/include/ft_global.h b/include/ft_global.h
index 73726018d0a..aad3b4cb56e 100644
--- a/include/ft_global.h
+++ b/include/ft_global.h
@@ -43,11 +43,32 @@ struct _ft_vft
void (*reinit_search)(FT_INFO *);
};
+typedef struct st_ft_info_ext FT_INFO_EXT;
+struct _ft_vft_ext
+{
+ uint (*get_version)(); // Extended API version
+ ulonglong (*get_flags)();
+ ulonglong (*get_docid)(FT_INFO_EXT *);
+ ulonglong (*count_matches)(FT_INFO_EXT *);
+};
+
+/* Flags for extended FT API */
+#define FTS_ORDERED_RESULT (1LL << 1)
+#define FTS_DOCID_IN_RESULT (1LL << 2)
+
+#define FTS_DOC_ID_COL_NAME "FTS_DOC_ID"
+
#ifndef FT_CORE
struct st_ft_info
{
struct _ft_vft *please; /* INTERCAL style :-) */
};
+
+struct st_ft_info_ext
+{
+ struct _ft_vft *please; /* INTERCAL style :-) */
+ struct _ft_vft_ext *could_you;
+};
#endif
extern const char *ft_stopword_file;
diff --git a/include/little_endian.h b/include/little_endian.h
new file mode 100644
index 00000000000..7223fea648f
--- /dev/null
+++ b/include/little_endian.h
@@ -0,0 +1,75 @@
+/* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1307 USA */
+
+/*
+ Data in little-endian format.
+*/
+
+#ifndef MY_BYTE_ORDER_ARCH_OPTIMIZED
+#define float4get(V,M) memcpy(&V, (M), sizeof(float))
+#define float4store(V,M) memcpy(V, (&M), sizeof(float))
+#define float8get(V,M) doubleget((V),(M))
+#define float8store(V,M) doublestore((V),(M))
+
+/* Bi-endian hardware.... */
+#if defined(__FLOAT_WORD_ORDER) && (__FLOAT_WORD_ORDER == __BIG_ENDIAN)
+#define doublestore(T,V) do { *(((char*)T)+0)=(char) ((uchar *) &V)[4];\
+ *(((char*)T)+1)=(char) ((uchar *) &V)[5];\
+ *(((char*)T)+2)=(char) ((uchar *) &V)[6];\
+ *(((char*)T)+3)=(char) ((uchar *) &V)[7];\
+ *(((char*)T)+4)=(char) ((uchar *) &V)[0];\
+ *(((char*)T)+5)=(char) ((uchar *) &V)[1];\
+ *(((char*)T)+6)=(char) ((uchar *) &V)[2];\
+ *(((char*)T)+7)=(char) ((uchar *) &V)[3]; }\
+ while(0)
+#define doubleget(V,M) do { double def_temp;\
+ ((uchar*) &def_temp)[0]=(M)[4];\
+ ((uchar*) &def_temp)[1]=(M)[5];\
+ ((uchar*) &def_temp)[2]=(M)[6];\
+ ((uchar*) &def_temp)[3]=(M)[7];\
+ ((uchar*) &def_temp)[4]=(M)[0];\
+ ((uchar*) &def_temp)[5]=(M)[1];\
+ ((uchar*) &def_temp)[6]=(M)[2];\
+ ((uchar*) &def_temp)[7]=(M)[3];\
+ (V) = def_temp; } while(0)
+#else /* Bi-endian hardware.... */
+
+/* Cast away type qualifiers (necessary as macro takes argument by value). */
+#define doublestore(T,V) memcpy((T), (void*) &V, sizeof(double))
+#define doubleget(V,M) memcpy(&V, (M), sizeof(double))
+
+#endif /* Bi-endian hardware.... */
+
+#endif /* !MY_BYTE_ORDER_ARCH_OPTIMIZED */
+
+#define ushortget(V,M) do { uchar *pM= (uchar*)(M);V = uint2korr(pM);} while(0)
+#define shortget(V,M) do { uchar *pM= (uchar*)(M);V = sint2korr(pM);} while(0)
+#define longget(V,M) do { uchar *pM= (uchar*)(M);V = sint4korr(pM);} while(0)
+#define ulongget(V,M) do { uchar *pM= (uchar*)(M);V = uint4korr(pM);} while(0)
+#define shortstore(T,V) int2store(T,V)
+#define longstore(T,V) int4store(T,V)
+
+#ifndef floatstore
+/* Cast away type qualifiers (necessary as macro takes argument by value). */
+#define floatstore(T,V) memcpy((T), (void*) (&V), sizeof(float))
+#define floatget(V,M) memcpy(&V, (M), sizeof(float))
+#endif
+#ifndef doubleget
+#define doubleget(V,M) memcpy(&V, (M), sizeof(double))
+#define doublestore(T,V) memcpy((T), (void *) &V, sizeof(double))
+#endif /* doubleget */
+
+#define longlongget(V,M) memcpy(&V, (M), sizeof(ulonglong))
+#define longlongstore(T,V) memcpy((T), &V, sizeof(ulonglong))
diff --git a/include/m_ctype.h b/include/m_ctype.h
index 95b520e4ee9..e3fb2dbc66e 100644
--- a/include/m_ctype.h
+++ b/include/m_ctype.h
@@ -137,6 +137,38 @@ extern MY_UNI_CTYPE my_uni_ctype[256];
#define MY_REPERTOIRE_EXTENDED 2 /* Extended characters: U+0080..U+FFFF */
#define MY_REPERTOIRE_UNICODE30 3 /* ASCII | EXTENDED: U+0000..U+FFFF */
+/* Flags for strxfrm */
+#define MY_STRXFRM_LEVEL1 0x00000001 /* for primary weights */
+#define MY_STRXFRM_LEVEL2 0x00000002 /* for secondary weights */
+#define MY_STRXFRM_LEVEL3 0x00000004 /* for tertiary weights */
+#define MY_STRXFRM_LEVEL4 0x00000008 /* fourth level weights */
+#define MY_STRXFRM_LEVEL5 0x00000010 /* fifth level weights */
+#define MY_STRXFRM_LEVEL6 0x00000020 /* sixth level weights */
+#define MY_STRXFRM_LEVEL_ALL 0x0000003F /* Bit OR for the above six */
+#define MY_STRXFRM_NLEVELS 6 /* Number of possible levels*/
+
+#define MY_STRXFRM_PAD_WITH_SPACE 0x00000040 /* if pad result with spaces */
+#define MY_STRXFRM_PAD_TO_MAXLEN 0x00000080 /* if pad tail(for filesort) */
+
+#define MY_STRXFRM_DESC_LEVEL1 0x00000100 /* if desc order for level1 */
+#define MY_STRXFRM_DESC_LEVEL2 0x00000200 /* if desc order for level2 */
+#define MY_STRXFRM_DESC_LEVEL3 0x00000300 /* if desc order for level3 */
+#define MY_STRXFRM_DESC_LEVEL4 0x00000800 /* if desc order for level4 */
+#define MY_STRXFRM_DESC_LEVEL5 0x00001000 /* if desc order for level5 */
+#define MY_STRXFRM_DESC_LEVEL6 0x00002000 /* if desc order for level6 */
+#define MY_STRXFRM_DESC_SHIFT 8
+
+#define MY_STRXFRM_UNUSED_00004000 0x00004000 /* for future extensions */
+#define MY_STRXFRM_UNUSED_00008000 0x00008000 /* for future extensions */
+
+#define MY_STRXFRM_REVERSE_LEVEL1 0x00010000 /* if reverse order for level1 */
+#define MY_STRXFRM_REVERSE_LEVEL2 0x00020000 /* if reverse order for level2 */
+#define MY_STRXFRM_REVERSE_LEVEL3 0x00040000 /* if reverse order for level3 */
+#define MY_STRXFRM_REVERSE_LEVEL4 0x00080000 /* if reverse order for level4 */
+#define MY_STRXFRM_REVERSE_LEVEL5 0x00100000 /* if reverse order for level5 */
+#define MY_STRXFRM_REVERSE_LEVEL6 0x00200000 /* if reverse order for level6 */
+#define MY_STRXFRM_REVERSE_SHIFT 16
+
struct my_uni_idx_st
{
uint16 from;
@@ -591,6 +623,10 @@ my_bool my_charset_is_ascii_compatible(CHARSET_INFO *cs);
extern size_t my_vsnprintf_ex(CHARSET_INFO *cs, char *to, size_t n,
const char* fmt, va_list ap);
+uint32 my_convert(char *to, uint32 to_length, CHARSET_INFO *to_cs,
+ const char *from, uint32 from_length,
+ CHARSET_INFO *from_cs, uint *errors);
+
#define _MY_U 01 /* Upper case */
#define _MY_L 02 /* Lower case */
#define _MY_NMR 04 /* Numeral (digit) */
diff --git a/include/m_string.h b/include/m_string.h
index 1f59fd06084..95b28d6d69a 100644
--- a/include/m_string.h
+++ b/include/m_string.h
@@ -168,7 +168,7 @@ size_t my_gcvt(double x, my_gcvt_arg_type type, int width, char *to,
(DBL_DIG + 2) significant digits + sign + "." + ("e-NNN" or
MAX_DECPT_FOR_F_FORMAT zeros for cases when |x|<1 and the 'f' format is used).
*/
-#define MY_GCVT_MAX_FIELD_WIDTH (DBL_DIG + 4 + max(5, MAX_DECPT_FOR_F_FORMAT)) \
+#define MY_GCVT_MAX_FIELD_WIDTH (DBL_DIG + 4 + MY_MAX(5, MAX_DECPT_FOR_F_FORMAT)) \
extern char *llstr(longlong value,char *buff);
extern char *ullstr(longlong value,char *buff);
diff --git a/include/my_base.h b/include/my_base.h
index 0984ff8dfa9..c195830e35a 100644
--- a/include/my_base.h
+++ b/include/my_base.h
@@ -46,7 +46,8 @@
#define HA_OPEN_COPY 256 /* Open copy (for repair) */
/* Internal temp table, used for temporary results */
#define HA_OPEN_INTERNAL_TABLE 512
-#define HA_OPEN_MERGE_TABLE 1024
+#define HA_OPEN_NO_PSI_CALL 1024 /* Don't call/connect PSI */
+#define HA_OPEN_MERGE_TABLE 2048
/* The following is parameter to ha_rkey() how to use key */
@@ -194,6 +195,11 @@ enum ha_extra_function {
HA_EXTRA_ATTACH_CHILDREN,
HA_EXTRA_IS_ATTACHED_CHILDREN,
HA_EXTRA_DETACH_CHILDREN,
+ /*
+ Prepare table for export
+ (e.g. quiesce the table and write table metadata).
+ */
+ HA_EXTRA_EXPORT,
HA_EXTRA_DETACH_CHILD,
/* Inform handler we will force a close as part of flush */
HA_EXTRA_PREPARE_FOR_FORCED_CLOSE
@@ -317,6 +323,23 @@ enum ha_base_keytype {
#define HA_OPTION_RELIES_ON_SQL_LAYER 512
#define HA_OPTION_NULL_FIELDS 1024
#define HA_OPTION_PAGE_CHECKSUM 2048
+/*
+ STATS_PERSISTENT=1 has been specified in the SQL command (either CREATE
+ or ALTER TABLE). Table and index statistics that are collected by the
+ storage engine and used by the optimizer for query optimization will be
+ stored on disk and will not change after a server restart.
+*/
+#define HA_OPTION_STATS_PERSISTENT 4096
+/*
+ STATS_PERSISTENT=0 has been specified in CREATE/ALTER TABLE. Statistics
+ for the table will be wiped away on server shutdown and new ones recalculated
+ after the server is started again. If none of HA_OPTION_STATS_PERSISTENT or
+ HA_OPTION_NO_STATS_PERSISTENT is set, this means that the setting is not
+ explicitly set at table level and the corresponding table will use whatever
+ is the global server default.
+*/
+#define HA_OPTION_NO_STATS_PERSISTENT 8192
+
/* .frm has extra create options in linked-list format */
#define HA_OPTION_TEXT_CREATE_OPTIONS_legacy (1L << 14) /* 5.2 to 5.5, unused since 10.0 */
#define HA_OPTION_TEMP_COMPRESS_RECORD (1L << 15) /* set by isamchk */
@@ -334,7 +357,7 @@ enum ha_base_keytype {
#define HA_CREATE_PAGE_CHECKSUM 32
#define HA_CREATE_DELAY_KEY_WRITE 64
#define HA_CREATE_RELIES_ON_SQL_LAYER 128
-
+#define HA_CREATE_INTERNAL_TABLE 256
/* Flags used by start_bulk_insert */
@@ -458,7 +481,8 @@ enum ha_base_keytype {
/* It is not possible to log this statement */
#define HA_ERR_LOGGING_IMPOSSIBLE 170
/* The event was corrupt, leading to illegal data being read */
-#define HA_ERR_CORRUPT_EVENT 171
+#define HA_ERR_CORRUPT_EVENT 171 /* The event was corrupt, leading to
+ illegal data being read */
#define HA_ERR_NEW_FILE 172 /* New file format */
/* The event could not be processed no other handler error happened */
#define HA_ERR_ROWS_EVENT_APPLY 173
@@ -466,16 +490,19 @@ enum ha_base_keytype {
#define HA_ERR_FILE_TOO_SHORT 175 /* File too short */
#define HA_ERR_WRONG_CRC 176 /* Wrong CRC on page */
#define HA_ERR_TOO_MANY_CONCURRENT_TRXS 177 /*Too many active concurrent transactions */
+/* There's no explicitly listed partition in table for the given value */
#define HA_ERR_NOT_IN_LOCK_PARTITIONS 178
#define HA_ERR_INDEX_COL_TOO_LONG 179 /* Index column length exceeds limit */
#define HA_ERR_INDEX_CORRUPT 180 /* Index corrupted */
#define HA_ERR_UNDO_REC_TOO_BIG 181 /* Undo log record too big */
-#define HA_ERR_TABLE_IN_FK_CHECK 182 /* Table being used in foreign key check */
-#define HA_FTS_INVALID_DOCID 183 /* Invalid InnoDB Doc ID */
-#define HA_ERR_ROW_NOT_VISIBLE 184
-#define HA_ERR_ABORTED_BY_USER 185
-#define HA_ERR_DISK_FULL 186
-#define HA_ERR_LAST 186 /* Copy of last error nr */
+#define HA_FTS_INVALID_DOCID 182 /* Invalid InnoDB Doc ID */
+#define HA_ERR_TABLE_IN_FK_CHECK 183 /* Table being used in foreign key check */
+#define HA_ERR_TABLESPACE_EXISTS 184 /* The tablespace existed in storage engine */
+#define HA_ERR_TOO_MANY_FIELDS 185 /* Table has too many columns */
+#define HA_ERR_ROW_NOT_VISIBLE 186
+#define HA_ERR_ABORTED_BY_USER 187
+#define HA_ERR_DISK_FULL 188
+#define HA_ERR_LAST 188 /* Copy of last error nr */
/* Number of different errors */
#define HA_ERR_ERRORS (HA_ERR_LAST - HA_ERR_FIRST + 1)
@@ -608,4 +635,17 @@ C_MODE_START
typedef void (* invalidator_by_filename)(const char * filename);
C_MODE_END
+
+enum durability_properties
+{
+ /*
+ Preserves the durability properties defined by the engine */
+ HA_REGULAR_DURABILITY= 0,
+ /*
+ Ignore the durability properties defined by the engine and
+ write only in-memory entries.
+ */
+ HA_IGNORE_DURABILITY= 1
+};
+
#endif /* _my_base_h */
diff --git a/include/my_bitmap.h b/include/my_bitmap.h
index 06f43f79df8..ef3274a8269 100644
--- a/include/my_bitmap.h
+++ b/include/my_bitmap.h
@@ -63,6 +63,7 @@ extern uint bitmap_set_next(MY_BITMAP *map);
extern uint bitmap_get_first(const MY_BITMAP *map);
extern uint bitmap_get_first_set(const MY_BITMAP *map);
extern uint bitmap_bits_set(const MY_BITMAP *map);
+extern uint bitmap_get_next_set(const MY_BITMAP *map, uint bitmap_bit);
extern void bitmap_free(MY_BITMAP *map);
extern void bitmap_set_above(MY_BITMAP *map, uint from_byte, uint use_bit);
extern void bitmap_set_prefix(MY_BITMAP *map, uint prefix_size);
diff --git a/include/my_byteorder.h b/include/my_byteorder.h
new file mode 100644
index 00000000000..1f29248bfb2
--- /dev/null
+++ b/include/my_byteorder.h
@@ -0,0 +1,54 @@
+#ifndef MY_BYTEORDER_INCLUDED
+#define MY_BYTEORDER_INCLUDED
+
+/* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+
+/*
+ Macro for reading 32-bit integer from network byte order (big-endian)
+ from an unaligned memory location.
+*/
+#define int4net(A) (int32) (((uint32) ((uchar) (A)[3])) | \
+ (((uint32) ((uchar) (A)[2])) << 8) | \
+ (((uint32) ((uchar) (A)[1])) << 16) | \
+ (((uint32) ((uchar) (A)[0])) << 24))
+
+/*
+ Function-like macros for reading and storing in machine independent
+ format (low byte first). There are 'korr' (assume 'corrector') variants
+ for integer types, but 'get' (assume 'getter') for floating point types.
+*/
+#if defined(__i386__) || defined(_WIN32)
+#define MY_BYTE_ORDER_ARCH_OPTIMIZED
+#include "byte_order_generic_x86.h"
+#elif defined(__x86_64__)
+#include "byte_order_generic_x86_64.h"
+#else
+#include "byte_order_generic.h"
+#endif
+
+/*
+ Function-like macros for reading and storing in machine format from/to
+ short/long to/from some place in memory V should be a variable (not on
+ a register) and M a pointer to byte.
+*/
+#ifdef WORDS_BIGENDIAN
+#include "big_endian.h"
+#else
+#include "little_endian.h"
+#endif
+
+#endif /* MY_BYTEORDER_INCLUDED */
diff --git a/include/my_default.h b/include/my_default.h
new file mode 100644
index 00000000000..1d556de69ee
--- /dev/null
+++ b/include/my_default.h
@@ -0,0 +1,50 @@
+/* Copyright (C) 2013 Monty Program Ab
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/* Definitions for mysys/my_default.c */
+
+#ifndef MY_DEFAULT_INCLUDED
+#define MY_DEFAULT_INCLUDED
+
+C_MODE_START
+
+extern const char *my_defaults_extra_file;
+extern const char *my_defaults_group_suffix;
+extern const char *my_defaults_file;
+extern my_bool my_getopt_use_args_separator;
+extern my_bool my_getopt_is_args_separator(const char* arg);
+
+/* Define the type of function to be passed to process_default_option_files */
+typedef int (*Process_option_func)(void *ctx, const char *group_name,
+ const char *option);
+
+extern int get_defaults_options(int argc, char **argv,
+ char **defaults, char **extra_defaults,
+ char **group_suffix);
+extern int my_load_defaults(const char *conf_file, const char **groups,
+ int *argc, char ***argv, const char ***);
+extern int load_defaults(const char *conf_file, const char **groups,
+ int *argc, char ***argv);
+extern int my_search_option_files(const char *conf_file, int *argc,
+ char ***argv, uint *args_used,
+ Process_option_func func, void *func_ctx,
+ const char **default_directories);
+extern void free_defaults(char **argv);
+extern void my_print_default_files(const char *conf_file);
+extern void print_defaults(const char *conf_file, const char **groups);
+
+C_MODE_END
+
+#endif /* MY_DEFAULT_INCLUDED */
diff --git a/include/my_getopt.h b/include/my_getopt.h
index 589d9c9880c..2cbbca9cab9 100644
--- a/include/my_getopt.h
+++ b/include/my_getopt.h
@@ -17,7 +17,9 @@
#ifndef _my_getopt_h
#define _my_getopt_h
-#include "my_sys.h" /* loglevel */
+#include "my_sys.h" /* loglevel */
+/* my_getopt and my_default are almost always used together */
+#include <my_default.h>
C_MODE_START
@@ -85,7 +87,6 @@ struct my_option
void *app_type; /**< To be used by an application */
};
-
typedef my_bool (*my_get_one_option)(int, const struct my_option *, char *);
/**
diff --git a/include/my_global.h b/include/my_global.h
index 95b69c96dd7..78bf3cfd86c 100644
--- a/include/my_global.h
+++ b/include/my_global.h
@@ -50,11 +50,6 @@
#define _POSIX_THREAD_CPUTIME
#endif /* __CYGWIN__ */
-/* to make command line shorter we'll define USE_PRAGMA_INTERFACE here */
-#ifdef USE_PRAGMA_IMPLEMENTATION
-#define USE_PRAGMA_INTERFACE
-#endif
-
#if defined(__OpenBSD__) && (OpenBSD >= 200411)
#define HAVE_ERRNO_AS_DEFINE
#endif
@@ -130,6 +125,7 @@
/* Define missing access() modes. */
#define F_OK 0
#define W_OK 2
+#define R_OK 4 /* Test for read permission. */
/* Define missing file locking constants. */
#define F_RDLCK 1
@@ -348,6 +344,9 @@ C_MODE_END
#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif
+#ifdef HAVE_SYS_TIMEB_H
+#include <sys/timeb.h> /* Avoid warnings on SCO */
+#endif
#ifdef HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif
@@ -1046,296 +1045,7 @@ typedef ulong myf; /* Type of MyFlags in my_funcs */
#define MY_HOW_OFTEN_TO_ALARM 2 /* How often we want info on screen */
#define MY_HOW_OFTEN_TO_WRITE 10000 /* How often we want info on screen */
-/*
- Define-funktions for reading and storing in machine independent format
- (low byte first)
-*/
-
-/* Optimized store functions for Intel x86 */
-#if defined(__i386__) || defined(_WIN32)
-#define sint2korr(A) (*((const int16 *) (A)))
-#define sint3korr(A) ((int32) ((((uchar) (A)[2]) & 128) ? \
- (((uint32) 255L << 24) | \
- (((uint32) (uchar) (A)[2]) << 16) |\
- (((uint32) (uchar) (A)[1]) << 8) | \
- ((uint32) (uchar) (A)[0])) : \
- (((uint32) (uchar) (A)[2]) << 16) |\
- (((uint32) (uchar) (A)[1]) << 8) | \
- ((uint32) (uchar) (A)[0])))
-#define sint4korr(A) (*((const long *) (A)))
-#define uint2korr(A) (*((const uint16 *) (A)))
-#if defined(HAVE_valgrind) && !defined(_WIN32)
-#define uint3korr(A) (uint32) (((uint32) ((uchar) (A)[0])) +\
- (((uint32) ((uchar) (A)[1])) << 8) +\
- (((uint32) ((uchar) (A)[2])) << 16))
-#else
-/*
- ATTENTION !
-
- Please, note, uint3korr reads 4 bytes (not 3) !
- It means, that you have to provide enough allocated space !
-*/
-#define uint3korr(A) (long) (*((const unsigned int *) (A)) & 0xFFFFFF)
-#endif /* HAVE_valgrind && !_WIN32 */
-#define uint4korr(A) (*((const uint32 *) (A)))
-#define uint5korr(A) ((ulonglong)(((uint32) ((uchar) (A)[0])) +\
- (((uint32) ((uchar) (A)[1])) << 8) +\
- (((uint32) ((uchar) (A)[2])) << 16) +\
- (((uint32) ((uchar) (A)[3])) << 24)) +\
- (((ulonglong) ((uchar) (A)[4])) << 32))
-#define uint6korr(A) ((ulonglong)(((uint32) ((uchar) (A)[0])) + \
- (((uint32) ((uchar) (A)[1])) << 8) + \
- (((uint32) ((uchar) (A)[2])) << 16) + \
- (((uint32) ((uchar) (A)[3])) << 24)) + \
- (((ulonglong) ((uchar) (A)[4])) << 32) + \
- (((ulonglong) ((uchar) (A)[5])) << 40))
-#define uint8korr(A) (*((const ulonglong *) (A)))
-#define sint8korr(A) (*((const longlong *) (A)))
-#define int2store(T,A) *((uint16*) (T))= (uint16) (A)
-#define int3store(T,A) do { *(T)= (uchar) ((A));\
- *(T+1)=(uchar) (((uint) (A) >> 8));\
- *(T+2)=(uchar) (((A) >> 16)); } while (0)
-#define int4store(T,A) *((long *) (T))= (long) (A)
-#define int5store(T,A) do { *(T)= (uchar)((A));\
- *((T)+1)=(uchar) (((A) >> 8));\
- *((T)+2)=(uchar) (((A) >> 16));\
- *((T)+3)=(uchar) (((A) >> 24)); \
- *((T)+4)=(uchar) (((A) >> 32)); } while(0)
-#define int6store(T,A) do { *(T)= (uchar)((A)); \
- *((T)+1)=(uchar) (((A) >> 8)); \
- *((T)+2)=(uchar) (((A) >> 16)); \
- *((T)+3)=(uchar) (((A) >> 24)); \
- *((T)+4)=(uchar) (((A) >> 32)); \
- *((T)+5)=(uchar) (((A) >> 40)); } while(0)
-#define int8store(T,A) *((ulonglong *) (T))= (ulonglong) (A)
-
-typedef union {
- double v;
- long m[2];
-} doubleget_union;
-#define doubleget(V,M) \
-do { doubleget_union _tmp; \
- _tmp.m[0] = *((const long*)(M)); \
- _tmp.m[1] = *(((const long*) (M))+1); \
- (V) = _tmp.v; } while(0)
-#define doublestore(T,V) do { *((long *) T) = ((const doubleget_union *)&V)->m[0]; \
- *(((long *) T)+1) = ((const doubleget_union *)&V)->m[1]; \
- } while (0)
-#define float4get(V,M) do { *((float *) &(V)) = *((const float*) (M)); } while(0)
-#define float8get(V,M) doubleget((V),(M))
-#define float4store(V,M) memcpy((uchar*) V,(uchar*) (&M),sizeof(float))
-#define floatstore(T,V) memcpy((uchar*)(T), (uchar*)(&V),sizeof(float))
-#define floatget(V,M) memcpy((uchar*) &V,(uchar*) (M),sizeof(float))
-#define float8store(V,M) doublestore((V),(M))
-#else
-
-/*
- We're here if it's not a IA-32 architecture (Win32 and UNIX IA-32 defines
- were done before)
-*/
-#define sint2korr(A) (int16) (((int16) ((uchar) (A)[0])) +\
- ((int16) ((int16) (A)[1]) << 8))
-#define sint3korr(A) ((int32) ((((uchar) (A)[2]) & 128) ? \
- (((uint32) 255L << 24) | \
- (((uint32) (uchar) (A)[2]) << 16) |\
- (((uint32) (uchar) (A)[1]) << 8) | \
- ((uint32) (uchar) (A)[0])) : \
- (((uint32) (uchar) (A)[2]) << 16) |\
- (((uint32) (uchar) (A)[1]) << 8) | \
- ((uint32) (uchar) (A)[0])))
-#define sint4korr(A) (int32) (((int32) ((uchar) (A)[0])) +\
- (((int32) ((uchar) (A)[1]) << 8)) +\
- (((int32) ((uchar) (A)[2]) << 16)) +\
- (((int32) ((int16) (A)[3]) << 24)))
-#define sint8korr(A) (longlong) uint8korr(A)
-#define uint2korr(A) (uint16) (((uint16) ((uchar) (A)[0])) +\
- ((uint16) ((uchar) (A)[1]) << 8))
-#define uint3korr(A) (uint32) (((uint32) ((uchar) (A)[0])) +\
- (((uint32) ((uchar) (A)[1])) << 8) +\
- (((uint32) ((uchar) (A)[2])) << 16))
-#define uint4korr(A) (uint32) (((uint32) ((uchar) (A)[0])) +\
- (((uint32) ((uchar) (A)[1])) << 8) +\
- (((uint32) ((uchar) (A)[2])) << 16) +\
- (((uint32) ((uchar) (A)[3])) << 24))
-#define uint5korr(A) ((ulonglong)(((uint32) ((uchar) (A)[0])) +\
- (((uint32) ((uchar) (A)[1])) << 8) +\
- (((uint32) ((uchar) (A)[2])) << 16) +\
- (((uint32) ((uchar) (A)[3])) << 24)) +\
- (((ulonglong) ((uchar) (A)[4])) << 32))
-#define uint6korr(A) ((ulonglong)(((uint32) ((uchar) (A)[0])) + \
- (((uint32) ((uchar) (A)[1])) << 8) + \
- (((uint32) ((uchar) (A)[2])) << 16) + \
- (((uint32) ((uchar) (A)[3])) << 24)) + \
- (((ulonglong) ((uchar) (A)[4])) << 32) + \
- (((ulonglong) ((uchar) (A)[5])) << 40))
-#define uint8korr(A) ((ulonglong)(((uint32) ((uchar) (A)[0])) +\
- (((uint32) ((uchar) (A)[1])) << 8) +\
- (((uint32) ((uchar) (A)[2])) << 16) +\
- (((uint32) ((uchar) (A)[3])) << 24)) +\
- (((ulonglong) (((uint32) ((uchar) (A)[4])) +\
- (((uint32) ((uchar) (A)[5])) << 8) +\
- (((uint32) ((uchar) (A)[6])) << 16) +\
- (((uint32) ((uchar) (A)[7])) << 24))) <<\
- 32))
-#define int2store(T,A) do { uint def_temp= (uint) (A) ;\
- *((uchar*) (T))= (uchar)(def_temp); \
- *((uchar*) (T)+1)=(uchar)((def_temp >> 8)); \
- } while(0)
-#define int3store(T,A) do { /*lint -save -e734 */\
- *((uchar*)(T))=(uchar) ((A));\
- *((uchar*) (T)+1)=(uchar) (((A) >> 8));\
- *((uchar*)(T)+2)=(uchar) (((A) >> 16)); \
- /*lint -restore */} while(0)
-#define int4store(T,A) do { *((char *)(T))=(char) ((A));\
- *(((char *)(T))+1)=(char) (((A) >> 8));\
- *(((char *)(T))+2)=(char) (((A) >> 16));\
- *(((char *)(T))+3)=(char) (((A) >> 24)); } while(0)
-#define int5store(T,A) do { *((char *)(T))= (char)((A)); \
- *(((char *)(T))+1)= (char)(((A) >> 8)); \
- *(((char *)(T))+2)= (char)(((A) >> 16)); \
- *(((char *)(T))+3)= (char)(((A) >> 24)); \
- *(((char *)(T))+4)= (char)(((A) >> 32)); \
- } while(0)
-#define int6store(T,A) do { *((char *)(T))= (char)((A)); \
- *(((char *)(T))+1)= (char)(((A) >> 8)); \
- *(((char *)(T))+2)= (char)(((A) >> 16)); \
- *(((char *)(T))+3)= (char)(((A) >> 24)); \
- *(((char *)(T))+4)= (char)(((A) >> 32)); \
- *(((char *)(T))+5)= (char)(((A) >> 40)); \
- } while(0)
-#define int8store(T,A) do { uint def_temp= (uint) (A), def_temp2= (uint) ((A) >> 32); \
- int4store((T),def_temp); \
- int4store((T+4),def_temp2); } while(0)
-#ifdef WORDS_BIGENDIAN
-#define float4store(T,A) do { *(T)= ((uchar *) &A)[3];\
- *((T)+1)=(char) ((uchar *) &A)[2];\
- *((T)+2)=(char) ((uchar *) &A)[1];\
- *((T)+3)=(char) ((uchar *) &A)[0]; } while(0)
-
-#define float4get(V,M) do { float def_temp;\
- ((uchar*) &def_temp)[0]=(M)[3];\
- ((uchar*) &def_temp)[1]=(M)[2];\
- ((uchar*) &def_temp)[2]=(M)[1];\
- ((uchar*) &def_temp)[3]=(M)[0];\
- (V)=def_temp; } while(0)
-#define float8store(T,V) do { *(T)= ((uchar *) &V)[7];\
- *((T)+1)=(char) ((uchar *) &V)[6];\
- *((T)+2)=(char) ((uchar *) &V)[5];\
- *((T)+3)=(char) ((uchar *) &V)[4];\
- *((T)+4)=(char) ((uchar *) &V)[3];\
- *((T)+5)=(char) ((uchar *) &V)[2];\
- *((T)+6)=(char) ((uchar *) &V)[1];\
- *((T)+7)=(char) ((uchar *) &V)[0]; } while(0)
-
-#define float8get(V,M) do { double def_temp;\
- ((uchar*) &def_temp)[0]=(M)[7];\
- ((uchar*) &def_temp)[1]=(M)[6];\
- ((uchar*) &def_temp)[2]=(M)[5];\
- ((uchar*) &def_temp)[3]=(M)[4];\
- ((uchar*) &def_temp)[4]=(M)[3];\
- ((uchar*) &def_temp)[5]=(M)[2];\
- ((uchar*) &def_temp)[6]=(M)[1];\
- ((uchar*) &def_temp)[7]=(M)[0];\
- (V) = def_temp; } while(0)
-#else
-#define float4get(V,M) memcpy(&V, (M), sizeof(float))
-#define float4store(V,M) memcpy(V, (&M), sizeof(float))
-
-#if defined(__FLOAT_WORD_ORDER) && (__FLOAT_WORD_ORDER == __BIG_ENDIAN)
-#define doublestore(T,V) do { *(((char*)T)+0)=(char) ((uchar *) &V)[4];\
- *(((char*)T)+1)=(char) ((uchar *) &V)[5];\
- *(((char*)T)+2)=(char) ((uchar *) &V)[6];\
- *(((char*)T)+3)=(char) ((uchar *) &V)[7];\
- *(((char*)T)+4)=(char) ((uchar *) &V)[0];\
- *(((char*)T)+5)=(char) ((uchar *) &V)[1];\
- *(((char*)T)+6)=(char) ((uchar *) &V)[2];\
- *(((char*)T)+7)=(char) ((uchar *) &V)[3]; }\
- while(0)
-#define doubleget(V,M) do { double def_temp;\
- ((uchar*) &def_temp)[0]=(M)[4];\
- ((uchar*) &def_temp)[1]=(M)[5];\
- ((uchar*) &def_temp)[2]=(M)[6];\
- ((uchar*) &def_temp)[3]=(M)[7];\
- ((uchar*) &def_temp)[4]=(M)[0];\
- ((uchar*) &def_temp)[5]=(M)[1];\
- ((uchar*) &def_temp)[6]=(M)[2];\
- ((uchar*) &def_temp)[7]=(M)[3];\
- (V) = def_temp; } while(0)
-#endif /* __FLOAT_WORD_ORDER */
-
-#define float8get(V,M) doubleget((V),(M))
-#define float8store(V,M) doublestore((V),(M))
-#endif /* WORDS_BIGENDIAN */
-
-#endif /* __i386__ OR _WIN32 */
-
-/*
- Macro for reading 32-bit integer from network byte order (big-endian)
- from unaligned memory location.
-*/
-#define int4net(A) (int32) (((uint32) ((uchar) (A)[3])) |\
- (((uint32) ((uchar) (A)[2])) << 8) |\
- (((uint32) ((uchar) (A)[1])) << 16) |\
- (((uint32) ((uchar) (A)[0])) << 24))
-/*
- Define-funktions for reading and storing in machine format from/to
- short/long to/from some place in memory V should be a (not
- register) variable, M is a pointer to byte
-*/
-
-#ifdef WORDS_BIGENDIAN
-
-#define ushortget(V,M) do { V = (uint16) (((uint16) ((uchar) (M)[1]))+\
- ((uint16) ((uint16) (M)[0]) << 8)); } while(0)
-#define shortget(V,M) do { V = (short) (((short) ((uchar) (M)[1]))+\
- ((short) ((short) (M)[0]) << 8)); } while(0)
-#define longget(V,M) do { int32 def_temp;\
- ((uchar*) &def_temp)[0]=(M)[0];\
- ((uchar*) &def_temp)[1]=(M)[1];\
- ((uchar*) &def_temp)[2]=(M)[2];\
- ((uchar*) &def_temp)[3]=(M)[3];\
- (V)=def_temp; } while(0)
-#define ulongget(V,M) do { uint32 def_temp;\
- ((uchar*) &def_temp)[0]=(M)[0];\
- ((uchar*) &def_temp)[1]=(M)[1];\
- ((uchar*) &def_temp)[2]=(M)[2];\
- ((uchar*) &def_temp)[3]=(M)[3];\
- (V)=def_temp; } while(0)
-#define shortstore(T,A) do { uint def_temp=(uint) (A) ;\
- *(((char*)T)+1)=(char)(def_temp); \
- *(((char*)T)+0)=(char)(def_temp >> 8); } while(0)
-#define longstore(T,A) do { *(((char*)T)+3)=((A));\
- *(((char*)T)+2)=(((A) >> 8));\
- *(((char*)T)+1)=(((A) >> 16));\
- *(((char*)T)+0)=(((A) >> 24)); } while(0)
-
-#define floatget(V,M) memcpy(&V, (M), sizeof(float))
-#define floatstore(T,V) memcpy((T), (void*) (&V), sizeof(float))
-#define doubleget(V,M) memcpy(&V, (M), sizeof(double))
-#define doublestore(T,V) memcpy((T), (void *) &V, sizeof(double))
-#define longlongget(V,M) memcpy(&V, (M), sizeof(ulonglong))
-#define longlongstore(T,V) memcpy((T), &V, sizeof(ulonglong))
-
-#else
-
-#define ushortget(V,M) do { V = uint2korr(M); } while(0)
-#define shortget(V,M) do { V = sint2korr(M); } while(0)
-#define longget(V,M) do { V = sint4korr(M); } while(0)
-#define ulongget(V,M) do { V = uint4korr(M); } while(0)
-#define shortstore(T,V) int2store(T,V)
-#define longstore(T,V) int4store(T,V)
-#ifndef floatstore
-#define floatstore(T,V) memcpy((T), (void *) (&V), sizeof(float))
-#define floatget(V,M) memcpy(&V, (M), sizeof(float))
-#endif
-#ifndef doubleget
-#define doubleget(V,M) memcpy(&V, (M), sizeof(double))
-#define doublestore(T,V) memcpy((T), (void *) &V, sizeof(double))
-#endif /* doubleget */
-#define longlongget(V,M) memcpy(&V, (M), sizeof(ulonglong))
-#define longlongstore(T,V) memcpy((T), &V, sizeof(ulonglong))
-
-#endif /* WORDS_BIGENDIAN */
+#include <my_byteorder.h>
#ifdef HAVE_CHARSET_utf8
#define MYSQL_UNIVERSAL_CLIENT_CHARSET "utf8"
@@ -1396,10 +1106,6 @@ static inline char *dlerror(void)
#endif
/* Define some useful general macros (should be done after all headers). */
-#if !defined(max)
-#define max(a, b) ((a) > (b) ? (a) : (b))
-#define min(a, b) ((a) < (b) ? (a) : (b))
-#endif
#define MY_MAX(a, b) ((a) > (b) ? (a) : (b))
#define MY_MIN(a, b) ((a) < (b) ? (a) : (b))
diff --git a/include/my_handler_errors.h b/include/my_handler_errors.h
index f2c51773e83..24b977c38ce 100644
--- a/include/my_handler_errors.h
+++ b/include/my_handler_errors.h
@@ -84,8 +84,10 @@ static const char *handler_error_messages[]=
"Index column length exceeds limit",
"Index corrupted",
"Undo record too big",
- "Table is being used in foreign key check",
"Invalid InnoDB FTS Doc ID",
+ "Table is being used in foreign key check",
+ "Tablespace already exists",
+ "Too many columns",
"Row is not visible by the current transaction",
"Operation was interrupted by end user (probably kill command?)",
"Disk full"
diff --git a/include/my_md5.h b/include/my_md5.h
index 1273616a19b..77557fb9346 100644
--- a/include/my_md5.h
+++ b/include/my_md5.h
@@ -1,8 +1,8 @@
#ifndef MY_MD5_INCLUDED
#define MY_MD5_INCLUDED
-/* Copyright (c) 2000, 2001, 2007 MySQL AB, 2009 Sun Microsystems, Inc.
- Use is subject to license terms
+/* Copyright (c) 2000, 2012, Oracle and/or its affiliates.
+ Copyright (c) 2013 Monty Program Ab
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -17,79 +17,36 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
-/* See md5.c for explanation and copyright information. */
+#include "m_string.h"
-/*
- * $FreeBSD: src/contrib/cvs/lib/md5.h,v 1.2 1999/12/11 15:10:02 peter Exp $
- */
+#define MD5_HASH_SIZE 16 /* Hash size in bytes */
-#if defined(HAVE_YASSL) || defined(HAVE_OPENSSL)
/*
- Use MD5 implementation provided by the SSL libraries.
+ Wrapper function for MD5 implementation.
*/
-
-#if defined(HAVE_YASSL)
-
-C_MODE_START
-
-void my_md5_hash(char *digest, const char *buf, int len);
-
-C_MODE_END
-
-#else /* HAVE_YASSL */
-
-#include <openssl/md5.h>
-
-#define MY_MD5_HASH(digest, buf, len) \
-do { \
- MD5_CTX ctx; \
- MD5_Init (&ctx); \
- MD5_Update (&ctx, buf, len); \
- MD5_Final (digest, &ctx); \
-} while (0)
-
-#endif /* HAVE_YASSL */
-
-#else /* HAVE_YASSL || HAVE_OPENSSL */
-/* Fallback to the MySQL's implementation. */
-
-/* Unlike previous versions of this code, uint32 need not be exactly
- 32 bits, merely 32 bits or more. Choosing a data type which is 32
- bits instead of 64 is not important; speed is considerably more
- important. ANSI guarantees that "unsigned long" will be big enough,
- and always using it seems to have few disadvantages. */
-typedef uint32 cvs_uint32;
-
-typedef struct {
- cvs_uint32 buf[4];
- cvs_uint32 bits[2];
- unsigned char in[64];
-} my_MD5Context;
-
-C_MODE_START
-
-void my_MD5Init (my_MD5Context *context);
-void my_MD5Update (my_MD5Context *context,
- unsigned char const *buf, unsigned len);
-void my_MD5Final (unsigned char digest[16],
- my_MD5Context *context);
-
-C_MODE_END
-
-#define MY_MD5_HASH(digest,buf,len) \
-do { \
- my_MD5Context ctx; \
- my_MD5Init (&ctx); \
- my_MD5Update (&ctx, buf, len); \
- my_MD5Final (digest, &ctx); \
-} while (0)
-
-#endif /* defined(HAVE_YASSL) || defined(HAVE_OPENSSL) */
-
-C_MODE_START
+#ifdef __cplusplus
+extern "C" {
+#endif
void compute_md5_hash(char *digest, const char *buf, int len);
-C_MODE_END
+/*
+ Convert an array of bytes to a hexadecimal representation.
+
+ Used to generate a hexadecimal representation of a message digest.
+*/
+static inline void array_to_hex(char *to, const unsigned char *str, uint len)
+{
+ const unsigned char *str_end= str + len;
+ for (; str != str_end; ++str)
+ {
+ *to++= _dig_vec_lower[((uchar) *str) >> 4];
+ *to++= _dig_vec_lower[((uchar) *str) & 0x0F];
+ }
+}
+
+#ifdef __cplusplus
+}
+#endif
#endif /* MY_MD5_INCLUDED */
diff --git a/include/my_rnd.h b/include/my_rnd.h
new file mode 100644
index 00000000000..b4a5d735811
--- /dev/null
+++ b/include/my_rnd.h
@@ -0,0 +1,32 @@
+/* Copyright (C) 2013 Monty Program Ab
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 or later of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef _my_rnd_h
+#define _my_rnd_h
+
+C_MODE_START
+
+struct my_rnd_struct {
+ unsigned long seed1,seed2,max_value;
+ double max_value_dbl;
+};
+
+void my_rnd_init(struct my_rnd_struct *rand_st, ulong seed1, ulong seed2);
+double my_rnd(struct my_rnd_struct *rand_st);
+double my_rnd_ssl(struct my_rnd_struct *rand_st);
+
+C_MODE_END
+
+#endif /* _my_rnd_h */
diff --git a/include/my_sys.h b/include/my_sys.h
index f5f0d61ab46..a29c3653d17 100644
--- a/include/my_sys.h
+++ b/include/my_sys.h
@@ -174,8 +174,6 @@ extern void *my_memdup(const void *from,size_t length,myf MyFlags);
extern char *my_strdup(const char *from,myf MyFlags);
extern char *my_strndup(const char *from, size_t length, myf MyFlags);
-extern int sf_leaking_memory; /* set to 1 to disable memleak detection */
-
#ifdef HAVE_LARGE_PAGES
extern uint my_get_large_page_size(void);
extern uchar * my_large_malloc(size_t size, myf my_flags);
@@ -199,14 +197,18 @@ extern void my_large_free(uchar *ptr);
#endif /* GNUC */
#define my_alloca(SZ) alloca((size_t) (SZ))
#define my_afree(PTR) ((void)0)
+#define my_safe_alloca(size, max_alloca_sz) ((size <= max_alloca_sz) ? \
+ my_alloca(size) : \
+ my_malloc(size, MYF(0)))
+#define my_safe_afree(ptr, size, max_alloca_sz) if (size > max_alloca_sz) \
+ my_free(ptr)
#else
#define my_alloca(SZ) my_malloc(SZ,MYF(MY_FAE))
#define my_afree(PTR) my_free(PTR)
+#define my_safe_alloca(size, max_alloca_sz) my_alloca(size)
+#define my_safe_afree(ptr, size, max_alloca_sz) my_afree(ptr)
#endif /* HAVE_ALLOCA */
-#define my_safe_alloca(size, min_length) ((size <= min_length) ? my_alloca(size) : my_malloc(size,MYF(MY_FAE)))
-#define my_safe_afree(ptr, size, min_length) ((size <= min_length) ? my_afree(ptr) : my_free(ptr))
-
#ifndef errno /* did we already get it? */
#ifdef HAVE_ERRNO_AS_DEFINE
#include <errno.h> /* errno is a define */
@@ -223,6 +225,7 @@ extern void (*fatal_error_handler_hook)(uint my_err, const char *str,
myf MyFlags);
extern uint my_file_limit;
extern ulonglong my_thread_stack_size;
+extern int sf_leaking_memory; /* set to 1 to disable memleak detection */
extern void (*proc_info_hook)(void *, const PSI_stage_info *, PSI_stage_info *,
const char *, const char *, const unsigned int);
@@ -265,11 +268,6 @@ extern my_bool my_disable_locking, my_disable_async_io,
extern my_bool my_disable_sync;
extern char wild_many,wild_one,wild_prefix;
extern const char *charsets_dir;
-/* from default.c */
-extern const char *my_defaults_extra_file;
-extern const char *my_defaults_group_suffix;
-extern const char *my_defaults_file;
-
extern my_bool timed_mutexes;
enum loglevel {
@@ -566,13 +564,8 @@ my_off_t my_b_safe_tell(IO_CACHE* info); /* picks the correct tell() */
typedef uint32 ha_checksum;
extern ulong my_crc_dbug_check;
-/* Define the type of function to be passed to process_default_option_files */
-typedef int (*Process_option_func)(void *ctx, const char *group_name,
- const char *option);
-
#include <my_alloc.h>
-
/* Prototypes for mysys and my_func functions */
extern int my_copy(const char *from,const char *to,myf MyFlags);
@@ -631,6 +624,13 @@ extern int my_access(const char *path, int amode);
extern int check_if_legal_filename(const char *path);
extern int check_if_legal_tablename(const char *path);
+#ifdef __WIN__
+extern my_bool is_filename_allowed(const char *name, size_t length,
+ my_bool allow_current_dir);
+#else /* __WIN__ */
+# define is_filename_allowed(name, length, allow_cwd) (TRUE)
+#endif /* __WIN__ */
+
#ifdef _WIN32
/* Windows-only functions (CRT equivalents)*/
extern HANDLE my_get_osfhandle(File fd);
@@ -656,15 +656,16 @@ extern void thr_set_sync_wait_callback(void (*before_sync)(void),
extern int my_sync(File fd, myf my_flags);
extern int my_sync_dir(const char *dir_name, myf my_flags);
extern int my_sync_dir_by_file(const char *file_name, myf my_flags);
-extern void my_error(int nr,myf MyFlags, ...);
+extern const char *my_get_err_msg(uint nr);
+extern void my_error(uint nr,myf MyFlags, ...);
extern void my_printf_error(uint my_err, const char *format,
myf MyFlags, ...)
ATTRIBUTE_FORMAT(printf, 2, 4);
extern void my_printv_error(uint error, const char *format, myf MyFlags,
va_list ap);
extern int my_error_register(const char** (*get_errmsgs) (),
- int first, int last);
-extern const char **my_error_unregister(int first, int last);
+ uint first, uint last);
+extern const char **my_error_unregister(uint first, uint last);
extern void my_message(uint my_err, const char *str,myf MyFlags);
extern void my_message_stderr(uint my_err, const char *str, myf MyFlags);
extern my_bool my_init(void);
@@ -853,22 +854,6 @@ static inline char *safe_strdup_root(MEM_ROOT *root, const char *str)
}
extern char *strmake_root(MEM_ROOT *root,const char *str,size_t len);
extern void *memdup_root(MEM_ROOT *root,const void *str, size_t len);
-extern int get_defaults_options(int argc, char **argv,
- char **defaults, char **extra_defaults,
- char **group_suffix);
-extern my_bool my_getopt_use_args_separator;
-extern my_bool my_getopt_is_args_separator(const char* arg);
-extern int my_load_defaults(const char *conf_file, const char **groups,
- int *argc, char ***argv, const char ***);
-extern int load_defaults(const char *conf_file, const char **groups,
- int *argc, char ***argv);
-extern int my_search_option_files(const char *conf_file, int *argc,
- char ***argv, uint *args_used,
- Process_option_func func, void *func_ctx,
- const char **default_directories);
-extern void free_defaults(char **argv);
-extern void my_print_default_files(const char *conf_file);
-extern void print_defaults(const char *conf_file, const char **groups);
extern my_bool my_compress(uchar *, size_t *, size_t *);
extern my_bool my_uncompress(uchar *, size_t , size_t *);
extern uchar *my_compress_alloc(const uchar *packet, size_t *len,
@@ -960,14 +945,6 @@ void my_uuid(uchar *guid);
void my_uuid2str(const uchar *guid, char *s);
void my_uuid_end();
-struct my_rnd_struct {
- unsigned long seed1,seed2,max_value;
- double max_value_dbl;
-};
-
-void my_rnd_init(struct my_rnd_struct *rand_st, ulong seed1, ulong seed2);
-double my_rnd(struct my_rnd_struct *rand_st);
-
/* character sets */
extern uint get_charset_number(const char *cs_name, uint cs_flags);
extern uint get_collation_number(const char *name);
@@ -1030,6 +1007,5 @@ void my_init_mysys_psi_keys(void);
struct st_mysql_file;
extern struct st_mysql_file *mysql_stdin;
-
C_MODE_END
#endif /* _my_sys_h */
diff --git a/include/my_time.h b/include/my_time.h
index ea471fd6b0a..4991d996258 100644
--- a/include/my_time.h
+++ b/include/my_time.h
@@ -146,8 +146,8 @@ void my_init_time(void);
estimate.
RETURN VALUES
- FALSE The value seems sane
- TRUE The MYSQL_TIME value is definitely out of range
+ TRUE The value seems sane
+ FALSE The MYSQL_TIME value is definitely out of range
*/
static inline my_bool validate_timestamp_range(const MYSQL_TIME *t)
diff --git a/include/myisammrg.h b/include/myisammrg.h
index 84b2d637892..89293537989 100644
--- a/include/myisammrg.h
+++ b/include/myisammrg.h
@@ -31,7 +31,8 @@ extern "C" {
#include <queues.h>
-#define MYRG_NAME_EXT ".MRG"
+#define MYRG_NAME_EXT ".MRG"
+#define MYRG_NAME_TMPEXT ".MRG_TMP"
/* In which table to INSERT rows */
#define MERGE_INSERT_DISABLED 0
diff --git a/include/mysql/client_authentication.h b/include/mysql/client_authentication.h
new file mode 100644
index 00000000000..2bd2fc98bac
--- /dev/null
+++ b/include/mysql/client_authentication.h
@@ -0,0 +1,13 @@
+#ifndef CLIENT_AUTHENTICATION_H
+#define CLIENT_AUTHENTICATION_H
+#include "mysql.h"
+#include "mysql/client_plugin.h"
+
+C_MODE_START
+int sha256_password_auth_client(MYSQL_PLUGIN_VIO *vio, MYSQL *mysql);
+int sha256_password_init(char *, size_t, int, va_list);
+int sha256_password_deinit(void);
+C_MODE_END
+
+#endif
+
diff --git a/include/mysql/plugin.h b/include/mysql/plugin.h
index a96e3e72b7b..7092739f5d1 100644
--- a/include/mysql/plugin.h
+++ b/include/mysql/plugin.h
@@ -46,6 +46,7 @@ class Item;
#endif
typedef char my_bool;
+typedef void * MYSQL_PLUGIN;
#include <mysql/services.h>
@@ -71,10 +72,10 @@ typedef struct st_mysql_xid MYSQL_XID;
*/
/* MySQL plugin interface version */
-#define MYSQL_PLUGIN_INTERFACE_VERSION 0x0103
+#define MYSQL_PLUGIN_INTERFACE_VERSION 0x0104
/* MariaDB plugin interface version */
-#define MARIA_PLUGIN_INTERFACE_VERSION 0x0105
+#define MARIA_PLUGIN_INTERFACE_VERSION 0x0107
/*
The allowable types of plugins
@@ -87,7 +88,8 @@ typedef struct st_mysql_xid MYSQL_XID;
#define MYSQL_AUDIT_PLUGIN 5 /* The Audit plugin type */
#define MYSQL_REPLICATION_PLUGIN 6 /* The replication plugin type */
#define MYSQL_AUTHENTICATION_PLUGIN 7 /* The authentication plugin type */
-#define MYSQL_MAX_PLUGIN_TYPE_NUM 8 /* The number of plugin types */
+#define MYSQL_VALIDATE_PASSWORD_PLUGIN 8 /* validate password plugin type */
+#define MYSQL_MAX_PLUGIN_TYPE_NUM 9 /* The number of plugin types */
/* We use the following strings to define licenses for plugins */
#define PLUGIN_LICENSE_PROPRIETARY 0
@@ -560,7 +562,7 @@ struct handlerton;
/*
API for Replication plugin. (MYSQL_REPLICATION_PLUGIN)
*/
- #define MYSQL_REPLICATION_INTERFACE_VERSION 0x0100
+ #define MYSQL_REPLICATION_INTERFACE_VERSION 0x0200
/**
Replication plugin descriptor
@@ -608,6 +610,7 @@ int thd_sql_command(const MYSQL_THD thd);
void **thd_ha_data(const MYSQL_THD thd, const struct handlerton *hton);
void thd_storage_lock_wait(MYSQL_THD thd, long long value);
int thd_tx_isolation(const MYSQL_THD thd);
+int thd_tx_is_read_only(const MYSQL_THD thd);
char *thd_security_context(MYSQL_THD thd, char *buffer, unsigned int length,
unsigned int max_query_len);
/* Increments the row counter, see THD::row_count */
diff --git a/include/mysql/plugin_audit.h.pp b/include/mysql/plugin_audit.h.pp
index a3fabf011ab..ff82ad951ea 100644
--- a/include/mysql/plugin_audit.h.pp
+++ b/include/mysql/plugin_audit.h.pp
@@ -1,5 +1,6 @@
#include "plugin.h"
typedef char my_bool;
+typedef void * MYSQL_PLUGIN;
#include <mysql/services.h>
#include <mysql/service_my_snprintf.h>
extern struct my_snprintf_service_st {
@@ -107,6 +108,13 @@ extern struct thd_timezone_service_st {
} *thd_timezone_service;
my_time_t thd_TIME_to_gmt_sec(void* thd, const MYSQL_TIME *ltime, unsigned int *errcode);
void thd_gmt_sec_to_TIME(void* thd, MYSQL_TIME *ltime, my_time_t t);
+#include <mysql/service_sha1.h>
+extern struct my_sha1_service_st {
+ void (*my_sha1_type)(unsigned char*, const char*, size_t);
+ void (*my_sha1_multi_type)(unsigned char*, ...);
+} *my_sha1_service;
+void my_sha1(unsigned char*, const char*, size_t);
+void my_sha1_multi(unsigned char*, ...);
struct st_mysql_xid {
long formatID;
long gtrid_length;
@@ -247,6 +255,7 @@ int thd_sql_command(const void* thd);
void **thd_ha_data(const void* thd, const struct handlerton *hton);
void thd_storage_lock_wait(void* thd, long long value);
int thd_tx_isolation(const void* thd);
+int thd_tx_is_read_only(const void* thd);
char *thd_security_context(void* thd, char *buffer, unsigned int length,
unsigned int max_query_len);
void thd_inc_row_count(void* thd);
diff --git a/include/mysql/plugin_auth.h.pp b/include/mysql/plugin_auth.h.pp
index 28172286fb3..5f7c1ab72f1 100644
--- a/include/mysql/plugin_auth.h.pp
+++ b/include/mysql/plugin_auth.h.pp
@@ -1,5 +1,6 @@
#include <mysql/plugin.h>
typedef char my_bool;
+typedef void * MYSQL_PLUGIN;
#include <mysql/services.h>
#include <mysql/service_my_snprintf.h>
extern struct my_snprintf_service_st {
@@ -107,6 +108,13 @@ extern struct thd_timezone_service_st {
} *thd_timezone_service;
my_time_t thd_TIME_to_gmt_sec(void* thd, const MYSQL_TIME *ltime, unsigned int *errcode);
void thd_gmt_sec_to_TIME(void* thd, MYSQL_TIME *ltime, my_time_t t);
+#include <mysql/service_sha1.h>
+extern struct my_sha1_service_st {
+ void (*my_sha1_type)(unsigned char*, const char*, size_t);
+ void (*my_sha1_multi_type)(unsigned char*, ...);
+} *my_sha1_service;
+void my_sha1(unsigned char*, const char*, size_t);
+void my_sha1_multi(unsigned char*, ...);
struct st_mysql_xid {
long formatID;
long gtrid_length;
@@ -247,6 +255,7 @@ int thd_sql_command(const void* thd);
void **thd_ha_data(const void* thd, const struct handlerton *hton);
void thd_storage_lock_wait(void* thd, long long value);
int thd_tx_isolation(const void* thd);
+int thd_tx_is_read_only(const void* thd);
char *thd_security_context(void* thd, char *buffer, unsigned int length,
unsigned int max_query_len);
void thd_inc_row_count(void* thd);
diff --git a/include/mysql/plugin_auth_common.h b/include/mysql/plugin_auth_common.h
index c0b61730d0d..9d7dd2a08bf 100644
--- a/include/mysql/plugin_auth_common.h
+++ b/include/mysql/plugin_auth_common.h
@@ -34,6 +34,27 @@
return values of the plugin authenticate_user() method.
*/
+ /**
+ Authentication failed, plugin internal error.
+ An error occurred in the authentication plugin itself.
+ These errors are reported in table performance_schema.host_cache,
+ column COUNT_AUTH_PLUGIN_ERRORS.
+*/
+#define CR_AUTH_PLUGIN_ERROR 3
+/**
+ Authentication failed, client server handshake.
+ An error occurred during the client server handshake.
+ These errors are reported in table performance_schema.host_cache,
+ column COUNT_HANDSHAKE_ERRORS.
+*/
+#define CR_AUTH_HANDSHAKE 2
+/**
+ Authentication failed, user credentials.
+ For example, wrong passwords.
+ These errors are reported in table performance_schema.host_cache,
+ column COUNT_AUTHENTICATION_ERRORS.
+*/
+#define CR_AUTH_USER_CREDENTIALS 1
/**
Authentication failed. Additionally, all other CR_xxx values
(libmysql error code) can be used too.
diff --git a/include/mysql/plugin_ftparser.h.pp b/include/mysql/plugin_ftparser.h.pp
index 532e049cf53..05eed030d66 100644
--- a/include/mysql/plugin_ftparser.h.pp
+++ b/include/mysql/plugin_ftparser.h.pp
@@ -1,5 +1,6 @@
#include "plugin.h"
typedef char my_bool;
+typedef void * MYSQL_PLUGIN;
#include <mysql/services.h>
#include <mysql/service_my_snprintf.h>
extern struct my_snprintf_service_st {
@@ -107,6 +108,13 @@ extern struct thd_timezone_service_st {
} *thd_timezone_service;
my_time_t thd_TIME_to_gmt_sec(void* thd, const MYSQL_TIME *ltime, unsigned int *errcode);
void thd_gmt_sec_to_TIME(void* thd, MYSQL_TIME *ltime, my_time_t t);
+#include <mysql/service_sha1.h>
+extern struct my_sha1_service_st {
+ void (*my_sha1_type)(unsigned char*, const char*, size_t);
+ void (*my_sha1_multi_type)(unsigned char*, ...);
+} *my_sha1_service;
+void my_sha1(unsigned char*, const char*, size_t);
+void my_sha1_multi(unsigned char*, ...);
struct st_mysql_xid {
long formatID;
long gtrid_length;
@@ -200,6 +208,7 @@ int thd_sql_command(const void* thd);
void **thd_ha_data(const void* thd, const struct handlerton *hton);
void thd_storage_lock_wait(void* thd, long long value);
int thd_tx_isolation(const void* thd);
+int thd_tx_is_read_only(const void* thd);
char *thd_security_context(void* thd, char *buffer, unsigned int length,
unsigned int max_query_len);
void thd_inc_row_count(void* thd);
diff --git a/include/mysql/psi/mysql_file.h b/include/mysql/psi/mysql_file.h
index 816ac713631..c226258f462 100644
--- a/include/mysql/psi/mysql_file.h
+++ b/include/mysql/psi/mysql_file.h
@@ -518,7 +518,7 @@ static inline void inline_mysql_file_register(
)
{
#ifdef HAVE_PSI_FILE_INTERFACE
- PSI_CALL(register_file)(category, info, count);
+ PSI_FILE_CALL(register_file)(category, info, count);
#endif
}
@@ -533,13 +533,13 @@ inline_mysql_file_fgets(
#ifdef HAVE_PSI_FILE_INTERFACE
struct PSI_file_locker *locker;
PSI_file_locker_state state;
- locker= PSI_CALL(get_thread_file_stream_locker)(&state, file->m_psi,
- PSI_FILE_READ);
+ locker= PSI_FILE_CALL(get_thread_file_stream_locker)
+ (&state, file->m_psi, PSI_FILE_READ);
if (likely(locker != NULL))
{
- PSI_CALL(start_file_wait)(locker, (size_t) size, src_file, src_line);
+ PSI_FILE_CALL(start_file_wait)(locker, (size_t) size, src_file, src_line);
result= fgets(str, size, file->m_file);
- PSI_CALL(end_file_wait)(locker, result ? strlen(result) : 0);
+ PSI_FILE_CALL(end_file_wait)(locker, result ? strlen(result) : 0);
return result;
}
#endif
@@ -559,13 +559,13 @@ inline_mysql_file_fgetc(
#ifdef HAVE_PSI_FILE_INTERFACE
struct PSI_file_locker *locker;
PSI_file_locker_state state;
- locker= PSI_CALL(get_thread_file_stream_locker)(&state, file->m_psi,
- PSI_FILE_READ);
+ locker= PSI_FILE_CALL(get_thread_file_stream_locker)
+ (&state, file->m_psi, PSI_FILE_READ);
if (likely(locker != NULL))
{
- PSI_CALL(start_file_wait)(locker, (size_t) 1, src_file, src_line);
+ PSI_FILE_CALL(start_file_wait)(locker, (size_t) 1, src_file, src_line);
result= fgetc(file->m_file);
- PSI_CALL(end_file_wait)(locker, (size_t) 1);
+ PSI_FILE_CALL(end_file_wait)(locker, (size_t) 1);
return result;
}
#endif
@@ -586,14 +586,14 @@ inline_mysql_file_fputs(
struct PSI_file_locker *locker;
PSI_file_locker_state state;
size_t bytes;
- locker= PSI_CALL(get_thread_file_stream_locker)(&state, file->m_psi,
- PSI_FILE_WRITE);
+ locker= PSI_FILE_CALL(get_thread_file_stream_locker)
+ (&state, file->m_psi, PSI_FILE_WRITE);
if (likely(locker != NULL))
{
bytes= str ? strlen(str) : 0;
- PSI_CALL(start_file_wait)(locker, bytes, src_file, src_line);
+ PSI_FILE_CALL(start_file_wait)(locker, bytes, src_file, src_line);
result= fputs(str, file->m_file);
- PSI_CALL(end_file_wait)(locker, bytes);
+ PSI_FILE_CALL(end_file_wait)(locker, bytes);
return result;
}
#endif
@@ -613,13 +613,13 @@ inline_mysql_file_fputc(
#ifdef HAVE_PSI_FILE_INTERFACE
struct PSI_file_locker *locker;
PSI_file_locker_state state;
- locker= PSI_CALL(get_thread_file_stream_locker)(&state, file->m_psi,
- PSI_FILE_WRITE);
+ locker= PSI_FILE_CALL(get_thread_file_stream_locker)
+ (&state, file->m_psi, PSI_FILE_WRITE);
if (likely(locker != NULL))
{
- PSI_CALL(start_file_wait)(locker, (size_t) 1, src_file, src_line);
+ PSI_FILE_CALL(start_file_wait)(locker, (size_t) 1, src_file, src_line);
result= fputc(c, file->m_file);
- PSI_CALL(end_file_wait)(locker, (size_t) 1);
+ PSI_FILE_CALL(end_file_wait)(locker, (size_t) 1);
return result;
}
#endif
@@ -639,15 +639,15 @@ inline_mysql_file_fprintf(MYSQL_FILE *file, const char *format, ...)
#ifdef HAVE_PSI_FILE_INTERFACE
struct PSI_file_locker *locker;
PSI_file_locker_state state;
- locker= PSI_CALL(get_thread_file_stream_locker)(&state, file->m_psi,
- PSI_FILE_WRITE);
+ locker= PSI_FILE_CALL(get_thread_file_stream_locker)
+ (&state, file->m_psi, PSI_FILE_WRITE);
if (likely(locker != NULL))
{
- PSI_CALL(start_file_wait)(locker, (size_t) 0, __FILE__, __LINE__);
+ PSI_FILE_CALL(start_file_wait)(locker, (size_t) 0, __FILE__, __LINE__);
va_start(args, format);
result= vfprintf(file->m_file, format, args);
va_end(args);
- PSI_CALL(end_file_wait)(locker, (size_t) result);
+ PSI_FILE_CALL(end_file_wait)(locker, (size_t) result);
return result;
}
#endif
@@ -669,13 +669,13 @@ inline_mysql_file_vfprintf(
#ifdef HAVE_PSI_FILE_INTERFACE
struct PSI_file_locker *locker;
PSI_file_locker_state state;
- locker= PSI_CALL(get_thread_file_stream_locker)(&state, file->m_psi,
- PSI_FILE_WRITE);
+ locker= PSI_FILE_CALL(get_thread_file_stream_locker)
+ (&state, file->m_psi, PSI_FILE_WRITE);
if (likely(locker != NULL))
{
- PSI_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line);
+ PSI_FILE_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line);
result= vfprintf(file->m_file, format, args);
- PSI_CALL(end_file_wait)(locker, (size_t) result);
+ PSI_FILE_CALL(end_file_wait)(locker, (size_t) result);
return result;
}
#endif
@@ -695,13 +695,13 @@ inline_mysql_file_fflush(
#ifdef HAVE_PSI_FILE_INTERFACE
struct PSI_file_locker *locker;
PSI_file_locker_state state;
- locker= PSI_CALL(get_thread_file_stream_locker)(&state, file->m_psi,
- PSI_FILE_FLUSH);
+ locker= PSI_FILE_CALL(get_thread_file_stream_locker)
+ (&state, file->m_psi, PSI_FILE_FLUSH);
if (likely(locker != NULL))
{
- PSI_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line);
+ PSI_FILE_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line);
result= fflush(file->m_file);
- PSI_CALL(end_file_wait)(locker, (size_t) 0);
+ PSI_FILE_CALL(end_file_wait)(locker, (size_t) 0);
return result;
}
#endif
@@ -727,13 +727,13 @@ inline_mysql_file_fstat(
#ifdef HAVE_PSI_FILE_INTERFACE
struct PSI_file_locker *locker;
PSI_file_locker_state state;
- locker= PSI_CALL(get_thread_file_descriptor_locker)(&state, filenr,
- PSI_FILE_FSTAT);
+ locker= PSI_FILE_CALL(get_thread_file_descriptor_locker)
+ (&state, filenr, PSI_FILE_FSTAT);
if (likely(locker != NULL))
{
- PSI_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line);
+ PSI_FILE_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line);
result= my_fstat(filenr, stat_area, flags);
- PSI_CALL(end_file_wait)(locker, (size_t) 0);
+ PSI_FILE_CALL(end_file_wait)(locker, (size_t) 0);
return result;
}
#endif
@@ -753,14 +753,13 @@ inline_mysql_file_stat(
#ifdef HAVE_PSI_FILE_INTERFACE
struct PSI_file_locker *locker;
PSI_file_locker_state state;
- locker= PSI_CALL(get_thread_file_name_locker)(&state,
- key, PSI_FILE_STAT,
- path, &locker);
+ locker= PSI_FILE_CALL(get_thread_file_name_locker)
+ (&state, key, PSI_FILE_STAT, path, &locker);
if (likely(locker != NULL))
{
- PSI_CALL(start_file_open_wait)(locker, src_file, src_line);
+ PSI_FILE_CALL(start_file_open_wait)(locker, src_file, src_line);
result= my_stat(path, stat_area, flags);
- PSI_CALL(end_file_wait)(locker, (size_t) 0);
+ PSI_FILE_CALL(end_file_open_wait)(locker, result);
return result;
}
#endif
@@ -780,14 +779,14 @@ inline_mysql_file_chsize(
#ifdef HAVE_PSI_FILE_INTERFACE
struct PSI_file_locker *locker;
PSI_file_locker_state state;
- locker= PSI_CALL(get_thread_file_descriptor_locker)(&state, file,
- PSI_FILE_CHSIZE);
+ locker= PSI_FILE_CALL(get_thread_file_descriptor_locker)
+ (&state, file, PSI_FILE_CHSIZE);
if (likely(locker != NULL))
{
- PSI_CALL(start_file_wait)(locker, (size_t) newlength, src_file,
+ PSI_FILE_CALL(start_file_wait)(locker, (size_t) newlength, src_file,
src_line);
result= my_chsize(file, newlength, filler, flags);
- PSI_CALL(end_file_wait)(locker, (size_t) newlength);
+ PSI_FILE_CALL(end_file_wait)(locker, (size_t) newlength);
return result;
}
#endif
@@ -810,14 +809,14 @@ inline_mysql_file_fopen(
#ifdef HAVE_PSI_FILE_INTERFACE
struct PSI_file_locker *locker;
PSI_file_locker_state state;
- locker= PSI_CALL(get_thread_file_name_locker)
+ locker= PSI_FILE_CALL(get_thread_file_name_locker)
(&state, key, PSI_FILE_STREAM_OPEN, filename, that);
if (likely(locker != NULL))
{
- that->m_psi= PSI_CALL(start_file_open_wait)(locker, src_file,
- src_line);
+ PSI_FILE_CALL(start_file_open_wait)
+ (locker, src_file, src_line);
that->m_file= my_fopen(filename, flags, myFlags);
- PSI_CALL(end_file_open_wait)(locker);
+ that->m_psi= PSI_FILE_CALL(end_file_open_wait)(locker, that->m_file);
if (unlikely(that->m_file == NULL))
{
my_free(that);
@@ -851,13 +850,13 @@ inline_mysql_file_fclose(
#ifdef HAVE_PSI_FILE_INTERFACE
struct PSI_file_locker *locker;
PSI_file_locker_state state;
- locker= PSI_CALL(get_thread_file_stream_locker)(&state, file->m_psi,
- PSI_FILE_STREAM_CLOSE);
+ locker= PSI_FILE_CALL(get_thread_file_stream_locker)
+ (&state, file->m_psi, PSI_FILE_STREAM_CLOSE);
if (likely(locker != NULL))
{
- PSI_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line);
+ PSI_FILE_CALL(start_file_close_wait)(locker, src_file, src_line);
result= my_fclose(file->m_file, flags);
- PSI_CALL(end_file_wait)(locker, (size_t) 0);
+ PSI_FILE_CALL(end_file_close_wait)(locker, result);
my_free(file);
return result;
}
@@ -881,17 +880,17 @@ inline_mysql_file_fread(
struct PSI_file_locker *locker;
PSI_file_locker_state state;
size_t bytes_read;
- locker= PSI_CALL(get_thread_file_stream_locker)(&state, file->m_psi,
- PSI_FILE_READ);
+ locker= PSI_FILE_CALL(get_thread_file_stream_locker)
+ (&state, file->m_psi, PSI_FILE_READ);
if (likely(locker != NULL))
{
- PSI_CALL(start_file_wait)(locker, count, src_file, src_line);
+ PSI_FILE_CALL(start_file_wait)(locker, count, src_file, src_line);
result= my_fread(file->m_file, buffer, count, flags);
if (flags & (MY_NABP | MY_FNABP))
bytes_read= (result == 0) ? count : 0;
else
bytes_read= (result != MY_FILE_ERROR) ? result : 0;
- PSI_CALL(end_file_wait)(locker, bytes_read);
+ PSI_FILE_CALL(end_file_wait)(locker, bytes_read);
return result;
}
#endif
@@ -912,17 +911,17 @@ inline_mysql_file_fwrite(
struct PSI_file_locker *locker;
PSI_file_locker_state state;
size_t bytes_written;
- locker= PSI_CALL(get_thread_file_stream_locker)(&state, file->m_psi,
- PSI_FILE_WRITE);
+ locker= PSI_FILE_CALL(get_thread_file_stream_locker)
+ (&state, file->m_psi, PSI_FILE_WRITE);
if (likely(locker != NULL))
{
- PSI_CALL(start_file_wait)(locker, count, src_file, src_line);
+ PSI_FILE_CALL(start_file_wait)(locker, count, src_file, src_line);
result= my_fwrite(file->m_file, buffer, count, flags);
if (flags & (MY_NABP | MY_FNABP))
bytes_written= (result == 0) ? count : 0;
else
bytes_written= (result != MY_FILE_ERROR) ? result : 0;
- PSI_CALL(end_file_wait)(locker, bytes_written);
+ PSI_FILE_CALL(end_file_wait)(locker, bytes_written);
return result;
}
#endif
@@ -942,13 +941,13 @@ inline_mysql_file_fseek(
#ifdef HAVE_PSI_FILE_INTERFACE
struct PSI_file_locker *locker;
PSI_file_locker_state state;
- locker= PSI_CALL(get_thread_file_stream_locker)(&state, file->m_psi,
- PSI_FILE_SEEK);
+ locker= PSI_FILE_CALL(get_thread_file_stream_locker)
+ (&state, file->m_psi, PSI_FILE_SEEK);
if (likely(locker != NULL))
{
- PSI_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line);
+ PSI_FILE_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line);
result= my_fseek(file->m_file, pos, whence, flags);
- PSI_CALL(end_file_wait)(locker, (size_t) 0);
+ PSI_FILE_CALL(end_file_wait)(locker, (size_t) 0);
return result;
}
#endif
@@ -968,13 +967,13 @@ inline_mysql_file_ftell(
#ifdef HAVE_PSI_FILE_INTERFACE
struct PSI_file_locker *locker;
PSI_file_locker_state state;
- locker= PSI_CALL(get_thread_file_stream_locker)(&state, file->m_psi,
- PSI_FILE_TELL);
+ locker= PSI_FILE_CALL(get_thread_file_stream_locker)
+ (&state, file->m_psi, PSI_FILE_TELL);
if (likely(locker != NULL))
{
- PSI_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line);
+ PSI_FILE_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line);
result= my_ftell(file->m_file, flags);
- PSI_CALL(end_file_wait)(locker, (size_t) 0);
+ PSI_FILE_CALL(end_file_wait)(locker, (size_t) 0);
return result;
}
#endif
@@ -994,13 +993,13 @@ inline_mysql_file_create(
#ifdef HAVE_PSI_FILE_INTERFACE
struct PSI_file_locker *locker;
PSI_file_locker_state state;
- locker= PSI_CALL(get_thread_file_name_locker)(&state, key, PSI_FILE_CREATE,
- filename, &locker);
+ locker= PSI_FILE_CALL(get_thread_file_name_locker)
+ (&state, key, PSI_FILE_CREATE, filename, &locker);
if (likely(locker != NULL))
{
- PSI_CALL(start_file_open_wait)(locker, src_file, src_line);
+ PSI_FILE_CALL(start_file_open_wait)(locker, src_file, src_line);
file= my_create(filename, create_flags, access_flags, myFlags);
- PSI_CALL(end_file_open_wait_and_bind_to_descriptor)(locker, file);
+ PSI_FILE_CALL(end_file_open_wait_and_bind_to_descriptor)(locker, file);
return file;
}
#endif
@@ -1024,7 +1023,7 @@ inline_mysql_file_create_temp(
*/
file= create_temp_file(to, dir, pfx, mode, myFlags);
#ifdef HAVE_PSI_FILE_INTERFACE
- PSI_CALL(create_file)(key, to, file);
+ PSI_FILE_CALL(create_file)(key, to, file);
#endif
return file;
}
@@ -1040,13 +1039,13 @@ inline_mysql_file_open(
#ifdef HAVE_PSI_FILE_INTERFACE
struct PSI_file_locker *locker;
PSI_file_locker_state state;
- locker= PSI_CALL(get_thread_file_name_locker)(&state, key, PSI_FILE_OPEN,
- filename, &locker);
+ locker= PSI_FILE_CALL(get_thread_file_name_locker)
+ (&state, key, PSI_FILE_OPEN, filename, &locker);
if (likely(locker != NULL))
{
- PSI_CALL(start_file_open_wait)(locker, src_file, src_line);
+ PSI_FILE_CALL(start_file_open_wait)(locker, src_file, src_line);
file= my_open(filename, flags, myFlags);
- PSI_CALL(end_file_open_wait_and_bind_to_descriptor)(locker, file);
+ PSI_FILE_CALL(end_file_open_wait_and_bind_to_descriptor)(locker, file);
return file;
}
#endif
@@ -1066,13 +1065,13 @@ inline_mysql_file_close(
#ifdef HAVE_PSI_FILE_INTERFACE
struct PSI_file_locker *locker;
PSI_file_locker_state state;
- locker= PSI_CALL(get_thread_file_descriptor_locker)(&state, file,
- PSI_FILE_CLOSE);
+ locker= PSI_FILE_CALL(get_thread_file_descriptor_locker)
+ (&state, file, PSI_FILE_CLOSE);
if (likely(locker != NULL))
{
- PSI_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line);
+ PSI_FILE_CALL(start_file_close_wait)(locker, src_file, src_line);
result= my_close(file, flags);
- PSI_CALL(end_file_wait)(locker, (size_t) 0);
+ PSI_FILE_CALL(end_file_close_wait)(locker, result);
return result;
}
#endif
@@ -1093,17 +1092,17 @@ inline_mysql_file_read(
struct PSI_file_locker *locker;
PSI_file_locker_state state;
size_t bytes_read;
- locker= PSI_CALL(get_thread_file_descriptor_locker)(&state, file,
- PSI_FILE_READ);
+ locker= PSI_FILE_CALL(get_thread_file_descriptor_locker)
+ (&state, file, PSI_FILE_READ);
if (likely(locker != NULL))
{
- PSI_CALL(start_file_wait)(locker, count, src_file, src_line);
+ PSI_FILE_CALL(start_file_wait)(locker, count, src_file, src_line);
result= my_read(file, buffer, count, flags);
if (flags & (MY_NABP | MY_FNABP))
bytes_read= (result == 0) ? count : 0;
else
bytes_read= (result != MY_FILE_ERROR) ? result : 0;
- PSI_CALL(end_file_wait)(locker, bytes_read);
+ PSI_FILE_CALL(end_file_wait)(locker, bytes_read);
return result;
}
#endif
@@ -1124,17 +1123,17 @@ inline_mysql_file_write(
struct PSI_file_locker *locker;
PSI_file_locker_state state;
size_t bytes_written;
- locker= PSI_CALL(get_thread_file_descriptor_locker)(&state, file,
- PSI_FILE_WRITE);
+ locker= PSI_FILE_CALL(get_thread_file_descriptor_locker)
+ (&state, file, PSI_FILE_WRITE);
if (likely(locker != NULL))
{
- PSI_CALL(start_file_wait)(locker, count, src_file, src_line);
+ PSI_FILE_CALL(start_file_wait)(locker, count, src_file, src_line);
result= my_write(file, buffer, count, flags);
if (flags & (MY_NABP | MY_FNABP))
bytes_written= (result == 0) ? count : 0;
else
bytes_written= (result != MY_FILE_ERROR) ? result : 0;
- PSI_CALL(end_file_wait)(locker, bytes_written);
+ PSI_FILE_CALL(end_file_wait)(locker, bytes_written);
return result;
}
#endif
@@ -1155,16 +1154,17 @@ inline_mysql_file_pread(
struct PSI_file_locker *locker;
PSI_file_locker_state state;
size_t bytes_read;
- locker= PSI_CALL(get_thread_file_descriptor_locker)(&state, file, PSI_FILE_READ);
+ locker= PSI_FILE_CALL(get_thread_file_descriptor_locker)
+ (&state, file, PSI_FILE_READ);
if (likely(locker != NULL))
{
- PSI_CALL(start_file_wait)(locker, count, src_file, src_line);
+ PSI_FILE_CALL(start_file_wait)(locker, count, src_file, src_line);
result= my_pread(file, buffer, count, offset, flags);
if (flags & (MY_NABP | MY_FNABP))
bytes_read= (result == 0) ? count : 0;
else
bytes_read= (result != MY_FILE_ERROR) ? result : 0;
- PSI_CALL(end_file_wait)(locker, bytes_read);
+ PSI_FILE_CALL(end_file_wait)(locker, bytes_read);
return result;
}
#endif
@@ -1185,17 +1185,17 @@ inline_mysql_file_pwrite(
struct PSI_file_locker *locker;
PSI_file_locker_state state;
size_t bytes_written;
- locker= PSI_CALL(get_thread_file_descriptor_locker)(&state, file,
- PSI_FILE_WRITE);
+ locker= PSI_FILE_CALL(get_thread_file_descriptor_locker)
+ (&state, file, PSI_FILE_WRITE);
if (likely(locker != NULL))
{
- PSI_CALL(start_file_wait)(locker, count, src_file, src_line);
+ PSI_FILE_CALL(start_file_wait)(locker, count, src_file, src_line);
result= my_pwrite(file, buffer, count, offset, flags);
if (flags & (MY_NABP | MY_FNABP))
bytes_written= (result == 0) ? count : 0;
else
bytes_written= (result != MY_FILE_ERROR) ? result : 0;
- PSI_CALL(end_file_wait)(locker, bytes_written);
+ PSI_FILE_CALL(end_file_wait)(locker, bytes_written);
return result;
}
#endif
@@ -1215,12 +1215,13 @@ inline_mysql_file_seek(
#ifdef HAVE_PSI_FILE_INTERFACE
struct PSI_file_locker *locker;
PSI_file_locker_state state;
- locker= PSI_CALL(get_thread_file_descriptor_locker)(&state, file, PSI_FILE_SEEK);
+ locker= PSI_FILE_CALL(get_thread_file_descriptor_locker)
+ (&state, file, PSI_FILE_SEEK);
if (likely(locker != NULL))
{
- PSI_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line);
+ PSI_FILE_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line);
result= my_seek(file, pos, whence, flags);
- PSI_CALL(end_file_wait)(locker, (size_t) 0);
+ PSI_FILE_CALL(end_file_wait)(locker, (size_t) 0);
return result;
}
#endif
@@ -1240,12 +1241,13 @@ inline_mysql_file_tell(
#ifdef HAVE_PSI_FILE_INTERFACE
struct PSI_file_locker *locker;
PSI_file_locker_state state;
- locker= PSI_CALL(get_thread_file_descriptor_locker)(&state, file, PSI_FILE_TELL);
+ locker= PSI_FILE_CALL(get_thread_file_descriptor_locker)
+ (&state, file, PSI_FILE_TELL);
if (likely(locker != NULL))
{
- PSI_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line);
+ PSI_FILE_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line);
result= my_tell(file, flags);
- PSI_CALL(end_file_wait)(locker, (size_t) 0);
+ PSI_FILE_CALL(end_file_wait)(locker, (size_t) 0);
return result;
}
#endif
@@ -1265,13 +1267,13 @@ inline_mysql_file_delete(
#ifdef HAVE_PSI_FILE_INTERFACE
struct PSI_file_locker *locker;
PSI_file_locker_state state;
- locker= PSI_CALL(get_thread_file_name_locker)(&state, key, PSI_FILE_DELETE,
- name, &locker);
+ locker= PSI_FILE_CALL(get_thread_file_name_locker)
+ (&state, key, PSI_FILE_DELETE, name, &locker);
if (likely(locker != NULL))
{
- PSI_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line);
+ PSI_FILE_CALL(start_file_close_wait)(locker, src_file, src_line);
result= my_delete(name, flags);
- PSI_CALL(end_file_wait)(locker, (size_t) 0);
+ PSI_FILE_CALL(end_file_close_wait)(locker, result);
return result;
}
#endif
@@ -1291,13 +1293,13 @@ inline_mysql_file_rename(
#ifdef HAVE_PSI_FILE_INTERFACE
struct PSI_file_locker *locker;
PSI_file_locker_state state;
- locker= PSI_CALL(get_thread_file_name_locker)(&state, key, PSI_FILE_RENAME,
- to, &locker);
+ locker= PSI_FILE_CALL(get_thread_file_name_locker)
+ (&state, key, PSI_FILE_RENAME, to, &locker);
if (likely(locker != NULL))
{
- PSI_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line);
+ PSI_FILE_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line);
result= my_rename(from, to, flags);
- PSI_CALL(end_file_wait)(locker, (size_t) 0);
+ PSI_FILE_CALL(end_file_wait)(locker, (size_t) 0);
return result;
}
#endif
@@ -1318,14 +1320,14 @@ inline_mysql_file_create_with_symlink(
#ifdef HAVE_PSI_FILE_INTERFACE
struct PSI_file_locker *locker;
PSI_file_locker_state state;
- locker= PSI_CALL(get_thread_file_name_locker)(&state, key, PSI_FILE_CREATE,
- filename, &locker);
+ locker= PSI_FILE_CALL(get_thread_file_name_locker)
+ (&state, key, PSI_FILE_CREATE, filename, &locker);
if (likely(locker != NULL))
{
- PSI_CALL(start_file_open_wait)(locker, src_file, src_line);
+ PSI_FILE_CALL(start_file_open_wait)(locker, src_file, src_line);
file= my_create_with_symlink(linkname, filename, create_flags, access_flags,
flags);
- PSI_CALL(end_file_open_wait_and_bind_to_descriptor)(locker, file);
+ PSI_FILE_CALL(end_file_open_wait_and_bind_to_descriptor)(locker, file);
return file;
}
#endif
@@ -1346,13 +1348,13 @@ inline_mysql_file_delete_with_symlink(
#ifdef HAVE_PSI_FILE_INTERFACE
struct PSI_file_locker *locker;
PSI_file_locker_state state;
- locker= PSI_CALL(get_thread_file_name_locker)(&state, key, PSI_FILE_DELETE,
- name, &locker);
+ locker= PSI_FILE_CALL(get_thread_file_name_locker)
+ (&state, key, PSI_FILE_DELETE, name, &locker);
if (likely(locker != NULL))
{
- PSI_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line);
+ PSI_FILE_CALL(start_file_close_wait)(locker, src_file, src_line);
result= my_delete_with_symlink(name, flags);
- PSI_CALL(end_file_wait)(locker, (size_t) 0);
+ PSI_FILE_CALL(end_file_close_wait)(locker, result);
return result;
}
#endif
@@ -1372,13 +1374,13 @@ inline_mysql_file_rename_with_symlink(
#ifdef HAVE_PSI_FILE_INTERFACE
struct PSI_file_locker *locker;
PSI_file_locker_state state;
- locker= PSI_CALL(get_thread_file_name_locker)(&state, key, PSI_FILE_RENAME,
- to, &locker);
+ locker= PSI_FILE_CALL(get_thread_file_name_locker)
+ (&state, key, PSI_FILE_RENAME, to, &locker);
if (likely(locker != NULL))
{
- PSI_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line);
+ PSI_FILE_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line);
result= my_rename_with_symlink(from, to, flags);
- PSI_CALL(end_file_wait)(locker, (size_t) 0);
+ PSI_FILE_CALL(end_file_wait)(locker, (size_t) 0);
return result;
}
#endif
@@ -1398,12 +1400,13 @@ inline_mysql_file_sync(
#ifdef HAVE_PSI_FILE_INTERFACE
struct PSI_file_locker *locker;
PSI_file_locker_state state;
- locker= PSI_CALL(get_thread_file_descriptor_locker)(&state, fd, PSI_FILE_SYNC);
+ locker= PSI_FILE_CALL(get_thread_file_descriptor_locker)
+ (&state, fd, PSI_FILE_SYNC);
if (likely(locker != NULL))
{
- PSI_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line);
+ PSI_FILE_CALL(start_file_wait)(locker, (size_t) 0, src_file, src_line);
result= my_sync(fd, flags);
- PSI_CALL(end_file_wait)(locker, (size_t) 0);
+ PSI_FILE_CALL(end_file_wait)(locker, (size_t) 0);
return result;
}
#endif
diff --git a/include/mysql/psi/mysql_idle.h b/include/mysql/psi/mysql_idle.h
index 7a3fccfdb8c..c53d0ceb8c7 100644
--- a/include/mysql/psi/mysql_idle.h
+++ b/include/mysql/psi/mysql_idle.h
@@ -70,7 +70,7 @@ inline_mysql_start_idle_wait(PSI_idle_locker_state *state,
const char *src_file, int src_line)
{
struct PSI_idle_locker *locker;
- locker= PSI_CALL(start_idle_wait)(state, src_file, src_line);
+ locker= PSI_IDLE_CALL(start_idle_wait)(state, src_file, src_line);
return locker;
}
@@ -82,7 +82,7 @@ static inline void
inline_mysql_end_idle_wait(struct PSI_idle_locker *locker)
{
if (likely(locker != NULL))
- PSI_CALL(end_idle_wait)(locker);
+ PSI_IDLE_CALL(end_idle_wait)(locker);
}
#endif
diff --git a/include/mysql/psi/mysql_socket.h b/include/mysql/psi/mysql_socket.h
index c908032883a..e1d56539f85 100644
--- a/include/mysql/psi/mysql_socket.h
+++ b/include/mysql/psi/mysql_socket.h
@@ -29,6 +29,7 @@ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
#ifdef __WIN__
#include <ws2def.h>
#include <winsock2.h>
+ #include <MSWSock.h>
#define SOCKBUF_T char
#else
#include <netinet/in.h>
@@ -121,7 +122,7 @@ mysql_socket_set_address(
{
#ifdef HAVE_PSI_SOCKET_INTERFACE
if (socket.m_psi != NULL)
- PSI_CALL(set_socket_info)(socket.m_psi, NULL, addr, addr_len);
+ PSI_SOCKET_CALL(set_socket_info)(socket.m_psi, NULL, addr, addr_len);
#endif
}
@@ -141,7 +142,7 @@ MYSQL_SOCKET socket __attribute__ ((unused))
{
#ifdef HAVE_PSI_SOCKET_INTERFACE
if (socket.m_psi != NULL)
- PSI_CALL(set_socket_thread_owner)(socket.m_psi);
+ PSI_SOCKET_CALL(set_socket_thread_owner)(socket.m_psi);
#endif
}
@@ -247,8 +248,8 @@ inline_mysql_start_socket_wait(PSI_socket_locker_state *state,
struct PSI_socket_locker *locker;
if (mysql_socket.m_psi != NULL)
{
- locker= PSI_CALL(start_socket_wait)(state, mysql_socket.m_psi, op,
- byte_count, src_file, src_line);
+ locker= PSI_SOCKET_CALL(start_socket_wait)
+ (state, mysql_socket.m_psi, op, byte_count, src_file, src_line);
}
else
locker= NULL;
@@ -263,7 +264,7 @@ static inline void
inline_mysql_end_socket_wait(struct PSI_socket_locker *locker, size_t byte_count)
{
if (locker != NULL)
- PSI_CALL(end_socket_wait)(locker, byte_count);
+ PSI_SOCKET_CALL(end_socket_wait)(locker, byte_count);
}
/**
@@ -276,7 +277,7 @@ static inline void
inline_mysql_socket_set_state(MYSQL_SOCKET socket, enum PSI_socket_state state)
{
if (socket.m_psi != NULL)
- PSI_CALL(set_socket_state)(socket.m_psi, state);
+ PSI_SOCKET_CALL(set_socket_state)(socket.m_psi, state);
}
#endif /* HAVE_PSI_SOCKET_INTERFACE */
@@ -537,7 +538,7 @@ static inline void inline_mysql_socket_register(
PSI_socket_info *info,
int count)
{
- PSI_CALL(register_socket)(category, info, count);
+ PSI_SOCKET_CALL(register_socket)(category, info, count);
}
#endif
@@ -551,16 +552,15 @@ inline_mysql_socket_socket
#endif
int domain, int type, int protocol)
{
- MYSQL_SOCKET mysql_socket;
+ MYSQL_SOCKET mysql_socket= MYSQL_INVALID_SOCKET;
mysql_socket.fd= socket(domain, type, protocol);
#ifdef HAVE_PSI_SOCKET_INTERFACE
- mysql_socket.m_psi= PSI_CALL(init_socket)(key, (const my_socket*)&mysql_socket.fd);
-
- if (likely(mysql_socket.fd != INVALID_SOCKET && mysql_socket.m_psi != NULL))
- PSI_CALL(set_socket_info)(mysql_socket.m_psi, &mysql_socket.fd, NULL, 0);
-#else
- mysql_socket.m_psi= NULL;
+ if (likely(mysql_socket.fd != INVALID_SOCKET))
+ {
+ mysql_socket.m_psi= PSI_SOCKET_CALL(init_socket)
+ (key, (const my_socket*)&mysql_socket.fd, NULL, 0);
+ }
#endif
return mysql_socket;
}
@@ -583,17 +583,18 @@ inline_mysql_socket_bind
/* Instrumentation start */
PSI_socket_locker_state state;
PSI_socket_locker *locker;
- locker= PSI_CALL(start_socket_wait)(&state, mysql_socket.m_psi,
- PSI_SOCKET_BIND, (size_t)0, src_file, src_line);
+ locker= PSI_SOCKET_CALL(start_socket_wait)
+ (&state, mysql_socket.m_psi, PSI_SOCKET_BIND, (size_t)0, src_file, src_line);
/* Instrumented code */
result= bind(mysql_socket.fd, addr, len);
/* Instrumentation end */
- PSI_CALL(set_socket_info)(mysql_socket.m_psi, NULL, addr, len);
+ if (result == 0)
+ PSI_SOCKET_CALL(set_socket_info)(mysql_socket.m_psi, NULL, addr, len);
if (locker != NULL)
- PSI_CALL(end_socket_wait)(locker, (size_t)0);
+ PSI_SOCKET_CALL(end_socket_wait)(locker, (size_t)0);
return result;
}
@@ -622,15 +623,15 @@ inline_mysql_socket_getsockname
/* Instrumentation start */
PSI_socket_locker *locker;
PSI_socket_locker_state state;
- locker= PSI_CALL(start_socket_wait)(&state, mysql_socket.m_psi,
- PSI_SOCKET_BIND, (size_t)0, src_file, src_line);
+ locker= PSI_SOCKET_CALL(start_socket_wait)
+ (&state, mysql_socket.m_psi, PSI_SOCKET_BIND, (size_t)0, src_file, src_line);
/* Instrumented code */
result= getsockname(mysql_socket.fd, addr, len);
/* Instrumentation end */
if (locker != NULL)
- PSI_CALL(end_socket_wait)(locker, (size_t)0);
+ PSI_SOCKET_CALL(end_socket_wait)(locker, (size_t)0);
return result;
}
@@ -660,15 +661,15 @@ inline_mysql_socket_connect
/* Instrumentation start */
PSI_socket_locker *locker;
PSI_socket_locker_state state;
- locker= PSI_CALL(start_socket_wait)(&state, mysql_socket.m_psi,
- PSI_SOCKET_CONNECT, (size_t)0, src_file, src_line);
+ locker= PSI_SOCKET_CALL(start_socket_wait)
+ (&state, mysql_socket.m_psi, PSI_SOCKET_CONNECT, (size_t)0, src_file, src_line);
/* Instrumented code */
result= connect(mysql_socket.fd, addr, len);
/* Instrumentation end */
if (locker != NULL)
- PSI_CALL(end_socket_wait)(locker, (size_t)0);
+ PSI_SOCKET_CALL(end_socket_wait)(locker, (size_t)0);
return result;
}
@@ -698,15 +699,15 @@ inline_mysql_socket_getpeername
/* Instrumentation start */
PSI_socket_locker *locker;
PSI_socket_locker_state state;
- locker= PSI_CALL(start_socket_wait)(&state, mysql_socket.m_psi,
- PSI_SOCKET_BIND, (size_t)0, src_file, src_line);
+ locker= PSI_SOCKET_CALL(start_socket_wait)
+ (&state, mysql_socket.m_psi, PSI_SOCKET_BIND, (size_t)0, src_file, src_line);
/* Instrumented code */
result= getpeername(mysql_socket.fd, addr, len);
/* Instrumentation end */
if (locker != NULL)
- PSI_CALL(end_socket_wait)(locker, (size_t)0);
+ PSI_SOCKET_CALL(end_socket_wait)(locker, (size_t)0);
return result;
}
@@ -736,18 +737,18 @@ inline_mysql_socket_send
/* Instrumentation start */
PSI_socket_locker *locker;
PSI_socket_locker_state state;
- locker= PSI_CALL(start_socket_wait)(&state, mysql_socket.m_psi,
- PSI_SOCKET_SEND, n, src_file, src_line);
+ locker= PSI_SOCKET_CALL(start_socket_wait)
+ (&state, mysql_socket.m_psi, PSI_SOCKET_SEND, n, src_file, src_line);
/* Instrumented code */
- result= send(mysql_socket.fd, buf, n, flags);
+ result= send(mysql_socket.fd, buf, IF_WIN((int),) n, flags);
/* Instrumentation end */
if (locker != NULL)
{
size_t bytes_written;
bytes_written= (result > -1) ? result : 0;
- PSI_CALL(end_socket_wait)(locker, bytes_written);
+ PSI_SOCKET_CALL(end_socket_wait)(locker, bytes_written);
}
return result;
@@ -755,7 +756,7 @@ inline_mysql_socket_send
#endif
/* Non instrumented code */
- result= send(mysql_socket.fd, buf, n, flags);
+ result= send(mysql_socket.fd, buf, IF_WIN((int),) n, flags);
return result;
}
@@ -778,18 +779,18 @@ inline_mysql_socket_recv
/* Instrumentation start */
PSI_socket_locker *locker;
PSI_socket_locker_state state;
- locker= PSI_CALL(start_socket_wait)(&state, mysql_socket.m_psi,
- PSI_SOCKET_RECV, (size_t)0, src_file, src_line);
+ locker= PSI_SOCKET_CALL(start_socket_wait)
+ (&state, mysql_socket.m_psi, PSI_SOCKET_RECV, (size_t)0, src_file, src_line);
/* Instrumented code */
- result= recv(mysql_socket.fd, buf, n, flags);
+ result= recv(mysql_socket.fd, buf, IF_WIN((int),) n, flags);
/* Instrumentation end */
if (locker != NULL)
{
size_t bytes_read;
bytes_read= (result > -1) ? result : 0;
- PSI_CALL(end_socket_wait)(locker, bytes_read);
+ PSI_SOCKET_CALL(end_socket_wait)(locker, bytes_read);
}
return result;
@@ -797,7 +798,7 @@ inline_mysql_socket_recv
#endif
/* Non instrumented code */
- result= recv(mysql_socket.fd, buf, n, flags);
+ result= recv(mysql_socket.fd, buf, IF_WIN((int),) n, flags);
return result;
}
@@ -820,18 +821,18 @@ inline_mysql_socket_sendto
/* Instrumentation start */
PSI_socket_locker *locker;
PSI_socket_locker_state state;
- locker= PSI_CALL(start_socket_wait)(&state, mysql_socket.m_psi,
- PSI_SOCKET_SEND, n, src_file, src_line);
+ locker= PSI_SOCKET_CALL(start_socket_wait)
+ (&state, mysql_socket.m_psi, PSI_SOCKET_SEND, n, src_file, src_line);
/* Instrumented code */
- result= sendto(mysql_socket.fd, buf, n, flags, addr, addr_len);
+ result= sendto(mysql_socket.fd, buf, IF_WIN((int),) n, flags, addr, addr_len);
/* Instrumentation end */
if (locker != NULL)
{
size_t bytes_written;
bytes_written = (result > -1) ? result : 0;
- PSI_CALL(end_socket_wait)(locker, bytes_written);
+ PSI_SOCKET_CALL(end_socket_wait)(locker, bytes_written);
}
return result;
@@ -839,7 +840,7 @@ inline_mysql_socket_sendto
#endif
/* Non instrumented code */
- result= sendto(mysql_socket.fd, buf, n, flags, addr, addr_len);
+ result= sendto(mysql_socket.fd, buf, IF_WIN((int),) n, flags, addr, addr_len);
return result;
}
@@ -863,18 +864,18 @@ inline_mysql_socket_recvfrom
/* Instrumentation start */
PSI_socket_locker *locker;
PSI_socket_locker_state state;
- locker= PSI_CALL(start_socket_wait)(&state, mysql_socket.m_psi,
- PSI_SOCKET_RECV, (size_t)0, src_file, src_line);
+ locker= PSI_SOCKET_CALL(start_socket_wait)
+ (&state, mysql_socket.m_psi, PSI_SOCKET_RECV, (size_t)0, src_file, src_line);
/* Instrumented code */
- result= recvfrom(mysql_socket.fd, buf, n, flags, addr, addr_len);
+ result= recvfrom(mysql_socket.fd, buf, IF_WIN((int),) n, flags, addr, addr_len);
/* Instrumentation end */
if (locker != NULL)
{
size_t bytes_read;
bytes_read = (result > -1) ? result : 0;
- PSI_CALL(end_socket_wait)(locker, bytes_read);
+ PSI_SOCKET_CALL(end_socket_wait)(locker, bytes_read);
}
return result;
@@ -882,7 +883,7 @@ inline_mysql_socket_recvfrom
#endif
/* Non instrumented code */
- result= recvfrom(mysql_socket.fd, buf, n, flags, addr, addr_len);
+ result= recvfrom(mysql_socket.fd, buf, IF_WIN((int),) n, flags, addr, addr_len);
return result;
}
@@ -905,15 +906,15 @@ inline_mysql_socket_getsockopt
/* Instrumentation start */
PSI_socket_locker *locker;
PSI_socket_locker_state state;
- locker= PSI_CALL(start_socket_wait)(&state, mysql_socket.m_psi,
- PSI_SOCKET_OPT, (size_t)0, src_file, src_line);
+ locker= PSI_SOCKET_CALL(start_socket_wait)
+ (&state, mysql_socket.m_psi, PSI_SOCKET_OPT, (size_t)0, src_file, src_line);
/* Instrumented code */
result= getsockopt(mysql_socket.fd, level, optname, optval, optlen);
/* Instrumentation end */
if (locker != NULL)
- PSI_CALL(end_socket_wait)(locker, (size_t)0);
+ PSI_SOCKET_CALL(end_socket_wait)(locker, (size_t)0);
return result;
}
@@ -944,15 +945,15 @@ inline_mysql_socket_setsockopt
/* Instrumentation start */
PSI_socket_locker *locker;
PSI_socket_locker_state state;
- locker= PSI_CALL(start_socket_wait)(&state, mysql_socket.m_psi,
- PSI_SOCKET_OPT, (size_t)0, src_file, src_line);
+ locker= PSI_SOCKET_CALL(start_socket_wait)
+ (&state, mysql_socket.m_psi, PSI_SOCKET_OPT, (size_t)0, src_file, src_line);
/* Instrumented code */
result= setsockopt(mysql_socket.fd, level, optname, optval, optlen);
/* Instrumentation end */
if (locker != NULL)
- PSI_CALL(end_socket_wait)(locker, (size_t)0);
+ PSI_SOCKET_CALL(end_socket_wait)(locker, (size_t)0);
return result;
}
@@ -982,15 +983,15 @@ inline_mysql_socket_listen
/* Instrumentation start */
PSI_socket_locker *locker;
PSI_socket_locker_state state;
- locker= PSI_CALL(start_socket_wait)(&state, mysql_socket.m_psi,
- PSI_SOCKET_CONNECT, (size_t)0, src_file, src_line);
+ locker= PSI_SOCKET_CALL(start_socket_wait)
+ (&state, mysql_socket.m_psi, PSI_SOCKET_CONNECT, (size_t)0, src_file, src_line);
/* Instrumented code */
result= listen(mysql_socket.fd, backlog);
/* Instrumentation end */
if (locker != NULL)
- PSI_CALL(end_socket_wait)(locker, (size_t)0);
+ PSI_SOCKET_CALL(end_socket_wait)(locker, (size_t)0);
return result;
}
@@ -1021,15 +1022,15 @@ inline_mysql_socket_accept
/* Instrumentation start */
PSI_socket_locker *locker;
PSI_socket_locker_state state;
- locker= PSI_CALL(start_socket_wait)(&state, socket_listen.m_psi,
- PSI_SOCKET_CONNECT, (size_t)0, src_file, src_line);
+ locker= PSI_SOCKET_CALL(start_socket_wait)
+ (&state, socket_listen.m_psi, PSI_SOCKET_CONNECT, (size_t)0, src_file, src_line);
/* Instrumented code */
socket_accept.fd= accept(socket_listen.fd, addr, &addr_length);
/* Instrumentation end */
if (locker != NULL)
- PSI_CALL(end_socket_wait)(locker, (size_t)0);
+ PSI_SOCKET_CALL(end_socket_wait)(locker, (size_t)0);
}
else
#endif
@@ -1039,14 +1040,12 @@ inline_mysql_socket_accept
}
#ifdef HAVE_PSI_SOCKET_INTERFACE
- /* Initialize the instrument with the new socket descriptor and address */
- socket_accept.m_psi=
- PSI_CALL(init_socket)(key, (const my_socket*)&socket_accept.fd);
-
- /* FIXME: simplify this with just 1 call to init_socket(). */
- if (socket_accept.m_psi != NULL)
- PSI_CALL(set_socket_info)(socket_accept.m_psi, &socket_accept.fd, addr,
- addr_length);
+ if (likely(socket_accept.fd != INVALID_SOCKET))
+ {
+ /* Initialize the instrument with the new socket descriptor and address */
+ socket_accept.m_psi= PSI_SOCKET_CALL(init_socket)
+ (key, (const my_socket*)&socket_accept.fd, addr, addr_length);
+ }
#endif
return socket_accept;
@@ -1070,18 +1069,18 @@ inline_mysql_socket_close
/* Instrumentation start */
PSI_socket_locker *locker;
PSI_socket_locker_state state;
- locker= PSI_CALL(start_socket_wait)(&state, mysql_socket.m_psi,
- PSI_SOCKET_CLOSE, (size_t)0, src_file, src_line);
+ locker= PSI_SOCKET_CALL(start_socket_wait)
+ (&state, mysql_socket.m_psi, PSI_SOCKET_CLOSE, (size_t)0, src_file, src_line);
/* Instrumented code */
result= closesocket(mysql_socket.fd);
/* Instrumentation end */
if (locker != NULL)
- PSI_CALL(end_socket_wait)(locker, (size_t)0);
+ PSI_SOCKET_CALL(end_socket_wait)(locker, (size_t)0);
/* Remove the instrumentation for this socket. */
if (mysql_socket.m_psi != NULL)
- PSI_CALL(destroy_socket)(mysql_socket.m_psi);
+ PSI_SOCKET_CALL(destroy_socket)(mysql_socket.m_psi);
return result;
}
@@ -1105,28 +1104,53 @@ inline_mysql_socket_shutdown
{
int result;
- /* Instrumentation start */
+#ifdef __WIN__
+ static LPFN_DISCONNECTEX DisconnectEx = NULL;
+ if (DisconnectEx == NULL)
+ {
+ DWORD dwBytesReturned;
+ GUID guidDisconnectEx = WSAID_DISCONNECTEX;
+ WSAIoctl(mysql_socket.fd, SIO_GET_EXTENSION_FUNCTION_POINTER,
+ &guidDisconnectEx, sizeof(GUID),
+ &DisconnectEx, sizeof(DisconnectEx),
+ &dwBytesReturned, NULL, NULL);
+ }
+#endif
+
+/* Instrumentation start */
#ifdef HAVE_PSI_SOCKET_INTERFACE
if (mysql_socket.m_psi != NULL)
{
PSI_socket_locker *locker;
PSI_socket_locker_state state;
- locker= PSI_CALL(start_socket_wait)(&state, mysql_socket.m_psi,
- PSI_SOCKET_SHUTDOWN, (size_t)0, src_file, src_line);
+ locker= PSI_SOCKET_CALL(start_socket_wait)
+ (&state, mysql_socket.m_psi, PSI_SOCKET_SHUTDOWN, (size_t)0, src_file, src_line);
/* Instrumented code */
- result= shutdown(mysql_socket.fd, how);
+#ifdef __WIN__
+ if (DisconnectEx)
+ result= (DisconnectEx(mysql_socket.fd, (LPOVERLAPPED) NULL,
+ (DWORD) 0, (DWORD) 0) == TRUE) ? 0 : -1;
+ else
+#endif
+ result= shutdown(mysql_socket.fd, how);
/* Instrumentation end */
if (locker != NULL)
- PSI_CALL(end_socket_wait)(locker, (size_t)0);
+ PSI_SOCKET_CALL(end_socket_wait)(locker, (size_t)0);
return result;
}
#endif
/* Non instrumented code */
- result= shutdown(mysql_socket.fd, how);
+#ifdef __WIN__
+ if (DisconnectEx)
+ result= (DisconnectEx(mysql_socket.fd, (LPOVERLAPPED) NULL,
+ (DWORD) 0, (DWORD) 0) == TRUE) ? 0 : -1;
+ else
+#endif
+ result= shutdown(mysql_socket.fd, how);
return result;
}
diff --git a/include/mysql/psi/mysql_stage.h b/include/mysql/psi/mysql_stage.h
index dc44e9b0bed..61bfdbb7d59 100644
--- a/include/mysql/psi/mysql_stage.h
+++ b/include/mysql/psi/mysql_stage.h
@@ -53,7 +53,7 @@
static inline void inline_mysql_stage_register(
const char *category, PSI_stage_info **info, int count)
{
- PSI_CALL(register_stage)(category, info, count);
+ PSI_STAGE_CALL(register_stage)(category, info, count);
}
#endif
@@ -62,7 +62,7 @@ static inline void
inline_mysql_set_stage(PSI_stage_key key,
const char *src_file, int src_line)
{
- PSI_CALL(start_stage)(key, src_file, src_line);
+ PSI_STAGE_CALL(start_stage)(key, src_file, src_line);
}
#endif
diff --git a/include/mysql/psi/mysql_statement.h b/include/mysql/psi/mysql_statement.h
index 1b065065e57..d7a76ee25e4 100644
--- a/include/mysql/psi/mysql_statement.h
+++ b/include/mysql/psi/mysql_statement.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -63,10 +63,10 @@
#endif
#ifdef HAVE_PSI_STATEMENT_INTERFACE
- #define MYSQL_START_STATEMENT(STATE, K, DB, DB_LEN) \
- inline_mysql_start_statement(STATE, K, DB, DB_LEN, __FILE__, __LINE__)
+ #define MYSQL_START_STATEMENT(STATE, K, DB, DB_LEN, CS) \
+ inline_mysql_start_statement(STATE, K, DB, DB_LEN, CS, __FILE__, __LINE__)
#else
- #define MYSQL_START_STATEMENT(STATE, K, DB, DB_LEN) \
+ #define MYSQL_START_STATEMENT(STATE, K, DB, DB_LEN, CS) \
NULL
#endif
@@ -122,7 +122,7 @@
static inline void inline_mysql_statement_register(
const char *category, PSI_statement_info *info, int count)
{
- PSI_CALL(register_statement)(category, info, count);
+ PSI_STATEMENT_CALL(register_statement)(category, info, count);
}
#ifdef HAVE_PSI_STATEMENT_DIGEST_INTERFACE
@@ -132,7 +132,7 @@ inline_mysql_digest_start(PSI_statement_locker *locker)
PSI_digest_locker* digest_locker= NULL;
if (likely(locker != NULL))
- digest_locker= PSI_CALL(digest_start)(locker);
+ digest_locker= PSI_STATEMENT_CALL(digest_start)(locker);
return digest_locker;
}
#endif
@@ -143,7 +143,7 @@ inline_mysql_add_token(PSI_digest_locker *locker, uint token,
void *yylval)
{
if (likely(locker != NULL))
- locker= PSI_CALL(digest_add_token)(locker, token,
+ locker= PSI_STATEMENT_CALL(digest_add_token)(locker, token,
(OPAQUE_LEX_YYSTYPE*)yylval);
return locker;
}
@@ -153,12 +153,13 @@ static inline struct PSI_statement_locker *
inline_mysql_start_statement(PSI_statement_locker_state *state,
PSI_statement_key key,
const char *db, uint db_len,
+ const CHARSET_INFO *charset,
const char *src_file, int src_line)
{
PSI_statement_locker *locker;
- locker= PSI_CALL(get_thread_statement_locker)(state, key);
+ locker= PSI_STATEMENT_CALL(get_thread_statement_locker)(state, key, charset);
if (likely(locker != NULL))
- PSI_CALL(start_statement)(locker, db, db_len, src_file, src_line);
+ PSI_STATEMENT_CALL(start_statement)(locker, db, db_len, src_file, src_line);
return locker;
}
@@ -168,7 +169,7 @@ inline_mysql_refine_statement(PSI_statement_locker *locker,
{
if (likely(locker != NULL))
{
- locker= PSI_CALL(refine_statement)(locker, key);
+ locker= PSI_STATEMENT_CALL(refine_statement)(locker, key);
}
return locker;
}
@@ -179,7 +180,7 @@ inline_mysql_set_statement_text(PSI_statement_locker *locker,
{
if (likely(locker != NULL))
{
- PSI_CALL(set_statement_text)(locker, text, text_len);
+ PSI_STATEMENT_CALL(set_statement_text)(locker, text, text_len);
}
}
@@ -189,7 +190,7 @@ inline_mysql_set_statement_lock_time(PSI_statement_locker *locker,
{
if (likely(locker != NULL))
{
- PSI_CALL(set_statement_lock_time)(locker, count);
+ PSI_STATEMENT_CALL(set_statement_lock_time)(locker, count);
}
}
@@ -199,7 +200,7 @@ inline_mysql_set_statement_rows_sent(PSI_statement_locker *locker,
{
if (likely(locker != NULL))
{
- PSI_CALL(set_statement_rows_sent)(locker, count);
+ PSI_STATEMENT_CALL(set_statement_rows_sent)(locker, count);
}
}
@@ -209,7 +210,7 @@ inline_mysql_set_statement_rows_examined(PSI_statement_locker *locker,
{
if (likely(locker != NULL))
{
- PSI_CALL(set_statement_rows_examined)(locker, count);
+ PSI_STATEMENT_CALL(set_statement_rows_examined)(locker, count);
}
}
@@ -217,9 +218,9 @@ static inline void
inline_mysql_end_statement(struct PSI_statement_locker *locker,
Diagnostics_area *stmt_da)
{
- PSI_CALL(end_stage)();
+ PSI_STAGE_CALL(end_stage)();
if (likely(locker != NULL))
- PSI_CALL(end_statement)(locker, stmt_da);
+ PSI_STATEMENT_CALL(end_statement)(locker, stmt_da);
}
#endif
diff --git a/include/mysql/psi/mysql_table.h b/include/mysql/psi/mysql_table.h
index 5067bdaaab8..bd703d75e1f 100644
--- a/include/mysql/psi/mysql_table.h
+++ b/include/mysql/psi/mysql_table.h
@@ -30,18 +30,20 @@
*/
#ifdef HAVE_PSI_TABLE_INTERFACE
-#define PSI_CALL_unbind_table PSI_CALL(unbind_table)
-#define PSI_CALL_rebind_table PSI_CALL(rebind_table)
-#define PSI_CALL_open_table PSI_CALL(open_table)
-#define PSI_CALL_close_table PSI_CALL(close_table)
-#define PSI_CALL_get_table_share PSI_CALL(get_table_share)
-#define PSI_CALL_drop_table_share PSI_CALL(drop_table_share)
+#define PSI_CALL_unbind_table PSI_TABLE_CALL(unbind_table)
+#define PSI_CALL_rebind_table PSI_TABLE_CALL(rebind_table)
+#define PSI_CALL_open_table PSI_TABLE_CALL(open_table)
+#define PSI_CALL_close_table PSI_TABLE_CALL(close_table)
+#define PSI_CALL_get_table_share PSI_TABLE_CALL(get_table_share)
+#define PSI_CALL_release_table_share PSI_TABLE_CALL(release_table_share)
+#define PSI_CALL_drop_table_share PSI_TABLE_CALL(drop_table_share)
#else
#define PSI_CALL_unbind_table(A1) /* no-op */
#define PSI_CALL_rebind_table(A1,A2,A3) NULL
#define PSI_CALL_close_table(A1) /* no-op */
#define PSI_CALL_open_table(A1,A2) NULL
#define PSI_CALL_get_table_share(A1,A2) NULL
+#define PSI_CALL_release_table_share(A1) /* no-op */
#define PSI_CALL_drop_table_share(A1,A2,A3,A4,A5) /* no-op */
#endif
@@ -76,22 +78,22 @@
@sa MYSQL_END_TABLE_WAIT.
*/
#ifdef HAVE_PSI_TABLE_INTERFACE
- #define MYSQL_TABLE_IO_WAIT(PSI, OP, INDEX, FLAGS, PAYLOAD) \
- { \
- if (PSI != NULL) \
- { \
- PSI_table_locker *locker; \
- PSI_table_locker_state state; \
- locker= PSI_CALL(start_table_io_wait)(& state, PSI, OP, INDEX, \
- __FILE__, __LINE__); \
- PAYLOAD \
- if (locker != NULL) \
- PSI_CALL(end_table_io_wait)(locker); \
- } \
- else \
- { \
- PAYLOAD \
- } \
+ #define MYSQL_TABLE_IO_WAIT(PSI, OP, INDEX, FLAGS, PAYLOAD) \
+ { \
+ if (PSI != NULL) \
+ { \
+ PSI_table_locker *locker; \
+ PSI_table_locker_state state; \
+ locker= PSI_TABLE_CALL(start_table_io_wait) \
+ (& state, PSI, OP, INDEX, __FILE__, __LINE__); \
+ PAYLOAD \
+ if (locker != NULL) \
+ PSI_TABLE_CALL(end_table_io_wait)(locker); \
+ } \
+ else \
+ { \
+ PAYLOAD \
+ } \
}
#else
#define MYSQL_TABLE_IO_WAIT(PSI, OP, INDEX, FLAGS, PAYLOAD) \
@@ -109,22 +111,22 @@
@sa MYSQL_END_TABLE_WAIT.
*/
#ifdef HAVE_PSI_TABLE_INTERFACE
- #define MYSQL_TABLE_LOCK_WAIT(PSI, OP, FLAGS, PAYLOAD) \
- { \
- if (PSI != NULL) \
- { \
- PSI_table_locker *locker; \
- PSI_table_locker_state state; \
- locker= PSI_CALL(start_table_lock_wait)(& state, PSI, OP, FLAGS, \
- __FILE__, __LINE__); \
- PAYLOAD \
- if (locker != NULL) \
- PSI_CALL(end_table_lock_wait)(locker); \
- } \
- else \
- { \
- PAYLOAD \
- } \
+ #define MYSQL_TABLE_LOCK_WAIT(PSI, OP, FLAGS, PAYLOAD) \
+ { \
+ if (PSI != NULL) \
+ { \
+ PSI_table_locker *locker; \
+ PSI_table_locker_state state; \
+ locker= PSI_TABLE_CALL(start_table_lock_wait) \
+ (& state, PSI, OP, FLAGS, __FILE__, __LINE__); \
+ PAYLOAD \
+ if (locker != NULL) \
+ PSI_TABLE_CALL(end_table_lock_wait)(locker); \
+ } \
+ else \
+ { \
+ PAYLOAD \
+ } \
}
#else
#define MYSQL_TABLE_LOCK_WAIT(PSI, OP, FLAGS, PAYLOAD) \
@@ -180,7 +182,8 @@ inline_mysql_start_table_lock_wait(PSI_table_locker_state *state,
if (psi != NULL)
{
struct PSI_table_locker *locker;
- locker= PSI_CALL(start_table_lock_wait)(state, psi, op, flags, src_file, src_line);
+ locker= PSI_TABLE_CALL(start_table_lock_wait)
+ (state, psi, op, flags, src_file, src_line);
return locker;
}
return NULL;
@@ -194,7 +197,7 @@ static inline void
inline_mysql_end_table_lock_wait(struct PSI_table_locker *locker)
{
if (locker != NULL)
- PSI_CALL(end_table_lock_wait)(locker);
+ PSI_TABLE_CALL(end_table_lock_wait)(locker);
}
#endif
diff --git a/include/mysql/psi/mysql_thread.h b/include/mysql/psi/mysql_thread.h
index 78175196fa2..f0d88ff8ede 100644
--- a/include/mysql/psi/mysql_thread.h
+++ b/include/mysql/psi/mysql_thread.h
@@ -597,7 +597,7 @@ static inline void inline_mysql_mutex_register(
)
{
#ifdef HAVE_PSI_MUTEX_INTERFACE
- PSI_CALL(register_mutex)(category, info, count);
+ PSI_MUTEX_CALL(register_mutex)(category, info, count);
#endif
}
@@ -613,7 +613,7 @@ static inline int inline_mysql_mutex_init(
)
{
#ifdef HAVE_PSI_MUTEX_INTERFACE
- that->m_psi= PSI_CALL(init_mutex)(key, &that->m_mutex);
+ that->m_psi= PSI_MUTEX_CALL(init_mutex)(key, &that->m_mutex);
#else
that->m_psi= NULL;
#endif
@@ -636,7 +636,7 @@ static inline int inline_mysql_mutex_destroy(
#ifdef HAVE_PSI_MUTEX_INTERFACE
if (that->m_psi != NULL)
{
- PSI_CALL(destroy_mutex)(that->m_psi);
+ PSI_MUTEX_CALL(destroy_mutex)(that->m_psi);
that->m_psi= NULL;
}
#endif
@@ -664,7 +664,7 @@ static inline int inline_mysql_mutex_lock(
/* Instrumentation start */
PSI_mutex_locker *locker;
PSI_mutex_locker_state state;
- locker= PSI_CALL(start_mutex_wait)(&state, that->m_psi,
+ locker= PSI_MUTEX_CALL(start_mutex_wait)(&state, that->m_psi,
PSI_MUTEX_LOCK, src_file, src_line);
/* Instrumented code */
@@ -678,7 +678,7 @@ static inline int inline_mysql_mutex_lock(
/* Instrumentation end */
if (locker != NULL)
- PSI_CALL(end_mutex_wait)(locker, result);
+ PSI_MUTEX_CALL(end_mutex_wait)(locker, result);
return result;
}
@@ -711,7 +711,7 @@ static inline int inline_mysql_mutex_trylock(
/* Instrumentation start */
PSI_mutex_locker *locker;
PSI_mutex_locker_state state;
- locker= PSI_CALL(start_mutex_wait)(&state, that->m_psi,
+ locker= PSI_MUTEX_CALL(start_mutex_wait)(&state, that->m_psi,
PSI_MUTEX_TRYLOCK, src_file, src_line);
/* Instrumented code */
@@ -725,7 +725,7 @@ static inline int inline_mysql_mutex_trylock(
/* Instrumentation end */
if (locker != NULL)
- PSI_CALL(end_mutex_wait)(locker, result);
+ PSI_MUTEX_CALL(end_mutex_wait)(locker, result);
return result;
}
@@ -754,7 +754,7 @@ static inline int inline_mysql_mutex_unlock(
#ifdef HAVE_PSI_MUTEX_INTERFACE
if (that->m_psi != NULL)
- PSI_CALL(unlock_mutex)(that->m_psi);
+ PSI_MUTEX_CALL(unlock_mutex)(that->m_psi);
#endif
#ifdef SAFE_MUTEX
@@ -781,7 +781,7 @@ static inline void inline_mysql_rwlock_register(
)
{
#ifdef HAVE_PSI_RWLOCK_INTERFACE
- PSI_CALL(register_rwlock)(category, info, count);
+ PSI_RWLOCK_CALL(register_rwlock)(category, info, count);
#endif
}
@@ -792,7 +792,7 @@ static inline int inline_mysql_rwlock_init(
mysql_rwlock_t *that)
{
#ifdef HAVE_PSI_RWLOCK_INTERFACE
- that->m_psi= PSI_CALL(init_rwlock)(key, &that->m_rwlock);
+ that->m_psi= PSI_RWLOCK_CALL(init_rwlock)(key, &that->m_rwlock);
#else
that->m_psi= NULL;
#endif
@@ -810,7 +810,7 @@ static inline int inline_mysql_prlock_init(
mysql_prlock_t *that)
{
#ifdef HAVE_PSI_RWLOCK_INTERFACE
- that->m_psi= PSI_CALL(init_rwlock)(key, &that->m_prlock);
+ that->m_psi= PSI_RWLOCK_CALL(init_rwlock)(key, &that->m_prlock);
#else
that->m_psi= NULL;
#endif
@@ -824,7 +824,7 @@ static inline int inline_mysql_rwlock_destroy(
#ifdef HAVE_PSI_RWLOCK_INTERFACE
if (that->m_psi != NULL)
{
- PSI_CALL(destroy_rwlock)(that->m_psi);
+ PSI_RWLOCK_CALL(destroy_rwlock)(that->m_psi);
that->m_psi= NULL;
}
#endif
@@ -838,7 +838,7 @@ static inline int inline_mysql_prlock_destroy(
#ifdef HAVE_PSI_RWLOCK_INTERFACE
if (that->m_psi != NULL)
{
- PSI_CALL(destroy_rwlock)(that->m_psi);
+ PSI_RWLOCK_CALL(destroy_rwlock)(that->m_psi);
that->m_psi= NULL;
}
#endif
@@ -861,7 +861,7 @@ static inline int inline_mysql_rwlock_rdlock(
/* Instrumentation start */
PSI_rwlock_locker *locker;
PSI_rwlock_locker_state state;
- locker= PSI_CALL(start_rwlock_rdwait)(&state, that->m_psi,
+ locker= PSI_RWLOCK_CALL(start_rwlock_rdwait)(&state, that->m_psi,
PSI_RWLOCK_READLOCK, src_file, src_line);
/* Instrumented code */
@@ -869,7 +869,7 @@ static inline int inline_mysql_rwlock_rdlock(
/* Instrumentation end */
if (locker != NULL)
- PSI_CALL(end_rwlock_rdwait)(locker, result);
+ PSI_RWLOCK_CALL(end_rwlock_rdwait)(locker, result);
return result;
}
@@ -897,7 +897,7 @@ static inline int inline_mysql_prlock_rdlock(
/* Instrumentation start */
PSI_rwlock_locker *locker;
PSI_rwlock_locker_state state;
- locker= PSI_CALL(start_rwlock_rdwait)(&state, that->m_psi,
+ locker= PSI_RWLOCK_CALL(start_rwlock_rdwait)(&state, that->m_psi,
PSI_RWLOCK_READLOCK, src_file, src_line);
/* Instrumented code */
@@ -905,7 +905,7 @@ static inline int inline_mysql_prlock_rdlock(
/* Instrumentation end */
if (locker != NULL)
- PSI_CALL(end_rwlock_rdwait)(locker, result);
+ PSI_RWLOCK_CALL(end_rwlock_rdwait)(locker, result);
return result;
}
@@ -933,7 +933,7 @@ static inline int inline_mysql_rwlock_wrlock(
/* Instrumentation start */
PSI_rwlock_locker *locker;
PSI_rwlock_locker_state state;
- locker= PSI_CALL(start_rwlock_wrwait)(&state, that->m_psi,
+ locker= PSI_RWLOCK_CALL(start_rwlock_wrwait)(&state, that->m_psi,
PSI_RWLOCK_WRITELOCK, src_file, src_line);
/* Instrumented code */
@@ -941,7 +941,7 @@ static inline int inline_mysql_rwlock_wrlock(
/* Instrumentation end */
if (locker != NULL)
- PSI_CALL(end_rwlock_wrwait)(locker, result);
+ PSI_RWLOCK_CALL(end_rwlock_wrwait)(locker, result);
return result;
}
@@ -969,7 +969,7 @@ static inline int inline_mysql_prlock_wrlock(
/* Instrumentation start */
PSI_rwlock_locker *locker;
PSI_rwlock_locker_state state;
- locker= PSI_CALL(start_rwlock_wrwait)(&state, that->m_psi,
+ locker= PSI_RWLOCK_CALL(start_rwlock_wrwait)(&state, that->m_psi,
PSI_RWLOCK_WRITELOCK, src_file, src_line);
/* Instrumented code */
@@ -977,7 +977,7 @@ static inline int inline_mysql_prlock_wrlock(
/* Instrumentation end */
if (locker != NULL)
- PSI_CALL(end_rwlock_wrwait)(locker, result);
+ PSI_RWLOCK_CALL(end_rwlock_wrwait)(locker, result);
return result;
}
@@ -1005,7 +1005,7 @@ static inline int inline_mysql_rwlock_tryrdlock(
/* Instrumentation start */
PSI_rwlock_locker *locker;
PSI_rwlock_locker_state state;
- locker= PSI_CALL(start_rwlock_rdwait)(&state, that->m_psi,
+ locker= PSI_RWLOCK_CALL(start_rwlock_rdwait)(&state, that->m_psi,
PSI_RWLOCK_TRYREADLOCK, src_file, src_line);
/* Instrumented code */
@@ -1013,7 +1013,7 @@ static inline int inline_mysql_rwlock_tryrdlock(
/* Instrumentation end */
if (locker != NULL)
- PSI_CALL(end_rwlock_rdwait)(locker, result);
+ PSI_RWLOCK_CALL(end_rwlock_rdwait)(locker, result);
return result;
}
@@ -1040,7 +1040,7 @@ static inline int inline_mysql_rwlock_trywrlock(
/* Instrumentation start */
PSI_rwlock_locker *locker;
PSI_rwlock_locker_state state;
- locker= PSI_CALL(start_rwlock_wrwait)(&state, that->m_psi,
+ locker= PSI_RWLOCK_CALL(start_rwlock_wrwait)(&state, that->m_psi,
PSI_RWLOCK_TRYWRITELOCK, src_file, src_line);
/* Instrumented code */
@@ -1048,7 +1048,7 @@ static inline int inline_mysql_rwlock_trywrlock(
/* Instrumentation end */
if (locker != NULL)
- PSI_CALL(end_rwlock_wrwait)(locker, result);
+ PSI_RWLOCK_CALL(end_rwlock_wrwait)(locker, result);
return result;
}
@@ -1066,7 +1066,7 @@ static inline int inline_mysql_rwlock_unlock(
int result;
#ifdef HAVE_PSI_RWLOCK_INTERFACE
if (that->m_psi != NULL)
- PSI_CALL(unlock_rwlock)(that->m_psi);
+ PSI_RWLOCK_CALL(unlock_rwlock)(that->m_psi);
#endif
result= rw_unlock(&that->m_rwlock);
return result;
@@ -1079,7 +1079,7 @@ static inline int inline_mysql_prlock_unlock(
int result;
#ifdef HAVE_PSI_RWLOCK_INTERFACE
if (that->m_psi != NULL)
- PSI_CALL(unlock_rwlock)(that->m_psi);
+ PSI_RWLOCK_CALL(unlock_rwlock)(that->m_psi);
#endif
result= rw_pr_unlock(&that->m_prlock);
return result;
@@ -1099,7 +1099,7 @@ static inline void inline_mysql_cond_register(
)
{
#ifdef HAVE_PSI_COND_INTERFACE
- PSI_CALL(register_cond)(category, info, count);
+ PSI_COND_CALL(register_cond)(category, info, count);
#endif
}
@@ -1111,7 +1111,7 @@ static inline int inline_mysql_cond_init(
const pthread_condattr_t *attr)
{
#ifdef HAVE_PSI_COND_INTERFACE
- that->m_psi= PSI_CALL(init_cond)(key, &that->m_cond);
+ that->m_psi= PSI_COND_CALL(init_cond)(key, &that->m_cond);
#else
that->m_psi= NULL;
#endif
@@ -1124,7 +1124,7 @@ static inline int inline_mysql_cond_destroy(
#ifdef HAVE_PSI_COND_INTERFACE
if (that->m_psi != NULL)
{
- PSI_CALL(destroy_cond)(that->m_psi);
+ PSI_COND_CALL(destroy_cond)(that->m_psi);
that->m_psi= NULL;
}
#endif
@@ -1147,7 +1147,7 @@ static inline int inline_mysql_cond_wait(
/* Instrumentation start */
PSI_cond_locker *locker;
PSI_cond_locker_state state;
- locker= PSI_CALL(start_cond_wait)(&state, that->m_psi, mutex->m_psi,
+ locker= PSI_COND_CALL(start_cond_wait)(&state, that->m_psi, mutex->m_psi,
PSI_COND_WAIT, src_file, src_line);
/* Instrumented code */
@@ -1155,7 +1155,7 @@ static inline int inline_mysql_cond_wait(
/* Instrumentation end */
if (locker != NULL)
- PSI_CALL(end_cond_wait)(locker, result);
+ PSI_COND_CALL(end_cond_wait)(locker, result);
return result;
}
@@ -1184,7 +1184,7 @@ static inline int inline_mysql_cond_timedwait(
/* Instrumentation start */
PSI_cond_locker *locker;
PSI_cond_locker_state state;
- locker= PSI_CALL(start_cond_wait)(&state, that->m_psi, mutex->m_psi,
+ locker= PSI_COND_CALL(start_cond_wait)(&state, that->m_psi, mutex->m_psi,
PSI_COND_TIMEDWAIT, src_file, src_line);
/* Instrumented code */
@@ -1192,7 +1192,7 @@ static inline int inline_mysql_cond_timedwait(
/* Instrumentation end */
if (locker != NULL)
- PSI_CALL(end_cond_wait)(locker, result);
+ PSI_COND_CALL(end_cond_wait)(locker, result);
return result;
}
@@ -1210,7 +1210,7 @@ static inline int inline_mysql_cond_signal(
int result;
#ifdef HAVE_PSI_COND_INTERFACE
if (that->m_psi != NULL)
- PSI_CALL(signal_cond)(that->m_psi);
+ PSI_COND_CALL(signal_cond)(that->m_psi);
#endif
result= pthread_cond_signal(&that->m_cond);
return result;
@@ -1222,7 +1222,7 @@ static inline int inline_mysql_cond_broadcast(
int result;
#ifdef HAVE_PSI_COND_INTERFACE
if (that->m_psi != NULL)
- PSI_CALL(broadcast_cond)(that->m_psi);
+ PSI_COND_CALL(broadcast_cond)(that->m_psi);
#endif
result= pthread_cond_broadcast(&that->m_cond);
return result;
@@ -1241,7 +1241,7 @@ static inline void inline_mysql_thread_register(
)
{
#ifdef HAVE_PSI_THREAD_INTERFACE
- PSI_CALL(register_thread)(category, info, count);
+ PSI_THREAD_CALL(register_thread)(category, info, count);
#endif
}
@@ -1252,14 +1252,14 @@ static inline int inline_mysql_thread_create(
void *(*start_routine)(void*), void *arg)
{
int result;
- result= PSI_CALL(spawn_thread)(key, thread, attr, start_routine, arg);
+ result= PSI_THREAD_CALL(spawn_thread)(key, thread, attr, start_routine, arg);
return result;
}
static inline void inline_mysql_thread_set_psi_id(ulong id)
{
- struct PSI_thread *psi= PSI_CALL(get_thread)();
- PSI_CALL(set_thread_id)(psi, id);
+ struct PSI_thread *psi= PSI_THREAD_CALL(get_thread)();
+ PSI_THREAD_CALL(set_thread_id)(psi, id);
}
#endif
diff --git a/include/mysql/psi/psi.h b/include/mysql/psi/psi.h
index 8d5e6db7307..cc2057c630d 100644
--- a/include/mysql/psi/psi.h
+++ b/include/mysql/psi/psi.h
@@ -899,6 +899,10 @@ struct PSI_file_locker_state_v1
enum PSI_file_operation m_operation;
/** Current file. */
struct PSI_file *m_file;
+ /** Current file name. */
+ const char *m_name;
+ /** Current file class. */
+ void *m_class;
/** Current thread. */
struct PSI_thread *m_thread;
/** Operation number of bytes. */
@@ -958,6 +962,8 @@ struct PSI_digest_storage
{
my_bool m_full;
int m_byte_count;
+ /** Character set number. */
+ uint m_charset_number;
unsigned char m_token_array[PSI_MAX_DIGEST_STORAGE_SIZE];
};
typedef struct PSI_digest_storage PSI_digest_storage;
@@ -969,6 +975,9 @@ struct PSI_digest_locker_state
};
typedef struct PSI_digest_locker_state PSI_digest_locker_state;
+/* Duplicate of NAME_LEN, to avoid dependency on mysql_com.h */
+#define PSI_SCHEMA_NAME_LEN (64 * 3)
+
/**
State data storage for @c get_thread_statement_locker_v1_t,
@c get_thread_statement_locker_v1_t.
@@ -1029,6 +1038,10 @@ struct PSI_statement_locker_state_v1
ulong m_sort_scan;
/** Statement digest. */
PSI_digest_locker_state m_digest_state;
+ /** Current schema name. */
+ char m_schema_name[PSI_SCHEMA_NAME_LEN];
+ /** Length in bytes of @c m_schema_name. */
+ uint m_schema_name_length;
};
/**
@@ -1187,10 +1200,13 @@ typedef void (*destroy_cond_v1_t)(struct PSI_cond *cond);
Socket instrumentation initialisation API.
@param key the registered mutex key
@param socket descriptor
+ @param addr the socket ip address
+ @param addr_len length of socket ip address
@return an instrumented socket
*/
typedef struct PSI_socket* (*init_socket_v1_t)
- (PSI_socket_key key, const my_socket *fd);
+ (PSI_socket_key key, const my_socket *fd,
+ const struct sockaddr *addr, socklen_t addr_len);
/**
socket instrumentation destruction API.
@@ -1290,7 +1306,7 @@ typedef int (*spawn_thread_v1_t)(PSI_thread_key key,
@return an instrumented thread
*/
typedef struct PSI_thread* (*new_thread_v1_t)
- (PSI_thread_key key, const void *identity, ulong thread_id);
+ (PSI_thread_key key, const void *identity, ulonglong thread_id);
/**
Assign an id to an instrumented thread.
@@ -1298,7 +1314,7 @@ typedef struct PSI_thread* (*new_thread_v1_t)
@param id the id to assign
*/
typedef void (*set_thread_id_v1_t)(struct PSI_thread *thread,
- unsigned long id);
+ ulonglong id);
/**
Get the instrumentation for the running thread.
@@ -1570,16 +1586,18 @@ typedef void (*end_table_lock_wait_v1_t)(struct PSI_table_locker *locker);
@param op the operation to perform
@param src_file the source file name
@param src_line the source line number
- @return an instrumented file handle
*/
-typedef struct PSI_file* (*start_file_open_wait_v1_t)
+typedef void (*start_file_open_wait_v1_t)
(struct PSI_file_locker *locker, const char *src_file, uint src_line);
/**
End a file instrumentation open operation, for file streams.
@param locker the file locker.
+ @param result the opened file (NULL indicates failure, non NULL success).
+ @return an instrumented file handle
*/
-typedef void (*end_file_open_wait_v1_t)(struct PSI_file_locker *locker);
+typedef struct PSI_file* (*end_file_open_wait_v1_t)
+ (struct PSI_file_locker *locker, void *result);
/**
End a file instrumentation open operation, for non stream files.
@@ -1617,6 +1635,25 @@ typedef void (*end_file_wait_v1_t)
(struct PSI_file_locker *locker, size_t count);
/**
+ Start a file instrumentation close operation.
+ @param locker the file locker
+ @param op the operation to perform
+ @param src_file the source file name
+ @param src_line the source line number
+*/
+typedef void (*start_file_close_wait_v1_t)
+ (struct PSI_file_locker *locker, const char *src_file, uint src_line);
+
+/**
+ End a file instrumentation close operation.
+ @param locker the file locker.
+ @param rc the close operation return code (0 for success).
+ @return an instrumented file handle
+*/
+typedef void (*end_file_close_wait_v1_t)
+ (struct PSI_file_locker *locker, int rc);
+
+/**
Start a new stage, and implicitly end the previous stage.
@param key the key of the new stage
@param src_file the source file name
@@ -1632,11 +1669,12 @@ typedef void (*end_stage_v1_t) (void);
Get a statement instrumentation locker.
@param state data storage for the locker
@param key the statement instrumentation key
+ @param charset client character set
@return a statement locker, or NULL
*/
typedef struct PSI_statement_locker* (*get_thread_statement_locker_v1_t)
(struct PSI_statement_locker_state_v1 *state,
- PSI_statement_key key);
+ PSI_statement_key key, const void *charset);
/**
Refine a statement locker to a more specific key.
@@ -1871,6 +1909,19 @@ typedef struct PSI_digest_locker* (*digest_add_token_v1_t)
(struct PSI_digest_locker *locker, uint token, struct OPAQUE_LEX_YYSTYPE *yylval);
/**
+ Stores an array of connection attributes
+ @param buffer char array of length encoded connection attributes
+ in network format
+  @param length  length of the data in buffer
+  @param from_cs charset in which @buffer is encoded
+ @return state
+ @retval non-0 attributes truncated
+ @retval 0 stored the attribute
+*/
+typedef int (*set_thread_connect_attrs_v1_t)(const char *buffer, uint length,
+ const void *from_cs);
+
+/**
Performance Schema Interface, version 1.
@since PSI_VERSION_1
*/
@@ -2005,6 +2056,10 @@ struct PSI_v1
start_file_wait_v1_t start_file_wait;
/** @sa end_file_wait_v1_t. */
end_file_wait_v1_t end_file_wait;
+ /** @sa start_file_close_wait_v1_t. */
+ start_file_close_wait_v1_t start_file_close_wait;
+ /** @sa end_file_close_wait_v1_t. */
+ end_file_close_wait_v1_t end_file_close_wait;
/** @sa start_stage_v1_t. */
start_stage_v1_t start_stage;
/** @sa end_stage_v1_t. */
@@ -2065,6 +2120,8 @@ struct PSI_v1
digest_start_v1_t digest_start;
/** @sa digest_add_token_v1_t. */
digest_add_token_v1_t digest_add_token;
+ /** @sa set_thread_connect_attrs_v1_t. */
+ set_thread_connect_attrs_v1_t set_thread_connect_attrs;
};
/** @} (end of group Group_PSI_v1) */
@@ -2318,7 +2375,54 @@ typedef struct PSI_stage_info_none PSI_stage_info;
extern MYSQL_PLUGIN_IMPORT PSI *PSI_server;
-#define PSI_CALL(M) PSI_server->M
+/*
+ Allow to override PSI_XXX_CALL at compile time
+ with more efficient implementations, if available.
+ If nothing better is available,
+ make a dynamic call using the PSI_server function pointer.
+*/
+
+#ifndef PSI_MUTEX_CALL
+#define PSI_MUTEX_CALL(M) PSI_DYNAMIC_CALL(M)
+#endif
+
+#ifndef PSI_RWLOCK_CALL
+#define PSI_RWLOCK_CALL(M) PSI_DYNAMIC_CALL(M)
+#endif
+
+#ifndef PSI_COND_CALL
+#define PSI_COND_CALL(M) PSI_DYNAMIC_CALL(M)
+#endif
+
+#ifndef PSI_THREAD_CALL
+#define PSI_THREAD_CALL(M) PSI_DYNAMIC_CALL(M)
+#endif
+
+#ifndef PSI_FILE_CALL
+#define PSI_FILE_CALL(M) PSI_DYNAMIC_CALL(M)
+#endif
+
+#ifndef PSI_SOCKET_CALL
+#define PSI_SOCKET_CALL(M) PSI_DYNAMIC_CALL(M)
+#endif
+
+#ifndef PSI_STAGE_CALL
+#define PSI_STAGE_CALL(M) PSI_DYNAMIC_CALL(M)
+#endif
+
+#ifndef PSI_STATEMENT_CALL
+#define PSI_STATEMENT_CALL(M) PSI_DYNAMIC_CALL(M)
+#endif
+
+#ifndef PSI_TABLE_CALL
+#define PSI_TABLE_CALL(M) PSI_DYNAMIC_CALL(M)
+#endif
+
+#ifndef PSI_IDLE_CALL
+#define PSI_IDLE_CALL(M) PSI_DYNAMIC_CALL(M)
+#endif
+
+#define PSI_DYNAMIC_CALL(M) PSI_server->M
/** @} */
diff --git a/include/mysql/psi/psi_abi_v1.h.pp b/include/mysql/psi/psi_abi_v1.h.pp
index b0559213998..f2037c5b724 100644
--- a/include/mysql/psi/psi_abi_v1.h.pp
+++ b/include/mysql/psi/psi_abi_v1.h.pp
@@ -221,6 +221,8 @@ struct PSI_file_locker_state_v1
uint m_flags;
enum PSI_file_operation m_operation;
struct PSI_file *m_file;
+ const char *m_name;
+ void *m_class;
struct PSI_thread *m_thread;
size_t m_number_of_bytes;
ulonglong m_timer_start;
@@ -243,6 +245,7 @@ struct PSI_digest_storage
{
my_bool m_full;
int m_byte_count;
+ uint m_charset_number;
unsigned char m_token_array[1024];
};
typedef struct PSI_digest_storage PSI_digest_storage;
@@ -278,6 +281,8 @@ struct PSI_statement_locker_state_v1
ulong m_sort_rows;
ulong m_sort_scan;
PSI_digest_locker_state m_digest_state;
+ char m_schema_name[(64 * 3)];
+ uint m_schema_name_length;
};
struct PSI_socket_locker_state_v1
{
@@ -318,7 +323,8 @@ typedef struct PSI_cond* (*init_cond_v1_t)
(PSI_cond_key key, const void *identity);
typedef void (*destroy_cond_v1_t)(struct PSI_cond *cond);
typedef struct PSI_socket* (*init_socket_v1_t)
- (PSI_socket_key key, const my_socket *fd);
+ (PSI_socket_key key, const my_socket *fd,
+ const struct sockaddr *addr, socklen_t addr_len);
typedef void (*destroy_socket_v1_t)(struct PSI_socket *socket);
typedef struct PSI_table_share* (*get_table_share_v1_t)
(my_bool temporary, struct TABLE_SHARE *share);
@@ -340,9 +346,9 @@ typedef int (*spawn_thread_v1_t)(PSI_thread_key key,
const pthread_attr_t *attr,
void *(*start_routine)(void*), void *arg);
typedef struct PSI_thread* (*new_thread_v1_t)
- (PSI_thread_key key, const void *identity, ulong thread_id);
+ (PSI_thread_key key, const void *identity, ulonglong thread_id);
typedef void (*set_thread_id_v1_t)(struct PSI_thread *thread,
- unsigned long id);
+ ulonglong id);
typedef struct PSI_thread* (*get_thread_v1_t)(void);
typedef void (*set_thread_user_v1_t)(const char *user, int user_len);
typedef void (*set_thread_user_host_v1_t)(const char *user, int user_len,
@@ -420,9 +426,10 @@ typedef struct PSI_table_locker* (*start_table_lock_wait_v1_t)
ulong flags,
const char *src_file, uint src_line);
typedef void (*end_table_lock_wait_v1_t)(struct PSI_table_locker *locker);
-typedef struct PSI_file* (*start_file_open_wait_v1_t)
+typedef void (*start_file_open_wait_v1_t)
(struct PSI_file_locker *locker, const char *src_file, uint src_line);
-typedef void (*end_file_open_wait_v1_t)(struct PSI_file_locker *locker);
+typedef struct PSI_file* (*end_file_open_wait_v1_t)
+ (struct PSI_file_locker *locker, void *result);
typedef void (*end_file_open_wait_and_bind_to_descriptor_v1_t)
(struct PSI_file_locker *locker, File file);
typedef void (*start_file_wait_v1_t)
@@ -430,12 +437,16 @@ typedef void (*start_file_wait_v1_t)
const char *src_file, uint src_line);
typedef void (*end_file_wait_v1_t)
(struct PSI_file_locker *locker, size_t count);
+typedef void (*start_file_close_wait_v1_t)
+ (struct PSI_file_locker *locker, const char *src_file, uint src_line);
+typedef void (*end_file_close_wait_v1_t)
+ (struct PSI_file_locker *locker, int rc);
typedef void (*start_stage_v1_t)
(PSI_stage_key key, const char *src_file, int src_line);
typedef void (*end_stage_v1_t) (void);
typedef struct PSI_statement_locker* (*get_thread_statement_locker_v1_t)
(struct PSI_statement_locker_state_v1 *state,
- PSI_statement_key key);
+ PSI_statement_key key, const void *charset);
typedef struct PSI_statement_locker* (*refine_statement_v1_t)
(struct PSI_statement_locker *locker,
PSI_statement_key key);
@@ -499,6 +510,8 @@ typedef struct PSI_digest_locker * (*digest_start_v1_t)
(struct PSI_statement_locker *locker);
typedef struct PSI_digest_locker* (*digest_add_token_v1_t)
(struct PSI_digest_locker *locker, uint token, struct OPAQUE_LEX_YYSTYPE *yylval);
+typedef int (*set_thread_connect_attrs_v1_t)(const char *buffer, uint length,
+ const void *from_cs);
struct PSI_v1
{
register_mutex_v1_t register_mutex;
@@ -566,6 +579,8 @@ struct PSI_v1
end_file_open_wait_and_bind_to_descriptor;
start_file_wait_v1_t start_file_wait;
end_file_wait_v1_t end_file_wait;
+ start_file_close_wait_v1_t start_file_close_wait;
+ end_file_close_wait_v1_t end_file_close_wait;
start_stage_v1_t start_stage;
end_stage_v1_t end_stage;
get_thread_statement_locker_v1_t get_thread_statement_locker;
@@ -596,6 +611,7 @@ struct PSI_v1
set_socket_thread_owner_v1_t set_socket_thread_owner;
digest_start_v1_t digest_start;
digest_add_token_v1_t digest_add_token;
+ set_thread_connect_attrs_v1_t set_thread_connect_attrs;
};
typedef struct PSI_v1 PSI;
typedef struct PSI_mutex_info_v1 PSI_mutex_info;
diff --git a/include/mysql/service_debug_sync.h b/include/mysql/service_debug_sync.h
index bb1202c5e63..eee8e6bbe96 100644
--- a/include/mysql/service_debug_sync.h
+++ b/include/mysql/service_debug_sync.h
@@ -339,9 +339,16 @@ extern void (*debug_sync_C_callback_ptr)(MYSQL_THD, const char *, size_t);
if (debug_sync_service) \
debug_sync_service(thd, STRING_WITH_LEN(name)); \
} while(0)
+
+#define DEBUG_SYNC_C_IF_THD(thd, name) \
+ do { \
+ if (debug_sync_service && thd) \
+ debug_sync_service((MYSQL_THD) thd, STRING_WITH_LEN(name)); \
+ } while(0)
#else
-#define DEBUG_SYNC(thd,name) do { } while(0)
-#endif
+#define DEBUG_SYNC(thd,name) do { } while(0)
+#define DEBUG_SYNC_C_IF_THD(thd, _sync_point_name_) do { } while(0)
+#endif /* defined(ENABLED_DEBUG_SYNC) */
/* compatibility macro */
#define DEBUG_SYNC_C(name) DEBUG_SYNC(NULL, name)
diff --git a/include/mysql/service_my_plugin_log.h b/include/mysql/service_my_plugin_log.h
new file mode 100644
index 00000000000..0cf7817573c
--- /dev/null
+++ b/include/mysql/service_my_plugin_log.h
@@ -0,0 +1,64 @@
+/* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; version 2 of the
+ License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+/**
+ @file
+ This service provides functions to report error conditions and log to
+ mysql error log.
+*/
+
+#ifndef MYSQL_SERVICE_MY_PLUGIN_LOG_INCLUDED
+#define MYSQL_SERVICE_MY_PLUGIN_LOG_INCLUDED
+
+#ifndef MYSQL_ABI_CHECK
+#include <stdarg.h>
+#endif
+
+/* keep in sync with the loglevel enum in my_sys.h */
+enum plugin_log_level
+{
+ MY_ERROR_LEVEL,
+ MY_WARNING_LEVEL,
+ MY_INFORMATION_LEVEL
+};
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern struct my_plugin_log_service
+{
+ /** write a message to the log */
+ int (*my_plugin_log_message)(MYSQL_PLUGIN *, enum plugin_log_level, const char *, ...);
+} *my_plugin_log_service;
+
+#ifdef MYSQL_DYNAMIC_PLUGIN
+
+#define my_plugin_log_message my_plugin_log_service->my_plugin_log_message
+
+#else
+
+int my_plugin_log_message(MYSQL_PLUGIN *plugin, enum plugin_log_level level,
+ const char *format, ...);
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/mysql/service_sha1.h b/include/mysql/service_sha1.h
new file mode 100644
index 00000000000..01f5ba81566
--- /dev/null
+++ b/include/mysql/service_sha1.h
@@ -0,0 +1,57 @@
+#ifndef MYSQL_SERVICE_SHA1_INCLUDED
+/* Copyright (c) 2013, Monty Program Ab
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+/**
+ @file
+ my sha1 service
+
+ Functions to calculate SHA1 hash from a memory buffer
+*/
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef MYSQL_ABI_CHECK
+#include <stdlib.h>
+#endif
+
+#define MY_SHA1_HASH_SIZE 20 /* Hash size in bytes */
+
+extern struct my_sha1_service_st {
+ void (*my_sha1_type)(unsigned char*, const char*, size_t);
+ void (*my_sha1_multi_type)(unsigned char*, ...);
+} *my_sha1_service;
+
+#ifdef MYSQL_DYNAMIC_PLUGIN
+
+#define my_sha1(A,B,C) my_sha1_service->my_sha1_type(A,B,C)
+#define my_sha1_multi my_sha1_service->my_sha1_multi_type
+
+#else
+
+void my_sha1(unsigned char*, const char*, size_t);
+void my_sha1_multi(unsigned char*, ...);
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#define MYSQL_SERVICE_SHA1_INCLUDED
+#endif
+
diff --git a/include/mysql/services.h b/include/mysql/services.h
index 1145d19872b..49670b5673b 100644
--- a/include/mysql/services.h
+++ b/include/mysql/services.h
@@ -26,6 +26,7 @@ extern "C" {
#include <mysql/service_debug_sync.h>
#include <mysql/service_kill_statement.h>
#include <mysql/service_thd_timezone.h>
+#include <mysql/service_sha1.h>
#ifdef __cplusplus
}
diff --git a/include/mysql_com.h b/include/mysql_com.h
index a9bba2f59a3..cc206a0230f 100644
--- a/include/mysql_com.h
+++ b/include/mysql_com.h
@@ -68,6 +68,7 @@
#define TABLE_COMMENT_MAXLEN 2048
#define COLUMN_COMMENT_MAXLEN 1024
#define INDEX_COMMENT_MAXLEN 1024
+#define TABLE_PARTITION_COMMENT_MAXLEN 1024
/*
USER_HOST_BUFF_SIZE -- length of string buffer, that is enough to contain
@@ -142,13 +143,19 @@ enum enum_server_command
#define BINCMP_FLAG 131072 /* Intern: Used by sql_yacc */
#define GET_FIXED_FIELDS_FLAG (1 << 18) /* Used to get fields in item tree */
#define FIELD_IN_PART_FUNC_FLAG (1 << 19)/* Field part of partition func */
-#define FIELD_IN_ADD_INDEX (1<< 20) /* Intern: Field used in ADD INDEX */
+
+/**
+ Intern: Field in TABLE object for new version of altered table,
+ which participates in a newly added index.
+*/
+#define FIELD_IN_ADD_INDEX (1 << 20)
#define FIELD_IS_RENAMED (1<< 21) /* Intern: Field is being renamed */
-#define FIELD_FLAGS_STORAGE_MEDIA 22 /* Field storage media, bit 22-23,
- reserved by MySQL Cluster */
-#define FIELD_FLAGS_COLUMN_FORMAT 24 /* Field column format, bit 24-25,
- reserved by MySQL Cluster */
-#define HAS_EXPLICIT_VALUE (1 << 26) /* An INSERT/UPDATE operation supplied
+#define FIELD_FLAGS_STORAGE_MEDIA 22 /* Field storage media, bit 22-23 */
+#define FIELD_FLAGS_STORAGE_MEDIA_MASK (3 << FIELD_FLAGS_STORAGE_MEDIA)
+#define FIELD_FLAGS_COLUMN_FORMAT 24 /* Field column format, bit 24-25 */
+#define FIELD_FLAGS_COLUMN_FORMAT_MASK (3 << FIELD_FLAGS_COLUMN_FORMAT)
+#define FIELD_IS_DROPPED (1<< 26) /* Intern: Field is being dropped */
+#define HAS_EXPLICIT_VALUE (1 << 27) /* An INSERT/UPDATE operation supplied
an explicit default value */
#define REFRESH_GRANT (1ULL << 0) /* Refresh grant tables */
@@ -177,11 +184,12 @@ enum enum_server_command
#define REFRESH_QUERY_CACHE_FREE (1ULL << 17) /* pack query cache */
#define REFRESH_DES_KEY_FILE (1ULL << 18)
#define REFRESH_USER_RESOURCES (1ULL << 19)
+#define REFRESH_FOR_EXPORT (1ULL << 20) /* FLUSH TABLES ... FOR EXPORT */
-#define REFRESH_TABLE_STATS (1ULL << 20) /* Refresh table stats hash table */
-#define REFRESH_INDEX_STATS (1ULL << 21) /* Refresh index stats hash table */
-#define REFRESH_USER_STATS (1ULL << 22) /* Refresh user stats hash table */
-#define REFRESH_CLIENT_STATS (1ULL << 23) /* Refresh client stats hash table */
+#define REFRESH_TABLE_STATS (1ULL << 27) /* Refresh table stats hash table */
+#define REFRESH_INDEX_STATS (1ULL << 28) /* Refresh index stats hash table */
+#define REFRESH_USER_STATS (1ULL << 29) /* Refresh user stats hash table */
+#define REFRESH_CLIENT_STATS (1ULL << 30) /* Refresh client stats hash table */
#define REFRESH_FAST (1ULL << 31) /* Intern flag */
@@ -206,8 +214,15 @@ enum enum_server_command
#define CLIENT_PS_MULTI_RESULTS (1UL << 18) /* Multi-results in PS-protocol */
#define CLIENT_PLUGIN_AUTH (1UL << 19) /* Client supports plugin authentication */
-#define CLIENT_PROGRESS (1UL << 29) /* Client support progress indicator */
+#define CLIENT_PLUGIN_AUTH (1UL << 19) /* Client supports plugin authentication */
+#define CLIENT_CONNECT_ATTRS (1UL << 20) /* Client supports connection attributes */
+/* Enable authentication response packet to be larger than 255 bytes. */
+#define CLIENT_PLUGIN_AUTH_LENENC_CLIENT_DATA (1UL << 21)
+/* Don't close the connection for a connection with expired password. */
+#define CLIENT_CAN_HANDLE_EXPIRED_PASSWORDS (1UL << 22)
+
+#define CLIENT_PROGRESS (1UL << 29) /* Client support progress indicator */
#define CLIENT_SSL_VERIFY_SERVER_CERT (1UL << 30)
/*
It used to be that if mysql_real_connect() failed, it would delete any
@@ -252,6 +267,12 @@ enum enum_server_command
CLIENT_PLUGIN_AUTH)
/*
+ To be added later:
+ CLIENT_CONNECT_ATTRS, CLIENT_PLUGIN_AUTH_LENENC_CLIENT_DATA,
+ CLIENT_CAN_HANDLE_EXPIRED_PASSWORDS
+*/
+
+/*
Switch off the flags that are optional and depending on build flags
If any of the optional flags is supported by the build it will be switched
on before sending to the client during the connection handshake.
diff --git a/include/password.h b/include/password.h
index 082f917e7c0..5dfea533546 100644
--- a/include/password.h
+++ b/include/password.h
@@ -24,6 +24,8 @@ void my_make_scrambled_password_323(char *to, const char *password,
size_t pass_len);
void my_make_scrambled_password(char *to, const char *password,
size_t pass_len);
+void my_make_scrambled_password_sha1(char *to, const char *password,
+ size_t pass_len);
void hash_password(ulong *result, const char *password, uint password_len);
diff --git a/include/service_versions.h b/include/service_versions.h
index 2dffa7cf863..b2c5ccd1948 100644
--- a/include/service_versions.h
+++ b/include/service_versions.h
@@ -20,10 +20,13 @@
#define SERVICE_VERSION void *
#endif
+#define VERSION_debug_sync 0x1000
+#define VERSION_kill_statement 0x1000
+
#define VERSION_my_snprintf 0x0100
#define VERSION_thd_alloc 0x0100
#define VERSION_thd_wait 0x0100
#define VERSION_progress_report 0x0100
-#define VERSION_debug_sync 0x1000
-#define VERSION_kill_statement 0x1000
#define VERSION_thd_timezone 0x0100
+#define VERSION_my_sha1 0x0100
+
diff --git a/include/sha1.h b/include/sha1.h
index c3469333c27..d927cd26ad9 100644
--- a/include/sha1.h
+++ b/include/sha1.h
@@ -1,8 +1,7 @@
#ifndef SHA1_INCLUDED
#define SHA1_INCLUDED
-/* Copyright (c) 2002, 2006 MySQL AB, 2009 Sun Microsystems, Inc.
- Use is subject to license terms.
+/* Copyright (c) 2013, Monty Program Ab
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -18,88 +17,9 @@
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-/*
- This is the header file for code which implements the Secure
- Hashing Algorithm 1 as defined in FIPS PUB 180-1 published
- April 17, 1995.
-
- Many of the variable names in this code, especially the
- single character names, were used because those were the names
- used in the publication.
-
- Please read the file sha1.c for more information.
-
- Modified 2002 by Peter Zaitsev to better follow MySQL standards
-
- Original Source from: http://www.faqs.org/rfcs/rfc3174.html
-
- Copyright (C) The Internet Society (2001). All Rights Reserved.
-
- This document and translations of it may be copied and furnished to
- others, and derivative works that comment on or otherwise explain it
- or assist in its implementation may be prepared, copied, published
- and distributed, in whole or in part, without restriction of any
- kind, provided that the above copyright notice and this paragraph are
- included on all such copies and derivative works. However, this
- document itself may not be modified in any way, such as by removing
- the copyright notice or references to the Internet Society or other
- Internet organizations, except as needed for the purpose of
- developing Internet standards in which case the procedures for
- copyrights defined in the Internet Standards process must be
- followed, or as required to translate it into languages other than
- English.
-
- The limited permissions granted above are perpetual and will not be
- revoked by the Internet Society or its successors or assigns.
-
- This document and the information contained herein is provided on an
- "AS IS" basis and THE INTERNET SOCIETY AND THE INTERNET ENGINEERING
- TASK FORCE DISCLAIMS ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING
- BUT NOT LIMITED TO ANY WARRANTY THAT THE USE OF THE INFORMATION
- HEREIN WILL NOT INFRINGE ANY RIGHTS OR ANY IMPLIED WARRANTIES OF
- MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
-
- Acknowledgement
- Funding for the RFC Editor function is currently provided by the
- Internet Society.
-*/
-
-
-enum sha_result_codes
-{
- SHA_SUCCESS = 0,
- SHA_NULL, /* Null pointer parameter */
- SHA_INPUT_TOO_LONG, /* input data too long */
- SHA_STATE_ERROR /* called Input after Result */
-};
-
-#define SHA1_HASH_SIZE 20 /* Hash size in bytes */
-
-/*
- This structure will hold context information for the SHA-1
- hashing operation
-*/
-
-typedef struct SHA1_CONTEXT
-{
- ulonglong Length; /* Message length in bits */
- uint32 Intermediate_Hash[SHA1_HASH_SIZE/4]; /* Message Digest */
- int Computed; /* Is the digest computed? */
- int Corrupted; /* Is the message digest corrupted? */
- int16 Message_Block_Index; /* Index into message block array */
- uint8 Message_Block[64]; /* 512-bit message blocks */
-} SHA1_CONTEXT;
-
-/*
- Function Prototypes
-*/
-
-C_MODE_START
-
-int mysql_sha1_reset(SHA1_CONTEXT*);
-int mysql_sha1_input(SHA1_CONTEXT*, const uint8 *, unsigned int);
-int mysql_sha1_result(SHA1_CONTEXT* , uint8 Message_Digest[SHA1_HASH_SIZE]);
-
-C_MODE_END
+#include <mysql/service_sha1.h>
+#define SHA1_HASH_SIZE MY_SHA1_HASH_SIZE
+#define compute_sha1_hash(A,B,C) my_sha1(A,B,C)
+#define compute_sha1_hash_multi(A,B,C,D,E) my_sha1_multi(A,B,C,D,E,NULL)
#endif /* SHA__INCLUDED */
diff --git a/include/sql_common.h b/include/sql_common.h
index 406a87010fb..5a033fbe522 100644
--- a/include/sql_common.h
+++ b/include/sql_common.h
@@ -21,6 +21,7 @@ extern "C" {
#endif
#include <mysql.h>
+#include <hash.h>
extern const char *unknown_sqlstate;
extern const char *cant_connect_sqlstate;
@@ -34,6 +35,7 @@ struct st_mysql_options_extention {
char *default_auth;
char *ssl_crl; /* PEM CRL file */
char *ssl_crlpath; /* PEM directory of CRL-s? */
+ char *server_public_key_path;
void (*report_progress)(const MYSQL *mysql,
unsigned int stage,
unsigned int max_stage,
diff --git a/include/thread_pool_priv.h b/include/thread_pool_priv.h
index 9a9c65af6da..449c8ded66b 100644
--- a/include/thread_pool_priv.h
+++ b/include/thread_pool_priv.h
@@ -49,7 +49,6 @@ void thd_set_killed(THD *thd);
void thd_clear_errors(THD *thd);
void thd_set_thread_stack(THD *thd, char *stack_start);
void thd_lock_thread_count(THD *thd);
-void thd_unlock_thread_count(THD *thd);
void thd_close_connection(THD *thd);
THD *thd_get_current_thd();
void thd_lock_data(THD *thd);
diff --git a/libevent/CMakeLists.txt b/libevent/CMakeLists.txt
new file mode 100644
index 00000000000..ea50bab2530
--- /dev/null
+++ b/libevent/CMakeLists.txt
@@ -0,0 +1,80 @@
+# Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+# Common defines and includes
+IF(WITH_INNODB_MEMCACHED AND UNIX)
+
+ADD_DEFINITIONS(-DHAVE_CONFIG_H)
+INCLUDE_DIRECTORIES(${LIBEVENT_INCLUDE_DIR}/compat/sys
+ ${LIBEVENT_INCLUDE_DIR})
+
+SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${CMAKE_SHARED_LIBRARY_C_FLAGS} -I${LIBEVENT_INCLUDE_DIR}")
+
+SET(LIBEVENT_CORE_SOURCES
+ event.h
+ event-internal.h
+ evutil.h
+ log.h
+ event.c
+ buffer.c
+ evbuffer.c
+ log.c
+ evutil.c)
+
+SET(LIBEVENT_EXTRA_SOURCES
+ event_tagging.c
+ http.c
+ evhttp.h
+ http-internal.h
+ evdns.h
+ evrpc.c
+ evrpc.h
+ evrpc-internal.h
+ strlcpy.c
+ strlcpy-internal.h)
+
+IF(HAVE_SIGNAL_H)
+ SET(LIBEVENT_SIGNAL_SOURCES signal.c)
+ENDIF()
+
+IF(HAVE_POLL_H)
+ SET(LIBEVENT_POLL_SOURCES poll.c)
+ENDIF()
+
+IF(HAVE_SELECT)
+ SET(LIBEVENT_SELECT_SOURCE select.c)
+ENDIF()
+
+IF(HAVE_SYS_EPOLL_H)
+ SET(LIBEVENT_EPOLL_SOURCES epoll.c epoll_sub.c)
+ENDIF()
+
+IF(HAVE_SYS_DEVPOLL_H)
+ SET(LIBEVENT_DEVPOLL_SOURCES devpoll.c)
+ENDIF()
+
+IF(HAVE_EVENT_PORTS)
+ SET(LIBEVENT_EVPORT_SOURCES evport.c)
+ENDIF()
+
+IF(HAVE_WORKING_KQUEUE)
+ SET(LIBEVENT_KQUEUE_SOURCES kqueue.c)
+ENDIF()
+
+ADD_LIBRARY(event_share SHARED ${LIBEVENT_CORE_SOURCES} ${LIBEVENT_EXTRA_SOURCES} ${LIBEVENT_SIGNAL_SOURCES} ${LIBEVENT_POLL_SOURCES} ${LIBEVENT_SELECT_SOURCE} ${LIBEVENT_EPOLL_SOURCES} ${LIBEVENT_DEVPOLL_SOURCES} ${LIBEVENT_EVPORT_SOURCES} ${LIBEVENT_KQUEUE_SOURCES})
+
+ADD_LIBRARY(event STATIC ${LIBEVENT_CORE_SOURCES} ${LIBEVENT_EXTRA_SOURCES} ${LIBEVENT_SIGNAL_SOURCES} ${LIBEVENT_POLL_SOURCES} ${LIBEVENT_SELECT_SOURCE} ${LIBEVENT_EPOLL_SOURCES} ${LIBEVENT_DEVPOLL_SOURCES} ${LIBEVENT_EVPORT_SOURCES} ${LIBEVENT_KQUEUE_SOURCES})
+ENDIF()
+
diff --git a/libevent/ChangeLog b/libevent/ChangeLog
new file mode 100644
index 00000000000..2435c1f15d8
--- /dev/null
+++ b/libevent/ChangeLog
@@ -0,0 +1,190 @@
+Changes in 1.4.12-stable:
+ o Try to contain degree of failure when running on a win32 version so heavily firewalled that we can't fake a socketpair.
+ o Fix an obscure timing-dependent, allocator-dependent crash in the evdns code.
+ o Use __VA_ARGS__ syntax for varargs macros in event_rpcgen when compiler is not GCC.
+ o Activate fd events in a pseudorandom order with O(N) backends, so that we don't systematically favor low fds (select) or earlier-added fds (poll, win32).
+ o Fix another pair of fencepost bugs in epoll.c. [Patch from Adam Langley.]
+ o Do not break evdns connections to nameservers when our IP changes.
+ o Set truncated flag correctly in evdns server replies.
+ o Disable strict aliasing with GCC: our code is not compliant with it.
+
+Changes in 1.4.11-stable:
+ o Fix a bug when removing a timeout from the heap. [Patch from Marko Kreen]
+ o Remove the limit on size of HTTP headers by removing static buffers.
+ o Fix a nasty dangling pointer bug in epoll.c that could occur after epoll_recalc(). [Patch from Kevin Springborn]
+ o Distribute Win32-Code/event-config.h, not ./event-config.h
+
+Changes in 1.4.10-stable:
+ o clean up buffered http connection data on reset; reported by Brian O'Kelley
+ o bug fix and potential race condition in signal handling; from Alexander Drozdov
+ o rename the Solaris event ports backend to evport
+ o support compilation on Haiku
+ o fix signal processing when a signal callback delivers a signal; from Alexander Drozdov
+ o const-ify some arguments to evdns functions.
+ o off-by-one error in epoll_recalc; reported by Victor Goya
+ o include Doxyfile in tar ball; from Jeff Garzik
+ o correctly parse queries with encoded \r, \n or + characters
+
+Changes in 1.4.9-stable:
+ o event_add would not return error for some backends; from Dean McNamee
+ o Clear the timer cache on entering the event loop; reported by Victor Chang
+ o Only bind the socket on connect when a local address has been provided; reported by Alejo Sanchez
+ o Allow setting of local port for evhttp connections to support millions of connections from a single system; from Richard Jones.
+ o Clear the timer cache when leaving the event loop; reported by Robin Haberkorn
+ o Fix a typo in setting the global event base; reported by lance.
+ o Fix a memory leak when reading multi-line headers
+ o Fix a memory leak by not running explicit close detection for server connections
+
+Changes in 1.4.8-stable:
+ o Match the query in DNS replies to the query in the request; from Vsevolod Stakhov.
+ o Fix a merge problem in which name_from_addr returned pointers to the stack; found by Jiang Hong.
+ o Do not remove Accept-Encoding header
+
+Changes in 1.4.7-stable:
+ o Fix a bug where headers arriving in multiple packets were not parsed; fix from Jiang Hong; test by me.
+
+Changes in 1.4.6-stable:
+ o evutil.h now includes <stdarg.h> directly
+ o switch all uses of [v]snprintf over to evutil
+ o Correct handling of trailing headers in chunked replies; from Scott Lamb.
+ o Support multi-line HTTP headers; based on a patch from Moshe Litvin
+ o Reject negative Content-Length headers; anonymous bug report
+ o Detect CLOCK_MONOTONIC at runtime for evdns; anonymous bug report
+ o Fix a bug where deleting signals with the kqueue backend would cause subsequent adds to fail
+ o Support multiple events listening on the same signal; make signals regular events that go on the same event queue; problem report by Alexander Drozdov.
+ o Deal with evbuffer_read() returning -1 on EINTR|EAGAIN; from Adam Langley.
+ o Fix a bug in which the DNS server would incorrectly set the type of a cname reply to a.
+ o Fix a bug where setting the timeout on a bufferevent would not take effect if the event was already pending.
+ o Fix a memory leak when using signals for some event bases; reported by Alexander Drozdov.
+ o Add libevent.vcproj file to distribution to help with Windows build.
+ o Fix a problem with epoll() and reinit; problem report by Alexander Drozdov.
+ o Fix off-by-one errors in devpoll; from Ian Bell
+ o Make event_add not change any state if it fails; reported by Ian Bell.
+ o Do not warn on accept when errno is either EAGAIN or EINTR
+
+Changes in 1.4.5-stable:
+ o Fix connection keep-alive behavior for HTTP/1.0
+ o Fix use of freed memory in event_reinit; pointed out by Peter Postma
+ o Constify struct timeval * where possible; pointed out by Forest Wilkinson
+ o allow min_heap_erase to be called on removed members; from liusifan.
+ o Rename INPUT and OUTPUT to EVRPC_INPUT and EVRPC_OUTPUT. Retain INPUT/OUTPUT aliases on non-win32 platforms for backwards compatibility.
+ o Do not use SO_REUSEADDR when connecting
+ o Fix Windows build
+ o Fix a bug in event_rpcgen when generated fixed-sized entries
+
+Changes in 1.4.4-stable:
+ o Correct the documentation on buffer printf functions.
+ o Don't warn on unimplemented epoll_create(): this isn't a problem, just a reason to fall back to poll or select.
+ o Correctly handle timeouts larger than 35 minutes on Linux with epoll.c. This is probably a kernel defect, but we'll have to support old kernels anyway even if it gets fixed.
+ o Fix a potential stack corruption bug in tagging on 64-bit CPUs.
+ o expose bufferevent_setwatermark via header files and fix high watermark on read
+ o fix a bug in bufferevent read water marks and add a test for them
+ o introduce bufferevent_setcb and bufferevent_setfd to allow better manipulation of bufferevents
+ o use libevent's internal timercmp on all platforms, to avoid bugs on old platforms where timercmp(a,b,<=) is buggy.
+ o reduce system calls for getting current time by caching it.
+ o fix evhttp_bind_socket() so that multiple sockets can be bound by the same http server.
+ o Build test directory correctly with CPPFLAGS set.
+ o Fix build under Visual C++ 2005.
+ o Expose evhttp_accept_socket() API.
+ o Merge windows gettimeofday() replacement into a new evutil_gettimeofday() function.
+ o Fix autoconf script behavior on IRIX.
+ o Make sure winsock2.h include always comes before windows.h include.
+
+Changes in 1.4.3-stable:
+ o include Content-Length in reply for HTTP/1.0 requests with keep-alive
+ o Patch from Tani Hosokawa: make some functions in http.c threadsafe.
+ o Do not free the kqop file descriptor in other processes, also allow it to be 0; from Andrei Nigmatulin
+ o make event_rpcgen.py generate code include event-config.h; reported by Sam Banks.
+ o make event methods static so that they are not exported; from Andrei Nigmatulin
+ o make RPC replies use application/octet-stream as mime type
+ o do not delete uninitialized timeout event in evdns
+
+Changes in 1.4.2-rc:
+ o remove pending timeouts on event_base_free()
+ o also check EAGAIN for Solaris' event ports; from W.C.A. Wijngaards
+ o devpoll and evport need reinit; tested by W.C.A Wijngaards
+ o event_base_get_method; from Springande Ulv
+ o Send CRLF after each chunk in HTTP output, for compliance with RFC2616. Patch from "propanbutan". Fixes bug 1894184.
+ o Add a int64_t parsing function, with unit tests, so we can apply Scott Lamb's fix to allow large HTTP values.
+ o Use a 64-bit field to hold HTTP content-lengths. Patch from Scott Lamb.
+ o Allow regression code to build even without Python installed
+ o remove NDEBUG ifdefs from evdns.c
+ o update documentation of event_loop and event_base_loop; from Tani Hosokawa.
+ o detect integer types properly on platforms without stdint.h
+ o Remove "AM_MAINTAINER_MODE" declaration in configure.in: now makefiles and configure should get re-generated automatically when Makefile.am or configure.in changes.
+ o do not insert event into list when evsel->add fails
+
+Changes in 1.4.1-beta:
+ o free minheap on event_base_free(); from Christopher Layne
+ o debug cleanups in signal.c; from Christopher Layne
+ o provide event_base_new() that does not set the current_base global
+ o bufferevent_write now uses a const source argument; report from Charles Kerr
+ o better documentation for event_base_loopexit; from Scott Lamb.
+ o Make kqueue have the same behavior as other backends when a signal is caught between event_add() and event_loop(). Previously, it would catch and ignore such signals.
+ o Make kqueue restore signal handlers correctly when event_del() is called.
+ o provide event_reinit() to reinitialize an event_base after fork
+ o small improvements to evhttp documentation
+ o always generate Date and Content-Length headers for HTTP/1.1 replies
+ o set the correct event base for HTTP close events
+ o New function, event_{base_}loopbreak. Like event_loopexit, it makes an event loop stop executing and return. Unlike event_loopexit, it keeps subsequent pending events from getting executed. Patch from Scott Lamb
+ o Removed obsoleted recalc code
+ o pull setters/getters out of RPC structures into a base class to which we just need to store a pointer; this reduces the memory footprint of these structures.
+ o fix a bug with event_rpcgen for integers
+ o move EV_PERSIST handling out of the event backends
+ o support for 32-bit tag numbers in rpc structures; this is wire compatible, but changes the API slightly.
+ o prefix {encode,decode}_tag functions with evtag to avoid collisions
+ o Correctly handle DNS replies with no answers set (Fixes bug 1846282)
+ o The configure script now takes an --enable-gcc-warnings option that turns on many optional gcc warnings. (Nick has been building with these for a while, but they might be useful to other developers.)
+ o When building with GCC, use the "format" attribute to verify type correctness of calls to printf-like functions.
+ o removed linger from http server socket; reported by Ilya Martynov
+ o allow \r or \n individually to separate HTTP headers instead of the standard "\r\n"; from Charles Kerr.
+ o demote most http warnings to debug messages
+ o Fix Solaris compilation; from Magne Mahre
+ o Add a "Date" header to HTTP responses, as required by HTTP 1.1.
+ o Support specifying the local address of an evhttp_connection using set_local_address
+ o Fix a memory leak in which failed HTTP connections would not free the request object
+ o Make adding of array members in event_rpcgen more efficient, by doubling memory allocation
+ o Fix a memory leak in the DNS server
+ o Fix compilation when DNS_USE_OPENSSL_FOR_ID is enabled
+ o Fix buffer size and string generation in evdns_resolve_reverse_ipv6().
+ o Respond to nonstandard DNS queries with "NOTIMPL" rather than by ignoring them.
+ o In DNS responses, the CD flag should be preserved, not the TC flag.
+ o Fix http.c to compile properly with USE_DEBUG; from Christopher Layne
+ o Handle NULL timeouts correctly on Solaris; from Trond Norbye
+ o Recalculate pending events properly when reallocating event array on Solaris; from Trond Norbye
+ o Add Doxygen documentation to header files; from Mark Heily
+ o Add a evdns_set_transaction_id_fn() function to override the default
+ transaction ID generation code.
+ o Add an evutil module (with header evutil.h) to implement our standard cross-platform hacks, on the theory that somebody else would like to use them too.
+ o Fix signals implementation on windows.
+ o Fix http module on windows to close sockets properly.
+ o Make autogen.sh script run correctly on systems where /bin/sh isn't bash. (Patch from Trond Norbye, rewritten by Hagne Mahre and then Hannah Schroeter.)
+ o Skip calling gettime() in timeout_process if we are not in fact waiting for any events. (Patch from Trond Norbye)
+ o Make test subdirectory compile under mingw.
+ o Fix win32 buffer.c behavior so that it is correct for sockets (which do not like ReadFile and WriteFile).
+ o Make the test.sh script run unit tests for the evpoll method.
+ o Make the entire evdns.h header enclosed in "extern C" as appropriate.
+ o Fix implementation of strsep on platforms that lack it
+ o Fix implementation of getaddrinfo on platforms that lack it; mainly, this will make Windows http.c work better. Original patch by Lubomir Marinov.
+ o Fix evport implementation: port_disassociate called on unassociated events resulting in bogus errors; more efficient memory management; from Trond Norbye and Prakash Sangappa
+ o support for hooks on rpc input and output; can be used to implement rpc independent processing such as compression or authentication.
+ o use a min heap instead of a red-black tree for timeouts; as a result finding the min is a O(1) operation now; from Maxim Yegorushkin
+ o associate an event base with an rpc pool
+ o added two additional libraries: libevent_core and libevent_extra in addition to the regular libevent. libevent_core contains only the event core whereas libevent_extra contains dns, http and rpc support
+ o Begin using libtool's library versioning support correctly. If we don't mess up, this will more or less guarantee binaries linked against old versions of libevent continue working when we make changes to libevent that do not break backward compatibility.
+ o Fix evhttp.h compilation when TAILQ_ENTRY is not defined.
+ o Small code cleanups in epoll_dispatch().
+ o Increase the maximum number of addresses read from a packet in evdns to 32.
+ o Remove support for the rtsig method: it hasn't compiled for a while, and nobody seems to miss it very much. Let us know if there's a good reason to put it back in.
+ o Rename the "class" field in evdns_server_request to dns_question_class, so that it won't break compilation under C++. Use a macro so that old code won't break. Mark the macro as deprecated.
+ o Fix DNS unit tests so that having a DNS server with broken IPv6 support is no longer cause for aborting the unit tests.
+ o Make event_base_free() succeed even if there are pending non-internal events on a base. This may still leak memory and fds, but at least it no longer crashes.
+ o Post-process the config.h file into a new, installed event-config.h file that we can install, and whose macros will be safe to include in header files.
+ o Remove the long-deprecated acconfig.h file.
+ o Do not require #include <sys/types.h> before #include <event.h>.
+ o Add new evutil_timer* functions to wrap (or replace) the regular timeval manipulation functions.
+ o Fix many build issues when using the Microsoft C compiler.
+ o Remove a bash-ism in autogen.sh
+ o When calling event_del on a signal, restore the signal handler's previous value rather than setting it to SIG_DFL. Patch from Christopher Layne.
+ o Make the logic for active events work better with internal events; patch from Christopher Layne.
+ o We do not need to specially remove a timeout before calling event_del; patch from Christopher Layne.
diff --git a/libevent/Doxyfile b/libevent/Doxyfile
new file mode 100644
index 00000000000..77f6de89b46
--- /dev/null
+++ b/libevent/Doxyfile
@@ -0,0 +1,230 @@
+# Doxyfile 1.5.1
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project
+#
+# All text after a hash (#) is considered a comment and will be ignored
+# The format is:
+# TAG = value [value, ...]
+# For lists items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ")
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
+# by quotes) that should identify the project.
+
+PROJECT_NAME = libevent
+
+# Place all output under 'doxygen/'
+
+OUTPUT_DIRECTORY = doxygen/
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
+# will interpret the first line (until the first dot) of a JavaDoc-style
+# comment as the brief description. If set to NO, the JavaDoc
+# comments will behave just like the Qt-style comments (thus requiring an
+# explicit @brief command for a brief description.)
+
+JAVADOC_AUTOBRIEF = YES
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
+# sources only. Doxygen will then generate output that is more tailored for C.
+# For instance, some of the names that are used will be different. The list
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
+# brief documentation of file, namespace and class members alphabetically
+# by member name. If set to NO (the default) the members will appear in
+# declaration order.
+
+SORT_BRIEF_DOCS = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT = event.h evdns.h evhttp.h evrpc.h
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate Latex output.
+
+GENERATE_LATEX = YES
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, a4wide, letter, legal and
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE = a4wide
+
+# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS = NO
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX = NO
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode.
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE = NO
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
+# generate man pages
+
+GENERATE_MAN = YES
+
+# The MAN_EXTENSION tag determines the extension that is added to
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
+# then it will generate one additional man file for each entity
+# documented in the real man page(s). These additional files
+# only source the real man page, but without them the man command
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_DEFINED tags.
+
+EXPAND_ONLY_PREDEF = NO
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
+# in the INCLUDE_PATH (see below) will be search if a #include is found.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# be used.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
+
+PREDEFINED = TAILQ_ENTRY RB_ENTRY _EVENT_DEFINED_TQENTRY
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all function-like macros that are alone
+# on a line, have an all uppercase name, and do not end with a semicolon. Such
+# function macros are typically used for boiler-plate code, and will confuse
+# the parser if not removed.
+
+SKIP_FUNCTION_MACROS = YES
diff --git a/libevent/Makefile.am b/libevent/Makefile.am
new file mode 100644
index 00000000000..8d9d7520373
--- /dev/null
+++ b/libevent/Makefile.am
@@ -0,0 +1,124 @@
+AUTOMAKE_OPTIONS = foreign no-dependencies
+
+# This is the point release for libevent. It shouldn't include any
+# a/b/c/d/e notations.
+RELEASE = 1.4
+
+# This is the version info for the libevent binary API. It has three
+# numbers:
+# Current -- the number of the binary API that we're implementing
+# Revision -- which iteration of the implementation of the binary
+# API are we supplying?
+# Age -- How many previous binary API versions do we also
+# support?
+#
+# If we release a new version that does not change the binary API,
+# increment Revision.
+#
+# If we release a new version that changes the binary API, but does
+# not break programs compiled against the old binary API, increment
+# Current and Age. Set Revision to 0, since this is the first
+# implementation of the new API.
+#
+# Otherwise, we're changing the binary API and breaking backward
+# compatibility with old binaries. Increment Current. Set Age to 0,
+# since we're backward compatible with no previous APIs. Set Revision
+# to 0 too.
+
+# History:
+# Libevent 1.4.1 was 2:0:0
+# Libevent 1.4.2 should be 3:0:0
+# Libevent 1.4.5 is 3:0:1 (we forgot to increment in the past)
+VERSION_INFO = 3:3:1
+
+bin_SCRIPTS = event_rpcgen.py
+
+EXTRA_DIST = autogen.sh event.h event-internal.h log.h evsignal.h evdns.3 \
+ evrpc.h evrpc-internal.h min_heap.h \
+ event.3 \
+ Doxyfile \
+ kqueue.c epoll_sub.c epoll.c select.c poll.c signal.c \
+ evport.c devpoll.c event_rpcgen.py \
+ sample/Makefile.am sample/Makefile.in sample/event-test.c \
+ sample/signal-test.c sample/time-test.c \
+ test/Makefile.am test/Makefile.in test/bench.c test/regress.c \
+ test/test-eof.c test/test-weof.c test/test-time.c \
+ test/test-init.c test/test.sh \
+ compat/sys/queue.h compat/sys/_time.h \
+ WIN32-Code/config.h \
+ WIN32-Code/event-config.h \
+ WIN32-Code/win32.c \
+ WIN32-Code/tree.h \
+ WIN32-Prj/event_test/event_test.dsp \
+ WIN32-Prj/event_test/test.txt WIN32-Prj/libevent.dsp \
+ WIN32-Prj/libevent.dsw WIN32-Prj/signal_test/signal_test.dsp \
+ WIN32-Prj/time_test/time_test.dsp WIN32-Prj/regress/regress.vcproj \
+ WIN32-Prj/libevent.sln WIN32-Prj/libevent.vcproj
+
+lib_LTLIBRARIES = libevent.la libevent_core.la libevent_extra.la
+
+if BUILD_WIN32
+
+SUBDIRS = . sample
+SYS_LIBS = -lws2_32
+SYS_SRC = WIN32-Code/win32.c
+SYS_INCLUDES = -IWIN32-Code
+
+else
+
+SUBDIRS = . sample test
+SYS_LIBS =
+SYS_SRC =
+SYS_INCLUDES =
+
+endif
+
+BUILT_SOURCES = event-config.h
+
+event-config.h: config.h
+ echo '/* event-config.h' > $@
+ echo ' * Generated by autoconf; post-processed by libevent.' >> $@
+ echo ' * Do not edit this file.' >> $@
+ echo ' * Do not rely on macros in this file existing in later versions.'>> $@
+ echo ' */' >> $@
+ echo '#ifndef _EVENT_CONFIG_H_' >> $@
+ echo '#define _EVENT_CONFIG_H_' >> $@
+
+ sed -e 's/#define /#define _EVENT_/' \
+ -e 's/#undef /#undef _EVENT_/' \
+ -e 's/#ifndef /#ifndef _EVENT_/' < config.h >> $@
+ echo "#endif" >> $@
+
+CORE_SRC = event.c buffer.c evbuffer.c log.c evutil.c $(SYS_SRC)
+EXTRA_SRC = event_tagging.c http.c evhttp.h http-internal.h evdns.c \
+ evdns.h evrpc.c evrpc.h evrpc-internal.h \
+	strlcpy.c strlcpy-internal.h
+
+libevent_la_SOURCES = $(CORE_SRC) $(EXTRA_SRC)
+libevent_la_LIBADD = @LTLIBOBJS@ $(SYS_LIBS)
+libevent_la_LDFLAGS = -release $(RELEASE) -version-info $(VERSION_INFO)
+
+libevent_core_la_SOURCES = $(CORE_SRC)
+libevent_core_la_LIBADD = @LTLIBOBJS@ $(SYS_LIBS)
+libevent_core_la_LDFLAGS = -release $(RELEASE) -version-info $(VERSION_INFO)
+
+libevent_extra_la_SOURCES = $(EXTRA_SRC)
+libevent_extra_la_LIBADD = @LTLIBOBJS@ $(SYS_LIBS)
+libevent_extra_la_LDFLAGS = -release $(RELEASE) -version-info $(VERSION_INFO)
+
+include_HEADERS = event.h evhttp.h evdns.h evrpc.h evutil.h
+
+nodist_include_HEADERS = event-config.h
+
+INCLUDES = -I$(srcdir)/compat $(SYS_INCLUDES)
+
+man_MANS = event.3 evdns.3
+
+verify: libevent.la
+ cd test && make verify
+
+doxygen: FORCE
+ doxygen $(srcdir)/Doxyfile
+FORCE:
+
+DISTCLEANFILES = *~ event-config.h
diff --git a/libevent/README b/libevent/README
new file mode 100644
index 00000000000..b0650392ed4
--- /dev/null
+++ b/libevent/README
@@ -0,0 +1,57 @@
+To build libevent, type
+
+$ ./configure && make
+
+ (If you got libevent from the subversion repository, you will
+ first need to run the included "autogen.sh" script in order to
+ generate the configure script.)
+
+Install as root via
+
+# make install
+
+You can run the regression tests by
+
+$ make verify
+
+Before reporting any problems, please run the regression tests.
+
+To enable the low-level tracing build the library as:
+
+CFLAGS=-DUSE_DEBUG ./configure [...]
+
+Acknowledgements:
+-----------------
+
+The following people have helped with suggestions, ideas, code or
+fixing bugs:
+
+ Alejo
+ Weston Andros Adamson
+ William Ahern
+ Stas Bekman
+ Andrew Danforth
+ Mike Davis
+ Shie Erlich
+ Alexander von Gernler
+ Artur Grabowski
+ Aaron Hopkins
+ Claudio Jeker
+ Scott Lamb
+ Adam Langley
+ Philip Lewis
+ David Libenzi
+ Nick Mathewson
+ Andrey Matveev
+ Richard Nyberg
+ Jon Oberheide
+ Phil Oleson
+ Dave Pacheco
+ Tassilo von Parseval
+ Pierre Phaneuf
+ Jon Poland
+ Bert JW Regeer
+ Dug Song
+ Taral
+
+If I have forgotten your name, please contact me.
diff --git a/libevent/WIN32-Code/event-config.h b/libevent/WIN32-Code/event-config.h
new file mode 100644
index 00000000000..3059080274b
--- /dev/null
+++ b/libevent/WIN32-Code/event-config.h
@@ -0,0 +1,244 @@
+/* event-config.h
+ * Generated by autoconf; post-processed by libevent.
+ * Do not edit this file.
+ * Do not rely on macros in this file existing in later versions.
+ */
+#ifndef _EVENT_CONFIG_H_
+#define _EVENT_CONFIG_H_
+/* config.h. Generated by configure. */
+/* config.h.in. Generated from configure.in by autoheader. */
+
+/* Define if clock_gettime is available in libc */
+/* #undef _EVENT_DNS_USE_CPU_CLOCK_FOR_ID */
+
+/* Define if no secure id variant is available */
+#define _EVENT_DNS_USE_GETTIMEOFDAY_FOR_ID 1
+
+/* Define to 1 if you have the `clock_gettime' function. */
+/* #undef _EVENT_HAVE_CLOCK_GETTIME */
+
+/* Define if /dev/poll is available */
+/* #undef _EVENT_HAVE_DEVPOLL */
+
+/* Define to 1 if you have the <dlfcn.h> header file. */
+/* #undef _EVENT_HAVE_DLFCN_H */
+
+/* Define if your system supports the epoll system calls */
+/* #undef _EVENT_HAVE_EPOLL */
+
+/* Define to 1 if you have the `epoll_ctl' function. */
+/* #undef _EVENT_HAVE_EPOLL_CTL */
+
+/* Define if your system supports event ports */
+/* #undef _EVENT_HAVE_EVENT_PORTS */
+
+/* Define to 1 if you have the `fcntl' function. */
+/* #undef _EVENT_HAVE_FCNTL */
+
+/* Define to 1 if you have the <fcntl.h> header file. */
+#define _EVENT_HAVE_FCNTL_H 1
+
+/* Define to 1 if you have the `getaddrinfo' function. */
+/* #undef _EVENT_HAVE_GETADDRINFO */
+
+/* Define to 1 if you have the `getnameinfo' function. */
+/* #undef _EVENT_HAVE_GETNAMEINFO */
+
+/* Define to 1 if you have the `gettimeofday' function. */
+/* #define _EVENT_HAVE_GETTIMEOFDAY 1 */
+
+/* Define to 1 if you have the `inet_ntop' function. */
+/* #undef _EVENT_HAVE_INET_NTOP */
+
+/* Define to 1 if you have the <inttypes.h> header file. */
+/* #undef _EVENT_HAVE_INTTYPES_H 1 */
+
+/* Define to 1 if you have the `kqueue' function. */
+/* #undef _EVENT_HAVE_KQUEUE */
+
+/* Define to 1 if you have the `nsl' library (-lnsl). */
+/* #undef _EVENT_HAVE_LIBNSL */
+
+/* Define to 1 if you have the `resolv' library (-lresolv). */
+/* #undef _EVENT_HAVE_LIBRESOLV */
+
+/* Define to 1 if you have the `rt' library (-lrt). */
+/* #undef _EVENT_HAVE_LIBRT */
+
+/* Define to 1 if you have the `socket' library (-lsocket). */
+/* #undef _EVENT_HAVE_LIBSOCKET */
+
+/* Define to 1 if you have the <memory.h> header file. */
+#define _EVENT_HAVE_MEMORY_H 1
+
+/* Define to 1 if you have the <netinet/in6.h> header file. */
+/* #undef _EVENT_HAVE_NETINET_IN6_H */
+
+/* Define to 1 if you have the `poll' function. */
+/* #undef _EVENT_HAVE_POLL */
+
+/* Define to 1 if you have the <poll.h> header file. */
+/* #undef _EVENT_HAVE_POLL_H */
+
+/* Define to 1 if you have the `port_create' function. */
+/* #undef _EVENT_HAVE_PORT_CREATE */
+
+/* Define to 1 if you have the <port.h> header file. */
+/* #undef _EVENT_HAVE_PORT_H */
+
+/* Define to 1 if you have the `select' function. */
+/* #undef _EVENT_HAVE_SELECT */
+
+/* Define if F_SETFD is defined in <fcntl.h> */
+/* #undef _EVENT_HAVE_SETFD */
+
+/* Define to 1 if you have the `sigaction' function. */
+/* #undef _EVENT_HAVE_SIGACTION */
+
+/* Define to 1 if you have the `signal' function. */
+#define _EVENT_HAVE_SIGNAL 1
+
+/* Define to 1 if you have the <signal.h> header file. */
+#define _EVENT_HAVE_SIGNAL_H 1
+
+/* Define to 1 if you have the <stdarg.h> header file. */
+#define _EVENT_HAVE_STDARG_H 1
+
+/* Define to 1 if you have the <stdint.h> header file. */
+/* #define _EVENT_HAVE_STDINT_H 1 */
+
+/* Define to 1 if you have the <stdlib.h> header file. */
+#define _EVENT_HAVE_STDLIB_H 1
+
+/* Define to 1 if you have the <strings.h> header file. */
+#define _EVENT_HAVE_STRINGS_H 1
+
+/* Define to 1 if you have the <string.h> header file. */
+#define _EVENT_HAVE_STRING_H 1
+
+/* Define to 1 if you have the `strlcpy' function. */
+/* #undef _EVENT_HAVE_STRLCPY */
+
+/* Define to 1 if you have the `strsep' function. */
+/* #undef _EVENT_HAVE_STRSEP */
+
+/* Define to 1 if you have the `strtok_r' function. */
+/* #undef _EVENT_HAVE_STRTOK_R */
+
+/* Define to 1 if the system has the type `struct in6_addr'. */
+#define _EVENT_HAVE_STRUCT_IN6_ADDR 1
+
+/* Define to 1 if you have the <sys/devpoll.h> header file. */
+/* #undef _EVENT_HAVE_SYS_DEVPOLL_H */
+
+/* Define to 1 if you have the <sys/epoll.h> header file. */
+/* #undef _EVENT_HAVE_SYS_EPOLL_H */
+
+/* Define to 1 if you have the <sys/event.h> header file. */
+/* #undef _EVENT_HAVE_SYS_EVENT_H */
+
+/* Define to 1 if you have the <sys/ioctl.h> header file. */
+/* #undef _EVENT_HAVE_SYS_IOCTL_H */
+
+/* Define to 1 if you have the <sys/queue.h> header file. */
+/* #undef _EVENT_HAVE_SYS_QUEUE_H */
+
+/* Define to 1 if you have the <sys/select.h> header file. */
+/* #undef _EVENT_HAVE_SYS_SELECT_H */
+
+/* Define to 1 if you have the <sys/socket.h> header file. */
+/* #undef _EVENT_HAVE_SYS_SOCKET_H */
+
+/* Define to 1 if you have the <sys/stat.h> header file. */
+#define _EVENT_HAVE_SYS_STAT_H 1
+
+/* Define to 1 if you have the <sys/time.h> header file. */
+/* #define _EVENT_HAVE_SYS_TIME_H 1 */
+
+/* Define to 1 if you have the <sys/types.h> header file. */
+/* #define _EVENT_HAVE_SYS_TYPES_H 1 */
+
+/* Define if TAILQ_FOREACH is defined in <sys/queue.h> */
+/* #undef _EVENT_HAVE_TAILQFOREACH */
+
+/* Define if timeradd is defined in <sys/time.h> */
+/* #undef _EVENT_HAVE_TIMERADD */
+
+/* Define if timerclear is defined in <sys/time.h> */
+/* #define _EVENT_HAVE_TIMERCLEAR 1 */
+
+/* Define if timercmp is defined in <sys/time.h> */
+#define _EVENT_HAVE_TIMERCMP 1
+
+/* Define if timerisset is defined in <sys/time.h> */
+#define _EVENT_HAVE_TIMERISSET 1
+
+/* Define to 1 if you have the <unistd.h> header file. */
+/* #define _EVENT_HAVE_UNISTD_H 1 */
+
+/* Define to 1 if you have the `vasprintf' function. */
+/* #undef _EVENT_HAVE_VASPRINTF */
+
+/* Define if kqueue works correctly with pipes */
+/* #undef _EVENT_HAVE_WORKING_KQUEUE */
+
+/* Name of package */
+#define _EVENT_PACKAGE "libevent"
+
+/* Define to the address where bug reports for this package should be sent. */
+#define _EVENT_PACKAGE_BUGREPORT ""
+
+/* Define to the full name of this package. */
+#define _EVENT_PACKAGE_NAME ""
+
+/* Define to the full name and version of this package. */
+#define _EVENT_PACKAGE_STRING ""
+
+/* Define to the one symbol short name of this package. */
+#define _EVENT_PACKAGE_TARNAME ""
+
+/* Define to the version of this package. */
+#define _EVENT_PACKAGE_VERSION ""
+
+/* Define to 1 if you have the ANSI C header files. */
+#define _EVENT_STDC_HEADERS 1
+
+/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. */
+#define _EVENT_TIME_WITH_SYS_TIME 1
+
+/* Version number of package */
+#define _EVENT_VERSION "1.3.99-trunk"
+
+/* Define to an appropriate substitute if the compiler doesn't have __func__ */
+/* #undef _EVENT___func__ */
+
+/* Define to empty if `const' does not conform to ANSI C. */
+/* #undef _EVENT_const */
+
+/* Define to `__inline__' or `__inline' if that's what the C compiler
+ calls it, or to nothing if 'inline' is not supported under any name. */
+#ifndef _EVENT___cplusplus
+#define _EVENT_inline __inline
+#endif
+
+/* Define to `int' if <sys/types.h> does not define. */
+/* #undef _EVENT_pid_t */
+
+/* Define to `unsigned' if <sys/types.h> does not define. */
+/* #undef _EVENT_size_t */
+
+/* Define to unsigned int if you don't have it */
+#define _EVENT_socklen_t unsigned int
+
+/* Define to `unsigned short' if <sys/types.h> does not define. */
+/* #undef _EVENT_uint16_t */
+
+/* Define to `unsigned int' if <sys/types.h> does not define. */
+/* #undef _EVENT_uint32_t */
+
+/* Define to `unsigned long long' if <sys/types.h> does not define. */
+/* #undef _EVENT_uint64_t */
+
+/* Define to `unsigned char' if <sys/types.h> does not define. */
+/* #undef _EVENT_uint8_t */
+#endif
diff --git a/libevent/WIN32-Code/misc.c b/libevent/WIN32-Code/misc.c
new file mode 100644
index 00000000000..371e192beae
--- /dev/null
+++ b/libevent/WIN32-Code/misc.c
@@ -0,0 +1,93 @@
+#include <stdio.h>
+#include <string.h>
+#include <windows.h>
+#include <sys/timeb.h>
+#include <time.h>
+
+#ifdef __GNUC__
+/*our prototypes for timeval and timezone are in here, just in case the above
+ headers don't have them*/
+#include "misc.h"
+#endif
+
+/****************************************************************************
+ *
+ * Function: gettimeofday(struct timeval *, struct timezone *)
+ *
+ * Purpose: Get current time of day.
+ *
+ * Arguments: tv => Place to store the current time of day.
+ * tz => Ignored.
+ *
+ * Returns: 0 => Success.
+ *
+ ****************************************************************************/
+
+#ifndef HAVE_GETTIMEOFDAY
+int gettimeofday(struct timeval *tv, struct timezone *tz) {
+ struct _timeb tb;
+
+ if(tv == NULL)
+ return -1;
+
+ _ftime(&tb);
+ tv->tv_sec = (long) tb.time;
+ tv->tv_usec = ((int) tb.millitm) * 1000;
+ return 0;
+}
+#endif
+
+#if 0
+int
+win_read(int fd, void *buf, unsigned int length)
+{
+ DWORD dwBytesRead;
+ int res = ReadFile((HANDLE) fd, buf, length, &dwBytesRead, NULL);
+ if (res == 0) {
+ DWORD error = GetLastError();
+ if (error == ERROR_NO_DATA)
+ return (0);
+ return (-1);
+ } else
+ return (dwBytesRead);
+}
+
+int
+win_write(int fd, void *buf, unsigned int length)
+{
+ DWORD dwBytesWritten;
+ int res = WriteFile((HANDLE) fd, buf, length, &dwBytesWritten, NULL);
+ if (res == 0) {
+ DWORD error = GetLastError();
+ if (error == ERROR_NO_DATA)
+ return (0);
+ return (-1);
+ } else
+ return (dwBytesWritten);
+}
+
+int
+socketpair(int d, int type, int protocol, int *sv)
+{
+ static int count;
+ char buf[64];
+ HANDLE fd;
+ DWORD dwMode;
+ sprintf(buf, "\\\\.\\pipe\\levent-%d", count++);
+ /* Create a duplex pipe which will behave like a socket pair */
+ fd = CreateNamedPipe(buf, PIPE_ACCESS_DUPLEX, PIPE_TYPE_BYTE | PIPE_NOWAIT,
+ PIPE_UNLIMITED_INSTANCES, 4096, 4096, 0, NULL);
+ if (fd == INVALID_HANDLE_VALUE)
+ return (-1);
+ sv[0] = (int)fd;
+
+ fd = CreateFile(buf, GENERIC_READ|GENERIC_WRITE, 0, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
+ if (fd == INVALID_HANDLE_VALUE)
+ return (-1);
+ dwMode = PIPE_NOWAIT;
+ SetNamedPipeHandleState(fd, &dwMode, NULL, NULL);
+ sv[1] = (int)fd;
+
+ return (0);
+}
+#endif
diff --git a/libevent/WIN32-Code/misc.h b/libevent/WIN32-Code/misc.h
new file mode 100644
index 00000000000..aced574687c
--- /dev/null
+++ b/libevent/WIN32-Code/misc.h
@@ -0,0 +1,11 @@
+#ifndef MISC_H
+#define MISC_H
+
+struct timezone;
+struct timeval;
+
+#ifndef HAVE_GETTIMEOFDAY
+int gettimeofday(struct timeval *,struct timezone *);
+#endif
+
+#endif
diff --git a/libevent/WIN32-Code/tree.h b/libevent/WIN32-Code/tree.h
new file mode 100644
index 00000000000..79e8d91f0eb
--- /dev/null
+++ b/libevent/WIN32-Code/tree.h
@@ -0,0 +1,1354 @@
+/* $OpenBSD: tree.h,v 1.7 2002/10/17 21:51:54 art Exp $ */
+/*
+ * Copyright 2002 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SYS_TREE_H_
+#define _SYS_TREE_H_
+
+/*
+ * This file defines data structures for different types of trees:
+ * splay trees and red-black trees.
+ *
+ * A splay tree is a self-organizing data structure. Every operation
+ * on the tree causes a splay to happen. The splay moves the requested
+ * node to the root of the tree and partly rebalances it.
+ *
+ * This has the benefit that request locality causes faster lookups as
+ * the requested nodes move to the top of the tree. On the other hand,
+ * every lookup causes memory writes.
+ *
+ * The Balance Theorem bounds the total access time for m operations
+ * and n inserts on an initially empty tree as O((m + n)lg n). The
+ * amortized cost for a sequence of m accesses to a splay tree is O(lg n);
+ *
+ * A red-black tree is a binary search tree with the node color as an
+ * extra attribute. It fulfills a set of conditions:
+ * - every search path from the root to a leaf consists of the
+ * same number of black nodes,
+ * - each red node (except for the root) has a black parent,
+ * - each leaf node is black.
+ *
+ * Every operation on a red-black tree is bounded as O(lg n).
+ * The maximum height of a red-black tree is 2lg (n+1).
+ */
+
+#define SPLAY_HEAD(name, type) \
+struct name { \
+ struct type *sph_root; /* root of the tree */ \
+}
+
+#define SPLAY_INITIALIZER(root) \
+ { NULL }
+
+#define SPLAY_INIT(root) do { \
+ (root)->sph_root = NULL; \
+} while (0)
+
+#define SPLAY_ENTRY(type) \
+struct { \
+ struct type *spe_left; /* left element */ \
+ struct type *spe_right; /* right element */ \
+}
+
+#define SPLAY_LEFT(elm, field) (elm)->field.spe_left
+#define SPLAY_RIGHT(elm, field) (elm)->field.spe_right
+#define SPLAY_ROOT(head) (head)->sph_root
+#define SPLAY_EMPTY(head) (SPLAY_ROOT(head) == NULL)
+
+/* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp hold SPLAY_{RIGHT,LEFT} */
+#define SPLAY_ROTATE_RIGHT(head, tmp, field) do { \
+ SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field); \
+ SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
+ (head)->sph_root = tmp; \
+} while (0)
+
+#define SPLAY_ROTATE_LEFT(head, tmp, field) do { \
+ SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field); \
+ SPLAY_LEFT(tmp, field) = (head)->sph_root; \
+ (head)->sph_root = tmp; \
+} while (0)
+
+#define SPLAY_LINKLEFT(head, tmp, field) do { \
+ SPLAY_LEFT(tmp, field) = (head)->sph_root; \
+ tmp = (head)->sph_root; \
+ (head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \
+} while (0)
+
+#define SPLAY_LINKRIGHT(head, tmp, field) do { \
+ SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
+ tmp = (head)->sph_root; \
+ (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \
+} while (0)
+
+#define SPLAY_ASSEMBLE(head, node, left, right, field) do { \
+ SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field); \
+ SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field);\
+ SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field); \
+ SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field); \
+} while (0)
+
+/* Generates prototypes and inline functions */
+
+#define SPLAY_PROTOTYPE(name, type, field, cmp) \
+void name##_SPLAY(struct name *, struct type *); \
+void name##_SPLAY_MINMAX(struct name *, int); \
+struct type *name##_SPLAY_INSERT(struct name *, struct type *); \
+struct type *name##_SPLAY_REMOVE(struct name *, struct type *); \
+ \
+/* Finds the node with the same key as elm */ \
+static __inline struct type * \
+name##_SPLAY_FIND(struct name *head, struct type *elm) \
+{ \
+ if (SPLAY_EMPTY(head)) \
+ return(NULL); \
+ name##_SPLAY(head, elm); \
+ if ((cmp)(elm, (head)->sph_root) == 0) \
+ return (head->sph_root); \
+ return (NULL); \
+} \
+ \
+static __inline struct type * \
+name##_SPLAY_NEXT(struct name *head, struct type *elm) \
+{ \
+ name##_SPLAY(head, elm); \
+ if (SPLAY_RIGHT(elm, field) != NULL) { \
+ elm = SPLAY_RIGHT(elm, field); \
+ while (SPLAY_LEFT(elm, field) != NULL) { \
+ elm = SPLAY_LEFT(elm, field); \
+ } \
+ } else \
+ elm = NULL; \
+ return (elm); \
+} \
+ \
+static __inline struct type * \
+name##_SPLAY_MIN_MAX(struct name *head, int val) \
+{ \
+ name##_SPLAY_MINMAX(head, val); \
+ return (SPLAY_ROOT(head)); \
+}
+
+/* Main splay operation.
+ * Moves node close to the key of elm to top
+ */
+#define SPLAY_GENERATE(name, type, field, cmp) \
+struct type * \
+name##_SPLAY_INSERT(struct name *head, struct type *elm) \
+{ \
+ if (SPLAY_EMPTY(head)) { \
+ SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL; \
+ } else { \
+ int __comp; \
+ name##_SPLAY(head, elm); \
+ __comp = (cmp)(elm, (head)->sph_root); \
+ if(__comp < 0) { \
+ SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field);\
+ SPLAY_RIGHT(elm, field) = (head)->sph_root; \
+ SPLAY_LEFT((head)->sph_root, field) = NULL; \
+ } else if (__comp > 0) { \
+ SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field);\
+ SPLAY_LEFT(elm, field) = (head)->sph_root; \
+ SPLAY_RIGHT((head)->sph_root, field) = NULL; \
+ } else \
+ return ((head)->sph_root); \
+ } \
+ (head)->sph_root = (elm); \
+ return (NULL); \
+} \
+ \
+struct type * \
+name##_SPLAY_REMOVE(struct name *head, struct type *elm) \
+{ \
+ struct type *__tmp; \
+ if (SPLAY_EMPTY(head)) \
+ return (NULL); \
+ name##_SPLAY(head, elm); \
+ if ((cmp)(elm, (head)->sph_root) == 0) { \
+ if (SPLAY_LEFT((head)->sph_root, field) == NULL) { \
+ (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field);\
+ } else { \
+ __tmp = SPLAY_RIGHT((head)->sph_root, field); \
+ (head)->sph_root = SPLAY_LEFT((head)->sph_root, field);\
+ name##_SPLAY(head, elm); \
+ SPLAY_RIGHT((head)->sph_root, field) = __tmp; \
+ } \
+ return (elm); \
+ } \
+ return (NULL); \
+} \
+ \
+void \
+name##_SPLAY(struct name *head, struct type *elm) \
+{ \
+ struct type __node, *__left, *__right, *__tmp; \
+ int __comp; \
+\
+ SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\
+ __left = __right = &__node; \
+\
+ while ((__comp = (cmp)(elm, (head)->sph_root))) { \
+ if (__comp < 0) { \
+ __tmp = SPLAY_LEFT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if ((cmp)(elm, __tmp) < 0){ \
+ SPLAY_ROTATE_RIGHT(head, __tmp, field); \
+ if (SPLAY_LEFT((head)->sph_root, field) == NULL)\
+ break; \
+ } \
+ SPLAY_LINKLEFT(head, __right, field); \
+ } else if (__comp > 0) { \
+ __tmp = SPLAY_RIGHT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if ((cmp)(elm, __tmp) > 0){ \
+ SPLAY_ROTATE_LEFT(head, __tmp, field); \
+ if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\
+ break; \
+ } \
+ SPLAY_LINKRIGHT(head, __left, field); \
+ } \
+ } \
+ SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
+} \
+ \
+/* Splay with either the minimum or the maximum element \
+ * Used to find minimum or maximum element in tree. \
+ */ \
+void name##_SPLAY_MINMAX(struct name *head, int __comp) \
+{ \
+ struct type __node, *__left, *__right, *__tmp; \
+\
+ SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\
+ __left = __right = &__node; \
+\
+ while (1) { \
+ if (__comp < 0) { \
+ __tmp = SPLAY_LEFT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if (__comp < 0){ \
+ SPLAY_ROTATE_RIGHT(head, __tmp, field); \
+ if (SPLAY_LEFT((head)->sph_root, field) == NULL)\
+ break; \
+ } \
+ SPLAY_LINKLEFT(head, __right, field); \
+ } else if (__comp > 0) { \
+ __tmp = SPLAY_RIGHT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if (__comp > 0) { \
+ SPLAY_ROTATE_LEFT(head, __tmp, field); \
+ if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\
+ break; \
+ } \
+ SPLAY_LINKRIGHT(head, __left, field); \
+ } \
+ } \
+ SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
+}
+
+#define SPLAY_NEGINF -1
+#define SPLAY_INF 1
+
+#define SPLAY_INSERT(name, x, y) name##_SPLAY_INSERT(x, y)
+#define SPLAY_REMOVE(name, x, y) name##_SPLAY_REMOVE(x, y)
+#define SPLAY_FIND(name, x, y) name##_SPLAY_FIND(x, y)
+#define SPLAY_NEXT(name, x, y) name##_SPLAY_NEXT(x, y)
+#define SPLAY_MIN(name, x) (SPLAY_EMPTY(x) ? NULL \
+ : name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF))
+#define SPLAY_MAX(name, x) (SPLAY_EMPTY(x) ? NULL \
+ : name##_SPLAY_MIN_MAX(x, SPLAY_INF))
+
+#define SPLAY_FOREACH(x, name, head) \
+ for ((x) = SPLAY_MIN(name, head); \
+ (x) != NULL; \
+ (x) = SPLAY_NEXT(name, head, x))
+
+/* Macros that define a red-black tree */
+#define RB_HEAD(name, type) \
+struct name { \
+ struct type *rbh_root; /* root of the tree */ \
+}
+
+#define RB_INITIALIZER(root) \
+ { NULL }
+
+#define RB_INIT(root) do { \
+ (root)->rbh_root = NULL; \
+} while (0)
+
+#define RB_BLACK 0
+#define RB_RED 1
+#define RB_ENTRY(type) \
+struct { \
+ struct type *rbe_left; /* left element */ \
+ struct type *rbe_right; /* right element */ \
+ struct type *rbe_parent; /* parent element */ \
+ int rbe_color; /* node color */ \
+}
+
+#define RB_LEFT(elm, field) (elm)->field.rbe_left
+#define RB_RIGHT(elm, field) (elm)->field.rbe_right
+#define RB_PARENT(elm, field) (elm)->field.rbe_parent
+#define RB_COLOR(elm, field) (elm)->field.rbe_color
+#define RB_ROOT(head) (head)->rbh_root
+#define RB_EMPTY(head) (RB_ROOT(head) == NULL)
+
+#define RB_SET(elm, parent, field) do { \
+ RB_PARENT(elm, field) = parent; \
+ RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL; \
+ RB_COLOR(elm, field) = RB_RED; \
+} while (0)
+
+#define RB_SET_BLACKRED(black, red, field) do { \
+ RB_COLOR(black, field) = RB_BLACK; \
+ RB_COLOR(red, field) = RB_RED; \
+} while (0)
+
+#ifndef RB_AUGMENT
+#define RB_AUGMENT(x)
+#endif
+
+#define RB_ROTATE_LEFT(head, elm, tmp, field) do { \
+ (tmp) = RB_RIGHT(elm, field); \
+ if ((RB_RIGHT(elm, field) = RB_LEFT(tmp, field))) { \
+ RB_PARENT(RB_LEFT(tmp, field), field) = (elm); \
+ } \
+ RB_AUGMENT(elm); \
+ if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field))) { \
+ if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
+ RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
+ else \
+ RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
+ } else \
+ (head)->rbh_root = (tmp); \
+ RB_LEFT(tmp, field) = (elm); \
+ RB_PARENT(elm, field) = (tmp); \
+ RB_AUGMENT(tmp); \
+ if ((RB_PARENT(tmp, field))) \
+ RB_AUGMENT(RB_PARENT(tmp, field)); \
+} while (0)
+
+#define RB_ROTATE_RIGHT(head, elm, tmp, field) do { \
+ (tmp) = RB_LEFT(elm, field); \
+ if ((RB_LEFT(elm, field) = RB_RIGHT(tmp, field))) { \
+ RB_PARENT(RB_RIGHT(tmp, field), field) = (elm); \
+ } \
+ RB_AUGMENT(elm); \
+ if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field))) { \
+ if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
+ RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
+ else \
+ RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
+ } else \
+ (head)->rbh_root = (tmp); \
+ RB_RIGHT(tmp, field) = (elm); \
+ RB_PARENT(elm, field) = (tmp); \
+ RB_AUGMENT(tmp); \
+ if ((RB_PARENT(tmp, field))) \
+ RB_AUGMENT(RB_PARENT(tmp, field)); \
+} while (0)
+
+/* Generates prototypes and inline functions */
+#define RB_PROTOTYPE(name, type, field, cmp) \
+void name##_RB_INSERT_COLOR(struct name *, struct type *); \
+void name##_RB_REMOVE_COLOR(struct name *, struct type *, struct type *);\
+struct type *name##_RB_REMOVE(struct name *, struct type *); \
+struct type *name##_RB_INSERT(struct name *, struct type *); \
+struct type *name##_RB_FIND(struct name *, struct type *); \
+struct type *name##_RB_NEXT(struct type *); \
+struct type *name##_RB_MINMAX(struct name *, int); \
+ \
+
+/* Main rb operation.
+ * Moves node close to the key of elm to top
+ */
+#define RB_GENERATE(name, type, field, cmp) \
+void \
+name##_RB_INSERT_COLOR(struct name *head, struct type *elm) \
+{ \
+ struct type *parent, *gparent, *tmp; \
+ while ((parent = RB_PARENT(elm, field)) && \
+ RB_COLOR(parent, field) == RB_RED) { \
+ gparent = RB_PARENT(parent, field); \
+ if (parent == RB_LEFT(gparent, field)) { \
+ tmp = RB_RIGHT(gparent, field); \
+ if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
+ RB_COLOR(tmp, field) = RB_BLACK; \
+ RB_SET_BLACKRED(parent, gparent, field);\
+ elm = gparent; \
+ continue; \
+ } \
+ if (RB_RIGHT(parent, field) == elm) { \
+ RB_ROTATE_LEFT(head, parent, tmp, field);\
+ tmp = parent; \
+ parent = elm; \
+ elm = tmp; \
+ } \
+ RB_SET_BLACKRED(parent, gparent, field); \
+ RB_ROTATE_RIGHT(head, gparent, tmp, field); \
+ } else { \
+ tmp = RB_LEFT(gparent, field); \
+ if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
+ RB_COLOR(tmp, field) = RB_BLACK; \
+ RB_SET_BLACKRED(parent, gparent, field);\
+ elm = gparent; \
+ continue; \
+ } \
+ if (RB_LEFT(parent, field) == elm) { \
+ RB_ROTATE_RIGHT(head, parent, tmp, field);\
+ tmp = parent; \
+ parent = elm; \
+ elm = tmp; \
+ } \
+ RB_SET_BLACKRED(parent, gparent, field); \
+ RB_ROTATE_LEFT(head, gparent, tmp, field); \
+ } \
+ } \
+ RB_COLOR(head->rbh_root, field) = RB_BLACK; \
+} \
+ \
+void \
+name##_RB_REMOVE_COLOR(struct name *head, struct type *parent, struct type *elm) \
+{ \
+ struct type *tmp; \
+ while ((elm == NULL || RB_COLOR(elm, field) == RB_BLACK) && \
+ elm != RB_ROOT(head)) { \
+ if (RB_LEFT(parent, field) == elm) { \
+ tmp = RB_RIGHT(parent, field); \
+ if (RB_COLOR(tmp, field) == RB_RED) { \
+ RB_SET_BLACKRED(tmp, parent, field); \
+ RB_ROTATE_LEFT(head, parent, tmp, field);\
+ tmp = RB_RIGHT(parent, field); \
+ } \
+ if ((RB_LEFT(tmp, field) == NULL || \
+ RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\
+ (RB_RIGHT(tmp, field) == NULL || \
+ RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\
+ RB_COLOR(tmp, field) = RB_RED; \
+ elm = parent; \
+ parent = RB_PARENT(elm, field); \
+ } else { \
+ if (RB_RIGHT(tmp, field) == NULL || \
+ RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK) {\
+ struct type *oleft; \
+ if ((oleft = RB_LEFT(tmp, field)))\
+ RB_COLOR(oleft, field) = RB_BLACK;\
+ RB_COLOR(tmp, field) = RB_RED; \
+ RB_ROTATE_RIGHT(head, tmp, oleft, field);\
+ tmp = RB_RIGHT(parent, field); \
+ } \
+ RB_COLOR(tmp, field) = RB_COLOR(parent, field);\
+ RB_COLOR(parent, field) = RB_BLACK; \
+ if (RB_RIGHT(tmp, field)) \
+ RB_COLOR(RB_RIGHT(tmp, field), field) = RB_BLACK;\
+ RB_ROTATE_LEFT(head, parent, tmp, field);\
+ elm = RB_ROOT(head); \
+ break; \
+ } \
+ } else { \
+ tmp = RB_LEFT(parent, field); \
+ if (RB_COLOR(tmp, field) == RB_RED) { \
+ RB_SET_BLACKRED(tmp, parent, field); \
+ RB_ROTATE_RIGHT(head, parent, tmp, field);\
+ tmp = RB_LEFT(parent, field); \
+ } \
+ if ((RB_LEFT(tmp, field) == NULL || \
+ RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\
+ (RB_RIGHT(tmp, field) == NULL || \
+ RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\
+ RB_COLOR(tmp, field) = RB_RED; \
+ elm = parent; \
+ parent = RB_PARENT(elm, field); \
+ } else { \
+ if (RB_LEFT(tmp, field) == NULL || \
+ RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) {\
+ struct type *oright; \
+ if ((oright = RB_RIGHT(tmp, field)))\
+ RB_COLOR(oright, field) = RB_BLACK;\
+ RB_COLOR(tmp, field) = RB_RED; \
+ RB_ROTATE_LEFT(head, tmp, oright, field);\
+ tmp = RB_LEFT(parent, field); \
+ } \
+ RB_COLOR(tmp, field) = RB_COLOR(parent, field);\
+ RB_COLOR(parent, field) = RB_BLACK; \
+ if (RB_LEFT(tmp, field)) \
+ RB_COLOR(RB_LEFT(tmp, field), field) = RB_BLACK;\
+ RB_ROTATE_RIGHT(head, parent, tmp, field);\
+ elm = RB_ROOT(head); \
+ break; \
+ } \
+ } \
+ } \
+ if (elm) \
+ RB_COLOR(elm, field) = RB_BLACK; \
+} \
+ \
+struct type * \
+name##_RB_REMOVE(struct name *head, struct type *elm) \
+{ \
+ struct type *child, *parent, *old = elm; \
+ int color; \
+ if (RB_LEFT(elm, field) == NULL) \
+ child = RB_RIGHT(elm, field); \
+ else if (RB_RIGHT(elm, field) == NULL) \
+ child = RB_LEFT(elm, field); \
+ else { \
+ struct type *left; \
+ elm = RB_RIGHT(elm, field); \
+ while ((left = RB_LEFT(elm, field))) \
+ elm = left; \
+ child = RB_RIGHT(elm, field); \
+ parent = RB_PARENT(elm, field); \
+ color = RB_COLOR(elm, field); \
+ if (child) \
+ RB_PARENT(child, field) = parent; \
+ if (parent) { \
+ if (RB_LEFT(parent, field) == elm) \
+ RB_LEFT(parent, field) = child; \
+ else \
+ RB_RIGHT(parent, field) = child; \
+ RB_AUGMENT(parent); \
+ } else \
+ RB_ROOT(head) = child; \
+ if (RB_PARENT(elm, field) == old) \
+ parent = elm; \
+ (elm)->field = (old)->field; \
+ if (RB_PARENT(old, field)) { \
+ if (RB_LEFT(RB_PARENT(old, field), field) == old)\
+ RB_LEFT(RB_PARENT(old, field), field) = elm;\
+ else \
+ RB_RIGHT(RB_PARENT(old, field), field) = elm;\
+ RB_AUGMENT(RB_PARENT(old, field)); \
+ } else \
+ RB_ROOT(head) = elm; \
+ RB_PARENT(RB_LEFT(old, field), field) = elm; \
+ if (RB_RIGHT(old, field)) \
+ RB_PARENT(RB_RIGHT(old, field), field) = elm; \
+ if (parent) { \
+ left = parent; \
+ do { \
+ RB_AUGMENT(left); \
+ } while ((left = RB_PARENT(left, field))); \
+ } \
+ goto color; \
+ } \
+ parent = RB_PARENT(elm, field); \
+ color = RB_COLOR(elm, field); \
+ if (child) \
+ RB_PARENT(child, field) = parent; \
+ if (parent) { \
+ if (RB_LEFT(parent, field) == elm) \
+ RB_LEFT(parent, field) = child; \
+ else \
+ RB_RIGHT(parent, field) = child; \
+ RB_AUGMENT(parent); \
+ } else \
+ RB_ROOT(head) = child; \
+color: \
+ if (color == RB_BLACK) \
+ name##_RB_REMOVE_COLOR(head, parent, child); \
+ return (old); \
+} \
+ \
+/* Inserts a node into the RB tree */ \
+struct type * \
+name##_RB_INSERT(struct name *head, struct type *elm) \
+{ \
+ struct type *tmp; \
+ struct type *parent = NULL; \
+ int comp = 0; \
+ tmp = RB_ROOT(head); \
+ while (tmp) { \
+ parent = tmp; \
+ comp = (cmp)(elm, parent); \
+ if (comp < 0) \
+ tmp = RB_LEFT(tmp, field); \
+ else if (comp > 0) \
+ tmp = RB_RIGHT(tmp, field); \
+ else \
+ return (tmp); \
+ } \
+ RB_SET(elm, parent, field); \
+ if (parent != NULL) { \
+ if (comp < 0) \
+ RB_LEFT(parent, field) = elm; \
+ else \
+ RB_RIGHT(parent, field) = elm; \
+ RB_AUGMENT(parent); \
+ } else \
+ RB_ROOT(head) = elm; \
+ name##_RB_INSERT_COLOR(head, elm); \
+ return (NULL); \
+} \
+ \
+/* Finds the node with the same key as elm */ \
+struct type * \
+name##_RB_FIND(struct name *head, struct type *elm) \
+{ \
+ struct type *tmp = RB_ROOT(head); \
+ int comp; \
+ while (tmp) { \
+ comp = cmp(elm, tmp); \
+ if (comp < 0) \
+ tmp = RB_LEFT(tmp, field); \
+ else if (comp > 0) \
+ tmp = RB_RIGHT(tmp, field); \
+ else \
+ return (tmp); \
+ } \
+ return (NULL); \
+} \
+ \
+struct type * \
+name##_RB_NEXT(struct type *elm) \
+{ \
+ if (RB_RIGHT(elm, field)) { \
+ elm = RB_RIGHT(elm, field); \
+ while (RB_LEFT(elm, field)) \
+ elm = RB_LEFT(elm, field); \
+ } else { \
+ if (RB_PARENT(elm, field) && \
+ (elm == RB_LEFT(RB_PARENT(elm, field), field))) \
+ elm = RB_PARENT(elm, field); \
+ else { \
+ while (RB_PARENT(elm, field) && \
+ (elm == RB_RIGHT(RB_PARENT(elm, field), field)))\
+ elm = RB_PARENT(elm, field); \
+ elm = RB_PARENT(elm, field); \
+ } \
+ } \
+ return (elm); \
+} \
+ \
+struct type * \
+name##_RB_MINMAX(struct name *head, int val) \
+{ \
+ struct type *tmp = RB_ROOT(head); \
+ struct type *parent = NULL; \
+ while (tmp) { \
+ parent = tmp; \
+ if (val < 0) \
+ tmp = RB_LEFT(tmp, field); \
+ else \
+ tmp = RB_RIGHT(tmp, field); \
+ } \
+ return (parent); \
+}
+
+#define RB_NEGINF -1
+#define RB_INF 1
+
+#define RB_INSERT(name, x, y) name##_RB_INSERT(x, y)
+#define RB_REMOVE(name, x, y) name##_RB_REMOVE(x, y)
+#define RB_FIND(name, x, y) name##_RB_FIND(x, y)
+#define RB_NEXT(name, x, y) name##_RB_NEXT(y)
+#define RB_MIN(name, x) name##_RB_MINMAX(x, RB_NEGINF)
+#define RB_MAX(name, x) name##_RB_MINMAX(x, RB_INF)
+
+#define RB_FOREACH(x, name, head) \
+ for ((x) = RB_MIN(name, head); \
+ (x) != NULL; \
+ (x) = name##_RB_NEXT(x))
+
+#endif /* _SYS_TREE_H_ */
+/* $OpenBSD: tree.h,v 1.7 2002/10/17 21:51:54 art Exp $ */
+/*
+ * Copyright 2002 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SYS_TREE_H_
+#define _SYS_TREE_H_
+
+/*
+ * This file defines data structures for different types of trees:
+ * splay trees and red-black trees.
+ *
+ * A splay tree is a self-organizing data structure. Every operation
+ * on the tree causes a splay to happen. The splay moves the requested
+ * node to the root of the tree and partly rebalances it.
+ *
+ * This has the benefit that request locality causes faster lookups as
+ * the requested nodes move to the top of the tree. On the other hand,
+ * every lookup causes memory writes.
+ *
+ * The Balance Theorem bounds the total access time for m operations
+ * and n inserts on an initially empty tree as O((m + n)lg n). The
+ * amortized cost for a sequence of m accesses to a splay tree is O(lg n);
+ *
+ * A red-black tree is a binary search tree with the node color as an
+ * extra attribute. It fulfills a set of conditions:
+ * - every search path from the root to a leaf consists of the
+ * same number of black nodes,
+ * - each red node (except for the root) has a black parent,
+ * - each leaf node is black.
+ *
+ * Every operation on a red-black tree is bounded as O(lg n).
+ * The maximum height of a red-black tree is 2lg (n+1).
+ */
+
+#define SPLAY_HEAD(name, type) \
+struct name { \
+ struct type *sph_root; /* root of the tree */ \
+}
+
+#define SPLAY_INITIALIZER(root) \
+ { NULL }
+
+#define SPLAY_INIT(root) do { \
+ (root)->sph_root = NULL; \
+} while (0)
+
+#define SPLAY_ENTRY(type) \
+struct { \
+ struct type *spe_left; /* left element */ \
+ struct type *spe_right; /* right element */ \
+}
+
+#define SPLAY_LEFT(elm, field) (elm)->field.spe_left
+#define SPLAY_RIGHT(elm, field) (elm)->field.spe_right
+#define SPLAY_ROOT(head) (head)->sph_root
+#define SPLAY_EMPTY(head) (SPLAY_ROOT(head) == NULL)
+
+/* SPLAY_ROTATE_{LEFT,RIGHT} expect that tmp hold SPLAY_{RIGHT,LEFT} */
+#define SPLAY_ROTATE_RIGHT(head, tmp, field) do { \
+ SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(tmp, field); \
+ SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
+ (head)->sph_root = tmp; \
+} while (0)
+
+#define SPLAY_ROTATE_LEFT(head, tmp, field) do { \
+ SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(tmp, field); \
+ SPLAY_LEFT(tmp, field) = (head)->sph_root; \
+ (head)->sph_root = tmp; \
+} while (0)
+
+#define SPLAY_LINKLEFT(head, tmp, field) do { \
+ SPLAY_LEFT(tmp, field) = (head)->sph_root; \
+ tmp = (head)->sph_root; \
+ (head)->sph_root = SPLAY_LEFT((head)->sph_root, field); \
+} while (0)
+
+#define SPLAY_LINKRIGHT(head, tmp, field) do { \
+ SPLAY_RIGHT(tmp, field) = (head)->sph_root; \
+ tmp = (head)->sph_root; \
+ (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field); \
+} while (0)
+
+#define SPLAY_ASSEMBLE(head, node, left, right, field) do { \
+ SPLAY_RIGHT(left, field) = SPLAY_LEFT((head)->sph_root, field); \
+ SPLAY_LEFT(right, field) = SPLAY_RIGHT((head)->sph_root, field);\
+ SPLAY_LEFT((head)->sph_root, field) = SPLAY_RIGHT(node, field); \
+ SPLAY_RIGHT((head)->sph_root, field) = SPLAY_LEFT(node, field); \
+} while (0)
+
+/* Generates prototypes and inline functions */
+
+#define SPLAY_PROTOTYPE(name, type, field, cmp) \
+void name##_SPLAY(struct name *, struct type *); \
+void name##_SPLAY_MINMAX(struct name *, int); \
+struct type *name##_SPLAY_INSERT(struct name *, struct type *); \
+struct type *name##_SPLAY_REMOVE(struct name *, struct type *); \
+ \
+/* Finds the node with the same key as elm */ \
+static __inline struct type * \
+name##_SPLAY_FIND(struct name *head, struct type *elm) \
+{ \
+ if (SPLAY_EMPTY(head)) \
+ return(NULL); \
+ name##_SPLAY(head, elm); \
+ if ((cmp)(elm, (head)->sph_root) == 0) \
+ return (head->sph_root); \
+ return (NULL); \
+} \
+ \
+static __inline struct type * \
+name##_SPLAY_NEXT(struct name *head, struct type *elm) \
+{ \
+ name##_SPLAY(head, elm); \
+ if (SPLAY_RIGHT(elm, field) != NULL) { \
+ elm = SPLAY_RIGHT(elm, field); \
+ while (SPLAY_LEFT(elm, field) != NULL) { \
+ elm = SPLAY_LEFT(elm, field); \
+ } \
+ } else \
+ elm = NULL; \
+ return (elm); \
+} \
+ \
+static __inline struct type * \
+name##_SPLAY_MIN_MAX(struct name *head, int val) \
+{ \
+ name##_SPLAY_MINMAX(head, val); \
+ return (SPLAY_ROOT(head)); \
+}
+
+/* Main splay operation.
+ * Moves node close to the key of elm to top
+ */
+#define SPLAY_GENERATE(name, type, field, cmp) \
+struct type * \
+name##_SPLAY_INSERT(struct name *head, struct type *elm) \
+{ \
+ if (SPLAY_EMPTY(head)) { \
+ SPLAY_LEFT(elm, field) = SPLAY_RIGHT(elm, field) = NULL; \
+ } else { \
+ int __comp; \
+ name##_SPLAY(head, elm); \
+ __comp = (cmp)(elm, (head)->sph_root); \
+ if(__comp < 0) { \
+ SPLAY_LEFT(elm, field) = SPLAY_LEFT((head)->sph_root, field);\
+ SPLAY_RIGHT(elm, field) = (head)->sph_root; \
+ SPLAY_LEFT((head)->sph_root, field) = NULL; \
+ } else if (__comp > 0) { \
+ SPLAY_RIGHT(elm, field) = SPLAY_RIGHT((head)->sph_root, field);\
+ SPLAY_LEFT(elm, field) = (head)->sph_root; \
+ SPLAY_RIGHT((head)->sph_root, field) = NULL; \
+ } else \
+ return ((head)->sph_root); \
+ } \
+ (head)->sph_root = (elm); \
+ return (NULL); \
+} \
+ \
+struct type * \
+name##_SPLAY_REMOVE(struct name *head, struct type *elm) \
+{ \
+ struct type *__tmp; \
+ if (SPLAY_EMPTY(head)) \
+ return (NULL); \
+ name##_SPLAY(head, elm); \
+ if ((cmp)(elm, (head)->sph_root) == 0) { \
+ if (SPLAY_LEFT((head)->sph_root, field) == NULL) { \
+ (head)->sph_root = SPLAY_RIGHT((head)->sph_root, field);\
+ } else { \
+ __tmp = SPLAY_RIGHT((head)->sph_root, field); \
+ (head)->sph_root = SPLAY_LEFT((head)->sph_root, field);\
+ name##_SPLAY(head, elm); \
+ SPLAY_RIGHT((head)->sph_root, field) = __tmp; \
+ } \
+ return (elm); \
+ } \
+ return (NULL); \
+} \
+ \
+void \
+name##_SPLAY(struct name *head, struct type *elm) \
+{ \
+ struct type __node, *__left, *__right, *__tmp; \
+ int __comp; \
+\
+ SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\
+ __left = __right = &__node; \
+\
+ while ((__comp = (cmp)(elm, (head)->sph_root))) { \
+ if (__comp < 0) { \
+ __tmp = SPLAY_LEFT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if ((cmp)(elm, __tmp) < 0){ \
+ SPLAY_ROTATE_RIGHT(head, __tmp, field); \
+ if (SPLAY_LEFT((head)->sph_root, field) == NULL)\
+ break; \
+ } \
+ SPLAY_LINKLEFT(head, __right, field); \
+ } else if (__comp > 0) { \
+ __tmp = SPLAY_RIGHT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if ((cmp)(elm, __tmp) > 0){ \
+ SPLAY_ROTATE_LEFT(head, __tmp, field); \
+ if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\
+ break; \
+ } \
+ SPLAY_LINKRIGHT(head, __left, field); \
+ } \
+ } \
+ SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
+} \
+ \
+/* Splay with either the minimum or the maximum element \
+ * Used to find minimum or maximum element in tree. \
+ */ \
+void name##_SPLAY_MINMAX(struct name *head, int __comp) \
+{ \
+ struct type __node, *__left, *__right, *__tmp; \
+\
+ SPLAY_LEFT(&__node, field) = SPLAY_RIGHT(&__node, field) = NULL;\
+ __left = __right = &__node; \
+\
+ while (1) { \
+ if (__comp < 0) { \
+ __tmp = SPLAY_LEFT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if (__comp < 0){ \
+ SPLAY_ROTATE_RIGHT(head, __tmp, field); \
+ if (SPLAY_LEFT((head)->sph_root, field) == NULL)\
+ break; \
+ } \
+ SPLAY_LINKLEFT(head, __right, field); \
+ } else if (__comp > 0) { \
+ __tmp = SPLAY_RIGHT((head)->sph_root, field); \
+ if (__tmp == NULL) \
+ break; \
+ if (__comp > 0) { \
+ SPLAY_ROTATE_LEFT(head, __tmp, field); \
+ if (SPLAY_RIGHT((head)->sph_root, field) == NULL)\
+ break; \
+ } \
+ SPLAY_LINKRIGHT(head, __left, field); \
+ } \
+ } \
+ SPLAY_ASSEMBLE(head, &__node, __left, __right, field); \
+}
+
+#define SPLAY_NEGINF -1
+#define SPLAY_INF 1
+
+#define SPLAY_INSERT(name, x, y) name##_SPLAY_INSERT(x, y)
+#define SPLAY_REMOVE(name, x, y) name##_SPLAY_REMOVE(x, y)
+#define SPLAY_FIND(name, x, y) name##_SPLAY_FIND(x, y)
+#define SPLAY_NEXT(name, x, y) name##_SPLAY_NEXT(x, y)
+#define SPLAY_MIN(name, x) (SPLAY_EMPTY(x) ? NULL \
+ : name##_SPLAY_MIN_MAX(x, SPLAY_NEGINF))
+#define SPLAY_MAX(name, x) (SPLAY_EMPTY(x) ? NULL \
+ : name##_SPLAY_MIN_MAX(x, SPLAY_INF))
+
+#define SPLAY_FOREACH(x, name, head) \
+ for ((x) = SPLAY_MIN(name, head); \
+ (x) != NULL; \
+ (x) = SPLAY_NEXT(name, head, x))
+
+/* Macros that define a red-back tree */
+#define RB_HEAD(name, type) \
+struct name { \
+ struct type *rbh_root; /* root of the tree */ \
+}
+
+#define RB_INITIALIZER(root) \
+ { NULL }
+
+#define RB_INIT(root) do { \
+ (root)->rbh_root = NULL; \
+} while (0)
+
+#define RB_BLACK 0
+#define RB_RED 1
+#define RB_ENTRY(type) \
+struct { \
+ struct type *rbe_left; /* left element */ \
+ struct type *rbe_right; /* right element */ \
+ struct type *rbe_parent; /* parent element */ \
+ int rbe_color; /* node color */ \
+}
+
+#define RB_LEFT(elm, field) (elm)->field.rbe_left
+#define RB_RIGHT(elm, field) (elm)->field.rbe_right
+#define RB_PARENT(elm, field) (elm)->field.rbe_parent
+#define RB_COLOR(elm, field) (elm)->field.rbe_color
+#define RB_ROOT(head) (head)->rbh_root
+#define RB_EMPTY(head) (RB_ROOT(head) == NULL)
+
+#define RB_SET(elm, parent, field) do { \
+ RB_PARENT(elm, field) = parent; \
+ RB_LEFT(elm, field) = RB_RIGHT(elm, field) = NULL; \
+ RB_COLOR(elm, field) = RB_RED; \
+} while (0)
+
+#define RB_SET_BLACKRED(black, red, field) do { \
+ RB_COLOR(black, field) = RB_BLACK; \
+ RB_COLOR(red, field) = RB_RED; \
+} while (0)
+
+#ifndef RB_AUGMENT
+#define RB_AUGMENT(x)
+#endif
+
+#define RB_ROTATE_LEFT(head, elm, tmp, field) do { \
+ (tmp) = RB_RIGHT(elm, field); \
+ if ((RB_RIGHT(elm, field) = RB_LEFT(tmp, field))) { \
+ RB_PARENT(RB_LEFT(tmp, field), field) = (elm); \
+ } \
+ RB_AUGMENT(elm); \
+ if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field))) { \
+ if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
+ RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
+ else \
+ RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
+ } else \
+ (head)->rbh_root = (tmp); \
+ RB_LEFT(tmp, field) = (elm); \
+ RB_PARENT(elm, field) = (tmp); \
+ RB_AUGMENT(tmp); \
+ if ((RB_PARENT(tmp, field))) \
+ RB_AUGMENT(RB_PARENT(tmp, field)); \
+} while (0)
+
+#define RB_ROTATE_RIGHT(head, elm, tmp, field) do { \
+ (tmp) = RB_LEFT(elm, field); \
+ if ((RB_LEFT(elm, field) = RB_RIGHT(tmp, field))) { \
+ RB_PARENT(RB_RIGHT(tmp, field), field) = (elm); \
+ } \
+ RB_AUGMENT(elm); \
+ if ((RB_PARENT(tmp, field) = RB_PARENT(elm, field))) { \
+ if ((elm) == RB_LEFT(RB_PARENT(elm, field), field)) \
+ RB_LEFT(RB_PARENT(elm, field), field) = (tmp); \
+ else \
+ RB_RIGHT(RB_PARENT(elm, field), field) = (tmp); \
+ } else \
+ (head)->rbh_root = (tmp); \
+ RB_RIGHT(tmp, field) = (elm); \
+ RB_PARENT(elm, field) = (tmp); \
+ RB_AUGMENT(tmp); \
+ if ((RB_PARENT(tmp, field))) \
+ RB_AUGMENT(RB_PARENT(tmp, field)); \
+} while (0)
+
+/* Generates prototypes and inline functions */
+#define RB_PROTOTYPE(name, type, field, cmp) \
+void name##_RB_INSERT_COLOR(struct name *, struct type *); \
+void name##_RB_REMOVE_COLOR(struct name *, struct type *, struct type *);\
+struct type *name##_RB_REMOVE(struct name *, struct type *); \
+struct type *name##_RB_INSERT(struct name *, struct type *); \
+struct type *name##_RB_FIND(struct name *, struct type *); \
+struct type *name##_RB_NEXT(struct type *); \
+struct type *name##_RB_MINMAX(struct name *, int); \
+ \
+
+/* Main rb operation.
+ * Moves node close to the key of elm to top
+ */
+#define RB_GENERATE(name, type, field, cmp) \
+void \
+name##_RB_INSERT_COLOR(struct name *head, struct type *elm) \
+{ \
+ struct type *parent, *gparent, *tmp; \
+ while ((parent = RB_PARENT(elm, field)) && \
+ RB_COLOR(parent, field) == RB_RED) { \
+ gparent = RB_PARENT(parent, field); \
+ if (parent == RB_LEFT(gparent, field)) { \
+ tmp = RB_RIGHT(gparent, field); \
+ if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
+ RB_COLOR(tmp, field) = RB_BLACK; \
+ RB_SET_BLACKRED(parent, gparent, field);\
+ elm = gparent; \
+ continue; \
+ } \
+ if (RB_RIGHT(parent, field) == elm) { \
+ RB_ROTATE_LEFT(head, parent, tmp, field);\
+ tmp = parent; \
+ parent = elm; \
+ elm = tmp; \
+ } \
+ RB_SET_BLACKRED(parent, gparent, field); \
+ RB_ROTATE_RIGHT(head, gparent, tmp, field); \
+ } else { \
+ tmp = RB_LEFT(gparent, field); \
+ if (tmp && RB_COLOR(tmp, field) == RB_RED) { \
+ RB_COLOR(tmp, field) = RB_BLACK; \
+ RB_SET_BLACKRED(parent, gparent, field);\
+ elm = gparent; \
+ continue; \
+ } \
+ if (RB_LEFT(parent, field) == elm) { \
+ RB_ROTATE_RIGHT(head, parent, tmp, field);\
+ tmp = parent; \
+ parent = elm; \
+ elm = tmp; \
+ } \
+ RB_SET_BLACKRED(parent, gparent, field); \
+ RB_ROTATE_LEFT(head, gparent, tmp, field); \
+ } \
+ } \
+ RB_COLOR(head->rbh_root, field) = RB_BLACK; \
+} \
+ \
+void \
+name##_RB_REMOVE_COLOR(struct name *head, struct type *parent, struct type *elm) \
+{ \
+ struct type *tmp; \
+ while ((elm == NULL || RB_COLOR(elm, field) == RB_BLACK) && \
+ elm != RB_ROOT(head)) { \
+ if (RB_LEFT(parent, field) == elm) { \
+ tmp = RB_RIGHT(parent, field); \
+ if (RB_COLOR(tmp, field) == RB_RED) { \
+ RB_SET_BLACKRED(tmp, parent, field); \
+ RB_ROTATE_LEFT(head, parent, tmp, field);\
+ tmp = RB_RIGHT(parent, field); \
+ } \
+ if ((RB_LEFT(tmp, field) == NULL || \
+ RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\
+ (RB_RIGHT(tmp, field) == NULL || \
+ RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\
+ RB_COLOR(tmp, field) = RB_RED; \
+ elm = parent; \
+ parent = RB_PARENT(elm, field); \
+ } else { \
+ if (RB_RIGHT(tmp, field) == NULL || \
+ RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK) {\
+ struct type *oleft; \
+ if ((oleft = RB_LEFT(tmp, field)))\
+ RB_COLOR(oleft, field) = RB_BLACK;\
+ RB_COLOR(tmp, field) = RB_RED; \
+ RB_ROTATE_RIGHT(head, tmp, oleft, field);\
+ tmp = RB_RIGHT(parent, field); \
+ } \
+ RB_COLOR(tmp, field) = RB_COLOR(parent, field);\
+ RB_COLOR(parent, field) = RB_BLACK; \
+ if (RB_RIGHT(tmp, field)) \
+ RB_COLOR(RB_RIGHT(tmp, field), field) = RB_BLACK;\
+ RB_ROTATE_LEFT(head, parent, tmp, field);\
+ elm = RB_ROOT(head); \
+ break; \
+ } \
+ } else { \
+ tmp = RB_LEFT(parent, field); \
+ if (RB_COLOR(tmp, field) == RB_RED) { \
+ RB_SET_BLACKRED(tmp, parent, field); \
+ RB_ROTATE_RIGHT(head, parent, tmp, field);\
+ tmp = RB_LEFT(parent, field); \
+ } \
+ if ((RB_LEFT(tmp, field) == NULL || \
+ RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) &&\
+ (RB_RIGHT(tmp, field) == NULL || \
+ RB_COLOR(RB_RIGHT(tmp, field), field) == RB_BLACK)) {\
+ RB_COLOR(tmp, field) = RB_RED; \
+ elm = parent; \
+ parent = RB_PARENT(elm, field); \
+ } else { \
+ if (RB_LEFT(tmp, field) == NULL || \
+ RB_COLOR(RB_LEFT(tmp, field), field) == RB_BLACK) {\
+ struct type *oright; \
+ if ((oright = RB_RIGHT(tmp, field)))\
+ RB_COLOR(oright, field) = RB_BLACK;\
+ RB_COLOR(tmp, field) = RB_RED; \
+ RB_ROTATE_LEFT(head, tmp, oright, field);\
+ tmp = RB_LEFT(parent, field); \
+ } \
+ RB_COLOR(tmp, field) = RB_COLOR(parent, field);\
+ RB_COLOR(parent, field) = RB_BLACK; \
+ if (RB_LEFT(tmp, field)) \
+ RB_COLOR(RB_LEFT(tmp, field), field) = RB_BLACK;\
+ RB_ROTATE_RIGHT(head, parent, tmp, field);\
+ elm = RB_ROOT(head); \
+ break; \
+ } \
+ } \
+ } \
+ if (elm) \
+ RB_COLOR(elm, field) = RB_BLACK; \
+} \
+ \
+struct type * \
+name##_RB_REMOVE(struct name *head, struct type *elm) \
+{ \
+ struct type *child, *parent, *old = elm; \
+ int color; \
+ if (RB_LEFT(elm, field) == NULL) \
+ child = RB_RIGHT(elm, field); \
+ else if (RB_RIGHT(elm, field) == NULL) \
+ child = RB_LEFT(elm, field); \
+ else { \
+ struct type *left; \
+ elm = RB_RIGHT(elm, field); \
+ while ((left = RB_LEFT(elm, field))) \
+ elm = left; \
+ child = RB_RIGHT(elm, field); \
+ parent = RB_PARENT(elm, field); \
+ color = RB_COLOR(elm, field); \
+ if (child) \
+ RB_PARENT(child, field) = parent; \
+ if (parent) { \
+ if (RB_LEFT(parent, field) == elm) \
+ RB_LEFT(parent, field) = child; \
+ else \
+ RB_RIGHT(parent, field) = child; \
+ RB_AUGMENT(parent); \
+ } else \
+ RB_ROOT(head) = child; \
+ if (RB_PARENT(elm, field) == old) \
+ parent = elm; \
+ (elm)->field = (old)->field; \
+ if (RB_PARENT(old, field)) { \
+ if (RB_LEFT(RB_PARENT(old, field), field) == old)\
+ RB_LEFT(RB_PARENT(old, field), field) = elm;\
+ else \
+ RB_RIGHT(RB_PARENT(old, field), field) = elm;\
+ RB_AUGMENT(RB_PARENT(old, field)); \
+ } else \
+ RB_ROOT(head) = elm; \
+ RB_PARENT(RB_LEFT(old, field), field) = elm; \
+ if (RB_RIGHT(old, field)) \
+ RB_PARENT(RB_RIGHT(old, field), field) = elm; \
+ if (parent) { \
+ left = parent; \
+ do { \
+ RB_AUGMENT(left); \
+ } while ((left = RB_PARENT(left, field))); \
+ } \
+ goto color; \
+ } \
+ parent = RB_PARENT(elm, field); \
+ color = RB_COLOR(elm, field); \
+ if (child) \
+ RB_PARENT(child, field) = parent; \
+ if (parent) { \
+ if (RB_LEFT(parent, field) == elm) \
+ RB_LEFT(parent, field) = child; \
+ else \
+ RB_RIGHT(parent, field) = child; \
+ RB_AUGMENT(parent); \
+ } else \
+ RB_ROOT(head) = child; \
+color: \
+ if (color == RB_BLACK) \
+ name##_RB_REMOVE_COLOR(head, parent, child); \
+ return (old); \
+} \
+ \
+/* Inserts a node into the RB tree */ \
+struct type * \
+name##_RB_INSERT(struct name *head, struct type *elm) \
+{ \
+ struct type *tmp; \
+ struct type *parent = NULL; \
+ int comp = 0; \
+ tmp = RB_ROOT(head); \
+ while (tmp) { \
+ parent = tmp; \
+ comp = (cmp)(elm, parent); \
+ if (comp < 0) \
+ tmp = RB_LEFT(tmp, field); \
+ else if (comp > 0) \
+ tmp = RB_RIGHT(tmp, field); \
+ else \
+ return (tmp); \
+ } \
+ RB_SET(elm, parent, field); \
+ if (parent != NULL) { \
+ if (comp < 0) \
+ RB_LEFT(parent, field) = elm; \
+ else \
+ RB_RIGHT(parent, field) = elm; \
+ RB_AUGMENT(parent); \
+ } else \
+ RB_ROOT(head) = elm; \
+ name##_RB_INSERT_COLOR(head, elm); \
+ return (NULL); \
+} \
+ \
+/* Finds the node with the same key as elm */ \
+struct type * \
+name##_RB_FIND(struct name *head, struct type *elm) \
+{ \
+ struct type *tmp = RB_ROOT(head); \
+ int comp; \
+ while (tmp) { \
+ comp = cmp(elm, tmp); \
+ if (comp < 0) \
+ tmp = RB_LEFT(tmp, field); \
+ else if (comp > 0) \
+ tmp = RB_RIGHT(tmp, field); \
+ else \
+ return (tmp); \
+ } \
+ return (NULL); \
+} \
+ \
+struct type * \
+name##_RB_NEXT(struct type *elm) \
+{ \
+ if (RB_RIGHT(elm, field)) { \
+ elm = RB_RIGHT(elm, field); \
+ while (RB_LEFT(elm, field)) \
+ elm = RB_LEFT(elm, field); \
+ } else { \
+ if (RB_PARENT(elm, field) && \
+ (elm == RB_LEFT(RB_PARENT(elm, field), field))) \
+ elm = RB_PARENT(elm, field); \
+ else { \
+ while (RB_PARENT(elm, field) && \
+ (elm == RB_RIGHT(RB_PARENT(elm, field), field)))\
+ elm = RB_PARENT(elm, field); \
+ elm = RB_PARENT(elm, field); \
+ } \
+ } \
+ return (elm); \
+} \
+ \
+struct type * \
+name##_RB_MINMAX(struct name *head, int val) \
+{ \
+ struct type *tmp = RB_ROOT(head); \
+ struct type *parent = NULL; \
+ while (tmp) { \
+ parent = tmp; \
+ if (val < 0) \
+ tmp = RB_LEFT(tmp, field); \
+ else \
+ tmp = RB_RIGHT(tmp, field); \
+ } \
+ return (parent); \
+}
+
+#define RB_NEGINF -1
+#define RB_INF 1
+
+#define RB_INSERT(name, x, y) name##_RB_INSERT(x, y)
+#define RB_REMOVE(name, x, y) name##_RB_REMOVE(x, y)
+#define RB_FIND(name, x, y) name##_RB_FIND(x, y)
+#define RB_NEXT(name, x, y) name##_RB_NEXT(y)
+#define RB_MIN(name, x) name##_RB_MINMAX(x, RB_NEGINF)
+#define RB_MAX(name, x) name##_RB_MINMAX(x, RB_INF)
+
+#define RB_FOREACH(x, name, head) \
+ for ((x) = RB_MIN(name, head); \
+ (x) != NULL; \
+ (x) = name##_RB_NEXT(x))
+
+#endif /* _SYS_TREE_H_ */
diff --git a/libevent/WIN32-Code/win32.c b/libevent/WIN32-Code/win32.c
new file mode 100644
index 00000000000..8a603b7eceb
--- /dev/null
+++ b/libevent/WIN32-Code/win32.c
@@ -0,0 +1,486 @@
+/*
+ * Copyright 2000-2002 Niels Provos <provos@citi.umich.edu>
+ * Copyright 2003 Michael A. Davis <mike@datanerds.net>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifdef _MSC_VER
+#include "./config.h"
+#else
+/* Avoid the windows/msvc thing. */
+#include "../config.h"
+#endif
+
+#include <winsock2.h>
+#include <windows.h>
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <assert.h>
+
+#define RB_AUGMENT(x) (void)(x)
+#include "./tree.h"
+#include "log.h"
+#include "event.h"
+#include "event-internal.h"
+
+#define XFREE(ptr) do { if (ptr) free(ptr); } while(0)
+
+extern struct event_list timequeue;
+extern struct event_list addqueue;
+#if 0
+extern struct event_list signalqueue;
+#endif
+
+struct win_fd_set {
+ u_int fd_count;
+ SOCKET fd_array[1];
+};
+
+int evsigcaught[NSIG];
+volatile sig_atomic_t signal_caught = 0;
+/* MSDN says this is required to handle SIGFPE */
+volatile double SIGFPE_REQ = 0.0f;
+
+#if 0
+static void signal_handler(int sig);
+
+void signal_process(void);
+int signal_recalc(void);
+#endif
+
+struct event_entry {
+ RB_ENTRY(event_entry) node;
+ SOCKET sock;
+ int read_pos;
+ int write_pos;
+ struct event *read_event;
+ struct event *write_event;
+};
+
+static int
+compare(struct event_entry *a, struct event_entry *b)
+{
+	/* Total order on event_entries by socket handle; this is the
+	 * comparison callback used as the red-black tree key. */
+	if (a->sock != b->sock)
+		return (a->sock < b->sock) ? -1 : 1;
+	return 0;
+}
+
+struct win32op {
+ int fd_setsz;
+ struct win_fd_set *readset_in;
+ struct win_fd_set *writeset_in;
+ struct win_fd_set *readset_out;
+ struct win_fd_set *writeset_out;
+ struct win_fd_set *exset_out;
+ RB_HEAD(event_map, event_entry) event_root;
+
+ unsigned signals_are_broken : 1;
+};
+
+RB_PROTOTYPE(event_map, event_entry, node, compare);
+RB_GENERATE(event_map, event_entry, node, compare);
+
+void *win32_init (struct event_base *);
+int win32_insert (void *, struct event *);
+int win32_del (void *, struct event *);
+int win32_dispatch (struct event_base *base, void *, struct timeval *);
+void win32_dealloc (struct event_base *, void *);
+
+struct eventop win32ops = {
+ "win32",
+ win32_init,
+ win32_insert,
+ win32_del,
+ win32_dispatch,
+ win32_dealloc,
+ 0
+};
+
+#define FD_SET_ALLOC_SIZE(n) ((sizeof(struct win_fd_set) + ((n)-1)*sizeof(SOCKET)))
+
+/* Grow every fd set owned by op so each can hold new_size sockets.
+ * Returns 0 on success, -1 on allocation failure.
+ *
+ * Each realloc goes through a temporary pointer: the original code did
+ * op->X = realloc(op->X, size), which on failure leaks the old block
+ * and leaves a NULL pointer behind in *op.  With the temporary, a
+ * failed call leaves all sets valid at their previous size. */
+static int
+realloc_fd_sets(struct win32op *op, size_t new_size)
+{
+	size_t size;
+	struct win_fd_set *tmp;
+
+	assert(new_size >= op->readset_in->fd_count &&
+	       new_size >= op->writeset_in->fd_count);
+	assert(new_size >= 1);
+
+	size = FD_SET_ALLOC_SIZE(new_size);
+	if (!(tmp = realloc(op->readset_in, size)))
+		return (-1);
+	op->readset_in = tmp;
+	if (!(tmp = realloc(op->writeset_in, size)))
+		return (-1);
+	op->writeset_in = tmp;
+	if (!(tmp = realloc(op->readset_out, size)))
+		return (-1);
+	op->readset_out = tmp;
+	if (!(tmp = realloc(op->exset_out, size)))
+		return (-1);
+	op->exset_out = tmp;
+	if (!(tmp = realloc(op->writeset_out, size)))
+		return (-1);
+	op->writeset_out = tmp;
+	op->fd_setsz = new_size;
+	return (0);
+}
+
+static int
+timeval_to_ms(struct timeval *tv)
+{
+	/* Convert a timeval to whole milliseconds; sub-millisecond
+	 * remainder of tv_usec is truncated. */
+	int ms = tv->tv_sec * 1000;
+	ms += tv->tv_usec / 1000;
+	return ms;
+}
+
+/* Look up the event_entry for socket s in op's RB tree.  If absent and
+ * create is nonzero, allocate, initialize and insert a fresh entry.
+ * Returns NULL when not found (create==0) or on allocation failure. */
+static struct event_entry*
+get_event_entry(struct win32op *op, SOCKET s, int create)
+{
+	/* key is a stack dummy carrying only the sock field the
+	 * comparator reads. */
+	struct event_entry key, *val;
+	key.sock = s;
+	val = RB_FIND(event_map, &op->event_root, &key);
+	if (val || !create)
+		return val;
+	if (!(val = calloc(1, sizeof(struct event_entry)))) {
+		event_warn("%s: calloc", __func__);
+		return NULL;
+	}
+	val->sock = s;
+	/* -1 marks "not present in the corresponding fd set". */
+	val->read_pos = val->write_pos = -1;
+	RB_INSERT(event_map, &op->event_root, val);
+	return val;
+}
+
+/* Add ent's socket to the read (read!=0) or write input fd set,
+ * growing all sets if full.  Records the socket's slot index in
+ * ent->read_pos / ent->write_pos.  No-op if already present.
+ * Returns the slot used, or -1 if the sets could not be grown. */
+static int
+do_fd_set(struct win32op *op, struct event_entry *ent, int read)
+{
+	SOCKET s = ent->sock;
+	struct win_fd_set *set = read ? op->readset_in : op->writeset_in;
+	if (read) {
+		if (ent->read_pos >= 0)
+			return (0);
+	} else {
+		if (ent->write_pos >= 0)
+			return (0);
+	}
+	if (set->fd_count == op->fd_setsz) {
+		if (realloc_fd_sets(op, op->fd_setsz*2))
+			return (-1);
+		/* set pointer will have changed and needs reiniting! */
+		set = read ? op->readset_in : op->writeset_in;
+	}
+	set->fd_array[set->fd_count] = s;
+	if (read)
+		ent->read_pos = set->fd_count;
+	else
+		ent->write_pos = set->fd_count;
+	return (set->fd_count++);
+}
+
+/* Remove ent's socket from the read (read!=0) or write input fd set.
+ * The vacated slot is filled with the set's last socket, whose owning
+ * event_entry then has its cached position updated.  Returns 0. */
+static int
+do_fd_clear(struct win32op *op, struct event_entry *ent, int read)
+{
+	int i;
+	struct win_fd_set *set = read ? op->readset_in : op->writeset_in;
+	if (read) {
+		i = ent->read_pos;
+		ent->read_pos = -1;
+	} else {
+		i = ent->write_pos;
+		ent->write_pos = -1;
+	}
+	if (i < 0)
+		return (0);
+	if (--set->fd_count != i) {
+		struct event_entry *ent2;
+		SOCKET s2;
+		/* Move the last socket into the freed slot. */
+		s2 = set->fd_array[i] = set->fd_array[set->fd_count];
+		ent2 = get_event_entry(op, s2, 0);
+		/* BUGFIX: original tested `ent', which is always non-NULL
+		 * here; the lookup result `ent2' is what may be NULL. */
+		if (!ent2) /* This indicates a bug. */
+			return (0);
+		if (read)
+			ent2->read_pos = i;
+		else
+			ent2->write_pos = i;
+	}
+	return (0);
+}
+
+#define NEVENT 64
+/* Allocate and initialize the win32 select backend state: five fd sets
+ * sized for NEVENT sockets, an empty socket->event_entry tree, and the
+ * signal subsystem.  Returns the opaque backend pointer, or NULL on
+ * allocation failure (all partial allocations are released). */
+void *
+win32_init(struct event_base *_base)
+{
+	struct win32op *winop;
+	size_t size;
+	if (!(winop = calloc(1, sizeof(struct win32op))))
+		return NULL;
+	winop->fd_setsz = NEVENT;
+	size = FD_SET_ALLOC_SIZE(NEVENT);
+	if (!(winop->readset_in = malloc(size)))
+		goto err;
+	if (!(winop->writeset_in = malloc(size)))
+		goto err;
+	if (!(winop->readset_out = malloc(size)))
+		goto err;
+	if (!(winop->writeset_out = malloc(size)))
+		goto err;
+	if (!(winop->exset_out = malloc(size)))
+		goto err;
+	RB_INIT(&winop->event_root);
+	winop->readset_in->fd_count = winop->writeset_in->fd_count = 0;
+	winop->readset_out->fd_count = winop->writeset_out->fd_count
+		= winop->exset_out->fd_count = 0;
+
+	/* If signal bookkeeping cannot be set up, remember it so that
+	 * win32_insert can refuse EV_SIGNAL events later. */
+	if (evsignal_init(_base) < 0)
+		winop->signals_are_broken = 1;
+
+	return (winop);
+ err:
+	/* goto-cleanup: XFREE tolerates the not-yet-allocated NULLs
+	 * left by calloc. */
+	XFREE(winop->readset_in);
+	XFREE(winop->writeset_in);
+	XFREE(winop->readset_out);
+	XFREE(winop->writeset_out);
+	XFREE(winop->exset_out);
+	XFREE(winop);
+	return (NULL);
+}
+
+/* Register an event with the backend.  Signal events are delegated to
+ * evsignal_add (refused if signal setup failed); read/write events get
+ * (or create) the socket's event_entry and are added to the matching
+ * input fd sets.  Returns 0 on success, -1 on failure. */
+int
+win32_insert(void *op, struct event *ev)
+{
+	struct win32op *win32op = op;
+	struct event_entry *ent;
+
+	if (ev->ev_events & EV_SIGNAL) {
+		if (win32op->signals_are_broken)
+			return (-1);
+		return (evsignal_add(ev));
+	}
+	if (!(ev->ev_events & (EV_READ|EV_WRITE)))
+		return (0);
+	ent = get_event_entry(win32op, ev->ev_fd, 1);
+	if (!ent)
+		return (-1); /* out of memory */
+
+	event_debug(("%s: adding event for %d", __func__, (int)ev->ev_fd));
+	if (ev->ev_events & EV_READ) {
+		if (do_fd_set(win32op, ent, 1)<0)
+			return (-1);
+		ent->read_event = ev;
+	}
+	if (ev->ev_events & EV_WRITE) {
+		if (do_fd_set(win32op, ent, 0)<0)
+			return (-1);
+		ent->write_event = ev;
+	}
+	return (0);
+}
+
+/* Unregister an event.  Signal events go to evsignal_del; socket
+ * events are cleared from the fd sets, and the event_entry is removed
+ * from the tree and freed once neither read nor write event remains.
+ * Returns 0 on success, -1 if the socket has no entry. */
+int
+win32_del(void *op, struct event *ev)
+{
+	struct win32op *win32op = op;
+	struct event_entry *ent;
+
+	if (ev->ev_events & EV_SIGNAL)
+		return (evsignal_del(ev));
+
+	if (!(ent = get_event_entry(win32op, ev->ev_fd, 0)))
+		return (-1);
+	event_debug(("%s: Removing event for %d", __func__, ev->ev_fd));
+	/* Only clear the direction(s) this event actually owns. */
+	if (ev == ent->read_event) {
+		do_fd_clear(win32op, ent, 1);
+		ent->read_event = NULL;
+	}
+	if (ev == ent->write_event) {
+		do_fd_clear(win32op, ent, 0);
+		ent->write_event = NULL;
+	}
+	if (!ent->read_event && !ent->write_event) {
+		RB_REMOVE(event_map, &win32op->event_root, ent);
+		free(ent);
+	}
+
+	return 0;
+}
+
+static void
+fd_set_copy(struct win_fd_set *out, const struct win_fd_set *in)
+{
+	/* Duplicate in's socket array and count into out; out must be
+	 * at least as large as in. */
+	memcpy(out->fd_array, in->fd_array, in->fd_count * sizeof(SOCKET));
+	out->fd_count = in->fd_count;
+}
+
+/*
+ static void dump_fd_set(struct win_fd_set *s)
+ {
+ unsigned int i;
+ printf("[ ");
+ for(i=0;i<s->fd_count;++i)
+ printf("%d ",(int)s->fd_array[i]);
+ printf("]\n");
+ }
+*/
+
+/* One dispatch pass: copy the input fd sets to scratch output sets,
+ * run select() with timeout tv, then activate the events behind every
+ * ready socket.  Iteration over each result set starts at a random
+ * offset so no socket is systematically favored.  Exceptions are
+ * reported as EV_READ.  Returns 0, or select()'s result on error. */
+int
+win32_dispatch(struct event_base *base, void *op,
+	       struct timeval *tv)
+{
+	struct win32op *win32op = op;
+	int res = 0;
+	unsigned j, i;
+	int fd_count;
+	SOCKET s;
+	struct event_entry *ent;
+
+	fd_set_copy(win32op->readset_out, win32op->readset_in);
+	/* Exceptions are watched on the same sockets as reads. */
+	fd_set_copy(win32op->exset_out, win32op->readset_in);
+	fd_set_copy(win32op->writeset_out, win32op->writeset_in);
+
+	fd_count =
+	    (win32op->readset_out->fd_count > win32op->writeset_out->fd_count) ?
+	    win32op->readset_out->fd_count : win32op->writeset_out->fd_count;
+
+	if (!fd_count) {
+		/* Windows doesn't like you to call select() with no sockets */
+		Sleep(timeval_to_ms(tv));
+		evsignal_process(base);
+		return (0);
+	}
+
+	res = select(fd_count,
+		     (struct fd_set*)win32op->readset_out,
+		     (struct fd_set*)win32op->writeset_out,
+		     (struct fd_set*)win32op->exset_out, tv);
+
+	event_debug(("%s: select returned %d", __func__, res));
+
+	if(res <= 0) {
+		evsignal_process(base);
+		return res;
+	} else if (base->sig.evsignal_caught) {
+		evsignal_process(base);
+	}
+
+	if (win32op->readset_out->fd_count) {
+		i = rand() % win32op->readset_out->fd_count;
+		for (j=0; j<win32op->readset_out->fd_count; ++j) {
+			if (++i >= win32op->readset_out->fd_count)
+				i = 0;
+			s = win32op->readset_out->fd_array[i];
+			if ((ent = get_event_entry(win32op, s, 0)) && ent->read_event)
+				event_active(ent->read_event, EV_READ, 1);
+		}
+	}
+	if (win32op->exset_out->fd_count) {
+		i = rand() % win32op->exset_out->fd_count;
+		for (j=0; j<win32op->exset_out->fd_count; ++j) {
+			if (++i >= win32op->exset_out->fd_count)
+				i = 0;
+			s = win32op->exset_out->fd_array[i];
+			if ((ent = get_event_entry(win32op, s, 0)) && ent->read_event)
+				event_active(ent->read_event, EV_READ, 1);
+		}
+	}
+	if (win32op->writeset_out->fd_count) {
+		i = rand() % win32op->writeset_out->fd_count;
+		for (j=0; j<win32op->writeset_out->fd_count; ++j) {
+			/* BUGFIX: original wrapped against exset_out's count
+			 * here, which can skip or repeat write events when
+			 * the sets differ in size. */
+			if (++i >= win32op->writeset_out->fd_count)
+				i = 0;
+			s = win32op->writeset_out->fd_array[i];
+			if ((ent = get_event_entry(win32op, s, 0)) && ent->write_event)
+				event_active(ent->write_event, EV_WRITE, 1);
+
+		}
+	}
+
+	return (0);
+}
+
+/* Tear down the backend: release signal state and all five fd sets,
+ * scrub the struct, and free it.  free(NULL) is a no-op, so the
+ * per-pointer guards of the original are unnecessary. */
+void
+win32_dealloc(struct event_base *_base, void *arg)
+{
+	struct win32op *win32op = arg;
+
+	evsignal_dealloc(_base);
+	free(win32op->readset_in);
+	free(win32op->writeset_in);
+	free(win32op->readset_out);
+	free(win32op->writeset_out);
+	free(win32op->exset_out);
+	/* XXXXX free the tree. */
+
+	/* BUGFIX: original used sizeof(win32op) -- the size of the
+	 * pointer -- so only 4/8 bytes were cleared. */
+	memset(win32op, 0, sizeof(*win32op));
+	free(win32op);
+}
+
+#if 0
+/* Compiled-out legacy signal handling, superseded by the evsignal_*
+ * API used above; kept for reference only. */
+static void
+signal_handler(int sig)
+{
+	evsigcaught[sig]++;
+	signal_caught = 1;
+}
+
+/* Reinstall signal_handler for every event on signalqueue. */
+int
+signal_recalc(void)
+{
+	struct event *ev;
+
+	/* Reinstall our signal handler. */
+	TAILQ_FOREACH(ev, &signalqueue, ev_signal_next) {
+		if((int)signal(EVENT_SIGNAL(ev), signal_handler) == -1)
+			return (-1);
+	}
+	return (0);
+}
+
+/* Deliver caught signals to their events and reset the counters. */
+void
+signal_process(void)
+{
+	struct event *ev;
+	short ncalls;
+
+	TAILQ_FOREACH(ev, &signalqueue, ev_signal_next) {
+		ncalls = evsigcaught[EVENT_SIGNAL(ev)];
+		if (ncalls) {
+			if (!(ev->ev_events & EV_PERSIST))
+				event_del(ev);
+			event_active(ev, EV_SIGNAL, ncalls);
+		}
+	}
+
+	memset(evsigcaught, 0, sizeof(evsigcaught));
+	signal_caught = 0;
+}
+#endif
+
diff --git a/libevent/WIN32-Prj/libevent.dsw b/libevent/WIN32-Prj/libevent.dsw
new file mode 100644
index 00000000000..fb05451ca25
--- /dev/null
+++ b/libevent/WIN32-Prj/libevent.dsw
@@ -0,0 +1,74 @@
+Microsoft Developer Studio Workspace File, Format Version 6.00
+# WARNING: DO NOT EDIT OR DELETE THIS WORKSPACE FILE!
+
+###############################################################################
+
+Project: "event_test"=".\event_test\event_test.dsp" - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name libevent
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "libevent"=".\libevent.dsp" - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+}}}
+
+###############################################################################
+
+Project: "signal_test"=".\signal_test\signal_test.dsp" - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name libevent
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Project: "time_test"=".\time_test\time_test.dsp" - Package Owner=<4>
+
+Package=<5>
+{{{
+}}}
+
+Package=<4>
+{{{
+ Begin Project Dependency
+ Project_Dep_Name libevent
+ End Project Dependency
+}}}
+
+###############################################################################
+
+Global:
+
+Package=<5>
+{{{
+}}}
+
+Package=<3>
+{{{
+}}}
+
+###############################################################################
+
diff --git a/libevent/WIN32-Prj/libevent.sln b/libevent/WIN32-Prj/libevent.sln
new file mode 100644
index 00000000000..17e0c98bae6
--- /dev/null
+++ b/libevent/WIN32-Prj/libevent.sln
@@ -0,0 +1,53 @@
+
+Microsoft Visual Studio Solution File, Format Version 9.00
+# Visual Studio 2005
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "event_test", "event_test\event_test.vcproj", "{52099A8B-455B-4BE9-8E61-A3D6E8A4338D}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libevent", "libevent.vcproj", "{B98ABFCE-24D4-4B70-94DE-EF7F1E0662F9}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "signal_test", "signal_test\signal_test.vcproj", "{768DB9DD-2694-4274-89B8-74106E8F7786}"
+ ProjectSection(ProjectDependencies) = postProject
+ {B98ABFCE-24D4-4B70-94DE-EF7F1E0662F9} = {B98ABFCE-24D4-4B70-94DE-EF7F1E0662F9}
+ EndProjectSection
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "time_test", "time_test\time_test.vcproj", "{D4BE29FB-E45C-4177-9647-74BBAFDC1257}"
+ ProjectSection(ProjectDependencies) = postProject
+ {B98ABFCE-24D4-4B70-94DE-EF7F1E0662F9} = {B98ABFCE-24D4-4B70-94DE-EF7F1E0662F9}
+ EndProjectSection
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "regress", "regress\regress.vcproj", "{F7C26008-6066-4AD3-8543-452EFE58BD2E}"
+ ProjectSection(ProjectDependencies) = postProject
+ {B98ABFCE-24D4-4B70-94DE-EF7F1E0662F9} = {B98ABFCE-24D4-4B70-94DE-EF7F1E0662F9}
+ EndProjectSection
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|Win32 = Debug|Win32
+ Release|Win32 = Release|Win32
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {52099A8B-455B-4BE9-8E61-A3D6E8A4338D}.Debug|Win32.ActiveCfg = Debug|Win32
+ {52099A8B-455B-4BE9-8E61-A3D6E8A4338D}.Debug|Win32.Build.0 = Debug|Win32
+ {52099A8B-455B-4BE9-8E61-A3D6E8A4338D}.Release|Win32.ActiveCfg = Release|Win32
+ {52099A8B-455B-4BE9-8E61-A3D6E8A4338D}.Release|Win32.Build.0 = Release|Win32
+ {B98ABFCE-24D4-4B70-94DE-EF7F1E0662F9}.Debug|Win32.ActiveCfg = Debug|Win32
+ {B98ABFCE-24D4-4B70-94DE-EF7F1E0662F9}.Debug|Win32.Build.0 = Debug|Win32
+ {B98ABFCE-24D4-4B70-94DE-EF7F1E0662F9}.Release|Win32.ActiveCfg = Release|Win32
+ {B98ABFCE-24D4-4B70-94DE-EF7F1E0662F9}.Release|Win32.Build.0 = Release|Win32
+ {768DB9DD-2694-4274-89B8-74106E8F7786}.Debug|Win32.ActiveCfg = Debug|Win32
+ {768DB9DD-2694-4274-89B8-74106E8F7786}.Debug|Win32.Build.0 = Debug|Win32
+ {768DB9DD-2694-4274-89B8-74106E8F7786}.Release|Win32.ActiveCfg = Release|Win32
+ {768DB9DD-2694-4274-89B8-74106E8F7786}.Release|Win32.Build.0 = Release|Win32
+ {D4BE29FB-E45C-4177-9647-74BBAFDC1257}.Debug|Win32.ActiveCfg = Debug|Win32
+ {D4BE29FB-E45C-4177-9647-74BBAFDC1257}.Debug|Win32.Build.0 = Debug|Win32
+ {D4BE29FB-E45C-4177-9647-74BBAFDC1257}.Release|Win32.ActiveCfg = Release|Win32
+ {D4BE29FB-E45C-4177-9647-74BBAFDC1257}.Release|Win32.Build.0 = Release|Win32
+ {F7C26008-6066-4AD3-8543-452EFE58BD2E}.Debug|Win32.ActiveCfg = Debug|Win32
+ {F7C26008-6066-4AD3-8543-452EFE58BD2E}.Debug|Win32.Build.0 = Debug|Win32
+ {F7C26008-6066-4AD3-8543-452EFE58BD2E}.Release|Win32.ActiveCfg = Release|Win32
+ {F7C26008-6066-4AD3-8543-452EFE58BD2E}.Release|Win32.Build.0 = Release|Win32
+ EndGlobalSection
+ GlobalSection(SolutionProperties) = preSolution
+ HideSolutionNode = FALSE
+ EndGlobalSection
+EndGlobal
diff --git a/libevent/autogen.sh b/libevent/autogen.sh
new file mode 100644
index 00000000000..6d4275a6392
--- /dev/null
+++ b/libevent/autogen.sh
@@ -0,0 +1,11 @@
+#!/bin/sh
+LIBTOOLIZE=libtoolize
+SYSNAME=`uname`
+if [ "x$SYSNAME" = "xDarwin" ] ; then
+ LIBTOOLIZE=glibtoolize
+fi
+aclocal && \
+ autoheader && \
+ $LIBTOOLIZE && \
+ autoconf && \
+ automake --add-missing --copy
diff --git a/libevent/buffer.c b/libevent/buffer.c
new file mode 100644
index 00000000000..9cb0f0ce323
--- /dev/null
+++ b/libevent/buffer.c
@@ -0,0 +1,451 @@
+/*
+ * Copyright (c) 2002, 2003 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#ifdef WIN32
+#include <winsock2.h>
+#include <windows.h>
+#endif
+
+#ifdef HAVE_VASPRINTF
+/* If we have vasprintf, we need to define this before we include stdio.h. */
+#define _GNU_SOURCE
+#endif
+
+#include <sys/types.h>
+
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+#ifdef HAVE_SYS_IOCTL_H
+#include <sys/ioctl.h>
+#endif
+
+#include <assert.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#include "event.h"
+#include "config.h"
+#include "evutil.h"
+
+struct evbuffer *
+evbuffer_new(void)
+{
+ struct evbuffer *buffer;
+
+ buffer = calloc(1, sizeof(struct evbuffer));
+
+ return (buffer);
+}
+
+void
+evbuffer_free(struct evbuffer *buffer)
+{
+ if (buffer->orig_buffer != NULL)
+ free(buffer->orig_buffer);
+ free(buffer);
+}
+
+/*
+ * This is a destructive add. The data from one buffer moves into
+ * the other buffer.
+ */
+
+#define SWAP(x,y) do { \
+ (x)->buffer = (y)->buffer; \
+ (x)->orig_buffer = (y)->orig_buffer; \
+ (x)->misalign = (y)->misalign; \
+ (x)->totallen = (y)->totallen; \
+ (x)->off = (y)->off; \
+} while (0)
+
+int
+evbuffer_add_buffer(struct evbuffer *outbuf, struct evbuffer *inbuf)
+{
+ int res;
+
+ /* Short cut for better performance */
+ if (outbuf->off == 0) {
+ struct evbuffer tmp;
+ size_t oldoff = inbuf->off;
+
+ /* Swap them directly */
+ SWAP(&tmp, outbuf);
+ SWAP(outbuf, inbuf);
+ SWAP(inbuf, &tmp);
+
+ /*
+ * Optimization comes with a price; we need to notify the
+ * buffer if necessary of the changes. oldoff is the amount
+ * of data that we transfered from inbuf to outbuf
+ */
+ if (inbuf->off != oldoff && inbuf->cb != NULL)
+ (*inbuf->cb)(inbuf, oldoff, inbuf->off, inbuf->cbarg);
+ if (oldoff && outbuf->cb != NULL)
+ (*outbuf->cb)(outbuf, 0, oldoff, outbuf->cbarg);
+
+ return (0);
+ }
+
+ res = evbuffer_add(outbuf, inbuf->buffer, inbuf->off);
+ if (res == 0) {
+ /* We drain the input buffer on success */
+ evbuffer_drain(inbuf, inbuf->off);
+ }
+
+ return (res);
+}
+
+int
+evbuffer_add_vprintf(struct evbuffer *buf, const char *fmt, va_list ap)
+{
+ char *buffer;
+ size_t space;
+ size_t oldoff = buf->off;
+ int sz;
+ va_list aq;
+
+ /* make sure that at least some space is available */
+ evbuffer_expand(buf, 64);
+ for (;;) {
+ size_t used = buf->misalign + buf->off;
+ buffer = (char *)buf->buffer + buf->off;
+ assert(buf->totallen >= used);
+ space = buf->totallen - used;
+
+#ifndef va_copy
+#define va_copy(dst, src) memcpy(&(dst), &(src), sizeof(va_list))
+#endif
+ va_copy(aq, ap);
+
+ sz = evutil_vsnprintf(buffer, space, fmt, aq);
+
+ va_end(aq);
+
+ if (sz < 0)
+ return (-1);
+ if ((size_t)sz < space) {
+ buf->off += sz;
+ if (buf->cb != NULL)
+ (*buf->cb)(buf, oldoff, buf->off, buf->cbarg);
+ return (sz);
+ }
+ if (evbuffer_expand(buf, sz + 1) == -1)
+ return (-1);
+
+ }
+ /* NOTREACHED */
+}
+
+int
+evbuffer_add_printf(struct evbuffer *buf, const char *fmt, ...)
+{
+ int res = -1;
+ va_list ap;
+
+ va_start(ap, fmt);
+ res = evbuffer_add_vprintf(buf, fmt, ap);
+ va_end(ap);
+
+ return (res);
+}
+
+/* Reads data from an event buffer and drains the bytes read */
+
+int
+evbuffer_remove(struct evbuffer *buf, void *data, size_t datlen)
+{
+ size_t nread = datlen;
+ if (nread >= buf->off)
+ nread = buf->off;
+
+ memcpy(data, buf->buffer, nread);
+ evbuffer_drain(buf, nread);
+
+ return (nread);
+}
+
+/*
+ * Reads a line terminated by either '\r\n', '\n\r' or '\r' or '\n'.
+ * The returned buffer needs to be freed by the called.
+ */
+
+char *
+evbuffer_readline(struct evbuffer *buffer)
+{
+ u_char *data = EVBUFFER_DATA(buffer);
+ size_t len = EVBUFFER_LENGTH(buffer);
+ char *line;
+ unsigned int i;
+
+ for (i = 0; i < len; i++) {
+ if (data[i] == '\r' || data[i] == '\n')
+ break;
+ }
+
+ if (i == len)
+ return (NULL);
+
+ if ((line = malloc(i + 1)) == NULL) {
+ fprintf(stderr, "%s: out of memory\n", __func__);
+ evbuffer_drain(buffer, i);
+ return (NULL);
+ }
+
+ memcpy(line, data, i);
+ line[i] = '\0';
+
+ /*
+ * Some protocols terminate a line with '\r\n', so check for
+ * that, too.
+ */
+ if ( i < len - 1 ) {
+ char fch = data[i], sch = data[i+1];
+
+ /* Drain one more character if needed */
+ if ( (sch == '\r' || sch == '\n') && sch != fch )
+ i += 1;
+ }
+
+ evbuffer_drain(buffer, i + 1);
+
+ return (line);
+}
+
+/* Adds data to an event buffer */
+
+static void
+evbuffer_align(struct evbuffer *buf)
+{
+ memmove(buf->orig_buffer, buf->buffer, buf->off);
+ buf->buffer = buf->orig_buffer;
+ buf->misalign = 0;
+}
+
+/* Expands the available space in the event buffer to at least datlen */
+
+int
+evbuffer_expand(struct evbuffer *buf, size_t datlen)
+{
+ size_t need = buf->misalign + buf->off + datlen;
+
+ /* If we can fit all the data, then we don't have to do anything */
+ if (buf->totallen >= need)
+ return (0);
+
+ /*
+ * If the misalignment fulfills our data needs, we just force an
+ * alignment to happen. Afterwards, we have enough space.
+ */
+ if (buf->misalign >= datlen) {
+ evbuffer_align(buf);
+ } else {
+ void *newbuf;
+ size_t length = buf->totallen;
+
+ if (length < 256)
+ length = 256;
+ while (length < need)
+ length <<= 1;
+
+ if (buf->orig_buffer != buf->buffer)
+ evbuffer_align(buf);
+ if ((newbuf = realloc(buf->buffer, length)) == NULL)
+ return (-1);
+
+ buf->orig_buffer = buf->buffer = newbuf;
+ buf->totallen = length;
+ }
+
+ return (0);
+}
+
+int
+evbuffer_add(struct evbuffer *buf, const void *data, size_t datlen)
+{
+ size_t need = buf->misalign + buf->off + datlen;
+ size_t oldoff = buf->off;
+
+ if (buf->totallen < need) {
+ if (evbuffer_expand(buf, datlen) == -1)
+ return (-1);
+ }
+
+ memcpy(buf->buffer + buf->off, data, datlen);
+ buf->off += datlen;
+
+ if (datlen && buf->cb != NULL)
+ (*buf->cb)(buf, oldoff, buf->off, buf->cbarg);
+
+ return (0);
+}
+
+void
+evbuffer_drain(struct evbuffer *buf, size_t len)
+{
+ size_t oldoff = buf->off;
+
+ if (len >= buf->off) {
+ buf->off = 0;
+ buf->buffer = buf->orig_buffer;
+ buf->misalign = 0;
+ goto done;
+ }
+
+ buf->buffer += len;
+ buf->misalign += len;
+
+ buf->off -= len;
+
+ done:
+ /* Tell someone about changes in this buffer */
+ if (buf->off != oldoff && buf->cb != NULL)
+ (*buf->cb)(buf, oldoff, buf->off, buf->cbarg);
+
+}
+
+/*
+ * Reads data from a file descriptor into a buffer.
+ */
+
+#define EVBUFFER_MAX_READ 4096
+
+int
+evbuffer_read(struct evbuffer *buf, int fd, int howmuch)
+{
+ u_char *p;
+ size_t oldoff = buf->off;
+ int n = EVBUFFER_MAX_READ;
+
+#if defined(FIONREAD)
+#ifdef WIN32
+ long lng = n;
+ if (ioctlsocket(fd, FIONREAD, &lng) == -1 || (n=lng) == 0) {
+#else
+ if (ioctl(fd, FIONREAD, &n) == -1 || n == 0) {
+#endif
+ n = EVBUFFER_MAX_READ;
+ } else if (n > EVBUFFER_MAX_READ && n > howmuch) {
+ /*
+ * It's possible that a lot of data is available for
+ * reading. We do not want to exhaust resources
+ * before the reader has a chance to do something
+ * about it. If the reader does not tell us how much
+ * data we should read, we artifically limit it.
+ */
+ if ((size_t)n > buf->totallen << 2)
+ n = buf->totallen << 2;
+ if (n < EVBUFFER_MAX_READ)
+ n = EVBUFFER_MAX_READ;
+ }
+#endif
+ if (howmuch < 0 || howmuch > n)
+ howmuch = n;
+
+ /* If we don't have FIONREAD, we might waste some space here */
+ if (evbuffer_expand(buf, howmuch) == -1)
+ return (-1);
+
+ /* We can append new data at this point */
+ p = buf->buffer + buf->off;
+
+#ifndef WIN32
+ n = read(fd, p, howmuch);
+#else
+ n = recv(fd, p, howmuch, 0);
+#endif
+ if (n == -1)
+ return (-1);
+ if (n == 0)
+ return (0);
+
+ buf->off += n;
+
+ /* Tell someone about changes in this buffer */
+ if (buf->off != oldoff && buf->cb != NULL)
+ (*buf->cb)(buf, oldoff, buf->off, buf->cbarg);
+
+ return (n);
+}
+
+int
+evbuffer_write(struct evbuffer *buffer, int fd)
+{
+ int n;
+
+#ifndef WIN32
+ n = write(fd, buffer->buffer, buffer->off);
+#else
+ n = send(fd, buffer->buffer, buffer->off, 0);
+#endif
+ if (n == -1)
+ return (-1);
+ if (n == 0)
+ return (0);
+ evbuffer_drain(buffer, n);
+
+ return (n);
+}
+
+u_char *
+evbuffer_find(struct evbuffer *buffer, const u_char *what, size_t len)
+{
+ u_char *search = buffer->buffer, *end = search + buffer->off;
+ u_char *p;
+
+ while (search < end &&
+ (p = memchr(search, *what, end - search)) != NULL) {
+ if (p + len > end)
+ break;
+ if (memcmp(p, what, len) == 0)
+ return (p);
+ search = p + 1;
+ }
+
+ return (NULL);
+}
+
+void evbuffer_setcb(struct evbuffer *buffer,
+ void (*cb)(struct evbuffer *, size_t, size_t, void *),
+ void *cbarg)
+{
+ buffer->cb = cb;
+ buffer->cbarg = cbarg;
+}
diff --git a/libevent/compat/sys/_time.h b/libevent/compat/sys/_time.h
new file mode 100644
index 00000000000..8cabb0d55e7
--- /dev/null
+++ b/libevent/compat/sys/_time.h
@@ -0,0 +1,163 @@
+/* $OpenBSD: time.h,v 1.11 2000/10/10 13:36:48 itojun Exp $ */
+/* $NetBSD: time.h,v 1.18 1996/04/23 10:29:33 mycroft Exp $ */
+
+/*
+ * Copyright (c) 1982, 1986, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)time.h 8.2 (Berkeley) 7/10/94
+ */
+
+#ifndef _SYS_TIME_H_
+#define _SYS_TIME_H_
+
+#include <sys/types.h>
+
+/*
+ * Structure returned by gettimeofday(2) system call,
+ * and used in other calls.
+ */
+struct timeval {
+ long tv_sec; /* seconds */
+ long tv_usec; /* and microseconds */
+};
+
+/*
+ * Structure defined by POSIX.1b to be like a timeval.
+ */
+struct timespec {
+ time_t tv_sec; /* seconds */
+ long tv_nsec; /* and nanoseconds */
+};
+
+#define TIMEVAL_TO_TIMESPEC(tv, ts) { \
+ (ts)->tv_sec = (tv)->tv_sec; \
+ (ts)->tv_nsec = (tv)->tv_usec * 1000; \
+}
+#define TIMESPEC_TO_TIMEVAL(tv, ts) { \
+ (tv)->tv_sec = (ts)->tv_sec; \
+ (tv)->tv_usec = (ts)->tv_nsec / 1000; \
+}
+
+struct timezone {
+ int tz_minuteswest; /* minutes west of Greenwich */
+ int tz_dsttime; /* type of dst correction */
+};
+#define DST_NONE 0 /* not on dst */
+#define DST_USA 1 /* USA style dst */
+#define DST_AUST 2 /* Australian style dst */
+#define DST_WET 3 /* Western European dst */
+#define DST_MET 4 /* Middle European dst */
+#define DST_EET 5 /* Eastern European dst */
+#define DST_CAN 6 /* Canada */
+
+/* Operations on timevals. */
+#define timerclear(tvp) (tvp)->tv_sec = (tvp)->tv_usec = 0
+#define timerisset(tvp) ((tvp)->tv_sec || (tvp)->tv_usec)
+#define timercmp(tvp, uvp, cmp) \
+ (((tvp)->tv_sec == (uvp)->tv_sec) ? \
+ ((tvp)->tv_usec cmp (uvp)->tv_usec) : \
+ ((tvp)->tv_sec cmp (uvp)->tv_sec))
+#define timeradd(tvp, uvp, vvp) \
+ do { \
+ (vvp)->tv_sec = (tvp)->tv_sec + (uvp)->tv_sec; \
+ (vvp)->tv_usec = (tvp)->tv_usec + (uvp)->tv_usec; \
+ if ((vvp)->tv_usec >= 1000000) { \
+ (vvp)->tv_sec++; \
+ (vvp)->tv_usec -= 1000000; \
+ } \
+ } while (0)
+#define timersub(tvp, uvp, vvp) \
+ do { \
+ (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \
+ (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \
+ if ((vvp)->tv_usec < 0) { \
+ (vvp)->tv_sec--; \
+ (vvp)->tv_usec += 1000000; \
+ } \
+ } while (0)
+
+/* Operations on timespecs. */
+#define timespecclear(tsp) (tsp)->tv_sec = (tsp)->tv_nsec = 0
+#define timespecisset(tsp) ((tsp)->tv_sec || (tsp)->tv_nsec)
+#define timespeccmp(tsp, usp, cmp) \
+ (((tsp)->tv_sec == (usp)->tv_sec) ? \
+ ((tsp)->tv_nsec cmp (usp)->tv_nsec) : \
+ ((tsp)->tv_sec cmp (usp)->tv_sec))
+#define timespecadd(tsp, usp, vsp) \
+ do { \
+ (vsp)->tv_sec = (tsp)->tv_sec + (usp)->tv_sec; \
+ (vsp)->tv_nsec = (tsp)->tv_nsec + (usp)->tv_nsec; \
+ if ((vsp)->tv_nsec >= 1000000000L) { \
+ (vsp)->tv_sec++; \
+ (vsp)->tv_nsec -= 1000000000L; \
+ } \
+ } while (0)
+#define timespecsub(tsp, usp, vsp) \
+ do { \
+ (vsp)->tv_sec = (tsp)->tv_sec - (usp)->tv_sec; \
+ (vsp)->tv_nsec = (tsp)->tv_nsec - (usp)->tv_nsec; \
+ if ((vsp)->tv_nsec < 0) { \
+ (vsp)->tv_sec--; \
+ (vsp)->tv_nsec += 1000000000L; \
+ } \
+ } while (0)
+
+/*
+ * Names of the interval timers, and structure
+ * defining a timer setting.
+ */
+#define ITIMER_REAL 0
+#define ITIMER_VIRTUAL 1
+#define ITIMER_PROF 2
+
+struct itimerval {
+ struct timeval it_interval; /* timer interval */
+ struct timeval it_value; /* current value */
+};
+
+/*
+ * Getkerninfo clock information structure
+ */
+struct clockinfo {
+ int hz; /* clock frequency */
+ int tick; /* micro-seconds per hz tick */
+ int tickadj; /* clock skew rate for adjtime() */
+ int stathz; /* statistics clock frequency */
+ int profhz; /* profiling clock frequency */
+};
+
+#define CLOCK_REALTIME 0
+#define CLOCK_VIRTUAL 1
+#define CLOCK_PROF 2
+
+#define TIMER_RELTIME 0x0 /* relative timer */
+#define TIMER_ABSTIME 0x1 /* absolute timer */
+
+/* --- stuff got cut here - niels --- */
+
+#endif /* !_SYS_TIME_H_ */
diff --git a/libevent/compat/sys/queue.h b/libevent/compat/sys/queue.h
new file mode 100644
index 00000000000..c0956ddce43
--- /dev/null
+++ b/libevent/compat/sys/queue.h
@@ -0,0 +1,488 @@
+/* $OpenBSD: queue.h,v 1.16 2000/09/07 19:47:59 art Exp $ */
+/* $NetBSD: queue.h,v 1.11 1996/05/16 05:17:14 mycroft Exp $ */
+
+/*
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)queue.h 8.5 (Berkeley) 8/20/94
+ */
+
+#ifndef _SYS_QUEUE_H_
+#define _SYS_QUEUE_H_
+
+/*
+ * This file defines five types of data structures: singly-linked lists,
+ * lists, simple queues, tail queues, and circular queues.
+ *
+ *
+ * A singly-linked list is headed by a single forward pointer. The elements
+ * are singly linked for minimum space and pointer manipulation overhead at
+ * the expense of O(n) removal for arbitrary elements. New elements can be
+ * added to the list after an existing element or at the head of the list.
+ * Elements being removed from the head of the list should use the explicit
+ * macro for this purpose for optimum efficiency. A singly-linked list may
+ * only be traversed in the forward direction. Singly-linked lists are ideal
+ * for applications with large datasets and few or no removals or for
+ * implementing a LIFO queue.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may only be traversed in the forward direction.
+ *
+ * A simple queue is headed by a pair of pointers, one the head of the
+ * list and the other to the tail of the list. The elements are singly
+ * linked to save space, so elements can only be removed from the
+ * head of the list. New elements can be added to the list before or after
+ * an existing element, at the head of the list, or at the end of the
+ * list. A simple queue may only be traversed in the forward direction.
+ *
+ * A tail queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or
+ * after an existing element, at the head of the list, or at the end of
+ * the list. A tail queue may be traversed in either direction.
+ *
+ * A circle queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or after
+ * an existing element, at the head of the list, or at the end of the list.
+ * A circle queue may be traversed in either direction, but has a more
+ * complex end of list detection.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ */
+
+/*
+ * Singly-linked List definitions.
+ */
+#define SLIST_HEAD(name, type) \
+struct name { \
+ struct type *slh_first; /* first element */ \
+}
+
+#define SLIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#ifndef WIN32
+#define SLIST_ENTRY(type) \
+struct { \
+ struct type *sle_next; /* next element */ \
+}
+#endif
+
+/*
+ * Singly-linked List access methods.
+ */
+#define SLIST_FIRST(head) ((head)->slh_first)
+#define SLIST_END(head) NULL
+#define SLIST_EMPTY(head) (SLIST_FIRST(head) == SLIST_END(head))
+#define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
+
+#define SLIST_FOREACH(var, head, field) \
+ for((var) = SLIST_FIRST(head); \
+ (var) != SLIST_END(head); \
+ (var) = SLIST_NEXT(var, field))
+
+/*
+ * Singly-linked List functions.
+ */
+#define SLIST_INIT(head) { \
+ SLIST_FIRST(head) = SLIST_END(head); \
+}
+
+#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
+ (elm)->field.sle_next = (slistelm)->field.sle_next; \
+ (slistelm)->field.sle_next = (elm); \
+} while (0)
+
+#define SLIST_INSERT_HEAD(head, elm, field) do { \
+ (elm)->field.sle_next = (head)->slh_first; \
+ (head)->slh_first = (elm); \
+} while (0)
+
+#define SLIST_REMOVE_HEAD(head, field) do { \
+ (head)->slh_first = (head)->slh_first->field.sle_next; \
+} while (0)
+
+/*
+ * List definitions.
+ */
+#define LIST_HEAD(name, type) \
+struct name { \
+ struct type *lh_first; /* first element */ \
+}
+
+#define LIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define LIST_ENTRY(type) \
+struct { \
+ struct type *le_next; /* next element */ \
+ struct type **le_prev; /* address of previous next element */ \
+}
+
+/*
+ * List access methods
+ */
+#define LIST_FIRST(head) ((head)->lh_first)
+#define LIST_END(head) NULL
+#define LIST_EMPTY(head) (LIST_FIRST(head) == LIST_END(head))
+#define LIST_NEXT(elm, field) ((elm)->field.le_next)
+
+#define LIST_FOREACH(var, head, field) \
+ for((var) = LIST_FIRST(head); \
+ (var)!= LIST_END(head); \
+ (var) = LIST_NEXT(var, field))
+
+/*
+ * List functions.
+ */
+#define LIST_INIT(head) do { \
+ LIST_FIRST(head) = LIST_END(head); \
+} while (0)
+
+#define LIST_INSERT_AFTER(listelm, elm, field) do { \
+ if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \
+ (listelm)->field.le_next->field.le_prev = \
+ &(elm)->field.le_next; \
+ (listelm)->field.le_next = (elm); \
+ (elm)->field.le_prev = &(listelm)->field.le_next; \
+} while (0)
+
+#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.le_prev = (listelm)->field.le_prev; \
+ (elm)->field.le_next = (listelm); \
+ *(listelm)->field.le_prev = (elm); \
+ (listelm)->field.le_prev = &(elm)->field.le_next; \
+} while (0)
+
+#define LIST_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.le_next = (head)->lh_first) != NULL) \
+ (head)->lh_first->field.le_prev = &(elm)->field.le_next;\
+ (head)->lh_first = (elm); \
+ (elm)->field.le_prev = &(head)->lh_first; \
+} while (0)
+
+#define LIST_REMOVE(elm, field) do { \
+ if ((elm)->field.le_next != NULL) \
+ (elm)->field.le_next->field.le_prev = \
+ (elm)->field.le_prev; \
+ *(elm)->field.le_prev = (elm)->field.le_next; \
+} while (0)
+
+#define LIST_REPLACE(elm, elm2, field) do { \
+ if (((elm2)->field.le_next = (elm)->field.le_next) != NULL) \
+ (elm2)->field.le_next->field.le_prev = \
+ &(elm2)->field.le_next; \
+ (elm2)->field.le_prev = (elm)->field.le_prev; \
+ *(elm2)->field.le_prev = (elm2); \
+} while (0)
+
+/*
+ * Simple queue definitions.
+ */
+#define SIMPLEQ_HEAD(name, type) \
+struct name { \
+ struct type *sqh_first; /* first element */ \
+ struct type **sqh_last; /* addr of last next element */ \
+}
+
+#define SIMPLEQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).sqh_first }
+
+#define SIMPLEQ_ENTRY(type) \
+struct { \
+ struct type *sqe_next; /* next element */ \
+}
+
+/*
+ * Simple queue access methods.
+ */
+#define SIMPLEQ_FIRST(head) ((head)->sqh_first)
+#define SIMPLEQ_END(head) NULL
+#define SIMPLEQ_EMPTY(head) (SIMPLEQ_FIRST(head) == SIMPLEQ_END(head))
+#define SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next)
+
+#define SIMPLEQ_FOREACH(var, head, field) \
+ for((var) = SIMPLEQ_FIRST(head); \
+ (var) != SIMPLEQ_END(head); \
+ (var) = SIMPLEQ_NEXT(var, field))
+
+/*
+ * Simple queue functions.
+ */
+#define SIMPLEQ_INIT(head) do { \
+ (head)->sqh_first = NULL; \
+ (head)->sqh_last = &(head)->sqh_first; \
+} while (0)
+
+#define SIMPLEQ_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+ (head)->sqh_first = (elm); \
+} while (0)
+
+#define SIMPLEQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.sqe_next = NULL; \
+ *(head)->sqh_last = (elm); \
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+} while (0)
+
+#define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+ (listelm)->field.sqe_next = (elm); \
+} while (0)
+
+#define SIMPLEQ_REMOVE_HEAD(head, elm, field) do { \
+ if (((head)->sqh_first = (elm)->field.sqe_next) == NULL) \
+ (head)->sqh_last = &(head)->sqh_first; \
+} while (0)
+
+/*
+ * Tail queue definitions.
+ */
+#define TAILQ_HEAD(name, type) \
+struct name { \
+ struct type *tqh_first; /* first element */ \
+ struct type **tqh_last; /* addr of last next element */ \
+}
+
+#define TAILQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).tqh_first }
+
+#define TAILQ_ENTRY(type) \
+struct { \
+ struct type *tqe_next; /* next element */ \
+ struct type **tqe_prev; /* address of previous next element */ \
+}
+
+/*
+ * tail queue access methods
+ */
+#define TAILQ_FIRST(head) ((head)->tqh_first)
+#define TAILQ_END(head) NULL
+#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
+#define TAILQ_LAST(head, headname) \
+ (*(((struct headname *)((head)->tqh_last))->tqh_last))
+/* XXX */
+#define TAILQ_PREV(elm, headname, field) \
+ (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
+#define TAILQ_EMPTY(head) \
+ (TAILQ_FIRST(head) == TAILQ_END(head))
+
+#define TAILQ_FOREACH(var, head, field) \
+ for((var) = TAILQ_FIRST(head); \
+ (var) != TAILQ_END(head); \
+ (var) = TAILQ_NEXT(var, field))
+
+#define TAILQ_FOREACH_REVERSE(var, head, field, headname) \
+ for((var) = TAILQ_LAST(head, headname); \
+ (var) != TAILQ_END(head); \
+ (var) = TAILQ_PREV(var, headname, field))
+
+/*
+ * Tail queue functions.
+ */
+#define TAILQ_INIT(head) do { \
+ (head)->tqh_first = NULL; \
+ (head)->tqh_last = &(head)->tqh_first; \
+} while (0)
+
+#define TAILQ_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \
+ (head)->tqh_first->field.tqe_prev = \
+ &(elm)->field.tqe_next; \
+ else \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+ (head)->tqh_first = (elm); \
+ (elm)->field.tqe_prev = &(head)->tqh_first; \
+} while (0)
+
+#define TAILQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.tqe_next = NULL; \
+ (elm)->field.tqe_prev = (head)->tqh_last; \
+ *(head)->tqh_last = (elm); \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+} while (0)
+
+#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\
+ (elm)->field.tqe_next->field.tqe_prev = \
+ &(elm)->field.tqe_next; \
+ else \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+ (listelm)->field.tqe_next = (elm); \
+ (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \
+} while (0)
+
+#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
+ (elm)->field.tqe_next = (listelm); \
+ *(listelm)->field.tqe_prev = (elm); \
+ (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
+} while (0)
+
+#define TAILQ_REMOVE(head, elm, field) do { \
+ if (((elm)->field.tqe_next) != NULL) \
+ (elm)->field.tqe_next->field.tqe_prev = \
+ (elm)->field.tqe_prev; \
+ else \
+ (head)->tqh_last = (elm)->field.tqe_prev; \
+ *(elm)->field.tqe_prev = (elm)->field.tqe_next; \
+} while (0)
+
+#define TAILQ_REPLACE(head, elm, elm2, field) do { \
+ if (((elm2)->field.tqe_next = (elm)->field.tqe_next) != NULL) \
+ (elm2)->field.tqe_next->field.tqe_prev = \
+ &(elm2)->field.tqe_next; \
+ else \
+ (head)->tqh_last = &(elm2)->field.tqe_next; \
+ (elm2)->field.tqe_prev = (elm)->field.tqe_prev; \
+ *(elm2)->field.tqe_prev = (elm2); \
+} while (0)
+
+/*
+ * Circular queue definitions.
+ */
+#define CIRCLEQ_HEAD(name, type) \
+struct name { \
+ struct type *cqh_first; /* first element */ \
+ struct type *cqh_last; /* last element */ \
+}
+
+#define CIRCLEQ_HEAD_INITIALIZER(head) \
+ { CIRCLEQ_END(&head), CIRCLEQ_END(&head) }
+
+#define CIRCLEQ_ENTRY(type) \
+struct { \
+ struct type *cqe_next; /* next element */ \
+ struct type *cqe_prev; /* previous element */ \
+}
+
+/*
+ * Circular queue access methods
+ */
+#define CIRCLEQ_FIRST(head) ((head)->cqh_first)
+#define CIRCLEQ_LAST(head) ((head)->cqh_last)
+#define CIRCLEQ_END(head) ((void *)(head))
+#define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next)
+#define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev)
+#define CIRCLEQ_EMPTY(head) \
+ (CIRCLEQ_FIRST(head) == CIRCLEQ_END(head))
+
+#define CIRCLEQ_FOREACH(var, head, field) \
+ for((var) = CIRCLEQ_FIRST(head); \
+ (var) != CIRCLEQ_END(head); \
+ (var) = CIRCLEQ_NEXT(var, field))
+
+#define CIRCLEQ_FOREACH_REVERSE(var, head, field) \
+ for((var) = CIRCLEQ_LAST(head); \
+ (var) != CIRCLEQ_END(head); \
+ (var) = CIRCLEQ_PREV(var, field))
+
+/*
+ * Circular queue functions.
+ */
+#define CIRCLEQ_INIT(head) do { \
+ (head)->cqh_first = CIRCLEQ_END(head); \
+ (head)->cqh_last = CIRCLEQ_END(head); \
+} while (0)
+
+#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ (elm)->field.cqe_next = (listelm)->field.cqe_next; \
+ (elm)->field.cqe_prev = (listelm); \
+ if ((listelm)->field.cqe_next == CIRCLEQ_END(head)) \
+ (head)->cqh_last = (elm); \
+ else \
+ (listelm)->field.cqe_next->field.cqe_prev = (elm); \
+ (listelm)->field.cqe_next = (elm); \
+} while (0)
+
+#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
+ (elm)->field.cqe_next = (listelm); \
+ (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \
+ if ((listelm)->field.cqe_prev == CIRCLEQ_END(head)) \
+ (head)->cqh_first = (elm); \
+ else \
+ (listelm)->field.cqe_prev->field.cqe_next = (elm); \
+ (listelm)->field.cqe_prev = (elm); \
+} while (0)
+
+#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
+ (elm)->field.cqe_next = (head)->cqh_first; \
+ (elm)->field.cqe_prev = CIRCLEQ_END(head); \
+ if ((head)->cqh_last == CIRCLEQ_END(head)) \
+ (head)->cqh_last = (elm); \
+ else \
+ (head)->cqh_first->field.cqe_prev = (elm); \
+ (head)->cqh_first = (elm); \
+} while (0)
+
+#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.cqe_next = CIRCLEQ_END(head); \
+ (elm)->field.cqe_prev = (head)->cqh_last; \
+ if ((head)->cqh_first == CIRCLEQ_END(head)) \
+ (head)->cqh_first = (elm); \
+ else \
+ (head)->cqh_last->field.cqe_next = (elm); \
+ (head)->cqh_last = (elm); \
+} while (0)
+
+#define CIRCLEQ_REMOVE(head, elm, field) do { \
+ if ((elm)->field.cqe_next == CIRCLEQ_END(head)) \
+ (head)->cqh_last = (elm)->field.cqe_prev; \
+ else \
+ (elm)->field.cqe_next->field.cqe_prev = \
+ (elm)->field.cqe_prev; \
+ if ((elm)->field.cqe_prev == CIRCLEQ_END(head)) \
+ (head)->cqh_first = (elm)->field.cqe_next; \
+ else \
+ (elm)->field.cqe_prev->field.cqe_next = \
+ (elm)->field.cqe_next; \
+} while (0)
+
+#define CIRCLEQ_REPLACE(head, elm, elm2, field) do { \
+ if (((elm2)->field.cqe_next = (elm)->field.cqe_next) == \
+ CIRCLEQ_END(head)) \
+ (head).cqh_last = (elm2); \
+ else \
+ (elm2)->field.cqe_next->field.cqe_prev = (elm2); \
+ if (((elm2)->field.cqe_prev = (elm)->field.cqe_prev) == \
+ CIRCLEQ_END(head)) \
+ (head).cqh_first = (elm2); \
+ else \
+ (elm2)->field.cqe_prev->field.cqe_next = (elm2); \
+} while (0)
+
+#endif /* !_SYS_QUEUE_H_ */
diff --git a/libevent/configure.in b/libevent/configure.in
new file mode 100644
index 00000000000..bc3eca1f043
--- /dev/null
+++ b/libevent/configure.in
@@ -0,0 +1,387 @@
+dnl configure.in for libevent
+dnl Dug Song <dugsong@monkey.org>
+AC_INIT(event.c)
+
+AM_INIT_AUTOMAKE(libevent,1.4.12-stable)
+AM_CONFIG_HEADER(config.h)
+dnl AM_MAINTAINER_MODE
+
+dnl Initialize prefix.
+if test "$prefix" = "NONE"; then
+ prefix="/usr/local"
+fi
+
+dnl Checks for programs.
+AC_PROG_CC
+AC_PROG_INSTALL
+AC_PROG_LN_S
+
+AC_PROG_GCC_TRADITIONAL
+if test "$GCC" = yes ; then
+ CFLAGS="$CFLAGS -Wall"
+ # And disable the strict-aliasing optimization, since it breaks
+ # our sockaddr-handling code in strange ways.
+ CFLAGS="$CFLAGS -fno-strict-aliasing"
+fi
+
+AC_ARG_ENABLE(gcc-warnings,
+ AS_HELP_STRING(--enable-gcc-warnings, enable verbose warnings with GCC))
+
+AC_PROG_LIBTOOL
+
+dnl Uncomment "AC_DISABLE_SHARED" to make shared libraries not get
+dnl built by default. You can also turn shared libs on and off from
+dnl the command line with --enable-shared and --disable-shared.
+dnl AC_DISABLE_SHARED
+AC_SUBST(LIBTOOL_DEPS)
+
+dnl Checks for libraries.
+AC_CHECK_LIB(socket, socket)
+AC_CHECK_LIB(resolv, inet_aton)
+AC_CHECK_LIB(rt, clock_gettime)
+AC_CHECK_LIB(nsl, inet_ntoa)
+
+dnl Checks for header files.
+AC_HEADER_STDC
+AC_CHECK_HEADERS(fcntl.h stdarg.h inttypes.h stdint.h poll.h signal.h unistd.h sys/epoll.h sys/time.h sys/queue.h sys/event.h sys/param.h sys/ioctl.h sys/select.h sys/devpoll.h port.h netinet/in6.h sys/socket.h)
+if test "x$ac_cv_header_sys_queue_h" = "xyes"; then
+ AC_MSG_CHECKING(for TAILQ_FOREACH in sys/queue.h)
+ AC_EGREP_CPP(yes,
+[
+#include <sys/queue.h>
+#ifdef TAILQ_FOREACH
+ yes
+#endif
+], [AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_TAILQFOREACH, 1,
+ [Define if TAILQ_FOREACH is defined in <sys/queue.h>])],
+ AC_MSG_RESULT(no)
+ )
+fi
+
+if test "x$ac_cv_header_sys_time_h" = "xyes"; then
+ AC_MSG_CHECKING(for timeradd in sys/time.h)
+ AC_EGREP_CPP(yes,
+[
+#include <sys/time.h>
+#ifdef timeradd
+ yes
+#endif
+], [ AC_DEFINE(HAVE_TIMERADD, 1,
+ [Define if timeradd is defined in <sys/time.h>])
+ AC_MSG_RESULT(yes)] ,AC_MSG_RESULT(no)
+)
+fi
+
+if test "x$ac_cv_header_sys_time_h" = "xyes"; then
+ AC_MSG_CHECKING(for timercmp in sys/time.h)
+ AC_EGREP_CPP(yes,
+[
+#include <sys/time.h>
+#ifdef timercmp
+ yes
+#endif
+], [ AC_DEFINE(HAVE_TIMERCMP, 1,
+ [Define if timercmp is defined in <sys/time.h>])
+ AC_MSG_RESULT(yes)] ,AC_MSG_RESULT(no)
+)
+fi
+
+if test "x$ac_cv_header_sys_time_h" = "xyes"; then
+ AC_MSG_CHECKING(for timerclear in sys/time.h)
+ AC_EGREP_CPP(yes,
+[
+#include <sys/time.h>
+#ifdef timerclear
+ yes
+#endif
+], [ AC_DEFINE(HAVE_TIMERCLEAR, 1,
+ [Define if timerclear is defined in <sys/time.h>])
+ AC_MSG_RESULT(yes)] ,AC_MSG_RESULT(no)
+)
+fi
+
+if test "x$ac_cv_header_sys_time_h" = "xyes"; then
+ AC_MSG_CHECKING(for timerisset in sys/time.h)
+ AC_EGREP_CPP(yes,
+[
+#include <sys/time.h>
+#ifdef timerisset
+ yes
+#endif
+], [ AC_DEFINE(HAVE_TIMERISSET, 1,
+ [Define if timerisset is defined in <sys/time.h>])
+ AC_MSG_RESULT(yes)] ,AC_MSG_RESULT(no)
+)
+fi
+
+dnl - check if the macro WIN32 is defined on this compiler.
+dnl - (this is how we check for a windows version of GCC)
+AC_MSG_CHECKING(for WIN32)
+AC_TRY_COMPILE(,
+ [
+#ifndef WIN32
+die horribly
+#endif
+ ],
+ bwin32=true; AC_MSG_RESULT(yes),
+ bwin32=false; AC_MSG_RESULT(no),
+)
+
+AM_CONDITIONAL(BUILD_WIN32, test x$bwin32 = xtrue)
+
+dnl Checks for typedefs, structures, and compiler characteristics.
+AC_C_CONST
+AC_C_INLINE
+AC_HEADER_TIME
+
+dnl Checks for library functions.
+AC_CHECK_FUNCS(gettimeofday vasprintf fcntl clock_gettime strtok_r strsep getaddrinfo getnameinfo strlcpy inet_ntop signal sigaction strtoll)
+
+AC_CHECK_SIZEOF(long)
+
+if test "x$ac_cv_func_clock_gettime" = "xyes"; then
+ AC_DEFINE(DNS_USE_CPU_CLOCK_FOR_ID, 1, [Define if clock_gettime is available in libc])
+else
+ AC_DEFINE(DNS_USE_GETTIMEOFDAY_FOR_ID, 1, [Define if no secure id variant is available])
+fi
+
+AC_MSG_CHECKING(for F_SETFD in fcntl.h)
+AC_EGREP_CPP(yes,
+[
+#define _GNU_SOURCE
+#include <fcntl.h>
+#ifdef F_SETFD
+yes
+#endif
+], [ AC_DEFINE(HAVE_SETFD, 1,
+ [Define if F_SETFD is defined in <fcntl.h>])
+ AC_MSG_RESULT(yes) ], AC_MSG_RESULT(no))
+
+needsignal=no
+haveselect=no
+AC_CHECK_FUNCS(select, [haveselect=yes], )
+if test "x$haveselect" = "xyes" ; then
+ AC_LIBOBJ(select)
+ needsignal=yes
+fi
+
+havepoll=no
+AC_CHECK_FUNCS(poll, [havepoll=yes], )
+if test "x$havepoll" = "xyes" ; then
+ AC_LIBOBJ(poll)
+ needsignal=yes
+fi
+
+haveepoll=no
+AC_CHECK_FUNCS(epoll_ctl, [haveepoll=yes], )
+if test "x$haveepoll" = "xyes" ; then
+ AC_DEFINE(HAVE_EPOLL, 1,
+ [Define if your system supports the epoll system calls])
+ AC_LIBOBJ(epoll)
+ needsignal=yes
+fi
+
+havedevpoll=no
+if test "x$ac_cv_header_sys_devpoll_h" = "xyes"; then
+ AC_DEFINE(HAVE_DEVPOLL, 1,
+ [Define if /dev/poll is available])
+ AC_LIBOBJ(devpoll)
+fi
+
+havekqueue=no
+if test "x$ac_cv_header_sys_event_h" = "xyes"; then
+ AC_CHECK_FUNCS(kqueue, [havekqueue=yes], )
+ if test "x$havekqueue" = "xyes" ; then
+ AC_MSG_CHECKING(for working kqueue)
+ AC_TRY_RUN(
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/event.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+int
+main(int argc, char **argv)
+{
+ int kq;
+ int n;
+ int fd[[2]];
+ struct kevent ev;
+ struct timespec ts;
+ char buf[[8000]];
+
+ if (pipe(fd) == -1)
+ exit(1);
+ if (fcntl(fd[[1]], F_SETFL, O_NONBLOCK) == -1)
+ exit(1);
+
+ while ((n = write(fd[[1]], buf, sizeof(buf))) == sizeof(buf))
+ ;
+
+ if ((kq = kqueue()) == -1)
+ exit(1);
+
+ ev.ident = fd[[1]];
+ ev.filter = EVFILT_WRITE;
+ ev.flags = EV_ADD | EV_ENABLE;
+ n = kevent(kq, &ev, 1, NULL, 0, NULL);
+ if (n == -1)
+ exit(1);
+
+ read(fd[[0]], buf, sizeof(buf));
+
+ ts.tv_sec = 0;
+ ts.tv_nsec = 0;
+ n = kevent(kq, NULL, 0, &ev, 1, &ts);
+ if (n == -1 || n == 0)
+ exit(1);
+
+ exit(0);
+}, [AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_WORKING_KQUEUE, 1,
+ [Define if kqueue works correctly with pipes])
+ AC_LIBOBJ(kqueue)], AC_MSG_RESULT(no), AC_MSG_RESULT(no))
+ fi
+fi
+
+haveepollsyscall=no
+if test "x$ac_cv_header_sys_epoll_h" = "xyes"; then
+ if test "x$haveepoll" = "xno" ; then
+ AC_MSG_CHECKING(for epoll system call)
+ AC_TRY_RUN(
+#include <stdint.h>
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/syscall.h>
+#include <sys/epoll.h>
+#include <unistd.h>
+
+int
+epoll_create(int size)
+{
+ return (syscall(__NR_epoll_create, size));
+}
+
+int
+main(int argc, char **argv)
+{
+ int epfd;
+
+ epfd = epoll_create(256);
+ exit (epfd == -1 ? 1 : 0);
+}, [AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_EPOLL, 1,
+ [Define if your system supports the epoll system calls])
+ needsignal=yes
+ AC_LIBOBJ(epoll_sub)
+ AC_LIBOBJ(epoll)], AC_MSG_RESULT(no), AC_MSG_RESULT(no))
+ fi
+fi
+
+haveeventports=no
+AC_CHECK_FUNCS(port_create, [haveeventports=yes], )
+if test "x$haveeventports" = "xyes" ; then
+ AC_DEFINE(HAVE_EVENT_PORTS, 1,
+ [Define if your system supports event ports])
+ AC_LIBOBJ(evport)
+ needsignal=yes
+fi
+if test "x$bwin32" = "xtrue"; then
+ needsignal=yes
+fi
+if test "x$bwin32" = "xtrue"; then
+ needsignal=yes
+fi
+if test "x$needsignal" = "xyes" ; then
+ AC_LIBOBJ(signal)
+fi
+
+AC_TYPE_PID_T
+AC_TYPE_SIZE_T
+AC_CHECK_TYPES([uint64_t, uint32_t, uint16_t, uint8_t], , ,
+[#ifdef HAVE_STDINT_H
+#include <stdint.h>
+#elif defined(HAVE_INTTYPES_H)
+#include <inttypes.h>
+#endif
+#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif])
+AC_CHECK_SIZEOF(long long)
+AC_CHECK_SIZEOF(int)
+AC_CHECK_SIZEOF(short)
+AC_CHECK_TYPES([struct in6_addr], , ,
+[#ifdef WIN32
+#include <winsock2.h>
+#else
+#include <sys/types.h>
+#include <netinet/in.h>
+#include <sys/socket.h>
+#endif
+#ifdef HAVE_NETINET_IN6_H
+#include <netinet/in6.h>
+#endif])
+
+AC_MSG_CHECKING([for socklen_t])
+AC_TRY_COMPILE([
+ #include <sys/types.h>
+ #include <sys/socket.h>],
+ [socklen_t x;],
+ AC_MSG_RESULT([yes]),
+ [AC_MSG_RESULT([no])
+ AC_DEFINE(socklen_t, unsigned int,
+ [Define to unsigned int if you don't have it])]
+)
+
+AC_MSG_CHECKING([whether our compiler supports __func__])
+AC_TRY_COMPILE([],
+ [ const char *cp = __func__; ],
+ AC_MSG_RESULT([yes]),
+ AC_MSG_RESULT([no])
+ AC_MSG_CHECKING([whether our compiler supports __FUNCTION__])
+ AC_TRY_COMPILE([],
+ [ const char *cp = __FUNCTION__; ],
+ AC_MSG_RESULT([yes])
+ AC_DEFINE(__func__, __FUNCTION__,
+ [Define to appropriate substitute if compiler doesn't have __func__]),
+ AC_MSG_RESULT([no])
+ AC_DEFINE(__func__, __FILE__,
+ [Define to appropriate substitute if compiler doesn't have __func__])))
+
+
+# Add some more warnings which we use in development but not in the
+# released versions. (Some relevant gcc versions can't handle these.)
+if test x$enable_gcc_warnings = xyes; then
+
+ AC_COMPILE_IFELSE(AC_LANG_PROGRAM([], [
+#if !defined(__GNUC__) || (__GNUC__ < 4)
+#error
+#endif]), have_gcc4=yes, have_gcc4=no)
+
+ AC_COMPILE_IFELSE(AC_LANG_PROGRAM([], [
+#if !defined(__GNUC__) || (__GNUC__ < 4) || (__GNUC__ == 4 && __GNUC_MINOR__ < 2)
+#error
+#endif]), have_gcc42=yes, have_gcc42=no)
+
+ CFLAGS="$CFLAGS -W -Wfloat-equal -Wundef -Wpointer-arith -Wstrict-prototypes -Wmissing-prototypes -Wwrite-strings -Wredundant-decls -Wchar-subscripts -Wcomment -Wformat=2 -Wwrite-strings -Wmissing-declarations -Wredundant-decls -Wnested-externs -Wbad-function-cast -Wswitch-enum -Werror"
+ CFLAGS="$CFLAGS -Wno-unused-parameter -Wno-sign-compare -Wstrict-aliasing"
+
+ if test x$have_gcc4 = xyes ; then
+ # These warnings break gcc 3.3.5 and work on gcc 4.0.2
+ CFLAGS="$CFLAGS -Winit-self -Wmissing-field-initializers -Wdeclaration-after-statement"
+ #CFLAGS="$CFLAGS -Wold-style-definition"
+ fi
+
+ if test x$have_gcc42 = xyes ; then
+ # These warnings break gcc 4.0.2 and work on gcc 4.2
+ CFLAGS="$CFLAGS -Waddress -Wnormalized=id -Woverride-init"
+ fi
+
+##This will break the world on some 64-bit architectures
+# CFLAGS="$CFLAGS -Winline"
+
+fi
+
+AC_OUTPUT(Makefile test/Makefile sample/Makefile)
diff --git a/libevent/devpoll.c b/libevent/devpoll.c
new file mode 100644
index 00000000000..cbd27309079
--- /dev/null
+++ b/libevent/devpoll.c
@@ -0,0 +1,417 @@
+/*
+ * Copyright 2000-2004 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <sys/types.h>
+#include <sys/resource.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <sys/_time.h>
+#endif
+#include <sys/queue.h>
+#include <sys/devpoll.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <assert.h>
+
+#include "event.h"
+#include "event-internal.h"
+#include "evsignal.h"
+#include "log.h"
+
+/* due to limitations in the devpoll interface, we need to keep track of
+ * all file descriptors ourselves.
+ */
+struct evdevpoll {
+ struct event *evread;
+ struct event *evwrite;
+};
+
+struct devpollop {
+ struct evdevpoll *fds;
+ int nfds;
+ struct pollfd *events;
+ int nevents;
+ int dpfd;
+ struct pollfd *changes;
+ int nchanges;
+};
+
+static void *devpoll_init (struct event_base *);
+static int devpoll_add (void *, struct event *);
+static int devpoll_del (void *, struct event *);
+static int devpoll_dispatch (struct event_base *, void *, struct timeval *);
+static void devpoll_dealloc (struct event_base *, void *);
+
+const struct eventop devpollops = {
+ "devpoll",
+ devpoll_init,
+ devpoll_add,
+ devpoll_del,
+ devpoll_dispatch,
+ devpoll_dealloc,
+ 1 /* need reinit */
+};
+
+#define NEVENT 32000
+
+static int
+devpoll_commit(struct devpollop *devpollop)
+{
+ /*
+ * Due to a bug in Solaris, we have to use pwrite with an offset of 0.
+ * Write is limited to 2GB of data, until it will fail.
+ */
+ if (pwrite(devpollop->dpfd, devpollop->changes,
+ sizeof(struct pollfd) * devpollop->nchanges, 0) == -1)
+ return(-1);
+
+ devpollop->nchanges = 0;
+ return(0);
+}
+
+static int
+devpoll_queue(struct devpollop *devpollop, int fd, int events) {
+ struct pollfd *pfd;
+
+ if (devpollop->nchanges >= devpollop->nevents) {
+ /*
+ * Change buffer is full, must commit it to /dev/poll before
+ * adding more
+ */
+ if (devpoll_commit(devpollop) != 0)
+ return(-1);
+ }
+
+ pfd = &devpollop->changes[devpollop->nchanges++];
+ pfd->fd = fd;
+ pfd->events = events;
+ pfd->revents = 0;
+
+ return(0);
+}
+
+static void *
+devpoll_init(struct event_base *base)
+{
+ int dpfd, nfiles = NEVENT;
+ struct rlimit rl;
+ struct devpollop *devpollop;
+
+ /* Disable devpoll when this environment variable is set */
+ if (getenv("EVENT_NODEVPOLL"))
+ return (NULL);
+
+ if (!(devpollop = calloc(1, sizeof(struct devpollop))))
+ return (NULL);
+
+ if (getrlimit(RLIMIT_NOFILE, &rl) == 0 &&
+ rl.rlim_cur != RLIM_INFINITY)
+ nfiles = rl.rlim_cur;
+
+ /* Initialize the kernel queue */
+ if ((dpfd = open("/dev/poll", O_RDWR)) == -1) {
+ event_warn("open: /dev/poll");
+ free(devpollop);
+ return (NULL);
+ }
+
+ devpollop->dpfd = dpfd;
+
+ /* Initialize fields */
+ devpollop->events = calloc(nfiles, sizeof(struct pollfd));
+ if (devpollop->events == NULL) {
+ free(devpollop);
+ close(dpfd);
+ return (NULL);
+ }
+ devpollop->nevents = nfiles;
+
+ devpollop->fds = calloc(nfiles, sizeof(struct evdevpoll));
+ if (devpollop->fds == NULL) {
+ free(devpollop->events);
+ free(devpollop);
+ close(dpfd);
+ return (NULL);
+ }
+ devpollop->nfds = nfiles;
+
+ devpollop->changes = calloc(nfiles, sizeof(struct pollfd));
+ if (devpollop->changes == NULL) {
+ free(devpollop->fds);
+ free(devpollop->events);
+ free(devpollop);
+ close(dpfd);
+ return (NULL);
+ }
+
+ evsignal_init(base);
+
+ return (devpollop);
+}
+
+static int
+devpoll_recalc(struct event_base *base, void *arg, int max)
+{
+ struct devpollop *devpollop = arg;
+
+ if (max >= devpollop->nfds) {
+ struct evdevpoll *fds;
+ int nfds;
+
+ nfds = devpollop->nfds;
+ while (nfds <= max)
+ nfds <<= 1;
+
+ fds = realloc(devpollop->fds, nfds * sizeof(struct evdevpoll));
+ if (fds == NULL) {
+ event_warn("realloc");
+ return (-1);
+ }
+ devpollop->fds = fds;
+ memset(fds + devpollop->nfds, 0,
+ (nfds - devpollop->nfds) * sizeof(struct evdevpoll));
+ devpollop->nfds = nfds;
+ }
+
+ return (0);
+}
+
+static int
+devpoll_dispatch(struct event_base *base, void *arg, struct timeval *tv)
+{
+ struct devpollop *devpollop = arg;
+ struct pollfd *events = devpollop->events;
+ struct dvpoll dvp;
+ struct evdevpoll *evdp;
+ int i, res, timeout = -1;
+
+ if (devpollop->nchanges)
+ devpoll_commit(devpollop);
+
+ if (tv != NULL)
+ timeout = tv->tv_sec * 1000 + (tv->tv_usec + 999) / 1000;
+
+ dvp.dp_fds = devpollop->events;
+ dvp.dp_nfds = devpollop->nevents;
+ dvp.dp_timeout = timeout;
+
+ res = ioctl(devpollop->dpfd, DP_POLL, &dvp);
+
+ if (res == -1) {
+ if (errno != EINTR) {
+ event_warn("ioctl: DP_POLL");
+ return (-1);
+ }
+
+ evsignal_process(base);
+ return (0);
+ } else if (base->sig.evsignal_caught) {
+ evsignal_process(base);
+ }
+
+ event_debug(("%s: devpoll_wait reports %d", __func__, res));
+
+ for (i = 0; i < res; i++) {
+ int which = 0;
+ int what = events[i].revents;
+ struct event *evread = NULL, *evwrite = NULL;
+
+ assert(events[i].fd < devpollop->nfds);
+ evdp = &devpollop->fds[events[i].fd];
+
+ if (what & POLLHUP)
+ what |= POLLIN | POLLOUT;
+ else if (what & POLLERR)
+ what |= POLLIN | POLLOUT;
+
+ if (what & POLLIN) {
+ evread = evdp->evread;
+ which |= EV_READ;
+ }
+
+ if (what & POLLOUT) {
+ evwrite = evdp->evwrite;
+ which |= EV_WRITE;
+ }
+
+ if (!which)
+ continue;
+
+ if (evread != NULL && !(evread->ev_events & EV_PERSIST))
+ event_del(evread);
+ if (evwrite != NULL && evwrite != evread &&
+ !(evwrite->ev_events & EV_PERSIST))
+ event_del(evwrite);
+
+ if (evread != NULL)
+ event_active(evread, EV_READ, 1);
+ if (evwrite != NULL)
+ event_active(evwrite, EV_WRITE, 1);
+ }
+
+ return (0);
+}
+
+
+static int
+devpoll_add(void *arg, struct event *ev)
+{
+ struct devpollop *devpollop = arg;
+ struct evdevpoll *evdp;
+ int fd, events;
+
+ if (ev->ev_events & EV_SIGNAL)
+ return (evsignal_add(ev));
+
+ fd = ev->ev_fd;
+ if (fd >= devpollop->nfds) {
+ /* Extend the file descriptor array as necessary */
+ if (devpoll_recalc(ev->ev_base, devpollop, fd) == -1)
+ return (-1);
+ }
+ evdp = &devpollop->fds[fd];
+
+ /*
+ * It's not necessary to OR the existing read/write events that we
+ * are currently interested in with the new event we are adding.
+ * The /dev/poll driver ORs any new events with the existing events
+ * that it has cached for the fd.
+ */
+
+ events = 0;
+ if (ev->ev_events & EV_READ) {
+ if (evdp->evread && evdp->evread != ev) {
+ /* There is already a different read event registered */
+ return(-1);
+ }
+ events |= POLLIN;
+ }
+
+ if (ev->ev_events & EV_WRITE) {
+ if (evdp->evwrite && evdp->evwrite != ev) {
+ /* There is already a different write event registered */
+ return(-1);
+ }
+ events |= POLLOUT;
+ }
+
+ if (devpoll_queue(devpollop, fd, events) != 0)
+ return(-1);
+
+ /* Update events responsible */
+ if (ev->ev_events & EV_READ)
+ evdp->evread = ev;
+ if (ev->ev_events & EV_WRITE)
+ evdp->evwrite = ev;
+
+ return (0);
+}
+
+static int
+devpoll_del(void *arg, struct event *ev)
+{
+ struct devpollop *devpollop = arg;
+ struct evdevpoll *evdp;
+ int fd, events;
+ int needwritedelete = 1, needreaddelete = 1;
+
+ if (ev->ev_events & EV_SIGNAL)
+ return (evsignal_del(ev));
+
+ fd = ev->ev_fd;
+ if (fd >= devpollop->nfds)
+ return (0);
+ evdp = &devpollop->fds[fd];
+
+ events = 0;
+ if (ev->ev_events & EV_READ)
+ events |= POLLIN;
+ if (ev->ev_events & EV_WRITE)
+ events |= POLLOUT;
+
+ /*
+ * The only way to remove an fd from the /dev/poll monitored set is
+ * to use POLLREMOVE by itself. This removes ALL events for the fd
+ * provided so if we care about two events and are only removing one
+ * we must re-add the other event after POLLREMOVE.
+ */
+
+ if (devpoll_queue(devpollop, fd, POLLREMOVE) != 0)
+ return(-1);
+
+ if ((events & (POLLIN|POLLOUT)) != (POLLIN|POLLOUT)) {
+ /*
+ * We're not deleting all events, so we must resubmit the
+ * event that we are still interested in if one exists.
+ */
+
+ if ((events & POLLIN) && evdp->evwrite != NULL) {
+ /* Deleting read, still care about write */
+ devpoll_queue(devpollop, fd, POLLOUT);
+ needwritedelete = 0;
+ } else if ((events & POLLOUT) && evdp->evread != NULL) {
+ /* Deleting write, still care about read */
+ devpoll_queue(devpollop, fd, POLLIN);
+ needreaddelete = 0;
+ }
+ }
+
+ if (needreaddelete)
+ evdp->evread = NULL;
+ if (needwritedelete)
+ evdp->evwrite = NULL;
+
+ return (0);
+}
+
+static void
+devpoll_dealloc(struct event_base *base, void *arg)
+{
+ struct devpollop *devpollop = arg;
+
+ evsignal_dealloc(base);
+ if (devpollop->fds)
+ free(devpollop->fds);
+ if (devpollop->events)
+ free(devpollop->events);
+ if (devpollop->changes)
+ free(devpollop->changes);
+ if (devpollop->dpfd >= 0)
+ close(devpollop->dpfd);
+
+ memset(devpollop, 0, sizeof(struct devpollop));
+ free(devpollop);
+}
diff --git a/libevent/epoll.c b/libevent/epoll.c
new file mode 100644
index 00000000000..b479b9c07e9
--- /dev/null
+++ b/libevent/epoll.c
@@ -0,0 +1,373 @@
+/*
+ * Copyright 2000-2003 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <stdint.h>
+#include <sys/types.h>
+#include <sys/resource.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <sys/_time.h>
+#endif
+#include <sys/queue.h>
+#include <sys/epoll.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#ifdef HAVE_FCNTL_H
+#include <fcntl.h>
+#endif
+
+#include "event.h"
+#include "event-internal.h"
+#include "evsignal.h"
+#include "log.h"
+
+/* due to limitations in the epoll interface, we need to keep track of
+ * all file descriptors ourselves.
+ */
+struct evepoll {
+ struct event *evread;
+ struct event *evwrite;
+};
+
+struct epollop {
+ struct evepoll *fds;
+ int nfds;
+ struct epoll_event *events;
+ int nevents;
+ int epfd;
+};
+
+static void *epoll_init (struct event_base *);
+static int epoll_add (void *, struct event *);
+static int epoll_del (void *, struct event *);
+static int epoll_dispatch (struct event_base *, void *, struct timeval *);
+static void epoll_dealloc (struct event_base *, void *);
+
+const struct eventop epollops = {
+ "epoll",
+ epoll_init,
+ epoll_add,
+ epoll_del,
+ epoll_dispatch,
+ epoll_dealloc,
+ 1 /* need reinit */
+};
+
+#ifdef HAVE_SETFD
+#define FD_CLOSEONEXEC(x) do { \
+ if (fcntl(x, F_SETFD, 1) == -1) \
+ event_warn("fcntl(%d, F_SETFD)", x); \
+} while (0)
+#else
+#define FD_CLOSEONEXEC(x)
+#endif
+
+#define NEVENT 32000
+
+/* On Linux kernels at least up to 2.6.24.4, epoll can't handle timeout
+ * values bigger than (LONG_MAX - 999ULL)/HZ. HZ in the wild can be
+ * as big as 1000, and LONG_MAX can be as small as (1<<31)-1, so the
+ * largest number of msec we can support here is 2147482. Let's
+ * round that down by 47 seconds.
+ */
+#define MAX_EPOLL_TIMEOUT_MSEC (35*60*1000)
+
+static void *
+epoll_init(struct event_base *base)
+{
+ int epfd, nfiles = NEVENT;
+ struct rlimit rl;
+ struct epollop *epollop;
+
+ /* Disable epollueue when this environment variable is set */
+ if (getenv("EVENT_NOEPOLL"))
+ return (NULL);
+
+ if (getrlimit(RLIMIT_NOFILE, &rl) == 0 &&
+ rl.rlim_cur != RLIM_INFINITY) {
+ /*
+ * Solaris is somewhat retarded - it's important to drop
+ * backwards compatibility when making changes. So, don't
+ * dare to put rl.rlim_cur here.
+ */
+ nfiles = rl.rlim_cur - 1;
+ }
+
+ /* Initialize the kernel queue */
+
+ if ((epfd = epoll_create(nfiles)) == -1) {
+ if (errno != ENOSYS)
+ event_warn("epoll_create");
+ return (NULL);
+ }
+
+ FD_CLOSEONEXEC(epfd);
+
+ if (!(epollop = calloc(1, sizeof(struct epollop))))
+ return (NULL);
+
+ epollop->epfd = epfd;
+
+ /* Initialize fields */
+ epollop->events = malloc(nfiles * sizeof(struct epoll_event));
+ if (epollop->events == NULL) {
+ free(epollop);
+ return (NULL);
+ }
+ epollop->nevents = nfiles;
+
+ epollop->fds = calloc(nfiles, sizeof(struct evepoll));
+ if (epollop->fds == NULL) {
+ free(epollop->events);
+ free(epollop);
+ return (NULL);
+ }
+ epollop->nfds = nfiles;
+
+ evsignal_init(base);
+
+ return (epollop);
+}
+
+static int
+epoll_recalc(struct event_base *base, void *arg, int max)
+{
+ struct epollop *epollop = arg;
+
+ if (max >= epollop->nfds) {
+ struct evepoll *fds;
+ int nfds;
+
+ nfds = epollop->nfds;
+ while (nfds <= max)
+ nfds <<= 1;
+
+ fds = realloc(epollop->fds, nfds * sizeof(struct evepoll));
+ if (fds == NULL) {
+ event_warn("realloc");
+ return (-1);
+ }
+ epollop->fds = fds;
+ memset(fds + epollop->nfds, 0,
+ (nfds - epollop->nfds) * sizeof(struct evepoll));
+ epollop->nfds = nfds;
+ }
+
+ return (0);
+}
+
+static int
+epoll_dispatch(struct event_base *base, void *arg, struct timeval *tv)
+{
+ struct epollop *epollop = arg;
+ struct epoll_event *events = epollop->events;
+ struct evepoll *evep;
+ int i, res, timeout = -1;
+
+ if (tv != NULL)
+ timeout = tv->tv_sec * 1000 + (tv->tv_usec + 999) / 1000;
+
+ if (timeout > MAX_EPOLL_TIMEOUT_MSEC) {
+ /* Linux kernels can wait forever if the timeout is too big;
+ * see comment on MAX_EPOLL_TIMEOUT_MSEC. */
+ timeout = MAX_EPOLL_TIMEOUT_MSEC;
+ }
+
+ res = epoll_wait(epollop->epfd, events, epollop->nevents, timeout);
+
+ if (res == -1) {
+ if (errno != EINTR) {
+ event_warn("epoll_wait");
+ return (-1);
+ }
+
+ evsignal_process(base);
+ return (0);
+ } else if (base->sig.evsignal_caught) {
+ evsignal_process(base);
+ }
+
+ event_debug(("%s: epoll_wait reports %d", __func__, res));
+
+ for (i = 0; i < res; i++) {
+ int what = events[i].events;
+ struct event *evread = NULL, *evwrite = NULL;
+ int fd = events[i].data.fd;
+
+ if (fd < 0 || fd >= epollop->nfds)
+ continue;
+ evep = &epollop->fds[fd];
+
+ if (what & (EPOLLHUP|EPOLLERR)) {
+ evread = evep->evread;
+ evwrite = evep->evwrite;
+ } else {
+ if (what & EPOLLIN) {
+ evread = evep->evread;
+ }
+
+ if (what & EPOLLOUT) {
+ evwrite = evep->evwrite;
+ }
+ }
+
+ if (!(evread||evwrite))
+ continue;
+
+ if (evread != NULL)
+ event_active(evread, EV_READ, 1);
+ if (evwrite != NULL)
+ event_active(evwrite, EV_WRITE, 1);
+ }
+
+ return (0);
+}
+
+
+static int
+epoll_add(void *arg, struct event *ev)
+{
+ struct epollop *epollop = arg;
+ struct epoll_event epev = {0, {0}};
+ struct evepoll *evep;
+ int fd, op, events;
+
+ if (ev->ev_events & EV_SIGNAL)
+ return (evsignal_add(ev));
+
+ fd = ev->ev_fd;
+ if (fd >= epollop->nfds) {
+ /* Extend the file descriptor array as necessary */
+ if (epoll_recalc(ev->ev_base, epollop, fd) == -1)
+ return (-1);
+ }
+ evep = &epollop->fds[fd];
+ op = EPOLL_CTL_ADD;
+ events = 0;
+ if (evep->evread != NULL) {
+ events |= EPOLLIN;
+ op = EPOLL_CTL_MOD;
+ }
+ if (evep->evwrite != NULL) {
+ events |= EPOLLOUT;
+ op = EPOLL_CTL_MOD;
+ }
+
+ if (ev->ev_events & EV_READ)
+ events |= EPOLLIN;
+ if (ev->ev_events & EV_WRITE)
+ events |= EPOLLOUT;
+
+ epev.data.fd = fd;
+ epev.events = events;
+ if (epoll_ctl(epollop->epfd, op, ev->ev_fd, &epev) == -1)
+ return (-1);
+
+ /* Update events responsible */
+ if (ev->ev_events & EV_READ)
+ evep->evread = ev;
+ if (ev->ev_events & EV_WRITE)
+ evep->evwrite = ev;
+
+ return (0);
+}
+
+static int
+epoll_del(void *arg, struct event *ev)
+{
+ struct epollop *epollop = arg;
+ struct epoll_event epev = {0, {0}};
+ struct evepoll *evep;
+ int fd, events, op;
+ int needwritedelete = 1, needreaddelete = 1;
+
+ if (ev->ev_events & EV_SIGNAL)
+ return (evsignal_del(ev));
+
+ fd = ev->ev_fd;
+ if (fd >= epollop->nfds)
+ return (0);
+ evep = &epollop->fds[fd];
+
+ op = EPOLL_CTL_DEL;
+ events = 0;
+
+ if (ev->ev_events & EV_READ)
+ events |= EPOLLIN;
+ if (ev->ev_events & EV_WRITE)
+ events |= EPOLLOUT;
+
+ if ((events & (EPOLLIN|EPOLLOUT)) != (EPOLLIN|EPOLLOUT)) {
+ if ((events & EPOLLIN) && evep->evwrite != NULL) {
+ needwritedelete = 0;
+ events = EPOLLOUT;
+ op = EPOLL_CTL_MOD;
+ } else if ((events & EPOLLOUT) && evep->evread != NULL) {
+ needreaddelete = 0;
+ events = EPOLLIN;
+ op = EPOLL_CTL_MOD;
+ }
+ }
+
+ epev.events = events;
+ epev.data.fd = fd;
+
+ if (needreaddelete)
+ evep->evread = NULL;
+ if (needwritedelete)
+ evep->evwrite = NULL;
+
+ if (epoll_ctl(epollop->epfd, op, fd, &epev) == -1)
+ return (-1);
+
+ return (0);
+}
+
+static void
+epoll_dealloc(struct event_base *base, void *arg)
+{
+ struct epollop *epollop = arg;
+
+ evsignal_dealloc(base);
+ if (epollop->fds)
+ free(epollop->fds);
+ if (epollop->events)
+ free(epollop->events);
+ if (epollop->epfd >= 0)
+ close(epollop->epfd);
+
+ memset(epollop, 0, sizeof(struct epollop));
+ free(epollop);
+}
diff --git a/libevent/epoll_sub.c b/libevent/epoll_sub.c
new file mode 100644
index 00000000000..431970c73a6
--- /dev/null
+++ b/libevent/epoll_sub.c
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2003 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <stdint.h>
+
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/syscall.h>
+#include <sys/epoll.h>
+#include <unistd.h>
+
+/* Raw syscall wrapper for libcs that lack an epoll_create binding. */
+int
+epoll_create(int size)
+{
+	return (syscall(__NR_epoll_create, size));
+}
+
+/* Raw syscall wrapper for libcs that lack an epoll_ctl binding. */
+int
+epoll_ctl(int epfd, int op, int fd, struct epoll_event *event)
+{
+
+	return (syscall(__NR_epoll_ctl, epfd, op, fd, event));
+}
+
+/* Raw syscall wrapper for libcs that lack an epoll_wait binding. */
+int
+epoll_wait(int epfd, struct epoll_event *events, int maxevents, int timeout)
+{
+	return (syscall(__NR_epoll_wait, epfd, events, maxevents, timeout));
+}
diff --git a/libevent/evbuffer.c b/libevent/evbuffer.c
new file mode 100644
index 00000000000..f2179a5044f
--- /dev/null
+++ b/libevent/evbuffer.c
@@ -0,0 +1,455 @@
+/*
+ * Copyright (c) 2002-2004 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef HAVE_STDARG_H
+#include <stdarg.h>
+#endif
+
+#ifdef WIN32
+#include <winsock2.h>
+#endif
+
+#include "evutil.h"
+#include "event.h"
+
+/* prototypes */
+
+void bufferevent_read_pressure_cb(struct evbuffer *, size_t, size_t, void *);
+
+/*
+ * Schedule ev with an optional timeout given in whole seconds.
+ * A timeout of 0 means "no timeout" (a NULL timeval is passed on).
+ */
+static int
+bufferevent_add(struct event *ev, int timeout)
+{
+	struct timeval tv, *ptv = NULL;
+
+	if (timeout) {
+		evutil_timerclear(&tv);
+		tv.tv_sec = timeout;
+		ptv = &tv;
+	}
+
+	return (event_add(ev, ptv));
+}
+
+/*
+ * This callback is executed when the size of the input buffer changes.
+ * We use it to apply back pressure on the reading side: once the buffer
+ * drains below the high watermark, the buffer callback is cleared and
+ * reading is rescheduled (if still enabled).
+ */
+
+void
+bufferevent_read_pressure_cb(struct evbuffer *buf, size_t old, size_t now,
+    void *arg) {
+	struct bufferevent *bufev = arg;
+	/*
+	 * If we are below the watermark then reschedule reading if it's
+	 * still enabled.
+	 */
+	if (bufev->wm_read.high == 0 || now < bufev->wm_read.high) {
+		evbuffer_setcb(buf, NULL, NULL);
+
+		if (bufev->enabled & EV_READ)
+			bufferevent_add(&bufev->ev_read, bufev->timeout_read);
+	}
+}
+
+/*
+ * Read event handler: pulls data from the fd into bufev->input, applies
+ * the read watermarks, reschedules itself, and finally invokes the user
+ * read callback.  On timeout, EOF, or error the error callback is
+ * invoked instead.
+ */
+static void
+bufferevent_readcb(int fd, short event, void *arg)
+{
+	struct bufferevent *bufev = arg;
+	int res = 0;
+	short what = EVBUFFER_READ;
+	size_t len;
+	int howmuch = -1;
+
+	if (event == EV_TIMEOUT) {
+		what |= EVBUFFER_TIMEOUT;
+		goto error;
+	}
+
+	/*
+	 * If we have a high watermark configured then we don't want to
+	 * read more data than would make us reach the watermark.
+	 */
+	if (bufev->wm_read.high != 0) {
+		howmuch = bufev->wm_read.high - EVBUFFER_LENGTH(bufev->input);
+		/* we might have lowered the watermark, stop reading */
+		if (howmuch <= 0) {
+			struct evbuffer *buf = bufev->input;
+			event_del(&bufev->ev_read);
+			evbuffer_setcb(buf,
+			    bufferevent_read_pressure_cb, bufev);
+			return;
+		}
+	}
+
+	res = evbuffer_read(bufev->input, fd, howmuch);
+	if (res == -1) {
+		if (errno == EAGAIN || errno == EINTR)
+			goto reschedule;
+		/* error case */
+		what |= EVBUFFER_ERROR;
+	} else if (res == 0) {
+		/* eof case */
+		what |= EVBUFFER_EOF;
+	}
+
+	if (res <= 0)
+		goto error;
+
+	bufferevent_add(&bufev->ev_read, bufev->timeout_read);
+
+	/* See if this callback meets the watermarks */
+	len = EVBUFFER_LENGTH(bufev->input);
+	if (bufev->wm_read.low != 0 && len < bufev->wm_read.low)
+		return;
+	if (bufev->wm_read.high != 0 && len >= bufev->wm_read.high) {
+		struct evbuffer *buf = bufev->input;
+		event_del(&bufev->ev_read);
+
+		/* Now schedule a callback for us when the buffer changes */
+		evbuffer_setcb(buf, bufferevent_read_pressure_cb, bufev);
+	}
+
+	/* Invoke the user callback - must always be called last */
+	if (bufev->readcb != NULL)
+		(*bufev->readcb)(bufev, bufev->cbarg);
+	return;
+
+ reschedule:
+	bufferevent_add(&bufev->ev_read, bufev->timeout_read);
+	return;
+
+ error:
+	(*bufev->errorcb)(bufev, what, bufev->cbarg);
+}
+
+/*
+ * Write event handler: flushes bufev->output to the fd, reschedules
+ * itself while data remains, and invokes the user write callback once
+ * the output buffer drains to (or below) the write low watermark.
+ */
+static void
+bufferevent_writecb(int fd, short event, void *arg)
+{
+	struct bufferevent *bufev = arg;
+	int res = 0;
+	short what = EVBUFFER_WRITE;
+
+	if (event == EV_TIMEOUT) {
+		what |= EVBUFFER_TIMEOUT;
+		goto error;
+	}
+
+	if (EVBUFFER_LENGTH(bufev->output)) {
+		res = evbuffer_write(bufev->output, fd);
+		if (res == -1) {
+#ifndef WIN32
+/* TODO: evbuffer uses WriteFile when WIN32 is set.  WIN32 system calls do
+ * not set errno, so this errno-based error checking is not portable. */
+			if (errno == EAGAIN ||
+			    errno == EINTR ||
+			    errno == EINPROGRESS)
+				goto reschedule;
+			/* error case */
+			what |= EVBUFFER_ERROR;
+
+#else
+				goto reschedule;
+#endif
+
+		} else if (res == 0) {
+			/* eof case */
+			what |= EVBUFFER_EOF;
+		}
+		if (res <= 0)
+			goto error;
+	}
+
+	if (EVBUFFER_LENGTH(bufev->output) != 0)
+		bufferevent_add(&bufev->ev_write, bufev->timeout_write);
+
+	/*
+	 * Invoke the user callback if our buffer is drained or below the
+	 * low watermark.
+	 */
+	if (bufev->writecb != NULL &&
+	    EVBUFFER_LENGTH(bufev->output) <= bufev->wm_write.low)
+		(*bufev->writecb)(bufev, bufev->cbarg);
+
+	return;
+
+ reschedule:
+	if (EVBUFFER_LENGTH(bufev->output) != 0)
+		bufferevent_add(&bufev->ev_write, bufev->timeout_write);
+	return;
+
+ error:
+	(*bufev->errorcb)(bufev, what, bufev->cbarg);
+}
+
+/*
+ * Create a new buffered event object.
+ *
+ * The read callback is invoked whenever we read new data.
+ * The write callback is invoked whenever the output buffer is drained.
+ * The error callback is invoked on a write/read error or on EOF.
+ *
+ * Both the read and write callbacks may be NULL.  The error callback
+ * must not be NULL and always has to be provided.
+ */
+
+struct bufferevent *
+bufferevent_new(int fd, evbuffercb readcb, evbuffercb writecb,
+    everrorcb errorcb, void *cbarg)
+{
+	struct bufferevent *bufev;
+
+	/* calloc zeroes timeouts, watermarks and the event base pointer. */
+	if ((bufev = calloc(1, sizeof(struct bufferevent))) == NULL)
+		return (NULL);
+
+	if ((bufev->input = evbuffer_new()) == NULL) {
+		free(bufev);
+		return (NULL);
+	}
+
+	if ((bufev->output = evbuffer_new()) == NULL) {
+		evbuffer_free(bufev->input);
+		free(bufev);
+		return (NULL);
+	}
+
+	event_set(&bufev->ev_read, fd, EV_READ, bufferevent_readcb, bufev);
+	event_set(&bufev->ev_write, fd, EV_WRITE, bufferevent_writecb, bufev);
+
+	bufferevent_setcb(bufev, readcb, writecb, errorcb, cbarg);
+
+	/*
+	 * Set to EV_WRITE so that using bufferevent_write is going to
+	 * trigger a callback. Reading needs to be explicitly enabled
+	 * because otherwise no data will be available.
+	 */
+	bufev->enabled = EV_WRITE;
+
+	return (bufev);
+}
+
+/* Replace the read/write/error callbacks and their shared argument. */
+void
+bufferevent_setcb(struct bufferevent *bufev,
+    evbuffercb readcb, evbuffercb writecb, everrorcb errorcb, void *cbarg)
+{
+	bufev->readcb = readcb;
+	bufev->writecb = writecb;
+	bufev->errorcb = errorcb;
+
+	bufev->cbarg = cbarg;
+}
+
+/*
+ * Switch the bufferevent to a new file descriptor.  Both events are
+ * deleted and re-initialized, and re-attached to the previously assigned
+ * event base (if any).  Note the events are not re-added here.
+ */
+void
+bufferevent_setfd(struct bufferevent *bufev, int fd)
+{
+	event_del(&bufev->ev_read);
+	event_del(&bufev->ev_write);
+
+	event_set(&bufev->ev_read, fd, EV_READ, bufferevent_readcb, bufev);
+	event_set(&bufev->ev_write, fd, EV_WRITE, bufferevent_writecb, bufev);
+	if (bufev->ev_base != NULL) {
+		event_base_set(bufev->ev_base, &bufev->ev_read);
+		event_base_set(bufev->ev_base, &bufev->ev_write);
+	}
+
+	/* might have to manually trigger event registration */
+}
+
+/*
+ * Assign the same priority to both the read and the write event.
+ * Returns 0 on success, -1 if either assignment fails.
+ */
+int
+bufferevent_priority_set(struct bufferevent *bufev, int priority)
+{
+	if (event_priority_set(&bufev->ev_read, priority) == -1)
+		return (-1);
+	if (event_priority_set(&bufev->ev_write, priority) == -1)
+		return (-1);
+
+	return (0);
+}
+
+/* Closing the file descriptor is the responsibility of the caller */
+
+/*
+ * Tear down a bufferevent: remove both events and free both buffers.
+ */
+void
+bufferevent_free(struct bufferevent *bufev)
+{
+	event_del(&bufev->ev_read);
+	event_del(&bufev->ev_write);
+
+	evbuffer_free(bufev->input);
+	evbuffer_free(bufev->output);
+
+	free(bufev);
+}
+
+/*
+ * Append data to the output buffer and, if writing is enabled and data
+ * was supplied, schedule the write event.
+ *
+ * Returns 0 on success;
+ * -1 on failure.
+ */
+
+int
+bufferevent_write(struct bufferevent *bufev, const void *data, size_t size)
+{
+	int res;
+
+	res = evbuffer_add(bufev->output, data, size);
+
+	if (res == -1)
+		return (res);
+
+	/* If everything is okay, we need to schedule a write */
+	if (size > 0 && (bufev->enabled & EV_WRITE))
+		bufferevent_add(&bufev->ev_write, bufev->timeout_write);
+
+	return (res);
+}
+
+/*
+ * Move the contents of buf onto the output buffer; buf is drained on
+ * success.  Returns 0 on success, -1 on failure.
+ */
+int
+bufferevent_write_buffer(struct bufferevent *bufev, struct evbuffer *buf)
+{
+	int res;
+
+	res = bufferevent_write(bufev, buf->buffer, buf->off);
+	if (res != -1)
+		evbuffer_drain(buf, buf->off);
+
+	return (res);
+}
+
+/*
+ * Copy at most size bytes from the input buffer into data, draining the
+ * copied bytes.  Returns the number of bytes actually transferred.
+ */
+size_t
+bufferevent_read(struct bufferevent *bufev, void *data, size_t size)
+{
+	struct evbuffer *buf = bufev->input;
+
+	if (buf->off < size)
+		size = buf->off;
+
+	/* Copy the available data to the user buffer */
+	memcpy(data, buf->buffer, size);
+
+	if (size)
+		evbuffer_drain(buf, size);
+
+	return (size);
+}
+
+/*
+ * Enable reading and/or writing, scheduling the corresponding events.
+ * Returns 0 on success, -1 if adding an event fails.
+ */
+int
+bufferevent_enable(struct bufferevent *bufev, short event)
+{
+	if (event & EV_READ) {
+		if (bufferevent_add(&bufev->ev_read, bufev->timeout_read) == -1)
+			return (-1);
+	}
+	if (event & EV_WRITE) {
+		if (bufferevent_add(&bufev->ev_write, bufev->timeout_write) == -1)
+			return (-1);
+	}
+
+	bufev->enabled |= event;
+	return (0);
+}
+
+/*
+ * Disable reading and/or writing by removing the corresponding events.
+ * Returns 0 on success, -1 if removing an event fails.
+ */
+int
+bufferevent_disable(struct bufferevent *bufev, short event)
+{
+	if (event & EV_READ) {
+		if (event_del(&bufev->ev_read) == -1)
+			return (-1);
+	}
+	if (event & EV_WRITE) {
+		if (event_del(&bufev->ev_write) == -1)
+			return (-1);
+	}
+
+	bufev->enabled &= ~event;
+	return (0);
+}
+
+/*
+ * Sets the read and write timeout for a buffered event (in seconds;
+ * 0 disables the timeout).  Events that are already pending are
+ * rescheduled with the new values.
+ */
+
+void
+bufferevent_settimeout(struct bufferevent *bufev,
+    int timeout_read, int timeout_write) {
+	bufev->timeout_read = timeout_read;
+	bufev->timeout_write = timeout_write;
+
+	if (event_pending(&bufev->ev_read, EV_READ, NULL))
+		bufferevent_add(&bufev->ev_read, timeout_read);
+	if (event_pending(&bufev->ev_write, EV_WRITE, NULL))
+		bufferevent_add(&bufev->ev_write, timeout_write);
+}
+
+/*
+ * Sets the water marks for the given direction(s) and immediately
+ * re-evaluates read pressure against the new limits.
+ */
+
+void
+bufferevent_setwatermark(struct bufferevent *bufev, short events,
+    size_t lowmark, size_t highmark)
+{
+	if (events & EV_READ) {
+		bufev->wm_read.low = lowmark;
+		bufev->wm_read.high = highmark;
+	}
+
+	if (events & EV_WRITE) {
+		bufev->wm_write.low = lowmark;
+		bufev->wm_write.high = highmark;
+	}
+
+	/* If the watermarks changed then see if we should call read again */
+	bufferevent_read_pressure_cb(bufev->input,
+	    0, EVBUFFER_LENGTH(bufev->input), bufev);
+}
+
+/*
+ * Attach the bufferevent (both its events) to a specific event base.
+ * Returns 0 on success, -1 on failure.
+ */
+int
+bufferevent_base_set(struct event_base *base, struct bufferevent *bufev)
+{
+	int res;
+
+	bufev->ev_base = base;
+
+	res = event_base_set(base, &bufev->ev_read);
+	if (res == -1)
+		return (res);
+
+	res = event_base_set(base, &bufev->ev_write);
+	return (res);
+}
diff --git a/libevent/evdns.3 b/libevent/evdns.3
new file mode 100644
index 00000000000..10414fa2efb
--- /dev/null
+++ b/libevent/evdns.3
@@ -0,0 +1,322 @@
+.\"
+.\" Copyright (c) 2006 Niels Provos <provos@citi.umich.edu>
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\"
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\" 3. The name of the author may not be used to endorse or promote products
+.\" derived from this software without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
+.\" INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+.\" AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+.\" THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+.\" EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+.\" PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+.\" OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+.\" WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+.\" OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+.\" ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+.\"
+.Dd October 7, 2006
+.Dt EVDNS 3
+.Os
+.Sh NAME
+.Nm evdns_init
+.Nm evdns_shutdown
+.Nm evdns_err_to_string
+.Nm evdns_nameserver_add
+.Nm evdns_count_nameservers
+.Nm evdns_clear_nameservers_and_suspend
+.Nm evdns_resume
+.Nm evdns_nameserver_ip_add
+.Nm evdns_resolve_ipv4
+.Nm evdns_resolve_reverse
+.Nm evdns_resolv_conf_parse
+.Nm evdns_config_windows_nameservers
+.Nm evdns_search_clear
+.Nm evdns_search_add
+.Nm evdns_search_ndots_set
+.Nm evdns_set_log_fn
+.Nd asynchronous functions for DNS resolution.
+.Sh SYNOPSIS
+.Fd #include <sys/time.h>
+.Fd #include <event.h>
+.Fd #include <evdns.h>
+.Ft int
+.Fn evdns_init
+.Ft void
+.Fn evdns_shutdown "int fail_requests"
+.Ft "const char *"
+.Fn evdns_err_to_string "int err"
+.Ft int
+.Fn evdns_nameserver_add "unsigned long int address"
+.Ft int
+.Fn evdns_count_nameservers
+.Ft int
+.Fn evdns_clear_nameservers_and_suspend
+.Ft int
+.Fn evdns_resume
+.Ft int
+.Fn evdns_nameserver_ip_add "const char *ip_as_string"
+.Ft int
+.Fn evdns_resolve_ipv4 "const char *name" "int flags" "evdns_callback_type callback" "void *ptr"
+.Ft int
+.Fn evdns_resolve_reverse "struct in_addr *in" "int flags" "evdns_callback_type callback" "void *ptr"
+.Ft int
+.Fn evdns_resolv_conf_parse "int flags" "const char *"
+.Ft void
+.Fn evdns_search_clear
+.Ft void
+.Fn evdns_search_add "const char *domain"
+.Ft void
+.Fn evdns_search_ndots_set "const int ndots"
+.Ft void
+.Fn evdns_set_log_fn "evdns_debug_log_fn_type fn"
+.Ft int
+.Fn evdns_config_windows_nameservers
+.Sh DESCRIPTION
+Welcome, gentle reader
+.Pp
+Async DNS lookups are really a whole lot harder than they should be,
+mostly stemming from the fact that the libc resolver has never been
+very good at them. Before you use this library you should see if libc
+can do the job for you with the modern async call getaddrinfo_a
+(see http://www.imperialviolet.org/page25.html#e498). Otherwise,
+please continue.
+.Pp
+This code is based on libevent and you must call event_init before
+any of the APIs in this file. You must also seed the OpenSSL random
+source if you are using OpenSSL for ids (see below).
+.Pp
+This library is designed to be included and shipped with your source
+code. You statically link with it. You should also test for the
+existence of strtok_r and define HAVE_STRTOK_R if you have it.
+.Pp
+The DNS protocol requires a good source of id numbers and these
+numbers should be unpredictable for spoofing reasons. There are
+three methods for generating them here and you must define exactly
+one of them. In increasing order of preference:
+.Pp
+.Bl -tag -width "DNS_USE_GETTIMEOFDAY_FOR_ID" -compact -offset indent
+.It DNS_USE_GETTIMEOFDAY_FOR_ID
+Using the bottom 16 bits of the usec result from gettimeofday. This
+is a pretty poor solution but should work anywhere.
+.It DNS_USE_CPU_CLOCK_FOR_ID
+Using the bottom 16 bits of the nsec result from the CPU's time
+counter. This is better, but may not work everywhere. Requires
+POSIX realtime support and you'll need to link against -lrt on
+glibc systems at least.
+.It DNS_USE_OPENSSL_FOR_ID
+Uses the OpenSSL RAND_bytes call to generate the data. You must
+have seeded the pool before making any calls to this library.
+.El
+.Pp
+The library keeps track of the state of nameservers and will avoid
+them when they go down. Otherwise it will round robin between them.
+.Pp
+Quick start guide:
+ #include "evdns.h"
+ void callback(int result, char type, int count, int ttl,
+ void *addresses, void *arg);
+ evdns_resolv_conf_parse(DNS_OPTIONS_ALL, "/etc/resolv.conf");
+ evdns_resolve("www.hostname.com", 0, callback, NULL);
+.Pp
+When the lookup is complete the callback function is called. The
+first argument will be one of the DNS_ERR_* defines in evdns.h.
+Hopefully it will be DNS_ERR_NONE, in which case type will be
+DNS_IPv4_A, count will be the number of IP addresses, ttl is the time
+which the data can be cached for (in seconds), addresses will point
+to an array of uint32_t's and arg will be whatever you passed to
+evdns_resolve.
+.Pp
+Searching:
+.Pp
+In order for this library to be a good replacement for glibc's resolver it
+supports searching. This involves setting a list of default domains, in
+which names will be queried for. The number of dots in the query name
+determines the order in which this list is used.
+.Pp
+Searching appears to be a single lookup from the point of view of the API,
+although many DNS queries may be generated from a single call to
+evdns_resolve. Searching can also drastically slow down the resolution
+of names.
+.Pp
+To disable searching:
+.Bl -enum -compact -offset indent
+.It
+Never set it up. If you never call
+.Fn evdns_resolv_conf_parse,
+.Fn evdns_init,
+or
+.Fn evdns_search_add
+then no searching will occur.
+.It
+If you do call
+.Fn evdns_resolv_conf_parse
+then don't pass
+.Va DNS_OPTION_SEARCH
+(or
+.Va DNS_OPTIONS_ALL,
+which implies it).
+.It
+When calling
+.Fn evdns_resolve,
+pass the
+.Va DNS_QUERY_NO_SEARCH
+flag.
+.El
+.Pp
+The order of searches depends on the number of dots in the name. If the
+number is greater than the ndots setting then the name is first tried
+globally. Otherwise each search domain is appended in turn.
+.Pp
+The ndots setting can either be set from a resolv.conf, or by calling
+evdns_search_ndots_set.
+.Pp
+For example, with ndots set to 1 (the default) and a search domain list of
+["myhome.net"]:
+ Query: www
+ Order: www.myhome.net, www.
+.Pp
+ Query: www.abc
+ Order: www.abc., www.abc.myhome.net
+.Pp
+.Sh API reference
+.Pp
+.Bl -tag -width 0123456
+.It Ft int Fn evdns_init
+Initializes support for non-blocking name resolution by calling
+.Fn evdns_resolv_conf_parse
+on UNIX and
+.Fn evdns_config_windows_nameservers
+on Windows.
+.It Ft int Fn evdns_nameserver_add "unsigned long int address"
+Add a nameserver. The address should be an IP address in
+network byte order. The type of address is chosen so that
+it matches in_addr.s_addr.
+Returns non-zero on error.
+.It Ft int Fn evdns_nameserver_ip_add "const char *ip_as_string"
+This wraps the above function by parsing a string as an IP
+address and adds it as a nameserver.
+Returns non-zero on error
+.It Ft int Fn evdns_resolve "const char *name" "int flags" "evdns_callback_type callback" "void *ptr"
+Resolve a name. The name parameter should be a DNS name.
+The flags parameter should be 0, or DNS_QUERY_NO_SEARCH
+which disables searching for this query. (see defn of
+searching above).
+.Pp
+The callback argument is a function which is called when
+this query completes and ptr is an argument which is passed
+to that callback function.
+.Pp
+Returns non-zero on error
+.It Ft void Fn evdns_search_clear
+Clears the list of search domains
+.It Ft void Fn evdns_search_add "const char *domain"
+Add a domain to the list of search domains
+.It Ft void Fn evdns_search_ndots_set "int ndots"
+Set the number of dots which, when found in a name, causes
+the first query to be without any search domain.
+.It Ft int Fn evdns_count_nameservers "void"
+Return the number of configured nameservers (not necessarily the
+number of running nameservers). This is useful for double-checking
+whether our calls to the various nameserver configuration functions
+have been successful.
+.It Ft int Fn evdns_clear_nameservers_and_suspend "void"
+Remove all currently configured nameservers, and suspend all pending
+resolves. Resolves will not necessarily be re-attempted until
+evdns_resume() is called.
+.It Ft int Fn evdns_resume "void"
+Re-attempt resolves left in limbo after an earlier call to
+evdns_clear_nameservers_and_suspend().
+.It Ft int Fn evdns_config_windows_nameservers "void"
+Attempt to configure a set of nameservers based on platform settings on
+a win32 host. Preferentially tries to use GetNetworkParams; if that fails,
+looks in the registry. Returns 0 on success, nonzero on failure.
+.It Ft int Fn evdns_resolv_conf_parse "int flags" "const char *filename"
+Parse a resolv.conf like file from the given filename.
+.Pp
+See the man page for resolv.conf for the format of this file.
+The flags argument determines what information is parsed from
+this file:
+.Bl -tag -width "DNS_OPTION_NAMESERVERS" -offset indent -compact -nested
+.It DNS_OPTION_SEARCH
+domain, search and ndots options
+.It DNS_OPTION_NAMESERVERS
+nameserver lines
+.It DNS_OPTION_MISC
+timeout and attempts options
+.It DNS_OPTIONS_ALL
+all of the above
+.El
+.Pp
+The following directives are not parsed from the file:
+ sortlist, rotate, no-check-names, inet6, debug
+.Pp
+Returns non-zero on error:
+.Bl -tag -width "0" -offset indent -compact -nested
+.It 0
+no errors
+.It 1
+failed to open file
+.It 2
+failed to stat file
+.It 3
+file too large
+.It 4
+out of memory
+.It 5
+short read from file
+.El
+.El
+.Sh Internals:
+Requests are kept in two queues. The first is the inflight queue. In
+this queue requests have an allocated transaction id and nameserver.
+They will soon be transmitted if they haven't already been.
+.Pp
+The second is the waiting queue. The size of the inflight ring is
+limited and all other requests wait in waiting queue for space. This
+bounds the number of concurrent requests so that we don't flood the
+nameserver. Several algorithms require a full walk of the inflight
+queue and so bounding its size keeps things going nicely under huge
+(many thousands of requests) loads.
+.Pp
+If a nameserver loses too many requests it is considered down and we
+try not to use it. After a while we send a probe to that nameserver
+(a lookup for google.com) and, if it replies, we consider it working
+again. If the nameserver fails a probe we wait longer to try again
+with the next probe.
+.Sh SEE ALSO
+.Xr event 3 ,
+.Xr gethostbyname 3 ,
+.Xr resolv.conf 5
+.Sh HISTORY
+The
+.Nm evdns
+API was developed by Adam Langley on top of the
+.Nm libevent
+API.
+The code was integrated into
+.Nm Tor
+by Nick Mathewson and finally put into
+.Nm libevent
+itself by Niels Provos.
+.Sh AUTHORS
+The
+.Nm evdns
+API and code was written by Adam Langley with significant
+contributions by Nick Mathewson.
+.Sh BUGS
+This documentation is neither complete nor authoritative.
+If you are in doubt about the usage of this API then
+check the source code to find out how it works, write
+up the missing piece of documentation and send it to
+me for inclusion in this man page.
diff --git a/libevent/evdns.c b/libevent/evdns.c
new file mode 100644
index 00000000000..e13357f1596
--- /dev/null
+++ b/libevent/evdns.c
@@ -0,0 +1,3200 @@
+/* $Id: evdns.c 6979 2006-08-04 18:31:13Z nickm $ */
+
+/* The original version of this module was written by Adam Langley; for
+ * a history of modifications, check out the subversion logs.
+ *
+ * When editing this module, try to keep it re-mergeable by Adam. Don't
+ * reformat the whitespace, add Tor dependencies, or so on.
+ *
+ * TODO:
+ * - Support IPv6 and PTR records.
+ * - Replace all externally visible magic numbers with #defined constants.
+ * - Write documentation for APIs of all external functions.
+ */
+
+/* Async DNS Library
+ * Adam Langley <agl@imperialviolet.org>
+ * http://www.imperialviolet.org/eventdns.html
+ * Public Domain code
+ *
+ * This software is Public Domain. To view a copy of the public domain dedication,
+ * visit http://creativecommons.org/licenses/publicdomain/ or send a letter to
+ * Creative Commons, 559 Nathan Abbott Way, Stanford, California 94305, USA.
+ *
+ * I ask and expect, but do not require, that all derivative works contain an
+ * attribution similar to:
+ * Parts developed by Adam Langley <agl@imperialviolet.org>
+ *
+ * You may wish to replace the word "Parts" with something else depending on
+ * the amount of original code.
+ *
+ * (Derivative works does not include programs which link against, run or include
+ * the source verbatim in their source distributions)
+ *
+ * Version: 0.1b
+ */
+
+#include <sys/types.h>
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#ifdef DNS_USE_FTIME_FOR_ID
+#include <sys/timeb.h>
+#endif
+
+#ifndef DNS_USE_CPU_CLOCK_FOR_ID
+#ifdef HAVE_GETTIMEOFDAY
+#define DNS_USE_GETTIMEOFDAY_FOR_ID 1
+#endif
+#endif
+
+#ifndef DNS_USE_CPU_CLOCK_FOR_ID
+#ifdef HAVE_GETTIMEOFDAY
+#define DNS_USE_GETTIMEOFDAY_FOR_ID 1
+#endif
+#endif
+
+#ifndef DNS_USE_CPU_CLOCK_FOR_ID
+#ifndef DNS_USE_GETTIMEOFDAY_FOR_ID
+#ifndef DNS_USE_OPENSSL_FOR_ID
+#ifndef DNS_USE_FTIME_FOR_ID
+#error Must configure at least one id generation method.
+#error Please see the documentation.
+#endif
+#endif
+#endif
+#endif
+
+/* #define _POSIX_C_SOURCE 200507 */
+#define _GNU_SOURCE
+
+#ifdef DNS_USE_CPU_CLOCK_FOR_ID
+#ifdef DNS_USE_OPENSSL_FOR_ID
+#error Multiple id options selected
+#endif
+#ifdef DNS_USE_GETTIMEOFDAY_FOR_ID
+#error Multiple id options selected
+#endif
+#include <time.h>
+#endif
+
+#ifdef DNS_USE_OPENSSL_FOR_ID
+#ifdef DNS_USE_GETTIMEOFDAY_FOR_ID
+#error Multiple id options selected
+#endif
+#include <openssl/rand.h>
+#endif
+
+#ifndef _FORTIFY_SOURCE
+#define _FORTIFY_SOURCE 3
+#endif
+
+#include <string.h>
+#include <fcntl.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#ifdef HAVE_STDINT_H
+#include <stdint.h>
+#endif
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <assert.h>
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#include <limits.h>
+#include <sys/stat.h>
+#include <ctype.h>
+#include <stdio.h>
+#include <stdarg.h>
+
+#include "evdns.h"
+#include "evutil.h"
+#include "log.h"
+#ifdef WIN32
+#include <winsock2.h>
+#include <windows.h>
+#include <iphlpapi.h>
+#include <io.h>
+#else
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#endif
+
+#ifdef HAVE_NETINET_IN6_H
+#include <netinet/in6.h>
+#endif
+
+#define EVDNS_LOG_DEBUG 0
+#define EVDNS_LOG_WARN 1
+
+#ifndef HOST_NAME_MAX
+#define HOST_NAME_MAX 255
+#endif
+
+#include <stdio.h>
+
+#undef MIN
+#define MIN(a,b) ((a)<(b)?(a):(b))
+
+#ifdef __USE_ISOC99B
+/* libevent doesn't work without this */
+typedef ev_uint8_t u_char;
+typedef unsigned int uint;
+#endif
+#include <event.h>
+
+#define u64 ev_uint64_t
+#define u32 ev_uint32_t
+#define u16 ev_uint16_t
+#define u8 ev_uint8_t
+
+#ifdef WIN32
+#define open _open
+#define read _read
+#define close _close
+#define strdup _strdup
+#endif
+
+#define MAX_ADDRS 32 /* maximum number of addresses from a single packet */
+/* which we bother recording */
+
+#define TYPE_A EVDNS_TYPE_A
+#define TYPE_CNAME 5
+#define TYPE_PTR EVDNS_TYPE_PTR
+#define TYPE_AAAA EVDNS_TYPE_AAAA
+
+#define CLASS_INET EVDNS_CLASS_INET
+
+/* A single outstanding DNS query, kept on either the inflight or the
+ * waiting circular list. */
+struct request {
+	u8 *request; /* the dns packet data */
+	unsigned int request_len;
+	int reissue_count;
+	int tx_count; /* the number of times that this packet has been sent */
+	unsigned int request_type; /* TYPE_PTR or TYPE_A */
+	void *user_pointer; /* the pointer given to us for this request */
+	evdns_callback_type user_callback;
+	struct nameserver *ns; /* the server which we last sent it */
+
+	/* elements used by the searching code */
+	int search_index;
+	struct search_state *search_state;
+	char *search_origname; /* needs to be free()ed */
+	int search_flags;
+
+	/* these objects are kept in a circular list */
+	struct request *next, *prev;
+
+	struct event timeout_event;
+
+	u16 trans_id; /* the transaction id */
+	char request_appended; /* true if the request pointer is data which follows this struct */
+	char transmit_me; /* needs to be transmitted */
+};
+
+#ifndef HAVE_STRUCT_IN6_ADDR
+/* Minimal fallback definition for platforms lacking struct in6_addr. */
+struct in6_addr {
+	u8 s6_addr[16];
+};
+#endif
+
+/* Decoded answer data parsed out of a DNS reply packet. */
+struct reply {
+	unsigned int type;
+	unsigned int have_answer;
+	union {
+		struct {
+			u32 addrcount;
+			u32 addresses[MAX_ADDRS];
+		} a;
+		struct {
+			u32 addrcount;
+			struct in6_addr addresses[MAX_ADDRS];
+		} aaaa;
+		struct {
+			char name[HOST_NAME_MAX];
+		} ptr;
+	} data;
+};
+
+/* Per-nameserver socket state plus liveness and backoff tracking. */
+struct nameserver {
+	int socket; /* a connected UDP socket */
+	u32 address;
+	u16 port;
+	int failed_times; /* number of times which we have given this server a chance */
+	int timedout; /* number of times in a row a request has timed out */
+	struct event event;
+	/* these objects are kept in a circular list */
+	struct nameserver *next, *prev;
+	struct event timeout_event; /* used to keep the timeout for */
+				    /* when we next probe this server. */
+				    /* Valid if state == 0 */
+	char state; /* zero if we think that this server is down */
+	char choked; /* true if we have an EAGAIN from this server's socket */
+	char write_waiting; /* true if we are waiting for EV_WRITE events */
+};
+
+/* Global circular lists: inflight requests, waiting requests, nameservers. */
+static struct request *req_head = NULL, *req_waiting_head = NULL;
+static struct nameserver *server_head = NULL;
+
+/* Represents a local port where we're listening for DNS requests. Right now, */
+/* only UDP is supported.  NOTE(review): refcnt presumably keeps the port */
+/* alive while replies are still pending -- confirm against the callers. */
+struct evdns_server_port {
+	int socket; /* socket we use to read queries and write replies. */
+	int refcnt; /* reference count. */
+	char choked; /* Are we currently blocked from writing? */
+	char closing; /* Are we trying to close this port, pending writes? */
+	evdns_request_callback_fn_type user_callback; /* Fn to handle requests */
+	void *user_data; /* Opaque pointer passed to user_callback */
+	struct event event; /* Read/write event */
+	/* circular list of replies that we want to write. */
+	struct server_request *pending_replies;
+};
+
+/* Represents part of a reply being built. (That is, a single RR.) */
+struct server_reply_item {
+	struct server_reply_item *next; /* next item in sequence. */
+	char *name; /* name part of the RR */
+	u16 type : 16; /* The RR type */
+	u16 class : 16; /* The RR class (usually CLASS_INET) */
+	u32 ttl; /* The RR TTL */
+	char is_name; /* True iff data is a label */
+	/* NOTE(review): datalen is unsigned; the "-1" below can only mean */
+	/* 0xffff as a sentinel — confirm against the code that sets it. */
+	u16 datalen; /* Length of data; -1 if data is a label */
+	void *data; /* The contents of the RR */
+};
+
+/* Represents a request that we've received as a DNS server, and holds */
+/* the components of the reply as we're constructing it. */
+struct server_request {
+	/* Pointers to the next and previous entries on the list of replies */
+	/* that we're waiting to write. Only set if we have tried to respond */
+	/* and gotten EAGAIN. */
+	struct server_request *next_pending;
+	struct server_request *prev_pending;
+
+	u16 trans_id; /* Transaction id. */
+	struct evdns_server_port *port; /* Which port received this request on? */
+	struct sockaddr_storage addr; /* Where to send the response */
+	socklen_t addrlen; /* length of addr */
+
+	int n_answer; /* how many answer RRs have been set? */
+	int n_authority; /* how many authority RRs have been set? */
+	int n_additional; /* how many additional RRs have been set? */
+
+	struct server_reply_item *answer; /* linked list of answer RRs */
+	struct server_reply_item *authority; /* linked list of authority RRs */
+	struct server_reply_item *additional; /* linked list of additional RRs */
+
+	/* Constructed response. Only set once we're ready to send a reply. */
+	/* Once this is set, the RR fields are cleared, and no more should be set. */
+	char *response;
+	size_t response_len;
+
+	/* Caller-visible fields: flags, questions. */
+	/* `base` MUST stay last-positioned consistently with TO_SERVER_REQUEST. */
+	struct evdns_server_request base;
+};
+
+/* helper macro */
+/* NOTE(review): this is the classic null-pointer offsetof emulation; */
+/* strictly it is undefined behavior in ISO C — <stddef.h> offsetof is */
+/* the conforming replacement if a cleanup pass is ever done here. */
+#define OFFSET_OF(st, member) ((off_t) (((char*)&((st*)0)->member)-(char*)0))
+
+/* Given a pointer to an evdns_server_request, get the corresponding */
+/* server_request. */
+#define TO_SERVER_REQUEST(base_ptr)					\
+	((struct server_request*)					\
+	 (((char*)(base_ptr) - OFFSET_OF(struct server_request, base))))
+
+/* The number of good nameservers that we have */
+static int global_good_nameservers = 0;
+
+/* inflight requests are contained in the req_head list */
+/* and are actually going out across the network */
+static int global_requests_inflight = 0;
+/* requests which aren't inflight are in the waiting list */
+/* and are counted here */
+static int global_requests_waiting = 0;
+
+/* cap on concurrent inflight requests; overflow goes to the waiting list */
+static int global_max_requests_inflight = 64;
+
+static struct timeval global_timeout = {5, 0}; /* 5 seconds */
+static int global_max_reissues = 1; /* a reissue occurs when we get some errors from the server */
+static int global_max_retransmits = 3; /* number of times we'll retransmit a request which timed out */
+/* number of timeouts in a row before we consider this server to be down */
+static int global_max_nameserver_timeout = 3;
+
+/* These are the timeout values for nameservers. If we find a nameserver is down */
+/* we try to probe it at intervals as given below. Values are in seconds. */
+/* Capped at the last entry; indexed by nameserver->failed_times. */
+static const struct timeval global_nameserver_timeouts[] = {{10, 0}, {60, 0}, {300, 0}, {900, 0}, {3600, 0}};
+static const int global_nameserver_timeouts_length = sizeof(global_nameserver_timeouts)/sizeof(struct timeval);
+
+/* Forward declarations for the resolver core, search-list handling, */
+/* and the embedded DNS-server side. */
+static struct nameserver *nameserver_pick(void);
+static void evdns_request_insert(struct request *req, struct request **head);
+static void nameserver_ready_callback(int fd, short events, void *arg);
+static int evdns_transmit(void);
+static int evdns_request_transmit(struct request *req);
+static void nameserver_send_probe(struct nameserver *const ns);
+static void search_request_finished(struct request *const);
+static int search_try_next(struct request *const req);
+static int search_request_new(int type, const char *const name, int flags, evdns_callback_type user_callback, void *user_arg);
+static void evdns_requests_pump_waiting_queue(void);
+static u16 transaction_id_pick(void);
+static struct request *request_new(int type, const char *name, int flags, evdns_callback_type callback, void *ptr);
+static void request_submit(struct request *const req);
+
+static int server_request_free(struct server_request *req);
+static void server_request_free_answers(struct server_request *req);
+static void server_port_free(struct evdns_server_port *port);
+static void server_port_ready_callback(int fd, short events, void *arg);
+
+static int strtoint(const char *const str);
+
+#ifdef WIN32
+/* Return the last socket error. For WSAEWOULDBLOCK we prefer the */
+/* pending SO_ERROR on the socket itself, if one is available. */
+static int
+last_error(int sock)
+{
+	int optval, optvallen=sizeof(optval);
+	int err = WSAGetLastError();
+	if (err == WSAEWOULDBLOCK && sock >= 0) {
+		if (getsockopt(sock, SOL_SOCKET, SO_ERROR, (void*)&optval,
+			       &optvallen))
+			return err;
+		if (optval)
+			return optval;
+	}
+	return err;
+
+}
+/* Does this error code mean "try again later"? */
+static int
+error_is_eagain(int err)
+{
+	return err == EAGAIN || err == WSAEWOULDBLOCK;
+}
+/* Minimal inet_aton() replacement for Windows; dotted-quad only. */
+/* The 255.255.255.255 special case exists because inet_addr() returns */
+/* INADDR_NONE (== 0xffffffff) both for that address and on error. */
+static int
+inet_aton(const char *c, struct in_addr *addr)
+{
+	ev_uint32_t r;
+	if (strcmp(c, "255.255.255.255") == 0) {
+		addr->s_addr = 0xffffffffu;
+	} else {
+		r = inet_addr(c);
+		if (r == INADDR_NONE)
+			return 0;
+		addr->s_addr = r;
+	}
+	return 1;
+}
+#else
+#define last_error(sock) (errno)
+#define error_is_eagain(err) ((err) == EAGAIN)
+#endif
+
+/* Render an IPv4 address (network byte order) as a dotted quad for */
+/* log messages. Returns a pointer to static storage: not reentrant, */
+/* and each call overwrites the previous result. */
+static const char *
+debug_ntoa(u32 address)
+{
+	static char buf[32];
+	const u32 host_order = ntohl(address);
+	const int o1 = (int)(u8)((host_order >> 24) & 0xff);
+	const int o2 = (int)(u8)((host_order >> 16) & 0xff);
+	const int o3 = (int)(u8)((host_order >> 8) & 0xff);
+	const int o4 = (int)(u8)(host_order & 0xff);
+	evutil_snprintf(buf, sizeof(buf), "%d.%d.%d.%d", o1, o2, o3, o4);
+	return buf;
+}
+
+/* The user-installed log callback; NULL disables logging entirely. */
+static evdns_debug_log_fn_type evdns_log_fn = NULL;
+
+/* Public API: install (or clear, with NULL) the debug-log callback. */
+void
+evdns_set_log_fn(evdns_debug_log_fn_type fn)
+{
+	evdns_log_fn = fn;
+}
+
+#ifdef __GNUC__
+/* Let GCC type-check the printf-style arguments of _evdns_log. */
+#define EVDNS_LOG_CHECK __attribute__ ((format(printf, 2, 3)))
+#else
+#define EVDNS_LOG_CHECK
+#endif
+
+static void _evdns_log(int warn, const char *fmt, ...) EVDNS_LOG_CHECK;
+/* Format a message and hand it to the user's log callback. */
+/* No-op when no callback is installed. Uses a static buffer, so this */
+/* is not reentrant/thread-safe; output is truncated to 512 bytes. */
+static void
+_evdns_log(int warn, const char *fmt, ...)
+{
+	va_list args;
+	static char buf[512];
+	if (!evdns_log_fn)
+		return;
+	va_start(args,fmt);
+	evutil_vsnprintf(buf, sizeof(buf), fmt, args);
+	/* belt-and-braces: guarantee termination regardless of vsnprintf */
+	buf[sizeof(buf)-1] = '\0';
+	evdns_log_fn(warn, buf);
+	va_end(args);
+}
+
+#define log _evdns_log
+
+/* Walk the circular list of inflight requests and return the one */
+/* whose transaction id matches, or NULL if no such request exists. */
+static struct request *
+request_find_from_trans_id(u16 trans_id) {
+	struct request *const first = req_head;
+	struct request *cur = req_head;
+
+	if (!cur)
+		return NULL;
+	do {
+		if (cur->trans_id == trans_id)
+			return cur;
+		cur = cur->next;
+	} while (cur != first);
+
+	return NULL;
+}
+
+/* a libevent callback function which is called when a nameserver */
+/* has gone down and we want to test if it has came back to life yet */
+static void
+nameserver_prod_callback(int fd, short events, void *arg) {
+	struct nameserver *const ns = (struct nameserver *) arg;
+	(void)fd;	/* unused: probe uses the ns's own socket */
+	(void)events;
+
+	nameserver_send_probe(ns);
+}
+
+/* a libevent callback which is called when a nameserver probe (to see if */
+/* it has come back to life) times out. We increment the count of failed_times */
+/* and wait longer to send the next probe packet. */
+static void
+nameserver_probe_failed(struct nameserver *const ns) {
+	const struct timeval * timeout;
+	(void) evtimer_del(&ns->timeout_event);
+	if (ns->state == 1) {
+		/* This can happen if the nameserver acts in a way which makes us mark */
+		/* it as bad and then starts sending good replies. */
+		return;
+	}
+
+	/* Exponential-ish backoff: pick the next interval from the table, */
+	/* clamped to its last (largest) entry. */
+	timeout =
+	  &global_nameserver_timeouts[MIN(ns->failed_times,
+					  global_nameserver_timeouts_length - 1)];
+	ns->failed_times++;
+
+	if (evtimer_add(&ns->timeout_event, (struct timeval *) timeout) < 0) {
+	  log(EVDNS_LOG_WARN,
+	      "Error from libevent when adding timer event for %s",
+	      debug_ntoa(ns->address));
+	  /* ???? Do more? */
+	}
+}
+
+/* called when a nameserver has been deemed to have failed. For example, too */
+/* many packets have timed out etc */
+static void
+nameserver_failed(struct nameserver *const ns, const char *msg) {
+	struct request *req, *started_at;
+	/* if this nameserver has already been marked as failed */
+	/* then don't do anything */
+	if (!ns->state) return;
+
+	log(EVDNS_LOG_WARN, "Nameserver %s has failed: %s",
+	    debug_ntoa(ns->address), msg);
+	global_good_nameservers--;
+	assert(global_good_nameservers >= 0);
+	if (global_good_nameservers == 0) {
+		log(EVDNS_LOG_WARN, "All nameservers have failed");
+	}
+
+	ns->state = 0;
+	ns->failed_times = 1;
+
+	/* schedule the first liveness probe using the shortest interval */
+	if (evtimer_add(&ns->timeout_event, (struct timeval *) &global_nameserver_timeouts[0]) < 0) {
+		log(EVDNS_LOG_WARN,
+		    "Error from libevent when adding timer event for %s",
+		    debug_ntoa(ns->address));
+		/* ???? Do more? */
+	}
+
+	/* walk the list of inflight requests to see if any can be reassigned to */
+	/* a different server. Requests in the waiting queue don't have a */
+	/* nameserver assigned yet */
+
+	/* if we don't have *any* good nameservers then there's no point */
+	/* trying to reassign requests to one */
+	if (!global_good_nameservers) return;
+
+	req = req_head;
+	started_at = req_head;
+	if (req) {
+		do {
+			if (req->tx_count == 0 && req->ns == ns) {
+				/* still waiting to go out, can be moved */
+				/* to another server */
+				req->ns = nameserver_pick();
+			}
+			req = req->next;
+		} while (req != started_at);
+	}
+}
+
+/* Mark a previously-failed nameserver as healthy again: cancel its */
+/* probe timer, reset its failure counters and bump the good-server */
+/* count. No-op if the server is already believed to be up. */
+static void
+nameserver_up(struct nameserver *const ns) {
+	if (ns->state)
+		return;
+	log(EVDNS_LOG_WARN, "Nameserver %s is back up",
+	    debug_ntoa(ns->address));
+	evtimer_del(&ns->timeout_event);
+	ns->failed_times = 0;
+	ns->timedout = 0;
+	ns->state = 1;
+	global_good_nameservers++;
+}
+
+/* Record the transaction id in the request object and patch it into */
+/* the first two bytes of the wire-format request (the DNS header's */
+/* transaction-id field), in network byte order. */
+static void
+request_trans_id_set(struct request *const req, const u16 trans_id) {
+	const u16 net_id = htons(trans_id);
+	req->trans_id = trans_id;
+	/* Use memcpy instead of "*((u16 *) req->request) = ...": the */
+	/* request buffer is a char array, so a direct u16 store is a */
+	/* strict-aliasing violation and may be misaligned on strict- */
+	/* alignment platforms. memcpy compiles to the same store where */
+	/* the access is legal. */
+	memcpy(req->request, &net_id, sizeof(net_id));
+}
+
+/* Called to remove a request from a list and dealloc it. */
+/* head is a pointer to the head of the list it should be */
+/* removed from or NULL if the request isn't in a list. */
+static void
+request_finished(struct request *const req, struct request **head) {
+	if (head) {
+		if (req->next == req) {
+			/* only item in the list */
+			*head = NULL;
+		} else {
+			/* unlink from the circular list */
+			req->next->prev = req->prev;
+			req->prev->next = req->next;
+			if (*head == req) *head = req->next;
+		}
+	}
+
+	log(EVDNS_LOG_DEBUG, "Removing timeout for request %lx",
+	    (unsigned long) req);
+	evtimer_del(&req->timeout_event);
+
+	search_request_finished(req);
+	global_requests_inflight--;
+
+	if (!req->request_appended) {
+		/* need to free the request data on its own */
+		free(req->request);
+	} else {
+		/* the request data is appended onto the header */
+		/* so everything gets free()ed when we: */
+	}
+
+	free(req);
+
+	/* a slot opened up: maybe promote a waiting request */
+	evdns_requests_pump_waiting_queue();
+}
+
+/* This is called when a server returns a funny error code. */
+/* We try the request again with another server. */
+/* */
+/* return: */
+/* 0 ok */
+/* 1 failed/reissue is pointless */
+static int
+request_reissue(struct request *req) {
+ const struct nameserver *const last_ns = req->ns;
+ /* the last nameserver should have been marked as failing */
+ /* by the caller of this function, therefore pick will try */
+ /* not to return it */
+ req->ns = nameserver_pick();
+ if (req->ns == last_ns) {
+ /* ... but pick did return it */
+ /* not a lot of point in trying again with the */
+ /* same server */
+ return 1;
+ }
+
+ req->reissue_count++;
+ req->tx_count = 0;
+ req->transmit_me = 1;
+
+ return 0;
+}
+
+/* this function looks for space on the inflight queue and promotes */
+/* requests from the waiting queue if it can. */
+static void
+evdns_requests_pump_waiting_queue(void) {
+	while (global_requests_inflight < global_max_requests_inflight &&
+	    global_requests_waiting) {
+		struct request *req;
+		/* move a request from the waiting queue to the inflight queue */
+		assert(req_waiting_head);
+		if (req_waiting_head->next == req_waiting_head) {
+			/* only one item in the queue */
+			req = req_waiting_head;
+			req_waiting_head = NULL;
+		} else {
+			req = req_waiting_head;
+			req->next->prev = req->prev;
+			req->prev->next = req->next;
+			req_waiting_head = req->next;
+		}
+
+		global_requests_waiting--;
+		global_requests_inflight++;
+
+		/* the request only gets a nameserver and transaction id */
+		/* once it actually goes inflight */
+		req->ns = nameserver_pick();
+		request_trans_id_set(req, transaction_id_pick());
+
+		evdns_request_insert(req, &req_head);
+		evdns_request_transmit(req);
+		evdns_transmit();
+	}
+}
+
+/* Deliver the final result (or error err, when reply is NULL) to the */
+/* user's callback, with arguments shaped per the request type. */
+static void
+reply_callback(struct request *const req, u32 ttl, u32 err, struct reply *reply) {
+	switch (req->request_type) {
+	case TYPE_A:
+		if (reply)
+			req->user_callback(DNS_ERR_NONE, DNS_IPv4_A,
+							   reply->data.a.addrcount, ttl,
+						 reply->data.a.addresses,
+							   req->user_pointer);
+		else
+			req->user_callback(err, 0, 0, 0, NULL, req->user_pointer);
+		return;
+	case TYPE_PTR:
+		if (reply) {
+			/* PTR results are reported as a 1-element name array */
+			char *name = reply->data.ptr.name;
+			req->user_callback(DNS_ERR_NONE, DNS_PTR, 1, ttl,
+							   &name, req->user_pointer);
+		} else {
+			req->user_callback(err, 0, 0, 0, NULL,
+							   req->user_pointer);
+		}
+		return;
+	case TYPE_AAAA:
+		if (reply)
+			req->user_callback(DNS_ERR_NONE, DNS_IPv6_AAAA,
+							   reply->data.aaaa.addrcount, ttl,
+							   reply->data.aaaa.addresses,
+							   req->user_pointer);
+		else
+			req->user_callback(err, 0, 0, 0, NULL, req->user_pointer);
+		return;
+	}
+	/* request_type is set by us, so any other value is a logic bug */
+	assert(0);
+}
+
+/* this processes a parsed reply packet */
+static void
+reply_handle(struct request *const req, u16 flags, u32 ttl, struct reply *reply) {
+	int error;
+	/* maps DNS rcode 1..5 to evdns error codes */
+	static const int error_codes[] = {
+		DNS_ERR_FORMAT, DNS_ERR_SERVERFAILED, DNS_ERR_NOTEXIST,
+		DNS_ERR_NOTIMPL, DNS_ERR_REFUSED
+	};
+
+	if (flags & 0x020f || !reply || !reply->have_answer) {
+		/* there was an error */
+		if (flags & 0x0200) {
+			error = DNS_ERR_TRUNCATED;
+		} else {
+			/* rcode is the low nibble; shift to a 0-based index. */
+			/* When rcode == 0 the unsigned subtraction wraps, */
+			/* error_code > 4 holds, and we fall to UNKNOWN. */
+			u16 error_code = (flags & 0x000f) - 1;
+			if (error_code > 4) {
+				error = DNS_ERR_UNKNOWN;
+			} else {
+				error = error_codes[error_code];
+			}
+		}
+
+		switch(error) {
+		case DNS_ERR_NOTIMPL:
+		case DNS_ERR_REFUSED:
+			/* we regard these errors as marking a bad nameserver */
+			if (req->reissue_count < global_max_reissues) {
+				char msg[64];
+				evutil_snprintf(msg, sizeof(msg),
+				    "Bad response %d (%s)",
+					 error, evdns_err_to_string(error));
+				nameserver_failed(req->ns, msg);
+				if (!request_reissue(req)) return;
+			}
+			break;
+		case DNS_ERR_SERVERFAILED:
+			/* rcode 2 (servfailed) sometimes means "we
+			 * are broken" and sometimes (with some binds)
+			 * means "that request was very confusing."
+			 * Treat this as a timeout, not a failure.
+			 */
+			log(EVDNS_LOG_DEBUG, "Got a SERVERFAILED from nameserver %s; "
+				"will allow the request to time out.",
+			    debug_ntoa(req->ns->address));
+			break;
+		default:
+			/* we got a good reply from the nameserver */
+			nameserver_up(req->ns);
+		}
+
+		if (req->search_state && req->request_type != TYPE_PTR) {
+			/* if we have a list of domains to search in,
+			 * try the next one */
+			if (!search_try_next(req)) {
+				/* a new request was issued so this
+				 * request is finished and */
+				/* the user callback will be made when
+				 * that request (or a */
+				/* child of it) finishes. */
+				request_finished(req, &req_head);
+				return;
+			}
+		}
+
+		/* all else failed. Pass the failure up */
+		reply_callback(req, 0, error, NULL);
+		request_finished(req, &req_head);
+	} else {
+		/* all ok, tell the user */
+		reply_callback(req, ttl, 0, reply);
+		nameserver_up(req->ns);
+		request_finished(req, &req_head);
+	}
+}
+
+/* Decode a (possibly compressed) DNS name starting at packet[*idx] */
+/* into name_out as a dotted string. On success returns 0 and advances */
+/* *idx past the name (past the first compression pointer, if any); */
+/* returns -1 on malformed input or if name_out_len is too small. */
+/* The GET* macros below are also used by reply_parse/request_parse */
+/* and are #undef'd at the end of request_parse. */
+static int
+name_parse(u8 *packet, int length, int *idx, char *name_out, int name_out_len) {
+	int name_end = -1;
+	int j = *idx;
+	int ptr_count = 0;
+#define GET32(x) do { if (j + 4 > length) goto err; memcpy(&_t32, packet + j, 4); j += 4; x = ntohl(_t32); } while(0)
+#define GET16(x) do { if (j + 2 > length) goto err; memcpy(&_t, packet + j, 2); j += 2; x = ntohs(_t); } while(0)
+#define GET8(x) do { if (j >= length) goto err; x = packet[j++]; } while(0)
+
+	char *cp = name_out;
+	const char *const end = name_out + name_out_len;
+
+	/* Normally, names are a series of length prefixed strings terminated */
+	/* with a length of 0 (the lengths are u8's < 63). */
+	/* However, the length can start with a pair of 1 bits and that */
+	/* means that the next 14 bits are a pointer within the current */
+	/* packet. */
+
+	for(;;) {
+		u8 label_len;
+		if (j >= length) return -1;
+		GET8(label_len);
+		if (!label_len) break;
+		if (label_len & 0xc0) {
+			/* compression pointer: follow it */
+			u8 ptr_low;
+			GET8(ptr_low);
+			if (name_end < 0) name_end = j;
+			j = (((int)label_len & 0x3f) << 8) + ptr_low;
+			/* Make sure that the target offset is in-bounds. */
+			if (j < 0 || j >= length) return -1;
+			/* If we've jumped more times than there are characters in the
+			 * message, we must have a loop. */
+			if (++ptr_count > length) return -1;
+			continue;
+		}
+		if (label_len > 63) return -1;
+		if (cp != name_out) {
+			/* separate labels with '.' */
+			if (cp + 1 >= end) return -1;
+			*cp++ = '.';
+		}
+		if (cp + label_len >= end) return -1;
+		memcpy(cp, packet + j, label_len);
+		cp += label_len;
+		j += label_len;
+	}
+	if (cp >= end) return -1;
+	*cp = '\0';
+	/* resume after the first pointer we followed, if any */
+	if (name_end < 0)
+		*idx = j;
+	else
+		*idx = name_end;
+	return 0;
+ err:
+	return -1;
+}
+
+/* parses a raw reply from a nameserver. Returns 0 when the reply was */
+/* matched to an inflight request and handed to reply_handle, -1 on */
+/* malformed input or unknown transaction id. Relies on the GET8/16/32 */
+/* macros defined in name_parse above. */
+static int
+reply_parse(u8 *packet, int length) {
+	int j = 0, k = 0;  /* index into packet */
+	u16 _t;	 /* used by the macros */
+	u32 _t32;  /* used by the macros */
+	char tmp_name[256], cmp_name[256]; /* used by the macros */
+
+	u16 trans_id, questions, answers, authority, additional, datalength;
+	u16 flags = 0;
+	u32 ttl, ttl_r = 0xffffffff;	/* minimum TTL across answer RRs */
+	struct reply reply;
+	struct request *req = NULL;
+	unsigned int i;
+
+	/* DNS header: id, flags, and the four section counts */
+	GET16(trans_id);
+	GET16(flags);
+	GET16(questions);
+	GET16(answers);
+	GET16(authority);
+	GET16(additional);
+	(void) authority; /* suppress "unused variable" warnings. */
+	(void) additional; /* suppress "unused variable" warnings. */
+
+	req = request_find_from_trans_id(trans_id);
+	if (!req) return -1;
+
+	memset(&reply, 0, sizeof(reply));
+
+	/* If it's not an answer, it doesn't correspond to any request. */
+	if (!(flags & 0x8000)) return -1;  /* must be an answer */
+	if (flags & 0x020f) {
+		/* there was an error */
+		goto err;
+	}
+	/* if (!answers) return; */  /* must have an answer of some form */
+
+	/* This macro skips a name in the DNS reply. */
+#define SKIP_NAME \
+	do { tmp_name[0] = '\0';	\
+		if (name_parse(packet, length, &j, tmp_name, sizeof(tmp_name))<0)\
+			goto err;				\
+	} while(0)
+	/* This macro checks the question name against the one we sent, */
+	/* so spoofed replies with a wrong question are ignored. */
+#define TEST_NAME \
+	do { tmp_name[0] = '\0';	\
+		cmp_name[0] = '\0';	\
+		k = j;			\
+		if (name_parse(packet, length, &j, tmp_name, sizeof(tmp_name))<0)\
+			goto err;					\
+		if (name_parse(req->request, req->request_len, &k, cmp_name, sizeof(cmp_name))<0)	\
+			goto err;				\
+		if (memcmp(tmp_name, cmp_name, strlen (tmp_name)) != 0)	\
+			return (-1); /* we ignore mismatching names */	\
+	} while(0)
+
+	reply.type = req->request_type;
+
+	/* skip over each question in the reply */
+	for (i = 0; i < questions; ++i) {
+		/* the question looks like
+		 *   <label:name><u16:type><u16:class>
+		 */
+		TEST_NAME;
+		j += 4;
+		if (j > length) goto err;
+	}
+
+	/* now we have the answer section which looks like
+	 *   <label:name><u16:type><u16:class><u32:ttl><u16:len><data...>
+	 */
+
+	for (i = 0; i < answers; ++i) {
+		u16 type, class;
+
+		SKIP_NAME;
+		GET16(type);
+		GET16(class);
+		GET32(ttl);
+		GET16(datalength);
+
+		if (type == TYPE_A && class == CLASS_INET) {
+			int addrcount, addrtocopy;
+			if (req->request_type != TYPE_A) {
+				j += datalength; continue;
+			}
+			if ((datalength & 3) != 0) /* not an even number of As. */
+			    goto err;
+			addrcount = datalength >> 2;	/* 4 bytes per A record */
+			addrtocopy = MIN(MAX_ADDRS - reply.data.a.addrcount, (unsigned)addrcount);
+
+			ttl_r = MIN(ttl_r, ttl);
+			/* we only bother with the first four addresses. */
+			if (j + 4*addrtocopy > length) goto err;
+			memcpy(&reply.data.a.addresses[reply.data.a.addrcount],
+				   packet + j, 4*addrtocopy);
+			j += 4*addrtocopy;
+			reply.data.a.addrcount += addrtocopy;
+			reply.have_answer = 1;
+			if (reply.data.a.addrcount == MAX_ADDRS) break;
+		} else if (type == TYPE_PTR && class == CLASS_INET) {
+			if (req->request_type != TYPE_PTR) {
+				j += datalength; continue;
+			}
+			if (name_parse(packet, length, &j, reply.data.ptr.name,
+						   sizeof(reply.data.ptr.name))<0)
+				goto err;
+			ttl_r = MIN(ttl_r, ttl);
+			reply.have_answer = 1;
+			/* a single PTR name is all we report */
+			break;
+		} else if (type == TYPE_AAAA && class == CLASS_INET) {
+			int addrcount, addrtocopy;
+			if (req->request_type != TYPE_AAAA) {
+				j += datalength; continue;
+			}
+			if ((datalength & 15) != 0) /* not an even number of AAAAs. */
+				goto err;
+			addrcount = datalength >> 4;  /* each address is 16 bytes long */
+			addrtocopy = MIN(MAX_ADDRS - reply.data.aaaa.addrcount, (unsigned)addrcount);
+			ttl_r = MIN(ttl_r, ttl);
+
+			/* we only bother with the first four addresses. */
+			if (j + 16*addrtocopy > length) goto err;
+			memcpy(&reply.data.aaaa.addresses[reply.data.aaaa.addrcount],
+				   packet + j, 16*addrtocopy);
+			reply.data.aaaa.addrcount += addrtocopy;
+			j += 16*addrtocopy;
+			reply.have_answer = 1;
+			if (reply.data.aaaa.addrcount == MAX_ADDRS) break;
+		} else {
+			/* skip over any other type of resource */
+			j += datalength;
+		}
+	}
+
+	reply_handle(req, flags, ttl_r, &reply);
+	return 0;
+ err:
+	/* report the failure to the matched request, if we found one */
+	if (req)
+		reply_handle(req, flags, 0, NULL);
+	return -1;
+}
+
+/* Parse a raw request (packet,length) sent to a nameserver port (port) from */
+/* a DNS client (addr,addrlen), and if it's well-formed, call the corresponding */
+/* callback. Returns 0 on success, -1 on malformed/unsupported input. */
+static int
+request_parse(u8 *packet, int length, struct evdns_server_port *port, struct sockaddr *addr, socklen_t addrlen)
+{
+	int j = 0;	/* index into packet */
+	u16 _t;	 /* used by the macros */
+	char tmp_name[256]; /* used by the macros */
+
+	int i;
+	u16 trans_id, flags, questions, answers, authority, additional;
+	struct server_request *server_req = NULL;
+
+	/* Get the header fields */
+	GET16(trans_id);
+	GET16(flags);
+	GET16(questions);
+	GET16(answers);
+	GET16(authority);
+	GET16(additional);
+
+	if (flags & 0x8000) return -1; /* Must not be an answer. */
+	flags &= 0x0110; /* Only RD and CD get preserved. */
+
+	server_req = malloc(sizeof(struct server_request));
+	if (server_req == NULL) return -1;
+	memset(server_req, 0, sizeof(struct server_request));
+
+	server_req->trans_id = trans_id;
+	memcpy(&server_req->addr, addr, addrlen);
+	server_req->addrlen = addrlen;
+
+	server_req->base.flags = flags;
+	server_req->base.nquestions = 0;
+	server_req->base.questions = malloc(sizeof(struct evdns_server_question *) * questions);
+	if (server_req->base.questions == NULL)
+		goto err;
+
+	/* parse the question section into caller-visible question objects */
+	for (i = 0; i < questions; ++i) {
+		u16 type, class;
+		struct evdns_server_question *q;
+		int namelen;
+		if (name_parse(packet, length, &j, tmp_name, sizeof(tmp_name))<0)
+			goto err;
+		GET16(type);
+		GET16(class);
+		namelen = strlen(tmp_name);
+		/* q->name is a flexible-style trailing array; +namelen */
+		/* covers the name, the struct already counts the NUL slot */
+		q = malloc(sizeof(struct evdns_server_question) + namelen);
+		if (!q)
+			goto err;
+		q->type = type;
+		q->dns_question_class = class;
+		memcpy(q->name, tmp_name, namelen+1);
+		server_req->base.questions[server_req->base.nquestions++] = q;
+	}
+
+	/* Ignore answers, authority, and additional. */
+
+	server_req->port = port;
+	port->refcnt++;
+
+	/* Only standard queries are supported. */
+	/* NOTE(review): server_req is handed to respond() here and not */
+	/* freed locally — verify respond's failure path releases it. */
+	if (flags & 0x7800) {
+		evdns_server_request_respond(&(server_req->base), DNS_ERR_NOTIMPL);
+		return -1;
+	}
+
+	port->user_callback(&(server_req->base), port->user_data);
+
+	return 0;
+err:
+	/* undo partial construction (questions parsed so far, then the */
+	/* request object itself) */
+	if (server_req) {
+		if (server_req->base.questions) {
+			for (i = 0; i < server_req->base.nquestions; ++i)
+				free(server_req->base.questions[i]);
+			free(server_req->base.questions);
+		}
+		free(server_req);
+	}
+	return -1;
+
+	/* the GET* macros were defined in name_parse; retire them here */
+#undef SKIP_NAME
+#undef GET32
+#undef GET16
+#undef GET8
+}
+
+/* Produce a 16-bit transaction id. Exactly one of the DNS_USE_*_FOR_ID */
+/* configuration macros is expected to be defined at build time. */
+/* NOTE(review): if none of them is defined, trans_id is returned */
+/* uninitialized — confirm the build always defines one. */
+static u16
+default_transaction_id_fn(void)
+{
+	u16 trans_id;
+#ifdef DNS_USE_CPU_CLOCK_FOR_ID
+	struct timespec ts;
+	static int clkid = -1;
+	if (clkid == -1) {
+		clkid = CLOCK_REALTIME;
+#ifdef CLOCK_MONOTONIC
+		/* prefer the monotonic clock when it works */
+		if (clock_gettime(CLOCK_MONOTONIC, &ts) != -1)
+			clkid = CLOCK_MONOTONIC;
+#endif
+	}
+	if (clock_gettime(clkid, &ts) == -1)
+		event_err(1, "clock_gettime");
+	trans_id = ts.tv_nsec & 0xffff;
+#endif
+
+#ifdef DNS_USE_FTIME_FOR_ID
+	struct _timeb tb;
+	_ftime(&tb);
+	trans_id = tb.millitm & 0xffff;
+#endif
+
+#ifdef DNS_USE_GETTIMEOFDAY_FOR_ID
+	struct timeval tv;
+	evutil_gettimeofday(&tv, NULL);
+	trans_id = tv.tv_usec & 0xffff;
+#endif
+
+#ifdef DNS_USE_OPENSSL_FOR_ID
+	if (RAND_pseudo_bytes((u8 *) &trans_id, 2) == -1) {
+		/* in the case that the RAND call fails we back */
+		/* down to using gettimeofday. */
+		/*
+		  struct timeval tv;
+		  evutil_gettimeofday(&tv, NULL);
+		  trans_id = tv.tv_usec & 0xffff;
+		*/
+		abort();
+	}
+#endif
+	return trans_id;
+}
+
+/* The active transaction-id generator. */
+static ev_uint16_t (*trans_id_function)(void) = default_transaction_id_fn;
+
+/* Public API: install a custom transaction-id generator, or restore */
+/* the built-in default when fn is NULL. */
+void
+evdns_set_transaction_id_fn(ev_uint16_t (*fn)(void))
+{
+	trans_id_function = fn ? fn : default_transaction_id_fn;
+}
+
+/* Try to choose a strong transaction id which isn't already in flight */
+static u16
+transaction_id_pick(void) {
+	for (;;) {
+		const struct request *req = req_head, *started_at;
+		u16 trans_id = trans_id_function();
+
+		/* 0xffff is reserved as a sentinel; never hand it out */
+		if (trans_id == 0xffff) continue;
+		/* now check to see if that id is already inflight */
+		req = started_at = req_head;
+		if (req) {
+			do {
+				if (req->trans_id == trans_id) break;
+				req = req->next;
+			} while (req != started_at);
+		}
+		/* we didn't find it, so this is a good id */
+		/* (also true when the inflight list is empty) */
+		if (req == started_at) return trans_id;
+	}
+}
+
+/* choose a nameserver to use. This function will try to ignore */
+/* nameservers which we think are down and load balance across the rest */
+/* by updating the server_head global each time. */
+static struct nameserver *
+nameserver_pick(void) {
+	struct nameserver *started_at = server_head, *picked;
+	if (!server_head) return NULL;
+
+	/* if we don't have any good nameservers then there's no */
+	/* point in trying to find one. */
+	if (!global_good_nameservers) {
+		/* still rotate, so retries spread over all servers */
+		server_head = server_head->next;
+		return server_head;
+	}
+
+	/* remember that nameservers are in a circular list */
+	for (;;) {
+		if (server_head->state) {
+			/* we think this server is currently good */
+			picked = server_head;
+			server_head = server_head->next;
+			return picked;
+		}
+
+		server_head = server_head->next;
+		if (server_head == started_at) {
+			/* all the nameservers seem to be down */
+			/* so we just return this one and hope for the */
+			/* best */
+			assert(global_good_nameservers == 0);
+			picked = server_head;
+			server_head = server_head->next;
+			return picked;
+		}
+	}
+}
+
+/* Return 1 iff (sa,slen) is exactly the IPv4 address of nameserver */
+/* ns — used to reject datagrams arriving from unexpected senders. */
+static int
+address_is_correct(struct nameserver *ns, struct sockaddr *sa, socklen_t slen)
+{
+	const struct sockaddr_in *sin = (struct sockaddr_in *) sa;
+	if (slen != sizeof(struct sockaddr_in))
+		return 0;
+	if (sa->sa_family != AF_INET)
+		return 0;
+	return sin->sin_addr.s_addr == ns->address;
+}
+
+/* this is called when a nameserver socket is ready for reading */
+/* drains the socket: keeps reading datagrams until EAGAIN */
+static void
+nameserver_read(struct nameserver *ns) {
+	u8 packet[1500];	/* enough for a typical UDP DNS reply (MTU-sized) */
+	struct sockaddr_storage ss;
+	socklen_t addrlen = sizeof(ss);
+
+	for (;;) {
+		const int r = recvfrom(ns->socket, packet, sizeof(packet), 0,
+		    (struct sockaddr*)&ss, &addrlen);
+		if (r < 0) {
+			int err = last_error(ns->socket);
+			if (error_is_eagain(err)) return;
+			nameserver_failed(ns, strerror(err));
+			return;
+		}
+		/* anti-spoofing: drop packets not from this nameserver */
+		if (!address_is_correct(ns, (struct sockaddr*)&ss, addrlen)) {
+			log(EVDNS_LOG_WARN, "Address mismatch on received "
+			    "DNS packet.");
+			return;
+		}
+		ns->timedout = 0;
+		reply_parse(packet, r);
+	}
+}
+
+/* Read a packet from a DNS client on a server port s, parse it, and */
+/* act accordingly. Drains the socket until EAGAIN. */
+static void
+server_port_read(struct evdns_server_port *s) {
+	u8 packet[1500];
+	struct sockaddr_storage addr;
+	socklen_t addrlen;
+	int r;
+
+	for (;;) {
+		/* reset every iteration: recvfrom overwrites addrlen */
+		addrlen = sizeof(struct sockaddr_storage);
+		r = recvfrom(s->socket, packet, sizeof(packet), 0,
+					 (struct sockaddr*) &addr, &addrlen);
+		if (r < 0) {
+			int err = last_error(s->socket);
+			if (error_is_eagain(err)) return;
+			log(EVDNS_LOG_WARN, "Error %s (%d) while reading request.",
+				strerror(err), err);
+			return;
+		}
+		request_parse(packet, r, s, (struct sockaddr*) &addr, addrlen);
+	}
+}
+
+/* Try to write all pending replies on a given DNS server port. */
+/* Stops early on EAGAIN (leaving EV_WRITE armed); on success switches */
+/* the port's event back to read-only. */
+static void
+server_port_flush(struct evdns_server_port *port)
+{
+	while (port->pending_replies) {
+		struct server_request *req = port->pending_replies;
+		int r = sendto(port->socket, req->response, req->response_len, 0,
+			   (struct sockaddr*) &req->addr, req->addrlen);
+		if (r < 0) {
+			int err = last_error(port->socket);
+			if (error_is_eagain(err))
+				return;
+			/* non-retryable error: drop this reply and move on */
+			log(EVDNS_LOG_WARN, "Error %s (%d) while writing response to port; dropping", strerror(err), err);
+		}
+		if (server_request_free(req)) {
+			/* we released the last reference to req->port. */
+			return;
+		}
+	}
+
+	/* We have no more pending requests; stop listening for 'writeable' events. */
+	(void) event_del(&port->event);
+	event_set(&port->event, port->socket, EV_READ | EV_PERSIST,
+			  server_port_ready_callback, port);
+	if (event_add(&port->event, NULL) < 0) {
+		log(EVDNS_LOG_WARN, "Error from libevent when adding event for DNS server.");
+		/* ???? Do more? */
+	}
+}
+
+/* set if we are waiting for the ability to write to this server. */
+/* if waiting is true then we ask libevent for EV_WRITE events, otherwise */
+/* we stop these events. */
+static void
+nameserver_write_waiting(struct nameserver *ns, char waiting) {
+	if (ns->write_waiting == waiting) return;	/* already in desired state */
+
+	ns->write_waiting = waiting;
+	/* re-register the event with the new interest mask */
+	(void) event_del(&ns->event);
+	event_set(&ns->event, ns->socket, EV_READ | (waiting ? EV_WRITE : 0) | EV_PERSIST,
+	    nameserver_ready_callback, ns);
+	if (event_add(&ns->event, NULL) < 0) {
+	  log(EVDNS_LOG_WARN, "Error from libevent when adding event for %s",
+	      debug_ntoa(ns->address));
+	  /* ???? Do more? */
+	}
+}
+
+/* libevent callback: a nameserver socket became writable and/or */
+/* readable. On write-readiness we unchoke the server and, if the */
+/* whole transmit queue drained, stop asking for EV_WRITE. */
+static void
+nameserver_ready_callback(int fd, short events, void *arg) {
+	struct nameserver *const ns = (struct nameserver *) arg;
+	(void) fd;
+
+	if (events & EV_WRITE) {
+		ns->choked = 0;
+		if (!evdns_transmit())
+			nameserver_write_waiting(ns, 0);
+	}
+	if (events & EV_READ)
+		nameserver_read(ns);
+}
+
+/* libevent callback: a DNS-server listening socket became writable */
+/* and/or readable. Write-readiness unchokes the port and flushes its */
+/* pending replies; read-readiness drains incoming queries. */
+static void
+server_port_ready_callback(int fd, short events, void *arg) {
+	struct evdns_server_port *const port = (struct evdns_server_port *) arg;
+	(void) fd;
+
+	if (events & EV_WRITE) {
+		port->choked = 0;
+		server_port_flush(port);
+	}
+	if (events & EV_READ)
+		server_port_read(port);
+}
+
+/* This is an inefficient representation; only use it via the dnslabel_table_*
+ * functions, so that is can be safely replaced with something smarter later. */
+#define MAX_LABELS 128
+/* Structures used to implement name compression */
+/* one label string and the message offset where it was first written */
+struct dnslabel_entry { char *v; off_t pos; };
+struct dnslabel_table {
+	int n_labels; /* number of current entries */
+	/* map from name to position in message */
+	struct dnslabel_entry labels[MAX_LABELS];
+};
+
+/* Initialize dnslabel_table. (The entry array need not be cleared; */
+/* only entries below n_labels are ever read.) */
+static void
+dnslabel_table_init(struct dnslabel_table *table)
+{
+	table->n_labels = 0;
+}
+
+/* Free all storage held by table, but not the table itself: release */
+/* every strdup()ed label and reset the count to zero. */
+static void
+dnslabel_clear(struct dnslabel_table *table)
+{
+	while (table->n_labels > 0)
+		free(table->labels[--table->n_labels].v);
+}
+
+/* Return the message offset at which `label` was first encoded, or */
+/* -1 if it has not been used in this message yet. */
+static int
+dnslabel_table_get_pos(const struct dnslabel_table *table, const char *label)
+{
+	int idx;
+	for (idx = 0; idx < table->n_labels; ++idx) {
+		if (strcmp(label, table->labels[idx].v) == 0)
+			return table->labels[idx].pos;
+	}
+	return -1;
+}
+
+/* remember that we've used the label at position pos */
+static int
+dnslabel_table_add(struct dnslabel_table *table, const char *label, off_t pos)
+{
+ char *v;
+ int p;
+ if (table->n_labels == MAX_LABELS)
+ return (-1);
+ v = strdup(label);
+ if (v == NULL)
+ return (-1);
+ p = table->n_labels++;
+ table->labels[p].v = v;
+ table->labels[p].pos = pos;
+
+ return (0);
+}
+
/* Converts a string to a length-prefixed set of DNS labels, starting */
/* at buf[j]. name and buf must not overlap. name_len should be the length */
/* of name. table is optional, and is used for compression. */
/* */
/* Input: abc.def */
/* Output: <3>abc<3>def<0> */
/* */
/* Returns the first index after the encoded name, or negative on error. */
/* -1 label was > 63 bytes */
/* -2 name too long to fit in buffer. */
/* */
static off_t
dnsname_to_labels(u8 *const buf, size_t buf_len, off_t j,
 const char *name, const int name_len,
 struct dnslabel_table *table) {
	const char *end = name + name_len;
	int ref = 0;
	u16 _t;

/* NOTE: these macros are intentionally defined here and only #undef'd
 * much later in the file; evdns_request_data_build() and
 * evdns_server_request_format_response() reuse them.  APPEND16 uses a
 * local `u16 _t`; APPEND32 expects a `u32 _t32` declared by the caller.
 * Both jump to a caller-provided `overflow:` label on truncation. */
#define APPEND16(x) do { \
	if (j + 2 > (off_t)buf_len) \
		goto overflow; \
	_t = htons(x); \
	memcpy(buf + j, &_t, 2); \
	j += 2; \
	} while (0)
#define APPEND32(x) do { \
	if (j + 4 > (off_t)buf_len) \
		goto overflow; \
	_t32 = htonl(x); \
	memcpy(buf + j, &_t32, 4); \
	j += 4; \
	} while (0)

	/* RFC 1035 limits a full domain name to 255 octets. */
	if (name_len > 255) return -2;

	for (;;) {
		const char *const start = name;
		/* If this suffix was already emitted, write a 2-byte
		 * compression pointer (top bits 11) and stop. */
		if (table && (ref = dnslabel_table_get_pos(table, name)) >= 0) {
			APPEND16(ref | 0xc000);
			return j;
		}
		name = strchr(name, '.');
		if (!name) {
			/* final label: everything up to the end of the name */
			const unsigned int label_len = end - start;
			if (label_len > 63) return -1;
			if ((size_t)(j+label_len+1) > buf_len) return -2;
			if (table) dnslabel_table_add(table, start, j);
			buf[j++] = label_len;

			memcpy(buf + j, start, end - start);
			j += end - start;
			break;
		} else {
			/* append length of the label. */
			const unsigned int label_len = name - start;
			if (label_len > 63) return -1;
			if ((size_t)(j+label_len+1) > buf_len) return -2;
			if (table) dnslabel_table_add(table, start, j);
			buf[j++] = label_len;

			memcpy(buf + j, start, name - start);
			j += name - start;
			/* hop over the '.' */
			name++;
		}
	}

	/* the labels must be terminated by a 0. */
	/* It's possible that the name ended in a . */
	/* in which case the zero is already there */
	if (!j || buf[j-1]) buf[j++] = 0;
	return j;
 overflow:
	return (-2);
}
+
/* Upper bound on the size of a DNS request packet carrying a query for
 * a name of NAME_LEN bytes.  The packet actually built may be smaller. */
static int
evdns_request_len(const int name_len) {
	/* generous allowance for the fixed DNS header */
	const int header_allowance = 96;
	/* encoded name: leading length byte plus trailing root label */
	const int encoded_name = name_len + 2;
	/* query type and query class, two octets each */
	const int fixed_fields = 4;
	return header_allowance + encoded_name + fixed_fields;
}
+
/* build a dns request packet into buf. buf should be at least as long */
/* as evdns_request_len told you it should be. */
/* */
/* Returns the amount of space used. Negative on error. */
/* Uses the APPEND16 macro defined above dnsname_to_labels(); the macro */
/* writes through the local `_t` and jumps to `overflow:` on truncation. */
static int
evdns_request_data_build(const char *const name, const int name_len,
 const u16 trans_id, const u16 type, const u16 class,
 u8 *const buf, size_t buf_len) {
	off_t j = 0; /* current offset into buf */
	u16 _t; /* used by the macros */

	/* 12-byte DNS header: id, flags, and the four section counts */
	APPEND16(trans_id);
	APPEND16(0x0100); /* standard query, recursion desired */
	APPEND16(1); /* one question */
	APPEND16(0); /* no answers */
	APPEND16(0); /* no authority */
	APPEND16(0); /* no additional */

	j = dnsname_to_labels(buf, buf_len, j, name, name_len, NULL);
	if (j < 0) {
		/* propagate -1 (label too long) / -2 (buffer too small) */
		return (int)j;
	}

	APPEND16(type);
	APPEND16(class);

	return (int)j;
 overflow:
	return (-1);
}
+
+/* exported function */
+struct evdns_server_port *
+evdns_add_server_port(int socket, int is_tcp, evdns_request_callback_fn_type cb, void *user_data)
+{
+ struct evdns_server_port *port;
+ if (!(port = malloc(sizeof(struct evdns_server_port))))
+ return NULL;
+ memset(port, 0, sizeof(struct evdns_server_port));
+
+ assert(!is_tcp); /* TCP sockets not yet implemented */
+ port->socket = socket;
+ port->refcnt = 1;
+ port->choked = 0;
+ port->closing = 0;
+ port->user_callback = cb;
+ port->user_data = user_data;
+ port->pending_replies = NULL;
+
+ event_set(&port->event, port->socket, EV_READ | EV_PERSIST,
+ server_port_ready_callback, port);
+ event_add(&port->event, NULL); /* check return. */
+ return port;
+}
+
+/* exported function */
+void
+evdns_close_server_port(struct evdns_server_port *port)
+{
+ if (--port->refcnt == 0)
+ server_port_free(port);
+ port->closing = 1;
+}
+
/* exported function */
/* Append one resource record to the given SECTION (answer, authority,
 * or additional) of the reply being assembled for _req.
 * name/type/class/ttl describe the RR.  If is_name is non-zero, data is
 * a NUL-terminated domain name (datalen is ignored and stored as the
 * (u16)-1 marker); otherwise data is datalen bytes of raw RDATA.
 * Returns 0 on success; -1 on unknown section, allocation failure, or
 * if a response has already been serialized for this request. */
int
evdns_server_request_add_reply(struct evdns_server_request *_req, int section, const char *name, int type, int class, int ttl, int datalen, int is_name, const char *data)
{
	struct server_request *req = TO_SERVER_REQUEST(_req);
	struct server_reply_item **itemp, *item;
	int *countp;

	if (req->response) /* have we already answered? */
		return (-1);

	switch (section) {
	case EVDNS_ANSWER_SECTION:
		itemp = &req->answer;
		countp = &req->n_answer;
		break;
	case EVDNS_AUTHORITY_SECTION:
		itemp = &req->authority;
		countp = &req->n_authority;
		break;
	case EVDNS_ADDITIONAL_SECTION:
		itemp = &req->additional;
		countp = &req->n_additional;
		break;
	default:
		return (-1);
	}
	/* walk to the tail of the section list so RRs keep insertion order */
	while (*itemp) {
		itemp = &((*itemp)->next);
	}
	item = malloc(sizeof(struct server_reply_item));
	if (!item)
		return -1;
	item->next = NULL;
	if (!(item->name = strdup(name))) {
		free(item);
		return -1;
	}
	item->type = type;
	item->dns_question_class = class;
	item->ttl = ttl;
	item->is_name = is_name != 0;
	item->datalen = 0;
	item->data = NULL;
	if (data) {
		if (item->is_name) {
			/* domain-name payload: copied as a C string; its
			 * encoded length is computed at serialization time */
			if (!(item->data = strdup(data))) {
				free(item->name);
				free(item);
				return -1;
			}
			item->datalen = (u16)-1;
		} else {
			/* raw RDATA payload of exactly datalen bytes */
			if (!(item->data = malloc(datalen))) {
				free(item->name);
				free(item);
				return -1;
			}
			item->datalen = datalen;
			memcpy(item->data, data, datalen);
		}
	}

	*itemp = item;
	++(*countp);
	return 0;
}
+
+/* exported function */
+int
+evdns_server_request_add_a_reply(struct evdns_server_request *req, const char *name, int n, void *addrs, int ttl)
+{
+ return evdns_server_request_add_reply(
+ req, EVDNS_ANSWER_SECTION, name, TYPE_A, CLASS_INET,
+ ttl, n*4, 0, addrs);
+}
+
+/* exported function */
+int
+evdns_server_request_add_aaaa_reply(struct evdns_server_request *req, const char *name, int n, void *addrs, int ttl)
+{
+ return evdns_server_request_add_reply(
+ req, EVDNS_ANSWER_SECTION, name, TYPE_AAAA, CLASS_INET,
+ ttl, n*16, 0, addrs);
+}
+
+/* exported function */
+int
+evdns_server_request_add_ptr_reply(struct evdns_server_request *req, struct in_addr *in, const char *inaddr_name, const char *hostname, int ttl)
+{
+ u32 a;
+ char buf[32];
+ assert(in || inaddr_name);
+ assert(!(in && inaddr_name));
+ if (in) {
+ a = ntohl(in->s_addr);
+ evutil_snprintf(buf, sizeof(buf), "%d.%d.%d.%d.in-addr.arpa",
+ (int)(u8)((a )&0xff),
+ (int)(u8)((a>>8 )&0xff),
+ (int)(u8)((a>>16)&0xff),
+ (int)(u8)((a>>24)&0xff));
+ inaddr_name = buf;
+ }
+ return evdns_server_request_add_reply(
+ req, EVDNS_ANSWER_SECTION, inaddr_name, TYPE_PTR, CLASS_INET,
+ ttl, -1, 1, hostname);
+}
+
+/* exported function */
+int
+evdns_server_request_add_cname_reply(struct evdns_server_request *req, const char *name, const char *cname, int ttl)
+{
+ return evdns_server_request_add_reply(
+ req, EVDNS_ANSWER_SECTION, name, TYPE_CNAME, CLASS_INET,
+ ttl, -1, 1, cname);
+}
+
+
/* Serialize REQ (questions plus any RRs added via *_add_reply) into a
 * freshly malloc()ed wire-format packet stored in req->response.
 * ERR (0..15) becomes the RCODE.  Replies longer than 512 bytes are cut
 * to 512 with the TC bit set, per classic UDP DNS limits.  The RR lists
 * are freed in all exit paths.  Returns 0 on success, -1 on error.
 * Relies on the APPEND16/APPEND32 macros defined near
 * dnsname_to_labels(); they use the locals _t/_t32 and jump to the
 * overflow: label below. */
static int
evdns_server_request_format_response(struct server_request *req, int err)
{
	unsigned char buf[1500];
	size_t buf_len = sizeof(buf);
	off_t j = 0, r;
	u16 _t;
	u32 _t32;
	int i;
	u16 flags;
	struct dnslabel_table table;

	if (err < 0 || err > 15) return -1;

	/* Set response bit and error code; copy OPCODE and RD fields from
	 * question; copy RA and AA if set by caller. */
	flags = req->base.flags;
	flags |= (0x8000 | err);

	dnslabel_table_init(&table);
	/* 12-byte header: id, flags, and the four section counts */
	APPEND16(req->trans_id);
	APPEND16(flags);
	APPEND16(req->base.nquestions);
	APPEND16(req->n_answer);
	APPEND16(req->n_authority);
	APPEND16(req->n_additional);

	/* Add questions. */
	for (i=0; i < req->base.nquestions; ++i) {
		const char *s = req->base.questions[i]->name;
		j = dnsname_to_labels(buf, buf_len, j, s, strlen(s), &table);
		if (j < 0) {
			dnslabel_clear(&table);
			return (int) j;
		}
		APPEND16(req->base.questions[i]->type);
		APPEND16(req->base.questions[i]->dns_question_class);
	}

	/* Add answer, authority, and additional sections. */
	for (i=0; i<3; ++i) {
		struct server_reply_item *item;
		if (i==0)
			item = req->answer;
		else if (i==1)
			item = req->authority;
		else
			item = req->additional;
		while (item) {
			r = dnsname_to_labels(buf, buf_len, j, item->name, strlen(item->name), &table);
			if (r < 0)
				goto overflow;
			j = r;

			APPEND16(item->type);
			APPEND16(item->dns_question_class);
			APPEND32(item->ttl);
			if (item->is_name) {
				/* RDATA is a domain name: reserve the 2-byte
				 * RDLENGTH slot, encode the name (it may be
				 * compressed, so length is known only after),
				 * then backpatch the real length. */
				off_t len_idx = j, name_start;
				j += 2;
				name_start = j;
				r = dnsname_to_labels(buf, buf_len, j, item->data, strlen(item->data), &table);
				if (r < 0)
					goto overflow;
				j = r;
				_t = htons( (short) (j-name_start) );
				memcpy(buf+len_idx, &_t, 2);
			} else {
				APPEND16(item->datalen);
				if (j+item->datalen > (off_t)buf_len)
					goto overflow;
				memcpy(buf+j, item->data, item->datalen);
				j += item->datalen;
			}
			item = item->next;
		}
	}

	/* Oversized responses fall through here too: truncate at 512
	 * bytes and set the TC bit so the client retries over TCP. */
	if (j > 512) {
overflow:
		j = 512;
		buf[2] |= 0x02; /* set the truncated bit. */
	}

	req->response_len = j;

	if (!(req->response = malloc(req->response_len))) {
		server_request_free_answers(req);
		dnslabel_clear(&table);
		return (-1);
	}
	memcpy(req->response, buf, req->response_len);
	server_request_free_answers(req);
	dnslabel_clear(&table);
	return (0);
}
+
/* exported function */
/* Serialize (if needed) and send the response for _req with RCODE err.
 * Returns 0 when the packet was sent (the request is then freed), 1
 * when the socket would block (the request is queued on the port's
 * pending-replies ring and a write event is armed), and negative on
 * formatting or send errors. */
int
evdns_server_request_respond(struct evdns_server_request *_req, int err)
{
	struct server_request *req = TO_SERVER_REQUEST(_req);
	struct evdns_server_port *port = req->port;
	int r;
	if (!req->response) {
		if ((r = evdns_server_request_format_response(req, err))<0)
			return r;
	}

	r = sendto(port->socket, req->response, req->response_len, 0,
			   (struct sockaddr*) &req->addr, req->addrlen);
	if (r<0) {
		int sock_err = last_error(port->socket);
		if (! error_is_eagain(sock_err))
			return -1;

		/* EAGAIN: park the reply on the port's circular
		 * pending-replies list and try again on EV_WRITE. */
		if (port->pending_replies) {
			/* splice in at the tail of the existing ring */
			req->prev_pending = port->pending_replies->prev_pending;
			req->next_pending = port->pending_replies;
			req->prev_pending->next_pending =
				req->next_pending->prev_pending = req;
		} else {
			/* first queued reply: start a one-element ring and
			 * re-register the event with EV_WRITE added */
			req->prev_pending = req->next_pending = req;
			port->pending_replies = req;
			port->choked = 1;

			(void) event_del(&port->event);
			event_set(&port->event, port->socket, (port->closing?0:EV_READ) | EV_WRITE | EV_PERSIST, server_port_ready_callback, port);

			if (event_add(&port->event, NULL) < 0) {
				log(EVDNS_LOG_WARN, "Error from libevent when adding event for DNS server");
			}

		}

		return 1;
	}
	/* sent: free the request; if that freed the port too, stop here */
	if (server_request_free(req))
		return 0;

	if (port->pending_replies)
		server_port_flush(port);

	return 0;
}
+
+/* Free all storage held by RRs in req. */
+static void
+server_request_free_answers(struct server_request *req)
+{
+ struct server_reply_item *victim, *next, **list;
+ int i;
+ for (i = 0; i < 3; ++i) {
+ if (i==0)
+ list = &req->answer;
+ else if (i==1)
+ list = &req->authority;
+ else
+ list = &req->additional;
+
+ victim = *list;
+ while (victim) {
+ next = victim->next;
+ free(victim->name);
+ if (victim->data)
+ free(victim->data);
+ free(victim);
+ victim = next;
+ }
+ *list = NULL;
+ }
+}
+
/* Free all storage held by req, and remove links to it. */
/* return true iff we just wound up freeing the server_port. */
static int
server_request_free(struct server_request *req)
{
	int i, rc=1;
	/* free the parsed questions that arrived with the request */
	if (req->base.questions) {
		for (i = 0; i < req->base.nquestions; ++i)
			free(req->base.questions[i]);
		free(req->base.questions);
	}

	if (req->port) {
		/* if this request is the head of the port's pending ring,
		 * advance the head (or clear it if we were the last one) */
		if (req->port->pending_replies == req) {
			if (req->next_pending)
				req->port->pending_replies = req->next_pending;
			else
				req->port->pending_replies = NULL;
		}
		/* drop the reference this request held on the port */
		rc = --req->port->refcnt;
	}

	if (req->response) {
		free(req->response);
	}

	server_request_free_answers(req);

	/* unlink from the circular pending-replies ring, if on one */
	if (req->next_pending && req->next_pending != req) {
		req->next_pending->prev_pending = req->prev_pending;
		req->prev_pending->next_pending = req->next_pending;
	}

	/* ours was the last reference: the port goes away too */
	if (rc == 0) {
		server_port_free(req->port);
		free(req);
		return (1);
	}
	free(req);
	return (0);
}
+
+/* Free all storage held by an evdns_server_port. Only called when */
+static void
+server_port_free(struct evdns_server_port *port)
+{
+ assert(port);
+ assert(!port->refcnt);
+ assert(!port->pending_replies);
+ if (port->socket > 0) {
+ CLOSE_SOCKET(port->socket);
+ port->socket = -1;
+ }
+ (void) event_del(&port->event);
+ /* XXXX actually free the port? -NM */
+}
+
/* exported function */
/* Discard _req without sending any reply; releases all its storage and
 * its reference on the owning port. */
int
evdns_server_request_drop(struct evdns_server_request *_req)
{
	server_request_free(TO_SERVER_REQUEST(_req));
	return 0;
}
+
+/* exported function */
+int
+evdns_server_request_get_requesting_addr(struct evdns_server_request *_req, struct sockaddr *sa, int addr_len)
+{
+ struct server_request *req = TO_SERVER_REQUEST(_req);
+ if (addr_len < (int)req->addrlen)
+ return -1;
+ memcpy(sa, &(req->addr), req->addrlen);
+ return req->addrlen;
+}
+
+#undef APPEND16
+#undef APPEND32
+
/* this is a libevent callback function which is called when a request */
/* has timed out. */
static void
evdns_request_timeout_callback(int fd, short events, void *arg) {
	struct request *const req = (struct request *) arg;
	(void) fd;
	(void) events;

	log(EVDNS_LOG_DEBUG, "Request %lx timed out", (unsigned long) arg);

	/* charge the timeout to the nameserver; too many in a row and we
	 * mark the server as failed */
	req->ns->timedout++;
	if (req->ns->timedout > global_max_nameserver_timeout) {
		req->ns->timedout = 0;
		nameserver_failed(req->ns, "request timed out.");
	}

	(void) evtimer_del(&req->timeout_event);
	if (req->tx_count >= global_max_retransmits) {
		/* this request has failed */
		reply_callback(req, 0, DNS_ERR_TIMEOUT, NULL);
		request_finished(req, &req_head);
	} else {
		/* retransmit it */
		evdns_request_transmit(req);
	}
}
+
+/* try to send a request to a given server. */
+/* */
+/* return: */
+/* 0 ok */
+/* 1 temporary failure */
+/* 2 other failure */
+static int
+evdns_request_transmit_to(struct request *req, struct nameserver *server) {
+ struct sockaddr_in sin;
+ int r;
+ memset(&sin, 0, sizeof(sin));
+ sin.sin_addr.s_addr = req->ns->address;
+ sin.sin_port = req->ns->port;
+ sin.sin_family = AF_INET;
+
+ r = sendto(server->socket, req->request, req->request_len, 0,
+ (struct sockaddr*)&sin, sizeof(sin));
+ if (r < 0) {
+ int err = last_error(server->socket);
+ if (error_is_eagain(err)) return 1;
+ nameserver_failed(req->ns, strerror(err));
+ return 2;
+ } else if (r != (int)req->request_len) {
+ return 1; /* short write */
+ } else {
+ return 0;
+ }
+}
+
/* try to send a request, updating the fields of the request */
/* as needed */
/* */
/* return: */
/* 0 ok */
/* 1 failed */
static int
evdns_request_transmit(struct request *req) {
	int retcode = 0, r;

	/* if we fail to send this packet then this flag marks it */
	/* for evdns_transmit */
	req->transmit_me = 1;
	/* 0xffff is the sentinel for "no transaction id assigned yet";
	 * transmitting such a request would be a logic error */
	if (req->trans_id == 0xffff) abort();

	if (req->ns->choked) {
		/* don't bother trying to write to a socket */
		/* which we have had EAGAIN from */
		return 1;
	}

	r = evdns_request_transmit_to(req, req->ns);
	switch (r) {
	case 1:
		/* temp failure */
		req->ns->choked = 1;
		nameserver_write_waiting(req->ns, 1);
		return 1;
	case 2:
		/* failed in some other way */
		retcode = 1;
		/* fall through */
	default:
		/* all ok */
		/* even a hard failure (case 2) arms the retry timer; the
		 * timeout callback decides whether to retransmit or fail */
		log(EVDNS_LOG_DEBUG,
			"Setting timeout for request %lx", (unsigned long) req);
		if (evtimer_add(&req->timeout_event, &global_timeout) < 0) {
			log(EVDNS_LOG_WARN,
				"Error from libevent when adding timer for request %lx",
				(unsigned long) req);
			/* ???? Do more? */
		}
		req->tx_count++;
		req->transmit_me = 0;
		return retcode;
	}
}
+
+static void
+nameserver_probe_callback(int result, char type, int count, int ttl, void *addresses, void *arg) {
+ struct nameserver *const ns = (struct nameserver *) arg;
+ (void) type;
+ (void) count;
+ (void) ttl;
+ (void) addresses;
+
+ if (result == DNS_ERR_NONE || result == DNS_ERR_NOTEXIST) {
+ /* this is a good reply */
+ nameserver_up(ns);
+ } else nameserver_probe_failed(ns);
+}
+
+static void
+nameserver_send_probe(struct nameserver *const ns) {
+ struct request *req;
+ /* here we need to send a probe to a given nameserver */
+ /* in the hope that it is up now. */
+
+ log(EVDNS_LOG_DEBUG, "Sending probe to %s", debug_ntoa(ns->address));
+
+ req = request_new(TYPE_A, "www.google.com", DNS_QUERY_NO_SEARCH, nameserver_probe_callback, ns);
+ if (!req) return;
+ /* we force this into the inflight queue no matter what */
+ request_trans_id_set(req, transaction_id_pick());
+ req->ns = ns;
+ request_submit(req);
+}
+
+/* returns: */
+/* 0 didn't try to transmit anything */
+/* 1 tried to transmit something */
+static int
+evdns_transmit(void) {
+ char did_try_to_transmit = 0;
+
+ if (req_head) {
+ struct request *const started_at = req_head, *req = req_head;
+ /* first transmit all the requests which are currently waiting */
+ do {
+ if (req->transmit_me) {
+ did_try_to_transmit = 1;
+ evdns_request_transmit(req);
+ }
+
+ req = req->next;
+ } while (req != started_at);
+ }
+
+ return did_try_to_transmit;
+}
+
+/* exported function */
+int
+evdns_count_nameservers(void)
+{
+ const struct nameserver *server = server_head;
+ int n = 0;
+ if (!server)
+ return 0;
+ do {
+ ++n;
+ server = server->next;
+ } while (server != server_head);
+ return n;
+}
+
/* exported function */
/* Tear down every configured nameserver and park all inflight requests
 * on the waiting queue (without their timers, ids, or servers) so they
 * can be re-issued once new nameservers are added and evdns_resume()
 * is called.  Returns 0 (also when there was nothing to do). */
int
evdns_clear_nameservers_and_suspend(void)
{
	struct nameserver *server = server_head, *started_at = server_head;
	struct request *req = req_head, *req_started_at = req_head;

	if (!server)
		return 0;
	/* first pass: free the circular nameserver list */
	while (1) {
		struct nameserver *next = server->next;
		(void) event_del(&server->event);
		if (evtimer_initialized(&server->timeout_event))
			(void) evtimer_del(&server->timeout_event);
		if (server->socket >= 0)
			CLOSE_SOCKET(server->socket);
		free(server);
		if (next == started_at)
			break;
		server = next;
	}
	server_head = NULL;
	global_good_nameservers = 0;

	/* second pass: strip each inflight request of its per-server state
	 * and move it to the waiting queue */
	while (req) {
		struct request *next = req->next;
		req->tx_count = req->reissue_count = 0;
		req->ns = NULL;
		/* ???? What to do about searches? */
		(void) evtimer_del(&req->timeout_event);
		req->trans_id = 0;
		req->transmit_me = 0;

		global_requests_waiting++;
		evdns_request_insert(req, &req_waiting_head);
		/* We want to insert these suspended elements at the front of
		 * the waiting queue, since they were pending before any of
		 * the waiting entries were added. This is a circular list,
		 * so we can just shift the start back by one.*/
		req_waiting_head = req_waiting_head->prev;

		if (next == req_started_at)
			break;
		req = next;
	}
	req_head = NULL;
	global_requests_inflight = 0;

	return 0;
}
+
+
/* exported function */
/* Restart service after evdns_clear_nameservers_and_suspend(): requests
 * parked on the waiting queue are pumped back into flight. */
int
evdns_resume(void)
{
	evdns_requests_pump_waiting_queue();
	return 0;
}
+
/* Register a nameserver at ADDRESS (network byte order) and host-order
 * PORT.  Creates a nonblocking UDP socket, registers a persistent read
 * event, and splices the server into the circular list.
 * Returns 0 on success, 3 if the address is already configured, -1 on
 * allocation failure, 1 on socket failure, 2 on event failure. */
static int
_evdns_nameserver_add_impl(unsigned long int address, int port) {
	/* first check to see if we already have this nameserver */

	const struct nameserver *server = server_head, *const started_at = server_head;
	struct nameserver *ns;
	int err = 0;
	if (server) {
		do {
			if (server->address == address) return 3;
			server = server->next;
		} while (server != started_at);
	}

	ns = (struct nameserver *) malloc(sizeof(struct nameserver));
	if (!ns) return -1;

	memset(ns, 0, sizeof(struct nameserver));

	evtimer_set(&ns->timeout_event, nameserver_prod_callback, ns);

	ns->socket = socket(PF_INET, SOCK_DGRAM, 0);
	if (ns->socket < 0) { err = 1; goto out1; }
	evutil_make_socket_nonblocking(ns->socket);

	ns->address = address;
	ns->port = htons(port);
	ns->state = 1; /* mark the server as up */
	event_set(&ns->event, ns->socket, EV_READ | EV_PERSIST, nameserver_ready_callback, ns);
	if (event_add(&ns->event, NULL) < 0) {
		err = 2;
		goto out2;
	}

	log(EVDNS_LOG_DEBUG, "Added nameserver %s", debug_ntoa(address));

	/* insert this nameserver into the list of them */
	if (!server_head) {
		ns->next = ns->prev = ns;
		server_head = ns;
	} else {
		/* splice in just after the head */
		ns->next = server_head->next;
		ns->prev = server_head;
		server_head->next = ns;
		if (server_head->prev == server_head) {
			server_head->prev = ns;
		}
	}

	global_good_nameservers++;

	return 0;

/* goto-based unwind: release whatever was acquired before the failure */
out2:
	CLOSE_SOCKET(ns->socket);
out1:
	free(ns);
	log(EVDNS_LOG_WARN, "Unable to add nameserver %s: error %d", debug_ntoa(address), err);
	return err;
}
+
/* exported function */
/* Add a nameserver at ADDRESS on the standard DNS port (53). */
int
evdns_nameserver_add(unsigned long int address) {
	return _evdns_nameserver_add_impl(address, 53);
}
+
/* exported function */
/* Add a nameserver given as dotted-quad text, optionally with a
 * ":port" suffix (default 53).  Returns 0/3/-1/1/2 as the underlying
 * add, or 4 when the string cannot be parsed. */
int
evdns_nameserver_ip_add(const char *ip_as_string) {
	struct in_addr ina;
	int port = 53;
	char addr_buf[20];
	const char *addr_text = ip_as_string;
	const char *colon = strchr(ip_as_string, ':');

	if (colon) {
		const int addr_len = (int)(colon - ip_as_string);
		port = strtoint(colon + 1);
		if (port < 0 || port > 65535)
			return 4;
		if (addr_len >= (int)sizeof(addr_buf))
			return 4;
		/* isolate the address part in a local buffer */
		memcpy(addr_buf, ip_as_string, addr_len);
		addr_buf[addr_len] = '\0';
		addr_text = addr_buf;
	}
	if (!inet_aton(addr_text, &ina))
		return 4;
	return _evdns_nameserver_add_impl(ina.s_addr, port);
}
+
+/* insert into the tail of the queue */
+static void
+evdns_request_insert(struct request *req, struct request **head) {
+ if (!*head) {
+ *head = req;
+ req->next = req->prev = req;
+ return;
+ }
+
+ req->prev = (*head)->prev;
+ req->prev->next = req;
+ req->next = *head;
+ (*head)->prev = req;
+}
+
/* Count the '.' characters in S. */
static int
string_num_dots(const char *s) {
	int dots = 0;
	const char *p;

	for (p = strchr(s, '.'); p; p = strchr(p + 1, '.'))
		dots++;
	return dots;
}
+
/* Allocate and initialize a request for a NAME/TYPE query.  The wire
 * packet is built immediately into storage appended to the same
 * allocation.  If the inflight limit has been reached, no transaction
 * id or nameserver is assigned yet (trans_id stays the 0xffff sentinel)
 * and the caller is expected to queue the request.  Returns NULL on
 * allocation or packet-build failure. */
static struct request *
request_new(int type, const char *name, int flags,
    evdns_callback_type callback, void *user_ptr) {
	const char issuing_now =
	    (global_requests_inflight < global_max_requests_inflight) ? 1 : 0;

	const int name_len = strlen(name);
	const int request_max_len = evdns_request_len(name_len);
	const u16 trans_id = issuing_now ? transaction_id_pick() : 0xffff;
	/* the request data is alloced in a single block with the header */
	struct request *const req =
	    (struct request *) malloc(sizeof(struct request) + request_max_len);
	int rlen;
	(void) flags;

	if (!req) return NULL;
	memset(req, 0, sizeof(struct request));

	evtimer_set(&req->timeout_event, evdns_request_timeout_callback, req);

	/* request data lives just after the header */
	req->request = ((u8 *) req) + sizeof(struct request);
	/* denotes that the request data shouldn't be free()ed */
	req->request_appended = 1;
	rlen = evdns_request_data_build(name, name_len, trans_id,
	    type, CLASS_INET, req->request, request_max_len);
	if (rlen < 0)
		goto err1;
	req->request_len = rlen;
	req->trans_id = trans_id;
	req->tx_count = 0;
	req->request_type = type;
	req->user_pointer = user_ptr;
	req->user_callback = callback;
	req->ns = issuing_now ? nameserver_pick() : NULL;
	req->next = req->prev = NULL;

	return req;
err1:
	free(req);
	return NULL;
}
+
+static void
+request_submit(struct request *const req) {
+ if (req->ns) {
+ /* if it has a nameserver assigned then this is going */
+ /* straight into the inflight queue */
+ evdns_request_insert(req, &req_head);
+ global_requests_inflight++;
+ evdns_request_transmit(req);
+ } else {
+ evdns_request_insert(req, &req_waiting_head);
+ global_requests_waiting++;
+ }
+}
+
+/* exported function */
+int evdns_resolve_ipv4(const char *name, int flags,
+ evdns_callback_type callback, void *ptr) {
+ log(EVDNS_LOG_DEBUG, "Resolve requested for %s", name);
+ if (flags & DNS_QUERY_NO_SEARCH) {
+ struct request *const req =
+ request_new(TYPE_A, name, flags, callback, ptr);
+ if (req == NULL)
+ return (1);
+ request_submit(req);
+ return (0);
+ } else {
+ return (search_request_new(TYPE_A, name, flags, callback, ptr));
+ }
+}
+
+/* exported function */
+int evdns_resolve_ipv6(const char *name, int flags,
+ evdns_callback_type callback, void *ptr) {
+ log(EVDNS_LOG_DEBUG, "Resolve requested for %s", name);
+ if (flags & DNS_QUERY_NO_SEARCH) {
+ struct request *const req =
+ request_new(TYPE_AAAA, name, flags, callback, ptr);
+ if (req == NULL)
+ return (1);
+ request_submit(req);
+ return (0);
+ } else {
+ return (search_request_new(TYPE_AAAA, name, flags, callback, ptr));
+ }
+}
+
+int evdns_resolve_reverse(const struct in_addr *in, int flags, evdns_callback_type callback, void *ptr) {
+ char buf[32];
+ struct request *req;
+ u32 a;
+ assert(in);
+ a = ntohl(in->s_addr);
+ evutil_snprintf(buf, sizeof(buf), "%d.%d.%d.%d.in-addr.arpa",
+ (int)(u8)((a )&0xff),
+ (int)(u8)((a>>8 )&0xff),
+ (int)(u8)((a>>16)&0xff),
+ (int)(u8)((a>>24)&0xff));
+ log(EVDNS_LOG_DEBUG, "Resolve requested for %s (reverse)", buf);
+ req = request_new(TYPE_PTR, buf, flags, callback, ptr);
+ if (!req) return 1;
+ request_submit(req);
+ return 0;
+}
+
+int evdns_resolve_reverse_ipv6(const struct in6_addr *in, int flags, evdns_callback_type callback, void *ptr) {
+ /* 32 nybbles, 32 periods, "ip6.arpa", NUL. */
+ char buf[73];
+ char *cp;
+ struct request *req;
+ int i;
+ assert(in);
+ cp = buf;
+ for (i=15; i >= 0; --i) {
+ u8 byte = in->s6_addr[i];
+ *cp++ = "0123456789abcdef"[byte & 0x0f];
+ *cp++ = '.';
+ *cp++ = "0123456789abcdef"[byte >> 4];
+ *cp++ = '.';
+ }
+ assert(cp + strlen("ip6.arpa") < buf+sizeof(buf));
+ memcpy(cp, "ip6.arpa", strlen("ip6.arpa")+1);
+ log(EVDNS_LOG_DEBUG, "Resolve requested for %s (reverse)", buf);
+ req = request_new(TYPE_PTR, buf, flags, callback, ptr);
+ if (!req) return 1;
+ request_submit(req);
+ return 0;
+}
+
+/*/////////////////////////////////////////////////////////////////// */
+/* Search support */
+/* */
+/* the libc resolver has support for searching a number of domains */
+/* to find a name. If nothing else then it takes the single domain */
+/* from the gethostname() call. */
+/* */
+/* It can also be configured via the domain and search options in a */
+/* resolv.conf. */
+/* */
+/* The ndots option controls how many dots it takes for the resolver */
+/* to decide that a name is non-local and so try a raw lookup first. */
+
/* One search suffix; the suffix text (len bytes, not NUL-terminated)
 * is appended directly after this struct in the same allocation. */
struct search_domain {
	int len;
	struct search_domain *next;
	/* the text string is appended to this structure */
};

/* Reference-counted search configuration; each in-flight search request
 * holds a reference so the state survives evdns_search_clear(). */
struct search_state {
	int refcount;
	int ndots; /* names with fewer dots get a suffix appended first */
	int num_domains; /* length of the list at head */
	struct search_domain *head;
};

/* the active search configuration, if any */
static struct search_state *global_search_state = NULL;
+
+static void
+search_state_decref(struct search_state *const state) {
+ if (!state) return;
+ state->refcount--;
+ if (!state->refcount) {
+ struct search_domain *next, *dom;
+ for (dom = state->head; dom; dom = next) {
+ next = dom->next;
+ free(dom);
+ }
+ free(state);
+ }
+}
+
+static struct search_state *
+search_state_new(void) {
+ struct search_state *state = (struct search_state *) malloc(sizeof(struct search_state));
+ if (!state) return NULL;
+ memset(state, 0, sizeof(struct search_state));
+ state->refcount = 1;
+ state->ndots = 1;
+
+ return state;
+}
+
+static void
+search_postfix_clear(void) {
+ search_state_decref(global_search_state);
+
+ global_search_state = search_state_new();
+}
+
/* exported function */
/* Forget all configured search domains. */
void
evdns_search_clear(void) {
	search_postfix_clear();
}
+
+static void
+search_postfix_add(const char *domain) {
+ int domain_len;
+ struct search_domain *sdomain;
+ while (domain[0] == '.') domain++;
+ domain_len = strlen(domain);
+
+ if (!global_search_state) global_search_state = search_state_new();
+ if (!global_search_state) return;
+ global_search_state->num_domains++;
+
+ sdomain = (struct search_domain *) malloc(sizeof(struct search_domain) + domain_len);
+ if (!sdomain) return;
+ memcpy( ((u8 *) sdomain) + sizeof(struct search_domain), domain, domain_len);
+ sdomain->next = global_search_state->head;
+ sdomain->len = domain_len;
+
+ global_search_state->head = sdomain;
+}
+
+/* reverse the order of members in the postfix list. This is needed because, */
+/* when parsing resolv.conf we push elements in the wrong order */
+static void
+search_reverse(void) {
+ struct search_domain *cur, *prev = NULL, *next;
+ cur = global_search_state->head;
+ while (cur) {
+ next = cur->next;
+ cur->next = prev;
+ prev = cur;
+ cur = next;
+ }
+
+ global_search_state->head = prev;
+}
+
/* exported function */
/* Append DOMAIN to the set of search suffixes. */
void
evdns_search_add(const char *domain) {
	search_postfix_add(domain);
}
+
+/* exported function */
+void
+evdns_search_ndots_set(const int ndots) {
+ if (!global_search_state) global_search_state = search_state_new();
+ if (!global_search_state) return;
+ global_search_state->ndots = ndots;
+}
+
+static void
+search_set_from_hostname(void) {
+ char hostname[HOST_NAME_MAX + 1], *domainname;
+
+ search_postfix_clear();
+ if (gethostname(hostname, sizeof(hostname))) return;
+ domainname = strchr(hostname, '.');
+ if (!domainname) return;
+ search_postfix_add(domainname);
+}
+
+/* warning: returns malloced string */
+static char *
+search_make_new(const struct search_state *const state, int n, const char *const base_name) {
+ const int base_len = strlen(base_name);
+ const char need_to_append_dot = base_name[base_len - 1] == '.' ? 0 : 1;
+ struct search_domain *dom;
+
+ for (dom = state->head; dom; dom = dom->next) {
+ if (!n--) {
+ /* this is the postfix we want */
+ /* the actual postfix string is kept at the end of the structure */
+ const u8 *const postfix = ((u8 *) dom) + sizeof(struct search_domain);
+ const int postfix_len = dom->len;
+ char *const newname = (char *) malloc(base_len + need_to_append_dot + postfix_len + 1);
+ if (!newname) return NULL;
+ memcpy(newname, base_name, base_len);
+ if (need_to_append_dot) newname[base_len] = '.';
+ memcpy(newname + base_len + need_to_append_dot, postfix, postfix_len);
+ newname[base_len + need_to_append_dot + postfix_len] = 0;
+ return newname;
+ }
+ }
+
+ /* we ran off the end of the list and still didn't find the requested string */
+ abort();
+ return NULL; /* unreachable; stops warnings in some compilers. */
+}
+
/* Issue a lookup for NAME, applying the search-domain machinery: if the
 * name has at least ndots dots it is tried raw first; otherwise the
 * first search suffix is appended.  search_try_next() walks the
 * remaining suffixes on NXDOMAIN.  Returns 0 on submission, 1 on
 * failure. */
static int
search_request_new(int type, const char *const name, int flags, evdns_callback_type user_callback, void *user_arg) {
	assert(type == TYPE_A || type == TYPE_AAAA);
	if ( ((flags & DNS_QUERY_NO_SEARCH) == 0) &&
	     global_search_state &&
		 global_search_state->num_domains) {
		/* we have some domains to search */
		struct request *req;
		if (string_num_dots(name) >= global_search_state->ndots) {
			/* dotted enough: try the name as given first;
			 * search_index -1 means "raw name was tried" */
			req = request_new(type, name, flags, user_callback, user_arg);
			if (!req) return 1;
			req->search_index = -1;
		} else {
			/* start with the first search suffix appended */
			char *const new_name = search_make_new(global_search_state, 0, name);
			if (!new_name) return 1;
			req = request_new(type, new_name, flags, user_callback, user_arg);
			free(new_name);
			if (!req) return 1;
			req->search_index = 0;
		}
		/* NOTE(review): this strdup result is not checked; on OOM
		 * search_origname is NULL and later search processing would
		 * dereference it -- confirm and harden upstream. */
		req->search_origname = strdup(name);
		req->search_state = global_search_state;
		req->search_flags = flags;
		global_search_state->refcount++;
		request_submit(req);
		return 0;
	} else {
		struct request *const req = request_new(type, name, flags, user_callback, user_arg);
		if (!req) return 1;
		request_submit(req);
		return 0;
	}
}
+
/* this is called when a request has failed to find a name. We need to check */
/* if it is part of a search and, if so, try the next name in the list */
/* returns: */
/* 0 another request has been submitted */
/* 1 no more requests needed */
static int
search_try_next(struct request *const req) {
	if (req->search_state) {
		/* it is part of a search */
		char *new_name;
		struct request *newreq;
		req->search_index++;
		if (req->search_index >= req->search_state->num_domains) {
			/* no more postfixes to try, however we may need to try */
			/* this name without a postfix */
			if (string_num_dots(req->search_origname) < req->search_state->ndots) {
				/* yep, we need to try it raw */
				newreq = request_new(req->request_type, req->search_origname, req->search_flags, req->user_callback, req->user_pointer);
				log(EVDNS_LOG_DEBUG, "Search: trying raw query %s", req->search_origname);
				if (newreq) {
					request_submit(newreq);
					return 0;
				}
			}
			return 1;
		}

		/* try the name with the next search suffix appended */
		new_name = search_make_new(req->search_state, req->search_index, req->search_origname);
		if (!new_name) return 1;
		log(EVDNS_LOG_DEBUG, "Search: now trying %s (%d)", new_name, req->search_index);
		newreq = request_new(req->request_type, new_name, req->search_flags, req->user_callback, req->user_pointer);
		free(new_name);
		if (!newreq) return 1;
		/* ownership of search_origname transfers to the new request;
		 * clearing it on REQ prevents a double free when REQ is
		 * finished */
		newreq->search_origname = req->search_origname;
		req->search_origname = NULL;
		newreq->search_state = req->search_state;
		newreq->search_flags = req->search_flags;
		newreq->search_index = req->search_index;
		newreq->search_state->refcount++;
		request_submit(newreq);
		return 0;
	}
	return 1;
}
+
+/* Release the search-related resources held by a finished request: drop the
+ * reference on its search state and free the saved original name.  Both
+ * pointers are cleared so the cleanup is safe to run more than once. */
+static void
+search_request_finished(struct request *const req) {
+	struct search_state *const state = req->search_state;
+	char *const origname = req->search_origname;
+
+	if (state != NULL) {
+		req->search_state = NULL;
+		search_state_decref(state);
+	}
+	if (origname != NULL) {
+		req->search_origname = NULL;
+		free(origname);
+	}
+}
+
+/*/////////////////////////////////////////////////////////////////// */
+/* Parsing resolv.conf files */
+
+/* Apply fallback configuration when no usable resolv.conf data exists:
+ * derive search domains from the local hostname and/or add the local
+ * resolver at 127.0.0.1, depending on which DNS_OPTION_* bits are set. */
+static void
+evdns_resolv_set_defaults(int flags) {
+	/* if the file isn't found then we assume a local resolver */
+	if (flags & DNS_OPTION_SEARCH) search_set_from_hostname();
+	if (flags & DNS_OPTION_NAMESERVERS) evdns_nameserver_ip_add("127.0.0.1");
+}
+
+#ifndef HAVE_STRTOK_R
+/* Minimal fallback for platforms without strtok_r().  It simply forwards to
+ * strtok(), so it is NOT reentrant and the state argument is ignored; the
+ * callers in this file run one tokenization at a time, which makes this
+ * acceptable. */
+static char *
+strtok_r(char *s, const char *delim, char **state) {
+	return strtok(s, delim);
+}
+#endif
+
+/* helper version of atoi which returns -1 on error */
+/* Accepts only a string that is entirely a base-10 integer; anything else */
+/* (including the empty string) yields -1. */
+static int
+strtoint(const char *const str) {
+	char *endptr;
+	const int r = strtol(str, &endptr, 10);
+	/* endptr == str catches the empty string / no digits, which strtol
+	 * reports as 0 rather than as an error; *endptr catches trailing
+	 * garbage such as "12x" */
+	if (endptr == str || *endptr) return -1;
+	return r;
+}
+
+/* helper version of atoi that returns -1 on error and clips to bounds. */
+static int
+strtoint_clipped(const char *const str, int min, int max)
+{
+	const int parsed = strtoint(str);
+
+	if (parsed == -1)
+		return -1;
+	if (parsed < min)
+		return min;
+	if (parsed > max)
+		return max;
+	return parsed;
+}
+
+/* exported function */
+/* Set one configuration option by name.  Option names include the trailing
+ * colon ("ndots:", "timeout:", ...).  The DNS_OPTION_* bit matching the
+ * option must be set in flags, otherwise the option is silently ignored
+ * (return 0).  Returns 0 on success or ignore, -1 on a bad value. */
+int
+evdns_set_option(const char *option, const char *val, int flags)
+{
+	if (!strncmp(option, "ndots:", 6)) {
+		const int ndots = strtoint(val);
+		if (ndots == -1) return -1;
+		if (!(flags & DNS_OPTION_SEARCH)) return 0;
+		log(EVDNS_LOG_DEBUG, "Setting ndots to %d", ndots);
+		if (!global_search_state) global_search_state = search_state_new();
+		if (!global_search_state) return -1;
+		global_search_state->ndots = ndots;
+	} else if (!strncmp(option, "timeout:", 8)) {
+		const int timeout = strtoint(val);
+		if (timeout == -1) return -1;
+		if (!(flags & DNS_OPTION_MISC)) return 0;
+		log(EVDNS_LOG_DEBUG, "Setting timeout to %d", timeout);
+		global_timeout.tv_sec = timeout;
+	} else if (!strncmp(option, "max-timeouts:", 13)) {
+		/* compare all 13 chars of "max-timeouts:"; the previous length of
+		 * 12 also matched any option merely starting with "max-timeouts" */
+		const int maxtimeout = strtoint_clipped(val, 1, 255);
+		if (maxtimeout == -1) return -1;
+		if (!(flags & DNS_OPTION_MISC)) return 0;
+		log(EVDNS_LOG_DEBUG, "Setting maximum allowed timeouts to %d",
+			maxtimeout);
+		global_max_nameserver_timeout = maxtimeout;
+	} else if (!strncmp(option, "max-inflight:", 13)) {
+		const int maxinflight = strtoint_clipped(val, 1, 65000);
+		if (maxinflight == -1) return -1;
+		if (!(flags & DNS_OPTION_MISC)) return 0;
+		log(EVDNS_LOG_DEBUG, "Setting maximum inflight requests to %d",
+			maxinflight);
+		global_max_requests_inflight = maxinflight;
+	} else if (!strncmp(option, "attempts:", 9)) {
+		int retries = strtoint(val);
+		if (retries == -1) return -1;
+		if (retries > 255) retries = 255;
+		if (!(flags & DNS_OPTION_MISC)) return 0;
+		log(EVDNS_LOG_DEBUG, "Setting retries to %d", retries);
+		global_max_retransmits = retries;
+	}
+	return 0;
+}
+
+/* Parse a single line of a resolv.conf file.  The line is tokenized in
+ * place (strtok_r mutates 'start').  Recognized directives: "nameserver",
+ * "domain", "search" and "options"; each is honoured only when the matching
+ * DNS_OPTION_* bit is set in 'flags' ("options" values are filtered inside
+ * evdns_set_option() instead). */
+static void
+resolv_conf_parse_line(char *const start, int flags) {
+	char *strtok_state;
+	static const char *const delims = " \t";
+#define NEXT_TOKEN strtok_r(NULL, delims, &strtok_state)
+
+	char *const first_token = strtok_r(start, delims, &strtok_state);
+	if (!first_token) return;
+
+	if (!strcmp(first_token, "nameserver") && (flags & DNS_OPTION_NAMESERVERS)) {
+		const char *const nameserver = NEXT_TOKEN;
+		struct in_addr ina;
+
+		if (inet_aton(nameserver, &ina)) {
+			/* address is valid */
+			evdns_nameserver_add(ina.s_addr);
+		}
+	} else if (!strcmp(first_token, "domain") && (flags & DNS_OPTION_SEARCH)) {
+		const char *const domain = NEXT_TOKEN;
+		if (domain) {
+			/* "domain" replaces the search list with this one entry */
+			search_postfix_clear();
+			search_postfix_add(domain);
+		}
+	} else if (!strcmp(first_token, "search") && (flags & DNS_OPTION_SEARCH)) {
+		const char *domain;
+		search_postfix_clear();
+
+		while ((domain = NEXT_TOKEN)) {
+			search_postfix_add(domain);
+		}
+		/* NOTE(review): presumably search_postfix_add() prepends and this
+		 * restores file order -- confirm against search_reverse() */
+		search_reverse();
+	} else if (!strcmp(first_token, "options")) {
+		const char *option;
+		while ((option = NEXT_TOKEN)) {
+			/* pass the text after ':' as the value, e.g. "ndots:2" */
+			const char *val = strchr(option, ':');
+			evdns_set_option(option, val ? val+1 : "", flags);
+		}
+	}
+#undef NEXT_TOKEN
+}
+
+/* exported function */
+/* Read a resolv.conf-style file into memory and parse it line by line. */
+/* returns: */
+/*   0 no errors */
+/*   1 failed to open file */
+/*   2 failed to stat file */
+/*   3 file too large */
+/*   4 out of memory */
+/*   5 short read from file */
+/*   6 no nameservers configured (defaults were applied instead) */
+int
+evdns_resolv_conf_parse(int flags, const char *const filename) {
+	struct stat st;
+	int fd, n, r;
+	u8 *resolv;
+	char *start;
+	int err = 0;
+
+	log(EVDNS_LOG_DEBUG, "Parsing resolv.conf file %s", filename);
+
+	fd = open(filename, O_RDONLY);
+	if (fd < 0) {
+		/* a missing file is not fatal: fall back to sane defaults */
+		evdns_resolv_set_defaults(flags);
+		return 1;
+	}
+
+	if (fstat(fd, &st)) { err = 2; goto out1; }
+	if (!st.st_size) {
+		evdns_resolv_set_defaults(flags);
+		err = (flags & DNS_OPTION_NAMESERVERS) ? 6 : 0;
+		goto out1;
+	}
+	if (st.st_size > 65535) { err = 3; goto out1; }	/* no resolv.conf should be any bigger */
+
+	resolv = (u8 *) malloc((size_t)st.st_size + 1);
+	if (!resolv) { err = 4; goto out1; }
+
+	n = 0;
+	/* loop because read() may deliver less than requested */
+	while ((r = read(fd, resolv+n, (size_t)st.st_size-n)) > 0) {
+		n += r;
+		if (n == st.st_size)
+			break;
+		assert(n < st.st_size);
+	}
+	if (r < 0) { err = 5; goto out2; }
+	resolv[n] = 0;	 /* we malloced an extra byte; this should be fine. */
+
+	/* split the buffer on newlines and parse each line in place */
+	start = (char *) resolv;
+	for (;;) {
+		char *const newline = strchr(start, '\n');
+		if (!newline) {
+			resolv_conf_parse_line(start, flags);
+			break;
+		} else {
+			*newline = 0;
+			resolv_conf_parse_line(start, flags);
+			start = newline + 1;
+		}
+	}
+
+	if (!server_head && (flags & DNS_OPTION_NAMESERVERS)) {
+		/* no nameservers were configured. */
+		evdns_nameserver_ip_add("127.0.0.1");
+		err = 6;
+	}
+	if (flags & DNS_OPTION_SEARCH && (!global_search_state || global_search_state->num_domains == 0)) {
+		/* no search domains configured: derive them from the hostname */
+		search_set_from_hostname();
+	}
+
+out2:
+	free(resolv);
+out1:
+	close(fd);
+	return err;
+}
+
+#ifdef WIN32
+/* Add multiple nameservers from a space-or-comma-separated list. */
+/* Returns 0 on success, 4 on out-of-memory, or the first nonzero result */
+/* from evdns_nameserver_ip_add(). */
+static int
+evdns_nameserver_ip_add_line(const char *ips) {
+	const char *addr;
+	char *buf;
+	int r;
+	while (*ips) {
+		/* skip separators between addresses */
+		while (ISSPACE(*ips) || *ips == ',' || *ips == '\t')
+			++ips;
+		/* a trailing run of separators means we are done; previously this
+		 * fell through and tried to add an empty string as a nameserver */
+		if (!*ips)
+			break;
+		addr = ips;
+		while (ISDIGIT(*ips) || *ips == '.' || *ips == ':')
+			++ips;
+		buf = malloc(ips-addr+1);
+		if (!buf) return 4;
+		memcpy(buf, addr, ips-addr);
+		buf[ips-addr] = '\0';
+		r = evdns_nameserver_ip_add(buf);
+		free(buf);
+		if (r) return r;
+	}
+	return 0;
+}
+
+typedef DWORD(WINAPI *GetNetworkParams_fn_t)(FIXED_INFO *, DWORD*);
+
+/* Use the windows GetNetworkParams interface in iphlpapi.dll to */
+/* figure out what our nameservers are. */
+/* iphlpapi.dll is loaded dynamically so binaries still run on systems */
+/* lacking it.  Returns 0 on success, 4 on out-of-memory, -1 otherwise. */
+static int
+load_nameservers_with_getnetworkparams(void)
+{
+	/* Based on MSDN examples and inspection of c-ares code. */
+	FIXED_INFO *fixed;
+	HMODULE handle = 0;
+	ULONG size = sizeof(FIXED_INFO);
+	void *buf = NULL;
+	int status = 0, r, added_any;
+	IP_ADDR_STRING *ns;
+	GetNetworkParams_fn_t fn;
+
+	if (!(handle = LoadLibrary("iphlpapi.dll"))) {
+		log(EVDNS_LOG_WARN, "Could not open iphlpapi.dll");
+		status = -1;
+		goto done;
+	}
+	if (!(fn = (GetNetworkParams_fn_t) GetProcAddress(handle, "GetNetworkParams"))) {
+		log(EVDNS_LOG_WARN, "Could not get address of function.");
+		status = -1;
+		goto done;
+	}
+
+	buf = malloc(size);
+	if (!buf) { status = 4; goto done; }
+	fixed = buf;
+	r = fn(fixed, &size);
+	if (r != ERROR_SUCCESS && r != ERROR_BUFFER_OVERFLOW) {
+		status = -1;
+		goto done;
+	}
+	if (r != ERROR_SUCCESS) {
+		/* ERROR_BUFFER_OVERFLOW: 'size' now holds the needed size; retry */
+		free(buf);
+		buf = malloc(size);
+		if (!buf) { status = 4; goto done; }
+		fixed = buf;
+		r = fn(fixed, &size);
+		if (r != ERROR_SUCCESS) {
+			log(EVDNS_LOG_DEBUG, "fn() failed.");
+			status = -1;
+			goto done;
+		}
+	}
+
+	assert(fixed);
+	added_any = 0;
+	/* walk the linked list of DNS server entries */
+	ns = &(fixed->DnsServerList);
+	while (ns) {
+		r = evdns_nameserver_ip_add_line(ns->IpAddress.String);
+		if (r) {
+			log(EVDNS_LOG_DEBUG,"Could not add nameserver %s to list,error: %d",
+				(ns->IpAddress.String),(int)GetLastError());
+			status = r;
+			goto done;
+		} else {
+			log(EVDNS_LOG_DEBUG,"Succesfully added %s as nameserver",ns->IpAddress.String);
+		}
+
+		added_any++;
+		ns = ns->Next;
+	}
+
+	if (!added_any) {
+		log(EVDNS_LOG_DEBUG, "No nameservers added.");
+		status = -1;
+	}
+
+ done:
+	if (buf)
+		free(buf);
+	if (handle)
+		FreeLibrary(handle);
+	return status;
+}
+
+/* Read the named registry value (expected to hold a nameserver list) and */
+/* feed it to evdns_nameserver_ip_add_line().  Returns that function's */
+/* result, or -1 when the value is absent, empty, or memory is exhausted. */
+/* NOTE(review): RegQueryValueEx does not guarantee the returned string is */
+/* NUL-terminated -- confirm the values read here are well-formed REG_SZ. */
+static int
+config_nameserver_from_reg_key(HKEY key, const char *subkey)
+{
+	char *buf;
+	DWORD bufsz = 0, type = 0;
+	int status = 0;
+
+	/* first call with a NULL buffer asks for the required size */
+	if (RegQueryValueEx(key, subkey, 0, &type, NULL, &bufsz)
+	    != ERROR_MORE_DATA)
+		return -1;
+	if (!(buf = malloc(bufsz)))
+		return -1;
+
+	if (RegQueryValueEx(key, subkey, 0, &type, (LPBYTE)buf, &bufsz)
+	    == ERROR_SUCCESS && bufsz > 1) {
+		status = evdns_nameserver_ip_add_line(buf);
+	}
+
+	free(buf);
+	return status;
+}
+
+#define SERVICES_KEY "System\\CurrentControlSet\\Services\\"
+#define WIN_NS_9X_KEY SERVICES_KEY "VxD\\MSTCP"
+#define WIN_NS_NT_KEY SERVICES_KEY "Tcpip\\Parameters"
+
+/* Fall back to the registry to find nameservers, trying the NT layout */
+/* (Tcpip\Parameters and its Interfaces subkey) and the Win9x layout. */
+/* Returns 0 when at least one nameserver was found, -1 otherwise. */
+static int
+load_nameservers_from_registry(void)
+{
+	int found = 0;
+	int r;
+#define TRY(k, name) \
+	if (!found && config_nameserver_from_reg_key(k,name) == 0) {	\
+		log(EVDNS_LOG_DEBUG,"Found nameservers in %s/%s",#k,name); \
+		found = 1;						\
+	} else if (!found) {						\
+		log(EVDNS_LOG_DEBUG,"Didn't find nameservers in %s/%s", \
+		    #k,#name);						\
+	}
+
+	if (((int)GetVersion()) > 0) { /* NT */
+		HKEY nt_key = 0, interfaces_key = 0;
+
+		if (RegOpenKeyEx(HKEY_LOCAL_MACHINE, WIN_NS_NT_KEY, 0,
+				 KEY_READ, &nt_key) != ERROR_SUCCESS) {
+			log(EVDNS_LOG_DEBUG,"Couldn't open nt key, %d",(int)GetLastError());
+			return -1;
+		}
+		r = RegOpenKeyEx(nt_key, "Interfaces", 0,
+				 KEY_QUERY_VALUE|KEY_ENUMERATE_SUB_KEYS,
+				 &interfaces_key);
+		if (r != ERROR_SUCCESS) {
+			log(EVDNS_LOG_DEBUG,"Couldn't open interfaces key, %d",(int)GetLastError());
+			/* don't leak the already-open nt_key on this error path */
+			RegCloseKey(nt_key);
+			return -1;
+		}
+		TRY(nt_key, "NameServer");
+		TRY(nt_key, "DhcpNameServer");
+		TRY(interfaces_key, "NameServer");
+		TRY(interfaces_key, "DhcpNameServer");
+		RegCloseKey(interfaces_key);
+		RegCloseKey(nt_key);
+	} else {
+		HKEY win_key = 0;
+		if (RegOpenKeyEx(HKEY_LOCAL_MACHINE, WIN_NS_9X_KEY, 0,
+				 KEY_READ, &win_key) != ERROR_SUCCESS) {
+			log(EVDNS_LOG_DEBUG, "Couldn't open registry key, %d", (int)GetLastError());
+			return -1;
+		}
+		TRY(win_key, "NameServer");
+		RegCloseKey(win_key);
+	}
+
+	if (found == 0) {
+		log(EVDNS_LOG_WARN,"Didn't find any nameservers.");
+	}
+
+	return found ? 0 : -1;
+#undef TRY
+}
+
+/* Configure nameservers from Windows platform data: prefer the result of */
+/* GetNetworkParams(); fall back to the registry when that fails. */
+int
+evdns_config_windows_nameservers(void)
+{
+	const int r = load_nameservers_with_getnetworkparams();
+
+	return (r == 0) ? 0 : load_nameservers_from_registry();
+}
+#endif
+
+/* Initialize evdns from platform defaults: Windows nameserver discovery on */
+/* WIN32, otherwise /etc/resolv.conf with all options enabled. */
+int
+evdns_init(void)
+{
+#ifdef WIN32
+	return evdns_config_windows_nameservers();
+#else
+	return evdns_resolv_conf_parse(DNS_OPTIONS_ALL, "/etc/resolv.conf");
+#endif
+}
+
+/* Map a DNS_ERR_* code to a short human-readable description. */
+const char *
+evdns_err_to_string(int err)
+{
+	static const struct {
+		int code;
+		const char *msg;
+	} errtab[] = {
+		{ DNS_ERR_NONE,		"no error" },
+		{ DNS_ERR_FORMAT,	"misformatted query" },
+		{ DNS_ERR_SERVERFAILED,	"server failed" },
+		{ DNS_ERR_NOTEXIST,	"name does not exist" },
+		{ DNS_ERR_NOTIMPL,	"query not implemented" },
+		{ DNS_ERR_REFUSED,	"refused" },
+		{ DNS_ERR_TRUNCATED,	"reply truncated or ill-formed" },
+		{ DNS_ERR_UNKNOWN,	"unknown" },
+		{ DNS_ERR_TIMEOUT,	"request timed out" },
+		{ DNS_ERR_SHUTDOWN,	"dns subsystem shut down" },
+	};
+	unsigned i;
+
+	for (i = 0; i < (unsigned)(sizeof(errtab)/sizeof(errtab[0])); ++i) {
+		if (errtab[i].code == err)
+			return errtab[i].msg;
+	}
+	return "[Unknown error code]";
+}
+
+/* Tear down all evdns state: drain both request queues, close and free */
+/* every nameserver, and release the global search state. */
+/* If fail_requests is non-zero, each pending request's callback is first */
+/* invoked with DNS_ERR_SHUTDOWN; otherwise requests are dropped silently. */
+void
+evdns_shutdown(int fail_requests)
+{
+	struct nameserver *server, *server_next;
+	struct search_domain *dom, *dom_next;
+
+	/* request_finished() unlinks the head, so these loops terminate */
+	while (req_head) {
+		if (fail_requests)
+			reply_callback(req_head, 0, DNS_ERR_SHUTDOWN, NULL);
+		request_finished(req_head, &req_head);
+	}
+	while (req_waiting_head) {
+		if (fail_requests)
+			reply_callback(req_waiting_head, 0, DNS_ERR_SHUTDOWN, NULL);
+		request_finished(req_waiting_head, &req_waiting_head);
+	}
+	global_requests_inflight = global_requests_waiting = 0;
+
+	for (server = server_head; server; server = server_next) {
+		server_next = server->next;
+		if (server->socket >= 0)
+			CLOSE_SOCKET(server->socket);
+		(void) event_del(&server->event);
+		if (server->state == 0)
+			(void) event_del(&server->timeout_event);
+		free(server);
+		/* the nameserver list is circular: stop once we wrap around */
+		if (server_next == server_head)
+			break;
+	}
+	server_head = NULL;
+	global_good_nameservers = 0;
+
+	if (global_search_state) {
+		for (dom = global_search_state->head; dom; dom = dom_next) {
+			dom_next = dom->next;
+			free(dom);
+		}
+		free(global_search_state);
+		global_search_state = NULL;
+	}
+	evdns_log_fn = NULL;
+}
+
+#ifdef EVDNS_MAIN
+/* Demo resolver callback: print each returned address (A) or hostname */
+/* (PTR); 'orig' carries the queried name that was passed through as the */
+/* user argument to evdns_resolve_*(). */
+void
+main_callback(int result, char type, int count, int ttl,
+    void *addrs, void *orig) {
+	char *n = (char*)orig;
+	int i;
+	for (i = 0; i < count; ++i) {
+		if (type == DNS_IPv4_A) {
+			printf("%s: %s\n", n, debug_ntoa(((u32*)addrs)[i]));
+		} else if (type == DNS_PTR) {
+			printf("%s: %s\n", n, ((char**)addrs)[i]);
+		}
+	}
+	if (!count) {
+		printf("%s: No answer (%d)\n", n, result);
+	}
+	fflush(stdout);
+}
+/* Demo server callback: answer every A question with 192.168.11.11 and */
+/* every PTR question with foo.bar.example.com; other questions are only */
+/* logged and skipped. */
+void
+evdns_server_callback(struct evdns_server_request *req, void *data)
+{
+	int i, r;
+	(void)data;
+	/* dummy; give 192.168.11.11 as an answer for all A questions,
+	 *	give foo.bar.example.com as an answer for all PTR questions. */
+	for (i = 0; i < req->nquestions; ++i) {
+		u32 ans = htonl(0xc0a80b0bUL);	/* 192.168.11.11 */
+		if (req->questions[i]->type == EVDNS_TYPE_A &&
+		    req->questions[i]->dns_question_class == EVDNS_CLASS_INET) {
+			printf(" -- replying for %s (A)\n", req->questions[i]->name);
+			r = evdns_server_request_add_a_reply(req, req->questions[i]->name,
+			    1, &ans, 10);	/* one address, TTL 10 seconds */
+			if (r<0)
+				printf("eeep, didn't work.\n");
+		} else if (req->questions[i]->type == EVDNS_TYPE_PTR &&
+		    req->questions[i]->dns_question_class == EVDNS_CLASS_INET) {
+			printf(" -- replying for %s (PTR)\n", req->questions[i]->name);
+			/* NOTE(review): this add_ptr_reply result is not checked,
+			 * unlike the A case above */
+			r = evdns_server_request_add_ptr_reply(req, NULL, req->questions[i]->name,
+			    "foo.bar.example.com", 10);
+		} else {
+			printf(" -- skipping %s [%d %d]\n", req->questions[i]->name,
+			    req->questions[i]->type, req->questions[i]->dns_question_class);
+		}
+	}
+
+	r = evdns_request_respond(req, 0);
+	if (r<0)
+		printf("eeek, couldn't send reply.\n");
+}
+
+/* Demo log callback: write every message to stderr, one per line, */
+/* regardless of severity. */
+void
+logfn(int is_warn, const char *msg) {
+	(void) is_warn;
+	fputs(msg, stderr);
+	fputc('\n', stderr);
+}
+/* Demo driver.  Usage: evdns [-x] [-v] hostname...  or  evdns -servertest */
+/* Resolves each name on the command line (reverse lookups with -x), and */
+/* with -servertest also runs the dummy DNS server on UDP port 10053. */
+int
+main(int c, char **v) {
+	int idx;
+	/* NOTE(review): verbose is initialized to 1, so -v is a no-op */
+	int reverse = 0, verbose = 1, servertest = 0;
+	if (c<2) {
+		fprintf(stderr, "syntax: %s [-x] [-v] hostname\n", v[0]);
+		fprintf(stderr, "syntax: %s [-servertest]\n", v[0]);
+		return 1;
+	}
+	idx = 1;
+	/* consume leading options */
+	while (idx < c && v[idx][0] == '-') {
+		if (!strcmp(v[idx], "-x"))
+			reverse = 1;
+		else if (!strcmp(v[idx], "-v"))
+			verbose = 1;
+		else if (!strcmp(v[idx], "-servertest"))
+			servertest = 1;
+		else
+			fprintf(stderr, "Unknown option %s\n", v[idx]);
+		++idx;
+	}
+	event_init();
+	if (verbose)
+		evdns_set_log_fn(logfn);
+	evdns_resolv_conf_parse(DNS_OPTION_NAMESERVERS, "/etc/resolv.conf");
+	if (servertest) {
+		/* bind a nonblocking UDP socket and attach the dummy server */
+		int sock;
+		struct sockaddr_in my_addr;
+		sock = socket(PF_INET, SOCK_DGRAM, 0);
+		evutil_make_socket_nonblocking(sock);
+		my_addr.sin_family = AF_INET;
+		my_addr.sin_port = htons(10053);
+		my_addr.sin_addr.s_addr = INADDR_ANY;
+		if (bind(sock, (struct sockaddr*)&my_addr, sizeof(my_addr))<0) {
+			perror("bind");
+			exit(1);
+		}
+		evdns_add_server_port(sock, 0, evdns_server_callback, NULL);
+	}
+	/* queue a lookup for every remaining argument; results arrive via
+	 * main_callback() once event_dispatch() runs */
+	for (; idx < c; ++idx) {
+		if (reverse) {
+			struct in_addr addr;
+			if (!inet_aton(v[idx], &addr)) {
+				fprintf(stderr, "Skipping non-IP %s\n", v[idx]);
+				continue;
+			}
+			fprintf(stderr, "resolving %s...\n",v[idx]);
+			evdns_resolve_reverse(&addr, 0, main_callback, v[idx]);
+		} else {
+			fprintf(stderr, "resolving (fwd) %s...\n",v[idx]);
+			evdns_resolve_ipv4(v[idx], 0, main_callback, v[idx]);
+		}
+	}
+	fflush(stdout);
+	event_dispatch();
+	return 0;
+}
+#endif
diff --git a/libevent/evdns.h b/libevent/evdns.h
new file mode 100644
index 00000000000..1eb5c382480
--- /dev/null
+++ b/libevent/evdns.h
@@ -0,0 +1,528 @@
+/*
+ * Copyright (c) 2006 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * The original DNS code is due to Adam Langley with heavy
+ * modifications by Nick Mathewson. Adam put his DNS software in the
+ * public domain. You can find his original copyright below. Please be
+ * aware that the code, as part of libevent, is governed by the 3-clause
+ * BSD license above.
+ *
+ * This software is Public Domain. To view a copy of the public domain dedication,
+ * visit http://creativecommons.org/licenses/publicdomain/ or send a letter to
+ * Creative Commons, 559 Nathan Abbott Way, Stanford, California 94305, USA.
+ *
+ * I ask and expect, but do not require, that all derivative works contain an
+ * attribution similar to:
+ * Parts developed by Adam Langley <agl@imperialviolet.org>
+ *
+ * You may wish to replace the word "Parts" with something else depending on
+ * the amount of original code.
+ *
+ * (Derivative works does not include programs which link against, run or include
+ * the source verbatim in their source distributions)
+ */
+
+/** @file evdns.h
+ *
+ * Welcome, gentle reader
+ *
+ * Async DNS lookups are really a whole lot harder than they should be,
+ * mostly stemming from the fact that the libc resolver has never been
+ * very good at them. Before you use this library you should see if libc
+ * can do the job for you with the modern async call getaddrinfo_a
+ * (see http://www.imperialviolet.org/page25.html#e498). Otherwise,
+ * please continue.
+ *
+ * This code is based on libevent and you must call event_init before
+ * any of the APIs in this file. You must also seed the OpenSSL random
+ * source if you are using OpenSSL for ids (see below).
+ *
+ * This library is designed to be included and shipped with your source
+ * code. You statically link with it. You should also test for the
+ * existence of strtok_r and define HAVE_STRTOK_R if you have it.
+ *
+ * The DNS protocol requires a good source of id numbers and these
+ * numbers should be unpredictable for spoofing reasons. There are
+ * three methods for generating them here and you must define exactly
+ * one of them. In increasing order of preference:
+ *
+ * DNS_USE_GETTIMEOFDAY_FOR_ID:
+ * Using the bottom 16 bits of the usec result from gettimeofday. This
+ * is a pretty poor solution but should work anywhere.
+ * DNS_USE_CPU_CLOCK_FOR_ID:
+ * Using the bottom 16 bits of the nsec result from the CPU's time
+ * counter. This is better, but may not work everywhere. Requires
+ * POSIX realtime support and you'll need to link against -lrt on
+ * glibc systems at least.
+ * DNS_USE_OPENSSL_FOR_ID:
+ * Uses the OpenSSL RAND_bytes call to generate the data. You must
+ * have seeded the pool before making any calls to this library.
+ *
+ * The library keeps track of the state of nameservers and will avoid
+ * them when they go down. Otherwise it will round robin between them.
+ *
+ * Quick start guide:
+ * #include "evdns.h"
+ * void callback(int result, char type, int count, int ttl,
+ * void *addresses, void *arg);
+ * evdns_resolv_conf_parse(DNS_OPTIONS_ALL, "/etc/resolv.conf");
+ * evdns_resolve("www.hostname.com", 0, callback, NULL);
+ *
+ * When the lookup is complete the callback function is called. The
+ * first argument will be one of the DNS_ERR_* defines in evdns.h.
+ * Hopefully it will be DNS_ERR_NONE, in which case type will be
+ * DNS_IPv4_A, count will be the number of IP addresses, ttl is the time
+ * which the data can be cached for (in seconds), addresses will point
+ * to an array of uint32_t's and arg will be whatever you passed to
+ * evdns_resolve.
+ *
+ * Searching:
+ *
+ * In order for this library to be a good replacement for glibc's resolver it
+ * supports searching. This involves setting a list of default domains, in
+ * which names will be queried for. The number of dots in the query name
+ * determines the order in which this list is used.
+ *
+ * Searching appears to be a single lookup from the point of view of the API,
+ * although many DNS queries may be generated from a single call to
+ * evdns_resolve. Searching can also drastically slow down the resolution
+ * of names.
+ *
+ * To disable searching:
+ * 1. Never set it up. If you never call evdns_resolv_conf_parse or
+ * evdns_search_add then no searching will occur.
+ *
+ * 2. If you do call evdns_resolv_conf_parse then don't pass
+ * DNS_OPTION_SEARCH (or DNS_OPTIONS_ALL, which implies it).
+ *
+ * 3. When calling evdns_resolve, pass the DNS_QUERY_NO_SEARCH flag.
+ *
+ * The order of searches depends on the number of dots in the name. If the
+ * number is greater than or equal to the ndots setting then the name is
+ * first tried globally. Otherwise each search domain is appended in turn.
+ *
+ * The ndots setting can either be set from a resolv.conf, or by calling
+ * evdns_search_ndots_set.
+ *
+ * For example, with ndots set to 1 (the default) and a search domain list of
+ * ["myhome.net"]:
+ * Query: www
+ * Order: www.myhome.net, www.
+ *
+ * Query: www.abc
+ * Order: www.abc., www.abc.myhome.net
+ *
+ * Internals:
+ *
+ * Requests are kept in two queues. The first is the inflight queue. In
+ * this queue requests have an allocated transaction id and nameserver.
+ * They will soon be transmitted if they haven't already been.
+ *
+ * The second is the waiting queue. The size of the inflight ring is
+ * limited and all other requests wait in waiting queue for space. This
+ * bounds the number of concurrent requests so that we don't flood the
+ * nameserver. Several algorithms require a full walk of the inflight
+ * queue and so bounding its size keeps thing going nicely under huge
+ * (many thousands of requests) loads.
+ *
+ * If a nameserver loses too many requests it is considered down and we
+ * try not to use it. After a while we send a probe to that nameserver
+ * (a lookup for google.com) and, if it replies, we consider it working
+ * again. If the nameserver fails a probe we wait longer to try again
+ * with the next probe.
+ */
+
+#ifndef EVENTDNS_H
+#define EVENTDNS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* For integer types. */
+#include <evutil.h>
+
+/** Error codes 0-5 are as described in RFC 1035. */
+#define DNS_ERR_NONE 0
+/** The name server was unable to interpret the query */
+#define DNS_ERR_FORMAT 1
+/** The name server was unable to process this query due to a problem with the
+ * name server */
+#define DNS_ERR_SERVERFAILED 2
+/** The domain name does not exist */
+#define DNS_ERR_NOTEXIST 3
+/** The name server does not support the requested kind of query */
+#define DNS_ERR_NOTIMPL 4
+/** The name server refuses to perform the specified operation for policy
+ * reasons */
+#define DNS_ERR_REFUSED 5
+/** The reply was truncated or ill-formatted */
+#define DNS_ERR_TRUNCATED 65
+/** An unknown error occurred */
+#define DNS_ERR_UNKNOWN 66
+/** Communication with the server timed out */
+#define DNS_ERR_TIMEOUT 67
+/** The request was canceled because the DNS subsystem was shut down. */
+#define DNS_ERR_SHUTDOWN 68
+
+#define DNS_IPv4_A 1
+#define DNS_PTR 2
+#define DNS_IPv6_AAAA 3
+
+#define DNS_QUERY_NO_SEARCH 1
+
+#define DNS_OPTION_SEARCH 1
+#define DNS_OPTION_NAMESERVERS 2
+#define DNS_OPTION_MISC 4
+#define DNS_OPTIONS_ALL 7
+
+/**
+ * The callback that contains the results from a lookup.
+ * - type is either DNS_IPv4_A or DNS_PTR or DNS_IPv6_AAAA
+ * - count contains the number of addresses of form type
+ * - ttl is the number of seconds the resolution may be cached for.
+ * - addresses needs to be cast according to type
+ */
+typedef void (*evdns_callback_type) (int result, char type, int count, int ttl, void *addresses, void *arg);
+
+/**
+ Initialize the asynchronous DNS library.
+
+ This function initializes support for non-blocking name resolution by
+ calling evdns_resolv_conf_parse() on UNIX and
+ evdns_config_windows_nameservers() on Windows.
+
+ @return 0 if successful, or -1 if an error occurred
+ @see evdns_shutdown()
+ */
+int evdns_init(void);
+
+
+/**
+ Shut down the asynchronous DNS resolver and terminate all active requests.
+
+ If the 'fail_requests' option is enabled, all active requests will return
+ an empty result with the error flag set to DNS_ERR_SHUTDOWN. Otherwise,
+ the requests will be silently discarded.
+
+ @param fail_requests if zero, active requests will be aborted; if non-zero,
+ active requests will return DNS_ERR_SHUTDOWN.
+ @see evdns_init()
+ */
+void evdns_shutdown(int fail_requests);
+
+
+/**
+ Convert a DNS error code to a string.
+
+ @param err the DNS error code
+ @return a string containing an explanation of the error code
+*/
+const char *evdns_err_to_string(int err);
+
+
+/**
+ Add a nameserver.
+
+ The address should be an IPv4 address in network byte order.
+ The type of address is chosen so that it matches in_addr.s_addr.
+
+ @param address an IP address in network byte order
+ @return 0 if successful, or -1 if an error occurred
+ @see evdns_nameserver_ip_add()
+ */
+int evdns_nameserver_add(unsigned long int address);
+
+
+/**
+ Get the number of configured nameservers.
+
+ This returns the number of configured nameservers (not necessarily the
+ number of running nameservers). This is useful for double-checking
+ whether our calls to the various nameserver configuration functions
+ have been successful.
+
+ @return the number of configured nameservers
+ @see evdns_nameserver_add()
+ */
+int evdns_count_nameservers(void);
+
+
+/**
+ Remove all configured nameservers, and suspend all pending resolves.
+
+ Resolves will not necessarily be re-attempted until evdns_resume() is called.
+
+ @return 0 if successful, or -1 if an error occurred
+ @see evdns_resume()
+ */
+int evdns_clear_nameservers_and_suspend(void);
+
+
+/**
+ Resume normal operation and continue any suspended resolve requests.
+
+ Re-attempt resolves left in limbo after an earlier call to
+ evdns_clear_nameservers_and_suspend().
+
+ @return 0 if successful, or -1 if an error occurred
+ @see evdns_clear_nameservers_and_suspend()
+ */
+int evdns_resume(void);
+
+
+/**
+ Add a nameserver.
+
+ This wraps the evdns_nameserver_add() function by parsing a string as an IP
+ address and adds it as a nameserver.
+
+ @return 0 if successful, or -1 if an error occurred
+ @see evdns_nameserver_add()
+ */
+int evdns_nameserver_ip_add(const char *ip_as_string);
+
+
+/**
+ Lookup an A record for a given name.
+
+ @param name a DNS hostname
+ @param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query.
+ @param callback a callback function to invoke when the request is completed
+ @param ptr an argument to pass to the callback function
+ @return 0 if successful, or -1 if an error occurred
+ @see evdns_resolve_ipv6(), evdns_resolve_reverse(), evdns_resolve_reverse_ipv6()
+ */
+int evdns_resolve_ipv4(const char *name, int flags, evdns_callback_type callback, void *ptr);
+
+
+/**
+ Lookup an AAAA record for a given name.
+
+ @param name a DNS hostname
+ @param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query.
+ @param callback a callback function to invoke when the request is completed
+ @param ptr an argument to pass to the callback function
+ @return 0 if successful, or -1 if an error occurred
+ @see evdns_resolve_ipv4(), evdns_resolve_reverse(), evdns_resolve_reverse_ipv6()
+ */
+int evdns_resolve_ipv6(const char *name, int flags, evdns_callback_type callback, void *ptr);
+
+struct in_addr;
+struct in6_addr;
+
+/**
+ Lookup a PTR record for a given IP address.
+
+ @param in an IPv4 address
+ @param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query.
+ @param callback a callback function to invoke when the request is completed
+ @param ptr an argument to pass to the callback function
+ @return 0 if successful, or -1 if an error occurred
+ @see evdns_resolve_reverse_ipv6()
+ */
+int evdns_resolve_reverse(const struct in_addr *in, int flags, evdns_callback_type callback, void *ptr);
+
+
+/**
+ Lookup a PTR record for a given IPv6 address.
+
+ @param in an IPv6 address
+ @param flags either 0, or DNS_QUERY_NO_SEARCH to disable searching for this query.
+ @param callback a callback function to invoke when the request is completed
+ @param ptr an argument to pass to the callback function
+ @return 0 if successful, or -1 if an error occurred
+ @see evdns_resolve_reverse_ipv6()
+ */
+int evdns_resolve_reverse_ipv6(const struct in6_addr *in, int flags, evdns_callback_type callback, void *ptr);
+
+
+/**
+ Set the value of a configuration option.
+
+ The currently available configuration options are:
+
+ ndots, timeout, max-timeouts, max-inflight, and attempts
+
+ @param option the name of the configuration option to be modified
+ @param val the value to be set
+ @param flags either 0 | DNS_OPTION_SEARCH | DNS_OPTION_MISC
+ @return 0 if successful, or -1 if an error occurred
+ */
+int evdns_set_option(const char *option, const char *val, int flags);
+
+
+/**
+ Parse a resolv.conf file.
+
+ The 'flags' parameter determines what information is parsed from the
+ resolv.conf file. See the man page for resolv.conf for the format of this
+ file.
+
+ The following directives are not parsed from the file: sortlist, rotate,
+ no-check-names, inet6, debug.
+
+ If this function encounters an error, the possible return values are: 1 =
+ failed to open file, 2 = failed to stat file, 3 = file too large, 4 = out of
+ memory, 5 = short read from file, 6 = no nameservers listed in the file
+
+ @param flags any of DNS_OPTION_NAMESERVERS|DNS_OPTION_SEARCH|DNS_OPTION_MISC|
+ DNS_OPTIONS_ALL
+ @param filename the path to the resolv.conf file
+ @return 0 if successful, or various positive error codes if an error
+ occurred (see above)
+ @see resolv.conf(3), evdns_config_windows_nameservers()
+ */
+int evdns_resolv_conf_parse(int flags, const char *const filename);
+
+
+/**
+ Obtain nameserver information using the Windows API.
+
+ Attempt to configure a set of nameservers based on platform settings on
+ a win32 host. Preferentially tries to use GetNetworkParams; if that fails,
+ looks in the registry.
+
+ @return 0 if successful, or -1 if an error occurred
+ @see evdns_resolv_conf_parse()
+ */
+#ifdef WIN32
+int evdns_config_windows_nameservers(void);
+#endif
+
+
+/**
+ Clear the list of search domains.
+ */
+void evdns_search_clear(void);
+
+
+/**
+ Add a domain to the list of search domains
+
+ @param domain the domain to be added to the search list
+ */
+void evdns_search_add(const char *domain);
+
+
+/**
+ Set the 'ndots' parameter for searches.
+
+ Sets the number of dots which, when found in a name, causes
+ the first query to be without any search domain.
+
+ @param ndots the new ndots parameter
+ */
+void evdns_search_ndots_set(const int ndots);
+
+/**
+ A callback that is invoked when a log message is generated
+
+ @param is_warning indicates if the log message is a 'warning'
+ @param msg the content of the log message
+ */
+typedef void (*evdns_debug_log_fn_type)(int is_warning, const char *msg);
+
+
+/**
+ Set the callback function to handle log messages.
+
+ @param fn the callback to be invoked when a log message is generated
+ */
+void evdns_set_log_fn(evdns_debug_log_fn_type fn);
+
+/**
+ Set a callback that will be invoked to generate transaction IDs. By
+ default, we pick transaction IDs based on the current clock time.
+
+ @param fn the new callback, or NULL to use the default.
+ */
+void evdns_set_transaction_id_fn(ev_uint16_t (*fn)(void));
+
+#define DNS_NO_SEARCH 1
+
+/*
+ * Structures and functions used to implement a DNS server.
+ */
+
+struct evdns_server_request {
+ int flags;
+ int nquestions;
+ struct evdns_server_question **questions;
+};
+struct evdns_server_question {
+ int type;
+#ifdef __cplusplus
+ int dns_question_class;
+#else
+ /* You should refer to this field as "dns_question_class". The
+ * name "class" works in C for backward compatibility, and will be
+ * removed in a future version. (1.5 or later). */
+ int class;
+#define dns_question_class class
+#endif
+ char name[1];
+};
+typedef void (*evdns_request_callback_fn_type)(struct evdns_server_request *, void *);
+#define EVDNS_ANSWER_SECTION 0
+#define EVDNS_AUTHORITY_SECTION 1
+#define EVDNS_ADDITIONAL_SECTION 2
+
+#define EVDNS_TYPE_A 1
+#define EVDNS_TYPE_NS 2
+#define EVDNS_TYPE_CNAME 5
+#define EVDNS_TYPE_SOA 6
+#define EVDNS_TYPE_PTR 12
+#define EVDNS_TYPE_MX 15
+#define EVDNS_TYPE_TXT 16
+#define EVDNS_TYPE_AAAA 28
+
+#define EVDNS_QTYPE_AXFR 252
+#define EVDNS_QTYPE_ALL 255
+
+#define EVDNS_CLASS_INET 1
+
+struct evdns_server_port *evdns_add_server_port(int socket, int is_tcp, evdns_request_callback_fn_type callback, void *user_data);
+void evdns_close_server_port(struct evdns_server_port *port);
+
+int evdns_server_request_add_reply(struct evdns_server_request *req, int section, const char *name, int type, int dns_class, int ttl, int datalen, int is_name, const char *data);
+int evdns_server_request_add_a_reply(struct evdns_server_request *req, const char *name, int n, void *addrs, int ttl);
+int evdns_server_request_add_aaaa_reply(struct evdns_server_request *req, const char *name, int n, void *addrs, int ttl);
+int evdns_server_request_add_ptr_reply(struct evdns_server_request *req, struct in_addr *in, const char *inaddr_name, const char *hostname, int ttl);
+int evdns_server_request_add_cname_reply(struct evdns_server_request *req, const char *name, const char *cname, int ttl);
+
+int evdns_server_request_respond(struct evdns_server_request *req, int err);
+int evdns_server_request_drop(struct evdns_server_request *req);
+struct sockaddr;
+int evdns_server_request_get_requesting_addr(struct evdns_server_request *_req, struct sockaddr *sa, int addr_len);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !EVENTDNS_H */
diff --git a/libevent/event-internal.h b/libevent/event-internal.h
new file mode 100644
index 00000000000..6436b3358bd
--- /dev/null
+++ b/libevent/event-internal.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _EVENT_INTERNAL_H_
+#define _EVENT_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "config.h"
+#include "min_heap.h"
+#include "evsignal.h"
+
+struct eventop {
+ const char *name;
+ void *(*init)(struct event_base *);
+ int (*add)(void *, struct event *);
+ int (*del)(void *, struct event *);
+ int (*dispatch)(struct event_base *, void *, struct timeval *);
+ void (*dealloc)(struct event_base *, void *);
+ /* set if we need to reinitialize the event base */
+ int need_reinit;
+};
+
+struct event_base {
+ const struct eventop *evsel;
+ void *evbase;
+ int event_count; /* counts number of total events */
+ int event_count_active; /* counts number of active events */
+
+ int event_gotterm; /* Set to terminate loop */
+ int event_break; /* Set to terminate loop immediately */
+
+ /* active event management */
+ struct event_list **activequeues;
+ int nactivequeues;
+
+ /* signal handling info */
+ struct evsignal_info sig;
+
+ struct event_list eventqueue;
+ struct timeval event_tv;
+
+ struct min_heap timeheap;
+
+ struct timeval tv_cache;
+};
+
+/* Internal use only: Functions that might be missing from <sys/queue.h> */
+#ifndef HAVE_TAILQFOREACH
+#define TAILQ_FIRST(head) ((head)->tqh_first)
+#define TAILQ_END(head) NULL
+#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
+#define TAILQ_FOREACH(var, head, field) \
+ for((var) = TAILQ_FIRST(head); \
+ (var) != TAILQ_END(head); \
+ (var) = TAILQ_NEXT(var, field))
+#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
+ (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
+ (elm)->field.tqe_next = (listelm); \
+ *(listelm)->field.tqe_prev = (elm); \
+ (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
+} while (0)
+#define TAILQ_LAST(head, headname) \
+ (*(((struct headname *)((head)->tqh_last))->tqh_last))
+#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL)
+
+#endif /* !HAVE_TAILQFOREACH */
+
+int _evsignal_set_handler(struct event_base *base, int evsignal,
+ void (*fn)(int));
+int _evsignal_restore_handler(struct event_base *base, int evsignal);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _EVENT_INTERNAL_H_ */
diff --git a/libevent/event.3 b/libevent/event.3
new file mode 100644
index 00000000000..5b33ec64a93
--- /dev/null
+++ b/libevent/event.3
@@ -0,0 +1,624 @@
+.\" $OpenBSD: event.3,v 1.4 2002/07/12 18:50:48 provos Exp $
+.\"
+.\" Copyright (c) 2000 Artur Grabowski <art@openbsd.org>
+.\" All rights reserved.
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\"
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\" 3. The name of the author may not be used to endorse or promote products
+.\" derived from this software without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
+.\" INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+.\" AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+.\" THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+.\" EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+.\" PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+.\" OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+.\" WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+.\" OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+.\" ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+.\"
+.Dd August 8, 2000
+.Dt EVENT 3
+.Os
+.Sh NAME
+.Nm event_init ,
+.Nm event_dispatch ,
+.Nm event_loop ,
+.Nm event_loopexit ,
+.Nm event_loopbreak ,
+.Nm event_set ,
+.Nm event_base_dispatch ,
+.Nm event_base_loop ,
+.Nm event_base_loopexit ,
+.Nm event_base_loopbreak ,
+.Nm event_base_set ,
+.Nm event_base_free ,
+.Nm event_add ,
+.Nm event_del ,
+.Nm event_once ,
+.Nm event_base_once ,
+.Nm event_pending ,
+.Nm event_initialized ,
+.Nm event_priority_init ,
+.Nm event_priority_set ,
+.Nm evtimer_set ,
+.Nm evtimer_add ,
+.Nm evtimer_del ,
+.Nm evtimer_pending ,
+.Nm evtimer_initialized ,
+.Nm signal_set ,
+.Nm signal_add ,
+.Nm signal_del ,
+.Nm signal_pending ,
+.Nm signal_initialized ,
+.Nm bufferevent_new ,
+.Nm bufferevent_free ,
+.Nm bufferevent_write ,
+.Nm bufferevent_write_buffer ,
+.Nm bufferevent_read ,
+.Nm bufferevent_enable ,
+.Nm bufferevent_disable ,
+.Nm bufferevent_settimeout ,
+.Nm bufferevent_base_set ,
+.Nm evbuffer_new ,
+.Nm evbuffer_free ,
+.Nm evbuffer_add ,
+.Nm evbuffer_add_buffer ,
+.Nm evbuffer_add_printf ,
+.Nm evbuffer_add_vprintf ,
+.Nm evbuffer_drain ,
+.Nm evbuffer_write ,
+.Nm evbuffer_read ,
+.Nm evbuffer_find ,
+.Nm evbuffer_readline ,
+.Nm evhttp_new ,
+.Nm evhttp_bind_socket ,
+.Nm evhttp_free
+.Nd execute a function when a specific event occurs
+.Sh SYNOPSIS
+.Fd #include <sys/time.h>
+.Fd #include <event.h>
+.Ft "struct event_base *"
+.Fn "event_init" "void"
+.Ft int
+.Fn "event_dispatch" "void"
+.Ft int
+.Fn "event_loop" "int flags"
+.Ft int
+.Fn "event_loopexit" "struct timeval *tv"
+.Ft int
+.Fn "event_loopbreak" "void"
+.Ft void
+.Fn "event_set" "struct event *ev" "int fd" "short event" "void (*fn)(int, short, void *)" "void *arg"
+.Ft int
+.Fn "event_base_dispatch" "struct event_base *base"
+.Ft int
+.Fn "event_base_loop" "struct event_base *base" "int flags"
+.Ft int
+.Fn "event_base_loopexit" "struct event_base *base" "struct timeval *tv"
+.Ft int
+.Fn "event_base_loopbreak" "struct event_base *base"
+.Ft int
+.Fn "event_base_set" "struct event_base *base" "struct event *"
+.Ft void
+.Fn "event_base_free" "struct event_base *base"
+.Ft int
+.Fn "event_add" "struct event *ev" "struct timeval *tv"
+.Ft int
+.Fn "event_del" "struct event *ev"
+.Ft int
+.Fn "event_once" "int fd" "short event" "void (*fn)(int, short, void *)" "void *arg" "struct timeval *tv"
+.Ft int
+.Fn "event_base_once" "struct event_base *base" "int fd" "short event" "void (*fn)(int, short, void *)" "void *arg" "struct timeval *tv"
+.Ft int
+.Fn "event_pending" "struct event *ev" "short event" "struct timeval *tv"
+.Ft int
+.Fn "event_initialized" "struct event *ev"
+.Ft int
+.Fn "event_priority_init" "int npriorities"
+.Ft int
+.Fn "event_priority_set" "struct event *ev" "int priority"
+.Ft void
+.Fn "evtimer_set" "struct event *ev" "void (*fn)(int, short, void *)" "void *arg"
+.Ft void
+.Fn "evtimer_add" "struct event *ev" "struct timeval *"
+.Ft void
+.Fn "evtimer_del" "struct event *ev"
+.Ft int
+.Fn "evtimer_pending" "struct event *ev" "struct timeval *tv"
+.Ft int
+.Fn "evtimer_initialized" "struct event *ev"
+.Ft void
+.Fn "signal_set" "struct event *ev" "int signal" "void (*fn)(int, short, void *)" "void *arg"
+.Ft void
+.Fn "signal_add" "struct event *ev" "struct timeval *"
+.Ft void
+.Fn "signal_del" "struct event *ev"
+.Ft int
+.Fn "signal_pending" "struct event *ev" "struct timeval *tv"
+.Ft int
+.Fn "signal_initialized" "struct event *ev"
+.Ft "struct bufferevent *"
+.Fn "bufferevent_new" "int fd" "evbuffercb readcb" "evbuffercb writecb" "everrorcb" "void *cbarg"
+.Ft void
+.Fn "bufferevent_free" "struct bufferevent *bufev"
+.Ft int
+.Fn "bufferevent_write" "struct bufferevent *bufev" "void *data" "size_t size"
+.Ft int
+.Fn "bufferevent_write_buffer" "struct bufferevent *bufev" "struct evbuffer *buf"
+.Ft size_t
+.Fn "bufferevent_read" "struct bufferevent *bufev" "void *data" "size_t size"
+.Ft int
+.Fn "bufferevent_enable" "struct bufferevent *bufev" "short event"
+.Ft int
+.Fn "bufferevent_disable" "struct bufferevent *bufev" "short event"
+.Ft void
+.Fn "bufferevent_settimeout" "struct bufferevent *bufev" "int timeout_read" "int timeout_write"
+.Ft int
+.Fn "bufferevent_base_set" "struct event_base *base" "struct bufferevent *bufev"
+.Ft "struct evbuffer *"
+.Fn "evbuffer_new" "void"
+.Ft void
+.Fn "evbuffer_free" "struct evbuffer *buf"
+.Ft int
+.Fn "evbuffer_add" "struct evbuffer *buf" "const void *data" "size_t size"
+.Ft int
+.Fn "evbuffer_add_buffer" "struct evbuffer *dst" "struct evbuffer *src"
+.Ft int
+.Fn "evbuffer_add_printf" "struct evbuffer *buf" "const char *fmt" "..."
+.Ft int
+.Fn "evbuffer_add_vprintf" "struct evbuffer *buf" "const char *fmt" "va_list ap"
+.Ft void
+.Fn "evbuffer_drain" "struct evbuffer *buf" "size_t size"
+.Ft int
+.Fn "evbuffer_write" "struct evbuffer *buf" "int fd"
+.Ft int
+.Fn "evbuffer_read" "struct evbuffer *buf" "int fd" "int size"
+.Ft "u_char *"
+.Fn "evbuffer_find" "struct evbuffer *buf" "const u_char *data" "size_t size"
+.Ft "char *"
+.Fn "evbuffer_readline" "struct evbuffer *buf"
+.Ft "struct evhttp *"
+.Fn "evhttp_new" "struct event_base *base"
+.Ft int
+.Fn "evhttp_bind_socket" "struct evhttp *http" "const char *address" "u_short port"
+.Ft "void"
+.Fn "evhttp_free" "struct evhttp *http"
+.Ft int
+.Fa (*event_sigcb)(void) ;
+.Ft volatile sig_atomic_t
+.Fa event_gotsig ;
+.Sh DESCRIPTION
+The
+.Nm event
+API provides a mechanism to execute a function when a specific event
+on a file descriptor occurs or after a given time has passed.
+.Pp
+The
+.Nm event
+API needs to be initialized with
+.Fn event_init
+before it can be used.
+.Pp
+In order to process events, an application needs to call
+.Fn event_dispatch .
+This function only returns on error, and should replace the event core
+of the application program.
+.Pp
+The function
+.Fn event_set
+prepares the event structure
+.Fa ev
+to be used in future calls to
+.Fn event_add
+and
+.Fn event_del .
+The event will be prepared to call the function specified by the
+.Fa fn
+argument with an
+.Fa int
+argument indicating the file descriptor, a
+.Fa short
+argument indicating the type of event, and a
+.Fa void *
+argument given in the
+.Fa arg
+argument.
+The
+.Fa fd
+indicates the file descriptor that should be monitored for events.
+The events can be either
+.Va EV_READ ,
+.Va EV_WRITE ,
+or both,
+indicating that an application can read or write from the file descriptor
+respectively without blocking.
+.Pp
+The function
+.Fa fn
+will be called with the file descriptor that triggered the event and
+the type of event which will be either
+.Va EV_TIMEOUT ,
+.Va EV_SIGNAL ,
+.Va EV_READ ,
+or
+.Va EV_WRITE .
+Additionally, an event which has registered interest in more than one of the
+preceding events, via bitwise-OR to
+.Fn event_set ,
+can provide its callback function with a bitwise-OR of more than one triggered
+event.
+The additional flag
+.Va EV_PERSIST
+makes an
+.Fn event_add
+persistent until
+.Fn event_del
+has been called.
+.Pp
+Once initialized, the
+.Fa ev
+structure can be used repeatedly with
+.Fn event_add
+and
+.Fn event_del
+and does not need to be reinitialized unless the function called and/or
+the argument to it are to be changed.
+However, when an
+.Fa ev
+structure has been added to libevent using
+.Fn event_add
+the structure must persist until the event occurs (assuming
+.Fa EV_PERSIST
+is not set) or is removed
+using
+.Fn event_del .
+You may not reuse the same
+.Fa ev
+structure for multiple monitored descriptors; each descriptor
+needs its own
+.Fa ev .
+.Pp
+The function
+.Fn event_add
+schedules the execution of the
+.Fa ev
+event when the event specified in
+.Fn event_set
+occurs or in at least the time specified in the
+.Fa tv .
+If
+.Fa tv
+is
+.Dv NULL ,
+no timeout occurs and the function will only be called
+if a matching event occurs on the file descriptor.
+The event in the
+.Fa ev
+argument must be already initialized by
+.Fn event_set
+and may not be used in calls to
+.Fn event_set
+until it has timed out or been removed with
+.Fn event_del .
+If the event in the
+.Fa ev
+argument already has a scheduled timeout, the old timeout will be
+replaced by the new one.
+.Pp
+The function
+.Fn event_del
+will cancel the event in the argument
+.Fa ev .
+If the event has already executed or has never been added
+the call will have no effect.
+.Pp
+The functions
+.Fn evtimer_set ,
+.Fn evtimer_add ,
+.Fn evtimer_del ,
+.Fn evtimer_initialized ,
+and
+.Fn evtimer_pending
+are abbreviations for common situations where only a timeout is required.
+The file descriptor passed will be \-1, and the event type will be
+.Va EV_TIMEOUT .
+.Pp
+The functions
+.Fn signal_set ,
+.Fn signal_add ,
+.Fn signal_del ,
+.Fn signal_initialized ,
+and
+.Fn signal_pending
+are abbreviations.
+The event type will be a persistent
+.Va EV_SIGNAL .
+That means
+.Fn signal_set
+adds
+.Va EV_PERSIST .
+.Pp
+In order to avoid races in signal handlers, the
+.Nm event
+API provides two variables:
+.Va event_sigcb
+and
+.Va event_gotsig .
+A signal handler
+sets
+.Va event_gotsig
+to indicate that a signal has been received.
+The application sets
+.Va event_sigcb
+to a callback function.
+After the signal handler sets
+.Va event_gotsig ,
+.Nm event_dispatch
+will execute the callback function to process received signals.
+The callback returns 1 when no events are registered any more.
+It can return \-1 to indicate an error to the
+.Nm event
+library, causing
+.Fn event_dispatch
+to terminate with
+.Va errno
+set to
+.Er EINTR .
+.Pp
+The function
+.Fn event_once
+is similar to
+.Fn event_set .
+However, it schedules a callback to be called exactly once and does not
+require the caller to prepare an
+.Fa event
+structure.
+This function supports
+.Fa EV_TIMEOUT ,
+.Fa EV_READ ,
+and
+.Fa EV_WRITE .
+.Pp
+The
+.Fn event_pending
+function can be used to check if the event specified by
+.Fa event
+is pending to run.
+If
+.Va EV_TIMEOUT
+was specified and
+.Fa tv
+is not
+.Dv NULL ,
+the expiration time of the event will be returned in
+.Fa tv .
+.Pp
+The
+.Fn event_initialized
+macro can be used to check if an event has been initialized.
+.Pp
+The
+.Nm event_loop
+function provides an interface for single pass execution of pending
+events.
+The flags
+.Va EVLOOP_ONCE
+and
+.Va EVLOOP_NONBLOCK
+are recognized.
+The
+.Nm event_loopexit
+function exits from the event loop. The next
+.Fn event_loop
+iteration after the
+given timer expires will complete normally (handling all queued events) then
+exit without blocking for events again. Subsequent invocations of
+.Fn event_loop
+will proceed normally.
+The
+.Nm event_loopbreak
+function exits from the event loop immediately.
+.Fn event_loop
+will abort after the next event is completed;
+.Fn event_loopbreak
+is typically invoked from this event's callback. This behavior is analogous
+to the "break;" statement. Subsequent invocations of
+.Fn event_loop
+will proceed normally.
+.Pp
+It is the responsibility of the caller to provide these functions with
+pre-allocated event structures.
+.Pp
+.Sh EVENT PRIORITIES
+By default
+.Nm libevent
+schedules all active events with the same priority.
+However, sometimes it is desirable to process some events with a higher
+priority than others.
+For that reason,
+.Nm libevent
+supports strict priority queues.
+Active events with a lower priority are always processed before events
+with a higher priority.
+.Pp
+The number of different priorities can be set initially with the
+.Fn event_priority_init
+function.
+This function should be called before the first call to
+.Fn event_dispatch .
+The
+.Fn event_priority_set
+function can be used to assign a priority to an event.
+By default,
+.Nm libevent
+assigns the middle priority to all events unless their priority
+is explicitly set.
+.Sh THREAD SAFE EVENTS
+.Nm Libevent
+has experimental support for thread-safe events.
+When initializing the library via
+.Fn event_init ,
+an event base is returned.
+This event base can be used in conjunction with calls to
+.Fn event_base_set ,
+.Fn event_base_dispatch ,
+.Fn event_base_loop ,
+.Fn event_base_loopexit ,
+.Fn bufferevent_base_set
+and
+.Fn event_base_free .
+.Fn event_base_set
+should be called after preparing an event with
+.Fn event_set ,
+as
+.Fn event_set
+assigns the provided event to the most recently created event base.
+.Fn bufferevent_base_set
+should be called after preparing a bufferevent with
+.Fn bufferevent_new .
+.Fn event_base_free
+should be used to free memory associated with the event base
+when it is no longer needed.
+.Sh BUFFERED EVENTS
+.Nm libevent
+provides an abstraction on top of the regular event callbacks.
+This abstraction is called a
+.Va "buffered event" .
+A buffered event provides input and output buffers that get filled
+and drained automatically.
+The user of a buffered event no longer deals directly with the IO,
+but instead is reading from input and writing to output buffers.
+.Pp
+A new bufferevent is created by
+.Fn bufferevent_new .
+The parameter
+.Fa fd
+specifies the file descriptor from which data is read and written to.
+This file descriptor is not allowed to be a
+.Xr pipe 2 .
+The next three parameters are callbacks.
+The read and write callback have the following form:
+.Ft void
+.Fn "(*cb)" "struct bufferevent *bufev" "void *arg" .
+The error callback has the following form:
+.Ft void
+.Fn "(*cb)" "struct bufferevent *bufev" "short what" "void *arg" .
+The argument is specified by the fourth parameter
+.Fa "cbarg" .
+A
+.Fa bufferevent struct
+pointer is returned on success, NULL on error.
+Both the read and the write callback may be NULL.
+The error callback has to be always provided.
+.Pp
+Once initialized, the bufferevent structure can be used repeatedly with
+bufferevent_enable() and bufferevent_disable().
+The flags parameter can be a combination of
+.Va EV_READ
+and
+.Va EV_WRITE .
+When read enabled the bufferevent will try to read from the file
+descriptor and call the read callback.
+The write callback is executed
+whenever the output buffer is drained below the write low watermark,
+which is
+.Va 0
+by default.
+.Pp
+The
+.Fn bufferevent_write
+function can be used to write data to the file descriptor.
+The data is appended to the output buffer and written to the descriptor
+automatically as it becomes available for writing.
+.Fn bufferevent_write
+returns 0 on success or \-1 on failure.
+The
+.Fn bufferevent_read
+function is used to read data from the input buffer,
+returning the amount of data read.
+.Pp
+If multiple bases are in use, bufferevent_base_set() must be called before
+enabling the bufferevent for the first time.
+.Sh NON-BLOCKING HTTP SUPPORT
+.Nm libevent
+provides a very thin HTTP layer that can be used both to host an HTTP
+server and also to make HTTP requests.
+An HTTP server can be created by calling
+.Fn evhttp_new .
+It can be bound to any port and address with the
+.Fn evhttp_bind_socket
+function.
+When the HTTP server is no longer used, it can be freed via
+.Fn evhttp_free .
+.Pp
+To be notified of HTTP requests, a user needs to register callbacks with the
+HTTP server.
+This can be done by calling
+.Fn evhttp_set_cb .
+The second argument is the URI for which a callback is being registered.
+The corresponding callback will receive an
+.Va struct evhttp_request
+object that contains all information about the request.
+.Pp
+This section does not document all the possible function calls; please
+check
+.Va event.h
+for the public interfaces.
+.Sh ADDITIONAL NOTES
+It is possible to disable support for
+.Va epoll , kqueue , devpoll , poll
+or
+.Va select
+by setting the environment variable
+.Va EVENT_NOEPOLL , EVENT_NOKQUEUE , EVENT_NODEVPOLL , EVENT_NOPOLL
+or
+.Va EVENT_NOSELECT ,
+respectively.
+By setting the environment variable
+.Va EVENT_SHOW_METHOD ,
+.Nm libevent
+displays the kernel notification method that it uses.
+.Sh RETURN VALUES
+Upon successful completion
+.Fn event_add
+and
+.Fn event_del
+return 0.
+Otherwise, \-1 is returned and the global variable errno is
+set to indicate the error.
+.Sh SEE ALSO
+.Xr kqueue 2 ,
+.Xr poll 2 ,
+.Xr select 2 ,
+.Xr evdns 3 ,
+.Xr timeout 9
+.Sh HISTORY
+The
+.Nm event
+API manpage is based on the
+.Xr timeout 9
+manpage by Artur Grabowski.
+The port of
+.Nm libevent
+to Windows is due to Michael A. Davis.
+Support for real-time signals is due to Taral.
+.Sh AUTHORS
+The
+.Nm event
+library was written by Niels Provos.
+.Sh BUGS
+This documentation is neither complete nor authoritative.
+If you are in doubt about the usage of this API then
+check the source code to find out how it works, write
+up the missing piece of documentation and send it to
+me for inclusion in this man page.
diff --git a/libevent/event.c b/libevent/event.c
new file mode 100644
index 00000000000..6eb5db05c87
--- /dev/null
+++ b/libevent/event.c
@@ -0,0 +1,1025 @@
+/*
+ * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#ifdef WIN32
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+#endif
+#include <sys/types.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <sys/_time.h>
+#endif
+#include <sys/queue.h>
+#include <stdio.h>
+#include <stdlib.h>
+#ifndef WIN32
+#include <unistd.h>
+#endif
+#include <errno.h>
+#include <signal.h>
+#include <string.h>
+#include <assert.h>
+#include <time.h>
+
+#include "event.h"
+#include "event-internal.h"
+#include "evutil.h"
+#include "log.h"
+
+#ifdef HAVE_EVENT_PORTS
+extern const struct eventop evportops;
+#endif
+#ifdef HAVE_SELECT
+extern const struct eventop selectops;
+#endif
+#ifdef HAVE_POLL
+extern const struct eventop pollops;
+#endif
+#ifdef HAVE_EPOLL
+extern const struct eventop epollops;
+#endif
+#ifdef HAVE_WORKING_KQUEUE
+extern const struct eventop kqops;
+#endif
+#ifdef HAVE_DEVPOLL
+extern const struct eventop devpollops;
+#endif
+#ifdef WIN32
+extern const struct eventop win32ops;
+#endif
+
+/* In order of preference */
+static const struct eventop *eventops[] = {
+#ifdef HAVE_EVENT_PORTS
+ &evportops,
+#endif
+#ifdef HAVE_WORKING_KQUEUE
+ &kqops,
+#endif
+#ifdef HAVE_EPOLL
+ &epollops,
+#endif
+#ifdef HAVE_DEVPOLL
+ &devpollops,
+#endif
+#ifdef HAVE_POLL
+ &pollops,
+#endif
+#ifdef HAVE_SELECT
+ &selectops,
+#endif
+#ifdef WIN32
+ &win32ops,
+#endif
+ NULL
+};
+
+/* Global state */
+struct event_base *current_base = NULL;
+extern struct event_base *evsignal_base;
+static int use_monotonic;
+
+/* Handle signals - This is a deprecated interface */
+int (*event_sigcb)(void); /* Signal callback when gotsig is set */
+volatile sig_atomic_t event_gotsig; /* Set in signal handler */
+
+/* Prototypes */
+static void event_queue_insert(struct event_base *, struct event *, int);
+static void event_queue_remove(struct event_base *, struct event *, int);
+static int event_haveevents(struct event_base *);
+
+static void event_process_active(struct event_base *);
+
+static int timeout_next(struct event_base *, struct timeval **);
+static void timeout_process(struct event_base *);
+static void timeout_correct(struct event_base *, struct timeval *);
+
+static void
+detect_monotonic(void)
+{
+#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
+ struct timespec ts;
+
+ if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
+ use_monotonic = 1;
+#endif
+}
+
+static int
+gettime(struct event_base *base, struct timeval *tp)
+{
+ if (base->tv_cache.tv_sec) {
+ *tp = base->tv_cache;
+ return (0);
+ }
+
+#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
+ if (use_monotonic) {
+ struct timespec ts;
+
+ if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
+ return (-1);
+
+ tp->tv_sec = ts.tv_sec;
+ tp->tv_usec = ts.tv_nsec / 1000;
+ return (0);
+ }
+#endif
+
+ return (evutil_gettimeofday(tp, NULL));
+}
+
+struct event_base *
+event_init(void)
+{
+ struct event_base *base = event_base_new();
+
+ if (base != NULL)
+ current_base = base;
+
+ return (base);
+}
+
+struct event_base *
+event_base_new(void)
+{
+ int i;
+ struct event_base *base;
+
+ if ((base = calloc(1, sizeof(struct event_base))) == NULL)
+ event_err(1, "%s: calloc", __func__);
+
+ event_sigcb = NULL;
+ event_gotsig = 0;
+
+ detect_monotonic();
+ gettime(base, &base->event_tv);
+
+ min_heap_ctor(&base->timeheap);
+ TAILQ_INIT(&base->eventqueue);
+ base->sig.ev_signal_pair[0] = -1;
+ base->sig.ev_signal_pair[1] = -1;
+
+ base->evbase = NULL;
+ for (i = 0; eventops[i] && !base->evbase; i++) {
+ base->evsel = eventops[i];
+
+ base->evbase = base->evsel->init(base);
+ }
+
+ if (base->evbase == NULL)
+ event_errx(1, "%s: no event mechanism available", __func__);
+
+ if (getenv("EVENT_SHOW_METHOD"))
+ event_msgx("libevent using: %s\n",
+ base->evsel->name);
+
+ /* allocate a single active event queue */
+ event_base_priority_init(base, 1);
+
+ return (base);
+}
+
+/*
+ * Tear down an event base: delete every non-internal event still
+ * registered, release the backend, the active queues and the timeout
+ * heap, then free the base itself.  Passing NULL frees current_base.
+ * Does not close user fds or free user callback arguments.
+ */
+void
+event_base_free(struct event_base *base)
+{
+	int i, n_deleted=0;
+	struct event *ev;
+
+	if (base == NULL && current_base)
+		base = current_base;
+	if (base == current_base)
+		current_base = NULL;
+
+	/* XXX(niels) - check for internal events first */
+	assert(base);
+	/* Delete all non-internal events. */
+	for (ev = TAILQ_FIRST(&base->eventqueue); ev; ) {
+		/* Grab the successor first: event_del() unlinks ev. */
+		struct event *next = TAILQ_NEXT(ev, ev_next);
+		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
+			event_del(ev);
+			++n_deleted;
+		}
+		ev = next;
+	}
+	/* Drain pure-timeout events still sitting in the heap. */
+	while ((ev = min_heap_top(&base->timeheap)) != NULL) {
+		event_del(ev);
+		++n_deleted;
+	}
+
+	/* Delete non-internal events that were left active. */
+	for (i = 0; i < base->nactivequeues; ++i) {
+		for (ev = TAILQ_FIRST(base->activequeues[i]); ev; ) {
+			struct event *next = TAILQ_NEXT(ev, ev_active_next);
+			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
+				event_del(ev);
+				++n_deleted;
+			}
+			ev = next;
+		}
+	}
+
+	if (n_deleted)
+		event_debug(("%s: %d events were still set in base",
+			__func__, n_deleted));
+
+	if (base->evsel->dealloc != NULL)
+		base->evsel->dealloc(base, base->evbase);
+
+	/* Only internal events may remain; the queues must now be empty. */
+	for (i = 0; i < base->nactivequeues; ++i)
+		assert(TAILQ_EMPTY(base->activequeues[i]));
+
+	assert(min_heap_empty(&base->timeheap));
+	min_heap_dtor(&base->timeheap);
+
+	for (i = 0; i < base->nactivequeues; ++i)
+		free(base->activequeues[i]);
+	free(base->activequeues);
+
+	assert(TAILQ_EMPTY(&base->eventqueue));
+
+	free(base);
+}
+
+/*
+ * Reinitialize the event base after a fork.  Backends whose kernel
+ * state does not survive fork() (need_reinit set) are torn down and
+ * re-created, and every inserted event is re-added to the new backend.
+ * Returns 0 on success, -1 if any event could not be re-added.
+ */
+int
+event_reinit(struct event_base *base)
+{
+	const struct eventop *evsel = base->evsel;
+	void *evbase = base->evbase;
+	int res = 0;
+	struct event *ev;
+
+	/* check if this event mechanism requires reinit */
+	if (!evsel->need_reinit)
+		return (0);
+
+	/* prevent internal delete */
+	if (base->sig.ev_signal_added) {
+		/* we cannot call event_del here because the base has
+		 * not been reinitialized yet. */
+		event_queue_remove(base, &base->sig.ev_signal,
+		    EVLIST_INSERTED);
+		if (base->sig.ev_signal.ev_flags & EVLIST_ACTIVE)
+			event_queue_remove(base, &base->sig.ev_signal,
+			    EVLIST_ACTIVE);
+		base->sig.ev_signal_added = 0;
+	}
+
+	if (base->evsel->dealloc != NULL)
+		base->evsel->dealloc(base, base->evbase);
+	evbase = base->evbase = evsel->init(base);
+	if (base->evbase == NULL)
+		event_errx(1, "%s: could not reinitialize event mechanism",
+		    __func__);
+
+	/* Re-register every inserted event with the fresh backend. */
+	TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
+		if (evsel->add(evbase, ev) == -1)
+			res = -1;
+	}
+
+	return (res);
+}
+
+/*
+ * Legacy wrapper: set the number of priority queues on the global
+ * current_base.  See event_base_priority_init().
+ */
+int
+event_priority_init(int npriorities)
+{
+	return event_base_priority_init(current_base, npriorities);
+}
+
+/*
+ * Configure the number of active-event priority queues on a base.
+ * Fails (-1) while events are active; otherwise frees any old queue
+ * array and allocates npriorities fresh queues.  Aborts via
+ * event_err() on allocation failure.  Returns 0 on success.
+ */
+int
+event_base_priority_init(struct event_base *base, int npriorities)
+{
+	int i;
+
+	/* Cannot resize the queues while events are pending on them. */
+	if (base->event_count_active)
+		return (-1);
+
+	/* Release the previous queues if the priority count changes. */
+	if (base->nactivequeues && npriorities != base->nactivequeues) {
+		for (i = 0; i < base->nactivequeues; ++i) {
+			free(base->activequeues[i]);
+		}
+		free(base->activequeues);
+	}
+
+	/* Allocate our priority queues.  Note: the element size is a
+	 * single pointer; multiplying by npriorities here (as older
+	 * code did) over-allocated npriorities^2 pointers. */
+	base->nactivequeues = npriorities;
+	base->activequeues = (struct event_list **)calloc(base->nactivequeues,
+	    sizeof(struct event_list *));
+	if (base->activequeues == NULL)
+		event_err(1, "%s: calloc", __func__);
+
+	for (i = 0; i < base->nactivequeues; ++i) {
+		base->activequeues[i] = malloc(sizeof(struct event_list));
+		if (base->activequeues[i] == NULL)
+			event_err(1, "%s: malloc", __func__);
+		TAILQ_INIT(base->activequeues[i]);
+	}
+
+	return (0);
+}
+
+/* Return non-zero if any (non-internal) events are registered. */
+int
+event_haveevents(struct event_base *base)
+{
+	return (base->event_count > 0);
+}
+
+/*
+ * Active events are stored in priority queues.  Lower priorities are
+ * always processed before higher priorities.  Low priority events can
+ * starve high priority ones.
+ */
+
+static void
+event_process_active(struct event_base *base)
+{
+	struct event *ev;
+	struct event_list *activeq = NULL;
+	int i;
+	short ncalls;
+
+	/* Pick the lowest-numbered (highest priority) non-empty queue. */
+	for (i = 0; i < base->nactivequeues; ++i) {
+		if (TAILQ_FIRST(base->activequeues[i]) != NULL) {
+			activeq = base->activequeues[i];
+			break;
+		}
+	}
+
+	/* Only called when event_count_active != 0, so one must exist. */
+	assert(activeq != NULL);
+
+	for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
+		/* Persistent events stay registered; others are removed. */
+		if (ev->ev_events & EV_PERSIST)
+			event_queue_remove(base, ev, EVLIST_ACTIVE);
+		else
+			event_del(ev);
+
+		/* Allows deletes to work: the callback can zero *ev_pncalls
+		 * (via event_del) to abort the remaining iterations. */
+		ncalls = ev->ev_ncalls;
+		ev->ev_pncalls = &ncalls;
+		while (ncalls) {
+			ncalls--;
+			ev->ev_ncalls = ncalls;
+			(*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
+			if (event_gotsig || base->event_break)
+				return;
+		}
+	}
+}
+
+/*
+ * Wait continuously for events on current_base.  We exit only if no
+ * events are left.
+ */
+
+int
+event_dispatch(void)
+{
+	return (event_loop(0));
+}
+
+/* Run the dispatch loop on a specific base; see event_base_loop(). */
+int
+event_base_dispatch(struct event_base *event_base)
+{
+  return (event_base_loop(event_base, 0));
+}
+
+/* Return the name of the backend this base selected (e.g. "epoll"). */
+const char *
+event_base_get_method(struct event_base *base)
+{
+	assert(base);
+	return (base->evsel->name);
+}
+
+/* One-shot timer callback: asks the loop of `arg` (a base) to terminate. */
+static void
+event_loopexit_cb(int fd, short what, void *arg)
+{
+	struct event_base *base = arg;
+	base->event_gotterm = 1;
+}
+
+/* not thread safe */
+/* Schedule loop termination on current_base after tv elapses. */
+int
+event_loopexit(const struct timeval *tv)
+{
+	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
+		    current_base, tv));
+}
+
+/* Schedule loop termination on a specific base after tv elapses. */
+int
+event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
+{
+	return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
+		    event_base, tv));
+}
+
+/* not thread safe */
+/* Abort the loop on current_base after the current callback returns. */
+int
+event_loopbreak(void)
+{
+	return (event_base_loopbreak(current_base));
+}
+
+/*
+ * Flag a base so its loop exits after the event being processed
+ * completes.  Returns 0, or -1 if event_base is NULL.
+ */
+int
+event_base_loopbreak(struct event_base *event_base)
+{
+	if (event_base == NULL)
+		return (-1);
+
+	event_base->event_break = 1;
+	return (0);
+}
+
+
+
+/* not thread safe */
+
+/* Run the dispatch loop on current_base; see event_base_loop(). */
+int
+event_loop(int flags)
+{
+	return event_base_loop(current_base, flags);
+}
+
+/*
+ * Core dispatch loop.  Repeatedly: honor termination/break requests,
+ * run deferred signal callbacks, correct for a clock that jumped
+ * backwards, compute the wait for the nearest timeout, poll the
+ * backend, fire expired timeouts, and process active events.
+ * flags: EVLOOP_ONCE blocks at most one iteration, EVLOOP_NONBLOCK
+ * never blocks.  Returns 0 on normal exit, 1 when no events are
+ * registered, -1 on backend error.
+ */
+int
+event_base_loop(struct event_base *base, int flags)
+{
+	const struct eventop *evsel = base->evsel;
+	void *evbase = base->evbase;
+	struct timeval tv;
+	struct timeval *tv_p;
+	int res, done;
+
+	/* clear time cache */
+	base->tv_cache.tv_sec = 0;
+
+	if (base->sig.ev_signal_added)
+		evsignal_base = base;
+	done = 0;
+	while (!done) {
+		/* Terminate the loop if we have been asked to */
+		if (base->event_gotterm) {
+			base->event_gotterm = 0;
+			break;
+		}
+
+		if (base->event_break) {
+			base->event_break = 0;
+			break;
+		}
+
+		/* You cannot use this interface for multi-threaded apps */
+		while (event_gotsig) {
+			event_gotsig = 0;
+			if (event_sigcb) {
+				res = (*event_sigcb)();
+				if (res == -1) {
+					errno = EINTR;
+					return (-1);
+				}
+			}
+		}
+
+		timeout_correct(base, &tv);
+
+		/* tv_p == NULL means "block indefinitely" for the backend. */
+		tv_p = &tv;
+		if (!base->event_count_active && !(flags & EVLOOP_NONBLOCK)) {
+			timeout_next(base, &tv_p);
+		} else {
+			/*
+			 * if we have active events, we just poll new events
+			 * without waiting.
+			 */
+			evutil_timerclear(&tv);
+		}
+
+		/* If we have no events, we just exit */
+		if (!event_haveevents(base)) {
+			event_debug(("%s: no events registered.", __func__));
+			return (1);
+		}
+
+		/* update last old time */
+		gettime(base, &base->event_tv);
+
+		/* clear time cache */
+		base->tv_cache.tv_sec = 0;
+
+		res = evsel->dispatch(base, evbase, tv_p);
+
+		if (res == -1)
+			return (-1);
+		/* Cache "now" so callbacks in this batch share one clock read. */
+		gettime(base, &base->tv_cache);
+
+		timeout_process(base);
+
+		if (base->event_count_active) {
+			event_process_active(base);
+			if (!base->event_count_active && (flags & EVLOOP_ONCE))
+				done = 1;
+		} else if (flags & EVLOOP_NONBLOCK)
+			done = 1;
+	}
+
+	/* clear time cache */
+	base->tv_cache.tv_sec = 0;
+
+	event_debug(("%s: asked to terminate loop.", __func__));
+	return (0);
+}
+
+/* Sets up an event for processing once */
+
+struct event_once {
+	struct event ev;	/* the underlying one-shot event */
+
+	void (*cb)(int, short, void *);	/* user callback */
+	void *arg;			/* user callback argument */
+};
+
+/* One-time callback, it deletes itself */
+
+static void
+event_once_cb(int fd, short events, void *arg)
+{
+	struct event_once *eonce = arg;
+
+	/* Invoke the user callback, then free the wrapper allocated
+	 * by event_base_once(). */
+	(*eonce->cb)(fd, events, eonce->arg);
+	free(eonce);
+}
+
+/* not threadsafe, event scheduled once. */
+/* Convenience wrapper for event_base_once() on current_base. */
+int
+event_once(int fd, short events,
+    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
+{
+	return event_base_once(current_base, fd, events, callback, arg, tv);
+}
+
+/*
+ * Schedules an event once: fire `callback` a single time when `events`
+ * (EV_TIMEOUT and/or EV_READ|EV_WRITE, not EV_SIGNAL) occurs on fd, or
+ * after tv elapses.  A heap wrapper carries the user callback and is
+ * freed by event_once_cb after firing.  Returns 0 on success, -1 on
+ * bad arguments or allocation failure.
+ */
+int
+event_base_once(struct event_base *base, int fd, short events,
+    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
+{
+	struct event_once *eonce;
+	struct timeval etv;
+	int res;
+
+	/* We cannot support signals that just fire once */
+	if (events & EV_SIGNAL)
+		return (-1);
+
+	if ((eonce = calloc(1, sizeof(struct event_once))) == NULL)
+		return (-1);
+
+	eonce->cb = callback;
+	eonce->arg = arg;
+
+	if (events == EV_TIMEOUT) {
+		/* NULL tv means "fire as soon as possible". */
+		if (tv == NULL) {
+			evutil_timerclear(&etv);
+			tv = &etv;
+		}
+
+		evtimer_set(&eonce->ev, event_once_cb, eonce);
+	} else if (events & (EV_READ|EV_WRITE)) {
+		events &= EV_READ|EV_WRITE;
+
+		event_set(&eonce->ev, fd, events, event_once_cb, eonce);
+	} else {
+		/* Bad event combination */
+		free(eonce);
+		return (-1);
+	}
+
+	res = event_base_set(base, &eonce->ev);
+	if (res == 0)
+		res = event_add(&eonce->ev, tv);
+	if (res != 0) {
+		free(eonce);
+		return (res);
+	}
+
+	return (0);
+}
+
+/*
+ * Initialize an event structure for fd/events/callback/arg.  The event
+ * is bound to current_base; call event_base_set() afterwards to attach
+ * it to a different base.  Must be called before event_add().
+ */
+void
+event_set(struct event *ev, int fd, short events,
+	  void (*callback)(int, short, void *), void *arg)
+{
+	/* Take the current base - caller needs to set the real base later */
+	ev->ev_base = current_base;
+
+	ev->ev_callback = callback;
+	ev->ev_arg = arg;
+	ev->ev_fd = fd;
+	ev->ev_events = events;
+	ev->ev_res = 0;
+	ev->ev_flags = EVLIST_INIT;
+	ev->ev_ncalls = 0;
+	ev->ev_pncalls = NULL;
+
+	min_heap_elem_init(ev);
+
+	/* by default, we put new events into the middle priority */
+	if(current_base)
+		ev->ev_pri = current_base->nactivequeues/2;
+}
+
+/*
+ * Attach an event to a specific base and reset its priority to that
+ * base's middle queue.  Only allowed before the event is added or made
+ * active (flags still EVLIST_INIT); returns -1 otherwise, 0 on success.
+ */
+int
+event_base_set(struct event_base *base, struct event *ev)
+{
+	/* Only innocent events may be assigned to a different base */
+	if (ev->ev_flags != EVLIST_INIT)
+		return (-1);
+
+	ev->ev_base = base;
+	ev->ev_pri = base->nactivequeues/2;
+
+	return (0);
+}
+
+/*
+ * Sets the priority of an event - if an event is already active
+ * (scheduled for callback execution) changing the priority fails.
+ * Valid priorities are 0 .. nactivequeues-1; smaller is higher.
+ */
+
+int
+event_priority_set(struct event *ev, int pri)
+{
+	if (ev->ev_flags & EVLIST_ACTIVE)
+		return (-1);
+	if (pri < 0 || pri >= ev->ev_base->nactivequeues)
+		return (-1);
+
+	ev->ev_pri = pri;
+
+	return (0);
+}
+
+/*
+ * Checks if a specific event is pending or scheduled.  Returns the
+ * subset of `event` (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL) that is
+ * currently pending; if tv is non-NULL and EV_TIMEOUT is pending, *tv
+ * receives the absolute wall-clock expiry time.
+ */
+
+int
+event_pending(struct event *ev, short event, struct timeval *tv)
+{
+	struct timeval	now, res;
+	int flags = 0;
+
+	if (ev->ev_flags & EVLIST_INSERTED)
+		flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL));
+	if (ev->ev_flags & EVLIST_ACTIVE)
+		flags |= ev->ev_res;
+	if (ev->ev_flags & EVLIST_TIMEOUT)
+		flags |= EV_TIMEOUT;
+
+	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);
+
+	/* See if there is a timeout that we should report */
+	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
+		/* ev_timeout may be on the monotonic clock; remap the
+		 * remaining interval onto the real-time clock. */
+		gettime(ev->ev_base, &now);
+		evutil_timersub(&ev->ev_timeout, &now, &res);
+		/* correctly remap to real time */
+		evutil_gettimeofday(&now, NULL);
+		evutil_timeradd(&now, &res, tv);
+	}
+
+	return (flags & event);
+}
+
+/*
+ * Register an event with its base.  Adds fd/signal interest to the
+ * backend (unless already inserted or active) and, if tv is non-NULL,
+ * (re)schedules a timeout tv from now.  Returns 0 on success, -1 on
+ * backend or allocation failure; on failure no state is changed.
+ */
+int
+event_add(struct event *ev, const struct timeval *tv)
+{
+	struct event_base *base = ev->ev_base;
+	const struct eventop *evsel = base->evsel;
+	void *evbase = base->evbase;
+	int res = 0;
+
+	event_debug((
+		 "event_add: event: %p, %s%s%scall %p",
+		 ev,
+		 ev->ev_events & EV_READ ? "EV_READ " : " ",
+		 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
+		 tv ? "EV_TIMEOUT " : " ",
+		 ev->ev_callback));
+
+	assert(!(ev->ev_flags & ~EVLIST_ALL));
+
+	/*
+	 * prepare for timeout insertion further below, if we get a
+	 * failure on any step, we should not change any state.
+	 */
+	if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
+		if (min_heap_reserve(&base->timeheap,
+			1 + min_heap_size(&base->timeheap)) == -1)
+			return (-1);  /* ENOMEM == errno */
+	}
+
+	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL)) &&
+	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
+		res = evsel->add(evbase, ev);
+		if (res != -1)
+			event_queue_insert(base, ev, EVLIST_INSERTED);
+	}
+
+	/*
+	 * we should change the timeout state only if the previous event
+	 * addition succeeded.
+	 */
+	if (res != -1 && tv != NULL) {
+		struct timeval now;
+
+		/*
+		 * we already reserved memory above for the case where we
+		 * are not replacing an existing timeout.
+		 */
+		if (ev->ev_flags & EVLIST_TIMEOUT)
+			event_queue_remove(base, ev, EVLIST_TIMEOUT);
+
+		/* Check if it is active due to a timeout.  Rescheduling
+		 * this timeout before the callback can be executed
+		 * removes it from the active list. */
+		if ((ev->ev_flags & EVLIST_ACTIVE) &&
+		    (ev->ev_res & EV_TIMEOUT)) {
+			/* See if we are just active executing this
+			 * event in a loop
+			 */
+			if (ev->ev_ncalls && ev->ev_pncalls) {
+				/* Abort loop */
+				*ev->ev_pncalls = 0;
+			}
+
+			event_queue_remove(base, ev, EVLIST_ACTIVE);
+		}
+
+		gettime(base, &now);
+		evutil_timeradd(&now, tv, &ev->ev_timeout);
+
+		event_debug((
+			 "event_add: timeout in %ld seconds, call %p",
+			 tv->tv_sec, ev->ev_callback));
+
+		event_queue_insert(base, ev, EVLIST_TIMEOUT);
+	}
+
+	return (res);
+}
+
+/*
+ * Remove an event from every queue it is on (timeout heap, active
+ * queue, inserted list) and, if it was inserted, from the backend as
+ * well.  Safe to call from the event's own callback: it aborts any
+ * remaining ncalls iterations.  Returns 0, or the backend's del()
+ * result; -1 if the event was never given a base.
+ */
+int
+event_del(struct event *ev)
+{
+	struct event_base *base;
+	const struct eventop *evsel;
+	void *evbase;
+
+	event_debug(("event_del: %p, callback %p",
+		 ev, ev->ev_callback));
+
+	/* An event without a base has not been added */
+	if (ev->ev_base == NULL)
+		return (-1);
+
+	base = ev->ev_base;
+	evsel = base->evsel;
+	evbase = base->evbase;
+
+	assert(!(ev->ev_flags & ~EVLIST_ALL));
+
+	/* See if we are just active executing this event in a loop */
+	if (ev->ev_ncalls && ev->ev_pncalls) {
+		/* Abort loop */
+		*ev->ev_pncalls = 0;
+	}
+
+	if (ev->ev_flags & EVLIST_TIMEOUT)
+		event_queue_remove(base, ev, EVLIST_TIMEOUT);
+
+	if (ev->ev_flags & EVLIST_ACTIVE)
+		event_queue_remove(base, ev, EVLIST_ACTIVE);
+
+	if (ev->ev_flags & EVLIST_INSERTED) {
+		event_queue_remove(base, ev, EVLIST_INSERTED);
+		return (evsel->del(evbase, ev));
+	}
+
+	return (0);
+}
+
+/*
+ * Mark an event active with result flags `res`, scheduling its
+ * callback to run `ncalls` times.  If the event is already active the
+ * new result flags are OR-ed in instead of re-queueing it.
+ */
+void
+event_active(struct event *ev, int res, short ncalls)
+{
+	/* We get different kinds of events, add them together */
+	if (ev->ev_flags & EVLIST_ACTIVE) {
+		ev->ev_res |= res;
+		return;
+	}
+
+	ev->ev_res = res;
+	ev->ev_ncalls = ncalls;
+	ev->ev_pncalls = NULL;
+	event_queue_insert(ev->ev_base, ev, EVLIST_ACTIVE);
+}
+
+/*
+ * Compute how long the backend may sleep: the interval until the
+ * earliest scheduled timeout.  Sets *tv_p to NULL when no timeouts are
+ * pending (block indefinitely on I/O), or clears *tv when the earliest
+ * timeout is already due.  Returns 0 on success, -1 on clock failure.
+ */
+static int
+timeout_next(struct event_base *base, struct timeval **tv_p)
+{
+	struct timeval now;
+	struct event *ev;
+	struct timeval *tv = *tv_p;
+
+	if ((ev = min_heap_top(&base->timeheap)) == NULL) {
+		/* if no time-based events are active wait for I/O */
+		*tv_p = NULL;
+		return (0);
+	}
+
+	if (gettime(base, &now) == -1)
+		return (-1);
+
+	if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
+		evutil_timerclear(tv);
+		return (0);
+	}
+
+	evutil_timersub(&ev->ev_timeout, &now, tv);
+
+	assert(tv->tv_sec >= 0);
+	assert(tv->tv_usec >= 0);
+
+	event_debug(("timeout_next: in %ld seconds", tv->tv_sec));
+	return (0);
+}
+
+/*
+ * Determines if the time is running backwards by comparing the current
+ * time against the last time we checked.  Not needed when using clock
+ * monotonic.  When a backwards jump is detected, every pending timeout
+ * in the heap is shifted by the same offset so relative ordering and
+ * remaining intervals are preserved.
+ */
+
+static void
+timeout_correct(struct event_base *base, struct timeval *tv)
+{
+	struct event **pev;
+	unsigned int size;
+	struct timeval off;
+
+	if (use_monotonic)
+		return;
+
+	/* Check if time is running backwards */
+	gettime(base, tv);
+	if (evutil_timercmp(tv, &base->event_tv, >=)) {
+		base->event_tv = *tv;
+		return;
+	}
+
+	event_debug(("%s: time is running backwards, corrected",
+		    __func__));
+	evutil_timersub(&base->event_tv, tv, &off);
+
+	/*
+	 * We can modify the key element of the node without destroying
+	 * the key, because we apply it to all in the right order.
+	 */
+	pev = base->timeheap.p;
+	size = base->timeheap.n;
+	for (; size-- > 0; ++pev) {
+		struct timeval *ev_tv = &(**pev).ev_timeout;
+		evutil_timersub(ev_tv, &off, ev_tv);
+	}
+	/* Now remember what the new time turned out to be. */
+	base->event_tv = *tv;
+}
+
+/*
+ * Fire every timeout that has expired: pop events off the min-heap
+ * while their deadline is <= now, delete them from the I/O queues and
+ * mark them active with EV_TIMEOUT (one callback invocation each).
+ */
+void
+timeout_process(struct event_base *base)
+{
+	struct timeval now;
+	struct event *ev;
+
+	if (min_heap_empty(&base->timeheap))
+		return;
+
+	gettime(base, &now);
+
+	while ((ev = min_heap_top(&base->timeheap))) {
+		if (evutil_timercmp(&ev->ev_timeout, &now, >))
+			break;
+
+		/* delete this event from the I/O queues */
+		event_del(ev);
+
+		event_debug(("timeout_process: call %p",
+			 ev->ev_callback));
+		event_active(ev, EV_TIMEOUT, 1);
+	}
+}
+
+/*
+ * Unlink an event from one queue (EVLIST_INSERTED, EVLIST_ACTIVE or
+ * EVLIST_TIMEOUT), clearing the matching flag and maintaining the
+ * base's counters.  Aborts via event_errx() if the event is not on
+ * that queue or the queue id is unknown.
+ */
+void
+event_queue_remove(struct event_base *base, struct event *ev, int queue)
+{
+	if (!(ev->ev_flags & queue))
+		event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
+			   ev, ev->ev_fd, queue);
+
+	/* Internal events are not counted in event_count. */
+	if (~ev->ev_flags & EVLIST_INTERNAL)
+		base->event_count--;
+
+	ev->ev_flags &= ~queue;
+	switch (queue) {
+	case EVLIST_INSERTED:
+		TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
+		break;
+	case EVLIST_ACTIVE:
+		base->event_count_active--;
+		TAILQ_REMOVE(base->activequeues[ev->ev_pri],
+		    ev, ev_active_next);
+		break;
+	case EVLIST_TIMEOUT:
+		min_heap_erase(&base->timeheap, ev);
+		break;
+	default:
+		event_errx(1, "%s: unknown queue %x", __func__, queue);
+	}
+}
+
+/*
+ * Link an event onto one queue (EVLIST_INSERTED, EVLIST_ACTIVE or
+ * EVLIST_TIMEOUT), setting the matching flag and maintaining the
+ * base's counters.  Re-activating an already-active event is a no-op;
+ * any other double insertion aborts via event_errx().
+ */
+void
+event_queue_insert(struct event_base *base, struct event *ev, int queue)
+{
+	if (ev->ev_flags & queue) {
+		/* Double insertion is possible for active events */
+		if (queue & EVLIST_ACTIVE)
+			return;
+
+		event_errx(1, "%s: %p(fd %d) already on queue %x", __func__,
+			   ev, ev->ev_fd, queue);
+	}
+
+	/* Internal events are not counted in event_count. */
+	if (~ev->ev_flags & EVLIST_INTERNAL)
+		base->event_count++;
+
+	ev->ev_flags |= queue;
+	switch (queue) {
+	case EVLIST_INSERTED:
+		TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
+		break;
+	case EVLIST_ACTIVE:
+		base->event_count_active++;
+		TAILQ_INSERT_TAIL(base->activequeues[ev->ev_pri],
+		    ev,ev_active_next);
+		break;
+	case EVLIST_TIMEOUT: {
+		min_heap_push(&base->timeheap, ev);
+		break;
+	}
+	default:
+		event_errx(1, "%s: unknown queue %x", __func__, queue);
+	}
+}
+
+/* Functions for debugging */
+
+/* Return the libevent version string (the VERSION build constant). */
+const char *
+event_get_version(void)
+{
+	return (VERSION);
+}
+
+/*
+ * No thread-safe interface needed - the information should be the same
+ * for all threads.
+ */
+
+/* Return the backend name selected by current_base. */
+const char *
+event_get_method(void)
+{
+	return (current_base->evsel->name);
+}
diff --git a/libevent/event.h b/libevent/event.h
new file mode 100644
index 00000000000..039e4f88bcb
--- /dev/null
+++ b/libevent/event.h
@@ -0,0 +1,1175 @@
+/*
+ * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _EVENT_H_
+#define _EVENT_H_
+
+/** @mainpage
+
+ @section intro Introduction
+
+ libevent is an event notification library for developing scalable network
+ servers. The libevent API provides a mechanism to execute a callback
+ function when a specific event occurs on a file descriptor or after a
+ timeout has been reached. Furthermore, libevent also supports callbacks
+ triggered by signals or regular timeouts.
+
+ libevent is meant to replace the event loop found in event driven network
+ servers. An application just needs to call event_dispatch() and then add or
+ remove events dynamically without having to change the event loop.
+
+ Currently, libevent supports /dev/poll, kqueue(2), select(2), poll(2) and
+ epoll(4). It also has experimental support for real-time signals. The
+ internal event mechanism is completely independent of the exposed event API,
+ and a simple update of libevent can provide new functionality without having
+ to redesign the applications. As a result, Libevent allows for portable
+ application development and provides the most scalable event notification
+ mechanism available on an operating system. Libevent can also be used for
+ multi-threaded applications; see Steven Grimm's explanation. Libevent should
+ compile on Linux, *BSD, Mac OS X, Solaris and Windows.
+
+ @section usage Standard usage
+
+ Every program that uses libevent must include the <event.h> header, and pass
+ the -levent flag to the linker. Before using any of the functions in the
+ library, you must call event_init() or event_base_new() to perform one-time
+ initialization of the libevent library.
+
+ @section event Event notification
+
+ For each file descriptor that you wish to monitor, you must declare an event
+ structure and call event_set() to initialize the members of the structure.
+ To enable notification, you add the structure to the list of monitored
+ events by calling event_add(). The event structure must remain allocated as
+ long as it is active, so it should be allocated on the heap. Finally, you
+ call event_dispatch() to loop and dispatch events.
+
+ @section bufferevent I/O Buffers
+
+ libevent provides an abstraction on top of the regular event callbacks. This
+ abstraction is called a buffered event. A buffered event provides input and
+ output buffers that get filled and drained automatically. The user of a
+ buffered event no longer deals directly with the I/O, but instead is reading
+ from input and writing to output buffers.
+
+ Once initialized via bufferevent_new(), the bufferevent structure can be
+ used repeatedly with bufferevent_enable() and bufferevent_disable().
+ Instead of reading and writing directly to a socket, you would call
+ bufferevent_read() and bufferevent_write().
+
+ When read enabled the bufferevent will try to read from the file descriptor
+ and call the read callback. The write callback is executed whenever the
+ output buffer is drained below the write low watermark, which is 0 by
+ default.
+
+ @section timers Timers
+
+ libevent can also be used to create timers that invoke a callback after a
+ certain amount of time has expired. The evtimer_set() function prepares an
+ event struct to be used as a timer. To activate the timer, call
+ evtimer_add(). Timers can be deactivated by calling evtimer_del().
+
+ @section timeouts Timeouts
+
+ In addition to simple timers, libevent can assign timeout events to file
+ descriptors that are triggered whenever a certain amount of time has passed
+ with no activity on a file descriptor. The timeout_set() function
+ initializes an event struct for use as a timeout. Once initialized, the
+ event must be activated by using timeout_add(). To cancel the timeout, call
+ timeout_del().
+
+ @section evdns Asynchronous DNS resolution
+
+ libevent provides an asynchronous DNS resolver that should be used instead
+ of the standard DNS resolver functions. These functions can be imported by
+ including the <evdns.h> header in your program. Before using any of the
+ resolver functions, you must call evdns_init() to initialize the library. To
+ convert a hostname to an IP address, you call the evdns_resolve_ipv4()
+ function. To perform a reverse lookup, you would call the
+ evdns_resolve_reverse() function. All of these functions use callbacks to
+ avoid blocking while the lookup is performed.
+
+ @section evhttp Event-driven HTTP servers
+
+ libevent provides a very simple event-driven HTTP server that can be
+ embedded in your program and used to service HTTP requests.
+
+ To use this capability, you need to include the <evhttp.h> header in your
+ program. You create the server by calling evhttp_new(). Add addresses and
+ ports to listen on with evhttp_bind_socket(). You then register one or more
+ callbacks to handle incoming requests. Each URI can be assigned a callback
+ via the evhttp_set_cb() function. A generic callback function can also be
+ registered via evhttp_set_gencb(); this callback will be invoked if no other
+ callbacks have been registered for a given URI.
+
+ @section evrpc A framework for RPC servers and clients
+
+ libevents provides a framework for creating RPC servers and clients. It
+ takes care of marshaling and unmarshaling all data structures.
+
+ @section api API Reference
+
+ To browse the complete documentation of the libevent API, click on any of
+ the following links.
+
+ event.h
+ The primary libevent header
+
+ evdns.h
+ Asynchronous DNS resolution
+
+ evhttp.h
+ An embedded libevent-based HTTP server
+
+ evrpc.h
+ A framework for creating RPC servers and clients
+
+ */
+
+/** @file event.h
+
+ A library for writing event-driven network servers
+
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <config.h>
+#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#ifdef HAVE_STDINT_H
+#include <stdint.h>
+#endif
+#include <stdarg.h>
+
+/* For int types. */
+#include <evutil.h>
+
+#ifdef WIN32
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+typedef unsigned char u_char;
+typedef unsigned short u_short;
+#endif
+
+#define EVLIST_TIMEOUT 0x01
+#define EVLIST_INSERTED 0x02
+#define EVLIST_SIGNAL 0x04
+#define EVLIST_ACTIVE 0x08
+#define EVLIST_INTERNAL 0x10
+#define EVLIST_INIT 0x80
+
+/* EVLIST_X_ Private space: 0x1000-0xf000 */
+#define EVLIST_ALL (0xf000 | 0x9f)
+
+#define EV_TIMEOUT 0x01
+#define EV_READ 0x02
+#define EV_WRITE 0x04
+#define EV_SIGNAL 0x08
+#define EV_PERSIST 0x10 /* Persistent event */
+
+/* Fix so that people don't have to include <sys/queue.h> first */
+#ifndef TAILQ_ENTRY
+#define _EVENT_DEFINED_TQENTRY
+#define TAILQ_ENTRY(type) \
+struct { \
+ struct type *tqe_next; /* next element */ \
+ struct type **tqe_prev; /* address of previous next element */ \
+}
+#endif /* !TAILQ_ENTRY */
+
+struct event_base;
+struct event {
+	TAILQ_ENTRY (event) ev_next;		/* link on the inserted list */
+	TAILQ_ENTRY (event) ev_active_next;	/* link on an active queue */
+	TAILQ_ENTRY (event) ev_signal_next;	/* link on a signal list */
+	unsigned int min_heap_idx;	/* for managing timeouts */
+
+	struct event_base *ev_base;	/* base this event belongs to */
+
+	int ev_fd;		/* fd to monitor, or signal number */
+	short ev_events;	/* EV_* flags requested by the user */
+	short ev_ncalls;	/* remaining callback invocations */
+	short *ev_pncalls;	/* Allows deletes in callback */
+
+	struct timeval ev_timeout;	/* absolute expiry time */
+
+	int ev_pri;		/* smaller numbers are higher priority */
+
+	void (*ev_callback)(int, short, void *arg);
+	void *ev_arg;
+
+	int ev_res;		/* result passed to event callback */
+	int ev_flags;		/* EVLIST_* state flags */
+};
+
+#define EVENT_SIGNAL(ev) (int)(ev)->ev_fd
+#define EVENT_FD(ev) (int)(ev)->ev_fd
+
+/*
+ * Key-Value pairs.  Can be used for HTTP headers but also for
+ * query argument parsing.
+ */
+struct evkeyval {
+	TAILQ_ENTRY(evkeyval) next;	/* link in an evkeyvalq list */
+
+	char *key;	/* NUL-terminated key string */
+	char *value;	/* NUL-terminated value string */
+};
+
+#ifdef _EVENT_DEFINED_TQENTRY
+#undef TAILQ_ENTRY
+struct event_list;
+struct evkeyvalq;
+#undef _EVENT_DEFINED_TQENTRY
+#else
+TAILQ_HEAD (event_list, event);
+TAILQ_HEAD (evkeyvalq, evkeyval);
+#endif /* _EVENT_DEFINED_TQENTRY */
+
+/**
+ Initialize the event API.
+
+ Use event_base_new() to initialize a new event base, but does not set
+ the current_base global. If using only event_base_new(), each event
+ added must have an event base set with event_base_set()
+
+ @see event_base_set(), event_base_free(), event_init()
+ */
+struct event_base *event_base_new(void);
+
+/**
+ Initialize the event API.
+
+ The event API needs to be initialized with event_init() before it can be
+ used. Sets the current_base global representing the default base for
+ events that have no base associated with them.
+
+ @see event_base_set(), event_base_new()
+ */
+struct event_base *event_init(void);
+
+/**
+  Reinitialize the event base after a fork
+
+ Some event mechanisms do not survive across fork. The event base needs
+ to be reinitialized with the event_reinit() function.
+
+ @param base the event base that needs to be re-initialized
+ @return 0 if successful, or -1 if some events could not be re-added.
+ @see event_base_new(), event_init()
+*/
+int event_reinit(struct event_base *base);
+
+/**
+ Loop to process events.
+
+ In order to process events, an application needs to call
+ event_dispatch(). This function only returns on error, and should
+ replace the event core of the application program.
+
+ @see event_base_dispatch()
+ */
+int event_dispatch(void);
+
+
+/**
+ Threadsafe event dispatching loop.
+
+ @param eb the event_base structure returned by event_init()
+ @see event_init(), event_dispatch()
+ */
+int event_base_dispatch(struct event_base *);
+
+
+/**
+ Get the kernel event notification mechanism used by libevent.
+
+ @param eb the event_base structure returned by event_base_new()
+ @return a string identifying the kernel event mechanism (kqueue, epoll, etc.)
+ */
+const char *event_base_get_method(struct event_base *);
+
+
+/**
+ Deallocate all memory associated with an event_base, and free the base.
+
+ Note that this function will not close any fds or free any memory passed
+ to event_set as the argument to callback.
+
+ @param eb an event_base to be freed
+ */
+void event_base_free(struct event_base *);
+
+
+#define _EVENT_LOG_DEBUG 0
+#define _EVENT_LOG_MSG 1
+#define _EVENT_LOG_WARN 2
+#define _EVENT_LOG_ERR 3
+typedef void (*event_log_cb)(int severity, const char *msg);
+/**
+ Redirect libevent's log messages.
+
+ @param cb a function taking two arguments: an integer severity between
+ _EVENT_LOG_DEBUG and _EVENT_LOG_ERR, and a string. If cb is NULL,
+ then the default log is used.
+ */
+void event_set_log_callback(event_log_cb cb);
+
+/**
+ Associate a different event base with an event.
+
+ @param eb the event base
+ @param ev the event
+ */
+int event_base_set(struct event_base *, struct event *);
+
+/**
+ event_loop() flags
+ */
+/*@{*/
+#define EVLOOP_ONCE 0x01 /**< Block at most once. */
+#define EVLOOP_NONBLOCK 0x02 /**< Do not block. */
+/*@}*/
+
+/**
+ Handle events.
+
+ This is a more flexible version of event_dispatch().
+
+ @param flags any combination of EVLOOP_ONCE | EVLOOP_NONBLOCK
+ @return 0 if successful, -1 if an error occurred, or 1 if no events were
+ registered.
+ @see event_loopexit(), event_base_loop()
+*/
+int event_loop(int);
+
+/**
+ Handle events (threadsafe version).
+
+ This is a more flexible version of event_base_dispatch().
+
+ @param eb the event_base structure returned by event_init()
+ @param flags any combination of EVLOOP_ONCE | EVLOOP_NONBLOCK
+ @return 0 if successful, -1 if an error occurred, or 1 if no events were
+ registered.
+ @see event_loopexit(), event_base_loop()
+ */
+int event_base_loop(struct event_base *, int);
+
+/**
+ Exit the event loop after the specified time.
+
+ The next event_loop() iteration after the given timer expires will
+ complete normally (handling all queued events) then exit without
+ blocking for events again.
+
+ Subsequent invocations of event_loop() will proceed normally.
+
+ @param tv the amount of time after which the loop should terminate.
+ @return 0 if successful, or -1 if an error occurred
+ @see event_loop(), event_base_loop(), event_base_loopexit()
+ */
+int event_loopexit(const struct timeval *);
+
+
+/**
+ Exit the event loop after the specified time (threadsafe variant).
+
+ The next event_base_loop() iteration after the given timer expires will
+ complete normally (handling all queued events) then exit without
+ blocking for events again.
+
+ Subsequent invocations of event_base_loop() will proceed normally.
+
+ @param eb the event_base structure returned by event_init()
+ @param tv the amount of time after which the loop should terminate.
+ @return 0 if successful, or -1 if an error occurred
+ @see event_loopexit()
+ */
+int event_base_loopexit(struct event_base *, const struct timeval *);
+
+/**
+ Abort the active event_loop() immediately.
+
+ event_loop() will abort the loop after the next event is completed;
+ event_loopbreak() is typically invoked from this event's callback.
+ This behavior is analogous to the "break;" statement.
+
+ Subsequent invocations of event_loop() will proceed normally.
+
+ @return 0 if successful, or -1 if an error occurred
+ @see event_base_loopbreak(), event_loopexit()
+ */
+int event_loopbreak(void);
+
+/**
+ Abort the active event_base_loop() immediately.
+
+ event_base_loop() will abort the loop after the next event is completed;
+ event_base_loopbreak() is typically invoked from this event's callback.
+ This behavior is analogous to the "break;" statement.
+
+ Subsequent invocations of event_loop() will proceed normally.
+
+ @param eb the event_base structure returned by event_init()
+ @return 0 if successful, or -1 if an error occurred
+ @see event_base_loopexit
+ */
+int event_base_loopbreak(struct event_base *);
+
+
+/**
+ Add a timer event.
+
+ @param ev the event struct
+ @param tv timeval struct
+ */
+#define evtimer_add(ev, tv) event_add(ev, tv)
+
+
+/**
+ Define a timer event.
+
+ @param ev event struct to be modified
+ @param cb callback function
+ @param arg argument that will be passed to the callback function
+ */
+#define evtimer_set(ev, cb, arg) event_set(ev, -1, 0, cb, arg)
+
+
+/**
+ * Delete a timer event.
+ *
+ * @param ev the event struct to be disabled
+ */
+#define evtimer_del(ev) event_del(ev)
+#define evtimer_pending(ev, tv) event_pending(ev, EV_TIMEOUT, tv)
+#define evtimer_initialized(ev) ((ev)->ev_flags & EVLIST_INIT)
+
+/**
+ * Add a timeout event.
+ *
+ * @param ev the event struct to be disabled
+ * @param tv the timeout value, in seconds
+ */
+#define timeout_add(ev, tv) event_add(ev, tv)
+
+
+/**
+ * Define a timeout event.
+ *
+ * @param ev the event struct to be defined
+ * @param cb the callback to be invoked when the timeout expires
+ * @param arg the argument to be passed to the callback
+ */
+#define timeout_set(ev, cb, arg) event_set(ev, -1, 0, cb, arg)
+
+
+/**
+ * Disable a timeout event.
+ *
+ * @param ev the timeout event to be disabled
+ */
+#define timeout_del(ev) event_del(ev)
+
+#define timeout_pending(ev, tv) event_pending(ev, EV_TIMEOUT, tv)
+#define timeout_initialized(ev) ((ev)->ev_flags & EVLIST_INIT)
+
+#define signal_add(ev, tv) event_add(ev, tv)
+#define signal_set(ev, x, cb, arg) \
+ event_set(ev, x, EV_SIGNAL|EV_PERSIST, cb, arg)
+#define signal_del(ev) event_del(ev)
+#define signal_pending(ev, tv) event_pending(ev, EV_SIGNAL, tv)
+#define signal_initialized(ev) ((ev)->ev_flags & EVLIST_INIT)
+
+/**
+ Prepare an event structure to be added.
+
+ The function event_set() prepares the event structure ev to be used in
+ future calls to event_add() and event_del(). The event will be prepared to
+ call the function specified by the fn argument with an int argument
+ indicating the file descriptor, a short argument indicating the type of
+ event, and a void * argument given in the arg argument. The fd indicates
+ the file descriptor that should be monitored for events. The events can be
+ either EV_READ, EV_WRITE, or both. Indicating that an application can read
+ or write from the file descriptor respectively without blocking.
+
+ The function fn will be called with the file descriptor that triggered the
+ event and the type of event which will be either EV_TIMEOUT, EV_SIGNAL,
+ EV_READ, or EV_WRITE. The additional flag EV_PERSIST makes an event_add()
+ persistent until event_del() has been called.
+
+ @param ev an event struct to be modified
+ @param fd the file descriptor to be monitored
+ @param event desired events to monitor; can be EV_READ and/or EV_WRITE
+ @param fn callback function to be invoked when the event occurs
+ @param arg an argument to be passed to the callback function
+
+ @see event_add(), event_del(), event_once()
+
+ */
+void event_set(struct event *, int, short, void (*)(int, short, void *), void *);
+
+/**
+ Schedule a one-time event to occur.
+
+ The function event_once() is similar to event_set(). However, it schedules
+ a callback to be called exactly once and does not require the caller to
+ prepare an event structure.
+
+ @param fd a file descriptor to monitor
+ @param events event(s) to monitor; can be any of EV_TIMEOUT | EV_READ |
+ EV_WRITE
+ @param callback callback function to be invoked when the event occurs
+ @param arg an argument to be passed to the callback function
+ @param timeout the maximum amount of time to wait for the event, or NULL
+ to wait forever
+ @return 0 if successful, or -1 if an error occurred
+ @see event_set()
+
+ */
+int event_once(int, short, void (*)(int, short, void *), void *,
+ const struct timeval *);
+
+
+/**
+ Schedule a one-time event (threadsafe variant)
+
+ The function event_base_once() is similar to event_set(). However, it
+ schedules a callback to be called exactly once and does not require the
+ caller to prepare an event structure.
+
+ @param base an event_base returned by event_init()
+ @param fd a file descriptor to monitor
+ @param events event(s) to monitor; can be any of EV_TIMEOUT | EV_READ |
+ EV_WRITE
+ @param callback callback function to be invoked when the event occurs
+ @param arg an argument to be passed to the callback function
+ @param timeout the maximum amount of time to wait for the event, or NULL
+ to wait forever
+ @return 0 if successful, or -1 if an error occurred
+ @see event_once()
+ */
+int event_base_once(struct event_base *base, int fd, short events,
+ void (*callback)(int, short, void *), void *arg,
+ const struct timeval *timeout);
+
+
+/**
+ Add an event to the set of monitored events.
+
+ The function event_add() schedules the execution of the ev event when the
+ event specified in event_set() occurs or in at least the time specified in
+ the tv. If tv is NULL, no timeout occurs and the function will only be
+ called if a matching event occurs on the file descriptor. The event in the
+ ev argument must be already initialized by event_set() and may not be used
+ in calls to event_set() until it has timed out or been removed with
+ event_del(). If the event in the ev argument already has a scheduled
+ timeout, the old timeout will be replaced by the new one.
+
+ @param ev an event struct initialized via event_set()
+ @param timeout the maximum amount of time to wait for the event, or NULL
+ to wait forever
+ @return 0 if successful, or -1 if an error occurred
+ @see event_del(), event_set()
+ */
+int event_add(struct event *ev, const struct timeval *timeout);
+
+
+/**
+ Remove an event from the set of monitored events.
+
+ The function event_del() will cancel the event in the argument ev. If the
+ event has already executed or has never been added the call will have no
+ effect.
+
+ @param ev an event struct to be removed from the working set
+ @return 0 if successful, or -1 if an error occurred
+ @see event_add()
+ */
+int event_del(struct event *);
+
+void event_active(struct event *, int, short);
+
+
+/**
+ Checks if a specific event is pending or scheduled.
+
+ @param ev an event struct previously passed to event_add()
+ @param event the requested event type; any of EV_TIMEOUT|EV_READ|
+ EV_WRITE|EV_SIGNAL
+ @param tv an alternate timeout (FIXME - is this true?)
+
+ @return 1 if the event is pending, or 0 if the event has not occurred
+
+ */
+int event_pending(struct event *ev, short event, struct timeval *tv);
+
+
+/**
+ Test if an event structure has been initialized.
+
+ The event_initialized() macro can be used to check if an event has been
+ initialized.
+
+ @param ev an event structure to be tested
+ @return 1 if the structure has been initialized, or 0 if it has not been
+ initialized
+ */
+#ifdef WIN32
+#define event_initialized(ev) ((ev)->ev_flags & EVLIST_INIT && (ev)->ev_fd != (int)INVALID_HANDLE_VALUE)
+#else
+#define event_initialized(ev) ((ev)->ev_flags & EVLIST_INIT)
+#endif
+
+
+/**
+ Get the libevent version number.
+
+ @return a string containing the version number of libevent
+ */
+const char *event_get_version(void);
+
+
+/**
+ Get the kernel event notification mechanism used by libevent.
+
+ @return a string identifying the kernel event mechanism (kqueue, epoll, etc.)
+ */
+const char *event_get_method(void);
+
+
+/**
+ Set the number of different event priorities.
+
+ By default libevent schedules all active events with the same priority.
+ However, some time it is desirable to process some events with a higher
+ priority than others. For that reason, libevent supports strict priority
+ queues. Active events with a lower priority are always processed before
+ events with a higher priority.
+
+ The number of different priorities can be set initially with the
+ event_priority_init() function. This function should be called before the
+ first call to event_dispatch(). The event_priority_set() function can be
+ used to assign a priority to an event. By default, libevent assigns the
+ middle priority to all events unless their priority is explicitly set.
+
+ @param npriorities the maximum number of priorities
+ @return 0 if successful, or -1 if an error occurred
+ @see event_base_priority_init(), event_priority_set()
+
+ */
+int event_priority_init(int);
+
+
+/**
+ Set the number of different event priorities (threadsafe variant).
+
+ See the description of event_priority_init() for more information.
+
+ @param eb the event_base structure returned by event_init()
+ @param npriorities the maximum number of priorities
+ @return 0 if successful, or -1 if an error occurred
+ @see event_priority_init(), event_priority_set()
+ */
+int event_base_priority_init(struct event_base *, int);
+
+
+/**
+ Assign a priority to an event.
+
+ @param ev an event struct
+ @param priority the new priority to be assigned
+ @return 0 if successful, or -1 if an error occurred
+ @see event_priority_init()
+ */
+int event_priority_set(struct event *, int);
+
+
+/* These functions deal with buffering input and output */
+
+struct evbuffer {
+ u_char *buffer;
+ u_char *orig_buffer;
+
+ size_t misalign;
+ size_t totallen;
+ size_t off;
+
+ void (*cb)(struct evbuffer *, size_t, size_t, void *);
+ void *cbarg;
+};
+
+/* Just for error reporting - use other constants otherwise */
+#define EVBUFFER_READ 0x01
+#define EVBUFFER_WRITE 0x02
+#define EVBUFFER_EOF 0x10
+#define EVBUFFER_ERROR 0x20
+#define EVBUFFER_TIMEOUT 0x40
+
+struct bufferevent;
+typedef void (*evbuffercb)(struct bufferevent *, void *);
+typedef void (*everrorcb)(struct bufferevent *, short what, void *);
+
+struct event_watermark {
+ size_t low;
+ size_t high;
+};
+
+struct bufferevent {
+ struct event_base *ev_base;
+
+ struct event ev_read;
+ struct event ev_write;
+
+ struct evbuffer *input;
+ struct evbuffer *output;
+
+ struct event_watermark wm_read;
+ struct event_watermark wm_write;
+
+ evbuffercb readcb;
+ evbuffercb writecb;
+ everrorcb errorcb;
+ void *cbarg;
+
+ int timeout_read; /* in seconds */
+ int timeout_write; /* in seconds */
+
+ short enabled; /* events that are currently enabled */
+};
+
+
+/**
+ Create a new bufferevent.
+
+ libevent provides an abstraction on top of the regular event callbacks.
+ This abstraction is called a buffered event. A buffered event provides
+ input and output buffers that get filled and drained automatically. The
+ user of a buffered event no longer deals directly with the I/O, but
+ instead is reading from input and writing to output buffers.
+
+ Once initialized, the bufferevent structure can be used repeatedly with
+ bufferevent_enable() and bufferevent_disable().
+
+ When read enabled the bufferevent will try to read from the file descriptor
+ and call the read callback. The write callback is executed whenever the
+ output buffer is drained below the write low watermark, which is 0 by
+ default.
+
+ If multiple bases are in use, bufferevent_base_set() must be called before
+ enabling the bufferevent for the first time.
+
+ @param fd the file descriptor from which data is read and written to.
+ This file descriptor is not allowed to be a pipe(2).
+ @param readcb callback to invoke when there is data to be read, or NULL if
+ no callback is desired
+ @param writecb callback to invoke when the file descriptor is ready for
+ writing, or NULL if no callback is desired
+ @param errorcb callback to invoke when there is an error on the file
+ descriptor
+ @param cbarg an argument that will be supplied to each of the callbacks
+ (readcb, writecb, and errorcb)
+ @return a pointer to a newly allocated bufferevent struct, or NULL if an
+ error occurred
+ @see bufferevent_base_set(), bufferevent_free()
+ */
+struct bufferevent *bufferevent_new(int fd,
+ evbuffercb readcb, evbuffercb writecb, everrorcb errorcb, void *cbarg);
+
+
+/**
+ Assign a bufferevent to a specific event_base.
+
+ @param base an event_base returned by event_init()
+ @param bufev a bufferevent struct returned by bufferevent_new()
+ @return 0 if successful, or -1 if an error occurred
+ @see bufferevent_new()
+ */
+int bufferevent_base_set(struct event_base *base, struct bufferevent *bufev);
+
+
+/**
+ Assign a priority to a bufferevent.
+
+ @param bufev a bufferevent struct
+ @param pri the priority to be assigned
+ @return 0 if successful, or -1 if an error occurred
+ */
+int bufferevent_priority_set(struct bufferevent *bufev, int pri);
+
+
+/**
+ Deallocate the storage associated with a bufferevent structure.
+
+ @param bufev the bufferevent structure to be freed.
+ */
+void bufferevent_free(struct bufferevent *bufev);
+
+
+/**
+ Changes the callbacks for a bufferevent.
+
+ @param bufev the bufferevent object for which to change callbacks
+ @param readcb callback to invoke when there is data to be read, or NULL if
+ no callback is desired
+ @param writecb callback to invoke when the file descriptor is ready for
+ writing, or NULL if no callback is desired
+ @param errorcb callback to invoke when there is an error on the file
+ descriptor
+ @param cbarg an argument that will be supplied to each of the callbacks
+ (readcb, writecb, and errorcb)
+ @see bufferevent_new()
+ */
+void bufferevent_setcb(struct bufferevent *bufev,
+ evbuffercb readcb, evbuffercb writecb, everrorcb errorcb, void *cbarg);
+
+/**
+ Changes the file descriptor on which the bufferevent operates.
+
+ @param bufev the bufferevent object for which to change the file descriptor
+ @param fd the file descriptor to operate on
+*/
+void bufferevent_setfd(struct bufferevent *bufev, int fd);
+
+/**
+ Write data to a bufferevent buffer.
+
+ The bufferevent_write() function can be used to write data to the file
+ descriptor. The data is appended to the output buffer and written to the
+ descriptor automatically as it becomes available for writing.
+
+ @param bufev the bufferevent to be written to
+ @param data a pointer to the data to be written
+ @param size the length of the data, in bytes
+ @return 0 if successful, or -1 if an error occurred
+ @see bufferevent_write_buffer()
+ */
+int bufferevent_write(struct bufferevent *bufev,
+ const void *data, size_t size);
+
+
+/**
+ Write data from an evbuffer to a bufferevent buffer. The evbuffer is
+ being drained as a result.
+
+ @param bufev the bufferevent to be written to
+ @param buf the evbuffer to be written
+ @return 0 if successful, or -1 if an error occurred
+ @see bufferevent_write()
+ */
+int bufferevent_write_buffer(struct bufferevent *bufev, struct evbuffer *buf);
+
+
+/**
+ Read data from a bufferevent buffer.
+
+ The bufferevent_read() function is used to read data from the input buffer.
+
+ @param bufev the bufferevent to be read from
+ @param data pointer to a buffer that will store the data
+ @param size the size of the data buffer, in bytes
+ @return the amount of data read, in bytes.
+ */
+size_t bufferevent_read(struct bufferevent *bufev, void *data, size_t size);
+
+/**
+ Enable a bufferevent.
+
+ @param bufev the bufferevent to be enabled
+ @param event any combination of EV_READ | EV_WRITE.
+ @return 0 if successful, or -1 if an error occurred
+ @see bufferevent_disable()
+ */
+int bufferevent_enable(struct bufferevent *bufev, short event);
+
+
+/**
+ Disable a bufferevent.
+
+ @param bufev the bufferevent to be disabled
+ @param event any combination of EV_READ | EV_WRITE.
+ @return 0 if successful, or -1 if an error occurred
+ @see bufferevent_enable()
+ */
+int bufferevent_disable(struct bufferevent *bufev, short event);
+
+
+/**
+ Set the read and write timeout for a buffered event.
+
+ @param bufev the bufferevent to be modified
+ @param timeout_read the read timeout
+ @param timeout_write the write timeout
+ */
+void bufferevent_settimeout(struct bufferevent *bufev,
+ int timeout_read, int timeout_write);
+
+
+/**
+ Sets the watermarks for read and write events.
+
+ On input, a bufferevent does not invoke the user read callback unless
+ there is at least low watermark data in the buffer. If the read buffer
+ is beyond the high watermark, the buffevent stops reading from the network.
+
+ On output, the user write callback is invoked whenever the buffered data
+ falls below the low watermark.
+
+ @param bufev the bufferevent to be modified
+ @param events EV_READ, EV_WRITE or both
+ @param lowmark the lower watermark to set
+ @param highmark the high watermark to set
+*/
+
+void bufferevent_setwatermark(struct bufferevent *bufev, short events,
+ size_t lowmark, size_t highmark);
+
+#define EVBUFFER_LENGTH(x) (x)->off
+#define EVBUFFER_DATA(x) (x)->buffer
+#define EVBUFFER_INPUT(x) (x)->input
+#define EVBUFFER_OUTPUT(x) (x)->output
+
+
+/**
+ Allocate storage for a new evbuffer.
+
+ @return a pointer to a newly allocated evbuffer struct, or NULL if an error
+ occurred
+ */
+struct evbuffer *evbuffer_new(void);
+
+
+/**
+ Deallocate storage for an evbuffer.
+
+ @param pointer to the evbuffer to be freed
+ */
+void evbuffer_free(struct evbuffer *);
+
+
+/**
+ Expands the available space in an event buffer.
+
+ Expands the available space in the event buffer to at least datlen
+
+ @param buf the event buffer to be expanded
+ @param datlen the new minimum length requirement
+ @return 0 if successful, or -1 if an error occurred
+*/
+int evbuffer_expand(struct evbuffer *, size_t);
+
+
+/**
+ Append data to the end of an evbuffer.
+
+ @param buf the event buffer to be appended to
+ @param data pointer to the beginning of the data buffer
+ @param datlen the number of bytes to be copied from the data buffer
+ */
+int evbuffer_add(struct evbuffer *, const void *, size_t);
+
+
+
+/**
+ Read data from an event buffer and drain the bytes read.
+
+ @param buf the event buffer to be read from
+ @param data the destination buffer to store the result
+ @param datlen the maximum size of the destination buffer
+ @return the number of bytes read
+ */
+int evbuffer_remove(struct evbuffer *, void *, size_t);
+
+
+/**
+ * Read a single line from an event buffer.
+ *
+ * Reads a line terminated by either '\r\n', '\n\r' or '\r' or '\n'.
+ * The returned buffer needs to be freed by the caller.
+ *
+ * @param buffer the evbuffer to read from
+ * @return pointer to a single line, or NULL if an error occurred
+ */
+char *evbuffer_readline(struct evbuffer *);
+
+
+/**
+ Move data from one evbuffer into another evbuffer.
+
+ This is a destructive add. The data from one buffer moves into
+ the other buffer. The destination buffer is expanded as needed.
+
+ @param outbuf the output buffer
+ @param inbuf the input buffer
+ @return 0 if successful, or -1 if an error occurred
+ */
+int evbuffer_add_buffer(struct evbuffer *, struct evbuffer *);
+
+
+/**
+ Append a formatted string to the end of an evbuffer.
+
+ @param buf the evbuffer that will be appended to
+ @param fmt a format string
+ @param ... arguments that will be passed to printf(3)
+ @return The number of bytes added if successful, or -1 if an error occurred.
+ */
+int evbuffer_add_printf(struct evbuffer *, const char *fmt, ...)
+#ifdef __GNUC__
+ __attribute__((format(printf, 2, 3)))
+#endif
+;
+
+
+/**
+ Append a va_list formatted string to the end of an evbuffer.
+
+ @param buf the evbuffer that will be appended to
+ @param fmt a format string
+ @param ap a varargs va_list argument array that will be passed to vprintf(3)
+ @return The number of bytes added if successful, or -1 if an error occurred.
+ */
+int evbuffer_add_vprintf(struct evbuffer *, const char *fmt, va_list ap);
+
+
+/**
+ Remove a specified number of bytes data from the beginning of an evbuffer.
+
+ @param buf the evbuffer to be drained
+ @param len the number of bytes to drain from the beginning of the buffer
+ */
+void evbuffer_drain(struct evbuffer *, size_t);
+
+
+/**
+ Write the contents of an evbuffer to a file descriptor.
+
+ The evbuffer will be drained after the bytes have been successfully written.
+
+ @param buffer the evbuffer to be written and drained
+ @param fd the file descriptor to be written to
+ @return the number of bytes written, or -1 if an error occurred
+ @see evbuffer_read()
+ */
+int evbuffer_write(struct evbuffer *, int);
+
+
+/**
+ Read from a file descriptor and store the result in an evbuffer.
+
+ @param buf the evbuffer to store the result
+ @param fd the file descriptor to read from
+ @param howmuch the number of bytes to be read
+ @return the number of bytes read, or -1 if an error occurred
+ @see evbuffer_write()
+ */
+int evbuffer_read(struct evbuffer *, int, int);
+
+
+/**
+ Find a string within an evbuffer.
+
+ @param buffer the evbuffer to be searched
+ @param what the string to be searched for
+ @param len the length of the search string
+ @return a pointer to the beginning of the search string, or NULL if the search failed.
+ */
+u_char *evbuffer_find(struct evbuffer *, const u_char *, size_t);
+
+/**
+ Set a callback to invoke when the evbuffer is modified.
+
+ @param buffer the evbuffer to be monitored
+ @param cb the callback function to invoke when the evbuffer is modified
+ @param cbarg an argument to be provided to the callback function
+ */
+void evbuffer_setcb(struct evbuffer *, void (*)(struct evbuffer *, size_t, size_t, void *), void *);
+
+/*
+ * Marshaling tagged data - We assume that all tags are inserted in their
+ * numeric order - so that unknown tags will always be higher than the
+ * known ones - and we can just ignore the end of an event buffer.
+ */
+
+void evtag_init(void);
+
+void evtag_marshal(struct evbuffer *evbuf, ev_uint32_t tag, const void *data,
+ ev_uint32_t len);
+
+/**
+ Encode an integer and store it in an evbuffer.
+
+ We encode integer's by nibbles; the first nibble contains the number
+ of significant nibbles - 1; this allows us to encode up to 64-bit
+ integers. This function is byte-order independent.
+
+ @param evbuf evbuffer to store the encoded number
+ @param number a 32-bit integer
+ */
+void encode_int(struct evbuffer *evbuf, ev_uint32_t number);
+
+void evtag_marshal_int(struct evbuffer *evbuf, ev_uint32_t tag,
+ ev_uint32_t integer);
+
+void evtag_marshal_string(struct evbuffer *buf, ev_uint32_t tag,
+ const char *string);
+
+void evtag_marshal_timeval(struct evbuffer *evbuf, ev_uint32_t tag,
+ struct timeval *tv);
+
+int evtag_unmarshal(struct evbuffer *src, ev_uint32_t *ptag,
+ struct evbuffer *dst);
+int evtag_peek(struct evbuffer *evbuf, ev_uint32_t *ptag);
+int evtag_peek_length(struct evbuffer *evbuf, ev_uint32_t *plength);
+int evtag_payload_length(struct evbuffer *evbuf, ev_uint32_t *plength);
+int evtag_consume(struct evbuffer *evbuf);
+
+int evtag_unmarshal_int(struct evbuffer *evbuf, ev_uint32_t need_tag,
+ ev_uint32_t *pinteger);
+
+int evtag_unmarshal_fixed(struct evbuffer *src, ev_uint32_t need_tag,
+ void *data, size_t len);
+
+int evtag_unmarshal_string(struct evbuffer *evbuf, ev_uint32_t need_tag,
+ char **pstring);
+
+int evtag_unmarshal_timeval(struct evbuffer *evbuf, ev_uint32_t need_tag,
+ struct timeval *ptv);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _EVENT_H_ */
diff --git a/libevent/event_rpcgen.py b/libevent/event_rpcgen.py
new file mode 100644
index 00000000000..5503ff8a5c3
--- /dev/null
+++ b/libevent/event_rpcgen.py
@@ -0,0 +1,1417 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2005 Niels Provos <provos@citi.umich.edu>
+# All rights reserved.
+#
+# Generates marshaling code based on libevent.
+
+import sys
+import re
+
+#
+_NAME = "event_rpcgen.py"
+_VERSION = "0.1"
+_STRUCT_RE = '[a-z][a-z_0-9]*'
+
+# Globals
+line_count = 0
+
+white = re.compile(r'^\s+')
+cppcomment = re.compile(r'\/\/.*$')
+headerdirect = []
+cppdirect = []
+
+# Holds everything that makes a struct
+class Struct:
+ def __init__(self, name):
+ self._name = name
+ self._entries = []
+ self._tags = {}
+ print >>sys.stderr, ' Created struct: %s' % name
+
+ def AddEntry(self, entry):
+ if self._tags.has_key(entry.Tag()):
+ print >>sys.stderr, ( 'Entry "%s" duplicates tag number '
+ '%d from "%s" around line %d' ) % (
+ entry.Name(), entry.Tag(),
+ self._tags[entry.Tag()], line_count)
+ sys.exit(1)
+ self._entries.append(entry)
+ self._tags[entry.Tag()] = entry.Name()
+ print >>sys.stderr, ' Added entry: %s' % entry.Name()
+
+ def Name(self):
+ return self._name
+
+ def EntryTagName(self, entry):
+ """Creates the name inside an enumeration for distinguishing data
+ types."""
+ name = "%s_%s" % (self._name, entry.Name())
+ return name.upper()
+
+ def PrintIdented(self, file, ident, code):
+ """Takes an array, add indentation to each entry and prints it."""
+ for entry in code:
+ print >>file, '%s%s' % (ident, entry)
+
+ def PrintTags(self, file):
+ """Prints the tag definitions for a structure."""
+ print >>file, '/* Tag definition for %s */' % self._name
+ print >>file, 'enum %s_ {' % self._name.lower()
+ for entry in self._entries:
+ print >>file, ' %s=%d,' % (self.EntryTagName(entry),
+ entry.Tag())
+ print >>file, ' %s_MAX_TAGS' % (self._name.upper())
+ print >>file, '};\n'
+
+ def PrintForwardDeclaration(self, file):
+ print >>file, 'struct %s;' % self._name
+
+ def PrintDeclaration(self, file):
+ print >>file, '/* Structure declaration for %s */' % self._name
+ print >>file, 'struct %s_access_ {' % self._name
+ for entry in self._entries:
+ dcl = entry.AssignDeclaration('(*%s_assign)' % entry.Name())
+ dcl.extend(
+ entry.GetDeclaration('(*%s_get)' % entry.Name()))
+ if entry.Array():
+ dcl.extend(
+ entry.AddDeclaration('(*%s_add)' % entry.Name()))
+ self.PrintIdented(file, ' ', dcl)
+ print >>file, '};\n'
+
+ print >>file, 'struct %s {' % self._name
+ print >>file, ' struct %s_access_ *base;\n' % self._name
+ for entry in self._entries:
+ dcl = entry.Declaration()
+ self.PrintIdented(file, ' ', dcl)
+ print >>file, ''
+ for entry in self._entries:
+ print >>file, ' uint8_t %s_set;' % entry.Name()
+ print >>file, '};\n'
+
+ print >>file, \
+"""struct %(name)s *%(name)s_new(void);
+void %(name)s_free(struct %(name)s *);
+void %(name)s_clear(struct %(name)s *);
+void %(name)s_marshal(struct evbuffer *, const struct %(name)s *);
+int %(name)s_unmarshal(struct %(name)s *, struct evbuffer *);
+int %(name)s_complete(struct %(name)s *);
+void evtag_marshal_%(name)s(struct evbuffer *, uint32_t,
+ const struct %(name)s *);
+int evtag_unmarshal_%(name)s(struct evbuffer *, uint32_t,
+ struct %(name)s *);""" % { 'name' : self._name }
+
+
+ # Write a setting function of every variable
+ for entry in self._entries:
+ self.PrintIdented(file, '', entry.AssignDeclaration(
+ entry.AssignFuncName()))
+ self.PrintIdented(file, '', entry.GetDeclaration(
+ entry.GetFuncName()))
+ if entry.Array():
+ self.PrintIdented(file, '', entry.AddDeclaration(
+ entry.AddFuncName()))
+
+ print >>file, '/* --- %s done --- */\n' % self._name
+
+ def PrintCode(self, file):
+ print >>file, ('/*\n'
+ ' * Implementation of %s\n'
+ ' */\n') % self._name
+
+ print >>file, \
+ 'static struct %(name)s_access_ __%(name)s_base = {' % \
+ { 'name' : self._name }
+ for entry in self._entries:
+ self.PrintIdented(file, ' ', entry.CodeBase())
+ print >>file, '};\n'
+
+ # Creation
+ print >>file, (
+ 'struct %(name)s *\n'
+ '%(name)s_new(void)\n'
+ '{\n'
+ ' struct %(name)s *tmp;\n'
+ ' if ((tmp = malloc(sizeof(struct %(name)s))) == NULL) {\n'
+ ' event_warn("%%s: malloc", __func__);\n'
+ ' return (NULL);\n'
+ ' }\n'
+ ' tmp->base = &__%(name)s_base;\n') % { 'name' : self._name }
+
+ for entry in self._entries:
+ self.PrintIdented(file, ' ', entry.CodeNew('tmp'))
+ print >>file, ' tmp->%s_set = 0;\n' % entry.Name()
+
+ print >>file, (
+ ' return (tmp);\n'
+ '}\n')
+
+ # Adding
+ for entry in self._entries:
+ if entry.Array():
+ self.PrintIdented(file, '', entry.CodeAdd())
+ print >>file, ''
+
+ # Assigning
+ for entry in self._entries:
+ self.PrintIdented(file, '', entry.CodeAssign())
+ print >>file, ''
+
+ # Getting
+ for entry in self._entries:
+ self.PrintIdented(file, '', entry.CodeGet())
+ print >>file, ''
+
+ # Clearing
+ print >>file, ( 'void\n'
+ '%(name)s_clear(struct %(name)s *tmp)\n'
+ '{'
+ ) % { 'name' : self._name }
+ for entry in self._entries:
+ self.PrintIdented(file, ' ', entry.CodeClear('tmp'))
+
+ print >>file, '}\n'
+
+ # Freeing
+ print >>file, ( 'void\n'
+ '%(name)s_free(struct %(name)s *tmp)\n'
+ '{'
+ ) % { 'name' : self._name }
+
+ for entry in self._entries:
+ self.PrintIdented(file, ' ', entry.CodeFree('tmp'))
+
+ print >>file, (' free(tmp);\n'
+ '}\n')
+
+ # Marshaling
+ print >>file, ('void\n'
+ '%(name)s_marshal(struct evbuffer *evbuf, '
+ 'const struct %(name)s *tmp)'
+ '{') % { 'name' : self._name }
+ for entry in self._entries:
+ indent = ' '
+ # Optional entries do not have to be set
+ if entry.Optional():
+ indent += ' '
+ print >>file, ' if (tmp->%s_set) {' % entry.Name()
+ self.PrintIdented(
+ file, indent,
+ entry.CodeMarshal('evbuf', self.EntryTagName(entry), 'tmp'))
+ if entry.Optional():
+ print >>file, ' }'
+
+ print >>file, '}\n'
+
+ # Unmarshaling
+ print >>file, ('int\n'
+ '%(name)s_unmarshal(struct %(name)s *tmp, '
+ ' struct evbuffer *evbuf)\n'
+ '{\n'
+ ' uint32_t tag;\n'
+ ' while (EVBUFFER_LENGTH(evbuf) > 0) {\n'
+ ' if (evtag_peek(evbuf, &tag) == -1)\n'
+ ' return (-1);\n'
+ ' switch (tag) {\n'
+ ) % { 'name' : self._name }
+ for entry in self._entries:
+ print >>file, ' case %s:\n' % self.EntryTagName(entry)
+ if not entry.Array():
+ print >>file, (
+ ' if (tmp->%s_set)\n'
+ ' return (-1);'
+ ) % (entry.Name())
+
+ self.PrintIdented(
+ file, ' ',
+ entry.CodeUnmarshal('evbuf',
+ self.EntryTagName(entry), 'tmp'))
+
+ print >>file, ( ' tmp->%s_set = 1;\n' % entry.Name() +
+ ' break;\n' )
+ print >>file, ( ' default:\n'
+ ' return -1;\n'
+ ' }\n'
+ ' }\n' )
+ # Check if it was decoded completely
+ print >>file, ( ' if (%(name)s_complete(tmp) == -1)\n'
+ ' return (-1);'
+ ) % { 'name' : self._name }
+
+ # Successfully decoded
+ print >>file, ( ' return (0);\n'
+ '}\n')
+
+ # Checking if a structure has all the required data
+ print >>file, (
+ 'int\n'
+ '%(name)s_complete(struct %(name)s *msg)\n'
+ '{' ) % { 'name' : self._name }
+ for entry in self._entries:
+ self.PrintIdented(
+ file, ' ',
+ entry.CodeComplete('msg'))
+ print >>file, (
+ ' return (0);\n'
+ '}\n' )
+
+ # Complete message unmarshaling
+ print >>file, (
+ 'int\n'
+ 'evtag_unmarshal_%(name)s(struct evbuffer *evbuf, '
+ 'uint32_t need_tag, struct %(name)s *msg)\n'
+ '{\n'
+ ' uint32_t tag;\n'
+ ' int res = -1;\n'
+ '\n'
+ ' struct evbuffer *tmp = evbuffer_new();\n'
+ '\n'
+ ' if (evtag_unmarshal(evbuf, &tag, tmp) == -1'
+ ' || tag != need_tag)\n'
+ ' goto error;\n'
+ '\n'
+ ' if (%(name)s_unmarshal(msg, tmp) == -1)\n'
+ ' goto error;\n'
+ '\n'
+ ' res = 0;\n'
+ '\n'
+ ' error:\n'
+ ' evbuffer_free(tmp);\n'
+ ' return (res);\n'
+ '}\n' ) % { 'name' : self._name }
+
+ # Complete message marshaling
+ print >>file, (
+ 'void\n'
+ 'evtag_marshal_%(name)s(struct evbuffer *evbuf, uint32_t tag, '
+ 'const struct %(name)s *msg)\n'
+ '{\n'
+ ' struct evbuffer *_buf = evbuffer_new();\n'
+ ' assert(_buf != NULL);\n'
+ ' evbuffer_drain(_buf, -1);\n'
+ ' %(name)s_marshal(_buf, msg);\n'
+ ' evtag_marshal(evbuf, tag, EVBUFFER_DATA(_buf), '
+ 'EVBUFFER_LENGTH(_buf));\n'
+ ' evbuffer_free(_buf);\n'
+ '}\n' ) % { 'name' : self._name }
+
+# Base class for a single member of an RPC struct.  Concrete wire types
+# (fixed-length bytes, int, string, struct reference, variable-length
+# bytes, arrays) subclass this and override the Code*/Declaration hooks
+# that emit the corresponding C marshaling code.
+class Entry:
+    def __init__(self, type, name, tag):
+        self._type = type
+        self._name = name
+        self._tag = int(tag)
+        # C type used in generated accessors; subclasses override this.
+        self._ctype = type
+        self._optional = 0
+        self._can_be_array = 0
+        self._array = 0
+        # -1 means "not set yet"; LineCount() asserts on it.
+        self._line_count = -1
+        self._struct = None
+        self._refname = None
+
+    def GetTranslation(self):
+        # Substitution dict for the %-templates below.  Requires that
+        # SetStruct() has been called first; self._struct is None until
+        # then and .Name() would raise.
+        return { "parent_name" : self._struct.Name(),
+                 "name" : self._name,
+                 "ctype" : self._ctype,
+                 "refname" : self._refname
+                 }
+
+    def SetStruct(self, struct):
+        self._struct = struct
+
+    def LineCount(self):
+        assert self._line_count != -1
+        return self._line_count
+
+    def SetLineCount(self, number):
+        self._line_count = number
+
+    def Array(self):
+        return self._array
+
+    def Optional(self):
+        return self._optional
+
+    def Tag(self):
+        return self._tag
+
+    def Name(self):
+        return self._name
+
+    def Type(self):
+        return self._type
+
+    def MakeArray(self, yes=1):
+        self._array = yes
+
+    def MakeOptional(self):
+        self._optional = 1
+
+    def GetFuncName(self):
+        return '%s_%s_get' % (self._struct.Name(), self._name)
+
+    def GetDeclaration(self, funcname):
+        # Prototype for the generated _get accessor.
+        code = [ 'int %s(struct %s *, %s *);' % (
+            funcname, self._struct.Name(), self._ctype ) ]
+        return code
+
+    def CodeGet(self):
+        # C body of the _get accessor: fails with -1 unless the member
+        # has been assigned (the _set flag is 1).
+        code = (
+            'int',
+            '%(parent_name)s_%(name)s_get(struct %(parent_name)s *msg, '
+            '%(ctype)s *value)',
+            '{',
+            '    if (msg->%(name)s_set != 1)',
+            '        return (-1);',
+            '    *value = msg->%(name)s_data;',
+            '    return (0);',
+            '}' )
+        code = '\n'.join(code)
+        code = code % self.GetTranslation()
+        return code.split('\n')
+
+    def AssignFuncName(self):
+        return '%s_%s_assign' % (self._struct.Name(), self._name)
+
+    def AddFuncName(self):
+        return '%s_%s_add' % (self._struct.Name(), self._name)
+
+    def AssignDeclaration(self, funcname):
+        code = [ 'int %s(struct %s *, const %s);' % (
+            funcname, self._struct.Name(), self._ctype ) ]
+        return code
+
+    def CodeAssign(self):
+        # C body of the _assign accessor: copy by value and mark set.
+        code = [ 'int',
+                 '%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg,'
+                 ' const %(ctype)s value)',
+                 '{',
+                 '    msg->%(name)s_set = 1;',
+                 '    msg->%(name)s_data = value;',
+                 '    return (0);',
+                 '}' ]
+        code = '\n'.join(code)
+        code = code % self.GetTranslation()
+        return code.split('\n')
+
+    def CodeClear(self, structname):
+        code = [ '%s->%s_set = 0;' % (structname, self.Name()) ]
+
+        return code
+
+    def CodeComplete(self, structname):
+        # Generated completeness check: optional members never fail,
+        # required members must have their _set flag raised.
+        if self.Optional():
+            return []
+
+        code = [ 'if (!%s->%s_set)' % (structname, self.Name()),
+                 '    return (-1);' ]
+
+        return code
+
+    def CodeFree(self, name):
+        # By-value members own no heap storage; nothing to free.
+        return []
+
+    def CodeBase(self):
+        code = [
+            '%(parent_name)s_%(name)s_assign,',
+            '%(parent_name)s_%(name)s_get,'
+            ]
+        if self.Array():
+            code.append('%(parent_name)s_%(name)s_add,')
+
+        code = '\n'.join(code)
+        code = code % self.GetTranslation()
+        return code.split('\n')
+
+    def Verify(self):
+        # Sanity checks on the parsed description; any failure is fatal
+        # and terminates the generator via sys.exit(1).
+        if self.Array() and not self._can_be_array:
+            print >>sys.stderr, (
+                'Entry "%s" cannot be created as an array '
+                'around line %d' ) % (self._name, self.LineCount())
+            sys.exit(1)
+        if not self._struct:
+            print >>sys.stderr, (
+                'Entry "%s" does not know which struct it belongs to '
+                'around line %d' ) % (self._name, self.LineCount())
+            sys.exit(1)
+        if self._optional and self._array:
+            print >>sys.stderr, ( 'Entry "%s" has illegal combination of '
+                                  'optional and array around line %d' ) % (
+                self._name, self.LineCount() )
+            sys.exit(1)
+
+# Fixed-length byte array member ("bytes name[N]"); the data is stored
+# inline in the generated struct as uint8_t name_data[N].
+class EntryBytes(Entry):
+    def __init__(self, type, name, tag, length):
+        # Init base class
+        Entry.__init__(self, type, name, tag)
+
+        self._length = length
+        self._ctype = 'uint8_t'
+
+    def GetDeclaration(self, funcname):
+        # _get hands back a pointer into the struct's inline buffer.
+        code = [ 'int %s(struct %s *, %s **);' % (
+            funcname, self._struct.Name(), self._ctype ) ]
+        return code
+
+    def AssignDeclaration(self, funcname):
+        code = [ 'int %s(struct %s *, const %s *);' % (
+            funcname, self._struct.Name(), self._ctype ) ]
+        return code
+
+    def Declaration(self):
+        dcl = ['uint8_t %s_data[%s];' % (self._name, self._length)]
+
+        return dcl
+
+    def CodeGet(self):
+        name = self._name
+        code = [ 'int',
+                 '%s_%s_get(struct %s *msg, %s **value)' % (
+                     self._struct.Name(), name,
+                     self._struct.Name(), self._ctype),
+                 '{',
+                 '    if (msg->%s_set != 1)' % name,
+                 '        return (-1);',
+                 '    *value = msg->%s_data;' % name,
+                 '    return (0);',
+                 '}' ]
+        return code
+
+    def CodeAssign(self):
+        # Assign copies exactly _length bytes; the caller's buffer is
+        # assumed to be at least that large.
+        name = self._name
+        code = [ 'int',
+                 '%s_%s_assign(struct %s *msg, const %s *value)' % (
+                     self._struct.Name(), name,
+                     self._struct.Name(), self._ctype),
+                 '{',
+                 '    msg->%s_set = 1;' % name,
+                 '    memcpy(msg->%s_data, value, %s);' % (
+                     name, self._length),
+                 '    return (0);',
+                 '}' ]
+        return code
+
+    def CodeUnmarshal(self, buf, tag_name, var_name):
+        # evtag_unmarshal_fixed enforces the exact payload size.
+        code = [ 'if (evtag_unmarshal_fixed(%s, %s, ' % (buf, tag_name) +
+                 '%s->%s_data, ' % (var_name, self._name) +
+                 'sizeof(%s->%s_data)) == -1) {' % (
+                     var_name, self._name),
+                 '    event_warnx("%%s: failed to unmarshal %s", __func__);' % (
+                     self._name ),
+                 '    return (-1);',
+                 '}'
+                 ]
+        return code
+
+    def CodeMarshal(self, buf, tag_name, var_name):
+        code = ['evtag_marshal(%s, %s, %s->%s_data, sizeof(%s->%s_data));' % (
+            buf, tag_name, var_name, self._name, var_name, self._name )]
+        return code
+
+    def CodeClear(self, structname):
+        code = [ '%s->%s_set = 0;' % (structname, self.Name()),
+                 'memset(%s->%s_data, 0, sizeof(%s->%s_data));' % (
+                     structname, self._name, structname, self._name)]
+
+        return code
+
+    def CodeNew(self, name):
+        code = ['memset(%s->%s_data, 0, sizeof(%s->%s_data));' % (
+            name, self._name, name, self._name)]
+        return code
+
+    def Verify(self):
+        # A fixed-length bytes entry must carry an explicit [N] length.
+        if not self._length:
+            print >>sys.stderr, 'Entry "%s" needs a length around line %d' % (
+                self._name, self.LineCount() )
+            sys.exit(1)
+
+        Entry.Verify(self)
+
+# 32-bit unsigned integer member; marshaled via evtag_marshal_int /
+# evtag_unmarshal_int.
+class EntryInt(Entry):
+    def __init__(self, type, name, tag):
+        # Init base class
+        Entry.__init__(self, type, name, tag)
+
+        self._ctype = 'uint32_t'
+
+    def CodeUnmarshal(self, buf, tag_name, var_name):
+        code = ['if (evtag_unmarshal_int(%s, %s, &%s->%s_data) == -1) {' % (
+            buf, tag_name, var_name, self._name),
+                '    event_warnx("%%s: failed to unmarshal %s", __func__);' % (
+            self._name ),
+                '    return (-1);',
+                '}' ]
+        return code
+
+    def CodeMarshal(self, buf, tag_name, var_name):
+        code = ['evtag_marshal_int(%s, %s, %s->%s_data);' % (
+            buf, tag_name, var_name, self._name)]
+        return code
+
+    def Declaration(self):
+        dcl = ['uint32_t %s_data;' % self._name]
+
+        return dcl
+
+    def CodeNew(self, name):
+        code = ['%s->%s_data = 0;' % (name, self._name)]
+        return code
+
+# NUL-terminated string member; the generated struct owns a heap copy
+# (strdup) which is released by the generated _clear/_free code.
+class EntryString(Entry):
+    def __init__(self, type, name, tag):
+        # Init base class
+        Entry.__init__(self, type, name, tag)
+
+        self._ctype = 'char *'
+
+    def CodeAssign(self):
+        # Assign replaces any previous value and takes a private copy
+        # of the caller's string via strdup.
+        name = self._name
+        code = """int
+%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg,
+    const %(ctype)s value)
+{
+    if (msg->%(name)s_data != NULL)
+        free(msg->%(name)s_data);
+    if ((msg->%(name)s_data = strdup(value)) == NULL)
+        return (-1);
+    msg->%(name)s_set = 1;
+    return (0);
+}""" % self.GetTranslation()
+
+        return code.split('\n')
+
+    def CodeUnmarshal(self, buf, tag_name, var_name):
+        code = ['if (evtag_unmarshal_string(%s, %s, &%s->%s_data) == -1) {' % (
+            buf, tag_name, var_name, self._name),
+                '    event_warnx("%%s: failed to unmarshal %s", __func__);' % (
+            self._name ),
+                '    return (-1);',
+                '}'
+                ]
+        return code
+
+    def CodeMarshal(self, buf, tag_name, var_name):
+        code = ['evtag_marshal_string(%s, %s, %s->%s_data);' % (
+            buf, tag_name, var_name, self._name)]
+        return code
+
+    def CodeClear(self, structname):
+        # Clear frees only when the _set flag is raised; CodeFree below
+        # instead frees whenever the pointer is non-NULL.
+        code = [ 'if (%s->%s_set == 1) {' % (structname, self.Name()),
+                 '    free (%s->%s_data);' % (structname, self.Name()),
+                 '    %s->%s_data = NULL;' % (structname, self.Name()),
+                 '    %s->%s_set = 0;' % (structname, self.Name()),
+                 '}'
+                 ]
+
+        return code
+
+    def CodeNew(self, name):
+        code = ['%s->%s_data = NULL;' % (name, self._name)]
+        return code
+
+    def CodeFree(self, name):
+        code = ['if (%s->%s_data != NULL)' % (name, self._name),
+                '    free (%s->%s_data); ' % (name, self._name)]
+
+        return code
+
+    def Declaration(self):
+        dcl = ['char *%s_data;' % self._name]
+
+        return dcl
+
+# Member that embeds (a pointer to) another struct defined in the same
+# .rpc file; this is the only scalar entry type that may also be used
+# as an array.
+class EntryStruct(Entry):
+    def __init__(self, type, name, tag, refname):
+        # Init base class
+        Entry.__init__(self, type, name, tag)
+
+        self._can_be_array = 1
+        self._refname = refname
+        self._ctype = 'struct %s*' % refname
+
+    def CodeGet(self):
+        # _get lazily allocates the sub-struct on first access, so a
+        # caller can build nested messages without explicit assigns.
+        name = self._name
+        code = [ 'int',
+                 '%s_%s_get(struct %s *msg, %s *value)' % (
+                     self._struct.Name(), name,
+                     self._struct.Name(), self._ctype),
+                 '{',
+                 '    if (msg->%s_set != 1) {' % name,
+                 '        msg->%s_data = %s_new();' % (name, self._refname),
+                 '        if (msg->%s_data == NULL)' % name,
+                 '            return (-1);',
+                 '        msg->%s_set = 1;' % name,
+                 '    }',
+                 '    *value = msg->%s_data;' % name,
+                 '    return (0);',
+                 '}' ]
+        return code
+
+    def CodeAssign(self):
+        # Deep-copies the value by marshaling it into a temporary
+        # evbuffer and unmarshaling into our own sub-struct; on any
+        # failure the partially-built sub-struct is torn down.
+        name = self._name
+        code = """int
+%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg,
+    const %(ctype)s value)
+{
+    struct evbuffer *tmp = NULL;
+    if (msg->%(name)s_set) {
+        %(refname)s_clear(msg->%(name)s_data);
+        msg->%(name)s_set = 0;
+    } else {
+        msg->%(name)s_data = %(refname)s_new();
+        if (msg->%(name)s_data == NULL) {
+            event_warn("%%s: %(refname)s_new()", __func__);
+            goto error;
+        }
+    }
+    if ((tmp = evbuffer_new()) == NULL) {
+        event_warn("%%s: evbuffer_new()", __func__);
+        goto error;
+    }
+    %(refname)s_marshal(tmp, value);
+    if (%(refname)s_unmarshal(msg->%(name)s_data, tmp) == -1) {
+        event_warnx("%%s: %(refname)s_unmarshal", __func__);
+        goto error;
+    }
+    msg->%(name)s_set = 1;
+    evbuffer_free(tmp);
+    return (0);
+ error:
+    if (tmp != NULL)
+        evbuffer_free(tmp);
+    if (msg->%(name)s_data != NULL) {
+        %(refname)s_free(msg->%(name)s_data);
+        msg->%(name)s_data = NULL;
+    }
+    return (-1);
+}""" % self.GetTranslation()
+        return code.split('\n')
+
+    def CodeComplete(self, structname):
+        # Recurse into the sub-struct's own _complete check; optional
+        # members are only checked when present.
+        if self.Optional():
+            code = [ 'if (%s->%s_set && %s_complete(%s->%s_data) == -1)' % (
+                structname, self.Name(),
+                self._refname, structname, self.Name()),
+                     '    return (-1);' ]
+        else:
+            code = [ 'if (%s_complete(%s->%s_data) == -1)' % (
+                self._refname, structname, self.Name()),
+                     '    return (-1);' ]
+
+        return code
+
+    def CodeUnmarshal(self, buf, tag_name, var_name):
+        code = ['%s->%s_data = %s_new();' % (
+            var_name, self._name, self._refname),
+                'if (%s->%s_data == NULL)' % (var_name, self._name),
+                '    return (-1);',
+                'if (evtag_unmarshal_%s(%s, %s, %s->%s_data) == -1) {' % (
+            self._refname, buf, tag_name, var_name, self._name),
+                '    event_warnx("%%s: failed to unmarshal %s", __func__);' % (
+            self._name ),
+                '    return (-1);',
+                '}'
+                ]
+        return code
+
+    def CodeMarshal(self, buf, tag_name, var_name):
+        code = ['evtag_marshal_%s(%s, %s, %s->%s_data);' % (
+            self._refname, buf, tag_name, var_name, self._name)]
+        return code
+
+    def CodeClear(self, structname):
+        code = [ 'if (%s->%s_set == 1) {' % (structname, self.Name()),
+                 '    %s_free(%s->%s_data);' % (
+                     self._refname, structname, self.Name()),
+                 '    %s->%s_data = NULL;' % (structname, self.Name()),
+                 '    %s->%s_set = 0;' % (structname, self.Name()),
+                 '}'
+                 ]
+
+        return code
+
+    def CodeNew(self, name):
+        code = ['%s->%s_data = NULL;' % (name, self._name)]
+        return code
+
+    def CodeFree(self, name):
+        code = ['if (%s->%s_data != NULL)' % (name, self._name),
+                '    %s_free(%s->%s_data); ' % (
+                    self._refname, name, self._name)]
+
+        return code
+
+    def Declaration(self):
+        dcl = ['%s %s_data;' % (self._ctype, self._name)]
+
+        return dcl
+
+# Variable-length byte buffer member ("bytes name" without a fixed
+# length); generated struct carries a heap pointer plus a separate
+# uint32_t length field.
+class EntryVarBytes(Entry):
+    def __init__(self, type, name, tag):
+        # Init base class
+        Entry.__init__(self, type, name, tag)
+
+        self._ctype = 'uint8_t *'
+
+    def GetDeclaration(self, funcname):
+        code = [ 'int %s(struct %s *, %s *, uint32_t *);' % (
+            funcname, self._struct.Name(), self._ctype ) ]
+        return code
+
+    def AssignDeclaration(self, funcname):
+        code = [ 'int %s(struct %s *, const %s, uint32_t);' % (
+            funcname, self._struct.Name(), self._ctype ) ]
+        return code
+
+    def CodeAssign(self):
+        # NOTE(review): in the generated code the old buffer is freed
+        # before the new malloc; if malloc fails, _data is NULL while
+        # _set may still be 1 from an earlier assign — verify callers
+        # treat a -1 return as invalidating the field.
+        name = self._name
+        code = [ 'int',
+                 '%s_%s_assign(struct %s *msg, '
+                 'const %s value, uint32_t len)' % (
+                     self._struct.Name(), name,
+                     self._struct.Name(), self._ctype),
+                 '{',
+                 '    if (msg->%s_data != NULL)' % name,
+                 '        free (msg->%s_data);' % name,
+                 '    msg->%s_data = malloc(len);' % name,
+                 '    if (msg->%s_data == NULL)' % name,
+                 '        return (-1);',
+                 '    msg->%s_set = 1;' % name,
+                 '    msg->%s_length = len;' % name,
+                 '    memcpy(msg->%s_data, value, len);' % name,
+                 '    return (0);',
+                 '}' ]
+        return code
+
+    def CodeGet(self):
+        name = self._name
+        code = [ 'int',
+                 '%s_%s_get(struct %s *msg, %s *value, uint32_t *plen)' % (
+                     self._struct.Name(), name,
+                     self._struct.Name(), self._ctype),
+                 '{',
+                 '    if (msg->%s_set != 1)' % name,
+                 '        return (-1);',
+                 '    *value = msg->%s_data;' % name,
+                 '    *plen = msg->%s_length;' % name,
+                 '    return (0);',
+                 '}' ]
+        return code
+
+    def CodeUnmarshal(self, buf, tag_name, var_name):
+        code = ['if (evtag_payload_length(%s, &%s->%s_length) == -1)' % (
+            buf, var_name, self._name),
+                '    return (-1);',
+                # We do not want DoS opportunities
+                'if (%s->%s_length > EVBUFFER_LENGTH(%s))' % (
+            var_name, self._name, buf),
+                '    return (-1);',
+                'if ((%s->%s_data = malloc(%s->%s_length)) == NULL)' % (
+            var_name, self._name, var_name, self._name),
+                '    return (-1);',
+                'if (evtag_unmarshal_fixed(%s, %s, %s->%s_data, '
+                '%s->%s_length) == -1) {' % (
+            buf, tag_name, var_name, self._name, var_name, self._name),
+                '    event_warnx("%%s: failed to unmarshal %s", __func__);' % (
+            self._name ),
+                '    return (-1);',
+                '}'
+                ]
+        return code
+
+    def CodeMarshal(self, buf, tag_name, var_name):
+        code = ['evtag_marshal(%s, %s, %s->%s_data, %s->%s_length);' % (
+            buf, tag_name, var_name, self._name, var_name, self._name)]
+        return code
+
+    def CodeClear(self, structname):
+        code = [ 'if (%s->%s_set == 1) {' % (structname, self.Name()),
+                 '    free (%s->%s_data);' % (structname, self.Name()),
+                 '    %s->%s_data = NULL;' % (structname, self.Name()),
+                 '    %s->%s_length = 0;' % (structname, self.Name()),
+                 '    %s->%s_set = 0;' % (structname, self.Name()),
+                 '}'
+                 ]
+
+        return code
+
+    def CodeNew(self, name):
+        code = ['%s->%s_data = NULL;' % (name, self._name),
+                '%s->%s_length = 0;' % (name, self._name) ]
+        return code
+
+    def CodeFree(self, name):
+        code = ['if (%s->%s_data != NULL)' % (name, self._name),
+                '    free (%s->%s_data); ' % (name, self._name)]
+
+        return code
+
+    def Declaration(self):
+        dcl = ['uint8_t *%s_data;' % self._name,
+               'uint32_t %s_length;' % self._name]
+
+        return dcl
+
+# Wrapper that turns a struct-typed entry into a growable array of
+# struct pointers; created by ProcessOneEntry when the 'array' keyword
+# is present.  The generated struct tracks data/length/num_allocated.
+class EntryArray(Entry):
+    def __init__(self, entry):
+        # Init base class
+        Entry.__init__(self, entry._type, entry._name, entry._tag)
+
+        self._entry = entry
+        self._refname = entry._refname
+        self._ctype = 'struct %s *' % self._refname
+
+    def GetDeclaration(self, funcname):
+        """Allows direct access to elements of the array."""
+        translate = self.GetTranslation()
+        translate["funcname"] = funcname
+        code = [
+            'int %(funcname)s(struct %(parent_name)s *, int, %(ctype)s *);' %
+            translate ]
+        return code
+
+    def AssignDeclaration(self, funcname):
+        code = [ 'int %s(struct %s *, int, const %s);' % (
+            funcname, self._struct.Name(), self._ctype ) ]
+        return code
+
+    def AddDeclaration(self, funcname):
+        code = [ '%s %s(struct %s *);' % (
+            self._ctype, funcname, self._struct.Name() ) ]
+        return code
+
+    def CodeGet(self):
+        # Indexed accessor with bounds checking against _length.
+        code = """int
+%(parent_name)s_%(name)s_get(struct %(parent_name)s *msg, int offset,
+    %(ctype)s *value)
+{
+    if (!msg->%(name)s_set || offset < 0 || offset >= msg->%(name)s_length)
+        return (-1);
+    *value = msg->%(name)s_data[offset];
+    return (0);
+}""" % self.GetTranslation()
+
+        return code.split('\n')
+
+    def CodeAssign(self):
+        # Replaces an existing element in place by round-tripping the
+        # value through a temporary evbuffer (deep copy).
+        code = """int
+%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg, int off,
+    const %(ctype)s value)
+{
+    struct evbuffer *tmp = NULL;
+    if (!msg->%(name)s_set || off < 0 || off >= msg->%(name)s_length)
+        return (-1);
+    %(refname)s_clear(msg->%(name)s_data[off]);
+    if ((tmp = evbuffer_new()) == NULL) {
+        event_warn("%%s: evbuffer_new()", __func__);
+        goto error;
+    }
+    %(refname)s_marshal(tmp, value);
+    if (%(refname)s_unmarshal(msg->%(name)s_data[off], tmp) == -1) {
+        event_warnx("%%s: %(refname)s_unmarshal", __func__);
+        goto error;
+    }
+    evbuffer_free(tmp);
+    return (0);
+error:
+    if (tmp != NULL)
+        evbuffer_free(tmp);
+    %(refname)s_clear(msg->%(name)s_data[off]);
+    return (-1);
+}""" % self.GetTranslation()
+
+        return code.split('\n')
+
+    def CodeAdd(self):
+        # Appends a freshly allocated element, growing the pointer
+        # array geometrically (doubling); on failure the length bump
+        # is rolled back and NULL is returned.
+        code = \
+"""%(ctype)s
+%(parent_name)s_%(name)s_add(struct %(parent_name)s *msg)
+{
+    if (++msg->%(name)s_length >= msg->%(name)s_num_allocated) {
+        int tobe_allocated = msg->%(name)s_num_allocated;
+        %(ctype)s* new_data = NULL;
+        tobe_allocated = !tobe_allocated ? 1 : tobe_allocated << 1;
+        new_data = (%(ctype)s*) realloc(msg->%(name)s_data,
+            tobe_allocated * sizeof(%(ctype)s));
+        if (new_data == NULL)
+            goto error;
+        msg->%(name)s_data = new_data;
+        msg->%(name)s_num_allocated = tobe_allocated;
+    }
+    msg->%(name)s_data[msg->%(name)s_length - 1] = %(refname)s_new();
+    if (msg->%(name)s_data[msg->%(name)s_length - 1] == NULL)
+        goto error;
+    msg->%(name)s_set = 1;
+    return (msg->%(name)s_data[msg->%(name)s_length - 1]);
+error:
+    --msg->%(name)s_length;
+    return (NULL);
+}
+    """ % self.GetTranslation()
+
+        return code.split('\n')
+
+    def CodeComplete(self, structname):
+        # Every element must itself be complete.
+        code = []
+        translate = self.GetTranslation()
+
+        if self.Optional():
+            code.append( 'if (%(structname)s->%(name)s_set)' % translate)
+
+        translate["structname"] = structname
+        tmp = """{
+    int i;
+    for (i = 0; i < %(structname)s->%(name)s_length; ++i) {
+        if (%(refname)s_complete(%(structname)s->%(name)s_data[i]) == -1)
+            return (-1);
+    }
+}""" % translate
+        code.extend(tmp.split('\n'))
+
+        return code
+
+    def CodeUnmarshal(self, buf, tag_name, var_name):
+        # Each incoming tag appends one element via the generated _add;
+        # a failed element unmarshal rolls the length back.
+        translate = self.GetTranslation()
+        translate["var_name"] = var_name
+        translate["buf"] = buf
+        translate["tag_name"] = tag_name
+        code = """if (%(parent_name)s_%(name)s_add(%(var_name)s) == NULL)
+    return (-1);
+if (evtag_unmarshal_%(refname)s(%(buf)s, %(tag_name)s,
+    %(var_name)s->%(name)s_data[%(var_name)s->%(name)s_length - 1]) == -1) {
+    --%(var_name)s->%(name)s_length;
+    event_warnx("%%s: failed to unmarshal %(name)s", __func__);
+    return (-1);
+}""" % translate
+
+        return code.split('\n')
+
+    def CodeMarshal(self, buf, tag_name, var_name):
+        # Every element is emitted under the same tag, in order.
+        code = ['{',
+                '    int i;',
+                '    for (i = 0; i < %s->%s_length; ++i) {' % (
+            var_name, self._name),
+                '        evtag_marshal_%s(%s, %s, %s->%s_data[i]);' % (
+            self._refname, buf, tag_name, var_name, self._name),
+                '    }',
+                '}'
+                ]
+        return code
+
+    def CodeClear(self, structname):
+        code = [ 'if (%s->%s_set == 1) {' % (structname, self.Name()),
+                 '    int i;',
+                 '    for (i = 0; i < %s->%s_length; ++i) {' % (
+                     structname, self.Name()),
+                 '        %s_free(%s->%s_data[i]);' % (
+                     self._refname, structname, self.Name()),
+                 '    }',
+                 '    free(%s->%s_data);' % (structname, self.Name()),
+                 '    %s->%s_data = NULL;' % (structname, self.Name()),
+                 '    %s->%s_set = 0;' % (structname, self.Name()),
+                 '    %s->%s_length = 0;' % (structname, self.Name()),
+                 '    %s->%s_num_allocated = 0;' % (structname, self.Name()),
+                 '}'
+                 ]
+
+        return code
+
+    def CodeNew(self, name):
+        code = ['%s->%s_data = NULL;' % (name, self._name),
+                '%s->%s_length = 0;' % (name, self._name),
+                '%s->%s_num_allocated = 0;' % (name, self._name)]
+        return code
+
+    def CodeFree(self, name):
+        code = ['if (%s->%s_data != NULL) {' % (name, self._name),
+                '    int i;',
+                '    for (i = 0; i < %s->%s_length; ++i) {' % (
+                    name, self._name),
+                '        %s_free(%s->%s_data[i]); ' % (
+                    self._refname, name, self._name),
+                '        %s->%s_data[i] = NULL;' % (name, self._name),
+                '    }',
+                '    free(%s->%s_data);' % (name, self._name),
+                '    %s->%s_data = NULL;' % (name, self._name),
+                '    %s->%s_length = 0;' % (name, self._name),
+                '    %s->%s_num_allocated = 0;' % (name, self._name),
+                '}'
+                ]
+
+        return code
+
+    def Declaration(self):
+        dcl = ['struct %s **%s_data;' % (self._refname, self._name),
+               'int %s_length;' % self._name,
+               'int %s_num_allocated;' % self._name ]
+
+        return dcl
+
+# Strips C++-style comments, trims, and collapses runs of whitespace.
+# Relies on the module-level compiled regexes `white` and `cppcomment`,
+# which are defined elsewhere in this file (not visible in this chunk).
+def NormalizeLine(line):
+    global white
+    global cppcomment
+
+    line = cppcomment.sub('', line)
+    line = line.strip()
+    line = white.sub(' ', line)
+
+    return line
+
+# Parses one member declaration of the form
+#   [optional|array] <type> <name>[len] = <tag>
+# and attaches the resulting Entry subclass instance to `newstruct`.
+# Reads the module-level `line_count` for diagnostics; any parse error
+# is fatal (sys.exit).  Returns `structs`, which in this version is
+# always an empty list (kept for API symmetry with ProcessStruct).
+def ProcessOneEntry(newstruct, entry):
+    optional = 0
+    array = 0
+    entry_type = ''
+    name = ''
+    tag = ''
+    tag_set = None
+    separator = ''
+    fixed_length = ''
+
+    # Consume tokens left-to-right through the little state machine
+    # below: modifiers, then type, name, '=', and finally the tag.
+    tokens = entry.split(' ')
+    while tokens:
+        token = tokens[0]
+        tokens = tokens[1:]
+
+        if not entry_type:
+            if not optional and token == 'optional':
+                optional = 1
+                continue
+
+            if not array and token == 'array':
+                array = 1
+                continue
+
+        if not entry_type:
+            entry_type = token
+            continue
+
+        if not name:
+            # Split "name[len]" into the name and optional fixed length.
+            res = re.match(r'^([^\[\]]+)(\[.*\])?$', token)
+            if not res:
+                print >>sys.stderr, 'Cannot parse name: \"%s\" around %d' % (
+                    entry, line_count)
+                sys.exit(1)
+            name = res.group(1)
+            fixed_length = res.group(2)
+            if fixed_length:
+                fixed_length = fixed_length[1:-1]
+            continue
+
+        if not separator:
+            separator = token
+            if separator != '=':
+                print >>sys.stderr, 'Expected "=" after name \"%s\" got %s' % (
+                    name, token)
+                sys.exit(1)
+            continue
+
+        if not tag_set:
+            tag_set = 1
+            if not re.match(r'^(0x)?[0-9]+$', token):
+                print >>sys.stderr, 'Expected tag number: \"%s\"' % entry
+                sys.exit(1)
+            # base 0: accepts both decimal and 0x-prefixed hex tags.
+            tag = int(token, 0)
+            continue
+
+        print >>sys.stderr, 'Cannot parse \"%s\"' % entry
+        sys.exit(1)
+
+    if not tag_set:
+        print >>sys.stderr, 'Need tag number: \"%s\"' % entry
+        sys.exit(1)
+
+    # Create the right entry
+    if entry_type == 'bytes':
+        if fixed_length:
+            newentry = EntryBytes(entry_type, name, tag, fixed_length)
+        else:
+            newentry = EntryVarBytes(entry_type, name, tag)
+    elif entry_type == 'int' and not fixed_length:
+        newentry = EntryInt(entry_type, name, tag)
+    elif entry_type == 'string' and not fixed_length:
+        newentry = EntryString(entry_type, name, tag)
+    else:
+        res = re.match(r'^struct\[(%s)\]$' % _STRUCT_RE,
+                       entry_type, re.IGNORECASE)
+        if res:
+            # References another struct defined in our file
+            newentry = EntryStruct(entry_type, name, tag, res.group(1))
+        else:
+            print >>sys.stderr, 'Bad type: "%s" in "%s"' % (entry_type, entry)
+            sys.exit(1)
+
+    structs = []
+
+    if optional:
+        newentry.MakeOptional()
+    if array:
+        newentry.MakeArray()
+
+    newentry.SetStruct(newstruct)
+    newentry.SetLineCount(line_count)
+    newentry.Verify()
+
+    if array:
+        # We need to encapsulate this entry into a struct
+        # NOTE(review): `newname` below is computed but never used —
+        # looks like a leftover from an older encapsulation scheme.
+        newname = newentry.Name()+ '_array'
+
+        # Now borgify the new entry.
+        newentry = EntryArray(newentry)
+        newentry.SetStruct(newstruct)
+        newentry.SetLineCount(line_count)
+        newentry.MakeArray()
+
+    newstruct.AddEntry(newentry)
+
+    return structs
+
+# Turns one normalized "struct name { member; member; ... }" string
+# into a Struct object with its entries; returns a list of all structs
+# produced (currently just the one, since ProcessOneEntry no longer
+# creates auxiliary structs).
+def ProcessStruct(data):
+    tokens = data.split(' ')
+
+    # First three tokens are: 'struct' 'name' '{'
+    newstruct = Struct(tokens[1])
+
+    # Everything between the braces, re-joined then split on ';'.
+    inside = ' '.join(tokens[3:-1])
+
+    tokens = inside.split(';')
+
+    structs = []
+
+    for entry in tokens:
+        entry = NormalizeLine(entry)
+        if not entry:
+            continue
+
+        # It's possible that new structs get defined in here
+        structs.extend(ProcessOneEntry(newstruct, entry))
+
+    structs.append(newstruct)
+    return structs
+
+# Reads the input file until one complete struct definition has been
+# collected, flattened onto a single line.  Preprocessor-style lines
+# seen before the struct are routed to the module-level `cppdirect`
+# (emitted into the .c file) or `headerdirect` (emitted into the .h).
+# Returns '' at EOF.  Updates the global `line_count` for diagnostics.
+def GetNextStruct(file):
+    global line_count
+    global cppdirect
+
+    got_struct = 0
+
+    processed_lines = []
+
+    have_c_comment = 0
+    data = ''
+    while 1:
+        line = file.readline()
+        if not line:
+            break
+
+        line_count += 1
+        # Chop the trailing newline (assumes the file ends with one —
+        # a final line without '\n' would lose its last character).
+        line = line[:-1]
+
+        # Strip C-style comments, including ones spanning lines.
+        if not have_c_comment and re.search(r'/\*', line):
+            if re.search(r'/\*.*\*/', line):
+                line = re.sub(r'/\*.*\*/', '', line)
+            else:
+                line = re.sub(r'/\*.*$', '', line)
+                have_c_comment = 1
+
+        if have_c_comment:
+            if not re.search(r'\*/', line):
+                continue
+            have_c_comment = 0
+            line = re.sub(r'^.*\*/', '', line)
+
+        line = NormalizeLine(line)
+
+        if not line:
+            continue
+
+        if not got_struct:
+            # Before the struct body: collect preprocessor directives.
+            if re.match(r'#include ["<].*[>"]', line):
+                cppdirect.append(line)
+                continue
+
+            if re.match(r'^#(if( |def)|endif)', line):
+                cppdirect.append(line)
+                continue
+
+            if re.match(r'^#define', line):
+                headerdirect.append(line)
+                continue
+
+            if not re.match(r'^struct %s {$' % _STRUCT_RE,
+                            line, re.IGNORECASE):
+                print >>sys.stderr, 'Missing struct on line %d: %s' % (
+                    line_count, line)
+                sys.exit(1)
+            else:
+                got_struct = 1
+                data += line
+            continue
+
+        # We are inside the struct
+        tokens = line.split('}')
+        if len(tokens) == 1:
+            data += ' ' + line
+            continue
+
+        if len(tokens[1]):
+            print >>sys.stderr, 'Trailing garbage after struct on line %d' % (
+                line_count )
+            sys.exit(1)
+
+        # We found the end of the struct
+        data += ' %s}' % tokens[0]
+        break
+
+    # Remove any comments, that might be in there
+    data = re.sub(r'/\*.*\*/', '', data)
+
+    return data
+
+
+def Parse(file):
+    """
+    Parses the input file and returns C code and corresponding header file.
+    """
+
+    # Accumulates Struct objects in file order; each struct's entries
+    # are built as a side effect of ProcessStruct.
+    entities = []
+
+    while 1:
+        # Just gets the whole struct nicely formatted
+        data = GetNextStruct(file)
+
+        if not data:
+            break
+
+        entities.extend(ProcessStruct(data))
+
+    return entities
+
+# Derives a C include-guard macro from the output filename, e.g.
+# "foo/bar.gen.h" -> "_FOO_BAR_GEN_H_".
+# NOTE(review): the leading underscore + uppercase letter makes the
+# guard a reserved identifier per the C standard (7.1.3); harmless in
+# practice here, but worth confirming against project conventions.
+def GuardName(name):
+    name = '_'.join(name.split('.'))
+    name = '_'.join(name.split('/'))
+    guard = '_'+name.upper()+'_'
+
+    return guard
+
+# Builds the top of the generated .gen.h file: include guard, stdint
+# include, any collected #define directives, and the EVTAG_* accessor
+# macros (GNU vararg-macro syntax under __GNUC__, C99 __VA_ARGS__
+# otherwise).
+def HeaderPreamble(name):
+    guard = GuardName(name)
+    pre = (
+        '/*\n'
+        ' * Automatically generated from %s\n'
+        ' */\n\n'
+        '#ifndef %s\n'
+        '#define %s\n\n' ) % (
+        name, guard, guard)
+
+    # insert stdint.h - let's hope everyone has it
+    pre += (
+        '#include <event-config.h>\n'
+        '#ifdef _EVENT_HAVE_STDINT_H\n'
+        '#include <stdint.h>\n'
+        '#endif\n' )
+
+    for statement in headerdirect:
+        pre += '%s\n' % statement
+    if headerdirect:
+        pre += '\n'
+
+    pre += (
+        '#define EVTAG_HAS(msg, member) ((msg)->member##_set == 1)\n'
+        '#ifdef __GNUC__\n'
+        '#define EVTAG_ASSIGN(msg, member, args...) '
+        '(*(msg)->base->member##_assign)(msg, ## args)\n'
+        '#define EVTAG_GET(msg, member, args...) '
+        '(*(msg)->base->member##_get)(msg, ## args)\n'
+        '#else\n'
+        '#define EVTAG_ASSIGN(msg, member, ...) '
+        '(*(msg)->base->member##_assign)(msg, ## __VA_ARGS__)\n'
+        '#define EVTAG_GET(msg, member, ...) '
+        '(*(msg)->base->member##_get)(msg, ## __VA_ARGS__)\n'
+        '#endif\n'
+        '#define EVTAG_ADD(msg, member) (*(msg)->base->member##_add)(msg)\n'
+        '#define EVTAG_LEN(msg, member) ((msg)->member##_length)\n'
+        )
+
+    return pre
+
+
+# Closes the include guard opened by HeaderPreamble.
+def HeaderPostamble(name):
+    guard = GuardName(name)
+    return '#endif /* %s */' % guard
+
+# Builds the top of the generated .gen.c file: banner (using the
+# module-level _NAME/_VERSION), system includes, any #include/#if
+# directives collected from the .rpc file, the generated header, and
+# prototypes for libevent's logging helpers.
+def BodyPreamble(name):
+    global _NAME
+    global _VERSION
+
+    header_file = '.'.join(name.split('.')[:-1]) + '.gen.h'
+
+    pre = ( '/*\n'
+            ' * Automatically generated from %s\n'
+            ' * by %s/%s.  DO NOT EDIT THIS FILE.\n'
+            ' */\n\n' ) % (name, _NAME, _VERSION)
+    pre += ( '#include <sys/types.h>\n'
+             '#include <sys/time.h>\n'
+             '#include <stdlib.h>\n'
+             '#include <string.h>\n'
+             '#include <assert.h>\n'
+             '#include <event.h>\n\n' )
+
+    for statement in cppdirect:
+        pre += '%s\n' % statement
+
+    pre += '\n#include "%s"\n\n' % header_file
+
+    pre += 'void event_err(int eval, const char *fmt, ...);\n'
+    pre += 'void event_warn(const char *fmt, ...);\n'
+    pre += 'void event_errx(int eval, const char *fmt, ...);\n'
+    pre += 'void event_warnx(const char *fmt, ...);\n\n'
+
+    return pre
+
+# Entry point: reads the .rpc description named by argv[1] and writes
+# <base>.gen.h and <base>.gen.c next to it.  All diagnostics go to
+# stderr; any error exits the process with status 1.
+def main(argv):
+    if len(argv) < 2 or not argv[1]:
+        print >>sys.stderr, 'Need RPC description file as first argument.'
+        sys.exit(1)
+
+    filename = argv[1]
+
+    # Only .rpc inputs are accepted.
+    ext = filename.split('.')[-1]
+    if ext != 'rpc':
+        print >>sys.stderr, 'Unrecognized file extension: %s' % ext
+        sys.exit(1)
+
+    print >>sys.stderr, 'Reading \"%s\"' % filename
+
+    fp = open(filename, 'r')
+    entities = Parse(fp)
+    fp.close()
+
+    header_file = '.'.join(filename.split('.')[:-1]) + '.gen.h'
+    impl_file = '.'.join(filename.split('.')[:-1]) + '.gen.c'
+
+    print >>sys.stderr, '... creating "%s"' % header_file
+    header_fp = open(header_file, 'w')
+    print >>header_fp, HeaderPreamble(filename)
+
+    # Create forward declarations: allows other structs to reference
+    # each other
+    for entry in entities:
+        entry.PrintForwardDeclaration(header_fp)
+    print >>header_fp, ''
+
+    for entry in entities:
+        entry.PrintTags(header_fp)
+        entry.PrintDeclaration(header_fp)
+    print >>header_fp, HeaderPostamble(filename)
+    header_fp.close()
+
+    print >>sys.stderr, '... creating "%s"' % impl_file
+    impl_fp = open(impl_file, 'w')
+    print >>impl_fp, BodyPreamble(filename)
+    for entry in entities:
+        entry.PrintCode(impl_fp)
+    impl_fp.close()
+
+# Script entry point; argv[1] must be the .rpc description file.
+if __name__ == '__main__':
+    main(sys.argv)
diff --git a/libevent/event_tagging.c b/libevent/event_tagging.c
new file mode 100644
index 00000000000..d436e3fd65b
--- /dev/null
+++ b/libevent/event_tagging.c
@@ -0,0 +1,443 @@
+/*
+ * Copyright (c) 2003, 2004 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+#ifdef HAVE_SYS_PARAM_H
+#include <sys/param.h>
+#endif
+
+#ifdef WIN32
+#define WIN32_LEAN_AND_MEAN
+#include <winsock2.h>
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+#else
+#include <sys/ioctl.h>
+#endif
+
+#include <sys/queue.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef WIN32
+#include <syslog.h>
+#endif
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#include "event.h"
+#include "evutil.h"
+#include "log.h"
+
+int evtag_decode_int(ev_uint32_t *pnumber, struct evbuffer *evbuf);
+int evtag_encode_tag(struct evbuffer *evbuf, ev_uint32_t tag);
+int evtag_decode_tag(ev_uint32_t *ptag, struct evbuffer *evbuf);
+
+static struct evbuffer *_buf; /* not thread safe */
+
+void
+evtag_init(void)
+{
+ if (_buf != NULL)
+ return;
+
+ if ((_buf = evbuffer_new()) == NULL)
+ event_err(1, "%s: malloc", __func__);
+}
+
+/*
+ * We encode integers by nibbles; the first nibble contains the number
+ * of significant nibbles - 1; this allows us to encode up to 64-bit
+ * integers. This function is byte-order independent.
+ */
+
+void
+encode_int(struct evbuffer *evbuf, ev_uint32_t number)
+{
+ int off = 1, nibbles = 0;
+ ev_uint8_t data[5];
+
+ memset(data, 0, sizeof(ev_uint32_t)+1);
+ while (number) {
+ if (off & 0x1)
+ data[off/2] = (data[off/2] & 0xf0) | (number & 0x0f);
+ else
+ data[off/2] = (data[off/2] & 0x0f) |
+ ((number & 0x0f) << 4);
+ number >>= 4;
+ off++;
+ }
+
+ if (off > 2)
+ nibbles = off - 2;
+
+ /* Off - 1 is the number of encoded nibbles */
+ data[0] = (data[0] & 0x0f) | ((nibbles & 0x0f) << 4);
+
+ evbuffer_add(evbuf, data, (off + 1) / 2);
+}
+
+/*
+ * Support variable length encoding of tags; we use the high bit in each
+ * octet as a continuation signal.
+ */
+
+int
+evtag_encode_tag(struct evbuffer *evbuf, ev_uint32_t tag)
+{
+ int bytes = 0;
+ ev_uint8_t data[5];
+
+ memset(data, 0, sizeof(data));
+ do {
+ ev_uint8_t lower = tag & 0x7f;
+ tag >>= 7;
+
+ if (tag)
+ lower |= 0x80;
+
+ data[bytes++] = lower;
+ } while (tag);
+
+ if (evbuf != NULL)
+ evbuffer_add(evbuf, data, bytes);
+
+ return (bytes);
+}
+
+static int
+decode_tag_internal(ev_uint32_t *ptag, struct evbuffer *evbuf, int dodrain)
+{
+ ev_uint32_t number = 0;
+ ev_uint8_t *data = EVBUFFER_DATA(evbuf);
+ int len = EVBUFFER_LENGTH(evbuf);
+ int count = 0, shift = 0, done = 0;
+
+ while (count++ < len) {
+ ev_uint8_t lower = *data++;
+ number |= (lower & 0x7f) << shift;
+ shift += 7;
+
+ if (!(lower & 0x80)) {
+ done = 1;
+ break;
+ }
+ }
+
+ if (!done)
+ return (-1);
+
+ if (dodrain)
+ evbuffer_drain(evbuf, count);
+
+ if (ptag != NULL)
+ *ptag = number;
+
+ return (count);
+}
+
/* Decode a varint-encoded tag from the front of evbuf, draining the
 * consumed bytes.  Returns the number of bytes consumed, or -1 if the
 * buffer does not hold a complete tag. */
int
evtag_decode_tag(ev_uint32_t *ptag, struct evbuffer *evbuf)
{
	return (decode_tag_internal(ptag, evbuf, 1 /* dodrain */));
}
+
+/*
+ * Marshal a data type, the general format is as follows:
+ *
+ * tag number: one byte; length: var bytes; payload: var bytes
+ */
+
/* Append one tagged record to evbuf: varint-encoded tag, nibble-encoded
 * payload length, then the raw payload bytes. */
void
evtag_marshal(struct evbuffer *evbuf, ev_uint32_t tag,
    const void *data, ev_uint32_t len)
{
	evtag_encode_tag(evbuf, tag);
	encode_int(evbuf, len);
	/* cast discards const — presumably for evbuffer_add()'s old
	 * non-const prototype; TODO confirm against event.h */
	evbuffer_add(evbuf, (void *)data, len);
}
+
+/* Marshaling for integers */
/* Append a tagged record whose payload is the nibble-encoded integer.
 * Builds the payload in the shared scratch buffer _buf first so its
 * length can be written before it; like all _buf users this function is
 * not thread safe and requires evtag_init() to have run. */
void
evtag_marshal_int(struct evbuffer *evbuf, ev_uint32_t tag, ev_uint32_t integer)
{
	/* reset the scratch buffer, then stage the encoded integer */
	evbuffer_drain(_buf, EVBUFFER_LENGTH(_buf));
	encode_int(_buf, integer);

	evtag_encode_tag(evbuf, tag);
	encode_int(evbuf, EVBUFFER_LENGTH(_buf));
	evbuffer_add_buffer(evbuf, _buf);
}
+
+void
+evtag_marshal_string(struct evbuffer *buf, ev_uint32_t tag, const char *string)
+{
+ evtag_marshal(buf, tag, string, strlen(string));
+}
+
/* Append a tagged record whose payload is tv->tv_sec followed by
 * tv->tv_usec, each nibble-encoded.  Uses the shared scratch buffer
 * _buf (not thread safe). */
void
evtag_marshal_timeval(struct evbuffer *evbuf, ev_uint32_t tag, struct timeval *tv)
{
	evbuffer_drain(_buf, EVBUFFER_LENGTH(_buf));

	encode_int(_buf, tv->tv_sec);
	encode_int(_buf, tv->tv_usec);

	evtag_marshal(evbuf, tag, EVBUFFER_DATA(_buf),
	    EVBUFFER_LENGTH(_buf));
}
+
+static int
+decode_int_internal(ev_uint32_t *pnumber, struct evbuffer *evbuf, int dodrain)
+{
+ ev_uint32_t number = 0;
+ ev_uint8_t *data = EVBUFFER_DATA(evbuf);
+ int len = EVBUFFER_LENGTH(evbuf);
+ int nibbles = 0;
+
+ if (!len)
+ return (-1);
+
+ nibbles = ((data[0] & 0xf0) >> 4) + 1;
+ if (nibbles > 8 || (nibbles >> 1) + 1 > len)
+ return (-1);
+ len = (nibbles >> 1) + 1;
+
+ while (nibbles > 0) {
+ number <<= 4;
+ if (nibbles & 0x1)
+ number |= data[nibbles >> 1] & 0x0f;
+ else
+ number |= (data[nibbles >> 1] & 0xf0) >> 4;
+ nibbles--;
+ }
+
+ if (dodrain)
+ evbuffer_drain(evbuf, len);
+
+ *pnumber = number;
+
+ return (len);
+}
+
/* Decode a nibble-encoded integer from the front of evbuf into *pnumber,
 * draining the consumed bytes.  Returns 0 on success, -1 on failure. */
int
evtag_decode_int(ev_uint32_t *pnumber, struct evbuffer *evbuf)
{
	return (decode_int_internal(pnumber, evbuf, 1) == -1 ? -1 : 0);
}
+
/* Read the next record's tag into *ptag without consuming anything from
 * the buffer.  Returns the encoded tag's byte length, or -1 on failure. */
int
evtag_peek(struct evbuffer *evbuf, ev_uint32_t *ptag)
{
	return (decode_tag_internal(ptag, evbuf, 0 /* dodrain */));
}
+
/* Compute, without consuming anything, the total on-wire size of the
 * next record (tag + length field + payload) into *plength.  Returns 0
 * on success, -1 if the buffer does not hold a complete header. */
int
evtag_peek_length(struct evbuffer *evbuf, ev_uint32_t *plength)
{
	struct evbuffer tmp;
	int res, len;

	len = decode_tag_internal(NULL, evbuf, 0 /* dodrain */);
	if (len == -1)
		return (-1);

	/* shallow copy advanced past the tag; the underlying storage is
	 * shared with evbuf and must not be modified through tmp */
	tmp = *evbuf;
	tmp.buffer += len;
	tmp.off -= len;

	res = decode_int_internal(plength, &tmp, 0);
	if (res == -1)
		return (-1);

	/* payload length + length-field bytes + tag bytes */
	*plength += res + len;

	return (0);
}
+
/* Like evtag_peek_length(), but store only the payload length of the
 * next record into *plength, excluding the tag and length field.
 * Consumes nothing.  Returns 0 on success, -1 on failure. */
int
evtag_payload_length(struct evbuffer *evbuf, ev_uint32_t *plength)
{
	struct evbuffer tmp;
	int res, len;

	len = decode_tag_internal(NULL, evbuf, 0 /* dodrain */);
	if (len == -1)
		return (-1);

	/* shallow copy advanced past the tag (see evtag_peek_length) */
	tmp = *evbuf;
	tmp.buffer += len;
	tmp.off -= len;

	res = decode_int_internal(plength, &tmp, 0);
	if (res == -1)
		return (-1);

	return (0);
}
+
/* Discard the next complete record (tag, length field and payload) from
 * evbuf.  Returns 0 on success, -1 if the header cannot be decoded. */
int
evtag_consume(struct evbuffer *evbuf)
{
	ev_uint32_t len;
	if (decode_tag_internal(NULL, evbuf, 1 /* dodrain */) == -1)
		return (-1);
	if (evtag_decode_int(&len, evbuf) == -1)
		return (-1);
	/* header is gone; drop the payload too */
	evbuffer_drain(evbuf, len);

	return (0);
}
+
+/* Reads the data type from an event buffer */
+
/* Remove the next record from src, storing its tag in *ptag and
 * appending its payload to dst.  Returns the payload length, or -1 if
 * the record is malformed or src does not yet hold the whole payload. */
int
evtag_unmarshal(struct evbuffer *src, ev_uint32_t *ptag, struct evbuffer *dst)
{
	ev_uint32_t len;
	ev_uint32_t integer;

	if (decode_tag_internal(ptag, src, 1 /* dodrain */) == -1)
		return (-1);
	if (evtag_decode_int(&integer, src) == -1)
		return (-1);
	len = integer;

	/* payload must be fully buffered before we copy it */
	if (EVBUFFER_LENGTH(src) < len)
		return (-1);

	if (evbuffer_add(dst, EVBUFFER_DATA(src), len) == -1)
		return (-1);

	evbuffer_drain(src, len);

	return (len);
}
+
+/* Marshaling for integers */
+
/* Remove the next record from evbuf and decode its payload as an integer
 * into *pinteger.  Fails (-1) if the record's tag differs from need_tag,
 * the record is malformed, or the payload is incomplete.  Uses the
 * shared scratch buffer _buf (not thread safe). */
int
evtag_unmarshal_int(struct evbuffer *evbuf, ev_uint32_t need_tag,
    ev_uint32_t *pinteger)
{
	ev_uint32_t tag;
	ev_uint32_t len;
	ev_uint32_t integer;

	if (decode_tag_internal(&tag, evbuf, 1 /* dodrain */) == -1)
		return (-1);
	if (need_tag != tag)
		return (-1);
	if (evtag_decode_int(&integer, evbuf) == -1)
		return (-1);
	len = integer;

	if (EVBUFFER_LENGTH(evbuf) < len)
		return (-1);

	/* stage the payload in _buf, then decode the integer from it */
	evbuffer_drain(_buf, EVBUFFER_LENGTH(_buf));
	if (evbuffer_add(_buf, EVBUFFER_DATA(evbuf), len) == -1)
		return (-1);

	evbuffer_drain(evbuf, len);

	return (evtag_decode_int(pinteger, _buf));
}
+
+/* Unmarshal a fixed length tag */
+
/* Unmarshal a record whose payload must be exactly "len" bytes long,
 * copying the payload into "data".  Fails (-1) on tag mismatch or any
 * other payload length.  Uses the shared scratch buffer _buf. */
int
evtag_unmarshal_fixed(struct evbuffer *src, ev_uint32_t need_tag, void *data,
    size_t len)
{
	ev_uint32_t tag;

	/* Initialize this event buffer so that we can read into it */
	evbuffer_drain(_buf, EVBUFFER_LENGTH(_buf));

	/* Now unmarshal a tag and check that it matches the tag we want */
	if (evtag_unmarshal(src, &tag, _buf) == -1 || tag != need_tag)
		return (-1);

	if (EVBUFFER_LENGTH(_buf) != len)
		return (-1);

	memcpy(data, EVBUFFER_DATA(_buf), len);
	return (0);
}
+
/* Unmarshal a record into a freshly allocated NUL-terminated string in
 * *pstring; the caller owns it and must free() it.  Exits via
 * event_err() if the allocation fails.  Returns 0 on success, -1 on tag
 * mismatch or malformed input.  Uses the shared scratch buffer _buf. */
int
evtag_unmarshal_string(struct evbuffer *evbuf, ev_uint32_t need_tag,
    char **pstring)
{
	ev_uint32_t tag;

	evbuffer_drain(_buf, EVBUFFER_LENGTH(_buf));

	if (evtag_unmarshal(evbuf, &tag, _buf) == -1 || tag != need_tag)
		return (-1);

	/* +1 for the terminating NUL; calloc zeroes it for us */
	*pstring = calloc(EVBUFFER_LENGTH(_buf) + 1, 1);
	if (*pstring == NULL)
		event_err(1, "%s: calloc", __func__);
	evbuffer_remove(_buf, *pstring, EVBUFFER_LENGTH(_buf));

	return (0);
}
+
/* Unmarshal a record whose payload is two nibble-encoded integers
 * (seconds, then microseconds) into *ptv.  Fails (-1) on tag mismatch
 * or malformed payload.  Uses the shared scratch buffer _buf. */
int
evtag_unmarshal_timeval(struct evbuffer *evbuf, ev_uint32_t need_tag,
    struct timeval *ptv)
{
	ev_uint32_t tag;
	ev_uint32_t integer;

	evbuffer_drain(_buf, EVBUFFER_LENGTH(_buf));
	if (evtag_unmarshal(evbuf, &tag, _buf) == -1 || tag != need_tag)
		return (-1);

	if (evtag_decode_int(&integer, _buf) == -1)
		return (-1);
	ptv->tv_sec = integer;
	if (evtag_decode_int(&integer, _buf) == -1)
		return (-1);
	ptv->tv_usec = integer;

	return (0);
}
diff --git a/libevent/evhttp.h b/libevent/evhttp.h
new file mode 100644
index 00000000000..99d16a2f47a
--- /dev/null
+++ b/libevent/evhttp.h
@@ -0,0 +1,371 @@
+/*
+ * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _EVHTTP_H_
+#define _EVHTTP_H_
+
+#include <event.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef WIN32
+#define WIN32_LEAN_AND_MEAN
+#include <winsock2.h>
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+#endif
+
+/** @file evhttp.h
+ *
+ * Basic support for HTTP serving.
+ *
+ * As libevent is a library for dealing with event notification and most
+ * interesting applications are networked today, I have often found the
+ * need to write HTTP code. The following prototypes and definitions provide
+ * an application with a minimal interface for making HTTP requests and for
+ * creating a very simple HTTP server.
+ */
+
+/* Response codes */
+#define HTTP_OK 200
+#define HTTP_NOCONTENT 204
+#define HTTP_MOVEPERM 301
+#define HTTP_MOVETEMP 302
+#define HTTP_NOTMODIFIED 304
+#define HTTP_BADREQUEST 400
+#define HTTP_NOTFOUND 404
+#define HTTP_SERVUNAVAIL 503
+
+struct evhttp;
+struct evhttp_request;
+struct evkeyvalq;
+
+/** Create a new HTTP server
+ *
+ * @param base (optional) the event base to receive the HTTP events
+ * @return a pointer to a newly initialized evhttp server structure
+ */
+struct evhttp *evhttp_new(struct event_base *base);
+
+/**
+ * Binds an HTTP server on the specified address and port.
+ *
+ * Can be called multiple times to bind the same http server
+ * to multiple different ports.
+ *
+ * @param http a pointer to an evhttp object
+ * @param address a string containing the IP address to listen(2) on
+ * @param port the port number to listen on
+ * @return a newly allocated evhttp struct
+ * @see evhttp_free()
+ */
+int evhttp_bind_socket(struct evhttp *http, const char *address, u_short port);
+
+/**
+ * Makes an HTTP server accept connections on the specified socket
+ *
+ * This may be useful to create a socket and then fork multiple instances
+ * of an http server, or when a socket has been communicated via file
+ * descriptor passing in situations where an http server does not have
+ * permissions to bind to a low-numbered port.
+ *
+ * Can be called multiple times to have the http server listen to
+ * multiple different sockets.
+ *
+ * @param http a pointer to an evhttp object
+ * @param fd a socket fd that is ready for accepting connections
+ * @return 0 on success, -1 on failure.
+ * @see evhttp_free(), evhttp_bind_socket()
+ */
+int evhttp_accept_socket(struct evhttp *http, int fd);
+
+/**
+ * Free the previously created HTTP server.
+ *
+ * Works only if no requests are currently being served.
+ *
+ * @param http the evhttp server object to be freed
+ * @see evhttp_start()
+ */
+void evhttp_free(struct evhttp* http);
+
+/** Set a callback for a specified URI */
+void evhttp_set_cb(struct evhttp *, const char *,
+ void (*)(struct evhttp_request *, void *), void *);
+
+/** Removes the callback for a specified URI */
+int evhttp_del_cb(struct evhttp *, const char *);
+
+/** Set a callback for all requests that are not caught by specific callbacks
+ */
+void evhttp_set_gencb(struct evhttp *,
+ void (*)(struct evhttp_request *, void *), void *);
+
+/**
+ * Set the timeout for an HTTP request.
+ *
+ * @param http an evhttp object
+ * @param timeout_in_secs the timeout, in seconds
+ */
+void evhttp_set_timeout(struct evhttp *, int timeout_in_secs);
+
+/* Request/Response functionality */
+
+/**
+ * Send an HTML error message to the client.
+ *
+ * @param req a request object
+ * @param error the HTTP error code
+ * @param reason a brief explanation of the error
+ */
+void evhttp_send_error(struct evhttp_request *req, int error,
+ const char *reason);
+
+/**
+ * Send an HTML reply to the client.
+ *
+ * @param req a request object
+ * @param code the HTTP response code to send
+ * @param reason a brief message to send with the response code
+ * @param databuf the body of the response
+ */
+void evhttp_send_reply(struct evhttp_request *req, int code,
+ const char *reason, struct evbuffer *databuf);
+
+/* Low-level response interface, for streaming/chunked replies */
+void evhttp_send_reply_start(struct evhttp_request *, int, const char *);
+void evhttp_send_reply_chunk(struct evhttp_request *, struct evbuffer *);
+void evhttp_send_reply_end(struct evhttp_request *);
+
+/**
+ * Start an HTTP server on the specified address and port
+ *
+ * DEPRECATED: it does not allow an event base to be specified
+ *
+ * @param address the address to which the HTTP server should be bound
+ * @param port the port number on which the HTTP server should listen
+ * @return a struct evhttp object
+ */
+struct evhttp *evhttp_start(const char *address, u_short port);
+
+/*
+ * Interfaces for making requests
+ */
+enum evhttp_cmd_type { EVHTTP_REQ_GET, EVHTTP_REQ_POST, EVHTTP_REQ_HEAD };
+
+enum evhttp_request_kind { EVHTTP_REQUEST, EVHTTP_RESPONSE };
+
+/**
+ * the request structure that a server receives.
+ * WARNING: expect this structure to change. I will try to provide
+ * reasonable accessors.
+ */
+struct evhttp_request {
+#if defined(TAILQ_ENTRY)
+ TAILQ_ENTRY(evhttp_request) next;
+#else
+struct {
+ struct evhttp_request *tqe_next;
+ struct evhttp_request **tqe_prev;
+} next;
+#endif
+
+ /* the connection object that this request belongs to */
+ struct evhttp_connection *evcon;
+ int flags;
+#define EVHTTP_REQ_OWN_CONNECTION 0x0001
+#define EVHTTP_PROXY_REQUEST 0x0002
+
+ struct evkeyvalq *input_headers;
+ struct evkeyvalq *output_headers;
+
+ /* address of the remote host and the port connection came from */
+ char *remote_host;
+ u_short remote_port;
+
+ enum evhttp_request_kind kind;
+ enum evhttp_cmd_type type;
+
+ char *uri; /* uri after HTTP request was parsed */
+
+ char major; /* HTTP Major number */
+ char minor; /* HTTP Minor number */
+
+ int response_code; /* HTTP Response code */
+ char *response_code_line; /* Readable response */
+
+ struct evbuffer *input_buffer; /* read data */
+ ev_int64_t ntoread;
+ int chunked;
+
+ struct evbuffer *output_buffer; /* outgoing post or data */
+
+ /* Callback */
+ void (*cb)(struct evhttp_request *, void *);
+ void *cb_arg;
+
+ /*
+ * Chunked data callback - call for each completed chunk if
+ * specified. If not specified, all the data is delivered via
+ * the regular callback.
+ */
+ void (*chunk_cb)(struct evhttp_request *, void *);
+};
+
+/**
+ * Creates a new request object that needs to be filled in with the request
+ * parameters. The callback is executed when the request completed or an
+ * error occurred.
+ */
+struct evhttp_request *evhttp_request_new(
+ void (*cb)(struct evhttp_request *, void *), void *arg);
+
+/** enable delivery of chunks to requestor */
+void evhttp_request_set_chunked_cb(struct evhttp_request *,
+ void (*cb)(struct evhttp_request *, void *));
+
+/** Frees the request object and removes associated events. */
+void evhttp_request_free(struct evhttp_request *req);
+
+/**
+ * A connection object that can be used to for making HTTP requests. The
+ * connection object tries to establish the connection when it is given an
+ * http request object.
+ */
+struct evhttp_connection *evhttp_connection_new(
+ const char *address, unsigned short port);
+
+/** Frees an http connection */
+void evhttp_connection_free(struct evhttp_connection *evcon);
+
+/** sets the ip address from which http connections are made */
+void evhttp_connection_set_local_address(struct evhttp_connection *evcon,
+ const char *address);
+
+/** sets the local port from which http connections are made */
+void evhttp_connection_set_local_port(struct evhttp_connection *evcon,
+ unsigned short port);
+
+/** Sets the timeout for events related to this connection */
+void evhttp_connection_set_timeout(struct evhttp_connection *evcon,
+ int timeout_in_secs);
+
+/** Sets the retry limit for this connection - -1 repeats indefinitely */
+void evhttp_connection_set_retries(struct evhttp_connection *evcon,
+ int retry_max);
+
+/** Set a callback for connection close. */
+void evhttp_connection_set_closecb(struct evhttp_connection *evcon,
+ void (*)(struct evhttp_connection *, void *), void *);
+
+/**
+ * Associates an event base with the connection - can only be called
+ * on a freshly created connection object that has not been used yet.
+ */
+void evhttp_connection_set_base(struct evhttp_connection *evcon,
+ struct event_base *base);
+
+/** Get the remote address and port associated with this connection. */
+void evhttp_connection_get_peer(struct evhttp_connection *evcon,
+ char **address, u_short *port);
+
+/** The connection gets ownership of the request */
+int evhttp_make_request(struct evhttp_connection *evcon,
+ struct evhttp_request *req,
+ enum evhttp_cmd_type type, const char *uri);
+
+const char *evhttp_request_uri(struct evhttp_request *req);
+
+/* Interfaces for dealing with HTTP headers */
+
+const char *evhttp_find_header(const struct evkeyvalq *, const char *);
+int evhttp_remove_header(struct evkeyvalq *, const char *);
+int evhttp_add_header(struct evkeyvalq *, const char *, const char *);
+void evhttp_clear_headers(struct evkeyvalq *);
+
+/* Miscellaneous utility functions */
+
+
+/**
+ Helper function to encode a URI.
+
+ The returned string must be freed by the caller.
+
+ @param uri an unencoded URI
+ @return a newly allocated URI-encoded string
+ */
+char *evhttp_encode_uri(const char *uri);
+
+
+/**
+ Helper function to decode a URI.
+
+ The returned string must be freed by the caller.
+
+ @param uri an encoded URI
+ @return a newly allocated unencoded URI
+ */
+char *evhttp_decode_uri(const char *uri);
+
+
+/**
+ * Helper function to parse out arguments in a query.
+ *
+ * Parsing a uri like
+ *
+ * http://foo.com/?q=test&s=some+thing
+ *
+ * will result in two entries in the key value queue.
+
+ * The first entry is: key="q", value="test"
+ * The second entry is: key="s", value="some thing"
+ *
+ * @param uri the request URI
+ * @param headers the head of the evkeyval queue
+ */
+void evhttp_parse_query(const char *uri, struct evkeyvalq *headers);
+
+
+/**
+ * Escape HTML character entities in a string.
+ *
+ * Replaces <, >, ", ' and & with &lt;, &gt;, &quot;,
+ * &#039; and &amp; correspondingly.
+ *
+ * The returned string needs to be freed by the caller.
+ *
+ * @param html an unescaped HTML string
+ * @return an escaped HTML string
+ */
+char *evhttp_htmlescape(const char *html);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _EVHTTP_H_ */
diff --git a/libevent/evport.c b/libevent/evport.c
new file mode 100644
index 00000000000..dae6900cc10
--- /dev/null
+++ b/libevent/evport.c
@@ -0,0 +1,513 @@
+/*
+ * Submitted by David Pacheco (dp.spambait@gmail.com)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY SUN MICROSYSTEMS, INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL SUN MICROSYSTEMS, INC. BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Copyright (c) 2007 Sun Microsystems. All rights reserved.
+ * Use is subject to license terms.
+ */
+
+/*
+ * evport.c: event backend using Solaris 10 event ports. See port_create(3C).
+ * This implementation is loosely modeled after the one used for select(2) (in
+ * select.c).
+ *
+ * The outstanding events are tracked in a data structure called evport_data.
+ * Each entry in the ed_fds array corresponds to a file descriptor, and contains
+ * pointers to the read and write events that correspond to that fd. (That is,
+ * when the file is readable, the "read" event should handle it, etc.)
+ *
+ * evport_add and evport_del update this data structure. evport_dispatch uses it
+ * to determine where to callback when an event occurs (which it gets from
+ * port_getn).
+ *
+ * Helper functions are used: grow() grows the file descriptor array as
+ * necessary when large fd's come in. reassociate() takes care of maintaining
+ * the proper file-descriptor/event-port associations.
+ *
+ * As in the select(2) implementation, signals are handled by evsignal.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <sys/time.h>
+#include <assert.h>
+#include <sys/queue.h>
+#include <errno.h>
+#include <poll.h>
+#include <port.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+#ifdef CHECK_INVARIANTS
+#include <assert.h>
+#endif
+
+#include "event.h"
+#include "event-internal.h"
+#include "log.h"
+#include "evsignal.h"
+
+
+/*
+ * Default value for ed_nevents, which is the maximum file descriptor number we
+ * can handle. If an event comes in for a file descriptor F > nevents, we will
+ * grow the array of file descriptors, doubling its size.
+ */
+#define DEFAULT_NFDS 16
+
+
+/*
+ * EVENTS_PER_GETN is the maximum number of events to retrieve from port_getn on
+ * any particular call. You can speed things up by increasing this, but it will
+ * (obviously) require more memory.
+ */
+#define EVENTS_PER_GETN 8
+
+/*
+ * Per-file-descriptor information about what events we're subscribed to. These
+ * fields are NULL if no event is subscribed to either of them.
+ */
+
+struct fd_info {
+ struct event* fdi_revt; /* the event responsible for the "read" */
+ struct event* fdi_wevt; /* the event responsible for the "write" */
+};
+
+#define FDI_HAS_READ(fdi) ((fdi)->fdi_revt != NULL)
+#define FDI_HAS_WRITE(fdi) ((fdi)->fdi_wevt != NULL)
+#define FDI_HAS_EVENTS(fdi) (FDI_HAS_READ(fdi) || FDI_HAS_WRITE(fdi))
+#define FDI_TO_SYSEVENTS(fdi) (FDI_HAS_READ(fdi) ? POLLIN : 0) | \
+ (FDI_HAS_WRITE(fdi) ? POLLOUT : 0)
+
+struct evport_data {
+ int ed_port; /* event port for system events */
+ int ed_nevents; /* number of allocated fdi's */
+ struct fd_info *ed_fds; /* allocated fdi table */
+ /* fdi's that we need to reassoc */
+ int ed_pending[EVENTS_PER_GETN]; /* fd's with pending events */
+};
+
+static void* evport_init (struct event_base *);
+static int evport_add (void *, struct event *);
+static int evport_del (void *, struct event *);
+static int evport_dispatch (struct event_base *, void *, struct timeval *);
+static void evport_dealloc (struct event_base *, void *);
+
+const struct eventop evportops = {
+ "evport",
+ evport_init,
+ evport_add,
+ evport_del,
+ evport_dispatch,
+ evport_dealloc,
+ 1 /* need reinit */
+};
+
+/*
+ * Initialize the event port implementation.
+ */
+
/* Allocate and set up the per-base event-port backend state: the event
 * port itself and an fd_info table of DEFAULT_NFDS entries.  Returns
 * NULL on any failure (or when EVENT_NOEVPORT is set in the
 * environment), cleaning up anything partially allocated. */
static void*
evport_init(struct event_base *base)
{
	struct evport_data *evpd;
	int i;
	/*
	 * Disable event ports when this environment variable is set
	 */
	if (getenv("EVENT_NOEVPORT"))
		return (NULL);

	if (!(evpd = calloc(1, sizeof(struct evport_data))))
		return (NULL);

	if ((evpd->ed_port = port_create()) == -1) {
		free(evpd);
		return (NULL);
	}

	/*
	 * Initialize file descriptor structure
	 */
	evpd->ed_fds = calloc(DEFAULT_NFDS, sizeof(struct fd_info));
	if (evpd->ed_fds == NULL) {
		close(evpd->ed_port);
		free(evpd);
		return (NULL);
	}
	evpd->ed_nevents = DEFAULT_NFDS;
	/* -1 marks an ed_pending slot as unused */
	for (i = 0; i < EVENTS_PER_GETN; i++)
		evpd->ed_pending[i] = -1;

	evsignal_init(base);

	return (evpd);
}
+
+#ifdef CHECK_INVARIANTS
+/*
+ * Checks some basic properties about the evport_data structure. Because it
+ * checks all file descriptors, this function can be expensive when the maximum
+ * file descriptor ever used is rather large.
+ */
+
+static void
+check_evportop(struct evport_data *evpd)
+{
+ assert(evpd);
+ assert(evpd->ed_nevents > 0);
+ assert(evpd->ed_port > 0);
+ assert(evpd->ed_fds > 0);
+
+ /*
+ * Verify the integrity of the fd_info struct as well as the events to
+ * which it points (at least, that they're valid references and correct
+ * for their position in the structure).
+ */
+ int i;
+ for (i = 0; i < evpd->ed_nevents; ++i) {
+ struct event *ev;
+ struct fd_info *fdi;
+
+ fdi = &evpd->ed_fds[i];
+ if ((ev = fdi->fdi_revt) != NULL) {
+ assert(ev->ev_fd == i);
+ }
+ if ((ev = fdi->fdi_wevt) != NULL) {
+ assert(ev->ev_fd == i);
+ }
+ }
+}
+
+/*
+ * Verifies very basic integrity of a given port_event.
+ */
/* Debug-only sanity check of a single port_event_t from port_getn(). */
static void
check_event(port_event_t* pevt)
{
	/*
	 * We've only registered for PORT_SOURCE_FD events. The only
	 * other thing we can legitimately receive is PORT_SOURCE_ALERT,
	 * but since we're not using port_alert either, we can assume
	 * PORT_SOURCE_FD.
	 */
	assert(pevt->portev_source == PORT_SOURCE_FD);
	assert(pevt->portev_user == NULL);
}
+
+#else
+#define check_evportop(epop)
+#define check_event(pevt)
+#endif /* CHECK_INVARIANTS */
+
+/*
+ * Doubles the size of the allocated file descriptor array.
+ */
+static int
+grow(struct evport_data *epdp, int factor)
+{
+ struct fd_info *tmp;
+ int oldsize = epdp->ed_nevents;
+ int newsize = factor * oldsize;
+ assert(factor > 1);
+
+ check_evportop(epdp);
+
+ tmp = realloc(epdp->ed_fds, sizeof(struct fd_info) * newsize);
+ if (NULL == tmp)
+ return -1;
+ epdp->ed_fds = tmp;
+ memset((char*) (epdp->ed_fds + oldsize), 0,
+ (newsize - oldsize)*sizeof(struct fd_info));
+ epdp->ed_nevents = newsize;
+
+ check_evportop(epdp);
+
+ return 0;
+}
+
+
+/*
+ * (Re)associates the given file descriptor with the event port. The OS events
+ * are specified (implicitly) from the fd_info struct.
+ */
/* (Re)register "fd" with the event port; the POLLIN/POLLOUT interest set
 * is derived from which fdi_revt/fdi_wevt pointers are non-NULL.  A
 * no-op when neither event is set.  Returns 0 on success, -1 on
 * port_associate() failure. */
static int
reassociate(struct evport_data *epdp, struct fd_info *fdip, int fd)
{
	int sysevents = FDI_TO_SYSEVENTS(fdip);

	if (sysevents != 0) {
		if (port_associate(epdp->ed_port, PORT_SOURCE_FD,
			fd, sysevents, NULL) == -1) {
			event_warn("port_associate");
			return (-1);
		}
	}

	check_evportop(epdp);

	return (0);
}
+
+/*
+ * Main event loop - polls port_getn for some number of events, and processes
+ * them.
+ */
+
/* One pass of the event loop: reassociate fds delivered last pass (event
 * ports auto-dissociate an fd once it fires), wait in port_getn() for up
 * to EVENTS_PER_GETN events or the "tv" timeout, then activate the
 * matching read/write events.  Returns 0 normally, -1 on backend error. */
static int
evport_dispatch(struct event_base *base, void *arg, struct timeval *tv)
{
	int i, res;
	struct evport_data *epdp = arg;
	port_event_t pevtlist[EVENTS_PER_GETN];

	/*
	 * port_getn will block until it has at least nevents events. It will
	 * also return how many it's given us (which may be more than we asked
	 * for, as long as it's less than our maximum (EVENTS_PER_GETN)) in
	 * nevents.
	 */
	int nevents = 1;

	/*
	 * We have to convert a struct timeval to a struct timespec
	 * (only difference is nanoseconds vs. microseconds). If no time-based
	 * events are active, we should wait for I/O (and tv == NULL).
	 */
	struct timespec ts;
	struct timespec *ts_p = NULL;
	if (tv != NULL) {
		ts.tv_sec = tv->tv_sec;
		ts.tv_nsec = tv->tv_usec * 1000;
		ts_p = &ts;
	}

	/*
	 * Before doing anything else, we need to reassociate the events we hit
	 * last time which need reassociation. See comment at the end of the
	 * loop below.
	 */
	for (i = 0; i < EVENTS_PER_GETN; ++i) {
		struct fd_info *fdi = NULL;
		if (epdp->ed_pending[i] != -1) {
			fdi = &(epdp->ed_fds[epdp->ed_pending[i]]);
		}

		if (fdi != NULL && FDI_HAS_EVENTS(fdi)) {
			int fd = FDI_HAS_READ(fdi) ? fdi->fdi_revt->ev_fd :
			    fdi->fdi_wevt->ev_fd;
			reassociate(epdp, fdi, fd);
			epdp->ed_pending[i] = -1;
		}
	}

	if ((res = port_getn(epdp->ed_port, pevtlist, EVENTS_PER_GETN,
		    (unsigned int *) &nevents, ts_p)) == -1) {
		/* interrupted: deliver any caught signals and retry later */
		if (errno == EINTR || errno == EAGAIN) {
			evsignal_process(base);
			return (0);
		} else if (errno == ETIME) {
			/* ETIME with nevents != 0 means some events arrived
			 * before the timeout; fall through to process them */
			if (nevents == 0)
				return (0);
		} else {
			event_warn("port_getn");
			return (-1);
		}
	} else if (base->sig.evsignal_caught) {
		evsignal_process(base);
	}

	event_debug(("%s: port_getn reports %d events", __func__, nevents));

	for (i = 0; i < nevents; ++i) {
		struct event *ev;
		struct fd_info *fdi;
		port_event_t *pevt = &pevtlist[i];
		int fd = (int) pevt->portev_object;

		check_evportop(epdp);
		check_event(pevt);
		/* remember this fd so the reassociation loop above picks it
		 * up on the next dispatch pass */
		epdp->ed_pending[i] = fd;

		/*
		 * Figure out what kind of event it was
		 * (because we have to pass this to the callback)
		 */
		res = 0;
		if (pevt->portev_events & POLLIN)
			res |= EV_READ;
		if (pevt->portev_events & POLLOUT)
			res |= EV_WRITE;

		assert(epdp->ed_nevents > fd);
		fdi = &(epdp->ed_fds[fd]);

		/*
		 * We now check for each of the possible events (READ
		 * or WRITE). Then, we activate the event (which will
		 * cause its callback to be executed).
		 */

		if ((res & EV_READ) && ((ev = fdi->fdi_revt) != NULL)) {
			event_active(ev, res, 1);
		}

		if ((res & EV_WRITE) && ((ev = fdi->fdi_wevt) != NULL)) {
			event_active(ev, res, 1);
		}
	} /* end of all events gotten */

	check_evportop(epdp);

	return (0);
}
+
+
+/*
+ * Adds the given event (so that you will be notified when it happens via
+ * the callback function).
+ */
+
/* Register "ev" with the backend: signals are delegated to evsignal,
 * fd events are recorded in the fd_info table (growing it if ev_fd is
 * beyond the current size) and associated with the event port.
 * Returns 0 on success, -1 on failure. */
static int
evport_add(void *arg, struct event *ev)
{
	struct evport_data *evpd = arg;
	struct fd_info *fdi;
	int factor;

	check_evportop(evpd);

	/*
	 * Delegate, if it's not ours to handle.
	 */
	if (ev->ev_events & EV_SIGNAL)
		return (evsignal_add(ev));

	/*
	 * If necessary, grow the file descriptor info table
	 */

	/* smallest power-of-two multiple of the table size that can
	 * index ev_fd */
	factor = 1;
	while (ev->ev_fd >= factor * evpd->ed_nevents)
		factor *= 2;

	if (factor > 1) {
		if (-1 == grow(evpd, factor)) {
			return (-1);
		}
	}

	/* reassociate() derives the interest set from these pointers */
	fdi = &evpd->ed_fds[ev->ev_fd];
	if (ev->ev_events & EV_READ)
		fdi->fdi_revt = ev;
	if (ev->ev_events & EV_WRITE)
		fdi->fdi_wevt = ev;

	return reassociate(evpd, fdi, ev->ev_fd);
}
+
+/*
+ * Removes the given event from the list of events to wait for.
+ */
+
+static int
+evport_del(void *arg, struct event *ev)
+{
+ struct evport_data *evpd = arg;
+ struct fd_info *fdi;
+ int i;
+ int associated = 1;
+
+ check_evportop(evpd);
+
+ /*
+ * Delegate, if it's not ours to handle
+ */
+ if (ev->ev_events & EV_SIGNAL) {
+ return (evsignal_del(ev));
+ }
+
+ if (evpd->ed_nevents < ev->ev_fd) {
+ return (-1);
+ }
+
+ for (i = 0; i < EVENTS_PER_GETN; ++i) {
+ if (evpd->ed_pending[i] == ev->ev_fd) {
+ associated = 0;
+ break;
+ }
+ }
+
+ fdi = &evpd->ed_fds[ev->ev_fd];
+ if (ev->ev_events & EV_READ)
+ fdi->fdi_revt = NULL;
+ if (ev->ev_events & EV_WRITE)
+ fdi->fdi_wevt = NULL;
+
+ if (associated) {
+ if (!FDI_HAS_EVENTS(fdi) &&
+ port_dissociate(evpd->ed_port, PORT_SOURCE_FD,
+ ev->ev_fd) == -1) {
+ /*
+ * Ignre EBADFD error the fd could have been closed
+ * before event_del() was called.
+ */
+ if (errno != EBADFD) {
+ event_warn("port_dissociate");
+ return (-1);
+ }
+ } else {
+ if (FDI_HAS_EVENTS(fdi)) {
+ return (reassociate(evpd, fdi, ev->ev_fd));
+ }
+ }
+ } else {
+ if (fdi->fdi_revt == NULL && fdi->fdi_wevt == NULL) {
+ evpd->ed_pending[i] = -1;
+ }
+ }
+ return 0;
+}
+
+
+static void
+evport_dealloc(struct event_base *base, void *arg)
+{
+ struct evport_data *evpd = arg;
+
+ evsignal_dealloc(base);
+
+ close(evpd->ed_port);
+
+ if (evpd->ed_fds)
+ free(evpd->ed_fds);
+ free(evpd);
+}
diff --git a/libevent/evrpc-internal.h b/libevent/evrpc-internal.h
new file mode 100644
index 00000000000..c900f959f97
--- /dev/null
+++ b/libevent/evrpc-internal.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2006 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _EVRPC_INTERNAL_H_
+#define _EVRPC_INTERNAL_H_
+
+#include "http-internal.h"
+
+struct evrpc;
+
+#define EVRPC_URI_PREFIX "/.rpc."
+
+struct evrpc_hook {
+ TAILQ_ENTRY(evrpc_hook) (next);
+
+	/* returns -1 if the rpc should be aborted; is allowed to rewrite */
+ int (*process)(struct evhttp_request *, struct evbuffer *, void *);
+ void *process_arg;
+};
+
+TAILQ_HEAD(evrpc_hook_list, evrpc_hook);
+
+/*
+ * this is shared between the base and the pool, so that we can reuse
+ * the hook adding functions; we alias both evrpc_pool and evrpc_base
+ * to this common structure.
+ */
+struct _evrpc_hooks {
+ /* hooks for processing outbound and inbound rpcs */
+ struct evrpc_hook_list in_hooks;
+ struct evrpc_hook_list out_hooks;
+};
+
+#define input_hooks common.in_hooks
+#define output_hooks common.out_hooks
+
+struct evrpc_base {
+ struct _evrpc_hooks common;
+
+ /* the HTTP server under which we register our RPC calls */
+ struct evhttp* http_server;
+
+ /* a list of all RPCs registered with us */
+ TAILQ_HEAD(evrpc_list, evrpc) registered_rpcs;
+};
+
+struct evrpc_req_generic;
+void evrpc_reqstate_free(struct evrpc_req_generic* rpc_state);
+
+/* A pool for holding evhttp_connection objects */
+struct evrpc_pool {
+ struct _evrpc_hooks common;
+
+ struct event_base *base;
+
+ struct evconq connections;
+
+ int timeout;
+
+ TAILQ_HEAD(evrpc_requestq, evrpc_request_wrapper) requests;
+};
+
+
+#endif /* _EVRPC_INTERNAL_H_ */
diff --git a/libevent/evrpc.c b/libevent/evrpc.c
new file mode 100644
index 00000000000..8b3b071d0bf
--- /dev/null
+++ b/libevent/evrpc.c
@@ -0,0 +1,661 @@
+/*
+ * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#ifdef WIN32
+#define WIN32_LEAN_AND_MEAN
+#include <winsock2.h>
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+#endif
+
+#include <sys/types.h>
+#ifndef WIN32
+#include <sys/socket.h>
+#endif
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <sys/queue.h>
+#include <stdio.h>
+#include <stdlib.h>
+#ifndef WIN32
+#include <unistd.h>
+#endif
+#ifndef HAVE_TAILQFOREACH
+#include <event-internal.h>
+#endif
+
+#include <errno.h>
+#include <signal.h>
+#include <string.h>
+#include <assert.h>
+
+#include "event.h"
+#include "evrpc.h"
+#include "evrpc-internal.h"
+#include "evhttp.h"
+#include "evutil.h"
+#include "log.h"
+
+struct evrpc_base *
+evrpc_init(struct evhttp *http_server)
+{
+ struct evrpc_base* base = calloc(1, sizeof(struct evrpc_base));
+ if (base == NULL)
+ return (NULL);
+
+ /* we rely on the tagging sub system */
+ evtag_init();
+
+ TAILQ_INIT(&base->registered_rpcs);
+ TAILQ_INIT(&base->input_hooks);
+ TAILQ_INIT(&base->output_hooks);
+ base->http_server = http_server;
+
+ return (base);
+}
+
+void
+evrpc_free(struct evrpc_base *base)
+{
+ struct evrpc *rpc;
+ struct evrpc_hook *hook;
+
+ while ((rpc = TAILQ_FIRST(&base->registered_rpcs)) != NULL) {
+ assert(evrpc_unregister_rpc(base, rpc->uri));
+ }
+ while ((hook = TAILQ_FIRST(&base->input_hooks)) != NULL) {
+ assert(evrpc_remove_hook(base, EVRPC_INPUT, hook));
+ }
+ while ((hook = TAILQ_FIRST(&base->output_hooks)) != NULL) {
+ assert(evrpc_remove_hook(base, EVRPC_OUTPUT, hook));
+ }
+ free(base);
+}
+
+void *
+evrpc_add_hook(void *vbase,
+ enum EVRPC_HOOK_TYPE hook_type,
+ int (*cb)(struct evhttp_request *, struct evbuffer *, void *),
+ void *cb_arg)
+{
+ struct _evrpc_hooks *base = vbase;
+ struct evrpc_hook_list *head = NULL;
+ struct evrpc_hook *hook = NULL;
+ switch (hook_type) {
+ case EVRPC_INPUT:
+ head = &base->in_hooks;
+ break;
+ case EVRPC_OUTPUT:
+ head = &base->out_hooks;
+ break;
+ default:
+ assert(hook_type == EVRPC_INPUT || hook_type == EVRPC_OUTPUT);
+ }
+
+ hook = calloc(1, sizeof(struct evrpc_hook));
+ assert(hook != NULL);
+
+ hook->process = cb;
+ hook->process_arg = cb_arg;
+ TAILQ_INSERT_TAIL(head, hook, next);
+
+ return (hook);
+}
+
+static int
+evrpc_remove_hook_internal(struct evrpc_hook_list *head, void *handle)
+{
+ struct evrpc_hook *hook = NULL;
+ TAILQ_FOREACH(hook, head, next) {
+ if (hook == handle) {
+ TAILQ_REMOVE(head, hook, next);
+ free(hook);
+ return (1);
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * remove the hook specified by the handle
+ */
+
+int
+evrpc_remove_hook(void *vbase, enum EVRPC_HOOK_TYPE hook_type, void *handle)
+{
+ struct _evrpc_hooks *base = vbase;
+ struct evrpc_hook_list *head = NULL;
+ switch (hook_type) {
+ case EVRPC_INPUT:
+ head = &base->in_hooks;
+ break;
+ case EVRPC_OUTPUT:
+ head = &base->out_hooks;
+ break;
+ default:
+ assert(hook_type == EVRPC_INPUT || hook_type == EVRPC_OUTPUT);
+ }
+
+ return (evrpc_remove_hook_internal(head, handle));
+}
+
+static int
+evrpc_process_hooks(struct evrpc_hook_list *head,
+ struct evhttp_request *req, struct evbuffer *evbuf)
+{
+ struct evrpc_hook *hook;
+ TAILQ_FOREACH(hook, head, next) {
+ if (hook->process(req, evbuf, hook->process_arg) == -1)
+ return (-1);
+ }
+
+ return (0);
+}
+
+static void evrpc_pool_schedule(struct evrpc_pool *pool);
+static void evrpc_request_cb(struct evhttp_request *, void *);
+void evrpc_request_done(struct evrpc_req_generic*);
+
+/*
+ * Registers a new RPC with the HTTP server. The evrpc object is expected
+ * to have been filled in via the EVRPC_REGISTER_OBJECT macro which in turn
+ * calls this function.
+ */
+
+static char *
+evrpc_construct_uri(const char *uri)
+{
+ char *constructed_uri;
+ int constructed_uri_len;
+
+ constructed_uri_len = strlen(EVRPC_URI_PREFIX) + strlen(uri) + 1;
+ if ((constructed_uri = malloc(constructed_uri_len)) == NULL)
+ event_err(1, "%s: failed to register rpc at %s",
+ __func__, uri);
+ memcpy(constructed_uri, EVRPC_URI_PREFIX, strlen(EVRPC_URI_PREFIX));
+ memcpy(constructed_uri + strlen(EVRPC_URI_PREFIX), uri, strlen(uri));
+ constructed_uri[constructed_uri_len - 1] = '\0';
+
+ return (constructed_uri);
+}
+
+int
+evrpc_register_rpc(struct evrpc_base *base, struct evrpc *rpc,
+ void (*cb)(struct evrpc_req_generic *, void *), void *cb_arg)
+{
+ char *constructed_uri = evrpc_construct_uri(rpc->uri);
+
+ rpc->base = base;
+ rpc->cb = cb;
+ rpc->cb_arg = cb_arg;
+
+ TAILQ_INSERT_TAIL(&base->registered_rpcs, rpc, next);
+
+ evhttp_set_cb(base->http_server,
+ constructed_uri,
+ evrpc_request_cb,
+ rpc);
+
+ free(constructed_uri);
+
+ return (0);
+}
+
+int
+evrpc_unregister_rpc(struct evrpc_base *base, const char *name)
+{
+ char *registered_uri = NULL;
+ struct evrpc *rpc;
+
+ /* find the right rpc; linear search might be slow */
+ TAILQ_FOREACH(rpc, &base->registered_rpcs, next) {
+ if (strcmp(rpc->uri, name) == 0)
+ break;
+ }
+ if (rpc == NULL) {
+ /* We did not find an RPC with this name */
+ return (-1);
+ }
+ TAILQ_REMOVE(&base->registered_rpcs, rpc, next);
+
+ free((char *)rpc->uri);
+ free(rpc);
+
+ registered_uri = evrpc_construct_uri(name);
+
+ /* remove the http server callback */
+ assert(evhttp_del_cb(base->http_server, registered_uri) == 0);
+
+ free(registered_uri);
+ return (0);
+}
+
+static void
+evrpc_request_cb(struct evhttp_request *req, void *arg)
+{
+ struct evrpc *rpc = arg;
+ struct evrpc_req_generic *rpc_state = NULL;
+
+ /* let's verify the outside parameters */
+ if (req->type != EVHTTP_REQ_POST ||
+ EVBUFFER_LENGTH(req->input_buffer) <= 0)
+ goto error;
+
+ /*
+ * we might want to allow hooks to suspend the processing,
+ * but at the moment, we assume that they just act as simple
+ * filters.
+ */
+ if (evrpc_process_hooks(&rpc->base->input_hooks,
+ req, req->input_buffer) == -1)
+ goto error;
+
+ rpc_state = calloc(1, sizeof(struct evrpc_req_generic));
+ if (rpc_state == NULL)
+ goto error;
+
+ /* let's check that we can parse the request */
+ rpc_state->request = rpc->request_new();
+ if (rpc_state->request == NULL)
+ goto error;
+
+ rpc_state->rpc = rpc;
+
+ if (rpc->request_unmarshal(
+ rpc_state->request, req->input_buffer) == -1) {
+ /* we failed to parse the request; that's a bummer */
+ goto error;
+ }
+
+ /* at this point, we have a well formed request, prepare the reply */
+
+ rpc_state->reply = rpc->reply_new();
+ if (rpc_state->reply == NULL)
+ goto error;
+
+ rpc_state->http_req = req;
+ rpc_state->done = evrpc_request_done;
+
+ /* give the rpc to the user; they can deal with it */
+ rpc->cb(rpc_state, rpc->cb_arg);
+
+ return;
+
+error:
+ evrpc_reqstate_free(rpc_state);
+ evhttp_send_error(req, HTTP_SERVUNAVAIL, "Service Error");
+ return;
+}
+
+void
+evrpc_reqstate_free(struct evrpc_req_generic* rpc_state)
+{
+ /* clean up all memory */
+ if (rpc_state != NULL) {
+ struct evrpc *rpc = rpc_state->rpc;
+
+ if (rpc_state->request != NULL)
+ rpc->request_free(rpc_state->request);
+ if (rpc_state->reply != NULL)
+ rpc->reply_free(rpc_state->reply);
+ free(rpc_state);
+ }
+}
+
+void
+evrpc_request_done(struct evrpc_req_generic* rpc_state)
+{
+ struct evhttp_request *req = rpc_state->http_req;
+ struct evrpc *rpc = rpc_state->rpc;
+ struct evbuffer* data = NULL;
+
+ if (rpc->reply_complete(rpc_state->reply) == -1) {
+ /* the reply was not completely filled in. error out */
+ goto error;
+ }
+
+ if ((data = evbuffer_new()) == NULL) {
+ /* out of memory */
+ goto error;
+ }
+
+ /* serialize the reply */
+ rpc->reply_marshal(data, rpc_state->reply);
+
+ /* do hook based tweaks to the request */
+ if (evrpc_process_hooks(&rpc->base->output_hooks,
+ req, data) == -1)
+ goto error;
+
+ /* on success, we are going to transmit marshaled binary data */
+ if (evhttp_find_header(req->output_headers, "Content-Type") == NULL) {
+ evhttp_add_header(req->output_headers,
+ "Content-Type", "application/octet-stream");
+ }
+
+ evhttp_send_reply(req, HTTP_OK, "OK", data);
+
+ evbuffer_free(data);
+
+ evrpc_reqstate_free(rpc_state);
+
+ return;
+
+error:
+ if (data != NULL)
+ evbuffer_free(data);
+ evrpc_reqstate_free(rpc_state);
+ evhttp_send_error(req, HTTP_SERVUNAVAIL, "Service Error");
+ return;
+}
+
+/* Client implementation of RPC site */
+
+static int evrpc_schedule_request(struct evhttp_connection *connection,
+ struct evrpc_request_wrapper *ctx);
+
+struct evrpc_pool *
+evrpc_pool_new(struct event_base *base)
+{
+ struct evrpc_pool *pool = calloc(1, sizeof(struct evrpc_pool));
+ if (pool == NULL)
+ return (NULL);
+
+ TAILQ_INIT(&pool->connections);
+ TAILQ_INIT(&pool->requests);
+
+ TAILQ_INIT(&pool->input_hooks);
+ TAILQ_INIT(&pool->output_hooks);
+
+ pool->base = base;
+ pool->timeout = -1;
+
+ return (pool);
+}
+
+static void
+evrpc_request_wrapper_free(struct evrpc_request_wrapper *request)
+{
+ free(request->name);
+ free(request);
+}
+
+void
+evrpc_pool_free(struct evrpc_pool *pool)
+{
+ struct evhttp_connection *connection;
+ struct evrpc_request_wrapper *request;
+ struct evrpc_hook *hook;
+
+ while ((request = TAILQ_FIRST(&pool->requests)) != NULL) {
+ TAILQ_REMOVE(&pool->requests, request, next);
+ /* if this gets more complicated we need our own function */
+ evrpc_request_wrapper_free(request);
+ }
+
+ while ((connection = TAILQ_FIRST(&pool->connections)) != NULL) {
+ TAILQ_REMOVE(&pool->connections, connection, next);
+ evhttp_connection_free(connection);
+ }
+
+ while ((hook = TAILQ_FIRST(&pool->input_hooks)) != NULL) {
+ assert(evrpc_remove_hook(pool, EVRPC_INPUT, hook));
+ }
+
+ while ((hook = TAILQ_FIRST(&pool->output_hooks)) != NULL) {
+ assert(evrpc_remove_hook(pool, EVRPC_OUTPUT, hook));
+ }
+
+ free(pool);
+}
+
+/*
+ * Add a connection to the RPC pool. A request scheduled on the pool
+ * may use any available connection.
+ */
+
+void
+evrpc_pool_add_connection(struct evrpc_pool *pool,
+ struct evhttp_connection *connection) {
+ assert(connection->http_server == NULL);
+ TAILQ_INSERT_TAIL(&pool->connections, connection, next);
+
+ /*
+ * associate an event base with this connection
+ */
+ if (pool->base != NULL)
+ evhttp_connection_set_base(connection, pool->base);
+
+ /*
+ * unless a timeout was specifically set for a connection,
+ * the connection inherits the timeout from the pool.
+ */
+ if (connection->timeout == -1)
+ connection->timeout = pool->timeout;
+
+ /*
+ * if we have any requests pending, schedule them with the new
+ * connections.
+ */
+
+ if (TAILQ_FIRST(&pool->requests) != NULL) {
+ struct evrpc_request_wrapper *request =
+ TAILQ_FIRST(&pool->requests);
+ TAILQ_REMOVE(&pool->requests, request, next);
+ evrpc_schedule_request(connection, request);
+ }
+}
+
+void
+evrpc_pool_set_timeout(struct evrpc_pool *pool, int timeout_in_secs)
+{
+ struct evhttp_connection *evcon;
+ TAILQ_FOREACH(evcon, &pool->connections, next) {
+ evcon->timeout = timeout_in_secs;
+ }
+ pool->timeout = timeout_in_secs;
+}
+
+
+static void evrpc_reply_done(struct evhttp_request *, void *);
+static void evrpc_request_timeout(int, short, void *);
+
+/*
+ * Finds a connection object associated with the pool that is currently
+ * idle and can be used to make a request.
+ */
+static struct evhttp_connection *
+evrpc_pool_find_connection(struct evrpc_pool *pool)
+{
+ struct evhttp_connection *connection;
+ TAILQ_FOREACH(connection, &pool->connections, next) {
+ if (TAILQ_FIRST(&connection->requests) == NULL)
+ return (connection);
+ }
+
+ return (NULL);
+}
+
+/*
+ * We assume that the ctx is no longer queued on the pool.
+ */
+static int
+evrpc_schedule_request(struct evhttp_connection *connection,
+ struct evrpc_request_wrapper *ctx)
+{
+ struct evhttp_request *req = NULL;
+ struct evrpc_pool *pool = ctx->pool;
+ struct evrpc_status status;
+ char *uri = NULL;
+ int res = 0;
+
+ if ((req = evhttp_request_new(evrpc_reply_done, ctx)) == NULL)
+ goto error;
+
+ /* serialize the request data into the output buffer */
+ ctx->request_marshal(req->output_buffer, ctx->request);
+
+ uri = evrpc_construct_uri(ctx->name);
+ if (uri == NULL)
+ goto error;
+
+ /* we need to know the connection that we might have to abort */
+ ctx->evcon = connection;
+
+ /* apply hooks to the outgoing request */
+ if (evrpc_process_hooks(&pool->output_hooks,
+ req, req->output_buffer) == -1)
+ goto error;
+
+ if (pool->timeout > 0) {
+ /*
+ * a timeout after which the whole rpc is going to be aborted.
+ */
+ struct timeval tv;
+ evutil_timerclear(&tv);
+ tv.tv_sec = pool->timeout;
+ evtimer_add(&ctx->ev_timeout, &tv);
+ }
+
+ /* start the request over the connection */
+ res = evhttp_make_request(connection, req, EVHTTP_REQ_POST, uri);
+ free(uri);
+
+ if (res == -1)
+ goto error;
+
+ return (0);
+
+error:
+ memset(&status, 0, sizeof(status));
+ status.error = EVRPC_STATUS_ERR_UNSTARTED;
+ (*ctx->cb)(&status, ctx->request, ctx->reply, ctx->cb_arg);
+ evrpc_request_wrapper_free(ctx);
+ return (-1);
+}
+
+int
+evrpc_make_request(struct evrpc_request_wrapper *ctx)
+{
+ struct evrpc_pool *pool = ctx->pool;
+
+ /* initialize the event structure for this rpc */
+ evtimer_set(&ctx->ev_timeout, evrpc_request_timeout, ctx);
+ if (pool->base != NULL)
+ event_base_set(pool->base, &ctx->ev_timeout);
+
+ /* we better have some available connections on the pool */
+ assert(TAILQ_FIRST(&pool->connections) != NULL);
+
+ /*
+ * if no connection is available, we queue the request on the pool,
+ * the next time a connection is empty, the rpc will be sent on it.
+ */
+ TAILQ_INSERT_TAIL(&pool->requests, ctx, next);
+
+ evrpc_pool_schedule(pool);
+
+ return (0);
+}
+
+static void
+evrpc_reply_done(struct evhttp_request *req, void *arg)
+{
+ struct evrpc_request_wrapper *ctx = arg;
+ struct evrpc_pool *pool = ctx->pool;
+ struct evrpc_status status;
+ int res = -1;
+
+ /* cancel any timeout we might have scheduled */
+ event_del(&ctx->ev_timeout);
+
+ memset(&status, 0, sizeof(status));
+ status.http_req = req;
+
+ /* we need to get the reply now */
+ if (req != NULL) {
+ /* apply hooks to the incoming request */
+ if (evrpc_process_hooks(&pool->input_hooks,
+ req, req->input_buffer) == -1) {
+ status.error = EVRPC_STATUS_ERR_HOOKABORTED;
+ res = -1;
+ } else {
+ res = ctx->reply_unmarshal(ctx->reply,
+ req->input_buffer);
+ if (res == -1) {
+ status.error = EVRPC_STATUS_ERR_BADPAYLOAD;
+ }
+ }
+ } else {
+ status.error = EVRPC_STATUS_ERR_TIMEOUT;
+ }
+
+ if (res == -1) {
+ /* clear everything that we might have written previously */
+ ctx->reply_clear(ctx->reply);
+ }
+
+ (*ctx->cb)(&status, ctx->request, ctx->reply, ctx->cb_arg);
+
+ evrpc_request_wrapper_free(ctx);
+
+ /* the http layer owns the request structure */
+
+ /* see if we can schedule another request */
+ evrpc_pool_schedule(pool);
+}
+
+static void
+evrpc_pool_schedule(struct evrpc_pool *pool)
+{
+ struct evrpc_request_wrapper *ctx = TAILQ_FIRST(&pool->requests);
+ struct evhttp_connection *evcon;
+
+ /* if no requests are pending, we have no work */
+ if (ctx == NULL)
+ return;
+
+ if ((evcon = evrpc_pool_find_connection(pool)) != NULL) {
+ TAILQ_REMOVE(&pool->requests, ctx, next);
+ evrpc_schedule_request(evcon, ctx);
+ }
+}
+
+static void
+evrpc_request_timeout(int fd, short what, void *arg)
+{
+ struct evrpc_request_wrapper *ctx = arg;
+ struct evhttp_connection *evcon = ctx->evcon;
+ assert(evcon != NULL);
+
+ evhttp_connection_fail(evcon, EVCON_HTTP_TIMEOUT);
+}
diff --git a/libevent/evrpc.h b/libevent/evrpc.h
new file mode 100644
index 00000000000..7c16b95c775
--- /dev/null
+++ b/libevent/evrpc.h
@@ -0,0 +1,486 @@
+/*
+ * Copyright (c) 2006 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _EVRPC_H_
+#define _EVRPC_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @file evrpc.h
+ *
+ * This header file provides basic support for an RPC server and client.
+ *
+ * To support RPCs in a server, every supported RPC command needs to be
+ * defined and registered.
+ *
+ * EVRPC_HEADER(SendCommand, Request, Reply);
+ *
+ * SendCommand is the name of the RPC command.
+ * Request is the name of a structure generated by event_rpcgen.py.
+ * It contains all parameters relating to the SendCommand RPC. The
+ * server needs to fill in the Reply structure.
+ * Reply is the name of a structure generated by event_rpcgen.py. It
+ * contains the answer to the RPC.
+ *
+ * To register an RPC with an HTTP server, you need to first create an RPC
+ * base with:
+ *
+ * struct evrpc_base *base = evrpc_init(http);
+ *
+ * A specific RPC can then be registered with
+ *
+ * EVRPC_REGISTER(base, SendCommand, Request, Reply, FunctionCB, arg);
+ *
+ * when the server receives an appropriately formatted RPC, the user callback
+ * is invoked. The callback needs to fill in the reply structure.
+ *
+ * void FunctionCB(EVRPC_STRUCT(SendCommand)* rpc, void *arg);
+ *
+ * To send the reply, call EVRPC_REQUEST_DONE(rpc);
+ *
+ * See the regression test for an example.
+ */
+
+struct evbuffer;
+struct event_base;
+struct evrpc_req_generic;
+
+/* Encapsulates a request */
+struct evrpc {
+ TAILQ_ENTRY(evrpc) next;
+
+ /* the URI at which the request handler lives */
+ const char* uri;
+
+ /* creates a new request structure */
+ void *(*request_new)(void);
+
+ /* frees the request structure */
+ void (*request_free)(void *);
+
+ /* unmarshals the buffer into the proper request structure */
+ int (*request_unmarshal)(void *, struct evbuffer *);
+
+ /* creates a new reply structure */
+ void *(*reply_new)(void);
+
+	/* frees the reply structure */
+ void (*reply_free)(void *);
+
+ /* verifies that the reply is valid */
+ int (*reply_complete)(void *);
+
+ /* marshals the reply into a buffer */
+ void (*reply_marshal)(struct evbuffer*, void *);
+
+ /* the callback invoked for each received rpc */
+ void (*cb)(struct evrpc_req_generic *, void *);
+ void *cb_arg;
+
+ /* reference for further configuration */
+ struct evrpc_base *base;
+};
+
+/** The type of a specific RPC Message
+ *
+ * @param rpcname the name of the RPC message
+ */
+#define EVRPC_STRUCT(rpcname) struct evrpc_req__##rpcname
+
+struct evhttp_request;
+struct evrpc_status;
+
+/* We alias the RPC specific structs to this voided one */
+struct evrpc_req_generic {
+ /* the unmarshaled request object */
+ void *request;
+
+ /* the empty reply object that needs to be filled in */
+ void *reply;
+
+ /*
+ * the static structure for this rpc; that can be used to
+ * automatically unmarshal and marshal the http buffers.
+ */
+ struct evrpc *rpc;
+
+ /*
+ * the http request structure on which we need to answer.
+ */
+ struct evhttp_request* http_req;
+
+ /*
+ * callback to reply and finish answering this rpc
+ */
+ void (*done)(struct evrpc_req_generic* rpc);
+};
+
+/** Creates the definitions and prototypes for an RPC
+ *
+ * You need to use EVRPC_HEADER to create structures and function prototypes
+ * needed by the server and client implementation. The structures have to be
+ * defined in an .rpc file and converted to source code via event_rpcgen.py
+ *
+ * @param rpcname the name of the RPC
+ * @param reqstruct the name of the RPC request structure
+ * @param replystruct the name of the RPC reply structure
+ * @see EVRPC_GENERATE()
+ */
+#define EVRPC_HEADER(rpcname, reqstruct, rplystruct) \
+EVRPC_STRUCT(rpcname) { \
+ struct reqstruct* request; \
+ struct rplystruct* reply; \
+ struct evrpc* rpc; \
+ struct evhttp_request* http_req; \
+ void (*done)(struct evrpc_status *, \
+ struct evrpc* rpc, void *request, void *reply); \
+}; \
+int evrpc_send_request_##rpcname(struct evrpc_pool *, \
+ struct reqstruct *, struct rplystruct *, \
+ void (*)(struct evrpc_status *, \
+ struct reqstruct *, struct rplystruct *, void *cbarg), \
+ void *);
+
+/** Generates the code for receiving and sending an RPC message
+ *
+ * EVRPC_GENERATE is used to create the code corresponding to sending
+ * and receiving a particular RPC message
+ *
+ * @param rpcname the name of the RPC
+ * @param reqstruct the name of the RPC request structure
+ * @param replystruct the name of the RPC reply structure
+ * @see EVRPC_HEADER()
+ */
+#define EVRPC_GENERATE(rpcname, reqstruct, rplystruct) \
+int evrpc_send_request_##rpcname(struct evrpc_pool *pool, \
+ struct reqstruct *request, struct rplystruct *reply, \
+ void (*cb)(struct evrpc_status *, \
+ struct reqstruct *, struct rplystruct *, void *cbarg), \
+ void *cbarg) { \
+ struct evrpc_status status; \
+ struct evrpc_request_wrapper *ctx; \
+ ctx = (struct evrpc_request_wrapper *) \
+ malloc(sizeof(struct evrpc_request_wrapper)); \
+ if (ctx == NULL) \
+ goto error; \
+ ctx->pool = pool; \
+ ctx->evcon = NULL; \
+ ctx->name = strdup(#rpcname); \
+ if (ctx->name == NULL) { \
+ free(ctx); \
+ goto error; \
+ } \
+ ctx->cb = (void (*)(struct evrpc_status *, \
+ void *, void *, void *))cb; \
+ ctx->cb_arg = cbarg; \
+ ctx->request = (void *)request; \
+ ctx->reply = (void *)reply; \
+ ctx->request_marshal = (void (*)(struct evbuffer *, void *))reqstruct##_marshal; \
+ ctx->reply_clear = (void (*)(void *))rplystruct##_clear; \
+ ctx->reply_unmarshal = (int (*)(void *, struct evbuffer *))rplystruct##_unmarshal; \
+ return (evrpc_make_request(ctx)); \
+error: \
+ memset(&status, 0, sizeof(status)); \
+ status.error = EVRPC_STATUS_ERR_UNSTARTED; \
+ (*(cb))(&status, request, reply, cbarg); \
+ return (-1); \
+}
+
+/** Provides access to the HTTP request object underlying an RPC
+ *
+ * Access to the underlying http object; can be used to look at headers or
+ * for getting the remote ip address
+ *
+ * @param rpc_req the rpc request structure provided to the server callback
+ * @return a struct evhttp_request object that can be inspected for
+ * HTTP headers or sender information.
+ */
+#define EVRPC_REQUEST_HTTP(rpc_req) (rpc_req)->http_req
+
+/** Creates the reply to an RPC request
+ *
+ * EVRPC_REQUEST_DONE is used to answer a request; the reply is expected
+ * to have been filled in. The request and reply pointers become invalid
+ * after this call has finished.
+ *
+ * @param rpc_req the rpc request structure provided to the server callback
+ */
+#define EVRPC_REQUEST_DONE(rpc_req) do { \
+ struct evrpc_req_generic *_req = (struct evrpc_req_generic *)(rpc_req); \
+ _req->done(_req); \
+} while (0)
+
+
+/* Takes a request object and fills it in with the right magic */
+#define EVRPC_REGISTER_OBJECT(rpc, name, request, reply) \
+ do { \
+ (rpc)->uri = strdup(#name); \
+ if ((rpc)->uri == NULL) { \
+ fprintf(stderr, "failed to register object\n"); \
+ exit(1); \
+ } \
+ (rpc)->request_new = (void *(*)(void))request##_new; \
+ (rpc)->request_free = (void (*)(void *))request##_free; \
+ (rpc)->request_unmarshal = (int (*)(void *, struct evbuffer *))request##_unmarshal; \
+ (rpc)->reply_new = (void *(*)(void))reply##_new; \
+ (rpc)->reply_free = (void (*)(void *))reply##_free; \
+ (rpc)->reply_complete = (int (*)(void *))reply##_complete; \
+ (rpc)->reply_marshal = (void (*)(struct evbuffer*, void *))reply##_marshal; \
+ } while (0)
+
+struct evrpc_base;
+struct evhttp;
+
+/* functions to start up the rpc system */
+
+/** Creates a new rpc base from which RPC requests can be received
+ *
+ * @param server a pointer to an existing HTTP server
+ * @return a newly allocated evrpc_base struct
+ * @see evrpc_free()
+ */
+struct evrpc_base *evrpc_init(struct evhttp *server);
+
+/**
+ * Frees the evrpc base
+ *
+ * For now, you are responsible for making sure that no rpcs are ongoing.
+ *
+ * @param base the evrpc_base object to be freed
+ * @see evrpc_init
+ */
+void evrpc_free(struct evrpc_base *base);
+
+/** register RPCs with the HTTP Server
+ *
+ * registers a new RPC with the HTTP server, each RPC needs to have
+ * a unique name under which it can be identified.
+ *
+ * @param base the evrpc_base structure in which the RPC should be
+ * registered.
+ * @param name the name of the RPC
+ * @param request the name of the RPC request structure
+ * @param reply the name of the RPC reply structure
+ * @param callback the callback that should be invoked when the RPC
+ * is received. The callback has the following prototype
+ * void (*callback)(EVRPC_STRUCT(Message)* rpc, void *arg)
+ * @param cbarg an additional parameter that can be passed to the callback.
+ * The parameter can be used to carry around state.
+ */
+#define EVRPC_REGISTER(base, name, request, reply, callback, cbarg) \
+ do { \
+ struct evrpc* rpc = (struct evrpc *)calloc(1, sizeof(struct evrpc)); \
+ EVRPC_REGISTER_OBJECT(rpc, name, request, reply); \
+ evrpc_register_rpc(base, rpc, \
+ (void (*)(struct evrpc_req_generic*, void *))callback, cbarg); \
+ } while (0)
+
+int evrpc_register_rpc(struct evrpc_base *, struct evrpc *,
+ void (*)(struct evrpc_req_generic*, void *), void *);
+
+/**
+ * Unregisters an already registered RPC
+ *
+ * @param base the evrpc_base object from which to unregister an RPC
+ * @param name the name of the rpc to unregister
+ * @return -1 on error or 0 when successful.
+ * @see EVRPC_REGISTER()
+ */
+#define EVRPC_UNREGISTER(base, name) evrpc_unregister_rpc(base, #name)
+
+int evrpc_unregister_rpc(struct evrpc_base *base, const char *name);
+
+/*
+ * Client-side RPC support
+ */
+
+struct evrpc_pool;
+struct evhttp_connection;
+
+/**
+ * provides information about the completed RPC request.
+ */
+struct evrpc_status {
+#define EVRPC_STATUS_ERR_NONE 0
+#define EVRPC_STATUS_ERR_TIMEOUT 1
+#define EVRPC_STATUS_ERR_BADPAYLOAD 2
+#define EVRPC_STATUS_ERR_UNSTARTED 3
+#define EVRPC_STATUS_ERR_HOOKABORTED 4
+ int error;
+
+ /* for looking at headers or other information */
+ struct evhttp_request *http_req;
+};
+
+struct evrpc_request_wrapper {
+ TAILQ_ENTRY(evrpc_request_wrapper) next;
+
+ /* pool on which this rpc request is being made */
+ struct evrpc_pool *pool;
+
+ /* connection on which the request is being sent */
+ struct evhttp_connection *evcon;
+
+ /* event for implementing request timeouts */
+ struct event ev_timeout;
+
+ /* the name of the rpc */
+ char *name;
+
+ /* callback */
+ void (*cb)(struct evrpc_status*, void *request, void *reply, void *arg);
+ void *cb_arg;
+
+ void *request;
+ void *reply;
+
+	/* marshals the request structure into a buffer */
+ void (*request_marshal)(struct evbuffer *, void *);
+
+ /* removes all stored state in the reply */
+ void (*reply_clear)(void *);
+
+	/* unmarshals the buffer into the reply structure */
+ int (*reply_unmarshal)(void *, struct evbuffer*);
+};
+
+/** launches an RPC and sends it to the server
+ *
+ * EVRPC_MAKE_REQUEST() is used by the client to send an RPC to the server.
+ *
+ * @param name the name of the RPC
+ * @param pool the evrpc_pool that contains the connection objects over which
+ * the request should be sent.
+ * @param request a pointer to the RPC request structure - it contains the
+ * data to be sent to the server.
+ * @param reply a pointer to the RPC reply structure. It is going to be filled
+ * if the request was answered successfully
+ * @param cb the callback to invoke when the RPC request has been answered
+ * @param cbarg an additional argument to be passed to the callback
+ * @return 0 on success, -1 on failure
+ */
+#define EVRPC_MAKE_REQUEST(name, pool, request, reply, cb, cbarg) \
+ evrpc_send_request_##name(pool, request, reply, cb, cbarg)
+
+int evrpc_make_request(struct evrpc_request_wrapper *);
+
+/** creates an rpc connection pool
+ *
+ * a pool has a number of connections associated with it.
+ * rpc requests are always made via a pool.
+ *
+ * @param base a pointer to a struct event_base object; can be left NULL
+ * in single-threaded applications
+ * @return a newly allocated struct evrpc_pool object
+ * @see evrpc_pool_free()
+ */
+struct evrpc_pool *evrpc_pool_new(struct event_base *base);
+/** frees an rpc connection pool
+ *
+ * @param pool a pointer to an evrpc_pool allocated via evrpc_pool_new()
+ * @see evrpc_pool_new()
+ */
+void evrpc_pool_free(struct evrpc_pool *pool);
+/*
+ * adds a connection over which rpc can be dispatched. the connection
+ * object must have been newly created.
+ */
+void evrpc_pool_add_connection(struct evrpc_pool *,
+ struct evhttp_connection *);
+
+/**
+ * Sets the timeout in secs after which a request has to complete. The
+ * RPC is completely aborted if it does not complete by then. Setting
+ * the timeout to 0 means that it never times out and can be used to
+ * implement callback type RPCs.
+ *
+ * Any connection already in the pool will be updated with the new
+ * timeout. Connections added to the pool after set_timeout has been
+ * called receive the pool timeout only if no timeout has been set
+ * for the connection itself.
+ *
+ * @param pool a pointer to a struct evrpc_pool object
+ * @param timeout_in_secs the number of seconds after which a request should
+ * timeout and a failure be returned to the callback.
+ */
+void evrpc_pool_set_timeout(struct evrpc_pool *pool, int timeout_in_secs);
+
+/**
+ * Hooks for changing the input and output of RPCs; this can be used to
+ * implement compression, authentication, encryption, ...
+ */
+
+enum EVRPC_HOOK_TYPE {
+ EVRPC_INPUT, /**< apply the function to an input hook */
+ EVRPC_OUTPUT /**< apply the function to an output hook */
+};
+
+#ifndef WIN32
+/** Deprecated alias for EVRPC_INPUT. Not available on windows, where it
+ * conflicts with platform headers. */
+#define INPUT EVRPC_INPUT
+/** Deprecated alias for EVRPC_OUTPUT. Not available on windows, where it
+ * conflicts with platform headers. */
+#define OUTPUT EVRPC_OUTPUT
+#endif
+
+/** adds a processing hook to either an rpc base or rpc pool
+ *
+ * If a hook returns -1, the processing is aborted.
+ *
+ * The add functions return handles that can be used for removing hooks.
+ *
+ * @param vbase a pointer to either struct evrpc_base or struct evrpc_pool
+ * @param hook_type either INPUT or OUTPUT
+ * @param cb the callback to call when the hook is activated
+ * @param cb_arg an additional argument for the callback
+ * @return a handle to the hook so it can be removed later
+ * @see evrpc_remove_hook()
+ */
+void *evrpc_add_hook(void *vbase,
+ enum EVRPC_HOOK_TYPE hook_type,
+ int (*cb)(struct evhttp_request *, struct evbuffer *, void *),
+ void *cb_arg);
+
+/** removes a previously added hook
+ *
+ * @param vbase a pointer to either struct evrpc_base or struct evrpc_pool
+ * @param hook_type either INPUT or OUTPUT
+ * @param handle a handle returned by evrpc_add_hook()
+ * @return 1 on success or 0 on failure
+ * @see evrpc_add_hook()
+ */
+int evrpc_remove_hook(void *vbase,
+ enum EVRPC_HOOK_TYPE hook_type,
+ void *handle);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _EVRPC_H_ */
diff --git a/libevent/evsignal.h b/libevent/evsignal.h
new file mode 100644
index 00000000000..9b0405eea09
--- /dev/null
+++ b/libevent/evsignal.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2000-2002 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _EVSIGNAL_H_
+#define _EVSIGNAL_H_
+
+#include <signal.h>
+
+typedef void (*ev_sighandler_t)(int);
+
+struct evsignal_info {
+ struct event ev_signal;
+ int ev_signal_pair[2];
+ int ev_signal_added;
+ volatile sig_atomic_t evsignal_caught;
+ struct event_list evsigevents[NSIG];
+ sig_atomic_t evsigcaught[NSIG];
+#ifdef HAVE_SIGACTION
+ struct sigaction **sh_old;
+#else
+ ev_sighandler_t **sh_old;
+#endif
+ int sh_old_max;
+};
+int evsignal_init(struct event_base *);
+void evsignal_process(struct event_base *);
+int evsignal_add(struct event *);
+int evsignal_del(struct event *);
+void evsignal_dealloc(struct event_base *);
+
+#endif /* _EVSIGNAL_H_ */
diff --git a/libevent/evutil.c b/libevent/evutil.c
new file mode 100644
index 00000000000..7d22d3eac16
--- /dev/null
+++ b/libevent/evutil.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2007 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#ifdef WIN32
+#include <winsock2.h>
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+#endif
+
+#include <sys/types.h>
+#ifdef HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#ifdef HAVE_FCNTL_H
+#include <fcntl.h>
+#endif
+#ifdef HAVE_STDLIB_H
+#include <stdlib.h>
+#endif
+#include <errno.h>
+#if defined WIN32 && !defined(HAVE_GETTIMEOFDAY_H)
+#include <sys/timeb.h>
+#endif
+#include <stdio.h>
+
+#include "evutil.h"
+#include "log.h"
+
+int
+evutil_socketpair(int family, int type, int protocol, int fd[2])
+{
+#ifndef WIN32
+ return socketpair(family, type, protocol, fd);
+#else
+ /* This code is originally from Tor. Used with permission. */
+
+ /* This socketpair does not work when localhost is down. So
+ * it's really not the same thing at all. But it's close enough
+ * for now, and really, when localhost is down sometimes, we
+ * have other problems too.
+ */
+ int listener = -1;
+ int connector = -1;
+ int acceptor = -1;
+ struct sockaddr_in listen_addr;
+ struct sockaddr_in connect_addr;
+ int size;
+ int saved_errno = -1;
+
+ if (protocol
+#ifdef AF_UNIX
+ || family != AF_UNIX
+#endif
+ ) {
+ EVUTIL_SET_SOCKET_ERROR(WSAEAFNOSUPPORT);
+ return -1;
+ }
+ if (!fd) {
+ EVUTIL_SET_SOCKET_ERROR(WSAEINVAL);
+ return -1;
+ }
+
+ listener = socket(AF_INET, type, 0);
+ if (listener < 0)
+ return -1;
+ memset(&listen_addr, 0, sizeof(listen_addr));
+ listen_addr.sin_family = AF_INET;
+ listen_addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ listen_addr.sin_port = 0; /* kernel chooses port. */
+ if (bind(listener, (struct sockaddr *) &listen_addr, sizeof (listen_addr))
+ == -1)
+ goto tidy_up_and_fail;
+ if (listen(listener, 1) == -1)
+ goto tidy_up_and_fail;
+
+ connector = socket(AF_INET, type, 0);
+ if (connector < 0)
+ goto tidy_up_and_fail;
+ /* We want to find out the port number to connect to. */
+ size = sizeof(connect_addr);
+ if (getsockname(listener, (struct sockaddr *) &connect_addr, &size) == -1)
+ goto tidy_up_and_fail;
+ if (size != sizeof (connect_addr))
+ goto abort_tidy_up_and_fail;
+ if (connect(connector, (struct sockaddr *) &connect_addr,
+ sizeof(connect_addr)) == -1)
+ goto tidy_up_and_fail;
+
+ size = sizeof(listen_addr);
+ acceptor = accept(listener, (struct sockaddr *) &listen_addr, &size);
+ if (acceptor < 0)
+ goto tidy_up_and_fail;
+ if (size != sizeof(listen_addr))
+ goto abort_tidy_up_and_fail;
+ EVUTIL_CLOSESOCKET(listener);
+ /* Now check we are talking to ourself by matching port and host on the
+ two sockets. */
+ if (getsockname(connector, (struct sockaddr *) &connect_addr, &size) == -1)
+ goto tidy_up_and_fail;
+ if (size != sizeof (connect_addr)
+ || listen_addr.sin_family != connect_addr.sin_family
+ || listen_addr.sin_addr.s_addr != connect_addr.sin_addr.s_addr
+ || listen_addr.sin_port != connect_addr.sin_port)
+ goto abort_tidy_up_and_fail;
+ fd[0] = connector;
+ fd[1] = acceptor;
+
+ return 0;
+
+ abort_tidy_up_and_fail:
+ saved_errno = WSAECONNABORTED;
+ tidy_up_and_fail:
+ if (saved_errno < 0)
+ saved_errno = WSAGetLastError();
+ if (listener != -1)
+ EVUTIL_CLOSESOCKET(listener);
+ if (connector != -1)
+ EVUTIL_CLOSESOCKET(connector);
+ if (acceptor != -1)
+ EVUTIL_CLOSESOCKET(acceptor);
+
+ EVUTIL_SET_SOCKET_ERROR(saved_errno);
+ return -1;
+#endif
+}
+
+int
+evutil_make_socket_nonblocking(int fd)
+{
+#ifdef WIN32
+ {
+ unsigned long nonblocking = 1;
+ ioctlsocket(fd, FIONBIO, (unsigned long*) &nonblocking);
+ }
+#else
+ if (fcntl(fd, F_SETFL, O_NONBLOCK) == -1) {
+ event_warn("fcntl(O_NONBLOCK)");
+ return -1;
+}
+#endif
+ return 0;
+}
+
+ev_int64_t
+evutil_strtoll(const char *s, char **endptr, int base)
+{
+#ifdef HAVE_STRTOLL
+ return (ev_int64_t)strtoll(s, endptr, base);
+#elif SIZEOF_LONG == 8
+ return (ev_int64_t)strtol(s, endptr, base);
+#elif defined(WIN32) && defined(_MSC_VER) && _MSC_VER < 1300
+ /* XXXX on old versions of MS APIs, we only support base
+ * 10. */
+ ev_int64_t r;
+ if (base != 10)
+ return 0;
+ r = (ev_int64_t) _atoi64(s);
+ while (isspace(*s))
+ ++s;
+ while (isdigit(*s))
+ ++s;
+ if (endptr)
+ *endptr = (char*) s;
+ return r;
+#elif defined(WIN32)
+ return (ev_int64_t) _strtoi64(s, endptr, base);
+#else
+#error "I don't know how to parse 64-bit integers."
+#endif
+}
+
+#ifndef HAVE_GETTIMEOFDAY
+int
+evutil_gettimeofday(struct timeval *tv, struct timezone *tz)
+{
+ struct _timeb tb;
+
+ if(tv == NULL)
+ return -1;
+
+ _ftime(&tb);
+ tv->tv_sec = (long) tb.time;
+ tv->tv_usec = ((int) tb.millitm) * 1000;
+ return 0;
+}
+#endif
+
+int
+evutil_snprintf(char *buf, size_t buflen, const char *format, ...)
+{
+ int r;
+ va_list ap;
+ va_start(ap, format);
+ r = evutil_vsnprintf(buf, buflen, format, ap);
+ va_end(ap);
+ return r;
+}
+
+int
+evutil_vsnprintf(char *buf, size_t buflen, const char *format, va_list ap)
+{
+#ifdef _MSC_VER
+ int r = _vsnprintf(buf, buflen, format, ap);
+ buf[buflen-1] = '\0';
+ if (r >= 0)
+ return r;
+ else
+ return _vscprintf(format, ap);
+#else
+ int r = vsnprintf(buf, buflen, format, ap);
+ buf[buflen-1] = '\0';
+ return r;
+#endif
+}
diff --git a/libevent/evutil.h b/libevent/evutil.h
new file mode 100644
index 00000000000..ea751ddf7b7
--- /dev/null
+++ b/libevent/evutil.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2007 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _EVUTIL_H_
+#define _EVUTIL_H_
+
+/** @file evutil.h
+
+ Common convenience functions for cross-platform portability and
+ related socket manipulations.
+
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <config.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#ifdef HAVE_STDINT_H
+#include <stdint.h>
+#elif defined(HAVE_INTTYPES_H)
+#include <inttypes.h>
+#endif
+#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+#include <stdarg.h>
+
+#ifdef HAVE_UINT64_T
+#define ev_uint64_t uint64_t
+#define ev_int64_t int64_t
+#elif defined(WIN32)
+#define ev_uint64_t unsigned __int64
+#define ev_int64_t signed __int64
+#elif SIZEOF_LONG_LONG == 8
+#define ev_uint64_t unsigned long long
+#define ev_int64_t long long
+#elif SIZEOF_LONG == 8
+#define ev_uint64_t unsigned long
+#define ev_int64_t long
+#else
+#error "No way to define ev_uint64_t"
+#endif
+
+#ifdef HAVE_UINT32_T
+#define ev_uint32_t uint32_t
+#elif defined(WIN32)
+#define ev_uint32_t unsigned int
+#elif SIZEOF_LONG == 4
+#define ev_uint32_t unsigned long
+#elif SIZEOF_INT == 4
+#define ev_uint32_t unsigned int
+#else
+#error "No way to define ev_uint32_t"
+#endif
+
+#ifdef HAVE_UINT16_T
+#define ev_uint16_t uint16_t
+#elif defined(WIN32)
+#define ev_uint16_t unsigned short
+#elif SIZEOF_INT == 2
+#define ev_uint16_t unsigned int
+#elif SIZEOF_SHORT == 2
+#define ev_uint16_t unsigned short
+#else
+#error "No way to define ev_uint16_t"
+#endif
+
+#ifdef HAVE_UINT8_T
+#define ev_uint8_t uint8_t
+#else
+#define ev_uint8_t unsigned char
+#endif
+
+int evutil_socketpair(int d, int type, int protocol, int sv[2]);
+int evutil_make_socket_nonblocking(int sock);
+#ifdef WIN32
+#define EVUTIL_CLOSESOCKET(s) closesocket(s)
+#else
+#define EVUTIL_CLOSESOCKET(s) close(s)
+#endif
+
+#ifdef WIN32
+#define EVUTIL_SOCKET_ERROR() WSAGetLastError()
+#define EVUTIL_SET_SOCKET_ERROR(errcode) \
+ do { WSASetLastError(errcode); } while (0)
+#else
+#define EVUTIL_SOCKET_ERROR() (errno)
+#define EVUTIL_SET_SOCKET_ERROR(errcode) \
+ do { errno = (errcode); } while (0)
+#endif
+
+/*
+ * Manipulation functions for struct timeval
+ */
+#ifdef HAVE_TIMERADD
+#define evutil_timeradd(tvp, uvp, vvp) timeradd((tvp), (uvp), (vvp))
+#define evutil_timersub(tvp, uvp, vvp) timersub((tvp), (uvp), (vvp))
+#else
+#define evutil_timeradd(tvp, uvp, vvp) \
+ do { \
+ (vvp)->tv_sec = (tvp)->tv_sec + (uvp)->tv_sec; \
+ (vvp)->tv_usec = (tvp)->tv_usec + (uvp)->tv_usec; \
+ if ((vvp)->tv_usec >= 1000000) { \
+ (vvp)->tv_sec++; \
+ (vvp)->tv_usec -= 1000000; \
+ } \
+ } while (0)
+#define evutil_timersub(tvp, uvp, vvp) \
+ do { \
+ (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \
+ (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec; \
+ if ((vvp)->tv_usec < 0) { \
+ (vvp)->tv_sec--; \
+ (vvp)->tv_usec += 1000000; \
+ } \
+ } while (0)
+#endif /* !HAVE_TIMERADD */
+
+#ifdef HAVE_TIMERCLEAR
+#define evutil_timerclear(tvp) timerclear(tvp)
+#else
+#define evutil_timerclear(tvp) (tvp)->tv_sec = (tvp)->tv_usec = 0
+#endif
+
+#define evutil_timercmp(tvp, uvp, cmp) \
+ (((tvp)->tv_sec == (uvp)->tv_sec) ? \
+ ((tvp)->tv_usec cmp (uvp)->tv_usec) : \
+ ((tvp)->tv_sec cmp (uvp)->tv_sec))
+
+#ifdef HAVE_TIMERISSET
+#define evutil_timerisset(tvp) timerisset(tvp)
+#else
+#define evutil_timerisset(tvp) ((tvp)->tv_sec || (tvp)->tv_usec)
+#endif
+
+
+/* big-int related functions */
+ev_int64_t evutil_strtoll(const char *s, char **endptr, int base);
+
+
+#ifdef HAVE_GETTIMEOFDAY
+#define evutil_gettimeofday(tv, tz) gettimeofday((tv), (tz))
+#else
+int evutil_gettimeofday(struct timeval *tv, struct timezone *tz);
+#endif
+
+int evutil_snprintf(char *buf, size_t buflen, const char *format, ...)
+#ifdef __GNUC__
+ __attribute__((format(printf, 3, 4)))
+#endif
+ ;
+int evutil_vsnprintf(char *buf, size_t buflen, const char *format, va_list ap);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _EVUTIL_H_ */
diff --git a/libevent/http-internal.h b/libevent/http-internal.h
new file mode 100644
index 00000000000..9cd03cdd2bc
--- /dev/null
+++ b/libevent/http-internal.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2001 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * This header file contains definitions for dealing with HTTP requests
+ * that are internal to libevent. As user of the library, you should not
+ * need to know about these.
+ */
+
+#ifndef _HTTP_H_
+#define _HTTP_H_
+
+#define HTTP_CONNECT_TIMEOUT 45
+#define HTTP_WRITE_TIMEOUT 50
+#define HTTP_READ_TIMEOUT 50
+
+#define HTTP_PREFIX "http://"
+#define HTTP_DEFAULTPORT 80
+
+enum message_read_status {
+ ALL_DATA_READ = 1,
+ MORE_DATA_EXPECTED = 0,
+ DATA_CORRUPTED = -1,
+ REQUEST_CANCELED = -2
+};
+
+enum evhttp_connection_error {
+ EVCON_HTTP_TIMEOUT,
+ EVCON_HTTP_EOF,
+ EVCON_HTTP_INVALID_HEADER
+};
+
+struct evbuffer;
+struct addrinfo;
+struct evhttp_request;
+
+/* A stupid connection object - maybe make this a bufferevent later */
+
+enum evhttp_connection_state {
+ EVCON_DISCONNECTED, /**< not currently connected not trying either*/
+ EVCON_CONNECTING, /**< tries to currently connect */
+ EVCON_IDLE, /**< connection is established */
+ EVCON_READING_FIRSTLINE,/**< reading Request-Line (incoming conn) or
+ **< Status-Line (outgoing conn) */
+ EVCON_READING_HEADERS, /**< reading request/response headers */
+ EVCON_READING_BODY, /**< reading request/response body */
+ EVCON_READING_TRAILER, /**< reading request/response chunked trailer */
+ EVCON_WRITING /**< writing request/response headers/body */
+};
+
+struct event_base;
+
+struct evhttp_connection {
+ /* we use tailq only if they were created for an http server */
+ TAILQ_ENTRY(evhttp_connection) (next);
+
+ int fd;
+ struct event ev;
+ struct event close_ev;
+ struct evbuffer *input_buffer;
+ struct evbuffer *output_buffer;
+
+ char *bind_address; /* address to use for binding the src */
+ u_short bind_port; /* local port for binding the src */
+
+ char *address; /* address to connect to */
+ u_short port;
+
+ int flags;
+#define EVHTTP_CON_INCOMING 0x0001 /* only one request on it ever */
+#define EVHTTP_CON_OUTGOING 0x0002 /* multiple requests possible */
+#define EVHTTP_CON_CLOSEDETECT 0x0004 /* detecting if persistent close */
+
+ int timeout; /* timeout in seconds for events */
+ int retry_cnt; /* retry count */
+ int retry_max; /* maximum number of retries */
+
+ enum evhttp_connection_state state;
+
+ /* for server connections, the http server they are connected with */
+ struct evhttp *http_server;
+
+ TAILQ_HEAD(evcon_requestq, evhttp_request) requests;
+
+ void (*cb)(struct evhttp_connection *, void *);
+ void *cb_arg;
+
+ void (*closecb)(struct evhttp_connection *, void *);
+ void *closecb_arg;
+
+ struct event_base *base;
+};
+
+struct evhttp_cb {
+ TAILQ_ENTRY(evhttp_cb) next;
+
+ char *what;
+
+ void (*cb)(struct evhttp_request *req, void *);
+ void *cbarg;
+};
+
+/* both the http server as well as the rpc system need to queue connections */
+TAILQ_HEAD(evconq, evhttp_connection);
+
+/* each bound socket is stored in one of these */
+struct evhttp_bound_socket {
+ TAILQ_ENTRY(evhttp_bound_socket) (next);
+
+ struct event bind_ev;
+};
+
+struct evhttp {
+ TAILQ_HEAD(boundq, evhttp_bound_socket) sockets;
+
+ TAILQ_HEAD(httpcbq, evhttp_cb) callbacks;
+ struct evconq connections;
+
+ int timeout;
+
+ void (*gencb)(struct evhttp_request *req, void *);
+ void *gencbarg;
+
+ struct event_base *base;
+};
+
+/* resets the connection; can be reused for more requests */
+void evhttp_connection_reset(struct evhttp_connection *);
+
+/* connects if necessary */
+int evhttp_connection_connect(struct evhttp_connection *);
+
+/* notifies the current request that it failed; resets connection */
+void evhttp_connection_fail(struct evhttp_connection *,
+ enum evhttp_connection_error error);
+
+void evhttp_get_request(struct evhttp *, int, struct sockaddr *, socklen_t);
+
+int evhttp_hostportfile(char *, char **, u_short *, char **);
+
+int evhttp_parse_firstline(struct evhttp_request *, struct evbuffer*);
+int evhttp_parse_headers(struct evhttp_request *, struct evbuffer*);
+
+void evhttp_start_read(struct evhttp_connection *);
+void evhttp_make_header(struct evhttp_connection *, struct evhttp_request *);
+
+void evhttp_write_buffer(struct evhttp_connection *,
+ void (*)(struct evhttp_connection *, void *), void *);
+
+/* sends an HTML response containing the data in the buffer */
+void evhttp_response_code(struct evhttp_request *, int, const char *);
+void evhttp_send_page(struct evhttp_request *, struct evbuffer *);
+
+#endif /* _HTTP_H_ */
diff --git a/libevent/http.c b/libevent/http.c
new file mode 100644
index 00000000000..871bc2e4d0c
--- /dev/null
+++ b/libevent/http.c
@@ -0,0 +1,2830 @@
+/*
+ * Copyright (c) 2002-2006 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#ifdef HAVE_SYS_PARAM_H
+#include <sys/param.h>
+#endif
+#ifdef HAVE_SYS_TYPES_H
+#include <sys/types.h>
+#endif
+
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#ifdef HAVE_SYS_IOCCOM_H
+#include <sys/ioccom.h>
+#endif
+
+#ifndef WIN32
+#include <sys/resource.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+#endif
+
+#include <sys/queue.h>
+
+#ifndef HAVE_TAILQFOREACH
+#include <event-internal.h>
+#endif
+
+#ifndef WIN32
+#include <netinet/in.h>
+#include <netdb.h>
+#endif
+
+#ifdef WIN32
+#include <winsock2.h>
+#endif
+
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef WIN32
+#include <syslog.h>
+#endif
+#include <signal.h>
+#include <time.h>
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#ifdef HAVE_FCNTL_H
+#include <fcntl.h>
+#endif
+
+#undef timeout_pending
+#undef timeout_initialized
+
+#include "strlcpy-internal.h"
+#include "event.h"
+#include "evhttp.h"
+#include "evutil.h"
+#include "log.h"
+#include "http-internal.h"
+
+#ifdef WIN32
+#define strcasecmp _stricmp
+#define strncasecmp _strnicmp
+#define strdup _strdup
+#endif
+
+#ifndef HAVE_GETNAMEINFO
+#define NI_MAXSERV 32
+#define NI_MAXHOST 1025
+
+#define NI_NUMERICHOST 1
+#define NI_NUMERICSERV 2
+
+static int
+fake_getnameinfo(const struct sockaddr *sa, size_t salen, char *host,
+ size_t hostlen, char *serv, size_t servlen, int flags)
+{
+ struct sockaddr_in *sin = (struct sockaddr_in *)sa;
+
+ if (serv != NULL) {
+ char tmpserv[16];
+ evutil_snprintf(tmpserv, sizeof(tmpserv),
+ "%d", ntohs(sin->sin_port));
+ if (strlcpy(serv, tmpserv, servlen) >= servlen)
+ return (-1);
+ }
+
+ if (host != NULL) {
+ if (flags & NI_NUMERICHOST) {
+ if (strlcpy(host, inet_ntoa(sin->sin_addr),
+ hostlen) >= hostlen)
+ return (-1);
+ else
+ return (0);
+ } else {
+ struct hostent *hp;
+ hp = gethostbyaddr((char *)&sin->sin_addr,
+ sizeof(struct in_addr), AF_INET);
+ if (hp == NULL)
+ return (-2);
+
+ if (strlcpy(host, hp->h_name, hostlen) >= hostlen)
+ return (-1);
+ else
+ return (0);
+ }
+ }
+ return (0);
+}
+
+#endif
+
+#ifndef HAVE_GETADDRINFO
+struct addrinfo {
+ int ai_family;
+ int ai_socktype;
+ int ai_protocol;
+ size_t ai_addrlen;
+ struct sockaddr *ai_addr;
+ struct addrinfo *ai_next;
+};
+static int
+fake_getaddrinfo(const char *hostname, struct addrinfo *ai)
+{
+ struct hostent *he = NULL;
+ struct sockaddr_in *sa;
+ if (hostname) {
+ he = gethostbyname(hostname);
+ if (!he)
+ return (-1);
+ }
+ ai->ai_family = he ? he->h_addrtype : AF_INET;
+ ai->ai_socktype = SOCK_STREAM;
+ ai->ai_protocol = 0;
+ ai->ai_addrlen = sizeof(struct sockaddr_in);
+ if (NULL == (ai->ai_addr = malloc(ai->ai_addrlen)))
+ return (-1);
+ sa = (struct sockaddr_in*)ai->ai_addr;
+ memset(sa, 0, ai->ai_addrlen);
+ if (he) {
+ sa->sin_family = he->h_addrtype;
+ memcpy(&sa->sin_addr, he->h_addr_list[0], he->h_length);
+ } else {
+ sa->sin_family = AF_INET;
+ sa->sin_addr.s_addr = INADDR_ANY;
+ }
+ ai->ai_next = NULL;
+ return (0);
+}
+static void
+fake_freeaddrinfo(struct addrinfo *ai)
+{
+ free(ai->ai_addr);
+}
+#endif
+
+#ifndef MIN
+#define MIN(a,b) (((a)<(b))?(a):(b))
+#endif
+
+/* wrapper for setting the base from the http server */
+#define EVHTTP_BASE_SET(x, y) do { \
+ if ((x)->base != NULL) event_base_set((x)->base, y); \
+} while (0)
+
+extern int debug;
+
+static int socket_connect(int fd, const char *address, unsigned short port);
+static int bind_socket_ai(struct addrinfo *, int reuse);
+static int bind_socket(const char *, u_short, int reuse);
+static void name_from_addr(struct sockaddr *, socklen_t, char **, char **);
+static int evhttp_associate_new_request_with_connection(
+ struct evhttp_connection *evcon);
+static void evhttp_connection_start_detectclose(
+ struct evhttp_connection *evcon);
+static void evhttp_connection_stop_detectclose(
+ struct evhttp_connection *evcon);
+static void evhttp_request_dispatch(struct evhttp_connection* evcon);
+static void evhttp_read_firstline(struct evhttp_connection *evcon,
+ struct evhttp_request *req);
+static void evhttp_read_header(struct evhttp_connection *evcon,
+ struct evhttp_request *req);
+static int evhttp_add_header_internal(struct evkeyvalq *headers,
+ const char *key, const char *value);
+static int evhttp_decode_uri_internal(const char *uri, size_t length,
+ char *ret, int always_decode_plus);
+
+void evhttp_read(int, short, void *);
+void evhttp_write(int, short, void *);
+
+#ifndef HAVE_STRSEP
+/* strsep replacement for platforms that lack it. Only works if
+ * del is one character long. */
+static char *
+strsep(char **s, const char *del)
+{
+ char *d, *tok;
+ assert(strlen(del) == 1);
+ if (!s || !*s)
+ return NULL;
+ tok = *s;
+ d = strstr(tok, del);
+ if (d) {
+ *d = '\0';
+ *s = d + 1;
+ } else
+ *s = NULL;
+ return tok;
+}
+#endif
+
+static const char *
+html_replace(char ch, char *buf)
+{
+ switch (ch) {
+ case '<':
+ return "&lt;";
+ case '>':
+ return "&gt;";
+ case '"':
+ return "&quot;";
+ case '\'':
+ return "&#039;";
+ case '&':
+ return "&amp;";
+ default:
+ break;
+ }
+
+ /* Echo the character back */
+ buf[0] = ch;
+ buf[1] = '\0';
+
+ return buf;
+}
+
+/*
+ * Replaces <, >, ", ' and & with &lt;, &gt;, &quot;,
+ * &#039; and &amp; correspondingly.
+ *
+ * The returned string needs to be freed by the caller.
+ */
+
+char *
+evhttp_htmlescape(const char *html)
+{
+ int i, new_size = 0, old_size = strlen(html);
+ char *escaped_html, *p;
+ char scratch_space[2];
+
+ for (i = 0; i < old_size; ++i)
+ new_size += strlen(html_replace(html[i], scratch_space));
+
+ p = escaped_html = malloc(new_size + 1);
+ if (escaped_html == NULL)
+ event_err(1, "%s: malloc(%d)", __func__, new_size + 1);
+ for (i = 0; i < old_size; ++i) {
+ const char *replaced = html_replace(html[i], scratch_space);
+ /* this is length checked */
+ strcpy(p, replaced);
+ p += strlen(replaced);
+ }
+
+ *p = '\0';
+
+ return (escaped_html);
+}
+
+static const char *
+evhttp_method(enum evhttp_cmd_type type)
+{
+ const char *method;
+
+ switch (type) {
+ case EVHTTP_REQ_GET:
+ method = "GET";
+ break;
+ case EVHTTP_REQ_POST:
+ method = "POST";
+ break;
+ case EVHTTP_REQ_HEAD:
+ method = "HEAD";
+ break;
+ default:
+ method = NULL;
+ break;
+ }
+
+ return (method);
+}
+
+static void
+evhttp_add_event(struct event *ev, int timeout, int default_timeout)
+{
+ if (timeout != 0) {
+ struct timeval tv;
+
+ evutil_timerclear(&tv);
+ tv.tv_sec = timeout != -1 ? timeout : default_timeout;
+ event_add(ev, &tv);
+ } else {
+ event_add(ev, NULL);
+ }
+}
+
/*
 * Queue the connection's output buffer for writing; cb(evcon, arg) is
 * invoked by evhttp_write() once the buffer has been fully flushed.
 */
void
evhttp_write_buffer(struct evhttp_connection *evcon,
    void (*cb)(struct evhttp_connection *, void *), void *arg)
{
	event_debug(("%s: preparing to write buffer\n", __func__));

	/* Set call back */
	evcon->cb = cb;
	evcon->cb_arg = arg;

	/* check if the event is already pending */
	if (event_pending(&evcon->ev, EV_WRITE|EV_TIMEOUT, NULL))
		event_del(&evcon->ev);

	/* (re)arm a write event on the socket; HTTP_WRITE_TIMEOUT is the
	 * fallback when no per-connection timeout was configured */
	event_set(&evcon->ev, evcon->fd, EV_WRITE, evhttp_write, evcon);
	EVHTTP_BASE_SET(evcon, &evcon->ev);
	evhttp_add_event(&evcon->ev, evcon->timeout, HTTP_WRITE_TIMEOUT);
}
+
+static int
+evhttp_connected(struct evhttp_connection *evcon)
+{
+ switch (evcon->state) {
+ case EVCON_DISCONNECTED:
+ case EVCON_CONNECTING:
+ return (0);
+ case EVCON_IDLE:
+ case EVCON_READING_FIRSTLINE:
+ case EVCON_READING_HEADERS:
+ case EVCON_READING_BODY:
+ case EVCON_READING_TRAILER:
+ case EVCON_WRITING:
+ default:
+ return (1);
+ }
+}
+
+/*
+ * Create the headers needed for an HTTP request
+ */
+static void
+evhttp_make_header_request(struct evhttp_connection *evcon,
+ struct evhttp_request *req)
+{
+ const char *method;
+
+ evhttp_remove_header(req->output_headers, "Proxy-Connection");
+
+ /* Generate request line */
+ method = evhttp_method(req->type);
+ evbuffer_add_printf(evcon->output_buffer, "%s %s HTTP/%d.%d\r\n",
+ method, req->uri, req->major, req->minor);
+
+ /* Add the content length on a post request if missing */
+ if (req->type == EVHTTP_REQ_POST &&
+ evhttp_find_header(req->output_headers, "Content-Length") == NULL){
+ char size[12];
+ evutil_snprintf(size, sizeof(size), "%ld",
+ (long)EVBUFFER_LENGTH(req->output_buffer));
+ evhttp_add_header(req->output_headers, "Content-Length", size);
+ }
+}
+
+static int
+evhttp_is_connection_close(int flags, struct evkeyvalq* headers)
+{
+ if (flags & EVHTTP_PROXY_REQUEST) {
+ /* proxy connection */
+ const char *connection = evhttp_find_header(headers, "Proxy-Connection");
+ return (connection == NULL || strcasecmp(connection, "keep-alive") != 0);
+ } else {
+ const char *connection = evhttp_find_header(headers, "Connection");
+ return (connection != NULL && strcasecmp(connection, "close") == 0);
+ }
+}
+
+static int
+evhttp_is_connection_keepalive(struct evkeyvalq* headers)
+{
+ const char *connection = evhttp_find_header(headers, "Connection");
+ return (connection != NULL
+ && strncasecmp(connection, "keep-alive", 10) == 0);
+}
+
/* Add a "Date" header in RFC 1123 format (GMT) unless one is present. */
static void
evhttp_maybe_add_date_header(struct evkeyvalq *headers)
{
	if (evhttp_find_header(headers, "Date") == NULL) {
		char date[50];
#ifndef WIN32
		struct tm cur;
#endif
		struct tm *cur_p;
		time_t t = time(NULL);
#ifdef WIN32
		/* no gmtime_r on win32; use gmtime's static buffer */
		cur_p = gmtime(&t);
#else
		gmtime_r(&t, &cur);
		cur_p = &cur;
#endif
		/* strftime returns 0 on failure; then no header is added */
		if (strftime(date, sizeof(date),
			"%a, %d %b %Y %H:%M:%S GMT", cur_p) != 0) {
			evhttp_add_header(headers, "Date", date);
		}
	}
}
+
+static void
+evhttp_maybe_add_content_length_header(struct evkeyvalq *headers,
+ long content_length)
+{
+ if (evhttp_find_header(headers, "Transfer-Encoding") == NULL &&
+ evhttp_find_header(headers, "Content-Length") == NULL) {
+ char len[12];
+ evutil_snprintf(len, sizeof(len), "%ld", content_length);
+ evhttp_add_header(headers, "Content-Length", len);
+ }
+}
+
/*
 * Create the headers needed for an HTTP reply
 */

static void
evhttp_make_header_response(struct evhttp_connection *evcon,
    struct evhttp_request *req)
{
	int is_keepalive = evhttp_is_connection_keepalive(req->input_headers);
	/* status line */
	evbuffer_add_printf(evcon->output_buffer, "HTTP/%d.%d %d %s\r\n",
	    req->major, req->minor, req->response_code,
	    req->response_code_line);

	if (req->major == 1) {
		if (req->minor == 1)
			evhttp_maybe_add_date_header(req->output_headers);

		/*
		 * if the protocol is 1.0; and the connection was keep-alive
		 * we need to add a keep-alive header, too.
		 */
		if (req->minor == 0 && is_keepalive)
			evhttp_add_header(req->output_headers,
			    "Connection", "keep-alive");

		if (req->minor == 1 || is_keepalive) {
			/*
			 * we need to add the content length if the
			 * user did not give it, this is required for
			 * persistent connections to work.
			 */
			evhttp_maybe_add_content_length_header(
				req->output_headers,
				(long)EVBUFFER_LENGTH(req->output_buffer));
		}
	}

	/* Potentially add headers for unidentified content. */
	if (EVBUFFER_LENGTH(req->output_buffer)) {
		if (evhttp_find_header(req->output_headers,
			"Content-Type") == NULL) {
			evhttp_add_header(req->output_headers,
			    "Content-Type", "text/html; charset=ISO-8859-1");
		}
	}

	/* if the request asked for a close, we send a close, too */
	if (evhttp_is_connection_close(req->flags, req->input_headers)) {
		evhttp_remove_header(req->output_headers, "Connection");
		if (!(req->flags & EVHTTP_PROXY_REQUEST))
			evhttp_add_header(req->output_headers, "Connection", "close");
		evhttp_remove_header(req->output_headers, "Proxy-Connection");
	}
}
+
/*
 * Serialize the request or response into the connection's output
 * buffer: first line, headers, blank line, then any body/POST data.
 */
void
evhttp_make_header(struct evhttp_connection *evcon, struct evhttp_request *req)
{
	struct evkeyval *header;

	/*
	 * Depending if this is a HTTP request or response, we might need to
	 * add some new headers or remove existing headers.
	 */
	if (req->kind == EVHTTP_REQUEST) {
		evhttp_make_header_request(evcon, req);
	} else {
		evhttp_make_header_response(evcon, req);
	}

	TAILQ_FOREACH(header, req->output_headers, next) {
		evbuffer_add_printf(evcon->output_buffer, "%s: %s\r\n",
		    header->key, header->value);
	}
	/* blank line terminates the header section */
	evbuffer_add(evcon->output_buffer, "\r\n", 2);

	if (EVBUFFER_LENGTH(req->output_buffer) > 0) {
		/*
		 * For a request, we add the POST data, for a reply, this
		 * is the regular data.
		 */
		evbuffer_add_buffer(evcon->output_buffer, req->output_buffer);
	}
}
+
+/* Separated host, port and file from URI */
+
+int
+evhttp_hostportfile(char *url, char **phost, u_short *pport, char **pfile)
+{
+ /* XXX not threadsafe. */
+ static char host[1024];
+ static char file[1024];
+ char *p;
+ const char *p2;
+ int len;
+ u_short port;
+
+ len = strlen(HTTP_PREFIX);
+ if (strncasecmp(url, HTTP_PREFIX, len))
+ return (-1);
+
+ url += len;
+
+ /* We might overrun */
+ if (strlcpy(host, url, sizeof (host)) >= sizeof(host))
+ return (-1);
+
+ p = strchr(host, '/');
+ if (p != NULL) {
+ *p = '\0';
+ p2 = p + 1;
+ } else
+ p2 = NULL;
+
+ if (pfile != NULL) {
+ /* Generate request file */
+ if (p2 == NULL)
+ p2 = "";
+ evutil_snprintf(file, sizeof(file), "/%s", p2);
+ }
+
+ p = strchr(host, ':');
+ if (p != NULL) {
+ *p = '\0';
+ port = atoi(p + 1);
+
+ if (port == 0)
+ return (-1);
+ } else
+ port = HTTP_DEFAULTPORT;
+
+ if (phost != NULL)
+ *phost = host;
+ if (pport != NULL)
+ *pport = port;
+ if (pfile != NULL)
+ *pfile = file;
+
+ return (0);
+}
+
/*
 * Handle a failure on an incoming request.  Returns -1 when the
 * connection should simply be dropped; returns 0 after dispatching the
 * user callback, which is expected to send an error reply.
 */
static int
evhttp_connection_incoming_fail(struct evhttp_request *req,
    enum evhttp_connection_error error)
{
	switch (error) {
	case EVCON_HTTP_TIMEOUT:
	case EVCON_HTTP_EOF:
		/*
		 * these are cases in which we probably should just
		 * close the connection and not send a reply.  this
		 * case may happen when a browser keeps a persistent
		 * connection open and we timeout on the read.
		 */
		return (-1);
	case EVCON_HTTP_INVALID_HEADER:
	default:	/* xxx: probably should just error on default */
		/* the callback looks at the uri to determine errors */
		if (req->uri) {
			free(req->uri);
			req->uri = NULL;
		}

		/*
		 * the callback needs to send a reply, once the reply has
		 * been send, the connection should get freed.
		 */
		(*req->cb)(req, req->cb_arg);
	}

	return (0);
}
+
/*
 * Fail the oldest request on this connection.  Incoming connections may
 * still need to send an error reply before being freed; outgoing
 * connections drop the request, reset the socket and notify the
 * request's callback with a NULL request.
 */
void
evhttp_connection_fail(struct evhttp_connection *evcon,
    enum evhttp_connection_error error)
{
	struct evhttp_request* req = TAILQ_FIRST(&evcon->requests);
	void (*cb)(struct evhttp_request *, void *);
	void *cb_arg;
	assert(req != NULL);

	if (evcon->flags & EVHTTP_CON_INCOMING) {
		/*
		 * for incoming requests, there are two different
		 * failure cases.  it's either a network level error
		 * or an http layer error. for problems on the network
		 * layer like timeouts we just drop the connections.
		 * For HTTP problems, we might have to send back a
		 * reply before the connection can be freed.
		 */
		if (evhttp_connection_incoming_fail(req, error) == -1)
			evhttp_connection_free(evcon);
		return;
	}

	/* save the callback for later; the cb might free our object */
	cb = req->cb;
	cb_arg = req->cb_arg;

	TAILQ_REMOVE(&evcon->requests, req, next);
	evhttp_request_free(req);

	/* xxx: maybe we should fail all requests??? */

	/* reset the connection */
	evhttp_connection_reset(evcon);

	/* We are trying the next request that was queued on us */
	if (TAILQ_FIRST(&evcon->requests) != NULL)
		evhttp_connection_connect(evcon);

	/* inform the user */
	if (cb != NULL)
		(*cb)(NULL, cb_arg);
}
+
/*
 * Event callback: flush the connection's output buffer to the socket.
 * On timeout or write error the connection is failed; once the buffer
 * is empty, the completion callback set by evhttp_write_buffer() runs.
 */
void
evhttp_write(int fd, short what, void *arg)
{
	struct evhttp_connection *evcon = arg;
	int n;

	if (what == EV_TIMEOUT) {
		evhttp_connection_fail(evcon, EVCON_HTTP_TIMEOUT);
		return;
	}

	n = evbuffer_write(evcon->output_buffer, fd);
	if (n == -1) {
		event_debug(("%s: evbuffer_write", __func__));
		evhttp_connection_fail(evcon, EVCON_HTTP_EOF);
		return;
	}

	if (n == 0) {
		event_debug(("%s: write nothing", __func__));
		evhttp_connection_fail(evcon, EVCON_HTTP_EOF);
		return;
	}

	/* partial write: re-arm and wait for the socket to drain */
	if (EVBUFFER_LENGTH(evcon->output_buffer) != 0) {
		evhttp_add_event(&evcon->ev,
		    evcon->timeout, HTTP_WRITE_TIMEOUT);
		return;
	}

	/* Activate our call back */
	if (evcon->cb != NULL)
		(*evcon->cb)(evcon, evcon->cb_arg);
}
+
/**
 * Advance the connection state.
 * - If this is an outgoing connection, we've just processed the response;
 * idle or close the connection.
 * - If this is an incoming connection, we've just processed the request;
 * respond.
 */
static void
evhttp_connection_done(struct evhttp_connection *evcon)
{
	struct evhttp_request *req = TAILQ_FIRST(&evcon->requests);
	int con_outgoing = evcon->flags & EVHTTP_CON_OUTGOING;

	if (con_outgoing) {
		/* idle or close the connection */
		int need_close;
		TAILQ_REMOVE(&evcon->requests, req, next);
		req->evcon = NULL;

		evcon->state = EVCON_IDLE;

		/* either side may have asked for the close */
		need_close =
		    evhttp_is_connection_close(req->flags, req->input_headers)||
		    evhttp_is_connection_close(req->flags, req->output_headers);

		/* check if we got asked to close the connection */
		if (need_close)
			evhttp_connection_reset(evcon);

		if (TAILQ_FIRST(&evcon->requests) != NULL) {
			/*
			 * We have more requests; reset the connection
			 * and deal with the next request.
			 */
			if (!evhttp_connected(evcon))
				evhttp_connection_connect(evcon);
			else
				evhttp_request_dispatch(evcon);
		} else if (!need_close) {
			/*
			 * The connection is going to be persistent, but we
			 * need to detect if the other side closes it.
			 */
			evhttp_connection_start_detectclose(evcon);
		}
	} else {
		/*
		 * incoming connection - we need to leave the request on the
		 * connection so that we can reply to it.
		 */
		evcon->state = EVCON_WRITING;
	}

	/* notify the user of the request */
	(*req->cb)(req, req->cb_arg);

	/* if this was an outgoing request, we own and it's done. so free it */
	if (con_outgoing) {
		evhttp_request_free(req);
	}
}
+
/*
 * Handles reading from a chunked request.
 * return ALL_DATA_READ:
 *     all data has been read
 * return MORE_DATA_EXPECTED:
 *     more data is expected
 * return DATA_CORRUPTED:
 *     data is corrupted
 * return REQUEST_CANCELED:
 *     request was canceled by the user calling evhttp_cancel_request
 */

static enum message_read_status
evhttp_handle_chunked_read(struct evhttp_request *req, struct evbuffer *buf)
{
	int len;

	/* req->ntoread < 0 means we are between chunks and must parse a
	 * chunk-size line next; otherwise it holds the number of bytes
	 * remaining in the current chunk */
	while ((len = EVBUFFER_LENGTH(buf)) > 0) {
		if (req->ntoread < 0) {
			/* Read chunk size */
			ev_int64_t ntoread;
			char *p = evbuffer_readline(buf);
			char *endp;
			int error;
			if (p == NULL)
				break;
			/* the last chunk is on a new line? */
			if (strlen(p) == 0) {
				free(p);
				continue;
			}
			/* chunk size is hexadecimal */
			ntoread = evutil_strtoll(p, &endp, 16);
			error = (*p == '\0' ||
			    (*endp != '\0' && *endp != ' ') ||
			    ntoread < 0);
			free(p);
			if (error) {
				/* could not get chunk size */
				return (DATA_CORRUPTED);
			}
			req->ntoread = ntoread;
			if (req->ntoread == 0) {
				/* Last chunk */
				return (ALL_DATA_READ);
			}
			continue;
		}

		/* don't have enough to complete a chunk; wait for more */
		if (len < req->ntoread)
			return (MORE_DATA_EXPECTED);

		/* Completed chunk */
		evbuffer_add(req->input_buffer,
		    EVBUFFER_DATA(buf), (size_t)req->ntoread);
		evbuffer_drain(buf, (size_t)req->ntoread);
		req->ntoread = -1;
		if (req->chunk_cb != NULL) {
			/* the user consumes each chunk as it completes;
			 * drop it afterwards so it isn't buffered twice */
			(*req->chunk_cb)(req, req->cb_arg);
			evbuffer_drain(req->input_buffer,
			    EVBUFFER_LENGTH(req->input_buffer));
		}
	}

	return (MORE_DATA_EXPECTED);
}
+
/* Read the optional trailer headers that follow the last chunk of a
 * chunked message; the message is complete once they are parsed. */
static void
evhttp_read_trailer(struct evhttp_connection *evcon, struct evhttp_request *req)
{
	struct evbuffer *buf = evcon->input_buffer;

	switch (evhttp_parse_headers(req, buf)) {
	case DATA_CORRUPTED:
		evhttp_connection_fail(evcon, EVCON_HTTP_INVALID_HEADER);
		break;
	case ALL_DATA_READ:
		event_del(&evcon->ev);
		evhttp_connection_done(evcon);
		break;
	case MORE_DATA_EXPECTED:
	default:
		/* keep reading until the trailer is complete */
		evhttp_add_event(&evcon->ev, evcon->timeout,
		    HTTP_READ_TIMEOUT);
		break;
	}
}
+
/* Consume message-body bytes from the connection's input buffer,
 * dispatching on the framing: chunked, read-until-close (ntoread < 0),
 * or fixed Content-Length.  Re-arms the read event when more data is
 * still needed. */
static void
evhttp_read_body(struct evhttp_connection *evcon, struct evhttp_request *req)
{
	struct evbuffer *buf = evcon->input_buffer;

	if (req->chunked) {
		switch (evhttp_handle_chunked_read(req, buf)) {
		case ALL_DATA_READ:
			/* finished last chunk */
			evcon->state = EVCON_READING_TRAILER;
			evhttp_read_trailer(evcon, req);
			return;
		case DATA_CORRUPTED:
			/* corrupted data */
			evhttp_connection_fail(evcon,
			    EVCON_HTTP_INVALID_HEADER);
			return;
		case REQUEST_CANCELED:
			/* request canceled */
			evhttp_request_free(req);
			return;
		case MORE_DATA_EXPECTED:
		default:
			break;
		}
	} else if (req->ntoread < 0) {
		/* Read until connection close. */
		evbuffer_add_buffer(req->input_buffer, buf);
	} else if (EVBUFFER_LENGTH(buf) >= req->ntoread) {
		/* Completed content length */
		evbuffer_add(req->input_buffer, EVBUFFER_DATA(buf),
		    (size_t)req->ntoread);
		evbuffer_drain(buf, (size_t)req->ntoread);
		req->ntoread = 0;
		evhttp_connection_done(evcon);
		return;
	}
	/* Read more! */
	event_set(&evcon->ev, evcon->fd, EV_READ, evhttp_read, evcon);
	EVHTTP_BASE_SET(evcon, &evcon->ev);
	evhttp_add_event(&evcon->ev, evcon->timeout, HTTP_READ_TIMEOUT);
}
+
/*
 * Reads data into a buffer structure until no more data
 * can be read on the file descriptor or we have read all
 * the data that we wanted to read.
 * Execute callback when done.
 */

void
evhttp_read(int fd, short what, void *arg)
{
	struct evhttp_connection *evcon = arg;
	struct evhttp_request *req = TAILQ_FIRST(&evcon->requests);
	struct evbuffer *buf = evcon->input_buffer;
	int n, len;

	if (what == EV_TIMEOUT) {
		evhttp_connection_fail(evcon, EVCON_HTTP_TIMEOUT);
		return;
	}
	n = evbuffer_read(buf, fd, -1);
	len = EVBUFFER_LENGTH(buf);
	event_debug(("%s: got %d on %d\n", __func__, n, fd));

	if (n == -1) {
		/* transient errors (EINTR/EAGAIN) just re-arm the read;
		 * anything else fails the connection */
		if (errno != EINTR && errno != EAGAIN) {
			event_debug(("%s: evbuffer_read", __func__));
			evhttp_connection_fail(evcon, EVCON_HTTP_EOF);
		} else {
			evhttp_add_event(&evcon->ev, evcon->timeout,
			    HTTP_READ_TIMEOUT);
		}
		return;
	} else if (n == 0) {
		/* Connection closed */
		evhttp_connection_done(evcon);
		return;
	}

	/* hand the freshly read data to the parser for the current state */
	switch (evcon->state) {
	case EVCON_READING_FIRSTLINE:
		evhttp_read_firstline(evcon, req);
		break;
	case EVCON_READING_HEADERS:
		evhttp_read_header(evcon, req);
		break;
	case EVCON_READING_BODY:
		evhttp_read_body(evcon, req);
		break;
	case EVCON_READING_TRAILER:
		evhttp_read_trailer(evcon, req);
		break;
	case EVCON_DISCONNECTED:
	case EVCON_CONNECTING:
	case EVCON_IDLE:
	case EVCON_WRITING:
	default:
		event_errx(1, "%s: illegal connection state %d",
			   __func__, evcon->state);
	}
}
+
/* Completion callback after writing an outgoing request: switch the
 * request into response mode and start reading the server's answer. */
static void
evhttp_write_connectioncb(struct evhttp_connection *evcon, void *arg)
{
	/* This is after writing the request to the server */
	struct evhttp_request *req = TAILQ_FIRST(&evcon->requests);
	assert(req != NULL);

	assert(evcon->state == EVCON_WRITING);

	/* We are done writing our header and are now expecting the response */
	req->kind = EVHTTP_RESPONSE;

	evhttp_start_read(evcon);
}
+
+/*
+ * Clean up a connection object
+ */
+
+void
+evhttp_connection_free(struct evhttp_connection *evcon)
+{
+ struct evhttp_request *req;
+
+ /* notify interested parties that this connection is going down */
+ if (evcon->fd != -1) {
+ if (evhttp_connected(evcon) && evcon->closecb != NULL)
+ (*evcon->closecb)(evcon, evcon->closecb_arg);
+ }
+
+ /* remove all requests that might be queued on this connection */
+ while ((req = TAILQ_FIRST(&evcon->requests)) != NULL) {
+ TAILQ_REMOVE(&evcon->requests, req, next);
+ evhttp_request_free(req);
+ }
+
+ if (evcon->http_server != NULL) {
+ struct evhttp *http = evcon->http_server;
+ TAILQ_REMOVE(&http->connections, evcon, next);
+ }
+
+ if (event_initialized(&evcon->close_ev))
+ event_del(&evcon->close_ev);
+
+ if (event_initialized(&evcon->ev))
+ event_del(&evcon->ev);
+
+ if (evcon->fd != -1)
+ EVUTIL_CLOSESOCKET(evcon->fd);
+
+ if (evcon->bind_address != NULL)
+ free(evcon->bind_address);
+
+ if (evcon->address != NULL)
+ free(evcon->address);
+
+ if (evcon->input_buffer != NULL)
+ evbuffer_free(evcon->input_buffer);
+
+ if (evcon->output_buffer != NULL)
+ evbuffer_free(evcon->output_buffer);
+
+ free(evcon);
+}
+
+void
+evhttp_connection_set_local_address(struct evhttp_connection *evcon,
+ const char *address)
+{
+ assert(evcon->state == EVCON_DISCONNECTED);
+ if (evcon->bind_address)
+ free(evcon->bind_address);
+ if ((evcon->bind_address = strdup(address)) == NULL)
+ event_err(1, "%s: strdup", __func__);
+}
+
/* Select the local port to bind outgoing connections to; only valid
 * while the connection has not been made yet. */
void
evhttp_connection_set_local_port(struct evhttp_connection *evcon,
    unsigned short port)
{
	assert(evcon->state == EVCON_DISCONNECTED);
	evcon->bind_port = port;
}
+
/* Start writing the oldest queued request on an idle, already-connected
 * connection. */
static void
evhttp_request_dispatch(struct evhttp_connection* evcon)
{
	struct evhttp_request *req = TAILQ_FIRST(&evcon->requests);

	/* this should not usually happen but it's possible */
	if (req == NULL)
		return;

	/* delete possible close detection events */
	evhttp_connection_stop_detectclose(evcon);

	/* we assume that the connection is connected already */
	assert(evcon->state == EVCON_IDLE);

	evcon->state = EVCON_WRITING;

	/* Create the header from the store arguments */
	evhttp_make_header(evcon, req);

	evhttp_write_buffer(evcon, evhttp_write_connectioncb, NULL);
}
+
/* Reset our connection state: close the socket, drop buffered data in
 * both directions and return to EVCON_DISCONNECTED. */
void
evhttp_connection_reset(struct evhttp_connection *evcon)
{
	if (event_initialized(&evcon->ev))
		event_del(&evcon->ev);

	if (evcon->fd != -1) {
		/* inform interested parties about connection close */
		if (evhttp_connected(evcon) && evcon->closecb != NULL)
			(*evcon->closecb)(evcon, evcon->closecb_arg);

		EVUTIL_CLOSESOCKET(evcon->fd);
		evcon->fd = -1;
	}
	evcon->state = EVCON_DISCONNECTED;

	/* discard any unconsumed input and unsent output */
	evbuffer_drain(evcon->input_buffer,
	    EVBUFFER_LENGTH(evcon->input_buffer));
	evbuffer_drain(evcon->output_buffer,
	    EVBUFFER_LENGTH(evcon->output_buffer));
}
+
/* Read event fired on an idle persistent connection: the peer closed
 * (or sent unexpected data), so reset the connection.  fd and what are
 * unused; this matches the libevent callback signature. */
static void
evhttp_detect_close_cb(int fd, short what, void *arg)
{
	struct evhttp_connection *evcon = arg;
	evhttp_connection_reset(evcon);
}
+
/* Arm a persistent read event so that a close by the peer of an idle
 * connection is noticed and the connection gets reset. */
static void
evhttp_connection_start_detectclose(struct evhttp_connection *evcon)
{
	evcon->flags |= EVHTTP_CON_CLOSEDETECT;

	/* re-arm from scratch if a previous detect event exists */
	if (event_initialized(&evcon->close_ev))
		event_del(&evcon->close_ev);
	event_set(&evcon->close_ev, evcon->fd, EV_READ,
	    evhttp_detect_close_cb, evcon);
	EVHTTP_BASE_SET(evcon, &evcon->close_ev);
	event_add(&evcon->close_ev, NULL);
}
+
+static void
+evhttp_connection_stop_detectclose(struct evhttp_connection *evcon)
+{
+ evcon->flags &= ~EVHTTP_CON_CLOSEDETECT;
+ event_del(&evcon->close_ev);
+}
+
/* Timer callback: attempt to reconnect after a failed connect. */
static void
evhttp_connection_retry(int fd, short what, void *arg)
{
	struct evhttp_connection *evcon = arg;

	evcon->state = EVCON_DISCONNECTED;
	evhttp_connection_connect(evcon);
}
+
+/*
+ * Call back for asynchronous connection attempt.
+ */
+
+static void
+evhttp_connectioncb(int fd, short what, void *arg)
+{
+ struct evhttp_connection *evcon = arg;
+ int error;
+ socklen_t errsz = sizeof(error);
+
+ if (what == EV_TIMEOUT) {
+ event_debug(("%s: connection timeout for \"%s:%d\" on %d",
+ __func__, evcon->address, evcon->port, evcon->fd));
+ goto cleanup;
+ }
+
+ /* Check if the connection completed */
+ if (getsockopt(evcon->fd, SOL_SOCKET, SO_ERROR, (void*)&error,
+ &errsz) == -1) {
+ event_debug(("%s: getsockopt for \"%s:%d\" on %d",
+ __func__, evcon->address, evcon->port, evcon->fd));
+ goto cleanup;
+ }
+
+ if (error) {
+ event_debug(("%s: connect failed for \"%s:%d\" on %d: %s",
+ __func__, evcon->address, evcon->port, evcon->fd,
+ strerror(error)));
+ goto cleanup;
+ }
+
+ /* We are connected to the server now */
+ event_debug(("%s: connected to \"%s:%d\" on %d\n",
+ __func__, evcon->address, evcon->port, evcon->fd));
+
+ /* Reset the retry count as we were successful in connecting */
+ evcon->retry_cnt = 0;
+ evcon->state = EVCON_IDLE;
+
+ /* try to start requests that have queued up on this connection */
+ evhttp_request_dispatch(evcon);
+ return;
+
+ cleanup:
+ if (evcon->retry_max < 0 || evcon->retry_cnt < evcon->retry_max) {
+ evtimer_set(&evcon->ev, evhttp_connection_retry, evcon);
+ EVHTTP_BASE_SET(evcon, &evcon->ev);
+ evhttp_add_event(&evcon->ev, MIN(3600, 2 << evcon->retry_cnt),
+ HTTP_CONNECT_TIMEOUT);
+ evcon->retry_cnt++;
+ return;
+ }
+ evhttp_connection_reset(evcon);
+
+ /* for now, we just signal all requests by executing their callbacks */
+ while (TAILQ_FIRST(&evcon->requests) != NULL) {
+ struct evhttp_request *request = TAILQ_FIRST(&evcon->requests);
+ TAILQ_REMOVE(&evcon->requests, request, next);
+ request->evcon = NULL;
+
+ /* we might want to set an error here */
+ request->cb(request, request->cb_arg);
+ evhttp_request_free(request);
+ }
+}
+
/*
 * Check if we got a valid response code.
 */

static int
evhttp_valid_response_code(int code)
{
	/* zero means the status line's code could not be parsed */
	return (code != 0);
}
+
/* Parses the status line of a web server, e.g. "HTTP/1.1 200 OK".
 * Only HTTP/1.0 and HTTP/1.1 are accepted.  Returns 0 on success,
 * -1 on malformed input. */

static int
evhttp_parse_response_line(struct evhttp_request *req, char *line)
{
	char *protocol;
	char *number;
	char *readable;

	protocol = strsep(&line, " ");
	if (line == NULL)
		return (-1);
	number = strsep(&line, " ");
	if (line == NULL)
		return (-1);
	/* the remainder of the line is the human-readable reason phrase */
	readable = line;

	if (strcmp(protocol, "HTTP/1.0") == 0) {
		req->major = 1;
		req->minor = 0;
	} else if (strcmp(protocol, "HTTP/1.1") == 0) {
		req->major = 1;
		req->minor = 1;
	} else {
		event_debug(("%s: bad protocol \"%s\"",
			__func__, protocol));
		return (-1);
	}

	req->response_code = atoi(number);
	if (!evhttp_valid_response_code(req->response_code)) {
		event_debug(("%s: bad response code \"%s\"",
			__func__, number));
		return (-1);
	}

	if ((req->response_code_line = strdup(readable)) == NULL)
		event_err(1, "%s: strdup", __func__);

	return (0);
}
+
/* Parse the first line of a HTTP request, e.g. "GET /index HTTP/1.0".
 * Returns 0 on success, -1 on a malformed or unsupported line. */

static int
evhttp_parse_request_line(struct evhttp_request *req, char *line)
{
	char *method;
	char *uri;
	char *version;

	/* Parse the request line */
	method = strsep(&line, " ");
	if (line == NULL)
		return (-1);
	uri = strsep(&line, " ");
	if (line == NULL)
		return (-1);
	version = strsep(&line, " ");
	/* anything after the version token makes the line invalid */
	if (line != NULL)
		return (-1);

	/* First line */
	if (strcmp(method, "GET") == 0) {
		req->type = EVHTTP_REQ_GET;
	} else if (strcmp(method, "POST") == 0) {
		req->type = EVHTTP_REQ_POST;
	} else if (strcmp(method, "HEAD") == 0) {
		req->type = EVHTTP_REQ_HEAD;
	} else {
		event_debug(("%s: bad method %s on request %p from %s",
			__func__, method, req, req->remote_host));
		return (-1);
	}

	if (strcmp(version, "HTTP/1.0") == 0) {
		req->major = 1;
		req->minor = 0;
	} else if (strcmp(version, "HTTP/1.1") == 0) {
		req->major = 1;
		req->minor = 1;
	} else {
		event_debug(("%s: bad version %s on request %p from %s",
			__func__, version, req, req->remote_host));
		return (-1);
	}

	if ((req->uri = strdup(uri)) == NULL) {
		event_debug(("%s: evhttp_decode_uri", __func__));
		return (-1);
	}

	/* determine if it's a proxy request: absolute URIs do not start
	 * with '/' */
	if (strlen(req->uri) > 0 && req->uri[0] != '/')
		req->flags |= EVHTTP_PROXY_REQUEST;

	return (0);
}
+
/* Case-insensitive lookup of the first header matching key.  The
 * returned pointer is owned by the header list (do not free); NULL if
 * the key is absent. */
const char *
evhttp_find_header(const struct evkeyvalq *headers, const char *key)
{
	struct evkeyval *header;

	TAILQ_FOREACH(header, headers, next) {
		if (strcasecmp(header->key, key) == 0)
			return (header->value);
	}

	return (NULL);
}
+
+void
+evhttp_clear_headers(struct evkeyvalq *headers)
+{
+ struct evkeyval *header;
+
+ for (header = TAILQ_FIRST(headers);
+ header != NULL;
+ header = TAILQ_FIRST(headers)) {
+ TAILQ_REMOVE(headers, header, next);
+ free(header->key);
+ free(header->value);
+ free(header);
+ }
+}
+
/*
 * Returns 0, if the header was successfully removed.
 * Returns -1, if the header could not be found.
 */

int
evhttp_remove_header(struct evkeyvalq *headers, const char *key)
{
	struct evkeyval *header;

	/* case-insensitive search for the first matching key; header is
	 * NULL when the loop runs off the end of the list */
	TAILQ_FOREACH(header, headers, next) {
		if (strcasecmp(header->key, key) == 0)
			break;
	}

	if (header == NULL)
		return (-1);

	/* Free and remove the header that we found */
	TAILQ_REMOVE(headers, header, next);
	free(header->key);
	free(header->value);
	free(header);

	return (0);
}
+
/* A header value is valid when every CR/LF run inside it is followed
 * by a space or tab (header folding); bare line breaks are rejected. */
static int
evhttp_header_is_valid_value(const char *value)
{
	const char *p = value;

	for (;;) {
		p = strpbrk(p, "\r\n");
		if (p == NULL)
			return (1);
		/* skip over the whole CR/LF run */
		p += strspn(p, "\r\n");
		/* continuation lines must begin with space or tab */
		if (*p != ' ' && *p != '\t')
			return (0);
	}
}
+
/*
 * Add a header after validating it: keys must not contain CR/LF at
 * all; values may contain CR/LF only as part of a folded continuation
 * line.  Returns 0 on success, -1 if the header was rejected.
 */
int
evhttp_add_header(struct evkeyvalq *headers,
    const char *key, const char *value)
{
	event_debug(("%s: key: %s val: %s\n", __func__, key, value));

	if (strchr(key, '\r') != NULL || strchr(key, '\n') != NULL) {
		/* drop illegal headers */
		event_debug(("%s: dropping illegal header key\n", __func__));
		return (-1);
	}

	if (!evhttp_header_is_valid_value(value)) {
		event_debug(("%s: dropping illegal header value\n", __func__));
		return (-1);
	}

	return (evhttp_add_header_internal(headers, key, value));
}
+
/* Append a (key, value) pair to the header list; both strings are
 * copied.  Returns 0 on success, -1 on allocation failure (with all
 * partial allocations released). */
static int
evhttp_add_header_internal(struct evkeyvalq *headers,
    const char *key, const char *value)
{
	struct evkeyval *header = calloc(1, sizeof(struct evkeyval));
	if (header == NULL) {
		event_warn("%s: calloc", __func__);
		return (-1);
	}
	if ((header->key = strdup(key)) == NULL) {
		free(header);
		event_warn("%s: strdup", __func__);
		return (-1);
	}
	if ((header->value = strdup(value)) == NULL) {
		free(header->key);
		free(header);
		event_warn("%s: strdup", __func__);
		return (-1);
	}

	TAILQ_INSERT_TAIL(headers, header, next);

	return (0);
}
+
/*
 * Parses the first line of a request or response from an event buffer
 * into the specified request object.
 *
 * Returns
 *   DATA_CORRUPTED      on error
 *   MORE_DATA_EXPECTED  when a complete line has not arrived yet
 *   ALL_DATA_READ       when the first line has been parsed.
 */

enum message_read_status
evhttp_parse_firstline(struct evhttp_request *req, struct evbuffer *buffer)
{
	char *line;
	enum message_read_status status = ALL_DATA_READ;

	line = evbuffer_readline(buffer);
	if (line == NULL)
		return (MORE_DATA_EXPECTED);

	/* req->kind selects request-line vs. status-line parsing */
	switch (req->kind) {
	case EVHTTP_REQUEST:
		if (evhttp_parse_request_line(req, line) == -1)
			status = DATA_CORRUPTED;
		break;
	case EVHTTP_RESPONSE:
		if (evhttp_parse_response_line(req, line) == -1)
			status = DATA_CORRUPTED;
		break;
	default:
		status = DATA_CORRUPTED;
	}

	free(line);
	return (status);
}
+
/* Append a folded continuation line to the most recently added
 * header's value.  Returns 0 on success, -1 if there is no previous
 * header or memory is exhausted. */
static int
evhttp_append_to_last_header(struct evkeyvalq *headers, const char *line)
{
	struct evkeyval *header = TAILQ_LAST(headers, evkeyvalq);
	char *newval;
	size_t old_len, line_len;

	if (header == NULL)
		return (-1);

	old_len = strlen(header->value);
	line_len = strlen(line);

	newval = realloc(header->value, old_len + line_len + 1);
	if (newval == NULL)
		return (-1);

	/* +1 copies line's terminating NUL as well */
	memcpy(newval + old_len, line, line_len + 1);
	header->value = newval;

	return (0);
}
+
/* Parse header lines from the buffer into req->input_headers until the
 * blank line that ends the header section.  Returns ALL_DATA_READ,
 * MORE_DATA_EXPECTED (need more input), or DATA_CORRUPTED on a
 * malformed line. */
enum message_read_status
evhttp_parse_headers(struct evhttp_request *req, struct evbuffer* buffer)
{
	char *line;
	enum message_read_status status = MORE_DATA_EXPECTED;

	struct evkeyvalq* headers = req->input_headers;
	while ((line = evbuffer_readline(buffer))
	       != NULL) {
		char *skey, *svalue;

		if (*line == '\0') { /* Last header - Done */
			status = ALL_DATA_READ;
			free(line);
			break;
		}

		/* Check if this is a continuation line */
		if (*line == ' ' || *line == '\t') {
			if (evhttp_append_to_last_header(headers, line) == -1)
				goto error;
			free(line);
			continue;
		}

		/* Processing of header lines */
		svalue = line;
		skey = strsep(&svalue, ":");
		if (svalue == NULL)
			goto error;

		/* skip the whitespace between colon and value */
		svalue += strspn(svalue, " ");

		if (evhttp_add_header(headers, skey, svalue) == -1)
			goto error;

		free(line);
	}

	return (status);

 error:
	free(line);
	return (DATA_CORRUPTED);
}
+
/* Determine req->ntoread from the Content-Length/Connection headers;
 * -1 means "read until the connection closes".  Returns -1 (error)
 * when the header combination leaves the body length undecidable. */
static int
evhttp_get_body_length(struct evhttp_request *req)
{
	struct evkeyvalq *headers = req->input_headers;
	const char *content_length;
	const char *connection;

	content_length = evhttp_find_header(headers, "Content-Length");
	connection = evhttp_find_header(headers, "Connection");

	if (content_length == NULL && connection == NULL)
		req->ntoread = -1;
	else if (content_length == NULL &&
	    strcasecmp(connection, "Close") != 0) {
		/* Bad combination, we don't know when it will end */
		event_warnx("%s: we got no content length, but the "
		    "server wants to keep the connection open: %s.",
		    __func__, connection);
		return (-1);
	} else if (content_length == NULL) {
		req->ntoread = -1;
	} else {
		char *endp;
		ev_int64_t ntoread = evutil_strtoll(content_length, &endp, 10);
		/* reject empty, non-numeric or negative lengths */
		if (*content_length == '\0' || *endp != '\0' || ntoread < 0) {
			event_debug(("%s: illegal content length: %s",
				__func__, content_length));
			return (-1);
		}
		req->ntoread = ntoread;
	}

	event_debug(("%s: bytes to read: %lld (in buffer %ld)\n",
		__func__, req->ntoread,
		EVBUFFER_LENGTH(req->evcon->input_buffer)));

	return (0);
}
+
/* Decide how (or whether) to read a message body once headers are in:
 * non-POST requests have none; otherwise use chunked framing when the
 * Transfer-Encoding says so, else a length derived from the headers. */
static void
evhttp_get_body(struct evhttp_connection *evcon, struct evhttp_request *req)
{
	const char *xfer_enc;

	/* If this is a request without a body, then we are done */
	if (req->kind == EVHTTP_REQUEST && req->type != EVHTTP_REQ_POST) {
		evhttp_connection_done(evcon);
		return;
	}
	evcon->state = EVCON_READING_BODY;
	xfer_enc = evhttp_find_header(req->input_headers, "Transfer-Encoding");
	if (xfer_enc != NULL && strcasecmp(xfer_enc, "chunked") == 0) {
		req->chunked = 1;
		req->ntoread = -1;
	} else {
		if (evhttp_get_body_length(req) == -1) {
			evhttp_connection_fail(evcon,
			    EVCON_HTTP_INVALID_HEADER);
			return;
		}
	}
	evhttp_read_body(evcon, req);
}
+
/* Try to parse the request/status line from the input buffer; on
 * success advance the connection to header parsing. */
static void
evhttp_read_firstline(struct evhttp_connection *evcon,
		      struct evhttp_request *req)
{
	enum message_read_status res;

	res = evhttp_parse_firstline(req, evcon->input_buffer);
	if (res == DATA_CORRUPTED) {
		/* Error while reading, terminate */
		event_debug(("%s: bad header lines on %d\n",
			__func__, evcon->fd));
		evhttp_connection_fail(evcon, EVCON_HTTP_INVALID_HEADER);
		return;
	} else if (res == MORE_DATA_EXPECTED) {
		/* Need more header lines */
		evhttp_add_event(&evcon->ev,
		    evcon->timeout, HTTP_READ_TIMEOUT);
		return;
	}

	evcon->state = EVCON_READING_HEADERS;
	evhttp_read_header(evcon, req);
}
+
/* Parse header lines; once the header section is complete, decide what
 * to do next based on whether this is a request or a response. */
static void
evhttp_read_header(struct evhttp_connection *evcon, struct evhttp_request *req)
{
	enum message_read_status res;
	int fd = evcon->fd;

	res = evhttp_parse_headers(req, evcon->input_buffer);
	if (res == DATA_CORRUPTED) {
		/* Error while reading, terminate */
		event_debug(("%s: bad header lines on %d\n", __func__, fd));
		evhttp_connection_fail(evcon, EVCON_HTTP_INVALID_HEADER);
		return;
	} else if (res == MORE_DATA_EXPECTED) {
		/* Need more header lines */
		evhttp_add_event(&evcon->ev,
		    evcon->timeout, HTTP_READ_TIMEOUT);
		return;
	}

	/* Done reading headers, do the real work */
	switch (req->kind) {
	case EVHTTP_REQUEST:
		event_debug(("%s: checking for post data on %d\n",
			__func__, fd));
		evhttp_get_body(evcon, req);
		break;

	case EVHTTP_RESPONSE:
		/* responses with no body: 204, 304 and all 1xx codes */
		if (req->response_code == HTTP_NOCONTENT ||
		    req->response_code == HTTP_NOTMODIFIED ||
		    (req->response_code >= 100 && req->response_code < 200)) {
			event_debug(("%s: skipping body for code %d\n",
					__func__, req->response_code));
			evhttp_connection_done(evcon);
		} else {
			event_debug(("%s: start of read body for %s on %d\n",
				__func__, req->remote_host, fd));
			evhttp_get_body(evcon, req);
		}
		break;

	default:
		event_warnx("%s: bad header on %d", __func__, fd);
		evhttp_connection_fail(evcon, EVCON_HTTP_INVALID_HEADER);
		break;
	}
}
+
+/*
+ * Creates a TCP connection to the specified port and executes a callback
+ * when finished. Failure or success is indicated by the passed connection
+ * object.
+ *
+ * Although this interface accepts a hostname, it is intended to take
+ * only numeric hostnames so that non-blocking DNS resolution can
+ * happen elsewhere.
+ */
+
/*
 * Allocate a new outgoing-capable connection object for address:port.
 * No socket is created yet; evhttp_connection_connect() does that.
 * Returns NULL on allocation failure.  The caller owns the result and
 * releases it with evhttp_connection_free() (also used for the partial
 * object on the error path below).
 */
struct evhttp_connection *
evhttp_connection_new(const char *address, unsigned short port)
{
	struct evhttp_connection *evcon = NULL;

	event_debug(("Attempting connection to %s:%d\n", address, port));

	if ((evcon = calloc(1, sizeof(struct evhttp_connection))) == NULL) {
		event_warn("%s: calloc failed", __func__);
		goto error;
	}

	/* -1 == no socket yet / no timeout configured */
	evcon->fd = -1;
	evcon->port = port;

	evcon->timeout = -1;
	evcon->retry_cnt = evcon->retry_max = 0;

	if ((evcon->address = strdup(address)) == NULL) {
		event_warn("%s: strdup failed", __func__);
		goto error;
	}

	if ((evcon->input_buffer = evbuffer_new()) == NULL) {
		event_warn("%s: evbuffer_new failed", __func__);
		goto error;
	}

	if ((evcon->output_buffer = evbuffer_new()) == NULL) {
		event_warn("%s: evbuffer_new failed", __func__);
		goto error;
	}

	evcon->state = EVCON_DISCONNECTED;
	TAILQ_INIT(&evcon->requests);

	return (evcon);

 error:
	if (evcon != NULL)
		evhttp_connection_free(evcon);
	return (NULL);
}
+
+void evhttp_connection_set_base(struct evhttp_connection *evcon,
+ struct event_base *base)
+{
+ assert(evcon->base == NULL);
+ assert(evcon->state == EVCON_DISCONNECTED);
+ evcon->base = base;
+}
+
+void
+evhttp_connection_set_timeout(struct evhttp_connection *evcon,
+ int timeout_in_secs)
+{
+ evcon->timeout = timeout_in_secs;
+}
+
+void
+evhttp_connection_set_retries(struct evhttp_connection *evcon,
+ int retry_max)
+{
+ evcon->retry_max = retry_max;
+}
+
+void
+evhttp_connection_set_closecb(struct evhttp_connection *evcon,
+ void (*cb)(struct evhttp_connection *, void *), void *cbarg)
+{
+ evcon->closecb = cb;
+ evcon->closecb_arg = cbarg;
+}
+
+void
+evhttp_connection_get_peer(struct evhttp_connection *evcon,
+ char **address, u_short *port)
+{
+ *address = evcon->address;
+ *port = evcon->port;
+}
+
/*
 * Begin a non-blocking connect to evcon->address:evcon->port.
 * Returns 0 if the connect is already in progress or was started,
 * -1 on bind/connect failure.  Completion is signalled through
 * evhttp_connectioncb via the EV_WRITE event armed below.
 */
int
evhttp_connection_connect(struct evhttp_connection *evcon)
{
	if (evcon->state == EVCON_CONNECTING)
		return (0);

	/* drop any previous socket/events before starting over */
	evhttp_connection_reset(evcon);

	assert(!(evcon->flags & EVHTTP_CON_INCOMING));
	evcon->flags |= EVHTTP_CON_OUTGOING;

	evcon->fd = bind_socket(
		evcon->bind_address, evcon->bind_port, 0 /*reuse*/);
	if (evcon->fd == -1) {
		event_debug(("%s: failed to bind to \"%s\"",
			__func__, evcon->bind_address));
		return (-1);
	}

	if (socket_connect(evcon->fd, evcon->address, evcon->port) == -1) {
		EVUTIL_CLOSESOCKET(evcon->fd); evcon->fd = -1;
		return (-1);
	}

	/* Set up a callback for successful connection setup */
	event_set(&evcon->ev, evcon->fd, EV_WRITE, evhttp_connectioncb, evcon);
	EVHTTP_BASE_SET(evcon, &evcon->ev);
	evhttp_add_event(&evcon->ev, evcon->timeout, HTTP_CONNECT_TIMEOUT);

	evcon->state = EVCON_CONNECTING;

	return (0);
}
+
+/*
+ * Starts an HTTP request on the provided evhttp_connection object.
+ * If the connection object is not connected to the web server already,
+ * this will start the connection.
+ */
+
/*
 * Queue req on evcon and start it: connect first if necessary, else
 * dispatch immediately when req is at the head of the request queue.
 * Takes ownership of nothing beyond linking req into evcon->requests;
 * req->uri is replaced with a private copy of `uri`.
 * Returns 0 on success, -1 if the connect could not be started.
 * Exits via event_err() on strdup failure.
 */
int
evhttp_make_request(struct evhttp_connection *evcon,
    struct evhttp_request *req,
    enum evhttp_cmd_type type, const char *uri)
{
	/* We are making a request */
	req->kind = EVHTTP_REQUEST;
	req->type = type;
	if (req->uri != NULL)
		free(req->uri);
	if ((req->uri = strdup(uri)) == NULL)
		event_err(1, "%s: strdup", __func__);

	/* Set the protocol version if it is not supplied */
	if (!req->major && !req->minor) {
		req->major = 1;
		req->minor = 1;
	}

	/* a request may be pending on at most one connection */
	assert(req->evcon == NULL);
	req->evcon = evcon;
	assert(!(req->flags & EVHTTP_REQ_OWN_CONNECTION));

	TAILQ_INSERT_TAIL(&evcon->requests, req, next);

	/* If the connection object is not connected; make it so */
	if (!evhttp_connected(evcon))
		return (evhttp_connection_connect(evcon));

	/*
	 * If it's connected already and we are the first in the queue,
	 * then we can dispatch this request immediately.  Otherwise, it
	 * will be dispatched once the pending requests are completed.
	 */
	if (TAILQ_FIRST(&evcon->requests) == req)
		evhttp_request_dispatch(evcon);

	return (0);
}
+
+/*
+ * Reads data from file descriptor into request structure
+ * Request structure needs to be set up correctly.
+ */
+
/*
 * Arm a read event on evcon->fd and move the connection into the
 * first-line parsing state.  Any previously registered event on
 * evcon->ev is deleted first so it can be safely re-used.
 */
void
evhttp_start_read(struct evhttp_connection *evcon)
{
	/* Set up an event to read the headers */
	if (event_initialized(&evcon->ev))
		event_del(&evcon->ev);
	event_set(&evcon->ev, evcon->fd, EV_READ, evhttp_read, evcon);
	EVHTTP_BASE_SET(evcon, &evcon->ev);

	evhttp_add_event(&evcon->ev, evcon->timeout, HTTP_READ_TIMEOUT);
	evcon->state = EVCON_READING_FIRSTLINE;
}
+
/*
 * Completion callback for a server-side response: retire the finished
 * request and either close the connection (HTTP/1.0 without keep-alive,
 * or an explicit Connection: close on either side) or start accepting
 * the next request on the persistent connection.
 * NOTE(review): assumes evcon->requests is non-empty — TAILQ_FIRST()'s
 * result is dereferenced without a NULL check.
 */
static void
evhttp_send_done(struct evhttp_connection *evcon, void *arg)
{
	int need_close;
	struct evhttp_request *req = TAILQ_FIRST(&evcon->requests);
	TAILQ_REMOVE(&evcon->requests, req, next);

	/* delete possible close detection events */
	evhttp_connection_stop_detectclose(evcon);

	need_close =
	    (req->minor == 0 &&
		!evhttp_is_connection_keepalive(req->input_headers))||
	    evhttp_is_connection_close(req->flags, req->input_headers) ||
	    evhttp_is_connection_close(req->flags, req->output_headers);

	/* server-side requests own their connection (see
	 * evhttp_associate_new_request_with_connection) */
	assert(req->flags & EVHTTP_REQ_OWN_CONNECTION);
	evhttp_request_free(req);

	if (need_close) {
		evhttp_connection_free(evcon);
		return;
	}

	/* we have a persistent connection; try to accept another request. */
	if (evhttp_associate_new_request_with_connection(evcon) == -1)
		evhttp_connection_free(evcon);
}
+
+/*
+ * Returns an error page.
+ */
+
+void
+evhttp_send_error(struct evhttp_request *req, int error, const char *reason)
+{
+#define ERR_FORMAT "<HTML><HEAD>\n" \
+ "<TITLE>%d %s</TITLE>\n" \
+ "</HEAD><BODY>\n" \
+ "<H1>Method Not Implemented</H1>\n" \
+ "Invalid method in request<P>\n" \
+ "</BODY></HTML>\n"
+
+ struct evbuffer *buf = evbuffer_new();
+
+ /* close the connection on error */
+ evhttp_add_header(req->output_headers, "Connection", "close");
+
+ evhttp_response_code(req, error, reason);
+
+ evbuffer_add_printf(buf, ERR_FORMAT, error, reason);
+
+ evhttp_send_page(req, buf);
+
+ evbuffer_free(buf);
+#undef ERR_FORMAT
+}
+
+/* Requires that headers and response code are already set up */
+
/* Requires that headers and response code are already set up */

/*
 * Append the optional body, serialize status line + headers in front of
 * it, and start the write; evhttp_send_done fires when all data is out.
 * `req` must be the connection's currently active (head) request.
 */
static inline void
evhttp_send(struct evhttp_request *req, struct evbuffer *databuf)
{
	struct evhttp_connection *evcon = req->evcon;

	assert(TAILQ_FIRST(&evcon->requests) == req);

	/* xxx: not sure if we really should expose the data buffer this way */
	if (databuf != NULL)
		evbuffer_add_buffer(req->output_buffer, databuf);

	/* Adds headers to the response */
	evhttp_make_header(evcon, req);

	evhttp_write_buffer(evcon, evhttp_send_done, NULL);
}
+
/* Convenience wrapper: set the response status/reason, then transmit
 * headers plus the optional body in one step. */
void
evhttp_send_reply(struct evhttp_request *req, int code, const char *reason,
    struct evbuffer *databuf)
{
	evhttp_response_code(req, code, reason);
	evhttp_send(req, databuf);
}
+
/*
 * Begin a streamed reply: send status line and headers now, body later
 * via evhttp_send_reply_chunk()/evhttp_send_reply_end().  HTTP/1.1
 * peers get chunked transfer-encoding; older peers get a raw stream.
 */
void
evhttp_send_reply_start(struct evhttp_request *req, int code,
    const char *reason)
{
	evhttp_response_code(req, code, reason);
	if (req->major == 1 && req->minor == 1) {
		/* use chunked encoding for HTTP/1.1 */
		evhttp_add_header(req->output_headers, "Transfer-Encoding",
		    "chunked");
		req->chunked = 1;
	}
	evhttp_make_header(req->evcon, req);
	evhttp_write_buffer(req->evcon, NULL, NULL);
}
+
+void
+evhttp_send_reply_chunk(struct evhttp_request *req, struct evbuffer *databuf)
+{
+ if (req->chunked) {
+ evbuffer_add_printf(req->evcon->output_buffer, "%x\r\n",
+ (unsigned)EVBUFFER_LENGTH(databuf));
+ }
+ evbuffer_add_buffer(req->evcon->output_buffer, databuf);
+ if (req->chunked) {
+ evbuffer_add(req->evcon->output_buffer, "\r\n", 2);
+ }
+ evhttp_write_buffer(req->evcon, NULL, NULL);
+}
+
/*
 * Finish a streamed reply.  Chunked replies get the "0\r\n\r\n"
 * terminator; otherwise evhttp_send_done runs immediately if no write
 * is pending, or is deferred until the outstanding write completes.
 */
void
evhttp_send_reply_end(struct evhttp_request *req)
{
	struct evhttp_connection *evcon = req->evcon;

	if (req->chunked) {
		evbuffer_add(req->evcon->output_buffer, "0\r\n\r\n", 5);
		evhttp_write_buffer(req->evcon, evhttp_send_done, NULL);
		req->chunked = 0;
	} else if (!event_pending(&evcon->ev, EV_WRITE|EV_TIMEOUT, NULL)) {
		/* let the connection know that we are done with the request */
		evhttp_send_done(evcon, NULL);
	} else {
		/* make the callback execute after all data has been written */
		evcon->cb = evhttp_send_done;
		evcon->cb_arg = NULL;
	}
}
+
+void
+evhttp_response_code(struct evhttp_request *req, int code, const char *reason)
+{
+ req->kind = EVHTTP_RESPONSE;
+ req->response_code = code;
+ if (req->response_code_line != NULL)
+ free(req->response_code_line);
+ req->response_code_line = strdup(reason);
+}
+
+void
+evhttp_send_page(struct evhttp_request *req, struct evbuffer *databuf)
+{
+ if (!req->major || !req->minor) {
+ req->major = 1;
+ req->minor = 1;
+ }
+
+ if (req->kind != EVHTTP_RESPONSE)
+ evhttp_response_code(req, 200, "OK");
+
+ evhttp_clear_headers(req->output_headers);
+ evhttp_add_header(req->output_headers, "Content-Type", "text/html");
+ evhttp_add_header(req->output_headers, "Connection", "close");
+
+ evhttp_send(req, databuf);
+}
+
/* Per-byte lookup table for evhttp_encode_uri(): 1 = the character may
 * appear unescaped in a URI, 0 = it must be %XX-escaped.  Covers the
 * alphanumerics plus "!$'()*+,-./_~" (roughly RFC 2396 unreserved plus
 * a few reserved characters — presumably intentional; verify against
 * callers).  All bytes >= 128 are escaped. */
static const char uri_chars[256] = {
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0,
	/* 64 */
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1,
	0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0,
	/* 128 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	/* 192 */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};
+
+/*
+ * Helper functions to encode/decode a URI.
+ * The returned string must be freed by the caller.
+ */
+char *
+evhttp_encode_uri(const char *uri)
+{
+ struct evbuffer *buf = evbuffer_new();
+ char *p;
+
+ for (p = (char *)uri; *p != '\0'; p++) {
+ if (uri_chars[(u_char)(*p)]) {
+ evbuffer_add(buf, p, 1);
+ } else {
+ evbuffer_add_printf(buf, "%%%02X", (u_char)(*p));
+ }
+ }
+ evbuffer_add(buf, "", 1);
+ p = strdup((char *)EVBUFFER_DATA(buf));
+ evbuffer_free(buf);
+
+ return (p);
+}
+
+/*
+ * @param always_decode_plus: when true we transform plus to space even
+ * if we have not seen a ?.
+ */
/*
 * Percent-decode `uri` into `ret` (caller supplies a buffer of at least
 * strlen(uri)+1 bytes).  A '+' becomes a space once a '?' has been
 * seen, or from the very start when always_decode_plus is nonzero.
 * NOTE(review): the `length` parameter is unused — decoding stops at
 * the NUL terminator of `uri`.
 * Returns the number of bytes written, excluding the trailing NUL.
 */
static int
evhttp_decode_uri_internal(
	const char *uri, size_t length, char *ret, int always_decode_plus)
{
	int src, dst;
	int in_query = always_decode_plus;

	dst = 0;
	for (src = 0; uri[src] != '\0'; src++) {
		char ch = uri[src];
		if (ch == '?') {
			in_query = 1;
		} else if (ch == '+' && in_query) {
			ch = ' ';
		} else if (ch == '%' &&
		    isxdigit((unsigned char)uri[src + 1]) &&
		    isxdigit((unsigned char)uri[src + 2])) {
			/* "%XY" with two hex digits: decode one byte */
			char hex[3];
			hex[0] = uri[src + 1];
			hex[1] = uri[src + 2];
			hex[2] = '\0';
			ch = (char)strtol(hex, NULL, 16);
			src += 2;
		}
		ret[dst++] = ch;
	}
	ret[dst] = '\0';

	return (dst);
}
+
/*
 * Decode a percent-encoded URI ('+' is only decoded after a '?').
 * The returned string is heap-allocated; the caller frees it.
 * Exits via event_err() if allocation fails.
 */
char *
evhttp_decode_uri(const char *uri)
{
	size_t size = strlen(uri) + 1;
	char *decoded;

	if ((decoded = malloc(size)) == NULL)
		event_err(1, "%s: malloc(%lu)", __func__,
		    (unsigned long)size);

	evhttp_decode_uri_internal(uri, size - 1,
	    decoded, 0 /*always_decode_plus*/);

	return (decoded);
}
+
+/*
+ * Helper function to parse out arguments in a query.
+ * The arguments are separated by key and value.
+ */
+
/*
 * Split the query part of `uri` into key/value pairs and append them to
 * `headers` (which is (re)initialized here).  Values are URI-decoded
 * with '+' always treated as space; keys are stored verbatim.  Parsing
 * stops at the first argument without an '='.  Exits via event_err()
 * on allocation failure.
 */
void
evhttp_parse_query(const char *uri, struct evkeyvalq *headers)
{
	char *line;
	char *argument;
	char *p;

	TAILQ_INIT(headers);

	/* No arguments - we are done */
	if (strchr(uri, '?') == NULL)
		return;

	/* work on a private copy: strsep() mutates its input */
	if ((line = strdup(uri)) == NULL)
		event_err(1, "%s: strdup", __func__);


	argument = line;

	/* We already know that there has to be a ? */
	strsep(&argument, "?");

	p = argument;
	while (p != NULL && *p != '\0') {
		char *key, *value, *decoded_value;
		argument = strsep(&p, "&");

		value = argument;
		key = strsep(&value, "=");
		/* malformed pair (no '='): stop parsing the rest */
		if (value == NULL)
			goto error;

		if ((decoded_value = malloc(strlen(value) + 1)) == NULL)
			event_err(1, "%s: malloc", __func__);

		evhttp_decode_uri_internal(value, strlen(value),
		    decoded_value, 1 /*always_decode_plus*/);
		event_debug(("Query Param: %s -> %s\n", key, decoded_value));
		/* the header list makes its own copies of key/value */
		evhttp_add_header_internal(headers, key, decoded_value);
		free(decoded_value);
	}

 error:
	free(line);
}
+
+static struct evhttp_cb *
+evhttp_dispatch_callback(struct httpcbq *callbacks, struct evhttp_request *req)
+{
+ struct evhttp_cb *cb;
+ size_t offset = 0;
+
+ /* Test for different URLs */
+ char *p = strchr(req->uri, '?');
+ if (p != NULL)
+ offset = (size_t)(p - req->uri);
+
+ TAILQ_FOREACH(cb, callbacks, next) {
+ int res = 0;
+ if (p == NULL) {
+ res = strcmp(cb->what, req->uri) == 0;
+ } else {
+ res = ((strncmp(cb->what, req->uri, offset) == 0) &&
+ (cb->what[offset] == '\0'));
+ }
+
+ if (res)
+ return (cb);
+ }
+
+ return (NULL);
+}
+
/*
 * Top-level server request router: reject URI-less requests, try a
 * URI-specific callback, then the generic callback, and finally fall
 * back to a built-in 404 page.
 */
static void
evhttp_handle_request(struct evhttp_request *req, void *arg)
{
	struct evhttp *http = arg;
	struct evhttp_cb *cb = NULL;

	if (req->uri == NULL) {
		evhttp_send_error(req, HTTP_BADREQUEST, "Bad Request");
		return;
	}

	if ((cb = evhttp_dispatch_callback(&http->callbacks, req)) != NULL) {
		(*cb->cb)(req, cb->cbarg);
		return;
	}

	/* Generic call back */
	if (http->gencb) {
		(*http->gencb)(req, http->gencbarg);
		return;
	} else {
		/* We need to send a 404 here */
#define ERR_FORMAT "<html><head>" \
		    "<title>404 Not Found</title>" \
		    "</head><body>" \
		    "<h1>Not Found</h1>" \
		    "<p>The requested URL %s was not found on this server.</p>"\
		    "</body></html>\n"

		/* escape the URI so it cannot inject HTML into the page */
		char *escaped_html = evhttp_htmlescape(req->uri);
		struct evbuffer *buf = evbuffer_new();

		evhttp_response_code(req, HTTP_NOTFOUND, "Not Found");

		evbuffer_add_printf(buf, ERR_FORMAT, escaped_html);

		free(escaped_html);

		evhttp_send_page(req, buf);

		evbuffer_free(buf);
#undef ERR_FORMAT
	}
}
+
+static void
+accept_socket(int fd, short what, void *arg)
+{
+ struct evhttp *http = arg;
+ struct sockaddr_storage ss;
+ socklen_t addrlen = sizeof(ss);
+ int nfd;
+
+ if ((nfd = accept(fd, (struct sockaddr *)&ss, &addrlen)) == -1) {
+ if (errno != EAGAIN && errno != EINTR)
+ event_warn("%s: bad accept", __func__);
+ return;
+ }
+ if (evutil_make_socket_nonblocking(nfd) < 0)
+ return;
+
+ evhttp_get_request(http, nfd, (struct sockaddr *)&ss, addrlen);
+}
+
+int
+evhttp_bind_socket(struct evhttp *http, const char *address, u_short port)
+{
+ int fd;
+ int res;
+
+ if ((fd = bind_socket(address, port, 1 /*reuse*/)) == -1)
+ return (-1);
+
+ if (listen(fd, 128) == -1) {
+ event_warn("%s: listen", __func__);
+ EVUTIL_CLOSESOCKET(fd);
+ return (-1);
+ }
+
+ res = evhttp_accept_socket(http, fd);
+
+ if (res != -1)
+ event_debug(("Bound to port %d - Awaiting connections ... ",
+ port));
+
+ return (res);
+}
+
/*
 * Register an already-listening descriptor with the http server: a
 * persistent read event dispatches into accept_socket().  On success
 * the server tracks the socket in http->sockets (and closes it in
 * evhttp_free); on failure -1 is returned and the caller keeps
 * ownership of fd.
 */
int
evhttp_accept_socket(struct evhttp *http, int fd)
{
	struct evhttp_bound_socket *bound;
	struct event *ev;
	int res;

	bound = malloc(sizeof(struct evhttp_bound_socket));
	if (bound == NULL)
		return (-1);

	ev = &bound->bind_ev;

	/* Schedule the socket for accepting */
	event_set(ev, fd, EV_READ | EV_PERSIST, accept_socket, http);
	EVHTTP_BASE_SET(http, ev);

	res = event_add(ev, NULL);

	if (res == -1) {
		free(bound);
		return (-1);
	}

	TAILQ_INSERT_TAIL(&http->sockets, bound, next);

	return (0);
}
+
+static struct evhttp*
+evhttp_new_object(void)
+{
+ struct evhttp *http = NULL;
+
+ if ((http = calloc(1, sizeof(struct evhttp))) == NULL) {
+ event_warn("%s: calloc", __func__);
+ return (NULL);
+ }
+
+ http->timeout = -1;
+
+ TAILQ_INIT(&http->sockets);
+ TAILQ_INIT(&http->callbacks);
+ TAILQ_INIT(&http->connections);
+
+ return (http);
+}
+
+struct evhttp *
+evhttp_new(struct event_base *base)
+{
+ struct evhttp *http = evhttp_new_object();
+
+ http->base = base;
+
+ return (http);
+}
+
+/*
+ * Start a web server on the specified address and port.
+ */
+
+struct evhttp *
+evhttp_start(const char *address, u_short port)
+{
+ struct evhttp *http = evhttp_new_object();
+
+ if (evhttp_bind_socket(http, address, port) == -1) {
+ free(http);
+ return (NULL);
+ }
+
+ return (http);
+}
+
/*
 * Tear down an http server: unregister and close all listening
 * sockets, free all live connections, release all URI callbacks, then
 * the server object itself.
 */
void
evhttp_free(struct evhttp* http)
{
	struct evhttp_cb *http_cb;
	struct evhttp_connection *evcon;
	struct evhttp_bound_socket *bound;
	int fd;

	/* Remove the accepting part */
	while ((bound = TAILQ_FIRST(&http->sockets)) != NULL) {
		TAILQ_REMOVE(&http->sockets, bound, next);

		fd = bound->bind_ev.ev_fd;
		event_del(&bound->bind_ev);
		EVUTIL_CLOSESOCKET(fd);

		free(bound);
	}

	while ((evcon = TAILQ_FIRST(&http->connections)) != NULL) {
		/* evhttp_connection_free removes the connection */
		evhttp_connection_free(evcon);
	}

	while ((http_cb = TAILQ_FIRST(&http->callbacks)) != NULL) {
		TAILQ_REMOVE(&http->callbacks, http_cb, next);
		free(http_cb->what);
		free(http_cb);
	}

	free(http);
}
+
+void
+evhttp_set_timeout(struct evhttp* http, int timeout_in_secs)
+{
+ http->timeout = timeout_in_secs;
+}
+
+void
+evhttp_set_cb(struct evhttp *http, const char *uri,
+ void (*cb)(struct evhttp_request *, void *), void *cbarg)
+{
+ struct evhttp_cb *http_cb;
+
+ if ((http_cb = calloc(1, sizeof(struct evhttp_cb))) == NULL)
+ event_err(1, "%s: calloc", __func__);
+
+ http_cb->what = strdup(uri);
+ http_cb->cb = cb;
+ http_cb->cbarg = cbarg;
+
+ TAILQ_INSERT_TAIL(&http->callbacks, http_cb, next);
+}
+
+int
+evhttp_del_cb(struct evhttp *http, const char *uri)
+{
+ struct evhttp_cb *http_cb;
+
+ TAILQ_FOREACH(http_cb, &http->callbacks, next) {
+ if (strcmp(http_cb->what, uri) == 0)
+ break;
+ }
+ if (http_cb == NULL)
+ return (-1);
+
+ TAILQ_REMOVE(&http->callbacks, http_cb, next);
+ free(http_cb->what);
+ free(http_cb);
+
+ return (0);
+}
+
+void
+evhttp_set_gencb(struct evhttp *http,
+ void (*cb)(struct evhttp_request *, void *), void *cbarg)
+{
+ http->gencb = cb;
+ http->gencbarg = cbarg;
+}
+
+/*
+ * Request related functions
+ */
+
/*
 * Allocate a request object with empty header lists and I/O buffers.
 * `cb`/`arg` are invoked when the request completes.  Returns NULL on
 * allocation failure; the goto-cleanup path releases whatever was
 * already allocated via evhttp_request_free().
 */
struct evhttp_request *
evhttp_request_new(void (*cb)(struct evhttp_request *, void *), void *arg)
{
	struct evhttp_request *req = NULL;

	/* Allocate request structure */
	if ((req = calloc(1, sizeof(struct evhttp_request))) == NULL) {
		event_warn("%s: calloc", __func__);
		goto error;
	}

	/* default kind; overwritten when the request is actually issued */
	req->kind = EVHTTP_RESPONSE;
	req->input_headers = calloc(1, sizeof(struct evkeyvalq));
	if (req->input_headers == NULL) {
		event_warn("%s: calloc", __func__);
		goto error;
	}
	TAILQ_INIT(req->input_headers);

	req->output_headers = calloc(1, sizeof(struct evkeyvalq));
	if (req->output_headers == NULL) {
		event_warn("%s: calloc", __func__);
		goto error;
	}
	TAILQ_INIT(req->output_headers);

	if ((req->input_buffer = evbuffer_new()) == NULL) {
		event_warn("%s: evbuffer_new", __func__);
		goto error;
	}

	if ((req->output_buffer = evbuffer_new()) == NULL) {
		event_warn("%s: evbuffer_new", __func__);
		goto error;
	}

	req->cb = cb;
	req->cb_arg = arg;

	return (req);

 error:
	if (req != NULL)
		evhttp_request_free(req);
	return (NULL);
}
+
+void
+evhttp_request_free(struct evhttp_request *req)
+{
+ if (req->remote_host != NULL)
+ free(req->remote_host);
+ if (req->uri != NULL)
+ free(req->uri);
+ if (req->response_code_line != NULL)
+ free(req->response_code_line);
+
+ evhttp_clear_headers(req->input_headers);
+ free(req->input_headers);
+
+ evhttp_clear_headers(req->output_headers);
+ free(req->output_headers);
+
+ if (req->input_buffer != NULL)
+ evbuffer_free(req->input_buffer);
+
+ if (req->output_buffer != NULL)
+ evbuffer_free(req->output_buffer);
+
+ free(req);
+}
+
+void
+evhttp_request_set_chunked_cb(struct evhttp_request *req,
+ void (*cb)(struct evhttp_request *, void *))
+{
+ req->chunk_cb = cb;
+}
+
+/*
+ * Allows for inspection of the request URI
+ */
+
+const char *
+evhttp_request_uri(struct evhttp_request *req) {
+ if (req->uri == NULL)
+ event_debug(("%s: request %p has no uri\n", __func__, req));
+ return (req->uri);
+}
+
+/*
+ * Takes a file descriptor to read a request from.
+ * The callback is executed once the whole request has been read.
+ */
+
+static struct evhttp_connection*
+evhttp_get_request_connection(
+ struct evhttp* http,
+ int fd, struct sockaddr *sa, socklen_t salen)
+{
+ struct evhttp_connection *evcon;
+ char *hostname = NULL, *portname = NULL;
+
+ name_from_addr(sa, salen, &hostname, &portname);
+ if (hostname == NULL || portname == NULL) {
+ if (hostname) free(hostname);
+ if (portname) free(portname);
+ return (NULL);
+ }
+
+ event_debug(("%s: new request from %s:%s on %d\n",
+ __func__, hostname, portname, fd));
+
+ /* we need a connection object to put the http request on */
+ evcon = evhttp_connection_new(hostname, atoi(portname));
+ free(hostname);
+ free(portname);
+ if (evcon == NULL)
+ return (NULL);
+
+ /* associate the base if we have one*/
+ evhttp_connection_set_base(evcon, http->base);
+
+ evcon->flags |= EVHTTP_CON_INCOMING;
+ evcon->state = EVCON_READING_FIRSTLINE;
+
+ evcon->fd = fd;
+
+ return (evcon);
+}
+
+static int
+evhttp_associate_new_request_with_connection(struct evhttp_connection *evcon)
+{
+ struct evhttp *http = evcon->http_server;
+ struct evhttp_request *req;
+ if ((req = evhttp_request_new(evhttp_handle_request, http)) == NULL)
+ return (-1);
+
+ req->evcon = evcon; /* the request ends up owning the connection */
+ req->flags |= EVHTTP_REQ_OWN_CONNECTION;
+
+ TAILQ_INSERT_TAIL(&evcon->requests, req, next);
+
+ req->kind = EVHTTP_REQUEST;
+
+ if ((req->remote_host = strdup(evcon->address)) == NULL)
+ event_err(1, "%s: strdup", __func__);
+ req->remote_port = evcon->port;
+
+ evhttp_start_read(evcon);
+
+ return (0);
+}
+
+void
+evhttp_get_request(struct evhttp *http, int fd,
+ struct sockaddr *sa, socklen_t salen)
+{
+ struct evhttp_connection *evcon;
+
+ evcon = evhttp_get_request_connection(http, fd, sa, salen);
+ if (evcon == NULL)
+ return;
+
+ /* the timeout can be used by the server to close idle connections */
+ if (http->timeout != -1)
+ evhttp_connection_set_timeout(evcon, http->timeout);
+
+ /*
+ * if we want to accept more than one request on a connection,
+ * we need to know which http server it belongs to.
+ */
+ evcon->http_server = http;
+ TAILQ_INSERT_TAIL(&http->connections, evcon, next);
+
+ if (evhttp_associate_new_request_with_connection(evcon) == -1)
+ evhttp_connection_free(evcon);
+}
+
+
+/*
+ * Network helper functions that we do not want to export to the rest of
+ * the world.
+ */
#if 0 /* Unused */
/*
 * Resolve `address` into an addrinfo list (AF_INET, SOCK_RAW).
 * Compiled out; kept for reference only.  The caller would own the
 * returned list (freeaddrinfo).  May return NULL on resolution errors.
 */
static struct addrinfo *
addr_from_name(char *address)
{
#ifdef HAVE_GETADDRINFO
        struct addrinfo ai, *aitop;
        int ai_result;

        memset(&ai, 0, sizeof(ai));
        ai.ai_family = AF_INET;
        ai.ai_socktype = SOCK_RAW;
        ai.ai_flags = 0;
        if ((ai_result = getaddrinfo(address, NULL, &ai, &aitop)) != 0) {
                if ( ai_result == EAI_SYSTEM )
                        event_warn("getaddrinfo");
                else
                        event_warnx("getaddrinfo: %s", gai_strerror(ai_result));
        }

        return (aitop);
#else
        assert(0);
        return NULL; /* XXXXX Use gethostbyname, if this function is ever used. */
#endif
}
#endif
+
/*
 * Convert a sockaddr into numeric host and service strings.  On
 * success *phost and *pport receive strdup'd strings owned by the
 * caller; on failure they are left untouched (callers initialize them
 * to NULL and test for that).
 * NOTE(review): the strdup() results themselves are unchecked here;
 * an OOM would surface as NULL outputs at the caller.
 */
static void
name_from_addr(struct sockaddr *sa, socklen_t salen,
    char **phost, char **pport)
{
	char ntop[NI_MAXHOST];
	char strport[NI_MAXSERV];
	int ni_result;

#ifdef HAVE_GETNAMEINFO
	ni_result = getnameinfo(sa, salen,
		ntop, sizeof(ntop), strport, sizeof(strport),
		NI_NUMERICHOST|NI_NUMERICSERV);

	if (ni_result != 0) {
		if (ni_result == EAI_SYSTEM)
			event_err(1, "getnameinfo failed");
		else
			event_errx(1, "getnameinfo failed: %s", gai_strerror(ni_result));
		return;
	}
#else
	ni_result = fake_getnameinfo(sa, salen,
		ntop, sizeof(ntop), strport, sizeof(strport),
		NI_NUMERICHOST|NI_NUMERICSERV);
	if (ni_result != 0)
		return;
#endif
	*phost = strdup(ntop);
	*pport = strdup(strport);
}
+
+/* Create a non-blocking socket and bind it */
+/* todo: rename this function */
/* Create a non-blocking socket and bind it */
/* todo: rename this function */
/*
 * Create a non-blocking, close-on-exec TCP socket with SO_KEEPALIVE
 * (and optionally SO_REUSEADDR), bound to `ai` when non-NULL.
 * Returns the descriptor, or -1 with the socket error preserved in
 * errno via EVUTIL_SET_SOCKET_ERROR on the cleanup path.
 */
static int
bind_socket_ai(struct addrinfo *ai, int reuse)
{
	int fd, on = 1, r;
	int serrno;

	/* Create listen socket */
	fd = socket(AF_INET, SOCK_STREAM, 0);
	if (fd == -1) {
		event_warn("socket");
		return (-1);
	}

	if (evutil_make_socket_nonblocking(fd) < 0)
		goto out;

#ifndef WIN32
	/* F_SETFD with 1 sets FD_CLOEXEC */
	if (fcntl(fd, F_SETFD, 1) == -1) {
		event_warn("fcntl(F_SETFD)");
		goto out;
	}
#endif

	setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (void *)&on, sizeof(on));
	if (reuse) {
		setsockopt(fd, SOL_SOCKET, SO_REUSEADDR,
		    (void *)&on, sizeof(on));
	}

	if (ai != NULL) {
		r = bind(fd, ai->ai_addr, ai->ai_addrlen);
		if (r == -1)
			goto out;
	}

	return (fd);

 out:
	serrno = EVUTIL_SOCKET_ERROR();
	EVUTIL_CLOSESOCKET(fd);
	EVUTIL_SET_SOCKET_ERROR(serrno);
	return (-1);
}
+
/*
 * Resolve address:port into an addrinfo (AF_INET, SOCK_STREAM).  With
 * getaddrinfo the caller frees the result via freeaddrinfo(); without
 * it a pointer into rotating static storage is returned (valid for at
 * most two concurrent calls — see the `cur` toggle).  Returns NULL on
 * resolution failure.
 */
static struct addrinfo *
make_addrinfo(const char *address, u_short port)
{
        struct addrinfo *aitop = NULL;

#ifdef HAVE_GETADDRINFO
        struct addrinfo ai;
        char strport[NI_MAXSERV];
        int ai_result;

        memset(&ai, 0, sizeof(ai));
        ai.ai_family = AF_INET;
        ai.ai_socktype = SOCK_STREAM;
        ai.ai_flags = AI_PASSIVE;  /* turn NULL host name into INADDR_ANY */
        evutil_snprintf(strport, sizeof(strport), "%d", port);
        if ((ai_result = getaddrinfo(address, strport, &ai, &aitop)) != 0) {
                if ( ai_result == EAI_SYSTEM )
                        event_warn("getaddrinfo");
                else
                        event_warnx("getaddrinfo: %s", gai_strerror(ai_result));
                return (NULL);
        }
#else
        static int cur;
        static struct addrinfo ai[2]; /* We will be returning the address of some of this memory so it has to last even after this call. */
        if (++cur == 2) cur = 0;   /* allow calling this function twice */

        if (fake_getaddrinfo(address, &ai[cur]) < 0) {
                event_warn("fake_getaddrinfo");
                return (NULL);
        }
        aitop = &ai[cur];
        ((struct sockaddr_in *) aitop->ai_addr)->sin_port = htons(port);
#endif

        return (aitop);
}
+
/*
 * Create a non-blocking socket bound to address:port (or an unbound
 * socket when both are unset, used for outgoing connections).
 * Returns the descriptor or -1.
 */
static int
bind_socket(const char *address, u_short port, int reuse)
{
	int fd;
	struct addrinfo *aitop = NULL;

	/* just create an unbound socket */
	if (address == NULL && port == 0)
		return bind_socket_ai(NULL, 0);

	aitop = make_addrinfo(address, port);

	if (aitop == NULL)
		return (-1);

	fd = bind_socket_ai(aitop, reuse);

	/* release the resolver result with the matching deallocator */
#ifdef HAVE_GETADDRINFO
	freeaddrinfo(aitop);
#else
	fake_freeaddrinfo(aitop);
#endif

	return (fd);
}
+
/*
 * Start a non-blocking connect on `fd` towards address:port.
 * Returns 0 when the connect succeeded or is in progress
 * (EINPROGRESS / WSAEWOULDBLOCK-family errors), -1 on a hard failure
 * or when the address cannot be resolved.
 */
static int
socket_connect(int fd, const char *address, unsigned short port)
{
	struct addrinfo *ai = make_addrinfo(address, port);
	int res = -1;

	if (ai == NULL) {
		event_debug(("%s: make_addrinfo: \"%s:%d\"",
			__func__, address, port));
		return (-1);
	}

	if (connect(fd, ai->ai_addr, ai->ai_addrlen) == -1) {
#ifdef WIN32
		int tmp_error = WSAGetLastError();
		if (tmp_error != WSAEWOULDBLOCK && tmp_error != WSAEINVAL &&
		    tmp_error != WSAEINPROGRESS) {
			goto out;
		}
#else
		/* a non-blocking connect normally "fails" with EINPROGRESS */
		if (errno != EINPROGRESS) {
			goto out;
		}
#endif
	}

	/* everything is fine */
	res = 0;

out:
#ifdef HAVE_GETADDRINFO
	freeaddrinfo(ai);
#else
	fake_freeaddrinfo(ai);
#endif

	return (res);
}
diff --git a/libevent/kqueue.c b/libevent/kqueue.c
new file mode 100644
index 00000000000..36eebe5fc6e
--- /dev/null
+++ b/libevent/kqueue.c
@@ -0,0 +1,449 @@
+/* $OpenBSD: kqueue.c,v 1.5 2002/07/10 14:41:31 art Exp $ */
+
+/*
+ * Copyright 2000-2002 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <sys/types.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <sys/_time.h>
+#endif
+#include <sys/queue.h>
+#include <sys/event.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#include <assert.h>
+#ifdef HAVE_INTTYPES_H
+#include <inttypes.h>
+#endif
+
+/* Some platforms apparently define the udata field of struct kevent as
+ * intptr_t, whereas others define it as void*. There doesn't seem to be an
+ * easy way to tell them apart via autoconf, so we need to use OS macros. */
+#if defined(HAVE_INTTYPES_H) && !defined(__OpenBSD__) && !defined(__FreeBSD__) && !defined(__darwin__) && !defined(__APPLE__)
+#define PTR_TO_UDATA(x) ((intptr_t)(x))
+#else
+#define PTR_TO_UDATA(x) (x)
+#endif
+
+#include "event.h"
+#include "event-internal.h"
+#include "log.h"
+#include "event-internal.h"
+
+#define EVLIST_X_KQINKERNEL 0x1000
+
+#define NEVENT 64
+
+/* Per-event_base state for the kqueue back-end. */
+struct kqop {
+ struct kevent *changes;  /* changes queued for the next kevent() */
+ int nchanges;
+ struct kevent *events;   /* result buffer filled by kevent() */
+ struct event_list evsigevents[NSIG]; /* events registered per signal */
+ int nevents;             /* capacity of both arrays */
+ int kq;                  /* the kqueue descriptor */
+ pid_t pid;               /* process that created kq (see kq_dealloc) */
+};
+
+static void *kq_init (struct event_base *);
+static int kq_add (void *, struct event *);
+static int kq_del (void *, struct event *);
+static int kq_dispatch (struct event_base *, void *, struct timeval *);
+static int kq_insert (struct kqop *, struct kevent *);
+static void kq_dealloc (struct event_base *, void *);
+
+/* Dispatch table exported to the event core; the trailing 1 flags
+ * that this back-end needs re-initialization after fork(). */
+const struct eventop kqops = {
+ "kqueue",
+ kq_init,
+ kq_add,
+ kq_del,
+ kq_dispatch,
+ kq_dealloc,
+ 1 /* need reinit */
+};
+
+/* Back-end constructor: create the kqueue, allocate the change/result
+ * arrays and per-signal event lists, and probe for the broken Mac OS X
+ * kqueue implementation.  Returns the new kqop, or NULL on failure
+ * (all partially acquired resources are released). */
+static void *
+kq_init(struct event_base *base)
+{
+ int i, kq;
+ struct kqop *kqueueop;
+
+ /* Disable kqueue when this environment variable is set */
+ if (getenv("EVENT_NOKQUEUE"))
+  return (NULL);
+
+ if (!(kqueueop = calloc(1, sizeof(struct kqop))))
+  return (NULL);
+
+ /* Initalize the kernel queue */
+
+ if ((kq = kqueue()) == -1) {
+  event_warn("kqueue");
+  free (kqueueop);
+  return (NULL);
+ }
+
+ kqueueop->kq = kq;
+
+ /* remember our pid so kq_dealloc() won't close a descriptor that a
+  * forked child inherited from the parent */
+ kqueueop->pid = getpid();
+
+ /* Initalize fields */
+ kqueueop->changes = malloc(NEVENT * sizeof(struct kevent));
+ if (kqueueop->changes == NULL) {
+  close (kq);  /* don't leak the kqueue descriptor */
+  free (kqueueop);
+  return (NULL);
+ }
+ kqueueop->events = malloc(NEVENT * sizeof(struct kevent));
+ if (kqueueop->events == NULL) {
+  close (kq);  /* don't leak the kqueue descriptor */
+  free (kqueueop->changes);
+  free (kqueueop);
+  return (NULL);
+ }
+ kqueueop->nevents = NEVENT;
+
+ /* we need to keep track of multiple events per signal */
+ for (i = 0; i < NSIG; ++i) {
+  TAILQ_INIT(&kqueueop->evsigevents[i]);
+ }
+
+ /* Check for Mac OS X kqueue bug. */
+ kqueueop->changes[0].ident = -1;
+ kqueueop->changes[0].filter = EVFILT_READ;
+ kqueueop->changes[0].flags = EV_ADD;
+ /*
+  * If kqueue works, then kevent will succeed, and it will
+  * stick an error in events[0].  If kqueue is broken, then
+  * kevent will fail.
+  */
+ if (kevent(kq,
+     kqueueop->changes, 1, kqueueop->events, NEVENT, NULL) != 1 ||
+     kqueueop->events[0].ident != -1 ||
+     kqueueop->events[0].flags != EV_ERROR) {
+  event_warn("%s: detected broken kqueue; not using.", __func__);
+  free(kqueueop->changes);
+  free(kqueueop->events);
+  free(kqueueop);
+  close(kq);
+  return (NULL);
+ }
+
+ return (kqueueop);
+}
+
+/* Append KEV to the pending change list, doubling both the change and
+ * result arrays when full.  Returns 0, or -1 when growing fails (the
+ * previously allocated arrays remain valid). */
+static int
+kq_insert(struct kqop *kqop, struct kevent *kev)
+{
+ int nevents = kqop->nevents;
+
+ if (kqop->nchanges == nevents) {
+  struct kevent *newchange;
+  struct kevent *newresult;
+
+  nevents *= 2;
+
+  newchange = realloc(kqop->changes,
+      nevents * sizeof(struct kevent));
+  if (newchange == NULL) {
+   /* the failing call is realloc, not malloc */
+   event_warn("%s: realloc", __func__);
+   return (-1);
+  }
+  kqop->changes = newchange;
+
+  newresult = realloc(kqop->events,
+       nevents * sizeof(struct kevent));
+
+  /*
+   * If we fail, we don't have to worry about freeing,
+   * the next realloc will pick it up.
+   */
+  if (newresult == NULL) {
+   event_warn("%s: realloc", __func__);
+   return (-1);
+  }
+  kqop->events = newresult;
+
+  kqop->nevents = nevents;
+ }
+
+ memcpy(&kqop->changes[kqop->nchanges++], kev, sizeof(struct kevent));
+
+ event_debug(("%s: fd %d %s%s",
+  __func__, (int)kev->ident,
+  kev->filter == EVFILT_READ ? "EVFILT_READ" : "EVFILT_WRITE",
+  kev->flags == EV_DELETE ? " (del)" : ""));
+
+ return (0);
+}
+
+/* Installed for signals tracked via EVFILT_SIGNAL: the kernel filter
+ * reports the signal, so the handler itself has nothing to do. */
+static void
+kq_sighandler(int sig)
+{
+ /* Do nothing here */
+}
+
+/* Flush queued changes and wait (at most TV; NULL means forever) for
+ * kernel events, then activate the matching struct events.  Returns 0,
+ * or -1 on a non-EINTR kevent() failure. */
+static int
+kq_dispatch(struct event_base *base, void *arg, struct timeval *tv)
+{
+ struct kqop *kqop = arg;
+ struct kevent *changes = kqop->changes;
+ struct kevent *events = kqop->events;
+ struct event *ev;
+ struct timespec ts, *ts_p = NULL;
+ int i, res;
+
+ if (tv != NULL) {
+  TIMEVAL_TO_TIMESPEC(tv, &ts);
+  ts_p = &ts;
+ }
+
+ res = kevent(kqop->kq, changes, kqop->nchanges,
+     events, kqop->nevents, ts_p);
+ /* the change list was consumed by the kernel (even on error) */
+ kqop->nchanges = 0;
+ if (res == -1) {
+  if (errno != EINTR) {
+   event_warn("kevent");
+   return (-1);
+  }
+
+  return (0);
+ }
+
+ event_debug(("%s: kevent reports %d", __func__, res));
+
+ for (i = 0; i < res; i++) {
+  int which = 0;
+
+  if (events[i].flags & EV_ERROR) {
+   /*
+    * Error messages that can happen, when a delete fails.
+    * EBADF happens when the file discriptor has been
+    * closed,
+    * ENOENT when the file discriptor was closed and
+    * then reopened.
+    * EINVAL for some reasons not understood; EINVAL
+    * should not be returned ever; but FreeBSD does :-\
+    * An error is also indicated when a callback deletes
+    * an event we are still processing.  In that case
+    * the data field is set to ENOENT.
+    */
+   if (events[i].data == EBADF ||
+       events[i].data == EINVAL ||
+       events[i].data == ENOENT)
+    continue;
+   errno = events[i].data;
+   return (-1);
+  }
+
+  if (events[i].filter == EVFILT_READ) {
+   which |= EV_READ;
+  } else if (events[i].filter == EVFILT_WRITE) {
+   which |= EV_WRITE;
+  } else if (events[i].filter == EVFILT_SIGNAL) {
+   which |= EV_SIGNAL;
+  }
+
+  if (!which)
+   continue;
+
+  if (events[i].filter == EVFILT_SIGNAL) {
+   /* udata was set by kq_add() to the per-signal event
+    * list; activate every event waiting on this signal,
+    * passing the occurrence count in events[i].data */
+   struct event_list *head =
+       (struct event_list *)events[i].udata;
+   TAILQ_FOREACH(ev, head, ev_signal_next) {
+    event_active(ev, which, events[i].data);
+   }
+  } else {
+   ev = (struct event *)events[i].udata;
+
+   /* non-persistent fd events were added EV_ONESHOT,
+    * so they are no longer in the kernel after firing */
+   if (!(ev->ev_events & EV_PERSIST))
+    ev->ev_flags &= ~EVLIST_X_KQINKERNEL;
+
+   event_active(ev, which, 1);
+  }
+ }
+
+ return (0);
+}
+
+
+/* Register EV's interest (signal, read and/or write) with the kqueue.
+ * Signal events are pushed into the kernel immediately (and a dummy
+ * handler installed); fd events are queued via kq_insert() and flushed
+ * by the next kq_dispatch().  Returns 0 on success, -1 on error. */
+static int
+kq_add(void *arg, struct event *ev)
+{
+ struct kqop *kqop = arg;
+ struct kevent kev;
+
+ if (ev->ev_events & EV_SIGNAL) {
+  int nsignal = EVENT_SIGNAL(ev);
+
+  assert(nsignal >= 0 && nsignal < NSIG);
+  if (TAILQ_EMPTY(&kqop->evsigevents[nsignal])) {
+   struct timespec timeout = { 0, 0 };
+
+   memset(&kev, 0, sizeof(kev));
+   kev.ident = nsignal;
+   kev.filter = EVFILT_SIGNAL;
+   kev.flags = EV_ADD;
+   /* dispatch resolves udata back to this list */
+   kev.udata = PTR_TO_UDATA(&kqop->evsigevents[nsignal]);
+
+   /* Be ready for the signal if it is sent any
+    * time between now and the next call to
+    * kq_dispatch. */
+   if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
+    return (-1);
+
+   if (_evsignal_set_handler(ev->ev_base, nsignal,
+    kq_sighandler) == -1)
+    return (-1);
+  }
+
+  TAILQ_INSERT_TAIL(&kqop->evsigevents[nsignal], ev,
+      ev_signal_next);
+  ev->ev_flags |= EVLIST_X_KQINKERNEL;
+  return (0);
+ }
+
+ if (ev->ev_events & EV_READ) {
+  memset(&kev, 0, sizeof(kev));
+  kev.ident = ev->ev_fd;
+  kev.filter = EVFILT_READ;
+#ifdef NOTE_EOF
+  /* Make it behave like select() and poll() */
+  kev.fflags = NOTE_EOF;
+#endif
+  kev.flags = EV_ADD;
+  /* one-shot unless the caller asked for a persistent event */
+  if (!(ev->ev_events & EV_PERSIST))
+   kev.flags |= EV_ONESHOT;
+  kev.udata = PTR_TO_UDATA(ev);
+
+  if (kq_insert(kqop, &kev) == -1)
+   return (-1);
+
+  ev->ev_flags |= EVLIST_X_KQINKERNEL;
+ }
+
+ if (ev->ev_events & EV_WRITE) {
+  memset(&kev, 0, sizeof(kev));
+  kev.ident = ev->ev_fd;
+  kev.filter = EVFILT_WRITE;
+  kev.flags = EV_ADD;
+  if (!(ev->ev_events & EV_PERSIST))
+   kev.flags |= EV_ONESHOT;
+  kev.udata = PTR_TO_UDATA(ev);
+
+  if (kq_insert(kqop, &kev) == -1)
+   return (-1);
+
+  ev->ev_flags |= EVLIST_X_KQINKERNEL;
+ }
+
+ return (0);
+}
+
+/* Unregister EV from the kqueue.  A no-op unless the event is marked
+ * as currently known to the kernel (EVLIST_X_KQINKERNEL).  Signal
+ * deletions go to the kernel immediately (mirroring kq_add); fd
+ * deletions are queued via kq_insert().  Returns 0 or -1. */
+static int
+kq_del(void *arg, struct event *ev)
+{
+ struct kqop *kqop = arg;
+ struct kevent kev;
+
+ if (!(ev->ev_flags & EVLIST_X_KQINKERNEL))
+  return (0);
+
+ if (ev->ev_events & EV_SIGNAL) {
+  int nsignal = EVENT_SIGNAL(ev);
+  struct timespec timeout = { 0, 0 };
+
+  assert(nsignal >= 0 && nsignal < NSIG);
+  TAILQ_REMOVE(&kqop->evsigevents[nsignal], ev, ev_signal_next);
+  /* only drop the kernel filter once no event wants the signal */
+  if (TAILQ_EMPTY(&kqop->evsigevents[nsignal])) {
+   memset(&kev, 0, sizeof(kev));
+   kev.ident = nsignal;
+   kev.filter = EVFILT_SIGNAL;
+   kev.flags = EV_DELETE;
+
+   /* Because we insert signal events
+    * immediately, we need to delete them
+    * immediately, too */
+   if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
+    return (-1);
+
+   if (_evsignal_restore_handler(ev->ev_base,
+    nsignal) == -1)
+    return (-1);
+  }
+
+  ev->ev_flags &= ~EVLIST_X_KQINKERNEL;
+  return (0);
+ }
+
+ if (ev->ev_events & EV_READ) {
+  memset(&kev, 0, sizeof(kev));
+  kev.ident = ev->ev_fd;
+  kev.filter = EVFILT_READ;
+  kev.flags = EV_DELETE;
+
+  if (kq_insert(kqop, &kev) == -1)
+   return (-1);
+
+  ev->ev_flags &= ~EVLIST_X_KQINKERNEL;
+ }
+
+ if (ev->ev_events & EV_WRITE) {
+  memset(&kev, 0, sizeof(kev));
+  kev.ident = ev->ev_fd;
+  kev.filter = EVFILT_WRITE;
+  kev.flags = EV_DELETE;
+
+  if (kq_insert(kqop, &kev) == -1)
+   return (-1);
+
+  ev->ev_flags &= ~EVLIST_X_KQINKERNEL;
+ }
+
+ return (0);
+}
+
+/* Back-end destructor: free the change/result arrays and the kqop.
+ * The kqueue fd is closed only when the current pid matches the one
+ * recorded in kq_init(), so a forked child never closes a descriptor
+ * shared with its parent. */
+static void
+kq_dealloc(struct event_base *base, void *arg)
+{
+ struct kqop *kqop = arg;
+
+ if (kqop->changes)
+  free(kqop->changes);
+ if (kqop->events)
+  free(kqop->events);
+ if (kqop->kq >= 0 && kqop->pid == getpid())
+  close(kqop->kq);
+ memset(kqop, 0, sizeof(struct kqop));
+ free(kqop);
+}
diff --git a/libevent/log.c b/libevent/log.c
new file mode 100644
index 00000000000..b62a61915d1
--- /dev/null
+++ b/libevent/log.c
@@ -0,0 +1,187 @@
+/* $OpenBSD: err.c,v 1.2 2002/06/25 15:50:15 mickey Exp $ */
+
+/*
+ * log.c
+ *
+ * Based on err.c, which was adapted from OpenBSD libc *err* *warn* code.
+ *
+ * Copyright (c) 2005 Nick Mathewson <nickm@freehaven.net>
+ *
+ * Copyright (c) 2000 Dug Song <dugsong@monkey.org>
+ *
+ * Copyright (c) 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#ifdef WIN32
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+#endif
+#include <sys/types.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <sys/_time.h>
+#endif
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <errno.h>
+#include "event.h"
+
+#include "log.h"
+#include "evutil.h"
+
+static void _warn_helper(int severity, int log_errno, const char *fmt,
+ va_list ap);
+static void event_log(int severity, const char *msg);
+
+/* Log an error with ": strerror(errno)" appended and exit(eval). */
+void
+event_err(int eval, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ _warn_helper(_EVENT_LOG_ERR, errno, fmt, ap);
+ va_end(ap);
+ exit(eval);
+}
+
+/* Log a warning with ": strerror(errno)" appended. */
+void
+event_warn(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ _warn_helper(_EVENT_LOG_WARN, errno, fmt, ap);
+ va_end(ap);
+}
+
+/* Log an error (no errno text) and exit(eval). */
+void
+event_errx(int eval, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ _warn_helper(_EVENT_LOG_ERR, -1, fmt, ap);
+ va_end(ap);
+ exit(eval);
+}
+
+/* Log a warning without errno text. */
+void
+event_warnx(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ _warn_helper(_EVENT_LOG_WARN, -1, fmt, ap);
+ va_end(ap);
+}
+
+/* Log an informational message. */
+void
+event_msgx(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ _warn_helper(_EVENT_LOG_MSG, -1, fmt, ap);
+ va_end(ap);
+}
+
+/* Debug-level logging; normally reached via the event_debug() macro. */
+void
+_event_debugx(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ _warn_helper(_EVENT_LOG_DEBUG, -1, fmt, ap);
+ va_end(ap);
+}
+
+/* Shared body of the logging entry points: format FMT/AP into a local
+ * buffer, append ": strerror(log_errno)" when log_errno >= 0, and hand
+ * the finished line to event_log().  Messages are truncated to 1023
+ * characters. */
+static void
+_warn_helper(int severity, int log_errno, const char *fmt, va_list ap)
+{
+ char buf[1024];
+ size_t len;
+
+ if (fmt != NULL)
+  evutil_vsnprintf(buf, sizeof(buf), fmt, ap);
+ else
+  buf[0] = '\0';
+
+ if (log_errno >= 0) {
+  len = strlen(buf);
+  /* only append when there is room for at least ": x" */
+  if (len < sizeof(buf) - 3) {
+   evutil_snprintf(buf + len, sizeof(buf) - len, ": %s",
+       strerror(log_errno));
+  }
+ }
+
+ event_log(severity, buf);
+}
+
+/* User-installed log sink; NULL means "write to stderr" (default). */
+static event_log_cb log_fn = NULL;
+
+/* Redirect all libevent log output to CB; pass NULL to restore the
+ * default stderr behaviour. */
+void
+event_set_log_callback(event_log_cb cb)
+{
+ log_fn = cb;
+}
+
+/* Deliver one finished log line: either to the user callback set via
+ * event_set_log_callback(), or to stderr prefixed with a severity tag. */
+static void
+event_log(int severity, const char *msg)
+{
+ if (log_fn)
+  log_fn(severity, msg);
+ else {
+  const char *severity_str;
+  switch (severity) {
+  case _EVENT_LOG_DEBUG:
+   severity_str = "debug";
+   break;
+  case _EVENT_LOG_MSG:
+   severity_str = "msg";
+   break;
+  case _EVENT_LOG_WARN:
+   severity_str = "warn";
+   break;
+  case _EVENT_LOG_ERR:
+   severity_str = "err";
+   break;
+  default:
+   severity_str = "???";
+   break;
+  }
+  (void)fprintf(stderr, "[%s] %s\n", severity_str, msg);
+ }
+}
diff --git a/libevent/log.h b/libevent/log.h
new file mode 100644
index 00000000000..7bc6632b8dd
--- /dev/null
+++ b/libevent/log.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _LOG_H_
+#define _LOG_H_
+
+/* EV_CHECK_FMT(a,b): on GCC, ask the compiler to type-check printf
+ * style arguments (format string is parameter a, varargs start at b). */
+#ifdef __GNUC__
+#define EV_CHECK_FMT(a,b) __attribute__((format(printf, a, b)))
+#else
+#define EV_CHECK_FMT(a,b)
+#endif
+
+void event_err(int eval, const char *fmt, ...) EV_CHECK_FMT(2,3);
+void event_warn(const char *fmt, ...) EV_CHECK_FMT(1,2);
+void event_errx(int eval, const char *fmt, ...) EV_CHECK_FMT(2,3);
+void event_warnx(const char *fmt, ...) EV_CHECK_FMT(1,2);
+void event_msgx(const char *fmt, ...) EV_CHECK_FMT(1,2);
+void _event_debugx(const char *fmt, ...) EV_CHECK_FMT(1,2);
+
+/* event_debug takes a double-parenthesized argument list so the whole
+ * call can compile away to nothing when USE_DEBUG is off. */
+#ifdef USE_DEBUG
+#define event_debug(x) _event_debugx x
+#else
+#define event_debug(x) do {;} while (0)
+#endif
+
+/* the macro is internal to this header's declarations */
+#undef EV_CHECK_FMT
+
+#endif
diff --git a/libevent/min_heap.h b/libevent/min_heap.h
new file mode 100644
index 00000000000..edaa5ae1270
--- /dev/null
+++ b/libevent/min_heap.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2006 Maxim Yegorushkin <maxim.yegorushkin@gmail.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _MIN_HEAP_H_
+#define _MIN_HEAP_H_
+
+#include "event.h"
+#include "evutil.h"
+/* system header: use angle brackets so the standard include path is
+ * searched instead of the current directory */
+#include <stdlib.h>
+
+/* Binary min-heap of struct event pointers ordered by ev_timeout.
+ * p = element array, n = element count, a = allocated capacity. */
+typedef struct min_heap
+{
+ struct event** p;
+ unsigned n, a;
+} min_heap_t;
+
+static inline void min_heap_ctor(min_heap_t* s);
+static inline void min_heap_dtor(min_heap_t* s);
+static inline void min_heap_elem_init(struct event* e);
+static inline int min_heap_elem_greater(struct event *a, struct event *b);
+static inline int min_heap_empty(min_heap_t* s);
+static inline unsigned min_heap_size(min_heap_t* s);
+static inline struct event* min_heap_top(min_heap_t* s);
+static inline int min_heap_reserve(min_heap_t* s, unsigned n);
+static inline int min_heap_push(min_heap_t* s, struct event* e);
+static inline struct event* min_heap_pop(min_heap_t* s);
+static inline int min_heap_erase(min_heap_t* s, struct event* e);
+static inline void min_heap_shift_up_(min_heap_t* s, unsigned hole_index, struct event* e);
+static inline void min_heap_shift_down_(min_heap_t* s, unsigned hole_index, struct event* e);
+
+/* Heap ordering: A sorts after B when its timeout is later. */
+int min_heap_elem_greater(struct event *a, struct event *b)
+{
+ return evutil_timercmp(&a->ev_timeout, &b->ev_timeout, >);
+}
+
+/* Trivial accessors; min_heap_idx == -1 marks "not on any heap". */
+void min_heap_ctor(min_heap_t* s) { s->p = 0; s->n = 0; s->a = 0; }
+void min_heap_dtor(min_heap_t* s) { free(s->p); }
+void min_heap_elem_init(struct event* e) { e->min_heap_idx = -1; }
+int min_heap_empty(min_heap_t* s) { return 0u == s->n; }
+unsigned min_heap_size(min_heap_t* s) { return s->n; }
+struct event* min_heap_top(min_heap_t* s) { return s->n ? *s->p : 0; }
+
+/* Insert E, growing storage if needed.  Returns 0, or -1 on OOM. */
+int min_heap_push(min_heap_t* s, struct event* e)
+{
+ if(min_heap_reserve(s, s->n + 1))
+  return -1;
+ min_heap_shift_up_(s, s->n++, e);
+ return 0;
+}
+
+/* Remove and return the event with the earliest timeout, or NULL when
+ * the heap is empty.  The removed event's min_heap_idx is reset. */
+struct event* min_heap_pop(min_heap_t* s)
+{
+ if(s->n)
+ {
+  struct event* e = *s->p;
+  /* fill the hole at the root with the last element */
+  min_heap_shift_down_(s, 0u, s->p[--s->n]);
+  e->min_heap_idx = -1;
+  return e;
+ }
+ return 0;
+}
+
+/* Remove E from an arbitrary position.  Returns 0, or -1 when E is not
+ * on the heap (min_heap_idx == (unsigned)-1). */
+int min_heap_erase(min_heap_t* s, struct event* e)
+{
+ if(((unsigned int)-1) != e->min_heap_idx)
+ {
+  struct event *last = s->p[--s->n];
+  unsigned parent = (e->min_heap_idx - 1) / 2;
+  /* we replace e with the last element in the heap.  We might need to
+     shift it upward if it is less than its parent, or downward if it is
+     greater than one or both its children. Since the children are known
+     to be less than the parent, it can't need to shift both up and
+     down. */
+  if (e->min_heap_idx > 0 && min_heap_elem_greater(s->p[parent], last))
+   min_heap_shift_up_(s, e->min_heap_idx, last);
+  else
+   min_heap_shift_down_(s, e->min_heap_idx, last);
+  e->min_heap_idx = -1;
+  return 0;
+ }
+ return -1;
+}
+
+/* Ensure capacity for at least N elements (grows by doubling, minimum
+ * 8).  Returns 0, or -1 when realloc fails (heap left untouched). */
+int min_heap_reserve(min_heap_t* s, unsigned n)
+{
+ if(s->a < n)
+ {
+  struct event** p;
+  unsigned a = s->a ? s->a * 2 : 8;
+  if(a < n)
+   a = n;
+  if(!(p = (struct event**)realloc(s->p, a * sizeof *p)))
+   return -1;
+  s->p = p;
+  s->a = a;
+ }
+ return 0;
+}
+
+/* Bubble E up from HOLE_INDEX until its parent is not greater, keeping
+ * each moved element's min_heap_idx in sync. */
+void min_heap_shift_up_(min_heap_t* s, unsigned hole_index, struct event* e)
+{
+ unsigned parent = (hole_index - 1) / 2;
+ while(hole_index && min_heap_elem_greater(s->p[parent], e))
+ {
+  (s->p[hole_index] = s->p[parent])->min_heap_idx = hole_index;
+  hole_index = parent;
+  parent = (hole_index - 1) / 2;
+ }
+ (s->p[hole_index] = e)->min_heap_idx = hole_index;
+}
+
+/* Sink E down from HOLE_INDEX: repeatedly swap the hole with its
+ * smaller child while that child is smaller than E, then place E via a
+ * final shift-up (which also fixes min_heap_idx). */
+void min_heap_shift_down_(min_heap_t* s, unsigned hole_index, struct event* e)
+{
+ unsigned min_child = 2 * (hole_index + 1);
+ while(min_child <= s->n)
+ {
+  /* step back to the left child when there is no right child or the
+     right child is the larger of the two */
+  min_child -= min_child == s->n || min_heap_elem_greater(s->p[min_child], s->p[min_child - 1]);
+  if(!(min_heap_elem_greater(e, s->p[min_child])))
+   break;
+  (s->p[hole_index] = s->p[min_child])->min_heap_idx = hole_index;
+  hole_index = min_child;
+  min_child = 2 * (hole_index + 1);
+ }
+ min_heap_shift_up_(s, hole_index, e);
+}
+
+#endif /* _MIN_HEAP_H_ */
diff --git a/libevent/poll.c b/libevent/poll.c
new file mode 100644
index 00000000000..5d496618d29
--- /dev/null
+++ b/libevent/poll.c
@@ -0,0 +1,379 @@
+/* $OpenBSD: poll.c,v 1.2 2002/06/25 15:50:15 mickey Exp $ */
+
+/*
+ * Copyright 2000-2003 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <sys/types.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <sys/_time.h>
+#endif
+#include <sys/queue.h>
+#include <poll.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#ifdef CHECK_INVARIANTS
+#include <assert.h>
+#endif
+
+#include "event.h"
+#include "event-internal.h"
+#include "evsignal.h"
+#include "log.h"
+
+/* Per-event_base state for the poll(2) back-end. */
+struct pollop {
+ int event_count;  /* Highest number alloc */
+ int nfds;   /* Size of event_* */
+ int fd_count;   /* Size of idxplus1_by_fd */
+ struct pollfd *event_set;
+ struct event **event_r_back; /* read event owning each pollfd slot */
+ struct event **event_w_back; /* write event owning each pollfd slot */
+ int *idxplus1_by_fd; /* Index into event_set by fd; we add 1 so
+    * that 0 (which is easy to memset) can mean
+    * "no entry." */
+};
+
+static void *poll_init (struct event_base *);
+static int poll_add  (void *, struct event *);
+static int poll_del  (void *, struct event *);
+static int poll_dispatch (struct event_base *, void *, struct timeval *);
+static void poll_dealloc (struct event_base *, void *);
+
+/* Dispatch table exported to the event core; poll needs no reinit
+ * after fork (trailing 0). */
+const struct eventop pollops = {
+ "poll",
+ poll_init,
+ poll_add,
+ poll_del,
+ poll_dispatch,
+ poll_dealloc,
+ 0
+};
+
+/* Back-end constructor: allocate zeroed pollop state and set up signal
+ * handling.  Returns NULL on OOM or when EVENT_NOPOLL is set. */
+static void *
+poll_init(struct event_base *base)
+{
+ struct pollop *pollop;
+
+ /* Disable poll when this environment variable is set */
+ if (getenv("EVENT_NOPOLL"))
+  return (NULL);
+
+ if (!(pollop = calloc(1, sizeof(struct pollop))))
+  return (NULL);
+
+ evsignal_init(base);
+
+ return (pollop);
+}
+
+#ifdef CHECK_INVARIANTS
+/* Debug-only consistency check: every fd-to-index mapping must point
+ * at a pollfd for that fd, and the back-pointers must agree with the
+ * requested POLLIN/POLLOUT flags.  Compiled to a no-op otherwise. */
+static void
+poll_check_ok(struct pollop *pop)
+{
+ int i, idx;
+ struct event *ev;
+
+ for (i = 0; i < pop->fd_count; ++i) {
+  idx = pop->idxplus1_by_fd[i]-1;
+  if (idx < 0)
+   continue;
+  assert(pop->event_set[idx].fd == i);
+  if (pop->event_set[idx].events & POLLIN) {
+   ev = pop->event_r_back[idx];
+   assert(ev);
+   assert(ev->ev_events & EV_READ);
+   assert(ev->ev_fd == i);
+  }
+  if (pop->event_set[idx].events & POLLOUT) {
+   ev = pop->event_w_back[idx];
+   assert(ev);
+   assert(ev->ev_events & EV_WRITE);
+   assert(ev->ev_fd == i);
+  }
+ }
+ for (i = 0; i < pop->nfds; ++i) {
+  struct pollfd *pfd = &pop->event_set[i];
+  assert(pop->idxplus1_by_fd[pfd->fd] == i+1);
+ }
+}
+#else
+#define poll_check_ok(pop)
+#endif
+
+/* Wait (at most TV; NULL = forever) in poll(2) and activate the events
+ * whose fds became ready.  Returns 0, or -1 on a non-EINTR failure. */
+static int
+poll_dispatch(struct event_base *base, void *arg, struct timeval *tv)
+{
+ int res, i, j, msec = -1, nfds;
+ struct pollop *pop = arg;
+
+ poll_check_ok(pop);
+
+ /* round the timeout up to whole milliseconds;
+  * NOTE(review): tv_sec * 1000 is int arithmetic and could overflow
+  * for extremely large timeouts — confirm callers bound tv */
+ if (tv != NULL)
+  msec = tv->tv_sec * 1000 + (tv->tv_usec + 999) / 1000;
+
+ nfds = pop->nfds;
+ res = poll(pop->event_set, nfds, msec);
+
+ if (res == -1) {
+  if (errno != EINTR) {
+   event_warn("poll");
+   return (-1);
+  }
+
+  /* interrupted: deliver any pending signals instead */
+  evsignal_process(base);
+  return (0);
+ } else if (base->sig.evsignal_caught) {
+  evsignal_process(base);
+ }
+
+ event_debug(("%s: poll reports %d", __func__, res));
+
+ if (res == 0 || nfds == 0)
+  return (0);
+
+ /* start the scan at a random slot so low-numbered fds are not
+  * systematically serviced first */
+ i = random() % nfds;
+ for (j = 0; j < nfds; j++) {
+  struct event *r_ev = NULL, *w_ev = NULL;
+  int what;
+  if (++i == nfds)
+   i = 0;
+  what = pop->event_set[i].revents;
+
+  if (!what)
+   continue;
+
+  res = 0;
+
+  /* If the file gets closed notify */
+  if (what & (POLLHUP|POLLERR))
+   what |= POLLIN|POLLOUT;
+  if (what & POLLIN) {
+   res |= EV_READ;
+   r_ev = pop->event_r_back[i];
+  }
+  if (what & POLLOUT) {
+   res |= EV_WRITE;
+   w_ev = pop->event_w_back[i];
+  }
+  if (res == 0)
+   continue;
+
+  if (r_ev && (res & r_ev->ev_events)) {
+   event_active(r_ev, res & r_ev->ev_events, 1);
+  }
+  if (w_ev && w_ev != r_ev && (res & w_ev->ev_events)) {
+   event_active(w_ev, res & w_ev->ev_events, 1);
+  }
+ }
+
+ return (0);
+}
+
+/* Register EV with the poll back-end.  Signal events are delegated to
+ * evsignal_add(); fd events get (or share) a pollfd slot, growing the
+ * slot arrays and the fd-index table as needed.  Returns 0 or -1. */
+static int
+poll_add(void *arg, struct event *ev)
+{
+ struct pollop *pop = arg;
+ struct pollfd *pfd = NULL;
+ int i;
+
+ if (ev->ev_events & EV_SIGNAL)
+  return (evsignal_add(ev));
+ if (!(ev->ev_events & (EV_READ|EV_WRITE)))
+  return (0);
+
+ poll_check_ok(pop);
+ /* grow the pollfd/back-pointer arrays (doubling, minimum 32) */
+ if (pop->nfds + 1 >= pop->event_count) {
+  struct pollfd *tmp_event_set;
+  struct event **tmp_event_r_back;
+  struct event **tmp_event_w_back;
+  int tmp_event_count;
+
+  if (pop->event_count < 32)
+   tmp_event_count = 32;
+  else
+   tmp_event_count = pop->event_count * 2;
+
+  /* We need more file descriptors */
+  tmp_event_set = realloc(pop->event_set,
+     tmp_event_count * sizeof(struct pollfd));
+  if (tmp_event_set == NULL) {
+   event_warn("realloc");
+   return (-1);
+  }
+  pop->event_set = tmp_event_set;
+
+  tmp_event_r_back = realloc(pop->event_r_back,
+     tmp_event_count * sizeof(struct event *));
+  if (tmp_event_r_back == NULL) {
+   /* event_set overallocated; that's okay. */
+   event_warn("realloc");
+   return (-1);
+  }
+  pop->event_r_back = tmp_event_r_back;
+
+  tmp_event_w_back = realloc(pop->event_w_back,
+     tmp_event_count * sizeof(struct event *));
+  if (tmp_event_w_back == NULL) {
+   /* event_set and event_r_back overallocated; that's
+    * okay. */
+   event_warn("realloc");
+   return (-1);
+  }
+  pop->event_w_back = tmp_event_w_back;
+
+  pop->event_count = tmp_event_count;
+ }
+ /* grow the fd -> slot-index table until it covers ev_fd; new
+  * entries are zeroed, meaning "no slot" */
+ if (ev->ev_fd >= pop->fd_count) {
+  int *tmp_idxplus1_by_fd;
+  int new_count;
+  if (pop->fd_count < 32)
+   new_count = 32;
+  else
+   new_count = pop->fd_count * 2;
+  while (new_count <= ev->ev_fd)
+   new_count *= 2;
+  tmp_idxplus1_by_fd =
+   realloc(pop->idxplus1_by_fd, new_count * sizeof(int));
+  if (tmp_idxplus1_by_fd == NULL) {
+   event_warn("realloc");
+   return (-1);
+  }
+  pop->idxplus1_by_fd = tmp_idxplus1_by_fd;
+  memset(pop->idxplus1_by_fd + pop->fd_count,
+      0, sizeof(int)*(new_count - pop->fd_count));
+  pop->fd_count = new_count;
+ }
+
+ /* reuse the existing slot for this fd, or claim a new one */
+ i = pop->idxplus1_by_fd[ev->ev_fd] - 1;
+ if (i >= 0) {
+  pfd = &pop->event_set[i];
+ } else {
+  i = pop->nfds++;
+  pfd = &pop->event_set[i];
+  pfd->events = 0;
+  pfd->fd = ev->ev_fd;
+  pop->event_w_back[i] = pop->event_r_back[i] = NULL;
+  pop->idxplus1_by_fd[ev->ev_fd] = i + 1;
+ }
+
+ pfd->revents = 0;
+ if (ev->ev_events & EV_WRITE) {
+  pfd->events |= POLLOUT;
+  pop->event_w_back[i] = ev;
+ }
+ if (ev->ev_events & EV_READ) {
+  pfd->events |= POLLIN;
+  pop->event_r_back[i] = ev;
+ }
+ poll_check_ok(pop);
+
+ return (0);
+}
+
+/*
+ * Unregister EV.  Clears the POLLIN/POLLOUT bits it owned; when no
+ * other event still uses the fd's slot, the slot is released and the
+ * last slot is moved down to keep event_set dense.
+ */
+
+static int
+poll_del(void *arg, struct event *ev)
+{
+ struct pollop *pop = arg;
+ struct pollfd *pfd = NULL;
+ int i;
+
+ if (ev->ev_events & EV_SIGNAL)
+  return (evsignal_del(ev));
+
+ if (!(ev->ev_events & (EV_READ|EV_WRITE)))
+  return (0);
+
+ poll_check_ok(pop);
+ i = pop->idxplus1_by_fd[ev->ev_fd] - 1;
+ if (i < 0)
+  return (-1);
+
+ /* Do we still want to read or write? */
+ pfd = &pop->event_set[i];
+ if (ev->ev_events & EV_READ) {
+  pfd->events &= ~POLLIN;
+  pop->event_r_back[i] = NULL;
+ }
+ if (ev->ev_events & EV_WRITE) {
+  pfd->events &= ~POLLOUT;
+  pop->event_w_back[i] = NULL;
+ }
+ poll_check_ok(pop);
+ if (pfd->events)
+  /* Another event cares about that fd. */
+  return (0);
+
+ /* Okay, so we aren't interested in that fd anymore. */
+ pop->idxplus1_by_fd[ev->ev_fd] = 0;
+
+ --pop->nfds;
+ if (i != pop->nfds) {
+  /*
+   * Shift the last pollfd down into the now-unoccupied
+   * position.
+   */
+  memcpy(&pop->event_set[i], &pop->event_set[pop->nfds],
+         sizeof(struct pollfd));
+  pop->event_r_back[i] = pop->event_r_back[pop->nfds];
+  pop->event_w_back[i] = pop->event_w_back[pop->nfds];
+  pop->idxplus1_by_fd[pop->event_set[i].fd] = i + 1;
+ }
+
+ poll_check_ok(pop);
+ return (0);
+}
+
+/* Back-end destructor: tear down signal handling and free all arrays
+ * plus the pollop itself. */
+static void
+poll_dealloc(struct event_base *base, void *arg)
+{
+ struct pollop *pop = arg;
+
+ evsignal_dealloc(base);
+ if (pop->event_set)
+  free(pop->event_set);
+ if (pop->event_r_back)
+  free(pop->event_r_back);
+ if (pop->event_w_back)
+  free(pop->event_w_back);
+ if (pop->idxplus1_by_fd)
+  free(pop->idxplus1_by_fd);
+
+ memset(pop, 0, sizeof(struct pollop));
+ free(pop);
+}
diff --git a/libevent/sample/Makefile.am b/libevent/sample/Makefile.am
new file mode 100644
index 00000000000..2f4e26e2f3f
--- /dev/null
+++ b/libevent/sample/Makefile.am
@@ -0,0 +1,14 @@
+AUTOMAKE_OPTIONS = foreign no-dependencies
+
+LDADD = ../libevent.la
+AM_CFLAGS = -I$(top_srcdir) -I$(top_srcdir)/compat
+
+noinst_PROGRAMS = event-test time-test signal-test
+
+# Automake per-program variables are case-sensitive: source lists must be
+# spelled <prog>_SOURCES; the lowercase "_sources" spelling is silently
+# ignored and automake falls back to its default source-file guess.
+event_test_SOURCES = event-test.c
+time_test_SOURCES = time-test.c
+signal_test_SOURCES = signal-test.c
+
+verify:
+
+DISTCLEANFILES = *~
diff --git a/libevent/sample/event-test.c b/libevent/sample/event-test.c
new file mode 100644
index 00000000000..2c6cb93864c
--- /dev/null
+++ b/libevent/sample/event-test.c
@@ -0,0 +1,139 @@
+/*
+ * Compile with:
+ * cc -I/usr/local/include -o event-test event-test.c -L/usr/local/lib -levent
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifndef WIN32
+#include <sys/queue.h>
+#include <unistd.h>
+#include <sys/time.h>
+#else
+#include <windows.h>
+#endif
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+
+#include <event.h>
+
+/*
+ * Read callback for the fifo: re-arms itself, reads what is available
+ * and echoes it to stdout.  arg is the struct event registered on fd.
+ */
+static void
+fifo_read(int fd, short event, void *arg)
+{
+	char buf[255];
+	int len;
+	struct event *ev = arg;
+#ifdef WIN32
+	DWORD dwBytesRead;
+#endif
+
+	/* Reschedule this event */
+	event_add(ev, NULL);
+
+	fprintf(stderr, "fifo_read called with fd: %d, event: %d, arg: %p\n",
+	    fd, event, arg);
+#ifdef WIN32
+	len = ReadFile((HANDLE)fd, buf, sizeof(buf) - 1, &dwBytesRead, NULL);
+
+	// Check for end of file: ReadFile succeeded but returned 0 bytes.
+	if(len && dwBytesRead == 0) {
+		fprintf(stderr, "End Of File");
+		event_del(ev);
+		return;
+	}
+
+	buf[dwBytesRead] = '\0';
+#else
+	len = read(fd, buf, sizeof(buf) - 1);
+
+	if (len == -1) {
+		perror("read");
+		return;
+	} else if (len == 0) {
+		fprintf(stderr, "Connection closed\n");
+		return;
+	}
+
+	/* NUL-terminate so the buffer can be printed as a string. */
+	buf[len] = '\0';
+#endif
+	fprintf(stdout, "Read: %s\n", buf);
+}
+
+/*
+ * Sample entry point: create a named fifo (a plain file on win32),
+ * register a read event on it and dump whatever gets written to it.
+ */
+int
+main (int argc, char **argv)
+{
+	struct event evfifo;
+#ifdef WIN32
+	HANDLE socket;
+	// Open a file.
+	socket = CreateFile("test.txt",	// open File
+			GENERIC_READ,	// open for reading
+			0,	// do not share
+			NULL,	// no security
+			OPEN_EXISTING,	// existing file only
+			FILE_ATTRIBUTE_NORMAL,	// normal file
+			NULL);	// no attr. template
+
+	if(socket == INVALID_HANDLE_VALUE)
+		return 1;
+
+#else
+	struct stat st;
+	const char *fifo = "event.fifo";
+	int socket;
+
+	/* Refuse to reuse a regular file left over under the fifo's name. */
+	if (lstat (fifo, &st) == 0) {
+		if ((st.st_mode & S_IFMT) == S_IFREG) {
+			errno = EEXIST;
+			perror("lstat");
+			exit (1);
+		}
+	}
+
+	unlink (fifo);
+	if (mkfifo (fifo, 0600) == -1) {
+		perror("mkfifo");
+		exit (1);
+	}
+
+	/* Linux pipes are broken, we need O_RDWR instead of O_RDONLY */
+#ifdef __linux
+	socket = open (fifo, O_RDWR | O_NONBLOCK, 0);
+#else
+	socket = open (fifo, O_RDONLY | O_NONBLOCK, 0);
+#endif
+
+	if (socket == -1) {
+		perror("open");
+		exit (1);
+	}
+
+	fprintf(stderr, "Write data to %s\n", fifo);
+#endif
+	/* Initialize the event library */
+	event_init();
+
+	/* Initialize one event; the event itself is the callback argument */
+#ifdef WIN32
+	event_set(&evfifo, (int)socket, EV_READ, fifo_read, &evfifo);
+#else
+	event_set(&evfifo, socket, EV_READ, fifo_read, &evfifo);
+#endif
+
+	/* Add it to the active events, without a timeout */
+	event_add(&evfifo, NULL);
+
+	event_dispatch();
+#ifdef WIN32
+	CloseHandle(socket);
+#endif
+	return (0);
+}
+
diff --git a/libevent/sample/signal-test.c b/libevent/sample/signal-test.c
new file mode 100644
index 00000000000..9a131cb50c2
--- /dev/null
+++ b/libevent/sample/signal-test.c
@@ -0,0 +1,63 @@
+/*
+ * Compile with:
+ * cc -I/usr/local/include -o signal-test \
+ * signal-test.c -L/usr/local/lib -levent
+ */
+
+#include <sys/types.h>
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <sys/stat.h>
+#ifndef WIN32
+#include <sys/queue.h>
+#include <unistd.h>
+#include <sys/time.h>
+#else
+#include <windows.h>
+#endif
+#include <signal.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+
+#include <event.h>
+
+int called = 0;
+
+/*
+ * SIGINT callback: log the signal, and once it has already fired twice
+ * delete the (persistent) event so event_dispatch() can return.
+ */
+static void
+signal_cb(int fd, short event, void *arg)
+{
+	struct event *signal = arg;
+
+	printf("%s: got signal %d\n", __func__, EVENT_SIGNAL(signal));
+
+	if (called >= 2)
+		event_del(signal);
+
+	called++;
+}
+
+int
+main (int argc, char **argv)
+{
+	struct event signal_int;
+
+	/* Initialize the event library */
+	event_init();
+
+	/* Persistent SIGINT event; the event is its own callback argument */
+	event_set(&signal_int, SIGINT, EV_SIGNAL|EV_PERSIST, signal_cb,
+	    &signal_int);
+
+	event_add(&signal_int, NULL);
+
+	/* Runs until signal_cb deletes the last registered event. */
+	event_dispatch();
+
+	return (0);
+}
+
diff --git a/libevent/sample/time-test.c b/libevent/sample/time-test.c
new file mode 100644
index 00000000000..069d4f8f783
--- /dev/null
+++ b/libevent/sample/time-test.c
@@ -0,0 +1,70 @@
+/*
+ * Compile with:
+ * cc -I/usr/local/include -o time-test time-test.c -L/usr/local/lib -levent
+ */
+
+#include <sys/types.h>
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <sys/stat.h>
+#ifndef WIN32
+#include <sys/queue.h>
+#include <unistd.h>
+#endif
+#include <time.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+
+#include <event.h>
+#include <evutil.h>
+
+int lasttime;
+
+/*
+ * Timer callback: print the seconds elapsed since the previous firing
+ * and re-arm itself for another 2 seconds.  arg is the timer event.
+ */
+static void
+timeout_cb(int fd, short event, void *arg)
+{
+	struct timeval tv;
+	struct event *timeout = arg;
+	/* time_t narrowed to int; only used for printing small deltas here */
+	int newtime = time(NULL);
+
+	printf("%s: called at %d: %d\n", __func__, newtime,
+	    newtime - lasttime);
+	lasttime = newtime;
+
+	evutil_timerclear(&tv);
+	tv.tv_sec = 2;
+	event_add(timeout, &tv);
+}
+
+int
+main (int argc, char **argv)
+{
+	struct event timeout;
+	struct timeval tv;
+
+	/* Initialize the event library */
+	event_init();
+
+	/* One-shot timer; timeout_cb re-arms it on every firing */
+	evtimer_set(&timeout, timeout_cb, &timeout);
+
+	evutil_timerclear(&tv);
+	tv.tv_sec = 2;
+	event_add(&timeout, &tv);
+
+	lasttime = time(NULL);
+
+	event_dispatch();
+
+	return (0);
+}
+
diff --git a/libevent/select.c b/libevent/select.c
new file mode 100644
index 00000000000..ca6639fd829
--- /dev/null
+++ b/libevent/select.c
@@ -0,0 +1,356 @@
+/* $OpenBSD: select.c,v 1.2 2002/06/25 15:50:15 mickey Exp $ */
+
+/*
+ * Copyright 2000-2002 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <sys/types.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#else
+#include <sys/_time.h>
+#endif
+#ifdef HAVE_SYS_SELECT_H
+#include <sys/select.h>
+#endif
+#include <sys/queue.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+#ifdef CHECK_INVARIANTS
+#include <assert.h>
+#endif
+
+#include "event.h"
+#include "event-internal.h"
+#include "evsignal.h"
+#include "log.h"
+
+#ifndef howmany
+#define howmany(x, y) (((x)+((y)-1))/(y))
+#endif
+
+struct selectop {
+	int event_fds;		/* Highest fd in fd set */
+	int event_fdsz;		/* Size in bytes of each fd_set below */
+	fd_set *event_readset_in;	/* master set: fds with read interest */
+	fd_set *event_writeset_in;	/* master set: fds with write interest */
+	fd_set *event_readset_out;	/* scratch copy handed to select() */
+	fd_set *event_writeset_out;	/* scratch copy handed to select() */
+	struct event **event_r_by_fd;	/* read event registered per fd */
+	struct event **event_w_by_fd;	/* write event registered per fd */
+};
+
+static void *select_init (struct event_base *);
+static int select_add (void *, struct event *);
+static int select_del (void *, struct event *);
+static int select_dispatch (struct event_base *, void *, struct timeval *);
+static void select_dealloc (struct event_base *, void *);
+
+const struct eventop selectops = {
+ "select",
+ select_init,
+ select_add,
+ select_del,
+ select_dispatch,
+ select_dealloc,
+ 0
+};
+
+static int select_resize(struct selectop *sop, int fdsz);
+
+/*
+ * Allocate and initialize the select backend state.  Returns NULL when
+ * select is disabled via the environment or allocation fails.
+ */
+static void *
+select_init(struct event_base *base)
+{
+	struct selectop *sop;
+
+	/* Disable select when this environment variable is set */
+	if (getenv("EVENT_NOSELECT"))
+		return (NULL);
+
+	if (!(sop = calloc(1, sizeof(struct selectop))))
+		return (NULL);
+
+	/*
+	 * Start with room for 32 fds.  Do not ignore a resize failure:
+	 * returning a backend whose fd_sets are NULL would crash the
+	 * first dispatch.  select_resize() only commits each pointer
+	 * after its realloc succeeds, so freeing all members is safe.
+	 */
+	if (select_resize(sop, howmany(32 + 1, NFDBITS)*sizeof(fd_mask))) {
+		free(sop->event_readset_in);
+		free(sop->event_writeset_in);
+		free(sop->event_readset_out);
+		free(sop->event_writeset_out);
+		free(sop->event_r_by_fd);
+		free(sop->event_w_by_fd);
+		free(sop);
+		return (NULL);
+	}
+
+	evsignal_init(base);
+
+	return (sop);
+}
+
+#ifdef CHECK_INVARIANTS
+/*
+ * Debug-only consistency check: every fd set in the "in" masks must have
+ * a matching back-pointer in event_r_by_fd/event_w_by_fd, and vice versa.
+ */
+static void
+check_selectop(struct selectop *sop)
+{
+	int i;
+	for (i = 0; i <= sop->event_fds; ++i) {
+		if (FD_ISSET(i, sop->event_readset_in)) {
+			assert(sop->event_r_by_fd[i]);
+			assert(sop->event_r_by_fd[i]->ev_events & EV_READ);
+			assert(sop->event_r_by_fd[i]->ev_fd == i);
+		} else {
+			assert(! sop->event_r_by_fd[i]);
+		}
+		if (FD_ISSET(i, sop->event_writeset_in)) {
+			assert(sop->event_w_by_fd[i]);
+			assert(sop->event_w_by_fd[i]->ev_events & EV_WRITE);
+			assert(sop->event_w_by_fd[i]->ev_fd == i);
+		} else {
+			assert(! sop->event_w_by_fd[i]);
+		}
+	}
+
+}
+#else
+/* The checks compile away to nothing in release builds. */
+#define check_selectop(sop) do { (void) sop; } while (0)
+#endif
+
+/*
+ * Run one select() iteration: copy the master "in" fd_sets into the
+ * "out" scratch sets, wait, then activate events whose fds became ready.
+ * Returns 0 on success, -1 on a non-EINTR select error.
+ */
+static int
+select_dispatch(struct event_base *base, void *arg, struct timeval *tv)
+{
+	int res, i, j;
+	struct selectop *sop = arg;
+
+	check_selectop(sop);
+
+	/* select() mutates its sets, so work on copies of the masters. */
+	memcpy(sop->event_readset_out, sop->event_readset_in,
+	    sop->event_fdsz);
+	memcpy(sop->event_writeset_out, sop->event_writeset_in,
+	    sop->event_fdsz);
+
+	res = select(sop->event_fds + 1, sop->event_readset_out,
+	    sop->event_writeset_out, NULL, tv);
+
+	check_selectop(sop);
+
+	if (res == -1) {
+		if (errno != EINTR) {
+			event_warn("select");
+			return (-1);
+		}
+
+		/* Interrupted by a signal: deliver pending signal events. */
+		evsignal_process(base);
+		return (0);
+	} else if (base->sig.evsignal_caught) {
+		evsignal_process(base);
+	}
+
+	event_debug(("%s: select reports %d", __func__, res));
+
+	check_selectop(sop);
+	/* Start the scan at a random fd so low fds are not always favored. */
+	i = random() % (sop->event_fds+1);
+	for (j = 0; j <= sop->event_fds; ++j) {
+		struct event *r_ev = NULL, *w_ev = NULL;
+		if (++i >= sop->event_fds+1)
+			i = 0;
+
+		res = 0;
+		if (FD_ISSET(i, sop->event_readset_out)) {
+			r_ev = sop->event_r_by_fd[i];
+			res |= EV_READ;
+		}
+		if (FD_ISSET(i, sop->event_writeset_out)) {
+			w_ev = sop->event_w_by_fd[i];
+			res |= EV_WRITE;
+		}
+		if (r_ev && (res & r_ev->ev_events)) {
+			event_active(r_ev, res & r_ev->ev_events, 1);
+		}
+		/* Avoid activating the same event twice for READ|WRITE. */
+		if (w_ev && w_ev != r_ev && (res & w_ev->ev_events)) {
+			event_active(w_ev, res & w_ev->ev_events, 1);
+		}
+	}
+	check_selectop(sop);
+
+	return (0);
+}
+
+
+/*
+ * Grow every fd_set to fdsz bytes and the back-pointer arrays to the
+ * matching number of fds.  Each sop pointer is committed only after its
+ * realloc succeeds, so on failure the old buffers remain valid.
+ * Returns 0 on success, -1 on allocation failure.
+ */
+static int
+select_resize(struct selectop *sop, int fdsz)
+{
+	int n_events, n_events_old;
+
+	fd_set *readset_in = NULL;
+	fd_set *writeset_in = NULL;
+	fd_set *readset_out = NULL;
+	fd_set *writeset_out = NULL;
+	struct event **r_by_fd = NULL;
+	struct event **w_by_fd = NULL;
+
+	n_events = (fdsz/sizeof(fd_mask)) * NFDBITS;
+	n_events_old = (sop->event_fdsz/sizeof(fd_mask)) * NFDBITS;
+
+	if (sop->event_readset_in)
+		check_selectop(sop);
+
+	if ((readset_in = realloc(sop->event_readset_in, fdsz)) == NULL)
+		goto error;
+	sop->event_readset_in = readset_in;
+	if ((readset_out = realloc(sop->event_readset_out, fdsz)) == NULL)
+		goto error;
+	sop->event_readset_out = readset_out;
+	if ((writeset_in = realloc(sop->event_writeset_in, fdsz)) == NULL)
+		goto error;
+	sop->event_writeset_in = writeset_in;
+	if ((writeset_out = realloc(sop->event_writeset_out, fdsz)) == NULL)
+		goto error;
+	sop->event_writeset_out = writeset_out;
+	if ((r_by_fd = realloc(sop->event_r_by_fd,
+		    n_events*sizeof(struct event*))) == NULL)
+		goto error;
+	sop->event_r_by_fd = r_by_fd;
+	if ((w_by_fd = realloc(sop->event_w_by_fd,
+		    n_events * sizeof(struct event*))) == NULL)
+		goto error;
+	sop->event_w_by_fd = w_by_fd;
+
+	/* realloc leaves the tail uninitialized; zero only the new part. */
+	memset((char *)sop->event_readset_in + sop->event_fdsz, 0,
+	    fdsz - sop->event_fdsz);
+	memset((char *)sop->event_writeset_in + sop->event_fdsz, 0,
+	    fdsz - sop->event_fdsz);
+	memset(sop->event_r_by_fd + n_events_old, 0,
+	    (n_events-n_events_old) * sizeof(struct event*));
+	memset(sop->event_w_by_fd + n_events_old, 0,
+	    (n_events-n_events_old) * sizeof(struct event*));
+
+	sop->event_fdsz = fdsz;
+	check_selectop(sop);
+
+	return (0);
+
+ error:
+	event_warn("malloc");
+	return (-1);
+}
+
+
+/*
+ * Add an event to the select backend.  Signal events are delegated to
+ * evsignal_add(); fd events may trigger a resize of the fd_sets.
+ * Returns 0 on success, -1 on allocation failure.
+ */
+static int
+select_add(void *arg, struct event *ev)
+{
+	struct selectop *sop = arg;
+
+	if (ev->ev_events & EV_SIGNAL)
+		return (evsignal_add(ev));
+
+	check_selectop(sop);
+	/*
+	 * Keep track of the highest fd, so that we can calculate the size
+	 * of the fd_sets for select(2)
+	 */
+	if (sop->event_fds < ev->ev_fd) {
+		int fdsz = sop->event_fdsz;
+
+		if (fdsz < sizeof(fd_mask))
+			fdsz = sizeof(fd_mask);
+
+		/* Double the set size until it covers this fd. */
+		while (fdsz <
+		    (howmany(ev->ev_fd + 1, NFDBITS) * sizeof(fd_mask)))
+			fdsz *= 2;
+
+		if (fdsz != sop->event_fdsz) {
+			if (select_resize(sop, fdsz)) {
+				check_selectop(sop);
+				return (-1);
+			}
+		}
+
+		sop->event_fds = ev->ev_fd;
+	}
+
+	if (ev->ev_events & EV_READ) {
+		FD_SET(ev->ev_fd, sop->event_readset_in);
+		sop->event_r_by_fd[ev->ev_fd] = ev;
+	}
+	if (ev->ev_events & EV_WRITE) {
+		FD_SET(ev->ev_fd, sop->event_writeset_in);
+		sop->event_w_by_fd[ev->ev_fd] = ev;
+	}
+	check_selectop(sop);
+
+	return (0);
+}
+
+/*
+ * Remove a previously added event from the select backend.
+ */
+
+static int
+select_del(void *arg, struct event *ev)
+{
+	struct selectop *sop = arg;
+
+	check_selectop(sop);
+	if (ev->ev_events & EV_SIGNAL)
+		return (evsignal_del(ev));
+
+	/* An fd above the tracked maximum was never registered here. */
+	if (sop->event_fds < ev->ev_fd) {
+		check_selectop(sop);
+		return (0);
+	}
+
+	if (ev->ev_events & EV_READ) {
+		FD_CLR(ev->ev_fd, sop->event_readset_in);
+		sop->event_r_by_fd[ev->ev_fd] = NULL;
+	}
+
+	if (ev->ev_events & EV_WRITE) {
+		FD_CLR(ev->ev_fd, sop->event_writeset_in);
+		sop->event_w_by_fd[ev->ev_fd] = NULL;
+	}
+
+	check_selectop(sop);
+	return (0);
+}
+
+/*
+ * Release everything owned by the select backend.  free(NULL) is a
+ * no-op, so no per-member NULL guards are needed.
+ */
+static void
+select_dealloc(struct event_base *base, void *arg)
+{
+	struct selectop *sop = arg;
+
+	evsignal_dealloc(base);
+	free(sop->event_readset_in);
+	free(sop->event_writeset_in);
+	free(sop->event_readset_out);
+	free(sop->event_writeset_out);
+	free(sop->event_r_by_fd);
+	free(sop->event_w_by_fd);
+
+	memset(sop, 0, sizeof(struct selectop));
+	free(sop);
+}
diff --git a/libevent/signal.c b/libevent/signal.c
new file mode 100644
index 00000000000..74fa23f688a
--- /dev/null
+++ b/libevent/signal.c
@@ -0,0 +1,357 @@
+/* $OpenBSD: select.c,v 1.2 2002/06/25 15:50:15 mickey Exp $ */
+
+/*
+ * Copyright 2000-2002 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#ifdef WIN32
+#define WIN32_LEAN_AND_MEAN
+#include <winsock2.h>
+#include <windows.h>
+#undef WIN32_LEAN_AND_MEAN
+#endif
+#include <sys/types.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <sys/queue.h>
+#ifdef HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+#include <errno.h>
+#ifdef HAVE_FCNTL_H
+#include <fcntl.h>
+#endif
+#include <assert.h>
+
+#include "event.h"
+#include "event-internal.h"
+#include "evsignal.h"
+#include "evutil.h"
+#include "log.h"
+
+struct event_base *evsignal_base = NULL;
+
+static void evsignal_handler(int sig);
+
+/* Callback run when the signal handler writes a byte to our signaling
+ * socket: drain one byte so the socket does not stay readable forever. */
+static void
+evsignal_cb(int fd, short what, void *arg)
+{
+	static char signals[1];
+#ifdef WIN32
+	SSIZE_T n;
+#else
+	ssize_t n;
+#endif
+
+	n = recv(fd, signals, sizeof(signals), 0);
+	if (n == -1)
+		event_err(1, "%s: read", __func__);
+}
+
+#ifdef HAVE_SETFD
+#define FD_CLOSEONEXEC(x) do { \
+ if (fcntl(x, F_SETFD, 1) == -1) \
+ event_warn("fcntl(%d, F_SETFD)", x); \
+} while (0)
+#else
+#define FD_CLOSEONEXEC(x)
+#endif
+
+/*
+ * Set up signal support for a base: create the socketpair the C signal
+ * handler uses to wake the event loop, reset the bookkeeping, and set up
+ * (but do not yet add) the internal read event on the pair.
+ * Returns 0 on success, -1 if the socketpair cannot be created.
+ */
+int
+evsignal_init(struct event_base *base)
+{
+	int i;
+
+	/*
+	 * Our signal handler is going to write to one end of the socket
+	 * pair to wake up our event loop.  The event loop then scans for
+	 * signals that got delivered.
+	 */
+	if (evutil_socketpair(
+		    AF_UNIX, SOCK_STREAM, 0, base->sig.ev_signal_pair) == -1) {
+#ifdef WIN32
+		/* Make this nonfatal on win32, where sometimes people
+		   have localhost firewalled. */
+		event_warn("%s: socketpair", __func__);
+#else
+		event_err(1, "%s: socketpair", __func__);
+#endif
+		return -1;
+	}
+
+	FD_CLOSEONEXEC(base->sig.ev_signal_pair[0]);
+	FD_CLOSEONEXEC(base->sig.ev_signal_pair[1]);
+	base->sig.sh_old = NULL;
+	base->sig.sh_old_max = 0;
+	base->sig.evsignal_caught = 0;
+	memset(&base->sig.evsigcaught, 0, sizeof(sig_atomic_t)*NSIG);
+	/* initialize the queues for all events */
+	for (i = 0; i < NSIG; ++i)
+		TAILQ_INIT(&base->sig.evsigevents[i]);
+
+	/* The handler's send() on pair[0] must never block the process. */
+	evutil_make_socket_nonblocking(base->sig.ev_signal_pair[0]);
+
+	event_set(&base->sig.ev_signal, base->sig.ev_signal_pair[1],
+	    EV_READ | EV_PERSIST, evsignal_cb, &base->sig.ev_signal);
+	base->sig.ev_signal.ev_base = base;
+	base->sig.ev_signal.ev_flags |= EVLIST_INTERNAL;
+
+	return 0;
+}
+
+/* Helper: set the signal handler for evsignal to handler in base, so that
+ * we can restore the original handler when we clear the current one.
+ * Returns 0 on success, -1 on allocation or sigaction/signal failure. */
+int
+_evsignal_set_handler(struct event_base *base,
+    int evsignal, void (*handler)(int))
+{
+#ifdef HAVE_SIGACTION
+	struct sigaction sa;
+#else
+	ev_sighandler_t sh;
+#endif
+	struct evsignal_info *sig = &base->sig;
+	void *p;
+
+	/*
+	 * resize saved signal handler array up to the highest signal number.
+	 * a dynamic array is used to keep footprint on the low side.
+	 */
+	if (evsignal >= sig->sh_old_max) {
+		int new_max = evsignal + 1;
+		event_debug(("%s: evsignal (%d) >= sh_old_max (%d), resizing",
+			    __func__, evsignal, sig->sh_old_max));
+		p = realloc(sig->sh_old, new_max * sizeof(*sig->sh_old));
+		if (p == NULL) {
+			event_warn("realloc");
+			return (-1);
+		}
+
+		memset((char *)p + sig->sh_old_max * sizeof(*sig->sh_old),
+		    0, (new_max - sig->sh_old_max) * sizeof(*sig->sh_old));
+
+		sig->sh_old_max = new_max;
+		sig->sh_old = p;
+	}
+
+	/* allocate space for previous handler out of dynamic array */
+	sig->sh_old[evsignal] = malloc(sizeof *sig->sh_old[evsignal]);
+	if (sig->sh_old[evsignal] == NULL) {
+		event_warn("malloc");
+		return (-1);
+	}
+
+	/* save previous handler and setup new handler */
+#ifdef HAVE_SIGACTION
+	memset(&sa, 0, sizeof(sa));
+	sa.sa_handler = handler;
+	sa.sa_flags |= SA_RESTART;
+	sigfillset(&sa.sa_mask);
+
+	if (sigaction(evsignal, &sa, sig->sh_old[evsignal]) == -1) {
+		event_warn("sigaction");
+		free(sig->sh_old[evsignal]);
+		/*
+		 * Clear the slot: evsignal_dealloc() treats any non-NULL
+		 * entry as a saved handler and would otherwise use this
+		 * freed pointer (use-after-free/double-free).
+		 */
+		sig->sh_old[evsignal] = NULL;
+		return (-1);
+	}
+#else
+	if ((sh = signal(evsignal, handler)) == SIG_ERR) {
+		event_warn("signal");
+		free(sig->sh_old[evsignal]);
+		sig->sh_old[evsignal] = NULL;	/* see comment above */
+		return (-1);
+	}
+	*sig->sh_old[evsignal] = sh;
+#endif
+
+	return (0);
+}
+
+/*
+ * Register a signal event with its base.  The OS-level handler is only
+ * installed for the first event on a given signal; subsequent events
+ * simply join that signal's queue.  Returns 0 on success, -1 on error.
+ */
+int
+evsignal_add(struct event *ev)
+{
+	int evsignal;
+	struct event_base *base = ev->ev_base;
+	struct evsignal_info *sig = &ev->ev_base->sig;
+
+	if (ev->ev_events & (EV_READ|EV_WRITE))
+		event_errx(1, "%s: EV_SIGNAL incompatible use", __func__);
+	evsignal = EVENT_SIGNAL(ev);
+	assert(evsignal >= 0 && evsignal < NSIG);
+	if (TAILQ_EMPTY(&sig->evsigevents[evsignal])) {
+		event_debug(("%s: %p: changing signal handler", __func__, ev));
+		if (_evsignal_set_handler(
+			    base, evsignal, evsignal_handler) == -1)
+			return (-1);
+
+		/* catch signals if they happen quickly */
+		evsignal_base = base;
+
+		/* Lazily add the internal wakeup event the first time. */
+		if (!sig->ev_signal_added) {
+			if (event_add(&sig->ev_signal, NULL))
+				return (-1);
+			sig->ev_signal_added = 1;
+		}
+	}
+
+	/* multiple events may listen to the same signal */
+	TAILQ_INSERT_TAIL(&sig->evsigevents[evsignal], ev, ev_signal_next);
+
+	return (0);
+}
+
+/*
+ * Restore the signal disposition saved by _evsignal_set_handler() and
+ * free the saved copy.  Returns 0 on success, -1 on failure.
+ */
+int
+_evsignal_restore_handler(struct event_base *base, int evsignal)
+{
+	int ret = 0;
+	struct evsignal_info *sig = &base->sig;
+#ifdef HAVE_SIGACTION
+	struct sigaction *sh;
+#else
+	ev_sighandler_t *sh;
+#endif
+
+	/* restore previous handler */
+	sh = sig->sh_old[evsignal];
+	sig->sh_old[evsignal] = NULL;
+#ifdef HAVE_SIGACTION
+	if (sigaction(evsignal, sh, NULL) == -1) {
+		event_warn("sigaction");
+		ret = -1;
+	}
+#else
+	if (signal(evsignal, *sh) == SIG_ERR) {
+		event_warn("signal");
+		ret = -1;
+	}
+#endif
+	free(sh);
+
+	return ret;
+}
+
+/*
+ * Remove a signal event from its base; the saved OS handler is restored
+ * only when the last event listening on that signal goes away.
+ */
+int
+evsignal_del(struct event *ev)
+{
+	struct event_base *base = ev->ev_base;
+	struct evsignal_info *sig = &base->sig;
+	int evsignal = EVENT_SIGNAL(ev);
+
+	assert(evsignal >= 0 && evsignal < NSIG);
+
+	/* multiple events may listen to the same signal */
+	TAILQ_REMOVE(&sig->evsigevents[evsignal], ev, ev_signal_next);
+
+	if (!TAILQ_EMPTY(&sig->evsigevents[evsignal]))
+		return (0);
+
+	event_debug(("%s: %p: restoring signal handler", __func__, ev));
+
+	return (_evsignal_restore_handler(ev->ev_base, EVENT_SIGNAL(ev)));
+}
+
+/*
+ * The installed C signal handler: count the delivery, raise the caught
+ * flag, and wake the event loop by writing one byte to the socketpair.
+ * errno is saved and restored since send() may clobber it.
+ */
+static void
+evsignal_handler(int sig)
+{
+	int save_errno = errno;
+
+	if (evsignal_base == NULL) {
+		event_warn(
+			"%s: received signal %d, but have no base configured",
+			__func__, sig);
+		return;
+	}
+
+	evsignal_base->sig.evsigcaught[sig]++;
+	evsignal_base->sig.evsignal_caught = 1;
+
+#ifndef HAVE_SIGACTION
+	/* signal() may reset the disposition on delivery; re-install. */
+	signal(sig, evsignal_handler);
+#endif
+
+	/* Wake up our notification mechanism */
+	send(evsignal_base->sig.ev_signal_pair[0], "a", 1, 0);
+	errno = save_errno;
+}
+
+/*
+ * Deliver the signals recorded by evsignal_handler(): activate every
+ * event queued on a signal whose counter is nonzero, removing
+ * non-persistent events first.
+ */
+void
+evsignal_process(struct event_base *base)
+{
+	struct evsignal_info *sig = &base->sig;
+	struct event *ev, *next_ev;
+	sig_atomic_t ncalls;
+	int i;
+
+	base->sig.evsignal_caught = 0;
+	for (i = 1; i < NSIG; ++i) {
+		ncalls = sig->evsigcaught[i];
+		if (ncalls == 0)
+			continue;
+		/* Subtract rather than zero: the handler may have run again. */
+		sig->evsigcaught[i] -= ncalls;
+
+		for (ev = TAILQ_FIRST(&sig->evsigevents[i]);
+		    ev != NULL; ev = next_ev) {
+			next_ev = TAILQ_NEXT(ev, ev_signal_next);
+			if (!(ev->ev_events & EV_PERSIST))
+				event_del(ev);
+			event_active(ev, EV_SIGNAL, ncalls);
+		}
+
+	}
+}
+
+/*
+ * Tear down signal support for a base: restore every saved handler,
+ * close the wakeup socketpair, and free the saved-handler array.
+ */
+void
+evsignal_dealloc(struct event_base *base)
+{
+	int i = 0;
+	if (base->sig.ev_signal_added) {
+		event_del(&base->sig.ev_signal);
+		base->sig.ev_signal_added = 0;
+	}
+	for (i = 0; i < NSIG; ++i) {
+		if (i < base->sig.sh_old_max && base->sig.sh_old[i] != NULL)
+			_evsignal_restore_handler(base, i);
+	}
+
+	EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
+	base->sig.ev_signal_pair[0] = -1;
+	EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
+	base->sig.ev_signal_pair[1] = -1;
+	base->sig.sh_old_max = 0;
+
+	/* per index frees are handled in evsignal_del() */
+	free(base->sig.sh_old);
+	/* NULL the pointer so a second dealloc cannot double-free it. */
+	base->sig.sh_old = NULL;
+}
diff --git a/libevent/strlcpy-internal.h b/libevent/strlcpy-internal.h
new file mode 100644
index 00000000000..22b5f61d45e
--- /dev/null
+++ b/libevent/strlcpy-internal.h
@@ -0,0 +1,23 @@
+#ifndef _STRLCPY_INTERNAL_H_
+#define _STRLCPY_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif /* HAVE_CONFIG_H */
+
+#ifndef HAVE_STRLCPY
+#include <string.h>
+size_t _event_strlcpy(char *dst, const char *src, size_t siz);
+#define strlcpy _event_strlcpy
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
diff --git a/libevent/strlcpy.c b/libevent/strlcpy.c
new file mode 100644
index 00000000000..5d194527c8c
--- /dev/null
+++ b/libevent/strlcpy.c
@@ -0,0 +1,76 @@
+/* $OpenBSD: strlcpy.c,v 1.5 2001/05/13 15:40:16 deraadt Exp $ */
+
+/*
+ * Copyright (c) 1998 Todd C. Miller <Todd.Miller@courtesan.com>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(LIBC_SCCS) && !defined(lint)
+static char *rcsid = "$OpenBSD: strlcpy.c,v 1.5 2001/05/13 15:40:16 deraadt Exp $";
+#endif /* LIBC_SCCS and not lint */
+
+#include <sys/types.h>
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif /* HAVE_CONFIG_H */
+
+#ifndef HAVE_STRLCPY
+#include "strlcpy-internal.h"
+
+/*
+ * Copy src to string dst of size siz.  At most siz-1 characters
+ * will be copied.  Always NUL terminates (unless siz == 0).
+ * Returns strlen(src); if retval >= siz, truncation occurred.
+ */
+size_t
+_event_strlcpy(char *dst, const char *src, size_t siz)
+{
+	/*
+	 * ANSI prototype definition (the K&R parameter-declaration style
+	 * is obsolescent); the `register` hints were dropped as they have
+	 * no effect on modern compilers.
+	 */
+	char *d = dst;
+	const char *s = src;
+	size_t n = siz;
+
+	/* Copy as many bytes as will fit */
+	if (n != 0 && --n != 0) {
+		do {
+			if ((*d++ = *s++) == 0)
+				break;
+		} while (--n != 0);
+	}
+
+	/* Not enough room in dst, add NUL and traverse rest of src */
+	if (n == 0) {
+		if (siz != 0)
+			*d = '\0';	/* NUL-terminate dst */
+		while (*s++)
+			;
+	}
+
+	return(s - src - 1);	/* count does not include NUL */
+}
+#endif
diff --git a/libevent/test/Makefile.am b/libevent/test/Makefile.am
new file mode 100644
index 00000000000..3558d02fd5a
--- /dev/null
+++ b/libevent/test/Makefile.am
@@ -0,0 +1,35 @@
+AUTOMAKE_OPTIONS = foreign no-dependencies
+
+AM_CFLAGS = -I$(top_srcdir) -I$(top_srcdir)/compat
+
+EXTRA_DIST = regress.rpc regress.gen.h regress.gen.c
+
+noinst_PROGRAMS = test-init test-eof test-weof test-time regress bench
+
+BUILT_SOURCES = regress.gen.c regress.gen.h
+test_init_SOURCES = test-init.c
+test_init_LDADD = ../libevent_core.la
+test_eof_SOURCES = test-eof.c
+test_eof_LDADD = ../libevent_core.la
+test_weof_SOURCES = test-weof.c
+test_weof_LDADD = ../libevent_core.la
+test_time_SOURCES = test-time.c
+test_time_LDADD = ../libevent_core.la
+regress_SOURCES = regress.c regress.h regress_http.c regress_dns.c \
+ regress_rpc.c \
+ regress.gen.c regress.gen.h
+regress_LDADD = ../libevent.la
+bench_SOURCES = bench.c
+bench_LDADD = ../libevent.la
+
+regress.gen.c regress.gen.h: regress.rpc $(top_srcdir)/event_rpcgen.py
+ $(top_srcdir)/event_rpcgen.py $(srcdir)/regress.rpc || echo "No Python installed"
+
+DISTCLEANFILES = *~
+
+test: test-init test-eof test-weof test-time regress
+
+verify: test
+ @$(srcdir)/test.sh
+
+bench test-init test-eof test-weof test-time: ../libevent.la
diff --git a/libevent/test/bench.c b/libevent/test/bench.c
new file mode 100644
index 00000000000..c976932fa80
--- /dev/null
+++ b/libevent/test/bench.c
@@ -0,0 +1,188 @@
+/*
+ * Copyright 2003 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Mon 03/10/2003 - Modified by Davide Libenzi <davidel@xmailserver.org>
+ *
+ * Added chain event propagation to improve the sensitivity of
+ * the measure respect to the event loop efficency.
+ *
+ *
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#ifdef WIN32
+#include <windows.h>
+#else
+#include <sys/socket.h>
+#include <signal.h>
+#include <sys/resource.h>
+#endif
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+
+#include <event.h>
+#include <evutil.h>
+
+
+/* Benchmark state: counters, the pipe fd array (2 fds per pipe), and
+ * one struct event per pipe. */
+static int count, writes, fired;
+static int *pipes;
+static int num_pipes, num_active, num_writes;
+static struct event *events;
+
+/*
+ * Read callback: consume one byte from fd and, while write credits
+ * remain, chain a write to the next pipe (wrapping at num_pipes) so
+ * activity propagates around the ring.
+ */
+static void
+read_cb(int fd, short which, void *arg)
+{
+ long idx = (long) arg, widx = idx + 1;
+ u_char ch;
+
+ count += read(fd, &ch, sizeof(ch));
+ if (writes) {
+ if (widx >= num_pipes)
+ widx -= num_pipes;
+ write(pipes[2 * widx + 1], "e", 1);
+ writes--;
+ fired++;
+ }
+}
+
+/*
+ * One benchmark iteration: (re)register a persistent read event per
+ * pipe, seed num_active writers spread evenly across the ring, then
+ * loop until every fired write has been read.  Returns a pointer to a
+ * static timeval holding the elapsed dispatch time.
+ * NOTE(review): if num_active is 0 the space computation divides by
+ * zero — relies on main() passing sane values; confirm.
+ */
+static struct timeval *
+run_once(void)
+{
+ int *cp, space;
+ long i;
+ static struct timeval ts, te;
+
+ for (cp = pipes, i = 0; i < num_pipes; i++, cp += 2) {
+ event_del(&events[i]);
+ event_set(&events[i], cp[0], EV_READ | EV_PERSIST, read_cb, (void *) i);
+ event_add(&events[i], NULL);
+ }
+
+ /* flush any pending events before timing */
+ event_loop(EVLOOP_ONCE | EVLOOP_NONBLOCK);
+
+ fired = 0;
+ space = num_pipes / num_active;
+ space = space * 2;
+ for (i = 0; i < num_active; i++, fired++)
+ write(pipes[i * space + 1], "e", 1);
+
+ count = 0;
+ writes = num_writes;
+ { int xcount = 0;
+ gettimeofday(&ts, NULL);
+ do {
+ event_loop(EVLOOP_ONCE | EVLOOP_NONBLOCK);
+ xcount++;
+ } while (count != fired);
+ gettimeofday(&te, NULL);
+
+ if (xcount != count) fprintf(stderr, "Xcount: %d, Rcount: %d\n", xcount, count);
+ }
+
+ evutil_timersub(&te, &ts, &te);
+
+ return (&te);
+}
+
+/*
+ * Benchmark driver.  Options: -n <pipes>, -a <active writers>,
+ * -w <write credits>.  Creates num_pipes socketpairs (or pipes with
+ * USE_PIPES), raises RLIMIT_NOFILE accordingly, then prints the
+ * elapsed microseconds of 25 run_once() iterations.
+ * NOTE(review): atoi() results are not validated; -n 0 or -a 0 would
+ * misbehave (calloc(0,...) / division by zero in run_once) — confirm
+ * acceptable for a benchmark tool.
+ */
+int
+main (int argc, char **argv)
+{
+#ifndef WIN32
+ struct rlimit rl;
+#endif
+ int i, c;
+ struct timeval *tv;
+ int *cp;
+
+ num_pipes = 100;
+ num_active = 1;
+ num_writes = num_pipes;
+ while ((c = getopt(argc, argv, "n:a:w:")) != -1) {
+ switch (c) {
+ case 'n':
+ num_pipes = atoi(optarg);
+ break;
+ case 'a':
+ num_active = atoi(optarg);
+ break;
+ case 'w':
+ num_writes = atoi(optarg);
+ break;
+ default:
+ fprintf(stderr, "Illegal argument \"%c\"\n", c);
+ exit(1);
+ }
+ }
+
+#ifndef WIN32
+ /* 2 fds per pipe plus headroom for libevent's own descriptors */
+ rl.rlim_cur = rl.rlim_max = num_pipes * 2 + 50;
+ if (setrlimit(RLIMIT_NOFILE, &rl) == -1) {
+ perror("setrlimit");
+ exit(1);
+ }
+#endif
+
+ events = calloc(num_pipes, sizeof(struct event));
+ pipes = calloc(num_pipes * 2, sizeof(int));
+ if (events == NULL || pipes == NULL) {
+ perror("malloc");
+ exit(1);
+ }
+
+ event_init();
+
+ for (cp = pipes, i = 0; i < num_pipes; i++, cp += 2) {
+#ifdef USE_PIPES
+ if (pipe(cp) == -1) {
+#else
+ if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, cp) == -1) {
+#endif
+ perror("pipe");
+ exit(1);
+ }
+ }
+
+ for (i = 0; i < 25; i++) {
+ tv = run_once();
+ if (tv == NULL)
+ exit(1);
+ fprintf(stdout, "%ld\n",
+ tv->tv_sec * 1000000L + tv->tv_usec);
+ }
+
+ exit(0);
+}
diff --git a/libevent/test/regress.c b/libevent/test/regress.c
new file mode 100644
index 00000000000..0b7517d3aa4
--- /dev/null
+++ b/libevent/test/regress.c
@@ -0,0 +1,1703 @@
+/*
+ * Copyright (c) 2003, 2004 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef WIN32
+#include <winsock2.h>
+#include <windows.h>
+#endif
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <sys/queue.h>
+#ifndef WIN32
+#include <sys/socket.h>
+#include <sys/wait.h>
+#include <signal.h>
+#include <unistd.h>
+#include <netdb.h>
+#endif
+#include <assert.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+
+#include "event.h"
+#include "evutil.h"
+#include "event-internal.h"
+#include "log.h"
+
+#include "regress.h"
+#ifndef WIN32
+#include "regress.gen.h"
+#endif
+
+/* Socket pair shared by most tests: pair[0] is typically the writer,
+ * pair[1] the reader; (re)created by setup_test(). */
+int pair[2];
+int test_ok;
+static int called;
+/* Buffers and offsets for the multiple read/write tests. */
+static char wbuf[4096];
+static char rbuf[4096];
+static int woff;
+static int roff;
+static int usepersist;
+/* Timestamps used by the timeout tests. */
+static struct timeval tset;
+static struct timeval tcalled;
+static struct event_base *global_base;
+
+#define TEST1 "this is a test"
+#define SECONDS 1
+
+#ifndef SHUT_WR
+#define SHUT_WR 1
+#endif
+
+/* On Windows the pair fds are sockets, so map read/write to recv/send. */
+#ifdef WIN32
+#define write(fd,buf,len) send((fd),(buf),(len),0)
+#define read(fd,buf,len) recv((fd),(buf),(len),0)
+#endif
+
+/*
+ * Read callback: on the first call with data, re-add the event (arg is
+ * the event itself); sets test_ok only when EOF (len == 0) arrives on
+ * the second call.  Ignores events with a NULL arg.
+ */
+static void
+simple_read_cb(int fd, short event, void *arg)
+{
+ char buf[256];
+ int len;
+
+ if (arg == NULL)
+ return;
+
+ len = read(fd, buf, sizeof(buf));
+
+ if (len) {
+ if (!called) {
+ if (event_add(arg, NULL) == -1)
+ exit(1);
+ }
+ } else if (called == 1)
+ test_ok = 1;
+
+ called++;
+}
+
+/*
+ * Write callback: one write of TEST1 (incl. NUL); test_ok reflects
+ * whether the write succeeded.
+ */
+static void
+simple_write_cb(int fd, short event, void *arg)
+{
+ int len;
+
+ if (arg == NULL)
+ return;
+
+ len = write(fd, TEST1, strlen(TEST1) + 1);
+ if (len == -1)
+ test_ok = 0;
+ else
+ test_ok = 1;
+}
+
+/*
+ * Write wbuf in <=128-byte chunks; shuts down the write side when the
+ * whole buffer is out.  Re-adds itself unless running in persistent
+ * mode (usepersist), in which case it deletes the event when done.
+ */
+static void
+multiple_write_cb(int fd, short event, void *arg)
+{
+ struct event *ev = arg;
+ int len;
+
+ len = 128;
+ if (woff + len >= sizeof(wbuf))
+ len = sizeof(wbuf) - woff;
+
+ len = write(fd, wbuf + woff, len);
+ if (len == -1) {
+ fprintf(stderr, "%s: write\n", __func__);
+ if (usepersist)
+ event_del(ev);
+ return;
+ }
+
+ woff += len;
+
+ if (woff >= sizeof(wbuf)) {
+ shutdown(fd, SHUT_WR);
+ if (usepersist)
+ event_del(ev);
+ return;
+ }
+
+ if (!usepersist) {
+ if (event_add(ev, NULL) == -1)
+ exit(1);
+ }
+}
+
+/*
+ * Companion reader: accumulate into rbuf at roff until EOF/error;
+ * same persistent/non-persistent re-add logic as the writer.
+ */
+static void
+multiple_read_cb(int fd, short event, void *arg)
+{
+ struct event *ev = arg;
+ int len;
+
+ len = read(fd, rbuf + roff, sizeof(rbuf) - roff);
+ if (len == -1)
+ fprintf(stderr, "%s: read\n", __func__);
+ if (len <= 0) {
+ if (usepersist)
+ event_del(ev);
+ return;
+ }
+
+ roff += len;
+ if (!usepersist) {
+ if (event_add(ev, NULL) == -1)
+ exit(1);
+ }
+}
+
+/*
+ * Timer callback: passes if the wall-clock delay between tset and now
+ * is within 100ms of the expected SECONDS timeout.
+ */
+static void
+timeout_cb(int fd, short event, void *arg)
+{
+ struct timeval tv;
+ int diff;
+
+ evutil_gettimeofday(&tcalled, NULL);
+ if (evutil_timercmp(&tcalled, &tset, >))
+ evutil_timersub(&tcalled, &tset, &tv);
+ else
+ evutil_timersub(&tset, &tcalled, &tv);
+
+ diff = tv.tv_sec*1000 + tv.tv_usec/1000 - SECONDS * 1000;
+ if (diff < 0)
+ diff = -diff;
+
+ if (diff < 100)
+ test_ok = 1;
+}
+
+#ifndef WIN32
+/* Plain sigaction/signal handler; test_ok == 2 marks "previous handler
+ * ran" for the restore test. */
+static void
+signal_cb_sa(int sig)
+{
+ test_ok = 2;
+}
+
+/* libevent signal callback: one-shot — removes itself and passes. */
+static void
+signal_cb(int fd, short event, void *arg)
+{
+ struct event *ev = arg;
+
+ signal_del(ev);
+ test_ok = 1;
+}
+#endif
+
+/* Per-endpoint state for the combined read/write test: the event plus
+ * a byte counter (bytes read so far / bytes left to write). */
+struct both {
+ struct event ev;
+ int nread;
+};
+
+/* Count incoming bytes and keep the read event armed. */
+static void
+combined_read_cb(int fd, short event, void *arg)
+{
+ struct both *both = arg;
+ char buf[128];
+ int len;
+
+ len = read(fd, buf, sizeof(buf));
+ if (len == -1)
+ fprintf(stderr, "%s: read\n", __func__);
+ if (len <= 0)
+ return;
+
+ both->nread += len;
+ if (event_add(&both->ev, NULL) == -1)
+ exit(1);
+}
+
+/* Write up to 128 (uninitialized, content irrelevant) bytes from the
+ * remaining nread budget; shut down the write side when exhausted. */
+static void
+combined_write_cb(int fd, short event, void *arg)
+{
+ struct both *both = arg;
+ char buf[128];
+ int len;
+
+ len = sizeof(buf);
+ if (len > both->nread)
+ len = both->nread;
+
+ len = write(fd, buf, len);
+ if (len == -1)
+ fprintf(stderr, "%s: write\n", __func__);
+ if (len <= 0) {
+ shutdown(fd, SHUT_WR);
+ return;
+ }
+
+ both->nread -= len;
+ if (event_add(&both->ev, NULL) == -1)
+ exit(1);
+}
+
+/* Test infrastructure */
+
+/*
+ * Print the test name, create the shared socketpair (non-blocking
+ * where fcntl is available), and reset the test_ok/called globals.
+ */
+static int
+setup_test(const char *name)
+{
+
+ fprintf(stdout, "%s", name);
+
+ if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == -1) {
+ fprintf(stderr, "%s: socketpair\n", __func__);
+ exit(1);
+ }
+
+#ifdef HAVE_FCNTL
+ if (fcntl(pair[0], F_SETFL, O_NONBLOCK) == -1)
+ fprintf(stderr, "fcntl(O_NONBLOCK)");
+
+ if (fcntl(pair[1], F_SETFL, O_NONBLOCK) == -1)
+ fprintf(stderr, "fcntl(O_NONBLOCK)");
+#endif
+
+ test_ok = 0;
+ called = 0;
+ return (0);
+}
+
+/*
+ * Close the pair, report OK/FAILED based on test_ok, and exit(1) on
+ * failure so the harness stops at the first broken test.
+ */
+static int
+cleanup_test(void)
+{
+#ifndef WIN32
+ close(pair[0]);
+ close(pair[1]);
+#else
+ CloseHandle((HANDLE)pair[0]);
+ CloseHandle((HANDLE)pair[1]);
+#endif
+ if (test_ok)
+ fprintf(stdout, "OK\n");
+ else {
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+ }
+ test_ok = 0;
+ return (0);
+}
+
+/*
+ * Register read+write events on up to 512 socketpairs (stopping early
+ * at the fd limit), loop once per pair, then delete and close
+ * everything.  Passing means no crash/assert; shadows the global
+ * 'pair' with a local one on purpose.
+ */
+static void
+test_registerfds(void)
+{
+ int i, j;
+ int pair[2];
+ struct event read_evs[512];
+ struct event write_evs[512];
+
+ struct event_base *base = event_base_new();
+
+ fprintf(stdout, "Testing register fds: ");
+
+ for (i = 0; i < 512; ++i) {
+ if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == -1) {
+ /* run up to the limit of file descriptors */
+ break;
+ }
+ event_set(&read_evs[i], pair[0],
+ EV_READ|EV_PERSIST, simple_read_cb, NULL);
+ event_base_set(base, &read_evs[i]);
+ event_add(&read_evs[i], NULL);
+ event_set(&write_evs[i], pair[1],
+ EV_WRITE|EV_PERSIST, simple_write_cb, NULL);
+ event_base_set(base, &write_evs[i]);
+ event_add(&write_evs[i], NULL);
+
+ /* just loop once */
+ event_base_loop(base, EVLOOP_ONCE);
+ }
+
+ /* now delete everything */
+ for (j = 0; j < i; ++j) {
+ event_del(&read_evs[j]);
+ event_del(&write_evs[j]);
+#ifndef WIN32
+ close(read_evs[j].ev_fd);
+ close(write_evs[j].ev_fd);
+#else
+ CloseHandle((HANDLE)read_evs[j].ev_fd);
+ CloseHandle((HANDLE)write_evs[j].ev_fd);
+#endif
+
+ /* just loop once */
+ event_base_loop(base, EVLOOP_ONCE);
+ }
+
+ event_base_free(base);
+
+ fprintf(stdout, "OK\n");
+}
+
+/* One write + EOF, then dispatch; simple_read_cb must see data then
+ * EOF across two invocations. */
+static void
+test_simpleread(void)
+{
+ struct event ev;
+
+ /* Very simple read test */
+ setup_test("Simple read: ");
+
+ write(pair[0], TEST1, strlen(TEST1)+1);
+ shutdown(pair[0], SHUT_WR);
+
+ event_set(&ev, pair[1], EV_READ, simple_read_cb, &ev);
+ if (event_add(&ev, NULL) == -1)
+ exit(1);
+ event_dispatch();
+
+ cleanup_test();
+}
+
+/* Single EV_WRITE event; simple_write_cb sets test_ok on success. */
+static void
+test_simplewrite(void)
+{
+ struct event ev;
+
+ /* Very simple write test */
+ setup_test("Simple write: ");
+
+ event_set(&ev, pair[0], EV_WRITE, simple_write_cb, &ev);
+ if (event_add(&ev, NULL) == -1)
+ exit(1);
+ event_dispatch();
+
+ cleanup_test();
+}
+
+/* Chunked transfer with non-persistent events that re-add themselves;
+ * passes when the full 4096 bytes round-trip intact. */
+static void
+test_multiple(void)
+{
+ struct event ev, ev2;
+ int i;
+
+ /* Multiple read and write test */
+ setup_test("Multiple read/write: ");
+ memset(rbuf, 0, sizeof(rbuf));
+ for (i = 0; i < sizeof(wbuf); i++)
+ wbuf[i] = i;
+
+ roff = woff = 0;
+ usepersist = 0;
+
+ event_set(&ev, pair[0], EV_WRITE, multiple_write_cb, &ev);
+ if (event_add(&ev, NULL) == -1)
+ exit(1);
+ event_set(&ev2, pair[1], EV_READ, multiple_read_cb, &ev2);
+ if (event_add(&ev2, NULL) == -1)
+ exit(1);
+ event_dispatch();
+
+ if (roff == woff)
+ test_ok = memcmp(rbuf, wbuf, sizeof(wbuf)) == 0;
+
+ cleanup_test();
+}
+
+/* Same transfer as test_multiple but with EV_PERSIST events that
+ * delete themselves when finished. */
+static void
+test_persistent(void)
+{
+ struct event ev, ev2;
+ int i;
+
+ /* Multiple read and write test with persist */
+ setup_test("Persist read/write: ");
+ memset(rbuf, 0, sizeof(rbuf));
+ for (i = 0; i < sizeof(wbuf); i++)
+ wbuf[i] = i;
+
+ roff = woff = 0;
+ usepersist = 1;
+
+ event_set(&ev, pair[0], EV_WRITE|EV_PERSIST, multiple_write_cb, &ev);
+ if (event_add(&ev, NULL) == -1)
+ exit(1);
+ event_set(&ev2, pair[1], EV_READ|EV_PERSIST, multiple_read_cb, &ev2);
+ if (event_add(&ev2, NULL) == -1)
+ exit(1);
+ event_dispatch();
+
+ if (roff == woff)
+ test_ok = memcmp(rbuf, wbuf, sizeof(wbuf)) == 0;
+
+ cleanup_test();
+}
+
+/* Bidirectional transfer: each endpoint both reads and writes; w1
+ * sends 4096 bytes toward r2, w2 sends 8192 toward r1. */
+static void
+test_combined(void)
+{
+ struct both r1, r2, w1, w2;
+
+ setup_test("Combined read/write: ");
+ memset(&r1, 0, sizeof(r1));
+ memset(&r2, 0, sizeof(r2));
+ memset(&w1, 0, sizeof(w1));
+ memset(&w2, 0, sizeof(w2));
+
+ w1.nread = 4096;
+ w2.nread = 8192;
+
+ event_set(&r1.ev, pair[0], EV_READ, combined_read_cb, &r1);
+ event_set(&w1.ev, pair[0], EV_WRITE, combined_write_cb, &w1);
+ event_set(&r2.ev, pair[1], EV_READ, combined_read_cb, &r2);
+ event_set(&w2.ev, pair[1], EV_WRITE, combined_write_cb, &w2);
+ if (event_add(&r1.ev, NULL) == -1)
+ exit(1);
+ if (event_add(&w1.ev, NULL))
+ exit(1);
+ if (event_add(&r2.ev, NULL))
+ exit(1);
+ if (event_add(&w2.ev, NULL))
+ exit(1);
+
+ event_dispatch();
+
+ if (r1.nread == 8192 && r2.nread == 4096)
+ test_ok = 1;
+
+ cleanup_test();
+}
+
+/* One-shot timer: timeout_cb verifies it fired within 100ms of the
+ * scheduled SECONDS delay. */
+static void
+test_simpletimeout(void)
+{
+ struct timeval tv;
+ struct event ev;
+
+ setup_test("Simple timeout: ");
+
+ tv.tv_usec = 0;
+ tv.tv_sec = SECONDS;
+ evtimer_set(&ev, timeout_cb, NULL);
+ evtimer_add(&ev, &tv);
+
+ evutil_gettimeofday(&tset, NULL);
+ event_dispatch();
+
+ cleanup_test();
+}
+
+#ifndef WIN32
+extern struct event_base *current_base;
+
+/* SIGCHLD handler: record receipt and schedule loop exit 500ms out so
+ * pending events can still run. */
+static void
+child_signal_cb(int fd, short event, void *arg)
+{
+ struct timeval tv;
+ int *pint = arg;
+
+ *pint = 1;
+
+ tv.tv_usec = 500000;
+ tv.tv_sec = 0;
+ event_loopexit(&tv);
+}
+
+/*
+ * Verify event_reinit(): the child must be able to keep dispatching on
+ * the inherited base after fork, and the parent's loop must still work
+ * afterwards (including SIGCHLD delivery).
+ */
+static void
+test_fork(void)
+{
+ int status, got_sigchld = 0;
+ struct event ev, sig_ev;
+ pid_t pid;
+
+ setup_test("After fork: ");
+
+ write(pair[0], TEST1, strlen(TEST1)+1);
+
+ event_set(&ev, pair[1], EV_READ, simple_read_cb, &ev);
+ if (event_add(&ev, NULL) == -1)
+ exit(1);
+
+ signal_set(&sig_ev, SIGCHLD, child_signal_cb, &got_sigchld);
+ signal_add(&sig_ev, NULL);
+
+ if ((pid = fork()) == 0) {
+ /* in the child */
+ if (event_reinit(current_base) == -1) {
+ fprintf(stderr, "FAILED (reinit)\n");
+ exit(1);
+ }
+
+ signal_del(&sig_ev);
+
+ called = 0;
+
+ event_dispatch();
+
+ /* we do not send an EOF; simple_read_cb requires an EOF
+ * to set test_ok. we just verify that the callback was
+ * called. */
+ exit(test_ok != 0 || called != 2 ? -2 : 76);
+ }
+
+ /* wait for the child to read the data */
+ sleep(1);
+
+ write(pair[0], TEST1, strlen(TEST1)+1);
+
+ if (waitpid(pid, &status, 0) == -1) {
+ fprintf(stderr, "FAILED (fork)\n");
+ exit(1);
+ }
+
+ if (WEXITSTATUS(status) != 76) {
+ fprintf(stderr, "FAILED (exit): %d\n", WEXITSTATUS(status));
+ exit(1);
+ }
+
+ /* test that the current event loop still works */
+ write(pair[0], TEST1, strlen(TEST1)+1);
+ shutdown(pair[0], SHUT_WR);
+
+ event_dispatch();
+
+ if (!got_sigchld) {
+ fprintf(stdout, "FAILED (sigchld)\n");
+ exit(1);
+ }
+
+ signal_del(&sig_ev);
+
+ cleanup_test();
+}
+
+/* SIGALRM via setitimer; also exercises del+re-add before dispatch to
+ * catch operation-reordering bugs. */
+static void
+test_simplesignal(void)
+{
+ struct event ev;
+ struct itimerval itv;
+
+ setup_test("Simple signal: ");
+ signal_set(&ev, SIGALRM, signal_cb, &ev);
+ signal_add(&ev, NULL);
+ /* find bugs in which operations are re-ordered */
+ signal_del(&ev);
+ signal_add(&ev, NULL);
+
+ memset(&itv, 0, sizeof(itv));
+ itv.it_value.tv_sec = 1;
+ if (setitimer(ITIMER_REAL, &itv, NULL) == -1)
+ goto skip_simplesignal;
+
+ event_dispatch();
+ skip_simplesignal:
+ if (signal_del(&ev) == -1)
+ test_ok = 0;
+
+ cleanup_test();
+}
+
+/* Two events on the same SIGALRM; both must be deletable afterwards.
+ * (The 'skip_simplesignal' label name is copy-pasted from the test
+ * above.) */
+static void
+test_multiplesignal(void)
+{
+ struct event ev_one, ev_two;
+ struct itimerval itv;
+
+ setup_test("Multiple signal: ");
+
+ signal_set(&ev_one, SIGALRM, signal_cb, &ev_one);
+ signal_add(&ev_one, NULL);
+
+ signal_set(&ev_two, SIGALRM, signal_cb, &ev_two);
+ signal_add(&ev_two, NULL);
+
+ memset(&itv, 0, sizeof(itv));
+ itv.it_value.tv_sec = 1;
+ if (setitimer(ITIMER_REAL, &itv, NULL) == -1)
+ goto skip_simplesignal;
+
+ event_dispatch();
+
+ skip_simplesignal:
+ if (signal_del(&ev_one) == -1)
+ test_ok = 0;
+ if (signal_del(&ev_two) == -1)
+ test_ok = 0;
+
+ cleanup_test();
+}
+
+/* A signal raised before the loop runs must still be delivered by a
+ * non-blocking event_loop() pass. */
+static void
+test_immediatesignal(void)
+{
+ struct event ev;
+
+ test_ok = 0;
+ printf("Immediate signal: ");
+ signal_set(&ev, SIGUSR1, signal_cb, &ev);
+ signal_add(&ev, NULL);
+ raise(SIGUSR1);
+ event_loop(EVLOOP_NONBLOCK);
+ signal_del(&ev);
+ cleanup_test();
+}
+
+/* Freeing a base after add+del of a signal event must not assert or
+ * leak the signal pipe. */
+static void
+test_signal_dealloc(void)
+{
+ /* make sure that signal_event is event_del'ed and pipe closed */
+ struct event ev;
+ struct event_base *base = event_init();
+ printf("Signal dealloc: ");
+ signal_set(&ev, SIGUSR1, signal_cb, &ev);
+ signal_add(&ev, NULL);
+ signal_del(&ev);
+ event_base_free(base);
+ /* If we got here without asserting, we're fine. */
+ test_ok = 1;
+ cleanup_test();
+}
+
+/* event_base_free(base1) must close base1's internal signal pipe even
+ * when another base was created afterwards. */
+static void
+test_signal_pipeloss(void)
+{
+ /* make sure that the base1 pipe is closed correctly. */
+ struct event_base *base1, *base2;
+ int pipe1;
+ test_ok = 0;
+ printf("Signal pipeloss: ");
+ base1 = event_init();
+ pipe1 = base1->sig.ev_signal_pair[0];
+ base2 = event_init();
+ event_base_free(base2);
+ event_base_free(base1);
+ if (close(pipe1) != -1 || errno!=EBADF) {
+ /* fd must be closed, so second close gives -1, EBADF */
+ printf("signal pipe not closed. ");
+ test_ok = 0;
+ } else {
+ test_ok = 1;
+ }
+ cleanup_test();
+}
+
+/*
+ * make two bases to catch signals, use both of them. this only works
+ * for event mechanisms that use our signal pipe trick. kqueue handles
+ * signals internally, and all interested kqueues get all the signals.
+ */
+static void
+test_signal_switchbase(void)
+{
+ struct event ev1, ev2;
+ struct event_base *base1, *base2;
+ int is_kqueue;
+ test_ok = 0;
+ printf("Signal switchbase: ");
+ base1 = event_init();
+ base2 = event_init();
+ is_kqueue = !strcmp(event_get_method(),"kqueue");
+ signal_set(&ev1, SIGUSR1, signal_cb, &ev1);
+ signal_set(&ev2, SIGUSR1, signal_cb, &ev2);
+ if (event_base_set(base1, &ev1) ||
+ event_base_set(base2, &ev2) ||
+ event_add(&ev1, NULL) ||
+ event_add(&ev2, NULL)) {
+ fprintf(stderr, "%s: cannot set base, add\n", __func__);
+ exit(1);
+ }
+
+ test_ok = 0;
+ /* can handle signal before loop is called */
+ raise(SIGUSR1);
+ event_base_loop(base2, EVLOOP_NONBLOCK);
+ if (is_kqueue) {
+ if (!test_ok)
+ goto done;
+ test_ok = 0;
+ }
+ event_base_loop(base1, EVLOOP_NONBLOCK);
+ if (test_ok && !is_kqueue) {
+ test_ok = 0;
+
+ /* set base1 to handle signals */
+ event_base_loop(base1, EVLOOP_NONBLOCK);
+ raise(SIGUSR1);
+ event_base_loop(base1, EVLOOP_NONBLOCK);
+ event_base_loop(base2, EVLOOP_NONBLOCK);
+ }
+ done:
+ event_base_free(base1);
+ event_base_free(base2);
+ cleanup_test();
+}
+
+/*
+ * assert that a signal event removed from the event queue really is
+ * removed - with no possibility of it's parent handler being fired.
+ */
+static void
+test_signal_assert(void)
+{
+ struct event ev;
+ struct event_base *base = event_init();
+ test_ok = 0;
+ printf("Signal handler assert: ");
+ /* use SIGCONT so we don't kill ourselves when we signal to nowhere */
+ signal_set(&ev, SIGCONT, signal_cb, &ev);
+ signal_add(&ev, NULL);
+ /*
+ * if signal_del() fails to reset the handler, it's current handler
+ * will still point to evsignal_handler().
+ */
+ signal_del(&ev);
+
+ raise(SIGCONT);
+ /* only way to verify we were in evsignal_handler() */
+ if (base->sig.evsignal_caught)
+ test_ok = 0;
+ else
+ test_ok = 1;
+
+ event_base_free(base);
+ cleanup_test();
+ return;
+}
+
+/*
+ * assert that we restore our previous signal handler properly.
+ */
+static void
+test_signal_restore(void)
+{
+ struct event ev;
+ struct event_base *base = event_init();
+#ifdef HAVE_SIGACTION
+ struct sigaction sa;
+#endif
+
+ test_ok = 0;
+ printf("Signal handler restore: ");
+ /* install signal_cb_sa as the "previous" handler first */
+#ifdef HAVE_SIGACTION
+ sa.sa_handler = signal_cb_sa;
+ sa.sa_flags = 0x0;
+ sigemptyset(&sa.sa_mask);
+ if (sigaction(SIGUSR1, &sa, NULL) == -1)
+ goto out;
+#else
+ if (signal(SIGUSR1, signal_cb_sa) == SIG_ERR)
+ goto out;
+#endif
+ signal_set(&ev, SIGUSR1, signal_cb, &ev);
+ signal_add(&ev, NULL);
+ signal_del(&ev);
+
+ raise(SIGUSR1);
+ /* 1 == signal_cb, 2 == signal_cb_sa, we want our previous handler */
+ if (test_ok != 2)
+ test_ok = 0;
+out:
+ event_base_free(base);
+ cleanup_test();
+ return;
+}
+
+/* Re-raise the same signal from inside its own callback up to 5 times,
+ * then exit the loop. */
+static void
+signal_cb_swp(int sig, short event, void *arg)
+{
+ called++;
+ if (called < 5)
+ raise(sig);
+ else
+ event_loopexit(NULL);
+}
+/* Watchdog timer: first run (called == -1) re-arms itself 5s out and
+ * kicks off the signal chain; if it fires again the signals stalled
+ * and the test fails. */
+static void
+timeout_cb_swp(int fd, short event, void *arg)
+{
+ if (called == -1) {
+ struct timeval tv = {5, 0};
+
+ called = 0;
+ evtimer_add((struct event *)arg, &tv);
+ raise(SIGUSR1);
+ return;
+ }
+ test_ok = 0;
+ event_loopexit(NULL);
+}
+
+/* Signals raised while another signal is being processed must still be
+ * delivered (chain of 5 SIGUSR1s beats the 5s watchdog). */
+static void
+test_signal_while_processing(void)
+{
+ struct event_base *base = event_init();
+ struct event ev, ev_timer;
+ struct timeval tv = {0, 0};
+
+ setup_test("Receiving a signal while processing other signal: ");
+
+ called = -1;
+ test_ok = 1;
+ signal_set(&ev, SIGUSR1, signal_cb_swp, NULL);
+ signal_add(&ev, NULL);
+ evtimer_set(&ev_timer, timeout_cb_swp, &ev_timer);
+ evtimer_add(&ev_timer, &tv);
+ event_dispatch();
+
+ event_base_free(base);
+ cleanup_test();
+ return;
+}
+#endif
+
+/* Freeing a base that still has an added (undeleted) event must not
+ * crash. */
+static void
+test_free_active_base(void)
+{
+ struct event_base *base1;
+ struct event ev1;
+ setup_test("Free active base: ");
+ base1 = event_init();
+ event_set(&ev1, pair[1], EV_READ, simple_read_cb, &ev1);
+ event_base_set(base1, &ev1);
+ event_add(&ev1, NULL);
+ /* event_del(&ev1); */
+ event_base_free(base1);
+ test_ok = 1;
+ cleanup_test();
+}
+
+/* Same simple-read scenario but driven through an explicitly created
+ * base (event_base_new/event_base_dispatch) instead of the default. */
+static void
+test_event_base_new(void)
+{
+ struct event_base *base;
+ struct event ev1;
+ setup_test("Event base new: ");
+
+ write(pair[0], TEST1, strlen(TEST1)+1);
+ shutdown(pair[0], SHUT_WR);
+
+ base = event_base_new();
+ event_set(&ev1, pair[1], EV_READ, simple_read_cb, &ev1);
+ event_base_set(base, &ev1);
+ event_add(&ev1, NULL);
+
+ event_base_dispatch(base);
+
+ event_base_free(base);
+ test_ok = 1;
+ cleanup_test();
+}
+
+/*
+ * event_loopexit() must terminate event_dispatch() after ~1 second
+ * even though a 24h timer is still pending.  Passes when the measured
+ * dispatch time is under 2 seconds.
+ */
+static void
+test_loopexit(void)
+{
+ struct timeval tv, tv_start, tv_end;
+ struct event ev;
+
+ setup_test("Loop exit: ");
+
+ tv.tv_usec = 0;
+ tv.tv_sec = 60*60*24;
+ evtimer_set(&ev, timeout_cb, NULL);
+ evtimer_add(&ev, &tv);
+
+ tv.tv_usec = 0;
+ tv.tv_sec = 1;
+ event_loopexit(&tv);
+
+ evutil_gettimeofday(&tv_start, NULL);
+ event_dispatch();
+ evutil_gettimeofday(&tv_end, NULL);
+ evutil_timersub(&tv_end, &tv_start, &tv_end);
+
+ evtimer_del(&ev);
+
+ /* BUGFIX: check the measured elapsed time (tv_end).  The original
+  * tested tv.tv_sec, but tv still holds the 1-second loopexit
+  * timeout, so the test passed vacuously and tv_end was unused. */
+ if (tv_end.tv_sec < 2)
+ test_ok = 1;
+
+ cleanup_test();
+}
+
+/* Two loopexit requests on the same base (1s then 2s); dispatch must
+ * return without hanging. */
+static void
+test_loopexit_multiple(void)
+{
+ struct timeval tv;
+ struct event_base *base;
+
+ setup_test("Loop Multiple exit: ");
+
+ base = event_base_new();
+
+ tv.tv_usec = 0;
+ tv.tv_sec = 1;
+ event_base_loopexit(base, &tv);
+
+ tv.tv_usec = 0;
+ tv.tv_sec = 2;
+ event_base_loopexit(base, &tv);
+
+ event_base_dispatch(base);
+
+ event_base_free(base);
+
+ test_ok = 1;
+
+ cleanup_test();
+}
+
+/* First timer callback: pass and break out of the loop immediately. */
+static void
+break_cb(int fd, short events, void *arg)
+{
+ test_ok = 1;
+ event_loopbreak();
+}
+
+/* Must never run if event_loopbreak() works. */
+static void
+fail_cb(int fd, short events, void *arg)
+{
+ test_ok = 0;
+}
+
+/* Two zero-delay timers: break_cb runs first and loopbreak must stop
+ * the loop before fail_cb is invoked. */
+static void
+test_loopbreak(void)
+{
+ struct event ev1, ev2;
+ struct timeval tv;
+
+ setup_test("Loop break: ");
+
+ tv.tv_sec = 0;
+ tv.tv_usec = 0;
+ evtimer_set(&ev1, break_cb, NULL);
+ evtimer_add(&ev1, &tv);
+ evtimer_set(&ev2, fail_cb, NULL);
+ evtimer_add(&ev2, &tv);
+
+ event_dispatch();
+
+ evtimer_del(&ev1);
+ evtimer_del(&ev2);
+
+ cleanup_test();
+}
+
+/* evbuffer_add_printf must produce "hello/1" (7 bytes, NUL available
+ * for strcmp). */
+static void
+test_evbuffer(void) {
+
+ struct evbuffer *evb = evbuffer_new();
+ setup_test("Testing Evbuffer: ");
+
+ evbuffer_add_printf(evb, "%s/%d", "hello", 1);
+
+ if (EVBUFFER_LENGTH(evb) == 7 &&
+ strcmp((char*)EVBUFFER_DATA(evb), "hello/1") == 0)
+ test_ok = 1;
+
+ evbuffer_free(evb);
+
+ cleanup_test();
+}
+
+/* Regression tests for evbuffer_find: no match past the logical end,
+ * no read past the allocation after a drain (valgrind-visible in
+ * r309), and a correct match at the very end of the buffer. */
+static void
+test_evbuffer_find(void)
+{
+ u_char* p;
+ const char* test1 = "1234567890\r\n";
+ const char* test2 = "1234567890\r";
+#define EVBUFFER_INITIAL_LENGTH 256
+ char test3[EVBUFFER_INITIAL_LENGTH];
+ unsigned int i;
+ struct evbuffer * buf = evbuffer_new();
+
+ /* make sure evbuffer_find doesn't match past the end of the buffer */
+ fprintf(stdout, "Testing evbuffer_find 1: ");
+ evbuffer_add(buf, (u_char*)test1, strlen(test1));
+ evbuffer_drain(buf, strlen(test1));
+ evbuffer_add(buf, (u_char*)test2, strlen(test2));
+ p = evbuffer_find(buf, (u_char*)"\r\n", 2);
+ if (p == NULL) {
+ fprintf(stdout, "OK\n");
+ } else {
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+ }
+
+ /*
+ * drain the buffer and do another find; in r309 this would
+ * read past the allocated buffer causing a valgrind error.
+ */
+ fprintf(stdout, "Testing evbuffer_find 2: ");
+ evbuffer_drain(buf, strlen(test2));
+ for (i = 0; i < EVBUFFER_INITIAL_LENGTH; ++i)
+ test3[i] = 'a';
+ test3[EVBUFFER_INITIAL_LENGTH - 1] = 'x';
+ evbuffer_add(buf, (u_char *)test3, EVBUFFER_INITIAL_LENGTH);
+ p = evbuffer_find(buf, (u_char *)"xy", 2);
+ if (p == NULL) {
+ printf("OK\n");
+ } else {
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+ }
+
+ /* simple test for match at end of allocated buffer */
+ fprintf(stdout, "Testing evbuffer_find 3: ");
+ p = evbuffer_find(buf, (u_char *)"ax", 2);
+ if (p != NULL && strncmp((char*)p, "ax", 2) == 0) {
+ printf("OK\n");
+ } else {
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+ }
+
+ evbuffer_free(buf);
+}
+
+/*
+ * simple bufferevent test
+ */
+
+/* Receiver: stop reading and score once exactly 8333 bytes arrived. */
+static void
+readcb(struct bufferevent *bev, void *arg)
+{
+ if (EVBUFFER_LENGTH(bev->input) == 8333) {
+ bufferevent_disable(bev, EV_READ);
+ test_ok++;
+ }
+}
+
+/* Sender: score once the output buffer fully drained. */
+static void
+writecb(struct bufferevent *bev, void *arg)
+{
+ if (EVBUFFER_LENGTH(bev->output) == 0)
+ test_ok++;
+}
+
+/* Any error fails the test. */
+static void
+errorcb(struct bufferevent *bev, short what, void *arg)
+{
+ test_ok = -2;
+}
+
+/* Push 8333 bytes through a bufferevent pair; pass requires both the
+ * write-drained and read-complete callbacks (test_ok == 2). */
+static void
+test_bufferevent(void)
+{
+ struct bufferevent *bev1, *bev2;
+ char buffer[8333];
+ int i;
+
+ setup_test("Bufferevent: ");
+
+ bev1 = bufferevent_new(pair[0], readcb, writecb, errorcb, NULL);
+ bev2 = bufferevent_new(pair[1], readcb, writecb, errorcb, NULL);
+
+ bufferevent_disable(bev1, EV_READ);
+ bufferevent_enable(bev2, EV_READ);
+
+ for (i = 0; i < sizeof(buffer); i++)
+ buffer[i] = i;
+
+ bufferevent_write(bev1, buffer, sizeof(buffer));
+
+ event_dispatch();
+
+ bufferevent_free(bev1);
+ bufferevent_free(bev2);
+
+ if (test_ok != 2)
+ test_ok = 0;
+
+ cleanup_test();
+}
+
+/*
+ * test watermarks and bufferevent
+ */
+
+/* Reader with a 10/20 read watermark: every delivery must be within
+ * those bounds; stop and score after 65000 total bytes. */
+static void
+wm_readcb(struct bufferevent *bev, void *arg)
+{
+ int len = EVBUFFER_LENGTH(bev->input);
+ static int nread;
+
+ assert(len >= 10 && len <= 20);
+
+ evbuffer_drain(bev->input, len);
+
+ nread += len;
+ if (nread == 65000) {
+ bufferevent_disable(bev, EV_READ);
+ test_ok++;
+ }
+}
+
+/* Sender side: score once the output drained. */
+static void
+wm_writecb(struct bufferevent *bev, void *arg)
+{
+ if (EVBUFFER_LENGTH(bev->output) == 0)
+ test_ok++;
+}
+
+/* Any error fails the test. */
+static void
+wm_errorcb(struct bufferevent *bev, short what, void *arg)
+{
+ test_ok = -2;
+}
+
+/* 65000 bytes through a bufferevent pair with the receiver limited to
+ * a low=10 / high=20 read watermark; pass requires test_ok == 2. */
+static void
+test_bufferevent_watermarks(void)
+{
+ struct bufferevent *bev1, *bev2;
+ char buffer[65000];
+ int i;
+
+ setup_test("Bufferevent Watermarks: ");
+
+ bev1 = bufferevent_new(pair[0], NULL, wm_writecb, wm_errorcb, NULL);
+ bev2 = bufferevent_new(pair[1], wm_readcb, NULL, wm_errorcb, NULL);
+
+ bufferevent_disable(bev1, EV_READ);
+ bufferevent_enable(bev2, EV_READ);
+
+ for (i = 0; i < sizeof(buffer); i++)
+ buffer[i] = i;
+
+ bufferevent_write(bev1, buffer, sizeof(buffer));
+
+ /* limit the reading on the receiving bufferevent */
+ bufferevent_setwatermark(bev2, EV_READ, 10, 20);
+
+ event_dispatch();
+
+ bufferevent_free(bev1);
+ bufferevent_free(bev2);
+
+ if (test_ok != 2)
+ test_ok = 0;
+
+ cleanup_test();
+}
+
+/* Per-priority test state: a timer event and its invocation count. */
+struct test_pri_event {
+ struct event ev;
+ int count;
+};
+
+/* Re-add the event with zero delay until it has run 3 times, then ask
+ * the loop to exit. */
+static void
+test_priorities_cb(int fd, short what, void *arg)
+{
+ struct test_pri_event *pri = arg;
+ struct timeval tv;
+
+ if (pri->count == 3) {
+ event_loopexit(NULL);
+ return;
+ }
+
+ pri->count++;
+
+ evutil_timerclear(&tv);
+ event_add(&pri->ev, &tv);
+}
+
+/*
+ * With npriorities levels, event "one" (priority 0) must starve event
+ * "two" (lowest priority) to the degree the expectations below encode.
+ * NOTE(review): uses global_base, which is not assigned anywhere in
+ * this chunk — presumably set in main(); confirm.
+ */
+static void
+test_priorities(int npriorities)
+{
+ char buf[32];
+ struct test_pri_event one, two;
+ struct timeval tv;
+
+ evutil_snprintf(buf, sizeof(buf), "Testing Priorities %d: ", npriorities);
+ setup_test(buf);
+
+ event_base_priority_init(global_base, npriorities);
+
+ memset(&one, 0, sizeof(one));
+ memset(&two, 0, sizeof(two));
+
+ timeout_set(&one.ev, test_priorities_cb, &one);
+ if (event_priority_set(&one.ev, 0) == -1) {
+ fprintf(stderr, "%s: failed to set priority", __func__);
+ exit(1);
+ }
+
+ timeout_set(&two.ev, test_priorities_cb, &two);
+ if (event_priority_set(&two.ev, npriorities - 1) == -1) {
+ fprintf(stderr, "%s: failed to set priority", __func__);
+ exit(1);
+ }
+
+ evutil_timerclear(&tv);
+
+ if (event_add(&one.ev, &tv) == -1)
+ exit(1);
+ if (event_add(&two.ev, &tv) == -1)
+ exit(1);
+
+ event_dispatch();
+
+ event_del(&one.ev);
+ event_del(&two.ev);
+
+ if (npriorities == 1) {
+ if (one.count == 3 && two.count == 3)
+ test_ok = 1;
+ } else if (npriorities == 2) {
+ /* Two is called once because event_loopexit is priority 1 */
+ if (one.count == 3 && two.count == 1)
+ test_ok = 1;
+ } else {
+ if (one.count == 3 && two.count == 0)
+ test_ok = 1;
+ }
+
+ cleanup_test();
+}
+
+static void
+test_multiple_cb(int fd, short event, void *arg)
+{
+ if (event & EV_READ)
+ test_ok |= 1;
+ else if (event & EV_WRITE)
+ test_ok |= 2;
+}
+
+static void
+test_multiple_events_for_same_fd(void)
+{
+ struct event e1, e2;
+
+ setup_test("Multiple events for same fd: ");
+
+ event_set(&e1, pair[0], EV_READ, test_multiple_cb, NULL);
+ event_add(&e1, NULL);
+ event_set(&e2, pair[0], EV_WRITE, test_multiple_cb, NULL);
+ event_add(&e2, NULL);
+ event_loop(EVLOOP_ONCE);
+ event_del(&e2);
+ write(pair[1], TEST1, strlen(TEST1)+1);
+ event_loop(EVLOOP_ONCE);
+ event_del(&e1);
+
+ if (test_ok != 3)
+ test_ok = 0;
+
+ cleanup_test();
+}
+
+int evtag_decode_int(uint32_t *pnumber, struct evbuffer *evbuf);
+int evtag_encode_tag(struct evbuffer *evbuf, uint32_t number);
+int evtag_decode_tag(uint32_t *pnumber, struct evbuffer *evbuf);
+
+/* Callback for the want-only-once test: a non-persistent EV_READ event
+ * must fire exactly once even though more data becomes readable.
+ * A second invocation (called != 0) marks the test as failed. */
+static void
+read_once_cb(int fd, short event, void *arg)
+{
+ char buf[256];
+ int len;
+
+ len = read(fd, buf, sizeof(buf));
+
+ if (called) {
+ /* Event fired a second time: the non-persistent guarantee broke. */
+ test_ok = 0;
+ } else if (len) {
+ /* Assumes global pair[0] can be used for writing */
+ write(pair[0], TEST1, strlen(TEST1)+1);
+ test_ok = 1;
+ }
+
+ called++;
+}
+
+/* Verify that an event added without EV_PERSIST is delivered only once.
+ * Writes data, arms a one-second loopexit as a watchdog, and relies on
+ * read_once_cb to flag a failure if it is invoked more than once. */
+static void
+test_want_only_once(void)
+{
+ struct event ev;
+ struct timeval tv;
+
+ /* Very simple read test */
+ setup_test("Want read only once: ");
+
+ write(pair[0], TEST1, strlen(TEST1)+1);
+
+ /* Setup the loop termination */
+ evutil_timerclear(&tv);
+ tv.tv_sec = 1;
+ event_loopexit(&tv);
+
+ event_set(&ev, pair[1], EV_READ, read_once_cb, &ev);
+ if (event_add(&ev, NULL) == -1)
+ exit(1);
+ event_dispatch();
+
+ cleanup_test();
+}
+
+#define TEST_MAX_INT 6
+
+/* Round-trip test for evtag varint encoding: encode TEST_MAX_INT sample
+ * integers into one buffer, decode them back in order, and require an
+ * exact match with no bytes left over. exit(1) on any mismatch. */
+static void
+evtag_int_test(void)
+{
+ struct evbuffer *tmp = evbuffer_new();
+ uint32_t integers[TEST_MAX_INT] = {
+ 0xaf0, 0x1000, 0x1, 0xdeadbeef, 0x00, 0xbef000
+ };
+ uint32_t integer;
+ int i;
+
+ /* Encode phase: report how many bytes each value occupied. */
+ for (i = 0; i < TEST_MAX_INT; i++) {
+ int oldlen, newlen;
+ oldlen = EVBUFFER_LENGTH(tmp);
+ encode_int(tmp, integers[i]);
+ newlen = EVBUFFER_LENGTH(tmp);
+ fprintf(stdout, "\t\tencoded 0x%08x with %d bytes\n",
+ integers[i], newlen - oldlen);
+ }
+
+ /* Decode phase: values must come back in the same order. */
+ for (i = 0; i < TEST_MAX_INT; i++) {
+ if (evtag_decode_int(&integer, tmp) == -1) {
+ fprintf(stderr, "decode %d failed", i);
+ exit(1);
+ }
+ if (integer != integers[i]) {
+ fprintf(stderr, "got %x, wanted %x",
+ integer, integers[i]);
+ exit(1);
+ }
+ }
+
+ /* A clean round trip leaves the buffer empty. */
+ if (EVBUFFER_LENGTH(tmp) != 0) {
+ fprintf(stderr, "trailing data");
+ exit(1);
+ }
+ evbuffer_free(tmp);
+
+ fprintf(stdout, "\t%s: OK\n", __func__);
+}
+
+/* Fuzz the evtag unmarshaller: feed 100 buffers of random bytes and
+ * require that nearly all of them are rejected, then corrupt the length
+ * field of a valid marshalled timeval and require that decode fails.
+ * NOTE(review): rand() is not seeded here, so the "random" input is the
+ * same on every run — deterministic, but confirm that is intended. */
+static void
+evtag_fuzz(void)
+{
+ u_char buffer[4096];
+ struct evbuffer *tmp = evbuffer_new();
+ struct timeval tv;
+ int i, j;
+
+ int not_failed = 0;
+ for (j = 0; j < 100; j++) {
+ for (i = 0; i < sizeof(buffer); i++)
+ buffer[i] = rand();
+ evbuffer_drain(tmp, -1);
+ evbuffer_add(tmp, buffer, sizeof(buffer));
+
+ if (evtag_unmarshal_timeval(tmp, 0, &tv) != -1)
+ not_failed++;
+ }
+
+ /* The majority of decodes should fail */
+ if (not_failed >= 10) {
+ fprintf(stderr, "evtag_unmarshal should have failed");
+ exit(1);
+ }
+
+ /* Now insert some corruption into the tag length field */
+ evbuffer_drain(tmp, -1);
+ evutil_timerclear(&tv);
+ tv.tv_sec = 1;
+ evtag_marshal_timeval(tmp, 0, &tv);
+ evbuffer_add(tmp, buffer, sizeof(buffer));
+
+ /* Byte 1 follows the tag byte; overwriting it corrupts the length. */
+ EVBUFFER_DATA(tmp)[1] = 0xff;
+ if (evtag_unmarshal_timeval(tmp, 0, &tv) != -1) {
+ fprintf(stderr, "evtag_unmarshal_timeval should have failed");
+ exit(1);
+ }
+
+ evbuffer_free(tmp);
+
+ fprintf(stdout, "\t%s: OK\n", __func__);
+}
+
+/* Round-trip test for evtag tag-number encoding (evtag_encode_tag /
+ * evtag_decode_tag), mirroring evtag_int_test: encode the same sample
+ * values, decode in order, and require an empty buffer afterwards. */
+static void
+evtag_tag_encoding(void)
+{
+ struct evbuffer *tmp = evbuffer_new();
+ uint32_t integers[TEST_MAX_INT] = {
+ 0xaf0, 0x1000, 0x1, 0xdeadbeef, 0x00, 0xbef000
+ };
+ uint32_t integer;
+ int i;
+
+ /* Encode phase: report per-value encoded size. */
+ for (i = 0; i < TEST_MAX_INT; i++) {
+ int oldlen, newlen;
+ oldlen = EVBUFFER_LENGTH(tmp);
+ evtag_encode_tag(tmp, integers[i]);
+ newlen = EVBUFFER_LENGTH(tmp);
+ fprintf(stdout, "\t\tencoded 0x%08x with %d bytes\n",
+ integers[i], newlen - oldlen);
+ }
+
+ /* Decode phase: exact values, in order. */
+ for (i = 0; i < TEST_MAX_INT; i++) {
+ if (evtag_decode_tag(&integer, tmp) == -1) {
+ fprintf(stderr, "decode %d failed", i);
+ exit(1);
+ }
+ if (integer != integers[i]) {
+ fprintf(stderr, "got %x, wanted %x",
+ integer, integers[i]);
+ exit(1);
+ }
+ }
+
+ if (EVBUFFER_LENGTH(tmp) != 0) {
+ fprintf(stderr, "trailing data");
+ exit(1);
+ }
+ evbuffer_free(tmp);
+
+ fprintf(stdout, "\t%s: OK\n", __func__);
+}
+
+/* Driver for the tagging test group: initializes the evtag subsystem and
+ * runs the integer round-trip, fuzz, and tag-encoding tests in turn.
+ * The sub-tests exit(1) on failure, so reaching "OK" means all passed. */
+static void
+evtag_test(void)
+{
+ fprintf(stdout, "Testing Tagging:\n");
+
+ evtag_init();
+ evtag_int_test();
+ evtag_fuzz();
+
+ evtag_tag_encoding();
+
+ fprintf(stdout, "OK\n");
+}
+
+#ifndef WIN32
+/* End-to-end test of the event_rpcgen-generated RPC structures:
+ * build a msg with a nested kill and 1000 run entries, marshal it,
+ * peek and verify the tag (0xdeaf), unmarshal into a second msg, and
+ * check that all fields survived the round trip. Prints per-add timing.
+ * Any failure exits the process with status 1. */
+static void
+rpc_test(void)
+{
+ struct msg *msg, *msg2;
+ struct kill *attack;
+ struct run *run;
+ struct evbuffer *tmp = evbuffer_new();
+ struct timeval tv_start, tv_end;
+ uint32_t tag;
+ int i;
+
+ fprintf(stdout, "Testing RPC: ");
+
+ msg = msg_new();
+ EVTAG_ASSIGN(msg, from_name, "niels");
+ EVTAG_ASSIGN(msg, to_name, "phoenix");
+
+ /* attack_get lazily allocates the nested kill structure. */
+ if (EVTAG_GET(msg, attack, &attack) == -1) {
+ fprintf(stderr, "Failed to set kill message.\n");
+ exit(1);
+ }
+
+ EVTAG_ASSIGN(attack, weapon, "feather");
+ EVTAG_ASSIGN(attack, action, "tickle");
+
+ /* Time the addition of 1000 run entries for the report below. */
+ evutil_gettimeofday(&tv_start, NULL);
+ for (i = 0; i < 1000; ++i) {
+ run = EVTAG_ADD(msg, run);
+ if (run == NULL) {
+ fprintf(stderr, "Failed to add run message.\n");
+ exit(1);
+ }
+ EVTAG_ASSIGN(run, how, "very fast but with some data in it");
+ EVTAG_ASSIGN(run, fixed_bytes,
+ (unsigned char*)"012345678901234567890123");
+ }
+
+ if (msg_complete(msg) == -1) {
+ fprintf(stderr, "Failed to make complete message.\n");
+ exit(1);
+ }
+
+ evtag_marshal_msg(tmp, 0xdeaf, msg);
+
+ /* evtag_peek reads the tag without consuming the buffer. */
+ if (evtag_peek(tmp, &tag) == -1) {
+ fprintf(stderr, "Failed to peek tag.\n");
+ exit (1);
+ }
+
+ if (tag != 0xdeaf) {
+ fprintf(stderr, "Got incorrect tag: %0x.\n", tag);
+ exit (1);
+ }
+
+ msg2 = msg_new();
+ if (evtag_unmarshal_msg(tmp, 0xdeaf, msg2) == -1) {
+ fprintf(stderr, "Failed to unmarshal message.\n");
+ exit(1);
+ }
+
+ /* i == 1000 here; report microseconds per EVTAG_ADD iteration. */
+ evutil_gettimeofday(&tv_end, NULL);
+ evutil_timersub(&tv_end, &tv_start, &tv_end);
+ fprintf(stderr, "(%.1f us/add) ",
+ (float)tv_end.tv_sec/(float)i * 1000000.0 +
+ tv_end.tv_usec / (float)i);
+
+ if (!EVTAG_HAS(msg2, from_name) ||
+ !EVTAG_HAS(msg2, to_name) ||
+ !EVTAG_HAS(msg2, attack)) {
+ fprintf(stderr, "Missing data structures.\n");
+ exit(1);
+ }
+
+ if (EVTAG_LEN(msg2, run) != i) {
+ fprintf(stderr, "Wrong number of run messages.\n");
+ exit(1);
+ }
+
+ msg_free(msg);
+ msg_free(msg2);
+
+ evbuffer_free(tmp);
+
+ fprintf(stdout, "OK\n");
+}
+#endif
+
+/* Exercise evutil_strtoll(): 64-bit decode of positive and negative
+ * decimal strings beyond 32-bit range, endptr positioning after leading
+ * whitespace and trailing garbage, and rejection of non-numeric input.
+ * (Test label fixed: it previously read "evutil_stroll".) */
+static void
+test_evutil_strtoll(void)
+{
+ const char *s;
+ char *endptr;
+ setup_test("evutil_strtoll: ");
+ test_ok = 0;
+
+ if (evutil_strtoll("5000000000", NULL, 10) != ((ev_int64_t)5000000)*1000)
+ goto err;
+ if (evutil_strtoll("-5000000000", NULL, 10) != ((ev_int64_t)5000000)*-1000)
+ goto err;
+ /* endptr must point just past the digits (leading spaces skipped). */
+ s = " 99999stuff";
+ if (evutil_strtoll(s, &endptr, 10) != (ev_int64_t)99999)
+ goto err;
+ if (endptr != s+6)
+ goto err;
+ if (evutil_strtoll("foo", NULL, 10) != 0)
+ goto err;
+
+ test_ok = 1;
+ err:
+ cleanup_test();
+}
+
+
+/* Regression-test entry point: initializes the platform (Winsock on
+ * WIN32, SIGPIPE ignore elsewhere) and the global event base, then runs
+ * every test group in sequence. Individual tests exit(1) on failure. */
+int
+main (int argc, char **argv)
+{
+#ifdef WIN32
+ WORD wVersionRequested;
+ WSADATA wsaData;
+ int err;
+
+ wVersionRequested = MAKEWORD( 2, 2 );
+
+ /* NOTE(review): err is assigned but never checked — a WSAStartup
+ * failure would go unnoticed; confirm this is acceptable here. */
+ err = WSAStartup( wVersionRequested, &wsaData );
+#endif
+
+#ifndef WIN32
+ if (signal(SIGPIPE, SIG_IGN) == SIG_ERR)
+ return (1);
+#endif
+ /* Unbuffered stdout so progress is visible even if a test aborts. */
+ setvbuf(stdout, NULL, _IONBF, 0);
+
+ /* Initialize the event library */
+ global_base = event_init();
+
+ test_registerfds();
+
+ test_evutil_strtoll();
+
+ /* use the global event base and need to be called first */
+ test_priorities(1);
+ test_priorities(2);
+ test_priorities(3);
+
+ test_evbuffer();
+ test_evbuffer_find();
+
+ test_bufferevent();
+ test_bufferevent_watermarks();
+
+ test_free_active_base();
+
+ test_event_base_new();
+
+ http_suite();
+
+#ifndef WIN32
+ rpc_suite();
+#endif
+
+ dns_suite();
+
+#ifndef WIN32
+ test_fork();
+#endif
+
+ test_simpleread();
+
+ test_simplewrite();
+
+ test_multiple();
+
+ test_persistent();
+
+ test_combined();
+
+ test_simpletimeout();
+#ifndef WIN32
+ test_simplesignal();
+ test_multiplesignal();
+ test_immediatesignal();
+#endif
+ test_loopexit();
+ test_loopbreak();
+
+ test_loopexit_multiple();
+
+ test_multiple_events_for_same_fd();
+
+ test_want_only_once();
+
+ evtag_test();
+
+#ifndef WIN32
+ rpc_test();
+
+ test_signal_dealloc();
+ test_signal_pipeloss();
+ test_signal_switchbase();
+ test_signal_restore();
+ test_signal_assert();
+ test_signal_while_processing();
+#endif
+
+ return (0);
+}
+
diff --git a/libevent/test/regress.gen.c b/libevent/test/regress.gen.c
new file mode 100644
index 00000000000..ff31096a7c2
--- /dev/null
+++ b/libevent/test/regress.gen.c
@@ -0,0 +1,872 @@
+/*
+ * Automatically generated from ./regress.rpc
+ * by event_rpcgen.py/0.1. DO NOT EDIT THIS FILE.
+ */
+
+#include <sys/types.h>
+#include <sys/time.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <event.h>
+
+
+#include "./regress.gen.h"
+
+void event_err(int eval, const char *fmt, ...);
+void event_warn(const char *fmt, ...);
+void event_errx(int eval, const char *fmt, ...);
+void event_warnx(const char *fmt, ...);
+
+
+/*
+ * Implementation of msg
+ */
+
+static struct msg_access_ __msg_base = {
+ msg_from_name_assign,
+ msg_from_name_get,
+ msg_to_name_assign,
+ msg_to_name_get,
+ msg_attack_assign,
+ msg_attack_get,
+ msg_run_assign,
+ msg_run_get,
+ msg_run_add,
+};
+
+struct msg *
+msg_new(void)
+{
+ struct msg *tmp;
+ if ((tmp = malloc(sizeof(struct msg))) == NULL) {
+ event_warn("%s: malloc", __func__);
+ return (NULL);
+ }
+ tmp->base = &__msg_base;
+
+ tmp->from_name_data = NULL;
+ tmp->from_name_set = 0;
+
+ tmp->to_name_data = NULL;
+ tmp->to_name_set = 0;
+
+ tmp->attack_data = NULL;
+ tmp->attack_set = 0;
+
+ tmp->run_data = NULL;
+ tmp->run_length = 0;
+ tmp->run_num_allocated = 0;
+ tmp->run_set = 0;
+
+ return (tmp);
+}
+
+
+
+
+struct run *
+msg_run_add(struct msg *msg)
+{
+ if (++msg->run_length >= msg->run_num_allocated) {
+ int tobe_allocated = msg->run_num_allocated;
+ struct run ** new_data = NULL;
+ tobe_allocated = !tobe_allocated ? 1 : tobe_allocated << 1;
+ new_data = (struct run **) realloc(msg->run_data,
+ tobe_allocated * sizeof(struct run *));
+ if (new_data == NULL)
+ goto error;
+ msg->run_data = new_data;
+ msg->run_num_allocated = tobe_allocated;
+ }
+ msg->run_data[msg->run_length - 1] = run_new();
+ if (msg->run_data[msg->run_length - 1] == NULL)
+ goto error;
+ msg->run_set = 1;
+ return (msg->run_data[msg->run_length - 1]);
+error:
+ --msg->run_length;
+ return (NULL);
+}
+
+
+int
+msg_from_name_assign(struct msg *msg,
+ const char * value)
+{
+ if (msg->from_name_data != NULL)
+ free(msg->from_name_data);
+ if ((msg->from_name_data = strdup(value)) == NULL)
+ return (-1);
+ msg->from_name_set = 1;
+ return (0);
+}
+
+int
+msg_to_name_assign(struct msg *msg,
+ const char * value)
+{
+ if (msg->to_name_data != NULL)
+ free(msg->to_name_data);
+ if ((msg->to_name_data = strdup(value)) == NULL)
+ return (-1);
+ msg->to_name_set = 1;
+ return (0);
+}
+
+int
+msg_attack_assign(struct msg *msg,
+ const struct kill* value)
+{
+ struct evbuffer *tmp = NULL;
+ if (msg->attack_set) {
+ kill_clear(msg->attack_data);
+ msg->attack_set = 0;
+ } else {
+ msg->attack_data = kill_new();
+ if (msg->attack_data == NULL) {
+ event_warn("%s: kill_new()", __func__);
+ goto error;
+ }
+ }
+ if ((tmp = evbuffer_new()) == NULL) {
+ event_warn("%s: evbuffer_new()", __func__);
+ goto error;
+ }
+ kill_marshal(tmp, value);
+ if (kill_unmarshal(msg->attack_data, tmp) == -1) {
+ event_warnx("%s: kill_unmarshal", __func__);
+ goto error;
+ }
+ msg->attack_set = 1;
+ evbuffer_free(tmp);
+ return (0);
+ error:
+ if (tmp != NULL)
+ evbuffer_free(tmp);
+ if (msg->attack_data != NULL) {
+ kill_free(msg->attack_data);
+ msg->attack_data = NULL;
+ }
+ return (-1);
+}
+
+int
+msg_run_assign(struct msg *msg, int off,
+ const struct run * value)
+{
+ struct evbuffer *tmp = NULL;
+ if (!msg->run_set || off < 0 || off >= msg->run_length)
+ return (-1);
+ run_clear(msg->run_data[off]);
+ if ((tmp = evbuffer_new()) == NULL) {
+ event_warn("%s: evbuffer_new()", __func__);
+ goto error;
+ }
+ run_marshal(tmp, value);
+ if (run_unmarshal(msg->run_data[off], tmp) == -1) {
+ event_warnx("%s: run_unmarshal", __func__);
+ goto error;
+ }
+ evbuffer_free(tmp);
+ return (0);
+error:
+ if (tmp != NULL)
+ evbuffer_free(tmp);
+ run_clear(msg->run_data[off]);
+ return (-1);
+}
+
+int
+msg_from_name_get(struct msg *msg, char * *value)
+{
+ if (msg->from_name_set != 1)
+ return (-1);
+ *value = msg->from_name_data;
+ return (0);
+}
+
+int
+msg_to_name_get(struct msg *msg, char * *value)
+{
+ if (msg->to_name_set != 1)
+ return (-1);
+ *value = msg->to_name_data;
+ return (0);
+}
+
+int
+msg_attack_get(struct msg *msg, struct kill* *value)
+{
+ if (msg->attack_set != 1) {
+ msg->attack_data = kill_new();
+ if (msg->attack_data == NULL)
+ return (-1);
+ msg->attack_set = 1;
+ }
+ *value = msg->attack_data;
+ return (0);
+}
+
+int
+msg_run_get(struct msg *msg, int offset,
+ struct run * *value)
+{
+ if (!msg->run_set || offset < 0 || offset >= msg->run_length)
+ return (-1);
+ *value = msg->run_data[offset];
+ return (0);
+}
+
+void
+msg_clear(struct msg *tmp)
+{
+ if (tmp->from_name_set == 1) {
+ free (tmp->from_name_data);
+ tmp->from_name_data = NULL;
+ tmp->from_name_set = 0;
+ }
+ if (tmp->to_name_set == 1) {
+ free (tmp->to_name_data);
+ tmp->to_name_data = NULL;
+ tmp->to_name_set = 0;
+ }
+ if (tmp->attack_set == 1) {
+ kill_free(tmp->attack_data);
+ tmp->attack_data = NULL;
+ tmp->attack_set = 0;
+ }
+ if (tmp->run_set == 1) {
+ int i;
+ for (i = 0; i < tmp->run_length; ++i) {
+ run_free(tmp->run_data[i]);
+ }
+ free(tmp->run_data);
+ tmp->run_data = NULL;
+ tmp->run_set = 0;
+ tmp->run_length = 0;
+ tmp->run_num_allocated = 0;
+ }
+}
+
+void
+msg_free(struct msg *tmp)
+{
+ if (tmp->from_name_data != NULL)
+ free (tmp->from_name_data);
+ if (tmp->to_name_data != NULL)
+ free (tmp->to_name_data);
+ if (tmp->attack_data != NULL)
+ kill_free(tmp->attack_data);
+ if (tmp->run_data != NULL) {
+ int i;
+ for (i = 0; i < tmp->run_length; ++i) {
+ run_free(tmp->run_data[i]);
+ tmp->run_data[i] = NULL;
+ }
+ free(tmp->run_data);
+ tmp->run_data = NULL;
+ tmp->run_length = 0;
+ tmp->run_num_allocated = 0;
+ }
+ free(tmp);
+}
+
+void
+msg_marshal(struct evbuffer *evbuf, const struct msg *tmp){
+ evtag_marshal_string(evbuf, MSG_FROM_NAME, tmp->from_name_data);
+ evtag_marshal_string(evbuf, MSG_TO_NAME, tmp->to_name_data);
+ if (tmp->attack_set) {
+ evtag_marshal_kill(evbuf, MSG_ATTACK, tmp->attack_data);
+ }
+ {
+ int i;
+ for (i = 0; i < tmp->run_length; ++i) {
+ evtag_marshal_run(evbuf, MSG_RUN, tmp->run_data[i]);
+ }
+ }
+}
+
+int
+msg_unmarshal(struct msg *tmp, struct evbuffer *evbuf)
+{
+ uint32_t tag;
+ while (EVBUFFER_LENGTH(evbuf) > 0) {
+ if (evtag_peek(evbuf, &tag) == -1)
+ return (-1);
+ switch (tag) {
+
+ case MSG_FROM_NAME:
+
+ if (tmp->from_name_set)
+ return (-1);
+ if (evtag_unmarshal_string(evbuf, MSG_FROM_NAME, &tmp->from_name_data) == -1) {
+ event_warnx("%s: failed to unmarshal from_name", __func__);
+ return (-1);
+ }
+ tmp->from_name_set = 1;
+ break;
+
+ case MSG_TO_NAME:
+
+ if (tmp->to_name_set)
+ return (-1);
+ if (evtag_unmarshal_string(evbuf, MSG_TO_NAME, &tmp->to_name_data) == -1) {
+ event_warnx("%s: failed to unmarshal to_name", __func__);
+ return (-1);
+ }
+ tmp->to_name_set = 1;
+ break;
+
+ case MSG_ATTACK:
+
+ if (tmp->attack_set)
+ return (-1);
+ tmp->attack_data = kill_new();
+ if (tmp->attack_data == NULL)
+ return (-1);
+ if (evtag_unmarshal_kill(evbuf, MSG_ATTACK, tmp->attack_data) == -1) {
+ event_warnx("%s: failed to unmarshal attack", __func__);
+ return (-1);
+ }
+ tmp->attack_set = 1;
+ break;
+
+ case MSG_RUN:
+
+ if (msg_run_add(tmp) == NULL)
+ return (-1);
+ if (evtag_unmarshal_run(evbuf, MSG_RUN,
+ tmp->run_data[tmp->run_length - 1]) == -1) {
+ --tmp->run_length;
+ event_warnx("%s: failed to unmarshal run", __func__);
+ return (-1);
+ }
+ tmp->run_set = 1;
+ break;
+
+ default:
+ return -1;
+ }
+ }
+
+ if (msg_complete(tmp) == -1)
+ return (-1);
+ return (0);
+}
+
+int
+msg_complete(struct msg *msg)
+{
+ if (!msg->from_name_set)
+ return (-1);
+ if (!msg->to_name_set)
+ return (-1);
+ if (msg->attack_set && kill_complete(msg->attack_data) == -1)
+ return (-1);
+ {
+ int i;
+ for (i = 0; i < msg->run_length; ++i) {
+ if (run_complete(msg->run_data[i]) == -1)
+ return (-1);
+ }
+ }
+ return (0);
+}
+
+int
+evtag_unmarshal_msg(struct evbuffer *evbuf, uint32_t need_tag, struct msg *msg)
+{
+ uint32_t tag;
+ int res = -1;
+
+ struct evbuffer *tmp = evbuffer_new();
+
+ if (evtag_unmarshal(evbuf, &tag, tmp) == -1 || tag != need_tag)
+ goto error;
+
+ if (msg_unmarshal(msg, tmp) == -1)
+ goto error;
+
+ res = 0;
+
+ error:
+ evbuffer_free(tmp);
+ return (res);
+}
+
+void
+evtag_marshal_msg(struct evbuffer *evbuf, uint32_t tag, const struct msg *msg)
+{
+ struct evbuffer *_buf = evbuffer_new();
+ assert(_buf != NULL);
+ evbuffer_drain(_buf, -1);
+ msg_marshal(_buf, msg);
+ evtag_marshal(evbuf, tag, EVBUFFER_DATA(_buf), EVBUFFER_LENGTH(_buf));
+ evbuffer_free(_buf);
+}
+
+/*
+ * Implementation of kill
+ */
+
+static struct kill_access_ __kill_base = {
+ kill_weapon_assign,
+ kill_weapon_get,
+ kill_action_assign,
+ kill_action_get,
+ kill_how_often_assign,
+ kill_how_often_get,
+};
+
+struct kill *
+kill_new(void)
+{
+ struct kill *tmp;
+ if ((tmp = malloc(sizeof(struct kill))) == NULL) {
+ event_warn("%s: malloc", __func__);
+ return (NULL);
+ }
+ tmp->base = &__kill_base;
+
+ tmp->weapon_data = NULL;
+ tmp->weapon_set = 0;
+
+ tmp->action_data = NULL;
+ tmp->action_set = 0;
+
+ tmp->how_often_data = 0;
+ tmp->how_often_set = 0;
+
+ return (tmp);
+}
+
+
+
+
+int
+kill_weapon_assign(struct kill *msg,
+ const char * value)
+{
+ if (msg->weapon_data != NULL)
+ free(msg->weapon_data);
+ if ((msg->weapon_data = strdup(value)) == NULL)
+ return (-1);
+ msg->weapon_set = 1;
+ return (0);
+}
+
+int
+kill_action_assign(struct kill *msg,
+ const char * value)
+{
+ if (msg->action_data != NULL)
+ free(msg->action_data);
+ if ((msg->action_data = strdup(value)) == NULL)
+ return (-1);
+ msg->action_set = 1;
+ return (0);
+}
+
+int
+kill_how_often_assign(struct kill *msg, const uint32_t value)
+{
+ msg->how_often_set = 1;
+ msg->how_often_data = value;
+ return (0);
+}
+
+int
+kill_weapon_get(struct kill *msg, char * *value)
+{
+ if (msg->weapon_set != 1)
+ return (-1);
+ *value = msg->weapon_data;
+ return (0);
+}
+
+int
+kill_action_get(struct kill *msg, char * *value)
+{
+ if (msg->action_set != 1)
+ return (-1);
+ *value = msg->action_data;
+ return (0);
+}
+
+int
+kill_how_often_get(struct kill *msg, uint32_t *value)
+{
+ if (msg->how_often_set != 1)
+ return (-1);
+ *value = msg->how_often_data;
+ return (0);
+}
+
+void
+kill_clear(struct kill *tmp)
+{
+ if (tmp->weapon_set == 1) {
+ free (tmp->weapon_data);
+ tmp->weapon_data = NULL;
+ tmp->weapon_set = 0;
+ }
+ if (tmp->action_set == 1) {
+ free (tmp->action_data);
+ tmp->action_data = NULL;
+ tmp->action_set = 0;
+ }
+ tmp->how_often_set = 0;
+}
+
+void
+kill_free(struct kill *tmp)
+{
+ if (tmp->weapon_data != NULL)
+ free (tmp->weapon_data);
+ if (tmp->action_data != NULL)
+ free (tmp->action_data);
+ free(tmp);
+}
+
+void
+kill_marshal(struct evbuffer *evbuf, const struct kill *tmp){
+ evtag_marshal_string(evbuf, KILL_WEAPON, tmp->weapon_data);
+ evtag_marshal_string(evbuf, KILL_ACTION, tmp->action_data);
+ if (tmp->how_often_set) {
+ evtag_marshal_int(evbuf, KILL_HOW_OFTEN, tmp->how_often_data);
+ }
+}
+
+int
+kill_unmarshal(struct kill *tmp, struct evbuffer *evbuf)
+{
+ uint32_t tag;
+ while (EVBUFFER_LENGTH(evbuf) > 0) {
+ if (evtag_peek(evbuf, &tag) == -1)
+ return (-1);
+ switch (tag) {
+
+ case KILL_WEAPON:
+
+ if (tmp->weapon_set)
+ return (-1);
+ if (evtag_unmarshal_string(evbuf, KILL_WEAPON, &tmp->weapon_data) == -1) {
+ event_warnx("%s: failed to unmarshal weapon", __func__);
+ return (-1);
+ }
+ tmp->weapon_set = 1;
+ break;
+
+ case KILL_ACTION:
+
+ if (tmp->action_set)
+ return (-1);
+ if (evtag_unmarshal_string(evbuf, KILL_ACTION, &tmp->action_data) == -1) {
+ event_warnx("%s: failed to unmarshal action", __func__);
+ return (-1);
+ }
+ tmp->action_set = 1;
+ break;
+
+ case KILL_HOW_OFTEN:
+
+ if (tmp->how_often_set)
+ return (-1);
+ if (evtag_unmarshal_int(evbuf, KILL_HOW_OFTEN, &tmp->how_often_data) == -1) {
+ event_warnx("%s: failed to unmarshal how_often", __func__);
+ return (-1);
+ }
+ tmp->how_often_set = 1;
+ break;
+
+ default:
+ return -1;
+ }
+ }
+
+ if (kill_complete(tmp) == -1)
+ return (-1);
+ return (0);
+}
+
+int
+kill_complete(struct kill *msg)
+{
+ if (!msg->weapon_set)
+ return (-1);
+ if (!msg->action_set)
+ return (-1);
+ return (0);
+}
+
+int
+evtag_unmarshal_kill(struct evbuffer *evbuf, uint32_t need_tag, struct kill *msg)
+{
+ uint32_t tag;
+ int res = -1;
+
+ struct evbuffer *tmp = evbuffer_new();
+
+ if (evtag_unmarshal(evbuf, &tag, tmp) == -1 || tag != need_tag)
+ goto error;
+
+ if (kill_unmarshal(msg, tmp) == -1)
+ goto error;
+
+ res = 0;
+
+ error:
+ evbuffer_free(tmp);
+ return (res);
+}
+
+void
+evtag_marshal_kill(struct evbuffer *evbuf, uint32_t tag, const struct kill *msg)
+{
+ struct evbuffer *_buf = evbuffer_new();
+ assert(_buf != NULL);
+ evbuffer_drain(_buf, -1);
+ kill_marshal(_buf, msg);
+ evtag_marshal(evbuf, tag, EVBUFFER_DATA(_buf), EVBUFFER_LENGTH(_buf));
+ evbuffer_free(_buf);
+}
+
+/*
+ * Implementation of run
+ */
+
+static struct run_access_ __run_base = {
+ run_how_assign,
+ run_how_get,
+ run_some_bytes_assign,
+ run_some_bytes_get,
+ run_fixed_bytes_assign,
+ run_fixed_bytes_get,
+};
+
+struct run *
+run_new(void)
+{
+ struct run *tmp;
+ if ((tmp = malloc(sizeof(struct run))) == NULL) {
+ event_warn("%s: malloc", __func__);
+ return (NULL);
+ }
+ tmp->base = &__run_base;
+
+ tmp->how_data = NULL;
+ tmp->how_set = 0;
+
+ tmp->some_bytes_data = NULL;
+ tmp->some_bytes_length = 0;
+ tmp->some_bytes_set = 0;
+
+ memset(tmp->fixed_bytes_data, 0, sizeof(tmp->fixed_bytes_data));
+ tmp->fixed_bytes_set = 0;
+
+ return (tmp);
+}
+
+
+
+
+int
+run_how_assign(struct run *msg,
+ const char * value)
+{
+ if (msg->how_data != NULL)
+ free(msg->how_data);
+ if ((msg->how_data = strdup(value)) == NULL)
+ return (-1);
+ msg->how_set = 1;
+ return (0);
+}
+
+int
+run_some_bytes_assign(struct run *msg, const uint8_t * value, uint32_t len)
+{
+ if (msg->some_bytes_data != NULL)
+ free (msg->some_bytes_data);
+ msg->some_bytes_data = malloc(len);
+ if (msg->some_bytes_data == NULL)
+ return (-1);
+ msg->some_bytes_set = 1;
+ msg->some_bytes_length = len;
+ memcpy(msg->some_bytes_data, value, len);
+ return (0);
+}
+
+int
+run_fixed_bytes_assign(struct run *msg, const uint8_t *value)
+{
+ msg->fixed_bytes_set = 1;
+ memcpy(msg->fixed_bytes_data, value, 24);
+ return (0);
+}
+
+int
+run_how_get(struct run *msg, char * *value)
+{
+ if (msg->how_set != 1)
+ return (-1);
+ *value = msg->how_data;
+ return (0);
+}
+
+int
+run_some_bytes_get(struct run *msg, uint8_t * *value, uint32_t *plen)
+{
+ if (msg->some_bytes_set != 1)
+ return (-1);
+ *value = msg->some_bytes_data;
+ *plen = msg->some_bytes_length;
+ return (0);
+}
+
+int
+run_fixed_bytes_get(struct run *msg, uint8_t **value)
+{
+ if (msg->fixed_bytes_set != 1)
+ return (-1);
+ *value = msg->fixed_bytes_data;
+ return (0);
+}
+
+void
+run_clear(struct run *tmp)
+{
+ if (tmp->how_set == 1) {
+ free (tmp->how_data);
+ tmp->how_data = NULL;
+ tmp->how_set = 0;
+ }
+ if (tmp->some_bytes_set == 1) {
+ free (tmp->some_bytes_data);
+ tmp->some_bytes_data = NULL;
+ tmp->some_bytes_length = 0;
+ tmp->some_bytes_set = 0;
+ }
+ tmp->fixed_bytes_set = 0;
+ memset(tmp->fixed_bytes_data, 0, sizeof(tmp->fixed_bytes_data));
+}
+
+void
+run_free(struct run *tmp)
+{
+ if (tmp->how_data != NULL)
+ free (tmp->how_data);
+ if (tmp->some_bytes_data != NULL)
+ free (tmp->some_bytes_data);
+ free(tmp);
+}
+
+void
+run_marshal(struct evbuffer *evbuf, const struct run *tmp){
+ evtag_marshal_string(evbuf, RUN_HOW, tmp->how_data);
+ if (tmp->some_bytes_set) {
+ evtag_marshal(evbuf, RUN_SOME_BYTES, tmp->some_bytes_data, tmp->some_bytes_length);
+ }
+ evtag_marshal(evbuf, RUN_FIXED_BYTES, tmp->fixed_bytes_data, sizeof(tmp->fixed_bytes_data));
+}
+
+int
+run_unmarshal(struct run *tmp, struct evbuffer *evbuf)
+{
+ uint32_t tag;
+ while (EVBUFFER_LENGTH(evbuf) > 0) {
+ if (evtag_peek(evbuf, &tag) == -1)
+ return (-1);
+ switch (tag) {
+
+ case RUN_HOW:
+
+ if (tmp->how_set)
+ return (-1);
+ if (evtag_unmarshal_string(evbuf, RUN_HOW, &tmp->how_data) == -1) {
+ event_warnx("%s: failed to unmarshal how", __func__);
+ return (-1);
+ }
+ tmp->how_set = 1;
+ break;
+
+ case RUN_SOME_BYTES:
+
+ if (tmp->some_bytes_set)
+ return (-1);
+ if (evtag_payload_length(evbuf, &tmp->some_bytes_length) == -1)
+ return (-1);
+ if (tmp->some_bytes_length > EVBUFFER_LENGTH(evbuf))
+ return (-1);
+ if ((tmp->some_bytes_data = malloc(tmp->some_bytes_length)) == NULL)
+ return (-1);
+ if (evtag_unmarshal_fixed(evbuf, RUN_SOME_BYTES, tmp->some_bytes_data, tmp->some_bytes_length) == -1) {
+ event_warnx("%s: failed to unmarshal some_bytes", __func__);
+ return (-1);
+ }
+ tmp->some_bytes_set = 1;
+ break;
+
+ case RUN_FIXED_BYTES:
+
+ if (tmp->fixed_bytes_set)
+ return (-1);
+ if (evtag_unmarshal_fixed(evbuf, RUN_FIXED_BYTES, tmp->fixed_bytes_data, sizeof(tmp->fixed_bytes_data)) == -1) {
+ event_warnx("%s: failed to unmarshal fixed_bytes", __func__);
+ return (-1);
+ }
+ tmp->fixed_bytes_set = 1;
+ break;
+
+ default:
+ return -1;
+ }
+ }
+
+ if (run_complete(tmp) == -1)
+ return (-1);
+ return (0);
+}
+
+int
+run_complete(struct run *msg)
+{
+ if (!msg->how_set)
+ return (-1);
+ if (!msg->fixed_bytes_set)
+ return (-1);
+ return (0);
+}
+
+int
+evtag_unmarshal_run(struct evbuffer *evbuf, uint32_t need_tag, struct run *msg)
+{
+ uint32_t tag;
+ int res = -1;
+
+ struct evbuffer *tmp = evbuffer_new();
+
+ if (evtag_unmarshal(evbuf, &tag, tmp) == -1 || tag != need_tag)
+ goto error;
+
+ if (run_unmarshal(msg, tmp) == -1)
+ goto error;
+
+ res = 0;
+
+ error:
+ evbuffer_free(tmp);
+ return (res);
+}
+
+void
+evtag_marshal_run(struct evbuffer *evbuf, uint32_t tag, const struct run *msg)
+{
+ struct evbuffer *_buf = evbuffer_new();
+ assert(_buf != NULL);
+ evbuffer_drain(_buf, -1);
+ run_marshal(_buf, msg);
+ evtag_marshal(evbuf, tag, EVBUFFER_DATA(_buf), EVBUFFER_LENGTH(_buf));
+ evbuffer_free(_buf);
+}
+
diff --git a/libevent/test/regress.gen.h b/libevent/test/regress.gen.h
new file mode 100644
index 00000000000..09591f0584b
--- /dev/null
+++ b/libevent/test/regress.gen.h
@@ -0,0 +1,183 @@
+/*
+ * Automatically generated from ./regress.rpc
+ */
+
+#ifndef ___REGRESS_RPC_
+#define ___REGRESS_RPC_
+
+#include <event-config.h>
+#ifdef _EVENT_HAVE_STDINT_H
+#include <stdint.h>
+#endif
+#define EVTAG_HAS(msg, member) ((msg)->member##_set == 1)
+#ifdef __GNUC__
+#define EVTAG_ASSIGN(msg, member, args...) (*(msg)->base->member##_assign)(msg, ## args)
+#define EVTAG_GET(msg, member, args...) (*(msg)->base->member##_get)(msg, ## args)
+#else
+#define EVTAG_ASSIGN(msg, member, ...) (*(msg)->base->member##_assign)(msg, ## __VA_ARGS__)
+#define EVTAG_GET(msg, member, ...) (*(msg)->base->member##_get)(msg, ## __VA_ARGS__)
+#endif
+#define EVTAG_ADD(msg, member) (*(msg)->base->member##_add)(msg)
+#define EVTAG_LEN(msg, member) ((msg)->member##_length)
+
+struct msg;
+struct kill;
+struct run;
+
+/* Tag definition for msg */
+enum msg_ {
+ MSG_FROM_NAME=1,
+ MSG_TO_NAME=2,
+ MSG_ATTACK=3,
+ MSG_RUN=4,
+ MSG_MAX_TAGS
+};
+
+/* Structure declaration for msg */
+struct msg_access_ {
+ int (*from_name_assign)(struct msg *, const char *);
+ int (*from_name_get)(struct msg *, char * *);
+ int (*to_name_assign)(struct msg *, const char *);
+ int (*to_name_get)(struct msg *, char * *);
+ int (*attack_assign)(struct msg *, const struct kill*);
+ int (*attack_get)(struct msg *, struct kill* *);
+ int (*run_assign)(struct msg *, int, const struct run *);
+ int (*run_get)(struct msg *, int, struct run * *);
+ struct run * (*run_add)(struct msg *);
+};
+
+struct msg {
+ struct msg_access_ *base;
+
+ char *from_name_data;
+ char *to_name_data;
+ struct kill* attack_data;
+ struct run **run_data;
+ int run_length;
+ int run_num_allocated;
+
+ uint8_t from_name_set;
+ uint8_t to_name_set;
+ uint8_t attack_set;
+ uint8_t run_set;
+};
+
+struct msg *msg_new(void);
+void msg_free(struct msg *);
+void msg_clear(struct msg *);
+void msg_marshal(struct evbuffer *, const struct msg *);
+int msg_unmarshal(struct msg *, struct evbuffer *);
+int msg_complete(struct msg *);
+void evtag_marshal_msg(struct evbuffer *, uint32_t,
+ const struct msg *);
+int evtag_unmarshal_msg(struct evbuffer *, uint32_t,
+ struct msg *);
+int msg_from_name_assign(struct msg *, const char *);
+int msg_from_name_get(struct msg *, char * *);
+int msg_to_name_assign(struct msg *, const char *);
+int msg_to_name_get(struct msg *, char * *);
+int msg_attack_assign(struct msg *, const struct kill*);
+int msg_attack_get(struct msg *, struct kill* *);
+int msg_run_assign(struct msg *, int, const struct run *);
+int msg_run_get(struct msg *, int, struct run * *);
+struct run * msg_run_add(struct msg *);
+/* --- msg done --- */
+
+/* Tag definition for kill */
+enum kill_ {
+ KILL_WEAPON=65825,
+ KILL_ACTION=2,
+ KILL_HOW_OFTEN=3,
+ KILL_MAX_TAGS
+};
+
+/* Structure declaration for kill */
+struct kill_access_ {
+ int (*weapon_assign)(struct kill *, const char *);
+ int (*weapon_get)(struct kill *, char * *);
+ int (*action_assign)(struct kill *, const char *);
+ int (*action_get)(struct kill *, char * *);
+ int (*how_often_assign)(struct kill *, const uint32_t);
+ int (*how_often_get)(struct kill *, uint32_t *);
+};
+
+struct kill {
+ struct kill_access_ *base;
+
+ char *weapon_data;
+ char *action_data;
+ uint32_t how_often_data;
+
+ uint8_t weapon_set;
+ uint8_t action_set;
+ uint8_t how_often_set;
+};
+
+struct kill *kill_new(void);
+void kill_free(struct kill *);
+void kill_clear(struct kill *);
+void kill_marshal(struct evbuffer *, const struct kill *);
+int kill_unmarshal(struct kill *, struct evbuffer *);
+int kill_complete(struct kill *);
+void evtag_marshal_kill(struct evbuffer *, uint32_t,
+ const struct kill *);
+int evtag_unmarshal_kill(struct evbuffer *, uint32_t,
+ struct kill *);
+int kill_weapon_assign(struct kill *, const char *);
+int kill_weapon_get(struct kill *, char * *);
+int kill_action_assign(struct kill *, const char *);
+int kill_action_get(struct kill *, char * *);
+int kill_how_often_assign(struct kill *, const uint32_t);
+int kill_how_often_get(struct kill *, uint32_t *);
+/* --- kill done --- */
+
+/* Tag definition for run */
+enum run_ {
+ RUN_HOW=1,
+ RUN_SOME_BYTES=2,
+ RUN_FIXED_BYTES=3,
+ RUN_MAX_TAGS
+};
+
+/* Structure declaration for run */
+struct run_access_ {
+ int (*how_assign)(struct run *, const char *);
+ int (*how_get)(struct run *, char * *);
+ int (*some_bytes_assign)(struct run *, const uint8_t *, uint32_t);
+ int (*some_bytes_get)(struct run *, uint8_t * *, uint32_t *);
+ int (*fixed_bytes_assign)(struct run *, const uint8_t *);
+ int (*fixed_bytes_get)(struct run *, uint8_t **);
+};
+
+struct run {
+ struct run_access_ *base;
+
+ char *how_data;
+ uint8_t *some_bytes_data;
+ uint32_t some_bytes_length;
+ uint8_t fixed_bytes_data[24];
+
+ uint8_t how_set;
+ uint8_t some_bytes_set;
+ uint8_t fixed_bytes_set;
+};
+
+struct run *run_new(void);
+void run_free(struct run *);
+void run_clear(struct run *);
+void run_marshal(struct evbuffer *, const struct run *);
+int run_unmarshal(struct run *, struct evbuffer *);
+int run_complete(struct run *);
+void evtag_marshal_run(struct evbuffer *, uint32_t,
+ const struct run *);
+int evtag_unmarshal_run(struct evbuffer *, uint32_t,
+ struct run *);
+int run_how_assign(struct run *, const char *);
+int run_how_get(struct run *, char * *);
+int run_some_bytes_assign(struct run *, const uint8_t *, uint32_t);
+int run_some_bytes_get(struct run *, uint8_t * *, uint32_t *);
+int run_fixed_bytes_assign(struct run *, const uint8_t *);
+int run_fixed_bytes_get(struct run *, uint8_t **);
+/* --- run done --- */
+
+#endif /* ___REGRESS_RPC_ */
diff --git a/libevent/test/regress.h b/libevent/test/regress.h
new file mode 100644
index 00000000000..4060ff5c6ac
--- /dev/null
+++ b/libevent/test/regress.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _REGRESS_H_
+#define _REGRESS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void http_suite(void);
+void http_basic_test(void);
+
+void rpc_suite(void);
+
+void dns_suite(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _REGRESS_H_ */
diff --git a/libevent/test/regress.rpc b/libevent/test/regress.rpc
new file mode 100644
index 00000000000..65ca95de4cf
--- /dev/null
+++ b/libevent/test/regress.rpc
@@ -0,0 +1,20 @@
+/* tests data packing and unpacking */
+
+/* Top-level test message: two name strings plus nested kill/run structures. */
+struct msg {
+	string from_name = 1;
+	string to_name = 2;
+	optional struct[kill] attack = 3;
+	array struct[run] run = 4;
+}
+
+/* Nested message; 'weapon' deliberately uses a large tag (0x10121). */
+struct kill {
+	string weapon = 0x10121;
+	string action = 2;
+	optional int how_often = 3;
+}
+
+/* Message exercising variable-length and fixed-length byte fields. */
+struct run {
+	string how = 1;
+	optional bytes some_bytes = 2;
+	bytes fixed_bytes[24] = 3;
+}
diff --git a/libevent/test/regress_dns.c b/libevent/test/regress_dns.c
new file mode 100644
index 00000000000..129cdad498f
--- /dev/null
+++ b/libevent/test/regress_dns.c
@@ -0,0 +1,376 @@
+/*
+ * Copyright (c) 2003-2006 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef WIN32
+#include <winsock2.h>
+#include <windows.h>
+#endif
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <sys/queue.h>
+#ifndef WIN32
+#include <sys/socket.h>
+#include <signal.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <unistd.h>
+#endif
+#ifdef HAVE_NETINET_IN6_H
+#include <netinet/in6.h>
+#endif
+#ifdef HAVE_NETDB_H
+#include <netdb.h>
+#endif
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+
+#include "event.h"
+#include "evdns.h"
+#include "log.h"
+
+static int dns_ok = 0;
+static int dns_err = 0;
+
+void dns_suite(void);
+
+/*
+ * Shared evdns callback for the forward/reverse resolver tests.
+ * Stores the answer type in dns_ok (or the error code in dns_err on
+ * timeout), prints the received addresses, and exits the event loop.
+ */
+static void
+dns_gethostbyname_cb(int result, char type, int count, int ttl,
+    void *addresses, void *arg)
+{
+	dns_ok = dns_err = 0;
+
+	if (result == DNS_ERR_TIMEOUT) {
+		fprintf(stdout, "[Timed out] ");
+		dns_err = result;
+		goto out;
+	}
+
+	if (result != DNS_ERR_NONE) {
+		fprintf(stdout, "[Error code %d] ", result);
+		goto out;
+	}
+
+	fprintf(stderr, "type: %d, count: %d, ttl: %d: ", type, count, ttl);
+
+	switch (type) {
+	case DNS_IPv6_AAAA: {
+#if defined(HAVE_STRUCT_IN6_ADDR) && defined(HAVE_INET_NTOP) && defined(INET6_ADDRSTRLEN)
+		struct in6_addr *in6_addrs = addresses;
+		char buf[INET6_ADDRSTRLEN+1];
+		int i;
+		/* a resolution that's not valid does not help */
+		if (ttl < 0)
+			goto out;
+		for (i = 0; i < count; ++i) {
+			const char *b = inet_ntop(AF_INET6, &in6_addrs[i], buf,sizeof(buf));
+			if (b)
+				fprintf(stderr, "%s ", b);
+			else
+				fprintf(stderr, "%s ", strerror(errno));
+		}
+#endif
+		break;
+	}
+	case DNS_IPv4_A: {
+		struct in_addr *in_addrs = addresses;
+		int i;
+		/* a resolution that's not valid does not help */
+		if (ttl < 0)
+			goto out;
+		for (i = 0; i < count; ++i)
+			fprintf(stderr, "%s ", inet_ntoa(in_addrs[i]));
+		break;
+	}
+	case DNS_PTR:
+		/* may get at most one PTR */
+		if (count != 1)
+			goto out;
+
+		fprintf(stderr, "%s ", *(char **)addresses);
+		break;
+	default:
+		goto out;
+	}
+
+	/* success: record the answer type for the caller to check */
+	dns_ok = type;
+
+out:
+	event_loopexit(NULL);
+}
+
+/* Forward-resolves www.monkey.org (IPv4); requires an A answer or exits. */
+static void
+dns_gethostbyname(void)
+{
+	fprintf(stdout, "Simple DNS resolve: ");
+	dns_ok = 0;
+	evdns_resolve_ipv4("www.monkey.org", 0, dns_gethostbyname_cb, NULL);
+	event_dispatch();
+
+	if (dns_ok == DNS_IPv4_A) {
+		fprintf(stdout, "OK\n");
+	} else {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+}
+
+/*
+ * Forward-resolves www.ietf.org over IPv6; requires an AAAA answer.
+ * A timeout is reported as SKIPPED (not a failure) since the test host
+ * may lack IPv6 connectivity.
+ */
+static void
+dns_gethostbyname6(void)
+{
+	fprintf(stdout, "IPv6 DNS resolve: ");
+	dns_ok = 0;
+	evdns_resolve_ipv6("www.ietf.org", 0, dns_gethostbyname_cb, NULL);
+	event_dispatch();
+
+	if (dns_ok == DNS_IPv6_AAAA) {
+		fprintf(stdout, "OK\n");
+	} else if (!dns_ok && dns_err == DNS_ERR_TIMEOUT) {
+		fprintf(stdout, "SKIPPED\n");
+	} else {
+		fprintf(stdout, "FAILED (%d)\n", dns_ok);
+		exit(1);
+	}
+}
+
+/* Reverse-resolves 127.0.0.1; requires a PTR answer or exits. */
+static void
+dns_gethostbyaddr(void)
+{
+	struct in_addr in;
+	in.s_addr = htonl(0x7f000001ul); /* 127.0.0.1 */
+	fprintf(stdout, "Simple reverse DNS resolve: ");
+	dns_ok = 0;
+	evdns_resolve_reverse(&in, 0, dns_gethostbyname_cb, NULL);
+	event_dispatch();
+
+	if (dns_ok == DNS_PTR) {
+		fprintf(stdout, "OK\n");
+	} else {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+}
+
+static int n_server_responses = 0;
+
+/*
+ * Request handler for the in-process test DNS server.  Answers exactly
+ * the three queries that dns_server() issues (A and AAAA for
+ * zz.example.com, PTR for 192.168.11.11) with canned values; anything
+ * else, or any failure to add or send a reply, clears dns_ok.
+ */
+static void
+dns_server_request_cb(struct evdns_server_request *req, void *data)
+{
+	int i, r;
+	const char TEST_ARPA[] = "11.11.168.192.in-addr.arpa";
+	for (i = 0; i < req->nquestions; ++i) {
+		struct in_addr ans;
+		ans.s_addr = htonl(0xc0a80b0bUL); /* 192.168.11.11 */
+		if (req->questions[i]->type == EVDNS_TYPE_A &&
+		    req->questions[i]->dns_question_class == EVDNS_CLASS_INET &&
+		    !strcmp(req->questions[i]->name, "zz.example.com")) {
+			r = evdns_server_request_add_a_reply(req, "zz.example.com",
+			    1, &ans.s_addr, 12345);
+			if (r<0)
+				dns_ok = 0;
+		} else if (req->questions[i]->type == EVDNS_TYPE_AAAA &&
+		    req->questions[i]->dns_question_class == EVDNS_CLASS_INET &&
+		    !strcmp(req->questions[i]->name, "zz.example.com")) {
+			/* 16 raw bytes used as a fake IPv6 address */
+			char addr6[17] = "abcdefghijklmnop";
+			r = evdns_server_request_add_aaaa_reply(req, "zz.example.com",
+			    1, addr6, 123);
+			if (r<0)
+				dns_ok = 0;
+		} else if (req->questions[i]->type == EVDNS_TYPE_PTR &&
+		    req->questions[i]->dns_question_class == EVDNS_CLASS_INET &&
+		    !strcmp(req->questions[i]->name, TEST_ARPA)) {
+			r = evdns_server_request_add_ptr_reply(req, NULL, TEST_ARPA,
+			    "ZZ.EXAMPLE.COM", 54321);
+			if (r<0)
+				dns_ok = 0;
+		} else {
+			fprintf(stdout, "Unexpected question %d %d \"%s\" ",
+			    req->questions[i]->type,
+			    req->questions[i]->dns_question_class,
+			    req->questions[i]->name);
+			dns_ok = 0;
+		}
+	}
+	r = evdns_server_request_respond(req, 0);
+	if (r<0) {
+		fprintf(stdout, "Couldn't send reply. ");
+		dns_ok = 0;
+	}
+}
+
+/*
+ * Client-side callback for the server self-test: validates each of the
+ * three replies against the canned values dns_server_request_cb sent
+ * (address/data plus TTL), clearing dns_ok on any mismatch.  Exits the
+ * loop once all three responses have been counted.
+ */
+static void
+dns_server_gethostbyname_cb(int result, char type, int count, int ttl,
+    void *addresses, void *arg)
+{
+	if (result != DNS_ERR_NONE) {
+		fprintf(stdout, "Unexpected result %d. ", result);
+		dns_ok = 0;
+		goto out;
+	}
+	if (count != 1) {
+		fprintf(stdout, "Unexpected answer count %d. ", count);
+		dns_ok = 0;
+		goto out;
+	}
+	switch (type) {
+	case DNS_IPv4_A: {
+		struct in_addr *in_addrs = addresses;
+		/* must match 192.168.11.11 / ttl 12345 from the server */
+		if (in_addrs[0].s_addr != htonl(0xc0a80b0bUL) || ttl != 12345) {
+			fprintf(stdout, "Bad IPv4 response \"%s\" %d. ",
+			    inet_ntoa(in_addrs[0]), ttl);
+			dns_ok = 0;
+			goto out;
+		}
+		break;
+	}
+	case DNS_IPv6_AAAA: {
+#if defined (HAVE_STRUCT_IN6_ADDR) && defined(HAVE_INET_NTOP) && defined(INET6_ADDRSTRLEN)
+		struct in6_addr *in6_addrs = addresses;
+		char buf[INET6_ADDRSTRLEN+1];
+		/* must match the 16 fake bytes / ttl 123 from the server */
+		if (memcmp(&in6_addrs[0].s6_addr, "abcdefghijklmnop", 16)
+		    || ttl != 123) {
+			const char *b = inet_ntop(AF_INET6, &in6_addrs[0],buf,sizeof(buf));
+			fprintf(stdout, "Bad IPv6 response \"%s\" %d. ", b, ttl);
+			dns_ok = 0;
+			goto out;
+		}
+#endif
+		break;
+	}
+	case DNS_PTR: {
+		char **addrs = addresses;
+		if (strcmp(addrs[0], "ZZ.EXAMPLE.COM") || ttl != 54321) {
+			fprintf(stdout, "Bad PTR response \"%s\" %d. ",
+			    addrs[0], ttl);
+			dns_ok = 0;
+			goto out;
+		}
+		break;
+	}
+	default:
+		fprintf(stdout, "Bad response type %d. ", type);
+		dns_ok = 0;
+	}
+
+ out:
+	/* stop the loop only after all three expected responses arrived */
+	if (++n_server_responses == 3) {
+		event_loopexit(NULL);
+	}
+}
+
+/*
+ * DNS server self-test: installs this process as the only nameserver
+ * (127.0.0.1:35353), serves requests from a nonblocking UDP socket via
+ * dns_server_request_cb, issues three queries against itself, and
+ * verifies the answers through dns_server_gethostbyname_cb.
+ */
+static void
+dns_server(void)
+{
+	int sock;
+	struct sockaddr_in my_addr;
+	struct evdns_server_port *port;
+	struct in_addr resolve_addr;
+
+	dns_ok = 1;
+	fprintf(stdout, "DNS server support: ");
+
+	/* Add ourself as the only nameserver, and make sure we really are
+	 * the only nameserver. */
+	evdns_nameserver_ip_add("127.0.0.1:35353");
+	if (evdns_count_nameservers() != 1) {
+		fprintf(stdout, "Couldn't set up.\n");
+		exit(1);
+	}
+
+	/* Now configure a nameserver port. */
+	sock = socket(AF_INET, SOCK_DGRAM, 0);
+	if (sock == -1) {
+		perror("socket");
+		exit(1);
+	}
+#ifdef WIN32
+	{
+		u_long nonblocking = 1;
+		ioctlsocket(sock, FIONBIO, &nonblocking);
+	}
+#else
+	fcntl(sock, F_SETFL, O_NONBLOCK);
+#endif
+	memset(&my_addr, 0, sizeof(my_addr));
+	my_addr.sin_family = AF_INET;
+	my_addr.sin_port = htons(35353);
+	my_addr.sin_addr.s_addr = htonl(0x7f000001UL);
+	if (bind(sock, (struct sockaddr*)&my_addr, sizeof(my_addr)) < 0) {
+		perror("bind");
+		exit (1);
+	}
+	port = evdns_add_server_port(sock, 0, dns_server_request_cb, NULL);
+
+	/* Send two queries. */
+	evdns_resolve_ipv4("zz.example.com", DNS_QUERY_NO_SEARCH,
+	    dns_server_gethostbyname_cb, NULL);
+	evdns_resolve_ipv6("zz.example.com", DNS_QUERY_NO_SEARCH,
+	    dns_server_gethostbyname_cb, NULL);
+	resolve_addr.s_addr = htonl(0xc0a80b0bUL); /* 192.168.11.11 */
+	evdns_resolve_reverse(&resolve_addr, 0,
+	    dns_server_gethostbyname_cb, NULL);
+
+	event_dispatch();
+
+	if (dns_ok) {
+		fprintf(stdout, "OK\n");
+	} else {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	evdns_close_server_port(port);
+	evdns_shutdown(0); /* remove ourself as nameserver. */
+#ifdef WIN32
+	closesocket(sock);
+#else
+	close(sock);
+#endif
+}
+
+/* Entry point for the DNS test suite: server test first (before
+ * evdns_init), then the three resolver tests against real nameservers. */
+void
+dns_suite(void)
+{
+	dns_server(); /* Do this before we call evdns_init. */
+
+	evdns_init();
+	dns_gethostbyname();
+	dns_gethostbyname6();
+	dns_gethostbyaddr();
+
+	evdns_shutdown(0);
+}
diff --git a/libevent/test/regress_http.c b/libevent/test/regress_http.c
new file mode 100644
index 00000000000..1e2a1eb062a
--- /dev/null
+++ b/libevent/test/regress_http.c
@@ -0,0 +1,1476 @@
+/*
+ * Copyright (c) 2003-2006 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef WIN32
+#include <winsock2.h>
+#include <windows.h>
+#endif
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <sys/queue.h>
+#ifndef WIN32
+#include <sys/socket.h>
+#include <signal.h>
+#include <unistd.h>
+#include <netdb.h>
+#endif
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+
+#include "event.h"
+#include "evhttp.h"
+#include "log.h"
+#include "http-internal.h"
+
+extern int pair[];
+extern int test_ok;
+
+static struct evhttp *http;
+/* set if a test needs to call loopexit on a base */
+static struct event_base *base;
+
+void http_suite(void);
+
+void http_basic_cb(struct evhttp_request *req, void *arg);
+static void http_chunked_cb(struct evhttp_request *req, void *arg);
+void http_post_cb(struct evhttp_request *req, void *arg);
+void http_dispatcher_cb(struct evhttp_request *req, void *arg);
+static void http_large_delay_cb(struct evhttp_request *req, void *arg);
+
+/*
+ * Creates a test HTTP server on the given event base, binds it to the
+ * first free port in 8080..8129 (returned through *pport), and registers
+ * the URI callbacks the tests use.  event_errx exits if no port binds.
+ */
+static struct evhttp *
+http_setup(short *pport, struct event_base *base)
+{
+	int i;
+	struct evhttp *myhttp;
+	short port = -1;
+
+	/* Try a few different ports */
+	myhttp = evhttp_new(base);
+	for (i = 0; i < 50; ++i) {
+		if (evhttp_bind_socket(myhttp, "127.0.0.1", 8080 + i) != -1) {
+			port = 8080 + i;
+			break;
+		}
+	}
+
+	if (port == -1)
+		event_errx(1, "Could not start web server");
+
+	/* Register a callback for certain types of requests */
+	evhttp_set_cb(myhttp, "/test", http_basic_cb, NULL);
+	evhttp_set_cb(myhttp, "/chunked", http_chunked_cb, NULL);
+	evhttp_set_cb(myhttp, "/postit", http_post_cb, NULL);
+	evhttp_set_cb(myhttp, "/largedelay", http_large_delay_cb, NULL);
+	evhttp_set_cb(myhttp, "/", http_dispatcher_cb, NULL);
+
+	*pport = port;
+	return (myhttp);
+}
+
+#ifndef NI_MAXSERV
+#define NI_MAXSERV 1024
+#endif
+
+/*
+ * Opens a blocking TCP connection to address:port and returns the
+ * connected fd.  Uses gethostbyname on Windows and getaddrinfo
+ * elsewhere; exits via event_err on socket/connect failure.
+ */
+static int
+http_connect(const char *address, u_short port)
+{
+	/* Stupid code for connecting */
+#ifdef WIN32
+	struct hostent *he;
+	struct sockaddr_in sin;
+#else
+	struct addrinfo ai, *aitop;
+	char strport[NI_MAXSERV];
+#endif
+	struct sockaddr *sa;
+	int slen;
+	int fd;
+
+#ifdef WIN32
+	if (!(he = gethostbyname(address))) {
+		event_warn("gethostbyname");
+	}
+	memcpy(&sin.sin_addr, he->h_addr_list[0], he->h_length);
+	sin.sin_family = AF_INET;
+	sin.sin_port = htons(port);
+	slen = sizeof(struct sockaddr_in);
+	sa = (struct sockaddr*)&sin;
+#else
+	memset(&ai, 0, sizeof (ai));
+	ai.ai_family = AF_INET;
+	ai.ai_socktype = SOCK_STREAM;
+	snprintf(strport, sizeof (strport), "%d", port);
+	if (getaddrinfo(address, strport, &ai, &aitop) != 0) {
+		event_warn("getaddrinfo");
+		return (-1);
+	}
+	sa = aitop->ai_addr;
+	slen = aitop->ai_addrlen;
+#endif
+
+	fd = socket(AF_INET, SOCK_STREAM, 0);
+	if (fd == -1)
+		event_err(1, "socket failed");
+
+	if (connect(fd, sa, slen) == -1)
+		event_err(1, "connect failed");
+
+#ifndef WIN32
+	freeaddrinfo(aitop);
+#endif
+
+	return (fd);
+}
+
+/*
+ * bufferevent read callback: once the expected body text appears in the
+ * input buffer, parses the buffered data as an HTTP response, bumps
+ * test_ok if a Content-Type header was present, and exits the loop.
+ */
+static void
+http_readcb(struct bufferevent *bev, void *arg)
+{
+	const char *what = "This is funny";
+
+	event_debug(("%s: %s\n", __func__, EVBUFFER_DATA(bev->input)));
+
+	if (evbuffer_find(bev->input,
+		(const unsigned char*) what, strlen(what)) != NULL) {
+		struct evhttp_request *req = evhttp_request_new(NULL, NULL);
+		enum message_read_status done;
+
+		req->kind = EVHTTP_RESPONSE;
+		done = evhttp_parse_firstline(req, bev->input);
+		if (done != ALL_DATA_READ)
+			goto out;
+
+		done = evhttp_parse_headers(req, bev->input);
+		if (done != ALL_DATA_READ)
+			goto out;
+
+		/* NOTE(review): at this point done == ALL_DATA_READ, so the
+		 * 'done == 1' test relies on that enum value being 1 —
+		 * confirm against the message_read_status definition. */
+		if (done == 1 &&
+		    evhttp_find_header(req->input_headers,
+			"Content-Type") != NULL)
+			test_ok++;
+
+	 out:
+		evhttp_request_free(req);
+		bufferevent_disable(bev, EV_READ);
+		if (base)
+			event_base_loopexit(base, NULL);
+		else
+			event_loopexit(NULL);
+	}
+}
+
+/* Write callback: once the request is fully flushed, start reading the
+ * reply and count the successful write in test_ok. */
+static void
+http_writecb(struct bufferevent *bev, void *arg)
+{
+	if (EVBUFFER_LENGTH(bev->output) == 0) {
+		/* enable reading of the reply */
+		bufferevent_enable(bev, EV_READ);
+		test_ok++;
+	}
+}
+
+/* Error callback: any bufferevent error fails the test (-2) and stops
+ * the event loop. */
+static void
+http_errorcb(struct bufferevent *bev, short what, void *arg)
+{
+	test_ok = -2;
+	event_loopexit(NULL);
+}
+
+/*
+ * Server callback for "/test": replies "This is funny" with HTTP_OK.
+ * Special request headers tweak behavior: "Empty" sends a bodyless
+ * reply, "X-multi"/"X-Last" score the multi-line header test, and
+ * "X-Negative" injects a bogus negative Content-Length.
+ */
+void
+http_basic_cb(struct evhttp_request *req, void *arg)
+{
+	struct evbuffer *evb = evbuffer_new();
+	int empty = evhttp_find_header(req->input_headers, "Empty") != NULL;
+	event_debug(("%s: called\n", __func__));
+	evbuffer_add_printf(evb, "This is funny");
+
+	/* For multi-line headers test */
+	{
+		const char *multi =
+		    evhttp_find_header(req->input_headers,"X-multi");
+		if (multi) {
+			if (strcmp("END", multi + strlen(multi) - 3) == 0)
+				test_ok++;
+			if (evhttp_find_header(req->input_headers, "X-Last"))
+				test_ok++;
+		}
+	}
+
+	/* injecting a bad content-length */
+	if (evhttp_find_header(req->input_headers, "X-Negative"))
+		evhttp_add_header(req->output_headers,
+		    "Content-Length", "-100");
+
+	/* allow sending of an empty reply */
+	evhttp_send_reply(req, HTTP_OK, "Everything is fine",
+	    !empty ? evb : NULL);
+
+	evbuffer_free(evb);
+}
+
+static char const* const CHUNKS[] = {
+ "This is funny",
+ "but not hilarious.",
+ "bwv 1052"
+};
+
+/* Per-request state for the chunked-reply test: the request being
+ * answered and the index of the next CHUNKS entry to send. */
+struct chunk_req_state {
+	struct evhttp_request *req;
+	int i;
+};
+
+/*
+ * Timer callback that sends one CHUNKS entry per invocation; reschedules
+ * itself until all chunks are sent, then ends the reply and frees the
+ * heap-allocated state.
+ */
+static void
+http_chunked_trickle_cb(int fd, short events, void *arg)
+{
+	struct evbuffer *evb = evbuffer_new();
+	struct chunk_req_state *state = arg;
+	struct timeval when = { 0, 0 };
+
+	evbuffer_add_printf(evb, "%s", CHUNKS[state->i]);
+	evhttp_send_reply_chunk(state->req, evb);
+	evbuffer_free(evb);
+
+	if (++state->i < sizeof(CHUNKS)/sizeof(CHUNKS[0])) {
+		event_once(-1, EV_TIMEOUT,
+		    http_chunked_trickle_cb, state, &when);
+	} else {
+		evhttp_send_reply_end(state->req);
+		free(state);
+	}
+}
+
+/*
+ * Server callback for "/chunked": starts a chunked HTTP_OK reply and
+ * trickles the body out across multiple event-loop iterations via
+ * http_chunked_trickle_cb (which also frees the state).
+ * NOTE(review): the malloc result is used unchecked.
+ */
+static void
+http_chunked_cb(struct evhttp_request *req, void *arg)
+{
+	struct timeval when = { 0, 0 };
+	struct chunk_req_state *state = malloc(sizeof(struct chunk_req_state));
+	event_debug(("%s: called\n", __func__));
+
+	memset(state, 0, sizeof(struct chunk_req_state));
+	state->req = req;
+
+	/* generate a chunked reply */
+	evhttp_send_reply_start(req, HTTP_OK, "Everything is fine");
+
+	/* but trickle it across several iterations to ensure we're not
+	 * assuming it comes all at once */
+	event_once(-1, EV_TIMEOUT, http_chunked_trickle_cb, state, &when);
+}
+
+/* Timer callback: writes the second half of the deliberately split
+ * request started in http_basic_test ("Host: some" + "host\r\n..."). */
+static void
+http_complete_write(int fd, short what, void *arg)
+{
+	struct bufferevent *bev = arg;
+	const char *http_request = "host\r\n"
+	    "Connection: close\r\n"
+	    "\r\n";
+	bufferevent_write(bev, http_request, strlen(http_request));
+}
+
+/*
+ * Basic HTTP server test: binds the server to a second socket, sends a
+ * request split across two writes (second half delivered by a 10 ms
+ * timer) to the first port, then a complete request to the second port.
+ * test_ok must reach 3 after the first exchange and 5 after the second.
+ */
+static void
+http_basic_test(void)
+{
+	struct timeval tv;
+	struct bufferevent *bev;
+	int fd;
+	const char *http_request;
+	short port = -1;
+
+	test_ok = 0;
+	fprintf(stdout, "Testing Basic HTTP Server: ");
+
+	http = http_setup(&port, NULL);
+
+	/* bind to a second socket */
+	if (evhttp_bind_socket(http, "127.0.0.1", port + 1) == -1) {
+		fprintf(stdout, "FAILED (bind)\n");
+		exit(1);
+	}
+
+	fd = http_connect("127.0.0.1", port);
+
+	/* Stupid thing to send a request */
+	bev = bufferevent_new(fd, http_readcb, http_writecb,
+	    http_errorcb, NULL);
+
+	/* first half of the http request */
+	http_request =
+	    "GET /test HTTP/1.1\r\n"
+	    "Host: some";
+
+	bufferevent_write(bev, http_request, strlen(http_request));
+	timerclear(&tv);
+	tv.tv_usec = 10000;
+	event_once(-1, EV_TIMEOUT, http_complete_write, bev, &tv);
+
+	event_dispatch();
+
+	if (test_ok != 3) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	/* connect to the second port */
+	bufferevent_free(bev);
+	EVUTIL_CLOSESOCKET(fd);
+
+	fd = http_connect("127.0.0.1", port + 1);
+
+	/* Stupid thing to send a request */
+	bev = bufferevent_new(fd, http_readcb, http_writecb,
+	    http_errorcb, NULL);
+
+	http_request =
+	    "GET /test HTTP/1.1\r\n"
+	    "Host: somehost\r\n"
+	    "Connection: close\r\n"
+	    "\r\n";
+
+	bufferevent_write(bev, http_request, strlen(http_request));
+
+	event_dispatch();
+
+	bufferevent_free(bev);
+	EVUTIL_CLOSESOCKET(fd);
+
+	evhttp_free(http);
+
+	if (test_ok != 5) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	fprintf(stdout, "OK\n");
+}
+
+static struct evhttp_connection *delayed_client;
+
+/* Timer callback: sends the delayed HTTP_OK reply scheduled by
+ * http_large_delay_cb and counts it in test_ok. */
+static void
+http_delay_reply(int fd, short what, void *arg)
+{
+	struct evhttp_request *req = arg;
+
+	evhttp_send_reply(req, HTTP_OK, "Everything is fine", NULL);
+
+	++test_ok;
+}
+
+/*
+ * Server callback for "/largedelay": defers the reply by 3 seconds and
+ * immediately fails the client connection to force an EOF on the client
+ * side while the reply is still pending.
+ */
+static void
+http_large_delay_cb(struct evhttp_request *req, void *arg)
+{
+	struct timeval tv;
+	timerclear(&tv);
+	tv.tv_sec = 3;
+
+	event_once(-1, EV_TIMEOUT, http_delay_reply, req, &tv);
+
+	/* here we close the client connection which will cause an EOF */
+	evhttp_connection_fail(delayed_client, EVCON_HTTP_EOF);
+}
+
+void http_request_done(struct evhttp_request *, void *);
+void http_request_empty_done(struct evhttp_request *, void *);
+
+/*
+ * Pipelines three requests over one evhttp connection: two GET /test
+ * requests (with "Connection: close" on the second when !persistent)
+ * followed by a request carrying the "Empty" header to exercise
+ * bodyless replies.  Exits on any failure; test_ok is checked after
+ * the first and third exchanges.
+ */
+static void
+http_connection_test(int persistent)
+{
+	short port = -1;
+	struct evhttp_connection *evcon = NULL;
+	struct evhttp_request *req = NULL;
+
+	test_ok = 0;
+	fprintf(stdout, "Testing Request Connection Pipeline %s: ",
+	    persistent ? "(persistent)" : "");
+
+	http = http_setup(&port, NULL);
+
+	evcon = evhttp_connection_new("127.0.0.1", port);
+	if (evcon == NULL) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	/*
+	 * At this point, we want to schedule a request to the HTTP
+	 * server using our make request method.
+	 */
+
+	req = evhttp_request_new(http_request_done, NULL);
+
+	/* Add the information that we care about */
+	evhttp_add_header(req->output_headers, "Host", "somehost");
+
+	/* We give ownership of the request to the connection */
+	if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/test") == -1) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	event_dispatch();
+
+	if (test_ok != 1) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	/* try to make another request over the same connection */
+	test_ok = 0;
+
+	req = evhttp_request_new(http_request_done, NULL);
+
+	/* Add the information that we care about */
+	evhttp_add_header(req->output_headers, "Host", "somehost");
+
+	/*
+	 * if our connections are not supposed to be persistent; request
+	 * a close from the server.
+	 */
+	if (!persistent)
+		evhttp_add_header(req->output_headers, "Connection", "close");
+
+	/* We give ownership of the request to the connection */
+	if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/test") == -1) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	event_dispatch();
+
+	/* make another request: request empty reply */
+	test_ok = 0;
+
+	req = evhttp_request_new(http_request_empty_done, NULL);
+
+	/* Add the information that we care about */
+	evhttp_add_header(req->output_headers, "Empty", "itis");
+
+	/* We give ownership of the request to the connection */
+	if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/test") == -1) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	event_dispatch();
+
+	if (test_ok != 1) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	evhttp_connection_free(evcon);
+	evhttp_free(http);
+
+	fprintf(stdout, "OK\n");
+}
+
+/*
+ * Completion callback for GET /test: requires HTTP_OK, a Content-Type
+ * header, and a body exactly equal to "This is funny"; exits on any
+ * mismatch, otherwise sets test_ok and stops the loop.
+ */
+void
+http_request_done(struct evhttp_request *req, void *arg)
+{
+	const char *what = "This is funny";
+
+	if (req->response_code != HTTP_OK) {
+		fprintf(stderr, "FAILED\n");
+		exit(1);
+	}
+
+	if (evhttp_find_header(req->input_headers, "Content-Type") == NULL) {
+		fprintf(stderr, "FAILED\n");
+		exit(1);
+	}
+
+	if (EVBUFFER_LENGTH(req->input_buffer) != strlen(what)) {
+		fprintf(stderr, "FAILED\n");
+		exit(1);
+	}
+
+	if (memcmp(EVBUFFER_DATA(req->input_buffer), what, strlen(what)) != 0) {
+		fprintf(stderr, "FAILED\n");
+		exit(1);
+	}
+
+	test_ok = 1;
+	event_loopexit(NULL);
+}
+
+/* test date header and content length */
+
+/*
+ * Completion callback for the empty-reply request: requires HTTP_OK,
+ * Date and Content-Length headers, "Content-Length: 0", and an empty
+ * body; exits on any mismatch.
+ */
+void
+http_request_empty_done(struct evhttp_request *req, void *arg)
+{
+	if (req->response_code != HTTP_OK) {
+		fprintf(stderr, "FAILED\n");
+		exit(1);
+	}
+
+	if (evhttp_find_header(req->input_headers, "Date") == NULL) {
+		fprintf(stderr, "FAILED\n");
+		exit(1);
+	}
+
+
+	if (evhttp_find_header(req->input_headers, "Content-Length") == NULL) {
+		fprintf(stderr, "FAILED\n");
+		exit(1);
+	}
+
+	if (strcmp(evhttp_find_header(req->input_headers, "Content-Length"),
+		"0")) {
+		fprintf(stderr, "FAILED\n");
+		exit(1);
+	}
+
+	if (EVBUFFER_LENGTH(req->input_buffer) != 0) {
+		fprintf(stderr, "FAILED\n");
+		exit(1);
+	}
+
+	test_ok = 1;
+	event_loopexit(NULL);
+}
+
+/*
+ * HTTP DISPATCHER test
+ */
+
+/* Catch-all server callback ("/"): replies "DISPATCHER_TEST" with
+ * HTTP_OK for any URI routed here. */
+void
+http_dispatcher_cb(struct evhttp_request *req, void *arg)
+{
+
+	struct evbuffer *evb = evbuffer_new();
+	event_debug(("%s: called\n", __func__));
+	evbuffer_add_printf(evb, "DISPATCHER_TEST");
+
+	evhttp_send_reply(req, HTTP_OK, "Everything is fine", evb);
+
+	evbuffer_free(evb);
+}
+
+/* Completion callback for the dispatcher test: requires HTTP_OK, a
+ * Content-Type header, and a body equal to "DISPATCHER_TEST". */
+static void
+http_dispatcher_test_done(struct evhttp_request *req, void *arg)
+{
+	const char *what = "DISPATCHER_TEST";
+
+	if (req->response_code != HTTP_OK) {
+		fprintf(stderr, "FAILED\n");
+		exit(1);
+	}
+
+	if (evhttp_find_header(req->input_headers, "Content-Type") == NULL) {
+		fprintf(stderr, "FAILED (content type)\n");
+		exit(1);
+	}
+
+	if (EVBUFFER_LENGTH(req->input_buffer) != strlen(what)) {
+		fprintf(stderr, "FAILED (length %zu vs %zu)\n",
+		    EVBUFFER_LENGTH(req->input_buffer), strlen(what));
+		exit(1);
+	}
+
+	if (memcmp(EVBUFFER_DATA(req->input_buffer), what, strlen(what)) != 0) {
+		fprintf(stderr, "FAILED (data)\n");
+		exit(1);
+	}
+
+	test_ok = 1;
+	event_loopexit(NULL);
+}
+
+/*
+ * Dispatcher test: sends GET /?arg=val (a URI with no exact-match
+ * callback, so it falls through to the "/" handler) over a connection
+ * pinned to a local address, and validates the canned reply.
+ */
+static void
+http_dispatcher_test(void)
+{
+	short port = -1;
+	struct evhttp_connection *evcon = NULL;
+	struct evhttp_request *req = NULL;
+
+	test_ok = 0;
+	fprintf(stdout, "Testing HTTP Dispatcher: ");
+
+	http = http_setup(&port, NULL);
+
+	evcon = evhttp_connection_new("127.0.0.1", port);
+	if (evcon == NULL) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	/* also bind to local host */
+	evhttp_connection_set_local_address(evcon, "127.0.0.1");
+
+	/*
+	 * At this point, we want to schedule an HTTP GET request
+	 * server using our make request method.
+	 */
+
+	req = evhttp_request_new(http_dispatcher_test_done, NULL);
+	if (req == NULL) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	/* Add the information that we care about */
+	evhttp_add_header(req->output_headers, "Host", "somehost");
+
+	if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/?arg=val") == -1) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	event_dispatch();
+
+	evhttp_connection_free(evcon);
+	evhttp_free(http);
+
+	if (test_ok != 1) {
+		fprintf(stdout, "FAILED: %d\n", test_ok);
+		exit(1);
+	}
+
+	fprintf(stdout, "OK\n");
+}
+
+/*
+ * HTTP POST test.
+ */
+
+void http_postrequest_done(struct evhttp_request *, void *);
+
+#define POST_DATA "Okay. Not really printf"
+
+/*
+ * HTTP POST test: sends POST_DATA to /postit and validates the reply
+ * through http_postrequest_done.  Exits on any setup or check failure.
+ */
+static void
+http_post_test(void)
+{
+	short port = -1;
+	struct evhttp_connection *evcon = NULL;
+	struct evhttp_request *req = NULL;
+
+	test_ok = 0;
+	fprintf(stdout, "Testing HTTP POST Request: ");
+
+	http = http_setup(&port, NULL);
+
+	evcon = evhttp_connection_new("127.0.0.1", port);
+	if (evcon == NULL) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	/*
+	 * At this point, we want to schedule an HTTP POST request
+	 * server using our make request method.
+	 */
+
+	req = evhttp_request_new(http_postrequest_done, NULL);
+	if (req == NULL) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	/* Add the information that we care about */
+	evhttp_add_header(req->output_headers, "Host", "somehost");
+	evbuffer_add_printf(req->output_buffer, POST_DATA);
+
+	if (evhttp_make_request(evcon, req, EVHTTP_REQ_POST, "/postit") == -1) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	event_dispatch();
+
+	evhttp_connection_free(evcon);
+	evhttp_free(http);
+
+	if (test_ok != 1) {
+		fprintf(stdout, "FAILED: %d\n", test_ok);
+		exit(1);
+	}
+
+	fprintf(stdout, "OK\n");
+}
+
+/*
+ * Server callback for "/postit": verifies the request is a POST whose
+ * body equals POST_DATA exactly, then replies "This is funny".  Exits
+ * on any mismatch.
+ */
+void
+http_post_cb(struct evhttp_request *req, void *arg)
+{
+	struct evbuffer *evb;
+	event_debug(("%s: called\n", __func__));
+
+	/* Yes, we are expecting a post request */
+	if (req->type != EVHTTP_REQ_POST) {
+		fprintf(stdout, "FAILED (post type)\n");
+		exit(1);
+	}
+
+	if (EVBUFFER_LENGTH(req->input_buffer) != strlen(POST_DATA)) {
+		fprintf(stdout, "FAILED (length: %zu vs %zu)\n",
+		    EVBUFFER_LENGTH(req->input_buffer), strlen(POST_DATA));
+		exit(1);
+	}
+
+	if (memcmp(EVBUFFER_DATA(req->input_buffer), POST_DATA,
+		strlen(POST_DATA))) {
+		fprintf(stdout, "FAILED (data)\n");
+		fprintf(stdout, "Got :%s\n", EVBUFFER_DATA(req->input_buffer));
+		fprintf(stdout, "Want:%s\n", POST_DATA);
+		exit(1);
+	}
+
+	evb = evbuffer_new();
+	evbuffer_add_printf(evb, "This is funny");
+
+	evhttp_send_reply(req, HTTP_OK, "Everything is fine", evb);
+
+	evbuffer_free(evb);
+}
+
+/*
+ * Completion callback for the POST test: a NULL req means the request
+ * timed out; otherwise requires HTTP_OK, a Content-Type header, and a
+ * body equal to "This is funny".
+ */
+void
+http_postrequest_done(struct evhttp_request *req, void *arg)
+{
+	const char *what = "This is funny";
+
+	if (req == NULL) {
+		fprintf(stderr, "FAILED (timeout)\n");
+		exit(1);
+	}
+
+	if (req->response_code != HTTP_OK) {
+
+		fprintf(stderr, "FAILED (response code)\n");
+		exit(1);
+	}
+
+	if (evhttp_find_header(req->input_headers, "Content-Type") == NULL) {
+		fprintf(stderr, "FAILED (content type)\n");
+		exit(1);
+	}
+
+	if (EVBUFFER_LENGTH(req->input_buffer) != strlen(what)) {
+		fprintf(stderr, "FAILED (length %zu vs %zu)\n",
+		    EVBUFFER_LENGTH(req->input_buffer), strlen(what));
+		exit(1);
+	}
+
+	if (memcmp(EVBUFFER_DATA(req->input_buffer), what, strlen(what)) != 0) {
+		fprintf(stderr, "FAILED (data)\n");
+		exit(1);
+	}
+
+	test_ok = 1;
+	event_loopexit(NULL);
+}
+
+/* Read callback for the malformed-request test: succeeds (test_ok = 2)
+ * once a "400 Bad Request" status appears in the server's response. */
+static void
+http_failure_readcb(struct bufferevent *bev, void *arg)
+{
+	const char *what = "400 Bad Request";
+	if (evbuffer_find(bev->input, (const unsigned char*) what, strlen(what)) != NULL) {
+		test_ok = 2;
+		bufferevent_disable(bev, EV_READ);
+		event_loopexit(NULL);
+	}
+}
+
+/*
+ * Testing that the HTTP server can deal with a malformed request.
+ */
+/*
+ * Testing that the HTTP server can deal with a malformed request:
+ * sends "illegal request\r\n" and expects a 400 Bad Request response
+ * (test_ok must end at 2, set by http_failure_readcb).
+ */
+static void
+http_failure_test(void)
+{
+	struct bufferevent *bev;
+	int fd;
+	const char *http_request;
+	short port = -1;
+
+	test_ok = 0;
+	fprintf(stdout, "Testing Bad HTTP Request: ");
+
+	http = http_setup(&port, NULL);
+
+	fd = http_connect("127.0.0.1", port);
+
+	/* Stupid thing to send a request */
+	bev = bufferevent_new(fd, http_failure_readcb, http_writecb,
+	    http_errorcb, NULL);
+
+	http_request = "illegal request\r\n";
+
+	bufferevent_write(bev, http_request, strlen(http_request));
+
+	event_dispatch();
+
+	bufferevent_free(bev);
+	EVUTIL_CLOSESOCKET(fd);
+
+	evhttp_free(http);
+
+	if (test_ok != 2) {
+		fprintf(stdout, "FAILED\n");
+		exit(1);
+	}
+
+	fprintf(stdout, "OK\n");
+}
+
+static void
+close_detect_done(struct evhttp_request *req, void *arg)
+{
+ struct timeval tv;
+ if (req == NULL || req->response_code != HTTP_OK) {
+
+ fprintf(stderr, "FAILED\n");
+ exit(1);
+ }
+
+ test_ok = 1;
+
+ timerclear(&tv);
+ tv.tv_sec = 3; /* longer than the http time out */
+
+ event_loopexit(&tv);
+}
+
+static void
+close_detect_launch(int fd, short what, void *arg)
+{
+ struct evhttp_connection *evcon = arg;
+ struct evhttp_request *req;
+
+ req = evhttp_request_new(close_detect_done, NULL);
+
+ /* Add the information that we care about */
+ evhttp_add_header(req->output_headers, "Host", "somehost");
+
+ /* We give ownership of the request to the connection */
+ if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/test") == -1) {
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+ }
+}
+
+static void
+close_detect_cb(struct evhttp_request *req, void *arg)
+{
+ struct evhttp_connection *evcon = arg;
+ struct timeval tv;
+
+ if (req != NULL && req->response_code != HTTP_OK) {
+
+ fprintf(stderr, "FAILED\n");
+ exit(1);
+ }
+
+ timerclear(&tv);
+ tv.tv_sec = 3; /* longer than the http time out */
+
+ /* launch a new request on the persistent connection in 6 seconds */
+ event_once(-1, EV_TIMEOUT, close_detect_launch, evcon, &tv);
+}
+
+
+static void
+http_close_detection(int with_delay)
+{
+ short port = -1;
+ struct evhttp_connection *evcon = NULL;
+ struct evhttp_request *req = NULL;
+
+ test_ok = 0;
+ fprintf(stdout, "Testing Connection Close Detection%s: ",
+ with_delay ? " (with delay)" : "");
+
+ http = http_setup(&port, NULL);
+
+ /* 2 second timeout */
+ evhttp_set_timeout(http, 2);
+
+ evcon = evhttp_connection_new("127.0.0.1", port);
+ if (evcon == NULL) {
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+ }
+
+ delayed_client = evcon;
+
+ /*
+ * At this point, we want to schedule a request to the HTTP
+ * server using our make request method.
+ */
+
+ req = evhttp_request_new(close_detect_cb, evcon);
+
+ /* Add the information that we care about */
+ evhttp_add_header(req->output_headers, "Host", "somehost");
+
+ /* We give ownership of the request to the connection */
+ if (evhttp_make_request(evcon,
+ req, EVHTTP_REQ_GET, with_delay ? "/largedelay" : "/test") == -1) {
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+ }
+
+ event_dispatch();
+
+ if (test_ok != 1) {
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+ }
+
+ /* at this point, the http server should have no connection */
+ if (TAILQ_FIRST(&http->connections) != NULL) {
+ fprintf(stdout, "FAILED (left connections)\n");
+ exit(1);
+ }
+
+ evhttp_connection_free(evcon);
+ evhttp_free(http);
+
+ fprintf(stdout, "OK\n");
+}
+
+static void
+http_highport_test(void)
+{
+ int i = -1;
+ struct evhttp *myhttp = NULL;
+
+ fprintf(stdout, "Testing HTTP Server with high port: ");
+
+ /* Try a few different ports */
+ for (i = 0; i < 50; ++i) {
+ myhttp = evhttp_start("127.0.0.1", 65535 - i);
+ if (myhttp != NULL) {
+ fprintf(stdout, "OK\n");
+ evhttp_free(myhttp);
+ return;
+ }
+ }
+
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+}
+
+static void
+http_bad_header_test(void)
+{
+ struct evkeyvalq headers;
+
+ fprintf(stdout, "Testing HTTP Header filtering: ");
+
+ TAILQ_INIT(&headers);
+
+ if (evhttp_add_header(&headers, "One", "Two") != 0)
+ goto fail;
+
+ if (evhttp_add_header(&headers, "One\r", "Two") != -1)
+ goto fail;
+ if (evhttp_add_header(&headers, "One", "Two") != 0)
+ goto fail;
+ if (evhttp_add_header(&headers, "One", "Two\r\n Three") != 0)
+ goto fail;
+ if (evhttp_add_header(&headers, "One\r", "Two") != -1)
+ goto fail;
+ if (evhttp_add_header(&headers, "One\n", "Two") != -1)
+ goto fail;
+ if (evhttp_add_header(&headers, "One", "Two\r") != -1)
+ goto fail;
+ if (evhttp_add_header(&headers, "One", "Two\n") != -1)
+ goto fail;
+
+ evhttp_clear_headers(&headers);
+
+ fprintf(stdout, "OK\n");
+ return;
+fail:
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+}
+
+static int validate_header(
+ const struct evkeyvalq* headers,
+ const char *key, const char *value)
+{
+ const char *real_val = evhttp_find_header(headers, key);
+ if (real_val == NULL)
+ return (-1);
+ if (strcmp(real_val, value) != 0)
+ return (-1);
+ return (0);
+}
+
+static void
+http_parse_query_test(void)
+{
+ struct evkeyvalq headers;
+
+ fprintf(stdout, "Testing HTTP query parsing: ");
+
+ TAILQ_INIT(&headers);
+
+ evhttp_parse_query("http://www.test.com/?q=test", &headers);
+ if (validate_header(&headers, "q", "test") != 0)
+ goto fail;
+ evhttp_clear_headers(&headers);
+
+ evhttp_parse_query("http://www.test.com/?q=test&foo=bar", &headers);
+ if (validate_header(&headers, "q", "test") != 0)
+ goto fail;
+ if (validate_header(&headers, "foo", "bar") != 0)
+ goto fail;
+ evhttp_clear_headers(&headers);
+
+ evhttp_parse_query("http://www.test.com/?q=test+foo", &headers);
+ if (validate_header(&headers, "q", "test foo") != 0)
+ goto fail;
+ evhttp_clear_headers(&headers);
+
+ evhttp_parse_query("http://www.test.com/?q=test%0Afoo", &headers);
+ if (validate_header(&headers, "q", "test\nfoo") != 0)
+ goto fail;
+ evhttp_clear_headers(&headers);
+
+ evhttp_parse_query("http://www.test.com/?q=test%0Dfoo", &headers);
+ if (validate_header(&headers, "q", "test\rfoo") != 0)
+ goto fail;
+ evhttp_clear_headers(&headers);
+
+ fprintf(stdout, "OK\n");
+ return;
+fail:
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+}
+
+static void
+http_base_test(void)
+{
+ struct bufferevent *bev;
+ int fd;
+ const char *http_request;
+ short port = -1;
+
+ test_ok = 0;
+ fprintf(stdout, "Testing HTTP Server Event Base: ");
+
+ base = event_init();
+
+ /*
+ * create another bogus base - which is being used by all subsequen
+ * tests - yuck!
+ */
+ event_init();
+
+ http = http_setup(&port, base);
+
+ fd = http_connect("127.0.0.1", port);
+
+ /* Stupid thing to send a request */
+ bev = bufferevent_new(fd, http_readcb, http_writecb,
+ http_errorcb, NULL);
+ bufferevent_base_set(base, bev);
+
+ http_request =
+ "GET /test HTTP/1.1\r\n"
+ "Host: somehost\r\n"
+ "Connection: close\r\n"
+ "\r\n";
+
+ bufferevent_write(bev, http_request, strlen(http_request));
+
+ event_base_dispatch(base);
+
+ bufferevent_free(bev);
+ EVUTIL_CLOSESOCKET(fd);
+
+ evhttp_free(http);
+
+ event_base_free(base);
+ base = NULL;
+
+ if (test_ok != 2) {
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+ }
+
+ fprintf(stdout, "OK\n");
+}
+
+/*
+ * the server is going to reply with chunked data.
+ */
+
+static void
+http_chunked_readcb(struct bufferevent *bev, void *arg)
+{
+ /* nothing here */
+}
+
+static void
+http_chunked_errorcb(struct bufferevent *bev, short what, void *arg)
+{
+ if (!test_ok)
+ goto out;
+
+ test_ok = -1;
+
+ if ((what & EVBUFFER_EOF) != 0) {
+ struct evhttp_request *req = evhttp_request_new(NULL, NULL);
+ const char *header;
+ enum message_read_status done;
+
+ req->kind = EVHTTP_RESPONSE;
+ done = evhttp_parse_firstline(req, EVBUFFER_INPUT(bev));
+ if (done != ALL_DATA_READ)
+ goto out;
+
+ done = evhttp_parse_headers(req, EVBUFFER_INPUT(bev));
+ if (done != ALL_DATA_READ)
+ goto out;
+
+ header = evhttp_find_header(req->input_headers, "Transfer-Encoding");
+ if (header == NULL || strcmp(header, "chunked"))
+ goto out;
+
+ header = evhttp_find_header(req->input_headers, "Connection");
+ if (header == NULL || strcmp(header, "close"))
+ goto out;
+
+ header = evbuffer_readline(EVBUFFER_INPUT(bev));
+ if (header == NULL)
+ goto out;
+ /* 13 chars */
+ if (strcmp(header, "d"))
+ goto out;
+ free((char*)header);
+
+ if (strncmp((char *)EVBUFFER_DATA(EVBUFFER_INPUT(bev)),
+ "This is funny", 13))
+ goto out;
+
+ evbuffer_drain(EVBUFFER_INPUT(bev), 13 + 2);
+
+ header = evbuffer_readline(EVBUFFER_INPUT(bev));
+ if (header == NULL)
+ goto out;
+ /* 18 chars */
+ if (strcmp(header, "12"))
+ goto out;
+ free((char *)header);
+
+ if (strncmp((char *)EVBUFFER_DATA(EVBUFFER_INPUT(bev)),
+ "but not hilarious.", 18))
+ goto out;
+
+ evbuffer_drain(EVBUFFER_INPUT(bev), 18 + 2);
+
+ header = evbuffer_readline(EVBUFFER_INPUT(bev));
+ if (header == NULL)
+ goto out;
+ /* 8 chars */
+ if (strcmp(header, "8"))
+ goto out;
+ free((char *)header);
+
+ if (strncmp((char *)EVBUFFER_DATA(EVBUFFER_INPUT(bev)),
+ "bwv 1052.", 8))
+ goto out;
+
+ evbuffer_drain(EVBUFFER_INPUT(bev), 8 + 2);
+
+ header = evbuffer_readline(EVBUFFER_INPUT(bev));
+ if (header == NULL)
+ goto out;
+ /* 0 chars */
+ if (strcmp(header, "0"))
+ goto out;
+ free((char *)header);
+
+ test_ok = 2;
+ }
+
+out:
+ event_loopexit(NULL);
+}
+
+static void
+http_chunked_writecb(struct bufferevent *bev, void *arg)
+{
+ if (EVBUFFER_LENGTH(EVBUFFER_OUTPUT(bev)) == 0) {
+ /* enable reading of the reply */
+ bufferevent_enable(bev, EV_READ);
+ test_ok++;
+ }
+}
+
+static void
+http_chunked_request_done(struct evhttp_request *req, void *arg)
+{
+ if (req->response_code != HTTP_OK) {
+ fprintf(stderr, "FAILED\n");
+ exit(1);
+ }
+
+ if (evhttp_find_header(req->input_headers,
+ "Transfer-Encoding") == NULL) {
+ fprintf(stderr, "FAILED\n");
+ exit(1);
+ }
+
+ if (EVBUFFER_LENGTH(req->input_buffer) != 13 + 18 + 8) {
+ fprintf(stderr, "FAILED\n");
+ exit(1);
+ }
+
+ if (strncmp((char *)EVBUFFER_DATA(req->input_buffer),
+ "This is funnybut not hilarious.bwv 1052",
+ 13 + 18 + 8)) {
+ fprintf(stderr, "FAILED\n");
+ exit(1);
+ }
+
+ test_ok = 1;
+ event_loopexit(NULL);
+}
+
+static void
+http_chunked_test(void)
+{
+ struct bufferevent *bev;
+ int fd;
+ const char *http_request;
+ short port = -1;
+ struct timeval tv_start, tv_end;
+ struct evhttp_connection *evcon = NULL;
+ struct evhttp_request *req = NULL;
+ int i;
+
+ test_ok = 0;
+ fprintf(stdout, "Testing Chunked HTTP Reply: ");
+
+ http = http_setup(&port, NULL);
+
+ fd = http_connect("127.0.0.1", port);
+
+ /* Stupid thing to send a request */
+ bev = bufferevent_new(fd,
+ http_chunked_readcb, http_chunked_writecb,
+ http_chunked_errorcb, NULL);
+
+ http_request =
+ "GET /chunked HTTP/1.1\r\n"
+ "Host: somehost\r\n"
+ "Connection: close\r\n"
+ "\r\n";
+
+ bufferevent_write(bev, http_request, strlen(http_request));
+
+ evutil_gettimeofday(&tv_start, NULL);
+
+ event_dispatch();
+
+ evutil_gettimeofday(&tv_end, NULL);
+ evutil_timersub(&tv_end, &tv_start, &tv_end);
+
+ if (tv_end.tv_sec >= 1) {
+ fprintf(stdout, "FAILED (time)\n");
+ exit (1);
+ }
+
+
+ if (test_ok != 2) {
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+ }
+
+ /* now try again with the regular connection object */
+ evcon = evhttp_connection_new("127.0.0.1", port);
+ if (evcon == NULL) {
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+ }
+
+ /* make two requests to check the keepalive behavior */
+ for (i = 0; i < 2; i++) {
+ test_ok = 0;
+ req = evhttp_request_new(http_chunked_request_done, NULL);
+
+ /* Add the information that we care about */
+ evhttp_add_header(req->output_headers, "Host", "somehost");
+
+ /* We give ownership of the request to the connection */
+ if (evhttp_make_request(evcon, req,
+ EVHTTP_REQ_GET, "/chunked") == -1) {
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+ }
+
+ event_dispatch();
+
+ if (test_ok != 1) {
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+ }
+ }
+
+ evhttp_connection_free(evcon);
+ evhttp_free(http);
+
+ fprintf(stdout, "OK\n");
+}
+
+static void
+http_multi_line_header_test(void)
+{
+ struct bufferevent *bev;
+ int fd;
+ const char *http_start_request;
+ short port = -1;
+
+ test_ok = 0;
+ fprintf(stdout, "Testing HTTP Server with multi line: ");
+
+ http = http_setup(&port, NULL);
+
+ fd = http_connect("127.0.0.1", port);
+
+ /* Stupid thing to send a request */
+ bev = bufferevent_new(fd, http_readcb, http_writecb,
+ http_errorcb, NULL);
+
+ http_start_request =
+ "GET /test HTTP/1.1\r\n"
+ "Host: somehost\r\n"
+ "Connection: close\r\n"
+ "X-Multi: aaaaaaaa\r\n"
+ " a\r\n"
+ "\tEND\r\n"
+ "X-Last: last\r\n"
+ "\r\n";
+
+ bufferevent_write(bev, http_start_request, strlen(http_start_request));
+
+ event_dispatch();
+
+ bufferevent_free(bev);
+ EVUTIL_CLOSESOCKET(fd);
+
+ evhttp_free(http);
+
+ if (test_ok != 4) {
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+ }
+
+ fprintf(stdout, "OK\n");
+}
+
+static void
+http_request_bad(struct evhttp_request *req, void *arg)
+{
+ if (req != NULL) {
+ fprintf(stderr, "FAILED\n");
+ exit(1);
+ }
+
+ test_ok = 1;
+ event_loopexit(NULL);
+}
+
+static void
+http_negative_content_length_test(void)
+{
+ short port = -1;
+ struct evhttp_connection *evcon = NULL;
+ struct evhttp_request *req = NULL;
+
+ test_ok = 0;
+ fprintf(stdout, "Testing HTTP Negative Content Length: ");
+
+ http = http_setup(&port, NULL);
+
+ evcon = evhttp_connection_new("127.0.0.1", port);
+ if (evcon == NULL) {
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+ }
+
+ /*
+ * At this point, we want to schedule a request to the HTTP
+ * server using our make request method.
+ */
+
+ req = evhttp_request_new(http_request_bad, NULL);
+
+ /* Cause the response to have a negative content-length */
+ evhttp_add_header(req->output_headers, "X-Negative", "makeitso");
+
+ /* We give ownership of the request to the connection */
+ if (evhttp_make_request(evcon, req, EVHTTP_REQ_GET, "/test") == -1) {
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+ }
+
+ event_dispatch();
+
+ evhttp_free(http);
+
+ if (test_ok != 1) {
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+ }
+
+ fprintf(stdout, "OK\n");
+}
+
+void
+http_suite(void)
+{
+ http_base_test();
+ http_bad_header_test();
+ http_parse_query_test();
+ http_basic_test();
+ http_connection_test(0 /* not-persistent */);
+ http_connection_test(1 /* persistent */);
+ http_close_detection(0 /* with delay */);
+ http_close_detection(1 /* with delay */);
+ http_post_test();
+ http_failure_test();
+ http_highport_test();
+ http_dispatcher_test();
+
+ http_multi_line_header_test();
+ http_negative_content_length_test();
+
+ http_chunked_test();
+}
diff --git a/libevent/test/regress_rpc.c b/libevent/test/regress_rpc.c
new file mode 100644
index 00000000000..760934766a1
--- /dev/null
+++ b/libevent/test/regress_rpc.c
@@ -0,0 +1,631 @@
+/*
+ * Copyright (c) 2003-2006 Niels Provos <provos@citi.umich.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef WIN32
+#include <winsock2.h>
+#include <windows.h>
+#endif
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#ifdef HAVE_SYS_TIME_H
+#include <sys/time.h>
+#endif
+#include <sys/queue.h>
+#ifndef WIN32
+#include <sys/socket.h>
+#include <signal.h>
+#include <unistd.h>
+#include <netdb.h>
+#endif
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <assert.h>
+
+#include "event.h"
+#include "evhttp.h"
+#include "log.h"
+#include "evrpc.h"
+
+#include "regress.gen.h"
+
+void rpc_suite(void);
+
+extern int test_ok;
+
+static struct evhttp *
+http_setup(short *pport)
+{
+ int i;
+ struct evhttp *myhttp;
+ short port = -1;
+
+ /* Try a few different ports */
+ for (i = 0; i < 50; ++i) {
+ myhttp = evhttp_start("127.0.0.1", 8080 + i);
+ if (myhttp != NULL) {
+ port = 8080 + i;
+ break;
+ }
+ }
+
+ if (port == -1)
+ event_errx(1, "Could not start web server");
+
+ *pport = port;
+ return (myhttp);
+}
+
+EVRPC_HEADER(Message, msg, kill);
+EVRPC_HEADER(NeverReply, msg, kill);
+
+EVRPC_GENERATE(Message, msg, kill);
+EVRPC_GENERATE(NeverReply, msg, kill);
+
+static int need_input_hook = 0;
+static int need_output_hook = 0;
+
+static void
+MessageCb(EVRPC_STRUCT(Message)* rpc, void *arg)
+{
+ struct kill* kill_reply = rpc->reply;
+
+ if (need_input_hook) {
+ struct evhttp_request* req = EVRPC_REQUEST_HTTP(rpc);
+ const char *header = evhttp_find_header(
+ req->input_headers, "X-Hook");
+ assert(strcmp(header, "input") == 0);
+ }
+
+ /* we just want to fill in some non-sense */
+ EVTAG_ASSIGN(kill_reply, weapon, "dagger");
+ EVTAG_ASSIGN(kill_reply, action, "wave around like an idiot");
+
+ /* no reply to the RPC */
+ EVRPC_REQUEST_DONE(rpc);
+}
+
+static EVRPC_STRUCT(NeverReply) *saved_rpc;
+
+static void
+NeverReplyCb(EVRPC_STRUCT(NeverReply)* rpc, void *arg)
+{
+ test_ok += 1;
+ saved_rpc = rpc;
+}
+
+static void
+rpc_setup(struct evhttp **phttp, short *pport, struct evrpc_base **pbase)
+{
+ short port;
+ struct evhttp *http = NULL;
+ struct evrpc_base *base = NULL;
+
+ http = http_setup(&port);
+ base = evrpc_init(http);
+
+ EVRPC_REGISTER(base, Message, msg, kill, MessageCb, NULL);
+ EVRPC_REGISTER(base, NeverReply, msg, kill, NeverReplyCb, NULL);
+
+ *phttp = http;
+ *pport = port;
+ *pbase = base;
+
+ need_input_hook = 0;
+ need_output_hook = 0;
+}
+
+static void
+rpc_teardown(struct evrpc_base *base)
+{
+ assert(EVRPC_UNREGISTER(base, Message) == 0);
+ assert(EVRPC_UNREGISTER(base, NeverReply) == 0);
+
+ evrpc_free(base);
+}
+
+static void
+rpc_postrequest_failure(struct evhttp_request *req, void *arg)
+{
+ if (req->response_code != HTTP_SERVUNAVAIL) {
+
+ fprintf(stderr, "FAILED (response code)\n");
+ exit(1);
+ }
+
+ test_ok = 1;
+ event_loopexit(NULL);
+}
+
+/*
+ * Test a malformed payload submitted as an RPC
+ */
+
+static void
+rpc_basic_test(void)
+{
+ short port;
+ struct evhttp *http = NULL;
+ struct evrpc_base *base = NULL;
+ struct evhttp_connection *evcon = NULL;
+ struct evhttp_request *req = NULL;
+
+ fprintf(stdout, "Testing Basic RPC Support: ");
+
+ rpc_setup(&http, &port, &base);
+
+ evcon = evhttp_connection_new("127.0.0.1", port);
+ if (evcon == NULL) {
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+ }
+
+ /*
+ * At this point, we want to schedule an HTTP POST request
+ * server using our make request method.
+ */
+
+ req = evhttp_request_new(rpc_postrequest_failure, NULL);
+ if (req == NULL) {
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+ }
+
+ /* Add the information that we care about */
+ evhttp_add_header(req->output_headers, "Host", "somehost");
+ evbuffer_add_printf(req->output_buffer, "Some Nonsense");
+
+ if (evhttp_make_request(evcon, req,
+ EVHTTP_REQ_POST,
+ "/.rpc.Message") == -1) {
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+ }
+
+ test_ok = 0;
+
+ event_dispatch();
+
+ evhttp_connection_free(evcon);
+
+ rpc_teardown(base);
+
+ if (test_ok != 1) {
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+ }
+
+ fprintf(stdout, "OK\n");
+
+ evhttp_free(http);
+}
+
+static void
+rpc_postrequest_done(struct evhttp_request *req, void *arg)
+{
+ struct kill* kill_reply = NULL;
+
+ if (req->response_code != HTTP_OK) {
+
+ fprintf(stderr, "FAILED (response code)\n");
+ exit(1);
+ }
+
+ kill_reply = kill_new();
+
+ if ((kill_unmarshal(kill_reply, req->input_buffer)) == -1) {
+ fprintf(stderr, "FAILED (unmarshal)\n");
+ exit(1);
+ }
+
+ kill_free(kill_reply);
+
+ test_ok = 1;
+ event_loopexit(NULL);
+}
+
+static void
+rpc_basic_message(void)
+{
+ short port;
+ struct evhttp *http = NULL;
+ struct evrpc_base *base = NULL;
+ struct evhttp_connection *evcon = NULL;
+ struct evhttp_request *req = NULL;
+ struct msg *msg;
+
+ fprintf(stdout, "Testing Good RPC Post: ");
+
+ rpc_setup(&http, &port, &base);
+
+ evcon = evhttp_connection_new("127.0.0.1", port);
+ if (evcon == NULL) {
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+ }
+
+ /*
+ * At this point, we want to schedule an HTTP POST request
+ * server using our make request method.
+ */
+
+ req = evhttp_request_new(rpc_postrequest_done, NULL);
+ if (req == NULL) {
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+ }
+
+ /* Add the information that we care about */
+ evhttp_add_header(req->output_headers, "Host", "somehost");
+
+ /* set up the basic message */
+ msg = msg_new();
+ EVTAG_ASSIGN(msg, from_name, "niels");
+ EVTAG_ASSIGN(msg, to_name, "tester");
+ msg_marshal(req->output_buffer, msg);
+ msg_free(msg);
+
+ if (evhttp_make_request(evcon, req,
+ EVHTTP_REQ_POST,
+ "/.rpc.Message") == -1) {
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+ }
+
+ test_ok = 0;
+
+ event_dispatch();
+
+ evhttp_connection_free(evcon);
+
+ rpc_teardown(base);
+
+ if (test_ok != 1) {
+ fprintf(stdout, "FAILED\n");
+ exit(1);
+ }
+
+ fprintf(stdout, "OK\n");
+
+ evhttp_free(http);
+}
+
+static struct evrpc_pool *
+rpc_pool_with_connection(short port)
+{
+ struct evhttp_connection *evcon;
+ struct evrpc_pool *pool;
+
+ pool = evrpc_pool_new(NULL);
+ assert(pool != NULL);
+
+ evcon = evhttp_connection_new("127.0.0.1", port);
+ assert(evcon != NULL);
+
+ evrpc_pool_add_connection(pool, evcon);
+
+ return (pool);
+}
+
+static void
+GotKillCb(struct evrpc_status *status,
+ struct msg *msg, struct kill *kill, void *arg)
+{
+ char *weapon;
+ char *action;
+
+ if (need_output_hook) {
+ struct evhttp_request *req = status->http_req;
+ const char *header = evhttp_find_header(
+ req->input_headers, "X-Pool-Hook");
+ assert(strcmp(header, "ran") == 0);
+ }
+
+ if (status->error != EVRPC_STATUS_ERR_NONE)
+ goto done;
+
+ if (EVTAG_GET(kill, weapon, &weapon) == -1) {
+ fprintf(stderr, "get weapon\n");
+ goto done;
+ }
+ if (EVTAG_GET(kill, action, &action) == -1) {
+ fprintf(stderr, "get action\n");
+ goto done;
+ }
+
+ if (strcmp(weapon, "dagger"))
+ goto done;
+
+ if (strcmp(action, "wave around like an idiot"))
+ goto done;
+
+ test_ok += 1;
+
+done:
+ event_loopexit(NULL);
+}
+
+static void
+GotKillCbTwo(struct evrpc_status *status,
+ struct msg *msg, struct kill *kill, void *arg)
+{
+ char *weapon;
+ char *action;
+
+ if (status->error != EVRPC_STATUS_ERR_NONE)
+ goto done;
+
+ if (EVTAG_GET(kill, weapon, &weapon) == -1) {
+ fprintf(stderr, "get weapon\n");
+ goto done;
+ }
+ if (EVTAG_GET(kill, action, &action) == -1) {
+ fprintf(stderr, "get action\n");
+ goto done;
+ }
+
+ if (strcmp(weapon, "dagger"))
+ goto done;
+
+ if (strcmp(action, "wave around like an idiot"))
+ goto done;
+
+ test_ok += 1;
+
+done:
+ if (test_ok == 2)
+ event_loopexit(NULL);
+}
+
+static int
+rpc_hook_add_header(struct evhttp_request *req,
+ struct evbuffer *evbuf, void *arg)
+{
+ const char *hook_type = arg;
+ if (strcmp("input", hook_type) == 0)
+ evhttp_add_header(req->input_headers, "X-Hook", hook_type);
+ else
+ evhttp_add_header(req->output_headers, "X-Hook", hook_type);
+ return (0);
+}
+
+static int
+rpc_hook_remove_header(struct evhttp_request *req,
+ struct evbuffer *evbuf, void *arg)
+{
+ const char *header = evhttp_find_header(req->input_headers, "X-Hook");
+ assert(header != NULL);
+ assert(strcmp(header, arg) == 0);
+ evhttp_remove_header(req->input_headers, "X-Hook");
+ evhttp_add_header(req->input_headers, "X-Pool-Hook", "ran");
+
+ return (0);
+}
+
+static void
+rpc_basic_client(void)
+{
+ short port;
+ struct evhttp *http = NULL;
+ struct evrpc_base *base = NULL;
+ struct evrpc_pool *pool = NULL;
+ struct msg *msg;
+ struct kill *kill;
+
+ fprintf(stdout, "Testing RPC Client: ");
+
+ rpc_setup(&http, &port, &base);
+
+ need_input_hook = 1;
+ need_output_hook = 1;
+
+ assert(evrpc_add_hook(base, EVRPC_INPUT, rpc_hook_add_header, (void*)"input")
+ != NULL);
+ assert(evrpc_add_hook(base, EVRPC_OUTPUT, rpc_hook_add_header, (void*)"output")
+ != NULL);
+
+ pool = rpc_pool_with_connection(port);
+
+ assert(evrpc_add_hook(pool, EVRPC_INPUT, rpc_hook_remove_header, (void*)"output"));
+
+ /* set up the basic message */
+ msg = msg_new();
+ EVTAG_ASSIGN(msg, from_name, "niels");
+ EVTAG_ASSIGN(msg, to_name, "tester");
+
+ kill = kill_new();
+
+ EVRPC_MAKE_REQUEST(Message, pool, msg, kill, GotKillCb, NULL);
+
+ test_ok = 0;
+
+ event_dispatch();
+
+ if (test_ok != 1) {
+ fprintf(stdout, "FAILED (1)\n");
+ exit(1);
+ }
+
+ /* we do it twice to make sure that reuse works correctly */
+ kill_clear(kill);
+
+ EVRPC_MAKE_REQUEST(Message, pool, msg, kill, GotKillCb, NULL);
+
+ event_dispatch();
+
+ rpc_teardown(base);
+
+ if (test_ok != 2) {
+ fprintf(stdout, "FAILED (2)\n");
+ exit(1);
+ }
+
+ fprintf(stdout, "OK\n");
+
+ msg_free(msg);
+ kill_free(kill);
+
+ evrpc_pool_free(pool);
+ evhttp_free(http);
+}
+
+/*
+ * We are testing that the second requests gets send over the same
+ * connection after the first RPCs completes.
+ */
+static void
+rpc_basic_queued_client(void)
+{
+ short port;
+ struct evhttp *http = NULL;
+ struct evrpc_base *base = NULL;
+ struct evrpc_pool *pool = NULL;
+ struct msg *msg;
+ struct kill *kill_one, *kill_two;
+
+ fprintf(stdout, "Testing RPC (Queued) Client: ");
+
+ rpc_setup(&http, &port, &base);
+
+ pool = rpc_pool_with_connection(port);
+
+ /* set up the basic message */
+ msg = msg_new();
+ EVTAG_ASSIGN(msg, from_name, "niels");
+ EVTAG_ASSIGN(msg, to_name, "tester");
+
+ kill_one = kill_new();
+ kill_two = kill_new();
+
+ EVRPC_MAKE_REQUEST(Message, pool, msg, kill_one, GotKillCbTwo, NULL);
+ EVRPC_MAKE_REQUEST(Message, pool, msg, kill_two, GotKillCb, NULL);
+
+ test_ok = 0;
+
+ event_dispatch();
+
+ rpc_teardown(base);
+
+ if (test_ok != 2) {
+ fprintf(stdout, "FAILED (1)\n");
+ exit(1);
+ }
+
+ fprintf(stdout, "OK\n");
+
+ msg_free(msg);
+ kill_free(kill_one);
+ kill_free(kill_two);
+
+ evrpc_pool_free(pool);
+ evhttp_free(http);
+}
+
+static void
+GotErrorCb(struct evrpc_status *status,
+ struct msg *msg, struct kill *kill, void *arg)
+{
+ if (status->error != EVRPC_STATUS_ERR_TIMEOUT)
+ goto done;
+
+ /* should never be complete but just to check */
+ if (kill_complete(kill) == 0)
+ goto done;
+
+ test_ok += 1;
+
+done:
+ event_loopexit(NULL);
+}
+
+static void
+rpc_client_timeout(void)
+{
+ short port;
+ struct evhttp *http = NULL;
+ struct evrpc_base *base = NULL;
+ struct evrpc_pool *pool = NULL;
+ struct msg *msg;
+ struct kill *kill;
+
+ fprintf(stdout, "Testing RPC Client Timeout: ");
+
+ rpc_setup(&http, &port, &base);
+
+ pool = rpc_pool_with_connection(port);
+
+ /* set the timeout to 5 seconds */
+ evrpc_pool_set_timeout(pool, 5);
+
+ /* set up the basic message */
+ msg = msg_new();
+ EVTAG_ASSIGN(msg, from_name, "niels");
+ EVTAG_ASSIGN(msg, to_name, "tester");
+
+ kill = kill_new();
+
+ EVRPC_MAKE_REQUEST(NeverReply, pool, msg, kill, GotErrorCb, NULL);
+
+ test_ok = 0;
+
+ event_dispatch();
+
+ /* free the saved RPC structure up */
+ EVRPC_REQUEST_DONE(saved_rpc);
+
+ rpc_teardown(base);
+
+ if (test_ok != 2) {
+ fprintf(stdout, "FAILED (1)\n");
+ exit(1);
+ }
+
+ fprintf(stdout, "OK\n");
+
+ msg_free(msg);
+ kill_free(kill);
+
+ evrpc_pool_free(pool);
+ evhttp_free(http);
+}
+
+void
+rpc_suite(void)
+{
+ rpc_basic_test();
+ rpc_basic_message();
+ rpc_basic_client();
+ rpc_basic_queued_client();
+ rpc_client_timeout();
+}
diff --git a/libevent/test/test-eof.c b/libevent/test/test-eof.c
new file mode 100644
index 00000000000..4fc1a19f224
--- /dev/null
+++ b/libevent/test/test-eof.c
@@ -0,0 +1,82 @@
+/*
+ * Compile with:
+ * cc -I/usr/local/include -o time-test time-test.c -L/usr/local/lib -levent
+ */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+
+#ifdef WIN32
+#include <winsock2.h>
+#endif
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#ifdef HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+
+#include <event.h>
+#include <evutil.h>
+
+int test_okay = 1;
+int called = 0;
+
+static void
+read_cb(int fd, short event, void *arg)
+{
+ char buf[256];
+ int len;
+
+ len = read(fd, buf, sizeof(buf));
+
+ printf("%s: read %d%s\n", __func__,
+ len, len ? "" : " - means EOF");
+
+ if (len) {
+ if (!called)
+ event_add(arg, NULL);
+ } else if (called == 1)
+ test_okay = 0;
+
+ called++;
+}
+
+#ifndef SHUT_WR
+#define SHUT_WR 1
+#endif
+
+int
+main (int argc, char **argv)
+{
+ struct event ev;
+ const char *test = "test string";
+ int pair[2];
+
+ if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == -1)
+ return (1);
+
+
+ write(pair[0], test, strlen(test)+1);
+ shutdown(pair[0], SHUT_WR);
+
+ /* Initalize the event library */
+ event_init();
+
+ /* Initalize one event */
+ event_set(&ev, pair[1], EV_READ, read_cb, &ev);
+
+ event_add(&ev, NULL);
+
+ event_dispatch();
+
+ return (test_okay);
+}
+
diff --git a/libevent/test/test-init.c b/libevent/test/test-init.c
new file mode 100644
index 00000000000..c368715fd67
--- /dev/null
+++ b/libevent/test/test-init.c
@@ -0,0 +1,33 @@
+/*
+ * Compile with:
+ * cc -I/usr/local/include -o time-test time-test.c -L/usr/local/lib -levent
+ */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#ifdef HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+
+#include <event.h>
+
+int
+main(int argc, char **argv)
+{
+ /* Initalize the event library */
+ event_init();
+
+ return (0);
+}
+
diff --git a/libevent/test/test-time.c b/libevent/test/test-time.c
new file mode 100644
index 00000000000..a847d55ef38
--- /dev/null
+++ b/libevent/test/test-time.c
@@ -0,0 +1,82 @@
+/*
+ * Compile with:
+ * cc -I/usr/local/include -o time-test time-test.c -L/usr/local/lib -levent
+ */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <errno.h>
+
+#include <event.h>
+
+int called = 0;
+
+#define NEVENT 20000
+
+struct event *ev[NEVENT];
+
+static int
+rand_int(int n)
+{
+#ifdef WIN32
+ return (int)(rand() * n);
+#else
+ return (int)(random() % n);
+#endif
+}
+
+static void
+time_cb(int fd, short event, void *arg)
+{
+ struct timeval tv;
+ int i, j;
+
+ called++;
+
+ if (called < 10*NEVENT) {
+ for (i = 0; i < 10; i++) {
+ j = rand_int(NEVENT);
+ tv.tv_sec = 0;
+ tv.tv_usec = rand_int(50000);
+ if (tv.tv_usec % 2)
+ evtimer_add(ev[j], &tv);
+ else
+ evtimer_del(ev[j]);
+ }
+ }
+}
+
+int
+main (int argc, char **argv)
+{
+ struct timeval tv;
+ int i;
+
+ /* Initalize the event library */
+ event_init();
+
+ for (i = 0; i < NEVENT; i++) {
+ ev[i] = malloc(sizeof(struct event));
+
+ /* Initalize one event */
+ evtimer_set(ev[i], time_cb, ev[i]);
+ tv.tv_sec = 0;
+ tv.tv_usec = rand_int(50000);
+ evtimer_add(ev[i], &tv);
+ }
+
+ event_dispatch();
+
+ return (called < NEVENT);
+}
+
diff --git a/libevent/test/test-weof.c b/libevent/test/test-weof.c
new file mode 100644
index 00000000000..5d87ceb8eb7
--- /dev/null
+++ b/libevent/test/test-weof.c
@@ -0,0 +1,80 @@
+/*
+ * Compile with:
+ * cc -I/usr/local/include -o time-test time-test.c -L/usr/local/lib -levent
+ */
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+
+#ifdef WIN32
+#include <winsock2.h>
+#endif
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#ifdef HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <signal.h>
+#include <unistd.h>
+#include <errno.h>
+
+#include <event.h>
+#include <evutil.h>
+
+int pair[2];
+int test_okay = 1;
+int called = 0;
+
+static void
+write_cb(int fd, short event, void *arg)
+{
+ const char *test = "test string";
+ int len;
+
+ len = write(fd, test, strlen(test) + 1);
+
+ printf("%s: write %d%s\n", __func__,
+ len, len ? "" : " - means EOF");
+
+ if (len > 0) {
+ if (!called)
+ event_add(arg, NULL);
+ close(pair[0]);
+ } else if (called == 1)
+ test_okay = 0;
+
+ called++;
+}
+
+int
+main (int argc, char **argv)
+{
+ struct event ev;
+
+#ifndef WIN32
+ if (signal(SIGPIPE, SIG_IGN) == SIG_ERR)
+ return (1);
+#endif
+
+ if (evutil_socketpair(AF_UNIX, SOCK_STREAM, 0, pair) == -1)
+ return (1);
+
+ /* Initalize the event library */
+ event_init();
+
+ /* Initalize one event */
+ event_set(&ev, pair[1], EV_WRITE, write_cb, &ev);
+
+ event_add(&ev, NULL);
+
+ event_dispatch();
+
+ return (test_okay);
+}
+
diff --git a/libevent/test/test.sh b/libevent/test/test.sh
new file mode 100644
index 00000000000..506a1988c34
--- /dev/null
+++ b/libevent/test/test.sh
@@ -0,0 +1,91 @@
+#!/bin/sh
+
+setup () {
+ EVENT_NOKQUEUE=yes; export EVENT_NOKQUEUE
+ EVENT_NODEVPOLL=yes; export EVENT_NODEVPOLL
+ EVENT_NOPOLL=yes; export EVENT_NOPOLL
+ EVENT_NOSELECT=yes; export EVENT_NOSELECT
+ EVENT_NOEPOLL=yes; export EVENT_NOEPOLL
+ EVENT_NOEVPORT=yes; export EVENT_NOEVPORT
+}
+
+test () {
+ if ./test-init 2>/dev/null ;
+ then
+ true
+ else
+ echo Skipping test
+ return
+ fi
+
+echo -n " test-eof: "
+if ./test-eof >/dev/null ;
+then
+ echo OKAY ;
+else
+ echo FAILED ;
+fi
+echo -n " test-weof: "
+if ./test-weof >/dev/null ;
+then
+ echo OKAY ;
+else
+ echo FAILED ;
+fi
+echo -n " test-time: "
+if ./test-time >/dev/null ;
+then
+ echo OKAY ;
+else
+ echo FAILED ;
+fi
+echo -n " regress: "
+if ./regress >/dev/null ;
+then
+ echo OKAY ;
+else
+ echo FAILED ;
+fi
+}
+
+echo "Running tests:"
+
+# Need to do this by hand?
+setup
+unset EVENT_NOKQUEUE
+export EVENT_NOKQUEUE
+echo "KQUEUE"
+test
+
+setup
+unset EVENT_NODEVPOLL
+export EVENT_NODEVPOLL
+echo "DEVPOLL"
+test
+
+setup
+unset EVENT_NOPOLL
+export EVENT_NOPOLL
+echo "POLL"
+test
+
+setup
+unset EVENT_NOSELECT
+export EVENT_NOSELECT
+echo "SELECT"
+test
+
+setup
+unset EVENT_NOEPOLL
+export EVENT_NOEPOLL
+echo "EPOLL"
+test
+
+setup
+unset EVENT_NOEVPORT
+export EVENT_NOEVPORT
+echo "EVPORT"
+test
+
+
+
diff --git a/libmysql/CMakeLists.txt b/libmysql/CMakeLists.txt
index 1ddb0f7db98..b818ae58524 100644
--- a/libmysql/CMakeLists.txt
+++ b/libmysql/CMakeLists.txt
@@ -334,7 +334,8 @@ SET(CLIENT_SOURCES
../sql-common/client.c
../sql-common/mysql_async.c
../sql-common/my_time.c
- ../sql-common/client_plugin.c
+ ../sql-common/client_plugin.c
+ ../sql-common/client_authentication.cc
../sql/net_serv.cc
../sql-common/pack.c
../sql/password.c
@@ -344,7 +345,7 @@ ADD_CONVENIENCE_LIBRARY(clientlib ${CLIENT_SOURCES})
DTRACE_INSTRUMENT(clientlib)
ADD_DEPENDENCIES(clientlib GenError)
-SET(LIBS clientlib dbug strings vio mysys ${ZLIB_LIBRARY} ${SSL_LIBRARIES} ${LIBDL})
+SET(LIBS clientlib dbug strings vio mysys mysys_ssl ${ZLIB_LIBRARY} ${SSL_LIBRARIES} ${LIBDL})
# Merge several convenience libraries into one big mysqlclient
# and link them together into shared library.
diff --git a/libmysql/errmsg.c b/libmysql/errmsg.c
index 4c4485f7ec4..9985fa2233c 100644
--- a/libmysql/errmsg.c
+++ b/libmysql/errmsg.c
@@ -85,6 +85,8 @@ const char *client_errors[]=
"The number of columns in the result set differs from the number of bound buffers. You must reset the statement, rebind the result set columns, and execute the statement again",
"This handle is already connected. Use a separate handle for each connection.",
"Authentication plugin '%s' cannot be loaded: %s",
+ "There is an attribute with the same name already",
+ "Authentication plugin '%s' reported error: %s",
""
};
diff --git a/libmysql/libmysql.c b/libmysql/libmysql.c
index fdc7ac98468..45c141649e1 100644
--- a/libmysql/libmysql.c
+++ b/libmysql/libmysql.c
@@ -1139,7 +1139,7 @@ void my_net_local_init(NET *net)
my_net_set_read_timeout(net, CLIENT_NET_READ_TIMEOUT);
my_net_set_write_timeout(net, CLIENT_NET_WRITE_TIMEOUT);
net->retry_count= 1;
- net->max_packet_size= max(net_buffer_length, max_allowed_packet);
+ net->max_packet_size= MY_MAX(net_buffer_length, max_allowed_packet);
}
/*
@@ -3239,7 +3239,7 @@ static void fetch_string_with_conversion(MYSQL_BIND *param, char *value,
copy_length= end - start;
/* We've got some data beyond offset: copy up to buffer_length bytes */
if (param->buffer_length)
- memcpy(buffer, start, min(copy_length, param->buffer_length));
+ memcpy(buffer, start, MY_MIN(copy_length, param->buffer_length));
}
else
copy_length= 0;
@@ -3464,7 +3464,7 @@ static void fetch_float_with_conversion(MYSQL_BIND *param, MYSQL_FIELD *field,
size_t len;
if (field->decimals >= NOT_FIXED_DEC)
len= my_gcvt(value, type,
- (int) min(sizeof(buff)-1, param->buffer_length),
+ (int) MY_MIN(sizeof(buff)-1, param->buffer_length),
buff, NULL);
else
len= my_fcvt(value, (int) field->decimals, buff, NULL);
@@ -3774,7 +3774,7 @@ static void fetch_result_bin(MYSQL_BIND *param,
uchar **row)
{
ulong length= net_field_length(row);
- ulong copy_length= min(length, param->buffer_length);
+ ulong copy_length= MY_MIN(length, param->buffer_length);
memcpy(param->buffer, (char *)*row, copy_length);
*param->length= length;
*param->error= copy_length < length;
@@ -3786,7 +3786,7 @@ static void fetch_result_str(MYSQL_BIND *param,
uchar **row)
{
ulong length= net_field_length(row);
- ulong copy_length= min(length, param->buffer_length);
+ ulong copy_length= MY_MIN(length, param->buffer_length);
memcpy(param->buffer, (char *)*row, copy_length);
/* Add an end null if there is room in the buffer */
if (copy_length != param->buffer_length)
diff --git a/libmysqld/CMakeLists.txt b/libmysqld/CMakeLists.txt
index d4a57b19b8e..6f7a630663e 100644
--- a/libmysqld/CMakeLists.txt
+++ b/libmysqld/CMakeLists.txt
@@ -64,7 +64,8 @@ SET(SQL_EMBEDDED_SOURCES emb_qcache.cc libmysqld.c lib_sql.cc
../sql/sql_analyse.cc ../sql/sql_base.cc ../sql/sql_cache.cc
../sql/sql_class.cc ../sql/sql_crypt.cc ../sql/sql_cursor.cc
../sql/sql_db.cc ../sql/sql_delete.cc ../sql/sql_derived.cc
- ../sql/sql_do.cc ../sql/sql_error.cc ../sql/sql_handler.cc
+ ../sql/sql_do.cc ../sql/sql_error.cc ../sql/sql_handler.cc
+ ../sql/sql_get_diagnostics.cc
../sql/sql_help.cc ../sql/sql_insert.cc ../sql/datadict.cc
../sql/sql_admin.cc ../sql/sql_truncate.cc ../sql/sql_reload.cc
../sql/sql_lex.cc ../sql/keycaches.cc
@@ -120,7 +121,7 @@ ENDIF()
SET(LIBS
- dbug strings regex mysys vio
+ dbug strings regex mysys mysys_ssl vio
${ZLIB_LIBRARY} ${SSL_LIBRARIES}
${LIBWRAP} ${LIBCRYPT} ${LIBDL}
${MYSQLD_STATIC_PLUGIN_LIBS}
diff --git a/libmysqld/emb_qcache.cc b/libmysqld/emb_qcache.cc
index abb0631ebfb..db236d826c9 100644
--- a/libmysqld/emb_qcache.cc
+++ b/libmysqld/emb_qcache.cc
@@ -487,7 +487,7 @@ int emb_load_querycache_result(THD *thd, Querycache_stream *src)
data->embedded_info->prev_ptr= prev_row;
return_ok:
net_send_eof(thd, thd->server_status,
- thd->warning_info->statement_warn_count());
+ thd->get_stmt_da()->statement_warn_count());
DBUG_RETURN(0);
err:
DBUG_RETURN(1);
diff --git a/libmysqld/lib_sql.cc b/libmysqld/lib_sql.cc
index 7450dc22184..3502ebb1ad3 100644
--- a/libmysqld/lib_sql.cc
+++ b/libmysqld/lib_sql.cc
@@ -130,7 +130,7 @@ emb_advanced_command(MYSQL *mysql, enum enum_server_command command,
/* Clear result variables */
thd->clear_error();
- thd->stmt_da->reset_diagnostics_area();
+ thd->get_stmt_da()->reset_diagnostics_area();
mysql->affected_rows= ~(my_ulonglong) 0;
mysql->field_count= 0;
net_clear_error(net);
@@ -241,7 +241,7 @@ static my_bool emb_read_prepare_result(MYSQL *mysql, MYSQL_STMT *stmt)
stmt->stmt_id= thd->client_stmt_id;
stmt->param_count= thd->client_param_count;
stmt->field_count= 0;
- mysql->warning_count= thd->warning_info->statement_warn_count();
+ mysql->warning_count= thd->get_stmt_da()->statement_warn_count();
if (thd->first_data)
{
@@ -428,7 +428,7 @@ static void emb_free_embedded_thd(MYSQL *mysql)
static const char * emb_read_statistics(MYSQL *mysql)
{
THD *thd= (THD*)mysql->thd;
- return thd->is_error() ? thd->stmt_da->message() : "";
+ return thd->is_error() ? thd->get_stmt_da()->message() : "";
}
@@ -885,7 +885,7 @@ write_eof_packet(THD *thd, uint server_status, uint statement_warn_count)
is cleared between substatements, and mysqltest gets confused
*/
thd->cur_data->embedded_info->warning_count=
- (thd->spcont ? 0 : min(statement_warn_count, 65535));
+ (thd->spcont ? 0 : MY_MIN(statement_warn_count, 65535));
return FALSE;
}
@@ -1045,7 +1045,7 @@ bool Protocol::send_result_set_metadata(List<Item> *list, uint flags)
if (flags & SEND_EOF)
write_eof_packet(thd, thd->server_status,
- thd->warning_info->statement_warn_count());
+ thd->get_stmt_da()->statement_warn_count());
DBUG_RETURN(prepare_for_send(list->elements));
err:
diff --git a/libservices/CMakeLists.txt b/libservices/CMakeLists.txt
index e7dcf20e547..c6e077b0a03 100644
--- a/libservices/CMakeLists.txt
+++ b/libservices/CMakeLists.txt
@@ -22,6 +22,7 @@ SET(MYSQLSERVICES_SOURCES
thd_timezone_service.c
progress_report_service.c
debug_sync_service.c
+ my_sha1_service.c
kill_statement_service.c)
ADD_CONVENIENCE_LIBRARY(mysqlservices ${MYSQLSERVICES_SOURCES})
diff --git a/libservices/my_sha1_service.c b/libservices/my_sha1_service.c
new file mode 100644
index 00000000000..196c1939082
--- /dev/null
+++ b/libservices/my_sha1_service.c
@@ -0,0 +1,18 @@
+/* Copyright (c) 2013 Monty Program Ab
+ Use is subject to license terms.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+#include <service_versions.h>
+SERVICE_VERSION my_sha1_service= (void*)VERSION_my_sha1;
diff --git a/mysql-test/include/commit.inc b/mysql-test/include/commit.inc
index fdb9ef1f563..bdb6f48f095 100644
--- a/mysql-test/include/commit.inc
+++ b/mysql-test/include/commit.inc
@@ -7,7 +7,7 @@
##
set sql_mode=no_engine_substitution;
-eval set storage_engine = $engine_type;
+eval set default_storage_engine = $engine_type;
set autocommit=1;
--disable_warnings
@@ -757,7 +757,7 @@ call p_verify_status_increment(2, 0, 4, 4);
alter table t3 add column (b int);
call p_verify_status_increment(2, 0, 2, 0);
alter table t3 rename t4;
-call p_verify_status_increment(2, 0, 2, 0);
+call p_verify_status_increment(0, 0, 0, 0);
rename table t4 to t3;
call p_verify_status_increment(0, 0, 0, 0);
truncate table t3;
diff --git a/mysql-test/include/default_mysqld.cnf b/mysql-test/include/default_mysqld.cnf
index 1c21a4f03cc..5f93428e599 100644
--- a/mysql-test/include/default_mysqld.cnf
+++ b/mysql-test/include/default_mysqld.cnf
@@ -1,4 +1,4 @@
-# Copyright (c) 2007, 2010, Oracle and/or its affiliates
+# Copyright (c) 2007, 2012, Oracle and/or its affiliates
# Copyright (c) 2010, 2012, Monty Program Ab
#
# This program is free software; you can redistribute it and/or modify
@@ -48,21 +48,40 @@ loose-innodb_read_io_threads= 2
loose-innodb_log_buffer_size= 1M
loose-innodb_log_file_size= 5M
loose-innodb_log_files_in_group= 2
+loose-innodb-stats-persistent= OFF
slave-net-timeout=120
+# MAINTAINER:
+# the loose- syntax is to make sure the cnf file is also
+# valid when building without the performance schema.
+
# Run tests with the performance schema instrumentation
loose-enable-performance-schema
# Run tests with a small number of instrumented objects
# to limit memory consumption with MTR
+loose-performance-schema-accounts-size=100
+loose-performance-schema-digests-size=200
+loose-performance-schema-hosts-size=100
+loose-performance-schema-users-size=100
loose-performance-schema-max-mutex-instances=5000
loose-performance-schema-max-rwlock-instances=5000
+loose-performance-schema-max-cond-instances=1000
+loose-performance-schema-max-file-instances=10000
+loose-performance-schema-max-socket-instances=1000
loose-performance-schema-max-table-instances=500
loose-performance-schema-max-table-handles=1000
+loose-performance-schema-events-waits-history-size=10
+loose-performance-schema-events-waits-history-long-size=10000
+loose-performance-schema-events-stages-history-size=10
loose-performance-schema-events-stages-history-long-size=1000
+loose-performance-schema-events-statements-history-size=10
loose-performance-schema-events-statements-history-long-size=1000
loose-performance-schema-max-thread-instances=200
+loose-performance-schema-session-connect-attrs-size=2048
+
+# Enable everything, for maximun code exposure during testing
loose-performance-schema-instrument='%=ON'
diff --git a/mysql-test/include/have_innodb.combinations b/mysql-test/include/have_innodb.combinations
index 55107204097..b76f783b928 100644
--- a/mysql-test/include/have_innodb.combinations
+++ b/mysql-test/include/have_innodb.combinations
@@ -7,6 +7,8 @@ innodb-trx
innodb-buffer-pool-stats
innodb-buffer-page
innodb-buffer-page-lru
+innodb-sys-foreign
+innodb-sys-foreign-col
[xtradb_plugin]
ignore-builtin-innodb
@@ -17,6 +19,8 @@ innodb-trx
innodb-buffer-pool-stats
innodb-buffer-page
innodb-buffer-page-lru
+innodb-sys-foreign
+innodb-sys-foreign-col
[xtradb]
innodb
@@ -26,3 +30,5 @@ innodb-metrics
innodb-buffer-pool-stats
innodb-buffer-page
innodb-buffer-page-lru
+innodb-sys-foreign
+innodb-sys-foreign-col
diff --git a/mysql-test/include/have_ipv6.inc b/mysql-test/include/have_ipv6.inc
new file mode 100644
index 00000000000..752dd0db53e
--- /dev/null
+++ b/mysql-test/include/have_ipv6.inc
@@ -0,0 +1,20 @@
+# Check if ipv6 is available.
+#
+--disable_query_log
+--disable_result_log
+--disable_abort_on_error
+connect (checkcon123456789,::1,root,,test);
+if($mysql_errno)
+{
+ skip No IPv6 support;
+}
+connection default;
+if(!$mysql_errno)
+{
+ disconnect checkcon123456789;
+}
+--enable_abort_on_error
+--enable_result_log
+--enable_query_log
+# end check
+
diff --git a/mysql-test/include/mix1.inc b/mysql-test/include/mix1.inc
index 75ba0e43221..15a69fc04ff 100644
--- a/mysql-test/include/mix1.inc
+++ b/mysql-test/include/mix1.inc
@@ -25,7 +25,7 @@
# where just some indexes have been created must be used.
#
-eval SET SESSION STORAGE_ENGINE = $engine_type;
+eval SET SESSION DEFAULT_STORAGE_ENGINE = $engine_type;
--disable_warnings
drop table if exists t1,t2,t3,t1m,t1i,t2m,t2i,t4;
@@ -388,7 +388,7 @@ drop table t1;
# Bug #13293 Wrongly used index results in endless loop.
# (was part of group_min_max.test)
#
-create table t1 (f1 int, f2 char(1), primary key(f1,f2));
+create table t1 (f1 int, f2 char(1), primary key(f1,f2)) stats_persistent=0;
insert into t1 values ( 1,"e"),(2,"a"),( 3,"c"),(4,"d");
alter table t1 drop primary key, add primary key (f2, f1);
explain select distinct f1 a, f1 b from t1;
@@ -432,7 +432,7 @@ CREATE TABLE t1 (
age tinyint(3) unsigned NOT NULL,
PRIMARY KEY (id),
INDEX (name,dept)
-) ENGINE=InnoDB;
+) ENGINE=InnoDB STATS_PERSISTENT=0;
INSERT INTO t1(id, dept, age, name) VALUES
(3987, 'cs1', 10, 'rs1'), (3988, 'cs2', 20, 'rs1'), (3995, 'cs3', 10, 'rs2'),
(3996, 'cs4', 20, 'rs2'), (4003, 'cs5', 10, 'rs3'), (4004, 'cs6', 20, 'rs3'),
@@ -546,7 +546,7 @@ CREATE TABLE t2(
acct_id int DEFAULT NULL,
INDEX idx1 (stat_id, acct_id),
INDEX idx2 (acct_id)
-) ENGINE=InnoDB;
+) ENGINE=InnoDB STATS_PERSISTENT=0;
INSERT INTO t1(stat_id,acct_id) VALUES
(1,759), (2,831), (3,785), (4,854), (1,921),
@@ -1580,6 +1580,29 @@ DROP TABLE t1;
--echo End of 5.1 tests
--echo #
+--echo # Bug#43600: Incorrect type conversion caused wrong result.
+--echo #
+CREATE TABLE t1 (
+ a int NOT NULL
+) engine= innodb;
+
+CREATE TABLE t2 (
+ a int NOT NULL,
+ b int NOT NULL,
+ filler char(100) DEFAULT NULL,
+ KEY a (a,b)
+) engine= innodb;
+
+insert into t1 values (0),(1),(2),(3),(4);
+insert into t2 select A.a + 10 *B.a, 1, 'filler' from t1 A, t1 B;
+
+explain select * from t1, t2 where t2.a=t1.a and t2.b + 1;
+select * from t1, t2 where t2.a=t1.a and t2.b + 1;
+
+drop table t1,t2;
+--echo # End of test case for the bug#43600
+
+--echo #
--echo # Bug#42643: InnoDB does not support replication of TRUNCATE TABLE
--echo #
--echo # Check that a TRUNCATE TABLE statement, needing an exclusive meta
diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl
index ea5b556fbaf..2cfa9a7e066 100755
--- a/mysql-test/mysql-test-run.pl
+++ b/mysql-test/mysql-test-run.pl
@@ -3495,6 +3495,7 @@ sub mysql_install_db {
mtr_add_arg($args, "--skip-plugin-$_") for @optional_plugins;
# starting from 10.0 bootstrap scripts require InnoDB
mtr_add_arg($args, "--loose-innodb");
+ mtr_add_arg($args, "--loose-innodb-log-file-size=5M");
mtr_add_arg($args, "--disable-sync-frm");
mtr_add_arg($args, "--tmpdir=%s", "$opt_vardir/tmp/");
mtr_add_arg($args, "--core-file");
diff --git a/mysql-test/r/1st.result b/mysql-test/r/1st.result
index 7e4ab09b09d..cb82cb5fe7d 100644
--- a/mysql-test/r/1st.result
+++ b/mysql-test/r/1st.result
@@ -22,15 +22,11 @@ host
index_stats
innodb_index_stats
innodb_table_stats
-ndb_binlog_index
plugin
proc
procs_priv
proxies_priv
servers
-slave_master_info
-slave_relay_log_info
-slave_worker_info
slow_log
table_stats
tables_priv
diff --git a/mysql-test/r/alter_table.result b/mysql-test/r/alter_table.result
index b6e99952c23..f01eba1aa05 100644
--- a/mysql-test/r/alter_table.result
+++ b/mysql-test/r/alter_table.result
@@ -270,8 +270,8 @@ ERROR 42000: Incorrect table name ''
drop table t1;
drop table if exists t1, t2;
Warnings:
-Note 1051 Unknown table 't1'
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'test.t1'
+Note 1051 Unknown table 'test.t2'
create table t1 ( a varchar(10) not null primary key ) engine=myisam;
create table t2 ( a varchar(10) not null primary key ) engine=merge union=(t1);
flush tables;
@@ -556,7 +556,7 @@ create database mysqltest;
create table t1 (c1 int);
alter table t1 rename mysqltest.t1;
drop table t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
alter table mysqltest.t1 rename t1;
drop table t1;
create table t1 (c1 int);
@@ -973,7 +973,7 @@ SHOW CREATE TABLE `tt+1`;
Table Create Table
tt+1 CREATE TEMPORARY TABLE `tt+1` (
`c1` int(11) DEFAULT NULL
-) ENGINE=MyISAM DEFAULT CHARSET=latin1
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 DELAY_KEY_WRITE=1
SHOW CREATE TABLE `tt+2`;
Table Create Table
tt+2 CREATE TEMPORARY TABLE `tt+2` (
@@ -1327,6 +1327,16 @@ CREATE DATABASE db1 CHARACTER SET utf8;
CREATE TABLE db1.t1 (bar TINYTEXT, KEY (bar(100)));
ALTER TABLE db1.t1 ADD baz INT;
DROP DATABASE db1;
+# Additional coverage for refactoring which is made as part
+# of fix for bug #27480 "Extend CREATE TEMPORARY TABLES privilege
+# to allow temp table operations".
+#
+# At some point the below test case failed on assertion.
+DROP TABLE IF EXISTS t1;
+CREATE TEMPORARY TABLE t1 (i int) ENGINE=MyISAM;
+ALTER TABLE t1 DISCARD TABLESPACE;
+ERROR HY000: Storage engine MyISAM of the table `test`.`t1` doesn't have this option
+DROP TABLE t1;
#
# Bug#11938039 RE-EXECUTION OF FRM-ONLY ALTER TABLE WITH RENAME
# CLAUSE FAILS OR ABORTS SERVER.
@@ -1380,3 +1390,480 @@ t1 CREATE TABLE `t1` (
KEY `x_param1` (`x_param`)
) ENGINE=MyISAM DEFAULT CHARSET=latin1
DROP TABLE t1;
+#
+# Bug#11938817 ALTER BEHAVIOR DIFFERENT THEN DOCUMENTED
+#
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1(a INT) engine=innodb;
+INSERT INTO t1 VALUES (1), (2);
+# This should not do anything
+ALTER TABLE t1;
+affected rows: 0
+# Check that we rebuild the table
+ALTER TABLE t1 engine=innodb;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+# This should also rebuild the table
+ALTER TABLE t1 FORCE;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+DROP TABLE t1;
+# Bug#11748057 (formerly known as 34972): ALTER TABLE statement doesn't
+# identify correct column name.
+#
+CREATE TABLE t1 (c1 int unsigned , c2 char(100) not null default '');
+ALTER TABLE t1 ADD c3 char(16) NOT NULL DEFAULT '' AFTER c2,
+MODIFY c2 char(100) NOT NULL DEFAULT '' AFTER c1;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` int(10) unsigned DEFAULT NULL,
+ `c2` char(100) NOT NULL DEFAULT '',
+ `c3` char(16) NOT NULL DEFAULT ''
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE t1;
+#
+# WL#5534 Online ALTER, Phase 1
+#
+# Single thread tests.
+# See innodb_mysql_sync.test for multi thread tests.
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1(a INT PRIMARY KEY, b INT) engine=InnoDB;
+CREATE TABLE m1(a INT PRIMARY KEY, b INT) engine=MyISAM;
+INSERT INTO t1 VALUES (1,1), (2,2);
+INSERT INTO m1 VALUES (1,1), (2,2);
+#
+# 1: Test ALGORITHM keyword
+#
+# --enable_info allows us to see how many rows were updated
+# by ALTER TABLE. in-place will show 0 rows, while copy > 0.
+ALTER TABLE t1 ADD INDEX i1(b);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE t1 ADD INDEX i2(b), ALGORITHM= DEFAULT;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE t1 ADD INDEX i3(b), ALGORITHM= COPY;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE t1 ADD INDEX i4(b), ALGORITHM= INPLACE;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE t1 ADD INDEX i5(b), ALGORITHM= INVALID;
+ERROR HY000: Unknown ALGORITHM 'INVALID'
+ALTER TABLE m1 ENABLE KEYS;
+affected rows: 0
+ALTER TABLE m1 ENABLE KEYS, ALGORITHM= DEFAULT;
+affected rows: 0
+ALTER TABLE m1 ENABLE KEYS, ALGORITHM= COPY;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE m1 ENABLE KEYS, ALGORITHM= INPLACE;
+affected rows: 0
+ALTER TABLE t1 DROP INDEX i1, DROP INDEX i2, DROP INDEX i3, DROP INDEX i4;
+#
+# 2: Test ALGORITHM + old_alter_table
+#
+SET SESSION old_alter_table= 1;
+affected rows: 0
+ALTER TABLE t1 ADD INDEX i1(b);
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE t1 ADD INDEX i2(b), ALGORITHM= DEFAULT;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE t1 ADD INDEX i3(b), ALGORITHM= COPY;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE t1 ADD INDEX i4(b), ALGORITHM= INPLACE;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+SET SESSION old_alter_table= 0;
+affected rows: 0
+ALTER TABLE t1 DROP INDEX i1, DROP INDEX i2, DROP INDEX i3, DROP INDEX i4;
+#
+# 3: Test unsupported in-place operation
+#
+ALTER TABLE t1 ADD COLUMN (c1 INT);
+ALTER TABLE t1 ADD COLUMN (c2 INT), ALGORITHM= DEFAULT;
+ALTER TABLE t1 ADD COLUMN (c3 INT), ALGORITHM= COPY;
+ALTER TABLE t1 ADD COLUMN (c4 INT), ALGORITHM= INPLACE;
+ALTER TABLE t1 DROP COLUMN c1, DROP COLUMN c2, DROP COLUMN c3, DROP COLUMN c4;
+#
+# 4: Test LOCK keyword
+#
+ALTER TABLE t1 ADD INDEX i1(b), LOCK= DEFAULT;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE t1 ADD INDEX i2(b), LOCK= NONE;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE t1 ADD INDEX i3(b), LOCK= SHARED;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE t1 ADD INDEX i4(b), LOCK= EXCLUSIVE;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE t1 ADD INDEX i5(b), LOCK= INVALID;
+ERROR HY000: Unknown LOCK type 'INVALID'
+ALTER TABLE m1 ENABLE KEYS, LOCK= DEFAULT;
+ALTER TABLE m1 ENABLE KEYS, LOCK= NONE;
+ERROR 0A000: LOCK=NONE/SHARED is not supported for this operation. Try LOCK=EXCLUSIVE.
+ALTER TABLE m1 ENABLE KEYS, LOCK= SHARED;
+ERROR 0A000: LOCK=NONE/SHARED is not supported for this operation. Try LOCK=EXCLUSIVE.
+ALTER TABLE m1 ENABLE KEYS, LOCK= EXCLUSIVE;
+ALTER TABLE t1 DROP INDEX i1, DROP INDEX i2, DROP INDEX i3, DROP INDEX i4;
+#
+# 5: Test ALGORITHM + LOCK
+#
+ALTER TABLE t1 ADD INDEX i1(b), ALGORITHM= INPLACE, LOCK= NONE;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE t1 ADD INDEX i2(b), ALGORITHM= INPLACE, LOCK= SHARED;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE t1 ADD INDEX i3(b), ALGORITHM= INPLACE, LOCK= EXCLUSIVE;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE t1 ADD INDEX i4(b), ALGORITHM= COPY, LOCK= NONE;
+ERROR 0A000: LOCK=NONE is not supported. Reason: COPY algorithm requires a lock. Try LOCK=SHARED.
+ALTER TABLE t1 ADD INDEX i5(b), ALGORITHM= COPY, LOCK= SHARED;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE t1 ADD INDEX i6(b), ALGORITHM= COPY, LOCK= EXCLUSIVE;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE m1 ENABLE KEYS, ALGORITHM= INPLACE, LOCK= NONE;
+ERROR 0A000: LOCK=NONE/SHARED is not supported for this operation. Try LOCK=EXCLUSIVE.
+ALTER TABLE m1 ENABLE KEYS, ALGORITHM= INPLACE, LOCK= SHARED;
+ERROR 0A000: LOCK=NONE/SHARED is not supported for this operation. Try LOCK=EXCLUSIVE.
+ALTER TABLE m1 ENABLE KEYS, ALGORITHM= INPLACE, LOCK= EXCLUSIVE;
+affected rows: 0
+ALTER TABLE m1 ENABLE KEYS, ALGORITHM= COPY, LOCK= NONE;
+ERROR 0A000: LOCK=NONE is not supported. Reason: COPY algorithm requires a lock. Try LOCK=SHARED.
+ALTER TABLE m1 ENABLE KEYS, ALGORITHM= COPY, LOCK= SHARED;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE m1 ENABLE KEYS, ALGORITHM= COPY, LOCK= EXCLUSIVE;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+DROP TABLE t1, m1;
+#
+# 6: Possible deadlock involving thr_lock.c
+#
+CREATE TABLE t1(a INT PRIMARY KEY, b INT);
+INSERT INTO t1 VALUES (1,1), (2,2);
+START TRANSACTION;
+INSERT INTO t1 VALUES (3,3);
+# Connection con1
+# Sending:
+ALTER TABLE t1 DISABLE KEYS;
+# Connection default
+# Waiting until ALTER TABLE is blocked.
+UPDATE t1 SET b = 4;
+COMMIT;
+# Connection con1
+# Reaping: ALTER TABLE t1 DISABLE KEYS
+# Connection default
+DROP TABLE t1;
+#
+# 7: Which operations require copy and which can be done in-place?
+#
+# Test which ALTER TABLE operations are done in-place and
+# which operations are done using temporary table copy.
+#
+# --enable_info allows us to see how many rows were updated
+# by ALTER TABLE. in-place will show 0 rows, while copy > 0.
+#
+DROP TABLE IF EXISTS ti1, ti2, ti3, tm1, tm2, tm3;
+# Single operation tests
+CREATE TABLE ti1(a INT NOT NULL, b INT, c INT) engine=InnoDB;
+CREATE TABLE tm1(a INT NOT NULL, b INT, c INT) engine=MyISAM;
+CREATE TABLE ti2(a INT PRIMARY KEY AUTO_INCREMENT, b INT, c INT) engine=InnoDB;
+CREATE TABLE tm2(a INT PRIMARY KEY AUTO_INCREMENT, b INT, c INT) engine=MyISAM;
+INSERT INTO ti1 VALUES (1,1,1), (2,2,2);
+INSERT INTO ti2 VALUES (1,1,1), (2,2,2);
+INSERT INTO tm1 VALUES (1,1,1), (2,2,2);
+INSERT INTO tm2 VALUES (1,1,1), (2,2,2);
+ALTER TABLE ti1;
+affected rows: 0
+ALTER TABLE tm1;
+affected rows: 0
+ALTER TABLE ti1 ADD COLUMN d VARCHAR(200);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 ADD COLUMN d VARCHAR(200);
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 ADD COLUMN d2 VARCHAR(200);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 ADD COLUMN d2 VARCHAR(200);
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 ADD COLUMN e ENUM('a', 'b') FIRST;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 ADD COLUMN e ENUM('a', 'b') FIRST;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 ADD COLUMN f INT AFTER a;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 ADD COLUMN f INT AFTER a;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 ADD INDEX ii1(b);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 ADD INDEX im1(b);
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 ADD UNIQUE INDEX ii2 (c);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 ADD UNIQUE INDEX im2 (c);
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 ADD FULLTEXT INDEX ii3 (d);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 1
+Warnings:
+Warning 124 InnoDB rebuilding table to add column FTS_DOC_ID
+ALTER TABLE tm1 ADD FULLTEXT INDEX im3 (d);
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 ADD FULLTEXT INDEX ii4 (d2);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 ADD FULLTEXT INDEX im4 (d2);
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 ADD PRIMARY KEY(a), ALGORITHM=INPLACE;
+ERROR 0A000: ALGORITHM=INPLACE is not supported. Reason: InnoDB presently supports one FULLTEXT index creation at a time. Try ALGORITHM=COPY.
+ALTER TABLE ti1 ADD PRIMARY KEY(a);
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 ADD PRIMARY KEY(a);
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 DROP INDEX ii3;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 DROP INDEX im3;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 DROP COLUMN d2;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 DROP COLUMN d2;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 ADD CONSTRAINT fi1 FOREIGN KEY (b) REFERENCES ti2(a);
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 ADD CONSTRAINT fm1 FOREIGN KEY (b) REFERENCES tm2(a);
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 ALTER COLUMN b SET DEFAULT 1;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 ALTER COLUMN b SET DEFAULT 1;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 ALTER COLUMN b DROP DEFAULT;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 ALTER COLUMN b DROP DEFAULT;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 CHANGE COLUMN f g INT;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 CHANGE COLUMN f g INT;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 CHANGE COLUMN g h VARCHAR(20);
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 CHANGE COLUMN g h VARCHAR(20);
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 MODIFY COLUMN e ENUM('a', 'b', 'c');
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 MODIFY COLUMN e ENUM('a', 'b', 'c');
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 MODIFY COLUMN e INT;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 MODIFY COLUMN e INT;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 MODIFY COLUMN e INT AFTER h;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 MODIFY COLUMN e INT AFTER h;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 MODIFY COLUMN e INT FIRST;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 MODIFY COLUMN e INT FIRST;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+SET @orig_sql_mode = @@sql_mode;
+SET @@sql_mode = 'STRICT_TRANS_TABLES';
+ALTER TABLE ti1 MODIFY COLUMN c INT NOT NULL;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+SET @@sql_mode = @orig_sql_mode;
+ALTER TABLE tm1 MODIFY COLUMN c INT NOT NULL;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 MODIFY COLUMN c INT NULL;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 MODIFY COLUMN c INT NULL;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 MODIFY COLUMN h VARCHAR(30);
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 MODIFY COLUMN h VARCHAR(30);
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 MODIFY COLUMN h VARCHAR(30) AFTER d;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 MODIFY COLUMN h VARCHAR(30) AFTER d;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 DROP COLUMN h;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 DROP COLUMN h;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 DROP INDEX ii2;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 DROP INDEX im2;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 DROP PRIMARY KEY;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 DROP PRIMARY KEY;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 DROP FOREIGN KEY fi1;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 DROP FOREIGN KEY fm1;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 RENAME TO ti3;
+affected rows: 0
+ALTER TABLE tm1 RENAME TO tm3;
+affected rows: 0
+ALTER TABLE ti3 RENAME TO ti1;
+affected rows: 0
+ALTER TABLE tm3 RENAME TO tm1;
+affected rows: 0
+ALTER TABLE ti1 ORDER BY b;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 ORDER BY b;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 CONVERT TO CHARACTER SET utf16;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 CONVERT TO CHARACTER SET utf16;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 DEFAULT CHARACTER SET utf8;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 DEFAULT CHARACTER SET utf8;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 FORCE;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 FORCE;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 AUTO_INCREMENT 3;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 AUTO_INCREMENT 3;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 AVG_ROW_LENGTH 10;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 AVG_ROW_LENGTH 10;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 CHECKSUM 1;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 CHECKSUM 1;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 COMMENT 'test';
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 COMMENT 'test';
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 MAX_ROWS 100;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 MAX_ROWS 100;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 MIN_ROWS 1;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 MIN_ROWS 1;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE ti1 PACK_KEYS 1;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE tm1 PACK_KEYS 1;
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
+DROP TABLE ti1, ti2, tm1, tm2;
+# Tests of >1 operation (InnoDB)
+CREATE TABLE ti1(a INT PRIMARY KEY AUTO_INCREMENT, b INT) engine=InnoDB;
+INSERT INTO ti1(b) VALUES (1), (2);
+ALTER TABLE ti1 RENAME TO ti3, ADD INDEX ii1(b);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE ti3 DROP INDEX ii1, AUTO_INCREMENT 5;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+INSERT INTO ti3(b) VALUES (5);
+ALTER TABLE ti3 ADD INDEX ii1(b), AUTO_INCREMENT 7;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+INSERT INTO ti3(b) VALUES (7);
+SELECT * FROM ti3;
+a b
+1 1
+2 2
+5 5
+7 7
+DROP TABLE ti3;
+#
+# 8: Scenario in which ALTER TABLE was returning an unwarranted
+# ER_ILLEGAL_HA error at some point during work on this WL.
+#
+CREATE TABLE tm1(i INT DEFAULT 1) engine=MyISAM;
+ALTER TABLE tm1 ADD INDEX ii1(i), ALTER COLUMN i DROP DEFAULT;
+DROP TABLE tm1;
diff --git a/mysql-test/r/alter_table_online.result b/mysql-test/r/alter_table_online.result
index 83e82191541..1e7bc5e83cd 100644
--- a/mysql-test/r/alter_table_online.result
+++ b/mysql-test/r/alter_table_online.result
@@ -11,61 +11,59 @@ drop table t1;
create temporary table t1 (a int not null primary key, b int, c varchar(80), e enum('a','b'));
insert into t1 (a) values (1),(2),(3);
alter online table t1 modify b int default 5;
-ERROR HY000: Can't execute the given 'ALTER' command as online
+ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. Try ALGORITHM=COPY.
alter online table t1 change b new_name int;
-ERROR HY000: Can't execute the given 'ALTER' command as online
+ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. Try ALGORITHM=COPY.
alter online table t1 modify e enum('a','b','c');
-ERROR HY000: Can't execute the given 'ALTER' command as online
+ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. Try ALGORITHM=COPY.
alter online table t1 comment "new comment";
-ERROR HY000: Can't execute the given 'ALTER' command as online
+ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. Try ALGORITHM=COPY.
alter online table t1 rename to t2;
-ERROR HY000: Can't execute the given 'ALTER' command as online
+ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. Try ALGORITHM=COPY.
drop table t1;
create table t1 (a int not null primary key, b int, c varchar(80), e enum('a','b'));
insert into t1 (a) values (1),(2),(3);
alter online table t1 drop column b, add b int;
-ERROR HY000: Can't execute the given 'ALTER' command as online
+ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. Try ALGORITHM=COPY.
alter online table t1 modify b bigint;
-ERROR HY000: Can't execute the given 'ALTER' command as online
+ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. Try ALGORITHM=COPY.
alter online table t1 modify e enum('c','a','b');
-ERROR HY000: Can't execute the given 'ALTER' command as online
+ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. Try ALGORITHM=COPY.
alter online table t1 modify c varchar(50);
-ERROR HY000: Can't execute the given 'ALTER' command as online
+ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. Try ALGORITHM=COPY.
alter online table t1 modify c varchar(100);
-ERROR HY000: Can't execute the given 'ALTER' command as online
+ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. Try ALGORITHM=COPY.
alter online table t1 add f int;
-ERROR HY000: Can't execute the given 'ALTER' command as online
+ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. Try ALGORITHM=COPY.
alter online table t1 engine=memory;
-ERROR HY000: Can't execute the given 'ALTER' command as online
+ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. Try ALGORITHM=COPY.
alter table t1 engine=innodb;
alter table t1 add index (b);
alter online table t1 add index c (c);
-ERROR HY000: Can't execute the given 'ALTER' command as online
alter online table t1 drop index b;
-ERROR HY000: Can't execute the given 'ALTER' command as online
drop table t1;
create temporary table t1 (a int not null primary key, b int, c varchar(80), e enum('a','b'));
insert into t1 (a) values (1),(2),(3);
alter online table t1 drop column b, add b int;
-ERROR HY000: Can't execute the given 'ALTER' command as online
+ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. Try ALGORITHM=COPY.
alter online table t1 modify b bigint;
-ERROR HY000: Can't execute the given 'ALTER' command as online
+ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. Try ALGORITHM=COPY.
alter online table t1 modify e enum('c','a','b');
-ERROR HY000: Can't execute the given 'ALTER' command as online
+ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. Try ALGORITHM=COPY.
alter online table t1 modify c varchar(50);
-ERROR HY000: Can't execute the given 'ALTER' command as online
+ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. Try ALGORITHM=COPY.
alter online table t1 modify c varchar(100);
-ERROR HY000: Can't execute the given 'ALTER' command as online
+ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. Try ALGORITHM=COPY.
alter online table t1 add f int;
-ERROR HY000: Can't execute the given 'ALTER' command as online
+ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. Try ALGORITHM=COPY.
alter online table t1 engine=memory;
-ERROR HY000: Can't execute the given 'ALTER' command as online
+ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. Try ALGORITHM=COPY.
alter table t1 engine=innodb;
alter table t1 add index (b);
alter online table t1 add index c (c);
-ERROR HY000: Can't execute the given 'ALTER' command as online
+ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. Try ALGORITHM=COPY.
alter online table t1 drop index b;
-ERROR HY000: Can't execute the given 'ALTER' command as online
+ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. Try ALGORITHM=COPY.
drop table t1;
create table t1 (a int not null primary key, b int, c varchar(80));
create table t2 (a int not null primary key, b int, c varchar(80));
diff --git a/mysql-test/r/bootstrap.result b/mysql-test/r/bootstrap.result
index 8bef6f90ab4..2e2082441f8 100644
--- a/mysql-test/r/bootstrap.result
+++ b/mysql-test/r/bootstrap.result
@@ -1,7 +1,7 @@
drop table if exists t1;
drop table t1;
drop table t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
set @my_max_allowed_packet= @@max_allowed_packet;
set global max_allowed_packet=100*@@max_allowed_packet;
set global max_allowed_packet=@my_max_allowed_packet;
diff --git a/mysql-test/r/cast.result b/mysql-test/r/cast.result
index 03b4b84e461..3b57b4833a9 100644
--- a/mysql-test/r/cast.result
+++ b/mysql-test/r/cast.result
@@ -768,13 +768,19 @@ CAST(CAST('20:05:05' AS TIME) as date)
set sql_mode= TRADITIONAL;
select cast("2101-00-01 02:03:04" as datetime);
cast("2101-00-01 02:03:04" as datetime)
-2101-00-01 02:03:04
+NULL
+Warnings:
+Warning 1292 Incorrect datetime value: '2101-00-01 02:03:04'
select cast(cast("2101-00-01 02:03:04" as datetime) as time);
cast(cast("2101-00-01 02:03:04" as datetime) as time)
-02:03:04
+NULL
+Warnings:
+Warning 1292 Incorrect datetime value: '2101-00-01 02:03:04'
SELECT CAST(CAST('20:05:05' AS TIME) as date);
CAST(CAST('20:05:05' AS TIME) as date)
-0000-00-00
+NULL
+Warnings:
+Warning 1292 Truncated incorrect date value: '0000-00-00'
set sql_mode=DEFAULT;
create table t1 (f1 time, f2 date, f3 datetime);
insert into t1 values ('11:22:33','2011-12-13','2011-12-13 11:22:33');
diff --git a/mysql-test/r/commit_1innodb.result b/mysql-test/r/commit_1innodb.result
index af198edc4ca..3583e8ed396 100644
--- a/mysql-test/r/commit_1innodb.result
+++ b/mysql-test/r/commit_1innodb.result
@@ -1,6 +1,6 @@
call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT");
set sql_mode=no_engine_substitution;
-set storage_engine = InnoDB;
+set default_storage_engine = InnoDB;
set autocommit=1;
drop table if exists t1;
drop table if exists t2;
@@ -842,7 +842,7 @@ call p_verify_status_increment(2, 0, 2, 0);
SUCCESS
alter table t3 rename t4;
-call p_verify_status_increment(2, 0, 2, 0);
+call p_verify_status_increment(0, 0, 0, 0);
SUCCESS
rename table t4 to t3;
diff --git a/mysql-test/r/connect.result b/mysql-test/r/connect.result
index 3fcc5b5d2c1..84a1e9dbab2 100644
--- a/mysql-test/r/connect.result
+++ b/mysql-test/r/connect.result
@@ -16,15 +16,11 @@ host
index_stats
innodb_index_stats
innodb_table_stats
-ndb_binlog_index
plugin
proc
procs_priv
proxies_priv
servers
-slave_master_info
-slave_relay_log_info
-slave_worker_info
slow_log
table_stats
tables_priv
@@ -59,15 +55,11 @@ host
index_stats
innodb_index_stats
innodb_table_stats
-ndb_binlog_index
plugin
proc
procs_priv
proxies_priv
servers
-slave_master_info
-slave_relay_log_info
-slave_worker_info
slow_log
table_stats
tables_priv
@@ -110,15 +102,11 @@ host
index_stats
innodb_index_stats
innodb_table_stats
-ndb_binlog_index
plugin
proc
procs_priv
proxies_priv
servers
-slave_master_info
-slave_relay_log_info
-slave_worker_info
slow_log
table_stats
tables_priv
diff --git a/mysql-test/r/create.result b/mysql-test/r/create.result
index d0d953f4e38..fff8733cfdf 100644
--- a/mysql-test/r/create.result
+++ b/mysql-test/r/create.result
@@ -28,15 +28,15 @@ create table t2 select auto+1 from t1;
ERROR 42S02: Table 'test.t1' doesn't exist
drop table if exists t1,t2;
Warnings:
-Note 1051 Unknown table 't1'
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'test.t1'
+Note 1051 Unknown table 'test.t2'
create table t1 (b char(0) not null, index(b));
ERROR 42000: The storage engine MyISAM can't index column `b`
create table t1 (a int not null,b text) engine=heap;
ERROR 42000: Storage engine MEMORY doesn't support BLOB/TEXT columns
drop table if exists t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
create table t1 (ordid int(8) not null auto_increment, ord varchar(50) not null, primary key (ord,ordid)) engine=heap;
ERROR 42000: Incorrect table definition; there can be only one auto column and it must be defined as a key
create table not_existing_database.test (a int);
@@ -157,17 +157,17 @@ create table t2 (a int, a float) select * from t1;
ERROR 42S21: Duplicate column name 'a'
drop table if exists t2;
Warnings:
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'test.t2'
create table t2 (a int) select a as b, a+1 as b from t1;
ERROR 42S21: Duplicate column name 'b'
drop table if exists t2;
Warnings:
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'test.t2'
create table t2 (b int) select a as b, a+1 as b from t1;
ERROR 42S21: Duplicate column name 'b'
drop table if exists t1,t2;
Warnings:
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'test.t2'
CREATE TABLE t1 (a int not null);
INSERT INTO t1 values (1),(2),(1);
CREATE TABLE t2 (primary key(a)) SELECT * FROM t1;
@@ -177,7 +177,7 @@ ERROR 42S02: Table 'test.t2' doesn't exist
DROP TABLE t1;
DROP TABLE IF EXISTS t2;
Warnings:
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'test.t2'
create table t1 (a int not null, b int, primary key(a), key (b), key (b), key (b), key (b), key (b), key (b), key (b), key (b), key (b), key (b), key (b), key (b), key (b), key (b), key (b), key (b), key (b), key (b), key (b), key (b), key (b), key (b), key (b), key (b), key (b), key (b), key (b), key (b), key (b), key (b), key (b));
show create table t1;
Table Create Table
@@ -1610,12 +1610,12 @@ CREATE TABLE t2 (primary key (a)) select * from t1;
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
drop table if exists t2;
Warnings:
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'test.t2'
CREATE TEMPORARY TABLE t2 (primary key (a)) select * from t1;
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
drop table if exists t2;
Warnings:
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'test.t2'
CREATE TABLE t2 (a int, b int, primary key (a));
INSERT INTO t2 select * from t1;
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
diff --git a/mysql-test/r/ctype_errors.result b/mysql-test/r/ctype_errors.result
index 90d0c28eebf..5ae8c53ce8b 100644
--- a/mysql-test/r/ctype_errors.result
+++ b/mysql-test/r/ctype_errors.result
@@ -24,11 +24,11 @@ lc_messages ru_RU
SET GLOBAL lc_messages=en_US;
DROP TABLE t1;
drop table `ק`;
-ERROR 42S02: Unknown table 'ק'
+ERROR 42S02: Unknown table 'test.ק'
SET lc_messages=cs_CZ;
SET NAMES UTF8;
USE nonexistant;
-ERROR 42000: Nezn-Bámá databáze 'nonexistant'
+ERROR 42000: Neznámá databáze 'nonexistant'
#
# Bug#12736295: Buffer overflow for variable converted_err
# with non-latin1 server error message
diff --git a/mysql-test/r/ctype_tis620.result b/mysql-test/r/ctype_tis620.result
index 5699c044d70..c86b8392b32 100644
--- a/mysql-test/r/ctype_tis620.result
+++ b/mysql-test/r/ctype_tis620.result
@@ -138,7 +138,7 @@ year
DROP TABLE t1;
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1
(
name varchar(50) NOT NULL default '',
diff --git a/mysql-test/r/ctype_ujis.result b/mysql-test/r/ctype_ujis.result
index b801a7f45a4..3db6aee37cc 100644
--- a/mysql-test/r/ctype_ujis.result
+++ b/mysql-test/r/ctype_ujis.result
@@ -94,7 +94,7 @@ select @ujis4 = CONVERT(@utf84 USING ujis);
1
drop table if exists t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
create table t1 (c1 varchar(8)) default character set 'ujis';
insert into t1 values (0xA4A2),(0xA2A2),(0xA4A2);
select c1 as 'no index' from t1 where c1 like cast(concat(0xA4A2, '%') as char character set ujis);
@@ -168,7 +168,7 @@ a b
DROP TABLE t1;
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1(c char(1)) character set ujis;
INSERT INTO t1 VALUES(0xA2AF);
INSERT INTO t1 VALUES(0xA2B0);
diff --git a/mysql-test/r/ctype_utf8.result b/mysql-test/r/ctype_utf8.result
index d25c454913d..714b4183594 100644
--- a/mysql-test/r/ctype_utf8.result
+++ b/mysql-test/r/ctype_utf8.result
@@ -1229,7 +1229,7 @@ DROP TABLE t1;
SET NAMES utf8;
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1(a VARCHAR(255), KEY(a)) ENGINE=MyISAM DEFAULT CHARSET=utf8;
INSERT INTO t1 VALUES('uuABCDEFGHIGKLMNOPRSTUVWXYZ̈bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb');
INSERT INTO t1 VALUES('uu');
diff --git a/mysql-test/r/ctype_utf8mb4.result b/mysql-test/r/ctype_utf8mb4.result
index d8642955b89..f4be208e0f7 100644
--- a/mysql-test/r/ctype_utf8mb4.result
+++ b/mysql-test/r/ctype_utf8mb4.result
@@ -1256,7 +1256,7 @@ DROP TABLE t1;
SET NAMES utf8mb4;
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1(a VARCHAR(255), KEY(a)) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4;
Warnings:
Warning 1071 Specified key was too long; max key length is 1000 bytes
@@ -2452,7 +2452,6 @@ MODIFY subject varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci,
MODIFY p varchar(255) CHARACTER SET utf8;
Warnings:
Warning 1071 Specified key was too long; max key length is 1000 bytes
-Warning 1071 Specified key was too long; max key length is 1000 bytes
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -2539,8 +2538,7 @@ t2 CREATE TABLE `t2` (
) ENGINE=MyISAM DEFAULT CHARSET=latin1
DROP TABLE t1, t2;
#
-# Bug#13581962 HIGH MEMORY USAGE ATTEMPT, THEN CRASH WITH
-# LONGTEXT, UNION, USER VARIABLE
+# Bug#13581962 HIGH MEMORY USAGE ATTEMPT, THEN CRASH WITH LONGTEXT, UNION, USER VARIABLE
# Bug#14096619 UNABLE TO RESTORE DATABASE DUMP
#
CREATE TABLE t1(f1 LONGTEXT CHARACTER SET utf8mb4);
diff --git a/mysql-test/r/ctype_utf8mb4_heap.result b/mysql-test/r/ctype_utf8mb4_heap.result
index 63de75b37b7..94ea59c1a0c 100644
--- a/mysql-test/r/ctype_utf8mb4_heap.result
+++ b/mysql-test/r/ctype_utf8mb4_heap.result
@@ -1160,7 +1160,7 @@ DROP TABLE t1;
SET NAMES utf8mb4;
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1(a VARCHAR(255), KEY(a)) ENGINE=heap DEFAULT CHARSET=utf8mb4;
INSERT INTO t1 VALUES('uuABCDEFGHIGKLMNOPRSTUVWXYZ̈bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb');
INSERT INTO t1 VALUES('uu');
diff --git a/mysql-test/r/ctype_utf8mb4_innodb.result b/mysql-test/r/ctype_utf8mb4_innodb.result
index 2db7066d478..b0e5bcef176 100644
--- a/mysql-test/r/ctype_utf8mb4_innodb.result
+++ b/mysql-test/r/ctype_utf8mb4_innodb.result
@@ -1231,7 +1231,7 @@ DROP TABLE t1;
SET NAMES utf8mb4;
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1(a VARCHAR(255), KEY(a)) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
Warnings:
Warning 1071 Specified key was too long; max key length is 767 bytes
diff --git a/mysql-test/r/ctype_utf8mb4_myisam.result b/mysql-test/r/ctype_utf8mb4_myisam.result
index b82e5687eda..6f5d79ff6df 100644
--- a/mysql-test/r/ctype_utf8mb4_myisam.result
+++ b/mysql-test/r/ctype_utf8mb4_myisam.result
@@ -1231,7 +1231,7 @@ DROP TABLE t1;
SET NAMES utf8mb4;
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1(a VARCHAR(255), KEY(a)) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4;
Warnings:
Warning 1071 Specified key was too long; max key length is 1000 bytes
diff --git a/mysql-test/r/drop.result b/mysql-test/r/drop.result
index 55309e54fb3..209eb896978 100644
--- a/mysql-test/r/drop.result
+++ b/mysql-test/r/drop.result
@@ -2,7 +2,7 @@ drop table if exists t1;
drop database if exists mysqltest;
drop database if exists client_test_db;
drop table t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
create table t1(n int);
insert into t1 values(1);
create temporary table t1( n int);
@@ -30,13 +30,13 @@ table7, table8, table9, table10, table11, table12, table13,
table14, table15, table16, table17, table18, table19, table20,
table21, table22, table23, table24, table25, table26, table27,
table28;
-ERROR 42S02: Unknown table 'table1,table2,table3,table4,table5,table6,table7,table8,table9,table10,table11,table12,table13,table'
+ERROR 42S02: Unknown table 'mysqltest.table1,mysqltest.table2,mysqltest.table3,mysqltest.table4,mysqltest.table5,mysqltest.table'
drop table table1, table2, table3, table4, table5, table6,
table7, table8, table9, table10, table11, table12, table13,
table14, table15, table16, table17, table18, table19, table20,
table21, table22, table23, table24, table25, table26, table27,
table28, table29, table30;
-ERROR 42S02: Unknown table 'table1,table2,table3,table4,table5,table6,table7,table8,table9,table10,table11,table12,table13,table'
+ERROR 42S02: Unknown table 'mysqltest.table1,mysqltest.table2,mysqltest.table3,mysqltest.table4,mysqltest.table5,mysqltest.table'
use test;
drop database mysqltest;
flush tables with read lock;
@@ -154,10 +154,10 @@ End of 5.1 tests
# --
DROP TABLE IF EXISTS t1;
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SHOW WARNINGS;
Level Code Message
-Error 1051 Unknown table 't1'
+Error 1051 Unknown table 'test.t1'
# --
# -- End of Bug#37431.
diff --git a/mysql-test/r/dyncol.result b/mysql-test/r/dyncol.result
index 172179bd7f4..c5040728af7 100644
--- a/mysql-test/r/dyncol.result
+++ b/mysql-test/r/dyncol.result
@@ -1036,9 +1036,7 @@ Warnings:
Warning 1292 Truncated incorrect time value: '2011-13-01 8:46:06.23434'
select column_get(column_create(1, "2011-02-30 8:46:06.23434" AS CHAR), 1 as time);
column_get(column_create(1, "2011-02-30 8:46:06.23434" AS CHAR), 1 as time)
-NULL
-Warnings:
-Warning 1292 Truncated incorrect time value: '2011-02-30 8:46:06.23434'
+08:46:06
select column_get(column_create(1, "2001-02-03"), 1 as time);
column_get(column_create(1, "2001-02-03"), 1 as time)
00:20:01
diff --git a/mysql-test/r/error_simulation.result b/mysql-test/r/error_simulation.result
index d2f5a24ef1d..88a9d114bc6 100644
--- a/mysql-test/r/error_simulation.result
+++ b/mysql-test/r/error_simulation.result
@@ -1,6 +1,6 @@
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1 (
a varchar(32) character set utf8 collate utf8_bin NOT NULL,
b varchar(32) character set utf8 collate utf8_bin NOT NULL )
diff --git a/mysql-test/r/events_restart.result b/mysql-test/r/events_restart.result
index 6a751fa29f8..ba3aa503b63 100644
--- a/mysql-test/r/events_restart.result
+++ b/mysql-test/r/events_restart.result
@@ -65,3 +65,26 @@ select @@event_scheduler;
ON
drop table execution_log;
drop database events_test;
+#
+# Test for bug#11748899 -- EVENT SET TO DISABLED AND ON COMPLETION
+# NOT PRESERVE IS DELETED AT SERVER
+#
+SELECT @@event_scheduler;
+@@event_scheduler
+ON
+USE test;
+DROP EVENT IF EXISTS e1;
+CREATE EVENT e1 ON SCHEDULE EVERY 1 SECOND DISABLE DO SELECT 1;
+SHOW EVENTS;
+Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator character_set_client collation_connection Database Collation
+test e1 root@localhost SYSTEM RECURRING # 1 SECOND # # DISABLED 1 latin1 latin1_swedish_ci latin1_swedish_ci
+"Now we restart the server"
+USE test;
+SELECT @@event_scheduler;
+@@event_scheduler
+ON
+SHOW EVENTS;
+Db Name Definer Time zone Type Execute at Interval value Interval field Starts Ends Status Originator character_set_client collation_connection Database Collation
+test e1 root@localhost SYSTEM RECURRING # 1 SECOND # # DISABLED 1 latin1 latin1_swedish_ci latin1_swedish_ci
+DROP EVENT e1;
+# end test for bug#11748899
diff --git a/mysql-test/r/flush_read_lock.result b/mysql-test/r/flush_read_lock.result
index 05fab64330d..c2e8531d01f 100644
--- a/mysql-test/r/flush_read_lock.result
+++ b/mysql-test/r/flush_read_lock.result
@@ -544,11 +544,10 @@ Success: Was not able to run 'drop table t2_base' under FTWRL.
Success: 'drop table t2_base' is blocked by FTWRL active in another connection.
Success: FTWRL is blocked when 'drop table t2_base' is active in another connection.
# 13.1.b) DROP TABLES which affects only temporary tables
-# in theory can be compatible with FTWRL.
-# In practice it is not yet.
-Success: Was not able to run 'drop table t2_temp' under FTWRL.
-Success: 'drop table t2_temp' is blocked by FTWRL active in another connection.
-Success: FTWRL is blocked when 'drop table t2_temp' is active in another connection.
+# is compatible with FTWRL.
+Success: Was able to run 'drop table t2_temp' under FTWRL.
+Success: Was able to run 'drop table t2_temp' with FTWRL active in another connection.
+Success: Was able to run FTWRL while 'drop table t2_temp' was active in another connection.
#
# 13.1.c) DROP TEMPORARY TABLES should be compatible with FTWRL.
Success: Was able to run 'drop temporary table t2_temp' under FTWRL.
@@ -1461,24 +1460,10 @@ Success: Was able to run 'analyze table t3_temp_trans' under FTWRL.
Success: Was able to run 'analyze table t3_temp_trans' with FTWRL active in another connection.
Success: Was able to run FTWRL while 'analyze table t3_temp_trans' was active in another connection.
#
-# 39.2.c) Some statements do implicit commit and not
-# considered read-only. As result they are
-# not compatible with FTWRL.
-#
-flush tables with read lock;
-# Implicit commits are allowed under FTWRL.
-alter table t3_temp_trans add column c1 int;
-unlock tables;
-#
-# Switching to connection 'con1'.
-flush tables with read lock;
-# Switching to connection 'default'.
-alter table t3_temp_trans drop column c1;
-# Switching to connection 'con1'.
-# Check that ALTER TABLE is blocked.
-unlock tables;
-# Switching to connection 'default'.
-# Reap ALTER TABLE
+# And ALTER TABLE:
+Success: Was able to run 'alter table t3_temp_trans add column c1 int' under FTWRL.
+Success: Was able to run 'alter table t3_temp_trans add column c1 int' with FTWRL active in another connection.
+Success: Was able to run FTWRL while 'alter table t3_temp_trans add column c1 int' was active in another connection.
#
# 40) Test effect of implicit commit for DDL which is otherwise
# compatible with FTWRL. Implicit commit at the start of DDL
diff --git a/mysql-test/r/func_analyse.result b/mysql-test/r/func_analyse.result
index f82439090f6..2c300559a32 100644
--- a/mysql-test/r/func_analyse.result
+++ b/mysql-test/r/func_analyse.result
@@ -128,7 +128,7 @@ End of 5.0 tests
#
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1 (a VARCHAR(2) CHARSET UTF8 NOT NULL);
INSERT INTO t1 VALUES ('e'),('e'),('e-');
SELECT * FROM t1 PROCEDURE ANALYSE();
diff --git a/mysql-test/r/func_crypt.result b/mysql-test/r/func_crypt.result
index c2f369b3941..1eda56ac114 100644
--- a/mysql-test/r/func_crypt.result
+++ b/mysql-test/r/func_crypt.result
@@ -43,7 +43,7 @@ old_password(NULL)
NULL
select password(NULL);
password(NULL)
-NULL
+
set global old_passwords=on;
select password('');
password('')
diff --git a/mysql-test/r/func_rollback.result b/mysql-test/r/func_rollback.result
index 57968910051..91151302a06 100644
--- a/mysql-test/r/func_rollback.result
+++ b/mysql-test/r/func_rollback.result
@@ -190,8 +190,6 @@ END;
SELECT f1_insert_select(2);
f1_insert_select(2)
1
-Warnings:
-Warning 1048 Column 'f2' cannot be null
SELECT * FROM t1_not_null ORDER BY f1,f2;
f1 f2
2 0
@@ -267,8 +265,6 @@ END;
SELECT f1_insert_with_two_rows();
f1_insert_with_two_rows()
1
-Warnings:
-Warning 1048 Column 'f2' cannot be null
SELECT * FROM t1_not_null ORDER BY f1,f2;
f1 f2
10 0
diff --git a/mysql-test/r/func_sapdb.result b/mysql-test/r/func_sapdb.result
index 72c7a5a128f..ace7283e192 100644
--- a/mysql-test/r/func_sapdb.result
+++ b/mysql-test/r/func_sapdb.result
@@ -62,7 +62,9 @@ datediff("1997-11-30 23:59:59.000001","1997-12-31")
SET @@SQL_MODE="ALLOW_INVALID_DATES";
select datediff("1997-11-31 23:59:59.000001","1997-12-31");
datediff("1997-11-31 23:59:59.000001","1997-12-31")
--30
+NULL
+Warnings:
+Warning 1292 Incorrect datetime value: '1997-11-31 23:59:59.000001'
SET @@SQL_MODE="";
select datediff("1997-11-31 23:59:59.000001","1997-12-31");
datediff("1997-11-31 23:59:59.000001","1997-12-31")
diff --git a/mysql-test/r/gis-rtree.result b/mysql-test/r/gis-rtree.result
index 22c30479125..c400ebb39ef 100644
--- a/mysql-test/r/gis-rtree.result
+++ b/mysql-test/r/gis-rtree.result
@@ -712,7 +712,7 @@ count(*)
DROP TABLE t2;
drop table if exists t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1 (a geometry NOT NULL, SPATIAL (a));
INSERT INTO t1 VALUES (GeomFromText("LINESTRING(100 100, 200 200, 300 300)"));
INSERT INTO t1 VALUES (GeomFromText("LINESTRING(100 100, 200 200, 300 300)"));
diff --git a/mysql-test/r/grant.result b/mysql-test/r/grant.result
index f1cf94e4f19..be05f17c281 100644
--- a/mysql-test/r/grant.result
+++ b/mysql-test/r/grant.result
@@ -55,6 +55,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
show grants for mysqltest_1@localhost;
Grants for mysqltest_1@localhost
GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost' REQUIRE CIPHER 'EDH-RSA-DES-CBC3-SHA'
@@ -126,6 +127,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
show grants for mysqltest_1@localhost;
Grants for mysqltest_1@localhost
GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost' WITH MAX_QUERIES_PER_HOUR 10
@@ -173,6 +175,7 @@ max_connections 30
max_user_connections 0
plugin
authentication_string
+password_expired N
show grants for mysqltest_1@localhost;
Grants for mysqltest_1@localhost
GRANT USAGE ON *.* TO 'mysqltest_1'@'localhost' WITH MAX_QUERIES_PER_HOUR 10 MAX_UPDATES_PER_HOUR 20 MAX_CONNECTIONS_PER_HOUR 30
@@ -1353,7 +1356,7 @@ FLUSH PRIVILEGES;
DROP TABLE mysql.user;
drop table if exists test;
Warnings:
-Note 1051 Unknown table 'test'
+Note 1051 Unknown table 'test.test'
drop function if exists test_function;
Warnings:
Note 1305 FUNCTION test.test_function does not exist
diff --git a/mysql-test/r/handlersocket.result b/mysql-test/r/handlersocket.result
index 765d954d3dc..e1fbc2d9840 100644
--- a/mysql-test/r/handlersocket.result
+++ b/mysql-test/r/handlersocket.result
@@ -5,7 +5,7 @@ plugin_version 1.0
plugin_status ACTIVE
plugin_type DAEMON
plugin_library handlersocket.so
-plugin_library_version 1.5
+plugin_library_version 1.7
plugin_author higuchi dot akira at dena dot jp
plugin_description Direct access into InnoDB
plugin_license BSD
diff --git a/mysql-test/r/innodb_mysql_sync.result b/mysql-test/r/innodb_mysql_sync.result
index 7c41ffec344..2164b936938 100644
--- a/mysql-test/r/innodb_mysql_sync.result
+++ b/mysql-test/r/innodb_mysql_sync.result
@@ -101,7 +101,7 @@ DROP TABLE IF EXISTS t1;
CREATE DATABASE db1;
CREATE TABLE db1.t1(id INT NOT NULL AUTO_INCREMENT PRIMARY KEY, value INT) engine=innodb;
INSERT INTO db1.t1(value) VALUES (1), (2);
-SET DEBUG_SYNC= "alter_table_manage_keys SIGNAL manage WAIT_FOR query";
+SET DEBUG_SYNC= "alter_table_inplace_after_lock_downgrade SIGNAL manage WAIT_FOR query";
# Sending:
ALTER TABLE db1.t1 ADD INDEX(value);
# Connection con1
@@ -115,45 +115,47 @@ SET DEBUG_SYNC= "now SIGNAL query";
# Connection default
# Reaping: ALTER TABLE db1.t1 ADD INDEX(value)
DROP DATABASE db1;
-# Test 2: Primary index (implicit), should block reads.
+# Test 2: Primary index (implicit), should block writes.
CREATE TABLE t1(a INT NOT NULL, b INT NOT NULL) engine=innodb;
-SET DEBUG_SYNC= "alter_table_manage_keys SIGNAL manage WAIT_FOR query";
+SET DEBUG_SYNC= "alter_table_inplace_after_lock_downgrade SIGNAL manage WAIT_FOR query";
# Sending:
-ALTER TABLE t1 ADD UNIQUE INDEX(a);
+ALTER TABLE t1 ADD UNIQUE INDEX(a), LOCK=SHARED;
# Connection con1
SET DEBUG_SYNC= "now WAIT_FOR manage";
USE test;
-# Sending:
SELECT * FROM t1;
+a b
+# Sending:
+UPDATE t1 SET a=NULL;
# Connection con2
# Waiting for SELECT to be blocked by the metadata lock on t1
SET DEBUG_SYNC= "now SIGNAL query";
# Connection default
# Reaping: ALTER TABLE t1 ADD UNIQUE INDEX(a)
# Connection con1
-# Reaping: SELECT * FROM t1
-a b
-# Test 3: Primary index (explicit), should block reads.
+# Reaping: UPDATE t1 SET a=NULL
+# Test 3: Primary index (explicit), should block writes.
# Connection default
ALTER TABLE t1 DROP INDEX a;
-SET DEBUG_SYNC= "alter_table_manage_keys SIGNAL manage WAIT_FOR query";
+SET DEBUG_SYNC= "alter_table_inplace_after_lock_downgrade SIGNAL manage WAIT_FOR query";
# Sending:
-ALTER TABLE t1 ADD PRIMARY KEY (a);
+ALTER TABLE t1 ADD PRIMARY KEY (a), LOCK=SHARED;
# Connection con1
SET DEBUG_SYNC= "now WAIT_FOR manage";
-# Sending:
SELECT * FROM t1;
+a b
+# Sending:
+UPDATE t1 SET a=NULL;
# Connection con2
# Waiting for SELECT to be blocked by the metadata lock on t1
SET DEBUG_SYNC= "now SIGNAL query";
# Connection default
# Reaping: ALTER TABLE t1 ADD PRIMARY KEY (a)
# Connection con1
-# Reaping: SELECT * FROM t1
-a b
+# Reaping: UPDATE t1 SET a=NULL
# Test 4: Secondary unique index, should not block reads.
# Connection default
-SET DEBUG_SYNC= "alter_table_manage_keys SIGNAL manage WAIT_FOR query";
+SET DEBUG_SYNC= "alter_table_inplace_after_lock_downgrade SIGNAL manage WAIT_FOR query";
# Sending:
ALTER TABLE t1 ADD UNIQUE (b);
# Connection con1
@@ -186,3 +188,170 @@ a b
1 12345
2 23456
DROP TABLE t1;
+#
+# Bug#13417754 ASSERT IN ROW_DROP_DATABASE_FOR_MYSQL DURING DROP SCHEMA
+#
+DROP TABLE IF EXISTS t1;
+DROP DATABASE IF EXISTS db1;
+CREATE TABLE t1(a int) engine=InnoDB;
+CREATE DATABASE db1;
+# Connection con1
+SET DEBUG_SYNC= 'after_innobase_rename_table SIGNAL locked WAIT_FOR continue';
+# Sending:
+ALTER TABLE t1 RENAME db1.t1;
+# Connection con2
+SET DEBUG_SYNC= 'now WAIT_FOR locked';
+# DROP DATABASE db1 should now be blocked by ALTER TABLE
+# Sending:
+DROP DATABASE db1;
+# Connection default
+# Check that DROP DATABASE is blocked by IX lock on db1
+# Resume ALTER TABLE
+SET DEBUG_SYNC= 'now SIGNAL continue';
+# Connection con1
+# Reaping: ALTER TABLE t1 RENAME db1.t1;
+# Connection con2
+# Reaping: DROP DATABASE db1
+# Connection default;
+SET DEBUG_SYNC= 'RESET';
+#
+# WL#5534 Online ALTER, Phase 1
+#
+# Multi thread tests.
+# See alter_table.test for single thread tests.
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1(a INT PRIMARY KEY, b INT) engine=InnoDB;
+INSERT INTO t1 VALUES (1,1), (2,2);
+SET DEBUG_SYNC= 'RESET';
+SET SESSION lock_wait_timeout= 1;
+#
+# 1: In-place + writes blocked.
+#
+# Connection default
+SET DEBUG_SYNC= 'alter_opened_table SIGNAL opened WAIT_FOR continue1';
+SET DEBUG_SYNC= 'alter_table_inplace_after_lock_upgrade SIGNAL upgraded WAIT_FOR continue2';
+SET DEBUG_SYNC= 'alter_table_inplace_before_commit SIGNAL beforecommit WAIT_FOR continue3';
+SET DEBUG_SYNC= 'alter_table_before_main_binlog SIGNAL binlog WAIT_FOR continue4';
+# Sending:
+ALTER TABLE t1 ADD INDEX i1(b), ALGORITHM= INPLACE, LOCK= SHARED;
+# Connection con1;
+SET DEBUG_SYNC= 'now WAIT_FOR opened';
+# At this point, neither reads nor writes should be blocked.
+SELECT * FROM t1;
+a b
+1 1
+2 2
+INSERT INTO t1 VALUES (3,3);
+SET DEBUG_SYNC= 'now SIGNAL continue1';
+SET DEBUG_SYNC= 'now WAIT_FOR upgraded';
+# Now both reads and writes should be blocked
+SELECT * FROM t1;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+INSERT INTO t1 VALUES (4,4);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+SET DEBUG_SYNC= 'now SIGNAL continue2';
+SET DEBUG_SYNC= 'now WAIT_FOR beforecommit';
+# Still both reads and writes should be blocked.
+SELECT * FROM t1;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+INSERT INTO t1 VALUES (5,5);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+SET DEBUG_SYNC= 'now SIGNAL continue3';
+SET DEBUG_SYNC= 'now WAIT_FOR binlog';
+# Same here.
+SELECT * FROM t1;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+INSERT INTO t1 VALUES (6,6);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+SET DEBUG_SYNC= 'now SIGNAL continue4';
+# Connection default
+# Reaping ALTER TABLE ...
+SET DEBUG_SYNC= 'RESET';
+DELETE FROM t1 WHERE a= 3;
+#
+# 2: Copy + writes blocked.
+#
+SET DEBUG_SYNC= 'alter_opened_table SIGNAL opened WAIT_FOR continue1';
+SET DEBUG_SYNC= 'alter_table_copy_after_lock_upgrade SIGNAL upgraded WAIT_FOR continue2';
+SET DEBUG_SYNC= 'alter_table_before_main_binlog SIGNAL binlog WAIT_FOR continue3';
+# Sending:
+ALTER TABLE t1 ADD INDEX i2(b), ALGORITHM= COPY, LOCK= SHARED;
+# Connection con1;
+SET DEBUG_SYNC= 'now WAIT_FOR opened';
+# At this point, neither reads nor writes should be blocked.
+SELECT * FROM t1;
+a b
+1 1
+2 2
+INSERT INTO t1 VALUES (3,3);
+SET DEBUG_SYNC= 'now SIGNAL continue1';
+SET DEBUG_SYNC= 'now WAIT_FOR upgraded';
+# Now writes should be blocked, reads still allowed.
+SELECT * FROM t1;
+a b
+1 1
+2 2
+3 3
+INSERT INTO t1 VALUES (4,4);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+SET DEBUG_SYNC= 'now SIGNAL continue2';
+SET DEBUG_SYNC= 'now WAIT_FOR binlog';
+# Now both reads and writes should be blocked.
+SELECT * FROM t1 limit 1;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+INSERT INTO t1 VALUES (5,5);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+SET DEBUG_SYNC= 'now SIGNAL continue3';
+# Connection default
+# Reaping ALTER TABLE ...
+SET DEBUG_SYNC= 'RESET';
+DELETE FROM t1 WHERE a= 3;
+#
+# 3: In-place + writes allowed.
+#
+# TODO: Enable this test once WL#5526 is pushed
+#
+# 4: In-place + reads and writes blocked.
+#
+# Connection default
+SET DEBUG_SYNC= 'alter_opened_table SIGNAL opened WAIT_FOR continue1';
+SET DEBUG_SYNC= 'alter_table_inplace_after_lock_upgrade SIGNAL upgraded WAIT_FOR continue2';
+SET DEBUG_SYNC= 'alter_table_inplace_before_commit SIGNAL beforecommit WAIT_FOR continue3';
+SET DEBUG_SYNC= 'alter_table_before_main_binlog SIGNAL binlog WAIT_FOR continue4';
+# Sending:
+ALTER TABLE t1 ADD INDEX i4(b), ALGORITHM= INPLACE, LOCK= EXCLUSIVE;
+# Connection con1;
+SET DEBUG_SYNC= 'now WAIT_FOR opened';
+# At this point, neither reads nor writes should be blocked.
+SELECT * FROM t1;
+a b
+1 1
+2 2
+INSERT INTO t1 VALUES (3,3);
+SET DEBUG_SYNC= 'now SIGNAL continue1';
+SET DEBUG_SYNC= 'now WAIT_FOR upgraded';
+# Now both reads and writes should be blocked.
+SELECT * FROM t1;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+INSERT INTO t1 VALUES (4,4);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+SET DEBUG_SYNC= 'now SIGNAL continue2';
+SET DEBUG_SYNC= 'now WAIT_FOR beforecommit';
+# Same here.
+SELECT * FROM t1;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+INSERT INTO t1 VALUES (5,5);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+SET DEBUG_SYNC= 'now SIGNAL continue3';
+SET DEBUG_SYNC= 'now WAIT_FOR binlog';
+# Same here.
+SELECT * FROM t1;
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+INSERT INTO t1 VALUES (6,6);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+SET DEBUG_SYNC= 'now SIGNAL continue4';
+# Connection default
+# Reaping ALTER TABLE ...
+SET DEBUG_SYNC= 'RESET';
+DROP TABLE t1;
+SET DEBUG_SYNC= 'RESET';
diff --git a/mysql-test/r/key.result b/mysql-test/r/key.result
index e63afeab126..cb21e5e8134 100644
--- a/mysql-test/r/key.result
+++ b/mysql-test/r/key.result
@@ -273,7 +273,7 @@ t
drop table t1;
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1 (
c1 int,
c2 varbinary(240),
diff --git a/mysql-test/r/log_slow.result b/mysql-test/r/log_slow.result
index 6500ba3ca53..4414a32d821 100644
--- a/mysql-test/r/log_slow.result
+++ b/mysql-test/r/log_slow.result
@@ -56,6 +56,7 @@ last_insert_id int(11) NO NULL
insert_id int(11) NO NULL
server_id int(10) unsigned NO NULL
sql_text mediumtext NO NULL
+thread_id bigint(21) unsigned NO NULL
flush slow logs;
set long_query_time=0.1;
set log_slow_filter='';
diff --git a/mysql-test/r/log_state.result b/mysql-test/r/log_state.result
index 3ccd1451bc4..1ce7eb0d2aa 100644
--- a/mysql-test/r/log_state.result
+++ b/mysql-test/r/log_state.result
@@ -45,7 +45,7 @@ select sleep(@long_query_time + 1);
sleep(@long_query_time + 1)
0
select * from mysql.slow_log where sql_text NOT LIKE '%slow_log%';
-start_time user_host query_time lock_time rows_sent rows_examined db last_insert_id insert_id server_id sql_text
+start_time user_host query_time lock_time rows_sent rows_examined db last_insert_id insert_id server_id sql_text thread_id
# Switch to connection default
set global slow_query_log= ON;
# Switch to connection con1
@@ -54,8 +54,8 @@ select sleep(@long_query_time + 1);
sleep(@long_query_time + 1)
0
select * from mysql.slow_log where sql_text NOT LIKE '%slow_log%';
-start_time user_host query_time lock_time rows_sent rows_examined db last_insert_id insert_id server_id sql_text
-TIMESTAMP USER_HOST QUERY_TIME 00:00:00.000000 1 0 test 0 0 1 select sleep(@long_query_time + 1)
+start_time user_host query_time lock_time rows_sent rows_examined db last_insert_id insert_id server_id sql_text thread_id
+TIMESTAMP USER_HOST QUERY_TIME 00:00:00.000000 1 0 test 0 0 1 select sleep(@long_query_time + 1) THREAD_ID
# Switch to connection default
show global variables
where Variable_name = 'log' or Variable_name = 'log_slow_queries' or
diff --git a/mysql-test/r/log_tables.result b/mysql-test/r/log_tables.result
index 18da8765d4b..10eb5ca5fcd 100644
--- a/mysql-test/r/log_tables.result
+++ b/mysql-test/r/log_tables.result
@@ -17,7 +17,7 @@ event_time user_host thread_id server_id command_type argument
TIMESTAMP USER_HOST THREAD_ID 1 Query select * from general_log
truncate table slow_log;
select * from slow_log;
-start_time user_host query_time lock_time rows_sent rows_examined db last_insert_id insert_id server_id sql_text
+start_time user_host query_time lock_time rows_sent rows_examined db last_insert_id insert_id server_id sql_text thread_id
truncate table general_log;
select * from general_log where argument like '%general_log%';
event_time user_host thread_id server_id command_type argument
@@ -55,7 +55,7 @@ Table Create Table
general_log CREATE TABLE `general_log` (
`event_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
`user_host` mediumtext NOT NULL,
- `thread_id` int(11) NOT NULL,
+ `thread_id` bigint(21) unsigned NOT NULL,
`server_id` int(10) unsigned NOT NULL,
`command_type` varchar(64) NOT NULL,
`argument` mediumtext NOT NULL
@@ -64,7 +64,7 @@ show fields from mysql.general_log;
Field Type Null Key Default Extra
event_time timestamp(6) NO CURRENT_TIMESTAMP(6) on update CURRENT_TIMESTAMP
user_host mediumtext NO NULL
-thread_id int(11) NO NULL
+thread_id bigint(21) unsigned NO NULL
server_id int(10) unsigned NO NULL
command_type varchar(64) NO NULL
argument mediumtext NO NULL
@@ -81,7 +81,8 @@ slow_log CREATE TABLE `slow_log` (
`last_insert_id` int(11) NOT NULL,
`insert_id` int(11) NOT NULL,
`server_id` int(10) unsigned NOT NULL,
- `sql_text` mediumtext NOT NULL
+ `sql_text` mediumtext NOT NULL,
+ `thread_id` bigint(21) unsigned NOT NULL
) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='Slow log'
show fields from mysql.slow_log;
Field Type Null Key Default Extra
@@ -96,6 +97,7 @@ last_insert_id int(11) NO NULL
insert_id int(11) NO NULL
server_id int(10) unsigned NO NULL
sql_text mediumtext NO NULL
+thread_id bigint(21) unsigned NO NULL
flush logs;
flush tables;
SET GLOBAL GENERAL_LOG=ON;
@@ -146,8 +148,8 @@ select sleep(2);
sleep(2)
0
select * from mysql.slow_log;
-start_time user_host query_time lock_time rows_sent rows_examined db last_insert_id insert_id server_id sql_text
-TIMESTAMP USER_HOST QUERY_TIME 00:00:00.000000 1 0 mysql 0 0 1 select sleep(2)
+start_time user_host query_time lock_time rows_sent rows_examined db last_insert_id insert_id server_id sql_text thread_id
+TIMESTAMP USER_HOST QUERY_TIME 00:00:00.000000 1 0 mysql 0 0 1 select sleep(2) THREAD_ID
set @@session.long_query_time = @saved_long_query_time;
alter table mysql.general_log engine=myisam;
ERROR HY000: You cannot 'ALTER' a log table if logging is enabled
@@ -166,7 +168,7 @@ Table Create Table
general_log CREATE TABLE `general_log` (
`event_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
`user_host` mediumtext NOT NULL,
- `thread_id` int(11) NOT NULL,
+ `thread_id` bigint(21) unsigned NOT NULL,
`server_id` int(10) unsigned NOT NULL,
`command_type` varchar(64) NOT NULL,
`argument` mediumtext NOT NULL
@@ -184,7 +186,8 @@ slow_log CREATE TABLE `slow_log` (
`last_insert_id` int(11) NOT NULL,
`insert_id` int(11) NOT NULL,
`server_id` int(10) unsigned NOT NULL,
- `sql_text` mediumtext NOT NULL
+ `sql_text` mediumtext NOT NULL,
+ `thread_id` bigint(21) unsigned NOT NULL
) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='Slow log'
alter table mysql.general_log engine=myisam;
alter table mysql.slow_log engine=myisam;
@@ -193,7 +196,7 @@ Table Create Table
general_log CREATE TABLE `general_log` (
`event_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
`user_host` mediumtext NOT NULL,
- `thread_id` int(11) NOT NULL,
+ `thread_id` bigint(21) unsigned NOT NULL,
`server_id` int(10) unsigned NOT NULL,
`command_type` varchar(64) NOT NULL,
`argument` mediumtext NOT NULL
@@ -211,7 +214,8 @@ slow_log CREATE TABLE `slow_log` (
`last_insert_id` int(11) NOT NULL,
`insert_id` int(11) NOT NULL,
`server_id` int(10) unsigned NOT NULL,
- `sql_text` mediumtext NOT NULL
+ `sql_text` mediumtext NOT NULL,
+ `thread_id` bigint(21) unsigned NOT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='Slow log'
set global general_log='ON';
set global slow_query_log='ON';
@@ -256,15 +260,15 @@ set storage_engine= @save_storage_engine;
drop table mysql.slow_log;
drop table mysql.general_log;
drop table mysql.general_log;
-ERROR 42S02: Unknown table 'general_log'
+ERROR 42S02: Unknown table 'mysql.general_log'
drop table mysql.slow_log;
-ERROR 42S02: Unknown table 'slow_log'
+ERROR 42S02: Unknown table 'mysql.slow_log'
use mysql;
CREATE TABLE `general_log` (
-`event_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP
+`event_time` TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP
ON UPDATE CURRENT_TIMESTAMP,
`user_host` mediumtext NOT NULL,
-`thread_id` int(11) NOT NULL,
+`thread_id` BIGINT(21) UNSIGNED NOT NULL,
`server_id` int(10) unsigned NOT NULL,
`command_type` varchar(64) NOT NULL,
`argument` mediumtext NOT NULL
@@ -281,7 +285,8 @@ ON UPDATE CURRENT_TIMESTAMP,
`last_insert_id` int(11) NOT NULL,
`insert_id` int(11) NOT NULL,
`server_id` int(10) unsigned NOT NULL,
-`sql_text` mediumtext NOT NULL
+`sql_text` mediumtext NOT NULL,
+`thread_id` BIGINT(21) UNSIGNED NOT NULL
) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='Slow log';
set global general_log='ON';
set global slow_query_log='ON';
@@ -308,7 +313,7 @@ event_time user_host thread_id server_id command_type argument
TIMESTAMP USER_HOST THREAD_ID 1 Query select * from general_log
truncate table slow_log;
select * from slow_log;
-start_time user_host query_time lock_time rows_sent rows_examined db last_insert_id insert_id server_id sql_text
+start_time user_host query_time lock_time rows_sent rows_examined db last_insert_id insert_id server_id sql_text thread_id
create table general_log_new like general_log;
rename table general_log TO renamed_general_log, general_log_new TO general_log;
create table slow_log_new like slow_log;
@@ -329,9 +334,9 @@ TIMESTAMP USER_HOST THREAD_ID 1 Query select * from slow_log
TIMESTAMP USER_HOST THREAD_ID 1 Query create table general_log_new like general_log
TIMESTAMP USER_HOST THREAD_ID 1 Query rename table general_log TO renamed_general_log, general_log_new TO general_log
select * from slow_log;
-start_time user_host query_time lock_time rows_sent rows_examined db last_insert_id insert_id server_id sql_text
+start_time user_host query_time lock_time rows_sent rows_examined db last_insert_id insert_id server_id sql_text thread_id
select * from renamed_slow_log;
-start_time user_host query_time lock_time rows_sent rows_examined db last_insert_id insert_id server_id sql_text
+start_time user_host query_time lock_time rows_sent rows_examined db last_insert_id insert_id server_id sql_text thread_id
set global general_log='OFF';
RENAME TABLE general_log TO general_log2;
set global slow_query_log='OFF';
@@ -362,8 +367,6 @@ show tables like "%log%";
Tables_in_mysql (%log%)
general_log
general_log_new
-ndb_binlog_index
-slave_relay_log_info
slow_log
slow_log_new
drop table slow_log_new, general_log_new;
@@ -426,10 +429,10 @@ SELECT "My own slow query", sleep(2);
My own slow query sleep(2)
My own slow query 0
SELECT * FROM mysql.slow_log WHERE seq >= 2 LIMIT 3;
-start_time user_host query_time lock_time rows_sent rows_examined db last_insert_id insert_id server_id sql_text seq
-START_TIME USER_HOST QUERY_TIME 00:00:00.000000 1 0 test 0 0 1 SELECT "My own slow query", sleep(2) 2
-START_TIME USER_HOST QUERY_TIME 00:00:00.000000 1 0 test 0 0 1 SELECT "My own slow query", sleep(2) 3
-START_TIME USER_HOST QUERY_TIME 00:00:00.000000 1 0 test 0 0 1 SELECT "My own slow query", sleep(2) 4
+start_time user_host query_time lock_time rows_sent rows_examined db last_insert_id insert_id server_id sql_text thread_id seq
+START_TIME USER_HOST QUERY_TIME 00:00:00.000000 1 0 test 0 0 1 SELECT "My own slow query", sleep(2) 2 2
+START_TIME USER_HOST QUERY_TIME 00:00:00.000000 1 0 test 0 0 1 SELECT "My own slow query", sleep(2) 2 3
+START_TIME USER_HOST QUERY_TIME 00:00:00.000000 1 0 test 0 0 1 SELECT "My own slow query", sleep(2) 2 4
SET GLOBAL slow_query_log = 0;
SET SESSION long_query_time =@saved_long_query_time;
FLUSH LOGS;
@@ -548,6 +551,7 @@ BEGIN
DECLARE start_time, query_time, lock_time CHAR(28);
DECLARE user_host MEDIUMTEXT;
DECLARE rows_set, rows_examined, last_insert_id, insert_id, server_id INT;
+DECLARE thread_id BIGINT UNSIGNED;
DECLARE dbname MEDIUMTEXT;
DECLARE sql_text BLOB;
DECLARE done INT DEFAULT 0;
@@ -561,14 +565,14 @@ DECLARE CONTINUE HANDLER FOR ER_SP_FETCH_NO_DATA SET done = 1;
FETCH cur1 INTO
start_time, user_host, query_time, lock_time,
rows_set, rows_examined, dbname, last_insert_id,
-insert_id, server_id, sql_text;
+insert_id, server_id, sql_text, thread_id;
END;
IF NOT done THEN
BEGIN
INSERT INTO
`db_17876.slow_log_data`
VALUES(start_time, user_host, query_time, lock_time, rows_set, rows_examined,
-dbname, last_insert_id, insert_id, server_id, sql_text);
+dbname, last_insert_id, insert_id, server_id, sql_text, thread_id);
END;
END IF;
END;
diff --git a/mysql-test/r/log_tables_upgrade.result b/mysql-test/r/log_tables_upgrade.result
index 5732b94a90c..5a53ca03736 100644
--- a/mysql-test/r/log_tables_upgrade.result
+++ b/mysql-test/r/log_tables_upgrade.result
@@ -33,16 +33,12 @@ mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats OK
mysql.innodb_table_stats OK
-mysql.ndb_binlog_index OK
mysql.plugin OK
mysql.proc OK
mysql.procs_priv OK
mysql.proxies_priv OK
mysql.renamed_general_log OK
mysql.servers OK
-mysql.slave_master_info OK
-mysql.slave_relay_log_info OK
-mysql.slave_worker_info OK
mysql.table_stats OK
mysql.tables_priv OK
mysql.time_zone OK
diff --git a/mysql-test/r/lowercase_table4.result b/mysql-test/r/lowercase_table4.result
index aa81eff5194..02e2012a186 100644
--- a/mysql-test/r/lowercase_table4.result
+++ b/mysql-test/r/lowercase_table4.result
@@ -28,18 +28,7 @@ Create Table CREATE TABLE `Table2` (
KEY `fk1` (`c2`),
CONSTRAINT `fk1` FOREIGN KEY (`c2`) REFERENCES `Table1` (`c1`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
-SELECT * FROM INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS;
-CONSTRAINT_CATALOG def
-CONSTRAINT_SCHEMA mysql
-CONSTRAINT_NAME innodb_index_stats_ibfk_1
-UNIQUE_CONSTRAINT_CATALOG def
-UNIQUE_CONSTRAINT_SCHEMA mysql
-UNIQUE_CONSTRAINT_NAME PRIMARY
-MATCH_OPTION NONE
-UPDATE_RULE RESTRICT
-DELETE_RULE RESTRICT
-TABLE_NAME innodb_index_stats
-REFERENCED_TABLE_NAME innodb_table_stats
+SELECT * FROM INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS WHERE CONSTRAINT_SCHEMA='test';
CONSTRAINT_CATALOG def
CONSTRAINT_SCHEMA test
CONSTRAINT_NAME fk1
@@ -98,18 +87,7 @@ Create Table CREATE TABLE `Customer` (
`Id` int(11) NOT NULL,
PRIMARY KEY (`Id`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
-SELECT * FROM INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS;
-CONSTRAINT_CATALOG def
-CONSTRAINT_SCHEMA mysql
-CONSTRAINT_NAME innodb_index_stats_ibfk_1
-UNIQUE_CONSTRAINT_CATALOG def
-UNIQUE_CONSTRAINT_SCHEMA mysql
-UNIQUE_CONSTRAINT_NAME PRIMARY
-MATCH_OPTION NONE
-UPDATE_RULE RESTRICT
-DELETE_RULE RESTRICT
-TABLE_NAME innodb_index_stats
-REFERENCED_TABLE_NAME innodb_table_stats
+SELECT * FROM INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS WHERE CONSTRAINT_SCHEMA='test';
CONSTRAINT_CATALOG def
CONSTRAINT_SCHEMA test
CONSTRAINT_NAME product_order_ibfk_1
diff --git a/mysql-test/r/mdl_sync.result b/mysql-test/r/mdl_sync.result
index 1c94f867a54..d71498f90dd 100644
--- a/mysql-test/r/mdl_sync.result
+++ b/mysql-test/r/mdl_sync.result
@@ -5,7 +5,7 @@ create table t2 (i int);
connection: default
lock tables t2 read;
connection: con1
-set debug_sync='mdl_upgrade_shared_lock_to_exclusive SIGNAL parked WAIT_FOR go';
+set debug_sync='mdl_upgrade_lock SIGNAL parked WAIT_FOR go';
alter table t1 rename t3;
connection: default
set debug_sync= 'now WAIT_FOR parked';
@@ -16,7 +16,7 @@ connection: con1
connection: default
unlock tables;
connection: con2
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
drop table t3;
SET DEBUG_SYNC= 'RESET';
#
@@ -48,8 +48,13 @@ select count(*) from t1;
count(*)
0
insert into t1 values (1), (1);
+# Check that SU lock is compatible with it. To do this use ALTER TABLE
+# which will fail when constructing .frm and thus obtaining SU metadata
+# lock.
+alter table t1 add index (not_exist);
+ERROR 42000: Key column 'not_exist' doesn't exist in table
# Check that SNW lock is compatible with it. To do this use ALTER TABLE
-# which will fail after opening the table and thus obtaining SNW metadata
+# which will fail during copying the table and thus obtaining SNW metadata
# lock.
alter table t1 add primary key (c1);
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
@@ -139,8 +144,13 @@ select count(*) from t1;
count(*)
3
insert into t1 values (1);
+# Check that SU lock is compatible with it. To do this use ALTER TABLE
+# which will fail when constructing .frm and thus obtaining SU metadata
+# lock.
+alter table t1 add index (not_exist);
+ERROR 42000: Key column 'not_exist' doesn't exist in table
# Check that SNW lock is compatible with it. To do this use ALTER TABLE
-# which will fail after opening the table and thus obtaining SNW metadata
+# which will fail during copying the table and thus obtaining SNW metadata
# lock.
alter table t1 add primary key (c1);
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
@@ -244,8 +254,13 @@ select count(*) from t1;
count(*)
3
insert into t1 values (1);
+# Check that SU lock is compatible with it. To do this use ALTER TABLE
+# which will fail when constructing .frm and thus obtaining SU metadata
+# lock.
+alter table t1 add index (not_exist);
+ERROR 42000: Key column 'not_exist' doesn't exist in table
# Check that SNW lock is compatible with it. To do this use ALTER TABLE
-# which will fail after opening the table and thus obtaining SNW metadata
+# which will fail during copying the table and thus obtaining SNW metadata
# lock.
alter table t1 add primary key (c1);
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
@@ -334,8 +349,13 @@ c1
# effects of concurrent insert.
select * from t1;
insert into t1 values (1);
+# Check that SU lock is compatible with it. To do this use ALTER TABLE
+# which will fail when constructing .frm and thus obtaining SU metadata
+# lock.
+alter table t1 add index (not_exist);
+ERROR 42000: Key column 'not_exist' doesn't exist in table
# Check that SNW lock is not compatible with SW lock.
-# Again we use ALTER TABLE which fails after opening
+# Again we use ALTER TABLE which fails during copying
# the table to avoid upgrade of SNW -> X.
# Sending:
alter table t1 add primary key (c1);;
@@ -397,15 +417,111 @@ rename table t2 to t1;
# Switching to connection 'default'.
#
#
-# 5) Acquire SNW lock on the table. We have to use DEBUG_SYNC for
-# this, to prevent SNW from being immediately upgraded to X.
+# 5) Acquire SU lock on the table. We have to use DEBUG_SYNC for
+# this, to prevent SU from being immediately upgraded to X.
#
-set debug_sync= 'after_open_table_mdl_shared SIGNAL locked WAIT_FOR finish';
+set debug_sync= 'alter_opened_table SIGNAL locked WAIT_FOR finish';
+# Sending:
+alter table t1 add primary key (c1);;
+#
+# Switching to connection 'mdl_con1'.
+set debug_sync= 'now WAIT_FOR locked';
+# Check that S, SH, SR and SW locks are compatible with it.
+handler t1 open;
+handler t1 close;
+select column_name from information_schema.columns where
+table_schema='test' and table_name='t1';
+column_name
+c1
+select count(*) from t1;
+count(*)
+5
+delete from t1 limit 1;
+# Check that SU lock is incompatible with SU lock.
+# Sending:
+alter table t1 add primary key (c1);;
+#
+# Switching to connection 'mdl_con2'.
+# Check that the above ALTER is blocked because of SU lock.
+# Unblock ALTERs.
+set debug_sync= 'now SIGNAL finish';
+#
+# Switching to connection 'default'.
+# Reaping first ALTER TABLE.
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+#
+# Switching to connection 'mdl_con1'.
+# Reaping another ALTER TABLE.
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+#
+# Switching to connection 'default'.
+set debug_sync= 'alter_opened_table SIGNAL locked WAIT_FOR finish';
# Sending:
alter table t1 add primary key (c1);;
#
# Switching to connection 'mdl_con1'.
set debug_sync= 'now WAIT_FOR locked';
+# Check that SNRW lock is incompatible with SU lock.
+# Sending:
+lock table t1 write;;
+#
+# Switching to connection 'mdl_con2'.
+# Check that the above LOCK TABLES is blocked because of SU lock.
+# Unblock ALTER and thus LOCK TABLES.
+set debug_sync= 'now SIGNAL finish';
+#
+# Switching to connection 'default'.
+# Reaping ALTER TABLE.
+ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
+#
+# Switching to connection 'mdl_con1'.
+# Reaping LOCK TABLES
+insert into t1 values (1);
+unlock tables;
+#
+# Switching to connection 'default'.
+set debug_sync= 'alter_opened_table SIGNAL locked WAIT_FOR finish';
+# Sending:
+alter table t1 add primary key (c1);;
+#
+# Switching to connection 'mdl_con1'.
+set debug_sync= 'now WAIT_FOR locked';
+# Check that X lock is incompatible with SU lock.
+# Sending:
+rename table t1 to t2;;
+#
+# Switching to connection 'mdl_con2'.
+# Check that the above RENAME is blocked because of SU lock.
+# Unblock ALTER and thus RENAME TABLE.
+set debug_sync= 'now SIGNAL finish';
+#
+# Switching to connection 'default'.
+# Now we have ALTER TABLE with SU->SNW and RENAME TABLE with pending
+# X-lock. In this case ALTER TABLE should be chosen as victim.
+# Reaping ALTER TABLE.
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
+#
+# Switching to connection 'mdl_con1'.
+# Reaping RENAME TABLE
+# Revert back to original state of things.
+rename table t2 to t1;
+#
+# There is no need to check that upgrade from SNW/SNRW to X is
+# blocked by presence of another SU lock because SNW/SNRW is
+# incompatible with SU anyway.
+#
+# Switching to connection 'default'.
+#
+#
+# 6) Acquire SNW lock on the table. We have to use DEBUG_SYNC for
+# this, to prevent SNW from being immediately upgraded to X.
+#
+set debug_sync= 'alter_table_copy_after_lock_upgrade SIGNAL locked WAIT_FOR finish';
+# Sending:
+alter table t1 add primary key (c1), lock=shared, algorithm=copy;;
+#
+# Switching to connection 'mdl_con1'.
+set debug_sync= 'now WAIT_FOR locked';
# Check that S, SH and SR locks are compatible with it.
handler t1 open;
handler t1 close;
@@ -433,13 +549,13 @@ ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
# Reaping DELETE.
#
# Switching to connection 'default'.
-set debug_sync= 'after_open_table_mdl_shared SIGNAL locked WAIT_FOR finish';
+set debug_sync= 'alter_table_copy_after_lock_upgrade SIGNAL locked WAIT_FOR finish';
# Sending:
-alter table t1 add primary key (c1);;
+alter table t1 add primary key (c1), lock=shared, algorithm=copy;;
#
# Switching to connection 'mdl_con1'.
set debug_sync= 'now WAIT_FOR locked';
-# Check that SNW lock is incompatible with SNW lock.
+# Check that SU lock is incompatible with SNW lock.
# Sending:
alter table t1 add primary key (c1);;
#
@@ -456,10 +572,14 @@ ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
# Reaping another ALTER TABLE.
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
#
+# Note that we can't easily check SNW vs SNW locks since
+# SNW is only used by ALTER TABLE after upgrading from SU
+# and SU is also incompatible with SNW.
+#
# Switching to connection 'default'.
-set debug_sync= 'after_open_table_mdl_shared SIGNAL locked WAIT_FOR finish';
+set debug_sync= 'alter_table_copy_after_lock_upgrade SIGNAL locked WAIT_FOR finish';
# Sending:
-alter table t1 add primary key (c1);;
+alter table t1 add primary key (c1), lock=shared, algorithm=copy;;
#
# Switching to connection 'mdl_con1'.
set debug_sync= 'now WAIT_FOR locked';
@@ -482,9 +602,9 @@ insert into t1 values (1);
unlock tables;
#
# Switching to connection 'default'.
-set debug_sync= 'after_open_table_mdl_shared SIGNAL locked WAIT_FOR finish';
+set debug_sync= 'alter_table_copy_after_lock_upgrade SIGNAL locked WAIT_FOR finish';
# Sending:
-alter table t1 add primary key (c1);;
+alter table t1 add primary key (c1), algorithm=copy, lock=shared;;
#
# Switching to connection 'mdl_con1'.
set debug_sync= 'now WAIT_FOR locked';
@@ -513,7 +633,7 @@ rename table t2 to t1;
# Switching to connection 'default'.
#
#
-# 6) Acquire SNRW lock on the table.
+# 7) Acquire SNRW lock on the table.
#
#
lock table t1 write;
@@ -560,12 +680,12 @@ unlock tables;
lock table t1 write;
#
# Switching to connection 'mdl_con1'.
-# Check that SNW lock is incompatible with SNRW lock.
+# Check that SU lock is incompatible with SNRW lock.
# Sending:
alter table t1 add primary key (c1);;
#
# Switching to connection 'default'.
-# Check that the above ALTER is blocked because of UNWR lock.
+# Check that the above ALTER is blocked because of SNRW lock.
# Unblock ALTER.
unlock tables;
#
@@ -573,6 +693,10 @@ unlock tables;
# Reaping ALTER TABLE.
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
#
+# Note that we can't easily check SNW vs SNRW locks since
+# SNW is only used by ALTER TABLE after upgrading from SU
+# and SU is also incompatible with SNRW.
+#
# Switching to connection 'default'.
lock table t1 write;
#
@@ -616,7 +740,7 @@ rename table t2 to t1;
# Switching to connection 'default'.
#
#
-# 7) Now do the same round of tests for X lock. We use additional
+# 8) Now do the same round of tests for X lock. We use additional
# table to get long-lived lock of this type.
#
create table t2 (c1 int);
@@ -744,7 +868,7 @@ rename table t1 to t2;;
#
# Switching to connection 'mdl_con1'.
# Check that RENAME has acquired X lock on t1 and is waiting for t2.
-# Check that SNW lock is incompatible with X lock.
+# Check that SU lock is incompatible with X lock.
# Sending:
alter table t1 add primary key (c1);;
#
@@ -761,7 +885,11 @@ ERROR 42S01: Table 't2' already exists
# Switching to connection 'mdl_con1'.
# Reaping ALTER.
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
-#
+#
+# Note that we can't easily check SNW vs X locks since
+# SNW is only used by ALTER TABLE after upgrading from SU
+# and SU is also incompatible with X.
+#
# Switching to connection 'mdl_con2'.
# Prepare for blocking RENAME TABLE.
lock tables t2 read;
@@ -822,6 +950,9 @@ rename table t3 to t1;
# are pending. I.e. let us test rules for priorities between
# different types of metadata locks.
#
+# Note: No tests for pending SU lock as this lock requires
+# even stronger active or pending lock.
+#
#
# Switching to connection 'mdl_con2'.
#
@@ -1138,6 +1269,9 @@ unlock tables;
# transactional context. Obviously we are mostly interested
# in conflicting types of locks.
#
+# Note: No tests for active/pending SU lock since
+# ALTER TABLE is in its own transaction.
+#
#
# 1) Let us check how various locks used within transactional
# context interact with active/pending SNW lock.
@@ -1154,9 +1288,9 @@ count(*)
# We have to use DEBUG_SYNC facility as otherwise SNW lock
# will be immediately released (or upgraded to X lock).
insert into t2 values (1), (1);
-set debug_sync= 'after_open_table_mdl_shared SIGNAL locked WAIT_FOR finish';
+set debug_sync= 'alter_table_copy_after_lock_upgrade SIGNAL locked WAIT_FOR finish';
# Sending:
-alter table t2 add primary key (c1);;
+alter table t2 add primary key (c1), algorithm=copy, lock=shared;;
#
# Switching to connection 'default'.
set debug_sync= 'now WAIT_FOR locked';
@@ -1199,9 +1333,9 @@ count(*)
#
# Switching to connection 'mdl_con1'.
# Create an active SNW lock on t1.
-set debug_sync= 'after_open_table_mdl_shared SIGNAL locked WAIT_FOR finish';
+set debug_sync= 'alter_table_copy_after_lock_upgrade SIGNAL locked WAIT_FOR finish';
# Sending:
-alter table t1 add primary key (c1);;
+alter table t1 add primary key (c1), algorithm=copy, lock=shared;;
#
# Switching to connection 'default'.
set debug_sync= 'now WAIT_FOR locked';
@@ -1986,7 +2120,7 @@ drop tables t1, t2;
#
create table t1 (i int);
# Ensure that ALTER waits once it has acquired SNW lock.
-set debug_sync='after_open_table_mdl_shared SIGNAL parked1 WAIT_FOR go1';
+set debug_sync='alter_table_copy_after_lock_upgrade SIGNAL parked1 WAIT_FOR go1';
# Sending:
alter table t1 add column j int;
#
@@ -2293,13 +2427,18 @@ set global log_output=@save_log_output;
#
drop tables if exists t1, t2;
create table t1 (i int);
+insert into t1 values(1);
# Let us check that we won't deadlock if during filling
# of I_S table we encounter conflicting metadata lock
# which owner is in its turn waiting for our connection.
lock tables t1 read;
+# Switching to connection 'con46044_2'.
+# Sending:
+update t1 set i = 2;
# Switching to connection 'con46044'.
+# Waiting until UPDATE t1 SET ... is blocked.
# Sending:
-create table t2 select * from t1 for update;;
+create table t2 select * from t1;;
# Switching to connection 'default'.
# Waiting until CREATE TABLE ... SELECT ... is blocked.
# First let us check that SHOW FIELDS/DESCRIBE doesn't
@@ -2329,6 +2468,7 @@ unlock tables;
# Switching to connection 'con46044'.
# Reaping CREATE TABLE ... SELECT ... .
drop table t2;
+# Reaping UPDATE t1 statement
#
# Let us also check that queries to I_S wait for conflicting metadata
# locks to go away instead of skipping table with a warning in cases
@@ -2338,9 +2478,13 @@ drop table t2;
# We check same three queries to I_S in this new situation.
# Switching to connection 'con46044_2'.
lock tables t1 read;
+# Switching to connection 'con46044_3'.
+# Sending:
+update t1 set i = 3;
# Switching to connection 'con46044'.
+# Waiting until UPDATE t1 SET ... is blocked.
# Sending:
-create table t2 select * from t1 for update;;
+create table t2 select * from t1;;
# Switching to connection 'default'.
# Waiting until CREATE TABLE ... SELECT ... is blocked.
# Let us check that SHOW FIELDS/DESCRIBE gets blocked.
@@ -2356,11 +2500,16 @@ unlock tables;
Field Type Null Key Default Extra
i int(11) YES NULL
drop table t2;
+# Reaping UPDATE t1 statement
# Switching to connection 'con46044_2'.
lock tables t1 read;
+# Switching to connection 'con46044_3'.
+# Sending:
+update t1 set i = 4;
# Switching to connection 'con46044'.
+# Waiting until UPDATE t1 SET ... is blocked.
# Sending:
-create table t2 select * from t1 for update;;
+create table t2 select * from t1;;
# Switching to connection 'default'.
# Waiting until CREATE TABLE ... SELECT ... is blocked.
# Check that I_S query which reads only .FRMs gets blocked.
@@ -2376,11 +2525,16 @@ unlock tables;
column_name
i
drop table t2;
+# Reaping UPDATE t1 statement
# Switching to connection 'con46044_2'.
lock tables t1 read;
+# Switching to connection 'con46044_3'.
+# Sending:
+update t1 set i = 5;
# Switching to connection 'con46044'.
+# Waiting until UPDATE t1 SET ... is blocked.
# Sending:
-create table t2 select * from t1 for update;;
+create table t2 select * from t1;;
# Switching to connection 'default'.
# Waiting until CREATE TABLE ... SELECT ... is blocked.
# Finally, check that I_S query which does full-blown table open
@@ -2397,6 +2551,7 @@ unlock tables;
table_name table_type auto_increment table_comment
t2 BASE TABLE NULL
drop table t2;
+# Reaping UPDATE t1 statement
# Switching to connection 'default'.
# Clean-up.
drop table t1;
@@ -2414,7 +2569,7 @@ c1 c2 c3
3 3 0
#
# Switching to connection 'con46273'.
-set debug_sync='after_lock_tables_takes_lock SIGNAL alter_table_locked WAIT_FOR alter_go';
+set debug_sync='alter_table_copy_after_lock_upgrade SIGNAL alter_table_locked WAIT_FOR alter_go';
alter table t1 add column e int, rename to t2;;
#
# Switching to connection 'default'.
@@ -2558,9 +2713,9 @@ drop table if exists t1;
set debug_sync= 'RESET';
create table t1 (i int) engine=InnoDB;
# Switching to connection 'con50913_1'.
-set debug_sync= 'thr_multi_lock_after_thr_lock SIGNAL parked WAIT_FOR go';
+set debug_sync= 'alter_table_copy_after_lock_upgrade SIGNAL parked WAIT_FOR go';
# Sending:
-alter table t1 add column j int;
+alter table t1 add column j int, ALGORITHM=COPY;
# Switching to connection 'default'.
# Wait until ALTER TABLE gets blocked on a sync point after
# acquiring thr_lock.c lock.
@@ -2600,7 +2755,7 @@ i
# Switching to connection 'default'.
# Start ALTER TABLE which will acquire SNW lock and
# table lock and get blocked on sync point.
-set debug_sync= 'thr_multi_lock_after_thr_lock SIGNAL parked WAIT_FOR go';
+set debug_sync= 'alter_table_copy_after_lock_upgrade SIGNAL parked WAIT_FOR go';
# Sending:
alter table t1 add column j int;
# Switching to connection 'con1'.
@@ -2889,7 +3044,7 @@ SET DEBUG_SYNC= 'now SIGNAL blocked';
# Reaping: DROP DATABASE db1
# Connection con2
# Reaping: DROP TABLE db1.t1
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'db1.t1'
# Connection default
SET DEBUG_SYNC= 'RESET';
#
@@ -2934,12 +3089,16 @@ CREATE TABLE m1(a INT) engine=MERGE UNION=(t1, t2);
INSERT INTO t1 VALUES (1), (2);
INSERT INTO t2 VALUES (3), (4);
# Connection con1
-SET DEBUG_SYNC= 'mdl_upgrade_shared_lock_to_exclusive SIGNAL upgrade WAIT_FOR continue';
+# We need EXECUTE 2 since ALTER TABLE does SU => SNW => X and we want
+# to stop at the second upgrade.
+SET DEBUG_SYNC= 'mdl_upgrade_lock SIGNAL upgrade WAIT_FOR continue EXECUTE 2';
# Sending:
ALTER TABLE m1 engine=MERGE UNION=(t2, t1);
# Connection con2
# Waiting for ALTER TABLE to try lock upgrade
SET DEBUG_SYNC= 'now WAIT_FOR upgrade';
+SET DEBUG_SYNC= 'now SIGNAL continue';
+SET DEBUG_SYNC= 'now WAIT_FOR upgrade';
# Sending:
DELETE FROM t2 WHERE a = 3;
# Connection default
diff --git a/mysql-test/r/myisam-system.result b/mysql-test/r/myisam-system.result
index 9d5a59459ec..af5de8f2749 100644
--- a/mysql-test/r/myisam-system.result
+++ b/mysql-test/r/myisam-system.result
@@ -16,4 +16,4 @@ drop table t1;
Warnings:
Warning 2 Can't find file: './test/t1.MYI' (errno: 2 "No such file or directory")
drop table t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
diff --git a/mysql-test/r/myisam.result b/mysql-test/r/myisam.result
index 267110be487..5b0dbbf6957 100644
--- a/mysql-test/r/myisam.result
+++ b/mysql-test/r/myisam.result
@@ -454,7 +454,7 @@ a b c
drop table t1;
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1 (a varchar(150) NOT NULL, KEY (a));
INSERT t1 VALUES ("can \tcan");
INSERT t1 VALUES ("can can");
diff --git a/mysql-test/r/mysql_upgrade.result b/mysql-test/r/mysql_upgrade.result
index a08e6f63fb4..74832162afb 100644
--- a/mysql-test/r/mysql_upgrade.result
+++ b/mysql-test/r/mysql_upgrade.result
@@ -21,15 +21,11 @@ mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats OK
mysql.innodb_table_stats OK
-mysql.ndb_binlog_index OK
mysql.plugin OK
mysql.proc OK
mysql.procs_priv OK
mysql.proxies_priv OK
mysql.servers OK
-mysql.slave_master_info OK
-mysql.slave_relay_log_info OK
-mysql.slave_worker_info OK
mysql.table_stats OK
mysql.tables_priv OK
mysql.time_zone OK
@@ -67,15 +63,11 @@ mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats OK
mysql.innodb_table_stats OK
-mysql.ndb_binlog_index OK
mysql.plugin OK
mysql.proc OK
mysql.procs_priv OK
mysql.proxies_priv OK
mysql.servers OK
-mysql.slave_master_info OK
-mysql.slave_relay_log_info OK
-mysql.slave_worker_info OK
mysql.table_stats OK
mysql.tables_priv OK
mysql.time_zone OK
@@ -113,15 +105,11 @@ mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats OK
mysql.innodb_table_stats OK
-mysql.ndb_binlog_index OK
mysql.plugin OK
mysql.proc OK
mysql.procs_priv OK
mysql.proxies_priv OK
mysql.servers OK
-mysql.slave_master_info OK
-mysql.slave_relay_log_info OK
-mysql.slave_worker_info OK
mysql.table_stats OK
mysql.tables_priv OK
mysql.time_zone OK
@@ -161,15 +149,11 @@ mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats OK
mysql.innodb_table_stats OK
-mysql.ndb_binlog_index OK
mysql.plugin OK
mysql.proc OK
mysql.procs_priv OK
mysql.proxies_priv OK
mysql.servers OK
-mysql.slave_master_info OK
-mysql.slave_relay_log_info OK
-mysql.slave_worker_info OK
mysql.table_stats OK
mysql.tables_priv OK
mysql.time_zone OK
@@ -213,15 +197,11 @@ mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats OK
mysql.innodb_table_stats OK
-mysql.ndb_binlog_index OK
mysql.plugin OK
mysql.proc OK
mysql.procs_priv OK
mysql.proxies_priv OK
mysql.servers OK
-mysql.slave_master_info OK
-mysql.slave_relay_log_info OK
-mysql.slave_worker_info OK
mysql.table_stats OK
mysql.tables_priv OK
mysql.time_zone OK
@@ -268,15 +248,11 @@ mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats OK
mysql.innodb_table_stats OK
-mysql.ndb_binlog_index OK
mysql.plugin OK
mysql.proc OK
mysql.procs_priv OK
mysql.proxies_priv OK
mysql.servers OK
-mysql.slave_master_info OK
-mysql.slave_relay_log_info OK
-mysql.slave_worker_info OK
mysql.table_stats OK
mysql.tables_priv OK
mysql.time_zone OK
@@ -326,15 +302,11 @@ mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats OK
mysql.innodb_table_stats OK
-mysql.ndb_binlog_index OK
mysql.plugin OK
mysql.proc OK
mysql.procs_priv OK
mysql.proxies_priv OK
mysql.servers OK
-mysql.slave_master_info OK
-mysql.slave_relay_log_info OK
-mysql.slave_worker_info OK
mysql.table_stats OK
mysql.tables_priv OK
mysql.time_zone OK
diff --git a/mysql-test/r/mysql_upgrade_ssl.result b/mysql-test/r/mysql_upgrade_ssl.result
index 60bf427cef6..d0609deb552 100644
--- a/mysql-test/r/mysql_upgrade_ssl.result
+++ b/mysql-test/r/mysql_upgrade_ssl.result
@@ -23,15 +23,11 @@ mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats OK
mysql.innodb_table_stats OK
-mysql.ndb_binlog_index OK
mysql.plugin OK
mysql.proc OK
mysql.procs_priv OK
mysql.proxies_priv OK
mysql.servers OK
-mysql.slave_master_info OK
-mysql.slave_relay_log_info OK
-mysql.slave_worker_info OK
mysql.table_stats OK
mysql.tables_priv OK
mysql.time_zone OK
diff --git a/mysql-test/r/mysqlcheck.result b/mysql-test/r/mysqlcheck.result
index 17dc9ad9a35..ce9bf367945 100644
--- a/mysql-test/r/mysqlcheck.result
+++ b/mysql-test/r/mysqlcheck.result
@@ -17,15 +17,11 @@ mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats OK
mysql.innodb_table_stats OK
-mysql.ndb_binlog_index OK
mysql.plugin OK
mysql.proc OK
mysql.procs_priv OK
mysql.proxies_priv OK
mysql.servers OK
-mysql.slave_master_info OK
-mysql.slave_relay_log_info OK
-mysql.slave_worker_info OK
mysql.table_stats OK
mysql.tables_priv OK
mysql.time_zone OK
@@ -54,15 +50,11 @@ status : OK
mysql.innodb_table_stats
note : Table does not support optimize, doing recreate + analyze instead
status : OK
-mysql.ndb_binlog_index OK
mysql.plugin OK
mysql.proc OK
mysql.procs_priv OK
mysql.proxies_priv OK
mysql.servers OK
-mysql.slave_master_info OK
-mysql.slave_relay_log_info OK
-mysql.slave_worker_info OK
mysql.table_stats OK
mysql.tables_priv OK
mysql.time_zone OK
@@ -85,15 +77,11 @@ mysql.host OK
mysql.index_stats OK
mysql.innodb_index_stats OK
mysql.innodb_table_stats OK
-mysql.ndb_binlog_index OK
mysql.plugin OK
mysql.proc OK
mysql.procs_priv OK
mysql.proxies_priv OK
mysql.servers OK
-mysql.slave_master_info OK
-mysql.slave_relay_log_info OK
-mysql.slave_worker_info OK
mysql.table_stats OK
mysql.tables_priv OK
mysql.time_zone OK
@@ -120,15 +108,11 @@ status : OK
mysql.innodb_table_stats
note : Table does not support optimize, doing recreate + analyze instead
status : OK
-mysql.ndb_binlog_index Table is already up to date
mysql.plugin Table is already up to date
mysql.proc Table is already up to date
mysql.procs_priv Table is already up to date
mysql.proxies_priv Table is already up to date
mysql.servers Table is already up to date
-mysql.slave_master_info Table is already up to date
-mysql.slave_relay_log_info Table is already up to date
-mysql.slave_worker_info Table is already up to date
mysql.table_stats Table is already up to date
mysql.tables_priv Table is already up to date
mysql.time_zone Table is already up to date
diff --git a/mysql-test/r/mysqld--help.result b/mysql-test/r/mysqld--help.result
index 5f8cf38ac09..5fec9b50596 100644
--- a/mysql-test/r/mysqld--help.result
+++ b/mysql-test/r/mysqld--help.result
@@ -217,6 +217,7 @@ The following options may be given as the first argument:
Possible values are: SINGLE_PREC_HB - single precision
height-balanced, DOUBLE_PREC_HB - double precision
height-balanced.
+ --host-cache-size=# How many host names should be cached to avoid resolving.
--ignore-builtin-innodb
Disable initialization of builtin InnoDB plugin
--ignore-db-dirs=name
@@ -422,6 +423,8 @@ The following options may be given as the first argument:
--memlock Lock mysqld in memory.
--metadata-locks-cache-size=#
Size of unused metadata locks cache
+ --metadata-locks-hash-instances=#
+ Number of metadata locks hash instances
--min-examined-row-limit=#
Don't write queries to slow log that examine fewer rows
than that
@@ -535,8 +538,10 @@ The following options may be given as the first argument:
record samples
--performance-schema
Enable the performance schema.
+ (Defaults to on; use --skip-performance-schema to disable.)
--performance-schema-accounts-size=#
- Maximum number of instrumented user@host accounts.
+ Maximum number of instrumented user@host accounts. Use 0
+ to disable, -1 for automated sizing.
--performance-schema-consumer-events-stages-current
Default startup value for the events_stages_current
consumer.
@@ -577,64 +582,84 @@ The following options may be given as the first argument:
consumer.
(Defaults to on; use --skip-performance-schema-consumer-thread-instrumentation to disable.)
--performance-schema-digests-size=#
- Size of the statement digest.
+ Size of the statement digest. Use 0 to disable, -1 for
+ automated sizing.
--performance-schema-events-stages-history-long-size=#
- Number of rows in EVENTS_STAGES_HISTORY_LONG.
+ Number of rows in EVENTS_STAGES_HISTORY_LONG. Use 0 to
+ disable, -1 for automated sizing.
--performance-schema-events-stages-history-size=#
- Number of rows per thread in EVENTS_STAGES_HISTORY.
+ Number of rows per thread in EVENTS_STAGES_HISTORY. Use 0
+ to disable, -1 for automated sizing.
--performance-schema-events-statements-history-long-size=#
- Number of rows in EVENTS_STATEMENTS_HISTORY_LONG.
+ Number of rows in EVENTS_STATEMENTS_HISTORY_LONG. Use 0
+ to disable, -1 for automated sizing.
--performance-schema-events-statements-history-size=#
Number of rows per thread in EVENTS_STATEMENTS_HISTORY.
+ Use 0 to disable, -1 for automated sizing.
--performance-schema-events-waits-history-long-size=#
- Number of rows in EVENTS_WAITS_HISTORY_LONG.
+ Number of rows in EVENTS_WAITS_HISTORY_LONG. Use 0 to
+ disable, -1 for automated sizing.
--performance-schema-events-waits-history-size=#
- Number of rows per thread in EVENTS_WAITS_HISTORY.
+ Number of rows per thread in EVENTS_WAITS_HISTORY. Use 0
+ to disable, -1 for automated sizing.
--performance-schema-hosts-size=#
- Maximum number of instrumented hosts.
+ Maximum number of instrumented hosts. Use 0 to disable,
+ -1 for automated sizing.
--performance-schema-instrument[=name]
Default startup value for a performance schema
instrument.
--performance-schema-max-cond-classes=#
Maximum number of condition instruments.
--performance-schema-max-cond-instances=#
- Maximum number of instrumented condition objects.
+ Maximum number of instrumented condition objects. Use 0
+ to disable, -1 for automated sizing.
--performance-schema-max-file-classes=#
Maximum number of file instruments.
--performance-schema-max-file-handles=#
Maximum number of opened instrumented files.
--performance-schema-max-file-instances=#
- Maximum number of instrumented files.
+ Maximum number of instrumented files. Use 0 to disable,
+ -1 for automated sizing.
--performance-schema-max-mutex-classes=#
Maximum number of mutex instruments.
--performance-schema-max-mutex-instances=#
- Maximum number of instrumented MUTEX objects.
+ Maximum number of instrumented MUTEX objects. Use 0 to
+ disable, -1 for automated sizing.
--performance-schema-max-rwlock-classes=#
Maximum number of rwlock instruments.
--performance-schema-max-rwlock-instances=#
- Maximum number of instrumented RWLOCK objects.
+ Maximum number of instrumented RWLOCK objects. Use 0 to
+ disable, -1 for automated sizing.
--performance-schema-max-socket-classes=#
Maximum number of socket instruments.
--performance-schema-max-socket-instances=#
- Maximum number of opened instrumented sockets.
+ Maximum number of opened instrumented sockets. Use 0 to
+ disable, -1 for automated sizing.
--performance-schema-max-stage-classes=#
Maximum number of stage instruments.
--performance-schema-max-statement-classes=#
Maximum number of statement instruments.
--performance-schema-max-table-handles=#
- Maximum number of opened instrumented tables.
+ Maximum number of opened instrumented tables. Use 0 to
+ disable, -1 for automated sizing.
--performance-schema-max-table-instances=#
- Maximum number of instrumented tables.
+ Maximum number of instrumented tables. Use 0 to disable,
+ -1 for automated sizing.
--performance-schema-max-thread-classes=#
Maximum number of thread instruments.
--performance-schema-max-thread-instances=#
- Maximum number of instrumented threads.
+ Maximum number of instrumented threads. Use 0 to disable,
+ -1 for automated sizing.
+ --performance-schema-session-connect-attrs-size=#
+ Size of session attribute string buffer per thread. Use 0
+ to disable, -1 for automated sizing.
--performance-schema-setup-actors-size=#
Maximum number of rows in SETUP_ACTORS.
--performance-schema-setup-objects-size=#
Maximum number of rows in SETUP_OBJECTS.
--performance-schema-users-size=#
- Maximum number of instrumented users.
+ Maximum number of instrumented users. Use 0 to disable,
+ -1 for automated sizing.
--pid-file=name Pid file used by safe_mysqld
--plugin-dir=name Directory for plugins
--plugin-load=name Semicolon-separated list of plugins to load, where each
@@ -1047,6 +1072,7 @@ gtid-strict-mode FALSE
help TRUE
histogram-size 0
histogram-type SINGLE_PREC_HB
+host-cache-size 128
ignore-builtin-innodb FALSE
ignore-db-dirs
init-connect
@@ -1096,7 +1122,7 @@ max-allowed-packet 1048576
max-binlog-cache-size 18446744073709547520
max-binlog-size 1073741824
max-binlog-stmt-cache-size 18446744073709547520
-max-connect-errors 10
+max-connect-errors 100
max-connections 151
max-delayed-threads 20
max-error-count 64
@@ -1114,6 +1140,7 @@ max-user-connections 0
max-write-lock-count 18446744073709551615
memlock FALSE
metadata-locks-cache-size 1024
+metadata-locks-hash-instances 8
min-examined-row-limit 0
mrr-buffer-size 262144
multi-range-count 256
@@ -1139,8 +1166,8 @@ optimizer-search-depth 62
optimizer-selectivity-sampling-limit 100
optimizer-switch index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,index_merge_sort_intersection=off,engine_condition_pushdown=off,index_condition_pushdown=on,derived_merge=on,derived_with_keys=on,firstmatch=on,loosescan=on,materialization=on,in_to_exists=on,semijoin=on,partial_match_rowid_merge=on,partial_match_table_scan=on,subquery_cache=on,mrr=off,mrr_cost_based=off,mrr_sort_keys=off,outer_join_with_cache=on,semijoin_with_cache=on,join_cache_incremental=on,join_cache_hashed=on,join_cache_bka=on,optimize_join_buffer_size=off,table_elimination=on
optimizer-use-condition-selectivity 1
-performance-schema FALSE
-performance-schema-accounts-size 100
+performance-schema TRUE
+performance-schema-accounts-size 10
performance-schema-consumer-events-stages-current FALSE
performance-schema-consumer-events-stages-history FALSE
performance-schema-consumer-events-stages-history-long FALSE
@@ -1153,35 +1180,36 @@ performance-schema-consumer-events-waits-history-long FALSE
performance-schema-consumer-global-instrumentation TRUE
performance-schema-consumer-statements-digest TRUE
performance-schema-consumer-thread-instrumentation TRUE
-performance-schema-digests-size 200
-performance-schema-events-stages-history-long-size 10000
-performance-schema-events-stages-history-size 10
-performance-schema-events-statements-history-long-size 10000
-performance-schema-events-statements-history-size 10
-performance-schema-events-waits-history-long-size 10000
-performance-schema-events-waits-history-size 10
-performance-schema-hosts-size 100
+performance-schema-digests-size 1000
+performance-schema-events-stages-history-long-size 100
+performance-schema-events-stages-history-size 5
+performance-schema-events-statements-history-long-size 100
+performance-schema-events-statements-history-size 5
+performance-schema-events-waits-history-long-size 100
+performance-schema-events-waits-history-size 5
+performance-schema-hosts-size 20
performance-schema-instrument
performance-schema-max-cond-classes 80
-performance-schema-max-cond-instances 1000
+performance-schema-max-cond-instances 836
performance-schema-max-file-classes 50
performance-schema-max-file-handles 32768
-performance-schema-max-file-instances 10000
+performance-schema-max-file-instances 1556
performance-schema-max-mutex-classes 200
-performance-schema-max-mutex-instances 1000000
+performance-schema-max-mutex-instances 3282
performance-schema-max-rwlock-classes 30
-performance-schema-max-rwlock-instances 1000000
+performance-schema-max-rwlock-instances 1724
performance-schema-max-socket-classes 10
-performance-schema-max-socket-instances 1000
+performance-schema-max-socket-instances 179
performance-schema-max-stage-classes 150
-performance-schema-max-statement-classes 174
-performance-schema-max-table-handles 10000
-performance-schema-max-table-instances 1000
+performance-schema-max-statement-classes 175
+performance-schema-max-table-handles 445
+performance-schema-max-table-instances 445
performance-schema-max-thread-classes 50
-performance-schema-max-thread-instances 1000
+performance-schema-max-thread-instances 224
+performance-schema-session-connect-attrs-size 512
performance-schema-setup-actors-size 100
performance-schema-setup-objects-size 100
-performance-schema-users-size 100
+performance-schema-users-size 5
plugin-maturity unknown
port 3306
port-open-timeout 0
diff --git a/mysql-test/r/mysqldump.result b/mysql-test/r/mysqldump.result
index 33e10f29201..ffaa1a06b95 100644
--- a/mysql-test/r/mysqldump.result
+++ b/mysql-test/r/mysqldump.result
@@ -2698,13 +2698,13 @@ DROP TABLE t1, t2;
#
DROP TABLE IF EXISTS `test1`;
Warnings:
-Note 1051 Unknown table 'test1'
+Note 1051 Unknown table 'test.test1'
CREATE TABLE `test1` (
`a1` int(11) default NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
DROP TABLE IF EXISTS `test2`;
Warnings:
-Note 1051 Unknown table 'test2'
+Note 1051 Unknown table 'test.test2'
CREATE TABLE `test2` (
`a2` int(11) default NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
@@ -5241,7 +5241,7 @@ Table Create Table
general_log CREATE TABLE `general_log` (
`event_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
`user_host` mediumtext NOT NULL,
- `thread_id` int(11) NOT NULL,
+ `thread_id` bigint(21) unsigned NOT NULL,
`server_id` int(10) unsigned NOT NULL,
`command_type` varchar(64) NOT NULL,
`argument` mediumtext NOT NULL
@@ -5259,7 +5259,8 @@ slow_log CREATE TABLE `slow_log` (
`last_insert_id` int(11) NOT NULL,
`insert_id` int(11) NOT NULL,
`server_id` int(10) unsigned NOT NULL,
- `sql_text` mediumtext NOT NULL
+ `sql_text` mediumtext NOT NULL,
+ `thread_id` bigint(21) unsigned NOT NULL
) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='Slow log'
SET @@global.log_output= @old_log_output_state;
SET @@global.slow_query_log= @old_slow_query_log_state;
diff --git a/mysql-test/r/partition.result b/mysql-test/r/partition.result
index c68d43831ad..5ee29f8584f 100644
--- a/mysql-test/r/partition.result
+++ b/mysql-test/r/partition.result
@@ -67,6 +67,19 @@ AND A.b = '06'
AND A.c = 343;
DROP TABLE t1;
#
+# Bug#59503: explain extended crash in get_mm_leaf
+#
+CREATE TABLE t1 (a VARCHAR(51) CHARACTER SET latin1)
+ENGINE=MyISAM
+PARTITION BY KEY (a) PARTITIONS 1;
+INSERT INTO t1 VALUES ('a'),('b'),('c');
+EXPLAIN EXTENDED SELECT 1 FROM t1 WHERE a > 1;
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 3 100.00 Using where
+Warnings:
+Note 1003 select 1 AS `1` from `test`.`t1` where (`test`.`t1`.`a` > 1)
+DROP TABLE t1;
+#
# Bug#57778: failed primary key add to partitioned innodb table
# inconsistent and crashes
#
@@ -303,6 +316,32 @@ INSERT INTO t1 VALUES (NULL);
SELECT * FROM t1 WHERE pk < 0 ORDER BY pk;
pk
DROP TABLE t1;
+SET sql_mode=no_engine_substitution;
+CREATE TABLE t1 (a INT)
+ENGINE=NonExistentEngine;
+ERROR 42000: Unknown storage engine 'NonExistentEngine'
+CREATE TABLE t1 (a INT)
+ENGINE=NonExistentEngine
+PARTITION BY HASH (a);
+ERROR 42000: Unknown storage engine 'NonExistentEngine'
+CREATE TABLE t1 (a INT)
+ENGINE=Memory;
+ALTER TABLE t1 ENGINE=NonExistentEngine;
+ERROR 42000: Unknown storage engine 'NonExistentEngine'
+ALTER TABLE t1
+PARTITION BY HASH (a)
+(PARTITION p0 ENGINE=Memory,
+PARTITION p1 ENGINE=NonExistentEngine);
+ERROR 42000: Unknown storage engine 'NonExistentEngine'
+ALTER TABLE t1 ENGINE=NonExistentEngine;
+ERROR 42000: Unknown storage engine 'NonExistentEngine'
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL
+) ENGINE=MEMORY DEFAULT CHARSET=latin1
+DROP TABLE t1;
+SET sql_mode='';
CREATE TABLE t1 (a INT)
ENGINE=NonExistentEngine;
Warnings:
@@ -339,6 +378,7 @@ t1 CREATE TABLE `t1` (
(PARTITION p0 ENGINE = MEMORY,
PARTITION p1 ENGINE = MEMORY) */
DROP TABLE t1;
+SET sql_mode=DEFAULT;
CREATE TABLE t1 (a INT NOT NULL, KEY(a))
PARTITION BY RANGE(a)
(PARTITION p1 VALUES LESS THAN (200), PARTITION pmax VALUES LESS THAN MAXVALUE);
@@ -1056,13 +1096,13 @@ select * from t1 where f1 = 10;
f1 f2
10 1
drop table t1;
-set session storage_engine= 'memory';
+set session default_storage_engine= 'memory';
create table t1 (f_int1 int(11) default null) engine = memory
partition by range (f_int1) subpartition by hash (f_int1)
(partition part1 values less than (1000)
(subpartition subpart11 engine = memory));
drop table t1;
-set session storage_engine='myisam';
+set session default_storage_engine='myisam';
create table t1 (f_int1 integer, f_int2 integer, primary key (f_int1))
partition by hash(f_int1) partitions 2;
insert into t1 values (1,1),(2,2);
@@ -1885,8 +1925,7 @@ WHERE t1.id IN (
SELECT distinct id
FROM t4
WHERE taken BETWEEN @f_date AND date_add(@t_date, INTERVAL 1 DAY))
-ORDER BY t1.id
-;
+ORDER BY t1.id;
MyISAM_part
16421
19092
@@ -1907,7 +1946,7 @@ INSERT INTO t1 VALUES
('2006-09-29 21:50:01',22589,'Verified');
DROP TABLE IF EXISTS t2;
Warnings:
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'test.t2'
CREATE TABLE t2 (
id int(8) NOT NULL,
severity tinyint(4) NOT NULL DEFAULT '0',
diff --git a/mysql-test/r/partition_binlog.result b/mysql-test/r/partition_binlog.result
index 1db427868f4..21eca8f1c00 100644
--- a/mysql-test/r/partition_binlog.result
+++ b/mysql-test/r/partition_binlog.result
@@ -9,7 +9,7 @@ PARTITION BY RANGE (id)
PARTITION pmax VALUES LESS THAN (MAXVALUE));
INSERT INTO t1 VALUES (1), (10), (100), (1000);
ALTER TABLE t1 TRUNCATE PARTITION p1;
-ERROR HY000: Incorrect partition name
+ERROR HY000: Unknown partition 'p1' in table 't1'
ALTER TABLE t1 DROP PARTITION p1;
ERROR HY000: Error in list of partitions to DROP
# No error returned, output in table format instead:
diff --git a/mysql-test/r/partition_debug_sync.result b/mysql-test/r/partition_debug_sync.result
index 0549a6a8bdd..c30651c1c0d 100644
--- a/mysql-test/r/partition_debug_sync.result
+++ b/mysql-test/r/partition_debug_sync.result
@@ -5,7 +5,9 @@ SET DEBUG_SYNC= 'RESET';
# Test when remove partitioning is done while drop table is waiting
# for the table.
# After MDL was introduced, there is no longer any race, so test is done
-# by adding a small sleep to verify that the delete waits.
+# by adding a small sleep to verify that the delete waits. This happens
+# only until ALTER tries to upgrade its MDL lock, which ends up in MDL
+# deadlock which is correctly reported.
# Con 1
SET DEBUG_SYNC= 'RESET';
CREATE TABLE t1
@@ -19,14 +21,15 @@ PARTITION p1 VALUES LESS THAN (20),
PARTITION p2 VALUES LESS THAN (100),
PARTITION p3 VALUES LESS THAN MAXVALUE ) */;
SET DEBUG_SYNC= 'alter_table_before_create_table_no_lock SIGNAL removing_partitioning WAIT_FOR waiting_for_alter';
-SET DEBUG_SYNC= 'alter_table_before_main_binlog SIGNAL partitioning_removed';
+SET DEBUG_SYNC= 'mdl_acquire_lock_wait SIGNAL waiting_for_upgrade';
ALTER TABLE t1 REMOVE PARTITIONING;
# Con default
SET DEBUG_SYNC= 'now WAIT_FOR removing_partitioning';
SET DEBUG_SYNC= 'mdl_acquire_lock_wait SIGNAL waiting_for_alter';
-SET DEBUG_SYNC= 'rm_table_no_locks_before_delete_table WAIT_FOR partitioning_removed';
+SET DEBUG_SYNC= 'rm_table_no_locks_before_delete_table WAIT_FOR waiting_for_upgrade';
DROP TABLE IF EXISTS t1;
# Con 1
+ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
SET DEBUG_SYNC= 'RESET';
SET DEBUG_SYNC= 'RESET';
#
@@ -58,3 +61,27 @@ SET DEBUG_SYNC= 'RESET';
# Con default
SET DEBUG_SYNC= 'RESET';
End of 5.1 tests
+#
+# Coverage test for non pruned ha_partition::store_lock()
+#
+CREATE TABLE t1 (a int) ENGINE = InnoDB;
+CREATE TABLE t2 (a int PRIMARY KEY)
+ENGINE = InnoDB PARTITION BY HASH (a) PARTITIONS 3;
+HANDLER t1 OPEN;
+# Con1
+LOCK TABLES t1 WRITE, t2 READ;
+# Default
+SET DEBUG_SYNC="wait_for_lock SIGNAL locking";
+INSERT INTO t2 VALUES (1), (2), (3);
+# Con1
+SET DEBUG_SYNC="now WAIT_FOR locking";
+ALTER TABLE t1 ADD COLUMN b int;
+# Default
+ERROR HY000: Wait on a lock was aborted due to a pending exclusive lock
+SELECT 1;
+1
+1
+# Con1
+UNLOCK TABLES;
+# Default
+DROP TABLE t1, t2;
diff --git a/mysql-test/r/partition_disabled.result b/mysql-test/r/partition_disabled.result
index 505bec79610..edf3a56d9b2 100644
--- a/mysql-test/r/partition_disabled.result
+++ b/mysql-test/r/partition_disabled.result
@@ -56,7 +56,7 @@ ERROR HY000: The MariaDB server is running with the --skip-partition option so i
ALTER TABLE t1 PARTITION BY KEY(joined) PARTITIONS 2;
ERROR HY000: The MariaDB server is running with the --skip-partition option so it cannot execute this statement
drop table t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
CREATE TABLE t1 (
firstname VARCHAR(25) NOT NULL,
lastname VARCHAR(25) NOT NULL,
@@ -73,7 +73,7 @@ PARTITION p4 VALUES LESS THAN MAXVALUE
);
ERROR HY000: The MariaDB server is running with the --skip-partition option so it cannot execute this statement
drop table t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
CREATE TABLE t1 (id INT, purchased DATE)
PARTITION BY RANGE( YEAR(purchased) )
SUBPARTITION BY HASH( TO_DAYS(purchased) )
@@ -84,7 +84,7 @@ PARTITION p2 VALUES LESS THAN MAXVALUE
);
ERROR HY000: The MariaDB server is running with the --skip-partition option so it cannot execute this statement
drop table t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
create table t1 (a varchar(10) charset latin1 collate latin1_bin);
insert into t1 values (''),(' '),('a'),('a '),('a ');
explain partitions select * from t1 where a='a ' OR a='a';
diff --git a/mysql-test/r/partition_innodb.result b/mysql-test/r/partition_innodb.result
index a1accfa8e3d..2d35fe0bf07 100644
--- a/mysql-test/r/partition_innodb.result
+++ b/mysql-test/r/partition_innodb.result
@@ -31,6 +31,17 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 range PRIMARY,b b 67 NULL 18 Using where; Using index
DROP TABLE t1;
#
+# Bug#13007154: Crash in keys_to_use_for_scanning with ORDER BY
+# and PARTITIONING
+#
+CREATE TABLE t1 (a INT, KEY(a))
+ENGINE = InnoDB
+PARTITION BY KEY (a) PARTITIONS 1;
+SELECT 1 FROM t1 WHERE a > (SELECT LAST_INSERT_ID() FROM t1 LIMIT 0)
+ORDER BY a;
+1
+DROP TABLE t1;
+#
# Bug#56287: crash when using Partition datetime in sub in query
#
CREATE TABLE t1
@@ -60,7 +71,7 @@ DROP TABLE t1;
# Bug#54747: Deadlock between REORGANIZE PARTITION and
# SELECT is not detected
#
-SET @old_innodb_thread_concurrency:= @@innodb_thread_concurrency;
+SET @old_innodb_thread_concurrency := @@innodb_thread_concurrency;
SET @old_innodb_thread_sleep_delay := @@innodb_thread_sleep_delay;
SET GLOBAL innodb_thread_concurrency = 1;
CREATE TABLE t1
diff --git a/mysql-test/r/partition_innodb_plugin.result b/mysql-test/r/partition_innodb_plugin.result
index ceade2a793c..7a84745e611 100644
--- a/mysql-test/r/partition_innodb_plugin.result
+++ b/mysql-test/r/partition_innodb_plugin.result
@@ -76,18 +76,18 @@ t1.par
SET innodb_strict_mode = OFF;
ALTER TABLE t1 ADD PARTITION PARTITIONS 2;
Warnings:
-Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=4.
-Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=4.
-Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=4.
+Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=4.
+Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=4.
+Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=4.
t1.frm
t1.par
ALTER TABLE t1 REBUILD PARTITION p0;
Warnings:
-Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=4.
+Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=4.
UNLOCK TABLES;
SHOW CREATE TABLE t1;
Table Create Table
diff --git a/mysql-test/r/partition_mgm_err.result b/mysql-test/r/partition_mgm_err.result
index a13278d724e..cbf45a2b7be 100644
--- a/mysql-test/r/partition_mgm_err.result
+++ b/mysql-test/r/partition_mgm_err.result
@@ -98,7 +98,7 @@ PARTITION BY KEY (a)
(PARTITION x0, PARTITION x1);
ALTER TABLE t1 ADD PARTITION PARTITIONS 0;
ERROR HY000: At least one partition must be added
-ALTER TABLE t1 ADD PARTITION PARTITIONS 1024;
+ALTER TABLE t1 ADD PARTITION PARTITIONS 8192;
ERROR HY000: Too many partitions (including subpartitions) were defined
ALTER TABLE t1 DROP PARTITION x0;
ERROR HY000: DROP PARTITION can only be used on RANGE/LIST partitions
diff --git a/mysql-test/r/partition_myisam.result b/mysql-test/r/partition_myisam.result
index 10586ddc548..80b3a9511ea 100644
--- a/mysql-test/r/partition_myisam.result
+++ b/mysql-test/r/partition_myisam.result
@@ -1,80 +1,62 @@
DROP TABLE IF EXISTS t1, t2;
#
-# Bug#50036: Inconsistent errors when using TIMESTAMP
-# columns/expressions
-# Added test with existing TIMESTAMP partitioning (when it was allowed).
-CREATE TABLE t1 (a TIMESTAMP)
-ENGINE = MyISAM
-PARTITION BY HASH (UNIX_TIMESTAMP(a));
-INSERT INTO t1 VALUES ('2000-01-02 03:04:05');
-SELECT * FROM t1;
-a
-2000-01-02 03:04:05
-FLUSH TABLES;
-# replacing t1.frm with TO_DAYS(a) which was allowed earlier.
-# Disable warnings, since the result would differ when running with
-# --ps-protocol (only for the 'SELECT * FROM t1' statement).
-SELECT * FROM t1;
-a
-2000-01-02 03:04:05
-SHOW CREATE TABLE t1;
-Table Create Table
-t1 CREATE TABLE `t1` (
- `a` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
-) ENGINE=<curr_engine> DEFAULT CHARSET=latin1
-/*!50100 PARTITION BY HASH (TO_DAYS(a)) */
-INSERT INTO t1 VALUES ('2001-02-03 04:05:06');
-SELECT * FROM t1;
-a
-2000-01-02 03:04:05
-2001-02-03 04:05:06
-ALTER TABLE t1 ADD PARTITION PARTITIONS 2;
-Warnings:
-Warning 1486 Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed
-ALTER TABLE t1
-PARTITION BY RANGE (TO_DAYS(a))
-(PARTITION p0 VALUES LESS THAN (10000),
-PARTITION p1 VALUES LESS THAN (MAXVALUE));
-ERROR HY000: Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed
+# BUG#11933226 - 60681: CHECKSUM TABLE RETURNS 0 FOR PARTITIONED TABLE
+#
+CREATE TABLE t1 (
+i INT
+)
+ENGINE=MyISAM
+PARTITION BY RANGE (i)
+(PARTITION p3 VALUES LESS THAN (3),
+PARTITION p5 VALUES LESS THAN (5),
+PARTITION pMax VALUES LESS THAN MAXVALUE);
+INSERT INTO t1 VALUES (1), (2), (3), (4), (5), (6);
+CHECKSUM TABLE t1;
+Table Checksum
+test.t1 2653438147
+ALTER TABLE t1 CHECKSUM = 1;
+CHECKSUM TABLE t1 EXTENDED;
+Table Checksum
+test.t1 2653438147
+# Before patch this returned 0!
+CHECKSUM TABLE t1;
+Table Checksum
+test.t1 2653438147
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
- `a` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
-) ENGINE=MyISAM DEFAULT CHARSET=latin1
-/*!50100 PARTITION BY HASH (TO_DAYS(a))
-PARTITIONS 3 */
-CREATE TABLE t2 LIKE t1;
-SHOW CREATE TABLE t2;
-Table Create Table
-t2 CREATE TABLE `t2` (
- `a` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
-) ENGINE=MyISAM DEFAULT CHARSET=latin1
-/*!50100 PARTITION BY HASH (TO_DAYS(a))
-PARTITIONS 3 */
-Warnings:
-Warning 1486 Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed
-DROP TABLE t2;
-CREATE TABLE t2 SELECT * FROM t1;
-DROP TABLE t2;
-ALTER TABLE t1 PARTITION BY HASH (UNIX_TIMESTAMP(a));
+ `i` int(11) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 CHECKSUM=1
+/*!50100 PARTITION BY RANGE (i)
+(PARTITION p3 VALUES LESS THAN (3) ENGINE = MyISAM,
+ PARTITION p5 VALUES LESS THAN (5) ENGINE = MyISAM,
+ PARTITION pMax VALUES LESS THAN MAXVALUE ENGINE = MyISAM) */
+DROP TABLE t1;
+# Same test without partitioning
+CREATE TABLE t1 (
+i INT
+) ENGINE=MyISAM;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
- `a` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
+ `i` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
-/*!50100 PARTITION BY HASH (UNIX_TIMESTAMP(a)) */
-ALTER TABLE t1 ADD PARTITION PARTITIONS 2;
+INSERT INTO t1 VALUES (1), (2), (3), (4), (5), (6);
+CHECKSUM TABLE t1;
+Table Checksum
+test.t1 2653438147
+ALTER TABLE t1 CHECKSUM = 1;
+CHECKSUM TABLE t1 EXTENDED;
+Table Checksum
+test.t1 2653438147
+CHECKSUM TABLE t1;
+Table Checksum
+test.t1 2653438147
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
- `a` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
-) ENGINE=MyISAM DEFAULT CHARSET=latin1
-/*!50100 PARTITION BY HASH (UNIX_TIMESTAMP(a))
-PARTITIONS 3 */
-SELECT * FROM t1;
-a
-2000-01-02 03:04:05
-2001-02-03 04:05:06
+ `i` int(11) DEFAULT NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1 CHECKSUM=1
DROP TABLE t1;
#
# Bug#31931: Mix of handlers error message
@@ -108,7 +90,7 @@ ERROR HY000: Failed to read from the .par file
# Note that it is currently impossible to drop a partitioned table
# without the .par file
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
#
# Bug#50392: insert_id is not reset for partitioned tables
# auto_increment on duplicate entry
@@ -247,3 +229,18 @@ PARTITION p1 VALUES LESS THAN (100) MAX_ROWS=100,
PARTITION pMax VALUES LESS THAN MAXVALUE);
INSERT INTO t1 VALUES (1, "Partition p1, first row");
DROP TABLE t1;
+#
+# bug#11760213-52599: ALTER TABLE REMOVE PARTITIONING ON NON-PARTITIONED
+# TABLE CORRUPTS MYISAM
+DROP TABLE if exists `t1`;
+CREATE TABLE `t1`(`a` INT)ENGINE=myisam;
+ALTER TABLE `t1` ADD COLUMN `b` INT;
+CREATE UNIQUE INDEX `i1` ON `t1`(`b`);
+CREATE UNIQUE INDEX `i2` ON `t1`(`a`);
+ALTER TABLE `t1` ADD PRIMARY KEY (`a`);
+ALTER TABLE `t1` REMOVE PARTITIONING;
+ERROR HY000: Partition management on a not partitioned table is not possible
+CHECK TABLE `t1` EXTENDED;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+DROP TABLE t1;
diff --git a/mysql-test/r/partition_not_blackhole.result b/mysql-test/r/partition_not_blackhole.result
index dc0339f8c48..923d70c0ad6 100644
--- a/mysql-test/r/partition_not_blackhole.result
+++ b/mysql-test/r/partition_not_blackhole.result
@@ -11,6 +11,6 @@ t1
SHOW CREATE TABLE t1;
ERROR HY000: Incorrect information in file: './test/t1.frm'
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
t1.frm
t1.par
diff --git a/mysql-test/r/partition_truncate.result b/mysql-test/r/partition_truncate.result
index 66c0cd3d9da..7a82e47d818 100644
--- a/mysql-test/r/partition_truncate.result
+++ b/mysql-test/r/partition_truncate.result
@@ -5,7 +5,7 @@ partition by list (a)
alter table t1 truncate partition p1,p1;
ERROR HY000: Incorrect partition name
alter table t1 truncate partition p0;
-ERROR HY000: Incorrect partition name
+ERROR HY000: Unknown partition 'p0' in table 't1'
drop table t1;
create table t1 (a int)
partition by list (a)
diff --git a/mysql-test/r/plugin.result b/mysql-test/r/plugin.result
index 241e7a11ecc..ce338938d6f 100644
--- a/mysql-test/r/plugin.result
+++ b/mysql-test/r/plugin.result
@@ -15,7 +15,7 @@ PLUGIN_STATUS ACTIVE
PLUGIN_TYPE STORAGE ENGINE
PLUGIN_TYPE_VERSION #
PLUGIN_LIBRARY ha_example.so
-PLUGIN_LIBRARY_VERSION 1.5
+PLUGIN_LIBRARY_VERSION 1.7
PLUGIN_AUTHOR Brian Aker, MySQL AB
PLUGIN_DESCRIPTION Example storage engine
PLUGIN_LICENSE GPL
@@ -28,7 +28,7 @@ PLUGIN_STATUS ACTIVE
PLUGIN_TYPE DAEMON
PLUGIN_TYPE_VERSION #
PLUGIN_LIBRARY ha_example.so
-PLUGIN_LIBRARY_VERSION 1.5
+PLUGIN_LIBRARY_VERSION 1.7
PLUGIN_AUTHOR Sergei Golubchik
PLUGIN_DESCRIPTION Unusable Daemon
PLUGIN_LICENSE GPL
@@ -60,7 +60,7 @@ PLUGIN_STATUS DELETED
PLUGIN_TYPE STORAGE ENGINE
PLUGIN_TYPE_VERSION #
PLUGIN_LIBRARY ha_example.so
-PLUGIN_LIBRARY_VERSION 1.5
+PLUGIN_LIBRARY_VERSION 1.7
PLUGIN_AUTHOR Brian Aker, MySQL AB
PLUGIN_DESCRIPTION Example storage engine
PLUGIN_LICENSE GPL
@@ -137,12 +137,16 @@ t1 CREATE TABLE `t1` (
) ENGINE=EXAMPLE DEFAULT CHARSET=latin1 `ULL`=10000000000000000000 `one_or_two`='ttt' `YESNO`=SSS `VAROPT`='5'
#alter table
alter table t1 ULL=10000000;
+Warnings:
+Note 1105 EXAMPLE DEBUG: ULL 4294967290 -> 10000000
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
`a` int(11) DEFAULT NULL
) ENGINE=EXAMPLE DEFAULT CHARSET=latin1 `one_or_two`='ttt' `YESNO`=SSS `VAROPT`='5' `ULL`=10000000
alter table t1 change a a int complex='c,c,c';
+Warnings:
+Note 1105 EXAMPLE DEBUG: Field `a` COMPLEX '(null)' -> 'c,c,c'
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -168,6 +172,8 @@ select create_options from information_schema.tables where table_schema='test' a
create_options
`ULL`=4660 `VAROPT`='5'
ALTER TABLE t1 ULL=DEFAULT;
+Warnings:
+Note 1105 EXAMPLE DEBUG: ULL 4660 -> 4294967295
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
diff --git a/mysql-test/r/profiling.result b/mysql-test/r/profiling.result
index 6292cd085e4..4c531a8a5f7 100644
--- a/mysql-test/r/profiling.result
+++ b/mysql-test/r/profiling.result
@@ -123,8 +123,8 @@ select query_id, count(*), sum(duration) from information_schema.profiling group
select CPU_user, CPU_system, Context_voluntary, Context_involuntary, Block_ops_in, Block_ops_out, Messages_sent, Messages_received, Page_faults_major, Page_faults_minor, Swaps, Source_function, Source_file, Source_line from information_schema.profiling;
drop table if exists t1, t2, t3;
Warnings:
-Note 1051 Unknown table 't2'
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t2'
+Note 1051 Unknown table 'test.t3'
create table t1 (id int );
create table t2 (id int not null);
create table t3 (id int not null primary key);
@@ -309,7 +309,7 @@ select @@profiling;
set session profiling = OFF;
drop table if exists profile_log;
Warnings:
-Note 1051 Unknown table 'profile_log'
+Note 1051 Unknown table 'test.profile_log'
create table profile_log (how_many int);
drop procedure if exists p1;
drop procedure if exists p2;
diff --git a/mysql-test/r/ps.result b/mysql-test/r/ps.result
index 95217d9716a..f6a2a16f038 100644
--- a/mysql-test/r/ps.result
+++ b/mysql-test/r/ps.result
@@ -51,7 +51,7 @@ execute stmt4;
prepare stmt4 from 'drop table t2';
execute stmt4;
execute stmt4;
-ERROR 42S02: Unknown table 't2'
+ERROR 42S02: Unknown table 'test.t2'
prepare stmt5 from 'select ? + a from t1';
set @a=1;
execute stmt5 using @a;
@@ -526,7 +526,7 @@ FOUND_ROWS()
deallocate prepare stmt;
drop table if exists t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
create table t1 (c1 int(11) not null, c2 int(11) not null,
primary key (c1,c2), key c2 (c2), key c1 (c1));
insert into t1 values (200887, 860);
@@ -1201,13 +1201,13 @@ SET @aux= "SELECT COUNT(*)
prepare my_stmt from @aux;
execute my_stmt;
COUNT(*)
-42
+43
execute my_stmt;
COUNT(*)
-42
+43
execute my_stmt;
COUNT(*)
-42
+43
deallocate prepare my_stmt;
drop procedure if exists p1|
drop table if exists t1|
@@ -2799,48 +2799,48 @@ drop table if exists t2;
create procedure proc_1() show warnings;
drop table if exists t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
call proc_1();
Level Code Message
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
drop table if exists t2;
Warnings:
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'test.t2'
call proc_1();
Level Code Message
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'test.t2'
drop table if exists t1, t2;
Warnings:
-Note 1051 Unknown table 't1'
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'test.t1'
+Note 1051 Unknown table 'test.t2'
call proc_1();
Level Code Message
-Note 1051 Unknown table 't1'
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'test.t1'
+Note 1051 Unknown table 'test.t2'
drop procedure proc_1;
create function func_1() returns int begin show warnings; return 1; end|
ERROR 0A000: Not allowed to return a result set from a function
prepare abc from "show warnings";
drop table if exists t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
execute abc;
Level Code Message
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
drop table if exists t2;
Warnings:
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'test.t2'
execute abc;
Level Code Message
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'test.t2'
drop table if exists t1, t2;
Warnings:
-Note 1051 Unknown table 't1'
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'test.t1'
+Note 1051 Unknown table 'test.t2'
execute abc;
Level Code Message
-Note 1051 Unknown table 't1'
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'test.t1'
+Note 1051 Unknown table 'test.t2'
deallocate prepare abc;
set @my_password="password";
set @my_data="clear text to encode";
@@ -2926,7 +2926,7 @@ i j
DROP TABLE t1, t2;
drop table if exists t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
prepare stmt
from "create table t1 (c char(100) character set utf8, key (c(10)))";
execute stmt;
diff --git a/mysql-test/r/ps_1general.result b/mysql-test/r/ps_1general.result
index 0a8aea94e8a..9981156bc5f 100644
--- a/mysql-test/r/ps_1general.result
+++ b/mysql-test/r/ps_1general.result
@@ -325,10 +325,10 @@ drop table if exists t5;
prepare stmt1 from ' drop table if exists t5 ' ;
execute stmt1 ;
Warnings:
-Note 1051 Unknown table 't5'
+Note 1051 Unknown table 'test.t5'
prepare stmt1 from ' drop table t5 ' ;
execute stmt1 ;
-ERROR 42S02: Unknown table 't5'
+ERROR 42S02: Unknown table 'test.t5'
prepare stmt1 from ' SELECT @@version ' ;
execute stmt1 ;
@@version
diff --git a/mysql-test/r/ps_ddl1.result b/mysql-test/r/ps_ddl1.result
index 87abcd90590..667cbed8a7a 100644
--- a/mysql-test/r/ps_ddl1.result
+++ b/mysql-test/r/ps_ddl1.result
@@ -420,7 +420,7 @@ call p_verify_reprepare_count(0);
SUCCESS
execute stmt;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
call p_verify_reprepare_count(0);
SUCCESS
diff --git a/mysql-test/r/query_cache.result b/mysql-test/r/query_cache.result
index 62f8a9728c3..18f9db1743d 100644
--- a/mysql-test/r/query_cache.result
+++ b/mysql-test/r/query_cache.result
@@ -1944,6 +1944,41 @@ COUNT(*)
DROP TABLE t1;
SET GLOBAL query_cache_size= @qc;
#
+End of 5.5 tests
+#
+# MDEV-617 LP:671189 - Query cache is not used for tables or
+# databases with dots in their names
+#
+CREATE DATABASE `foo.bar`;
+use `foo.bar`;
+flush status;
+CREATE TABLE moocow (a int);
+INSERT INTO moocow VALUES (1), (2), (3);
+SHOW STATUS LIKE 'Qcache_inserts';
+Variable_name Value
+Qcache_inserts 0
+SELECT * FROM moocow;
+a
+1
+2
+3
+SHOW STATUS LIKE 'Qcache_inserts';
+Variable_name Value
+Qcache_inserts 1
+SHOW STATUS LIKE 'Qcache_hits';
+Variable_name Value
+Qcache_hits 0
+SELECT * FROM moocow;
+a
+1
+2
+3
+SHOW STATUS LIKE 'Qcache_hits';
+Variable_name Value
+Qcache_hits 1
+use test;
+drop database `foo.bar`;
+End of 10.0 tests
restore defaults
SET GLOBAL query_cache_type= default;
SET GLOBAL query_cache_size= default;
diff --git a/mysql-test/r/read_only.result b/mysql-test/r/read_only.result
index 3811c6c5487..c9c569137b2 100644
--- a/mysql-test/r/read_only.result
+++ b/mysql-test/r/read_only.result
@@ -117,10 +117,10 @@ select @@global.read_only;
1
unlock tables;
drop temporary table ttt;
-ERROR 42S02: Unknown table 'ttt'
+ERROR 42S02: Unknown table 'test.ttt'
drop temporary table if exists ttt;
Warnings:
-Note 1051 Unknown table 'ttt'
+Note 1051 Unknown table 'test.ttt'
connection default;
set global read_only=0;
drop table t1,t2;
diff --git a/mysql-test/r/rename.result b/mysql-test/r/rename.result
index 7433ab8a0c9..74370ba74dd 100644
--- a/mysql-test/r/rename.result
+++ b/mysql-test/r/rename.result
@@ -39,7 +39,7 @@ select * from t3;
3 table 3
drop table if exists t1,t2,t3,t4;
Warnings:
-Note 1051 Unknown table 't4'
+Note 1051 Unknown table 'test.t4'
CREATE TABLE t1 (a int);
CREATE TABLE t3 (a int);
FLUSH TABLES WITH READ LOCK;
diff --git a/mysql-test/r/row-checksum-old.result b/mysql-test/r/row-checksum-old.result
index 87f0bb8af2d..ef523463860 100644
--- a/mysql-test/r/row-checksum-old.result
+++ b/mysql-test/r/row-checksum-old.result
@@ -73,7 +73,7 @@ test.t1 4108368782
drop table if exists t1;
create table t1 (a int null, v varchar(100)) engine=innodb checksum=0 row_format=fixed;
Warnings:
-Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
insert into t1 values(null, null), (1, "hello");
checksum table t1;
Table Checksum
diff --git a/mysql-test/r/row-checksum.result b/mysql-test/r/row-checksum.result
index 9e58d6fa96e..fb8a1260a1d 100644
--- a/mysql-test/r/row-checksum.result
+++ b/mysql-test/r/row-checksum.result
@@ -73,7 +73,7 @@ test.t1 3885665021
drop table if exists t1;
create table t1 (a int null, v varchar(100)) engine=innodb checksum=0 row_format=fixed;
Warnings:
-Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
insert into t1 values(null, null), (1, "hello");
checksum table t1;
Table Checksum
diff --git a/mysql-test/r/select.result b/mysql-test/r/select.result
index c086a62275a..d639580acf9 100644
--- a/mysql-test/r/select.result
+++ b/mysql-test/r/select.result
@@ -4175,18 +4175,24 @@ str_to_date('2007-10-00','%Y-%m-%d') between '2007/09/01 00:00:00'
set SQL_MODE=TRADITIONAL;
select str_to_date('2007-10-00 12:34','%Y-%m-%d %H:%i') = '2007-10-00 12:34';
str_to_date('2007-10-00 12:34','%Y-%m-%d %H:%i') = '2007-10-00 12:34'
-1
+NULL
+Warnings:
+Warning 1411 Incorrect datetime value: '2007-10-00 12:34' for function str_to_date
select str_to_date('2007-10-01 12:34','%Y-%m-%d %H:%i') = '2007-10-00 12:34';
str_to_date('2007-10-01 12:34','%Y-%m-%d %H:%i') = '2007-10-00 12:34'
0
select str_to_date('2007-10-00 12:34','%Y-%m-%d %H:%i') = '2007-10-01 12:34';
str_to_date('2007-10-00 12:34','%Y-%m-%d %H:%i') = '2007-10-01 12:34'
-0
+NULL
+Warnings:
+Warning 1411 Incorrect datetime value: '2007-10-00 12:34' for function str_to_date
select str_to_date('2007-10-00','%Y-%m-%d') between '2007/09/01'
and '2007/10/20';
str_to_date('2007-10-00','%Y-%m-%d') between '2007/09/01'
and '2007/10/20'
-1
+NULL
+Warnings:
+Warning 1411 Incorrect datetime value: '2007-10-00' for function str_to_date
set SQL_MODE=DEFAULT;
select str_to_date('2007-10-00','%Y-%m-%d') between '' and '2007/10/20';
str_to_date('2007-10-00','%Y-%m-%d') between '' and '2007/10/20'
diff --git a/mysql-test/r/select_jcl6.result b/mysql-test/r/select_jcl6.result
index 00b356fc1c0..18b050b53e7 100644
--- a/mysql-test/r/select_jcl6.result
+++ b/mysql-test/r/select_jcl6.result
@@ -4186,18 +4186,24 @@ str_to_date('2007-10-00','%Y-%m-%d') between '2007/09/01 00:00:00'
set SQL_MODE=TRADITIONAL;
select str_to_date('2007-10-00 12:34','%Y-%m-%d %H:%i') = '2007-10-00 12:34';
str_to_date('2007-10-00 12:34','%Y-%m-%d %H:%i') = '2007-10-00 12:34'
-1
+NULL
+Warnings:
+Warning 1411 Incorrect datetime value: '2007-10-00 12:34' for function str_to_date
select str_to_date('2007-10-01 12:34','%Y-%m-%d %H:%i') = '2007-10-00 12:34';
str_to_date('2007-10-01 12:34','%Y-%m-%d %H:%i') = '2007-10-00 12:34'
0
select str_to_date('2007-10-00 12:34','%Y-%m-%d %H:%i') = '2007-10-01 12:34';
str_to_date('2007-10-00 12:34','%Y-%m-%d %H:%i') = '2007-10-01 12:34'
-0
+NULL
+Warnings:
+Warning 1411 Incorrect datetime value: '2007-10-00 12:34' for function str_to_date
select str_to_date('2007-10-00','%Y-%m-%d') between '2007/09/01'
and '2007/10/20';
str_to_date('2007-10-00','%Y-%m-%d') between '2007/09/01'
and '2007/10/20'
-1
+NULL
+Warnings:
+Warning 1411 Incorrect datetime value: '2007-10-00' for function str_to_date
set SQL_MODE=DEFAULT;
select str_to_date('2007-10-00','%Y-%m-%d') between '' and '2007/10/20';
str_to_date('2007-10-00','%Y-%m-%d') between '' and '2007/10/20'
diff --git a/mysql-test/r/select_pkeycache.result b/mysql-test/r/select_pkeycache.result
index c086a62275a..d639580acf9 100644
--- a/mysql-test/r/select_pkeycache.result
+++ b/mysql-test/r/select_pkeycache.result
@@ -4175,18 +4175,24 @@ str_to_date('2007-10-00','%Y-%m-%d') between '2007/09/01 00:00:00'
set SQL_MODE=TRADITIONAL;
select str_to_date('2007-10-00 12:34','%Y-%m-%d %H:%i') = '2007-10-00 12:34';
str_to_date('2007-10-00 12:34','%Y-%m-%d %H:%i') = '2007-10-00 12:34'
-1
+NULL
+Warnings:
+Warning 1411 Incorrect datetime value: '2007-10-00 12:34' for function str_to_date
select str_to_date('2007-10-01 12:34','%Y-%m-%d %H:%i') = '2007-10-00 12:34';
str_to_date('2007-10-01 12:34','%Y-%m-%d %H:%i') = '2007-10-00 12:34'
0
select str_to_date('2007-10-00 12:34','%Y-%m-%d %H:%i') = '2007-10-01 12:34';
str_to_date('2007-10-00 12:34','%Y-%m-%d %H:%i') = '2007-10-01 12:34'
-0
+NULL
+Warnings:
+Warning 1411 Incorrect datetime value: '2007-10-00 12:34' for function str_to_date
select str_to_date('2007-10-00','%Y-%m-%d') between '2007/09/01'
and '2007/10/20';
str_to_date('2007-10-00','%Y-%m-%d') between '2007/09/01'
and '2007/10/20'
-1
+NULL
+Warnings:
+Warning 1411 Incorrect datetime value: '2007-10-00' for function str_to_date
set SQL_MODE=DEFAULT;
select str_to_date('2007-10-00','%Y-%m-%d') between '' and '2007/10/20';
str_to_date('2007-10-00','%Y-%m-%d') between '' and '2007/10/20'
diff --git a/mysql-test/r/signal.result b/mysql-test/r/signal.result
index 062b866475d..a5eb24442b4 100644
--- a/mysql-test/r/signal.result
+++ b/mysql-test/r/signal.result
@@ -1191,8 +1191,6 @@ end $$
call test_signal() $$
Caught by SQLSTATE
Caught by SQLSTATE
-Warnings:
-Warning 1012 Raising a warning
drop procedure test_signal $$
create procedure test_signal()
begin
@@ -1208,8 +1206,6 @@ end $$
call test_signal() $$
Caught by number
Caught by number
-Warnings:
-Warning 1012 Raising a warning
drop procedure test_signal $$
create procedure test_signal()
begin
@@ -1225,8 +1221,6 @@ end $$
call test_signal() $$
Caught by SQLWARNING
Caught by SQLWARNING
-Warnings:
-Warning 1012 Raising a warning
drop procedure test_signal $$
create procedure test_signal()
begin
@@ -1242,8 +1236,6 @@ end $$
call test_signal() $$
Caught by SQLSTATE
Caught by SQLSTATE
-Warnings:
-Error 1012 Raising a not found
drop procedure test_signal $$
create procedure test_signal()
begin
@@ -1259,8 +1251,6 @@ end $$
call test_signal() $$
Caught by number
Caught by number
-Warnings:
-Error 1012 Raising a not found
drop procedure test_signal $$
create procedure test_signal()
begin
@@ -1276,8 +1266,6 @@ end $$
call test_signal() $$
Caught by NOT FOUND
Caught by NOT FOUND
-Warnings:
-Error 1012 Raising a not found
drop procedure test_signal $$
create procedure test_signal()
begin
@@ -1293,8 +1281,6 @@ end $$
call test_signal() $$
Caught by SQLSTATE
Caught by SQLSTATE
-Warnings:
-Error 1012 Raising an error
drop procedure test_signal $$
create procedure test_signal()
begin
@@ -1310,8 +1296,6 @@ end $$
call test_signal() $$
Caught by number
Caught by number
-Warnings:
-Error 1012 Raising an error
drop procedure test_signal $$
create procedure test_signal()
begin
@@ -1327,25 +1311,29 @@ end $$
call test_signal() $$
Caught by SQLEXCEPTION
Caught by SQLEXCEPTION
-Warnings:
-Error 1012 Raising an error
drop procedure test_signal $$
#
# Test where SIGNAL can be used
#
+
+# RETURN statement clears Diagnostics Area, thus
+# the warnings raised in a stored function are not
+# visible outsidef the stored function. So, we're using
+# @@warning_count variable to check that SIGNAL succeeded.
+
create function test_signal_func() returns integer
begin
+DECLARE v INT;
DECLARE warn CONDITION FOR SQLSTATE "01XXX";
SIGNAL warn SET
MESSAGE_TEXT = "This function SIGNAL a warning",
MYSQL_ERRNO = 1012;
-return 5;
+SELECT @@warning_count INTO v;
+return v;
end $$
select test_signal_func() $$
test_signal_func()
-5
-Warnings:
-Warning 1012 This function SIGNAL a warning
+1
drop function test_signal_func $$
create function test_signal_func() returns integer
begin
@@ -1468,7 +1456,6 @@ after RESIGNAL
after RESIGNAL
Warnings:
Warning 1012 Raising a warning
-Warning 1012 Raising a warning
drop procedure test_resignal $$
create procedure test_resignal()
begin
@@ -1523,7 +1510,6 @@ after RESIGNAL
after RESIGNAL
Warnings:
Warning 1264 Out of range value for column 'a' at row 1
-Warning 1264 Out of range value for column 'a' at row 1
drop procedure test_resignal $$
create procedure test_resignal()
begin
@@ -1557,7 +1543,7 @@ end $$
call test_resignal() $$
before RESIGNAL
before RESIGNAL
-ERROR 42S02: Unknown table 'no_such_table'
+ERROR 42S02: Unknown table 'test.no_such_table'
drop procedure test_resignal $$
create procedure test_resignal()
begin
@@ -1580,7 +1566,6 @@ before RESIGNAL
after RESIGNAL
after RESIGNAL
Warnings:
-Warning 1012 Raising a warning
Warning 5555 RESIGNAL of a warning
drop procedure test_resignal $$
create procedure test_resignal()
@@ -1641,7 +1626,6 @@ before RESIGNAL
after RESIGNAL
after RESIGNAL
Warnings:
-Warning 1264 Out of range value for column 'a' at row 1
Warning 5555 RESIGNAL of a warning
drop procedure test_resignal $$
create procedure test_resignal()
@@ -2054,7 +2038,7 @@ before RESIGNAL
after RESIGNAL
after RESIGNAL
Warnings:
-Error 1051 Unknown table 'no_such_table'
+Error 1051 Unknown table 'test.no_such_table'
Warning 5555 RESIGNAL to a warning
drop procedure test_resignal $$
create procedure test_resignal()
@@ -2075,7 +2059,7 @@ before RESIGNAL
ERROR 02444: RESIGNAL to a not found
show warnings $$
Level Code Message
-Error 1051 Unknown table 'no_such_table'
+Error 1051 Unknown table 'test.no_such_table'
Error 5555 RESIGNAL to a not found
drop procedure test_resignal $$
create procedure test_resignal()
@@ -2096,7 +2080,7 @@ before RESIGNAL
ERROR 44444: RESIGNAL to an error
show warnings $$
Level Code Message
-Error 1051 Unknown table 'no_such_table'
+Error 1051 Unknown table 'test.no_such_table'
Error 5555 RESIGNAL to an error
drop procedure test_resignal $$
#
@@ -2143,9 +2127,6 @@ CALL peter_p2() $$
ERROR 42000: Hi, I am a useless error message
show warnings $$
Level Code Message
-Error 1231 Variable 'sql_mode' can't be set to the value of 'NULL'
-Error 1231 Variable 'sql_mode' can't be set to the value of 'NULL'
-Error 9999 Variable 'sql_mode' can't be set to the value of 'NULL'
Error 9999 Hi, I am a useless error message
drop procedure peter_p1 $$
drop procedure peter_p2 $$
diff --git a/mysql-test/r/signal_demo2.result b/mysql-test/r/signal_demo2.result
index 223030b0624..5c8ac328a4a 100644
--- a/mysql-test/r/signal_demo2.result
+++ b/mysql-test/r/signal_demo2.result
@@ -74,7 +74,7 @@ In proc_bottom()
In proc_bottom()
Doing something that fail (simulate an error) ...
Doing something that fail (simulate an error) ...
-ERROR 42S02: Unknown table 'no_such_table'
+ERROR 42S02: Unknown table 'demo.no_such_table'
call proc_top_a(3);
Starting ...
Starting ...
@@ -167,7 +167,7 @@ Doing something that fail (simulate an error) ...
Doing something that fail (simulate an error) ...
Doing cleanup !
Doing cleanup !
-ERROR 42S02: Unknown table 'no_such_table'
+ERROR 42S02: Unknown table 'demo.no_such_table'
call proc_top_b(3);
Starting ...
Starting ...
diff --git a/mysql-test/r/signal_demo3.result b/mysql-test/r/signal_demo3.result
index a89ce703d20..cc7042269bb 100644
--- a/mysql-test/r/signal_demo3.result
+++ b/mysql-test/r/signal_demo3.result
@@ -77,7 +77,7 @@ call proc_1();
ERROR 45000: Oops in proc_1
show warnings;
Level Code Message
-Error 1051 Unknown table 'oops_it_is_not_here'
+Error 1051 Unknown table 'demo.oops_it_is_not_here'
Error 1644 Oops in proc_9
Error 1644 Oops in proc_8
Error 1644 Oops in proc_7
@@ -95,11 +95,11 @@ call proc_1();
ERROR 45000: Oops in proc_1
show warnings;
Level Code Message
-Error 1051 Unknown table 'oops_it_is_not_here'
-Error 1644 Oops in proc_9
-Error 1644 Oops in proc_8
-Error 1644 Oops in proc_7
-Error 1644 Oops in proc_6
+Error 1644 Oops in proc_5
+Error 1644 Oops in proc_4
+Error 1644 Oops in proc_3
+Error 1644 Oops in proc_2
+Error 1644 Oops in proc_1
SET @@session.max_error_count = 7;
SELECT @@session.max_error_count;
@@session.max_error_count
@@ -108,13 +108,13 @@ call proc_1();
ERROR 45000: Oops in proc_1
show warnings;
Level Code Message
-Error 1051 Unknown table 'oops_it_is_not_here'
-Error 1644 Oops in proc_9
-Error 1644 Oops in proc_8
Error 1644 Oops in proc_7
Error 1644 Oops in proc_6
Error 1644 Oops in proc_5
Error 1644 Oops in proc_4
+Error 1644 Oops in proc_3
+Error 1644 Oops in proc_2
+Error 1644 Oops in proc_1
SET @@session.max_error_count = 9;
SELECT @@session.max_error_count;
@@session.max_error_count
@@ -123,7 +123,6 @@ call proc_1();
ERROR 45000: Oops in proc_1
show warnings;
Level Code Message
-Error 1051 Unknown table 'oops_it_is_not_here'
Error 1644 Oops in proc_9
Error 1644 Oops in proc_8
Error 1644 Oops in proc_7
@@ -132,6 +131,7 @@ Error 1644 Oops in proc_5
Error 1644 Oops in proc_4
Error 1644 Oops in proc_3
Error 1644 Oops in proc_2
+Error 1644 Oops in proc_1
drop database demo;
SET @@global.max_error_count = @start_global_value;
SELECT @@global.max_error_count;
diff --git a/mysql-test/r/sp-big.result b/mysql-test/r/sp-big.result
index d28b7004330..9765508859c 100644
--- a/mysql-test/r/sp-big.result
+++ b/mysql-test/r/sp-big.result
@@ -46,8 +46,6 @@ end while;
close cur1;
end|
call p1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
select count(*) from t1;
count(*)
256
diff --git a/mysql-test/r/sp-bugs.result b/mysql-test/r/sp-bugs.result
index 9d9deaebcc3..e34f8f9e63a 100644
--- a/mysql-test/r/sp-bugs.result
+++ b/mysql-test/r/sp-bugs.result
@@ -43,8 +43,6 @@ END|
SELECT f2 ();
f2 ()
NULL
-Warnings:
-Error 1305 FUNCTION testdb.f_not_exists does not exist
DROP SCHEMA testdb;
USE test;
#
@@ -134,6 +132,15 @@ DROP DATABASE testdb;
USE test;
End of 5.1 tests
#
+# BUG#13489996 valgrind:conditional jump or move depends on
+# uninitialised values-field_blob
+#
+CREATE FUNCTION sf() RETURNS BLOB RETURN "";
+SELECT sf();
+sf()
+
+DROP FUNCTION sf;
+#
# Bug#11763507 - 56224: FUNCTION NAME IS CASE-SENSITIVE
#
SET @@SQL_MODE = '';
diff --git a/mysql-test/r/sp-code.result b/mysql-test/r/sp-code.result
index 3bead4fc826..c9d2f7b023a 100644
--- a/mysql-test/r/sp-code.result
+++ b/mysql-test/r/sp-code.result
@@ -711,8 +711,6 @@ looping i
looping 1
looping i
looping 0
-Warnings:
-Error 1062 Duplicate entry '1' for key 'a'
call proc_26977_works(2);
do something
do something
@@ -732,8 +730,6 @@ looping i
looping 0
optimizer: keep hreturn
optimizer: keep hreturn
-Warnings:
-Error 1062 Duplicate entry '2' for key 'a'
drop table t1;
drop procedure proc_26977_broken;
drop procedure proc_26977_works;
diff --git a/mysql-test/r/sp-destruct.result b/mysql-test/r/sp-destruct.result
index 6d85c3ce496..172e40cb40c 100644
--- a/mysql-test/r/sp-destruct.result
+++ b/mysql-test/r/sp-destruct.result
@@ -149,7 +149,7 @@ alter table mysql.proc drop column type;
# The below statement should not cause assertion failure.
drop database mysqltest;
Warnings:
-Error 1728 Column count of mysql.proc is wrong. Expected 20, found 19. The table is probably corrupted
+Error 1805 Column count of mysql.proc is wrong. Expected 20, found 19. The table is probably corrupted
# Restore mysql.proc.
drop table mysql.proc;
#
@@ -166,7 +166,7 @@ CREATE PROCEDURE db1.p1() SET @foo = 10;
ALTER TABLE mysql.proc MODIFY comment CHAR (32);
DROP DATABASE db1;
Warnings:
-Error 1729 Cannot load from mysql.proc. The table is probably corrupted
+Error 1728 Cannot load from mysql.proc. The table is probably corrupted
# Restore mysql.proc
DROP TABLE mysql.proc;
RENAME TABLE proc_backup TO mysql.proc;
diff --git a/mysql-test/r/sp-dynamic.result b/mysql-test/r/sp-dynamic.result
index cdfeb8ab020..7309ba4c765 100644
--- a/mysql-test/r/sp-dynamic.result
+++ b/mysql-test/r/sp-dynamic.result
@@ -249,7 +249,7 @@ drop procedure p1|
drop table if exists t1|
drop table if exists t2|
Warnings:
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'test.t2'
create table t1 (id integer primary key auto_increment,
stmt_text char(35), status varchar(20))|
insert into t1 (stmt_text) values
diff --git a/mysql-test/r/sp-error.result b/mysql-test/r/sp-error.result
index 24cbb945fd2..71fab8c9654 100644
--- a/mysql-test/r/sp-error.result
+++ b/mysql-test/r/sp-error.result
@@ -466,7 +466,7 @@ drop table t5;
end|
create table t5 (x int)|
call bug3294()|
-ERROR 42S02: Unknown table 't5'
+ERROR 42S02: Unknown table 'test.t5'
drop procedure bug3294|
drop procedure if exists bug8776_1|
drop procedure if exists bug8776_2|
@@ -1344,8 +1344,6 @@ set @in_func := 0;
select func_20713_a();
func_20713_a()
NULL
-Warnings:
-Error 1146 Table 'test.bogus_table_20713' doesn't exist
select @in_func;
@in_func
2
@@ -1353,8 +1351,6 @@ set @in_func := 0;
select func_20713_b();
func_20713_b()
NULL
-Warnings:
-Error 1146 Table 'test.bogus_table_20713' doesn't exist
select @in_func;
@in_func
2
@@ -1567,7 +1563,7 @@ f2()
1
drop function f2;
drop table t2;
-ERROR 42S02: Unknown table 't2'
+ERROR 42S02: Unknown table 'test.t2'
End of 5.1 tests
drop procedure if exists proc_33983_a;
drop procedure if exists proc_33983_b;
@@ -1821,11 +1817,8 @@ CAST('10 ' as unsigned integer)
c
3
@@warning_count
-1
+0
Level Code Message
-Warning 1292 Truncated incorrect INTEGER value: '10 '
-Warnings:
-Warning 1292 Truncated incorrect INTEGER value: '10 '
CALL p6();
CAST('10 ' as unsigned integer)
10
@@ -1833,8 +1826,6 @@ Level Code Message
Warning 1292 Truncated incorrect INTEGER value: '10 '
c
1
-Warnings:
-Warning 1292 Truncated incorrect INTEGER value: '10 '
DROP PROCEDURE p1;
DROP PROCEDURE p2;
DROP PROCEDURE p3;
@@ -1885,9 +1876,6 @@ END|
CALL p1();
exception
exception
-Warnings:
-Warning 1292 Truncated incorrect INTEGER value: '10 '
-Error 1048 Column 'b' cannot be null
DROP TABLE t1;
DROP PROCEDURE p1;
#
@@ -1931,11 +1919,8 @@ CALL p1();
NULL
warning caught (expected)
warning caught (expected)
-Warnings:
-Warning 1365 Division by 0
SHOW WARNINGS;
Level Code Message
-Warning 1365 Division by 0
CALL p2();
5 / 0
NULL
@@ -2008,3 +1993,878 @@ Error 1048 Column 'c' cannot be null
DROP TABLE t1;
DROP TABLE t2;
DROP PROCEDURE p1;
+
+###################################################################
+# Tests for the following bugs:
+# - Bug#11763171: 55852 - Possibly inappropriate handler activation.
+# - Bug#11749343: 38806 - Wrong scope for SQL HANDLERS in SP.
+###################################################################
+
+
+# -- Check that SQL-conditions thrown by Statement-blocks are
+# -- handled by Handler-decl blocks properly.
+
+CREATE PROCEDURE p1()
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+SELECT 'H1' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLWARNING
+SELECT 'H2' AS HandlerId;
+SIGNAL SQLSTATE '01000'; # Should be handled by H2.
+END|
+
+CALL p1()|
+HandlerId
+H2
+
+# -- Check that SQL-conditions thrown by Statement-blocks are
+# -- handled by Handler-decl blocks properly in case of nested
+# -- SQL-blocks.
+
+CREATE PROCEDURE p2()
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+SELECT 'H1' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLWARNING
+SELECT 'H2' AS HandlerId;
+BEGIN
+SELECT 'B1' AS BlockId;
+BEGIN
+SELECT 'B2' AS BlockId;
+BEGIN
+SELECT 'B3' AS BlockId;
+SIGNAL SQLSTATE '01000'; # Should be handled by H2.
+END;
+END;
+END;
+END|
+
+CALL p2()|
+BlockId
+B1
+BlockId
+B2
+BlockId
+B3
+HandlerId
+H2
+
+# -- Check SQL-handler resolution rules.
+
+CREATE PROCEDURE p3()
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+SELECT 'H1' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLWARNING
+SELECT 'H2' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLSTATE '01000'
+ SELECT 'H3' AS HandlerId;
+SIGNAL SQLSTATE '01000'; # Should be handled by H3.
+END|
+
+CALL p3()|
+HandlerId
+H3
+
+CREATE PROCEDURE p4()
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+SELECT 'H1' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLSTATE '01000'
+ SELECT 'H2' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLWARNING
+SELECT 'H3' AS HandlerId;
+SIGNAL SQLSTATE '01000'; # Should be handled by H2.
+END|
+
+CALL p4()|
+HandlerId
+H2
+
+CREATE PROCEDURE p5()
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+SELECT 'H1' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLSTATE '01000'
+ SELECT 'H2' AS HandlerId;
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLWARNING
+SELECT 'H3' AS HandlerId;
+SIGNAL SQLSTATE '01000'; # Should be handled by H3.
+END;
+END|
+
+CALL p5()|
+HandlerId
+H3
+
+# -- Check that handlers don't handle its own exceptions.
+
+CREATE PROCEDURE p6()
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+BEGIN
+SELECT 'H1' AS HandlerId;
+SIGNAL SQLSTATE 'HY000'; # Should *not* be handled by H1.
+END;
+SELECT 'S1' AS SignalId;
+SIGNAL SQLSTATE 'HY000'; # Should be handled by H1.
+END|
+
+CALL p6()|
+SignalId
+S1
+HandlerId
+H1
+ERROR HY000: Unhandled user-defined exception condition
+
+# -- Check that handlers don't handle its own warnings.
+
+CREATE PROCEDURE p7()
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLWARNING
+BEGIN
+SELECT 'H1' AS HandlerId;
+SIGNAL SQLSTATE '01000'; # Should *not* be handled by H1.
+END;
+SELECT 'S1' AS SignalId;
+SIGNAL SQLSTATE '01000'; # Should be handled by H1.
+END|
+
+CALL p7()|
+SignalId
+S1
+HandlerId
+H1
+Warnings:
+Warning 1642 Unhandled user-defined warning condition
+
+# -- Check that conditions for handlers are not handled by the handlers
+# -- from the same block.
+
+CREATE PROCEDURE p8()
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLWARNING
+SELECT 'H1' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+BEGIN
+SELECT 'H2' AS HandlerId;
+SIGNAL SQLSTATE '01000'; # Should *not* be handled by H1.
+END;
+SELECT 'S1' AS SignalId;
+SIGNAL SQLSTATE 'HY000'; # Should be handled by H2.
+END|
+
+CALL p8()|
+SignalId
+S1
+HandlerId
+H2
+Warnings:
+Warning 1642 Unhandled user-defined warning condition
+
+# -- Check that conditions for handlers are not handled by the handlers
+# -- from the same block even if they are thrown deep down the stack.
+
+CREATE PROCEDURE p9()
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLSTATE '01000'
+ SELECT 'Wrong:H1:1' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLWARNING
+SELECT 'Wrong:H1:2' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLSTATE '01000'
+ SELECT 'Wrong:H2:1' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLWARNING
+SELECT 'Wrong:H2:2' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLSTATE '01000'
+ SELECT 'Wrong:H3:1' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLWARNING
+SELECT 'Wrong:H3:2' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLSTATE '01000'
+ SELECT 'Wrong:H4:1' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLWARNING
+SELECT 'Wrong:H4:2' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLSTATE '01000'
+ SELECT 'Wrong:H5:1' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLWARNING
+SELECT 'Wrong:H5:2' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLSTATE '01000'
+ SELECT 'Wrong:H6:1' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLWARNING
+SELECT 'Wrong:H6:2' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+BEGIN
+SELECT 'H2' AS HandlerId;
+SIGNAL SQLSTATE '01000'; # Should *not* be handled by H1.
+END;
+SELECT 'S6' AS SignalId;
+SIGNAL SQLSTATE 'HY000';
+END;
+SELECT 'S5' AS SignalId;
+SIGNAL SQLSTATE 'HY000';
+END;
+SELECT 'S4' AS SignalId;
+SIGNAL SQLSTATE 'HY000';
+END;
+SELECT 'S3' AS SignalId;
+SIGNAL SQLSTATE 'HY000';
+END;
+SELECT 'S2' AS SignalId;
+SIGNAL SQLSTATE 'HY000';
+END;
+SELECT 'S1' AS SignalId;
+SIGNAL SQLSTATE 'HY000'; # Should be handled by H2.
+END|
+
+CALL p9()|
+SignalId
+S1
+SignalId
+S2
+SignalId
+S3
+SignalId
+S4
+SignalId
+S5
+SignalId
+S6
+HandlerId
+H2
+Warnings:
+Warning 1642 Unhandled user-defined warning condition
+
+# -- Check that handlers are choosen properly in case of deep stack and
+# -- nested SQL-blocks.
+
+CREATE PROCEDURE p10()
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLSTATE '01000'
+ SELECT 'H1' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLWARNING
+SELECT 'H2' AS HandlerId;
+BEGIN
+BEGIN
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLSTATE '01000'
+ SELECT 'Wrong:H1:1' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLWARNING
+SELECT 'Wrong:H1:2' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLSTATE '01000'
+ SELECT 'Wrong:H2:1' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLWARNING
+SELECT 'Wrong:H2:2' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLSTATE '01000'
+ SELECT 'Wrong:H3:1' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLWARNING
+SELECT 'Wrong:H3:2' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLSTATE '01000'
+ SELECT 'Wrong:H4:1' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLWARNING
+SELECT 'Wrong:H4:2' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLSTATE '01000'
+ SELECT 'Wrong:H5:1' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLWARNING
+SELECT 'Wrong:H5:2' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLSTATE '01000'
+ SELECT 'Wrong:H6:1' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLWARNING
+SELECT 'Wrong:H6:2' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+BEGIN
+SELECT 'H2' AS HandlerId;
+SIGNAL SQLSTATE '01000'; # Should be handled by H1.
+END;
+SELECT 'S6' AS SignalId;
+SIGNAL SQLSTATE 'HY000';
+END;
+SELECT 'S5' AS SignalId;
+SIGNAL SQLSTATE 'HY000';
+END;
+SELECT 'S4' AS SignalId;
+SIGNAL SQLSTATE 'HY000';
+END;
+SELECT 'S3' AS SignalId;
+SIGNAL SQLSTATE 'HY000';
+END;
+SELECT 'S2' AS SignalId;
+SIGNAL SQLSTATE 'HY000';
+END;
+SELECT 'S1' AS SignalId;
+SIGNAL SQLSTATE 'HY000'; # Should be handled by H2.
+END;
+END;
+END;
+END|
+
+CALL p10()|
+SignalId
+S1
+SignalId
+S2
+SignalId
+S3
+SignalId
+S4
+SignalId
+S5
+SignalId
+S6
+HandlerId
+H2
+HandlerId
+H1
+
+# -- Test stored procedure from Peter's mail.
+
+CREATE PROCEDURE p11()
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+SELECT 'H1' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLWARNING
+SELECT 'H2' AS HandlerId;
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLSTATE '01000', 1249
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+SELECT 'H3' AS HandlerId;
+DECLARE CONTINUE HANDLER FOR SQLWARNING
+SELECT 'H4' AS HandlerId;
+BEGIN
+SELECT 'H5' AS HandlerId;
+SELECT 'S3' AS SignalId;
+SIGNAL SQLSTATE 'HY000'; # H3
+SELECT 'S4' AS SignalId;
+SIGNAL SQLSTATE '22003'; # H3
+SELECT 'S5' AS SignalId;
+SIGNAL SQLSTATE '01000' SET MYSQL_ERRNO = 1249; # H4
+END;
+END;
+SELECT 'S6' AS SignalId;
+SIGNAL SQLSTATE 'HY000'; # H1
+SELECT 'S7' AS SignalId;
+SIGNAL SQLSTATE '22003'; # H1
+SELECT 'S8' AS SignalId;
+SIGNAL SQLSTATE '01000' SET MYSQL_ERRNO = 1249; # H5
+END;
+SELECT 'S1' AS SignalId;
+SIGNAL SQLSTATE 'HY000'; # H1
+SELECT 'S2' AS SignalId;
+SIGNAL SQLSTATE '01000' SET MYSQL_ERRNO = 1249; # H2
+END|
+
+CALL p11()|
+SignalId
+S6
+HandlerId
+H1
+SignalId
+S7
+HandlerId
+H1
+SignalId
+S8
+HandlerId
+H5
+SignalId
+S3
+HandlerId
+H3
+SignalId
+S4
+HandlerId
+H3
+SignalId
+S5
+HandlerId
+H4
+SignalId
+S1
+HandlerId
+H1
+SignalId
+S2
+HandlerId
+H2
+
+# -- Check that runtime stack-trace can be deeper than parsing-time one.
+
+CREATE PROCEDURE p12()
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLSTATE '01001'
+ BEGIN
+DECLARE CONTINUE HANDLER FOR SQLSTATE '01001'
+ BEGIN
+DECLARE CONTINUE HANDLER FOR SQLSTATE '01001'
+ BEGIN
+DECLARE CONTINUE HANDLER FOR SQLSTATE '01001'
+ BEGIN
+DECLARE CONTINUE HANDLER FOR SQLSTATE '01001'
+ BEGIN
+SELECT 'H1:5' AS HandlerId;
+SIGNAL SQLSTATE '01002';
+END;
+SELECT 'H1:4' AS HandlerId;
+SIGNAL SQLSTATE '01001';
+END;
+SELECT 'H1:3' AS HandlerId;
+SIGNAL SQLSTATE '01001';
+END;
+SELECT 'H1:2' AS HandlerId;
+SIGNAL SQLSTATE '01001';
+END;
+SELECT 'H1:1' AS HandlerId;
+SIGNAL SQLSTATE '01001';
+END;
+#########################################################
+DECLARE CONTINUE HANDLER FOR SQLSTATE '01002'
+ SELECT 'OK' AS Msg;
+#########################################################
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLWARNING
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLWARNING
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLWARNING
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLWARNING
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLWARNING
+BEGIN
+SELECT 'H2:5' AS HandlerId;
+SIGNAL SQLSTATE '01001';
+END;
+SELECT 'H2:4' AS HandlerId;
+SIGNAL SQLSTATE '01000';
+END;
+SELECT 'H2:3' AS HandlerId;
+SIGNAL SQLSTATE '01000';
+END;
+SELECT 'H2:2' AS HandlerId;
+SIGNAL SQLSTATE '01000';
+END;
+SELECT 'H2:1' AS HandlerId;
+SIGNAL SQLSTATE '01000';
+END;
+#######################################################
+SELECT 'Throw 01000' AS Msg;
+SIGNAL SQLSTATE '01000';
+END;
+END|
+
+CALL p12()|
+Msg
+Throw 01000
+HandlerId
+H2:1
+HandlerId
+H2:2
+HandlerId
+H2:3
+HandlerId
+H2:4
+HandlerId
+H2:5
+HandlerId
+H1:1
+HandlerId
+H1:2
+HandlerId
+H1:3
+HandlerId
+H1:4
+HandlerId
+H1:5
+Warnings:
+Warning 1642 Unhandled user-defined warning condition
+
+# -- Check that handler-call-frames are removed properly for EXIT
+# -- handlers.
+
+CREATE PROCEDURE p13()
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLWARNING
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLWARNING
+BEGIN
+DECLARE EXIT HANDLER FOR SQLWARNING
+BEGIN
+SELECT 'EXIT handler 3' AS Msg;
+END;
+SELECT 'CONTINUE handler 2: 1' AS Msg;
+SIGNAL SQLSTATE '01000';
+SELECT 'CONTINUE handler 2: 2' AS Msg;
+END;
+SELECT 'CONTINUE handler 1: 1' AS Msg;
+SIGNAL SQLSTATE '01000';
+SELECT 'CONTINUE handler 1: 2' AS Msg;
+END;
+SELECT 'Throw 01000' AS Msg;
+SIGNAL SQLSTATE '01000';
+END|
+
+CALL p13()|
+Msg
+Throw 01000
+Msg
+CONTINUE handler 1: 1
+Msg
+CONTINUE handler 2: 1
+Msg
+EXIT handler 3
+Msg
+CONTINUE handler 1: 2
+
+# That's it. Cleanup.
+
+DROP PROCEDURE p1;
+DROP PROCEDURE p2;
+DROP PROCEDURE p3;
+DROP PROCEDURE p4;
+DROP PROCEDURE p5;
+DROP PROCEDURE p6;
+DROP PROCEDURE p7;
+DROP PROCEDURE p8;
+DROP PROCEDURE p9;
+DROP PROCEDURE p10;
+DROP PROCEDURE p11;
+DROP PROCEDURE p12;
+DROP PROCEDURE p13;
+
+# Bug#12731619: NESTED SP HANDLERS CAN TRIGGER ASSERTION
+
+DROP FUNCTION IF EXISTS f1;
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1(msg VARCHAR(255));
+CREATE FUNCTION f1() RETURNS INT
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLEXCEPTION # handler 1
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLEXCEPTION # handler 2
+BEGIN
+INSERT INTO t1 VALUE('WRONG: Inside H2');
+RETURN 2;
+END;
+INSERT INTO t1 VALUE('CORRECT: Inside H1');
+RETURN 1;
+END;
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLWARNING # handler 3
+BEGIN
+INSERT INTO t1 VALUE('WRONG: Inside H3');
+RETURN 3;
+END;
+INSERT INTO t1 VALUE('CORRECT: Calling f1()');
+RETURN f1(); # -- exception here
+END;
+INSERT INTO t1 VALUE('WRONG: Returning 10');
+RETURN 10;
+END|
+
+SELECT f1();
+f1()
+1
+
+SELECT * FROM t1;
+msg
+CORRECT: Calling f1()
+CORRECT: Inside H1
+
+DROP FUNCTION f1;
+DROP TABLE t1;
+
+# Check that handled SQL-conditions are properly cleared from DA.
+
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+DROP PROCEDURE IF EXISTS p1;
+DROP PROCEDURE IF EXISTS p2;
+DROP PROCEDURE IF EXISTS p3;
+DROP PROCEDURE IF EXISTS p4;
+DROP PROCEDURE IF EXISTS p5;
+CREATE TABLE t1(a CHAR, b CHAR, c CHAR);
+CREATE TABLE t2(a SMALLINT, b SMALLINT, c SMALLINT);
+
+# Check that SQL-conditions for which SQL-handler has been invoked,
+# are cleared from the Diagnostics Area. Note, there might be several
+# SQL-conditions, but SQL-handler must be invoked only once.
+
+CREATE PROCEDURE p1()
+BEGIN
+DECLARE EXIT HANDLER FOR SQLWARNING
+SELECT 'Warning caught' AS msg;
+# The INSERT below raises 3 SQL-conditions (warnings). The EXIT HANDLER
+# above must be invoked once (for one condition), but all three conditions
+# must be cleared from the Diagnostics Area.
+INSERT INTO t1 VALUES('qqqq', 'ww', 'eee');
+# The following INSERT will not be executed, because of the EXIT HANDLER.
+INSERT INTO t1 VALUES('zzz', 'xx', 'yyyy');
+END|
+
+CALL p1()|
+msg
+Warning caught
+
+SELECT * FROM t1|
+a b c
+q w e
+
+# Check that SQL-conditions for which SQL-handler has *not* been
+# invoked, are *still* cleared from the Diagnostics Area.
+
+CREATE PROCEDURE p2()
+BEGIN
+DECLARE CONTINUE HANDLER FOR 1292
+SELECT 'Warning 1292 caught' AS msg;
+# The following INSERT raises 6 SQL-warnings with code 1292,
+# and 3 SQL-warnings with code 1264. The CONTINUE HANDLER above must be
+# invoked once, and all nine SQL-warnings must be cleared from
+# the Diagnostics Area.
+INSERT INTO t2
+SELECT
+CAST(CONCAT(CAST('1 ' AS UNSIGNED INTEGER), '999999 ') AS SIGNED INTEGER),
+CAST(CONCAT(CAST('2 ' AS UNSIGNED INTEGER), '999999 ') AS SIGNED INTEGER),
+CAST(CONCAT(CAST('3 ' AS UNSIGNED INTEGER), '999999 ') AS SIGNED INTEGER);
+END|
+
+CALL p2()|
+msg
+Warning 1292 caught
+
+# Check that if there are two equally ranked SQL-handlers to handle
+# SQL-conditions from SQL-statement, only one of them will be invoked.
+
+CREATE PROCEDURE p3()
+BEGIN
+DECLARE CONTINUE HANDLER FOR 1292
+SELECT 'Warning 1292 caught' AS msg;
+DECLARE CONTINUE HANDLER FOR 1264
+SELECT 'Warning 1264 caught' AS msg;
+# The following INSERT raises 6 SQL-warnings with code 1292,
+# and 3 SQL-warnings with code 1264. Only one of the CONTINUE HANDLERs above
+# must be called, and only once. The SQL Standard does not define, which one
+# should be invoked.
+INSERT INTO t2
+SELECT
+CAST(CONCAT(CAST('1 ' AS UNSIGNED INTEGER), '999999 ') AS SIGNED INTEGER),
+CAST(CONCAT(CAST('2 ' AS UNSIGNED INTEGER), '999999 ') AS SIGNED INTEGER),
+CAST(CONCAT(CAST('3 ' AS UNSIGNED INTEGER), '999999 ') AS SIGNED INTEGER);
+END|
+
+CALL p3()|
+msg
+Warning 1264 caught
+
+# The same as p3, but 1264 comes first.
+
+CREATE PROCEDURE p4()
+BEGIN
+DECLARE CONTINUE HANDLER FOR 1292
+SELECT 'Warning 1292 caught' AS msg;
+DECLARE CONTINUE HANDLER FOR 1264
+SELECT 'Warning 1264 caught' AS msg;
+# The following INSERT raises 4 SQL-warnings with code 1292,
+# and 3 SQL-warnings with code 1264. Only one of the CONTINUE HANDLERs above
+# must be called, and only once. The SQL Standard does not define, which one
+# should be invoked.
+INSERT INTO t2
+SELECT
+CAST(999999 AS SIGNED INTEGER),
+CAST(CONCAT(CAST('2 ' AS UNSIGNED INTEGER), '999999 ') AS SIGNED INTEGER),
+CAST(CONCAT(CAST('3 ' AS UNSIGNED INTEGER), '999999 ') AS SIGNED INTEGER);
+END|
+
+CALL p4()|
+msg
+Warning 1264 caught
+
+# Check that if a SQL-handler raised its own SQL-conditions, there are
+# preserved after handler exit.
+
+CREATE PROCEDURE p5()
+BEGIN
+DECLARE EXIT HANDLER FOR 1292
+BEGIN
+SELECT 'Handler for 1292 (1)' AS Msg;
+SIGNAL SQLSTATE '01000' SET MYSQL_ERRNO = 1234;
+SHOW WARNINGS;
+SELECT 'Handler for 1292 (2)' AS Msg;
+END;
+INSERT INTO t2
+SELECT
+CAST(999999 AS SIGNED INTEGER),
+CAST(CONCAT(CAST('2 ' AS UNSIGNED INTEGER), '999999 ') AS SIGNED INTEGER),
+CAST(CONCAT(CAST('3 ' AS UNSIGNED INTEGER), '999999 ') AS SIGNED INTEGER);
+END|
+
+CALL p5()|
+Msg
+Handler for 1292 (1)
+Level Code Message
+Warning 1234 Unhandled user-defined warning condition
+Msg
+Handler for 1292 (2)
+Warnings:
+Warning 1234 Unhandled user-defined warning condition
+
+# Check that SQL-conditions are available inside the handler, but
+# cleared after the handler exits.
+
+CREATE PROCEDURE p6()
+BEGIN
+DECLARE CONTINUE HANDLER FOR 1292
+BEGIN
+SHOW WARNINGS;
+SELECT 'Handler for 1292' Msg;
+END;
+INSERT INTO t2
+SELECT
+CAST(CONCAT(CAST('1 ' AS UNSIGNED INTEGER), '999999 ') AS SIGNED INTEGER),
+CAST(CONCAT(CAST('2 ' AS UNSIGNED INTEGER), '999999 ') AS SIGNED INTEGER),
+CAST(CONCAT(CAST('3 ' AS UNSIGNED INTEGER), '999999 ') AS SIGNED INTEGER);
+END|
+
+CALL p6()|
+Level Code Message
+Warning 1292 Truncated incorrect INTEGER value: '1 '
+Warning 1292 Truncated incorrect INTEGER value: '1999999 '
+Warning 1264 Out of range value for column 'a' at row 1
+Warning 1292 Truncated incorrect INTEGER value: '2 '
+Warning 1292 Truncated incorrect INTEGER value: '2999999 '
+Warning 1264 Out of range value for column 'b' at row 1
+Warning 1292 Truncated incorrect INTEGER value: '3 '
+Warning 1292 Truncated incorrect INTEGER value: '3999999 '
+Warning 1264 Out of range value for column 'c' at row 1
+Msg
+Handler for 1292
+
+DROP PROCEDURE p1;
+DROP PROCEDURE p2;
+DROP PROCEDURE p3;
+DROP PROCEDURE p4;
+DROP PROCEDURE p5;
+DROP PROCEDURE p6;
+DROP TABLE t1;
+DROP TABLE t2;
+
+# Bug#13059316: ASSERTION FAILURE IN SP_RCONTEXT.CC
+# Check DECLARE statements that raise conditions before handlers
+# are declared.
+
+DROP PROCEDURE IF EXISTS p1;
+DROP PROCEDURE IF EXISTS p2;
+CREATE PROCEDURE p1()
+BEGIN
+DECLARE var1 INTEGER DEFAULT 'string';
+DECLARE EXIT HANDLER FOR SQLWARNING SELECT 'H1';
+END|
+
+CALL p1()|
+Warnings:
+Warning 1366 Incorrect integer value: 'string' for column 'var1' at row 1
+
+CREATE PROCEDURE p2()
+BEGIN
+DECLARE EXIT HANDLER FOR SQLWARNING SELECT 'H2';
+CALL p1();
+END|
+
+CALL p2()|
+H2
+H2
+
+DROP PROCEDURE p1;
+DROP PROCEDURE p2;
+#
+# Bug#13113222 RQG_SIGNAL_RESIGNAL FAILED WITH ASSERTION.
+#
+DROP PROCEDURE IF EXISTS p1;
+DROP PROCEDURE IF EXISTS p2;
+CREATE PROCEDURE p1()
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLEXCEPTION SELECT 'triggered p1';
+# This will trigger an error.
+SIGNAL SQLSTATE 'HY000';
+END|
+CREATE PROCEDURE p2()
+BEGIN
+DECLARE CONTINUE HANDLER FOR SQLWARNING SELECT 'triggered p2';
+# This will trigger a warning.
+SIGNAL SQLSTATE '01000';
+END|
+SET @old_max_error_count= @@session.max_error_count;
+SET SESSION max_error_count= 0;
+CALL p1();
+triggered p1
+triggered p1
+CALL p2();
+SET SESSION max_error_count= @old_max_error_count;
+DROP PROCEDURE p1;
+DROP PROCEDURE p2;
+
+# Bug#12652873: 61392: Continue handler for NOT FOUND being triggered
+# from internal stored function.
+
+DROP FUNCTION IF EXISTS f1;
+DROP FUNCTION IF EXISTS f2;
+DROP TABLE IF EXISTS t1;
+
+CREATE TABLE t1 (a INT, b INT);
+INSERT INTO t1 VALUES (1, 2);
+
+# f1() raises NOT_FOUND condition.
+# Raising NOT_FOUND can not be simulated by SIGNAL,
+# because SIGNAL would raise SQL-error in that case.
+
+CREATE FUNCTION f1() RETURNS INTEGER
+BEGIN
+DECLARE v VARCHAR(5) DEFAULT -1;
+SELECT b FROM t1 WHERE a = 2 INTO v;
+RETURN v;
+END|
+
+# Here we check that the NOT_FOUND condition raised in f1()
+# is not visible in the outer function (f2), i.e. the continue
+# handler in f2() will not be called.
+
+CREATE FUNCTION f2() RETURNS INTEGER
+BEGIN
+DECLARE v INTEGER;
+DECLARE CONTINUE HANDLER FOR NOT FOUND
+SET @msg = 'Handler activated.';
+SELECT f1() INTO v;
+RETURN v;
+END|
+SET @msg = '';
+
+SELECT f2();
+f2()
+-1
+
+SELECT @msg;
+@msg
+
+
+DROP FUNCTION f1;
+DROP FUNCTION f2;
+DROP TABLE t1;
diff --git a/mysql-test/r/sp-prelocking.result b/mysql-test/r/sp-prelocking.result
index 186b2c05d34..9e82a966268 100644
--- a/mysql-test/r/sp-prelocking.result
+++ b/mysql-test/r/sp-prelocking.result
@@ -22,7 +22,7 @@ call sp1();
my-col
1
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'mysqltest.t1'
select database();
database()
mysqltest
@@ -34,7 +34,7 @@ call mysqltest.sp1();
my-col
1
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'mysqltest.t1'
select database();
database()
test
diff --git a/mysql-test/r/sp-vars.result b/mysql-test/r/sp-vars.result
index a465a29ee4f..0a7ea0d68c5 100644
--- a/mysql-test/r/sp-vars.result
+++ b/mysql-test/r/sp-vars.result
@@ -400,7 +400,7 @@ Warnings:
Note 1305 PROCEDURE test.p2 does not exist
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1(log_msg VARCHAR(1024));
CREATE PROCEDURE p1(arg VARCHAR(255))
BEGIN
diff --git a/mysql-test/r/sp.result b/mysql-test/r/sp.result
index 6e6f05667ed..75a6c6278be 100644
--- a/mysql-test/r/sp.result
+++ b/mysql-test/r/sp.result
@@ -737,8 +737,6 @@ close c;
end|
insert into t2 values ("foo", 42, -1.9), ("bar", 3, 12.1), ("zap", 666, -3.14)|
call cur1()|
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
select * from t1|
id data
foo 40
@@ -774,8 +772,6 @@ close c1;
close c2;
end|
call cur2()|
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
select * from t3 order by i,s|
s i
bar 3
@@ -865,8 +861,6 @@ end$
set @@sql_mode = ''|
set sql_select_limit = 1|
call modes(@c1, @c2)|
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
set sql_select_limit = default|
select @c1, @c2|
@c1 @c2
@@ -1688,64 +1682,42 @@ end|
call h_ee()|
h_ee
Inner (good)
-Warnings:
-Error 1062 Duplicate entry '1' for key 'PRIMARY'
call h_es()|
h_es
-Outer (good)
-Warnings:
-Error 1062 Duplicate entry '1' for key 'PRIMARY'
+Inner (bad)
call h_en()|
h_en
-Outer (good)
-Warnings:
-Warning 1329 No data - zero rows fetched, selected, or processed
+Inner (bad)
call h_ew()|
h_ew
-Outer (good)
+Inner (bad)
call h_ex()|
h_ex
-Outer (good)
-Warnings:
-Error 1062 Duplicate entry '1' for key 'PRIMARY'
+Inner (bad)
call h_se()|
h_se
Inner (good)
-Warnings:
-Error 1062 Duplicate entry '1' for key 'PRIMARY'
call h_ss()|
h_ss
Inner (good)
-Warnings:
-Error 1062 Duplicate entry '1' for key 'PRIMARY'
call h_sn()|
h_sn
-Outer (good)
-Warnings:
-Warning 1329 No data - zero rows fetched, selected, or processed
+Inner (bad)
call h_sw()|
h_sw
-Outer (good)
+Inner (bad)
call h_sx()|
h_sx
-Outer (good)
-Warnings:
-Error 1062 Duplicate entry '1' for key 'PRIMARY'
+Inner (bad)
call h_ne()|
h_ne
Inner (good)
-Warnings:
-Warning 1329 No data - zero rows fetched, selected, or processed
call h_ns()|
h_ns
Inner (good)
-Warnings:
-Warning 1329 No data - zero rows fetched, selected, or processed
call h_nn()|
h_nn
Inner (good)
-Warnings:
-Warning 1329 No data - zero rows fetched, selected, or processed
call h_we()|
h_we
Inner (good)
@@ -1758,18 +1730,12 @@ Inner (good)
call h_xe()|
h_xe
Inner (good)
-Warnings:
-Error 1062 Duplicate entry '1' for key 'PRIMARY'
call h_xs()|
h_xs
Inner (good)
-Warnings:
-Error 1062 Duplicate entry '1' for key 'PRIMARY'
call h_xx()|
h_xx
Inner (good)
-Warnings:
-Error 1062 Duplicate entry '1' for key 'PRIMARY'
drop table t3|
drop procedure h_ee|
drop procedure h_es|
@@ -1918,8 +1884,6 @@ set @x2 = 2;
close c1;
end|
call bug2260()|
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
select @x2|
@x2
2
@@ -2063,8 +2027,6 @@ insert into t3 values (123456789012);
insert into t3 values (0);
end|
call bug2780()|
-Warnings:
-Warning 1264 Out of range value for column 's1' at row 1
select @x|
@x
1
@@ -2487,8 +2449,6 @@ declare continue handler for sqlstate 'HY000' begin end;
select s1 from t3 union select s2 from t3;
end|
call bug4904()|
-Warnings:
-Error 1267 Illegal mix of collations (latin1_swedish_ci,IMPLICIT) and (latin2_general_ci,IMPLICIT) for operation 'UNION'
drop procedure bug4904|
drop table t3|
drop procedure if exists bug336|
@@ -2628,17 +2588,13 @@ select row_count()|
row_count()
1
call bug4905()|
-Warnings:
-Error 1062 Duplicate entry '1' for key 'PRIMARY'
select row_count()|
row_count()
--1
+0
call bug4905()|
-Warnings:
-Error 1062 Duplicate entry '1' for key 'PRIMARY'
select row_count()|
row_count()
--1
+0
select * from t3|
s1
1
@@ -2659,14 +2615,10 @@ insert into t3 values (1)|
call bug6029()|
sqlstate 23000
sqlstate 23000
-Warnings:
-Error 1062 Duplicate entry '1' for key 'PRIMARY'
delete from t3|
call bug6029()|
1136
1136
-Warnings:
-Error 1136 Column count doesn't match value count at row 1
drop procedure bug6029|
drop table t3|
drop procedure if exists bug8540|
@@ -2961,23 +2913,15 @@ end|
call bug6900()|
2
2
-Warnings:
-Error 1136 Column count doesn't match value count at row 1
call bug9074()|
x1 x2 x3 x4 x5 x6
1 1 1 1 1 1
-Warnings:
-Error 1062 Duplicate entry 'a' for key 'w'
call bug6900_9074(0)|
sqlexception
sqlexception
-Warnings:
-Error 1136 Column count doesn't match value count at row 1
call bug6900_9074(1)|
-23000
-23000
-Warnings:
-Error 1062 Duplicate entry 'a' for key 'w'
+sqlexception
+sqlexception
drop procedure bug6900|
drop procedure bug9074|
drop procedure bug6900_9074|
@@ -3020,13 +2964,9 @@ delete from t1|
call bug9856()|
16
16
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
call bug9856()|
16
16
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
drop procedure bug9856|
drop procedure if exists bug9674_1|
drop procedure if exists bug9674_2|
@@ -3256,8 +3196,6 @@ x
2
x
3
-Warnings:
-Error 1326 Cursor is not open
call bug10961()|
x
1
@@ -3265,8 +3203,6 @@ x
2
x
3
-Warnings:
-Error 1326 Cursor is not open
drop procedure bug10961|
DROP PROCEDURE IF EXISTS bug6866|
DROP VIEW IF EXISTS tv|
@@ -3274,9 +3210,9 @@ Warnings:
Note 1051 Unknown table 'test.tv'
DROP TABLE IF EXISTS tt1,tt2,tt3|
Warnings:
-Note 1051 Unknown table 'tt1'
-Note 1051 Unknown table 'tt2'
-Note 1051 Unknown table 'tt3'
+Note 1051 Unknown table 'test.tt1'
+Note 1051 Unknown table 'test.tt2'
+Note 1051 Unknown table 'test.tt3'
CREATE TABLE tt1 (a1 int, a2 int, a3 int, data varchar(10))|
CREATE TABLE tt2 (a2 int, data2 varchar(10))|
CREATE TABLE tt3 (a3 int, data3 varchar(10))|
@@ -3382,11 +3318,7 @@ insert into t1 values
('Name4', 13),
('Name5', 14)|
call bug11529()|
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
call bug11529()|
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
delete from t1|
drop procedure bug11529|
set character set utf8|
@@ -3560,32 +3492,24 @@ end;
end if;
end|
call bug12168('a')|
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
select * from t4|
a
1
3
truncate t4|
call bug12168('b')|
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
select * from t4|
a
2
4
truncate t4|
call bug12168('a')|
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
select * from t4|
a
1
3
truncate t4|
call bug12168('b')|
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
select * from t4|
a
2
@@ -3885,8 +3809,6 @@ end|
call bug7049_2()|
Result
Caught it
-Warnings:
-Error 1062 Duplicate entry '42' for key 'x'
select * from t3|
x
42
@@ -3894,16 +3816,12 @@ delete from t3|
call bug7049_4()|
Result
Caught it
-Warnings:
-Error 1062 Duplicate entry '42' for key 'x'
select * from t3|
x
42
select bug7049_2()|
bug7049_2()
1
-Warnings:
-Error 1062 Duplicate entry '42' for key 'x'
drop table t3|
drop procedure bug7049_1|
drop procedure bug7049_2|
@@ -4031,8 +3949,6 @@ end|
call bug14845()|
a
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
drop procedure bug14845|
drop procedure if exists bug13549_1|
drop procedure if exists bug13549_2|
@@ -4236,8 +4152,6 @@ end|
call bug13729()|
55
55
-Warnings:
-Error 1062 Duplicate entry '1' for key 'PRIMARY'
select * from t3|
s1
1
@@ -4274,15 +4188,11 @@ Handler
boo
v isnull(v)
NULL 1
-Warnings:
-Error 1054 Unknown column 'undefined_var' in 'field list'
call bug14643_2()|
Handler
boo
Handler
boo
-Warnings:
-Error 1054 Unknown column 'undefined_var' in 'field list'
drop procedure bug14643_1|
drop procedure bug14643_2|
drop procedure if exists bug14304|
@@ -4606,15 +4516,11 @@ Handler
error
End
done
-Warnings:
-Error 1054 Unknown column 'v' in 'field list'
call bug14498_2()|
Handler
error
End
done
-Warnings:
-Error 1054 Unknown column 'v' in 'field list'
call bug14498_3()|
v
maybe
@@ -4622,22 +4528,16 @@ Handler
error
End
done
-Warnings:
-Error 1054 Unknown column 'v' in 'field list'
call bug14498_4()|
Handler
error
End
done
-Warnings:
-Error 1054 Unknown column 'v' in 'field list'
call bug14498_5()|
Handler
error
End
done
-Warnings:
-Error 1054 Unknown column 'v' in 'field list'
drop procedure bug14498_1|
drop procedure bug14498_2|
drop procedure bug14498_3|
@@ -4702,8 +4602,6 @@ Before NOT FOUND condition is triggered
After NOT FOUND condtition is triggered
xid xdone
1 1
-Warnings:
-Warning 1329 No data - zero rows fetched, selected, or processed
call bug15231_3()|
Result
Missed it (correct)
@@ -4711,8 +4609,6 @@ Level Code Message
Warning 1366 Incorrect decimal value: 'zap' for column 'x' at row 1
Result
Caught it (correct)
-Warnings:
-Warning 1366 Incorrect decimal value: 'zap' for column 'x' at row 1
call bug15231_5()|
Result
Missed it (correct)
@@ -4741,8 +4637,6 @@ end|
call bug15011()|
Handler
Inner
-Warnings:
-Error 1062 Duplicate entry '1' for key 'PRIMARY'
drop procedure bug15011|
drop table t3|
drop procedure if exists bug17476|
@@ -4818,8 +4712,6 @@ i
1
i
0
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
drop table t3|
drop procedure bug16887|
drop procedure if exists bug16474_1|
@@ -4892,8 +4784,6 @@ declare continue handler for sqlexception begin end;
select no_such_function();
end|
call bug18787()|
-Warnings:
-Error 1305 FUNCTION test.no_such_function does not exist
drop procedure bug18787|
create database bug18344_012345678901|
use bug18344_012345678901|
@@ -5267,8 +5157,6 @@ statement failed
statement failed
statement after update
statement after update
-Warnings:
-Error 1242 Subquery returns more than 1 row
select * from t3|
a
1
@@ -5280,8 +5168,6 @@ statement failed
statement failed
statement after update
statement after update
-Warnings:
-Error 1242 Subquery returns more than 1 row
select * from t3|
a
1
@@ -5314,8 +5200,6 @@ in continue handler
in continue handler
reachable code a2
reachable code a2
-Warnings:
-Error 1242 Subquery returns more than 1 row
select * from t3|
a
1
@@ -5331,8 +5215,6 @@ in continue handler
in continue handler
reachable code a2
reachable code a2
-Warnings:
-Error 1242 Subquery returns more than 1 row
select * from t3|
a
1
@@ -5366,8 +5248,6 @@ in continue handler
in continue handler
reachable code a2
reachable code a2
-Warnings:
-Error 1305 FUNCTION test.no_such_function does not exist
drop procedure bug8153_proc_a|
drop procedure bug8153_proc_b|
drop table t3|
@@ -5547,13 +5427,9 @@ end|
select func_20028_a()|
func_20028_a()
0
-Warnings:
-Warning 1329 No data - zero rows fetched, selected, or processed
select func_20028_b()|
func_20028_b()
0
-Warnings:
-Warning 1329 No data - zero rows fetched, selected, or processed
select func_20028_c()|
ERROR 22012: Division by 0
call proc_20028_a()|
@@ -5606,13 +5482,9 @@ end|
select func_20028_a()|
func_20028_a()
0
-Warnings:
-Warning 1329 No data - zero rows fetched, selected, or processed
select func_20028_b()|
func_20028_b()
0
-Warnings:
-Warning 1329 No data - zero rows fetched, selected, or processed
select func_20028_c()|
func_20028_c()
NULL
@@ -5936,13 +5808,9 @@ end|
select func_8407_a()|
func_8407_a()
NULL
-Warnings:
-Error 1146 Table 'test.no_such_view' doesn't exist
select func_8407_b()|
func_8407_b()
1500
-Warnings:
-Error 1146 Table 'test.no_such_view' doesn't exist
drop function func_8407_a|
drop function func_8407_b|
drop table if exists table_26503|
@@ -6064,8 +5932,6 @@ looping i
looping 0
leaving handler
leaving handler
-Warnings:
-Error 1062 Duplicate entry '1' for key 'a'
call proc_26503_ok_2(2)|
do something
do something
@@ -6077,8 +5943,6 @@ looping i
looping 4
leaving handler
leaving handler
-Warnings:
-Error 1062 Duplicate entry '2' for key 'a'
call proc_26503_ok_3(3)|
do something
do something
@@ -6098,8 +5962,6 @@ looping i
looping 0
leaving handler
leaving handler
-Warnings:
-Error 1062 Duplicate entry '3' for key 'a'
call proc_26503_ok_4(4)|
do something
do something
@@ -6111,8 +5973,6 @@ looping i
looping 4
leaving handler
leaving handler
-Warnings:
-Error 1062 Duplicate entry '4' for key 'a'
drop table table_26503|
drop procedure proc_26503_ok_1|
drop procedure proc_26503_ok_2|
@@ -6244,8 +6104,6 @@ END|
SELECT bug5274_f2()|
bug5274_f2()
x
-Warnings:
-Warning 1265 Data truncated for column 'bug5274_f1' at row 1
DROP FUNCTION bug5274_f1|
DROP FUNCTION bug5274_f2|
drop procedure if exists proc_21513|
@@ -6338,19 +6196,13 @@ c1
SELECT f1(2);
f1(2)
0
-Warnings:
-Warning 1329 No data - zero rows fetched, selected, or processed
PREPARE s1 FROM 'SELECT f1(2)';
EXECUTE s1;
f1(2)
0
-Warnings:
-Warning 1329 No data - zero rows fetched, selected, or processed
EXECUTE s1;
f1(2)
0
-Warnings:
-Warning 1329 No data - zero rows fetched, selected, or processed
DROP PROCEDURE p1;
DROP PROCEDURE p2;
DROP FUNCTION f1;
@@ -6826,8 +6678,6 @@ DECLARE CONTINUE HANDLER FOR SQLEXCEPTION SET @exception:= 'run';
SELECT x FROM t1;
END|
CALL bug29770();
-Warnings:
-Error 1054 Unknown column 'x' in 'field list'
SELECT @state, @exception;
@state @exception
run NULL
@@ -6866,8 +6716,6 @@ end;
end while;
end//
call proc_33618(20);
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
drop table t_33618;
drop procedure proc_33618;
#
@@ -7803,9 +7651,6 @@ END $
SELECT f1();
f1()
1
-Warnings:
-Error 1424 Recursive stored functions and triggers are not allowed.
-Error 1305 FUNCTION test.f1 does not exist
DROP FUNCTION f1;
# ------------------------------------------------------------------
# -- End of 5.1 tests
diff --git a/mysql-test/r/sp_trans.result b/mysql-test/r/sp_trans.result
index b91dc898f12..5526fc19aae 100644
--- a/mysql-test/r/sp_trans.result
+++ b/mysql-test/r/sp_trans.result
@@ -99,8 +99,6 @@ return i;
end|
set @error_in_func:= 0|
insert into t1 values (bug10015_6(5)), (bug10015_6(6))|
-Warnings:
-Error 1062 Duplicate entry '1' for key 'PRIMARY'
select @error_in_func|
@error_in_func
1
@@ -526,8 +524,6 @@ until done end repeat;
close c;
end|
call bug14210()|
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
select count(*) from t4|
count(*)
256
diff --git a/mysql-test/r/statistics.result b/mysql-test/r/statistics.result
index ad12c60cce5..e7d25f3476c 100644
--- a/mysql-test/r/statistics.result
+++ b/mysql-test/r/statistics.result
@@ -1113,9 +1113,19 @@ test t2 idx4 4 1.0000
ALTER TABLE t2 CHANGE COLUMN b b varchar(32);
SELECT * FROM mysql.index_stats ORDER BY index_name, prefix_arity, table_name;
db_name table_name index_name prefix_arity avg_frequency
+test t2 PRIMARY 1 1.0000
+test t2 PRIMARY 2 1.0000
test t2 idx2 1 7.0000
test t2 idx2 2 2.3846
+test t2 idx2 3 1.0000
+test t2 idx2 4 1.0000
test t2 idx3 1 8.5000
+test t2 idx3 2 1.0000
+test t2 idx3 3 1.0000
+test t2 idx4 1 6.2000
+test t2 idx4 2 1.7222
+test t2 idx4 3 1.1154
+test t2 idx4 4 1.0000
ANALYZE TABLE t2 PERSISTENT FOR COLUMNS ALL INDEXES ALL;
Table Op Msg_type Msg_text
test.t2 analyze status OK
@@ -1172,13 +1182,13 @@ test t2 f 1 5 0.2000 1.0000 6.4000 0 NULL NULL
SELECT * FROM mysql.index_stats;
db_name table_name index_name prefix_arity avg_frequency
test t2 idx3 1 8.5000
-test t2 idx3 2 1.0000
-test t2 idx2 3 1.0000
test t2 idx2 1 7.0000
test t2 idx2 2 2.3846
-test t2 idx4 3 1.0000
+test t2 idx2 3 1.0000
test t2 idx4 1 6.2000
test t2 idx4 2 2.2308
+test t2 idx4 3 1.0000
+test t2 idx3 2 1.0000
test t2 PRIMARY 1 1.0000
ANALYZE TABLE t1;
Table Op Msg_type Msg_text
@@ -1198,17 +1208,17 @@ test t1 f 1 5 0.2000 1.0000 6.4000 0 NULL NULL
test t1 b NULL NULL 0.2000 17.1250 NULL NULL NULL NULL
SELECT * FROM mysql.index_stats;
db_name table_name index_name prefix_arity avg_frequency
-test t2 idx3 1 8.5000
-test t2 idx3 2 1.0000
-test t2 idx2 3 1.0000
-test t2 idx2 1 7.0000
-test t2 idx2 2 2.3846
test t1 idx2 1 7.0000
+test t2 idx3 1 8.5000
test t1 idx3 1 8.5000
test t1 PRIMARY 1 1.0000
-test t2 idx4 3 1.0000
+test t2 idx2 1 7.0000
+test t2 idx2 2 2.3846
+test t2 idx2 3 1.0000
test t2 idx4 1 6.2000
test t2 idx4 2 2.2308
+test t2 idx4 3 1.0000
+test t2 idx3 2 1.0000
test t2 PRIMARY 1 1.0000
test t1 idx2 2 2.3846
test t1 idx1 1 NULL
diff --git a/mysql-test/r/strict.result b/mysql-test/r/strict.result
index cee4cf3ebe6..1321545798f 100644
--- a/mysql-test/r/strict.result
+++ b/mysql-test/r/strict.result
@@ -1190,8 +1190,6 @@ select'a'; insert into t1 values (200); end;|
call t1();
a
a
-Warnings:
-Error 1264 Out of range value for column 'col1' at row 1
select * from t1;
col1
drop procedure t1;
@@ -1501,3 +1499,30 @@ count(*)
0
drop table t1;
End of 5.0 tests
+#
+# Start of 5.6 tests
+#
+#
+# WL#946 TIME/TIMESTAMP/DATETIME with fractional seconds: CAST to DATETIME
+#
+#
+# STR_TO_DATE with NO_ZERO_DATE did not return NULL (with warning)
+# in get_date(). Only did in val_str() and val_int().
+SET sql_mode='NO_ZERO_DATE';
+SELECT STR_TO_DATE('2001','%Y'),CONCAT(STR_TO_DATE('2001','%Y')), STR_TO_DATE('2001','%Y')+1, STR_TO_DATE('0','%Y')+1, STR_TO_DATE('0000','%Y')+1;
+STR_TO_DATE('2001','%Y') CONCAT(STR_TO_DATE('2001','%Y')) STR_TO_DATE('2001','%Y')+1 STR_TO_DATE('0','%Y')+1 STR_TO_DATE('0000','%Y')+1
+2001-00-00 2001-00-00 20010001 20000001 NULL
+Warnings:
+Warning 1411 Incorrect datetime value: '0000' for function str_to_date
+SET sql_mode='NO_ZERO_IN_DATE';
+SELECT STR_TO_DATE('2001','%Y'),CONCAT(STR_TO_DATE('2001','%Y')), STR_TO_DATE('2001','%Y')+1, STR_TO_DATE('0000','%Y')+1;
+STR_TO_DATE('2001','%Y') CONCAT(STR_TO_DATE('2001','%Y')) STR_TO_DATE('2001','%Y')+1 STR_TO_DATE('0000','%Y')+1
+NULL NULL NULL NULL
+Warnings:
+Warning 1411 Incorrect datetime value: '2001' for function str_to_date
+Warning 1411 Incorrect datetime value: '2001' for function str_to_date
+Warning 1411 Incorrect datetime value: '2001' for function str_to_date
+Warning 1411 Incorrect datetime value: '0000' for function str_to_date
+#
+# End of 5.6 tests
+#
diff --git a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result
index eac53365329..824079c3a59 100644
--- a/mysql-test/r/subselect.result
+++ b/mysql-test/r/subselect.result
@@ -5898,7 +5898,7 @@ CREATE TABLE t2 ( f3 int, f10 int, KEY (f10,f3)) ;
INSERT IGNORE INTO t2 VALUES (NULL,NULL),(5,0);
DROP TABLE IF EXISTS t3;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
CREATE TABLE t3 ( f3 int) ;
INSERT INTO t3 VALUES (0),(0);
SELECT a1.f3 AS r FROM t2 AS a1 , t1 WHERE a1.f3 < ALL ( SELECT f3 FROM t3 WHERE f3 = 1 ) ;
diff --git a/mysql-test/r/subselect2.result b/mysql-test/r/subselect2.result
index 4fd303dfd44..38e955e349c 100644
--- a/mysql-test/r/subselect2.result
+++ b/mysql-test/r/subselect2.result
@@ -184,7 +184,7 @@ DROP TABLE t1,t2,t3;
#
DROP TABLE IF EXISTS `t1`;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE `t1` (
`node_uid` bigint(20) unsigned DEFAULT NULL,
`date` datetime DEFAULT NULL,
diff --git a/mysql-test/r/subselect4.result b/mysql-test/r/subselect4.result
index ff768886434..df7733a1cd3 100644
--- a/mysql-test/r/subselect4.result
+++ b/mysql-test/r/subselect4.result
@@ -2174,7 +2174,7 @@ FROM t1 AS alias1
) IS NOT NULL;
ERROR 21000: Subquery returns more than 1 row
DROP TABLE t2;
-ERROR 42S02: Unknown table 't2'
+ERROR 42S02: Unknown table 'test.t2'
DROP TABLE t1;
#
# LP BUG#1000649 EXPLAIN shows incorrectly a non-correlated constant IN subquery is correlated
diff --git a/mysql-test/r/subselect_innodb.result b/mysql-test/r/subselect_innodb.result
index a5800883711..e9e1ccd0bf6 100644
--- a/mysql-test/r/subselect_innodb.result
+++ b/mysql-test/r/subselect_innodb.result
@@ -437,8 +437,8 @@ drop table t1;
#
drop table if exists `t1`,`t2`;
Warnings:
-Note 1051 Unknown table 't1'
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'test.t1'
+Note 1051 Unknown table 'test.t2'
create table `t1`(`a` char(1) character set utf8)engine=innodb;
create table `t2`(`b` char(1) character set utf8)engine=memory;
select distinct (select 1 from `t2` where `a`) `d2` from `t1`;
diff --git a/mysql-test/r/subselect_no_mat.result b/mysql-test/r/subselect_no_mat.result
index bf98b912a9e..627e5b03e32 100644
--- a/mysql-test/r/subselect_no_mat.result
+++ b/mysql-test/r/subselect_no_mat.result
@@ -5899,7 +5899,7 @@ CREATE TABLE t2 ( f3 int, f10 int, KEY (f10,f3)) ;
INSERT IGNORE INTO t2 VALUES (NULL,NULL),(5,0);
DROP TABLE IF EXISTS t3;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
CREATE TABLE t3 ( f3 int) ;
INSERT INTO t3 VALUES (0),(0);
SELECT a1.f3 AS r FROM t2 AS a1 , t1 WHERE a1.f3 < ALL ( SELECT f3 FROM t3 WHERE f3 = 1 ) ;
diff --git a/mysql-test/r/subselect_no_opts.result b/mysql-test/r/subselect_no_opts.result
index 05f7a25e0ef..5368198f77a 100644
--- a/mysql-test/r/subselect_no_opts.result
+++ b/mysql-test/r/subselect_no_opts.result
@@ -5895,7 +5895,7 @@ CREATE TABLE t2 ( f3 int, f10 int, KEY (f10,f3)) ;
INSERT IGNORE INTO t2 VALUES (NULL,NULL),(5,0);
DROP TABLE IF EXISTS t3;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
CREATE TABLE t3 ( f3 int) ;
INSERT INTO t3 VALUES (0),(0);
SELECT a1.f3 AS r FROM t2 AS a1 , t1 WHERE a1.f3 < ALL ( SELECT f3 FROM t3 WHERE f3 = 1 ) ;
diff --git a/mysql-test/r/subselect_no_scache.result b/mysql-test/r/subselect_no_scache.result
index ee84bfd1eca..ad0d2ffe6a6 100644
--- a/mysql-test/r/subselect_no_scache.result
+++ b/mysql-test/r/subselect_no_scache.result
@@ -5904,7 +5904,7 @@ CREATE TABLE t2 ( f3 int, f10 int, KEY (f10,f3)) ;
INSERT IGNORE INTO t2 VALUES (NULL,NULL),(5,0);
DROP TABLE IF EXISTS t3;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
CREATE TABLE t3 ( f3 int) ;
INSERT INTO t3 VALUES (0),(0);
SELECT a1.f3 AS r FROM t2 AS a1 , t1 WHERE a1.f3 < ALL ( SELECT f3 FROM t3 WHERE f3 = 1 ) ;
diff --git a/mysql-test/r/subselect_no_semijoin.result b/mysql-test/r/subselect_no_semijoin.result
index 5a7e303f4b9..39cc060e955 100644
--- a/mysql-test/r/subselect_no_semijoin.result
+++ b/mysql-test/r/subselect_no_semijoin.result
@@ -5895,7 +5895,7 @@ CREATE TABLE t2 ( f3 int, f10 int, KEY (f10,f3)) ;
INSERT IGNORE INTO t2 VALUES (NULL,NULL),(5,0);
DROP TABLE IF EXISTS t3;
Warnings:
-Note 1051 Unknown table 't3'
+Note 1051 Unknown table 'test.t3'
CREATE TABLE t3 ( f3 int) ;
INSERT INTO t3 VALUES (0),(0);
SELECT a1.f3 AS r FROM t2 AS a1 , t1 WHERE a1.f3 < ALL ( SELECT f3 FROM t3 WHERE f3 = 1 ) ;
diff --git a/mysql-test/r/system_mysql_db.result b/mysql-test/r/system_mysql_db.result
index 08c17b1afd5..eda11d95b22 100644
--- a/mysql-test/r/system_mysql_db.result
+++ b/mysql-test/r/system_mysql_db.result
@@ -15,15 +15,11 @@ host
index_stats
innodb_index_stats
innodb_table_stats
-ndb_binlog_index
plugin
proc
procs_priv
proxies_priv
servers
-slave_master_info
-slave_relay_log_info
-slave_worker_info
slow_log
table_stats
tables_priv
@@ -131,6 +127,7 @@ user CREATE TABLE `user` (
`max_user_connections` int(11) NOT NULL DEFAULT '0',
`plugin` char(64) CHARACTER SET latin1 NOT NULL DEFAULT '',
`authentication_string` text COLLATE utf8_bin NOT NULL,
+ `password_expired` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
PRIMARY KEY (`Host`,`User`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Users and global privileges'
show create table func;
@@ -253,7 +250,7 @@ Table Create Table
general_log CREATE TABLE `general_log` (
`event_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
`user_host` mediumtext NOT NULL,
- `thread_id` int(11) NOT NULL,
+ `thread_id` bigint(21) unsigned NOT NULL,
`server_id` int(10) unsigned NOT NULL,
`command_type` varchar(64) NOT NULL,
`argument` mediumtext NOT NULL
@@ -271,7 +268,8 @@ slow_log CREATE TABLE `slow_log` (
`last_insert_id` int(11) NOT NULL,
`insert_id` int(11) NOT NULL,
`server_id` int(10) unsigned NOT NULL,
- `sql_text` mediumtext NOT NULL
+ `sql_text` mediumtext NOT NULL,
+ `thread_id` bigint(21) unsigned NOT NULL
) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='Slow log'
show create table table_stats;
Table Create Table
diff --git a/mysql-test/r/system_mysql_db_fix40123.result b/mysql-test/r/system_mysql_db_fix40123.result
index 2f76ee654c8..71e94a7432d 100644
--- a/mysql-test/r/system_mysql_db_fix40123.result
+++ b/mysql-test/r/system_mysql_db_fix40123.result
@@ -15,15 +15,11 @@ host
index_stats
innodb_index_stats
innodb_table_stats
-ndb_binlog_index
plugin
proc
procs_priv
proxies_priv
servers
-slave_master_info
-slave_relay_log_info
-slave_worker_info
slow_log
table_stats
tables_priv
@@ -131,6 +127,7 @@ user CREATE TABLE `user` (
`max_user_connections` int(11) NOT NULL DEFAULT '0',
`plugin` char(64) CHARACTER SET latin1 NOT NULL DEFAULT '',
`authentication_string` text COLLATE utf8_bin NOT NULL,
+ `password_expired` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
PRIMARY KEY (`Host`,`User`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Users and global privileges'
show create table func;
@@ -253,7 +250,7 @@ Table Create Table
general_log CREATE TABLE `general_log` (
`event_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
`user_host` mediumtext NOT NULL,
- `thread_id` int(11) NOT NULL,
+ `thread_id` bigint(21) unsigned NOT NULL,
`server_id` int(10) unsigned NOT NULL,
`command_type` varchar(64) NOT NULL,
`argument` mediumtext NOT NULL
@@ -271,7 +268,8 @@ slow_log CREATE TABLE `slow_log` (
`last_insert_id` int(11) NOT NULL,
`insert_id` int(11) NOT NULL,
`server_id` int(10) unsigned NOT NULL,
- `sql_text` mediumtext NOT NULL
+ `sql_text` mediumtext NOT NULL,
+ `thread_id` bigint(21) unsigned NOT NULL
) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='Slow log'
show create table table_stats;
Table Create Table
diff --git a/mysql-test/r/system_mysql_db_fix50030.result b/mysql-test/r/system_mysql_db_fix50030.result
index 2f76ee654c8..71e94a7432d 100644
--- a/mysql-test/r/system_mysql_db_fix50030.result
+++ b/mysql-test/r/system_mysql_db_fix50030.result
@@ -15,15 +15,11 @@ host
index_stats
innodb_index_stats
innodb_table_stats
-ndb_binlog_index
plugin
proc
procs_priv
proxies_priv
servers
-slave_master_info
-slave_relay_log_info
-slave_worker_info
slow_log
table_stats
tables_priv
@@ -131,6 +127,7 @@ user CREATE TABLE `user` (
`max_user_connections` int(11) NOT NULL DEFAULT '0',
`plugin` char(64) CHARACTER SET latin1 NOT NULL DEFAULT '',
`authentication_string` text COLLATE utf8_bin NOT NULL,
+ `password_expired` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
PRIMARY KEY (`Host`,`User`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Users and global privileges'
show create table func;
@@ -253,7 +250,7 @@ Table Create Table
general_log CREATE TABLE `general_log` (
`event_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
`user_host` mediumtext NOT NULL,
- `thread_id` int(11) NOT NULL,
+ `thread_id` bigint(21) unsigned NOT NULL,
`server_id` int(10) unsigned NOT NULL,
`command_type` varchar(64) NOT NULL,
`argument` mediumtext NOT NULL
@@ -271,7 +268,8 @@ slow_log CREATE TABLE `slow_log` (
`last_insert_id` int(11) NOT NULL,
`insert_id` int(11) NOT NULL,
`server_id` int(10) unsigned NOT NULL,
- `sql_text` mediumtext NOT NULL
+ `sql_text` mediumtext NOT NULL,
+ `thread_id` bigint(21) unsigned NOT NULL
) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='Slow log'
show create table table_stats;
Table Create Table
diff --git a/mysql-test/r/system_mysql_db_fix50117.result b/mysql-test/r/system_mysql_db_fix50117.result
index 2f76ee654c8..71e94a7432d 100644
--- a/mysql-test/r/system_mysql_db_fix50117.result
+++ b/mysql-test/r/system_mysql_db_fix50117.result
@@ -15,15 +15,11 @@ host
index_stats
innodb_index_stats
innodb_table_stats
-ndb_binlog_index
plugin
proc
procs_priv
proxies_priv
servers
-slave_master_info
-slave_relay_log_info
-slave_worker_info
slow_log
table_stats
tables_priv
@@ -131,6 +127,7 @@ user CREATE TABLE `user` (
`max_user_connections` int(11) NOT NULL DEFAULT '0',
`plugin` char(64) CHARACTER SET latin1 NOT NULL DEFAULT '',
`authentication_string` text COLLATE utf8_bin NOT NULL,
+ `password_expired` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N',
PRIMARY KEY (`Host`,`User`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Users and global privileges'
show create table func;
@@ -253,7 +250,7 @@ Table Create Table
general_log CREATE TABLE `general_log` (
`event_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6),
`user_host` mediumtext NOT NULL,
- `thread_id` int(11) NOT NULL,
+ `thread_id` bigint(21) unsigned NOT NULL,
`server_id` int(10) unsigned NOT NULL,
`command_type` varchar(64) NOT NULL,
`argument` mediumtext NOT NULL
@@ -271,7 +268,8 @@ slow_log CREATE TABLE `slow_log` (
`last_insert_id` int(11) NOT NULL,
`insert_id` int(11) NOT NULL,
`server_id` int(10) unsigned NOT NULL,
- `sql_text` mediumtext NOT NULL
+ `sql_text` mediumtext NOT NULL,
+ `thread_id` bigint(21) unsigned NOT NULL
) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='Slow log'
show create table table_stats;
Table Create Table
diff --git a/mysql-test/r/temp_table.result b/mysql-test/r/temp_table.result
index 33f5c6b5165..a5182a03e63 100644
--- a/mysql-test/r/temp_table.result
+++ b/mysql-test/r/temp_table.result
@@ -1,5 +1,25 @@
drop table if exists t1,t2;
drop view if exists v1;
+#
+# test basic creation of temporary tables together with normal table
+#
+create table t1 (a int);
+create temporary table t1 AS SELECT 1;
+create temporary table t1 AS SELECT 1;
+ERROR 42S01: Table 't1' already exists
+create temporary table t1 (a int);
+ERROR 42S01: Table 't1' already exists
+drop temporary table t1;
+drop table t1;
+create temporary table t1 AS SELECT 1;
+create temporary table t1 AS SELECT 1;
+ERROR 42S01: Table 't1' already exists
+create temporary table t1 (a int);
+ERROR 42S01: Table 't1' already exists
+drop temporary table t1;
+#
+# Test with rename
+#
CREATE TABLE t1 (c int not null, d char (10) not null);
insert into t1 values(1,""),(2,"a"),(3,"b");
CREATE TEMPORARY TABLE t1 (a int not null, b char (10) not null);
@@ -145,7 +165,7 @@ DROP TABLE t1;
CREATE TABLE t1 (i INT);
CREATE TEMPORARY TABLE t2 (i INT);
DROP TEMPORARY TABLE t2, t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SELECT * FROM t2;
ERROR 42S02: Table 'test.t2' doesn't exist
SELECT * FROM t1;
diff --git a/mysql-test/r/truncate_coverage.result b/mysql-test/r/truncate_coverage.result
index 728702f7ab5..395c71b2e6b 100644
--- a/mysql-test/r/truncate_coverage.result
+++ b/mysql-test/r/truncate_coverage.result
@@ -11,7 +11,7 @@ HANDLER t1 OPEN;
#
# connection default
LOCK TABLE t1 WRITE;
-SET DEBUG_SYNC='mdl_upgrade_shared_lock_to_exclusive SIGNAL waiting';
+SET DEBUG_SYNC='mdl_upgrade_lock SIGNAL waiting';
TRUNCATE TABLE t1;
#
# connection con2
@@ -37,7 +37,7 @@ HANDLER t1 OPEN;
#
# connection default
LOCK TABLE t1 WRITE;
-SET DEBUG_SYNC='mdl_upgrade_shared_lock_to_exclusive SIGNAL waiting';
+SET DEBUG_SYNC='mdl_upgrade_lock SIGNAL waiting';
TRUNCATE TABLE t1;
#
# connection con2
@@ -50,7 +50,7 @@ HANDLER t1 CLOSE;
ERROR 42S02: Table 'test.t1' doesn't exist
UNLOCK TABLES;
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SET DEBUG_SYNC='RESET';
CREATE TABLE t1 (c1 INT);
INSERT INTO t1 VALUES (1);
diff --git a/mysql-test/r/type_newdecimal.result b/mysql-test/r/type_newdecimal.result
index 6b6b5a2392c..5b3594fe503 100644
--- a/mysql-test/r/type_newdecimal.result
+++ b/mysql-test/r/type_newdecimal.result
@@ -680,7 +680,7 @@ select 0.8 = 0.7 + 0.1;
1
drop table if exists t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
create table t1 (col1 decimal(38));
insert into t1 values (12345678901234567890123456789012345678);
select * from t1;
diff --git a/mysql-test/r/upgrade.result b/mysql-test/r/upgrade.result
index d565bb2dbd6..d9252791c0a 100644
--- a/mysql-test/r/upgrade.result
+++ b/mysql-test/r/upgrade.result
@@ -50,6 +50,10 @@ show tables;
Tables_in_test
txu#p#p1
txu@0023p@0023p1
+insert into `txu@0023p@0023p1` values (2);
+select * from `txu@0023p@0023p1`;
+s1
+2
select * from `txu#p#p1`;
s1
1
diff --git a/mysql-test/r/view.result b/mysql-test/r/view.result
index 40d6c71b5af..98b4b77e425 100644
--- a/mysql-test/r/view.result
+++ b/mysql-test/r/view.result
@@ -205,7 +205,7 @@ ERROR 42S02: Unknown table 'v100'
drop view t1;
ERROR HY000: 'test.t1' is not VIEW
drop table v1;
-ERROR 42S02: Unknown table 'v1'
+ERROR 42S02: Unknown table 'test.v1'
drop view v1,v2;
drop table t1;
create table t1 (a int);
@@ -3951,8 +3951,6 @@ create view a as select 1;
end|
call p();
call p();
-Warnings:
-Error 1050 Table 'a' already exists
drop view a;
drop procedure p;
#
diff --git a/mysql-test/r/view_grant.result b/mysql-test/r/view_grant.result
index bfd09bfa9cd..2078bc6d63f 100644
--- a/mysql-test/r/view_grant.result
+++ b/mysql-test/r/view_grant.result
@@ -357,13 +357,9 @@ use mysqltest;
select * from v1;
f2()
NULL
-Warnings:
-Warning 1329 No data - zero rows fetched, selected, or processed
select * from v2;
f2()
NULL
-Warnings:
-Warning 1329 No data - zero rows fetched, selected, or processed
select * from v3;
ERROR HY000: View 'mysqltest.v3' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them
select * from v4;
@@ -403,13 +399,9 @@ ERROR HY000: View 'mysqltest.v2' references invalid table(s) or column(s) or fun
select * from v3;
f2()
NULL
-Warnings:
-Warning 1329 No data - zero rows fetched, selected, or processed
select * from v4;
f2()
NULL
-Warnings:
-Warning 1329 No data - zero rows fetched, selected, or processed
select * from v5;
ERROR HY000: View 'mysqltest.v5' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them
drop view v1, v2, v3, v4, v5;
diff --git a/mysql-test/r/warnings.result b/mysql-test/r/warnings.result
index e033b358b6b..b4b345ca260 100644
--- a/mysql-test/r/warnings.result
+++ b/mysql-test/r/warnings.result
@@ -48,13 +48,13 @@ drop table t1;
set SQL_WARNINGS=0;
drop temporary table if exists not_exists;
Warnings:
-Note 1051 Unknown table 'not_exists'
+Note 1051 Unknown table 'test.not_exists'
drop table if exists not_exists_table;
Warnings:
-Note 1051 Unknown table 'not_exists_table'
+Note 1051 Unknown table 'test.not_exists_table'
show warnings limit 1;
Level Code Message
-Note 1051 Unknown table 'not_exists_table'
+Note 1051 Unknown table 'test.not_exists_table'
drop database if exists not_exists_db;
Warnings:
Note 1008 Can't drop database 'not_exists_db'; database doesn't exist
@@ -311,10 +311,10 @@ insert into t2 values(@q);
ERROR 22001: Data too long for column 'c_tinyblob' at row 1
drop table t1, t2;
DROP TABLE t1;
-ERROR 42S02: Unknown table 't1'
+ERROR 42S02: Unknown table 'test.t1'
SHOW ERRORS;
Level Code Message
-Error 1051 Unknown table 't1'
+Error 1051 Unknown table 'test.t1'
End of 5.0 tests
set sql_mode = default;
select CAST(a AS DECIMAL(13,5)) FROM (SELECT '' as a) t;
diff --git a/mysql-test/suite/archive/archive.result b/mysql-test/suite/archive/archive.result
index a491b5bb9ee..67bbd3eb5e6 100644
--- a/mysql-test/suite/archive/archive.result
+++ b/mysql-test/suite/archive/archive.result
@@ -12731,15 +12731,6 @@ id id name name
DROP TABLE t1,t2;
flush tables;
SHOW CREATE TABLE t1;
-ERROR HY000: Table upgrade required. Please do "REPAIR TABLE `t1`" or dump/reload to fix it!
-SELECT * FROM t1;
-ERROR HY000: Table upgrade required. Please do "REPAIR TABLE `t1`" or dump/reload to fix it!
-INSERT INTO t1 (col1, col2) VALUES (1, "value");
-ERROR HY000: Table upgrade required. Please do "REPAIR TABLE `t1`" or dump/reload to fix it!
-REPAIR TABLE t1;
-Table Op Msg_type Msg_text
-test.t1 repair status OK
-SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`col1` int(11) DEFAULT NULL,
@@ -12747,6 +12738,10 @@ t1 CREATE TABLE `t1` (
) ENGINE=ARCHIVE DEFAULT CHARSET=latin1
SELECT * FROM t1;
col1 col2
+INSERT INTO t1 (col1, col2) VALUES (1, "value");
+REPAIR TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 repair error Corrupt
DROP TABLE t1;
#
# Ensure that TRUNCATE fails for non-empty archive tables.
@@ -12811,6 +12806,14 @@ Table Op Msg_type Msg_text
test.t1 optimize status OK
DROP TABLE t1;
#
+# Bug#13907676: HA_ARCHIVE::INFO
+#
+CREATE TABLE t1 (a INT) ENGINE=ARCHIVE;
+CREATE TABLE t2 SELECT * FROM t1;
+SELECT * FROM t2;
+a
+DROP TABLE t1, t2;
+#
# BUG#917689 Using wrong archive table causes crash
#
create table t1 (a int, b char(50)) engine=archive;
@@ -12821,7 +12824,6 @@ select * from t1;
ERROR HY000: Table 't1' is marked as crashed and should be repaired
show warnings;
Level Code Message
-Warning 127 Got error 127 when reading table `test`.`t1`
Error 1194 Table 't1' is marked as crashed and should be repaired
drop table t1;
create temporary table t1 (a int) engine=archive;
@@ -12835,7 +12837,7 @@ show create table t1;
Table Create Table
t1 CREATE TEMPORARY TABLE `t1` (
`a` int(11) DEFAULT NULL
-) ENGINE=ARCHIVE DEFAULT CHARSET=latin1
+) ENGINE=ARCHIVE DEFAULT CHARSET=latin1 DELAY_KEY_WRITE=1
alter table t1 add column b varchar(10);
select * from t1;
a b
diff --git a/mysql-test/suite/archive/archive.test b/mysql-test/suite/archive/archive.test
index 1114eb4e89a..5d96aa256fb 100644
--- a/mysql-test/suite/archive/archive.test
+++ b/mysql-test/suite/archive/archive.test
@@ -1653,18 +1653,13 @@ copy_file std_data/bug47012.frm $MYSQLD_DATADIR/test/t1.frm;
copy_file std_data/bug47012.ARZ $MYSQLD_DATADIR/test/t1.ARZ;
copy_file std_data/bug47012.ARM $MYSQLD_DATADIR/test/t1.ARM;
---error ER_TABLE_NEEDS_UPGRADE
SHOW CREATE TABLE t1;
---error ER_TABLE_NEEDS_UPGRADE
SELECT * FROM t1;
---error ER_TABLE_NEEDS_UPGRADE
INSERT INTO t1 (col1, col2) VALUES (1, "value");
REPAIR TABLE t1;
-SHOW CREATE TABLE t1;
-SELECT * FROM t1;
DROP TABLE t1;
remove_file $MYSQLD_DATADIR/test/t1.ARM;
@@ -1700,6 +1695,7 @@ REPAIR TABLE t1 EXTENDED;
SELECT * FROM t1;
DROP TABLE t1;
+
--echo #
--echo # BUG#57162 - valgrind errors, random data when returning
--echo # ordered data from archive tables
@@ -1715,7 +1711,6 @@ SELECT * FROM t1 ORDER BY f LIMIT 1;
DROP TABLE t1;
SET sort_buffer_size=DEFAULT;
-
--echo #
--echo # BUG#11758979 - 51252: ARCHIVE TABLES STILL FAIL UNDER STRESS
--echo # TESTS: CRASH, CORRUPTION, 4G MEMOR
@@ -1731,6 +1726,14 @@ OPTIMIZE TABLE t1;
DROP TABLE t1;
--echo #
+--echo # Bug#13907676: HA_ARCHIVE::INFO
+--echo #
+CREATE TABLE t1 (a INT) ENGINE=ARCHIVE;
+CREATE TABLE t2 SELECT * FROM t1;
+SELECT * FROM t2;
+DROP TABLE t1, t2;
+
+--echo #
--echo # BUG#917689 Using wrong archive table causes crash
--echo #
create table t1 (a int, b char(50)) engine=archive;
diff --git a/mysql-test/suite/binlog/r/binlog_row_mix_innodb_myisam.result b/mysql-test/suite/binlog/r/binlog_row_mix_innodb_myisam.result
index c0062ddbaaa..8c008ae0bce 100644
--- a/mysql-test/suite/binlog/r/binlog_row_mix_innodb_myisam.result
+++ b/mysql-test/suite/binlog/r/binlog_row_mix_innodb_myisam.result
@@ -315,7 +315,7 @@ CREATE TABLE t2 (primary key (a)) engine=innodb select * from t1;
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
DROP TABLE if exists t2;
Warnings:
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'test.t2'
INSERT INTO t1 values (3,3);
CREATE TEMPORARY TABLE t2 (primary key (a)) engine=innodb select * from t1;
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
@@ -324,7 +324,7 @@ Warnings:
Warning 1196 Some non-transactional changed tables couldn't be rolled back
DROP TABLE IF EXISTS t2;
Warnings:
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'test.t2'
CREATE TABLE t2 (a int, b int, primary key (a)) engine=innodb;
INSERT INTO t1 VALUES (4,4);
CREATE TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select * from t1;
diff --git a/mysql-test/suite/binlog/r/binlog_stm_mix_innodb_myisam.result b/mysql-test/suite/binlog/r/binlog_stm_mix_innodb_myisam.result
index 03fcb481245..10f3f5ddf2a 100644
--- a/mysql-test/suite/binlog/r/binlog_stm_mix_innodb_myisam.result
+++ b/mysql-test/suite/binlog/r/binlog_stm_mix_innodb_myisam.result
@@ -309,7 +309,7 @@ CREATE TABLE t2 (primary key (a)) engine=innodb select * from t1;
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
DROP TABLE if exists t2;
Warnings:
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'test.t2'
INSERT INTO t1 values (3,3);
CREATE TEMPORARY TABLE t2 (primary key (a)) engine=innodb select * from t1;
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
@@ -318,7 +318,7 @@ Warnings:
Warning 1196 Some non-transactional changed tables couldn't be rolled back
DROP TABLE IF EXISTS t2;
Warnings:
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'test.t2'
CREATE TABLE t2 (a int, b int, primary key (a)) engine=innodb;
INSERT INTO t1 VALUES (4,4);
CREATE TABLE IF NOT EXISTS t2 (primary key (a)) engine=innodb select * from t1;
diff --git a/mysql-test/suite/csv/csv.result b/mysql-test/suite/csv/csv.result
index be8ffa8f9e5..5ac79a23c47 100644
--- a/mysql-test/suite/csv/csv.result
+++ b/mysql-test/suite/csv/csv.result
@@ -4927,9 +4927,9 @@ period
9410
drop table if exists t1,t2,t3,t4;
Warnings:
-Note 1051 Unknown table 't2'
-Note 1051 Unknown table 't3'
-Note 1051 Unknown table 't4'
+Note 1051 Unknown table 'test.t2'
+Note 1051 Unknown table 'test.t3'
+Note 1051 Unknown table 'test.t4'
DROP TABLE IF EXISTS bug13894;
CREATE TABLE bug13894 ( val integer not null ) ENGINE = CSV;
INSERT INTO bug13894 VALUES (5);
diff --git a/mysql-test/suite/federated/federated_server.result b/mysql-test/suite/federated/federated_server.result
index e2a5d60302b..3855964e2ba 100644
--- a/mysql-test/suite/federated/federated_server.result
+++ b/mysql-test/suite/federated/federated_server.result
@@ -5,7 +5,7 @@ create database second_db;
use first_db;
DROP TABLE IF EXISTS first_db.t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'first_db.t1'
CREATE TABLE first_db.t1 (
`id` int(20) NOT NULL,
`name` varchar(64) NOT NULL default ''
@@ -13,7 +13,7 @@ CREATE TABLE first_db.t1 (
DEFAULT CHARSET=latin1;
DROP TABLE IF EXISTS first_db.t2;
Warnings:
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'first_db.t2'
CREATE TABLE first_db.t2 (
`id` int(20) NOT NULL,
`name` varchar(64) NOT NULL default ''
@@ -22,7 +22,7 @@ DEFAULT CHARSET=latin1;
use second_db;
DROP TABLE IF EXISTS second_db.t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'second_db.t1'
CREATE TABLE second_db.t1 (
`id` int(20) NOT NULL,
`name` varchar(64) NOT NULL default ''
@@ -30,7 +30,7 @@ CREATE TABLE second_db.t1 (
DEFAULT CHARSET=latin1;
DROP TABLE IF EXISTS second_db.t2;
Warnings:
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'second_db.t2'
CREATE TABLE second_db.t2 (
`id` int(20) NOT NULL,
`name` varchar(64) NOT NULL default ''
@@ -60,7 +60,7 @@ server_one 127.0.0.1 first_db root SLAVE_PORT mysql root
server_two 127.0.0.1 second_db root SLAVE_PORT mysql root
DROP TABLE IF EXISTS federated.old;
Warnings:
-Note 1051 Unknown table 'old'
+Note 1051 Unknown table 'federated.old'
CREATE TABLE federated.old (
`id` int(20) NOT NULL,
`name` varchar(64) NOT NULL default ''
@@ -73,7 +73,7 @@ id name
1 federated.old-> first_db.t1, url format
DROP TABLE IF EXISTS federated.old2;
Warnings:
-Note 1051 Unknown table 'old2'
+Note 1051 Unknown table 'federated.old2'
CREATE TABLE federated.old2 (
`id` int(20) NOT NULL,
`name` varchar(64) NOT NULL default ''
@@ -86,7 +86,7 @@ id name
1 federated.old2-> first_db.t2, url format
DROP TABLE IF EXISTS federated.urldb2t1;
Warnings:
-Note 1051 Unknown table 'urldb2t1'
+Note 1051 Unknown table 'federated.urldb2t1'
CREATE TABLE federated.urldb2t1 (
`id` int(20) NOT NULL,
`name` varchar(64) NOT NULL default ''
@@ -99,7 +99,7 @@ id name
1 federated.urldb2t1 -> second_db.t1, url format
DROP TABLE IF EXISTS federated.urldb2t2;
Warnings:
-Note 1051 Unknown table 'urldb2t2'
+Note 1051 Unknown table 'federated.urldb2t2'
CREATE TABLE federated.urldb2t2 (
`id` int(20) NOT NULL,
`name` varchar(64) NOT NULL default ''
@@ -112,7 +112,7 @@ id name
1 federated.urldb2t2 -> second_db.t2, url format
DROP TABLE IF EXISTS federated.t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'federated.t1'
CREATE TABLE federated.t1 (
`id` int(20) NOT NULL,
`name` varchar(64) NOT NULL default ''
@@ -126,7 +126,7 @@ id name
1 server_one, new scheme, first_db.t1
DROP TABLE IF EXISTS federated.whatever;
Warnings:
-Note 1051 Unknown table 'whatever'
+Note 1051 Unknown table 'federated.whatever'
CREATE TABLE federated.whatever (
`id` int(20) NOT NULL,
`name` varchar(64) NOT NULL default ''
diff --git a/mysql-test/suite/federated/federated_transactions.result b/mysql-test/suite/federated/federated_transactions.result
index 2b88f4d0f36..52a0686741e 100644
--- a/mysql-test/suite/federated/federated_transactions.result
+++ b/mysql-test/suite/federated/federated_transactions.result
@@ -2,7 +2,7 @@ CREATE DATABASE federated;
CREATE DATABASE federated;
DROP TABLE IF EXISTS federated.t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'federated.t1'
CREATE TABLE federated.t1 (
`id` int(20) NOT NULL,
`name` varchar(32) NOT NULL default ''
@@ -10,7 +10,7 @@ CREATE TABLE federated.t1 (
DEFAULT CHARSET=latin1 ENGINE=innodb;
DROP TABLE IF EXISTS federated.t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'federated.t1'
CREATE TABLE federated.t1 (
`id` int(20) NOT NULL,
`name` varchar(32) NOT NULL default ''
diff --git a/mysql-test/suite/federated/federatedx.result b/mysql-test/suite/federated/federatedx.result
index 5ae2abb46e5..c93ff1493d9 100644
--- a/mysql-test/suite/federated/federatedx.result
+++ b/mysql-test/suite/federated/federatedx.result
@@ -6,7 +6,7 @@ SET @OLD_SLAVE_CONCURRENT_INSERT= @@GLOBAL.CONCURRENT_INSERT;
SET @@GLOBAL.CONCURRENT_INSERT= 0;
DROP TABLE IF EXISTS federated.t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'federated.t1'
CREATE TABLE federated.t1 (
`id` int(20) NOT NULL,
`group` int NOT NULL default 0,
@@ -17,7 +17,7 @@ CREATE TABLE federated.t1 (
DEFAULT CHARSET=latin1;
DROP TABLE IF EXISTS federated.t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'federated.t1'
CREATE TABLE federated.t1 (
`id` int(20) NOT NULL,
`group` int NOT NULL default 0,
@@ -50,7 +50,7 @@ CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t3';
ERROR HY000: Can't create federated table. Foreign data src error: database: 'federated' username: 'root' hostname: '127.0.0.1'
DROP TABLE IF EXISTS federated.t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'federated.t1'
CREATE TABLE federated.t1 (
`id` int(20) NOT NULL,
`group` int NOT NULL default 0,
@@ -63,7 +63,7 @@ CONNECTION='mysql://user:pass@127.0.0.1:SLAVE_PORT/federated/t1';
ERROR HY000: Can't create federated table. Foreign data src error: database: 'federated' username: 'user' hostname: '127.0.0.1'
DROP TABLE IF EXISTS federated.t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'federated.t1'
CREATE TABLE federated.t1 (
`id` int(20) NOT NULL,
`group` int NOT NULL default 0,
@@ -89,7 +89,7 @@ DELETE FROM federated.t1;
DROP TABLE federated.t1;
DROP TABLE IF EXISTS federated.t2;
Warnings:
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'federated.t2'
CREATE TABLE federated.t2 (
`id` int(20) NOT NULL,
`name` varchar(32) NOT NULL default ''
@@ -112,7 +112,7 @@ DROP TABLE federated.t2;
DROP TABLE IF EXISTS federated.t1;
DROP TABLE IF EXISTS federated.`t1%`;
Warnings:
-Note 1051 Unknown table 't1%'
+Note 1051 Unknown table 'federated.t1%'
CREATE TABLE federated.`t1%` (
`id` int(20) NOT NULL,
`name` varchar(32) NOT NULL default ''
@@ -120,7 +120,7 @@ CREATE TABLE federated.`t1%` (
DEFAULT CHARSET=latin1;
DROP TABLE IF EXISTS federated.t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'federated.t1'
CREATE TABLE federated.t1 (
`id` int(20) NOT NULL,
`name` varchar(32) NOT NULL default ''
@@ -152,7 +152,7 @@ DROP TABLE IF EXISTS federated.`t1%`;
DROP TABLE IF EXISTS federated.`t1%`;
DROP TABLE IF EXISTS federated.t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'federated.t1'
CREATE TABLE federated.t1 (
`id` int(20) NOT NULL auto_increment,
`name` varchar(32) NOT NULL default '',
@@ -1391,7 +1391,7 @@ PRIMARY KEY (`id`),
key (country_id));
DROP TABLE IF EXISTS federated.countries;
Warnings:
-Note 1051 Unknown table 'countries'
+Note 1051 Unknown table 'federated.countries'
CREATE TABLE federated.countries (
`id` int(20) NOT NULL auto_increment,
`country` varchar(32),
@@ -1544,13 +1544,13 @@ drop table federated.t1;
drop table federated.t1;
DROP TABLE IF EXISTS federated.t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'federated.t1'
CREATE TABLE federated.t1 (
`id` int(20) NOT NULL auto_increment,
PRIMARY KEY (`id`));
DROP TABLE IF EXISTS federated.t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'federated.t1'
CREATE TABLE federated.t1 (
`id` int(20) NOT NULL auto_increment,
PRIMARY KEY (`id`)
diff --git a/mysql-test/suite/funcs_1/r/innodb_storedproc_02.result b/mysql-test/suite/funcs_1/r/innodb_storedproc_02.result
index 126d6c2080a..8b96e54eefd 100644
--- a/mysql-test/suite/funcs_1/r/innodb_storedproc_02.result
+++ b/mysql-test/suite/funcs_1/r/innodb_storedproc_02.result
@@ -347,8 +347,6 @@ CALL h1();
-7- 1 1 1 1 1 1
END x1 x2 x3 x4 x5 x6
END 1 1 1 1 1 1
-Warnings:
-Error 1062 Duplicate entry 'a' for key 'w'
DROP TABLE IF EXISTS tnull;
DROP PROCEDURE IF EXISTS sp1;
CREATE TABLE tnull(f1 int);
@@ -447,8 +445,6 @@ END//
CALL h2();
x1 x2 x3 x4 x5 x6
1 1 1 1 1 1
-Warnings:
-Error 1062 Duplicate entry 'a' for key 'w'
SELECT * FROM res_t1;
w x
a b
@@ -554,8 +550,6 @@ exit handler 2
exit handler 2
exit handler 1
exit handler 1
-Warnings:
-Error 1146 Table 'db_storedproc.tqq' doesn't exist
create table res_t1(w char unique, x char);
insert into res_t1 values ('a', 'b');
CREATE PROCEDURE h1 ()
@@ -586,8 +580,6 @@ END//
CALL h1();
x1 x2 x3 x4 x5 x6
1 1 1 1 1 1
-Warnings:
-Error 1062 Duplicate entry 'a' for key 'w'
This will fail, SQLSTATE 00000 is not allowed
CREATE PROCEDURE sp1()
begin1_label:BEGIN
@@ -631,8 +623,6 @@ CALL sp2();
NULL
@x2 @x
1 2
-Warnings:
-Error 1318 Incorrect number of arguments for PROCEDURE db_storedproc.sp1; expected 2, got 1
DROP PROCEDURE sp1;
DROP PROCEDURE sp2;
@@ -664,8 +654,6 @@ ERROR 42000: Incorrect number of arguments for PROCEDURE db_storedproc.sp1; expe
CALL sp2();
-1- @x2 @x
-1- 0 1
-Warnings:
-Error 1318 Incorrect number of arguments for PROCEDURE db_storedproc.sp1; expected 2, got 1
SELECT '-3-', @x2, @x;
-3- @x2 @x
-3- 1 1
@@ -708,8 +696,6 @@ CALL sp2();
-2- 1 20
-4- @x2 @x
-4- 11 22
-Warnings:
-Error 1318 Incorrect number of arguments for PROCEDURE db_storedproc.sp1; expected 2, got 1
DROP PROCEDURE sp1;
DROP PROCEDURE sp2;
@@ -776,33 +762,21 @@ SELECT @done, @x;
0 1
INSERT INTO temp VALUES('1', NULL);
CALL sp1();
-Warnings:
-Warning 1265 Data truncated for column 'f1' at row 1
-Warning 1265 Data truncated for column 'f2' at row 1
SELECT @done, @x;
@done @x
1 1
INSERT INTO temp VALUES('2', NULL);
CALL sp2();
-Warnings:
-Warning 1265 Data truncated for column 'f1' at row 1
-Warning 1265 Data truncated for column 'f2' at row 1
SELECT @done, @x;
@done @x
1 1
INSERT INTO temp VALUES('3', NULL);
CALL sp3();
-Warnings:
-Warning 1265 Data truncated for column 'f1' at row 1
-Warning 1265 Data truncated for column 'f2' at row 1
SELECT @done, @x;
@done @x
1 0
INSERT INTO temp VALUES('4', NULL);
CALL sp4();
-Warnings:
-Warning 1265 Data truncated for column 'f1' at row 1
-Warning 1265 Data truncated for column 'f2' at row 1
SELECT @done, @x;
@done @x
1 0
@@ -911,26 +885,18 @@ SELECT @done, @x;
@done @x
0 1
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT @done, @x;
@done @x
1 2
CALL sp2();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT @done, @x;
@done @x
1 2
CALL sp3();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT @done, @x;
@done @x
1 1
CALL sp4();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT @done, @x;
@done @x
1 1
@@ -1065,8 +1031,6 @@ SQLSTATE
21000
SQLSTATE
24000
-Warnings:
-Error 1326 Cursor is not open
SELECT '-1-', @x;
-1- @x
-1- 6
@@ -1077,24 +1041,18 @@ SQLSTATE
SQLEXCEPTION
SQLSTATE
24000
-Warnings:
-Error 1326 Cursor is not open
SELECT '-2-', @x;
-2- @x
-2- 6
CALL sp3();
SQLSTATE
20000
-Warnings:
-Error 1339 Case not found for CASE statement
SELECT '-3-', @x;
-3- @x
-3- 1
CALL sp4();
SQLSTATE
SQLEXCEPTION
-Warnings:
-Error 1339 Case not found for CASE statement
SELECT '-4-', @x;
-4- @x
-4- 1
@@ -1377,8 +1335,6 @@ CLOSE cur1;
CLOSE cur2;
END//
CALL sp_outer();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM temp1;
f0 cnt f1 f2 f3 f4
_sp_out_ 1 a` a` 1000-01-01 -5000
diff --git a/mysql-test/suite/funcs_1/r/innodb_trig_0102.result b/mysql-test/suite/funcs_1/r/innodb_trig_0102.result
index 330a9a315ee..77fabeaef77 100644
--- a/mysql-test/suite/funcs_1/r/innodb_trig_0102.result
+++ b/mysql-test/suite/funcs_1/r/innodb_trig_0102.result
@@ -185,7 +185,7 @@ Testcase 3.5.1.7: - need to fix
-------------------------------
drop table if exists t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
create table t1 (f1 int, f2 char(25),f3 int) engine = <engine_to_be_used>;
CREATE TRIGGER trg5_1 BEFORE INSERT on test.t1
for each row set new.f3 = '14';
diff --git a/mysql-test/suite/funcs_1/r/is_columns_mysql.result b/mysql-test/suite/funcs_1/r/is_columns_mysql.result
index 7b7b13c6a8c..e2fba35c7f8 100644
--- a/mysql-test/suite/funcs_1/r/is_columns_mysql.result
+++ b/mysql-test/suite/funcs_1/r/is_columns_mysql.result
@@ -72,7 +72,7 @@ def mysql general_log argument 6 NULL NO mediumtext 16777215 16777215 NULL NULL
def mysql general_log command_type 5 NULL NO varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) select,insert,update,references
def mysql general_log event_time 1 CURRENT_TIMESTAMP(6) NO timestamp NULL NULL NULL NULL 6 NULL NULL timestamp(6) on update CURRENT_TIMESTAMP select,insert,update,references
def mysql general_log server_id 4 NULL NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned select,insert,update,references
-def mysql general_log thread_id 3 NULL NO int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references
+def mysql general_log thread_id 3 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(21) unsigned select,insert,update,references
def mysql general_log user_host 2 NULL NO mediumtext 16777215 16777215 NULL NULL NULL utf8 utf8_general_ci mediumtext select,insert,update,references
def mysql gtid_slave_pos domain_id 1 NULL NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned PRI select,insert,update,references
def mysql gtid_slave_pos seq_no 4 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
@@ -117,13 +117,6 @@ def mysql index_stats db_name 1 NULL NO varchar 64 192 NULL NULL NULL utf8 utf8_
def mysql index_stats index_name 3 NULL NO varchar 64 192 NULL NULL NULL utf8 utf8_bin varchar(64) PRI select,insert,update,references
def mysql index_stats prefix_arity 4 NULL NO int NULL NULL 10 0 NULL NULL NULL int(11) unsigned PRI select,insert,update,references
def mysql index_stats table_name 2 NULL NO varchar 64 192 NULL NULL NULL utf8 utf8_bin varchar(64) PRI select,insert,update,references
-def mysql ndb_binlog_index deletes 6 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def mysql ndb_binlog_index epoch 3 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned PRI select,insert,update,references
-def mysql ndb_binlog_index File 2 NULL NO varchar 255 255 NULL NULL NULL latin1 latin1_swedish_ci varchar(255) select,insert,update,references
-def mysql ndb_binlog_index inserts 4 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def mysql ndb_binlog_index Position 1 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def mysql ndb_binlog_index schemaops 7 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def mysql ndb_binlog_index updates 5 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def mysql plugin dl 2 NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references
def mysql plugin name 1 NO varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) PRI select,insert,update,references
def mysql proc body 11 NULL NO longblob 4294967295 4294967295 NULL NULL NULL NULL NULL longblob select,insert,update,references
@@ -170,50 +163,6 @@ def mysql servers Server_name 1 NO char 64 192 NULL NULL NULL utf8 utf8_general
def mysql servers Socket 7 NO char 64 192 NULL NULL NULL utf8 utf8_general_ci char(64) select,insert,update,references
def mysql servers Username 4 NO char 64 192 NULL NULL NULL utf8 utf8_general_ci char(64) select,insert,update,references
def mysql servers Wrapper 8 NO char 64 192 NULL NULL NULL utf8 utf8_general_ci char(64) select,insert,update,references
-def mysql slave_master_info Bind 18 NULL YES text 65535 65535 NULL NULL NULL utf8 utf8_bin text select,insert,update,references Displays which interface is employed when connecting to the MySQL server
-def mysql slave_master_info Connect_retry 9 NULL NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned select,insert,update,references The period (in seconds) that the slave will wait before trying to reconnect to the master.
-def mysql slave_master_info Enabled_ssl 10 NULL NO tinyint NULL NULL 3 0 NULL NULL NULL tinyint(1) select,insert,update,references Indicates whether the server supports SSL connections.
-def mysql slave_master_info Heartbeat 17 NULL NO float NULL NULL 12 NULL NULL NULL NULL float select,insert,update,references
-def mysql slave_master_info Host 5 NULL YES text 65535 65535 NULL NULL NULL utf8 utf8_bin text select,insert,update,references The host name of the master.
-def mysql slave_master_info Ignored_server_ids 19 NULL YES text 65535 65535 NULL NULL NULL utf8 utf8_bin text select,insert,update,references The number of server IDs to be ignored, followed by the actual server IDs
-def mysql slave_master_info Master_id 1 NULL NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned PRI select,insert,update,references
-def mysql slave_master_info Master_log_name 3 NULL NO text 65535 65535 NULL NULL NULL utf8 utf8_bin text select,insert,update,references The name of the master binary log currently being read from the master.
-def mysql slave_master_info Master_log_pos 4 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references The master log position of the last read event.
-def mysql slave_master_info Number_of_lines 2 NULL NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned select,insert,update,references Number of lines in the file.
-def mysql slave_master_info Port 8 NULL NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned select,insert,update,references The network port used to connect to the master.
-def mysql slave_master_info Retry_count 21 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references Number of reconnect attempts, to the master, before giving up.
-def mysql slave_master_info Ssl_ca 11 NULL YES text 65535 65535 NULL NULL NULL utf8 utf8_bin text select,insert,update,references The file used for the Certificate Authority (CA) certificate.
-def mysql slave_master_info Ssl_capath 12 NULL YES text 65535 65535 NULL NULL NULL utf8 utf8_bin text select,insert,update,references The path to the Certificate Authority (CA) certificates.
-def mysql slave_master_info Ssl_cert 13 NULL YES text 65535 65535 NULL NULL NULL utf8 utf8_bin text select,insert,update,references The name of the SSL certificate file.
-def mysql slave_master_info Ssl_cipher 14 NULL YES text 65535 65535 NULL NULL NULL utf8 utf8_bin text select,insert,update,references The name of the cipher in use for the SSL connection.
-def mysql slave_master_info Ssl_crl 22 NULL YES text 65535 65535 NULL NULL NULL utf8 utf8_bin text select,insert,update,references The file used for the Certificate Revocation List (CRL)
-def mysql slave_master_info Ssl_crlpath 23 NULL YES text 65535 65535 NULL NULL NULL utf8 utf8_bin text select,insert,update,references The path used for Certificate Revocation List (CRL) files
-def mysql slave_master_info Ssl_key 15 NULL YES text 65535 65535 NULL NULL NULL utf8 utf8_bin text select,insert,update,references The name of the SSL key file.
-def mysql slave_master_info Ssl_verify_server_cert 16 NULL NO tinyint NULL NULL 3 0 NULL NULL NULL tinyint(1) select,insert,update,references Whether to verify the server certificate.
-def mysql slave_master_info User_name 6 NULL YES text 65535 65535 NULL NULL NULL utf8 utf8_bin text select,insert,update,references The user name used to connect to the master.
-def mysql slave_master_info User_password 7 NULL YES text 65535 65535 NULL NULL NULL utf8 utf8_bin text select,insert,update,references The password used to connect to the master.
-def mysql slave_master_info Uuid 20 NULL YES text 65535 65535 NULL NULL NULL utf8 utf8_bin text select,insert,update,references The master server uuid.
-def mysql slave_relay_log_info Master_id 1 NULL NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned PRI select,insert,update,references
-def mysql slave_relay_log_info Master_log_name 5 NULL NO text 65535 65535 NULL NULL NULL utf8 utf8_bin text select,insert,update,references The name of the master binary log file from which the events in the relay log file were read.
-def mysql slave_relay_log_info Master_log_pos 6 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references The master log position of the last executed event.
-def mysql slave_relay_log_info Number_of_lines 2 NULL NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned select,insert,update,references Number of lines in the file or rows in the table. Used to version table definitions.
-def mysql slave_relay_log_info Number_of_workers 8 NULL NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned select,insert,update,references
-def mysql slave_relay_log_info Relay_log_name 3 NULL NO text 65535 65535 NULL NULL NULL utf8 utf8_bin text select,insert,update,references The name of the current relay log file.
-def mysql slave_relay_log_info Relay_log_pos 4 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references The relay log position of the last executed event.
-def mysql slave_relay_log_info Sql_delay 7 NULL NO int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references The number of seconds that the slave must lag behind the master.
-def mysql slave_worker_info Checkpoint_group_bitmap 13 NULL NO blob 65535 65535 NULL NULL NULL NULL NULL blob select,insert,update,references
-def mysql slave_worker_info Checkpoint_group_size 12 NULL NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned select,insert,update,references
-def mysql slave_worker_info Checkpoint_master_log_name 9 NULL NO text 65535 65535 NULL NULL NULL utf8 utf8_bin text select,insert,update,references
-def mysql slave_worker_info Checkpoint_master_log_pos 10 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def mysql slave_worker_info Checkpoint_relay_log_name 7 NULL NO text 65535 65535 NULL NULL NULL utf8 utf8_bin text select,insert,update,references
-def mysql slave_worker_info Checkpoint_relay_log_pos 8 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def mysql slave_worker_info Checkpoint_seqno 11 NULL NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned select,insert,update,references
-def mysql slave_worker_info Master_id 1 NULL NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned PRI select,insert,update,references
-def mysql slave_worker_info Master_log_name 5 NULL NO text 65535 65535 NULL NULL NULL utf8 utf8_bin text select,insert,update,references
-def mysql slave_worker_info Master_log_pos 6 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def mysql slave_worker_info Relay_log_name 3 NULL NO text 65535 65535 NULL NULL NULL utf8 utf8_bin text select,insert,update,references
-def mysql slave_worker_info Relay_log_pos 4 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def mysql slave_worker_info Worker_id 2 NULL NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned PRI select,insert,update,references
def mysql slow_log db 7 NULL NO varchar 512 1536 NULL NULL NULL utf8 utf8_general_ci varchar(512) select,insert,update,references
def mysql slow_log insert_id 9 NULL NO int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references
def mysql slow_log last_insert_id 8 NULL NO int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references
@@ -224,6 +173,7 @@ def mysql slow_log rows_sent 5 NULL NO int NULL NULL 10 0 NULL NULL NULL int(11)
def mysql slow_log server_id 10 NULL NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned select,insert,update,references
def mysql slow_log sql_text 11 NULL NO mediumtext 16777215 16777215 NULL NULL NULL utf8 utf8_general_ci mediumtext select,insert,update,references
def mysql slow_log start_time 1 CURRENT_TIMESTAMP(6) NO timestamp NULL NULL NULL NULL 6 NULL NULL timestamp(6) on update CURRENT_TIMESTAMP select,insert,update,references
+def mysql slow_log thread_id 12 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(21) unsigned select,insert,update,references
def mysql slow_log user_host 2 NULL NO mediumtext 16777215 16777215 NULL NULL NULL utf8 utf8_general_ci mediumtext select,insert,update,references
def mysql tables_priv Column_priv 8 NO set 31 93 NULL NULL NULL utf8 utf8_general_ci set('Select','Insert','Update','References') select,insert,update,references
def mysql tables_priv Db 2 NO char 64 192 NULL NULL NULL utf8 utf8_bin char(64) PRI select,insert,update,references
@@ -274,6 +224,7 @@ def mysql user max_questions 37 0 NO int NULL NULL 10 0 NULL NULL NULL int(11) u
def mysql user max_updates 38 0 NO int NULL NULL 10 0 NULL NULL NULL int(11) unsigned select,insert,update,references
def mysql user max_user_connections 40 0 NO int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references
def mysql user Password 3 NO char 41 41 NULL NULL NULL latin1 latin1_bin char(41) select,insert,update,references
+def mysql user password_expired 43 N NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references
def mysql user plugin 41 NO char 64 64 NULL NULL NULL latin1 latin1_swedish_ci char(64) select,insert,update,references
def mysql user Process_priv 12 N NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references
def mysql user References_priv 15 N NO enum 1 3 NULL NULL NULL utf8 utf8_general_ci enum('N','Y') select,insert,update,references
@@ -310,7 +261,6 @@ COL_CML DATA_TYPE CHARACTER_SET_NAME COLLATION_NAME
1.0000 varbinary NULL NULL
1.0000 char latin1 latin1_bin
1.0000 char latin1 latin1_swedish_ci
-1.0000 varchar latin1 latin1_swedish_ci
1.0000 text utf8 utf8_bin
1.0000 mediumtext utf8 utf8_general_ci
1.0000 text utf8 utf8_general_ci
@@ -344,7 +294,6 @@ COL_CML DATA_TYPE CHARACTER_SET_NAME COLLATION_NAME
NULL bigint NULL NULL
NULL datetime NULL NULL
NULL decimal NULL NULL
-NULL float NULL NULL
NULL int NULL NULL
NULL smallint NULL NULL
NULL time NULL NULL
@@ -434,7 +383,7 @@ NULL mysql func ret tinyint NULL NULL NULL NULL tinyint(1)
3.0000 mysql func type enum 9 27 utf8 utf8_general_ci enum('function','aggregate')
NULL mysql general_log event_time timestamp NULL NULL NULL NULL timestamp(6)
1.0000 mysql general_log user_host mediumtext 16777215 16777215 utf8 utf8_general_ci mediumtext
-NULL mysql general_log thread_id int NULL NULL NULL NULL int(11)
+NULL mysql general_log thread_id bigint NULL NULL NULL NULL bigint(21) unsigned
NULL mysql general_log server_id int NULL NULL NULL NULL int(10) unsigned
3.0000 mysql general_log command_type varchar 64 192 utf8 utf8_general_ci varchar(64)
1.0000 mysql general_log argument mediumtext 16777215 16777215 utf8 utf8_general_ci mediumtext
@@ -481,13 +430,6 @@ NULL mysql help_topic help_category_id smallint NULL NULL NULL NULL smallint(5)
3.0000 mysql index_stats index_name varchar 64 192 utf8 utf8_bin varchar(64)
NULL mysql index_stats prefix_arity int NULL NULL NULL NULL int(11) unsigned
NULL mysql index_stats avg_frequency decimal NULL NULL NULL NULL decimal(12,4)
-NULL mysql ndb_binlog_index Position bigint NULL NULL NULL NULL bigint(20) unsigned
-1.0000 mysql ndb_binlog_index File varchar 255 255 latin1 latin1_swedish_ci varchar(255)
-NULL mysql ndb_binlog_index epoch bigint NULL NULL NULL NULL bigint(20) unsigned
-NULL mysql ndb_binlog_index inserts bigint NULL NULL NULL NULL bigint(20) unsigned
-NULL mysql ndb_binlog_index updates bigint NULL NULL NULL NULL bigint(20) unsigned
-NULL mysql ndb_binlog_index deletes bigint NULL NULL NULL NULL bigint(20) unsigned
-NULL mysql ndb_binlog_index schemaops bigint NULL NULL NULL NULL bigint(20) unsigned
3.0000 mysql plugin name varchar 64 192 utf8 utf8_general_ci varchar(64)
3.0000 mysql plugin dl varchar 128 384 utf8 utf8_general_ci varchar(128)
3.0000 mysql proc db char 64 192 utf8 utf8_bin char(64)
@@ -534,50 +476,6 @@ NULL mysql servers Port int NULL NULL NULL NULL int(4)
3.0000 mysql servers Socket char 64 192 utf8 utf8_general_ci char(64)
3.0000 mysql servers Wrapper char 64 192 utf8 utf8_general_ci char(64)
3.0000 mysql servers Owner char 64 192 utf8 utf8_general_ci char(64)
-NULL mysql slave_master_info Master_id int NULL NULL NULL NULL int(10) unsigned
-NULL mysql slave_master_info Number_of_lines int NULL NULL NULL NULL int(10) unsigned
-1.0000 mysql slave_master_info Master_log_name text 65535 65535 utf8 utf8_bin text
-NULL mysql slave_master_info Master_log_pos bigint NULL NULL NULL NULL bigint(20) unsigned
-1.0000 mysql slave_master_info Host text 65535 65535 utf8 utf8_bin text
-1.0000 mysql slave_master_info User_name text 65535 65535 utf8 utf8_bin text
-1.0000 mysql slave_master_info User_password text 65535 65535 utf8 utf8_bin text
-NULL mysql slave_master_info Port int NULL NULL NULL NULL int(10) unsigned
-NULL mysql slave_master_info Connect_retry int NULL NULL NULL NULL int(10) unsigned
-NULL mysql slave_master_info Enabled_ssl tinyint NULL NULL NULL NULL tinyint(1)
-1.0000 mysql slave_master_info Ssl_ca text 65535 65535 utf8 utf8_bin text
-1.0000 mysql slave_master_info Ssl_capath text 65535 65535 utf8 utf8_bin text
-1.0000 mysql slave_master_info Ssl_cert text 65535 65535 utf8 utf8_bin text
-1.0000 mysql slave_master_info Ssl_cipher text 65535 65535 utf8 utf8_bin text
-1.0000 mysql slave_master_info Ssl_key text 65535 65535 utf8 utf8_bin text
-NULL mysql slave_master_info Ssl_verify_server_cert tinyint NULL NULL NULL NULL tinyint(1)
-NULL mysql slave_master_info Heartbeat float NULL NULL NULL NULL float
-1.0000 mysql slave_master_info Bind text 65535 65535 utf8 utf8_bin text
-1.0000 mysql slave_master_info Ignored_server_ids text 65535 65535 utf8 utf8_bin text
-1.0000 mysql slave_master_info Uuid text 65535 65535 utf8 utf8_bin text
-NULL mysql slave_master_info Retry_count bigint NULL NULL NULL NULL bigint(20) unsigned
-1.0000 mysql slave_master_info Ssl_crl text 65535 65535 utf8 utf8_bin text
-1.0000 mysql slave_master_info Ssl_crlpath text 65535 65535 utf8 utf8_bin text
-NULL mysql slave_relay_log_info Master_id int NULL NULL NULL NULL int(10) unsigned
-NULL mysql slave_relay_log_info Number_of_lines int NULL NULL NULL NULL int(10) unsigned
-1.0000 mysql slave_relay_log_info Relay_log_name text 65535 65535 utf8 utf8_bin text
-NULL mysql slave_relay_log_info Relay_log_pos bigint NULL NULL NULL NULL bigint(20) unsigned
-1.0000 mysql slave_relay_log_info Master_log_name text 65535 65535 utf8 utf8_bin text
-NULL mysql slave_relay_log_info Master_log_pos bigint NULL NULL NULL NULL bigint(20) unsigned
-NULL mysql slave_relay_log_info Sql_delay int NULL NULL NULL NULL int(11)
-NULL mysql slave_relay_log_info Number_of_workers int NULL NULL NULL NULL int(10) unsigned
-NULL mysql slave_worker_info Master_id int NULL NULL NULL NULL int(10) unsigned
-NULL mysql slave_worker_info Worker_id int NULL NULL NULL NULL int(10) unsigned
-1.0000 mysql slave_worker_info Relay_log_name text 65535 65535 utf8 utf8_bin text
-NULL mysql slave_worker_info Relay_log_pos bigint NULL NULL NULL NULL bigint(20) unsigned
-1.0000 mysql slave_worker_info Master_log_name text 65535 65535 utf8 utf8_bin text
-NULL mysql slave_worker_info Master_log_pos bigint NULL NULL NULL NULL bigint(20) unsigned
-1.0000 mysql slave_worker_info Checkpoint_relay_log_name text 65535 65535 utf8 utf8_bin text
-NULL mysql slave_worker_info Checkpoint_relay_log_pos bigint NULL NULL NULL NULL bigint(20) unsigned
-1.0000 mysql slave_worker_info Checkpoint_master_log_name text 65535 65535 utf8 utf8_bin text
-NULL mysql slave_worker_info Checkpoint_master_log_pos bigint NULL NULL NULL NULL bigint(20) unsigned
-NULL mysql slave_worker_info Checkpoint_seqno int NULL NULL NULL NULL int(10) unsigned
-NULL mysql slave_worker_info Checkpoint_group_size int NULL NULL NULL NULL int(10) unsigned
-1.0000 mysql slave_worker_info Checkpoint_group_bitmap blob 65535 65535 NULL NULL blob
NULL mysql slow_log start_time timestamp NULL NULL NULL NULL timestamp(6)
1.0000 mysql slow_log user_host mediumtext 16777215 16777215 utf8 utf8_general_ci mediumtext
NULL mysql slow_log query_time time NULL NULL NULL NULL time(6)
@@ -589,6 +487,7 @@ NULL mysql slow_log last_insert_id int NULL NULL NULL NULL int(11)
NULL mysql slow_log insert_id int NULL NULL NULL NULL int(11)
NULL mysql slow_log server_id int NULL NULL NULL NULL int(10) unsigned
1.0000 mysql slow_log sql_text mediumtext 16777215 16777215 utf8 utf8_general_ci mediumtext
+NULL mysql slow_log thread_id bigint NULL NULL NULL NULL bigint(21) unsigned
3.0000 mysql tables_priv Host char 60 180 utf8 utf8_bin char(60)
3.0000 mysql tables_priv Db char 64 192 utf8 utf8_bin char(64)
3.0000 mysql tables_priv User char 16 48 utf8 utf8_bin char(16)
@@ -656,3 +555,4 @@ NULL mysql user max_connections int NULL NULL NULL NULL int(11) unsigned
NULL mysql user max_user_connections int NULL NULL NULL NULL int(11)
1.0000 mysql user plugin char 64 64 latin1 latin1_swedish_ci char(64)
1.0000 mysql user authentication_string text 65535 65535 utf8 utf8_bin text
+3.0000 mysql user password_expired enum 1 3 utf8 utf8_general_ci enum('N','Y')
diff --git a/mysql-test/suite/funcs_1/r/is_key_column_usage.result b/mysql-test/suite/funcs_1/r/is_key_column_usage.result
index 66967267202..75cd59604ff 100644
--- a/mysql-test/suite/funcs_1/r/is_key_column_usage.result
+++ b/mysql-test/suite/funcs_1/r/is_key_column_usage.result
@@ -109,11 +109,8 @@ def mysql PRIMARY def mysql innodb_index_stats database_name
def mysql PRIMARY def mysql innodb_index_stats table_name
def mysql PRIMARY def mysql innodb_index_stats index_name
def mysql PRIMARY def mysql innodb_index_stats stat_name
-def mysql innodb_index_stats_ibfk_1 def mysql innodb_index_stats database_name
-def mysql innodb_index_stats_ibfk_1 def mysql innodb_index_stats table_name
def mysql PRIMARY def mysql innodb_table_stats database_name
def mysql PRIMARY def mysql innodb_table_stats table_name
-def mysql PRIMARY def mysql ndb_binlog_index epoch
def mysql PRIMARY def mysql plugin name
def mysql PRIMARY def mysql proc db
def mysql PRIMARY def mysql proc name
@@ -128,10 +125,6 @@ def mysql PRIMARY def mysql proxies_priv User
def mysql PRIMARY def mysql proxies_priv Proxied_host
def mysql PRIMARY def mysql proxies_priv Proxied_user
def mysql PRIMARY def mysql servers Server_name
-def mysql PRIMARY def mysql slave_master_info Master_id
-def mysql PRIMARY def mysql slave_relay_log_info Master_id
-def mysql PRIMARY def mysql slave_worker_info Master_id
-def mysql PRIMARY def mysql slave_worker_info Worker_id
def mysql PRIMARY def mysql table_stats db_name
def mysql PRIMARY def mysql table_stats table_name
def mysql PRIMARY def mysql tables_priv Host
diff --git a/mysql-test/suite/funcs_1/r/is_statistics.result b/mysql-test/suite/funcs_1/r/is_statistics.result
index 73407fa0a5a..c1afeb8db9b 100644
--- a/mysql-test/suite/funcs_1/r/is_statistics.result
+++ b/mysql-test/suite/funcs_1/r/is_statistics.result
@@ -116,7 +116,6 @@ def mysql index_stats mysql PRIMARY
def mysql index_stats mysql PRIMARY
def mysql index_stats mysql PRIMARY
def mysql index_stats mysql PRIMARY
-def mysql ndb_binlog_index mysql PRIMARY
def mysql plugin mysql PRIMARY
def mysql proc mysql PRIMARY
def mysql proc mysql PRIMARY
@@ -133,10 +132,6 @@ def mysql proxies_priv mysql PRIMARY
def mysql proxies_priv mysql PRIMARY
def mysql proxies_priv mysql Grantor
def mysql servers mysql PRIMARY
-def mysql slave_master_info mysql PRIMARY
-def mysql slave_relay_log_info mysql PRIMARY
-def mysql slave_worker_info mysql PRIMARY
-def mysql slave_worker_info mysql PRIMARY
def mysql table_stats mysql PRIMARY
def mysql table_stats mysql PRIMARY
def mysql tables_priv mysql PRIMARY
diff --git a/mysql-test/suite/funcs_1/r/is_statistics_mysql.result b/mysql-test/suite/funcs_1/r/is_statistics_mysql.result
index 1bee7c44cb1..5dbf3bc1488 100644
--- a/mysql-test/suite/funcs_1/r/is_statistics_mysql.result
+++ b/mysql-test/suite/funcs_1/r/is_statistics_mysql.result
@@ -44,7 +44,6 @@ def mysql innodb_index_stats 0 mysql PRIMARY 3 index_name A #CARD# NULL NULL BT
def mysql innodb_index_stats 0 mysql PRIMARY 4 stat_name A #CARD# NULL NULL BTREE
def mysql innodb_table_stats 0 mysql PRIMARY 1 database_name A #CARD# NULL NULL BTREE
def mysql innodb_table_stats 0 mysql PRIMARY 2 table_name A #CARD# NULL NULL BTREE
-def mysql ndb_binlog_index 0 mysql PRIMARY 1 epoch A #CARD# NULL NULL BTREE
def mysql plugin 0 mysql PRIMARY 1 name A #CARD# NULL NULL BTREE
def mysql proc 0 mysql PRIMARY 1 db A #CARD# NULL NULL BTREE
def mysql proc 0 mysql PRIMARY 2 name A #CARD# NULL NULL BTREE
@@ -61,10 +60,6 @@ def mysql proxies_priv 0 mysql PRIMARY 2 User A #CARD# NULL NULL BTREE
def mysql proxies_priv 0 mysql PRIMARY 3 Proxied_host A #CARD# NULL NULL BTREE
def mysql proxies_priv 0 mysql PRIMARY 4 Proxied_user A #CARD# NULL NULL BTREE
def mysql servers 0 mysql PRIMARY 1 Server_name A #CARD# NULL NULL BTREE
-def mysql slave_master_info 0 mysql PRIMARY 1 Master_id A #CARD# NULL NULL BTREE
-def mysql slave_relay_log_info 0 mysql PRIMARY 1 Master_id A #CARD# NULL NULL BTREE
-def mysql slave_worker_info 0 mysql PRIMARY 1 Master_id A #CARD# NULL NULL BTREE
-def mysql slave_worker_info 0 mysql PRIMARY 2 Worker_id A #CARD# NULL NULL BTREE
def mysql tables_priv 1 mysql Grantor 1 Grantor A #CARD# NULL NULL BTREE
def mysql tables_priv 0 mysql PRIMARY 1 Host A #CARD# NULL NULL BTREE
def mysql tables_priv 0 mysql PRIMARY 2 Db A #CARD# NULL NULL BTREE
diff --git a/mysql-test/suite/funcs_1/r/is_table_constraints.result b/mysql-test/suite/funcs_1/r/is_table_constraints.result
index fb62c16ec1e..0077f74396c 100644
--- a/mysql-test/suite/funcs_1/r/is_table_constraints.result
+++ b/mysql-test/suite/funcs_1/r/is_table_constraints.result
@@ -73,17 +73,12 @@ def mysql name mysql help_topic
def mysql PRIMARY mysql host
def mysql PRIMARY mysql index_stats
def mysql PRIMARY mysql innodb_index_stats
-def mysql innodb_index_stats_ibfk_1 mysql innodb_index_stats
def mysql PRIMARY mysql innodb_table_stats
-def mysql PRIMARY mysql ndb_binlog_index
def mysql PRIMARY mysql plugin
def mysql PRIMARY mysql proc
def mysql PRIMARY mysql procs_priv
def mysql PRIMARY mysql proxies_priv
def mysql PRIMARY mysql servers
-def mysql PRIMARY mysql slave_master_info
-def mysql PRIMARY mysql slave_relay_log_info
-def mysql PRIMARY mysql slave_worker_info
def mysql PRIMARY mysql table_stats
def mysql PRIMARY mysql tables_priv
def mysql PRIMARY mysql time_zone
diff --git a/mysql-test/suite/funcs_1/r/is_table_constraints_mysql.result b/mysql-test/suite/funcs_1/r/is_table_constraints_mysql.result
index 4a51fe66f1f..38f79f30068 100644
--- a/mysql-test/suite/funcs_1/r/is_table_constraints_mysql.result
+++ b/mysql-test/suite/funcs_1/r/is_table_constraints_mysql.result
@@ -22,18 +22,13 @@ def mysql name mysql help_topic UNIQUE
def mysql PRIMARY mysql help_topic PRIMARY KEY
def mysql PRIMARY mysql host PRIMARY KEY
def mysql PRIMARY mysql index_stats PRIMARY KEY
-def mysql innodb_index_stats_ibfk_1 mysql innodb_index_stats FOREIGN KEY
def mysql PRIMARY mysql innodb_index_stats PRIMARY KEY
def mysql PRIMARY mysql innodb_table_stats PRIMARY KEY
-def mysql PRIMARY mysql ndb_binlog_index PRIMARY KEY
def mysql PRIMARY mysql plugin PRIMARY KEY
def mysql PRIMARY mysql proc PRIMARY KEY
def mysql PRIMARY mysql procs_priv PRIMARY KEY
def mysql PRIMARY mysql proxies_priv PRIMARY KEY
def mysql PRIMARY mysql servers PRIMARY KEY
-def mysql PRIMARY mysql slave_master_info PRIMARY KEY
-def mysql PRIMARY mysql slave_relay_log_info PRIMARY KEY
-def mysql PRIMARY mysql slave_worker_info PRIMARY KEY
def mysql PRIMARY mysql tables_priv PRIMARY KEY
def mysql PRIMARY mysql table_stats PRIMARY KEY
def mysql PRIMARY mysql time_zone PRIMARY KEY
diff --git a/mysql-test/suite/funcs_1/r/is_tables_mysql.result b/mysql-test/suite/funcs_1/r/is_tables_mysql.result
index 6693143e1b0..4587538ea0b 100644
--- a/mysql-test/suite/funcs_1/r/is_tables_mysql.result
+++ b/mysql-test/suite/funcs_1/r/is_tables_mysql.result
@@ -359,29 +359,6 @@ user_comment
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
-TABLE_NAME ndb_binlog_index
-TABLE_TYPE BASE TABLE
-ENGINE MYISAM_OR_MARIA
-VERSION 10
-ROW_FORMAT DYNAMIC_OR_PAGE
-TABLE_ROWS #TBLR#
-AVG_ROW_LENGTH #ARL#
-DATA_LENGTH #DL#
-MAX_DATA_LENGTH #MDL#
-INDEX_LENGTH #IL#
-DATA_FREE #DF#
-AUTO_INCREMENT NULL
-CREATE_TIME #CRT#
-UPDATE_TIME #UT#
-CHECK_TIME #CT#
-TABLE_COLLATION latin1_swedish_ci
-CHECKSUM NULL
-CREATE_OPTIONS #CO#
-TABLE_COMMENT #TC#
-user_comment
-Separator -----------------------------------------------------
-TABLE_CATALOG def
-TABLE_SCHEMA mysql
TABLE_NAME plugin
TABLE_TYPE BASE TABLE
ENGINE MYISAM_OR_MARIA
@@ -497,75 +474,6 @@ user_comment MySQL Foreign Servers table
Separator -----------------------------------------------------
TABLE_CATALOG def
TABLE_SCHEMA mysql
-TABLE_NAME slave_master_info
-TABLE_TYPE BASE TABLE
-ENGINE MYISAM_OR_MARIA
-VERSION 10
-ROW_FORMAT DYNAMIC_OR_PAGE
-TABLE_ROWS #TBLR#
-AVG_ROW_LENGTH #ARL#
-DATA_LENGTH #DL#
-MAX_DATA_LENGTH #MDL#
-INDEX_LENGTH #IL#
-DATA_FREE #DF#
-AUTO_INCREMENT NULL
-CREATE_TIME #CRT#
-UPDATE_TIME #UT#
-CHECK_TIME #CT#
-TABLE_COLLATION utf8_general_ci
-CHECKSUM NULL
-CREATE_OPTIONS #CO#
-TABLE_COMMENT #TC#
-user_comment Master Information
-Separator -----------------------------------------------------
-TABLE_CATALOG def
-TABLE_SCHEMA mysql
-TABLE_NAME slave_relay_log_info
-TABLE_TYPE BASE TABLE
-ENGINE MYISAM_OR_MARIA
-VERSION 10
-ROW_FORMAT DYNAMIC_OR_PAGE
-TABLE_ROWS #TBLR#
-AVG_ROW_LENGTH #ARL#
-DATA_LENGTH #DL#
-MAX_DATA_LENGTH #MDL#
-INDEX_LENGTH #IL#
-DATA_FREE #DF#
-AUTO_INCREMENT NULL
-CREATE_TIME #CRT#
-UPDATE_TIME #UT#
-CHECK_TIME #CT#
-TABLE_COLLATION utf8_general_ci
-CHECKSUM NULL
-CREATE_OPTIONS #CO#
-TABLE_COMMENT #TC#
-user_comment Relay Log Information
-Separator -----------------------------------------------------
-TABLE_CATALOG def
-TABLE_SCHEMA mysql
-TABLE_NAME slave_worker_info
-TABLE_TYPE BASE TABLE
-ENGINE MYISAM_OR_MARIA
-VERSION 10
-ROW_FORMAT DYNAMIC_OR_PAGE
-TABLE_ROWS #TBLR#
-AVG_ROW_LENGTH #ARL#
-DATA_LENGTH #DL#
-MAX_DATA_LENGTH #MDL#
-INDEX_LENGTH #IL#
-DATA_FREE #DF#
-AUTO_INCREMENT NULL
-CREATE_TIME #CRT#
-UPDATE_TIME #UT#
-CHECK_TIME #CT#
-TABLE_COLLATION utf8_general_ci
-CHECKSUM NULL
-CREATE_OPTIONS #CO#
-TABLE_COMMENT #TC#
-user_comment Worker Information
-Separator -----------------------------------------------------
-TABLE_CATALOG def
-TABLE_SCHEMA mysql
TABLE_NAME slow_log
TABLE_TYPE BASE TABLE
ENGINE CSV
diff --git a/mysql-test/suite/funcs_1/r/is_user_privileges.result b/mysql-test/suite/funcs_1/r/is_user_privileges.result
index 1ec1ffc4ce1..031067f2e64 100644
--- a/mysql-test/suite/funcs_1/r/is_user_privileges.result
+++ b/mysql-test/suite/funcs_1/r/is_user_privileges.result
@@ -129,6 +129,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
Host localhost
User testuser2
Password
@@ -171,6 +172,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
Host localhost
User testuser3
Password
@@ -213,6 +215,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
#
# Add GRANT OPTION db_datadict.* to testuser1;
GRANT UPDATE ON db_datadict.* TO 'testuser1'@'localhost' WITH GRANT OPTION;
@@ -279,6 +282,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
Host localhost
User testuser2
Password
@@ -321,6 +325,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
Host localhost
User testuser3
Password
@@ -363,6 +368,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
# Establish connection testuser1 (user=testuser1)
SELECT * FROM information_schema.user_privileges
WHERE grantee LIKE '''testuser%'''
@@ -415,6 +421,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
Host localhost
User testuser2
Password
@@ -457,6 +464,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
Host localhost
User testuser3
Password
@@ -499,6 +507,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
SHOW GRANTS;
Grants for testuser1@localhost
GRANT USAGE ON *.* TO 'testuser1'@'localhost'
@@ -573,6 +582,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
Host localhost
User testuser2
Password
@@ -615,6 +625,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
Host localhost
User testuser3
Password
@@ -657,6 +668,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
GRANT SELECT ON *.* TO 'testuser1'@'localhost' WITH GRANT OPTION;
#
# Here <SELECT YES> is shown correctly for testuser1;
@@ -723,6 +735,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
Host localhost
User testuser2
Password
@@ -765,6 +778,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
Host localhost
User testuser3
Password
@@ -807,6 +821,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
# Switch to connection testuser1
SELECT * FROM information_schema.user_privileges
WHERE grantee LIKE '''testuser%'''
@@ -859,6 +874,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
Host localhost
User testuser2
Password
@@ -901,6 +917,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
Host localhost
User testuser3
Password
@@ -943,6 +960,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
SHOW GRANTS;
Grants for testuser1@localhost
GRANT SELECT ON *.* TO 'testuser1'@'localhost' WITH GRANT OPTION
@@ -1047,6 +1065,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
Host localhost
User testuser2
Password
@@ -1089,6 +1108,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
Host localhost
User testuser3
Password
@@ -1131,6 +1151,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
# Switch to connection testuser1
SELECT * FROM information_schema.user_privileges
WHERE grantee LIKE '''testuser%'''
@@ -1230,6 +1251,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
Host localhost
User testuser2
Password
@@ -1272,6 +1294,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
Host localhost
User testuser3
Password
@@ -1314,6 +1337,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
# Switch to connection testuser1
SELECT * FROM information_schema.user_privileges
WHERE grantee LIKE '''testuser%'''
@@ -1366,6 +1390,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
Host localhost
User testuser2
Password
@@ -1408,6 +1433,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
Host localhost
User testuser3
Password
@@ -1450,6 +1476,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
SHOW GRANTS;
Grants for testuser1@localhost
GRANT USAGE ON *.* TO 'testuser1'@'localhost'
@@ -1509,6 +1536,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
Host localhost
User testuser2
Password
@@ -1551,6 +1579,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
Host localhost
User testuser3
Password
@@ -1593,6 +1622,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
SHOW GRANTS;
Grants for testuser1@localhost
GRANT USAGE ON *.* TO 'testuser1'@'localhost'
@@ -1667,6 +1697,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
Host localhost
User testuser2
Password
@@ -1709,6 +1740,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
Host localhost
User testuser3
Password
@@ -1751,6 +1783,7 @@ max_connections 0
max_user_connections 0
plugin
authentication_string
+password_expired N
# Switch to connection testuser1
SELECT * FROM information_schema.user_privileges
WHERE grantee LIKE '''testuser%'''
diff --git a/mysql-test/suite/funcs_1/r/memory_storedproc_02.result b/mysql-test/suite/funcs_1/r/memory_storedproc_02.result
index 03c16d06595..0ee25154c50 100644
--- a/mysql-test/suite/funcs_1/r/memory_storedproc_02.result
+++ b/mysql-test/suite/funcs_1/r/memory_storedproc_02.result
@@ -348,8 +348,6 @@ CALL h1();
-7- 1 1 1 1 1 1
END x1 x2 x3 x4 x5 x6
END 1 1 1 1 1 1
-Warnings:
-Error 1062 Duplicate entry 'a' for key 'w'
DROP TABLE IF EXISTS tnull;
DROP PROCEDURE IF EXISTS sp1;
CREATE TABLE tnull(f1 int);
@@ -448,8 +446,6 @@ END//
CALL h2();
x1 x2 x3 x4 x5 x6
1 1 1 1 1 1
-Warnings:
-Error 1062 Duplicate entry 'a' for key 'w'
SELECT * FROM res_t1;
w x
a b
@@ -555,8 +551,6 @@ exit handler 2
exit handler 2
exit handler 1
exit handler 1
-Warnings:
-Error 1146 Table 'db_storedproc.tqq' doesn't exist
create table res_t1(w char unique, x char);
insert into res_t1 values ('a', 'b');
CREATE PROCEDURE h1 ()
@@ -587,8 +581,6 @@ END//
CALL h1();
x1 x2 x3 x4 x5 x6
1 1 1 1 1 1
-Warnings:
-Error 1062 Duplicate entry 'a' for key 'w'
This will fail, SQLSTATE 00000 is not allowed
CREATE PROCEDURE sp1()
begin1_label:BEGIN
@@ -632,8 +624,6 @@ CALL sp2();
NULL
@x2 @x
1 2
-Warnings:
-Error 1318 Incorrect number of arguments for PROCEDURE db_storedproc.sp1; expected 2, got 1
DROP PROCEDURE sp1;
DROP PROCEDURE sp2;
@@ -665,8 +655,6 @@ ERROR 42000: Incorrect number of arguments for PROCEDURE db_storedproc.sp1; expe
CALL sp2();
-1- @x2 @x
-1- 0 1
-Warnings:
-Error 1318 Incorrect number of arguments for PROCEDURE db_storedproc.sp1; expected 2, got 1
SELECT '-3-', @x2, @x;
-3- @x2 @x
-3- 1 1
@@ -709,8 +697,6 @@ CALL sp2();
-2- 1 20
-4- @x2 @x
-4- 11 22
-Warnings:
-Error 1318 Incorrect number of arguments for PROCEDURE db_storedproc.sp1; expected 2, got 1
DROP PROCEDURE sp1;
DROP PROCEDURE sp2;
@@ -777,33 +763,21 @@ SELECT @done, @x;
0 1
INSERT INTO temp VALUES('1', NULL);
CALL sp1();
-Warnings:
-Warning 1265 Data truncated for column 'f1' at row 1
-Warning 1265 Data truncated for column 'f2' at row 1
SELECT @done, @x;
@done @x
1 1
INSERT INTO temp VALUES('2', NULL);
CALL sp2();
-Warnings:
-Warning 1265 Data truncated for column 'f1' at row 1
-Warning 1265 Data truncated for column 'f2' at row 1
SELECT @done, @x;
@done @x
1 1
INSERT INTO temp VALUES('3', NULL);
CALL sp3();
-Warnings:
-Warning 1265 Data truncated for column 'f1' at row 1
-Warning 1265 Data truncated for column 'f2' at row 1
SELECT @done, @x;
@done @x
1 0
INSERT INTO temp VALUES('4', NULL);
CALL sp4();
-Warnings:
-Warning 1265 Data truncated for column 'f1' at row 1
-Warning 1265 Data truncated for column 'f2' at row 1
SELECT @done, @x;
@done @x
1 0
@@ -912,26 +886,18 @@ SELECT @done, @x;
@done @x
0 1
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT @done, @x;
@done @x
1 2
CALL sp2();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT @done, @x;
@done @x
1 2
CALL sp3();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT @done, @x;
@done @x
1 1
CALL sp4();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT @done, @x;
@done @x
1 1
@@ -1066,8 +1032,6 @@ SQLSTATE
21000
SQLSTATE
24000
-Warnings:
-Error 1326 Cursor is not open
SELECT '-1-', @x;
-1- @x
-1- 6
@@ -1078,24 +1042,18 @@ SQLSTATE
SQLEXCEPTION
SQLSTATE
24000
-Warnings:
-Error 1326 Cursor is not open
SELECT '-2-', @x;
-2- @x
-2- 6
CALL sp3();
SQLSTATE
20000
-Warnings:
-Error 1339 Case not found for CASE statement
SELECT '-3-', @x;
-3- @x
-3- 1
CALL sp4();
SQLSTATE
SQLEXCEPTION
-Warnings:
-Error 1339 Case not found for CASE statement
SELECT '-4-', @x;
-4- @x
-4- 1
@@ -1378,8 +1336,6 @@ CLOSE cur1;
CLOSE cur2;
END//
CALL sp_outer();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM temp1;
f0 cnt f1 f2 f3 f4
_sp_out_ 1 a` a` 1000-01-01 -5000
diff --git a/mysql-test/suite/funcs_1/r/memory_trig_0102.result b/mysql-test/suite/funcs_1/r/memory_trig_0102.result
index b08eeb6754f..85da4aa7832 100644
--- a/mysql-test/suite/funcs_1/r/memory_trig_0102.result
+++ b/mysql-test/suite/funcs_1/r/memory_trig_0102.result
@@ -186,7 +186,7 @@ Testcase 3.5.1.7: - need to fix
-------------------------------
drop table if exists t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
create table t1 (f1 int, f2 char(25),f3 int) engine = <engine_to_be_used>;
CREATE TRIGGER trg5_1 BEFORE INSERT on test.t1
for each row set new.f3 = '14';
diff --git a/mysql-test/suite/funcs_1/r/myisam_storedproc_02.result b/mysql-test/suite/funcs_1/r/myisam_storedproc_02.result
index 03c16d06595..0ee25154c50 100644
--- a/mysql-test/suite/funcs_1/r/myisam_storedproc_02.result
+++ b/mysql-test/suite/funcs_1/r/myisam_storedproc_02.result
@@ -348,8 +348,6 @@ CALL h1();
-7- 1 1 1 1 1 1
END x1 x2 x3 x4 x5 x6
END 1 1 1 1 1 1
-Warnings:
-Error 1062 Duplicate entry 'a' for key 'w'
DROP TABLE IF EXISTS tnull;
DROP PROCEDURE IF EXISTS sp1;
CREATE TABLE tnull(f1 int);
@@ -448,8 +446,6 @@ END//
CALL h2();
x1 x2 x3 x4 x5 x6
1 1 1 1 1 1
-Warnings:
-Error 1062 Duplicate entry 'a' for key 'w'
SELECT * FROM res_t1;
w x
a b
@@ -555,8 +551,6 @@ exit handler 2
exit handler 2
exit handler 1
exit handler 1
-Warnings:
-Error 1146 Table 'db_storedproc.tqq' doesn't exist
create table res_t1(w char unique, x char);
insert into res_t1 values ('a', 'b');
CREATE PROCEDURE h1 ()
@@ -587,8 +581,6 @@ END//
CALL h1();
x1 x2 x3 x4 x5 x6
1 1 1 1 1 1
-Warnings:
-Error 1062 Duplicate entry 'a' for key 'w'
This will fail, SQLSTATE 00000 is not allowed
CREATE PROCEDURE sp1()
begin1_label:BEGIN
@@ -632,8 +624,6 @@ CALL sp2();
NULL
@x2 @x
1 2
-Warnings:
-Error 1318 Incorrect number of arguments for PROCEDURE db_storedproc.sp1; expected 2, got 1
DROP PROCEDURE sp1;
DROP PROCEDURE sp2;
@@ -665,8 +655,6 @@ ERROR 42000: Incorrect number of arguments for PROCEDURE db_storedproc.sp1; expe
CALL sp2();
-1- @x2 @x
-1- 0 1
-Warnings:
-Error 1318 Incorrect number of arguments for PROCEDURE db_storedproc.sp1; expected 2, got 1
SELECT '-3-', @x2, @x;
-3- @x2 @x
-3- 1 1
@@ -709,8 +697,6 @@ CALL sp2();
-2- 1 20
-4- @x2 @x
-4- 11 22
-Warnings:
-Error 1318 Incorrect number of arguments for PROCEDURE db_storedproc.sp1; expected 2, got 1
DROP PROCEDURE sp1;
DROP PROCEDURE sp2;
@@ -777,33 +763,21 @@ SELECT @done, @x;
0 1
INSERT INTO temp VALUES('1', NULL);
CALL sp1();
-Warnings:
-Warning 1265 Data truncated for column 'f1' at row 1
-Warning 1265 Data truncated for column 'f2' at row 1
SELECT @done, @x;
@done @x
1 1
INSERT INTO temp VALUES('2', NULL);
CALL sp2();
-Warnings:
-Warning 1265 Data truncated for column 'f1' at row 1
-Warning 1265 Data truncated for column 'f2' at row 1
SELECT @done, @x;
@done @x
1 1
INSERT INTO temp VALUES('3', NULL);
CALL sp3();
-Warnings:
-Warning 1265 Data truncated for column 'f1' at row 1
-Warning 1265 Data truncated for column 'f2' at row 1
SELECT @done, @x;
@done @x
1 0
INSERT INTO temp VALUES('4', NULL);
CALL sp4();
-Warnings:
-Warning 1265 Data truncated for column 'f1' at row 1
-Warning 1265 Data truncated for column 'f2' at row 1
SELECT @done, @x;
@done @x
1 0
@@ -912,26 +886,18 @@ SELECT @done, @x;
@done @x
0 1
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT @done, @x;
@done @x
1 2
CALL sp2();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT @done, @x;
@done @x
1 2
CALL sp3();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT @done, @x;
@done @x
1 1
CALL sp4();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT @done, @x;
@done @x
1 1
@@ -1066,8 +1032,6 @@ SQLSTATE
21000
SQLSTATE
24000
-Warnings:
-Error 1326 Cursor is not open
SELECT '-1-', @x;
-1- @x
-1- 6
@@ -1078,24 +1042,18 @@ SQLSTATE
SQLEXCEPTION
SQLSTATE
24000
-Warnings:
-Error 1326 Cursor is not open
SELECT '-2-', @x;
-2- @x
-2- 6
CALL sp3();
SQLSTATE
20000
-Warnings:
-Error 1339 Case not found for CASE statement
SELECT '-3-', @x;
-3- @x
-3- 1
CALL sp4();
SQLSTATE
SQLEXCEPTION
-Warnings:
-Error 1339 Case not found for CASE statement
SELECT '-4-', @x;
-4- @x
-4- 1
@@ -1378,8 +1336,6 @@ CLOSE cur1;
CLOSE cur2;
END//
CALL sp_outer();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM temp1;
f0 cnt f1 f2 f3 f4
_sp_out_ 1 a` a` 1000-01-01 -5000
diff --git a/mysql-test/suite/funcs_1/r/myisam_trig_0102.result b/mysql-test/suite/funcs_1/r/myisam_trig_0102.result
index b08eeb6754f..85da4aa7832 100644
--- a/mysql-test/suite/funcs_1/r/myisam_trig_0102.result
+++ b/mysql-test/suite/funcs_1/r/myisam_trig_0102.result
@@ -186,7 +186,7 @@ Testcase 3.5.1.7: - need to fix
-------------------------------
drop table if exists t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
create table t1 (f1 int, f2 char(25),f3 int) engine = <engine_to_be_used>;
CREATE TRIGGER trg5_1 BEFORE INSERT on test.t1
for each row set new.f3 = '14';
diff --git a/mysql-test/suite/funcs_1/r/storedproc.result b/mysql-test/suite/funcs_1/r/storedproc.result
index f9cdbb13d61..4ec9bfbd512 100644
--- a/mysql-test/suite/funcs_1/r/storedproc.result
+++ b/mysql-test/suite/funcs_1/r/storedproc.result
@@ -1822,7 +1822,7 @@ grant execute on db_storedproc.* to 'user_1'@'localhost';
flush privileges;
drop table IF EXISTS mysql.t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'mysql.t1'
create table mysql.t1( f1 char );
DROP PROCEDURE IF EXISTS sp11;
Warnings:
@@ -9327,8 +9327,6 @@ insert into t2 values (1);
set @x = 3;
END//
CALL sp1();
-Warnings:
-Error 1136 Column count doesn't match value count at row 1
DROP PROCEDURE sp1;
DROP PROCEDURE IF EXISTS sp1;
Warnings:
@@ -13754,8 +13752,6 @@ END//
CALL sp1();
x y @x
NULL a 3
-Warnings:
-Error 1305 PROCEDURE db_storedproc.nonsexist does not exist
SELECT @v1, @v2;
@v1 @v2
4 a
@@ -14940,8 +14936,6 @@ NULL NULL
NULL NULL
@x @y
NULL NULL
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
DROP PROCEDURE sp1;
Testcase 4.2.63:
@@ -15101,8 +15095,6 @@ END;
fetch cur1 into newf1, newf2, newf4, newf3;
END//
CALL sp1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
DROP PROCEDURE sp1;
Testcase 4.2.70:
@@ -16351,7 +16343,6 @@ fn7(99999999999)
9999999999
Warnings:
Warning 1264 Out of range value for column 'f1' at row 1
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn8;
CREATE FUNCTION fn8( f1 decimal (0) unsigned zerofill) returns decimal (0) unsigned zerofill
BEGIN
@@ -16361,8 +16352,6 @@ END//
SELECT fn8(999999999);
fn8(999999999)
1000000000
-Warnings:
-Note 1265 Data truncated for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn9;
CREATE FUNCTION fn9( f1 decimal (0) zerofill) returns decimal (0) zerofill
BEGIN
@@ -16374,7 +16363,6 @@ fn9(-1.00e+09)
0000000010
Warnings:
Warning 1264 Out of range value for column 'f1' at row 1
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn10;
CREATE FUNCTION fn10( f1 decimal (0, 0)) returns decimal (0, 0)
BEGIN
@@ -16395,7 +16383,6 @@ fn11(99999999999)
9999999999
Warnings:
Warning 1264 Out of range value for column 'f1' at row 1
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn12;
CREATE FUNCTION fn12( f1 decimal (0, 0) unsigned zerofill) returns decimal (0, 0) unsigned zerofill
BEGIN
@@ -16405,8 +16392,6 @@ END//
SELECT fn12(999999999);
fn12(999999999)
1000000000
-Warnings:
-Note 1265 Data truncated for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn13;
CREATE FUNCTION fn13( f1 decimal (0, 0) zerofill) returns decimal (0, 0) zerofill
BEGIN
@@ -16418,7 +16403,6 @@ fn13(-1.00e+09)
0000000010
Warnings:
Warning 1264 Out of range value for column 'f1' at row 1
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn14;
CREATE FUNCTION fn14( f1 decimal (63, 30)) returns decimal (63, 30)
BEGIN
@@ -16457,7 +16441,6 @@ fn17(-1.00e+21)
000000000000000000000000000000010.000000000000000000000000000000
Warnings:
Warning 1264 Out of range value for column 'f1' at row 1
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn18_d;
CREATE FUNCTION fn18_d( f1 decimal (64)) returns decimal (64)
BEGIN
@@ -16494,8 +16477,6 @@ END//
SELECT fn21_d_z(1.00e+00);
fn21_d_z(1.00e+00)
0000000000000000000000000000000000000000000000000000000000000010
-Warnings:
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn22;
CREATE FUNCTION fn22( f1 decimal unsigned) returns decimal unsigned
BEGIN
@@ -16505,8 +16486,6 @@ END//
SELECT fn22(1.00e+00);
fn22(1.00e+00)
10
-Warnings:
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn23;
CREATE FUNCTION fn23( f1 decimal unsigned zerofill) returns decimal unsigned zerofill
BEGIN
@@ -16516,8 +16495,6 @@ END//
SELECT fn23(1.00e+00);
fn23(1.00e+00)
0000000010
-Warnings:
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn24;
CREATE FUNCTION fn24( f1 decimal zerofill) returns decimal zerofill
BEGIN
@@ -16529,7 +16506,6 @@ fn24(-1.00e+09)
0000000010
Warnings:
Warning 1264 Out of range value for column 'f1' at row 1
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn25;
CREATE FUNCTION fn25( f1 double) returns double
BEGIN
@@ -16548,8 +16524,6 @@ END//
SELECT fn26(1.00e+00);
fn26(1.00e+00)
10
-Warnings:
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn27;
CREATE FUNCTION fn27( f1 double unsigned zerofill) returns double unsigned zerofill
BEGIN
@@ -16559,8 +16533,6 @@ END//
SELECT fn27(1.00e+00);
fn27(1.00e+00)
10
-Warnings:
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn28;
CREATE FUNCTION fn28( f1 double zerofill) returns double zerofill
BEGIN
@@ -16570,8 +16542,6 @@ END//
SELECT fn28(1.00e+00);
fn28(1.00e+00)
10
-Warnings:
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn29;
CREATE FUNCTION fn29( f1 float) returns float
BEGIN
@@ -16590,8 +16560,6 @@ END//
SELECT fn30(1.00e+00);
fn30(1.00e+00)
10
-Warnings:
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn31;
CREATE FUNCTION fn31( f1 float unsigned zerofill) returns float unsigned zerofill
BEGIN
@@ -16601,8 +16569,6 @@ END//
SELECT fn31(1.00e+00);
fn31(1.00e+00)
10
-Warnings:
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn32;
CREATE FUNCTION fn32( f1 float zerofill) returns float zerofill
BEGIN
@@ -16612,8 +16578,6 @@ END//
SELECT fn32(1.00e+00);
fn32(1.00e+00)
10
-Warnings:
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn33;
CREATE FUNCTION fn33( f1 float(0)) returns float(0)
BEGIN
@@ -16632,8 +16596,6 @@ END//
SELECT fn34(1.00e+00);
fn34(1.00e+00)
10
-Warnings:
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn35;
CREATE FUNCTION fn35( f1 float(0) unsigned zerofill) returns float(0) unsigned zerofill
BEGIN
@@ -16643,8 +16605,6 @@ END//
SELECT fn35(1.00e+00);
fn35(1.00e+00)
10
-Warnings:
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn36;
CREATE FUNCTION fn36( f1 float(0) zerofill) returns float(0) zerofill
BEGIN
@@ -16654,8 +16614,6 @@ END//
SELECT fn36(1.00e+00);
fn36(1.00e+00)
10
-Warnings:
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn37;
CREATE FUNCTION fn37( f1 float(23)) returns float(23)
BEGIN
@@ -16674,8 +16632,6 @@ END//
SELECT fn38(1.00e+00);
fn38(1.00e+00)
10
-Warnings:
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn39;
CREATE FUNCTION fn39( f1 float(23) unsigned zerofill) returns float(23) unsigned zerofill
BEGIN
@@ -16685,8 +16641,6 @@ END//
SELECT fn39(1.00e+00);
fn39(1.00e+00)
10
-Warnings:
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn40;
CREATE FUNCTION fn40( f1 float(23) zerofill) returns float(23) zerofill
BEGIN
@@ -16696,8 +16650,6 @@ END//
SELECT fn40(1.00e+00);
fn40(1.00e+00)
10
-Warnings:
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn41;
CREATE FUNCTION fn41( f1 float(24)) returns float(24)
BEGIN
@@ -16716,8 +16668,6 @@ END//
SELECT fn42(1.00e+00);
fn42(1.00e+00)
10
-Warnings:
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn43;
CREATE FUNCTION fn43( f1 float(24) unsigned zerofill) returns float(24) unsigned zerofill
BEGIN
@@ -16727,8 +16677,6 @@ END//
SELECT fn43(1.00e+00);
fn43(1.00e+00)
10
-Warnings:
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn44;
CREATE FUNCTION fn44( f1 float(24) zerofill) returns float(24) zerofill
BEGIN
@@ -16738,8 +16686,6 @@ END//
SELECT fn44(1.00e+00);
fn44(1.00e+00)
10
-Warnings:
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn45;
CREATE FUNCTION fn45( f1 float(53)) returns float(53)
BEGIN
@@ -16758,8 +16704,6 @@ END//
SELECT fn46(1.00e+00);
fn46(1.00e+00)
10
-Warnings:
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn47;
CREATE FUNCTION fn47( f1 float(53) unsigned zerofill) returns float(53) unsigned zerofill
BEGIN
@@ -16769,8 +16713,6 @@ END//
SELECT fn47(1.00e+00);
fn47(1.00e+00)
10
-Warnings:
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn48;
CREATE FUNCTION fn48( f1 float(53) zerofill) returns float(53) zerofill
BEGIN
@@ -16780,8 +16722,6 @@ END//
SELECT fn48(1.00e+00);
fn48(1.00e+00)
10
-Warnings:
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn49;
CREATE FUNCTION fn49( f1 int) returns int
BEGIN
@@ -16793,7 +16733,6 @@ fn49(-2.15e+09)
-2147483638
Warnings:
Warning 1264 Out of range value for column 'f1' at row 1
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn50;
CREATE FUNCTION fn50( f1 int unsigned) returns int unsigned
BEGIN
@@ -16830,8 +16769,6 @@ END//
SELECT fn53(-8388600);
fn53(-8388600)
-8388598
-Warnings:
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn54;
CREATE FUNCTION fn54( f1 mediumint unsigned) returns mediumint unsigned
BEGIN
@@ -16867,8 +16804,6 @@ END//
SELECT fn57(-999999999);
fn57(-999999999)
-1000000000
-Warnings:
-Note 1265 Data truncated for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn58;
CREATE FUNCTION fn58( f1 numeric (0)) returns numeric (0)
BEGIN
@@ -16878,8 +16813,6 @@ END//
SELECT fn58(-999999999);
fn58(-999999999)
-1000000000
-Warnings:
-Note 1265 Data truncated for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn59;
CREATE FUNCTION fn59( f1 numeric (0) unsigned) returns numeric (0) unsigned
BEGIN
@@ -16889,8 +16822,6 @@ END//
SELECT fn59(9999999999);
fn59(9999999999)
9999999999
-Warnings:
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn60;
CREATE FUNCTION fn60( f1 numeric (0) unsigned zerofill) returns numeric (0) unsigned zerofill
BEGIN
@@ -16900,8 +16831,6 @@ END//
SELECT fn60(99999999);
fn60(99999999)
0100000000
-Warnings:
-Note 1265 Data truncated for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn61;
CREATE FUNCTION fn61( f1 numeric (0) zerofill) returns numeric (0) zerofill
BEGIN
@@ -16913,7 +16842,6 @@ fn61(-99999999)
0000000010
Warnings:
Warning 1264 Out of range value for column 'f1' at row 1
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn62;
CREATE FUNCTION fn62( f1 numeric (0, 0)) returns numeric (0, 0)
BEGIN
@@ -16923,8 +16851,6 @@ END//
SELECT fn62(-999999999);
fn62(-999999999)
-1000000000
-Warnings:
-Note 1265 Data truncated for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn63;
CREATE FUNCTION fn63( f1 numeric (0, 0) unsigned) returns numeric (0, 0) unsigned
BEGIN
@@ -16934,8 +16860,6 @@ END//
SELECT fn63(9999999999);
fn63(9999999999)
9999999999
-Warnings:
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn64;
CREATE FUNCTION fn64( f1 numeric (0, 0) unsigned zerofill) returns numeric (0, 0) unsigned zerofill
BEGIN
@@ -16945,8 +16869,6 @@ END//
SELECT fn64(99999999);
fn64(99999999)
0100000000
-Warnings:
-Note 1265 Data truncated for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn65;
CREATE FUNCTION fn65( f1 numeric (0, 0) zerofill) returns numeric (0, 0) zerofill
BEGIN
@@ -16958,7 +16880,6 @@ fn65(-99999999)
0000000010
Warnings:
Warning 1264 Out of range value for column 'f1' at row 1
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn66;
CREATE FUNCTION fn66( f1 numeric (63, 30)) returns numeric (63, 30)
BEGIN
@@ -16970,7 +16891,6 @@ fn66(-1e+36)
-999999999999999999999999999999989.999999999999999999999999999999
Warnings:
Warning 1264 Out of range value for column 'f1' at row 1
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn67;
CREATE FUNCTION fn67( f1 numeric (63, 30) unsigned) returns numeric (63, 30) unsigned
BEGIN
@@ -16982,7 +16902,6 @@ fn67(1e+36)
999999999999999999999999999999999.999999999999999999999999999999
Warnings:
Warning 1264 Out of range value for column 'f1' at row 1
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn68;
CREATE FUNCTION fn68( f1 numeric (63, 30) unsigned zerofill) returns numeric (63, 30) unsigned zerofill
BEGIN
@@ -16994,7 +16913,6 @@ fn68(1e+36)
999999999999999999999999999999999.999999999999999999999999999999
Warnings:
Warning 1264 Out of range value for column 'f1' at row 1
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn69;
CREATE FUNCTION fn69( f1 numeric (63, 30) zerofill) returns numeric (63, 30) zerofill
BEGIN
@@ -17006,7 +16924,6 @@ fn69(-1e+36)
000000000000000000000000000000010.000000000000000000000000000000
Warnings:
Warning 1264 Out of range value for column 'f1' at row 1
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn70_n;
CREATE FUNCTION fn70_n( f1 numeric (64)) returns numeric (64)
BEGIN
@@ -17055,8 +16972,6 @@ END//
SELECT fn74(999999999);
fn74(999999999)
1000000000
-Warnings:
-Note 1265 Data truncated for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn75;
CREATE FUNCTION fn75( f1 numeric unsigned zerofill) returns numeric unsigned zerofill
BEGIN
@@ -17066,8 +16981,6 @@ END//
SELECT fn75(999999999);
fn75(999999999)
1000000000
-Warnings:
-Note 1265 Data truncated for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn76;
CREATE FUNCTION fn76( f1 numeric zerofill) returns numeric zerofill
BEGIN
@@ -17079,7 +16992,6 @@ fn76(-999999999)
0000000010
Warnings:
Warning 1264 Out of range value for column 'f1' at row 1
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn77;
CREATE FUNCTION fn77( f1 real) returns real
BEGIN
@@ -17098,8 +17010,6 @@ END//
SELECT fn78(1.1);
fn78(1.1)
10
-Warnings:
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn79;
CREATE FUNCTION fn79( f1 real unsigned zerofill) returns real unsigned zerofill
BEGIN
@@ -17109,8 +17019,6 @@ END//
SELECT fn79(1.1);
fn79(1.1)
10
-Warnings:
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn80;
CREATE FUNCTION fn80( f1 real zerofill) returns real zerofill
BEGIN
@@ -17120,8 +17028,6 @@ END//
SELECT fn80(1.1);
fn80(1.1)
10
-Warnings:
-Warning 1264 Out of range value for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn81;
CREATE FUNCTION fn81( f1 smallint) returns smallint
BEGIN
@@ -17254,8 +17160,6 @@ END//
SELECT fn94( 'h');
fn94( 'h')
a
-Warnings:
-Warning 1265 Data truncated for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn95;
CREATE FUNCTION fn95( f1 char ascii) returns char ascii
BEGIN
@@ -17265,8 +17169,6 @@ END//
SELECT fn95('h');
fn95('h')
a
-Warnings:
-Warning 1265 Data truncated for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn96;
CREATE FUNCTION fn96( f1 binary) returns binary(2)
BEGIN
@@ -17276,8 +17178,6 @@ END//
SELECT fn96( 'h');
fn96( 'h')
a
-Warnings:
-Warning 1265 Data truncated for column 'f1' at row 1
DROP FUNCTION IF EXISTS fn97;
CREATE FUNCTION fn97( f1 longtext) returns longtext
BEGIN
@@ -22351,7 +22251,7 @@ Warnings:
Note 1305 FUNCTION db_storedproc.fn1 does not exist
drop table IF EXISTS res_t9;
Warnings:
-Note 1051 Unknown table 'res_t9'
+Note 1051 Unknown table 'db_storedproc.res_t9'
create table res_t9 (f1 int, f2 char(25), f3 int);
insert into res_t9 values (10, 'abc', 20);
CREATE FUNCTION fn1(i1 longtext) returns longtext
@@ -22377,7 +22277,7 @@ Warnings:
Note 1305 FUNCTION db_storedproc.fn1 does not exist
drop table IF EXISTS res_t9;
Warnings:
-Note 1051 Unknown table 'res_t9'
+Note 1051 Unknown table 'db_storedproc.res_t9'
create table res_t9 (f1 int, f2 char(25), f3 int);
CREATE FUNCTION fn1(i1 longtext) returns longtext
BEGIN
@@ -22453,8 +22353,6 @@ END//
CALL h1 ();
@x @x2
1 1
-Warnings:
-Error 1318 Incorrect number of arguments for PROCEDURE db_storedproc.sp1; expected 2, got 1
DROP PROCEDURE h1;
DROP PROCEDURE sp1;
@@ -22472,8 +22370,6 @@ END//
CALL h1 ();
@x @x2
1 1
-Warnings:
-Error 1305 PROCEDURE db_storedproc.sp1 does not exist
DROP PROCEDURE h1;
Testcase 4.11.3:
@@ -22493,8 +22389,6 @@ CALL sp1 (1);
set @x=0;
END//
CALL h1();
-Warnings:
-Error 1318 Incorrect number of arguments for PROCEDURE db_storedproc.sp1; expected 2, got 1
SELECT @x, @x2;
@x @x2
1 1
@@ -22513,8 +22407,6 @@ CALL sp1 (1);
set @x=0;
END//
CALL h1 ();
-Warnings:
-Error 1305 PROCEDURE db_storedproc.sp1 does not exist
SELECT @x, @x2;
@x @x2
1 1
@@ -22539,8 +22431,6 @@ END//
CALL h1 ();
@x @x2
1 1
-Warnings:
-Error 1318 Incorrect number of arguments for PROCEDURE db_storedproc.sp1; expected 2, got 1
DROP PROCEDURE h1;
DROP PROCEDURE sp1;
@@ -22563,8 +22453,6 @@ END//
CALL h1 ();
@x @x2
1 1
-Warnings:
-Error 1318 Incorrect number of arguments for PROCEDURE db_storedproc.sp1; expected 2, got 1
DROP PROCEDURE h1;
DROP PROCEDURE sp1;
@@ -22587,8 +22475,6 @@ END//
CALL h1 ();
@x @x2
1 1
-Warnings:
-Error 1318 Incorrect number of arguments for PROCEDURE db_storedproc.sp1; expected 2, got 1
DROP PROCEDURE h1;
DROP PROCEDURE sp1;
@@ -22606,8 +22492,6 @@ END//
CALL h1 ();
@x @x2
1 1
-Warnings:
-Error 1305 PROCEDURE db_storedproc.sp1 does not exist
DROP PROCEDURE h1;
Testcase 4.11.9:
@@ -22627,8 +22511,6 @@ CALL sp1 (1);
set @x=0;
END//
CALL h1();
-Warnings:
-Error 1318 Incorrect number of arguments for PROCEDURE db_storedproc.sp1; expected 2, got 1
SELECT @x, @x2;
@x @x2
1 1
@@ -22647,8 +22529,6 @@ CALL sp1 (1);
set @x=0;
END//
CALL h1 ();
-Warnings:
-Error 1305 PROCEDURE db_storedproc.sp1 does not exist
SELECT @x, @x2;
@x @x2
1 1
@@ -22673,8 +22553,6 @@ END//
CALL h1 ();
@x @x2
1 1
-Warnings:
-Error 1318 Incorrect number of arguments for PROCEDURE db_storedproc.sp1; expected 2, got 1
DROP PROCEDURE h1;
DROP PROCEDURE sp1;
@@ -22697,8 +22575,6 @@ END//
CALL h1 ();
@x @x2
1 1
-Warnings:
-Error 1318 Incorrect number of arguments for PROCEDURE db_storedproc.sp1; expected 2, got 1
DROP PROCEDURE h1;
DROP PROCEDURE sp1;
@@ -22734,8 +22610,6 @@ done
1
done
1
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
DROP PROCEDURE IF EXISTS h1;
DROP TABLE IF EXISTS res_t1;
DROP TABLE IF EXISTS res_t2;
@@ -22809,8 +22683,6 @@ done
0
done
1
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
DROP PROCEDURE IF EXISTS h1;
DROP TABLE IF EXISTS res_t1;
DROP TABLE IF EXISTS res_t2;
@@ -22848,8 +22720,6 @@ done
0
done
1
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
DROP PROCEDURE IF EXISTS h1;
DROP TABLE IF EXISTS res_t1;
DROP TABLE IF EXISTS res_t2;
@@ -22880,8 +22750,6 @@ done
0
done
1
-Warnings:
-Error 1328 Incorrect number of FETCH variables
DROP PROCEDURE IF EXISTS h1;
DROP TABLE IF EXISTS res_t1;
DROP TABLE IF EXISTS res_t2;
@@ -22912,8 +22780,6 @@ done
0
done
1
-Warnings:
-Error 1328 Incorrect number of FETCH variables
DROP PROCEDURE IF EXISTS h1;
DROP TABLE IF EXISTS res_t1;
DROP TABLE IF EXISTS res_t2;
@@ -22944,8 +22810,6 @@ END//
CALL h1();
done
0
-Warnings:
-Error 1328 Incorrect number of FETCH variables
DROP PROCEDURE IF EXISTS h1;
DROP TABLE IF EXISTS res_t1;
DROP TABLE IF EXISTS res_t2;
@@ -22976,8 +22840,6 @@ END//
CALL h1();
done
0
-Warnings:
-Error 1328 Incorrect number of FETCH variables
DROP PROCEDURE IF EXISTS h1;
DROP TABLE IF EXISTS res_t1;
DROP TABLE IF EXISTS res_t2;
@@ -23008,8 +22870,6 @@ done
0
done
1
-Warnings:
-Error 1325 Cursor is already open
DROP PROCEDURE IF EXISTS h1;
DROP TABLE IF EXISTS res_t1;
DROP TABLE IF EXISTS res_t2;
@@ -23041,8 +22901,6 @@ done
0
done @x
1 1
-Warnings:
-Error 1325 Cursor is already open
DROP PROCEDURE IF EXISTS h1;
DROP TABLE IF EXISTS res_t1;
DROP TABLE IF EXISTS res_t2;
@@ -23073,8 +22931,6 @@ END//
CALL h1();
done
0
-Warnings:
-Error 1325 Cursor is already open
DROP PROCEDURE IF EXISTS h1;
DROP TABLE IF EXISTS res_t1;
DROP TABLE IF EXISTS res_t2;
@@ -23105,8 +22961,6 @@ END//
CALL h1();
done
0
-Warnings:
-Error 1325 Cursor is already open
DROP PROCEDURE IF EXISTS h1;
DROP TABLE IF EXISTS res_t1;
DROP TABLE IF EXISTS res_t2;
@@ -23134,8 +22988,6 @@ END//
CALL h1();
done @x
1 1
-Warnings:
-Error 1326 Cursor is not open
DROP PROCEDURE IF EXISTS h1;
DROP TABLE IF EXISTS res_t1;
DROP TABLE IF EXISTS res_t2;
@@ -23163,8 +23015,6 @@ END//
CALL h1();
done @x
1 1
-Warnings:
-Error 1326 Cursor is not open
DROP PROCEDURE IF EXISTS h1;
DROP TABLE IF EXISTS res_t1;
DROP TABLE IF EXISTS res_t2;
@@ -23190,8 +23040,6 @@ set @x=1;
SELECT done, @x;
END//
CALL h1();
-Warnings:
-Error 1326 Cursor is not open
DROP PROCEDURE IF EXISTS h1;
DROP TABLE IF EXISTS res_t1;
DROP TABLE IF EXISTS res_t2;
@@ -23217,8 +23065,6 @@ set @x=1;
SELECT done, @x;
END//
CALL h1();
-Warnings:
-Error 1326 Cursor is not open
DROP PROCEDURE IF EXISTS h1;
drop table IF EXISTS res_t1;
drop table IF EXISTS res_t2;
@@ -23249,8 +23095,6 @@ END//
CALL h1();
done @x
1 1
-Warnings:
-Error 1339 Case not found for CASE statement
DROP PROCEDURE IF EXISTS h1;
drop table IF EXISTS res_t1;
drop table IF EXISTS res_t2;
@@ -23281,8 +23125,6 @@ END//
CALL h1();
done @x
1 1
-Warnings:
-Error 1339 Case not found for CASE statement
DROP PROCEDURE IF EXISTS h1;
drop table IF EXISTS res_t1;
drop table IF EXISTS res_t2;
@@ -23311,8 +23153,6 @@ set @x=1;
SELECT done, @x;
END//
CALL h1();
-Warnings:
-Error 1339 Case not found for CASE statement
DROP PROCEDURE IF EXISTS h1;
drop table IF EXISTS res_t1;
drop table IF EXISTS res_t2;
@@ -23341,8 +23181,6 @@ set @x=1;
SELECT done, @x;
END//
CALL h1();
-Warnings:
-Error 1339 Case not found for CASE statement
DROP PROCEDURE IF EXISTS h1;
DROP TABLE IF EXISTS res_t1;
DROP TABLE IF EXISTS res_t2;
@@ -23491,9 +23329,6 @@ CREATE TABLE res_t1(w CHAR, x CHAR);
INSERT INTO res_t1 VALUES('a', 'b');
INSERT INTO res_t1 VALUES('c', 'd');
CALL h1();
-Warnings:
-Warning 1265 Data truncated for column 'w' at row 1
-Warning 1265 Data truncated for column 'x' at row 1
SELECT @done, @x;
@done @x
1 1
@@ -23516,9 +23351,6 @@ CREATE TABLE res_t1(w CHAR, x CHAR);
INSERT INTO res_t1 VALUES('a', 'b');
INSERT INTO res_t1 VALUES('c', 'd');
CALL h1();
-Warnings:
-Warning 1265 Data truncated for column 'w' at row 1
-Warning 1265 Data truncated for column 'x' at row 1
SELECT @done, @x;
@done @x
1 1
diff --git a/mysql-test/suite/handler/aria.result b/mysql-test/suite/handler/aria.result
index 630b16433a4..ab50da321db 100644
--- a/mysql-test/suite/handler/aria.result
+++ b/mysql-test/suite/handler/aria.result
@@ -582,7 +582,7 @@ select * from t1;
ERROR 42S02: Table 'test.t1' doesn't exist
drop table if exists t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
create table t1 (a int not null) ENGINE=csv;
--> client 2
handler t1 open;
@@ -741,7 +741,7 @@ ERROR HY000: Can't execute the given command because you have active locked tabl
handler t2 close;
ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction
handler t3 open;
-ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction
+ERROR HY000: Can't reopen table: 't3'
# After UNLOCK TABLES handlers should be around and
# we should be able to continue reading through them.
unlock tables;
diff --git a/mysql-test/suite/handler/handler.inc b/mysql-test/suite/handler/handler.inc
index e099d22e3ce..d3134746033 100644
--- a/mysql-test/suite/handler/handler.inc
+++ b/mysql-test/suite/handler/handler.inc
@@ -581,7 +581,7 @@ handler t1 open;
handler t1 read next;
--error ER_LOCK_OR_ACTIVE_TRANSACTION
handler t2 close;
---error ER_LOCK_OR_ACTIVE_TRANSACTION
+--error ER_CANT_REOPEN_TABLE
handler t3 open;
--echo # After UNLOCK TABLES handlers should be around and
--echo # we should be able to continue reading through them.
diff --git a/mysql-test/suite/handler/heap.result b/mysql-test/suite/handler/heap.result
index 55f9fd41c7a..4b5af7afa72 100644
--- a/mysql-test/suite/handler/heap.result
+++ b/mysql-test/suite/handler/heap.result
@@ -582,7 +582,7 @@ select * from t1;
ERROR 42S02: Table 'test.t1' doesn't exist
drop table if exists t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
create table t1 (a int not null) ENGINE=csv;
--> client 2
handler t1 open;
@@ -741,7 +741,7 @@ ERROR HY000: Can't execute the given command because you have active locked tabl
handler t2 close;
ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction
handler t3 open;
-ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction
+ERROR HY000: Can't reopen table: 't3'
# After UNLOCK TABLES handlers should be around and
# we should be able to continue reading through them.
unlock tables;
diff --git a/mysql-test/suite/handler/innodb.result b/mysql-test/suite/handler/innodb.result
index d4103ddd6ff..4338c018052 100644
--- a/mysql-test/suite/handler/innodb.result
+++ b/mysql-test/suite/handler/innodb.result
@@ -583,7 +583,7 @@ select * from t1;
ERROR 42S02: Table 'test.t1' doesn't exist
drop table if exists t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
create table t1 (a int not null) ENGINE=csv;
--> client 2
handler t1 open;
@@ -743,7 +743,7 @@ ERROR HY000: Can't execute the given command because you have active locked tabl
handler t2 close;
ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction
handler t3 open;
-ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction
+ERROR HY000: Can't reopen table: 't3'
# After UNLOCK TABLES handlers should be around and
# we should be able to continue reading through them.
unlock tables;
diff --git a/mysql-test/suite/handler/myisam.result b/mysql-test/suite/handler/myisam.result
index 980b87768da..fcf6d4c133c 100644
--- a/mysql-test/suite/handler/myisam.result
+++ b/mysql-test/suite/handler/myisam.result
@@ -582,7 +582,7 @@ select * from t1;
ERROR 42S02: Table 'test.t1' doesn't exist
drop table if exists t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
create table t1 (a int not null) ENGINE=csv;
--> client 2
handler t1 open;
@@ -741,7 +741,7 @@ ERROR HY000: Can't execute the given command because you have active locked tabl
handler t2 close;
ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction
handler t3 open;
-ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction
+ERROR HY000: Can't reopen table: 't3'
# After UNLOCK TABLES handlers should be around and
# we should be able to continue reading through them.
unlock tables;
diff --git a/mysql-test/suite/innodb/include/restart_and_reinit.inc b/mysql-test/suite/innodb/include/restart_and_reinit.inc
index 207e9c5dc6e..9df66196edf 100644
--- a/mysql-test/suite/innodb/include/restart_and_reinit.inc
+++ b/mysql-test/suite/innodb/include/restart_and_reinit.inc
@@ -6,6 +6,9 @@
source include/not_embedded.inc;
--disable_query_log
+call mtr.add_suppression("InnoDB: New log files created");
+call mtr.add_suppression("InnoDB: Creating foreign key constraint system tables");
+
let $innodb_index_stats = query_get_value(show create table mysql.innodb_index_stats, Create Table, 1);
let $innodb_table_stats = query_get_value(show create table mysql.innodb_table_stats, Create Table, 1);
let $database=`select database()`;
diff --git a/mysql-test/suite/innodb/r/innodb-autoinc-44030.result b/mysql-test/suite/innodb/r/innodb-autoinc-44030.result
index 54e972843f5..93e6ede30f2 100644
--- a/mysql-test/suite/innodb/r/innodb-autoinc-44030.result
+++ b/mysql-test/suite/innodb/r/innodb-autoinc-44030.result
@@ -19,7 +19,9 @@ Table Create Table
t1 CREATE TABLE `t1` (
`d1` int(11) NOT NULL AUTO_INCREMENT,
PRIMARY KEY (`d1`)
-) ENGINE=InnoDB AUTO_INCREMENT=4 DEFAULT CHARSET=latin1
+) ENGINE=InnoDB AUTO_INCREMENT=3 DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES(null);
+ERROR 23000: Duplicate entry '3' for key 'PRIMARY'
INSERT INTO t1 VALUES(null);
SELECT * FROM t1;
d1
diff --git a/mysql-test/suite/innodb/r/innodb-autoinc-56228.result b/mysql-test/suite/innodb/r/innodb-autoinc-56228.result
index 492130d1f08..6a3fd85ebd3 100644
--- a/mysql-test/suite/innodb/r/innodb-autoinc-56228.result
+++ b/mysql-test/suite/innodb/r/innodb-autoinc-56228.result
@@ -1,9 +1,9 @@
DROP TABLE IF EXISTS t1_56228;
Warnings:
-Note 1051 Unknown table 't1_56228'
+Note 1051 Unknown table 'test.t1_56228'
DROP TABLE IF EXISTS t2_56228;
Warnings:
-Note 1051 Unknown table 't2_56228'
+Note 1051 Unknown table 'test.t2_56228'
DROP FUNCTION IF EXISTS bug56228;
Warnings:
Note 1305 FUNCTION test.bug56228 does not exist
@@ -27,4 +27,4 @@ DROP FUNCTION bug56228;
DROP TEMPORARY TABLE t2_56228;
DROP TEMPORARY TABLE IF EXISTS t1_56228;
Warnings:
-Note 1051 Unknown table 't1_56228'
+Note 1051 Unknown table 'test.t1_56228'
diff --git a/mysql-test/suite/innodb/r/innodb-autoinc.result b/mysql-test/suite/innodb/r/innodb-autoinc.result
index c4504f6d348..2ae51b62062 100644
--- a/mysql-test/suite/innodb/r/innodb-autoinc.result
+++ b/mysql-test/suite/innodb/r/innodb-autoinc.result
@@ -171,7 +171,7 @@ t1 CREATE TABLE `t1` (
DROP TABLE t1;
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1 (c1 INT AUTO_INCREMENT, c2 INT, PRIMARY KEY(c1)) ENGINE=InnoDB;
INSERT INTO t1 VALUES (NULL, 1);
DELETE FROM t1 WHERE c1 = 1;
@@ -184,7 +184,7 @@ c1 c2
DROP TABLE t1;
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1 (c1 INT AUTO_INCREMENT, c2 INT, PRIMARY KEY(c1)) ENGINE=InnoDB;
INSERT INTO t1 VALUES (NULL, 1);
DELETE FROM t1 WHERE c1 = 1;
@@ -203,7 +203,7 @@ auto_increment_increment 100
auto_increment_offset 10
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1 (c1 INT AUTO_INCREMENT, PRIMARY KEY(c1)) ENGINE=InnoDB;
INSERT INTO t1 VALUES (NULL),(5),(NULL);
INSERT INTO t1 VALUES (250),(NULL);
@@ -236,7 +236,7 @@ auto_increment_increment 1
auto_increment_offset 1
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1 (c1 INT AUTO_INCREMENT, PRIMARY KEY(c1)) ENGINE=InnoDB;
INSERT INTO t1 VALUES(0);
SELECT * FROM t1;
@@ -275,7 +275,7 @@ auto_increment_increment 1
auto_increment_offset 1
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1 (c1 INT AUTO_INCREMENT, PRIMARY KEY(c1)) ENGINE=InnoDB;
INSERT INTO t1 VALUES(-1);
SELECT * FROM t1;
@@ -321,7 +321,7 @@ auto_increment_increment 1
auto_increment_offset 1
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1 (c1 INT UNSIGNED AUTO_INCREMENT, PRIMARY KEY(c1)) ENGINE=InnoDB;
INSERT INTO t1 VALUES(-1);
Warnings:
@@ -376,7 +376,7 @@ auto_increment_increment 1
auto_increment_offset 1
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1 (c1 INT UNSIGNED AUTO_INCREMENT, PRIMARY KEY(c1)) ENGINE=InnoDB;
INSERT INTO t1 VALUES(-1);
Warnings:
@@ -425,7 +425,7 @@ auto_increment_increment 1
auto_increment_offset 1
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1 (c1 BIGINT AUTO_INCREMENT, PRIMARY KEY(c1)) ENGINE=InnoDB;
INSERT INTO t1 VALUES(NULL);
INSERT INTO t1 VALUES (9223372036854775794);
@@ -458,7 +458,7 @@ auto_increment_increment 1
auto_increment_offset 1
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1 (c1 BIGINT UNSIGNED AUTO_INCREMENT, PRIMARY KEY(c1)) ENGINE=InnoDB;
INSERT INTO t1 VALUES(NULL);
INSERT INTO t1 VALUES (18446744073709551603);
@@ -486,7 +486,7 @@ auto_increment_increment 1
auto_increment_offset 1
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1 (c1 BIGINT UNSIGNED AUTO_INCREMENT, PRIMARY KEY(c1)) ENGINE=InnoDB;
INSERT INTO t1 VALUES(NULL);
INSERT INTO t1 VALUES (18446744073709551603);
@@ -514,7 +514,7 @@ auto_increment_increment 1
auto_increment_offset 1
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1 (c1 BIGINT AUTO_INCREMENT, PRIMARY KEY(c1)) ENGINE=InnoDB;
INSERT INTO t1 VALUES(NULL);
INSERT INTO t1 VALUES(-9223372036854775806);
@@ -550,7 +550,7 @@ auto_increment_increment 1
auto_increment_offset 1
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1 (c1 BIGINT UNSIGNED AUTO_INCREMENT, PRIMARY KEY(c1)) ENGINE=InnoDB;
INSERT INTO t1 VALUES(NULL);
INSERT INTO t1 VALUES (18446744073709551610);
@@ -624,10 +624,10 @@ DROP TABLE t1;
SET @@SESSION.AUTO_INCREMENT_INCREMENT=1, @@SESSION.AUTO_INCREMENT_OFFSET=5;
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
DROP TABLE IF EXISTS t2;
Warnings:
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'test.t2'
CREATE TABLE t1 (
a INT(11) UNSIGNED NOT NULL AUTO_INCREMENT,
b INT(10) UNSIGNED NOT NULL,
@@ -844,10 +844,10 @@ DROP TABLE t1;
DROP TABLE t2;
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
DROP TABLE IF EXISTS t2;
Warnings:
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'test.t2'
CREATE TABLE t1(
c1 INT(10) UNSIGNED NOT NULL AUTO_INCREMENT
PRIMARY KEY) ENGINE=InnoDB;
@@ -1093,7 +1093,7 @@ c1 c2
DROP TABLE t1;
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1(C1 DOUBLE AUTO_INCREMENT KEY, C2 CHAR(10)) ENGINE=InnoDB;
INSERT INTO t1(C1, C2) VALUES (1, 'innodb'), (3, 'innodb');
INSERT INTO t1(C2) VALUES ('innodb');
@@ -1118,7 +1118,7 @@ t1 CREATE TABLE `t1` (
DROP TABLE t1;
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1 (c1 INT AUTO_INCREMENT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 SET c1 = 1;
SHOW CREATE TABLE t1;
@@ -1157,7 +1157,7 @@ t1 CREATE TABLE `t1` (
DROP TABLE t1;
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1 (c1 INTEGER AUTO_INCREMENT, PRIMARY KEY (c1)) ENGINE=InnoDB;
INSERT INTO t1 VALUES (-685113344), (1), (NULL), (NULL);
SELECT * FROM t1;
@@ -1240,7 +1240,7 @@ t1 CREATE TABLE `t1` (
DROP TABLE t1;
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1(c1 BIGINT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t1 VALUES (NULL);
INSERT INTO t1 VALUES (18446744073709551615);
diff --git a/mysql-test/suite/innodb/r/innodb-create-options.result b/mysql-test/suite/innodb/r/innodb-create-options.result
index c32af26c22d..a3dcaee3a10 100644
--- a/mysql-test/suite/innodb/r/innodb-create-options.result
+++ b/mysql-test/suite/innodb/r/innodb-create-options.result
@@ -1,4 +1,4 @@
-SET storage_engine=InnoDB;
+SET default_storage_engine=InnoDB;
SET GLOBAL innodb_file_format=`Barracuda`;
SET GLOBAL innodb_file_per_table=ON;
SET SESSION innodb_strict_mode = ON;
@@ -6,14 +6,14 @@ SET SESSION innodb_strict_mode = ON;
# KEY_BLOCK_SIZE=0 means 'no KEY_BLOCK_SIZE is specified'
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
# 'FIXED' is sent to InnoDB since it is used by MyISAM.
# But it is an invalid mode in InnoDB
CREATE TABLE t1 ( i INT ) ROW_FORMAT=FIXED;
ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: invalid ROW_FORMAT specifier.
+Warning 1478 InnoDB: invalid ROW_FORMAT specifier.
Error 1005 Can't create table `test`.`t1` (errno: 140 "Wrong create options")
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=0;
SHOW WARNINGS;
@@ -46,11 +46,11 @@ SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compact
ALTER TABLE t1 ROW_FORMAT=FIXED KEY_BLOCK_SIZE=0;
-ERROR HY000: Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
+ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'ROW_TYPE'
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: invalid ROW_FORMAT specifier.
-Error 1005 Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
+Warning 1478 InnoDB: invalid ROW_FORMAT specifier.
+Error 1478 Table storage engine 'InnoDB' does not support the create option 'ROW_TYPE'
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compact
@@ -61,78 +61,78 @@ CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPACT KEY_BLOCK_SIZE=1;
ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: cannot specify ROW_FORMAT = COMPACT with KEY_BLOCK_SIZE.
+Warning 1478 InnoDB: cannot specify ROW_FORMAT = COMPACT with KEY_BLOCK_SIZE.
Error 1005 Can't create table `test`.`t1` (errno: 140 "Wrong create options")
CREATE TABLE t1 ( i INT ) ROW_FORMAT=REDUNDANT KEY_BLOCK_SIZE=2;
ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: cannot specify ROW_FORMAT = REDUNDANT with KEY_BLOCK_SIZE.
+Warning 1478 InnoDB: cannot specify ROW_FORMAT = REDUNDANT with KEY_BLOCK_SIZE.
Error 1005 Can't create table `test`.`t1` (errno: 140 "Wrong create options")
CREATE TABLE t1 ( i INT ) ROW_FORMAT=DYNAMIC KEY_BLOCK_SIZE=4;
ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: cannot specify ROW_FORMAT = DYNAMIC with KEY_BLOCK_SIZE.
+Warning 1478 InnoDB: cannot specify ROW_FORMAT = DYNAMIC with KEY_BLOCK_SIZE.
Error 1005 Can't create table `test`.`t1` (errno: 140 "Wrong create options")
-CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8;
+CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2;
SHOW WARNINGS;
Level Code Message
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
-t1 Compressed row_format=COMPRESSED key_block_size=8
+t1 Compressed row_format=COMPRESSED key_block_size=2
ALTER TABLE t1 ADD COLUMN f1 INT;
SHOW WARNINGS;
Level Code Message
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
-t1 Compressed row_format=COMPRESSED key_block_size=8
+t1 Compressed row_format=COMPRESSED key_block_size=2
DROP TABLE IF EXISTS t1;
-CREATE TABLE t1 ( i INT ) ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=16;
+CREATE TABLE t1 ( i INT ) ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=1;
SHOW WARNINGS;
Level Code Message
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
-t1 Compressed key_block_size=16
+t1 Compressed key_block_size=1
ALTER TABLE t1 ADD COLUMN f1 INT;
SHOW WARNINGS;
Level Code Message
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
-t1 Compressed key_block_size=16
+t1 Compressed key_block_size=1
# Test 3) StrictMode=ON, ALTER with each ROW_FORMAT & a valid non-zero KEY_BLOCK_SIZE
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT );
ALTER TABLE t1 ROW_FORMAT=FIXED KEY_BLOCK_SIZE=1;
-ERROR HY000: Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
+ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'ROW_TYPE'
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: invalid ROW_FORMAT specifier.
-Error 1005 Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
+Warning 1478 InnoDB: invalid ROW_FORMAT specifier.
+Error 1478 Table storage engine 'InnoDB' does not support the create option 'ROW_TYPE'
ALTER TABLE t1 ROW_FORMAT=COMPACT KEY_BLOCK_SIZE=2;
-ERROR HY000: Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
+ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE'
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: cannot specify ROW_FORMAT = COMPACT with KEY_BLOCK_SIZE.
-Error 1005 Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
+Warning 1478 InnoDB: cannot specify ROW_FORMAT = COMPACT with KEY_BLOCK_SIZE.
+Error 1478 Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE'
ALTER TABLE t1 ROW_FORMAT=DYNAMIC KEY_BLOCK_SIZE=4;
-ERROR HY000: Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
+ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE'
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: cannot specify ROW_FORMAT = DYNAMIC with KEY_BLOCK_SIZE.
-Error 1005 Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
-ALTER TABLE t1 ROW_FORMAT=REDUNDANT KEY_BLOCK_SIZE=8;
-ERROR HY000: Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
+Warning 1478 InnoDB: cannot specify ROW_FORMAT = DYNAMIC with KEY_BLOCK_SIZE.
+Error 1478 Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE'
+ALTER TABLE t1 ROW_FORMAT=REDUNDANT KEY_BLOCK_SIZE=2;
+ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE'
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: cannot specify ROW_FORMAT = REDUNDANT with KEY_BLOCK_SIZE.
-Error 1005 Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
-ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=16;
+Warning 1478 InnoDB: cannot specify ROW_FORMAT = REDUNDANT with KEY_BLOCK_SIZE.
+Error 1478 Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE'
+ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=1;
SHOW WARNINGS;
Level Code Message
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
-t1 Compressed key_block_size=16
+t1 Compressed key_block_size=1
ALTER TABLE t1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1;
SHOW WARNINGS;
Level Code Message
@@ -146,11 +146,11 @@ SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compact row_format=COMPACT
ALTER TABLE t1 KEY_BLOCK_SIZE=2;
-ERROR HY000: Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
+ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE'
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: cannot specify ROW_FORMAT = COMPACT with KEY_BLOCK_SIZE.
-Error 1005 Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
+Warning 1478 InnoDB: cannot specify ROW_FORMAT = COMPACT with KEY_BLOCK_SIZE.
+Error 1478 Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE'
ALTER TABLE t1 ROW_FORMAT=REDUNDANT;
SHOW WARNINGS;
Level Code Message
@@ -158,35 +158,35 @@ SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Redundant row_format=REDUNDANT
ALTER TABLE t1 KEY_BLOCK_SIZE=4;
-ERROR HY000: Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
+ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE'
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: cannot specify ROW_FORMAT = REDUNDANT with KEY_BLOCK_SIZE.
-Error 1005 Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
+Warning 1478 InnoDB: cannot specify ROW_FORMAT = REDUNDANT with KEY_BLOCK_SIZE.
+Error 1478 Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE'
ALTER TABLE t1 ROW_FORMAT=DYNAMIC;
SHOW WARNINGS;
Level Code Message
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Dynamic row_format=DYNAMIC
-ALTER TABLE t1 KEY_BLOCK_SIZE=8;
-ERROR HY000: Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
+ALTER TABLE t1 KEY_BLOCK_SIZE=2;
+ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE'
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: cannot specify ROW_FORMAT = DYNAMIC with KEY_BLOCK_SIZE.
-Error 1005 Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
+Warning 1478 InnoDB: cannot specify ROW_FORMAT = DYNAMIC with KEY_BLOCK_SIZE.
+Error 1478 Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE'
ALTER TABLE t1 ROW_FORMAT=COMPRESSED;
SHOW WARNINGS;
Level Code Message
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compressed row_format=COMPRESSED
-ALTER TABLE t1 KEY_BLOCK_SIZE=16;
+ALTER TABLE t1 KEY_BLOCK_SIZE=1;
SHOW WARNINGS;
Level Code Message
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
-t1 Compressed row_format=COMPRESSED key_block_size=16
+t1 Compressed row_format=COMPRESSED key_block_size=1
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPACT;
ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=1;
@@ -212,23 +212,23 @@ t1 CREATE TABLE `t1` (
`f1` int(11) DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=2
ALTER TABLE t1 ROW_FORMAT=COMPACT;
-ERROR HY000: Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
+ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE'
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: cannot specify ROW_FORMAT = COMPACT with KEY_BLOCK_SIZE.
-Error 1005 Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
+Warning 1478 InnoDB: cannot specify ROW_FORMAT = COMPACT with KEY_BLOCK_SIZE.
+Error 1478 Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE'
ALTER TABLE t1 ROW_FORMAT=REDUNDANT;
-ERROR HY000: Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
+ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE'
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: cannot specify ROW_FORMAT = REDUNDANT with KEY_BLOCK_SIZE.
-Error 1005 Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
+Warning 1478 InnoDB: cannot specify ROW_FORMAT = REDUNDANT with KEY_BLOCK_SIZE.
+Error 1478 Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE'
ALTER TABLE t1 ROW_FORMAT=DYNAMIC;
-ERROR HY000: Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
+ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE'
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: cannot specify ROW_FORMAT = DYNAMIC with KEY_BLOCK_SIZE.
-Error 1005 Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
+Warning 1478 InnoDB: cannot specify ROW_FORMAT = DYNAMIC with KEY_BLOCK_SIZE.
+Error 1478 Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE'
ALTER TABLE t1 ROW_FORMAT=COMPRESSED;
SHOW WARNINGS;
Level Code Message
@@ -253,7 +253,7 @@ CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=9;
ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: invalid KEY_BLOCK_SIZE = 9. Valid values are [1, 2, 4, 8, 16]
+Warning 1478 InnoDB: invalid KEY_BLOCK_SIZE = 9. Valid values are [1, 2, 4, 8, 16]
Error 1005 Can't create table `test`.`t1` (errno: 140 "Wrong create options")
# Test 7) StrictMode=ON, Make sure ROW_FORMAT= COMPRESSED & DYNAMIC and
# and a valid non-zero KEY_BLOCK_SIZE are rejected with Antelope
@@ -261,24 +261,24 @@ Error 1005 Can't create table `test`.`t1` (errno: 140 "Wrong create options")
SET GLOBAL innodb_file_format=Antelope;
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=4;
ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
+Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
Error 1005 Can't create table `test`.`t1` (errno: 140 "Wrong create options")
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED;
ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope.
+Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope.
Error 1005 Can't create table `test`.`t1` (errno: 140 "Wrong create options")
CREATE TABLE t1 ( i INT ) ROW_FORMAT=DYNAMIC;
ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_format > Antelope.
+Warning 1478 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_format > Antelope.
Error 1005 Can't create table `test`.`t1` (errno: 140 "Wrong create options")
CREATE TABLE t1 ( i INT ) ROW_FORMAT=REDUNDANT;
SHOW WARNINGS;
@@ -297,35 +297,42 @@ DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=DEFAULT;
SHOW WARNINGS;
Level Code Message
-ALTER TABLE t1 KEY_BLOCK_SIZE=8;
-ERROR HY000: Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
+ALTER TABLE t1 KEY_BLOCK_SIZE=2;
+ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE'
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
-Error 1005 Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
+Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
+Error 1478 Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE'
ALTER TABLE t1 ROW_FORMAT=COMPRESSED;
-ERROR HY000: Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
+ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'ROW_FORMAT'
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope.
-Error 1005 Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
+Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope.
+Error 1478 Table storage engine 'InnoDB' does not support the create option 'ROW_FORMAT'
ALTER TABLE t1 ROW_FORMAT=DYNAMIC;
-ERROR HY000: Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
+ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'ROW_FORMAT'
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_format > Antelope.
-Error 1005 Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
+Warning 1478 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_format > Antelope.
+Error 1478 Table storage engine 'InnoDB' does not support the create option 'ROW_FORMAT'
SET GLOBAL innodb_file_format=Barracuda;
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4;
SET GLOBAL innodb_file_format=Antelope;
ALTER TABLE t1 ADD COLUMN f1 INT;
-ERROR HY000: Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
+Warnings:
+Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=4.
+Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope.
+Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `i` int(11) DEFAULT NULL,
+ `f1` int(11) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
-Warning 140 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope.
-Error 1005 Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=0;
SHOW WARNINGS;
Level Code Message
@@ -339,23 +346,23 @@ SET GLOBAL innodb_file_format=Barracuda;
# values during strict mode.
SET GLOBAL innodb_file_per_table=OFF;
DROP TABLE IF EXISTS t1;
-CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=16;
+CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=1;
ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
+Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
Error 1005 Can't create table `test`.`t1` (errno: 140 "Wrong create options")
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED;
ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table.
+Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table.
Error 1005 Can't create table `test`.`t1` (errno: 140 "Wrong create options")
CREATE TABLE t1 ( i INT ) ROW_FORMAT=DYNAMIC;
ERROR HY000: Can't create table `test`.`t1` (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_per_table.
+Warning 1478 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_per_table.
Error 1005 Can't create table `test`.`t1` (errno: 140 "Wrong create options")
CREATE TABLE t1 ( i INT ) ROW_FORMAT=REDUNDANT;
SHOW WARNINGS;
@@ -375,23 +382,23 @@ CREATE TABLE t1 ( i INT ) ROW_FORMAT=DEFAULT;
SHOW WARNINGS;
Level Code Message
ALTER TABLE t1 KEY_BLOCK_SIZE=1;
-ERROR HY000: Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
+ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE'
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
-Error 1005 Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
+Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
+Error 1478 Table storage engine 'InnoDB' does not support the create option 'KEY_BLOCK_SIZE'
ALTER TABLE t1 ROW_FORMAT=COMPRESSED;
-ERROR HY000: Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
+ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'ROW_FORMAT'
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table.
-Error 1005 Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
+Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table.
+Error 1478 Table storage engine 'InnoDB' does not support the create option 'ROW_FORMAT'
ALTER TABLE t1 ROW_FORMAT=DYNAMIC;
-ERROR HY000: Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
+ERROR HY000: Table storage engine 'InnoDB' does not support the create option 'ROW_FORMAT'
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_per_table.
-Error 1005 Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
+Warning 1478 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_per_table.
+Error 1478 Table storage engine 'InnoDB' does not support the create option 'ROW_FORMAT'
ALTER TABLE t1 ROW_FORMAT=COMPACT;
SHOW WARNINGS;
Level Code Message
@@ -415,12 +422,8 @@ DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4;
SET GLOBAL innodb_file_per_table=OFF;
ALTER TABLE t1 ADD COLUMN f1 INT;
-ERROR HY000: Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
-Warning 140 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table.
-Error 1005 Can't create table `test`.`#sql-temporary` (errno: 140 "Wrong create options")
ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=0;
SHOW WARNINGS;
Level Code Message
@@ -437,10 +440,10 @@ SET SESSION innodb_strict_mode = OFF;
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=FIXED;
Warnings:
-Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compact row_format=FIXED
@@ -477,10 +480,10 @@ TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compact
ALTER TABLE t1 ROW_FORMAT=FIXED KEY_BLOCK_SIZE=0;
Warnings:
-Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compact row_format=FIXED
@@ -489,70 +492,70 @@ t1 Compact row_format=FIXED
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPACT KEY_BLOCK_SIZE=1;
Warnings:
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compact row_format=COMPACT key_block_size=1
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=REDUNDANT KEY_BLOCK_SIZE=2;
Warnings:
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Redundant row_format=REDUNDANT key_block_size=2
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=DYNAMIC KEY_BLOCK_SIZE=4;
Warnings:
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=4 unless ROW_FORMAT=COMPRESSED.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=4 unless ROW_FORMAT=COMPRESSED.
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=4 unless ROW_FORMAT=COMPRESSED.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=4 unless ROW_FORMAT=COMPRESSED.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Dynamic row_format=DYNAMIC key_block_size=4
DROP TABLE IF EXISTS t1;
-CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8;
+CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2;
SHOW WARNINGS;
Level Code Message
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
-t1 Compressed row_format=COMPRESSED key_block_size=8
+t1 Compressed row_format=COMPRESSED key_block_size=2
ALTER TABLE t1 ADD COLUMN f1 INT;
SHOW WARNINGS;
Level Code Message
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
-t1 Compressed row_format=COMPRESSED key_block_size=8
+t1 Compressed row_format=COMPRESSED key_block_size=2
DROP TABLE IF EXISTS t1;
-CREATE TABLE t1 ( i INT ) ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=16;
+CREATE TABLE t1 ( i INT ) ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=1;
SHOW WARNINGS;
Level Code Message
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
-t1 Compressed key_block_size=16
+t1 Compressed key_block_size=1
ALTER TABLE t1 ADD COLUMN f1 INT;
SHOW WARNINGS;
Level Code Message
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
-t1 Compressed key_block_size=16
+t1 Compressed key_block_size=1
# Test 11) StrictMode=OFF, ALTER with each ROW_FORMAT & a valid KEY_BLOCK_SIZE
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT );
ALTER TABLE t1 ROW_FORMAT=FIXED KEY_BLOCK_SIZE=1;
Warnings:
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
-Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
+Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
-Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
+Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compact row_format=FIXED key_block_size=1
@@ -560,10 +563,10 @@ DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT );
ALTER TABLE t1 ROW_FORMAT=COMPACT KEY_BLOCK_SIZE=2;
Warnings:
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compact row_format=COMPACT key_block_size=2
@@ -571,32 +574,32 @@ DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT );
ALTER TABLE t1 ROW_FORMAT=DYNAMIC KEY_BLOCK_SIZE=4;
Warnings:
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=4 unless ROW_FORMAT=COMPRESSED.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=4 unless ROW_FORMAT=COMPRESSED.
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=4 unless ROW_FORMAT=COMPRESSED.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=4 unless ROW_FORMAT=COMPRESSED.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Dynamic row_format=DYNAMIC key_block_size=4
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT );
-ALTER TABLE t1 ROW_FORMAT=REDUNDANT KEY_BLOCK_SIZE=8;
+ALTER TABLE t1 ROW_FORMAT=REDUNDANT KEY_BLOCK_SIZE=2;
Warnings:
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=8 unless ROW_FORMAT=COMPRESSED.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=8 unless ROW_FORMAT=COMPRESSED.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
-t1 Redundant row_format=REDUNDANT key_block_size=8
+t1 Redundant row_format=REDUNDANT key_block_size=2
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT );
-ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=16;
+ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=1;
SHOW WARNINGS;
Level Code Message
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
-t1 Compressed key_block_size=16
+t1 Compressed key_block_size=1
ALTER TABLE t1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1;
SHOW WARNINGS;
Level Code Message
@@ -611,28 +614,28 @@ TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compact row_format=COMPACT
ALTER TABLE t1 KEY_BLOCK_SIZE=2;
Warnings:
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compact row_format=COMPACT key_block_size=2
ALTER TABLE t1 ROW_FORMAT=REDUNDANT;
Warnings:
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Redundant row_format=REDUNDANT key_block_size=2
ALTER TABLE t1 ROW_FORMAT=DYNAMIC;
Warnings:
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=2 unless ROW_FORMAT=COMPRESSED.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Dynamic row_format=DYNAMIC key_block_size=2
@@ -650,23 +653,23 @@ TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compressed row_format=COMPRESSED key_block_size=4
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPACT;
-ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=8;
+ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=2;
SHOW WARNINGS;
Level Code Message
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
-t1 Compressed key_block_size=8
+t1 Compressed key_block_size=2
# Test 13) StrictMode=OFF, CREATE with a valid KEY_BLOCK_SIZE
# ALTER with each ROW_FORMAT
DROP TABLE IF EXISTS t1;
-CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=16;
+CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=1;
SHOW WARNINGS;
Level Code Message
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`i` int(11) DEFAULT NULL
-) ENGINE=InnoDB DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=16
+) ENGINE=InnoDB DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=1
ALTER TABLE t1 ADD COLUMN f1 INT;
SHOW WARNINGS;
Level Code Message
@@ -675,40 +678,40 @@ Table Create Table
t1 CREATE TABLE `t1` (
`i` int(11) DEFAULT NULL,
`f1` int(11) DEFAULT NULL
-) ENGINE=InnoDB DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=16
+) ENGINE=InnoDB DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=1
ALTER TABLE t1 ROW_FORMAT=COMPACT;
Warnings:
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=16 unless ROW_FORMAT=COMPRESSED.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=16 unless ROW_FORMAT=COMPRESSED.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
-t1 Compact row_format=COMPACT key_block_size=16
+t1 Compact row_format=COMPACT key_block_size=1
ALTER TABLE t1 ROW_FORMAT=REDUNDANT;
Warnings:
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=16 unless ROW_FORMAT=COMPRESSED.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=16 unless ROW_FORMAT=COMPRESSED.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
-t1 Redundant row_format=REDUNDANT key_block_size=16
+t1 Redundant row_format=REDUNDANT key_block_size=1
ALTER TABLE t1 ROW_FORMAT=DYNAMIC;
Warnings:
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=16 unless ROW_FORMAT=COMPRESSED.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=16 unless ROW_FORMAT=COMPRESSED.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
-t1 Dynamic row_format=DYNAMIC key_block_size=16
+t1 Dynamic row_format=DYNAMIC key_block_size=1
ALTER TABLE t1 ROW_FORMAT=COMPRESSED;
SHOW WARNINGS;
Level Code Message
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
-t1 Compressed row_format=COMPRESSED key_block_size=16
+t1 Compressed row_format=COMPRESSED key_block_size=1
ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=0;
SHOW WARNINGS;
Level Code Message
@@ -721,14 +724,15 @@ Level Code Message
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compact row_format=COMPACT
-# Test 14) StrictMode=OFF, CREATE with an invalid KEY_BLOCK_SIZE, it defaults to 8
+# Test 14) StrictMode=OFF, CREATE with an invalid KEY_BLOCK_SIZE,
+# it defaults to half of the page size.
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=15;
Warnings:
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=15.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=15.
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=15.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=15.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compact key_block_size=15
@@ -745,16 +749,16 @@ t1 Compressed row_format=COMPRESSED key_block_size=1
SET GLOBAL innodb_file_format=Antelope;
ALTER TABLE t1 ADD COLUMN f1 INT;
Warnings:
-Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=1.
-Warning 140 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope.
-Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1.
+Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope.
+Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=1.
-Warning 140 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope.
-Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1.
+Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope.
+Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compact row_format=COMPRESSED key_block_size=1
@@ -775,12 +779,12 @@ t1 Dynamic row_format=DYNAMIC
SET GLOBAL innodb_file_format=Antelope;
ALTER TABLE t1 ADD COLUMN f1 INT;
Warnings:
-Warning 140 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_format > Antelope.
-Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 1478 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_format > Antelope.
+Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_format > Antelope.
-Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 1478 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_format > Antelope.
+Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compact row_format=DYNAMIC
@@ -803,20 +807,11 @@ TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Compressed row_format=COMPRESSED key_block_size=2
SET GLOBAL innodb_file_per_table=OFF;
ALTER TABLE t1 ADD COLUMN f1 INT;
-Warnings:
-Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=2.
-Warning 140 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table.
-Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=2.
-Warning 140 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table.
-Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
-t1 Compact row_format=COMPRESSED key_block_size=2
+t1 Compressed row_format=COMPRESSED key_block_size=2
SET GLOBAL innodb_file_per_table=ON;
ALTER TABLE t1 ADD COLUMN f2 INT;
SHOW WARNINGS;
@@ -833,16 +828,11 @@ TABLE_NAME ROW_FORMAT CREATE_OPTIONS
t1 Dynamic row_format=DYNAMIC
SET GLOBAL innodb_file_per_table=OFF;
ALTER TABLE t1 ADD COLUMN f1 INT;
-Warnings:
-Warning 140 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_per_table.
-Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
SHOW WARNINGS;
Level Code Message
-Warning 140 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_per_table.
-Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
TABLE_NAME ROW_FORMAT CREATE_OPTIONS
-t1 Compact row_format=DYNAMIC
+t1 Dynamic row_format=DYNAMIC
SET GLOBAL innodb_file_per_table=ON;
ALTER TABLE t1 ADD COLUMN f2 INT;
SHOW WARNINGS;
diff --git a/mysql-test/suite/innodb/r/innodb-index.result b/mysql-test/suite/innodb/r/innodb-index.result
index 9130b3a4ef2..d9242e8ee33 100644
--- a/mysql-test/suite/innodb/r/innodb-index.result
+++ b/mysql-test/suite/innodb/r/innodb-index.result
@@ -1,125 +1,12 @@
set global innodb_file_per_table=on;
set global innodb_file_format='Barracuda';
-CREATE TABLE t1_purge (
-A INT,
-B BLOB, C BLOB, D BLOB, E BLOB,
-F BLOB, G BLOB, H BLOB,
-PRIMARY KEY (B(767), C(767), D(767), E(767), A),
-INDEX (A)
-) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
-INSERT INTO t1_purge VALUES (1,
-REPEAT('b', 766), REPEAT('c', 766), REPEAT('d', 766), REPEAT('e', 766),
-REPEAT('f', 766), REPEAT('g', 766), REPEAT('h', 766));
-CREATE TABLE t2_purge (
-A INT PRIMARY KEY,
-B BLOB, C BLOB, D BLOB, E BLOB,
-F BLOB, G BLOB, H BLOB, I BLOB,
-J BLOB, K BLOB, L BLOB,
-INDEX (B(767))) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
-INSERT INTO t2_purge VALUES (1,
-REPEAT('b', 766), REPEAT('c', 766), REPEAT('d', 766), REPEAT('e', 766),
-REPEAT('f', 766), REPEAT('g', 766), REPEAT('h', 766), REPEAT('i', 766),
-REPEAT('j', 766), REPEAT('k', 766), REPEAT('l', 766));
-CREATE TABLE t3_purge (
-A INT,
-B VARCHAR(800), C VARCHAR(800), D VARCHAR(800), E VARCHAR(800),
-F VARCHAR(800), G VARCHAR(800), H VARCHAR(800),
-PRIMARY KEY (B(767), C(767), D(767), E(767), A),
-INDEX (A)
-) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
-INSERT INTO t3_purge SELECT * FROM t1_purge;
-CREATE TABLE t4_purge (
-A INT PRIMARY KEY,
-B VARCHAR(800), C VARCHAR(800), D VARCHAR(800), E VARCHAR(800),
-F VARCHAR(800), G VARCHAR(800), H VARCHAR(800), I VARCHAR(800),
-J VARCHAR(800), K VARCHAR(800), L VARCHAR(800),
-INDEX (B(767))) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
-INSERT INTO t4_purge SELECT * FROM t2_purge;
-DELETE FROM t1_purge;
-DELETE FROM t2_purge;
-DELETE FROM t3_purge;
-DELETE FROM t4_purge;
-SET @r=REPEAT('a',500);
-CREATE TABLE t12637786(a INT,
-v1 VARCHAR(500), v2 VARCHAR(500), v3 VARCHAR(500),
-v4 VARCHAR(500), v5 VARCHAR(500), v6 VARCHAR(500),
-v7 VARCHAR(500), v8 VARCHAR(500), v9 VARCHAR(500),
-v10 VARCHAR(500), v11 VARCHAR(500), v12 VARCHAR(500),
-v13 VARCHAR(500), v14 VARCHAR(500), v15 VARCHAR(500),
-v16 VARCHAR(500), v17 VARCHAR(500), v18 VARCHAR(500)
-) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
-CREATE INDEX idx1 ON t12637786(a,v1);
-INSERT INTO t12637786 VALUES(9,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r);
-UPDATE t12637786 SET a=1000;
-DELETE FROM t12637786;
-create table t12963823(a blob,b blob,c blob,d blob,e blob,f blob,g blob,h blob,
-i blob,j blob,k blob,l blob,m blob,n blob,o blob,p blob)
-engine=innodb row_format=dynamic;
-SET @r = repeat('a', 767);
-insert into t12963823 values (@r,@r,@r,@r, @r,@r,@r,@r, @r,@r,@r,@r, @r,@r,@r,@r);
-create index ndx_a on t12963823 (a(500));
-create index ndx_b on t12963823 (b(500));
-create index ndx_c on t12963823 (c(500));
-create index ndx_d on t12963823 (d(500));
-create index ndx_e on t12963823 (e(500));
-create index ndx_f on t12963823 (f(500));
-create index ndx_k on t12963823 (k(500));
-create index ndx_l on t12963823 (l(500));
-SET @r = repeat('b', 500);
-update t12963823 set a=@r,b=@r,c=@r,d=@r;
-update t12963823 set e=@r,f=@r,g=@r,h=@r;
-update t12963823 set i=@r,j=@r,k=@r,l=@r;
-update t12963823 set m=@r,n=@r,o=@r,p=@r;
-alter table t12963823 drop index ndx_a;
-alter table t12963823 drop index ndx_b;
-create index ndx_g on t12963823 (g(500));
-create index ndx_h on t12963823 (h(500));
-create index ndx_i on t12963823 (i(500));
-create index ndx_j on t12963823 (j(500));
-create index ndx_m on t12963823 (m(500));
-create index ndx_n on t12963823 (n(500));
-create index ndx_o on t12963823 (o(500));
-create index ndx_p on t12963823 (p(500));
-show create table t12963823;
-Table Create Table
-t12963823 CREATE TABLE `t12963823` (
- `a` blob,
- `b` blob,
- `c` blob,
- `d` blob,
- `e` blob,
- `f` blob,
- `g` blob,
- `h` blob,
- `i` blob,
- `j` blob,
- `k` blob,
- `l` blob,
- `m` blob,
- `n` blob,
- `o` blob,
- `p` blob,
- KEY `ndx_c` (`c`(500)),
- KEY `ndx_d` (`d`(500)),
- KEY `ndx_e` (`e`(500)),
- KEY `ndx_f` (`f`(500)),
- KEY `ndx_k` (`k`(500)),
- KEY `ndx_l` (`l`(500)),
- KEY `ndx_g` (`g`(500)),
- KEY `ndx_h` (`h`(500)),
- KEY `ndx_i` (`i`(500)),
- KEY `ndx_j` (`j`(500)),
- KEY `ndx_m` (`m`(500)),
- KEY `ndx_n` (`n`(500)),
- KEY `ndx_o` (`o`(500)),
- KEY `ndx_p` (`p`(500))
-) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC
+SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS;
create table t1(a varchar(2) primary key) engine=innodb;
insert into t1 values('');
create index t1a1 on t1(a(1));
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
drop table t1;
-set global innodb_file_per_table=0;
-set global innodb_file_format=Antelope;
create table t1(a int not null, b int, c char(10) not null, d varchar(20)) engine = innodb;
insert into t1 values (5,5,'oo','oo'),(4,4,'tr','tr'),(3,4,'ad','ad'),(2,3,'ak','ak');
commit;
@@ -128,6 +15,8 @@ ERROR 42000: Duplicate key name 'b'
alter table t1 add index (b,b);
ERROR 42S21: Duplicate column name 'b'
alter table t1 add index d2 (d);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -137,6 +26,7 @@ t1 CREATE TABLE `t1` (
`d` varchar(20) DEFAULT NULL,
KEY `d2` (`d`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
+analyze table t1;
explain select * from t1 force index(d2) order by d;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL d2 23 NULL 4
@@ -158,6 +48,8 @@ t1 CREATE TABLE `t1` (
KEY `d2` (`d`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
alter table t1 add index (b);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -169,6 +61,8 @@ t1 CREATE TABLE `t1` (
KEY `b` (`b`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
alter table t1 add unique index (c), add index (d);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -181,10 +75,15 @@ t1 CREATE TABLE `t1` (
KEY `b` (`b`),
KEY `d` (`d`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
explain select * from t1 force index(c) order by c;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL c 10 NULL 4
alter table t1 add primary key (a), drop index c;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -197,11 +96,14 @@ t1 CREATE TABLE `t1` (
KEY `b` (`b`),
KEY `d` (`d`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
+affected rows: 1
alter table t1 add primary key (c);
ERROR 42000: Multiple primary key defined
alter table t1 drop primary key, add primary key (b);
ERROR 23000: Duplicate entry '4' for key 'PRIMARY'
create unique index c on t1 (c);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -215,6 +117,9 @@ t1 CREATE TABLE `t1` (
KEY `b` (`b`),
KEY `d` (`d`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
explain select * from t1 force index(c) order by c;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL c 10 NULL 4
@@ -225,6 +130,8 @@ a b c d
5 5 oo oo
4 4 tr tr
alter table t1 drop index b, add index (b);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -235,8 +142,8 @@ t1 CREATE TABLE `t1` (
PRIMARY KEY (`a`),
UNIQUE KEY `c` (`c`),
KEY `d2` (`d`),
- KEY `d` (`d`),
- KEY `b` (`b`)
+ KEY `b` (`b`),
+ KEY `d` (`d`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
insert into t1 values(6,1,'ggg','ggg');
select * from t1;
@@ -267,6 +174,7 @@ a b c d
6 1 ggg ggg
5 5 oo oo
4 4 tr tr
+analyze table t1;
explain select * from t1 force index(b) order by b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL b 5 NULL 5
@@ -286,14 +194,16 @@ t1 CREATE TABLE `t1` (
PRIMARY KEY (`a`),
UNIQUE KEY `c` (`c`),
KEY `d2` (`d`),
- KEY `d` (`d`),
- KEY `b` (`b`)
+ KEY `b` (`b`),
+ KEY `d` (`d`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
drop table t1;
create table t1(a int not null, b int, c char(10), d varchar(20), primary key (a)) engine = innodb;
insert into t1 values (1,1,'ab','ab'),(2,2,'ac','ac'),(3,3,'ad','ad'),(4,4,'afe','afe');
commit;
alter table t1 add index (c(2));
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -304,7 +214,10 @@ t1 CREATE TABLE `t1` (
PRIMARY KEY (`a`),
KEY `c` (`c`(2))
) ENGINE=InnoDB DEFAULT CHARSET=latin1
+affected rows: 1
alter table t1 add unique index (d(10));
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -316,7 +229,9 @@ t1 CREATE TABLE `t1` (
UNIQUE KEY `d` (`d`(10)),
KEY `c` (`c`(2))
) ENGINE=InnoDB DEFAULT CHARSET=latin1
+affected rows: 1
insert into t1 values(5,1,'ggg','ggg');
+analyze table t1;
select * from t1;
a b c d
1 1 ab ab
@@ -359,6 +274,8 @@ t1 CREATE TABLE `t1` (
KEY `c` (`c`(2))
) ENGINE=InnoDB DEFAULT CHARSET=latin1
alter table t1 drop index d;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
insert into t1 values(8,9,'fff','fff');
select * from t1;
a b c d
@@ -376,6 +293,7 @@ a b c d
4 4 afe afe
8 9 fff fff
5 1 ggg ggg
+analyze table t1;
explain select * from t1 order by b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 6 Using filesort
@@ -400,6 +318,8 @@ create table t1(a int not null, b int, c char(10), d varchar(20), primary key (a
insert into t1 values (1,1,'ab','ab'),(2,2,'ac','ac'),(3,2,'ad','ad'),(4,4,'afe','afe');
commit;
alter table t1 add unique index (b,c);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
insert into t1 values(8,9,'fff','fff');
select * from t1;
a b c d
@@ -415,6 +335,7 @@ a b c d
3 2 ad ad
4 4 afe afe
8 9 fff fff
+analyze table t1;
explain select * from t1 force index(b) order by b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL b 16 NULL 5
@@ -429,6 +350,8 @@ t1 CREATE TABLE `t1` (
UNIQUE KEY `b` (`b`,`c`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
alter table t1 add index (b,c);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
insert into t1 values(11,11,'kkk','kkk');
select * from t1;
a b c d
@@ -446,6 +369,7 @@ a b c d
4 4 afe afe
8 9 fff fff
11 11 kkk kkk
+analyze table t1;
explain select * from t1 force index(b) order by b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL b 16 NULL 6
@@ -461,6 +385,8 @@ t1 CREATE TABLE `t1` (
KEY `b_2` (`b`,`c`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
alter table t1 add unique index (c,d);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
insert into t1 values(13,13,'yyy','aaa');
select * from t1;
a b c d
@@ -489,6 +415,7 @@ a b c d
8 9 fff fff
11 11 kkk kkk
13 13 yyy aaa
+analyze table t1;
explain select * from t1 force index(b) order by b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL b 16 NULL 7
@@ -511,9 +438,9 @@ drop table t1;
create table t1(a int not null, b int not null, c int, primary key (a), key (b)) engine = innodb;
create table t3(a int not null, c int not null, d int, primary key (a), key (c)) engine = innodb;
create table t4(a int not null, d int not null, e int, primary key (a), key (d)) engine = innodb;
-create table t2(a int not null, b int not null, c int not null, d int not null, e int,
-foreign key (b) references t1(b) on delete cascade,
-foreign key (c) references t3(c), foreign key (d) references t4(d))
+create table t2(a int not null, b int, c int, d int, e int,
+foreign key (b) references t1(b) on delete set null,
+foreign key (c) references t3(c), foreign key (d) references t4(d) on update set null)
engine = innodb;
alter table t1 drop index b;
ERROR HY000: Cannot drop index 'b': needed in a foreign key constraint
@@ -525,15 +452,49 @@ alter table t2 drop index b;
ERROR HY000: Cannot drop index 'b': needed in a foreign key constraint
alter table t2 drop index b, drop index c, drop index d;
ERROR HY000: Cannot drop index 'b': needed in a foreign key constraint
+alter table t2 MODIFY b INT NOT NULL, ALGORITHM=COPY;
+ERROR HY000: Cannot change column 'b': used in a foreign key constraint 't2_ibfk_1'
+set @old_sql_mode = @@sql_mode;
+set @@sql_mode = 'STRICT_TRANS_TABLES';
+alter table t2 MODIFY b INT NOT NULL, ALGORITHM=INPLACE;
+ERROR HY000: Column 'b' cannot be NOT NULL: needed in a foreign key constraint 'test/t2_ibfk_1' SET NULL
+set @@sql_mode = @old_sql_mode;
+SET FOREIGN_KEY_CHECKS=0;
+alter table t2 DROP COLUMN b, ALGORITHM=COPY;
+ERROR HY000: Cannot drop column 'b': needed in a foreign key constraint 't2_ibfk_1'
+alter table t2 DROP COLUMN b;
+ERROR HY000: Cannot drop column 'b': needed in a foreign key constraint 'test/t2_ibfk_1'
+alter table t1 DROP COLUMN b, ALGORITHM=COPY;
+ERROR HY000: Cannot drop column 'b': needed in a foreign key constraint 't2_ibfk_1' of table 'test.t2'
+alter table t1 DROP COLUMN b;
+ERROR HY000: Cannot drop column 'b': needed in a foreign key constraint 'test/t2_ibfk_1' of table '"test"."t2"'
+SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS;
create unique index dc on t2 (d,c);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
create index dc on t1 (b,c);
-alter table t2 add primary key (a);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+set @@sql_mode = 'STRICT_TRANS_TABLES';
+alter table t2 add primary key (alpha), change a alpha int,
+change b beta int not null, change c charlie int not null;
+ERROR HY000: Column 'b' cannot be NOT NULL: needed in a foreign key constraint 'test/t2_ibfk_1' SET NULL
+alter table t2 add primary key (alpha), change a alpha int,
+change c charlie int not null, change d delta int not null;
+ERROR HY000: Column 'd' cannot be NOT NULL: needed in a foreign key constraint 'test/t2_ibfk_3' SET NULL
+alter table t2 add primary key (alpha), change a alpha int,
+change b beta int, modify c int not null;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+set @@sql_mode = @old_sql_mode;
insert into t1 values (1,1,1);
insert into t3 values (1,1,1);
insert into t4 values (1,1,1);
insert into t2 values (1,1,1,1,1);
commit;
alter table t4 add constraint dc foreign key (a) references t1(a);
+affected rows: 1
+info: Records: 1 Duplicates: 0 Warnings: 0
show create table t4;
Table Create Table
t4 CREATE TABLE `t4` (
@@ -546,6 +507,10 @@ t4 CREATE TABLE `t4` (
) ENGINE=InnoDB DEFAULT CHARSET=latin1
alter table t3 add constraint dc foreign key (a) references t1(a);
ERROR HY000: Can't create table `test`.`#sql-temporary` (errno: 121 "Duplicate key on write or update")
+SET FOREIGN_KEY_CHECKS=0;
+alter table t3 add constraint dc foreign key (a) references t1(a);
+ERROR HY000: Failed to add the foreign key constraint 'test/dc' to system tables
+SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS;
show create table t3;
Table Create Table
t3 CREATE TABLE `t3` (
@@ -555,37 +520,41 @@ t3 CREATE TABLE `t3` (
PRIMARY KEY (`a`),
KEY `c` (`c`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
-alter table t2 drop index b, add index (b);
-ERROR 42000: Incorrect index name 'b'
+alter table t2 drop index b, add index (beta);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
show create table t2;
Table Create Table
t2 CREATE TABLE `t2` (
- `a` int(11) NOT NULL,
- `b` int(11) NOT NULL,
+ `alpha` int(11) NOT NULL DEFAULT '0',
+ `beta` int(11) DEFAULT NULL,
`c` int(11) NOT NULL,
- `d` int(11) NOT NULL,
+ `d` int(11) DEFAULT NULL,
`e` int(11) DEFAULT NULL,
- PRIMARY KEY (`a`),
+ PRIMARY KEY (`alpha`),
UNIQUE KEY `dc` (`d`,`c`),
- KEY `b` (`b`),
KEY `c` (`c`),
- CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`b`) REFERENCES `t1` (`b`) ON DELETE CASCADE,
+ KEY `beta` (`beta`),
+ CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`beta`) REFERENCES `t1` (`b`) ON DELETE SET NULL,
CONSTRAINT `t2_ibfk_2` FOREIGN KEY (`c`) REFERENCES `t3` (`c`),
- CONSTRAINT `t2_ibfk_3` FOREIGN KEY (`d`) REFERENCES `t4` (`d`)
+ CONSTRAINT `t2_ibfk_3` FOREIGN KEY (`d`) REFERENCES `t4` (`d`) ON UPDATE SET NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1
delete from t1;
ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test`.`t4`, CONSTRAINT `dc` FOREIGN KEY (`a`) REFERENCES `t1` (`a`))
drop index dc on t4;
ERROR 42000: Can't DROP 'dc'; check that column/key exists
alter table t3 drop foreign key dc;
-ERROR HY000: Error on rename of './test/t3' to '#sql2-temporary' (errno: 152 "Cannot delete a parent row")
+ERROR 42000: Can't DROP 'dc'; check that column/key exists
alter table t4 drop foreign key dc;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
select * from t2;
-a b c d e
+alpha beta c d e
1 1 1 1 1
delete from t1;
select * from t2;
-a b c d e
+alpha beta c d e
+1 NULL 1 1 1
drop table t2,t4,t3,t1;
create table t1(a int not null, b int, c char(10), d varchar(20), primary key (a)) engine = innodb default charset=utf8;
insert into t1 values (1,1,'ab','ab'),(2,2,'ac','ac'),(3,2,'ad','ad'),(4,4,'afe','afe');
@@ -759,6 +728,8 @@ drop table t1;
create table t1(a int not null, b int not null, c int, primary key (a), key(c)) engine=innodb;
insert into t1 values (5,1,5),(4,2,4),(3,3,3),(2,4,2),(1,5,1);
alter table t1 add unique index (b);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
insert into t1 values (10,20,20),(11,19,19),(12,18,18),(13,17,17);
show create table t1;
Table Create Table
@@ -773,6 +744,7 @@ t1 CREATE TABLE `t1` (
check table t1;
Table Op Msg_type Msg_text
test.t1 check status OK
+analyze table t1;
explain select * from t1 force index(c) order by c;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL c 5 NULL 9
@@ -819,6 +791,8 @@ drop table t1;
create table t1(a int not null, b int not null) engine=innodb;
insert into t1 values (1,1);
alter table t1 add primary key(b);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
insert into t1 values (2,2);
show create table t1;
Table Create Table
@@ -834,6 +808,9 @@ select * from t1;
a b
1 1
2 2
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
explain select * from t1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL NULL NULL NULL NULL 2
@@ -850,6 +827,8 @@ drop table t1;
create table t1(a int not null) engine=innodb;
insert into t1 values (1);
alter table t1 add primary key(a);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
insert into t1 values (2);
show create table t1;
Table Create Table
@@ -865,6 +844,9 @@ select * from t1;
a
1
2
+analyze table t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
explain select * from t1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL PRIMARY 4 NULL 2 Using index
@@ -872,188 +854,11 @@ explain select * from t1 order by a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL PRIMARY 4 NULL 2 Using index
drop table t1;
-create table t2(d varchar(17) primary key) engine=innodb default charset=utf8;
-create table t3(a int primary key) engine=innodb;
-insert into t3 values(22),(44),(33),(55),(66);
-insert into t2 values ('jejdkrun87'),('adfd72nh9k'),
-('adfdpplkeock'),('adfdijnmnb78k'),('adfdijn0loKNHJik');
-create table t1(a int, b blob, c text, d text not null)
-engine=innodb default charset = utf8;
-insert into t1
-select a,left(repeat(d,100*a),65535),repeat(d,20*a),d from t2,t3;
-drop table t2, t3;
-select count(*) from t1 where a=44;
-count(*)
-5
-select a,
-length(b),b=left(repeat(d,100*a),65535),length(c),c=repeat(d,20*a),d from t1;
-a length(b) b=left(repeat(d,100*a),65535) length(c) c=repeat(d,20*a) d
-22 22000 1 4400 1 adfd72nh9k
-22 35200 1 7040 1 adfdijn0loKNHJik
-22 28600 1 5720 1 adfdijnmnb78k
-22 26400 1 5280 1 adfdpplkeock
-22 22000 1 4400 1 jejdkrun87
-33 33000 1 6600 1 adfd72nh9k
-33 52800 1 10560 1 adfdijn0loKNHJik
-33 42900 1 8580 1 adfdijnmnb78k
-33 39600 1 7920 1 adfdpplkeock
-33 33000 1 6600 1 jejdkrun87
-44 44000 1 8800 1 adfd72nh9k
-44 65535 1 14080 1 adfdijn0loKNHJik
-44 57200 1 11440 1 adfdijnmnb78k
-44 52800 1 10560 1 adfdpplkeock
-44 44000 1 8800 1 jejdkrun87
-55 55000 1 11000 1 adfd72nh9k
-55 65535 1 17600 1 adfdijn0loKNHJik
-55 65535 1 14300 1 adfdijnmnb78k
-55 65535 1 13200 1 adfdpplkeock
-55 55000 1 11000 1 jejdkrun87
-66 65535 1 13200 1 adfd72nh9k
-66 65535 1 21120 1 adfdijn0loKNHJik
-66 65535 1 17160 1 adfdijnmnb78k
-66 65535 1 15840 1 adfdpplkeock
-66 65535 1 13200 1 jejdkrun87
-alter table t1 add primary key (a), add key (b(20));
-ERROR 23000: Duplicate entry '22' for key 'PRIMARY'
-delete from t1 where a%2;
-check table t1;
-Table Op Msg_type Msg_text
-test.t1 check status OK
-alter table t1 add primary key (a,b(255),c(255)), add key (b(767));
-select count(*) from t1 where a=44;
-count(*)
-5
-select a,
-length(b),b=left(repeat(d,100*a),65535),length(c),c=repeat(d,20*a),d from t1;
-a length(b) b=left(repeat(d,100*a),65535) length(c) c=repeat(d,20*a) d
-22 22000 1 4400 1 adfd72nh9k
-22 35200 1 7040 1 adfdijn0loKNHJik
-22 28600 1 5720 1 adfdijnmnb78k
-22 26400 1 5280 1 adfdpplkeock
-22 22000 1 4400 1 jejdkrun87
-44 44000 1 8800 1 adfd72nh9k
-44 65535 1 14080 1 adfdijn0loKNHJik
-44 57200 1 11440 1 adfdijnmnb78k
-44 52800 1 10560 1 adfdpplkeock
-44 44000 1 8800 1 jejdkrun87
-66 65535 1 13200 1 adfd72nh9k
-66 65535 1 21120 1 adfdijn0loKNHJik
-66 65535 1 17160 1 adfdijnmnb78k
-66 65535 1 15840 1 adfdpplkeock
-66 65535 1 13200 1 jejdkrun87
-show create table t1;
-Table Create Table
-t1 CREATE TABLE `t1` (
- `a` int(11) NOT NULL DEFAULT '0',
- `b` blob NOT NULL,
- `c` text NOT NULL,
- `d` text NOT NULL,
- PRIMARY KEY (`a`,`b`(255),`c`(255)),
- KEY `b` (`b`(767))
-) ENGINE=InnoDB DEFAULT CHARSET=utf8
-check table t1;
-Table Op Msg_type Msg_text
-test.t1 check status OK
-explain select * from t1 where b like 'adfd%';
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range b b 769 NULL 11 Using where
-drop table t1;
-set global innodb_file_per_table=on;
-set global innodb_file_format='Barracuda';
-create table t1(a blob,b blob,c blob,d blob,e blob,f blob,g blob,h blob,
-i blob,j blob,k blob,l blob,m blob,n blob,o blob,p blob,
-q blob,r blob,s blob,t blob,u blob)
-engine=innodb row_format=dynamic;
-create index t1a on t1 (a(767));
-create index t1b on t1 (b(767));
-create index t1c on t1 (c(767));
-create index t1d on t1 (d(767));
-create index t1e on t1 (e(767));
-create index t1f on t1 (f(767));
-create index t1g on t1 (g(767));
-create index t1h on t1 (h(767));
-create index t1i on t1 (i(767));
-create index t1j on t1 (j(767));
-create index t1k on t1 (k(767));
-create index t1l on t1 (l(767));
-create index t1m on t1 (m(767));
-create index t1n on t1 (n(767));
-create index t1o on t1 (o(767));
-create index t1p on t1 (p(767));
-create index t1q on t1 (q(767));
-create index t1r on t1 (r(767));
-create index t1s on t1 (s(767));
-create index t1t on t1 (t(767));
-create index t1u on t1 (u(767));
-ERROR HY000: Too big row
-create index t1ut on t1 (u(767), t(767));
-ERROR HY000: Too big row
-create index t1st on t1 (s(767), t(767));
-show create table t1;
-Table Create Table
-t1 CREATE TABLE `t1` (
- `a` blob,
- `b` blob,
- `c` blob,
- `d` blob,
- `e` blob,
- `f` blob,
- `g` blob,
- `h` blob,
- `i` blob,
- `j` blob,
- `k` blob,
- `l` blob,
- `m` blob,
- `n` blob,
- `o` blob,
- `p` blob,
- `q` blob,
- `r` blob,
- `s` blob,
- `t` blob,
- `u` blob,
- KEY `t1a` (`a`(767)),
- KEY `t1b` (`b`(767)),
- KEY `t1c` (`c`(767)),
- KEY `t1d` (`d`(767)),
- KEY `t1e` (`e`(767)),
- KEY `t1f` (`f`(767)),
- KEY `t1g` (`g`(767)),
- KEY `t1h` (`h`(767)),
- KEY `t1i` (`i`(767)),
- KEY `t1j` (`j`(767)),
- KEY `t1k` (`k`(767)),
- KEY `t1l` (`l`(767)),
- KEY `t1m` (`m`(767)),
- KEY `t1n` (`n`(767)),
- KEY `t1o` (`o`(767)),
- KEY `t1p` (`p`(767)),
- KEY `t1q` (`q`(767)),
- KEY `t1r` (`r`(767)),
- KEY `t1s` (`s`(767)),
- KEY `t1t` (`t`(767)),
- KEY `t1st` (`s`(767),`t`(767))
-) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC
-create index t1u on t1 (u(767));
-ERROR HY000: Too big row
-alter table t1 row_format=compact;
-create index t1u on t1 (u(767));
-drop table t1;
-CREATE TABLE bug12547647(
-a INT NOT NULL, b BLOB NOT NULL, c TEXT,
-PRIMARY KEY (b(10), a), INDEX (c(767)), INDEX(b(767))
-) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
-INSERT INTO bug12547647 VALUES (5,repeat('khdfo5AlOq',1900),repeat('g',7751));
-COMMIT;
-UPDATE bug12547647 SET c = REPEAT('b',16928);
-ERROR HY000: Undo log record is too big.
-DROP TABLE bug12547647;
-set global innodb_file_per_table=0;
+set global innodb_file_per_table=1;
set global innodb_file_format=Antelope;
set global innodb_file_format_max=Antelope;
SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0;
-SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0;
+SET FOREIGN_KEY_CHECKS=0;
CREATE TABLE t1(
c1 BIGINT(12) NOT NULL,
PRIMARY KEY (c1)
@@ -1102,8 +907,10 @@ c2 BIGINT(12) NOT NULL,
c3 BIGINT(12) NOT NULL,
PRIMARY KEY (c1,c2,c3)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+SET FOREIGN_KEY_CHECKS=0;
ALTER TABLE t2 ADD CONSTRAINT fk_t2_ca
FOREIGN KEY (c3) REFERENCES t1(c1);
+SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS;
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -1144,21 +951,44 @@ c2 BIGINT(12) NOT NULL,
c3 BIGINT(12) NOT NULL,
PRIMARY KEY (c1)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+SET FOREIGN_KEY_CHECKS=0;
+ALTER TABLE t2 ADD CONSTRAINT fk_t2_ca
+FOREIGN KEY (c3,c2) REFERENCES t1(c1,c1), ALGORITHM=COPY;
+ERROR HY000: Can't create table `test`.`#sql-temporary` (errno: 150 "Foreign key constraint is incorrectly formed")
ALTER TABLE t2 ADD CONSTRAINT fk_t2_ca
FOREIGN KEY (c3,c2) REFERENCES t1(c1,c1);
+ERROR HY000: Failed to add the foreign key constaint. Missing index for constraint 'fk_t2_ca' in the referenced table 't1'
+ALTER TABLE t2 ADD CONSTRAINT fk_t2_ca
+FOREIGN KEY (c3,c2) REFERENCES t1(c1,c2), ALGORITHM=COPY;
ERROR HY000: Can't create table `test`.`#sql-temporary` (errno: 150 "Foreign key constraint is incorrectly formed")
ALTER TABLE t2 ADD CONSTRAINT fk_t2_ca
FOREIGN KEY (c3,c2) REFERENCES t1(c1,c2);
-ERROR HY000: Can't create table `test`.`#sql-temporary` (errno: 150 "Foreign key constraint is incorrectly formed")
+ERROR HY000: Failed to add the foreign key constaint. Missing index for constraint 'fk_t2_ca' in the referenced table 't1'
ALTER TABLE t2 ADD CONSTRAINT fk_t2_ca
FOREIGN KEY (c3,c2) REFERENCES t1(c2,c1);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE t2 DROP FOREIGN KEY fk_t2_ca;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE t2 ADD CONSTRAINT fk_t2_ca
+FOREIGN KEY (c3,c2) REFERENCES t1(c2,c1), ALGORITHM=COPY;
ERROR HY000: Can't create table `test`.`#sql-temporary` (errno: 150 "Foreign key constraint is incorrectly formed")
ALTER TABLE t1 MODIFY COLUMN c2 BIGINT(12) NOT NULL;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
ALTER TABLE t2 ADD CONSTRAINT fk_t2_ca
-FOREIGN KEY (c3,c2) REFERENCES t1(c1,c2);
+FOREIGN KEY (c3,c2) REFERENCES t1(c1,c2), ALGORITHM=COPY;
ERROR HY000: Can't create table `test`.`#sql-temporary` (errno: 150 "Foreign key constraint is incorrectly formed")
ALTER TABLE t2 ADD CONSTRAINT fk_t2_ca
+FOREIGN KEY (c3,c2) REFERENCES t1(c1,c2);
+ERROR HY000: Failed to add the foreign key constaint. Missing index for constraint 'fk_t2_ca' in the referenced table 't1'
+ALTER TABLE t2 ADD CONSTRAINT fk_t2_ca
FOREIGN KEY (c3,c2) REFERENCES t1(c2,c1);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS;
+affected rows: 0
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -1166,6 +996,7 @@ t1 CREATE TABLE `t1` (
`c2` bigint(12) NOT NULL,
PRIMARY KEY (`c2`,`c1`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
+affected rows: 1
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -1176,7 +1007,10 @@ t2 CREATE TABLE `t2` (
KEY `fk_t2_ca` (`c3`,`c2`),
CONSTRAINT `fk_t2_ca` FOREIGN KEY (`c3`, `c2`) REFERENCES `t1` (`c2`, `c1`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
+affected rows: 1
CREATE INDEX i_t2_c2_c1 ON t2(c2, c1);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -1188,7 +1022,10 @@ t2 CREATE TABLE `t2` (
KEY `i_t2_c2_c1` (`c2`,`c1`),
CONSTRAINT `fk_t2_ca` FOREIGN KEY (`c3`, `c2`) REFERENCES `t1` (`c2`, `c1`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
+affected rows: 1
CREATE INDEX i_t2_c3_c1_c2 ON t2(c3, c1, c2);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -1201,7 +1038,10 @@ t2 CREATE TABLE `t2` (
KEY `i_t2_c3_c1_c2` (`c3`,`c1`,`c2`),
CONSTRAINT `fk_t2_ca` FOREIGN KEY (`c3`, `c2`) REFERENCES `t1` (`c2`, `c1`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
+affected rows: 1
CREATE INDEX i_t2_c3_c2 ON t2(c3, c2);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
@@ -1214,11 +1054,131 @@ t2 CREATE TABLE `t2` (
KEY `i_t2_c3_c2` (`c3`,`c2`),
CONSTRAINT `fk_t2_ca` FOREIGN KEY (`c3`, `c2`) REFERENCES `t1` (`c2`, `c1`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
+affected rows: 1
DROP TABLE t2;
DROP TABLE t1;
-SELECT SLEEP(10);
-SLEEP(10)
-0
-DROP TABLE t1_purge, t2_purge, t3_purge, t4_purge;
-DROP TABLE t12637786;
-DROP TABLE t12963823;
+CREATE TABLE t1 (a INT, b CHAR(1)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (3,'a'),(3,'b'),(1,'c'),(0,'d'),(1,'e');
+CREATE TABLE t2 (a INT, b CHAR(1)) ENGINE=InnoDB;
+CREATE TABLE t2i (a INT, b CHAR(1) NOT NULL) ENGINE=InnoDB;
+CREATE TABLE t2c (a INT, b CHAR(1) NOT NULL) ENGINE=InnoDB;
+INSERT INTO t2 SELECT * FROM t1;
+INSERT INTO t2i SELECT * FROM t1;
+INSERT INTO t2c SELECT * FROM t1;
+BEGIN;
+SELECT * FROM t1;
+a b
+3 a
+3 b
+1 c
+0 d
+1 e
+SET lock_wait_timeout=1;
+CREATE INDEX t1a ON t1(a);
+ERROR HY000: Lock wait timeout exceeded; try restarting transaction
+CREATE INDEX t2a ON t2(a);
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+set @old_sql_mode = @@sql_mode;
+set @@sql_mode = 'STRICT_TRANS_TABLES';
+ALTER TABLE t2i ADD PRIMARY KEY(a,b), ADD INDEX t2a(a), ALGORITHM=INPLACE;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+set @@sql_mode = @old_sql_mode;
+ALTER TABLE t2c ADD PRIMARY KEY(a,b), ADD INDEX t2a(a), ALGORITHM=COPY;
+affected rows: 5
+info: Records: 5 Duplicates: 0 Warnings: 0
+SELECT * FROM t2i;
+ERROR HY000: Table definition has changed, please retry transaction
+SELECT * FROM t2i FORCE INDEX(t2a) ORDER BY a;
+ERROR HY000: Table definition has changed, please retry transaction
+SELECT * FROM t2c;
+ERROR HY000: Table definition has changed, please retry transaction
+SELECT * FROM t2c FORCE INDEX(t2a) ORDER BY a;
+ERROR HY000: Table definition has changed, please retry transaction
+SELECT * FROM t2;
+a b
+3 a
+3 b
+1 c
+0 d
+1 e
+SELECT * FROM t2 FORCE INDEX(t2a) ORDER BY a;
+ERROR HY000: Table definition has changed, please retry transaction
+SELECT * FROM t2;
+a b
+3 a
+3 b
+1 c
+0 d
+1 e
+COMMIT;
+SELECT * FROM t2;
+a b
+3 a
+3 b
+1 c
+0 d
+1 e
+SELECT * FROM t2 FORCE INDEX(t2a) ORDER BY a;
+a b
+0 d
+1 c
+1 e
+3 a
+3 b
+SELECT * FROM t2i;
+a b
+0 d
+1 c
+1 e
+3 a
+3 b
+SELECT * FROM t2i FORCE INDEX(t2a) ORDER BY a;
+a b
+0 d
+1 c
+1 e
+3 a
+3 b
+SELECT * FROM t2c;
+a b
+0 d
+1 c
+1 e
+3 a
+3 b
+SELECT * FROM t2c FORCE INDEX(t2a) ORDER BY a;
+a b
+0 d
+1 c
+1 e
+3 a
+3 b
+alter table t2 add index t2a(b);
+ERROR 42000: Duplicate key name 't2a'
+alter table t2 drop index t2a, add index t2a(b);
+show create table t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` char(1) DEFAULT NULL,
+ KEY `t2a` (`b`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+show create table t2i;
+Table Create Table
+t2i CREATE TABLE `t2i` (
+ `a` int(11) NOT NULL DEFAULT '0',
+ `b` char(1) NOT NULL,
+ PRIMARY KEY (`a`,`b`),
+ KEY `t2a` (`a`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+show create table t2c;
+Table Create Table
+t2c CREATE TABLE `t2c` (
+ `a` int(11) NOT NULL DEFAULT '0',
+ `b` char(1) NOT NULL,
+ PRIMARY KEY (`a`,`b`),
+ KEY `t2a` (`a`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+DROP TABLE t1,t2,t2c,t2i;
diff --git a/mysql-test/suite/innodb/r/innodb-zip.result b/mysql-test/suite/innodb/r/innodb-zip.result
index 69e463a4390..95a3142068b 100644
--- a/mysql-test/suite/innodb/r/innodb-zip.result
+++ b/mysql-test/suite/innodb/r/innodb-zip.result
@@ -5,62 +5,64 @@ SELECT table_name, row_format, data_length, index_length
FROM information_schema.tables
WHERE table_schema='mysqltest_innodb_zip';
table_name row_format data_length index_length
+SET @save_innodb_stats_on_metadata=@@global.innodb_stats_on_metadata;
set session innodb_strict_mode=0;
set global innodb_file_per_table=off;
set global innodb_file_format=`0`;
+SET @@global.innodb_stats_on_metadata=ON;
create table t0(a int primary key) engine=innodb row_format=compressed;
Warnings:
-Warning 140 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table.
-Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table.
+Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
create table t00(a int primary key) engine=innodb
key_block_size=4 row_format=compressed;
Warnings:
-Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
-Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=4.
-Warning 140 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table.
-Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
+Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=4.
+Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table.
+Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
create table t1(a int primary key) engine=innodb row_format=dynamic;
Warnings:
-Warning 140 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_per_table.
-Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 1478 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_per_table.
+Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
create table t2(a int primary key) engine=innodb row_format=redundant;
create table t3(a int primary key) engine=innodb row_format=compact;
create table t4(a int primary key) engine=innodb key_block_size=9;
Warnings:
-Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
-Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=9.
+Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
+Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=9.
create table t5(a int primary key) engine=innodb
key_block_size=1 row_format=redundant;
Warnings:
-Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
-Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=1.
+Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
+Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1.
set global innodb_file_per_table=on;
create table t6(a int primary key) engine=innodb
key_block_size=1 row_format=redundant;
Warnings:
-Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=1.
+Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1.
set global innodb_file_format=`1`;
create table t7(a int primary key) engine=innodb
key_block_size=1 row_format=redundant;
Warnings:
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
create table t8(a int primary key) engine=innodb
key_block_size=1 row_format=fixed;
Warnings:
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
-Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
+Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
create table t9(a int primary key) engine=innodb
key_block_size=1 row_format=compact;
Warnings:
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
create table t10(a int primary key) engine=innodb
key_block_size=1 row_format=dynamic;
Warnings:
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=1 unless ROW_FORMAT=COMPRESSED.
create table t11(a int primary key) engine=innodb
key_block_size=1 row_format=compressed;
create table t12(a int primary key) engine=innodb
@@ -69,43 +71,39 @@ create table t13(a int primary key) engine=innodb
row_format=compressed;
create table t14(a int primary key) engine=innodb key_block_size=9;
Warnings:
-Warning 140 InnoDB: ignoring KEY_BLOCK_SIZE=9.
-SELECT table_schema, table_name, row_format, data_length, index_length
-FROM information_schema.tables WHERE table_schema='mysqltest_innodb_zip';
+Warning 1478 InnoDB: ignoring KEY_BLOCK_SIZE=9.
+SELECT table_schema, table_name, row_format, data_length, index_length FROM information_schema.tables WHERE engine='innodb' AND table_schema != 'mysql';
table_schema table_name row_format data_length index_length
-mysqltest_innodb_zip t0 Compact 16384 0
-mysqltest_innodb_zip t00 Compact 16384 0
-mysqltest_innodb_zip t1 Compact 16384 0
-mysqltest_innodb_zip t10 Dynamic 16384 0
+mysqltest_innodb_zip t0 Compact {valid} 0
+mysqltest_innodb_zip t00 Compact {valid} 0
+mysqltest_innodb_zip t1 Compact {valid} 0
+mysqltest_innodb_zip t10 Dynamic {valid} 0
mysqltest_innodb_zip t11 Compressed 1024 0
mysqltest_innodb_zip t12 Compressed 1024 0
-mysqltest_innodb_zip t13 Compressed 8192 0
-mysqltest_innodb_zip t14 Compact 16384 0
-mysqltest_innodb_zip t2 Redundant 16384 0
-mysqltest_innodb_zip t3 Compact 16384 0
-mysqltest_innodb_zip t4 Compact 16384 0
-mysqltest_innodb_zip t5 Redundant 16384 0
-mysqltest_innodb_zip t6 Redundant 16384 0
-mysqltest_innodb_zip t7 Redundant 16384 0
-mysqltest_innodb_zip t8 Compact 16384 0
-mysqltest_innodb_zip t9 Compact 16384 0
+mysqltest_innodb_zip t13 Compressed {valid} 0
+mysqltest_innodb_zip t14 Compact {valid} 0
+mysqltest_innodb_zip t2 Redundant {valid} 0
+mysqltest_innodb_zip t3 Compact {valid} 0
+mysqltest_innodb_zip t4 Compact {valid} 0
+mysqltest_innodb_zip t5 Redundant {valid} 0
+mysqltest_innodb_zip t6 Redundant {valid} 0
+mysqltest_innodb_zip t7 Redundant {valid} 0
+mysqltest_innodb_zip t8 Compact {valid} 0
+mysqltest_innodb_zip t9 Compact {valid} 0
drop table t0,t00,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11,t12,t13,t14;
alter table t1 key_block_size=0;
alter table t1 row_format=dynamic;
-SELECT table_schema, table_name, row_format, data_length, index_length
-FROM information_schema.tables WHERE table_schema='mysqltest_innodb_zip';
+SELECT table_schema, table_name, row_format, data_length, index_length FROM information_schema.tables WHERE engine='innodb' AND table_schema != 'mysql';
table_schema table_name row_format data_length index_length
-mysqltest_innodb_zip t1 Dynamic 16384 0
+mysqltest_innodb_zip t1 Dynamic {valid} 0
alter table t1 row_format=compact;
-SELECT table_schema, table_name, row_format, data_length, index_length
-FROM information_schema.tables WHERE table_schema='mysqltest_innodb_zip';
+SELECT table_schema, table_name, row_format, data_length, index_length FROM information_schema.tables WHERE engine='innodb' AND table_schema != 'mysql';
table_schema table_name row_format data_length index_length
-mysqltest_innodb_zip t1 Compact 16384 0
+mysqltest_innodb_zip t1 Compact {valid} 0
alter table t1 row_format=redundant;
-SELECT table_schema, table_name, row_format, data_length, index_length
-FROM information_schema.tables WHERE table_schema='mysqltest_innodb_zip';
+SELECT table_schema, table_name, row_format, data_length, index_length FROM information_schema.tables WHERE engine='innodb' AND table_schema != 'mysql';
table_schema table_name row_format data_length index_length
-mysqltest_innodb_zip t1 Redundant 16384 0
+mysqltest_innodb_zip t1 Redundant {valid} 0
drop table t1;
create table t1(a int not null, b text, index(b(10))) engine=innodb
key_block_size=1;
@@ -122,35 +120,17 @@ rollback;
select a,left(b,40) from t1 natural join t2;
a left(b,40)
1 1abcdefghijklmnopqrstuvwxyzAAAAAAAAAAAAA
-SELECT table_schema, table_name, row_format, data_length, index_length
-FROM information_schema.tables WHERE table_schema='mysqltest_innodb_zip';
+analyze table t1;
+Table Op Msg_type Msg_text
+mysqltest_innodb_zip.t1 analyze status OK
+analyze table t2;
+Table Op Msg_type Msg_text
+mysqltest_innodb_zip.t2 analyze status OK
+SELECT table_schema, table_name, row_format, data_length, index_length FROM information_schema.tables WHERE engine='innodb' AND table_schema != 'mysql';
table_schema table_name row_format data_length index_length
mysqltest_innodb_zip t1 Compressed 2048 1024
-mysqltest_innodb_zip t2 Compact 16384 0
+mysqltest_innodb_zip t2 Compact {valid} 0
drop table t1,t2;
-SET SESSION innodb_strict_mode = off;
-CREATE TABLE t1(
-c TEXT NOT NULL, d TEXT NOT NULL,
-PRIMARY KEY (c(767),d(767)))
-ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 CHARSET=ASCII;
-ERROR 42000: Row size too large (> 8126). Changing some columns to TEXT or BLOB may help. In current row format, BLOB prefix of 0 bytes is stored inline.
-CREATE TABLE t1(
-c TEXT NOT NULL, d TEXT NOT NULL,
-PRIMARY KEY (c(767),d(767)))
-ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2 CHARSET=ASCII;
-ERROR 42000: Row size too large (> 8126). Changing some columns to TEXT or BLOB may help. In current row format, BLOB prefix of 0 bytes is stored inline.
-CREATE TABLE t1(
-c TEXT NOT NULL, d TEXT NOT NULL,
-PRIMARY KEY (c(767),d(767)))
-ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4 CHARSET=ASCII;
-drop table t1;
-CREATE TABLE t1(c TEXT, PRIMARY KEY (c(440)))
-ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 CHARSET=ASCII;
-ERROR 42000: Row size too large (> 8126). Changing some columns to TEXT or BLOB may help. In current row format, BLOB prefix of 0 bytes is stored inline.
-CREATE TABLE t1(c TEXT, PRIMARY KEY (c(438)))
-ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 CHARSET=ASCII;
-INSERT INTO t1 VALUES(REPEAT('A',512)),(REPEAT('B',512));
-DROP TABLE t1;
create table t1( c1 int not null, c2 blob, c3 blob, c4 blob,
primary key(c1, c2(22), c3(22)))
engine = innodb row_format = dynamic;
@@ -203,139 +183,119 @@ create table t2 (id int primary key) engine = innodb key_block_size = 9;
ERROR HY000: Can't create table `mysqltest_innodb_zip`.`t2` (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 140 InnoDB: invalid KEY_BLOCK_SIZE = 9. Valid values are [1, 2, 4, 8, 16]
+Warning 1478 InnoDB: invalid KEY_BLOCK_SIZE = 9. Valid values are [1, 2, 4, 8, 16]
Error 1005 Can't create table `mysqltest_innodb_zip`.`t2` (errno: 140 "Wrong create options")
create table t3 (id int primary key) engine = innodb key_block_size = 1;
create table t4 (id int primary key) engine = innodb key_block_size = 2;
create table t5 (id int primary key) engine = innodb key_block_size = 4;
-create table t6 (id int primary key) engine = innodb key_block_size = 8;
-create table t7 (id int primary key) engine = innodb key_block_size = 16;
create table t8 (id int primary key) engine = innodb row_format = compressed;
create table t9 (id int primary key) engine = innodb row_format = dynamic;
create table t10(id int primary key) engine = innodb row_format = compact;
create table t11(id int primary key) engine = innodb row_format = redundant;
-SELECT table_schema, table_name, row_format, data_length, index_length
-FROM information_schema.tables WHERE table_schema='mysqltest_innodb_zip';
+SELECT table_schema, table_name, row_format, data_length, index_length FROM information_schema.tables WHERE engine='innodb' AND table_schema != 'mysql';
table_schema table_name row_format data_length index_length
-mysqltest_innodb_zip t1 Compact 16384 0
-mysqltest_innodb_zip t10 Compact 16384 0
-mysqltest_innodb_zip t11 Redundant 16384 0
+mysqltest_innodb_zip t1 Compact {valid} 0
+mysqltest_innodb_zip t10 Compact {valid} 0
+mysqltest_innodb_zip t11 Redundant {valid} 0
mysqltest_innodb_zip t3 Compressed 1024 0
-mysqltest_innodb_zip t4 Compressed 2048 0
-mysqltest_innodb_zip t5 Compressed 4096 0
-mysqltest_innodb_zip t6 Compressed 8192 0
-mysqltest_innodb_zip t7 Compressed 16384 0
-mysqltest_innodb_zip t8 Compressed 8192 0
-mysqltest_innodb_zip t9 Dynamic 16384 0
-drop table t1, t3, t4, t5, t6, t7, t8, t9, t10, t11;
+mysqltest_innodb_zip t4 Compressed {valid} 0
+mysqltest_innodb_zip t5 Compressed {valid} 0
+mysqltest_innodb_zip t8 Compressed {valid} 0
+mysqltest_innodb_zip t9 Dynamic {valid} 0
+drop table t1, t3, t4, t5, t8, t9, t10, t11;
create table t1 (id int primary key) engine = innodb
-key_block_size = 8 row_format = compressed;
+key_block_size = 4 row_format = compressed;
create table t2 (id int primary key) engine = innodb
-key_block_size = 8 row_format = redundant;
+key_block_size = 4 row_format = redundant;
ERROR HY000: Can't create table `mysqltest_innodb_zip`.`t2` (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 140 InnoDB: cannot specify ROW_FORMAT = REDUNDANT with KEY_BLOCK_SIZE.
+Warning 1478 InnoDB: cannot specify ROW_FORMAT = REDUNDANT with KEY_BLOCK_SIZE.
Error 1005 Can't create table `mysqltest_innodb_zip`.`t2` (errno: 140 "Wrong create options")
create table t3 (id int primary key) engine = innodb
-key_block_size = 8 row_format = compact;
+key_block_size = 4 row_format = compact;
ERROR HY000: Can't create table `mysqltest_innodb_zip`.`t3` (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 140 InnoDB: cannot specify ROW_FORMAT = COMPACT with KEY_BLOCK_SIZE.
+Warning 1478 InnoDB: cannot specify ROW_FORMAT = COMPACT with KEY_BLOCK_SIZE.
Error 1005 Can't create table `mysqltest_innodb_zip`.`t3` (errno: 140 "Wrong create options")
create table t4 (id int primary key) engine = innodb
-key_block_size = 8 row_format = dynamic;
+key_block_size = 4 row_format = dynamic;
ERROR HY000: Can't create table `mysqltest_innodb_zip`.`t4` (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 140 InnoDB: cannot specify ROW_FORMAT = DYNAMIC with KEY_BLOCK_SIZE.
+Warning 1478 InnoDB: cannot specify ROW_FORMAT = DYNAMIC with KEY_BLOCK_SIZE.
Error 1005 Can't create table `mysqltest_innodb_zip`.`t4` (errno: 140 "Wrong create options")
create table t5 (id int primary key) engine = innodb
-key_block_size = 8 row_format = default;
-SELECT table_schema, table_name, row_format, data_length, index_length
-FROM information_schema.tables WHERE table_schema='mysqltest_innodb_zip';
+key_block_size = 4 row_format = default;
+SELECT table_schema, table_name, row_format, data_length, index_length FROM information_schema.tables WHERE engine='innodb' AND table_schema != 'mysql';
table_schema table_name row_format data_length index_length
-mysqltest_innodb_zip t1 Compressed 8192 0
-mysqltest_innodb_zip t5 Compressed 8192 0
+mysqltest_innodb_zip t1 Compressed 4096 0
+mysqltest_innodb_zip t5 Compressed 4096 0
drop table t1, t5;
create table t1 (id int primary key) engine = innodb
key_block_size = 9 row_format = redundant;
ERROR HY000: Can't create table `mysqltest_innodb_zip`.`t1` (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 140 InnoDB: invalid KEY_BLOCK_SIZE = 9. Valid values are [1, 2, 4, 8, 16]
-Warning 140 InnoDB: cannot specify ROW_FORMAT = REDUNDANT with KEY_BLOCK_SIZE.
+Warning 1478 InnoDB: invalid KEY_BLOCK_SIZE = 9. Valid values are [1, 2, 4, 8, 16]
+Warning 1478 InnoDB: cannot specify ROW_FORMAT = REDUNDANT with KEY_BLOCK_SIZE.
Error 1005 Can't create table `mysqltest_innodb_zip`.`t1` (errno: 140 "Wrong create options")
create table t2 (id int primary key) engine = innodb
key_block_size = 9 row_format = compact;
ERROR HY000: Can't create table `mysqltest_innodb_zip`.`t2` (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 140 InnoDB: invalid KEY_BLOCK_SIZE = 9. Valid values are [1, 2, 4, 8, 16]
-Warning 140 InnoDB: cannot specify ROW_FORMAT = COMPACT with KEY_BLOCK_SIZE.
+Warning 1478 InnoDB: invalid KEY_BLOCK_SIZE = 9. Valid values are [1, 2, 4, 8, 16]
+Warning 1478 InnoDB: cannot specify ROW_FORMAT = COMPACT with KEY_BLOCK_SIZE.
Error 1005 Can't create table `mysqltest_innodb_zip`.`t2` (errno: 140 "Wrong create options")
create table t2 (id int primary key) engine = innodb
key_block_size = 9 row_format = dynamic;
ERROR HY000: Can't create table `mysqltest_innodb_zip`.`t2` (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 140 InnoDB: invalid KEY_BLOCK_SIZE = 9. Valid values are [1, 2, 4, 8, 16]
-Warning 140 InnoDB: cannot specify ROW_FORMAT = DYNAMIC with KEY_BLOCK_SIZE.
+Warning 1478 InnoDB: invalid KEY_BLOCK_SIZE = 9. Valid values are [1, 2, 4, 8, 16]
+Warning 1478 InnoDB: cannot specify ROW_FORMAT = DYNAMIC with KEY_BLOCK_SIZE.
Error 1005 Can't create table `mysqltest_innodb_zip`.`t2` (errno: 140 "Wrong create options")
-SELECT table_schema, table_name, row_format, data_length, index_length
-FROM information_schema.tables WHERE table_schema='mysqltest_innodb_zip';
+SELECT table_schema, table_name, row_format, data_length, index_length FROM information_schema.tables WHERE engine='innodb' AND table_schema != 'mysql';
table_schema table_name row_format data_length index_length
set global innodb_file_per_table = off;
create table t1 (id int primary key) engine = innodb key_block_size = 1;
ERROR HY000: Can't create table `mysqltest_innodb_zip`.`t1` (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
+Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
Error 1005 Can't create table `mysqltest_innodb_zip`.`t1` (errno: 140 "Wrong create options")
create table t2 (id int primary key) engine = innodb key_block_size = 2;
ERROR HY000: Can't create table `mysqltest_innodb_zip`.`t2` (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
+Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
Error 1005 Can't create table `mysqltest_innodb_zip`.`t2` (errno: 140 "Wrong create options")
create table t3 (id int primary key) engine = innodb key_block_size = 4;
ERROR HY000: Can't create table `mysqltest_innodb_zip`.`t3` (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
+Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
Error 1005 Can't create table `mysqltest_innodb_zip`.`t3` (errno: 140 "Wrong create options")
-create table t4 (id int primary key) engine = innodb key_block_size = 8;
-ERROR HY000: Can't create table `mysqltest_innodb_zip`.`t4` (errno: 140 "Wrong create options")
-show warnings;
-Level Code Message
-Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
-Error 1005 Can't create table `mysqltest_innodb_zip`.`t4` (errno: 140 "Wrong create options")
-create table t5 (id int primary key) engine = innodb key_block_size = 16;
-ERROR HY000: Can't create table `mysqltest_innodb_zip`.`t5` (errno: 140 "Wrong create options")
-show warnings;
-Level Code Message
-Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
-Error 1005 Can't create table `mysqltest_innodb_zip`.`t5` (errno: 140 "Wrong create options")
create table t6 (id int primary key) engine = innodb row_format = compressed;
ERROR HY000: Can't create table `mysqltest_innodb_zip`.`t6` (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 140 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table.
+Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table.
Error 1005 Can't create table `mysqltest_innodb_zip`.`t6` (errno: 140 "Wrong create options")
create table t7 (id int primary key) engine = innodb row_format = dynamic;
ERROR HY000: Can't create table `mysqltest_innodb_zip`.`t7` (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 140 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_per_table.
+Warning 1478 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_per_table.
Error 1005 Can't create table `mysqltest_innodb_zip`.`t7` (errno: 140 "Wrong create options")
create table t8 (id int primary key) engine = innodb row_format = compact;
create table t9 (id int primary key) engine = innodb row_format = redundant;
-SELECT table_schema, table_name, row_format, data_length, index_length
-FROM information_schema.tables WHERE table_schema='mysqltest_innodb_zip';
+SELECT table_schema, table_name, row_format, data_length, index_length FROM information_schema.tables WHERE engine='innodb' AND table_schema != 'mysql';
table_schema table_name row_format data_length index_length
-mysqltest_innodb_zip t8 Compact 16384 0
-mysqltest_innodb_zip t9 Redundant 16384 0
+mysqltest_innodb_zip t8 Compact {valid} 0
+mysqltest_innodb_zip t9 Redundant {valid} 0
drop table t8, t9;
set global innodb_file_per_table = on;
set global innodb_file_format = `0`;
@@ -343,53 +303,40 @@ create table t1 (id int primary key) engine = innodb key_block_size = 1;
ERROR HY000: Can't create table `mysqltest_innodb_zip`.`t1` (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
+Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
Error 1005 Can't create table `mysqltest_innodb_zip`.`t1` (errno: 140 "Wrong create options")
create table t2 (id int primary key) engine = innodb key_block_size = 2;
ERROR HY000: Can't create table `mysqltest_innodb_zip`.`t2` (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
+Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
Error 1005 Can't create table `mysqltest_innodb_zip`.`t2` (errno: 140 "Wrong create options")
create table t3 (id int primary key) engine = innodb key_block_size = 4;
ERROR HY000: Can't create table `mysqltest_innodb_zip`.`t3` (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
+Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
Error 1005 Can't create table `mysqltest_innodb_zip`.`t3` (errno: 140 "Wrong create options")
-create table t4 (id int primary key) engine = innodb key_block_size = 8;
-ERROR HY000: Can't create table `mysqltest_innodb_zip`.`t4` (errno: 140 "Wrong create options")
-show warnings;
-Level Code Message
-Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
-Error 1005 Can't create table `mysqltest_innodb_zip`.`t4` (errno: 140 "Wrong create options")
-create table t5 (id int primary key) engine = innodb key_block_size = 16;
-ERROR HY000: Can't create table `mysqltest_innodb_zip`.`t5` (errno: 140 "Wrong create options")
-show warnings;
-Level Code Message
-Warning 140 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
-Error 1005 Can't create table `mysqltest_innodb_zip`.`t5` (errno: 140 "Wrong create options")
create table t6 (id int primary key) engine = innodb row_format = compressed;
ERROR HY000: Can't create table `mysqltest_innodb_zip`.`t6` (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 140 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope.
+Warning 1478 InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope.
Error 1005 Can't create table `mysqltest_innodb_zip`.`t6` (errno: 140 "Wrong create options")
create table t7 (id int primary key) engine = innodb row_format = dynamic;
ERROR HY000: Can't create table `mysqltest_innodb_zip`.`t7` (errno: 140 "Wrong create options")
show warnings;
Level Code Message
-Warning 140 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_format > Antelope.
+Warning 1478 InnoDB: ROW_FORMAT=DYNAMIC requires innodb_file_format > Antelope.
Error 1005 Can't create table `mysqltest_innodb_zip`.`t7` (errno: 140 "Wrong create options")
create table t8 (id int primary key) engine = innodb row_format = compact;
create table t9 (id int primary key) engine = innodb row_format = redundant;
-SELECT table_schema, table_name, row_format, data_length, index_length
-FROM information_schema.tables WHERE table_schema='mysqltest_innodb_zip';
+SELECT table_schema, table_name, row_format, data_length, index_length FROM information_schema.tables WHERE engine='innodb' AND table_schema != 'mysql';
table_schema table_name row_format data_length index_length
-mysqltest_innodb_zip t8 Compact 16384 0
-mysqltest_innodb_zip t9 Redundant 16384 0
+mysqltest_innodb_zip t8 Compact {valid} 0
+mysqltest_innodb_zip t9 Redundant {valid} 0
drop table t8, t9;
-set global innodb_file_per_table=0;
+set global innodb_file_per_table=1;
set global innodb_file_format=Antelope;
set global innodb_file_per_table=on;
set global innodb_file_format=`Barracuda`;
@@ -402,7 +349,7 @@ select @@innodb_file_format_max;
Antelope
create table zip_table (
c1 int
-) engine = innodb key_block_size = 8;
+) engine = innodb key_block_size = 4;
select @@innodb_file_format_max;
@@innodb_file_format_max
Barracuda
diff --git a/mysql-test/suite/innodb/r/innodb.result b/mysql-test/suite/innodb/r/innodb.result
index da5f77fcdca..c67a200d513 100644
--- a/mysql-test/suite/innodb/r/innodb.result
+++ b/mysql-test/suite/innodb/r/innodb.result
@@ -696,8 +696,6 @@ select count(*) from t1 where sca_pic is null;
count(*)
2
alter table t1 drop index sca_pic, add index sca_pic (cat_code, sca_pic);
-alter table t1 drop index sca_pic;
-alter table t1 add index sca_pic (cat_code, sca_pic);
select count(*) from t1 where sca_code='PD' and sca_pic is null;
count(*)
1
@@ -705,9 +703,6 @@ select count(*) from t1 where cat_code='E';
count(*)
0
alter table t1 drop index sca_pic, add index (sca_pic, cat_code);
-ERROR 42000: Incorrect index name 'sca_pic'
-alter table t1 drop index sca_pic;
-alter table t1 add index (sca_pic, cat_code);
select count(*) from t1 where sca_code='PD' and sca_pic is null;
count(*)
1
@@ -1696,7 +1691,7 @@ variable_value - @innodb_rows_deleted_orig
71
SELECT variable_value - @innodb_rows_inserted_orig FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_rows_inserted';
variable_value - @innodb_rows_inserted_orig
-1071
+1007
SELECT variable_value - @innodb_rows_updated_orig FROM information_schema.global_status WHERE LOWER(variable_name) = 'innodb_rows_updated';
variable_value - @innodb_rows_updated_orig
866
@@ -2261,7 +2256,7 @@ t1 CREATE TABLE `t1` (
drop table t1;
create table t1 (v varchar(10), c char(10)) row_format=fixed;
Warnings:
-Warning 140 InnoDB: assuming ROW_FORMAT=COMPACT.
+Warning 1478 InnoDB: assuming ROW_FORMAT=COMPACT.
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -2977,8 +2972,11 @@ CREATE TABLE t2 (a INT, INDEX(a)) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
INSERT INTO t2 VALUES (1);
ALTER TABLE t2 ADD FOREIGN KEY (a) REFERENCES t1 (a) ON DELETE SET NULL;
+set @old_sql_mode = @@sql_mode;
+set @@sql_mode = 'STRICT_TRANS_TABLES';
ALTER TABLE t2 MODIFY a INT NOT NULL;
-ERROR HY000: Error on rename of '#sql-temporary' to './test/t2' (errno: 150 "Foreign key constraint is incorrectly formed")
+ERROR HY000: Column 'a' cannot be NOT NULL: needed in a foreign key constraint 'test/t2_ibfk_1' SET NULL
+set @@sql_mode = @old_sql_mode;
DELETE FROM t1;
DROP TABLE t2,t1;
CREATE TABLE t1 (a VARCHAR(5) COLLATE utf8_unicode_ci PRIMARY KEY)
@@ -3029,7 +3027,7 @@ c29 CHAR(255), c30 CHAR(255), c31 CHAR(255), c32 CHAR(255)
ERROR 42000: Row size too large (> 8126). Changing some columns to TEXT or BLOB or using ROW_FORMAT=DYNAMIC or ROW_FORMAT=COMPRESSED may help. In current row format, BLOB prefix of 768 bytes is stored inline.
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1(
id BIGINT(20) NOT NULL AUTO_INCREMENT PRIMARY KEY
) ENGINE=InnoDB;
@@ -3048,8 +3046,8 @@ SET TX_ISOLATION='read-committed';
SET AUTOCOMMIT=0;
DROP TABLE IF EXISTS t1, t2;
Warnings:
-Note 1051 Unknown table 't1'
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'test.t1'
+Note 1051 Unknown table 'test.t2'
CREATE TABLE t1 ( a int ) ENGINE=InnoDB;
CREATE TABLE t2 LIKE t1;
SELECT * FROM t2;
diff --git a/mysql-test/suite/innodb/r/innodb_bug14007649.result b/mysql-test/suite/innodb/r/innodb_bug14007649.result
index 8338c7b2ba2..50de5868be4 100644
--- a/mysql-test/suite/innodb/r/innodb_bug14007649.result
+++ b/mysql-test/suite/innodb/r/innodb_bug14007649.result
@@ -39,17 +39,18 @@ update t1 set f2 = 6 where f1 = 1 and f2 is null;
(a) Number of rows updated:
select row_count();
row_count()
-0
+1
(a) After the update statement is executed.
select rowid, f1, f2 from t1;
rowid f1 f2
1 1 10
2 1 NULL
+3 1 6
commit;
"The trx with consistent snapshot ended."
select rowid, f1, f2 from t1;
rowid f1 f2
1 1 10
2 1 4
-3 1 NULL
+3 1 6
drop table t1;
diff --git a/mysql-test/suite/innodb/r/innodb_bug21704.result b/mysql-test/suite/innodb/r/innodb_bug21704.result
index 3a6b38d50da..239aeaa354d 100644
--- a/mysql-test/suite/innodb/r/innodb_bug21704.result
+++ b/mysql-test/suite/innodb/r/innodb_bug21704.result
@@ -5,9 +5,6 @@
# Test that it's not possible to rename columns participating in a
# foreign key (either in the referencing or referenced table).
-DROP TABLE IF EXISTS t1;
-DROP TABLE IF EXISTS t2;
-DROP TABLE IF EXISTS t3;
CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ROW_FORMAT=COMPACT ENGINE=INNODB;
CREATE TABLE t2 (a INT PRIMARY KEY, b INT,
CONSTRAINT fk1 FOREIGN KEY (a) REFERENCES t1(a))
@@ -21,35 +18,72 @@ INSERT INTO t3 VALUES (1,1,1),(2,2,2),(3,3,3);
# Test renaming the column in the referenced table.
-ALTER TABLE t1 CHANGE a c INT;
-ERROR HY000: Error on rename of '#sql-temporary' to './test/t1' (errno: 150 "Foreign key constraint is incorrectly formed")
+ALTER TABLE t1 CHANGE a e INT;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
# Ensure that online column rename works.
ALTER TABLE t1 CHANGE b c INT;
-affected rows: 3
-info: Records: 3 Duplicates: 0 Warnings: 0
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
# Test renaming the column in the referencing table
-ALTER TABLE t2 CHANGE a c INT;
-ERROR HY000: Error on rename of '#sql-temporary' to './test/t2' (errno: 150 "Foreign key constraint is incorrectly formed")
+ALTER TABLE t2 CHANGE a z INT;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
# Ensure that online column rename works.
ALTER TABLE t2 CHANGE b c INT;
-affected rows: 3
-info: Records: 3 Duplicates: 0 Warnings: 0
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
# Test with self-referential constraints
-ALTER TABLE t3 CHANGE a d INT;
-ERROR HY000: Error on rename of '#sql-temporary' to './test/t3' (errno: 150 "Foreign key constraint is incorrectly formed")
-ALTER TABLE t3 CHANGE b d INT;
-ERROR HY000: Error on rename of '#sql-temporary' to './test/t3' (errno: 150 "Foreign key constraint is incorrectly formed")
+ALTER TABLE t3 CHANGE a f INT;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+ALTER TABLE t3 CHANGE b g INT;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
# Ensure that online column rename works.
ALTER TABLE t3 CHANGE c d INT;
-affected rows: 3
-info: Records: 3 Duplicates: 0 Warnings: 0
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
# Cleanup.
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `e` int(11) NOT NULL DEFAULT '0',
+ `c` int(11) DEFAULT NULL,
+ PRIMARY KEY (`e`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=COMPACT
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `z` int(11) NOT NULL DEFAULT '0',
+ `c` int(11) DEFAULT NULL,
+ PRIMARY KEY (`z`),
+ CONSTRAINT `fk1` FOREIGN KEY (`z`) REFERENCES `t1` (`e`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=COMPACT
+SHOW CREATE TABLE t3;
+Table Create Table
+t3 CREATE TABLE `t3` (
+ `f` int(11) NOT NULL DEFAULT '0',
+ `g` int(11) DEFAULT NULL,
+ `d` int(11) DEFAULT NULL,
+ PRIMARY KEY (`f`),
+ KEY `b` (`g`),
+ CONSTRAINT `fk2` FOREIGN KEY (`g`) REFERENCES `t3` (`f`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=COMPACT
+SELECT f.*, c.*
+FROM INFORMATION_SCHEMA.INNODB_SYS_FOREIGN_COLS c
+INNER JOIN INFORMATION_SCHEMA.INNODB_SYS_FOREIGN f
+ON c.ID=f.ID
+WHERE FOR_NAME LIKE 'test/t%';
+ID FOR_NAME REF_NAME N_COLS TYPE ID FOR_COL_NAME REF_COL_NAME POS
+test/fk1 test/t2 test/t1 1 0 test/fk1 z e 0
+test/fk2 test/t3 test/t3 1 0 test/fk2 g f 0
DROP TABLE t3;
DROP TABLE t2;
DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/r/innodb_bug52745.result b/mysql-test/suite/innodb/r/innodb_bug52745.result
index 927ba0e0e53..f4393e8fae0 100644
--- a/mysql-test/suite/innodb/r/innodb_bug52745.result
+++ b/mysql-test/suite/innodb/r/innodb_bug52745.result
@@ -127,4 +127,4 @@ Warning 1265 Data truncated for column 'col79' at row 1
Warning 1264 Out of range value for column 'col84' at row 1
DROP TABLE bug52745;
SET GLOBAL innodb_file_format=Antelope;
-SET GLOBAL innodb_file_per_table=0;
+SET GLOBAL innodb_file_per_table=1;
diff --git a/mysql-test/suite/innodb/r/innodb_bug53591.result b/mysql-test/suite/innodb/r/innodb_bug53591.result
index b0196318801..dbebb9d2d33 100644
--- a/mysql-test/suite/innodb/r/innodb_bug53591.result
+++ b/mysql-test/suite/innodb/r/innodb_bug53591.result
@@ -4,11 +4,10 @@ set old_alter_table=0;
CREATE TABLE bug53591(a text charset utf8 not null)
ENGINE=InnoDB KEY_BLOCK_SIZE=1;
ALTER TABLE bug53591 ADD PRIMARY KEY(a(220));
-ERROR HY000: Too big row
+ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is {checked_valid}. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs
SHOW WARNINGS;
Level Code Message
-Error 139 Too big row
-Error 1118 Row size too large (> 8126). Changing some columns to TEXT or BLOB may help. In current row format, BLOB prefix of 0 bytes is stored inline.
+Error 1118 Row size too large. The maximum row size for the used table type, not counting BLOBs, is {checked_valid}. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs
DROP TABLE bug53591;
SET GLOBAL innodb_file_format=Antelope;
-SET GLOBAL innodb_file_per_table=0;
+SET GLOBAL innodb_file_per_table=1;
diff --git a/mysql-test/suite/innodb/r/innodb_bug54044.result b/mysql-test/suite/innodb/r/innodb_bug54044.result
index 47aa8805834..4935febcbfb 100644
--- a/mysql-test/suite/innodb/r/innodb_bug54044.result
+++ b/mysql-test/suite/innodb/r/innodb_bug54044.result
@@ -5,5 +5,13 @@ Table Create Table
table_54044 CREATE TEMPORARY TABLE `table_54044` (
`IF(NULL IS NOT NULL, NULL, NULL)` binary(0) DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1
-CREATE TEMPORARY TABLE tmp1 ENGINE = INNODB AS SELECT COALESCE(NULL, NULL, NULL);
-CREATE TEMPORARY TABLE tmp2 ENGINE = INNODB AS SELECT GREATEST(NULL, NULL);
+DROP TABLE table_54044;
+CREATE TABLE tmp ENGINE = INNODB AS SELECT COALESCE(NULL, NULL, NULL), GREATEST(NULL, NULL), NULL;
+SHOW CREATE TABLE tmp;
+Table Create Table
+tmp CREATE TABLE `tmp` (
+ `COALESCE(NULL, NULL, NULL)` binary(0) DEFAULT NULL,
+ `GREATEST(NULL, NULL)` binary(0) DEFAULT NULL,
+ `NULL` binary(0) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+DROP TABLE tmp;
diff --git a/mysql-test/suite/innodb/r/innodb_bug56947.result b/mysql-test/suite/innodb/r/innodb_bug56947.result
index 8b864b62e81..0b436ce2656 100644
--- a/mysql-test/suite/innodb/r/innodb_bug56947.result
+++ b/mysql-test/suite/innodb/r/innodb_bug56947.result
@@ -1,8 +1,11 @@
-SET @old_innodb_file_per_table=@@innodb_file_per_table;
SET GLOBAL innodb_file_per_table=0;
create table bug56947(a int not null) engine = innodb;
-CREATE TABLE `bug56947#1`(a int) ENGINE=InnoDB;
+SET DEBUG_DBUG='+d,ib_rebuild_cannot_rename';
alter table bug56947 add unique index (a);
-ERROR 42S01: Table 'test.bug56947#1' already exists
-drop table `bug56947#1`;
+ERROR HY000: Got error 11 "Resource temporarily unavailable" from storage engine
+SET DEBUG_DBUG='-d,ib_rebuild_cannot_rename';
+check table bug56947;
+Table Op Msg_type Msg_text
+test.bug56947 check status OK
drop table bug56947;
+SET @@global.innodb_file_per_table=DEFAULT;
diff --git a/mysql-test/suite/innodb/r/innodb_bug60049.result b/mysql-test/suite/innodb/r/innodb_bug60049.result
index a1788a8ab0a..8e3be130e48 100644
--- a/mysql-test/suite/innodb/r/innodb_bug60049.result
+++ b/mysql-test/suite/innodb/r/innodb_bug60049.result
@@ -1,9 +1,10 @@
-set @@global.innodb_fast_shutdown=0;
-CREATE TABLE t(a INT)ENGINE=InnoDB;
+call mtr.add_suppression('InnoDB: Error: Table "mysql"."innodb_(table|index)_stats" not found');
+call mtr.add_suppression('InnoDB: Error: Fetch of persistent statistics requested');
+CREATE TABLE t(a INT)ENGINE=InnoDB STATS_PERSISTENT=0;
RENAME TABLE t TO u;
DROP TABLE u;
SELECT @@innodb_fast_shutdown;
@@innodb_fast_shutdown
0
Last record of ID_IND root page (9):
-1808000018050074000000000000000c5359535f464f524549474e5f434f4c53
+18080000180500c0000000000000000c5359535f464f524549474e5f434f4c53
diff --git a/mysql-test/suite/innodb/r/innodb_corrupt_bit.result b/mysql-test/suite/innodb/r/innodb_corrupt_bit.result
index e194f67f5c5..8ec10a86b37 100644
--- a/mysql-test/suite/innodb/r/innodb_corrupt_bit.result
+++ b/mysql-test/suite/innodb/r/innodb_corrupt_bit.result
@@ -46,7 +46,6 @@ ERROR HY000: Index corrupt_bit_test_Ä is corrupted
show warnings;
Level Code Message
Warning 180 InnoDB: Index "idxÄ“" for table "test"."corrupt_bit_test_Ä" is marked as corrupted
-Warning 180 Got error 180 when reading table `test`.`corrupt_bit_test_Ä`
Error 1712 Index corrupt_bit_test_Ä is corrupted
insert into corrupt_bit_test_Ä values (10001, "a", 20001, 20001);
select * from corrupt_bit_test_Ä use index(primary) where a = 10001;
diff --git a/mysql-test/suite/innodb/r/innodb_index_large_prefix.result b/mysql-test/suite/innodb/r/innodb_index_large_prefix.result
index f966574506a..330ade923b3 100644
--- a/mysql-test/suite/innodb/r/innodb_index_large_prefix.result
+++ b/mysql-test/suite/innodb/r/innodb_index_large_prefix.result
@@ -1,11 +1,16 @@
+SET default_storage_engine=InnoDB;
set global innodb_file_format="Barracuda";
set global innodb_file_per_table=1;
set global innodb_large_prefix=1;
-create table worklog5743(a TEXT not null, primary key (a(1000)))
-ROW_FORMAT=DYNAMIC, engine = innodb;
+### Test 1 ###
+create table worklog5743(a TEXT not null, primary key (a(1000))) ROW_FORMAT=DYNAMIC;
+show warnings;
+Level Code Message
insert into worklog5743 values(repeat("a", 20000));
update worklog5743 set a = (repeat("b", 16000));
create index idx on worklog5743(a(2000));
+show warnings;
+Level Code Message
begin;
update worklog5743 set a = (repeat("x", 17000));
select @@session.tx_isolation;
@@ -26,9 +31,13 @@ a = repeat("x", 17000)
1
rollback;
drop table worklog5743;
-create table worklog5743(a1 int, a2 TEXT not null)
-ROW_FORMAT=DYNAMIC, engine = innodb;
+### Test 2 ###
+create table worklog5743(a1 int, a2 TEXT not null) ROW_FORMAT=DYNAMIC;
+show warnings;
+Level Code Message
create index idx on worklog5743(a1, a2(2000));
+show warnings;
+Level Code Message
insert into worklog5743 values(9, repeat("a", 10000));
begin;
update worklog5743 set a1 = 1000;
@@ -49,8 +58,8 @@ select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9;
a1 a2 = repeat("a", 10000)
rollback;
drop table worklog5743;
-create table worklog5743(a1 int, a2 TEXT not null)
-ROW_FORMAT=DYNAMIC, engine = innodb;
+### Test 3 ###
+create table worklog5743(a1 int, a2 TEXT not null) ROW_FORMAT=DYNAMIC;
create index idx on worklog5743(a1, a2(50));
insert into worklog5743 values(9, repeat("a", 10000));
begin;
@@ -72,48 +81,328 @@ select a1, a2 = repeat("a", 10000) from worklog5743 where a1 = 9;
a1 a2 = repeat("a", 10000)
rollback;
drop table worklog5743;
-create table worklog5743_2(a1 int, a2 TEXT not null)
-ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2, engine = innodb;
-create table worklog5743_4(a1 int, a2 TEXT not null)
-ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4, engine = innodb;
-create index idx1 on worklog5743_2(a1, a2(942));
-ERROR HY000: Too big row
-create index idx1 on worklog5743_2(a1, a2(940));
-create index idx1 on worklog5743_4(a1, a2(1966));
-ERROR HY000: Too big row
-create index idx1 on worklog5743_4(a1, a2(1964));
+### Test 4 ###
+create table worklog5743_1(a1 int, a2 TEXT not null) KEY_BLOCK_SIZE=1;
+create table worklog5743_2(a1 int, a2 TEXT not null) KEY_BLOCK_SIZE=2;
+create table worklog5743_4(a1 int, a2 TEXT not null) KEY_BLOCK_SIZE=4;
+create table worklog5743_8(a1 int, a2 TEXT, a3 TEXT) KEY_BLOCK_SIZE=8;
+create table worklog5743_16(a1 int, a2 TEXT, a3 TEXT) KEY_BLOCK_SIZE=16;
+set global innodb_large_prefix=0;
+create index idx1 on worklog5743_1(a2(4000));
+ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs
+show warnings;
+Level Code Message
+Warning 1071 Specified key was too long; max key length is 767 bytes
+Error 1118 Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs
+set global innodb_large_prefix=1;
+create index idx2 on worklog5743_1(a2(4000));
+ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs
+show warnings;
+Level Code Message
+Warning 1071 Specified key was too long; max key length is 3072 bytes
+Error 1118 Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs
+create index idx3 on worklog5743_1(a2(436));
+ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs
+show warnings;
+Level Code Message
+Error 1118 Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs
+create index idx4 on worklog5743_1(a2(434));
+show warnings;
+Level Code Message
+create index idx5 on worklog5743_1(a1, a2(430));
+ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs
+show warnings;
+Level Code Message
+Error 1118 Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs
+create index idx6 on worklog5743_1(a1, a2(428));
+show warnings;
+Level Code Message
+set global innodb_large_prefix=0;
+create index idx1 on worklog5743_2(a2(4000));
+Warnings:
+Warning 1071 Specified key was too long; max key length is 767 bytes
+show warnings;
+Level Code Message
+Warning 1071 Specified key was too long; max key length is 767 bytes
+set global innodb_large_prefix=1;
+create index idx2 on worklog5743_2(a2(4000));
+ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs
+show warnings;
+Level Code Message
+Warning 1071 Specified key was too long; max key length is 3072 bytes
+Error 1118 Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs
+create index idx3 on worklog5743_2(a2(948));
+ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs
+show warnings;
+Level Code Message
+Error 1118 Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs
+create index idx4 on worklog5743_2(a2(946));
+show warnings;
+Level Code Message
+create index idx5 on worklog5743_2(a1, a2(942));
+ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs
+show warnings;
+Level Code Message
+Error 1118 Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs
+create index idx6 on worklog5743_2(a1, a2(940));
+show warnings;
+Level Code Message
+set global innodb_large_prefix=0;
+create index idx1 on worklog5743_4(a2(4000));
+Warnings:
+Warning 1071 Specified key was too long; max key length is 767 bytes
+show warnings;
+Level Code Message
+Warning 1071 Specified key was too long; max key length is 767 bytes
+set global innodb_large_prefix=1;
+create index idx2 on worklog5743_4(a2(4000));
+ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs
+show warnings;
+Level Code Message
+Warning 1071 Specified key was too long; max key length is 3072 bytes
+Error 1118 Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs
+create index idx3 on worklog5743_4(a2(1972));
+ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs
+show warnings;
+Level Code Message
+Error 1118 Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs
+create index idx4 on worklog5743_4(a2(1970));
+show warnings;
+Level Code Message
+create index idx5 on worklog5743_4(a1, a2(1966));
+ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs
+show warnings;
+Level Code Message
+Error 1118 Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs
+create index idx6 on worklog5743_4(a1, a2(1964));
+show warnings;
+Level Code Message
+set global innodb_large_prefix=0;
+create index idx1 on worklog5743_8(a2(1000));
+Warnings:
+Warning 1071 Specified key was too long; max key length is 767 bytes
+show warnings;
+Level Code Message
+Warning 1071 Specified key was too long; max key length is 767 bytes
+set global innodb_large_prefix=1;
+create index idx2 on worklog5743_8(a2(3073));
+Warnings:
+Warning 1071 Specified key was too long; max key length is 3072 bytes
+show warnings;
+Level Code Message
+Warning 1071 Specified key was too long; max key length is 3072 bytes
+create index idx3 on worklog5743_8(a2(3072));
+show warnings;
+Level Code Message
+create index idx4 on worklog5743_8(a1, a2(3069));
+ERROR 42000: Specified key was too long; max key length is 3072 bytes
+show warnings;
+Level Code Message
+Error 1071 Specified key was too long; max key length is 3072 bytes
+create index idx5 on worklog5743_8(a1, a2(3068));
+show warnings;
+Level Code Message
+create index idx6 on worklog5743_8(a1, a2(2000), a3(1069));
+ERROR 42000: Specified key was too long; max key length is 3072 bytes
+show warnings;
+Level Code Message
+Error 1071 Specified key was too long; max key length is 3072 bytes
+create index idx7 on worklog5743_8(a1, a2(2000), a3(1068));
+show warnings;
+Level Code Message
+set global innodb_large_prefix=0;
+create index idx1 on worklog5743_16(a2(1000));
+Warnings:
+Warning 1071 Specified key was too long; max key length is 767 bytes
+show warnings;
+Level Code Message
+Warning 1071 Specified key was too long; max key length is 767 bytes
+set global innodb_large_prefix=1;
+create index idx2 on worklog5743_16(a2(3073));
+Warnings:
+Warning 1071 Specified key was too long; max key length is 3072 bytes
+show warnings;
+Level Code Message
+Warning 1071 Specified key was too long; max key length is 3072 bytes
+create index idx3 on worklog5743_16(a2(3072));
+show warnings;
+Level Code Message
+create index idx4 on worklog5743_16(a1, a2(3069));
+ERROR 42000: Specified key was too long; max key length is 3072 bytes
+show warnings;
+Level Code Message
+Error 1071 Specified key was too long; max key length is 3072 bytes
+create index idx5 on worklog5743_16(a1, a2(3068));
+show warnings;
+Level Code Message
+create index idx6 on worklog5743_16(a1, a2(2000), a3(1069));
+ERROR 42000: Specified key was too long; max key length is 3072 bytes
+show warnings;
+Level Code Message
+Error 1071 Specified key was too long; max key length is 3072 bytes
+create index idx7 on worklog5743_16(a1, a2(2000), a3(1068));
+show warnings;
+Level Code Message
+insert into worklog5743_1 values(9, repeat("a", 10000));
insert into worklog5743_2 values(9, repeat("a", 10000));
insert into worklog5743_4 values(9, repeat("a", 10000));
+insert into worklog5743_8 values(9, repeat("a", 10000), repeat("a", 10000));
+insert into worklog5743_16 values(9, repeat("a", 10000), repeat("a", 10000));
+set global innodb_large_prefix=0;
+insert into worklog5743_1 values(2, repeat("b", 10000));
+insert into worklog5743_2 values(2, repeat("b", 10000));
+insert into worklog5743_4 values(2, repeat("b", 10000));
+insert into worklog5743_8 values(2, repeat("b", 10000), repeat("b", 10000));
+insert into worklog5743_16 values(2, repeat("b", 10000), repeat("b", 10000));
+set global innodb_large_prefix=1;
+select a1, left(a2, 20) from worklog5743_1;
+a1 left(a2, 20)
+9 aaaaaaaaaaaaaaaaaaaa
+2 bbbbbbbbbbbbbbbbbbbb
+select a1, left(a2, 20) from worklog5743_2;
+a1 left(a2, 20)
+9 aaaaaaaaaaaaaaaaaaaa
+2 bbbbbbbbbbbbbbbbbbbb
+select a1, left(a2, 20) from worklog5743_4;
+a1 left(a2, 20)
+9 aaaaaaaaaaaaaaaaaaaa
+2 bbbbbbbbbbbbbbbbbbbb
+select a1, left(a2, 20) from worklog5743_8;
+a1 left(a2, 20)
+9 aaaaaaaaaaaaaaaaaaaa
+2 bbbbbbbbbbbbbbbbbbbb
+select a1, left(a2, 20) from worklog5743_16;
+a1 left(a2, 20)
+9 aaaaaaaaaaaaaaaaaaaa
+2 bbbbbbbbbbbbbbbbbbbb
begin;
+update worklog5743_1 set a1 = 1000;
update worklog5743_2 set a1 = 1000;
update worklog5743_4 set a1 = 1000;
+update worklog5743_8 set a1 = 1000;
+update worklog5743_16 set a1 = 1000;
+select a1, left(a2, 20) from worklog5743_1;
+a1 left(a2, 20)
+1000 aaaaaaaaaaaaaaaaaaaa
+1000 bbbbbbbbbbbbbbbbbbbb
+select a1, left(a2, 20) from worklog5743_2;
+a1 left(a2, 20)
+1000 aaaaaaaaaaaaaaaaaaaa
+1000 bbbbbbbbbbbbbbbbbbbb
+select a1, left(a2, 20) from worklog5743_4;
+a1 left(a2, 20)
+1000 aaaaaaaaaaaaaaaaaaaa
+1000 bbbbbbbbbbbbbbbbbbbb
+select a1, left(a2, 20) from worklog5743_8;
+a1 left(a2, 20)
+1000 aaaaaaaaaaaaaaaaaaaa
+1000 bbbbbbbbbbbbbbbbbbbb
+select a1, left(a2, 20) from worklog5743_16;
+a1 left(a2, 20)
+1000 aaaaaaaaaaaaaaaaaaaa
+1000 bbbbbbbbbbbbbbbbbbbb
select @@session.tx_isolation;
@@session.tx_isolation
REPEATABLE-READ
-explain select a1, a2 = repeat("a", 10000) from worklog5743_2 where a1 = 9;
+explain select a1, left(a2, 20) from worklog5743_1 where a1 = 9;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE worklog5743_2 ref idx1 idx1 5 const 1
-select a1, a2 = repeat("a", 10000) from worklog5743_2 where a1 = 9;
-a1 a2 = repeat("a", 10000)
-9 1
-select a1, a2 = repeat("a", 10000) from worklog5743_4 where a1 = 9;
-a1 a2 = repeat("a", 10000)
-9 1
+1 SIMPLE worklog5743_1 ref idx6 idx6 5 const 1
+explain select a1, left(a2, 20) from worklog5743_2 where a1 = 9;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE worklog5743_2 ref idx6 idx6 5 const 1
+explain select a1, left(a2, 20) from worklog5743_4 where a1 = 9;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE worklog5743_4 ref idx6 idx6 5 const 1
+explain select a1, left(a2, 20) from worklog5743_8 where a1 = 9;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE worklog5743_8 ref idx5,idx7 idx5 5 const 1
+explain select a1, left(a2, 20) from worklog5743_16 where a1 = 9;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE worklog5743_16 ref idx5,idx7 idx5 5 const 1
+select a1, left(a2, 20) from worklog5743_1 where a1 = 9;
+a1 left(a2, 20)
+9 aaaaaaaaaaaaaaaaaaaa
+select a1, left(a2, 20) from worklog5743_2 where a1 = 9;
+a1 left(a2, 20)
+9 aaaaaaaaaaaaaaaaaaaa
+select a1, left(a2, 20) from worklog5743_4 where a1 = 9;
+a1 left(a2, 20)
+9 aaaaaaaaaaaaaaaaaaaa
+select a1, left(a2, 20) from worklog5743_8 where a1 = 9;
+a1 left(a2, 20)
+9 aaaaaaaaaaaaaaaaaaaa
+select a1, left(a2, 20) from worklog5743_16 where a1 = 9;
+a1 left(a2, 20)
+9 aaaaaaaaaaaaaaaaaaaa
SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
select @@session.tx_isolation;
@@session.tx_isolation
READ-UNCOMMITTED
-select a1, a2 = repeat("a", 10000) from worklog5743_2 where a1 = 9;
-a1 a2 = repeat("a", 10000)
-select a1, a2 = repeat("a", 10000) from worklog5743_4 where a1 = 9;
-a1 a2 = repeat("a", 10000)
+select a1, left(a2, 20) from worklog5743_1 where a1 = 9;
+a1 left(a2, 20)
+select a1, left(a2, 20) from worklog5743_2 where a1 = 9;
+a1 left(a2, 20)
+select a1, left(a2, 20) from worklog5743_4 where a1 = 9;
+a1 left(a2, 20)
+select a1, left(a2, 20) from worklog5743_8 where a1 = 9;
+a1 left(a2, 20)
+select a1, left(a2, 20) from worklog5743_16 where a1 = 9;
+a1 left(a2, 20)
rollback;
+drop table worklog5743_1;
drop table worklog5743_2;
drop table worklog5743_4;
-create table worklog5743(a1 int, a2 varchar(3000))
-ROW_FORMAT=DYNAMIC, engine = innodb;
-create index idx on worklog5743(a1, a2);
-insert into worklog5743 values(9, repeat("a", 3000));
+drop table worklog5743_8;
+drop table worklog5743_16;
+### Test 5 ###
+create table worklog5743(a1 int,
+a2 varchar(20000),
+a3 varchar(3073),
+a4 varchar(3072),
+a5 varchar(3069),
+a6 varchar(3068))
+ROW_FORMAT=DYNAMIC;
+create index idx1 on worklog5743(a2);
+Warnings:
+Warning 1071 Specified key was too long; max key length is 3072 bytes
+create index idx2 on worklog5743(a3);
+Warnings:
+Warning 1071 Specified key was too long; max key length is 3072 bytes
+create index idx3 on worklog5743(a4);
+show warnings;
+Level Code Message
+create index idx4 on worklog5743(a1, a2);
+ERROR 42000: Specified key was too long; max key length is 3072 bytes
+show warnings;
+Level Code Message
+Warning 1071 Specified key was too long; max key length is 3072 bytes
+Error 1071 Specified key was too long; max key length is 3072 bytes
+create index idx5 on worklog5743(a1, a5);
+ERROR 42000: Specified key was too long; max key length is 3072 bytes
+show warnings;
+Level Code Message
+Error 1071 Specified key was too long; max key length is 3072 bytes
+create index idx6 on worklog5743(a1, a6);
+show warnings;
+Level Code Message
+show create table worklog5743;
+Table Create Table
+worklog5743 CREATE TABLE `worklog5743` (
+ `a1` int(11) DEFAULT NULL,
+ `a2` varchar(20000) DEFAULT NULL,
+ `a3` varchar(3073) DEFAULT NULL,
+ `a4` varchar(3072) DEFAULT NULL,
+ `a5` varchar(3069) DEFAULT NULL,
+ `a6` varchar(3068) DEFAULT NULL,
+ KEY `idx1` (`a2`(3072)),
+ KEY `idx2` (`a3`(3072)),
+ KEY `idx3` (`a4`),
+ KEY `idx6` (`a1`,`a6`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC
+insert into worklog5743 values(9,
+repeat("a", 20000), repeat("a", 3073),
+repeat("a", 3072), repeat("a", 3069),
+repeat("a", 3068));
begin;
update worklog5743 set a1 = 1000;
select @@session.tx_isolation;
@@ -121,7 +410,7 @@ select @@session.tx_isolation;
REPEATABLE-READ
explain select a1 from worklog5743 where a1 = 9;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE worklog5743 ref idx idx 5 const 1 Using index
+1 SIMPLE worklog5743 ref idx6 idx6 5 const 1 Using index
select a1 from worklog5743 where a1 = 9;
a1
9
@@ -133,13 +422,13 @@ select a1 from worklog5743 where a1 = 9;
a1
rollback;
drop table worklog5743;
-create table worklog5743(a TEXT not null, primary key (a(1000)))
-engine = innodb;
+### Test 6 ###
+create table worklog5743(a TEXT not null, primary key (a(1000)));
ERROR HY000: Index column size too large. The maximum column size is 767 bytes.
-create table worklog5743(a TEXT) engine = innodb;
-create index idx on worklog5743(a(1000));
+create table worklog5743(a TEXT);
+create index idx on worklog5743(a(768));
ERROR HY000: Index column size too large. The maximum column size is 767 bytes.
-create index idx on worklog5743(a(725));
+create index idx on worklog5743(a(767));
insert into worklog5743 values(repeat("a", 20000));
begin;
insert into worklog5743 values(repeat("b", 20000));
@@ -160,25 +449,30 @@ a = repeat("x", 25000)
1
rollback;
drop table worklog5743;
-create table worklog5743(a TEXT not null) ROW_FORMAT=DYNAMIC, engine = innodb;
-create index idx on worklog5743(a(3073));
+### Test 7 ###
+create table worklog5743(a TEXT not null) ROW_FORMAT=DYNAMIC;
+create index idx1 on worklog5743(a(3073));
Warnings:
Warning 1071 Specified key was too long; max key length is 3072 bytes
-Warning 1071 Specified key was too long; max key length is 3072 bytes
create index idx2 on worklog5743(a(3072));
show create table worklog5743;
Table Create Table
worklog5743 CREATE TABLE `worklog5743` (
`a` text NOT NULL,
- KEY `idx` (`a`(3072)),
+ KEY `idx1` (`a`(3072)),
KEY `idx2` (`a`(3072))
) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=DYNAMIC
drop table worklog5743;
-create table worklog5743(a TEXT not null) engine = innodb;
+create table worklog5743(a TEXT not null) ROW_FORMAT=REDUNDANT;
+create index idx on worklog5743(a(768));
+ERROR HY000: Index column size too large. The maximum column size is 767 bytes.
+create index idx2 on worklog5743(a(767));
+drop table worklog5743;
+create table worklog5743(a TEXT not null) ROW_FORMAT=COMPACT;
create index idx on worklog5743(a(768));
ERROR HY000: Index column size too large. The maximum column size is 767 bytes.
create index idx2 on worklog5743(a(767));
drop table worklog5743;
SET GLOBAL innodb_file_format=Antelope;
-SET GLOBAL innodb_file_per_table=0;
+SET GLOBAL innodb_file_per_table=1;
SET GLOBAL innodb_large_prefix=0;
diff --git a/mysql-test/suite/innodb/r/innodb_information_schema.result b/mysql-test/suite/innodb/r/innodb_information_schema.result
index 0c4413282f7..6f97fdcc0a0 100644
--- a/mysql-test/suite/innodb/r/innodb_information_schema.result
+++ b/mysql-test/suite/innodb/r/innodb_information_schema.result
@@ -1,18 +1,18 @@
lock_mode lock_type lock_table lock_index lock_rec lock_data
-X RECORD `test`.```t'\"_str` `PRIMARY` 2 '1', 'abc', '''abc', 'abc''', 'a''bc', 'a''bc''', '''abc'''''
-X RECORD `test`.```t'\"_str` `PRIMARY` 2 '1', 'abc', '''abc', 'abc''', 'a''bc', 'a''bc''', '''abc'''''
-X RECORD `test`.```t'\"_str` `PRIMARY` 3 '2', 'abc', '"abc', 'abc"', 'a"bc', 'a"bc"', '"abc""'
-X RECORD `test`.```t'\"_str` `PRIMARY` 3 '2', 'abc', '"abc', 'abc"', 'a"bc', 'a"bc"', '"abc""'
-X RECORD `test`.```t'\"_str` `PRIMARY` 4 '3', 'abc', '\\abc', 'abc\\', 'a\\bc', 'a\\bc\\', '\\abc\\\\'
-X RECORD `test`.```t'\"_str` `PRIMARY` 4 '3', 'abc', '\\abc', 'abc\\', 'a\\bc', 'a\\bc\\', '\\abc\\\\'
-X RECORD `test`.```t'\"_str` `PRIMARY` 5 '4', 'abc', '\0abc', 'abc\0', 'a\0bc', 'a\0bc\0', 'a\0bc\0\0'
-X RECORD `test`.```t'\"_str` `PRIMARY` 5 '4', 'abc', '\0abc', 'abc\0', 'a\0bc', 'a\0bc\0', 'a\0bc\0\0'
-X RECORD `test`.`t_min` `PRIMARY` 2 -128, 0, -32768, 0, -8388608, 0, -2147483648, 0, -9223372036854775808, 0
-X RECORD `test`.`t_min` `PRIMARY` 2 -128, 0, -32768, 0, -8388608, 0, -2147483648, 0, -9223372036854775808, 0
-X RECORD `test`.`t_max` `PRIMARY` 2 127, 255, 32767, 65535, 8388607, 16777215, 2147483647, 4294967295, 9223372036854775807, 18446744073709551615
-X RECORD `test`.`t_max` `PRIMARY` 2 127, 255, 32767, 65535, 8388607, 16777215, 2147483647, 4294967295, 9223372036854775807, 18446744073709551615
-X RECORD `test`.```t'\"_str` `PRIMARY` 1 supremum pseudo-record
-X RECORD `test`.```t'\"_str` `PRIMARY` 1 supremum pseudo-record
+X RECORD `test`.```t'\"_str` PRIMARY 2 '1', 'abc', '''abc', 'abc''', 'a''bc', 'a''bc''', '''abc'''''
+X RECORD `test`.```t'\"_str` PRIMARY 2 '1', 'abc', '''abc', 'abc''', 'a''bc', 'a''bc''', '''abc'''''
+X RECORD `test`.```t'\"_str` PRIMARY 3 '2', 'abc', '"abc', 'abc"', 'a"bc', 'a"bc"', '"abc""'
+X RECORD `test`.```t'\"_str` PRIMARY 3 '2', 'abc', '"abc', 'abc"', 'a"bc', 'a"bc"', '"abc""'
+X RECORD `test`.```t'\"_str` PRIMARY 4 '3', 'abc', '\\abc', 'abc\\', 'a\\bc', 'a\\bc\\', '\\abc\\\\'
+X RECORD `test`.```t'\"_str` PRIMARY 4 '3', 'abc', '\\abc', 'abc\\', 'a\\bc', 'a\\bc\\', '\\abc\\\\'
+X RECORD `test`.```t'\"_str` PRIMARY 5 '4', 'abc', '\0abc', 'abc\0', 'a\0bc', 'a\0bc\0', 'a\0bc\0\0'
+X RECORD `test`.```t'\"_str` PRIMARY 5 '4', 'abc', '\0abc', 'abc\0', 'a\0bc', 'a\0bc\0', 'a\0bc\0\0'
+X RECORD `test`.`t_min` PRIMARY 2 -128, 0, -32768, 0, -8388608, 0, -2147483648, 0, -9223372036854775808, 0
+X RECORD `test`.`t_min` PRIMARY 2 -128, 0, -32768, 0, -8388608, 0, -2147483648, 0, -9223372036854775808, 0
+X RECORD `test`.`t_max` PRIMARY 2 127, 255, 32767, 65535, 8388607, 16777215, 2147483647, 4294967295, 9223372036854775807, 18446744073709551615
+X RECORD `test`.`t_max` PRIMARY 2 127, 255, 32767, 65535, 8388607, 16777215, 2147483647, 4294967295, 9223372036854775807, 18446744073709551615
+X RECORD `test`.```t'\"_str` PRIMARY 1 supremum pseudo-record
+X RECORD `test`.```t'\"_str` PRIMARY 1 supremum pseudo-record
lock_table COUNT(*)
`test`.`t_max` 2
`test`.`t_min` 2
diff --git a/mysql-test/suite/innodb/r/innodb_information_schema_buffer.result b/mysql-test/suite/innodb/r/innodb_information_schema_buffer.result
index b4a350e77a3..dcdf3082067 100644
--- a/mysql-test/suite/innodb/r/innodb_information_schema_buffer.result
+++ b/mysql-test/suite/innodb/r/innodb_information_schema_buffer.result
@@ -6,28 +6,28 @@ CREATE TABLE infoschema_buffer_test (col1 INT) ENGINE = INNODB;
INSERT INTO infoschema_buffer_test VALUES(9);
SELECT TABLE_NAME, INDEX_NAME, NUMBER_RECORDS, DATA_SIZE, PAGE_STATE, PAGE_TYPE
FROM INFORMATION_SCHEMA.INNODB_BUFFER_PAGE
-WHERE TABLE_NAME like "%infoschema_buffer_test"
+WHERE TABLE_NAME like "%infoschema_buffer_test%"
and PAGE_STATE="file_page" and PAGE_TYPE="index";
TABLE_NAME INDEX_NAME NUMBER_RECORDS DATA_SIZE PAGE_STATE PAGE_TYPE
-test/infoschema_buffer_test GEN_CLUST_INDEX 1 29 FILE_PAGE INDEX
+`test`.`infoschema_buffer_test` GEN_CLUST_INDEX 1 29 FILE_PAGE INDEX
INSERT INTO infoschema_buffer_test VALUES(19);
SELECT TABLE_NAME, INDEX_NAME, NUMBER_RECORDS, DATA_SIZE, PAGE_STATE, PAGE_TYPE
FROM INFORMATION_SCHEMA.INNODB_BUFFER_PAGE
-WHERE TABLE_NAME like "%infoschema_buffer_test"
+WHERE TABLE_NAME like "%infoschema_buffer_test%"
and PAGE_STATE="file_page" and PAGE_TYPE="index";
TABLE_NAME INDEX_NAME NUMBER_RECORDS DATA_SIZE PAGE_STATE PAGE_TYPE
-test/infoschema_buffer_test GEN_CLUST_INDEX 2 58 FILE_PAGE INDEX
+`test`.`infoschema_buffer_test` GEN_CLUST_INDEX 2 58 FILE_PAGE INDEX
CREATE INDEX idx ON infoschema_buffer_test(col1);
SELECT TABLE_NAME, INDEX_NAME, NUMBER_RECORDS, DATA_SIZE, PAGE_STATE, PAGE_TYPE
FROM INFORMATION_SCHEMA.INNODB_BUFFER_PAGE
-WHERE TABLE_NAME like "%infoschema_buffer_test"
+WHERE TABLE_NAME like "%infoschema_buffer_test%"
and PAGE_STATE="file_page" and INDEX_NAME = "idx" and PAGE_TYPE="index";
TABLE_NAME INDEX_NAME NUMBER_RECORDS DATA_SIZE PAGE_STATE PAGE_TYPE
-test/infoschema_buffer_test idx 2 32 FILE_PAGE INDEX
+`test`.`infoschema_buffer_test` idx 2 32 FILE_PAGE INDEX
DROP TABLE infoschema_buffer_test;
SELECT TABLE_NAME, INDEX_NAME, NUMBER_RECORDS, DATA_SIZE, PAGE_STATE, PAGE_TYPE
FROM INFORMATION_SCHEMA.INNODB_BUFFER_PAGE
-WHERE TABLE_NAME like "%infoschema_buffer_test";
+WHERE TABLE_NAME like "%infoschema_buffer_test%";
TABLE_NAME INDEX_NAME NUMBER_RECORDS DATA_SIZE PAGE_STATE PAGE_TYPE
CREATE TABLE infoschema_parent (id INT NOT NULL, PRIMARY KEY (id))
ENGINE=INNODB;
@@ -38,93 +38,9 @@ ON DELETE CASCADE)
ENGINE=INNODB;
SELECT count(*)
FROM INFORMATION_SCHEMA.INNODB_BUFFER_PAGE
-WHERE TABLE_NAME like "%infoschema_child" and PAGE_STATE="file_page"
+WHERE TABLE_NAME like "%infoschema_child%" and PAGE_STATE="file_page"
and PAGE_TYPE="index";
count(*)
2
DROP TABLE infoschema_child;
DROP TABLE infoschema_parent;
-show create table information_schema.innodb_buffer_page;
-Table Create Table
-INNODB_BUFFER_PAGE CREATE TEMPORARY TABLE `INNODB_BUFFER_PAGE` (
- `POOL_ID` bigint(21) unsigned NOT NULL DEFAULT '0',
- `BLOCK_ID` bigint(21) unsigned NOT NULL DEFAULT '0',
- `SPACE` bigint(21) unsigned NOT NULL DEFAULT '0',
- `PAGE_NUMBER` bigint(21) unsigned NOT NULL DEFAULT '0',
- `PAGE_TYPE` varchar(64) DEFAULT NULL,
- `FLUSH_TYPE` bigint(21) unsigned NOT NULL DEFAULT '0',
- `FIX_COUNT` bigint(21) unsigned NOT NULL DEFAULT '0',
- `IS_HASHED` varchar(3) DEFAULT NULL,
- `NEWEST_MODIFICATION` bigint(21) unsigned NOT NULL DEFAULT '0',
- `OLDEST_MODIFICATION` bigint(21) unsigned NOT NULL DEFAULT '0',
- `ACCESS_TIME` bigint(21) unsigned NOT NULL DEFAULT '0',
- `TABLE_NAME` varchar(1024) DEFAULT NULL,
- `INDEX_NAME` varchar(1024) DEFAULT NULL,
- `NUMBER_RECORDS` bigint(21) unsigned NOT NULL DEFAULT '0',
- `DATA_SIZE` bigint(21) unsigned NOT NULL DEFAULT '0',
- `COMPRESSED_SIZE` bigint(21) unsigned NOT NULL DEFAULT '0',
- `PAGE_STATE` varchar(64) DEFAULT NULL,
- `IO_FIX` varchar(64) DEFAULT NULL,
- `IS_OLD` varchar(3) DEFAULT NULL,
- `FREE_PAGE_CLOCK` bigint(21) unsigned NOT NULL DEFAULT '0'
-) ENGINE=MEMORY DEFAULT CHARSET=utf8
-show create table information_schema.innodb_buffer_page_lru;
-Table Create Table
-INNODB_BUFFER_PAGE_LRU CREATE TEMPORARY TABLE `INNODB_BUFFER_PAGE_LRU` (
- `POOL_ID` bigint(21) unsigned NOT NULL DEFAULT '0',
- `LRU_POSITION` bigint(21) unsigned NOT NULL DEFAULT '0',
- `SPACE` bigint(21) unsigned NOT NULL DEFAULT '0',
- `PAGE_NUMBER` bigint(21) unsigned NOT NULL DEFAULT '0',
- `PAGE_TYPE` varchar(64) DEFAULT NULL,
- `FLUSH_TYPE` bigint(21) unsigned NOT NULL DEFAULT '0',
- `FIX_COUNT` bigint(21) unsigned NOT NULL DEFAULT '0',
- `IS_HASHED` varchar(3) DEFAULT NULL,
- `NEWEST_MODIFICATION` bigint(21) unsigned NOT NULL DEFAULT '0',
- `OLDEST_MODIFICATION` bigint(21) unsigned NOT NULL DEFAULT '0',
- `ACCESS_TIME` bigint(21) unsigned NOT NULL DEFAULT '0',
- `TABLE_NAME` varchar(1024) DEFAULT NULL,
- `INDEX_NAME` varchar(1024) DEFAULT NULL,
- `NUMBER_RECORDS` bigint(21) unsigned NOT NULL DEFAULT '0',
- `DATA_SIZE` bigint(21) unsigned NOT NULL DEFAULT '0',
- `COMPRESSED_SIZE` bigint(21) unsigned NOT NULL DEFAULT '0',
- `COMPRESSED` varchar(3) DEFAULT NULL,
- `IO_FIX` varchar(64) DEFAULT NULL,
- `IS_OLD` varchar(3) DEFAULT NULL,
- `FREE_PAGE_CLOCK` bigint(21) unsigned NOT NULL DEFAULT '0'
-) ENGINE=MEMORY DEFAULT CHARSET=utf8
-show create table information_schema.innodb_buffer_pool_stats;
-Table Create Table
-INNODB_BUFFER_POOL_STATS CREATE TEMPORARY TABLE `INNODB_BUFFER_POOL_STATS` (
- `POOL_ID` bigint(21) unsigned NOT NULL DEFAULT '0',
- `POOL_SIZE` bigint(21) unsigned NOT NULL DEFAULT '0',
- `FREE_BUFFERS` bigint(21) unsigned NOT NULL DEFAULT '0',
- `DATABASE_PAGES` bigint(21) unsigned NOT NULL DEFAULT '0',
- `OLD_DATABASE_PAGES` bigint(21) unsigned NOT NULL DEFAULT '0',
- `MODIFIED_DATABASE_PAGES` bigint(21) unsigned NOT NULL DEFAULT '0',
- `PENDING_DECOMPRESS` bigint(21) unsigned NOT NULL DEFAULT '0',
- `PENDING_READS` bigint(21) unsigned NOT NULL DEFAULT '0',
- `PENDING_FLUSH_LRU` bigint(21) unsigned NOT NULL DEFAULT '0',
- `PENDING_FLUSH_LIST` bigint(21) unsigned NOT NULL DEFAULT '0',
- `PAGES_MADE_YOUNG` bigint(21) unsigned NOT NULL DEFAULT '0',
- `PAGES_NOT_MADE_YOUNG` bigint(21) unsigned NOT NULL DEFAULT '0',
- `PAGES_MADE_YOUNG_RATE` double NOT NULL DEFAULT '0',
- `PAGES_MADE_NOT_YOUNG_RATE` double NOT NULL DEFAULT '0',
- `NUMBER_PAGES_READ` bigint(21) unsigned NOT NULL DEFAULT '0',
- `NUMBER_PAGES_CREATED` bigint(21) unsigned NOT NULL DEFAULT '0',
- `NUMBER_PAGES_WRITTEN` bigint(21) unsigned NOT NULL DEFAULT '0',
- `PAGES_READ_RATE` double NOT NULL DEFAULT '0',
- `PAGES_CREATE_RATE` double NOT NULL DEFAULT '0',
- `PAGES_WRITTEN_RATE` double NOT NULL DEFAULT '0',
- `NUMBER_PAGES_GET` bigint(21) unsigned NOT NULL DEFAULT '0',
- `HIT_RATE` bigint(21) unsigned NOT NULL DEFAULT '0',
- `YOUNG_MAKE_PER_THOUSAND_GETS` bigint(21) unsigned NOT NULL DEFAULT '0',
- `NOT_YOUNG_MAKE_PER_THOUSAND_GETS` bigint(21) unsigned NOT NULL DEFAULT '0',
- `NUMBER_PAGES_READ_AHEAD` bigint(21) unsigned NOT NULL DEFAULT '0',
- `NUMBER_READ_AHEAD_EVICTED` bigint(21) unsigned NOT NULL DEFAULT '0',
- `READ_AHEAD_RATE` double NOT NULL DEFAULT '0',
- `READ_AHEAD_EVICTED_RATE` double NOT NULL DEFAULT '0',
- `LRU_IO_TOTAL` bigint(21) unsigned NOT NULL DEFAULT '0',
- `LRU_IO_CURRENT` bigint(21) unsigned NOT NULL DEFAULT '0',
- `UNCOMPRESS_TOTAL` bigint(21) unsigned NOT NULL DEFAULT '0',
- `UNCOMPRESS_CURRENT` bigint(21) unsigned NOT NULL DEFAULT '0'
-) ENGINE=MEMORY DEFAULT CHARSET=utf8
diff --git a/mysql-test/suite/innodb/r/innodb_mysql.result b/mysql-test/suite/innodb/r/innodb_mysql.result
index 7247b26e86b..236529fd8d6 100644
--- a/mysql-test/suite/innodb/r/innodb_mysql.result
+++ b/mysql-test/suite/innodb/r/innodb_mysql.result
@@ -1,6 +1,6 @@
set global innodb_support_xa=default;
set session innodb_support_xa=default;
-SET SESSION STORAGE_ENGINE = InnoDB;
+SET SESSION DEFAULT_STORAGE_ENGINE = InnoDB;
drop table if exists t1,t2,t3,t1m,t1i,t2m,t2i,t4;
drop procedure if exists p1;
create table t1 (
@@ -335,7 +335,7 @@ a count(a)
1 1
NULL 1
drop table t1;
-create table t1 (f1 int, f2 char(1), primary key(f1,f2));
+create table t1 (f1 int, f2 char(1), primary key(f1,f2)) stats_persistent=0;
insert into t1 values ( 1,"e"),(2,"a"),( 3,"c"),(4,"d");
alter table t1 drop primary key, add primary key (f2, f1);
explain select distinct f1 a, f1 b from t1;
@@ -371,7 +371,7 @@ dept varchar(20) NOT NULL,
age tinyint(3) unsigned NOT NULL,
PRIMARY KEY (id),
INDEX (name,dept)
-) ENGINE=InnoDB;
+) ENGINE=InnoDB STATS_PERSISTENT=0;
INSERT INTO t1(id, dept, age, name) VALUES
(3987, 'cs1', 10, 'rs1'), (3988, 'cs2', 20, 'rs1'), (3995, 'cs3', 10, 'rs2'),
(3996, 'cs4', 20, 'rs2'), (4003, 'cs5', 10, 'rs3'), (4004, 'cs6', 20, 'rs3'),
@@ -435,7 +435,7 @@ set global query_cache_size=10*1024*1024;
set global query_cache_type=1;
drop table if exists `test`;
Warnings:
-Note 1051 Unknown table 'test'
+Note 1051 Unknown table 'test.test'
CREATE TABLE `test` (`test1` varchar(3) NOT NULL,
`test2` varchar(4) NOT NULL,PRIMARY KEY (`test1`))
ENGINE=InnoDB DEFAULT CHARSET=latin1;
@@ -544,7 +544,7 @@ stat_id int NOT NULL,
acct_id int DEFAULT NULL,
INDEX idx1 (stat_id, acct_id),
INDEX idx2 (acct_id)
-) ENGINE=InnoDB;
+) ENGINE=InnoDB STATS_PERSISTENT=0;
INSERT INTO t1(stat_id,acct_id) VALUES
(1,759), (2,831), (3,785), (4,854), (1,921),
(1,553), (2,589), (3,743), (2,827), (2,545),
@@ -709,12 +709,12 @@ CREATE TABLE t2 (primary key (a)) select * from t1;
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
drop table if exists t2;
Warnings:
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'test.t2'
CREATE TEMPORARY TABLE t2 (primary key (a)) select * from t1;
ERROR 23000: Duplicate entry '1' for key 'PRIMARY'
drop table if exists t2;
Warnings:
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'test.t2'
CREATE TABLE t2 (a int, b int, primary key (a));
BEGIN;
INSERT INTO t2 values(100,100);
@@ -1129,7 +1129,6 @@ insert into t1 values('aaa');
alter table t1 add index(a(1024));
Warnings:
Warning 1071 Specified key was too long; max key length is 767 bytes
-Warning 1071 Specified key was too long; max key length is 767 bytes
show create table t1;
Table Create Table
t1 CREATE TABLE `t1` (
@@ -1788,6 +1787,33 @@ id select_type table type possible_keys key key_len ref rows Extra
DROP TABLE t1;
End of 5.1 tests
#
+# Bug#43600: Incorrect type conversion caused wrong result.
+#
+CREATE TABLE t1 (
+a int NOT NULL
+) engine= innodb;
+CREATE TABLE t2 (
+a int NOT NULL,
+b int NOT NULL,
+filler char(100) DEFAULT NULL,
+KEY a (a,b)
+) engine= innodb;
+insert into t1 values (0),(1),(2),(3),(4);
+insert into t2 select A.a + 10 *B.a, 1, 'filler' from t1 A, t1 B;
+explain select * from t1, t2 where t2.a=t1.a and t2.b + 1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ALL NULL NULL NULL NULL 5
+1 SIMPLE t2 ref a a 4 test.t1.a 1 Using index condition
+select * from t1, t2 where t2.a=t1.a and t2.b + 1;
+a a b filler
+0 0 1 filler
+1 1 1 filler
+2 2 1 filler
+3 3 1 filler
+4 4 1 filler
+drop table t1,t2;
+# End of test case for the bug#43600
+#
# Bug#42643: InnoDB does not support replication of TRUNCATE TABLE
#
# Check that a TRUNCATE TABLE statement, needing an exclusive meta
@@ -1819,15 +1845,21 @@ a
# Connection default
DROP TABLE t1;
drop table if exists t1, t2, t3;
+#
+# BUG#35850: Performance regression in 5.1.23/5.1.24
+#
create table t1(a int);
insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t2 (a int, b int, pk int, key(a,b), primary key(pk)) engine=innodb;
insert into t2 select @a:=A.a+10*(B.a + 10*C.a),@a, @a from t1 A, t1 B, t1 C;
-this must use key 'a', not PRIMARY:
+# this must use key 'a', not PRIMARY:
explain select a from t2 where a=b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 index NULL a 10 NULL # Using where; Using index
drop table t1, t2;
+#
+# Bug #40360: Binlog related errors with binlog off
+#
SET SESSION BINLOG_FORMAT=STATEMENT;
SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
select @@session.sql_log_bin, @@session.binlog_format, @@session.tx_isolation;
@@ -1837,12 +1869,19 @@ select @@session.sql_log_bin, @@session.binlog_format, @@session.tx_isolation;
CREATE TABLE t1 ( a INT ) ENGINE=InnoDB;
INSERT INTO t1 VALUES(1);
DROP TABLE t1;
+#
+# Bug#37284 Crash in Field_string::type()
+#
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 (a char(50)) ENGINE=InnoDB;
CREATE INDEX i1 on t1 (a(3));
SELECT * FROM t1 WHERE a = 'abcde';
a
DROP TABLE t1;
+#
+# Bug #37742: HA_EXTRA_KEYREAD flag is set when key contains only prefix of
+# requested column
+#
CREATE TABLE foo (a int, b int, c char(10),
PRIMARY KEY (c(3)),
KEY b (b)
@@ -1860,6 +1899,9 @@ INSERT INTO foo VALUES
(4,5,'uvwxyz'), (5,6,'meotnsyglt'), (4,5,'asfdewe');
INSERT INTO bar SELECT * FROM foo;
INSERT INTO foo2 SELECT * FROM foo;
+ANALYZE TABLE bar;
+ANALYZE TABLE foo;
+ANALYZE TABLE foo2;
EXPLAIN SELECT c FROM bar WHERE b>2;;
id 1
select_type SIMPLE
@@ -1927,6 +1969,9 @@ ref NULL
rows 6
Extra Using where; Using index
DROP TABLE foo, bar, foo2;
+#
+# Bug#41348: INSERT INTO tbl SELECT * FROM temp_tbl overwrites locking type of temp table
+#
DROP TABLE IF EXISTS t1,t3,t2;
DROP FUNCTION IF EXISTS f1;
CREATE FUNCTION f1() RETURNS VARCHAR(250)
@@ -1948,6 +1993,9 @@ DEALLOCATE PREPARE stmt1;
DEALLOCATE PREPARE stmt3;
DROP TABLE t1,t3,t2;
DROP FUNCTION f1;
+#
+# Bug#37016: TRUNCATE TABLE removes some rows but not all
+#
DROP TABLE IF EXISTS t1,t2;
CREATE TABLE t1 (id INT NOT NULL, PRIMARY KEY (id)) ENGINE=INNODB;
CREATE TABLE t2 (id INT PRIMARY KEY,
@@ -2154,6 +2202,9 @@ DROP TABLE t4;
DROP TABLE t1;
DROP TABLE t2;
DROP TABLE t3;
+#
+# Bug#43580: Issue with Innodb on multi-table update
+#
CREATE TABLE t1 (a INT, b INT, KEY (a)) ENGINE = INNODB;
CREATE TABLE t2 (a INT KEY, b INT, KEY (b)) ENGINE = INNODB;
CREATE TABLE t3 (a INT, b INT KEY, KEY (a)) ENGINE = INNODB;
@@ -2259,6 +2310,7 @@ INSERT INTO t1 (b,c,d,e) SELECT RAND()*10000, RAND()*10000, d, e FROM t1;
INSERT INTO t1 (b,c,d,e) SELECT RAND()*10000, RAND()*10000, d, e FROM t1;
INSERT INTO t1 (b,c,d,e) SELECT RAND()*10000, RAND()*10000, d, e FROM t1;
INSERT INTO t1 (b,c,d,e) SELECT RAND()*10000, RAND()*10000, d, e FROM t1;
+ANALYZE TABLE t1;
EXPLAIN SELECT * FROM t1 WHERE b=1 AND c=1 ORDER BY a;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref i2 i2 8 const,const 1 Using where; Using filesort
@@ -2267,7 +2319,7 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ref i2 i2 8 const,const 1 Using where; Using filesort
EXPLAIN SELECT * FROM t1 FORCE INDEX(PRIMARY) WHERE b=1 AND c=1 ORDER BY a;
id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index NULL PRIMARY 4 NULL 128 Using where
+1 SIMPLE t1 index NULL PRIMARY 4 NULL {checked} Using where
DROP TABLE t1;
#
# Bug #47963: Wrong results when index is used
@@ -2337,7 +2389,7 @@ DROP TABLE t1,t2;
# Bug #49324: more valgrind errors in test_if_skip_sort_order
#
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=innodb ;
-#should not cause valgrind warnings
+# should not cause valgrind warnings
SELECT 1 FROM t1 JOIN t1 a USING(a) GROUP BY t1.a,t1.a;
1
DROP TABLE t1;
@@ -2349,6 +2401,8 @@ create table t1(f1 int not null primary key, f2 int) engine=innodb;
create table t2(f1 int not null, key (f1)) engine=innodb;
insert into t1 values (1,1),(2,2),(3,3);
insert into t2 values (1),(2),(3);
+analyze table t1;
+analyze table t2;
explain select t1.* from t1 left join t2 using(f1) group by t1.f1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index NULL PRIMARY 4 NULL 3
@@ -2365,6 +2419,9 @@ INSERT INTO t1 VALUES (1,1,1,1,1,1), (2,2,2,2,2,2), (3,3,3,3,3,3),
(4,4,4,4,4,4), (5,5,5,5,5,5), (6,6,6,6,6,6),
(7,7,7,7,7,7), (8,8,8,8,8,8), (9,9,9,9,9,9),
(11,11,11,11,11,11);
+ANALYZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 analyze status OK
EXPLAIN SELECT COUNT(*) FROM t1;
id 1
select_type SIMPLE
@@ -2413,36 +2470,6 @@ Comment
Index_comment
DROP TABLE t1;
#
-# Bug #53334: wrong result for outer join with impossible ON condition
-# (see the same test case for MyISAM in join.test)
-#
-create table t1 (id int primary key);
-create table t2 (id int);
-insert into t1 values (75);
-insert into t1 values (79);
-insert into t1 values (78);
-insert into t1 values (77);
-replace into t1 values (76);
-replace into t1 values (76);
-insert into t1 values (104);
-insert into t1 values (103);
-insert into t1 values (102);
-insert into t1 values (101);
-insert into t1 values (105);
-insert into t1 values (106);
-insert into t1 values (107);
-insert into t2 values (107),(75),(1000);
-select t1.id,t2.id from t2 left join t1 on t1.id>=74 and t1.id<=0
-where t2.id=75 and t1.id is null;
-id id
-NULL 75
-explain select t1.id,t2.id from t2 left join t1 on t1.id>=74 and t1.id<=0
-where t2.id=75 and t1.id is null;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 const PRIMARY NULL NULL NULL 1 Impossible ON condition
-1 SIMPLE t2 ALL NULL NULL NULL NULL 3 Using where
-drop table t1,t2;
-#
# Bug #47453: InnoDB incorrectly changes TIMESTAMP columns when
# JOINed during an UPDATE
#
@@ -2450,7 +2477,7 @@ CREATE TABLE t1 (d INT) ENGINE=InnoDB;
CREATE TABLE t2 (a INT, b INT,
c TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
ON UPDATE CURRENT_TIMESTAMP) ENGINE=InnoDB;
-set up our data elements
+# set up our data elements
INSERT INTO t1 (d) VALUES (1);
INSERT INTO t2 (a,b) VALUES (1,1);
SELECT SECOND(c) INTO @bug47453 FROM t2;
@@ -2465,7 +2492,7 @@ SELECT SLEEP(1);
SLEEP(1)
0
UPDATE t1 JOIN t2 ON d=a SET b=1 WHERE a=1;
-#should be 0
+# should be 0
SELECT SECOND(c)-@bug47453 FROM t1 JOIN t2 ON d=a;
SECOND(c)-@bug47453
0
@@ -2535,6 +2562,7 @@ KEY idx1 (f2,f5,f4),
KEY idx2 (f2,f4)
) ENGINE=InnoDB;
LOAD DATA INFILE '../../std_data/intersect-bug50389.tsv' INTO TABLE t1;
+ANALYZE TABLE t1;
SELECT * FROM t1 WHERE f1 IN
(3305028,3353871,3772880,3346860,4228206,3336022,
3470988,3305175,3329875,3817277,3856380,3796193,
@@ -2570,7 +2598,7 @@ f4 tinyint(1) NOT NULL,
PRIMARY KEY (f1),
UNIQUE KEY (f2, f3),
KEY (f4)
-) ENGINE=InnoDB;
+) ENGINE=InnoDB STATS_PERSISTENT=0;
INSERT INTO t1 VALUES
(1,1,991,1), (2,1,992,1), (3,1,993,1), (4,1,994,1), (5,1,995,1),
(6,1,996,1), (7,1,997,1), (8,1,998,1), (10,1,999,1), (11,1,9910,1),
@@ -2600,44 +2628,10 @@ ALTER TABLE t1 COMMENT 'test';
UNLOCK TABLES;
DROP TABLE t1;
#
-# Bug#55826: create table .. select crashes with when KILL_BAD_DATA
-# is returned
-#
-CREATE TABLE t1(a INT) ENGINE=innodb;
-INSERT INTO t1 VALUES (0);
-SET SQL_MODE='STRICT_ALL_TABLES';
-CREATE TABLE t2
-SELECT LEAST((SELECT '' FROM t1),NOW()) FROM `t1`;
-ERROR 22007: Incorrect datetime value: ''
-DROP TABLE t1;
-SET SQL_MODE=DEFAULT;
-#
-# Bug#55580: segfault in read_view_sees_trx_id
-#
-CREATE TABLE t1 (a INT) ENGINE=Innodb;
-CREATE TABLE t2 (a INT) ENGINE=Innodb;
-INSERT INTO t1 VALUES (1),(2);
-INSERT INTO t2 VALUES (1),(2);
-START TRANSACTION;
-SELECT * FROM t2 LOCK IN SHARE MODE;
-a
-1
-2
-START TRANSACTION;
-SELECT * FROM t1 LOCK IN SHARE MODE;
-a
-1
-2
-SELECT * FROM t1 FOR UPDATE;
-# should not crash
-SELECT * FROM t1 GROUP BY (SELECT a FROM t2 LIMIT 1 FOR UPDATE) + t1.a;
-ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
-DROP TABLE t1,t2;
-#
# Bug#55656: mysqldump can be slower after bug #39653 fix
#
CREATE TABLE t1 (a INT , b INT, c INT, d INT,
-KEY (b), PRIMARY KEY (a,b)) ENGINE=INNODB;
+KEY (b), PRIMARY KEY (a,b)) ENGINE=INNODB STATS_PERSISTENT=0;
INSERT INTO t1 VALUES (1,1,1,1), (2,2,2,2), (3,3,3,3);
EXPLAIN SELECT COUNT(*) FROM t1;
id 1
@@ -2691,58 +2685,6 @@ rows 3
Extra Using index
DROP TABLE t1;
#
-# Bug#56862 Execution of a query that uses index merge returns a wrong result
-#
-CREATE TABLE t1 (
-pk int NOT NULL AUTO_INCREMENT PRIMARY KEY,
-a int,
-b int,
-INDEX idx(a))
-ENGINE=INNODB;
-INSERT INTO t1(a,b) VALUES
-(11, 1100), (2, 200), (1, 100), (14, 1400), (5, 500),
-(3, 300), (17, 1700), (4, 400), (12, 1200), (8, 800),
-(6, 600), (18, 1800), (9, 900), (10, 1000), (7, 700),
-(13, 1300), (15, 1500), (19, 1900), (16, 1600), (20, 2000);
-INSERT INTO t1(a,b) SELECT a+20, b+2000 FROM t1;
-INSERT INTO t1(a,b) SELECT a+40, b+4000 FROM t1;
-INSERT INTO t1(a,b) SELECT a+80, b+8000 FROM t1;
-INSERT INTO t1(a,b) SELECT a,b FROM t1;
-INSERT INTO t1(a,b) SELECT a,b FROM t1;
-INSERT INTO t1(a,b) SELECT a,b FROM t1;
-INSERT INTO t1(a,b) SELECT a,b FROM t1;
-INSERT INTO t1(a,b) SELECT a,b FROM t1;
-INSERT INTO t1(a,b) SELECT a,b FROM t1;
-INSERT INTO t1(a,b) SELECT a,b FROM t1;
-INSERT INTO t1(a,b) SELECT a,b FROM t1;
-INSERT INTO t1 VALUES (1000000, 0, 0);
-SET SESSION sort_buffer_size = 1024*36;
-EXPLAIN
-SELECT COUNT(*) FROM
-(SELECT * FROM t1 FORCE INDEX (idx,PRIMARY)
-WHERE a BETWEEN 2 AND 7 OR pk=1000000) AS t;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index_merge PRIMARY,idx idx,PRIMARY 5,4 NULL 1536 Using sort_union(idx,PRIMARY); Using where
-SELECT COUNT(*) FROM
-(SELECT * FROM t1 FORCE INDEX (idx,PRIMARY)
-WHERE a BETWEEN 2 AND 7 OR pk=1000000) AS t;
-COUNT(*)
-1537
-SET SESSION sort_buffer_size = DEFAULT;
-DROP TABLE t1;
-#
-# ALTER TABLE IGNORE didn't ignore duplicates for unique add index
-#
-create table t1 (a int primary key, b int) engine = innodb;
-insert into t1 values (1,1),(2,1);
-alter ignore table t1 add unique `main` (b);
-select * from t1;
-a b
-1 1
-drop table t1;
-End of 5.1 tests
-#
-#
# Bug#55826: create table .. select crashes with when KILL_BAD_DATA
# is returned
#
@@ -2755,55 +2697,14 @@ ERROR 22007: Incorrect datetime value: ''
DROP TABLE t1;
SET SQL_MODE=DEFAULT;
#
-# Bug#56862 Execution of a query that uses index merge returns a wrong result
+# Bug#56862 Moved to innodb_16k.test
#
-CREATE TABLE t1 (
-pk int NOT NULL AUTO_INCREMENT PRIMARY KEY,
-a int,
-b int,
-INDEX idx(a))
-ENGINE=INNODB;
-INSERT INTO t1(a,b) VALUES
-(11, 1100), (2, 200), (1, 100), (14, 1400), (5, 500),
-(3, 300), (17, 1700), (4, 400), (12, 1200), (8, 800),
-(6, 600), (18, 1800), (9, 900), (10, 1000), (7, 700),
-(13, 1300), (15, 1500), (19, 1900), (16, 1600), (20, 2000);
-INSERT INTO t1(a,b) SELECT a+20, b+2000 FROM t1;
-INSERT INTO t1(a,b) SELECT a+40, b+4000 FROM t1;
-INSERT INTO t1(a,b) SELECT a+80, b+8000 FROM t1;
-INSERT INTO t1(a,b) SELECT a,b FROM t1;
-INSERT INTO t1(a,b) SELECT a,b FROM t1;
-INSERT INTO t1(a,b) SELECT a,b FROM t1;
-INSERT INTO t1(a,b) SELECT a,b FROM t1;
-INSERT INTO t1(a,b) SELECT a,b FROM t1;
-INSERT INTO t1(a,b) SELECT a,b FROM t1;
-INSERT INTO t1(a,b) SELECT a,b FROM t1;
-INSERT INTO t1(a,b) SELECT a,b FROM t1;
-INSERT INTO t1 VALUES (1000000, 0, 0);
-SET SESSION sort_buffer_size = 1024*36;
-EXPLAIN
-SELECT COUNT(*) FROM
-(SELECT * FROM t1 FORCE INDEX (idx,PRIMARY)
-WHERE a BETWEEN 2 AND 7 OR pk=1000000) AS t;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index_merge PRIMARY,idx idx,PRIMARY 5,4 NULL 1536 Using sort_union(idx,PRIMARY); Using where
-SELECT COUNT(*) FROM
-(SELECT * FROM t1 FORCE INDEX (idx,PRIMARY)
-WHERE a BETWEEN 2 AND 7 OR pk=1000000) AS t;
-COUNT(*)
-1537
-SET SESSION sort_buffer_size = DEFAULT;
-DROP TABLE t1;
#
# Test for bug #39932 "create table fails if column for FK is in different
# case than in corr index".
#
drop tables if exists t1, t2;
create table t1 (pk int primary key) engine=InnoDB;
-# Even although the below statement uses uppercased field names in
-# foreign key definition it still should be able to find explicitly
-# created supporting index. So it should succeed and should not
-# create any additional supporting indexes.
create table t2 (fk int, key x (fk),
constraint x foreign key (FK) references t1 (PK)) engine=InnoDB;
show create table t2;
@@ -2815,130 +2716,6 @@ t2 CREATE TABLE `t2` (
) ENGINE=InnoDB DEFAULT CHARSET=latin1
drop table t2, t1;
#
-# Bug #663818: wrong result when BNLH is used
-#
-CREATE TABLE t1(pk int NOT NULL PRIMARY KEY) ENGINE=InnoDB;
-INSERT INTO t1 VALUES
-(1), (2), (11), (12), (13), (14),
-(15), (16), (17), (18), (19);
-CREATE TABLE t2(pk int NOT NULL PRIMARY KEY) ENGINE=InnoDB;
-INSERT INTO t2 VALUES
-(1), (10), (11), (12), (13), (14),
-(15), (16), (17), (18), (19), (20), (21);
-SET SESSION join_buffer_size=10000;
-Warnings:
-Warning 1292 Truncated incorrect join_buffer_size value: '10000'
-SET SESSION join_cache_level=3;
-EXPLAIN
-SELECT t1.pk FROM t1,t2
-WHERE t1.pk = t2.pk AND t2.pk <> 8;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index PRIMARY PRIMARY 4 NULL 11 Using where; Using index
-1 SIMPLE t2 hash_index PRIMARY #hash#PRIMARY:PRIMARY 4:4 test.t1.pk 13 Using join buffer (flat, BNLH join)
-SELECT t1.pk FROM t1,t2
-WHERE t1.pk = t2.pk AND t2.pk <> 8;
-pk
-1
-11
-12
-13
-14
-15
-16
-17
-18
-19
-SET SESSION join_cache_level=1;
-EXPLAIN
-SELECT t1.pk FROM t1,t2
-WHERE t1.pk = t2.pk AND t2.pk <> 8;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index PRIMARY PRIMARY 4 NULL 11 Using where; Using index
-1 SIMPLE t2 eq_ref PRIMARY PRIMARY 4 test.t1.pk 1 Using index
-SELECT t1.pk FROM t1,t2
-WHERE t1.pk = t2.pk AND t2.pk <> 8;
-pk
-1
-11
-12
-13
-14
-15
-16
-17
-18
-19
-DROP TABLE t1,t2;
-SET SESSION join_cache_level=DEFAULT;
-SET SESSION join_buffer_size=DEFAULT;
-#
-# Bug#668644: HAVING + ORDER BY
-#
-CREATE TABLE t1 (
-pk int NOT NULL PRIMARY KEY, i int DEFAULT NULL,
-INDEX idx (i)
-) ENGINE=INNODB;
-INSERT INTO t1 VALUES
-(6,-1636630528),(2,-1097924608),(1,6),(3,6),(4,1148715008),(5,1541734400);
-CREATE TABLE t2 (
-i int DEFAULT NULL,
-pk int NOT NULL PRIMARY KEY,
-INDEX idx (i)
-) ENGINE= INNODB;
-INSERT INTO t2 VALUES
-(-1993998336,20),(-1036582912,1),(-733413376,5),(-538247168,16),
-(-514260992,4),(-249561088,9),(1,2),(1,6),(2,10),(2,19),(4,17),
-(5,14),(5,15),(6,8),(7,13),(8,18),(9,11),(9,12),(257425408,7),
-(576061440,3);
-EXPLAIN
-SELECT t1 .i AS f FROM t1, t2
-WHERE t2.i = t1.pk AND t1.pk BETWEEN 0 AND 224
-HAVING f > 7
-ORDER BY f;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 index PRIMARY idx 5 NULL 6 Using where; Using index
-1 SIMPLE t2 ref idx idx 5 test.t1.pk 1 Using index
-SELECT t1 .i AS f FROM t1, t2
-WHERE t2.i = t1.pk AND t1.pk BETWEEN 0 AND 224
-HAVING f > 7
-ORDER BY f;
-f
-1148715008
-1541734400
-1541734400
-DROP TABLE t1, t2;
-#
-# Test for bug #56619 - Assertion failed during
-# ALTER TABLE RENAME, DISABLE KEYS
-#
-DROP TABLE IF EXISTS t1, t2;
-CREATE TABLE t1 (a INT, INDEX(a)) engine=innodb;
-ALTER TABLE t1 RENAME TO t2, DISABLE KEYS;
-DROP TABLE IF EXISTS t1, t2;
-#
-# Bug#702322: HAVING with two ANDed predicates + ORDER BY
-#
-CREATE TABLE t1 (pk int PRIMARY KEY, a int, KEY (a)) ENGINE=InnoDB;
-CREATE TABLE t2 (a int, KEY (a)) ENGINE=InnoDB;
-INSERT INTO t1 VALUES
-(18,0),(9,10),(8,11),(2,15),(7,19),(1,20);
-SET SESSION join_cache_level = 0;
-EXPLAIN
-SELECT t1.a FROM t1 LEFT JOIN t2 ON t1.pk = t2.a
-WHERE t1.pk >= 6 HAVING t1.a<> 0 AND t1.a <> 11
-ORDER BY t1.a;
-id select_type table type possible_keys key key_len ref rows Extra
-1 SIMPLE t1 range PRIMARY PRIMARY 4 NULL 4 Using index condition; Using filesort
-1 SIMPLE t2 ref a a 5 test.t1.pk 1 Using index
-SELECT t1.a FROM t1 LEFT JOIN t2 ON t1.pk = t2.a
-WHERE t1.pk >= 6 HAVING t1.a<> 0 AND t1.a <> 11
-ORDER BY t1.a;
-a
-10
-19
-DROP TABLE t1,t2;
-End of 5.3 tests
-#
# Test for bug #11762012 - "54553: INNODB ASSERTS IN HA_INNOBASE::
# UPDATE_ROW, TEMPORARY TABLE, TABLE LOCK".
#
@@ -2954,7 +2731,39 @@ LOCK TABLES t1 READ;
UPDATE t1 SET c = 5;
UNLOCK TABLES;
DROP TEMPORARY TABLE t1;
-End of 5.1 tests
+# End of 5.1 tests
+#
+# Bug#49604 "6.0 processing compound WHERE clause incorrectly
+# with Innodb - extra rows"
+#
+CREATE TABLE t1 (
+c1 INT NOT NULL,
+c2 INT,
+PRIMARY KEY (c1),
+KEY k1 (c2)
+) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (12,1);
+INSERT INTO t1 VALUES (15,1);
+INSERT INTO t1 VALUES (16,1);
+INSERT INTO t1 VALUES (22,1);
+INSERT INTO t1 VALUES (20,2);
+CREATE TABLE t2 (
+c1 INT NOT NULL,
+c2 INT,
+PRIMARY KEY (c1)
+) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (1,2);
+INSERT INTO t2 VALUES (2,9);
+SELECT STRAIGHT_JOIN t2.c2, t1.c2, t2.c1
+FROM t1 JOIN t2 ON t1.c2 = t2.c1
+WHERE t2.c1 IN (2, 1, 6) OR t2.c1 NOT IN (1);
+c2 c2 c1
+2 1 1
+2 1 1
+2 1 1
+2 1 1
+9 2 2
+DROP TABLE t1, t2;
#
# Bug#44613 SELECT statement inside FUNCTION takes a shared lock
#
@@ -2981,13 +2790,86 @@ COMMIT;
DROP TABLE t1;
DROP FUNCTION f1;
#
+# Bug#42744: Crash when using a join buffer to join a table with a blob
+# column and an additional column used for duplicate elimination.
+#
+CREATE TABLE t1 (a tinyblob) ENGINE=InnoDB;
+CREATE TABLE t2 (a int PRIMARY KEY, b tinyblob) ENGINE=InnoDB;
+INSERT INTO t1 VALUES ('1'), (NULL);
+INSERT INTO t2 VALUES (1, '1');
+EXPLAIN
+SELECT t2.b FROM t1,t2 WHERE t1.a IN (SELECT 1 FROM t2);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t2 ALL NULL NULL NULL NULL 1
+1 PRIMARY t1 ALL NULL NULL NULL NULL 2 Using where; Using join buffer (flat, BNL join)
+1 PRIMARY t2 index NULL PRIMARY 4 NULL 1 Using index; FirstMatch(t1); Using join buffer (incremental, BNL join)
+SELECT t2.b FROM t1,t2 WHERE t1.a IN (SELECT 1 FROM t2);
+b
+1
+DROP TABLE t1,t2;
+#
+# Bug#48093: 6.0 Server not processing equivalent IN clauses properly
+# with Innodb tables
+#
+CREATE TABLE t1 (
+i int(11) DEFAULT NULL,
+v1 varchar(1) DEFAULT NULL,
+v2 varchar(20) DEFAULT NULL,
+KEY i (i),
+KEY v (v1,i)
+) ENGINE=innodb;
+INSERT INTO t1 VALUES (1,'f','no');
+INSERT INTO t1 VALUES (2,'u','yes-u');
+INSERT INTO t1 VALUES (2,'h','yes-h');
+INSERT INTO t1 VALUES (3,'d','no');
+
+SELECT v2
+FROM t1
+WHERE v1 IN ('f', 'd', 'h', 'u' ) AND i = 2;
+v2
+yes-u
+yes-h
+
+# Should not use index_merge
+EXPLAIN
+SELECT v2
+FROM t1
+WHERE v1 IN ('f', 'd', 'h', 'u' ) AND i = 2;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 ref i,v i 5 const 2 Using where
+DROP TABLE t1;
+#
# Bug#54606 innodb fast alter table + pack_keys=0
# prevents adding new indexes
#
+DROP TABLE IF EXISTS t1;
CREATE TABLE t1 (a INT, b CHAR(9), c INT, key(b))
ENGINE=InnoDB
PACK_KEYS=0;
CREATE INDEX a ON t1 (a);
CREATE INDEX c on t1 (c);
DROP TABLE t1;
-End of 5.1 tests
+#
+# Additional coverage for refactoring which is made as part
+# of fix for Bug#27480 "Extend CREATE TEMPORARY TABLES privilege
+# to allow temp table operations".
+#
+# Check that OPTIMIZE table works for temporary InnoDB tables.
+DROP TABLE IF EXISTS t1;
+CREATE TEMPORARY TABLE t1 (a INT) ENGINE=InnoDB;
+OPTIMIZE TABLE t1;
+Table Op Msg_type Msg_text
+test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
+test.t1 optimize status OK
+DROP TABLE t1;
+#
+# Bug#11762345 54927: DROPPING AND ADDING AN INDEX IN ONE
+# COMMAND CAN FAIL IN INNODB PLUGIN 1.0
+#
+DROP TABLE IF EXISTS t1;
+CREATE TABLE t1 (id int, a int, b int, PRIMARY KEY (id),
+INDEX a (a)) ENGINE=innodb;
+ALTER TABLE t1 DROP INDEX a, ADD INDEX a (b, a);
+ALTER TABLE t1 DROP INDEX a, ADD INDEX (a, b);
+DROP TABLE t1;
+End of 6.0 tests
diff --git a/mysql-test/suite/innodb/r/innodb_prefix_index_liftedlimit.result b/mysql-test/suite/innodb/r/innodb_prefix_index_liftedlimit.result
index 0c68c8a6975..746072781bd 100644
--- a/mysql-test/suite/innodb/r/innodb_prefix_index_liftedlimit.result
+++ b/mysql-test/suite/innodb/r/innodb_prefix_index_liftedlimit.result
@@ -31,6 +31,11 @@ SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743;
col_1_varchar = REPEAT("c", 4000)
0
1
+ALTER TABLE worklog5743 ROW_FORMAT=REDUNDANT;
+ERROR HY000: Index column size too large. The maximum column size is 767 bytes.
+ALTER TABLE worklog5743 ROW_FORMAT=COMPACT;
+ERROR HY000: Index column size too large. The maximum column size is 767 bytes.
+ALTER TABLE worklog5743 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=16;
DROP TABLE worklog5743;
CREATE TABLE worklog5743 (
col_1_text TEXT (4000) , col_2_text TEXT (4000) ,
@@ -539,13 +544,15 @@ col_1_blob = REPEAT("c", 4000)
DROP TABLE worklog5743_key8;
CREATE TABLE worklog5743 (
col_1_varbinary VARBINARY (4000) , col_2_varchar VARCHAR (4000) ,
-col_3_text TEXT (4000), col_4_blob BLOB (4000),col_5_text TEXT (4000),
+col_3_text TEXT (4000), col_4_blob BLOB (4000), col_5_text TEXT (4000),
col_6_varchar VARCHAR (4000), col_7_binary BINARY (255)
) ROW_FORMAT=DYNAMIC, engine = innodb;
INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000),
REPEAT("a", 4000) , REPEAT("o", 4000), REPEAT("a", 4000),
REPEAT("a", 4000) , REPEAT("a", 255)
);
+CREATE INDEX prefix_idx1 ON worklog5743(col_1_varbinary (3072));
+CREATE INDEX prefix_idx2 ON worklog5743(col_2_varchar (3072));
INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000),
REPEAT("a", 4000) , REPEAT("o", 4000), REPEAT("a", 4000),
REPEAT("a", 4000) , REPEAT("a", 255)
@@ -570,6 +577,19 @@ SELECT col_1_varbinary = REPEAT("c", 4000) FROM worklog5743;
col_1_varbinary = REPEAT("c", 4000)
1
0
+INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000),
+REPEAT("a", 4000) , REPEAT("o", 4000), REPEAT("a", 4000),
+REPEAT("a", 4000) , REPEAT("a", 255)
+);
+CREATE INDEX prefix_idx3 ON worklog5743(col_3_text (3072));
+CREATE INDEX prefix_idx4 ON worklog5743(col_4_blob (3072));
+CREATE INDEX prefix_idx5 ON worklog5743(col_5_text (3072));
+UPDATE worklog5743 SET col_1_varbinary = REPEAT("c", 4000)
+WHERE col_1_varbinary = REPEAT("a", 4000) AND col_2_varchar = REPEAT("o", 4000);
+ERROR HY000: Undo log record is too big.
+SHOW WARNINGS;
+Level Code Message
+Error 1713 Undo log record is too big.
DROP TABLE worklog5743;
CREATE TABLE worklog5743 (
col_1_varbinary VARBINARY (4000) , col_2_varchar VARCHAR (4000) ,
@@ -607,6 +627,13 @@ REPEAT("a", 4000) , REPEAT("o", 4000), REPEAT("a", 4000),
REPEAT("a", 4000) , REPEAT("a", 255)
);
ROLLBACK;
+UPDATE worklog5743 SET col_1_varbinary = REPEAT("c", 4000)
+WHERE col_1_varbinary = REPEAT("a", 4000)
+AND col_2_varchar = REPEAT("o", 4000);
+ERROR HY000: Undo log record is too big.
+SHOW WARNINGS;
+Level Code Message
+Error 1713 Undo log record is too big.
SELECT col_1_varbinary = REPEAT("c", 4000) FROM worklog5743
WHERE col_1_varbinary = REPEAT("c", 4000) AND col_2_varchar = REPEAT("o", 4000);
col_1_varbinary = REPEAT("c", 4000)
@@ -873,6 +900,7 @@ SELECT COUNT(*) FROM worklog5743;
COUNT(*)
1
COMMIT;
+"Disconnect the connections 1 and 2"
DROP TABLE worklog5743;
CREATE TABLE worklog5743 (
col_1_varchar VARCHAR (4000) , col_2_varchar VARCHAR (4000) ,
@@ -1172,7 +1200,6 @@ DROP INDEX prefix_idx ON worklog5743;
CREATE INDEX prefix_idx ON worklog5743(col_1_varbinary (4000));
Warnings:
Warning 1071 Specified key was too long; max key length is 3072 bytes
-Warning 1071 Specified key was too long; max key length is 3072 bytes
INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000));
SELECT col_1_varbinary = REPEAT("a", 4000) FROM worklog5743;
col_1_varbinary = REPEAT("a", 4000)
@@ -1230,7 +1257,6 @@ DROP INDEX prefix_idx ON worklog5743;
CREATE INDEX prefix_idx ON worklog5743(col_1_text (4000));
Warnings:
Warning 1071 Specified key was too long; max key length is 3072 bytes
-Warning 1071 Specified key was too long; max key length is 3072 bytes
INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000));
SELECT col_1_text = REPEAT("a", 4000) FROM worklog5743;
col_1_text = REPEAT("a", 4000)
@@ -1288,7 +1314,7 @@ AND col_2_text = REPEAT("o", 4000);
col_1_text = REPEAT("c", 4000)
ALTER TABLE worklog5743 DROP PRIMARY KEY;
ALTER TABLE worklog5743 ADD PRIMARY KEY (col_1_text (950));
-ERROR HY000: Too big row
+ERROR 42000: Row size too large. The maximum row size for the used table type, not counting BLOBs, is 8126. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs
INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000));
SELECT col_1_text = REPEAT("a", 4000) FROM worklog5743;
col_1_text = REPEAT("a", 4000)
@@ -1343,11 +1369,22 @@ CREATE TABLE worklog5743 (
col_1_varchar VARCHAR (4000) , col_2_varchar VARCHAR (4000) ,
PRIMARY KEY (col_1_varchar(767))
) engine = innodb;
+INSERT INTO worklog5743 VALUES(REPEAT('a',4000),REPEAT('b',4000));
CREATE INDEX prefix_idx ON worklog5743(col_1_varchar (1000));
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 1
Warnings:
Warning 1071 Specified key was too long; max key length is 767 bytes
-Warning 1071 Specified key was too long; max key length is 767 bytes
+ALTER TABLE worklog5743 ROW_FORMAT=REDUNDANT;
+affected rows: 0
+info: Records: 0 Duplicates: 0 Warnings: 0
+SHOW CREATE TABLE worklog5743;
+Table Create Table
+worklog5743 CREATE TABLE `worklog5743` (
+ `col_1_varchar` varchar(4000) NOT NULL DEFAULT '',
+ `col_2_varchar` varchar(4000) DEFAULT NULL,
+ PRIMARY KEY (`col_1_varchar`(767)),
+ KEY `prefix_idx` (`col_1_varchar`(767))
+) ENGINE=InnoDB DEFAULT CHARSET=latin1 ROW_FORMAT=REDUNDANT
DROP TABLE worklog5743;
-SET GLOBAL innodb_file_format=Antelope;
-SET GLOBAL innodb_file_per_table=0;
-SET GLOBAL innodb_large_prefix=0;
+"Disconnect the connection 1"
diff --git a/mysql-test/suite/innodb/r/innodb_prefix_index_restart_server.result b/mysql-test/suite/innodb/r/innodb_prefix_index_restart_server.result
index abd4c243912..e1f6f8608e1 100644
--- a/mysql-test/suite/innodb/r/innodb_prefix_index_restart_server.result
+++ b/mysql-test/suite/innodb/r/innodb_prefix_index_restart_server.result
@@ -86,5 +86,5 @@ col_1_text = REPEAT("a", 3500) col_2_text = REPEAT("o", 3500)
1 1
DROP TABLE worklog5743;
SET GLOBAL innodb_file_format=Antelope;
-SET GLOBAL innodb_file_per_table=0;
+SET GLOBAL innodb_file_per_table=1;
SET GLOBAL innodb_large_prefix=0;
diff --git a/mysql-test/suite/innodb/t/innodb-autoinc-44030.test b/mysql-test/suite/innodb/t/innodb-autoinc-44030.test
index 07e9ca30fd6..fd90d5d92de 100644
--- a/mysql-test/suite/innodb/t/innodb-autoinc-44030.test
+++ b/mysql-test/suite/innodb/t/innodb-autoinc-44030.test
@@ -1,4 +1,4 @@
---source include/have_innodb.inc
+-- source include/have_innodb.inc
# embedded server ignores 'delayed', so skip this
-- source include/not_embedded.inc
@@ -27,8 +27,14 @@ SELECT * FROM t1;
# to be enabled. Also, see http://bugs.mysql.com/bug.php?id=47621.
#-- error ER_AUTOINC_READ_FAILED,1467
INSERT INTO t1 VALUES(null);
+# Before WL#5534, the following statement would copy the table,
+# and effectively set AUTO_INCREMENT to 4, because while copying
+# it would write values 1,2,3 to the column.
+# WL#5534 makes this an in-place ALTER, setting AUTO_INCREMENT=3 for real.
ALTER TABLE t1 AUTO_INCREMENT = 3;
SHOW CREATE TABLE t1;
+-- error ER_DUP_ENTRY
+INSERT INTO t1 VALUES(null);
INSERT INTO t1 VALUES(null);
SELECT * FROM t1;
DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/t/innodb-create-options.test b/mysql-test/suite/innodb/t/innodb-create-options.test
index 0f8e6f8dda2..c09d707ab11 100644
--- a/mysql-test/suite/innodb/t/innodb-create-options.test
+++ b/mysql-test/suite/innodb/t/innodb-create-options.test
@@ -55,8 +55,12 @@
# ignore all non-zero KEY_BLOCK_SIZEs.
#
# See InnoDB documentation page "SQL Compression Syntax Warnings and Errors"
+# This test case does not try to create tables with KEY_BLOCK_SIZE > 4
+# since they are rejected for InnoDB page sizes of 8k and 16k.
+# See innodb_16k and innodb_8k for those tests.
-SET storage_engine=InnoDB;
+-- source include/have_innodb.inc
+SET default_storage_engine=InnoDB;
--disable_query_log
# These values can change during the test
@@ -76,10 +80,8 @@ SET SESSION innodb_strict_mode = ON;
DROP TABLE IF EXISTS t1;
--echo # 'FIXED' is sent to InnoDB since it is used by MyISAM.
--echo # But it is an invalid mode in InnoDB
---replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
CREATE TABLE t1 ( i INT ) ROW_FORMAT=FIXED;
---replace_regex / - .*[0-9]*)/)/
SHOW WARNINGS;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=0;
SHOW WARNINGS;
@@ -96,10 +98,8 @@ SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE
ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=0;
SHOW WARNINGS;
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
---error ER_CANT_CREATE_TABLE
+--error ER_ILLEGAL_HA_CREATE_OPTION
ALTER TABLE t1 ROW_FORMAT=FIXED KEY_BLOCK_SIZE=0;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
SHOW WARNINGS;
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
@@ -108,29 +108,23 @@ SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE
--echo # Test 2) StrictMode=ON, CREATE with each ROW_FORMAT & a valid non-zero KEY_BLOCK_SIZE
--echo # KEY_BLOCK_SIZE is incompatible with COMPACT, REDUNDANT, & DYNAMIC
DROP TABLE IF EXISTS t1;
---replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPACT KEY_BLOCK_SIZE=1;
---replace_regex / - .*[0-9]*)/)/
SHOW WARNINGS;
---replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
CREATE TABLE t1 ( i INT ) ROW_FORMAT=REDUNDANT KEY_BLOCK_SIZE=2;
---replace_regex / - .*[0-9]*)/)/
SHOW WARNINGS;
---replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
CREATE TABLE t1 ( i INT ) ROW_FORMAT=DYNAMIC KEY_BLOCK_SIZE=4;
---replace_regex / - .*[0-9]*)/)/
SHOW WARNINGS;
-CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8;
+CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2;
SHOW WARNINGS;
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
ALTER TABLE t1 ADD COLUMN f1 INT;
SHOW WARNINGS;
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
DROP TABLE IF EXISTS t1;
-CREATE TABLE t1 ( i INT ) ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=16;
+CREATE TABLE t1 ( i INT ) ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=1;
SHOW WARNINGS;
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
ALTER TABLE t1 ADD COLUMN f1 INT;
@@ -141,27 +135,19 @@ SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE
--echo # Test 3) StrictMode=ON, ALTER with each ROW_FORMAT & a valid non-zero KEY_BLOCK_SIZE
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT );
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
---error ER_CANT_CREATE_TABLE
+--error ER_ILLEGAL_HA_CREATE_OPTION
ALTER TABLE t1 ROW_FORMAT=FIXED KEY_BLOCK_SIZE=1;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
SHOW WARNINGS;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
---error ER_CANT_CREATE_TABLE
+--error ER_ILLEGAL_HA_CREATE_OPTION
ALTER TABLE t1 ROW_FORMAT=COMPACT KEY_BLOCK_SIZE=2;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
SHOW WARNINGS;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
---error ER_CANT_CREATE_TABLE
+--error ER_ILLEGAL_HA_CREATE_OPTION
ALTER TABLE t1 ROW_FORMAT=DYNAMIC KEY_BLOCK_SIZE=4;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
SHOW WARNINGS;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
---error ER_CANT_CREATE_TABLE
-ALTER TABLE t1 ROW_FORMAT=REDUNDANT KEY_BLOCK_SIZE=8;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
+--error ER_ILLEGAL_HA_CREATE_OPTION
+ALTER TABLE t1 ROW_FORMAT=REDUNDANT KEY_BLOCK_SIZE=2;
SHOW WARNINGS;
-ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=16;
+ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=1;
SHOW WARNINGS;
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
ALTER TABLE t1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1;
@@ -173,31 +159,25 @@ SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPACT;
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
---error ER_CANT_CREATE_TABLE
+--error ER_ILLEGAL_HA_CREATE_OPTION
ALTER TABLE t1 KEY_BLOCK_SIZE=2;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
SHOW WARNINGS;
ALTER TABLE t1 ROW_FORMAT=REDUNDANT;
SHOW WARNINGS;
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
---error ER_CANT_CREATE_TABLE
+--error ER_ILLEGAL_HA_CREATE_OPTION
ALTER TABLE t1 KEY_BLOCK_SIZE=4;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
SHOW WARNINGS;
ALTER TABLE t1 ROW_FORMAT=DYNAMIC;
SHOW WARNINGS;
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
---error ER_CANT_CREATE_TABLE
-ALTER TABLE t1 KEY_BLOCK_SIZE=8;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
+--error ER_ILLEGAL_HA_CREATE_OPTION
+ALTER TABLE t1 KEY_BLOCK_SIZE=2;
SHOW WARNINGS;
ALTER TABLE t1 ROW_FORMAT=COMPRESSED;
SHOW WARNINGS;
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
-ALTER TABLE t1 KEY_BLOCK_SIZE=16;
+ALTER TABLE t1 KEY_BLOCK_SIZE=1;
SHOW WARNINGS;
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
DROP TABLE IF EXISTS t1;
@@ -213,20 +193,14 @@ CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=2;
SHOW CREATE TABLE t1;
ALTER TABLE t1 ADD COLUMN f1 INT;
SHOW CREATE TABLE t1;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
---error ER_CANT_CREATE_TABLE
+--error ER_ILLEGAL_HA_CREATE_OPTION
ALTER TABLE t1 ROW_FORMAT=COMPACT;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
SHOW WARNINGS;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
---error ER_CANT_CREATE_TABLE
+--error ER_ILLEGAL_HA_CREATE_OPTION
ALTER TABLE t1 ROW_FORMAT=REDUNDANT;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
SHOW WARNINGS;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
---error ER_CANT_CREATE_TABLE
+--error ER_ILLEGAL_HA_CREATE_OPTION
ALTER TABLE t1 ROW_FORMAT=DYNAMIC;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
SHOW WARNINGS;
ALTER TABLE t1 ROW_FORMAT=COMPRESSED;
SHOW WARNINGS;
@@ -240,10 +214,8 @@ SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE
--echo # Test 6) StrictMode=ON, CREATE with an invalid KEY_BLOCK_SIZE.
DROP TABLE IF EXISTS t1;
---replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=9;
---replace_regex / - .*[0-9]*)/)/
SHOW WARNINGS;
--echo # Test 7) StrictMode=ON, Make sure ROW_FORMAT= COMPRESSED & DYNAMIC and
@@ -251,20 +223,14 @@ SHOW WARNINGS;
--echo # and that they can be set to default values during strict mode.
SET GLOBAL innodb_file_format=Antelope;
DROP TABLE IF EXISTS t1;
---replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=4;
---replace_regex / - .*[0-9]*)/)/
SHOW WARNINGS;
---replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED;
---replace_regex / - .*[0-9]*)/)/
SHOW WARNINGS;
---replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
CREATE TABLE t1 ( i INT ) ROW_FORMAT=DYNAMIC;
---replace_regex / - .*[0-9]*)/)/
SHOW WARNINGS;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=REDUNDANT;
SHOW WARNINGS;
@@ -276,29 +242,21 @@ SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=DEFAULT;
SHOW WARNINGS;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
---error ER_CANT_CREATE_TABLE
-ALTER TABLE t1 KEY_BLOCK_SIZE=8;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
+--error ER_ILLEGAL_HA_CREATE_OPTION
+ALTER TABLE t1 KEY_BLOCK_SIZE=2;
SHOW WARNINGS;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
---error ER_CANT_CREATE_TABLE
+--error ER_ILLEGAL_HA_CREATE_OPTION
ALTER TABLE t1 ROW_FORMAT=COMPRESSED;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
SHOW WARNINGS;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
---error ER_CANT_CREATE_TABLE
+--error ER_ILLEGAL_HA_CREATE_OPTION
ALTER TABLE t1 ROW_FORMAT=DYNAMIC;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
SHOW WARNINGS;
SET GLOBAL innodb_file_format=Barracuda;
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4;
SET GLOBAL innodb_file_format=Antelope;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
---error ER_CANT_CREATE_TABLE
ALTER TABLE t1 ADD COLUMN f1 INT;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
+SHOW CREATE TABLE t1;
SHOW WARNINGS;
ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=0;
SHOW WARNINGS;
@@ -312,20 +270,14 @@ SET GLOBAL innodb_file_format=Barracuda;
--echo # values during strict mode.
SET GLOBAL innodb_file_per_table=OFF;
DROP TABLE IF EXISTS t1;
---replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
-CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=16;
---replace_regex / - .*[0-9]*)/)/
+CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=1;
SHOW WARNINGS;
---replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED;
---replace_regex / - .*[0-9]*)/)/
SHOW WARNINGS;
---replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
CREATE TABLE t1 ( i INT ) ROW_FORMAT=DYNAMIC;
---replace_regex / - .*[0-9]*)/)/
SHOW WARNINGS;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=REDUNDANT;
SHOW WARNINGS;
@@ -337,20 +289,14 @@ SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=DEFAULT;
SHOW WARNINGS;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
---error ER_CANT_CREATE_TABLE
+--error ER_ILLEGAL_HA_CREATE_OPTION
ALTER TABLE t1 KEY_BLOCK_SIZE=1;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
SHOW WARNINGS;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
---error ER_CANT_CREATE_TABLE
+--error ER_ILLEGAL_HA_CREATE_OPTION
ALTER TABLE t1 ROW_FORMAT=COMPRESSED;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
SHOW WARNINGS;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
---error ER_CANT_CREATE_TABLE
+--error ER_ILLEGAL_HA_CREATE_OPTION
ALTER TABLE t1 ROW_FORMAT=DYNAMIC;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
SHOW WARNINGS;
ALTER TABLE t1 ROW_FORMAT=COMPACT;
SHOW WARNINGS;
@@ -365,10 +311,7 @@ SET GLOBAL innodb_file_per_table=ON;
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4;
SET GLOBAL innodb_file_per_table=OFF;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
---error ER_CANT_CREATE_TABLE
ALTER TABLE t1 ADD COLUMN f1 INT;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
SHOW WARNINGS;
ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=0;
SHOW WARNINGS;
@@ -422,14 +365,14 @@ CREATE TABLE t1 ( i INT ) ROW_FORMAT=DYNAMIC KEY_BLOCK_SIZE=4;
SHOW WARNINGS;
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
DROP TABLE IF EXISTS t1;
-CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=8;
+CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2;
SHOW WARNINGS;
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
ALTER TABLE t1 ADD COLUMN f1 INT;
SHOW WARNINGS;
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
DROP TABLE IF EXISTS t1;
-CREATE TABLE t1 ( i INT ) ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=16;
+CREATE TABLE t1 ( i INT ) ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=1;
SHOW WARNINGS;
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
ALTER TABLE t1 ADD COLUMN f1 INT;
@@ -455,12 +398,12 @@ SHOW WARNINGS;
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT );
-ALTER TABLE t1 ROW_FORMAT=REDUNDANT KEY_BLOCK_SIZE=8;
+ALTER TABLE t1 ROW_FORMAT=REDUNDANT KEY_BLOCK_SIZE=2;
SHOW WARNINGS;
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT );
-ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=16;
+ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=1;
SHOW WARNINGS;
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
ALTER TABLE t1 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1;
@@ -489,14 +432,14 @@ SHOW WARNINGS;
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT ) ROW_FORMAT=COMPACT;
-ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=8;
+ALTER TABLE t1 ROW_FORMAT=DEFAULT KEY_BLOCK_SIZE=2;
SHOW WARNINGS;
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
--echo # Test 13) StrictMode=OFF, CREATE with a valid KEY_BLOCK_SIZE
--echo # ALTER with each ROW_FORMAT
DROP TABLE IF EXISTS t1;
-CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=16;
+CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=1;
SHOW WARNINGS;
SHOW CREATE TABLE t1;
ALTER TABLE t1 ADD COLUMN f1 INT;
@@ -521,7 +464,8 @@ ALTER TABLE t1 ROW_FORMAT=COMPACT;
SHOW WARNINGS;
SELECT TABLE_NAME,ROW_FORMAT,CREATE_OPTIONS FROM information_schema.tables WHERE TABLE_NAME = 't1';
---echo # Test 14) StrictMode=OFF, CREATE with an invalid KEY_BLOCK_SIZE, it defaults to 8
+--echo # Test 14) StrictMode=OFF, CREATE with an invalid KEY_BLOCK_SIZE,
+--echo # it defaults to half of the page size.
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 ( i INT ) KEY_BLOCK_SIZE=15;
SHOW WARNINGS;
diff --git a/mysql-test/suite/innodb/t/innodb-index.test b/mysql-test/suite/innodb/t/innodb-index.test
index a8ef524c3e9..b9b98a98f58 100644
--- a/mysql-test/suite/innodb/t/innodb-index.test
+++ b/mysql-test/suite/innodb/t/innodb-index.test
@@ -7,127 +7,16 @@ let $format=`select @@innodb_file_format`;
set global innodb_file_per_table=on;
set global innodb_file_format='Barracuda';
-# Bug #12429576 - Test an assertion failure on purge.
-CREATE TABLE t1_purge (
-A INT,
-B BLOB, C BLOB, D BLOB, E BLOB,
-F BLOB, G BLOB, H BLOB,
-PRIMARY KEY (B(767), C(767), D(767), E(767), A),
-INDEX (A)
-) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
-
-INSERT INTO t1_purge VALUES (1,
-REPEAT('b', 766), REPEAT('c', 766), REPEAT('d', 766), REPEAT('e', 766),
-REPEAT('f', 766), REPEAT('g', 766), REPEAT('h', 766));
-
-CREATE TABLE t2_purge (
-A INT PRIMARY KEY,
-B BLOB, C BLOB, D BLOB, E BLOB,
-F BLOB, G BLOB, H BLOB, I BLOB,
-J BLOB, K BLOB, L BLOB,
-INDEX (B(767))) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
-
-INSERT INTO t2_purge VALUES (1,
-REPEAT('b', 766), REPEAT('c', 766), REPEAT('d', 766), REPEAT('e', 766),
-REPEAT('f', 766), REPEAT('g', 766), REPEAT('h', 766), REPEAT('i', 766),
-REPEAT('j', 766), REPEAT('k', 766), REPEAT('l', 766));
-
-CREATE TABLE t3_purge (
-A INT,
-B VARCHAR(800), C VARCHAR(800), D VARCHAR(800), E VARCHAR(800),
-F VARCHAR(800), G VARCHAR(800), H VARCHAR(800),
-PRIMARY KEY (B(767), C(767), D(767), E(767), A),
-INDEX (A)
-) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
-
-INSERT INTO t3_purge SELECT * FROM t1_purge;
-
-CREATE TABLE t4_purge (
-A INT PRIMARY KEY,
-B VARCHAR(800), C VARCHAR(800), D VARCHAR(800), E VARCHAR(800),
-F VARCHAR(800), G VARCHAR(800), H VARCHAR(800), I VARCHAR(800),
-J VARCHAR(800), K VARCHAR(800), L VARCHAR(800),
-INDEX (B(767))) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
-
-INSERT INTO t4_purge SELECT * FROM t2_purge;
-
-# This would trigger the failure (Bug #12429576)
-# if purge gets a chance to run before DROP TABLE t1_purge, ....
-DELETE FROM t1_purge;
-DELETE FROM t2_purge;
-DELETE FROM t3_purge;
-DELETE FROM t4_purge;
-# Instead of doing a --sleep 10, wait until the rest of the tests in
-# this file complete before dropping the tables. By then, the purge thread
-# will have delt with the updates above.
-
-# Bug#12637786 - Bad assert by purge thread for records with external data
-# used in secondary indexes.
-SET @r=REPEAT('a',500);
-CREATE TABLE t12637786(a INT,
- v1 VARCHAR(500), v2 VARCHAR(500), v3 VARCHAR(500),
- v4 VARCHAR(500), v5 VARCHAR(500), v6 VARCHAR(500),
- v7 VARCHAR(500), v8 VARCHAR(500), v9 VARCHAR(500),
- v10 VARCHAR(500), v11 VARCHAR(500), v12 VARCHAR(500),
- v13 VARCHAR(500), v14 VARCHAR(500), v15 VARCHAR(500),
- v16 VARCHAR(500), v17 VARCHAR(500), v18 VARCHAR(500)
-) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
-CREATE INDEX idx1 ON t12637786(a,v1);
-INSERT INTO t12637786 VALUES(9,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r,@r);
-UPDATE t12637786 SET a=1000;
-DELETE FROM t12637786;
-# We need to activate the purge thread at this point to make sure it does not
-# assert and is able to clean up the old versions of secondary index entries.
-# But instead of doing a --sleep 10, wait until the rest of the tests in
-# this file complete before dropping the table. By then, the purge thread
-# will have delt with the updates above.
-
-# Bug#12963823 - Test that the purge thread does not crash when
-# the number of indexes has changed since the UNDO record was logged.
-create table t12963823(a blob,b blob,c blob,d blob,e blob,f blob,g blob,h blob,
- i blob,j blob,k blob,l blob,m blob,n blob,o blob,p blob)
- engine=innodb row_format=dynamic;
-SET @r = repeat('a', 767);
-insert into t12963823 values (@r,@r,@r,@r, @r,@r,@r,@r, @r,@r,@r,@r, @r,@r,@r,@r);
-create index ndx_a on t12963823 (a(500));
-create index ndx_b on t12963823 (b(500));
-create index ndx_c on t12963823 (c(500));
-create index ndx_d on t12963823 (d(500));
-create index ndx_e on t12963823 (e(500));
-create index ndx_f on t12963823 (f(500));
-create index ndx_k on t12963823 (k(500));
-create index ndx_l on t12963823 (l(500));
-
-SET @r = repeat('b', 500);
-update t12963823 set a=@r,b=@r,c=@r,d=@r;
-update t12963823 set e=@r,f=@r,g=@r,h=@r;
-update t12963823 set i=@r,j=@r,k=@r,l=@r;
-update t12963823 set m=@r,n=@r,o=@r,p=@r;
-alter table t12963823 drop index ndx_a;
-alter table t12963823 drop index ndx_b;
-create index ndx_g on t12963823 (g(500));
-create index ndx_h on t12963823 (h(500));
-create index ndx_i on t12963823 (i(500));
-create index ndx_j on t12963823 (j(500));
-create index ndx_m on t12963823 (m(500));
-create index ndx_n on t12963823 (n(500));
-create index ndx_o on t12963823 (o(500));
-create index ndx_p on t12963823 (p(500));
-show create table t12963823;
-# We need to activate the purge thread at this point to see if it crashes
-# but instead of doing a --sleep 10, wait until the rest of the tests in
-# this file complete before dropping the table. By then, the purge thread
-# will have delt with the updates above.
+SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS;
# Bug#13654923 BOGUS DEBUG ASSERTION IN INDEX CREATION FOR ZERO-LENGTH RECORD
create table t1(a varchar(2) primary key) engine=innodb;
insert into t1 values('');
+--enable_info
create index t1a1 on t1(a(1));
+--disable_info
drop table t1;
-eval set global innodb_file_per_table=$per_table;
-eval set global innodb_file_format=$format;
-
create table t1(a int not null, b int, c char(10) not null, d varchar(20)) engine = innodb;
insert into t1 values (5,5,'oo','oo'),(4,4,'tr','tr'),(3,4,'ad','ad'),(2,3,'ak','ak');
commit;
@@ -135,19 +24,30 @@ commit;
alter table t1 add index b (b), add index b (b);
--error ER_DUP_FIELDNAME
alter table t1 add index (b,b);
+--enable_info
alter table t1 add index d2 (d);
+--disable_info
show create table t1;
+-- disable_result_log
+analyze table t1;
+-- enable_result_log
explain select * from t1 force index(d2) order by d;
select * from t1 force index (d2) order by d;
--error ER_DUP_ENTRY
alter table t1 add unique index (b);
show create table t1;
+--enable_info
alter table t1 add index (b);
+--disable_info
show create table t1;
+--enable_info
alter table t1 add unique index (c), add index (d);
+--disable_info
show create table t1;
+analyze table t1;
explain select * from t1 force index(c) order by c;
+--enable_info
alter table t1 add primary key (a), drop index c;
show create table t1;
--error ER_MULTIPLE_PRI_KEY
@@ -155,16 +55,23 @@ alter table t1 add primary key (c);
--error ER_DUP_ENTRY
alter table t1 drop primary key, add primary key (b);
create unique index c on t1 (c);
+--disable_info
show create table t1;
+analyze table t1;
explain select * from t1 force index(c) order by c;
select * from t1 force index(c) order by c;
+--enable_info
alter table t1 drop index b, add index (b);
+--disable_info
show create table t1;
insert into t1 values(6,1,'ggg','ggg');
select * from t1;
select * from t1 force index(b) order by b;
select * from t1 force index(c) order by c;
select * from t1 force index(d) order by d;
+-- disable_result_log
+analyze table t1;
+-- enable_result_log
explain select * from t1 force index(b) order by b;
explain select * from t1 force index(c) order by c;
explain select * from t1 force index(d) order by d;
@@ -174,11 +81,16 @@ drop table t1;
create table t1(a int not null, b int, c char(10), d varchar(20), primary key (a)) engine = innodb;
insert into t1 values (1,1,'ab','ab'),(2,2,'ac','ac'),(3,3,'ad','ad'),(4,4,'afe','afe');
commit;
+--enable_info
alter table t1 add index (c(2));
show create table t1;
alter table t1 add unique index (d(10));
show create table t1;
+--disable_info
insert into t1 values(5,1,'ggg','ggg');
+-- disable_result_log
+analyze table t1;
+-- enable_result_log
select * from t1;
select * from t1 force index(c) order by c;
select * from t1 force index(d) order by d;
@@ -186,10 +98,15 @@ explain select * from t1 order by b;
explain select * from t1 force index(c) order by c;
explain select * from t1 force index(d) order by d;
show create table t1;
+--enable_info
alter table t1 drop index d;
+--disable_info
insert into t1 values(8,9,'fff','fff');
select * from t1;
select * from t1 force index(c) order by c;
+-- disable_result_log
+analyze table t1;
+-- enable_result_log
explain select * from t1 order by b;
explain select * from t1 force index(c) order by c;
explain select * from t1 order by d;
@@ -199,23 +116,38 @@ drop table t1;
create table t1(a int not null, b int, c char(10), d varchar(20), primary key (a)) engine = innodb;
insert into t1 values (1,1,'ab','ab'),(2,2,'ac','ac'),(3,2,'ad','ad'),(4,4,'afe','afe');
commit;
+--enable_info
alter table t1 add unique index (b,c);
+--disable_info
insert into t1 values(8,9,'fff','fff');
select * from t1;
select * from t1 force index(b) order by b;
+-- disable_result_log
+analyze table t1;
+-- enable_result_log
explain select * from t1 force index(b) order by b;
show create table t1;
+--enable_info
alter table t1 add index (b,c);
+--disable_info
insert into t1 values(11,11,'kkk','kkk');
select * from t1;
select * from t1 force index(b) order by b;
+-- disable_result_log
+analyze table t1;
+-- enable_result_log
explain select * from t1 force index(b) order by b;
show create table t1;
+--enable_info
alter table t1 add unique index (c,d);
+--disable_info
insert into t1 values(13,13,'yyy','aaa');
select * from t1;
select * from t1 force index(b) order by b;
select * from t1 force index(c) order by c;
+-- disable_result_log
+analyze table t1;
+-- enable_result_log
explain select * from t1 force index(b) order by b;
explain select * from t1 force index(c) order by c;
show create table t1;
@@ -224,9 +156,9 @@ drop table t1;
create table t1(a int not null, b int not null, c int, primary key (a), key (b)) engine = innodb;
create table t3(a int not null, c int not null, d int, primary key (a), key (c)) engine = innodb;
create table t4(a int not null, d int not null, e int, primary key (a), key (d)) engine = innodb;
-create table t2(a int not null, b int not null, c int not null, d int not null, e int,
-foreign key (b) references t1(b) on delete cascade,
-foreign key (c) references t3(c), foreign key (d) references t4(d))
+create table t2(a int not null, b int, c int, d int, e int,
+foreign key (b) references t1(b) on delete set null,
+foreign key (c) references t3(c), foreign key (d) references t4(d) on update set null)
engine = innodb;
--error ER_DROP_INDEX_FK
alter table t1 drop index b;
@@ -238,38 +170,80 @@ alter table t4 drop index d;
alter table t2 drop index b;
--error ER_DROP_INDEX_FK
alter table t2 drop index b, drop index c, drop index d;
+--error ER_FK_COLUMN_CANNOT_CHANGE
+alter table t2 MODIFY b INT NOT NULL, ALGORITHM=COPY;
+# NULL -> NOT NULL only allowed INPLACE if strict sql_mode is on.
+set @old_sql_mode = @@sql_mode;
+set @@sql_mode = 'STRICT_TRANS_TABLES';
+--error ER_FK_COLUMN_NOT_NULL
+alter table t2 MODIFY b INT NOT NULL, ALGORITHM=INPLACE;
+set @@sql_mode = @old_sql_mode;
+
+SET FOREIGN_KEY_CHECKS=0;
+--error ER_FK_COLUMN_CANNOT_DROP
+alter table t2 DROP COLUMN b, ALGORITHM=COPY;
+--error ER_FK_COLUMN_CANNOT_DROP
+alter table t2 DROP COLUMN b;
+--error ER_FK_COLUMN_CANNOT_DROP_CHILD
+alter table t1 DROP COLUMN b, ALGORITHM=COPY;
+--error ER_FK_COLUMN_CANNOT_DROP_CHILD
+alter table t1 DROP COLUMN b;
+SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS;
+
+--enable_info
# Apparently, the following makes mysql_alter_table() drop index d.
create unique index dc on t2 (d,c);
create index dc on t1 (b,c);
# This should preserve the foreign key constraints.
-alter table t2 add primary key (a);
+--disable_info
+# NULL -> NOT NULL only allowed INPLACE if strict sql_mode is on.
+# And adding a PRIMARY KEY will also add NOT NULL implicitly!
+set @@sql_mode = 'STRICT_TRANS_TABLES';
+--enable_info
+--error ER_FK_COLUMN_NOT_NULL
+alter table t2 add primary key (alpha), change a alpha int,
+change b beta int not null, change c charlie int not null;
+--error ER_FK_COLUMN_NOT_NULL
+alter table t2 add primary key (alpha), change a alpha int,
+change c charlie int not null, change d delta int not null;
+alter table t2 add primary key (alpha), change a alpha int,
+change b beta int, modify c int not null;
+--disable_info
+set @@sql_mode = @old_sql_mode;
insert into t1 values (1,1,1);
insert into t3 values (1,1,1);
insert into t4 values (1,1,1);
insert into t2 values (1,1,1,1,1);
commit;
+--enable_info
alter table t4 add constraint dc foreign key (a) references t1(a);
+--disable_info
show create table t4;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
+# mysqltest first does replace_regex, then replace_result
+--replace_regex /#sql-[0-9a-f_]*`/#sql-temporary`/
+# Embedded server doesn't chdir to data directory
+--replace_result $MYSQLD_DATADIR ./ master-data/ ''
# a foreign key 'test/dc' already exists
--error ER_CANT_CREATE_TABLE
alter table t3 add constraint dc foreign key (a) references t1(a);
+SET FOREIGN_KEY_CHECKS=0;
+--error ER_FK_FAIL_ADD_SYSTEM
+alter table t3 add constraint dc foreign key (a) references t1(a);
+SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS;
show create table t3;
-# this should be fixed by MySQL (see Bug #51451)
---error ER_WRONG_NAME_FOR_INDEX
-alter table t2 drop index b, add index (b);
+--enable_info
+alter table t2 drop index b, add index (beta);
+--disable_info
show create table t2;
--error ER_ROW_IS_REFERENCED_2
delete from t1;
--error ER_CANT_DROP_FIELD_OR_KEY
drop index dc on t4;
-# there is no foreign key dc on t3
---replace_regex /'[^']*test\/#sql2-[0-9a-f-]*'/'#sql2-temporary'/
-# Embedded server doesn't chdir to data directory
---replace_result $MYSQLD_DATADIR ./ master-data/ ''
---error ER_ERROR_ON_RENAME
+--enable_info
+--error ER_CANT_DROP_FIELD_OR_KEY
alter table t3 drop foreign key dc;
alter table t4 drop foreign key dc;
+--disable_info
select * from t2;
delete from t1;
select * from t2;
@@ -311,10 +285,15 @@ drop table t1;
create table t1(a int not null, b int not null, c int, primary key (a), key(c)) engine=innodb;
insert into t1 values (5,1,5),(4,2,4),(3,3,3),(2,4,2),(1,5,1);
+--enable_info
alter table t1 add unique index (b);
+--disable_info
insert into t1 values (10,20,20),(11,19,19),(12,18,18),(13,17,17);
show create table t1;
check table t1;
+-- disable_result_log
+analyze table t1;
+-- enable_result_log
explain select * from t1 force index(c) order by c;
explain select * from t1 order by a;
explain select * from t1 force index(b) order by b;
@@ -325,11 +304,14 @@ drop table t1;
create table t1(a int not null, b int not null) engine=innodb;
insert into t1 values (1,1);
+--enable_info
alter table t1 add primary key(b);
+--disable_info
insert into t1 values (2,2);
show create table t1;
check table t1;
select * from t1;
+analyze table t1;
explain select * from t1;
explain select * from t1 order by a;
explain select * from t1 order by b;
@@ -338,205 +320,19 @@ drop table t1;
create table t1(a int not null) engine=innodb;
insert into t1 values (1);
+--enable_info
alter table t1 add primary key(a);
+--disable_info
insert into t1 values (2);
show create table t1;
check table t1;
commit;
select * from t1;
+analyze table t1;
explain select * from t1;
explain select * from t1 order by a;
drop table t1;
-create table t2(d varchar(17) primary key) engine=innodb default charset=utf8;
-create table t3(a int primary key) engine=innodb;
-
-insert into t3 values(22),(44),(33),(55),(66);
-
-insert into t2 values ('jejdkrun87'),('adfd72nh9k'),
-('adfdpplkeock'),('adfdijnmnb78k'),('adfdijn0loKNHJik');
-
-create table t1(a int, b blob, c text, d text not null)
-engine=innodb default charset = utf8;
-
-# r2667 The following test is disabled because MySQL behavior changed.
-# r2667 The test was added with this comment:
-# r2667
-# r2667 ------------------------------------------------------------------------
-# r2667 r1699 | marko | 2007-08-10 19:53:19 +0300 (Fri, 10 Aug 2007) | 5 lines
-# r2667
-# r2667 branches/zip: Add changes that accidentally omitted from r1698:
-# r2667
-# r2667 innodb-index.test, innodb-index.result: Add a test for creating
-# r2667 a PRIMARY KEY on a column that contains a NULL value.
-# r2667 ------------------------------------------------------------------------
-# r2667
-# r2667 but in BZR-r2667:
-# r2667 http://bazaar.launchpad.net/~mysql/mysql-server/mysql-5.1/revision/davi%40mysql.com-20080617141221-8yre8ys9j4uw3xx5?start_revid=joerg%40mysql.com-20080630105418-7qoe5ehomgrcdb89
-# r2667 MySQL changed the behavior to do full table copy when creating PRIMARY INDEX
-# r2667 on a non-NULL column instead of calling ::add_index() which would fail (and
-# r2667 this is what we were testing here). Before r2667 the code execution path was
-# r2667 like this (when adding PRIMARY INDEX on a non-NULL column with ALTER TABLE):
-# r2667
-# r2667 mysql_alter_table()
-# r2667 compare_tables() // would return ALTER_TABLE_INDEX_CHANGED
-# r2667 ::add_index() // would fail with "primary index cannot contain NULL"
-# r2667
-# r2667 after r2667 the code execution path is the following:
-# r2667
-# r2667 mysql_alter_table()
-# r2667 compare_tables() // returns ALTER_TABLE_DATA_CHANGED
-# r2667 full copy is done, without calling ::add_index()
-# r2667
-# r2667 To enable, remove "# r2667: " below.
-# r2667
-# r2667: insert into t1 values (null,null,null,'null');
-insert into t1
-select a,left(repeat(d,100*a),65535),repeat(d,20*a),d from t2,t3;
-drop table t2, t3;
-select count(*) from t1 where a=44;
-select a,
-length(b),b=left(repeat(d,100*a),65535),length(c),c=repeat(d,20*a),d from t1;
-# r2667: --error ER_PRIMARY_CANT_HAVE_NULL
-# r2667: alter table t1 add primary key (a), add key (b(20));
-# r2667: delete from t1 where d='null';
---error ER_DUP_ENTRY
-alter table t1 add primary key (a), add key (b(20));
-delete from t1 where a%2;
-check table t1;
-alter table t1 add primary key (a,b(255),c(255)), add key (b(767));
-select count(*) from t1 where a=44;
-select a,
-length(b),b=left(repeat(d,100*a),65535),length(c),c=repeat(d,20*a),d from t1;
-show create table t1;
-check table t1;
-explain select * from t1 where b like 'adfd%';
-
-# The following tests are disabled because of the introduced timeouts for
-# metadata locks at the MySQL level as part of the fix for
-# Bug#45225 Locking: hang if drop table with no timeout
-# The following commands now play with MySQL metadata locks instead of
-# InnoDB locks
-# start disabled45225_1
-##
-## Test locking
-##
-#
-#create table t2(a int, b varchar(255), primary key(a,b)) engine=innodb;
-#insert into t2 select a,left(b,255) from t1;
-#drop table t1;
-#rename table t2 to t1;
-#
-#connect (a,localhost,root,,);
-#connect (b,localhost,root,,);
-#connection a;
-#set innodb_lock_wait_timeout=1;
-#begin;
-## Obtain an IX lock on the table
-#select a from t1 limit 1 for update;
-#connection b;
-#set innodb_lock_wait_timeout=1;
-## This would require an S lock on the table, conflicting with the IX lock.
-#--error ER_LOCK_WAIT_TIMEOUT
-#create index t1ba on t1 (b,a);
-#connection a;
-#commit;
-#begin;
-## Obtain an IS lock on the table
-#select a from t1 limit 1 lock in share mode;
-#connection b;
-## This will require an S lock on the table. No conflict with the IS lock.
-#create index t1ba on t1 (b,a);
-## This would require an X lock on the table, conflicting with the IS lock.
-#--error ER_LOCK_WAIT_TIMEOUT
-#drop index t1ba on t1;
-#connection a;
-#commit;
-#explain select a from t1 order by b;
-#--send
-#select a,sleep(2+a/100) from t1 order by b limit 3;
-#
-## The following DROP INDEX will succeed, altough the SELECT above has
-## opened a read view. However, during the execution of the SELECT,
-## MySQL should hold a table lock that should block the execution
-## of the DROP INDEX below.
-#
-#connection b;
-#select sleep(1);
-#drop index t1ba on t1;
-#
-## After the index was dropped, subsequent SELECTs will use the same
-## read view, but they should not be accessing the dropped index any more.
-#
-#connection a;
-#reap;
-#explain select a from t1 order by b;
-#select a from t1 order by b limit 3;
-#commit;
-#
-#connection default;
-#disconnect a;
-#disconnect b;
-#
-# end disabled45225_1
-drop table t1;
-
-set global innodb_file_per_table=on;
-set global innodb_file_format='Barracuda';
-# Test creating a table that could lead to undo log overflow.
-# In the undo log, we write a 768-byte prefix (REC_MAX_INDEX_COL_LEN)
-# of each externally stored column that appears as a column prefix in an index.
-# For this test case, it would suffice to write 1 byte, though.
-create table t1(a blob,b blob,c blob,d blob,e blob,f blob,g blob,h blob,
- i blob,j blob,k blob,l blob,m blob,n blob,o blob,p blob,
- q blob,r blob,s blob,t blob,u blob)
- engine=innodb row_format=dynamic;
-create index t1a on t1 (a(767));
-create index t1b on t1 (b(767));
-create index t1c on t1 (c(767));
-create index t1d on t1 (d(767));
-create index t1e on t1 (e(767));
-create index t1f on t1 (f(767));
-create index t1g on t1 (g(767));
-create index t1h on t1 (h(767));
-create index t1i on t1 (i(767));
-create index t1j on t1 (j(767));
-create index t1k on t1 (k(767));
-create index t1l on t1 (l(767));
-create index t1m on t1 (m(767));
-create index t1n on t1 (n(767));
-create index t1o on t1 (o(767));
-create index t1p on t1 (p(767));
-create index t1q on t1 (q(767));
-create index t1r on t1 (r(767));
-create index t1s on t1 (s(767));
-create index t1t on t1 (t(767));
---error 139
-create index t1u on t1 (u(767));
---error 139
-create index t1ut on t1 (u(767), t(767));
-create index t1st on t1 (s(767), t(767));
-show create table t1;
---error 139
-create index t1u on t1 (u(767));
-alter table t1 row_format=compact;
-create index t1u on t1 (u(767));
-
-drop table t1;
-
-# Bug#12547647 UPDATE LOGGING COULD EXCEED LOG PAGE SIZE
-CREATE TABLE bug12547647(
-a INT NOT NULL, b BLOB NOT NULL, c TEXT,
-PRIMARY KEY (b(10), a), INDEX (c(767)), INDEX(b(767))
-) ENGINE=InnoDB ROW_FORMAT=DYNAMIC;
-
-INSERT INTO bug12547647 VALUES (5,repeat('khdfo5AlOq',1900),repeat('g',7751));
-COMMIT;
-# The following used to cause infinite undo log allocation.
---error ER_UNDO_RECORD_TOO_BIG
-UPDATE bug12547647 SET c = REPEAT('b',16928);
-DROP TABLE bug12547647;
-
eval set global innodb_file_per_table=$per_table;
eval set global innodb_file_format=$format;
eval set global innodb_file_format_max=$format;
@@ -546,7 +342,7 @@ eval set global innodb_file_format_max=$format;
# constraint modifications (Issue #70, Bug #38786)
#
SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0;
-SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0;
+SET FOREIGN_KEY_CHECKS=0;
CREATE TABLE t1(
c1 BIGINT(12) NOT NULL,
@@ -589,8 +385,10 @@ CREATE TABLE t2(
PRIMARY KEY (c1,c2,c3)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+SET FOREIGN_KEY_CHECKS=0;
ALTER TABLE t2 ADD CONSTRAINT fk_t2_ca
FOREIGN KEY (c3) REFERENCES t1(c1);
+SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS;
SHOW CREATE TABLE t2;
@@ -620,26 +418,60 @@ CREATE TABLE t2(
PRIMARY KEY (c1)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
+SET FOREIGN_KEY_CHECKS=0;
+--enable_info
+
+# mysqltest first does replace_regex, then replace_result
+--replace_regex /#sql-[0-9a-f_]*`/#sql-temporary`/
+# Embedded server doesn't chdir to data directory
+--replace_result $MYSQLD_DATADIR ./ master-data/ ''
--error ER_CANT_CREATE_TABLE
ALTER TABLE t2 ADD CONSTRAINT fk_t2_ca
+ FOREIGN KEY (c3,c2) REFERENCES t1(c1,c1), ALGORITHM=COPY;
+--error ER_FK_NO_INDEX_PARENT
+ALTER TABLE t2 ADD CONSTRAINT fk_t2_ca
FOREIGN KEY (c3,c2) REFERENCES t1(c1,c1);
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
+# mysqltest first does replace_regex, then replace_result
+--replace_regex /#sql-[0-9a-f_]*`/#sql-temporary`/
+# Embedded server doesn't chdir to data directory
+--replace_result $MYSQLD_DATADIR ./ master-data/ ''
--error ER_CANT_CREATE_TABLE
ALTER TABLE t2 ADD CONSTRAINT fk_t2_ca
+ FOREIGN KEY (c3,c2) REFERENCES t1(c1,c2), ALGORITHM=COPY;
+--error ER_FK_NO_INDEX_PARENT
+ALTER TABLE t2 ADD CONSTRAINT fk_t2_ca
FOREIGN KEY (c3,c2) REFERENCES t1(c1,c2);
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
---error ER_CANT_CREATE_TABLE
+
+# FIXME (WL#6251 problem): this should fail, like the ALGORITHM=COPY below
ALTER TABLE t2 ADD CONSTRAINT fk_t2_ca
FOREIGN KEY (c3,c2) REFERENCES t1(c2,c1);
+ALTER TABLE t2 DROP FOREIGN KEY fk_t2_ca;
+
+# mysqltest first does replace_regex, then replace_result
+--replace_regex /#sql-[0-9a-f_]*`/#sql-temporary`/
+# Embedded server doesn't chdir to data directory
+--replace_result $MYSQLD_DATADIR ./ master-data/ ''
+--error ER_CANT_CREATE_TABLE
+ALTER TABLE t2 ADD CONSTRAINT fk_t2_ca
+ FOREIGN KEY (c3,c2) REFERENCES t1(c2,c1), ALGORITHM=COPY;
+
ALTER TABLE t1 MODIFY COLUMN c2 BIGINT(12) NOT NULL;
---replace_regex /#sql-[0-9a-f_]+/#sql-temporary/
+# mysqltest first does replace_regex, then replace_result
+--replace_regex /#sql-[0-9a-f_]*`/#sql-temporary`/
+# Embedded server doesn't chdir to data directory
+--replace_result $MYSQLD_DATADIR ./ master-data/ ''
--error ER_CANT_CREATE_TABLE
ALTER TABLE t2 ADD CONSTRAINT fk_t2_ca
+ FOREIGN KEY (c3,c2) REFERENCES t1(c1,c2), ALGORITHM=COPY;
+--error ER_FK_NO_INDEX_PARENT
+ALTER TABLE t2 ADD CONSTRAINT fk_t2_ca
FOREIGN KEY (c3,c2) REFERENCES t1(c1,c2);
ALTER TABLE t2 ADD CONSTRAINT fk_t2_ca
FOREIGN KEY (c3,c2) REFERENCES t1(c2,c1);
+
+SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS;
+
SHOW CREATE TABLE t1;
SHOW CREATE TABLE t2;
CREATE INDEX i_t2_c2_c1 ON t2(c2, c1);
@@ -648,40 +480,79 @@ CREATE INDEX i_t2_c3_c1_c2 ON t2(c3, c1, c2);
SHOW CREATE TABLE t2;
CREATE INDEX i_t2_c3_c2 ON t2(c3, c2);
SHOW CREATE TABLE t2;
+--disable_info
DROP TABLE t2;
DROP TABLE t1;
-# The following tests are disabled because of the introduced timeouts for
-# metadata locks at the MySQL level as part of the fix for
-# Bug#45225 Locking: hang if drop table with no timeout
-# The following CREATE INDEX t1a ON t1(a); causes a lock wait timeout
-# start disabled45225_2
-#connect (a,localhost,root,,);
-#connect (b,localhost,root,,);
-#connection a;
-#CREATE TABLE t1 (a INT, b CHAR(1)) ENGINE=InnoDB;
-#INSERT INTO t1 VALUES (3,'a'),(3,'b'),(1,'c'),(0,'d'),(1,'e');
-#connection b;
-#BEGIN;
-#SELECT * FROM t1;
-#connection a;
-#CREATE INDEX t1a ON t1(a);
-#connection b;
-#SELECT * FROM t1;
-#--error ER_TABLE_DEF_CHANGED
-#SELECT * FROM t1 FORCE INDEX(t1a) ORDER BY a;
-#SELECT * FROM t1;
-#COMMIT;
-#SELECT * FROM t1 FORCE INDEX(t1a) ORDER BY a;
-#connection default;
-#disconnect a;
-#disconnect b;
-#
-#DROP TABLE t1;
-# end disabled45225_2
-#this delay is needed because 45225_2 is disabled, to allow the purge to run
-SELECT SLEEP(10);
-DROP TABLE t1_purge, t2_purge, t3_purge, t4_purge;
-DROP TABLE t12637786;
-DROP TABLE t12963823;
+connect (a,localhost,root,,);
+connect (b,localhost,root,,);
+connection a;
+CREATE TABLE t1 (a INT, b CHAR(1)) ENGINE=InnoDB;
+INSERT INTO t1 VALUES (3,'a'),(3,'b'),(1,'c'),(0,'d'),(1,'e');
+CREATE TABLE t2 (a INT, b CHAR(1)) ENGINE=InnoDB;
+CREATE TABLE t2i (a INT, b CHAR(1) NOT NULL) ENGINE=InnoDB;
+CREATE TABLE t2c (a INT, b CHAR(1) NOT NULL) ENGINE=InnoDB;
+INSERT INTO t2 SELECT * FROM t1;
+INSERT INTO t2i SELECT * FROM t1;
+INSERT INTO t2c SELECT * FROM t1;
+connection b;
+BEGIN;
+# This acquires a MDL lock on t1 until commit.
+SELECT * FROM t1;
+connection a;
+# This times out before of the MDL lock held by connection b.
+SET lock_wait_timeout=1;
+--error ER_LOCK_WAIT_TIMEOUT
+CREATE INDEX t1a ON t1(a);
+--enable_info
+CREATE INDEX t2a ON t2(a);
+--disable_info
+set @old_sql_mode = @@sql_mode;
+# NULL -> NOT NULL only allowed INPLACE if strict sql_mode is on.
+# And adding a PRIMARY KEY will also add NOT NULL implicitly!
+set @@sql_mode = 'STRICT_TRANS_TABLES';
+--enable_info
+ALTER TABLE t2i ADD PRIMARY KEY(a,b), ADD INDEX t2a(a), ALGORITHM=INPLACE;
+--disable_info
+set @@sql_mode = @old_sql_mode;
+--enable_info
+ALTER TABLE t2c ADD PRIMARY KEY(a,b), ADD INDEX t2a(a), ALGORITHM=COPY;
+--disable_info
+connection b;
+# t2i and t2c are too new for this transaction, because they were rebuilt
+--error ER_TABLE_DEF_CHANGED
+SELECT * FROM t2i;
+--error ER_TABLE_DEF_CHANGED
+SELECT * FROM t2i FORCE INDEX(t2a) ORDER BY a;
+--error ER_TABLE_DEF_CHANGED
+SELECT * FROM t2c;
+--error ER_TABLE_DEF_CHANGED
+SELECT * FROM t2c FORCE INDEX(t2a) ORDER BY a;
+# In t2, only the new index t2a is too new for this transaction.
+SELECT * FROM t2;
+--error ER_TABLE_DEF_CHANGED
+SELECT * FROM t2 FORCE INDEX(t2a) ORDER BY a;
+SELECT * FROM t2;
+COMMIT;
+# For a new transaction, all of t2, t2i, t2c are accessible.
+SELECT * FROM t2;
+SELECT * FROM t2 FORCE INDEX(t2a) ORDER BY a;
+SELECT * FROM t2i;
+SELECT * FROM t2i FORCE INDEX(t2a) ORDER BY a;
+SELECT * FROM t2c;
+SELECT * FROM t2c FORCE INDEX(t2a) ORDER BY a;
+connection default;
+disconnect a;
+disconnect b;
+
+--error ER_DUP_KEYNAME
+alter table t2 add index t2a(b);
+alter table t2 drop index t2a, add index t2a(b);
+show create table t2;
+show create table t2i;
+show create table t2c;
+
+--disable_info
+
+DROP TABLE t1,t2,t2c,t2i;
diff --git a/mysql-test/suite/innodb/t/innodb-zip.test b/mysql-test/suite/innodb/t/innodb-zip.test
index 5de41ea6c37..1d904f69ffc 100644
--- a/mysql-test/suite/innodb/t/innodb-zip.test
+++ b/mysql-test/suite/innodb/t/innodb-zip.test
@@ -14,10 +14,15 @@ SELECT table_name, row_format, data_length, index_length
let $per_table=`select @@innodb_file_per_table`;
let $format=`select @@innodb_file_format`;
let $innodb_strict_mode_orig=`select @@session.innodb_strict_mode`;
+SET @save_innodb_stats_on_metadata=@@global.innodb_stats_on_metadata;
+
+
+--let $query_i_s = SELECT table_schema, table_name, row_format, data_length, index_length FROM information_schema.tables WHERE engine='innodb' AND table_schema != 'mysql'
set session innodb_strict_mode=0;
set global innodb_file_per_table=off;
set global innodb_file_format=`0`;
+SET @@global.innodb_stats_on_metadata=ON;
create table t0(a int primary key) engine=innodb row_format=compressed;
create table t00(a int primary key) engine=innodb
@@ -49,20 +54,20 @@ create table t13(a int primary key) engine=innodb
row_format=compressed;
create table t14(a int primary key) engine=innodb key_block_size=9;
-SELECT table_schema, table_name, row_format, data_length, index_length
-FROM information_schema.tables WHERE table_schema='mysqltest_innodb_zip';
+--replace_result 16384 {valid} 8192 {valid} 4096 {valid} 2048 {valid}
+--eval $query_i_s
drop table t0,t00,t2,t3,t4,t5,t6,t7,t8,t9,t10,t11,t12,t13,t14;
alter table t1 key_block_size=0;
alter table t1 row_format=dynamic;
-SELECT table_schema, table_name, row_format, data_length, index_length
-FROM information_schema.tables WHERE table_schema='mysqltest_innodb_zip';
+--replace_result 16384 {valid} 8192 {valid} 4096 {valid}
+--eval $query_i_s
alter table t1 row_format=compact;
-SELECT table_schema, table_name, row_format, data_length, index_length
-FROM information_schema.tables WHERE table_schema='mysqltest_innodb_zip';
+--replace_result 16384 {valid} 8192 {valid} 4096 {valid}
+--eval $query_i_s
alter table t1 row_format=redundant;
-SELECT table_schema, table_name, row_format, data_length, index_length
-FROM information_schema.tables WHERE table_schema='mysqltest_innodb_zip';
+--replace_result 16384 {valid} 8192 {valid} 4096 {valid}
+--eval $query_i_s
drop table t1;
create table t1(a int not null, b text, index(b(10))) engine=innodb
@@ -94,35 +99,15 @@ connection default;
disconnect a;
disconnect b;
-SELECT table_schema, table_name, row_format, data_length, index_length
-FROM information_schema.tables WHERE table_schema='mysqltest_innodb_zip';
+analyze table t1;
+analyze table t2;
+--replace_result 16384 {valid} 12288 {valid}
+--eval $query_i_s
drop table t1,t2;
-# The following should fail in non-strict mode too.
-# (The fix of Bug #50945 only affects REDUNDANT and COMPACT tables.)
-SET SESSION innodb_strict_mode = off;
---error ER_TOO_BIG_ROWSIZE
-CREATE TABLE t1(
- c TEXT NOT NULL, d TEXT NOT NULL,
- PRIMARY KEY (c(767),d(767)))
-ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 CHARSET=ASCII;
---error ER_TOO_BIG_ROWSIZE
-CREATE TABLE t1(
- c TEXT NOT NULL, d TEXT NOT NULL,
- PRIMARY KEY (c(767),d(767)))
-ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2 CHARSET=ASCII;
-CREATE TABLE t1(
- c TEXT NOT NULL, d TEXT NOT NULL,
- PRIMARY KEY (c(767),d(767)))
-ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4 CHARSET=ASCII;
-drop table t1;
---error ER_TOO_BIG_ROWSIZE
-CREATE TABLE t1(c TEXT, PRIMARY KEY (c(440)))
-ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 CHARSET=ASCII;
-CREATE TABLE t1(c TEXT, PRIMARY KEY (c(438)))
-ENGINE=InnoDB ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=1 CHARSET=ASCII;
-INSERT INTO t1 VALUES(REPEAT('A',512)),(REPEAT('B',512));
-DROP TABLE t1;
+#
+# Bug #50945 moved to innodb_16k.test, innodb_8k.test, & innodb_4k.test
+#
#
# Test blob column inheritance (mantis issue#36)
@@ -188,18 +173,18 @@ set innodb_strict_mode = on;
create table t1 (id int primary key) engine = innodb key_block_size = 0;
---replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t2 (id int primary key) engine = innodb key_block_size = 9;
---replace_regex / - .*[0-9]*)/)/
show warnings;
create table t3 (id int primary key) engine = innodb key_block_size = 1;
create table t4 (id int primary key) engine = innodb key_block_size = 2;
create table t5 (id int primary key) engine = innodb key_block_size = 4;
-create table t6 (id int primary key) engine = innodb key_block_size = 8;
-create table t7 (id int primary key) engine = innodb key_block_size = 16;
+# These tests are now done in innodb_16k, innodb_8k and innodb_4k
+# where they get different result depending on page size
+# create table t6 (id int primary key) engine = innodb key_block_size = 8;
+# create table t7 (id int primary key) engine = innodb key_block_size = 16;
#check various ROW_FORMAT values.
create table t8 (id int primary key) engine = innodb row_format = compressed;
@@ -207,156 +192,110 @@ create table t9 (id int primary key) engine = innodb row_format = dynamic;
create table t10(id int primary key) engine = innodb row_format = compact;
create table t11(id int primary key) engine = innodb row_format = redundant;
-SELECT table_schema, table_name, row_format, data_length, index_length
-FROM information_schema.tables WHERE table_schema='mysqltest_innodb_zip';
-drop table t1, t3, t4, t5, t6, t7, t8, t9, t10, t11;
+--replace_result 16384 {valid} 8192 {valid} 4096 {valid} 2048 {valid}
+--eval $query_i_s
+drop table t1, t3, t4, t5, t8, t9, t10, t11;
#test different values of ROW_FORMAT with KEY_BLOCK_SIZE
create table t1 (id int primary key) engine = innodb
-key_block_size = 8 row_format = compressed;
+key_block_size = 4 row_format = compressed;
---replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t2 (id int primary key) engine = innodb
-key_block_size = 8 row_format = redundant;
---replace_regex / - .*[0-9]*)/)/
+key_block_size = 4 row_format = redundant;
show warnings;
---replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t3 (id int primary key) engine = innodb
-key_block_size = 8 row_format = compact;
---replace_regex / - .*[0-9]*)/)/
+key_block_size = 4 row_format = compact;
show warnings;
---replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t4 (id int primary key) engine = innodb
-key_block_size = 8 row_format = dynamic;
---replace_regex / - .*[0-9]*)/)/
+key_block_size = 4 row_format = dynamic;
show warnings;
create table t5 (id int primary key) engine = innodb
-key_block_size = 8 row_format = default;
+key_block_size = 4 row_format = default;
-SELECT table_schema, table_name, row_format, data_length, index_length
-FROM information_schema.tables WHERE table_schema='mysqltest_innodb_zip';
+--eval $query_i_s
drop table t1, t5;
#test multiple errors
---replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t1 (id int primary key) engine = innodb
key_block_size = 9 row_format = redundant;
---replace_regex / - .*[0-9]*)/)/
show warnings;
---replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t2 (id int primary key) engine = innodb
key_block_size = 9 row_format = compact;
---replace_regex / - .*[0-9]*)/)/
show warnings;
---replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t2 (id int primary key) engine = innodb
key_block_size = 9 row_format = dynamic;
---replace_regex / - .*[0-9]*)/)/
show warnings;
-SELECT table_schema, table_name, row_format, data_length, index_length
-FROM information_schema.tables WHERE table_schema='mysqltest_innodb_zip';
+--eval $query_i_s
#test valid values with innodb_file_per_table unset
set global innodb_file_per_table = off;
---replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t1 (id int primary key) engine = innodb key_block_size = 1;
---replace_regex / - .*[0-9]*)/)/
show warnings;
---replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t2 (id int primary key) engine = innodb key_block_size = 2;
---replace_regex / - .*[0-9]*)/)/
show warnings;
---replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t3 (id int primary key) engine = innodb key_block_size = 4;
---replace_regex / - .*[0-9]*)/)/
show warnings;
---replace_regex / - .*[0-9]*)/)/
---error ER_CANT_CREATE_TABLE
-create table t4 (id int primary key) engine = innodb key_block_size = 8;
---replace_regex / - .*[0-9]*)/)/
-show warnings;
---replace_regex / - .*[0-9]*)/)/
---error ER_CANT_CREATE_TABLE
-create table t5 (id int primary key) engine = innodb key_block_size = 16;
---replace_regex / - .*[0-9]*)/)/
-show warnings;
---replace_regex / - .*[0-9]*)/)/
+
+# Tests for key_block_size = 8 and 16 were moved to innodb_16k, innodb_8k
+# and innodb_4k since they get different warnings with smaller page sizes.
+
--error ER_CANT_CREATE_TABLE
create table t6 (id int primary key) engine = innodb row_format = compressed;
---replace_regex / - .*[0-9]*)/)/
show warnings;
---replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t7 (id int primary key) engine = innodb row_format = dynamic;
---replace_regex / - .*[0-9]*)/)/
show warnings;
create table t8 (id int primary key) engine = innodb row_format = compact;
create table t9 (id int primary key) engine = innodb row_format = redundant;
-SELECT table_schema, table_name, row_format, data_length, index_length
-FROM information_schema.tables WHERE table_schema='mysqltest_innodb_zip';
+--replace_result 16384 {valid} 8192 {valid} 4096 {valid}
+--eval $query_i_s
drop table t8, t9;
#test valid values with innodb_file_format unset
set global innodb_file_per_table = on;
set global innodb_file_format = `0`;
---replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t1 (id int primary key) engine = innodb key_block_size = 1;
---replace_regex / - .*[0-9]*)/)/
show warnings;
---replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t2 (id int primary key) engine = innodb key_block_size = 2;
---replace_regex / - .*[0-9]*)/)/
show warnings;
---replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t3 (id int primary key) engine = innodb key_block_size = 4;
---replace_regex / - .*[0-9]*)/)/
-show warnings;
---replace_regex / - .*[0-9]*)/)/
---error ER_CANT_CREATE_TABLE
-create table t4 (id int primary key) engine = innodb key_block_size = 8;
---replace_regex / - .*[0-9]*)/)/
-show warnings;
---replace_regex / - .*[0-9]*)/)/
---error ER_CANT_CREATE_TABLE
-create table t5 (id int primary key) engine = innodb key_block_size = 16;
---replace_regex / - .*[0-9]*)/)/
show warnings;
---replace_regex / - .*[0-9]*)/)/
+
+# Tests for key_block_size = 8 and 16 were moved to innodb_16k, innodb_8k
+# and innodb_4k since they get different warnings with smaller page sizes.
+
--error ER_CANT_CREATE_TABLE
create table t6 (id int primary key) engine = innodb row_format = compressed;
---replace_regex / - .*[0-9]*)/)/
show warnings;
---replace_regex / - .*[0-9]*)/)/
--error ER_CANT_CREATE_TABLE
create table t7 (id int primary key) engine = innodb row_format = dynamic;
---replace_regex / - .*[0-9]*)/)/
show warnings;
create table t8 (id int primary key) engine = innodb row_format = compact;
create table t9 (id int primary key) engine = innodb row_format = redundant;
-SELECT table_schema, table_name, row_format, data_length, index_length
-FROM information_schema.tables WHERE table_schema='mysqltest_innodb_zip';
+--replace_result 16384 {valid} 8192 {valid} 4096 {valid}
+--eval $query_i_s
drop table t8, t9;
eval set global innodb_file_per_table=$per_table;
@@ -374,7 +313,7 @@ create table normal_table (
select @@innodb_file_format_max;
create table zip_table (
c1 int
-) engine = innodb key_block_size = 8;
+) engine = innodb key_block_size = 4;
select @@innodb_file_format_max;
set global innodb_file_format_max=`Antelope`;
select @@innodb_file_format_max;
@@ -394,5 +333,8 @@ eval set global innodb_file_format=$format;
eval set global innodb_file_per_table=$per_table;
eval set session innodb_strict_mode=$innodb_strict_mode_orig;
+SET @@global.innodb_stats_on_metadata=@save_innodb_stats_on_metadata;
+
USE test;
DROP DATABASE mysqltest_innodb_zip;
+
diff --git a/mysql-test/suite/innodb/t/innodb.test b/mysql-test/suite/innodb/t/innodb.test
index 532870fd29c..e2056d66855 100644
--- a/mysql-test/suite/innodb/t/innodb.test
+++ b/mysql-test/suite/innodb/t/innodb.test
@@ -20,10 +20,11 @@
#######################################################################
-- source include/have_innodb.inc
+
let $MYSQLD_DATADIR= `select @@datadir`;
+
let collation=utf8_unicode_ci;
--source include/have_collation.inc
-
set optimizer_switch = 'mrr=on,mrr_sort_keys=on,index_condition_pushdown=on';
# Save the original values of some variables in order to be able to
@@ -445,21 +446,11 @@ INSERT INTO t1 ( sca_code, cat_code, sca_desc, lan_code, sca_pic, sca_sdesc, sca
select count(*) from t1 where sca_code = 'PD';
select count(*) from t1 where sca_code <= 'PD';
select count(*) from t1 where sca_pic is null;
-# this should be fixed by MySQL (see Bug #51451)
-# now that http://bugs.mysql.com/49838 is fixed the following ALTER does
-# copy the table instead of failing
-# --error ER_WRONG_NAME_FOR_INDEX
alter table t1 drop index sca_pic, add index sca_pic (cat_code, sca_pic);
-alter table t1 drop index sca_pic;
-alter table t1 add index sca_pic (cat_code, sca_pic);
select count(*) from t1 where sca_code='PD' and sca_pic is null;
select count(*) from t1 where cat_code='E';
-# this should be fixed by MySQL (see Bug #51451)
---error ER_WRONG_NAME_FOR_INDEX
alter table t1 drop index sca_pic, add index (sca_pic, cat_code);
-alter table t1 drop index sca_pic;
-alter table t1 add index (sca_pic, cat_code);
select count(*) from t1 where sca_code='PD' and sca_pic is null;
select count(*) from t1 where sca_pic >= 'n';
select sca_pic from t1 where sca_pic is null;
@@ -1259,7 +1250,7 @@ drop table t1;
#
CREATE TABLE t1 ( a char(10) ) ENGINE=InnoDB;
---error 1765
+--error 1764
SELECT a FROM t1 WHERE MATCH (a) AGAINST ('test' IN BOOLEAN MODE);
DROP TABLE t1;
@@ -2323,12 +2314,12 @@ CREATE TABLE t2 (a INT, INDEX(a)) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1);
INSERT INTO t2 VALUES (1);
ALTER TABLE t2 ADD FOREIGN KEY (a) REFERENCES t1 (a) ON DELETE SET NULL;
-# mysqltest first does replace_regex, then replace_result
---replace_regex /'[^']*test\/#sql-[0-9a-f_]*'/'#sql-temporary'/
-# Embedded server doesn't chdir to data directory
---replace_result $MYSQLD_DATADIR ./ master-data/ ''
---error 1025
+# NULL -> NOT NULL only allowed INPLACE if strict sql_mode is on.
+set @old_sql_mode = @@sql_mode;
+set @@sql_mode = 'STRICT_TRANS_TABLES';
+--error ER_FK_COLUMN_NOT_NULL
ALTER TABLE t2 MODIFY a INT NOT NULL;
+set @@sql_mode = @old_sql_mode;
DELETE FROM t1;
DROP TABLE t2,t1;
diff --git a/mysql-test/suite/innodb/t/innodb_bug21704.test b/mysql-test/suite/innodb/t/innodb_bug21704.test
index 67d76587819..82e7c81d0e4 100644
--- a/mysql-test/suite/innodb/t/innodb_bug21704.test
+++ b/mysql-test/suite/innodb/t/innodb_bug21704.test
@@ -1,4 +1,5 @@
---source include/have_innodb.inc
+-- source include/have_innodb.inc
+
--echo #
--echo # Bug#21704: Renaming column does not update FK definition.
--echo #
@@ -8,12 +9,6 @@
--echo # foreign key (either in the referencing or referenced table).
--echo
---disable_warnings
-DROP TABLE IF EXISTS t1;
-DROP TABLE IF EXISTS t2;
-DROP TABLE IF EXISTS t3;
---enable_warnings
-
CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ROW_FORMAT=COMPACT ENGINE=INNODB;
CREATE TABLE t2 (a INT PRIMARY KEY, b INT,
@@ -32,64 +27,50 @@ INSERT INTO t3 VALUES (1,1,1),(2,2,2),(3,3,3);
--echo # Test renaming the column in the referenced table.
--echo
-# mysqltest first does replace_regex, then replace_result
---replace_regex /'[^']*test\/#sql-[0-9a-f_]*'/'#sql-temporary'/
-# Embedded server doesn't chdir to data directory
---replace_result $MYSQLTEST_VARDIR . mysqld.1/data/ ''
---error ER_ERROR_ON_RENAME
-ALTER TABLE t1 CHANGE a c INT;
+--enable_info
+ALTER TABLE t1 CHANGE a e INT;
--echo # Ensure that online column rename works.
---enable_info
ALTER TABLE t1 CHANGE b c INT;
---disable_info
--echo
--echo # Test renaming the column in the referencing table
--echo
-# mysqltest first does replace_regex, then replace_result
---replace_regex /'[^']*test\/#sql-[0-9a-f_]*'/'#sql-temporary'/
-# Embedded server doesn't chdir to data directory
---replace_result $MYSQLTEST_VARDIR . mysqld.1/data/ ''
---error ER_ERROR_ON_RENAME
-ALTER TABLE t2 CHANGE a c INT;
+ALTER TABLE t2 CHANGE a z INT;
--echo # Ensure that online column rename works.
---enable_info
ALTER TABLE t2 CHANGE b c INT;
---disable_info
--echo
--echo # Test with self-referential constraints
--echo
-# mysqltest first does replace_regex, then replace_result
---replace_regex /'[^']*test\/#sql-[0-9a-f_]*'/'#sql-temporary'/
-# Embedded server doesn't chdir to data directory
---replace_result $MYSQLTEST_VARDIR . mysqld.1/data/ ''
---error ER_ERROR_ON_RENAME
-ALTER TABLE t3 CHANGE a d INT;
+ALTER TABLE t3 CHANGE a f INT;
-# mysqltest first does replace_regex, then replace_result
---replace_regex /'[^']*test\/#sql-[0-9a-f_]*'/'#sql-temporary'/
-# Embedded server doesn't chdir to data directory
---replace_result $MYSQLTEST_VARDIR . mysqld.1/data/ ''
---error ER_ERROR_ON_RENAME
-ALTER TABLE t3 CHANGE b d INT;
+ALTER TABLE t3 CHANGE b g INT;
--echo # Ensure that online column rename works.
---enable_info
ALTER TABLE t3 CHANGE c d INT;
---disable_info
--echo
--echo # Cleanup.
--echo
+--disable_info
+SHOW CREATE TABLE t1;
+SHOW CREATE TABLE t2;
+SHOW CREATE TABLE t3;
+
+SELECT f.*, c.*
+FROM INFORMATION_SCHEMA.INNODB_SYS_FOREIGN_COLS c
+INNER JOIN INFORMATION_SCHEMA.INNODB_SYS_FOREIGN f
+ON c.ID=f.ID
+WHERE FOR_NAME LIKE 'test/t%';
+
DROP TABLE t3;
DROP TABLE t2;
DROP TABLE t1;
diff --git a/mysql-test/suite/innodb/t/innodb_bug52745.test b/mysql-test/suite/innodb/t/innodb_bug52745.test
index 58bcc264677..3c5d79826f0 100644
--- a/mysql-test/suite/innodb/t/innodb_bug52745.test
+++ b/mysql-test/suite/innodb/t/innodb_bug52745.test
@@ -1,4 +1,4 @@
---source include/have_innodb.inc
+-- source include/have_innodb.inc
let $file_format=`select @@innodb_file_format`;
let $file_per_table=`select @@innodb_file_per_table`;
diff --git a/mysql-test/suite/innodb/t/innodb_bug53591.test b/mysql-test/suite/innodb/t/innodb_bug53591.test
index 9a1c2afbccb..8bc461719b8 100644
--- a/mysql-test/suite/innodb/t/innodb_bug53591.test
+++ b/mysql-test/suite/innodb/t/innodb_bug53591.test
@@ -1,4 +1,4 @@
---source include/have_innodb.inc
+-- source include/have_innodb.inc
let $file_format=`select @@innodb_file_format`;
let $file_per_table=`select @@innodb_file_per_table`;
@@ -10,8 +10,10 @@ set old_alter_table=0;
CREATE TABLE bug53591(a text charset utf8 not null)
ENGINE=InnoDB KEY_BLOCK_SIZE=1;
--- error 139
+-- replace_result 8126 {checked_valid} 4030 {checked_valid} 1982 {checked_valid}
+-- error ER_TOO_BIG_ROWSIZE
ALTER TABLE bug53591 ADD PRIMARY KEY(a(220));
+-- replace_result 8126 {checked_valid} 4030 {checked_valid} 1982 {checked_valid}
SHOW WARNINGS;
DROP TABLE bug53591;
diff --git a/mysql-test/suite/innodb/t/innodb_bug53592.test b/mysql-test/suite/innodb/t/innodb_bug53592.test
index 9bf8578eafa..6c0f5a8422d 100644
--- a/mysql-test/suite/innodb/t/innodb_bug53592.test
+++ b/mysql-test/suite/innodb/t/innodb_bug53592.test
@@ -1,8 +1,8 @@
---source include/have_innodb.inc
# Testcase for Bug #53592 - "crash replacing duplicates into
# table after fast alter table added unique key". The fix is to make
# sure index number lookup should go through "index translation table".
+--source include/have_innodb.inc
# Use FIC for index creation
set old_alter_table=0;
diff --git a/mysql-test/suite/innodb/t/innodb_bug54044.test b/mysql-test/suite/innodb/t/innodb_bug54044.test
index 013a7ff1e93..13c37d9c841 100644
--- a/mysql-test/suite/innodb/t/innodb_bug54044.test
+++ b/mysql-test/suite/innodb/t/innodb_bug54044.test
@@ -1,12 +1,19 @@
---source include/have_innodb.inc
# This is the test for bug #54044. Special handle MYSQL_TYPE_NULL type
# during create table, so it will not trigger assertion failure.
+--source include/have_innodb.inc
# This 'create table' operation no longer uses the NULL datatype.
CREATE TEMPORARY TABLE table_54044 ENGINE = INNODB
AS SELECT IF(NULL IS NOT NULL, NULL, NULL);
SHOW CREATE TABLE table_54044;
-CREATE TEMPORARY TABLE tmp1 ENGINE = INNODB AS SELECT COALESCE(NULL, NULL, NULL);
-CREATE TEMPORARY TABLE tmp2 ENGINE = INNODB AS SELECT GREATEST(NULL, NULL);
+DROP TABLE table_54044;
+
+# These 'create table' operations should fail because of
+# using NULL datatype
+
+CREATE TABLE tmp ENGINE = INNODB AS SELECT COALESCE(NULL, NULL, NULL), GREATEST(NULL, NULL), NULL;
+SHOW CREATE TABLE tmp;
+DROP TABLE tmp;
+
diff --git a/mysql-test/suite/innodb/t/innodb_bug56947.test b/mysql-test/suite/innodb/t/innodb_bug56947.test
index b6feb239314..4cefeb391cf 100644
--- a/mysql-test/suite/innodb/t/innodb_bug56947.test
+++ b/mysql-test/suite/innodb/t/innodb_bug56947.test
@@ -1,15 +1,17 @@
---source include/have_innodb.inc
#
# Bug #56947 valgrind reports a memory leak in innodb-plugin.innodb-index
#
-SET @old_innodb_file_per_table=@@innodb_file_per_table;
-# avoid a message about filed *.ibd file creation in the error log
+-- source include/have_innodb.inc
+-- source include/have_debug.inc
+
SET GLOBAL innodb_file_per_table=0;
create table bug56947(a int not null) engine = innodb;
-CREATE TABLE `bug56947#1`(a int) ENGINE=InnoDB;
---error 156
+
+SET DEBUG_DBUG='+d,ib_rebuild_cannot_rename';
+--error ER_GET_ERRNO
alter table bug56947 add unique index (a);
-drop table `bug56947#1`;
+SET DEBUG_DBUG='-d,ib_rebuild_cannot_rename';
+check table bug56947;
+
drop table bug56947;
---disable_query_log
-SET GLOBAL innodb_file_per_table=@old_innodb_file_per_table;
+SET @@global.innodb_file_per_table=DEFAULT;
diff --git a/mysql-test/suite/innodb/t/innodb_bug60049.test b/mysql-test/suite/innodb/t/innodb_bug60049.test
index 70fec8cb959..6760d1a1f02 100644
--- a/mysql-test/suite/innodb/t/innodb_bug60049.test
+++ b/mysql-test/suite/innodb/t/innodb_bug60049.test
@@ -6,10 +6,8 @@
-- source include/have_innodb.inc
-- source include/have_innodb_16k.inc
-#
-# This test will not work if we don't do full shutdown of innodb
-#
-set @@global.innodb_fast_shutdown=0;
+call mtr.add_suppression('InnoDB: Error: Table "mysql"."innodb_(table|index)_stats" not found');
+call mtr.add_suppression('InnoDB: Error: Fetch of persistent statistics requested');
-- disable_query_log
let $create1 = query_get_value(SHOW CREATE TABLE mysql.innodb_table_stats, Create Table, 1);
@@ -18,7 +16,7 @@ DROP TABLE mysql.innodb_index_stats;
DROP TABLE mysql.innodb_table_stats;
-- enable_query_log
-CREATE TABLE t(a INT)ENGINE=InnoDB;
+CREATE TABLE t(a INT)ENGINE=InnoDB STATS_PERSISTENT=0;
RENAME TABLE t TO u;
DROP TABLE u;
SELECT @@innodb_fast_shutdown;
diff --git a/mysql-test/suite/innodb/t/innodb_corrupt_bit.test b/mysql-test/suite/innodb/t/innodb_corrupt_bit.test
index b8d19ddfcee..c57e52b65cc 100644
--- a/mysql-test/suite/innodb/t/innodb_corrupt_bit.test
+++ b/mysql-test/suite/innodb/t/innodb_corrupt_bit.test
@@ -2,6 +2,7 @@
# Test for persistent corrupt bit for corrupted index and table
#
-- source include/have_innodb.inc
+-- source include/have_innodb_16k.inc
# Issues with innodb_change_buffering_debug on Windows, so the test scenario
# cannot be created on windows
@@ -11,6 +12,7 @@
--source include/have_debug.inc
-- disable_query_log
+call mtr.add_suppression("Flagged corruption of idx.*in CHECK TABLE");
# This test setup is extracted from bug56680.test:
# The flag innodb_change_buffering_debug is only available in debug builds.
# It instructs InnoDB to try to evict pages from the buffer pool when
diff --git a/mysql-test/suite/innodb/t/innodb_file_format.test b/mysql-test/suite/innodb/t/innodb_file_format.test
index 941edb728dd..f45083101c0 100644
--- a/mysql-test/suite/innodb/t/innodb_file_format.test
+++ b/mysql-test/suite/innodb/t/innodb_file_format.test
@@ -1,5 +1,5 @@
---source include/have_innodb.inc
---source include/restart_and_reinit.inc
+-- source include/restart_and_reinit.inc
+-- source include/have_innodb.inc
let $innodb_file_format_orig=`select @@innodb_file_format`;
diff --git a/mysql-test/suite/innodb/t/innodb_index_large_prefix.test b/mysql-test/suite/innodb/t/innodb_index_large_prefix.test
index 6873c2a404c..17f82f88fef 100644
--- a/mysql-test/suite/innodb/t/innodb_index_large_prefix.test
+++ b/mysql-test/suite/innodb/t/innodb_index_large_prefix.test
@@ -1,6 +1,9 @@
---source include/have_innodb.inc
# Testcase for worklog #5743: Lift the limit of index key prefixes
+--source include/have_innodb.inc
+--source include/have_innodb_16k.inc
+SET default_storage_engine=InnoDB;
+
let $innodb_file_format_orig=`select @@innodb_file_format`;
let $innodb_file_per_table_orig=`select @@innodb_file_per_table`;
let $innodb_large_prefix_orig=`select @@innodb_large_prefix`;
@@ -9,10 +12,11 @@ set global innodb_file_format="Barracuda";
set global innodb_file_per_table=1;
set global innodb_large_prefix=1;
+-- echo ### Test 1 ###
# Create a table of DYNAMIC format, with a primary index of 1000 bytes in
# size
-create table worklog5743(a TEXT not null, primary key (a(1000)))
-ROW_FORMAT=DYNAMIC, engine = innodb;
+create table worklog5743(a TEXT not null, primary key (a(1000))) ROW_FORMAT=DYNAMIC;
+show warnings;
# Do some insertion and update to excercise the external cache
# code path
@@ -23,13 +27,14 @@ update worklog5743 set a = (repeat("b", 16000));
# Create a secondary index
create index idx on worklog5743(a(2000));
+show warnings;
# Start a few sessions to do selections on table being updated in default
# session, so it would rebuild the previous version from undo log.
-# 1) Default session: Initiate an update on the externally stored column
-# 2) Session con1: Select from table with repeated read
-# 3) Session con2: Select from table with read uncommitted
-# 4) Default session: rollback updates
+# 1) Default session: Initiate an update on the externally stored column
+# 2) Session con1: Select from table with repeated read
+# 3) Session con2: Select from table with read uncommitted
+# 4) Default session: rollback updates
begin;
update worklog5743 set a = (repeat("x", 17000));
@@ -55,11 +60,12 @@ rollback;
drop table worklog5743;
+-- echo ### Test 2 ###
# Create a table with only a secondary index has large prefix column
-create table worklog5743(a1 int, a2 TEXT not null)
-ROW_FORMAT=DYNAMIC, engine = innodb;
-
+create table worklog5743(a1 int, a2 TEXT not null) ROW_FORMAT=DYNAMIC;
+show warnings;
create index idx on worklog5743(a1, a2(2000));
+show warnings;
insert into worklog5743 values(9, repeat("a", 10000));
@@ -85,9 +91,9 @@ rollback;
drop table worklog5743;
+-- echo ### Test 3 ###
# Create a table with a secondary index has small (50 bytes) prefix column
-create table worklog5743(a1 int, a2 TEXT not null)
-ROW_FORMAT=DYNAMIC, engine = innodb;
+create table worklog5743(a1 int, a2 TEXT not null) ROW_FORMAT=DYNAMIC;
create index idx on worklog5743(a1, a2(50));
@@ -115,72 +121,225 @@ rollback;
drop table worklog5743;
-# Create a table of ROW_FORMAT=COMPRESSED format
-create table worklog5743_2(a1 int, a2 TEXT not null)
-ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2, engine = innodb;
-
-create table worklog5743_4(a1 int, a2 TEXT not null)
-ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4, engine = innodb;
-
-# The maximum overall index record (not prefix) length for this table
-# is page_zip_empty_size() / 2, which is 960. "Too big row" error (
-# HA_ERR_TO_BIG_ROW) will be printed if this limit is exceeded.
-# Considering other fields and their overhead, the maximum length
-# for column a2 is 940 or 941 depending on the zlib version used and
-# compressBound() value used in page_zip_empty_size() (please refer
-# to Bug #47495 for more detail).
--- error 139
-create index idx1 on worklog5743_2(a1, a2(942));
-
-create index idx1 on worklog5743_2(a1, a2(940));
-
-# similarly, the maximum index record length for the table is
-# 1984. Considering other fields and their overhead, the
-# maximum length for column a2 is 1964 or 1965 (please refer
-# to Bug #47495 for more detail).
--- error 139
-create index idx1 on worklog5743_4(a1, a2(1966));
-
-create index idx1 on worklog5743_4(a1, a2(1964));
-
+-- echo ### Test 4 ###
+# Create compressed tables with each KEY_BLOCK_SIZE.
+create table worklog5743_1(a1 int, a2 TEXT not null) KEY_BLOCK_SIZE=1;
+create table worklog5743_2(a1 int, a2 TEXT not null) KEY_BLOCK_SIZE=2;
+create table worklog5743_4(a1 int, a2 TEXT not null) KEY_BLOCK_SIZE=4;
+create table worklog5743_8(a1 int, a2 TEXT, a3 TEXT) KEY_BLOCK_SIZE=8;
+create table worklog5743_16(a1 int, a2 TEXT, a3 TEXT) KEY_BLOCK_SIZE=16;
+
+# The maximum overall index record (not prefix) length of a
+# compressed table is dependent on innodb-page-size (IPS),
+# key_block_size (KBS) and the number of fields (NF).
+# "Too big row" error (HA_ERR_TO_BIG_ROW) will be returned if this
+# limit is exceeded.
+# See page_zip_empty_size() and Bug #47495 for more detail.
+
+# Test edge cases for indexes using key_block_size=1
+set global innodb_large_prefix=0;
+-- error ER_TOO_BIG_ROWSIZE
+create index idx1 on worklog5743_1(a2(4000));
+show warnings;
+set global innodb_large_prefix=1;
+-- error ER_TOO_BIG_ROWSIZE
+create index idx2 on worklog5743_1(a2(4000));
+show warnings;
+-- error ER_TOO_BIG_ROWSIZE
+create index idx3 on worklog5743_1(a2(436));
+show warnings;
+# Bug#13391353 Limit is one byte less on on 32bit-Linux only
+create index idx4 on worklog5743_1(a2(434));
+show warnings;
+-- error ER_TOO_BIG_ROWSIZE
+create index idx5 on worklog5743_1(a1, a2(430));
+show warnings;
+# Bug#13391353 Limit is one byte less on on 32bit-Linux only
+create index idx6 on worklog5743_1(a1, a2(428));
+show warnings;
+
+# Test edge cases for indexes using key_block_size=2
+set global innodb_large_prefix=0;
+create index idx1 on worklog5743_2(a2(4000));
+show warnings;
+set global innodb_large_prefix=1;
+-- error ER_TOO_BIG_ROWSIZE
+create index idx2 on worklog5743_2(a2(4000));
+show warnings;
+-- error ER_TOO_BIG_ROWSIZE
+create index idx3 on worklog5743_2(a2(948));
+show warnings;
+# Bug#13391353 Limit is one byte less on on 32bit-Linux only
+create index idx4 on worklog5743_2(a2(946));
+show warnings;
+-- error ER_TOO_BIG_ROWSIZE
+create index idx5 on worklog5743_2(a1, a2(942));
+show warnings;
+# Bug#13391353 Limit is one byte less on on 32bit-Linux only
+create index idx6 on worklog5743_2(a1, a2(940));
+show warnings;
+
+# Test edge cases for indexes using key_block_size=4
+set global innodb_large_prefix=0;
+create index idx1 on worklog5743_4(a2(4000));
+show warnings;
+set global innodb_large_prefix=1;
+-- error ER_TOO_BIG_ROWSIZE
+create index idx2 on worklog5743_4(a2(4000));
+show warnings;
+-- error ER_TOO_BIG_ROWSIZE
+create index idx3 on worklog5743_4(a2(1972));
+show warnings;
+# Bug#13391353 Limit is one byte less on on 32bit-Linux only
+create index idx4 on worklog5743_4(a2(1970));
+show warnings;
+-- error ER_TOO_BIG_ROWSIZE
+create index idx5 on worklog5743_4(a1, a2(1966));
+show warnings;
+# Bug#13391353 Limit is one byte less on on 32bit-Linux only
+create index idx6 on worklog5743_4(a1, a2(1964));
+show warnings;
+
+# Test edge cases for indexes using key_block_size=8
+set global innodb_large_prefix=0;
+create index idx1 on worklog5743_8(a2(1000));
+show warnings;
+set global innodb_large_prefix=1;
+create index idx2 on worklog5743_8(a2(3073));
+show warnings;
+create index idx3 on worklog5743_8(a2(3072));
+show warnings;
+-- error ER_TOO_LONG_KEY
+create index idx4 on worklog5743_8(a1, a2(3069));
+show warnings;
+create index idx5 on worklog5743_8(a1, a2(3068));
+show warnings;
+-- error ER_TOO_LONG_KEY
+create index idx6 on worklog5743_8(a1, a2(2000), a3(1069));
+show warnings;
+create index idx7 on worklog5743_8(a1, a2(2000), a3(1068));
+show warnings;
+
+# Test edge cases for indexes using key_block_size=16
+set global innodb_large_prefix=0;
+create index idx1 on worklog5743_16(a2(1000));
+show warnings;
+set global innodb_large_prefix=1;
+create index idx2 on worklog5743_16(a2(3073));
+show warnings;
+create index idx3 on worklog5743_16(a2(3072));
+show warnings;
+-- error ER_TOO_LONG_KEY
+create index idx4 on worklog5743_16(a1, a2(3069));
+show warnings;
+create index idx5 on worklog5743_16(a1, a2(3068));
+show warnings;
+-- error ER_TOO_LONG_KEY
+create index idx6 on worklog5743_16(a1, a2(2000), a3(1069));
+show warnings;
+create index idx7 on worklog5743_16(a1, a2(2000), a3(1068));
+show warnings;
+
+# Insert a large record into each of these tables.
+insert into worklog5743_1 values(9, repeat("a", 10000));
insert into worklog5743_2 values(9, repeat("a", 10000));
insert into worklog5743_4 values(9, repeat("a", 10000));
+insert into worklog5743_8 values(9, repeat("a", 10000), repeat("a", 10000));
+insert into worklog5743_16 values(9, repeat("a", 10000), repeat("a", 10000));
+
+# Now if we change the global innodb_large_prefix back to 767,
+# updates to these indexes should still be allowed.
+set global innodb_large_prefix=0;
+insert into worklog5743_1 values(2, repeat("b", 10000));
+insert into worklog5743_2 values(2, repeat("b", 10000));
+insert into worklog5743_4 values(2, repeat("b", 10000));
+insert into worklog5743_8 values(2, repeat("b", 10000), repeat("b", 10000));
+insert into worklog5743_16 values(2, repeat("b", 10000), repeat("b", 10000));
+set global innodb_large_prefix=1;
+
+select a1, left(a2, 20) from worklog5743_1;
+select a1, left(a2, 20) from worklog5743_2;
+select a1, left(a2, 20) from worklog5743_4;
+select a1, left(a2, 20) from worklog5743_8;
+select a1, left(a2, 20) from worklog5743_16;
begin;
+update worklog5743_1 set a1 = 1000;
update worklog5743_2 set a1 = 1000;
update worklog5743_4 set a1 = 1000;
+update worklog5743_8 set a1 = 1000;
+update worklog5743_16 set a1 = 1000;
+select a1, left(a2, 20) from worklog5743_1;
+select a1, left(a2, 20) from worklog5743_2;
+select a1, left(a2, 20) from worklog5743_4;
+select a1, left(a2, 20) from worklog5743_8;
+select a1, left(a2, 20) from worklog5743_16;
+
# Do a select from another connection that would use the secondary index
--connection con1
select @@session.tx_isolation;
-explain select a1, a2 = repeat("a", 10000) from worklog5743_2 where a1 = 9;
-select a1, a2 = repeat("a", 10000) from worklog5743_2 where a1 = 9;
-select a1, a2 = repeat("a", 10000) from worklog5743_4 where a1 = 9;
+explain select a1, left(a2, 20) from worklog5743_1 where a1 = 9;
+explain select a1, left(a2, 20) from worklog5743_2 where a1 = 9;
+explain select a1, left(a2, 20) from worklog5743_4 where a1 = 9;
+explain select a1, left(a2, 20) from worklog5743_8 where a1 = 9;
+explain select a1, left(a2, 20) from worklog5743_16 where a1 = 9;
+select a1, left(a2, 20) from worklog5743_1 where a1 = 9;
+select a1, left(a2, 20) from worklog5743_2 where a1 = 9;
+select a1, left(a2, 20) from worklog5743_4 where a1 = 9;
+select a1, left(a2, 20) from worklog5743_8 where a1 = 9;
+select a1, left(a2, 20) from worklog5743_16 where a1 = 9;
# Do read uncommitted in another session, it would show there is no
# row with a1 = 9
--connection con2
SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
select @@session.tx_isolation;
-select a1, a2 = repeat("a", 10000) from worklog5743_2 where a1 = 9;
-select a1, a2 = repeat("a", 10000) from worklog5743_4 where a1 = 9;
+select a1, left(a2, 20) from worklog5743_1 where a1 = 9;
+select a1, left(a2, 20) from worklog5743_2 where a1 = 9;
+select a1, left(a2, 20) from worklog5743_4 where a1 = 9;
+select a1, left(a2, 20) from worklog5743_8 where a1 = 9;
+select a1, left(a2, 20) from worklog5743_16 where a1 = 9;
--connection default
rollback;
+drop table worklog5743_1;
drop table worklog5743_2;
drop table worklog5743_4;
+drop table worklog5743_8;
+drop table worklog5743_16;
+
+-- echo ### Test 5 ###
+# Create a table with large varchar columns and create indexes
+# directly on these large columns to show that prefix limit is
+# automatically applied and to show that limit.
+create table worklog5743(a1 int,
+ a2 varchar(20000),
+ a3 varchar(3073),
+ a4 varchar(3072),
+ a5 varchar(3069),
+ a6 varchar(3068))
+ ROW_FORMAT=DYNAMIC;
+create index idx1 on worklog5743(a2);
+create index idx2 on worklog5743(a3);
+create index idx3 on worklog5743(a4);
+show warnings;
+-- error ER_TOO_LONG_KEY
+create index idx4 on worklog5743(a1, a2);
+show warnings;
+-- error ER_TOO_LONG_KEY
+create index idx5 on worklog5743(a1, a5);
+show warnings;
+create index idx6 on worklog5743(a1, a6);
+show warnings;
+show create table worklog5743;
-# Create a table with varchar column, and create index directly on this
-# large column (without prefix)
-create table worklog5743(a1 int, a2 varchar(3000))
-ROW_FORMAT=DYNAMIC, engine = innodb;
-
-# Create an index with large column without prefix
-create index idx on worklog5743(a1, a2);
-
-insert into worklog5743 values(9, repeat("a", 3000));
+insert into worklog5743 values(9,
+ repeat("a", 20000), repeat("a", 3073),
+ repeat("a", 3072), repeat("a", 3069),
+ repeat("a", 3068));
begin;
@@ -203,19 +362,19 @@ rollback;
drop table worklog5743;
+-- echo ### Test 6 ###
# Create a table with old format, and the limit is 768 bytes.
-- error ER_INDEX_COLUMN_TOO_LONG
-create table worklog5743(a TEXT not null, primary key (a(1000)))
-engine = innodb;
+create table worklog5743(a TEXT not null, primary key (a(1000)));
-create table worklog5743(a TEXT) engine = innodb;
+create table worklog5743(a TEXT);
# Excercise the column length check in ha_innobase::add_index()
-- error ER_INDEX_COLUMN_TOO_LONG
-create index idx on worklog5743(a(1000));
+create index idx on worklog5743(a(768));
# This should be successful
-create index idx on worklog5743(a(725));
+create index idx on worklog5743(a(767));
# Perform some DMLs
insert into worklog5743 values(repeat("a", 20000));
@@ -240,29 +399,40 @@ rollback;
drop table worklog5743;
-# Some border line test on the column length.
+-- echo ### Test 7 ###
+# Some border line tests on the column length.
# We have a limit of 3072 bytes for Barracuda table
-create table worklog5743(a TEXT not null) ROW_FORMAT=DYNAMIC, engine = innodb;
-
-# Length exceeds maximum supported key length, will auto-truncated to 3072
-create index idx on worklog5743(a(3073));
+create table worklog5743(a TEXT not null) ROW_FORMAT=DYNAMIC;
+# Length exceeds maximum supported key length
+# It will be auto-truncated to 3072
+create index idx1 on worklog5743(a(3073));
create index idx2 on worklog5743(a(3072));
-
show create table worklog5743;
-
drop table worklog5743;
-# We have a limit of 767 bytes for Antelope table
-create table worklog5743(a TEXT not null) engine = innodb;
-
+# We have a limit of 767 bytes for Antelope tables
+create table worklog5743(a TEXT not null) ROW_FORMAT=REDUNDANT;
-- error ER_INDEX_COLUMN_TOO_LONG
create index idx on worklog5743(a(768));
-
create index idx2 on worklog5743(a(767));
+drop table worklog5743;
+create table worklog5743(a TEXT not null) ROW_FORMAT=COMPACT;
+-- error ER_INDEX_COLUMN_TOO_LONG
+create index idx on worklog5743(a(768));
+create index idx2 on worklog5743(a(767));
drop table worklog5743;
+
eval SET GLOBAL innodb_file_format=$innodb_file_format_orig;
eval SET GLOBAL innodb_file_per_table=$innodb_file_per_table_orig;
eval SET GLOBAL innodb_large_prefix=$innodb_large_prefix_orig;
+--connection con1
+--disconnect con1
+--source include/wait_until_disconnected.inc
+--connection con2
+--disconnect con2
+--source include/wait_until_disconnected.inc
+--connection default
+
diff --git a/mysql-test/suite/innodb/t/innodb_information_schema_buffer.test b/mysql-test/suite/innodb/t/innodb_information_schema_buffer.test
index 751a2bd6b5e..6858b898649 100644
--- a/mysql-test/suite/innodb/t/innodb_information_schema_buffer.test
+++ b/mysql-test/suite/innodb/t/innodb_information_schema_buffer.test
@@ -25,7 +25,7 @@ INSERT INTO infoschema_buffer_test VALUES(9);
# right away
SELECT TABLE_NAME, INDEX_NAME, NUMBER_RECORDS, DATA_SIZE, PAGE_STATE, PAGE_TYPE
FROM INFORMATION_SCHEMA.INNODB_BUFFER_PAGE
-WHERE TABLE_NAME like "%infoschema_buffer_test"
+WHERE TABLE_NAME like "%infoschema_buffer_test%"
and PAGE_STATE="file_page" and PAGE_TYPE="index";
# The NUMBER_RECORDS and DATA_SIZE should check with each insertion
@@ -33,14 +33,14 @@ INSERT INTO infoschema_buffer_test VALUES(19);
SELECT TABLE_NAME, INDEX_NAME, NUMBER_RECORDS, DATA_SIZE, PAGE_STATE, PAGE_TYPE
FROM INFORMATION_SCHEMA.INNODB_BUFFER_PAGE
-WHERE TABLE_NAME like "%infoschema_buffer_test"
+WHERE TABLE_NAME like "%infoschema_buffer_test%"
and PAGE_STATE="file_page" and PAGE_TYPE="index";
CREATE INDEX idx ON infoschema_buffer_test(col1);
SELECT TABLE_NAME, INDEX_NAME, NUMBER_RECORDS, DATA_SIZE, PAGE_STATE, PAGE_TYPE
FROM INFORMATION_SCHEMA.INNODB_BUFFER_PAGE
-WHERE TABLE_NAME like "%infoschema_buffer_test"
+WHERE TABLE_NAME like "%infoschema_buffer_test%"
and PAGE_STATE="file_page" and INDEX_NAME = "idx" and PAGE_TYPE="index";
@@ -49,7 +49,7 @@ DROP TABLE infoschema_buffer_test;
SELECT TABLE_NAME, INDEX_NAME, NUMBER_RECORDS, DATA_SIZE, PAGE_STATE, PAGE_TYPE
FROM INFORMATION_SCHEMA.INNODB_BUFFER_PAGE
-WHERE TABLE_NAME like "%infoschema_buffer_test";
+WHERE TABLE_NAME like "%infoschema_buffer_test%";
# Do one more test
#--replace_regex /'*[0-9]*'/'NUM'/
@@ -64,13 +64,9 @@ ENGINE=INNODB;
SELECT count(*)
FROM INFORMATION_SCHEMA.INNODB_BUFFER_PAGE
-WHERE TABLE_NAME like "%infoschema_child" and PAGE_STATE="file_page"
+WHERE TABLE_NAME like "%infoschema_child%" and PAGE_STATE="file_page"
and PAGE_TYPE="index";
DROP TABLE infoschema_child;
DROP TABLE infoschema_parent;
-show create table information_schema.innodb_buffer_page;
-show create table information_schema.innodb_buffer_page_lru;
-show create table information_schema.innodb_buffer_pool_stats;
-
diff --git a/mysql-test/suite/innodb/t/innodb_mysql.test b/mysql-test/suite/innodb/t/innodb_mysql.test
index 3ae5be3aa30..44e2e8b2342 100644
--- a/mysql-test/suite/innodb/t/innodb_mysql.test
+++ b/mysql-test/suite/innodb/t/innodb_mysql.test
@@ -5,11 +5,12 @@
# main testing code t/innodb_mysql.test -> include/mix1.inc
#
-# Slow test, don't run during staging part
--- source include/not_staging.inc
--- source include/have_innodb.inc
--- source include/have_query_cache.inc
+#Want to skip this test from daily Valgrind execution.
+--source include/no_valgrind_without_big.inc
+# Adding big test option for this test.
+--source include/big_test.inc
+-- source include/have_innodb.inc
let $engine_type= InnoDB;
let $other_engine_type= MEMORY;
# InnoDB does support FOREIGN KEYFOREIGN KEYs
@@ -21,21 +22,21 @@ set session innodb_support_xa=default;
--disable_warnings
drop table if exists t1, t2, t3;
--enable_warnings
-#
-# BUG#35850: Performance regression in 5.1.23/5.1.24
-#
+--echo #
+--echo # BUG#35850: Performance regression in 5.1.23/5.1.24
+--echo #
create table t1(a int);
insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
create table t2 (a int, b int, pk int, key(a,b), primary key(pk)) engine=innodb;
insert into t2 select @a:=A.a+10*(B.a + 10*C.a),@a, @a from t1 A, t1 B, t1 C;
---echo this must use key 'a', not PRIMARY:
+--echo # this must use key 'a', not PRIMARY:
--replace_column 9 #
explain select a from t2 where a=b;
drop table t1, t2;
-#
-# Bug #40360: Binlog related errors with binlog off
-#
+--echo #
+--echo # Bug #40360: Binlog related errors with binlog off
+--echo #
# This bug is triggered when the binlog format is STATEMENT and the
# binary log is turned off. In this case, no error should be shown for
# the statement since there are no replication issues.
@@ -47,9 +48,9 @@ CREATE TABLE t1 ( a INT ) ENGINE=InnoDB;
INSERT INTO t1 VALUES(1);
DROP TABLE t1;
-#
-# Bug#37284 Crash in Field_string::type()
-#
+--echo #
+--echo # Bug#37284 Crash in Field_string::type()
+--echo #
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
@@ -58,10 +59,10 @@ CREATE INDEX i1 on t1 (a(3));
SELECT * FROM t1 WHERE a = 'abcde';
DROP TABLE t1;
-#
-# Bug #37742: HA_EXTRA_KEYREAD flag is set when key contains only prefix of
-# requested column
-#
+--echo #
+--echo # Bug #37742: HA_EXTRA_KEYREAD flag is set when key contains only prefix of
+--echo # requested column
+--echo #
CREATE TABLE foo (a int, b int, c char(10),
PRIMARY KEY (c(3)),
@@ -85,6 +86,12 @@ INSERT INTO foo VALUES
INSERT INTO bar SELECT * FROM foo;
INSERT INTO foo2 SELECT * FROM foo;
+-- disable_result_log
+ANALYZE TABLE bar;
+ANALYZE TABLE foo;
+ANALYZE TABLE foo2;
+-- enable_result_log
+
--query_vertical EXPLAIN SELECT c FROM bar WHERE b>2;
--query_vertical EXPLAIN SELECT c FROM foo WHERE b>2;
--query_vertical EXPLAIN SELECT c FROM foo2 WHERE b>2;
@@ -96,9 +103,9 @@ INSERT INTO foo2 SELECT * FROM foo;
DROP TABLE foo, bar, foo2;
-#
-# Bug#41348: INSERT INTO tbl SELECT * FROM temp_tbl overwrites locking type of temp table
-#
+--echo #
+--echo # Bug#41348: INSERT INTO tbl SELECT * FROM temp_tbl overwrites locking type of temp table
+--echo #
--disable_warnings
DROP TABLE IF EXISTS t1,t3,t2;
@@ -137,9 +144,9 @@ DEALLOCATE PREPARE stmt3;
DROP TABLE t1,t3,t2;
DROP FUNCTION f1;
-#
-# Bug#37016: TRUNCATE TABLE removes some rows but not all
-#
+--echo #
+--echo # Bug#37016: TRUNCATE TABLE removes some rows but not all
+--echo #
--disable_warnings
DROP TABLE IF EXISTS t1,t2;
@@ -337,9 +344,9 @@ DROP TABLE t1;
DROP TABLE t2;
DROP TABLE t3;
-#
-# Bug#43580: Issue with Innodb on multi-table update
-#
+--echo #
+--echo # Bug#43580: Issue with Innodb on multi-table update
+--echo #
CREATE TABLE t1 (a INT, b INT, KEY (a)) ENGINE = INNODB;
CREATE TABLE t2 (a INT KEY, b INT, KEY (b)) ENGINE = INNODB;
@@ -460,8 +467,14 @@ INSERT INTO t1 (b,c,d,e) SELECT RAND()*10000, RAND()*10000, d, e FROM t1;
INSERT INTO t1 (b,c,d,e) SELECT RAND()*10000, RAND()*10000, d, e FROM t1;
INSERT INTO t1 (b,c,d,e) SELECT RAND()*10000, RAND()*10000, d, e FROM t1;
INSERT INTO t1 (b,c,d,e) SELECT RAND()*10000, RAND()*10000, d, e FROM t1;
+-- disable_result_log
+ANALYZE TABLE t1;
+-- enable_result_log
EXPLAIN SELECT * FROM t1 WHERE b=1 AND c=1 ORDER BY a;
EXPLAIN SELECT * FROM t1 FORCE INDEX(i2) WHERE b=1 and c=1 ORDER BY a;
+# With 4k pages, the 'rows' column in the output below is either 120 or 138,
+# not 128 as it is with 8k and 16k. Bug#12602606
+--replace_result 128 {checked} 120 {checked} 138 {checked}
EXPLAIN SELECT * FROM t1 FORCE INDEX(PRIMARY) WHERE b=1 AND c=1 ORDER BY a;
DROP TABLE t1;
@@ -505,11 +518,7 @@ INSERT INTO t2 VALUES (),();
CREATE OR REPLACE VIEW v1 AS SELECT 1 FROM t2
WHERE b =(SELECT a FROM t1 LIMIT 1);
---disable_query_log
---disable_result_log
CONNECT (con1, localhost, root,,);
---enable_query_log
---enable_result_log
CONNECTION default;
DELIMITER |;
@@ -546,7 +555,7 @@ DROP TABLE t1,t2;
--echo # Bug #49324: more valgrind errors in test_if_skip_sort_order
--echo #
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=innodb ;
---echo #should not cause valgrind warnings
+--echo # should not cause valgrind warnings
SELECT 1 FROM t1 JOIN t1 a USING(a) GROUP BY t1.a,t1.a;
DROP TABLE t1;
@@ -558,6 +567,10 @@ create table t1(f1 int not null primary key, f2 int) engine=innodb;
create table t2(f1 int not null, key (f1)) engine=innodb;
insert into t1 values (1,1),(2,2),(3,3);
insert into t2 values (1),(2),(3);
+-- disable_result_log
+analyze table t1;
+analyze table t2;
+-- enable_result_log
explain select t1.* from t1 left join t2 using(f1) group by t1.f1;
drop table t1,t2;
--echo #
@@ -576,6 +589,8 @@ INSERT INTO t1 VALUES (1,1,1,1,1,1), (2,2,2,2,2,2), (3,3,3,3,3,3),
(7,7,7,7,7,7), (8,8,8,8,8,8), (9,9,9,9,9,9),
(11,11,11,11,11,11);
+ANALYZE TABLE t1;
+
--query_vertical EXPLAIN SELECT COUNT(*) FROM t1
DROP TABLE t1;
@@ -594,36 +609,6 @@ ALTER TABLE t1 DROP INDEX k, ADD UNIQUE INDEX k (a,b);
DROP TABLE t1;
---echo #
---echo # Bug #53334: wrong result for outer join with impossible ON condition
---echo # (see the same test case for MyISAM in join.test)
---echo #
-
-create table t1 (id int primary key);
-create table t2 (id int);
-
-insert into t1 values (75);
-insert into t1 values (79);
-insert into t1 values (78);
-insert into t1 values (77);
-replace into t1 values (76);
-replace into t1 values (76);
-insert into t1 values (104);
-insert into t1 values (103);
-insert into t1 values (102);
-insert into t1 values (101);
-insert into t1 values (105);
-insert into t1 values (106);
-insert into t1 values (107);
-
-insert into t2 values (107),(75),(1000);
-
-select t1.id,t2.id from t2 left join t1 on t1.id>=74 and t1.id<=0
- where t2.id=75 and t1.id is null;
-explain select t1.id,t2.id from t2 left join t1 on t1.id>=74 and t1.id<=0
- where t2.id=75 and t1.id is null;
-
-drop table t1,t2;
--echo #
--echo # Bug #47453: InnoDB incorrectly changes TIMESTAMP columns when
@@ -635,7 +620,7 @@ CREATE TABLE t2 (a INT, b INT,
c TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
ON UPDATE CURRENT_TIMESTAMP) ENGINE=InnoDB;
---echo set up our data elements
+--echo # set up our data elements
INSERT INTO t1 (d) VALUES (1);
INSERT INTO t2 (a,b) VALUES (1,1);
SELECT SECOND(c) INTO @bug47453 FROM t2;
@@ -648,7 +633,7 @@ SELECT SLEEP(1);
UPDATE t1 JOIN t2 ON d=a SET b=1 WHERE a=1;
---echo #should be 0
+--echo # should be 0
SELECT SECOND(c)-@bug47453 FROM t1 JOIN t2 ON d=a;
DROP TABLE t1, t2;
@@ -728,6 +713,10 @@ CREATE TABLE t1 (
LOAD DATA INFILE '../../std_data/intersect-bug50389.tsv' INTO TABLE t1;
+-- disable_result_log
+ANALYZE TABLE t1;
+-- enable_result_log
+
SELECT * FROM t1 WHERE f1 IN
(3305028,3353871,3772880,3346860,4228206,3336022,
3470988,3305175,3329875,3817277,3856380,3796193,
@@ -754,7 +743,7 @@ CREATE TABLE t1 (
PRIMARY KEY (f1),
UNIQUE KEY (f2, f3),
KEY (f4)
-) ENGINE=InnoDB;
+) ENGINE=InnoDB STATS_PERSISTENT=0;
INSERT INTO t1 VALUES
(1,1,991,1), (2,1,992,1), (3,1,993,1), (4,1,994,1), (5,1,995,1),
@@ -786,64 +775,11 @@ UNLOCK TABLES;
DROP TABLE t1;
--echo #
---echo # Bug#55826: create table .. select crashes with when KILL_BAD_DATA
---echo # is returned
---echo #
-CREATE TABLE t1(a INT) ENGINE=innodb;
-INSERT INTO t1 VALUES (0);
-SET SQL_MODE='STRICT_ALL_TABLES';
---error ER_TRUNCATED_WRONG_VALUE
-CREATE TABLE t2
- SELECT LEAST((SELECT '' FROM t1),NOW()) FROM `t1`;
-DROP TABLE t1;
-SET SQL_MODE=DEFAULT;
-
-
-
---echo #
---echo # Bug#55580: segfault in read_view_sees_trx_id
---echo #
-CREATE TABLE t1 (a INT) ENGINE=Innodb;
-CREATE TABLE t2 (a INT) ENGINE=Innodb;
-INSERT INTO t1 VALUES (1),(2);
-INSERT INTO t2 VALUES (1),(2);
-
-connect (con1,localhost,root,,test);
-connect (con2,localhost,root,,test);
-
-connection con1;
-START TRANSACTION;
-SELECT * FROM t2 LOCK IN SHARE MODE;
-
-connection con2;
-START TRANSACTION;
-SELECT * FROM t1 LOCK IN SHARE MODE;
-
-connection con1;
-let $conn_id= `SELECT CONNECTION_ID()`;
---send SELECT * FROM t1 FOR UPDATE
-
-connection con2;
-let $wait_timeout= 2;
-let $wait_condition= SELECT 1 FROM INFORMATION_SCHEMA.PROCESSLIST
-WHERE ID=$conn_id AND STATE='Sending data';
---source include/wait_condition.inc
---echo # should not crash
---error ER_LOCK_DEADLOCK
-SELECT * FROM t1 GROUP BY (SELECT a FROM t2 LIMIT 1 FOR UPDATE) + t1.a;
-
-connection default;
-disconnect con1;
-disconnect con2;
-
-DROP TABLE t1,t2;
-
---echo #
--echo # Bug#55656: mysqldump can be slower after bug #39653 fix
--echo #
CREATE TABLE t1 (a INT , b INT, c INT, d INT,
- KEY (b), PRIMARY KEY (a,b)) ENGINE=INNODB;
+ KEY (b), PRIMARY KEY (a,b)) ENGINE=INNODB STATS_PERSISTENT=0;
INSERT INTO t1 VALUES (1,1,1,1), (2,2,2,2), (3,3,3,3);
--query_vertical EXPLAIN SELECT COUNT(*) FROM t1
@@ -862,63 +798,6 @@ CREATE INDEX b ON t1(a,b,c,d);
DROP TABLE t1;
--echo #
---echo # Bug#56862 Execution of a query that uses index merge returns a wrong result
---echo #
-
-CREATE TABLE t1 (
- pk int NOT NULL AUTO_INCREMENT PRIMARY KEY,
- a int,
- b int,
- INDEX idx(a))
-ENGINE=INNODB;
-
-INSERT INTO t1(a,b) VALUES
- (11, 1100), (2, 200), (1, 100), (14, 1400), (5, 500),
- (3, 300), (17, 1700), (4, 400), (12, 1200), (8, 800),
- (6, 600), (18, 1800), (9, 900), (10, 1000), (7, 700),
- (13, 1300), (15, 1500), (19, 1900), (16, 1600), (20, 2000);
-INSERT INTO t1(a,b) SELECT a+20, b+2000 FROM t1;
-INSERT INTO t1(a,b) SELECT a+40, b+4000 FROM t1;
-INSERT INTO t1(a,b) SELECT a+80, b+8000 FROM t1;
-INSERT INTO t1(a,b) SELECT a,b FROM t1;
-INSERT INTO t1(a,b) SELECT a,b FROM t1;
-INSERT INTO t1(a,b) SELECT a,b FROM t1;
-INSERT INTO t1(a,b) SELECT a,b FROM t1;
-INSERT INTO t1(a,b) SELECT a,b FROM t1;
-INSERT INTO t1(a,b) SELECT a,b FROM t1;
-INSERT INTO t1(a,b) SELECT a,b FROM t1;
-INSERT INTO t1(a,b) SELECT a,b FROM t1;
-INSERT INTO t1 VALUES (1000000, 0, 0);
-
-SET SESSION sort_buffer_size = 1024*36;
-
-EXPLAIN
-SELECT COUNT(*) FROM
- (SELECT * FROM t1 FORCE INDEX (idx,PRIMARY)
- WHERE a BETWEEN 2 AND 7 OR pk=1000000) AS t;
-
-SELECT COUNT(*) FROM
- (SELECT * FROM t1 FORCE INDEX (idx,PRIMARY)
- WHERE a BETWEEN 2 AND 7 OR pk=1000000) AS t;
-
-SET SESSION sort_buffer_size = DEFAULT;
-
-DROP TABLE t1;
-
---echo #
---echo # ALTER TABLE IGNORE didn't ignore duplicates for unique add index
---echo #
-
-create table t1 (a int primary key, b int) engine = innodb;
-insert into t1 values (1,1),(2,1);
-alter ignore table t1 add unique `main` (b);
-select * from t1;
-drop table t1;
-
---echo End of 5.1 tests
---echo #
-
---echo #
--echo # Bug#55826: create table .. select crashes with when KILL_BAD_DATA
--echo # is returned
--echo #
@@ -933,49 +812,8 @@ DROP TABLE t1;
SET SQL_MODE=DEFAULT;
--echo #
---echo # Bug#56862 Execution of a query that uses index merge returns a wrong result
+--echo # Bug#56862 Moved to innodb_16k.test
--echo #
-
-CREATE TABLE t1 (
- pk int NOT NULL AUTO_INCREMENT PRIMARY KEY,
- a int,
- b int,
- INDEX idx(a))
-ENGINE=INNODB;
-
-INSERT INTO t1(a,b) VALUES
- (11, 1100), (2, 200), (1, 100), (14, 1400), (5, 500),
- (3, 300), (17, 1700), (4, 400), (12, 1200), (8, 800),
- (6, 600), (18, 1800), (9, 900), (10, 1000), (7, 700),
- (13, 1300), (15, 1500), (19, 1900), (16, 1600), (20, 2000);
-INSERT INTO t1(a,b) SELECT a+20, b+2000 FROM t1;
-INSERT INTO t1(a,b) SELECT a+40, b+4000 FROM t1;
-INSERT INTO t1(a,b) SELECT a+80, b+8000 FROM t1;
-INSERT INTO t1(a,b) SELECT a,b FROM t1;
-INSERT INTO t1(a,b) SELECT a,b FROM t1;
-INSERT INTO t1(a,b) SELECT a,b FROM t1;
-INSERT INTO t1(a,b) SELECT a,b FROM t1;
-INSERT INTO t1(a,b) SELECT a,b FROM t1;
-INSERT INTO t1(a,b) SELECT a,b FROM t1;
-INSERT INTO t1(a,b) SELECT a,b FROM t1;
-INSERT INTO t1(a,b) SELECT a,b FROM t1;
-INSERT INTO t1 VALUES (1000000, 0, 0);
-
-SET SESSION sort_buffer_size = 1024*36;
-
-EXPLAIN
-SELECT COUNT(*) FROM
- (SELECT * FROM t1 FORCE INDEX (idx,PRIMARY)
- WHERE a BETWEEN 2 AND 7 OR pk=1000000) AS t;
-
-SELECT COUNT(*) FROM
- (SELECT * FROM t1 FORCE INDEX (idx,PRIMARY)
- WHERE a BETWEEN 2 AND 7 OR pk=1000000) AS t;
-
-SET SESSION sort_buffer_size = DEFAULT;
-
-DROP TABLE t1;
-
--echo #
--echo # Test for bug #39932 "create table fails if column for FK is in different
--echo # case than in corr index".
@@ -984,122 +822,15 @@ DROP TABLE t1;
drop tables if exists t1, t2;
--enable_warnings
create table t1 (pk int primary key) engine=InnoDB;
---echo # Even although the below statement uses uppercased field names in
---echo # foreign key definition it still should be able to find explicitly
---echo # created supporting index. So it should succeed and should not
---echo # create any additional supporting indexes.
+# Even although the below statement uses uppercased field names in
+# foreign key definition it still should be able to find explicitly
+# created supporting index. So it should succeed and should not
+# create any additional supporting indexes.
create table t2 (fk int, key x (fk),
constraint x foreign key (FK) references t1 (PK)) engine=InnoDB;
show create table t2;
drop table t2, t1;
---echo #
---echo # Bug #663818: wrong result when BNLH is used
---echo #
-
-CREATE TABLE t1(pk int NOT NULL PRIMARY KEY) ENGINE=InnoDB;
-INSERT INTO t1 VALUES
- (1), (2), (11), (12), (13), (14),
- (15), (16), (17), (18), (19);
-CREATE TABLE t2(pk int NOT NULL PRIMARY KEY) ENGINE=InnoDB;
-INSERT INTO t2 VALUES
- (1), (10), (11), (12), (13), (14),
- (15), (16), (17), (18), (19), (20), (21);
-
-SET SESSION join_buffer_size=10000;
-
-SET SESSION join_cache_level=3;
-EXPLAIN
-SELECT t1.pk FROM t1,t2
- WHERE t1.pk = t2.pk AND t2.pk <> 8;
-SELECT t1.pk FROM t1,t2
- WHERE t1.pk = t2.pk AND t2.pk <> 8;
-
-SET SESSION join_cache_level=1;
-EXPLAIN
-SELECT t1.pk FROM t1,t2
- WHERE t1.pk = t2.pk AND t2.pk <> 8;
-SELECT t1.pk FROM t1,t2
- WHERE t1.pk = t2.pk AND t2.pk <> 8;
-
-DROP TABLE t1,t2;
-
-SET SESSION join_cache_level=DEFAULT;
-SET SESSION join_buffer_size=DEFAULT;
-
---echo #
---echo # Bug#668644: HAVING + ORDER BY
---echo #
-
-CREATE TABLE t1 (
- pk int NOT NULL PRIMARY KEY, i int DEFAULT NULL,
- INDEX idx (i)
-) ENGINE=INNODB;
-INSERT INTO t1 VALUES
- (6,-1636630528),(2,-1097924608),(1,6),(3,6),(4,1148715008),(5,1541734400);
-
-CREATE TABLE t2 (
- i int DEFAULT NULL,
- pk int NOT NULL PRIMARY KEY,
- INDEX idx (i)
-) ENGINE= INNODB;
-INSERT INTO t2 VALUES
- (-1993998336,20),(-1036582912,1),(-733413376,5),(-538247168,16),
- (-514260992,4),(-249561088,9),(1,2),(1,6),(2,10),(2,19),(4,17),
- (5,14),(5,15),(6,8),(7,13),(8,18),(9,11),(9,12),(257425408,7),
- (576061440,3);
-
-EXPLAIN
-SELECT t1 .i AS f FROM t1, t2
- WHERE t2.i = t1.pk AND t1.pk BETWEEN 0 AND 224
- HAVING f > 7
- ORDER BY f;
-SELECT t1 .i AS f FROM t1, t2
- WHERE t2.i = t1.pk AND t1.pk BETWEEN 0 AND 224
- HAVING f > 7
- ORDER BY f;
-
-DROP TABLE t1, t2;
-
---echo #
---echo # Test for bug #56619 - Assertion failed during
---echo # ALTER TABLE RENAME, DISABLE KEYS
---echo #
-
---disable_warnings
-DROP TABLE IF EXISTS t1, t2;
---enable_warnings
-CREATE TABLE t1 (a INT, INDEX(a)) engine=innodb;
---disable_warnings
-ALTER TABLE t1 RENAME TO t2, DISABLE KEYS;
-DROP TABLE IF EXISTS t1, t2;
---enable_warnings
-
---echo #
---echo # Bug#702322: HAVING with two ANDed predicates + ORDER BY
---echo #
-
-CREATE TABLE t1 (pk int PRIMARY KEY, a int, KEY (a)) ENGINE=InnoDB;
-CREATE TABLE t2 (a int, KEY (a)) ENGINE=InnoDB;
-
-INSERT INTO t1 VALUES
- (18,0),(9,10),(8,11),(2,15),(7,19),(1,20);
-
-SET SESSION join_cache_level = 0;
-
-# vanilla InnoDB doesn't do ICP
---replace_result "Using where" "Using index condition"
-EXPLAIN
-SELECT t1.a FROM t1 LEFT JOIN t2 ON t1.pk = t2.a
- WHERE t1.pk >= 6 HAVING t1.a<> 0 AND t1.a <> 11
- ORDER BY t1.a;
-SELECT t1.a FROM t1 LEFT JOIN t2 ON t1.pk = t2.a
- WHERE t1.pk >= 6 HAVING t1.a<> 0 AND t1.a <> 11
- ORDER BY t1.a;
-
-DROP TABLE t1,t2;
-
---echo End of 5.3 tests
--echo #
--echo # Test for bug #11762012 - "54553: INNODB ASSERTS IN HA_INNOBASE::
@@ -1120,7 +851,41 @@ UPDATE t1 SET c = 5;
UNLOCK TABLES;
DROP TEMPORARY TABLE t1;
---echo End of 5.1 tests
+--echo # End of 5.1 tests
+
+
+--echo #
+--echo # Bug#49604 "6.0 processing compound WHERE clause incorrectly
+--echo # with Innodb - extra rows"
+--echo #
+
+CREATE TABLE t1 (
+ c1 INT NOT NULL,
+ c2 INT,
+ PRIMARY KEY (c1),
+ KEY k1 (c2)
+) ENGINE=InnoDB;
+
+INSERT INTO t1 VALUES (12,1);
+INSERT INTO t1 VALUES (15,1);
+INSERT INTO t1 VALUES (16,1);
+INSERT INTO t1 VALUES (22,1);
+INSERT INTO t1 VALUES (20,2);
+
+CREATE TABLE t2 (
+ c1 INT NOT NULL,
+ c2 INT,
+ PRIMARY KEY (c1)
+) ENGINE=InnoDB;
+
+INSERT INTO t2 VALUES (1,2);
+INSERT INTO t2 VALUES (2,9);
+
+SELECT STRAIGHT_JOIN t2.c2, t1.c2, t2.c1
+FROM t1 JOIN t2 ON t1.c2 = t2.c1
+WHERE t2.c1 IN (2, 1, 6) OR t2.c1 NOT IN (1);
+
+DROP TABLE t1, t2;
--echo #
@@ -1160,12 +925,64 @@ connection default;
COMMIT;
DROP TABLE t1;
DROP FUNCTION f1;
+--echo #
+--echo # Bug#42744: Crash when using a join buffer to join a table with a blob
+--echo # column and an additional column used for duplicate elimination.
+--echo #
+
+CREATE TABLE t1 (a tinyblob) ENGINE=InnoDB;
+CREATE TABLE t2 (a int PRIMARY KEY, b tinyblob) ENGINE=InnoDB;
+INSERT INTO t1 VALUES ('1'), (NULL);
+INSERT INTO t2 VALUES (1, '1');
+
+EXPLAIN
+SELECT t2.b FROM t1,t2 WHERE t1.a IN (SELECT 1 FROM t2);
+
+SELECT t2.b FROM t1,t2 WHERE t1.a IN (SELECT 1 FROM t2);
+
+DROP TABLE t1,t2;
+
+--echo #
+--echo # Bug#48093: 6.0 Server not processing equivalent IN clauses properly
+--echo # with Innodb tables
+--echo #
+
+CREATE TABLE t1 (
+ i int(11) DEFAULT NULL,
+ v1 varchar(1) DEFAULT NULL,
+ v2 varchar(20) DEFAULT NULL,
+ KEY i (i),
+ KEY v (v1,i)
+) ENGINE=innodb;
+
+INSERT INTO t1 VALUES (1,'f','no');
+INSERT INTO t1 VALUES (2,'u','yes-u');
+INSERT INTO t1 VALUES (2,'h','yes-h');
+INSERT INTO t1 VALUES (3,'d','no');
+
+--echo
+SELECT v2
+FROM t1
+WHERE v1 IN ('f', 'd', 'h', 'u' ) AND i = 2;
+
+--echo
+--echo # Should not use index_merge
+EXPLAIN
+SELECT v2
+FROM t1
+WHERE v1 IN ('f', 'd', 'h', 'u' ) AND i = 2;
+
+DROP TABLE t1;
--echo #
--echo # Bug#54606 innodb fast alter table + pack_keys=0
--echo # prevents adding new indexes
--echo #
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
CREATE TABLE t1 (a INT, b CHAR(9), c INT, key(b))
ENGINE=InnoDB
PACK_KEYS=0;
@@ -1174,4 +991,38 @@ CREATE INDEX c on t1 (c);
DROP TABLE t1;
---echo End of 5.1 tests
+
+--echo #
+--echo # Additional coverage for refactoring which is made as part
+--echo # of fix for Bug#27480 "Extend CREATE TEMPORARY TABLES privilege
+--echo # to allow temp table operations".
+--echo #
+--echo # Check that OPTIMIZE table works for temporary InnoDB tables.
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+CREATE TEMPORARY TABLE t1 (a INT) ENGINE=InnoDB;
+OPTIMIZE TABLE t1;
+DROP TABLE t1;
+
+
+--echo #
+--echo # Bug#11762345 54927: DROPPING AND ADDING AN INDEX IN ONE
+--echo # COMMAND CAN FAIL IN INNODB PLUGIN 1.0
+--echo #
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1 (id int, a int, b int, PRIMARY KEY (id),
+ INDEX a (a)) ENGINE=innodb;
+
+ALTER TABLE t1 DROP INDEX a, ADD INDEX a (b, a);
+# This used to fail
+ALTER TABLE t1 DROP INDEX a, ADD INDEX (a, b);
+
+DROP TABLE t1;
+
+
+--echo End of 6.0 tests
diff --git a/mysql-test/suite/innodb/t/innodb_prefix_index_liftedlimit.test b/mysql-test/suite/innodb/t/innodb_prefix_index_liftedlimit.test
index d7540dff36d..77f55002da5 100644
--- a/mysql-test/suite/innodb/t/innodb_prefix_index_liftedlimit.test
+++ b/mysql-test/suite/innodb/t/innodb_prefix_index_liftedlimit.test
@@ -1,4 +1,3 @@
---source include/have_innodb.inc
######## suite/innodb/t/innodb_prefix_index_liftedlimit.test ##########
# #
# Testcase for worklog WL#5743: Lift the limit of index key prefixes #
@@ -14,11 +13,15 @@
# #
######################################################################
+--source include/have_innodb.inc
+--source include/have_innodb_16k.inc
# Save innodb variables
-let $innodb_file_format_orig=`select @@innodb_file_format`;
-let $innodb_file_per_table_orig=`select @@innodb_file_per_table`;
-let $innodb_large_prefix_orig=`select @@innodb_large_prefix`;
+--disable_query_log
+let $innodb_file_format_orig = `select @@innodb_file_format`;
+let $innodb_file_per_table_orig = `select @@innodb_file_per_table`;
+let $innodb_large_prefix_orig = `select @@innodb_large_prefix`;
+--enable_query_log
# Set Innodb file format as feature works for Barracuda file format
set global innodb_file_format="Barracuda";
@@ -49,6 +52,11 @@ WHERE col_1_varchar = REPEAT("c", 4000) AND col_2_varchar = REPEAT("o", 4000);
INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000));
DELETE FROM worklog5743 WHERE col_1_varchar = REPEAT("b", 4000);
SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743;
+--error ER_INDEX_COLUMN_TOO_LONG
+ALTER TABLE worklog5743 ROW_FORMAT=REDUNDANT;
+--error ER_INDEX_COLUMN_TOO_LONG
+ALTER TABLE worklog5743 ROW_FORMAT=COMPACT;
+ALTER TABLE worklog5743 ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=16;
DROP TABLE worklog5743;
@@ -482,10 +490,10 @@ DROP TABLE worklog5743_key8;
#------------------------------------------------------------------------------
# Create mutiple prefix index. We can not create prefix index length > 16K
# as index is written in undo log page which of 16K size.
-# So we can create max 5 prefix index of length 3072 on table
+# So we can create max 2 prefix index of length 3072 on table
CREATE TABLE worklog5743 (
col_1_varbinary VARBINARY (4000) , col_2_varchar VARCHAR (4000) ,
-col_3_text TEXT (4000), col_4_blob BLOB (4000),col_5_text TEXT (4000),
+col_3_text TEXT (4000), col_4_blob BLOB (4000), col_5_text TEXT (4000),
col_6_varchar VARCHAR (4000), col_7_binary BINARY (255)
) ROW_FORMAT=DYNAMIC, engine = innodb;
INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000),
@@ -493,13 +501,10 @@ REPEAT("a", 4000) , REPEAT("o", 4000), REPEAT("a", 4000),
REPEAT("a", 4000) , REPEAT("a", 255)
);
-# Update hangs if we create following 5 indexes. Uncomment them once its fix
+# Update reports ER_UNDO_RECORD_TOO_BIG if we create more than 2 indexes.
# Bug#12547647 - UPDATE LOGGING COULD EXCEED LOG PAGE SIZE
-#CREATE INDEX prefix_idx1 ON worklog5743(col_1_varbinary (3072));
-#CREATE INDEX prefix_idx2 ON worklog5743(col_2_varchar (3072));
-#CREATE INDEX prefix_idx3 ON worklog5743(col_3_text (3072));
-#CREATE INDEX prefix_idx4 ON worklog5743(col_4_blob (3072));
-#CREATE INDEX prefix_idx5 ON worklog5743(col_5_text (3072));
+CREATE INDEX prefix_idx1 ON worklog5743(col_1_varbinary (3072));
+CREATE INDEX prefix_idx2 ON worklog5743(col_2_varchar (3072));
INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000),
REPEAT("a", 4000) , REPEAT("o", 4000), REPEAT("a", 4000),
@@ -511,14 +516,29 @@ UPDATE worklog5743 SET col_1_varbinary = REPEAT("c", 4000)
WHERE col_1_varbinary = REPEAT("a", 4000) AND col_2_varchar = REPEAT("o", 4000);
SELECT col_1_varbinary = REPEAT("c", 4000) FROM worklog5743
WHERE col_1_varbinary = REPEAT("c", 4000) AND col_2_varchar = REPEAT("o", 4000);
+
INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000),
REPEAT("a", 4000) , REPEAT("o", 4000), REPEAT("a", 4000),
REPEAT("a", 4000) , REPEAT("a", 255)
);
DELETE FROM worklog5743 WHERE col_1_varbinary = REPEAT("b", 4000);
SELECT col_1_varbinary = REPEAT("c", 4000) FROM worklog5743;
-DROP TABLE worklog5743;
+# Add 3 more indexes.
+# Update used to hang but now ER_UNDO_RECORD_TOO_BIG is reported;
+# Bug#12547647 - UPDATE LOGGING COULD EXCEED UNDO LOG PAGE SIZE
+INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000),
+REPEAT("a", 4000) , REPEAT("o", 4000), REPEAT("a", 4000),
+REPEAT("a", 4000) , REPEAT("a", 255)
+);
+CREATE INDEX prefix_idx3 ON worklog5743(col_3_text (3072));
+CREATE INDEX prefix_idx4 ON worklog5743(col_4_blob (3072));
+CREATE INDEX prefix_idx5 ON worklog5743(col_5_text (3072));
+--error ER_UNDO_RECORD_TOO_BIG
+UPDATE worklog5743 SET col_1_varbinary = REPEAT("c", 4000)
+WHERE col_1_varbinary = REPEAT("a", 4000) AND col_2_varchar = REPEAT("o", 4000);
+SHOW WARNINGS;
+DROP TABLE worklog5743;
#------------------------------------------------------------------------------
# Create mutiple prefix index. We can not create prefix index length > 16K as
@@ -532,7 +552,8 @@ col_6_varchar VARCHAR (4000), col_7_binary BINARY (255)
) ROW_FORMAT=DYNAMIC, engine = innodb;
-# Update hangs if we create following 5 indexes. Uncomment them once its fix
+# Update used to hang if we create following 5 indexes. Fixed in;
+# Bug#12547647 - UPDATE LOGGING COULD EXCEED UNDO LOG PAGE SIZE
CREATE INDEX prefix_idx1 ON worklog5743(col_1_varbinary (3072));
CREATE INDEX prefix_idx2 ON worklog5743(col_2_varchar (3072));
CREATE INDEX prefix_idx3 ON worklog5743(col_3_text (3072));
@@ -562,11 +583,13 @@ REPEAT("a", 4000) , REPEAT("o", 4000), REPEAT("a", 4000),
REPEAT("a", 4000) , REPEAT("a", 255)
);
ROLLBACK;
-# Uncomment Update fater Bug#12547647 is fixed - UPDATE LOGGING COULD EXCEED LOG PAGE SIZE
# Bug#12547647 - UPDATE LOGGING COULD EXCEED LOG PAGE SIZE
-#UPDATE worklog5743 SET col_1_varbinary = REPEAT("c", 4000)
-#WHERE col_1_varbinary = REPEAT("a", 4000)
-#AND col_2_varchar = REPEAT("o", 4000);
+# Instead of this error, it would hang before this fix.
+--error ER_UNDO_RECORD_TOO_BIG
+UPDATE worklog5743 SET col_1_varbinary = REPEAT("c", 4000)
+WHERE col_1_varbinary = REPEAT("a", 4000)
+AND col_2_varchar = REPEAT("o", 4000);
+SHOW WARNINGS;
SELECT col_1_varbinary = REPEAT("c", 4000) FROM worklog5743
WHERE col_1_varbinary = REPEAT("c", 4000) AND col_2_varchar = REPEAT("o", 4000);
INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000),
@@ -599,14 +622,28 @@ SELECT col_1_text = REPEAT("c", 4000) FROM worklog5743;
DROP TABLE worklog5743;
# Prefix index with utf8 charset + varchar.
-# For varchar we also log the column itself as oppose of TEXT so it error
-# with limit 1024 due to overhead.
+# utf8 charcter takes 3 bytes in mysql so prefix index limit is 3072/3 = 1024
+# This is a case where dict_index_too_big_for_undo() is too conservative.
+# If it did not return error 1118, to commented code would work.
+# See bug#12953735.
--replace_regex /> [0-9]*/> max_row_size/
--- error 1118
+-- error ER_TOO_BIG_ROWSIZE
CREATE TABLE worklog5743 (col_1_varchar VARCHAR (4000) CHARACTER SET 'utf8',
col_2_varchar VARCHAR (4000) CHARACTER SET 'utf8' ,
PRIMARY KEY (col_1_varchar(1024))
) ROW_FORMAT=DYNAMIC, engine = innodb;
+#INSERT INTO worklog5743 VALUES(REPEAT("a", 4000) , REPEAT("o", 4000));
+#CREATE INDEX prefix_idx ON worklog5743(col_1_varchar (1024));
+#INSERT INTO worklog5743 VALUES(REPEAT("b", 4000) , REPEAT("p", 4000));
+#SELECT col_1_varchar = REPEAT("a", 4000) , col_2_varchar = REPEAT("o", 4000) FROM worklog5743;
+#UPDATE worklog5743 SET col_1_varchar = REPEAT("c", 4000)
+#WHERE col_1_varchar = REPEAT("a", 4000) AND col_2_varchar = REPEAT("o", 4000);
+#SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743
+#WHERE col_1_varchar = REPEAT("c", 4000) AND col_2_varchar = REPEAT("o", 4000);
+#INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000));
+#DELETE FROM worklog5743 WHERE col_1_varchar = REPEAT("b", 4000);
+#SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743;
+#DROP TABLE worklog5743;
#------------------------------------------------------------------------------
# prefinx index on utf8 charset with transaction
@@ -734,7 +771,7 @@ COMMIT;
# Wait for commit
let $wait_condition=SELECT COUNT(*)=0 FROM information_schema.processlist
WHERE info='COMMIT';
---source include/wait_condition.inc
+--source include/wait_condition.inc
--echo "In connection 1"
--connection con1
@@ -843,8 +880,12 @@ worklog5743;
SELECT COUNT(*) FROM worklog5743;
COMMIT;
+--echo "Disconnect the connections 1 and 2"
--disconnect con1
+--source include/wait_until_disconnected.inc
+--connection con2
--disconnect con2
+--source include/wait_until_disconnected.inc
--connection default
DROP TABLE worklog5743;
@@ -989,7 +1030,7 @@ AND col_2_varchar = REPEAT("o", 4000);
ALTER TABLE worklog5743 ADD PRIMARY KEY (col_1_varchar(3072));
INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000));
---error 1062
+--error ER_DUP_ENTRY
INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000));
DELETE FROM worklog5743 WHERE col_1_varchar = REPEAT("b", 4000);
SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743;
@@ -1023,7 +1064,7 @@ ALTER TABLE worklog5743 ADD PRIMARY KEY (col_1_varchar(3072));
CREATE INDEX prefix_idx ON worklog5743(col_1_varchar (3072));
INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000));
---error 1062
+--error ER_DUP_ENTRY
INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000));
DELETE FROM worklog5743 WHERE col_1_varchar = REPEAT("b", 4000);
SELECT col_1_varchar = REPEAT("c", 4000) FROM worklog5743;
@@ -1243,8 +1284,8 @@ AND col_2_text = REPEAT("o", 4000);
ALTER TABLE worklog5743 DROP PRIMARY KEY;
# Again add index length > 948. Expect error 'to big row ' due to exceed
-# in key length.
--- error 139
+# in key length.
+-- error ER_TOO_BIG_ROWSIZE
ALTER TABLE worklog5743 ADD PRIMARY KEY (col_1_text (950));
INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000));
SELECT col_1_text = REPEAT("a", 4000) FROM worklog5743;
@@ -1306,7 +1347,7 @@ VALUES(concat(REPEAT("a", 2000),REPEAT("b", 1000),REPEAT("c", 1000)),
REPEAT("o", 4000));
INSERT INTO worklog5743
VALUES(concat(REPEAT("a", 2000),REPEAT("b", 2000)), REPEAT("o", 4000));
---error 1062
+--error ER_DUP_ENTRY
ALTER TABLE worklog5743 ADD PRIMARY KEY `pk_idx` (col_1_varchar(3000));
DROP TABLE worklog5743;
@@ -1328,11 +1369,24 @@ CREATE TABLE worklog5743 (
col_1_varchar VARCHAR (4000) , col_2_varchar VARCHAR (4000) ,
PRIMARY KEY (col_1_varchar(767))
) engine = innodb;
-# Prefix index > 767 is truncated with REDUNDANT and COMPACT
+INSERT INTO worklog5743 VALUES(REPEAT('a',4000),REPEAT('b',4000));
+# Prefix index > 767 is truncated with REDUNDANT and COMPACT
+--enable_info
CREATE INDEX prefix_idx ON worklog5743(col_1_varchar (1000));
+ALTER TABLE worklog5743 ROW_FORMAT=REDUNDANT;
+--disable_info
+SHOW CREATE TABLE worklog5743;
DROP TABLE worklog5743;
#------------------------------------------------------------------------------
-eval SET GLOBAL innodb_file_format=$innodb_file_format_orig;
-eval SET GLOBAL innodb_file_per_table=$innodb_file_per_table_orig;
-eval SET GLOBAL innodb_large_prefix=$innodb_large_prefix_orig;
+--disable_query_log
+eval set global innodb_file_format = $innodb_file_format_orig;
+eval set global innodb_file_per_table = $innodb_file_per_table_orig;
+eval set global innodb_large_prefix = $innodb_large_prefix_orig;
+--echo "Disconnect the connection 1"
+--connection con1
+--disconnect con1
+--source include/wait_until_disconnected.inc
+--enable_query_log
+--connection default
+
diff --git a/mysql-test/suite/maria/lock.result b/mysql-test/suite/maria/lock.result
index b67d1ab7b0d..52e83eb111d 100644
--- a/mysql-test/suite/maria/lock.result
+++ b/mysql-test/suite/maria/lock.result
@@ -1,7 +1,7 @@
drop table if exists t1,t2;
Warnings:
-Note 1051 Unknown table 't1'
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'test.t1'
+Note 1051 Unknown table 'test.t2'
CREATE TABLE t1 (i INT) ENGINE=Aria;
CREATE TABLE t2 (i INT) ENGINE=Aria;
LOCK TABLE t1 WRITE, t2 WRITE;
diff --git a/mysql-test/suite/maria/maria-gis-rtree-dynamic.result b/mysql-test/suite/maria/maria-gis-rtree-dynamic.result
index ee7135c4851..f8487258546 100644
--- a/mysql-test/suite/maria/maria-gis-rtree-dynamic.result
+++ b/mysql-test/suite/maria/maria-gis-rtree-dynamic.result
@@ -713,7 +713,7 @@ count(*)
DROP TABLE t2;
drop table if exists t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1 (a geometry NOT NULL, SPATIAL (a)) row_format=dynamic;
INSERT INTO t1 VALUES (GeomFromText("LINESTRING(100 100, 200 200, 300 300)"));
INSERT INTO t1 VALUES (GeomFromText("LINESTRING(100 100, 200 200, 300 300)"));
diff --git a/mysql-test/suite/maria/maria-gis-rtree-trans.result b/mysql-test/suite/maria/maria-gis-rtree-trans.result
index dba56204172..a8ea6f1d112 100644
--- a/mysql-test/suite/maria/maria-gis-rtree-trans.result
+++ b/mysql-test/suite/maria/maria-gis-rtree-trans.result
@@ -713,7 +713,7 @@ count(*)
DROP TABLE t2;
drop table if exists t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1 (a geometry NOT NULL, SPATIAL (a)) transactional=1 row_format=page;
INSERT INTO t1 VALUES (GeomFromText("LINESTRING(100 100, 200 200, 300 300)"));
INSERT INTO t1 VALUES (GeomFromText("LINESTRING(100 100, 200 200, 300 300)"));
diff --git a/mysql-test/suite/maria/maria-gis-rtree.result b/mysql-test/suite/maria/maria-gis-rtree.result
index 1a7b08169dc..c7c65fac003 100644
--- a/mysql-test/suite/maria/maria-gis-rtree.result
+++ b/mysql-test/suite/maria/maria-gis-rtree.result
@@ -713,7 +713,7 @@ count(*)
DROP TABLE t2;
drop table if exists t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1 (a geometry NOT NULL, SPATIAL (a)) transactional=0 row_format=page;
INSERT INTO t1 VALUES (GeomFromText("LINESTRING(100 100, 200 200, 300 300)"));
INSERT INTO t1 VALUES (GeomFromText("LINESTRING(100 100, 200 200, 300 300)"));
diff --git a/mysql-test/suite/maria/maria-page-checksum.result b/mysql-test/suite/maria/maria-page-checksum.result
index c4d1b71e33a..fbb7ba78b25 100644
--- a/mysql-test/suite/maria/maria-page-checksum.result
+++ b/mysql-test/suite/maria/maria-page-checksum.result
@@ -2,7 +2,7 @@ drop table if exists t1;
select @@global.aria_page_checksum;
@@global.aria_page_checksum
1
-# iteration 1
+# iteration 1a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -20,7 +20,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=0
Page checksums are not used
drop table t1;
-# iteration 2
+# iteration 2a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -38,7 +38,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=0
Page checksums are not used
drop table t1;
-# iteration 3
+# iteration 3a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -56,7 +56,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
Page checksums are used
drop table t1;
-# iteration 4
+# iteration 4a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -74,7 +74,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=0
Page checksums are not used
drop table t1;
-# iteration 5
+# iteration 5a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -92,7 +92,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=0
Page checksums are not used
drop table t1;
-# iteration 6
+# iteration 6a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -110,7 +110,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
Page checksums are used
drop table t1;
-# iteration 7
+# iteration 7a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=0 ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -128,7 +128,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=0
Page checksums are not used
drop table t1;
-# iteration 8
+# iteration 8a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=0 ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -146,7 +146,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=0
Page checksums are not used
drop table t1;
-# iteration 9
+# iteration 9a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=0 ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -164,7 +164,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
Page checksums are used
drop table t1;
-# iteration 10
+# iteration 10a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=0 ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -182,7 +182,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=0
Page checksums are not used
drop table t1;
-# iteration 11
+# iteration 11a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=0 ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -200,7 +200,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=0
Page checksums are not used
drop table t1;
-# iteration 12
+# iteration 12a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=0 ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -218,7 +218,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
Page checksums are used
drop table t1;
-# iteration 13
+# iteration 13a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=1 ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -236,7 +236,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
Page checksums are used
drop table t1;
-# iteration 14
+# iteration 14a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=1 ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -254,7 +254,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=0
Page checksums are not used
drop table t1;
-# iteration 15
+# iteration 15a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=1 ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -272,7 +272,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
Page checksums are used
drop table t1;
-# iteration 16
+# iteration 16a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=1 ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -290,7 +290,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
Page checksums are used
drop table t1;
-# iteration 17
+# iteration 17a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=1 ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -308,7 +308,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=0
Page checksums are not used
drop table t1;
-# iteration 18
+# iteration 18a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=1 ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -326,7 +326,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
Page checksums are used
drop table t1;
-# iteration 19
+# iteration 19a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -344,7 +344,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
Page checksums are used
drop table t1;
-# iteration 20
+# iteration 20a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -362,7 +362,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=0
Page checksums are not used
drop table t1;
-# iteration 21
+# iteration 21a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -380,7 +380,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
Page checksums are used
drop table t1;
-# iteration 22
+# iteration 22a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -398,7 +398,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
Page checksums are used
drop table t1;
-# iteration 23
+# iteration 23a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -416,7 +416,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=0
Page checksums are not used
drop table t1;
-# iteration 24
+# iteration 24a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -434,7 +434,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
Page checksums are used
drop table t1;
-# iteration 25
+# iteration 25a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=0 ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -452,7 +452,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=0
Page checksums are not used
drop table t1;
-# iteration 26
+# iteration 26a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=0 ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -470,7 +470,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=0
Page checksums are not used
drop table t1;
-# iteration 27
+# iteration 27a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=0 ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -488,7 +488,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
Page checksums are used
drop table t1;
-# iteration 28
+# iteration 28a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=0 ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -506,7 +506,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=0
Page checksums are not used
drop table t1;
-# iteration 29
+# iteration 29a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=0 ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -524,7 +524,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=0
Page checksums are not used
drop table t1;
-# iteration 30
+# iteration 30a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=0 ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -542,7 +542,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
Page checksums are used
drop table t1;
-# iteration 31
+# iteration 31a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=1 ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -560,7 +560,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
Page checksums are used
drop table t1;
-# iteration 32
+# iteration 32a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=1 ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -578,7 +578,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=0
Page checksums are not used
drop table t1;
-# iteration 33
+# iteration 33a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=1 ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -596,7 +596,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
Page checksums are used
drop table t1;
-# iteration 34
+# iteration 34a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=1 ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -614,7 +614,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
Page checksums are used
drop table t1;
-# iteration 35
+# iteration 35a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=1 ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -632,7 +632,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=0
Page checksums are not used
drop table t1;
-# iteration 36
+# iteration 36a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=1 ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -650,7 +650,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
Page checksums are used
drop table t1;
-# iteration 1
+# iteration 1b
create table t1(a int) engine=aria ;
show create table t1;
Table Create Table
@@ -666,7 +666,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
Crashsafe: yes
drop table t1;
-# iteration 2
+# iteration 2b
create table t1(a int) engine=aria ;
show create table t1;
Table Create Table
@@ -682,7 +682,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1 TRANSACTIONAL=0
Crashsafe: no
drop table t1;
-# iteration 3
+# iteration 3b
create table t1(a int) engine=aria ;
show create table t1;
Table Create Table
@@ -698,7 +698,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1 TRANSACTIONAL=1
Crashsafe: yes
drop table t1;
-# iteration 4
+# iteration 4b
create table t1(a int) engine=aria ;
show create table t1;
Table Create Table
@@ -714,7 +714,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1
Crashsafe: yes
drop table t1;
-# iteration 5
+# iteration 5b
create table t1(a int) engine=aria ;
show create table t1;
Table Create Table
@@ -730,7 +730,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1 TRANSACTIONAL=0
Crashsafe: no
drop table t1;
-# iteration 6
+# iteration 6b
create table t1(a int) engine=aria ;
show create table t1;
Table Create Table
@@ -746,7 +746,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1 TRANSACTIONAL=1
Crashsafe: yes
drop table t1;
-# iteration 7
+# iteration 7b
create table t1(a int) engine=aria transactional=0 ;
show create table t1;
Table Create Table
@@ -762,7 +762,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1 TRANSACTIONAL=0
Crashsafe: no
drop table t1;
-# iteration 8
+# iteration 8b
create table t1(a int) engine=aria transactional=0 ;
show create table t1;
Table Create Table
@@ -778,7 +778,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1 TRANSACTIONAL=0
Crashsafe: no
drop table t1;
-# iteration 9
+# iteration 9b
create table t1(a int) engine=aria transactional=0 ;
show create table t1;
Table Create Table
@@ -794,7 +794,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1 TRANSACTIONAL=1
Crashsafe: yes
drop table t1;
-# iteration 10
+# iteration 10b
create table t1(a int) engine=aria transactional=0 ;
show create table t1;
Table Create Table
@@ -810,7 +810,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1 TRANSACTIONAL=0
Crashsafe: no
drop table t1;
-# iteration 11
+# iteration 11b
create table t1(a int) engine=aria transactional=0 ;
show create table t1;
Table Create Table
@@ -826,7 +826,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1 TRANSACTIONAL=0
Crashsafe: no
drop table t1;
-# iteration 12
+# iteration 12b
create table t1(a int) engine=aria transactional=0 ;
show create table t1;
Table Create Table
@@ -842,7 +842,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1 TRANSACTIONAL=1
Crashsafe: yes
drop table t1;
-# iteration 13
+# iteration 13b
create table t1(a int) engine=aria transactional=1 ;
show create table t1;
Table Create Table
@@ -858,7 +858,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1 TRANSACTIONAL=1
Crashsafe: yes
drop table t1;
-# iteration 14
+# iteration 14b
create table t1(a int) engine=aria transactional=1 ;
show create table t1;
Table Create Table
@@ -874,7 +874,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1 TRANSACTIONAL=0
Crashsafe: no
drop table t1;
-# iteration 15
+# iteration 15b
create table t1(a int) engine=aria transactional=1 ;
show create table t1;
Table Create Table
@@ -890,7 +890,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1 TRANSACTIONAL=1
Crashsafe: yes
drop table t1;
-# iteration 16
+# iteration 16b
create table t1(a int) engine=aria transactional=1 ;
show create table t1;
Table Create Table
@@ -906,7 +906,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1 TRANSACTIONAL=1
Crashsafe: yes
drop table t1;
-# iteration 17
+# iteration 17b
create table t1(a int) engine=aria transactional=1 ;
show create table t1;
Table Create Table
@@ -922,7 +922,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=Aria DEFAULT CHARSET=latin1 PAGE_CHECKSUM=1 TRANSACTIONAL=0
Crashsafe: no
drop table t1;
-# iteration 18
+# iteration 18b
create table t1(a int) engine=aria transactional=1 ;
show create table t1;
Table Create Table
diff --git a/mysql-test/suite/maria/maria-page-checksum.test b/mysql-test/suite/maria/maria-page-checksum.test
index 8dd68fce245..d35e29634c6 100644
--- a/mysql-test/suite/maria/maria-page-checksum.test
+++ b/mysql-test/suite/maria/maria-page-checksum.test
@@ -19,7 +19,7 @@ select @@global.aria_page_checksum;
# (first value of aria_page_checksum) x (clauses in CREATE TABLE) x
# (second value of aria_page_checksum) x (clauses in ALTER TABLE).
---echo # iteration 1
+--echo # iteration 1a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -48,7 +48,7 @@ perl;
EOF
drop table t1;
---echo # iteration 2
+--echo # iteration 2a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -77,7 +77,7 @@ perl;
EOF
drop table t1;
---echo # iteration 3
+--echo # iteration 3a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -106,7 +106,7 @@ perl;
EOF
drop table t1;
---echo # iteration 4
+--echo # iteration 4a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -135,7 +135,7 @@ perl;
EOF
drop table t1;
---echo # iteration 5
+--echo # iteration 5a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -164,7 +164,7 @@ perl;
EOF
drop table t1;
---echo # iteration 6
+--echo # iteration 6a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -193,7 +193,7 @@ perl;
EOF
drop table t1;
---echo # iteration 7
+--echo # iteration 7a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=0 ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -222,7 +222,7 @@ perl;
EOF
drop table t1;
---echo # iteration 8
+--echo # iteration 8a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=0 ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -251,7 +251,7 @@ perl;
EOF
drop table t1;
---echo # iteration 9
+--echo # iteration 9a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=0 ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -280,7 +280,7 @@ perl;
EOF
drop table t1;
---echo # iteration 10
+--echo # iteration 10a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=0 ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -309,7 +309,7 @@ perl;
EOF
drop table t1;
---echo # iteration 11
+--echo # iteration 11a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=0 ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -338,7 +338,7 @@ perl;
EOF
drop table t1;
---echo # iteration 12
+--echo # iteration 12a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=0 ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -367,7 +367,7 @@ perl;
EOF
drop table t1;
---echo # iteration 13
+--echo # iteration 13a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=1 ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -396,7 +396,7 @@ perl;
EOF
drop table t1;
---echo # iteration 14
+--echo # iteration 14a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=1 ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -425,7 +425,7 @@ perl;
EOF
drop table t1;
---echo # iteration 15
+--echo # iteration 15a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=1 ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -454,7 +454,7 @@ perl;
EOF
drop table t1;
---echo # iteration 16
+--echo # iteration 16a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=1 ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -483,7 +483,7 @@ perl;
EOF
drop table t1;
---echo # iteration 17
+--echo # iteration 17a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=1 ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -512,7 +512,7 @@ perl;
EOF
drop table t1;
---echo # iteration 18
+--echo # iteration 18a
set global aria_page_checksum = 0 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=1 ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -541,7 +541,7 @@ perl;
EOF
drop table t1;
---echo # iteration 19
+--echo # iteration 19a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -570,7 +570,7 @@ perl;
EOF
drop table t1;
---echo # iteration 20
+--echo # iteration 20a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -599,7 +599,7 @@ perl;
EOF
drop table t1;
---echo # iteration 21
+--echo # iteration 21a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -628,7 +628,7 @@ perl;
EOF
drop table t1;
---echo # iteration 22
+--echo # iteration 22a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -657,7 +657,7 @@ perl;
EOF
drop table t1;
---echo # iteration 23
+--echo # iteration 23a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -686,7 +686,7 @@ perl;
EOF
drop table t1;
---echo # iteration 24
+--echo # iteration 24a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -715,7 +715,7 @@ perl;
EOF
drop table t1;
---echo # iteration 25
+--echo # iteration 25a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=0 ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -744,7 +744,7 @@ perl;
EOF
drop table t1;
---echo # iteration 26
+--echo # iteration 26a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=0 ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -773,7 +773,7 @@ perl;
EOF
drop table t1;
---echo # iteration 27
+--echo # iteration 27a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=0 ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -802,7 +802,7 @@ perl;
EOF
drop table t1;
---echo # iteration 28
+--echo # iteration 28a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=0 ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -831,7 +831,7 @@ perl;
EOF
drop table t1;
---echo # iteration 29
+--echo # iteration 29a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=0 ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -860,7 +860,7 @@ perl;
EOF
drop table t1;
---echo # iteration 30
+--echo # iteration 30a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=0 ;
show create table t1 /* expecting PAGE_CHECKSUM=0 */ ;
@@ -889,7 +889,7 @@ perl;
EOF
drop table t1;
---echo # iteration 31
+--echo # iteration 31a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=1 ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -918,7 +918,7 @@ perl;
EOF
drop table t1;
---echo # iteration 32
+--echo # iteration 32a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=1 ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -947,7 +947,7 @@ perl;
EOF
drop table t1;
---echo # iteration 33
+--echo # iteration 33a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=1 ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -976,7 +976,7 @@ perl;
EOF
drop table t1;
---echo # iteration 34
+--echo # iteration 34a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=1 ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -1005,7 +1005,7 @@ perl;
EOF
drop table t1;
---echo # iteration 35
+--echo # iteration 35a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=1 ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -1034,7 +1034,7 @@ perl;
EOF
drop table t1;
---echo # iteration 36
+--echo # iteration 36a
set global aria_page_checksum = 1 ;
create table t1(a int) engine=aria PAGE_CHECKSUM=1 ;
show create table t1 /* expecting PAGE_CHECKSUM=1 */ ;
@@ -1071,7 +1071,7 @@ drop table t1;
# we scan through combinations in the cartesian product of
# (clauses in CREATE TABLE) x (clauses in ALTER TABLE).
---echo # iteration 1
+--echo # iteration 1b
create table t1(a int) engine=aria ;
show create table t1;
--exec $MARIA_CHK -dv $MYSQLD_DATADIR/test/t1 >$MYSQLTEST_VARDIR/tmp/ariachk.txt
@@ -1098,7 +1098,7 @@ EOF
drop table t1;
---echo # iteration 2
+--echo # iteration 2b
create table t1(a int) engine=aria ;
show create table t1;
--exec $MARIA_CHK -dv $MYSQLD_DATADIR/test/t1 >$MYSQLTEST_VARDIR/tmp/ariachk.txt
@@ -1125,7 +1125,7 @@ EOF
drop table t1;
---echo # iteration 3
+--echo # iteration 3b
create table t1(a int) engine=aria ;
show create table t1;
--exec $MARIA_CHK -dv $MYSQLD_DATADIR/test/t1 >$MYSQLTEST_VARDIR/tmp/ariachk.txt
@@ -1152,7 +1152,7 @@ EOF
drop table t1;
---echo # iteration 4
+--echo # iteration 4b
create table t1(a int) engine=aria ;
show create table t1;
--exec $MARIA_CHK -dv $MYSQLD_DATADIR/test/t1 >$MYSQLTEST_VARDIR/tmp/ariachk.txt
@@ -1179,7 +1179,7 @@ EOF
drop table t1;
---echo # iteration 5
+--echo # iteration 5b
create table t1(a int) engine=aria ;
show create table t1;
--exec $MARIA_CHK -dv $MYSQLD_DATADIR/test/t1 >$MYSQLTEST_VARDIR/tmp/ariachk.txt
@@ -1206,7 +1206,7 @@ EOF
drop table t1;
---echo # iteration 6
+--echo # iteration 6b
create table t1(a int) engine=aria ;
show create table t1;
--exec $MARIA_CHK -dv $MYSQLD_DATADIR/test/t1 >$MYSQLTEST_VARDIR/tmp/ariachk.txt
@@ -1233,7 +1233,7 @@ EOF
drop table t1;
---echo # iteration 7
+--echo # iteration 7b
create table t1(a int) engine=aria transactional=0 ;
show create table t1;
--exec $MARIA_CHK -dv $MYSQLD_DATADIR/test/t1 >$MYSQLTEST_VARDIR/tmp/ariachk.txt
@@ -1260,7 +1260,7 @@ EOF
drop table t1;
---echo # iteration 8
+--echo # iteration 8b
create table t1(a int) engine=aria transactional=0 ;
show create table t1;
--exec $MARIA_CHK -dv $MYSQLD_DATADIR/test/t1 >$MYSQLTEST_VARDIR/tmp/ariachk.txt
@@ -1287,7 +1287,7 @@ EOF
drop table t1;
---echo # iteration 9
+--echo # iteration 9b
create table t1(a int) engine=aria transactional=0 ;
show create table t1;
--exec $MARIA_CHK -dv $MYSQLD_DATADIR/test/t1 >$MYSQLTEST_VARDIR/tmp/ariachk.txt
@@ -1314,7 +1314,7 @@ EOF
drop table t1;
---echo # iteration 10
+--echo # iteration 10b
create table t1(a int) engine=aria transactional=0 ;
show create table t1;
--exec $MARIA_CHK -dv $MYSQLD_DATADIR/test/t1 >$MYSQLTEST_VARDIR/tmp/ariachk.txt
@@ -1341,7 +1341,7 @@ EOF
drop table t1;
---echo # iteration 11
+--echo # iteration 11b
create table t1(a int) engine=aria transactional=0 ;
show create table t1;
--exec $MARIA_CHK -dv $MYSQLD_DATADIR/test/t1 >$MYSQLTEST_VARDIR/tmp/ariachk.txt
@@ -1368,7 +1368,7 @@ EOF
drop table t1;
---echo # iteration 12
+--echo # iteration 12b
create table t1(a int) engine=aria transactional=0 ;
show create table t1;
--exec $MARIA_CHK -dv $MYSQLD_DATADIR/test/t1 >$MYSQLTEST_VARDIR/tmp/ariachk.txt
@@ -1395,7 +1395,7 @@ EOF
drop table t1;
---echo # iteration 13
+--echo # iteration 13b
create table t1(a int) engine=aria transactional=1 ;
show create table t1;
--exec $MARIA_CHK -dv $MYSQLD_DATADIR/test/t1 >$MYSQLTEST_VARDIR/tmp/ariachk.txt
@@ -1422,7 +1422,7 @@ EOF
drop table t1;
---echo # iteration 14
+--echo # iteration 14b
create table t1(a int) engine=aria transactional=1 ;
show create table t1;
--exec $MARIA_CHK -dv $MYSQLD_DATADIR/test/t1 >$MYSQLTEST_VARDIR/tmp/ariachk.txt
@@ -1449,7 +1449,7 @@ EOF
drop table t1;
---echo # iteration 15
+--echo # iteration 15b
create table t1(a int) engine=aria transactional=1 ;
show create table t1;
--exec $MARIA_CHK -dv $MYSQLD_DATADIR/test/t1 >$MYSQLTEST_VARDIR/tmp/ariachk.txt
@@ -1476,7 +1476,7 @@ EOF
drop table t1;
---echo # iteration 16
+--echo # iteration 16b
create table t1(a int) engine=aria transactional=1 ;
show create table t1;
--exec $MARIA_CHK -dv $MYSQLD_DATADIR/test/t1 >$MYSQLTEST_VARDIR/tmp/ariachk.txt
@@ -1503,7 +1503,7 @@ EOF
drop table t1;
---echo # iteration 17
+--echo # iteration 17b
create table t1(a int) engine=aria transactional=1 ;
show create table t1;
--exec $MARIA_CHK -dv $MYSQLD_DATADIR/test/t1 >$MYSQLTEST_VARDIR/tmp/ariachk.txt
@@ -1530,7 +1530,7 @@ EOF
drop table t1;
---echo # iteration 18
+--echo # iteration 18b
create table t1(a int) engine=aria transactional=1 ;
show create table t1;
--exec $MARIA_CHK -dv $MYSQLD_DATADIR/test/t1 >$MYSQLTEST_VARDIR/tmp/ariachk.txt
diff --git a/mysql-test/suite/maria/maria-partitioning.result b/mysql-test/suite/maria/maria-partitioning.result
index bcb88626ff7..ad5de7952c7 100644
--- a/mysql-test/suite/maria/maria-partitioning.result
+++ b/mysql-test/suite/maria/maria-partitioning.result
@@ -2,8 +2,8 @@ set global storage_engine=aria;
set session storage_engine=aria;
DROP TABLE if exists t1,t2;
Warnings:
-Note 1051 Unknown table 't1'
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'test.t1'
+Note 1051 Unknown table 'test.t2'
create table t2(a blob) engine=aria;
create table t1(a int primary key) engine=aria;
insert into t2 values ('foo'),('bar');
diff --git a/mysql-test/suite/maria/maria-ucs2.result b/mysql-test/suite/maria/maria-ucs2.result
index e7258f21d4f..fab640f703a 100644
--- a/mysql-test/suite/maria/maria-ucs2.result
+++ b/mysql-test/suite/maria/maria-ucs2.result
@@ -17,7 +17,6 @@ test.t1 check status OK
ALTER TABLE t1 MODIFY a VARCHAR(800) CHARSET `ucs2`;
Warnings:
Warning 1071 Specified key was too long; max key length is 1000 bytes
-Warning 1071 Specified key was too long; max key length is 1000 bytes
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check status OK
diff --git a/mysql-test/suite/maria/maria3.result b/mysql-test/suite/maria/maria3.result
index 2aad2f5cc75..021cc8fc357 100644
--- a/mysql-test/suite/maria/maria3.result
+++ b/mysql-test/suite/maria/maria3.result
@@ -460,7 +460,7 @@ id f1
1 test1
2 test2
drop table t1;
-SET SQL_MODE = 'TRADITIONAL';
+SET SQL_MODE = '';
create table t1 (n int not null primary key auto_increment, c char(1), unique(c));
insert into t1 values(100, "a");
insert into t1 values(300, "b");
diff --git a/mysql-test/suite/maria/maria3.test b/mysql-test/suite/maria/maria3.test
index f1d95a15ba5..b4e3e30d63f 100644
--- a/mysql-test/suite/maria/maria3.test
+++ b/mysql-test/suite/maria/maria3.test
@@ -364,7 +364,7 @@ INSERT IGNORE INTO t1 (f1) VALUES ("test1");
INSERT IGNORE INTO t1 (f1) VALUES ("test2");
SELECT * FROM t1;
drop table t1;
-SET SQL_MODE = 'TRADITIONAL';
+SET SQL_MODE = '';
create table t1 (n int not null primary key auto_increment, c char(1), unique(c));
insert into t1 values(100, "a");
diff --git a/mysql-test/suite/maria/max_length.result b/mysql-test/suite/maria/max_length.result
index 6db58622698..049b92eafe5 100644
--- a/mysql-test/suite/maria/max_length.result
+++ b/mysql-test/suite/maria/max_length.result
@@ -1,7 +1,7 @@
drop table if exists t1,t2;
Warnings:
-Note 1051 Unknown table 't1'
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'test.t1'
+Note 1051 Unknown table 'test.t2'
create table t1 (id int(10) unsigned not null auto_increment primary key, v varchar(1000), b blob) row_format=page max_rows=2 engine=aria;
create table t2 (id int(10) unsigned not null auto_increment primary key, v varchar(1000), b blob) row_format=page max_rows=20000000 engine=aria;
lock tables t1 write,t2 write;
diff --git a/mysql-test/suite/maria/small_blocksize.result b/mysql-test/suite/maria/small_blocksize.result
index f418a1f92ef..940a718d34e 100644
--- a/mysql-test/suite/maria/small_blocksize.result
+++ b/mysql-test/suite/maria/small_blocksize.result
@@ -1,6 +1,6 @@
DROP TABLE if exists t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1 (col_longtext_ucs2 longtext, col_longtext_utf8 longtext, col_varchar_255_ucs2_key varchar(255), col_set_utf8 set ('a','b'), col_char_255_ucs2 char(255), col_char_255_ucs2_key char(255), col_enum_ucs2 enum ('a','b'), col_varchar_255_ucs2 varchar(255), col_longtext_ucs2_key longtext, col_longtext_utf8_key longtext, col_enum_utf8 enum ('a','b'), col_varchar_255_utf8_key varchar(1024), col_varchar_255_utf8 varchar(255), col_enum_ucs2_key enum ('a','b'), col_enum_utf8_key enum ('a','b'), col_set_utf8_key set ('a','b'), col_char_255_utf8 char(255), pk integer auto_increment, col_set_ucs2_key set ('a','b'), col_char_255_utf8_key char(255), col_set_ucs2 set ('a','b'), primary key (pk)) ENGINE=aria;
INSERT INTO t1 ( col_char_255_utf8, col_varchar_255_utf8_key, col_longtext_utf8_key ) VALUES ( 'lggnqojgqectqlkvskffihliqcwoakzzzjvhkqlwjybkngdbubskflpmzegdrk', REPEAT( 'a', 627 ), 'mlggnqojgqectqlkvskffihliqcwoakzzzjvhkqlwjybkngdbubskflpmzegdrklnipcmzbtwdqfnyinqfohgtiwmvfpbuslgobjhslxnaybcyebhsrlipnuvalhmvhlwbwujtvjsdrbyapfzprnxfgtrukwhywtkaoupsaogxsjxhqjkidvnpeytjgndtnrrbm' );
UPDATE t1 SET col_varchar_255_utf8 = REPEAT('a', 197 );
diff --git a/mysql-test/suite/optimizer_unfixed_bugs/r/bug41029.result b/mysql-test/suite/optimizer_unfixed_bugs/r/bug41029.result
index 6b3025af842..5973c8ade46 100644
--- a/mysql-test/suite/optimizer_unfixed_bugs/r/bug41029.result
+++ b/mysql-test/suite/optimizer_unfixed_bugs/r/bug41029.result
@@ -7,7 +7,7 @@ set autocommit=0;
use test;
drop table if exists t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
create table t1 (dummy int primary key, a int unique, b int) engine=innodb;
insert into t1 values(1,1,1),(3,3,3),(5,5,5);
commit;
diff --git a/mysql-test/suite/optimizer_unfixed_bugs/r/bug41996.result b/mysql-test/suite/optimizer_unfixed_bugs/r/bug41996.result
index bad32010c8c..31e39d4421d 100644
--- a/mysql-test/suite/optimizer_unfixed_bugs/r/bug41996.result
+++ b/mysql-test/suite/optimizer_unfixed_bugs/r/bug41996.result
@@ -1,7 +1,7 @@
set session debug_dbug="+d,optimizer_innodb_icp";
drop table if exists `t1`;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
create table `t1` (`c` bigint, key(`c`),`a` int)engine=innodb;
insert into `t1` values(2,2);
delete `t1` from `t1` `a`, `t1` where `a`.`a`=`t1`.`c` ;
diff --git a/mysql-test/suite/oqgraph/r/basic.result b/mysql-test/suite/oqgraph/r/basic.result
index e90659c0986..19e48db39fc 100644
--- a/mysql-test/suite/oqgraph/r/basic.result
+++ b/mysql-test/suite/oqgraph/r/basic.result
@@ -1,6 +1,6 @@
drop table if exists graph;
Warnings:
-Note 1051 Unknown table 'graph'
+Note 1051 Unknown table 'test.graph'
CREATE TABLE graph (
latch SMALLINT UNSIGNED NULL,
origid BIGINT UNSIGNED NULL,
diff --git a/mysql-test/suite/parts/inc/partition.pre b/mysql-test/suite/parts/inc/partition.pre
index f9b361c787c..f82916ae631 100644
--- a/mysql-test/suite/parts/inc/partition.pre
+++ b/mysql-test/suite/parts/inc/partition.pre
@@ -23,13 +23,13 @@
################################################################################
# Set the session storage engine
-eval SET @@session.storage_engine = $engine;
+eval SET @@session.default_storage_engine = $engine;
##### Disabled/affected testcases, because of open bugs #####
# --echo
# --echo #------------------------------------------------------------------------
# --echo # There are several testcases disabled because of the open bugs
-# if (`SELECT @@session.storage_engine IN('ndbcluster')`)
+# if (`SELECT @@session.default_storage_engine IN('ndbcluster')`)
# {
# --echo # #18730
# }
@@ -135,7 +135,7 @@ f_charbig VARCHAR(1000);
# in partition_methods[1|2].inc and partition_alter_1[1|3].inc
# when $sub_part_no is set to >= 3.
let $sub_part_no= 3;
-if (`SELECT @@session.storage_engine = 'ndbcluster'`)
+if (`SELECT @@session.default_storage_engine = 'ndbcluster'`)
{
let $sub_part_no= 2;
}
@@ -318,7 +318,7 @@ if (0)
# --source inc/have_partition.inc
# b) Engine specific settings and requirements
# $do_pk_tests, $MAX_VALUE, $engine
-# SET SESSION storage_engine
+# SET SESSION default_storage_engine
# $engine_other
# c) Generate the prerequisites ($variables, @variables, tables) needed
# via
diff --git a/mysql-test/suite/parts/inc/partition_alter3.inc b/mysql-test/suite/parts/inc/partition_alter3.inc
index 1fad361b371..395f93f44f6 100644
--- a/mysql-test/suite/parts/inc/partition_alter3.inc
+++ b/mysql-test/suite/parts/inc/partition_alter3.inc
@@ -115,6 +115,7 @@ ALTER TABLE t1 REMOVE PARTITIONING;
--source suite/parts/inc/partition_check_read1.inc
#
--echo # 1.2.7 Remove partitioning from not partitioned table --> ????
+--error ER_PARTITION_MGMT_ON_NONPARTITIONED
ALTER TABLE t1 REMOVE PARTITIONING;
DROP TABLE t1;
--source suite/parts/inc/partition_check_drop.inc
@@ -193,6 +194,7 @@ ALTER TABLE t1 REMOVE PARTITIONING;
--source suite/parts/inc/partition_check_read2.inc
#
--echo # 2.2.7 Remove partitioning from not partitioned table --> ????
+--error ER_PARTITION_MGMT_ON_NONPARTITIONED
ALTER TABLE t1 REMOVE PARTITIONING;
DROP TABLE t1;
--source suite/parts/inc/partition_check_drop.inc
diff --git a/mysql-test/suite/parts/inc/partition_crash_exchange.inc b/mysql-test/suite/parts/inc/partition_crash_exchange.inc
new file mode 100644
index 00000000000..399cff8f376
--- /dev/null
+++ b/mysql-test/suite/parts/inc/partition_crash_exchange.inc
@@ -0,0 +1,29 @@
+# To be used with WL#4445: EXCHANGE PARTITION WITH TABLE.
+SET SESSION debug_dbug="+d,exchange_partition_abort_1";
+--source suite/parts/inc/partition_crash_t2.inc
+SET SESSION debug_dbug="-d,exchange_partition_abort_1";
+SET SESSION debug_dbug="+d,exchange_partition_abort_2";
+--source suite/parts/inc/partition_crash_t2.inc
+SET SESSION debug_dbug="-d,exchange_partition_abort_2";
+SET SESSION debug_dbug="+d,exchange_partition_abort_3";
+--source suite/parts/inc/partition_crash_t2.inc
+SET SESSION debug_dbug="-d,exchange_partition_abort_3";
+SET SESSION debug_dbug="+d,exchange_partition_abort_4";
+--source suite/parts/inc/partition_crash_t2.inc
+SET SESSION debug_dbug="-d,exchange_partition_abort_4";
+SET SESSION debug_dbug="+d,exchange_partition_abort_5";
+--source suite/parts/inc/partition_crash_t2.inc
+SET SESSION debug_dbug="-d,exchange_partition_abort_5";
+SET SESSION debug_dbug="+d,exchange_partition_abort_6";
+--source suite/parts/inc/partition_crash_t2.inc
+SET SESSION debug_dbug="-d,exchange_partition_abort_6";
+SET SESSION debug_dbug="+d,exchange_partition_abort_7";
+--source suite/parts/inc/partition_crash_t2.inc
+SET SESSION debug_dbug="-d,exchange_partition_abort_7";
+SET SESSION debug_dbug="+d,exchange_partition_abort_8";
+--source suite/parts/inc/partition_crash_t2.inc
+SET SESSION debug_dbug="-d,exchange_partition_abort_8";
+SET SESSION debug_dbug="+d,exchange_partition_abort_9";
+--source suite/parts/inc/partition_crash_t2.inc
+SET SESSION debug_dbug="-d,exchange_partition_abort_9";
+
diff --git a/mysql-test/suite/parts/inc/partition_crash_t2.inc b/mysql-test/suite/parts/inc/partition_crash_t2.inc
new file mode 100644
index 00000000000..fd88cc60a8d
--- /dev/null
+++ b/mysql-test/suite/parts/inc/partition_crash_t2.inc
@@ -0,0 +1,12 @@
+# Include file to extend partition_crash with a second table.
+# To be used with WL#4445: EXCHANGE PARTITION WITH TABLE.
+--eval $create_statement2
+--eval $insert_statement2
+SHOW CREATE TABLE t2;
+--sorted_result
+SELECT * FROM t2;
+--source suite/parts/inc/partition_crash.inc
+SHOW CREATE TABLE t2;
+--sorted_result
+SELECT * FROM t2;
+DROP TABLE t2;
diff --git a/mysql-test/suite/parts/inc/partition_fail_exchange.inc b/mysql-test/suite/parts/inc/partition_fail_exchange.inc
new file mode 100644
index 00000000000..cc8d76bedfe
--- /dev/null
+++ b/mysql-test/suite/parts/inc/partition_fail_exchange.inc
@@ -0,0 +1,27 @@
+SET SESSION debug_dbug="+d,exchange_partition_fail_1";
+--source suite/parts/inc/partition_fail_t2.inc
+SET SESSION debug_dbug="-d,exchange_partition_fail_1";
+SET SESSION debug_dbug="+d,exchange_partition_fail_2";
+--source suite/parts/inc/partition_fail_t2.inc
+SET SESSION debug_dbug="-d,exchange_partition_fail_2";
+SET SESSION debug_dbug="+d,exchange_partition_fail_3";
+--source suite/parts/inc/partition_fail_t2.inc
+SET SESSION debug_dbug="-d,exchange_partition_fail_3";
+SET SESSION debug_dbug="+d,exchange_partition_fail_4";
+--source suite/parts/inc/partition_fail_t2.inc
+SET SESSION debug_dbug="-d,exchange_partition_fail_4";
+SET SESSION debug_dbug="+d,exchange_partition_fail_5";
+--source suite/parts/inc/partition_fail_t2.inc
+SET SESSION debug_dbug="-d,exchange_partition_fail_5";
+SET SESSION debug_dbug="+d,exchange_partition_fail_6";
+--source suite/parts/inc/partition_fail_t2.inc
+SET SESSION debug_dbug="-d,exchange_partition_fail_6";
+SET SESSION debug_dbug="+d,exchange_partition_fail_7";
+--source suite/parts/inc/partition_fail_t2.inc
+SET SESSION debug_dbug="-d,exchange_partition_fail_7";
+SET SESSION debug_dbug="+d,exchange_partition_fail_8";
+--source suite/parts/inc/partition_fail_t2.inc
+SET SESSION debug_dbug="-d,exchange_partition_fail_8";
+SET SESSION debug_dbug="+d,exchange_partition_fail_9";
+--source suite/parts/inc/partition_fail_t2.inc
+SET SESSION debug_dbug="-d,exchange_partition_fail_9";
diff --git a/mysql-test/suite/parts/inc/partition_fail_t2.inc b/mysql-test/suite/parts/inc/partition_fail_t2.inc
new file mode 100644
index 00000000000..3904ec8662c
--- /dev/null
+++ b/mysql-test/suite/parts/inc/partition_fail_t2.inc
@@ -0,0 +1,31 @@
+# Include file to to test failure with error injection.
+# To be used with WL#4445: EXCHANGE PARTITION WITH TABLE.
+--eval $create_statement2
+--eval $insert_statement2
+SHOW CREATE TABLE t2;
+--sorted_result
+SELECT * FROM t2;
+--eval $create_statement
+--eval $insert_statement
+--echo # State before failure
+--replace_result #p# #P#
+--list_files $DATADIR/test
+SHOW CREATE TABLE t1;
+--sorted_result
+SELECT * FROM t1;
+# accept all errors
+--disable_abort_on_error
+--replace_regex /#sqlx-[0-9a-f_]*/#sqlx-nnnn_nnnn/i
+--eval $fail_statement
+--enable_abort_on_error
+--echo # State after failure
+--replace_result #p# #P#
+--list_files $DATADIR/test
+SHOW CREATE TABLE t1;
+--sorted_result
+SELECT * FROM t1;
+DROP TABLE t1;
+SHOW CREATE TABLE t2;
+--sorted_result
+SELECT * FROM t2;
+DROP TABLE t2;
diff --git a/mysql-test/suite/parts/r/part_ctype_utf32.result b/mysql-test/suite/parts/r/part_ctype_utf32.result
index 5d52a8eace4..4667fe1035f 100644
--- a/mysql-test/suite/parts/r/part_ctype_utf32.result
+++ b/mysql-test/suite/parts/r/part_ctype_utf32.result
@@ -3,7 +3,7 @@
#
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1 (
a enum('a') CHARACTER SET utf32 COLLATE utf32_spanish2_ci
) ENGINE=MYISAM PARTITION BY KEY(a) PARTITIONS 2;
diff --git a/mysql-test/suite/parts/r/partition_alter1_1_2_innodb.result b/mysql-test/suite/parts/r/partition_alter1_1_2_innodb.result
index 3ef62f2b050..00ef7527059 100644
--- a/mysql-test/suite/parts/r/partition_alter1_1_2_innodb.result
+++ b/mysql-test/suite/parts/r/partition_alter1_1_2_innodb.result
@@ -1,5 +1,5 @@
SET @max_row = 20;
-SET @@session.storage_engine = 'InnoDB';
+SET @@session.default_storage_engine = 'InnoDB';
#------------------------------------------------------------------------
# 0. Setting of auxiliary variables + Creation of an auxiliary tables
diff --git a/mysql-test/suite/parts/r/partition_alter1_1_2_myisam.result b/mysql-test/suite/parts/r/partition_alter1_1_2_myisam.result
index 887ac403cdb..bc737289895 100644
--- a/mysql-test/suite/parts/r/partition_alter1_1_2_myisam.result
+++ b/mysql-test/suite/parts/r/partition_alter1_1_2_myisam.result
@@ -1,5 +1,5 @@
SET @max_row = 20;
-SET @@session.storage_engine = 'MyISAM';
+SET @@session.default_storage_engine = 'MyISAM';
#------------------------------------------------------------------------
# 0. Setting of auxiliary variables + Creation of an auxiliary tables
diff --git a/mysql-test/suite/parts/r/partition_alter1_1_innodb.result b/mysql-test/suite/parts/r/partition_alter1_1_innodb.result
index 66d84768b5e..0a7b284a15c 100644
--- a/mysql-test/suite/parts/r/partition_alter1_1_innodb.result
+++ b/mysql-test/suite/parts/r/partition_alter1_1_innodb.result
@@ -1,5 +1,5 @@
SET @max_row = 20;
-SET @@session.storage_engine = 'InnoDB';
+SET @@session.default_storage_engine = 'InnoDB';
#------------------------------------------------------------------------
# 0. Setting of auxiliary variables + Creation of an auxiliary tables
diff --git a/mysql-test/suite/parts/r/partition_alter1_1_myisam.result b/mysql-test/suite/parts/r/partition_alter1_1_myisam.result
index 9f616d04df3..26450b28620 100644
--- a/mysql-test/suite/parts/r/partition_alter1_1_myisam.result
+++ b/mysql-test/suite/parts/r/partition_alter1_1_myisam.result
@@ -1,5 +1,5 @@
SET @max_row = 20;
-SET @@session.storage_engine = 'MyISAM';
+SET @@session.default_storage_engine = 'MyISAM';
#------------------------------------------------------------------------
# 0. Setting of auxiliary variables + Creation of an auxiliary tables
diff --git a/mysql-test/suite/parts/r/partition_alter1_2_innodb.result b/mysql-test/suite/parts/r/partition_alter1_2_innodb.result
index f1a1aeecb66..22d24bd50fe 100644
--- a/mysql-test/suite/parts/r/partition_alter1_2_innodb.result
+++ b/mysql-test/suite/parts/r/partition_alter1_2_innodb.result
@@ -1,5 +1,5 @@
SET @max_row = 20;
-SET @@session.storage_engine = 'InnoDB';
+SET @@session.default_storage_engine = 'InnoDB';
#------------------------------------------------------------------------
# 0. Setting of auxiliary variables + Creation of an auxiliary tables
diff --git a/mysql-test/suite/parts/r/partition_alter1_2_myisam.result b/mysql-test/suite/parts/r/partition_alter1_2_myisam.result
index 087262f5f89..72545e89bae 100644
--- a/mysql-test/suite/parts/r/partition_alter1_2_myisam.result
+++ b/mysql-test/suite/parts/r/partition_alter1_2_myisam.result
@@ -1,5 +1,5 @@
SET @max_row = 20;
-SET @@session.storage_engine = 'MyISAM';
+SET @@session.default_storage_engine = 'MyISAM';
#------------------------------------------------------------------------
# 0. Setting of auxiliary variables + Creation of an auxiliary tables
diff --git a/mysql-test/suite/parts/r/partition_alter2_1_1_innodb.result b/mysql-test/suite/parts/r/partition_alter2_1_1_innodb.result
index 314044044bb..f3d5314643b 100644
--- a/mysql-test/suite/parts/r/partition_alter2_1_1_innodb.result
+++ b/mysql-test/suite/parts/r/partition_alter2_1_1_innodb.result
@@ -1,5 +1,5 @@
SET @max_row = 20;
-SET @@session.storage_engine = 'InnoDB';
+SET @@session.default_storage_engine = 'InnoDB';
#------------------------------------------------------------------------
# 0. Setting of auxiliary variables + Creation of an auxiliary tables
diff --git a/mysql-test/suite/parts/r/partition_alter2_1_2_innodb.result b/mysql-test/suite/parts/r/partition_alter2_1_2_innodb.result
index e824cf45ce9..eb662109cea 100644
--- a/mysql-test/suite/parts/r/partition_alter2_1_2_innodb.result
+++ b/mysql-test/suite/parts/r/partition_alter2_1_2_innodb.result
@@ -1,5 +1,5 @@
SET @max_row = 20;
-SET @@session.storage_engine = 'InnoDB';
+SET @@session.default_storage_engine = 'InnoDB';
#------------------------------------------------------------------------
# 0. Setting of auxiliary variables + Creation of an auxiliary tables
diff --git a/mysql-test/suite/parts/r/partition_alter2_1_maria.result b/mysql-test/suite/parts/r/partition_alter2_1_maria.result
index 3dc6bae975a..0e7c1818fbc 100644
--- a/mysql-test/suite/parts/r/partition_alter2_1_maria.result
+++ b/mysql-test/suite/parts/r/partition_alter2_1_maria.result
@@ -1,5 +1,5 @@
SET @max_row = 20;
-SET @@session.storage_engine = 'Aria';
+SET @@session.default_storage_engine = 'Aria';
#------------------------------------------------------------------------
# 0. Setting of auxiliary variables + Creation of an auxiliary tables
diff --git a/mysql-test/suite/parts/r/partition_alter2_1_myisam.result b/mysql-test/suite/parts/r/partition_alter2_1_myisam.result
index f2a13497159..c3e1b18841c 100644
--- a/mysql-test/suite/parts/r/partition_alter2_1_myisam.result
+++ b/mysql-test/suite/parts/r/partition_alter2_1_myisam.result
@@ -1,5 +1,5 @@
SET @max_row = 20;
-SET @@session.storage_engine = 'MyISAM';
+SET @@session.default_storage_engine = 'MyISAM';
#------------------------------------------------------------------------
# 0. Setting of auxiliary variables + Creation of an auxiliary tables
diff --git a/mysql-test/suite/parts/r/partition_alter2_2_1_innodb.result b/mysql-test/suite/parts/r/partition_alter2_2_1_innodb.result
index faba21b41f7..1f4642f63ec 100644
--- a/mysql-test/suite/parts/r/partition_alter2_2_1_innodb.result
+++ b/mysql-test/suite/parts/r/partition_alter2_2_1_innodb.result
@@ -1,5 +1,5 @@
SET @max_row = 20;
-SET @@session.storage_engine = 'InnoDB';
+SET @@session.default_storage_engine = 'InnoDB';
#------------------------------------------------------------------------
# 0. Setting of auxiliary variables + Creation of an auxiliary tables
diff --git a/mysql-test/suite/parts/r/partition_alter2_2_2_innodb.result b/mysql-test/suite/parts/r/partition_alter2_2_2_innodb.result
index 2557cd5a219..0db5bf79c4d 100644
--- a/mysql-test/suite/parts/r/partition_alter2_2_2_innodb.result
+++ b/mysql-test/suite/parts/r/partition_alter2_2_2_innodb.result
@@ -1,5 +1,5 @@
SET @max_row = 20;
-SET @@session.storage_engine = 'InnoDB';
+SET @@session.default_storage_engine = 'InnoDB';
#------------------------------------------------------------------------
# 0. Setting of auxiliary variables + Creation of an auxiliary tables
diff --git a/mysql-test/suite/parts/r/partition_alter2_2_maria.result b/mysql-test/suite/parts/r/partition_alter2_2_maria.result
index b5d45539036..2bb3683ad73 100644
--- a/mysql-test/suite/parts/r/partition_alter2_2_maria.result
+++ b/mysql-test/suite/parts/r/partition_alter2_2_maria.result
@@ -1,5 +1,5 @@
SET @max_row = 20;
-SET @@session.storage_engine = 'Aria';
+SET @@session.default_storage_engine = 'Aria';
#------------------------------------------------------------------------
# 0. Setting of auxiliary variables + Creation of an auxiliary tables
diff --git a/mysql-test/suite/parts/r/partition_alter2_2_myisam.result b/mysql-test/suite/parts/r/partition_alter2_2_myisam.result
index 70e869f48fb..8d8e77ebbd3 100644
--- a/mysql-test/suite/parts/r/partition_alter2_2_myisam.result
+++ b/mysql-test/suite/parts/r/partition_alter2_2_myisam.result
@@ -1,5 +1,5 @@
SET @max_row = 20;
-SET @@session.storage_engine = 'MyISAM';
+SET @@session.default_storage_engine = 'MyISAM';
#------------------------------------------------------------------------
# 0. Setting of auxiliary variables + Creation of an auxiliary tables
diff --git a/mysql-test/suite/parts/r/partition_alter3_innodb.result b/mysql-test/suite/parts/r/partition_alter3_innodb.result
index ed41ede378d..fd67547f55b 100644
--- a/mysql-test/suite/parts/r/partition_alter3_innodb.result
+++ b/mysql-test/suite/parts/r/partition_alter3_innodb.result
@@ -1,5 +1,5 @@
SET @max_row = 20;
-SET @@session.storage_engine = 'InnoDB';
+SET @@session.default_storage_engine = 'InnoDB';
#------------------------------------------------------------------------
# 0. Setting of auxiliary variables + Creation of an auxiliary tables
@@ -57,6 +57,7 @@ t1 CREATE TABLE `t1` (
`f_varchar` varchar(30) DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1
t1.frm
+t1.ibd
EXPLAIN PARTITIONS SELECT COUNT(*) FROM t1 WHERE f_date = '1000-02-10';
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 NULL ALL NULL NULL NULL NULL 20 Using where
@@ -78,6 +79,7 @@ t1 CREATE TABLE `t1` (
`f_varchar` varchar(30) DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1
/*!50100 PARTITION BY HASH (YEAR(f_date)) */
+t1#P#p0.ibd
t1.frm
t1.par
EXPLAIN PARTITIONS SELECT COUNT(*) FROM t1 WHERE f_date = '1000-02-10';
@@ -96,6 +98,7 @@ t1 CREATE TABLE `t1` (
`f_varchar` varchar(30) DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1
/*!50100 PARTITION BY HASH (DAYOFYEAR(f_date)) */
+t1#P#p0.ibd
t1.frm
t1.par
EXPLAIN PARTITIONS SELECT COUNT(*) FROM t1 WHERE f_date = '1000-02-10';
@@ -112,6 +115,7 @@ t1 CREATE TABLE `t1` (
`f_varchar` varchar(30) DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1
/*!50100 PARTITION BY HASH (YEAR(f_date)) */
+t1#P#p0.ibd
t1.frm
t1.par
EXPLAIN PARTITIONS SELECT COUNT(*) FROM t1 WHERE f_date = '1000-02-10';
@@ -137,6 +141,9 @@ t1 CREATE TABLE `t1` (
(PARTITION p0 ENGINE = InnoDB,
PARTITION part1 ENGINE = InnoDB,
PARTITION part7 ENGINE = InnoDB) */
+t1#P#p0.ibd
+t1#P#part1.ibd
+t1#P#part7.ibd
t1.frm
t1.par
EXPLAIN PARTITIONS SELECT COUNT(*) FROM t1 WHERE f_date = '1000-02-10';
@@ -161,6 +168,10 @@ t1 CREATE TABLE `t1` (
PARTITION part1 ENGINE = InnoDB,
PARTITION part7 ENGINE = InnoDB,
PARTITION part2 ENGINE = InnoDB) */
+t1#P#p0.ibd
+t1#P#part1.ibd
+t1#P#part2.ibd
+t1#P#part7.ibd
t1.frm
t1.par
EXPLAIN PARTITIONS SELECT COUNT(*) FROM t1 WHERE f_date = '1000-02-10';
@@ -186,6 +197,14 @@ t1 CREATE TABLE `t1` (
PARTITION p5 ENGINE = InnoDB,
PARTITION p6 ENGINE = InnoDB,
PARTITION p7 ENGINE = InnoDB) */
+t1#P#p0.ibd
+t1#P#p4.ibd
+t1#P#p5.ibd
+t1#P#p6.ibd
+t1#P#p7.ibd
+t1#P#part1.ibd
+t1#P#part2.ibd
+t1#P#part7.ibd
t1.frm
t1.par
EXPLAIN PARTITIONS SELECT COUNT(*) FROM t1 WHERE f_date = '1000-02-10';
@@ -222,6 +241,13 @@ t1 CREATE TABLE `t1` (
PARTITION p4 ENGINE = InnoDB,
PARTITION p5 ENGINE = InnoDB,
PARTITION p6 ENGINE = InnoDB) */
+t1#P#p0.ibd
+t1#P#p4.ibd
+t1#P#p5.ibd
+t1#P#p6.ibd
+t1#P#part1.ibd
+t1#P#part2.ibd
+t1#P#part7.ibd
t1.frm
t1.par
EXPLAIN PARTITIONS SELECT COUNT(*) FROM t1 WHERE f_date = '1000-02-10';
@@ -244,6 +270,12 @@ t1 CREATE TABLE `t1` (
PARTITION part2 ENGINE = InnoDB,
PARTITION p4 ENGINE = InnoDB,
PARTITION p5 ENGINE = InnoDB) */
+t1#P#p0.ibd
+t1#P#p4.ibd
+t1#P#p5.ibd
+t1#P#part1.ibd
+t1#P#part2.ibd
+t1#P#part7.ibd
t1.frm
t1.par
EXPLAIN PARTITIONS SELECT COUNT(*) FROM t1 WHERE f_date = '1000-02-10';
@@ -265,6 +297,11 @@ t1 CREATE TABLE `t1` (
PARTITION part7 ENGINE = InnoDB,
PARTITION part2 ENGINE = InnoDB,
PARTITION p4 ENGINE = InnoDB) */
+t1#P#p0.ibd
+t1#P#p4.ibd
+t1#P#part1.ibd
+t1#P#part2.ibd
+t1#P#part7.ibd
t1.frm
t1.par
EXPLAIN PARTITIONS SELECT COUNT(*) FROM t1 WHERE f_date = '1000-02-10';
@@ -285,6 +322,10 @@ t1 CREATE TABLE `t1` (
PARTITION part1 ENGINE = InnoDB,
PARTITION part7 ENGINE = InnoDB,
PARTITION part2 ENGINE = InnoDB) */
+t1#P#p0.ibd
+t1#P#part1.ibd
+t1#P#part2.ibd
+t1#P#part7.ibd
t1.frm
t1.par
EXPLAIN PARTITIONS SELECT COUNT(*) FROM t1 WHERE f_date = '1000-02-10';
@@ -304,6 +345,9 @@ t1 CREATE TABLE `t1` (
(PARTITION p0 ENGINE = InnoDB,
PARTITION part1 ENGINE = InnoDB,
PARTITION part7 ENGINE = InnoDB) */
+t1#P#p0.ibd
+t1#P#part1.ibd
+t1#P#part7.ibd
t1.frm
t1.par
EXPLAIN PARTITIONS SELECT COUNT(*) FROM t1 WHERE f_date = '1000-02-10';
@@ -322,6 +366,8 @@ t1 CREATE TABLE `t1` (
/*!50100 PARTITION BY HASH (YEAR(f_date))
(PARTITION p0 ENGINE = InnoDB,
PARTITION part1 ENGINE = InnoDB) */
+t1#P#p0.ibd
+t1#P#part1.ibd
t1.frm
t1.par
EXPLAIN PARTITIONS SELECT COUNT(*) FROM t1 WHERE f_date = '1000-02-10';
@@ -339,6 +385,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=InnoDB DEFAULT CHARSET=latin1
/*!50100 PARTITION BY HASH (YEAR(f_date))
(PARTITION p0 ENGINE = InnoDB) */
+t1#P#p0.ibd
t1.frm
t1.par
EXPLAIN PARTITIONS SELECT COUNT(*) FROM t1 WHERE f_date = '1000-02-10';
@@ -359,6 +406,7 @@ t1 CREATE TABLE `t1` (
`f_varchar` varchar(30) DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1
t1.frm
+t1.ibd
EXPLAIN PARTITIONS SELECT COUNT(*) FROM t1 WHERE f_date = '1000-02-10';
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 NULL ALL NULL NULL NULL NULL 20 Using where
@@ -367,6 +415,7 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
# check read row by row success: 1
# 1.2.7 Remove partitioning from not partitioned table --> ????
ALTER TABLE t1 REMOVE PARTITIONING;
+ERROR HY000: Partition management on a not partitioned table is not possible
DROP TABLE t1;
# Attention: There are unused files.
# Either the DROP TABLE or a preceding ALTER TABLE
@@ -398,6 +447,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1
t1.frm
+t1.ibd
EXPLAIN PARTITIONS SELECT COUNT(*) <> 1 FROM t1 WHERE f_int1 = 3;
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 NULL ALL NULL NULL NULL NULL 20 Using where
@@ -420,6 +470,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1
/*!50100 PARTITION BY KEY (f_int1) */
+t1#P#p0.ibd
t1.frm
t1.par
EXPLAIN PARTITIONS SELECT COUNT(*) <> 1 FROM t1 WHERE f_int1 = 3;
@@ -448,6 +499,9 @@ t1 CREATE TABLE `t1` (
(PARTITION p0 ENGINE = InnoDB,
PARTITION part1 ENGINE = InnoDB,
PARTITION part7 ENGINE = InnoDB) */
+t1#P#p0.ibd
+t1#P#part1.ibd
+t1#P#part7.ibd
t1.frm
t1.par
EXPLAIN PARTITIONS SELECT COUNT(*) <> 1 FROM t1 WHERE f_int1 = 3;
@@ -472,6 +526,10 @@ t1 CREATE TABLE `t1` (
PARTITION part1 ENGINE = InnoDB,
PARTITION part7 ENGINE = InnoDB,
PARTITION part2 ENGINE = InnoDB) */
+t1#P#p0.ibd
+t1#P#part1.ibd
+t1#P#part2.ibd
+t1#P#part7.ibd
t1.frm
t1.par
EXPLAIN PARTITIONS SELECT COUNT(*) <> 1 FROM t1 WHERE f_int1 = 3;
@@ -500,6 +558,14 @@ t1 CREATE TABLE `t1` (
PARTITION p5 ENGINE = InnoDB,
PARTITION p6 ENGINE = InnoDB,
PARTITION p7 ENGINE = InnoDB) */
+t1#P#p0.ibd
+t1#P#p4.ibd
+t1#P#p5.ibd
+t1#P#p6.ibd
+t1#P#p7.ibd
+t1#P#part1.ibd
+t1#P#part2.ibd
+t1#P#part7.ibd
t1.frm
t1.par
EXPLAIN PARTITIONS SELECT COUNT(*) <> 1 FROM t1 WHERE f_int1 = 3;
@@ -534,6 +600,13 @@ t1 CREATE TABLE `t1` (
PARTITION p4 ENGINE = InnoDB,
PARTITION p5 ENGINE = InnoDB,
PARTITION p6 ENGINE = InnoDB) */
+t1#P#p0.ibd
+t1#P#p4.ibd
+t1#P#p5.ibd
+t1#P#p6.ibd
+t1#P#part1.ibd
+t1#P#part2.ibd
+t1#P#part7.ibd
t1.frm
t1.par
EXPLAIN PARTITIONS SELECT COUNT(*) <> 1 FROM t1 WHERE f_int1 = 3;
@@ -559,6 +632,12 @@ t1 CREATE TABLE `t1` (
PARTITION part2 ENGINE = InnoDB,
PARTITION p4 ENGINE = InnoDB,
PARTITION p5 ENGINE = InnoDB) */
+t1#P#p0.ibd
+t1#P#p4.ibd
+t1#P#p5.ibd
+t1#P#part1.ibd
+t1#P#part2.ibd
+t1#P#part7.ibd
t1.frm
t1.par
EXPLAIN PARTITIONS SELECT COUNT(*) <> 1 FROM t1 WHERE f_int1 = 3;
@@ -583,6 +662,11 @@ t1 CREATE TABLE `t1` (
PARTITION part7 ENGINE = InnoDB,
PARTITION part2 ENGINE = InnoDB,
PARTITION p4 ENGINE = InnoDB) */
+t1#P#p0.ibd
+t1#P#p4.ibd
+t1#P#part1.ibd
+t1#P#part2.ibd
+t1#P#part7.ibd
t1.frm
t1.par
EXPLAIN PARTITIONS SELECT COUNT(*) <> 1 FROM t1 WHERE f_int1 = 3;
@@ -606,6 +690,10 @@ t1 CREATE TABLE `t1` (
PARTITION part1 ENGINE = InnoDB,
PARTITION part7 ENGINE = InnoDB,
PARTITION part2 ENGINE = InnoDB) */
+t1#P#p0.ibd
+t1#P#part1.ibd
+t1#P#part2.ibd
+t1#P#part7.ibd
t1.frm
t1.par
EXPLAIN PARTITIONS SELECT COUNT(*) <> 1 FROM t1 WHERE f_int1 = 3;
@@ -628,6 +716,9 @@ t1 CREATE TABLE `t1` (
(PARTITION p0 ENGINE = InnoDB,
PARTITION part1 ENGINE = InnoDB,
PARTITION part7 ENGINE = InnoDB) */
+t1#P#p0.ibd
+t1#P#part1.ibd
+t1#P#part7.ibd
t1.frm
t1.par
EXPLAIN PARTITIONS SELECT COUNT(*) <> 1 FROM t1 WHERE f_int1 = 3;
@@ -649,6 +740,8 @@ t1 CREATE TABLE `t1` (
/*!50100 PARTITION BY KEY (f_int1)
(PARTITION p0 ENGINE = InnoDB,
PARTITION part1 ENGINE = InnoDB) */
+t1#P#p0.ibd
+t1#P#part1.ibd
t1.frm
t1.par
EXPLAIN PARTITIONS SELECT COUNT(*) <> 1 FROM t1 WHERE f_int1 = 3;
@@ -669,6 +762,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=InnoDB DEFAULT CHARSET=latin1
/*!50100 PARTITION BY KEY (f_int1)
(PARTITION p0 ENGINE = InnoDB) */
+t1#P#p0.ibd
t1.frm
t1.par
EXPLAIN PARTITIONS SELECT COUNT(*) <> 1 FROM t1 WHERE f_int1 = 3;
@@ -692,6 +786,7 @@ t1 CREATE TABLE `t1` (
`f_charbig` varchar(1000) DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1
t1.frm
+t1.ibd
EXPLAIN PARTITIONS SELECT COUNT(*) <> 1 FROM t1 WHERE f_int1 = 3;
id select_type table partitions type possible_keys key key_len ref rows Extra
1 SIMPLE t1 NULL ALL NULL NULL NULL NULL 20 Using where
@@ -700,6 +795,7 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
# check read row by row success: 1
# 2.2.7 Remove partitioning from not partitioned table --> ????
ALTER TABLE t1 REMOVE PARTITIONING;
+ERROR HY000: Partition management on a not partitioned table is not possible
DROP TABLE t1;
# Attention: There are unused files.
# Either the DROP TABLE or a preceding ALTER TABLE
diff --git a/mysql-test/suite/parts/r/partition_alter3_myisam.result b/mysql-test/suite/parts/r/partition_alter3_myisam.result
index 297f2bf9210..dfb21ee17ba 100644
--- a/mysql-test/suite/parts/r/partition_alter3_myisam.result
+++ b/mysql-test/suite/parts/r/partition_alter3_myisam.result
@@ -1,5 +1,5 @@
SET @max_row = 20;
-SET @@session.storage_engine = 'MyISAM';
+SET @@session.default_storage_engine = 'MyISAM';
#------------------------------------------------------------------------
# 0. Setting of auxiliary variables + Creation of an auxiliary tables
@@ -463,6 +463,7 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
# check read row by row success: 1
# 1.2.7 Remove partitioning from not partitioned table --> ????
ALTER TABLE t1 REMOVE PARTITIONING;
+ERROR HY000: Partition management on a not partitioned table is not possible
DROP TABLE t1;
#========================================================================
@@ -882,6 +883,7 @@ id select_type table partitions type possible_keys key key_len ref rows Extra
# check read row by row success: 1
# 2.2.7 Remove partitioning from not partitioned table --> ????
ALTER TABLE t1 REMOVE PARTITIONING;
+ERROR HY000: Partition management on a not partitioned table is not possible
DROP TABLE t1;
DROP VIEW IF EXISTS v1;
DROP TABLE IF EXISTS t1;
diff --git a/mysql-test/suite/parts/r/partition_alter4_innodb.result b/mysql-test/suite/parts/r/partition_alter4_innodb.result
index 46f3ed3c974..e4dacbc663c 100644
--- a/mysql-test/suite/parts/r/partition_alter4_innodb.result
+++ b/mysql-test/suite/parts/r/partition_alter4_innodb.result
@@ -1,5 +1,5 @@
SET @max_row = 20;
-SET @@session.storage_engine = 'InnoDB';
+SET @@session.default_storage_engine = 'InnoDB';
#------------------------------------------------------------------------
# 0. Setting of auxiliary variables + Creation of an auxiliary tables
diff --git a/mysql-test/suite/parts/r/partition_alter4_myisam.result b/mysql-test/suite/parts/r/partition_alter4_myisam.result
index c74a105306c..8aba07874aa 100644
--- a/mysql-test/suite/parts/r/partition_alter4_myisam.result
+++ b/mysql-test/suite/parts/r/partition_alter4_myisam.result
@@ -1,5 +1,5 @@
SET @max_row = 20;
-SET @@session.storage_engine = 'MyISAM';
+SET @@session.default_storage_engine = 'MyISAM';
#------------------------------------------------------------------------
# 0. Setting of auxiliary variables + Creation of an auxiliary tables
diff --git a/mysql-test/suite/parts/r/partition_basic_innodb.result b/mysql-test/suite/parts/r/partition_basic_innodb.result
index ea5a53fc5f4..c74380d3591 100644
--- a/mysql-test/suite/parts/r/partition_basic_innodb.result
+++ b/mysql-test/suite/parts/r/partition_basic_innodb.result
@@ -1,5 +1,5 @@
SET @max_row = 20;
-SET @@session.storage_engine = 'InnoDB';
+SET @@session.default_storage_engine = 'InnoDB';
#------------------------------------------------------------------------
# 0. Setting of auxiliary variables + Creation of an auxiliary tables
@@ -77,6 +77,8 @@ t1 CREATE TABLE `t1` (
PARTITIONS 2 */
unified filelist
+t1#P#p0.ibd
+t1#P#p1.ibd
t1.frm
t1.par
@@ -498,7 +500,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -532,6 +534,11 @@ t1 CREATE TABLE `t1` (
PARTITIONS 5 */
unified filelist
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1#P#p2.ibd
+t1#P#p3.ibd
+t1#P#p4.ibd
t1.frm
t1.par
@@ -953,7 +960,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -1002,6 +1009,14 @@ t1 CREATE TABLE `t1` (
PARTITION part3 VALUES IN (3) ENGINE = InnoDB) */
unified filelist
+t1#P#part0.ibd
+t1#P#part1.ibd
+t1#P#part2.ibd
+t1#P#part3.ibd
+t1#P#part_1.ibd
+t1#P#part_2.ibd
+t1#P#part_3.ibd
+t1#P#part_N.ibd
t1.frm
t1.par
@@ -1423,7 +1438,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -1468,6 +1483,12 @@ t1 CREATE TABLE `t1` (
PARTITION partf VALUES LESS THAN (2147483646) ENGINE = InnoDB) */
unified filelist
+t1#P#parta.ibd
+t1#P#partb.ibd
+t1#P#partc.ibd
+t1#P#partd.ibd
+t1#P#parte.ibd
+t1#P#partf.ibd
t1.frm
t1.par
@@ -1887,7 +1908,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -1930,6 +1951,14 @@ SUBPARTITIONS 2
PARTITION partd VALUES LESS THAN (2147483646) ENGINE = InnoDB) */
unified filelist
+t1#P#parta#SP#partasp0.ibd
+t1#P#parta#SP#partasp1.ibd
+t1#P#partb#SP#partbsp0.ibd
+t1#P#partb#SP#partbsp1.ibd
+t1#P#partc#SP#partcsp0.ibd
+t1#P#partc#SP#partcsp1.ibd
+t1#P#partd#SP#partdsp0.ibd
+t1#P#partd#SP#partdsp1.ibd
t1.frm
t1.par
@@ -2351,7 +2380,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -2405,6 +2434,14 @@ SUBPARTITION BY KEY (f_int1)
SUBPARTITION subpart42 ENGINE = InnoDB)) */
unified filelist
+t1#P#part1#SP#subpart11.ibd
+t1#P#part1#SP#subpart12.ibd
+t1#P#part2#SP#subpart21.ibd
+t1#P#part2#SP#subpart22.ibd
+t1#P#part3#SP#subpart31.ibd
+t1#P#part3#SP#subpart32.ibd
+t1#P#part4#SP#subpart41.ibd
+t1#P#part4#SP#subpart42.ibd
t1.frm
t1.par
@@ -2824,7 +2861,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -2882,6 +2919,14 @@ SUBPARTITION BY HASH (f_int1 + 1)
SUBPARTITION sp42 ENGINE = InnoDB)) */
unified filelist
+t1#P#part1#SP#sp11.ibd
+t1#P#part1#SP#sp12.ibd
+t1#P#part2#SP#sp21.ibd
+t1#P#part2#SP#sp22.ibd
+t1#P#part3#SP#sp31.ibd
+t1#P#part3#SP#sp32.ibd
+t1#P#part4#SP#sp41.ibd
+t1#P#part4#SP#sp42.ibd
t1.frm
t1.par
@@ -3303,7 +3348,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -3345,6 +3390,15 @@ SUBPARTITIONS 3
PARTITION part3 VALUES IN (NULL) ENGINE = InnoDB) */
unified filelist
+t1#P#part1#SP#part1sp0.ibd
+t1#P#part1#SP#part1sp1.ibd
+t1#P#part1#SP#part1sp2.ibd
+t1#P#part2#SP#part2sp0.ibd
+t1#P#part2#SP#part2sp1.ibd
+t1#P#part2#SP#part2sp2.ibd
+t1#P#part3#SP#part3sp0.ibd
+t1#P#part3#SP#part3sp1.ibd
+t1#P#part3#SP#part3sp2.ibd
t1.frm
t1.par
@@ -3766,7 +3820,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -3802,6 +3856,8 @@ t1 CREATE TABLE `t1` (
PARTITIONS 2 */
unified filelist
+t1#P#p0.ibd
+t1#P#p1.ibd
t1.frm
t1.par
@@ -4223,7 +4279,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -4257,6 +4313,11 @@ t1 CREATE TABLE `t1` (
PARTITIONS 5 */
unified filelist
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1#P#p2.ibd
+t1#P#p3.ibd
+t1#P#p4.ibd
t1.frm
t1.par
@@ -4678,7 +4739,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -4727,6 +4788,14 @@ t1 CREATE TABLE `t1` (
PARTITION part3 VALUES IN (3) ENGINE = InnoDB) */
unified filelist
+t1#P#part0.ibd
+t1#P#part1.ibd
+t1#P#part2.ibd
+t1#P#part3.ibd
+t1#P#part_1.ibd
+t1#P#part_2.ibd
+t1#P#part_3.ibd
+t1#P#part_N.ibd
t1.frm
t1.par
@@ -5148,7 +5217,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -5193,6 +5262,12 @@ t1 CREATE TABLE `t1` (
PARTITION partf VALUES LESS THAN (2147483646) ENGINE = InnoDB) */
unified filelist
+t1#P#parta.ibd
+t1#P#partb.ibd
+t1#P#partc.ibd
+t1#P#partd.ibd
+t1#P#parte.ibd
+t1#P#partf.ibd
t1.frm
t1.par
@@ -5612,7 +5687,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -5655,6 +5730,14 @@ SUBPARTITIONS 2
PARTITION partd VALUES LESS THAN (2147483646) ENGINE = InnoDB) */
unified filelist
+t1#P#parta#SP#partasp0.ibd
+t1#P#parta#SP#partasp1.ibd
+t1#P#partb#SP#partbsp0.ibd
+t1#P#partb#SP#partbsp1.ibd
+t1#P#partc#SP#partcsp0.ibd
+t1#P#partc#SP#partcsp1.ibd
+t1#P#partd#SP#partdsp0.ibd
+t1#P#partd#SP#partdsp1.ibd
t1.frm
t1.par
@@ -6074,7 +6157,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -6128,6 +6211,14 @@ SUBPARTITION BY KEY (f_int2)
SUBPARTITION subpart42 ENGINE = InnoDB)) */
unified filelist
+t1#P#part1#SP#subpart11.ibd
+t1#P#part1#SP#subpart12.ibd
+t1#P#part2#SP#subpart21.ibd
+t1#P#part2#SP#subpart22.ibd
+t1#P#part3#SP#subpart31.ibd
+t1#P#part3#SP#subpart32.ibd
+t1#P#part4#SP#subpart41.ibd
+t1#P#part4#SP#subpart42.ibd
t1.frm
t1.par
@@ -6547,7 +6638,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -6601,6 +6692,14 @@ SUBPARTITION BY HASH (f_int2 + 1)
SUBPARTITION sp42 ENGINE = InnoDB)) */
unified filelist
+t1#P#part1#SP#sp11.ibd
+t1#P#part1#SP#sp12.ibd
+t1#P#part2#SP#sp21.ibd
+t1#P#part2#SP#sp22.ibd
+t1#P#part3#SP#sp31.ibd
+t1#P#part3#SP#sp32.ibd
+t1#P#part4#SP#sp41.ibd
+t1#P#part4#SP#sp42.ibd
t1.frm
t1.par
@@ -7022,7 +7121,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -7064,6 +7163,15 @@ SUBPARTITIONS 3
PARTITION part3 VALUES IN (NULL) ENGINE = InnoDB) */
unified filelist
+t1#P#part1#SP#part1sp0.ibd
+t1#P#part1#SP#part1sp1.ibd
+t1#P#part1#SP#part1sp2.ibd
+t1#P#part2#SP#part2sp0.ibd
+t1#P#part2#SP#part2sp1.ibd
+t1#P#part2#SP#part2sp2.ibd
+t1#P#part3#SP#part3sp0.ibd
+t1#P#part3#SP#part3sp1.ibd
+t1#P#part3#SP#part3sp2.ibd
t1.frm
t1.par
@@ -7485,7 +7593,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -7527,6 +7635,8 @@ t1 CREATE TABLE `t1` (
PARTITIONS 2 */
unified filelist
+t1#P#p0.ibd
+t1#P#p1.ibd
t1.frm
t1.par
@@ -7983,7 +8093,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -8019,6 +8129,11 @@ t1 CREATE TABLE `t1` (
PARTITIONS 5 */
unified filelist
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1#P#p2.ibd
+t1#P#p3.ibd
+t1#P#p4.ibd
t1.frm
t1.par
@@ -8475,7 +8590,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -8526,6 +8641,14 @@ t1 CREATE TABLE `t1` (
PARTITION part3 VALUES IN (3) ENGINE = InnoDB) */
unified filelist
+t1#P#part0.ibd
+t1#P#part1.ibd
+t1#P#part2.ibd
+t1#P#part3.ibd
+t1#P#part_1.ibd
+t1#P#part_2.ibd
+t1#P#part_3.ibd
+t1#P#part_N.ibd
t1.frm
t1.par
@@ -8982,7 +9105,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -9029,6 +9152,12 @@ t1 CREATE TABLE `t1` (
PARTITION partf VALUES LESS THAN (2147483646) ENGINE = InnoDB) */
unified filelist
+t1#P#parta.ibd
+t1#P#partb.ibd
+t1#P#partc.ibd
+t1#P#partd.ibd
+t1#P#parte.ibd
+t1#P#partf.ibd
t1.frm
t1.par
@@ -9483,7 +9612,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -9528,6 +9657,14 @@ SUBPARTITIONS 2
PARTITION partd VALUES LESS THAN (2147483646) ENGINE = InnoDB) */
unified filelist
+t1#P#parta#SP#partasp0.ibd
+t1#P#parta#SP#partasp1.ibd
+t1#P#partb#SP#partbsp0.ibd
+t1#P#partb#SP#partbsp1.ibd
+t1#P#partc#SP#partcsp0.ibd
+t1#P#partc#SP#partcsp1.ibd
+t1#P#partd#SP#partdsp0.ibd
+t1#P#partd#SP#partdsp1.ibd
t1.frm
t1.par
@@ -9984,7 +10121,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -10040,6 +10177,14 @@ SUBPARTITION BY KEY (f_int1)
SUBPARTITION subpart42 ENGINE = InnoDB)) */
unified filelist
+t1#P#part1#SP#subpart11.ibd
+t1#P#part1#SP#subpart12.ibd
+t1#P#part2#SP#subpart21.ibd
+t1#P#part2#SP#subpart22.ibd
+t1#P#part3#SP#subpart31.ibd
+t1#P#part3#SP#subpart32.ibd
+t1#P#part4#SP#subpart41.ibd
+t1#P#part4#SP#subpart42.ibd
t1.frm
t1.par
@@ -10494,7 +10639,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -10554,6 +10699,14 @@ SUBPARTITION BY HASH (f_int1 + 1)
SUBPARTITION sp42 ENGINE = InnoDB)) */
unified filelist
+t1#P#part1#SP#sp11.ibd
+t1#P#part1#SP#sp12.ibd
+t1#P#part2#SP#sp21.ibd
+t1#P#part2#SP#sp22.ibd
+t1#P#part3#SP#sp31.ibd
+t1#P#part3#SP#sp32.ibd
+t1#P#part4#SP#sp41.ibd
+t1#P#part4#SP#sp42.ibd
t1.frm
t1.par
@@ -11010,7 +11163,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -11054,6 +11207,15 @@ SUBPARTITIONS 3
PARTITION part3 VALUES IN (NULL) ENGINE = InnoDB) */
unified filelist
+t1#P#part1#SP#part1sp0.ibd
+t1#P#part1#SP#part1sp1.ibd
+t1#P#part1#SP#part1sp2.ibd
+t1#P#part2#SP#part2sp0.ibd
+t1#P#part2#SP#part2sp1.ibd
+t1#P#part2#SP#part2sp2.ibd
+t1#P#part3#SP#part3sp0.ibd
+t1#P#part3#SP#part3sp1.ibd
+t1#P#part3#SP#part3sp2.ibd
t1.frm
t1.par
@@ -11510,7 +11672,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -11547,6 +11709,8 @@ t1 CREATE TABLE `t1` (
PARTITIONS 2 */
unified filelist
+t1#P#p0.ibd
+t1#P#p1.ibd
t1.frm
t1.par
@@ -12003,7 +12167,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -12039,6 +12203,11 @@ t1 CREATE TABLE `t1` (
PARTITIONS 5 */
unified filelist
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1#P#p2.ibd
+t1#P#p3.ibd
+t1#P#p4.ibd
t1.frm
t1.par
@@ -12495,7 +12664,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -12546,6 +12715,14 @@ t1 CREATE TABLE `t1` (
PARTITION part3 VALUES IN (3) ENGINE = InnoDB) */
unified filelist
+t1#P#part0.ibd
+t1#P#part1.ibd
+t1#P#part2.ibd
+t1#P#part3.ibd
+t1#P#part_1.ibd
+t1#P#part_2.ibd
+t1#P#part_3.ibd
+t1#P#part_N.ibd
t1.frm
t1.par
@@ -13002,7 +13179,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -13049,6 +13226,12 @@ t1 CREATE TABLE `t1` (
PARTITION partf VALUES LESS THAN (2147483646) ENGINE = InnoDB) */
unified filelist
+t1#P#parta.ibd
+t1#P#partb.ibd
+t1#P#partc.ibd
+t1#P#partd.ibd
+t1#P#parte.ibd
+t1#P#partf.ibd
t1.frm
t1.par
@@ -13503,7 +13686,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -13548,6 +13731,14 @@ SUBPARTITIONS 2
PARTITION partd VALUES LESS THAN (2147483646) ENGINE = InnoDB) */
unified filelist
+t1#P#parta#SP#partasp0.ibd
+t1#P#parta#SP#partasp1.ibd
+t1#P#partb#SP#partbsp0.ibd
+t1#P#partb#SP#partbsp1.ibd
+t1#P#partc#SP#partcsp0.ibd
+t1#P#partc#SP#partcsp1.ibd
+t1#P#partd#SP#partdsp0.ibd
+t1#P#partd#SP#partdsp1.ibd
t1.frm
t1.par
@@ -14004,7 +14195,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -14060,6 +14251,14 @@ SUBPARTITION BY KEY (f_int1)
SUBPARTITION subpart42 ENGINE = InnoDB)) */
unified filelist
+t1#P#part1#SP#subpart11.ibd
+t1#P#part1#SP#subpart12.ibd
+t1#P#part2#SP#subpart21.ibd
+t1#P#part2#SP#subpart22.ibd
+t1#P#part3#SP#subpart31.ibd
+t1#P#part3#SP#subpart32.ibd
+t1#P#part4#SP#subpart41.ibd
+t1#P#part4#SP#subpart42.ibd
t1.frm
t1.par
@@ -14514,7 +14713,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -14574,6 +14773,14 @@ SUBPARTITION BY HASH (f_int1 + 1)
SUBPARTITION sp42 ENGINE = InnoDB)) */
unified filelist
+t1#P#part1#SP#sp11.ibd
+t1#P#part1#SP#sp12.ibd
+t1#P#part2#SP#sp21.ibd
+t1#P#part2#SP#sp22.ibd
+t1#P#part3#SP#sp31.ibd
+t1#P#part3#SP#sp32.ibd
+t1#P#part4#SP#sp41.ibd
+t1#P#part4#SP#sp42.ibd
t1.frm
t1.par
@@ -15030,7 +15237,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -15074,6 +15281,15 @@ SUBPARTITIONS 3
PARTITION part3 VALUES IN (NULL) ENGINE = InnoDB) */
unified filelist
+t1#P#part1#SP#part1sp0.ibd
+t1#P#part1#SP#part1sp1.ibd
+t1#P#part1#SP#part1sp2.ibd
+t1#P#part2#SP#part2sp0.ibd
+t1#P#part2#SP#part2sp1.ibd
+t1#P#part2#SP#part2sp2.ibd
+t1#P#part3#SP#part3sp0.ibd
+t1#P#part3#SP#part3sp1.ibd
+t1#P#part3#SP#part3sp2.ibd
t1.frm
t1.par
@@ -15530,7 +15746,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -15567,6 +15783,8 @@ t1 CREATE TABLE `t1` (
PARTITIONS 2 */
unified filelist
+t1#P#p0.ibd
+t1#P#p1.ibd
t1.frm
t1.par
@@ -16039,7 +16257,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -16075,6 +16293,11 @@ t1 CREATE TABLE `t1` (
PARTITIONS 5 */
unified filelist
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1#P#p2.ibd
+t1#P#p3.ibd
+t1#P#p4.ibd
t1.frm
t1.par
@@ -16547,7 +16770,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -16598,6 +16821,14 @@ t1 CREATE TABLE `t1` (
PARTITION part3 VALUES IN (3) ENGINE = InnoDB) */
unified filelist
+t1#P#part0.ibd
+t1#P#part1.ibd
+t1#P#part2.ibd
+t1#P#part3.ibd
+t1#P#part_1.ibd
+t1#P#part_2.ibd
+t1#P#part_3.ibd
+t1#P#part_N.ibd
t1.frm
t1.par
@@ -17070,7 +17301,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -17117,6 +17348,12 @@ t1 CREATE TABLE `t1` (
PARTITION partf VALUES LESS THAN (2147483646) ENGINE = InnoDB) */
unified filelist
+t1#P#parta.ibd
+t1#P#partb.ibd
+t1#P#partc.ibd
+t1#P#partd.ibd
+t1#P#parte.ibd
+t1#P#partf.ibd
t1.frm
t1.par
@@ -17587,7 +17824,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -17632,6 +17869,14 @@ SUBPARTITIONS 2
PARTITION partd VALUES LESS THAN (2147483646) ENGINE = InnoDB) */
unified filelist
+t1#P#parta#SP#partasp0.ibd
+t1#P#parta#SP#partasp1.ibd
+t1#P#partb#SP#partbsp0.ibd
+t1#P#partb#SP#partbsp1.ibd
+t1#P#partc#SP#partcsp0.ibd
+t1#P#partc#SP#partcsp1.ibd
+t1#P#partd#SP#partdsp0.ibd
+t1#P#partd#SP#partdsp1.ibd
t1.frm
t1.par
@@ -18104,7 +18349,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -18160,6 +18405,14 @@ SUBPARTITION BY KEY (f_int1)
SUBPARTITION subpart42 ENGINE = InnoDB)) */
unified filelist
+t1#P#part1#SP#subpart11.ibd
+t1#P#part1#SP#subpart12.ibd
+t1#P#part2#SP#subpart21.ibd
+t1#P#part2#SP#subpart22.ibd
+t1#P#part3#SP#subpart31.ibd
+t1#P#part3#SP#subpart32.ibd
+t1#P#part4#SP#subpart41.ibd
+t1#P#part4#SP#subpart42.ibd
t1.frm
t1.par
@@ -18630,7 +18883,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -18690,6 +18943,14 @@ SUBPARTITION BY HASH (f_int1 + 1)
SUBPARTITION sp42 ENGINE = InnoDB)) */
unified filelist
+t1#P#part1#SP#sp11.ibd
+t1#P#part1#SP#sp12.ibd
+t1#P#part2#SP#sp21.ibd
+t1#P#part2#SP#sp22.ibd
+t1#P#part3#SP#sp31.ibd
+t1#P#part3#SP#sp32.ibd
+t1#P#part4#SP#sp41.ibd
+t1#P#part4#SP#sp42.ibd
t1.frm
t1.par
@@ -19162,7 +19423,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -19206,6 +19467,15 @@ SUBPARTITIONS 3
PARTITION part3 VALUES IN (NULL) ENGINE = InnoDB) */
unified filelist
+t1#P#part1#SP#part1sp0.ibd
+t1#P#part1#SP#part1sp1.ibd
+t1#P#part1#SP#part1sp2.ibd
+t1#P#part2#SP#part2sp0.ibd
+t1#P#part2#SP#part2sp1.ibd
+t1#P#part2#SP#part2sp2.ibd
+t1#P#part3#SP#part3sp0.ibd
+t1#P#part3#SP#part3sp1.ibd
+t1#P#part3#SP#part3sp2.ibd
t1.frm
t1.par
@@ -19678,7 +19948,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -19720,6 +19990,8 @@ t1 CREATE TABLE `t1` (
PARTITIONS 2 */
unified filelist
+t1#P#p0.ibd
+t1#P#p1.ibd
t1.frm
t1.par
@@ -20176,7 +20448,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -20212,6 +20484,11 @@ t1 CREATE TABLE `t1` (
PARTITIONS 5 */
unified filelist
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1#P#p2.ibd
+t1#P#p3.ibd
+t1#P#p4.ibd
t1.frm
t1.par
@@ -20668,7 +20945,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -20719,6 +20996,14 @@ t1 CREATE TABLE `t1` (
PARTITION part3 VALUES IN (3) ENGINE = InnoDB) */
unified filelist
+t1#P#part0.ibd
+t1#P#part1.ibd
+t1#P#part2.ibd
+t1#P#part3.ibd
+t1#P#part_1.ibd
+t1#P#part_2.ibd
+t1#P#part_3.ibd
+t1#P#part_N.ibd
t1.frm
t1.par
@@ -21175,7 +21460,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -21222,6 +21507,12 @@ t1 CREATE TABLE `t1` (
PARTITION partf VALUES LESS THAN (2147483646) ENGINE = InnoDB) */
unified filelist
+t1#P#parta.ibd
+t1#P#partb.ibd
+t1#P#partc.ibd
+t1#P#partd.ibd
+t1#P#parte.ibd
+t1#P#partf.ibd
t1.frm
t1.par
@@ -21676,7 +21967,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -21721,6 +22012,14 @@ SUBPARTITIONS 2
PARTITION partd VALUES LESS THAN (2147483646) ENGINE = InnoDB) */
unified filelist
+t1#P#parta#SP#partasp0.ibd
+t1#P#parta#SP#partasp1.ibd
+t1#P#partb#SP#partbsp0.ibd
+t1#P#partb#SP#partbsp1.ibd
+t1#P#partc#SP#partcsp0.ibd
+t1#P#partc#SP#partcsp1.ibd
+t1#P#partd#SP#partdsp0.ibd
+t1#P#partd#SP#partdsp1.ibd
t1.frm
t1.par
@@ -22175,7 +22474,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -22231,6 +22530,14 @@ SUBPARTITION BY KEY (f_int2)
SUBPARTITION subpart42 ENGINE = InnoDB)) */
unified filelist
+t1#P#part1#SP#subpart11.ibd
+t1#P#part1#SP#subpart12.ibd
+t1#P#part2#SP#subpart21.ibd
+t1#P#part2#SP#subpart22.ibd
+t1#P#part3#SP#subpart31.ibd
+t1#P#part3#SP#subpart32.ibd
+t1#P#part4#SP#subpart41.ibd
+t1#P#part4#SP#subpart42.ibd
t1.frm
t1.par
@@ -22685,7 +22992,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -22741,6 +23048,14 @@ SUBPARTITION BY HASH (f_int2 + 1)
SUBPARTITION sp42 ENGINE = InnoDB)) */
unified filelist
+t1#P#part1#SP#sp11.ibd
+t1#P#part1#SP#sp12.ibd
+t1#P#part2#SP#sp21.ibd
+t1#P#part2#SP#sp22.ibd
+t1#P#part3#SP#sp31.ibd
+t1#P#part3#SP#sp32.ibd
+t1#P#part4#SP#sp41.ibd
+t1#P#part4#SP#sp42.ibd
t1.frm
t1.par
@@ -23197,7 +23512,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -23241,6 +23556,15 @@ SUBPARTITIONS 3
PARTITION part3 VALUES IN (NULL) ENGINE = InnoDB) */
unified filelist
+t1#P#part1#SP#part1sp0.ibd
+t1#P#part1#SP#part1sp1.ibd
+t1#P#part1#SP#part1sp2.ibd
+t1#P#part2#SP#part2sp0.ibd
+t1#P#part2#SP#part2sp1.ibd
+t1#P#part2#SP#part2sp2.ibd
+t1#P#part3#SP#part3sp0.ibd
+t1#P#part3#SP#part3sp1.ibd
+t1#P#part3#SP#part3sp2.ibd
t1.frm
t1.par
@@ -23697,7 +24021,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -23734,6 +24058,8 @@ t1 CREATE TABLE `t1` (
PARTITIONS 2 */
unified filelist
+t1#P#p0.ibd
+t1#P#p1.ibd
t1.frm
t1.par
@@ -24190,7 +24516,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -24226,6 +24552,11 @@ t1 CREATE TABLE `t1` (
PARTITIONS 5 */
unified filelist
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1#P#p2.ibd
+t1#P#p3.ibd
+t1#P#p4.ibd
t1.frm
t1.par
@@ -24682,7 +25013,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -24733,6 +25064,14 @@ t1 CREATE TABLE `t1` (
PARTITION part3 VALUES IN (3) ENGINE = InnoDB) */
unified filelist
+t1#P#part0.ibd
+t1#P#part1.ibd
+t1#P#part2.ibd
+t1#P#part3.ibd
+t1#P#part_1.ibd
+t1#P#part_2.ibd
+t1#P#part_3.ibd
+t1#P#part_N.ibd
t1.frm
t1.par
@@ -25189,7 +25528,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -25236,6 +25575,12 @@ t1 CREATE TABLE `t1` (
PARTITION partf VALUES LESS THAN (2147483646) ENGINE = InnoDB) */
unified filelist
+t1#P#parta.ibd
+t1#P#partb.ibd
+t1#P#partc.ibd
+t1#P#partd.ibd
+t1#P#parte.ibd
+t1#P#partf.ibd
t1.frm
t1.par
@@ -25690,7 +26035,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -25735,6 +26080,14 @@ SUBPARTITIONS 2
PARTITION partd VALUES LESS THAN (2147483646) ENGINE = InnoDB) */
unified filelist
+t1#P#parta#SP#partasp0.ibd
+t1#P#parta#SP#partasp1.ibd
+t1#P#partb#SP#partbsp0.ibd
+t1#P#partb#SP#partbsp1.ibd
+t1#P#partc#SP#partcsp0.ibd
+t1#P#partc#SP#partcsp1.ibd
+t1#P#partd#SP#partdsp0.ibd
+t1#P#partd#SP#partdsp1.ibd
t1.frm
t1.par
@@ -26189,7 +26542,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -26245,6 +26598,14 @@ SUBPARTITION BY KEY (f_int2)
SUBPARTITION subpart42 ENGINE = InnoDB)) */
unified filelist
+t1#P#part1#SP#subpart11.ibd
+t1#P#part1#SP#subpart12.ibd
+t1#P#part2#SP#subpart21.ibd
+t1#P#part2#SP#subpart22.ibd
+t1#P#part3#SP#subpart31.ibd
+t1#P#part3#SP#subpart32.ibd
+t1#P#part4#SP#subpart41.ibd
+t1#P#part4#SP#subpart42.ibd
t1.frm
t1.par
@@ -26699,7 +27060,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -26755,6 +27116,14 @@ SUBPARTITION BY HASH (f_int2 + 1)
SUBPARTITION sp42 ENGINE = InnoDB)) */
unified filelist
+t1#P#part1#SP#sp11.ibd
+t1#P#part1#SP#sp12.ibd
+t1#P#part2#SP#sp21.ibd
+t1#P#part2#SP#sp22.ibd
+t1#P#part3#SP#sp31.ibd
+t1#P#part3#SP#sp32.ibd
+t1#P#part4#SP#sp41.ibd
+t1#P#part4#SP#sp42.ibd
t1.frm
t1.par
@@ -27211,7 +27580,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -27255,6 +27624,15 @@ SUBPARTITIONS 3
PARTITION part3 VALUES IN (NULL) ENGINE = InnoDB) */
unified filelist
+t1#P#part1#SP#part1sp0.ibd
+t1#P#part1#SP#part1sp1.ibd
+t1#P#part1#SP#part1sp2.ibd
+t1#P#part2#SP#part2sp0.ibd
+t1#P#part2#SP#part2sp1.ibd
+t1#P#part2#SP#part2sp2.ibd
+t1#P#part3#SP#part3sp0.ibd
+t1#P#part3#SP#part3sp1.ibd
+t1#P#part3#SP#part3sp2.ibd
t1.frm
t1.par
@@ -27711,7 +28089,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -27748,6 +28126,8 @@ t1 CREATE TABLE `t1` (
PARTITIONS 2 */
unified filelist
+t1#P#p0.ibd
+t1#P#p1.ibd
t1.frm
t1.par
@@ -28220,7 +28600,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -28256,6 +28636,11 @@ t1 CREATE TABLE `t1` (
PARTITIONS 5 */
unified filelist
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1#P#p2.ibd
+t1#P#p3.ibd
+t1#P#p4.ibd
t1.frm
t1.par
@@ -28728,7 +29113,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -28779,6 +29164,14 @@ t1 CREATE TABLE `t1` (
PARTITION part3 VALUES IN (3) ENGINE = InnoDB) */
unified filelist
+t1#P#part0.ibd
+t1#P#part1.ibd
+t1#P#part2.ibd
+t1#P#part3.ibd
+t1#P#part_1.ibd
+t1#P#part_2.ibd
+t1#P#part_3.ibd
+t1#P#part_N.ibd
t1.frm
t1.par
@@ -29251,7 +29644,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -29298,6 +29691,12 @@ t1 CREATE TABLE `t1` (
PARTITION partf VALUES LESS THAN (2147483646) ENGINE = InnoDB) */
unified filelist
+t1#P#parta.ibd
+t1#P#partb.ibd
+t1#P#partc.ibd
+t1#P#partd.ibd
+t1#P#parte.ibd
+t1#P#partf.ibd
t1.frm
t1.par
@@ -29768,7 +30167,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -29813,6 +30212,14 @@ SUBPARTITIONS 2
PARTITION partd VALUES LESS THAN (2147483646) ENGINE = InnoDB) */
unified filelist
+t1#P#parta#SP#partasp0.ibd
+t1#P#parta#SP#partasp1.ibd
+t1#P#partb#SP#partbsp0.ibd
+t1#P#partb#SP#partbsp1.ibd
+t1#P#partc#SP#partcsp0.ibd
+t1#P#partc#SP#partcsp1.ibd
+t1#P#partd#SP#partdsp0.ibd
+t1#P#partd#SP#partdsp1.ibd
t1.frm
t1.par
@@ -30283,7 +30690,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -30339,6 +30746,14 @@ SUBPARTITION BY KEY (f_int2)
SUBPARTITION subpart42 ENGINE = InnoDB)) */
unified filelist
+t1#P#part1#SP#subpart11.ibd
+t1#P#part1#SP#subpart12.ibd
+t1#P#part2#SP#subpart21.ibd
+t1#P#part2#SP#subpart22.ibd
+t1#P#part3#SP#subpart31.ibd
+t1#P#part3#SP#subpart32.ibd
+t1#P#part4#SP#subpart41.ibd
+t1#P#part4#SP#subpart42.ibd
t1.frm
t1.par
@@ -30809,7 +31224,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -30865,6 +31280,14 @@ SUBPARTITION BY HASH (f_int2 + 1)
SUBPARTITION sp42 ENGINE = InnoDB)) */
unified filelist
+t1#P#part1#SP#sp11.ibd
+t1#P#part1#SP#sp12.ibd
+t1#P#part2#SP#sp21.ibd
+t1#P#part2#SP#sp22.ibd
+t1#P#part3#SP#sp31.ibd
+t1#P#part3#SP#sp32.ibd
+t1#P#part4#SP#sp41.ibd
+t1#P#part4#SP#sp42.ibd
t1.frm
t1.par
@@ -31337,7 +31760,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
@@ -31381,6 +31804,15 @@ SUBPARTITIONS 3
PARTITION part3 VALUES IN (NULL) ENGINE = InnoDB) */
unified filelist
+t1#P#part1#SP#part1sp0.ibd
+t1#P#part1#SP#part1sp1.ibd
+t1#P#part1#SP#part1sp2.ibd
+t1#P#part2#SP#part2sp0.ibd
+t1#P#part2#SP#part2sp1.ibd
+t1#P#part2#SP#part2sp2.ibd
+t1#P#part3#SP#part3sp0.ibd
+t1#P#part3#SP#part3sp1.ibd
+t1#P#part3#SP#part3sp2.ibd
t1.frm
t1.par
@@ -31853,7 +32285,7 @@ test.t1 optimize status OK
# check layout success: 1
REPAIR TABLE t1 EXTENDED;
Table Op Msg_type Msg_text
-test.t1 repair status OK
+test.t1 repair note The storage engine for the table doesn't support repair
# check layout success: 1
TRUNCATE t1;
diff --git a/mysql-test/suite/parts/r/partition_basic_myisam.result b/mysql-test/suite/parts/r/partition_basic_myisam.result
index f70dae13bb6..8cddb7baca6 100644
--- a/mysql-test/suite/parts/r/partition_basic_myisam.result
+++ b/mysql-test/suite/parts/r/partition_basic_myisam.result
@@ -1,5 +1,5 @@
SET @max_row = 20;
-SET @@session.storage_engine = 'MyISAM';
+SET @@session.default_storage_engine = 'MyISAM';
#------------------------------------------------------------------------
# 0. Setting of auxiliary variables + Creation of an auxiliary tables
diff --git a/mysql-test/suite/parts/r/partition_basic_symlink_innodb.result b/mysql-test/suite/parts/r/partition_basic_symlink_innodb.result
index 731cb186b34..a202af5b474 100644
--- a/mysql-test/suite/parts/r/partition_basic_symlink_innodb.result
+++ b/mysql-test/suite/parts/r/partition_basic_symlink_innodb.result
@@ -1,10 +1,40 @@
-# Will not run partition_basic_symlink on InnoDB, since it is the same
-# as partition_basic, since InnoDB does not support DATA/INDEX DIR
-# Will only verify that the DATA/INDEX DIR is stored and used if
-# ALTER to MyISAM.
+#
+# Verify that the DATA/INDEX DIR is stored and used if ALTER to MyISAM.
+#
DROP TABLE IF EXISTS t1;
-CREATE TABLE t1 (c1 INT)
-ENGINE = InnoDB
+SET SESSION innodb_strict_mode = ON;
+#
+# InnoDB only supports DATA DIRECTORY with innodb_file_per_table=ON
+#
+SET GLOBAL innodb_file_per_table = OFF;
+CREATE TABLE t1 (c1 INT) ENGINE = InnoDB
+PARTITION BY HASH (c1) (
+PARTITION p0
+DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir'
+ INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir',
+PARTITION p1
+DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir'
+ INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir'
+);
+ERROR HY000: Can't create table 'test.t1' (errno: 140 "Wrong create options")
+SHOW WARNINGS;
+Level Code Message
+Warning 1478 InnoDB: DATA DIRECTORY requires innodb_file_per_table.
+Warning 1478 InnoDB: INDEX DIRECTORY is not supported
+Error 1005 Can't create table 'test.t1' (errno: 140 "Wrong create options")
+Error 6 Error on delete of 'MYSQLD_DATADIR/test/t1.par' (Errcode: 2 "No such file or directory")
+#
+# InnoDB is different from MyISAM in that it uses a text file
+# with an '.isl' extension instead of a symbolic link so that
+# the tablespace can be re-located on any OS. Also, instead of
+# putting the file directly into the DATA DIRECTORY,
+# it adds a folder under it with the name of the database.
+# Since strict mode is off, InnoDB ignores the INDEX DIRECTORY
+# and it is no longer part of the definition.
+#
+SET SESSION innodb_strict_mode = OFF;
+SET GLOBAL innodb_file_per_table = ON;
+CREATE TABLE t1 (c1 INT) ENGINE = InnoDB
PARTITION BY HASH (c1)
(PARTITION p0
DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir'
@@ -13,8 +43,26 @@ PARTITION p1
DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir'
INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir'
);
-# Verifying .frm and .par files
-# Verifying that there are no MyISAM files
+Warnings:
+Warning 1618 <INDEX DIRECTORY> option ignored
+Warning 1618 <INDEX DIRECTORY> option ignored
+SHOW WARNINGS;
+Level Code Message
+Warning 1618 <INDEX DIRECTORY> option ignored
+Warning 1618 <INDEX DIRECTORY> option ignored
+# Verifying .frm, .par, .isl & .ibd files
+---- MYSQLD_DATADIR/test
+t1#P#p0.isl
+t1#P#p1.isl
+t1.frm
+t1.par
+---- MYSQLTEST_VARDIR/mysql-test-data-dir/test
+t1#P#p0.ibd
+t1#P#p1.ibd
+# The ibd tablespaces should not be directly under the DATA DIRECTORY
+---- MYSQLTEST_VARDIR/mysql-test-data-dir
+test
+---- MYSQLTEST_VARDIR/mysql-test-idx-dir
FLUSH TABLES;
SHOW CREATE TABLE t1;
Table Create Table
@@ -22,17 +70,61 @@ t1 CREATE TABLE `t1` (
`c1` int(11) DEFAULT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1
/*!50100 PARTITION BY HASH (c1)
-(PARTITION p0 DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' ENGINE = InnoDB,
- PARTITION p1 DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' ENGINE = InnoDB) */
-ALTER TABLE t1 ENGINE = MyISAM;
-# Verifying .frm, .par and MyISAM files (.MYD, MYI)
-FLUSH TABLES;
+(PARTITION p0 DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' ENGINE = InnoDB,
+ PARTITION p1 DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' ENGINE = InnoDB) */
+#
+# Verify that the DATA/INDEX DIRECTORY is stored and used if we
+# ALTER TABLE to MyISAM.
+#
+ALTER TABLE t1 engine=MyISAM;
SHOW CREATE TABLE t1;
Table Create Table
t1 CREATE TABLE `t1` (
`c1` int(11) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
/*!50100 PARTITION BY HASH (c1)
-(PARTITION p0 DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' ENGINE = MyISAM,
- PARTITION p1 DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' INDEX DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-idx-dir' ENGINE = MyISAM) */
+(PARTITION p0 DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' ENGINE = MyISAM,
+ PARTITION p1 DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' ENGINE = MyISAM) */
+# Verifying .frm, .par and MyISAM files (.MYD, MYI)
+---- MYSQLD_DATADIR/test
+t1#P#p0.MYD
+t1#P#p0.MYI
+t1#P#p1.MYD
+t1#P#p1.MYI
+t1.frm
+t1.par
+---- MYSQLTEST_VARDIR/mysql-test-data-dir
+t1#P#p0.MYD
+t1#P#p1.MYD
+test
+---- MYSQLTEST_VARDIR/mysql-test-idx-dir
+#
+# Now verify that the DATA DIRECTORY is used again if we
+# ALTER TABLE back to InnoDB.
+#
+SET SESSION innodb_strict_mode = ON;
+ALTER TABLE t1 engine=InnoDB;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `c1` int(11) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY HASH (c1)
+(PARTITION p0 DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' ENGINE = InnoDB,
+ PARTITION p1 DATA DIRECTORY = 'MYSQLTEST_VARDIR/mysql-test-data-dir' ENGINE = InnoDB) */
+# Verifying .frm, .par, .isl and InnoDB .ibd files
+---- MYSQLD_DATADIR/test
+t1#P#p0.isl
+t1#P#p1.isl
+t1.frm
+t1.par
+---- MYSQLTEST_VARDIR/mysql-test-data-dir
+test
+---- MYSQLTEST_VARDIR/mysql-test-idx-dir
+---- MYSQLTEST_VARDIR/mysql-test-data-dir/test
+t1#P#p0.ibd
+t1#P#p1.ibd
DROP TABLE t1;
+#
+# Cleanup
+#
diff --git a/mysql-test/suite/parts/r/partition_basic_symlink_myisam.result b/mysql-test/suite/parts/r/partition_basic_symlink_myisam.result
index 146b3d361fb..5f44fd138d9 100644
--- a/mysql-test/suite/parts/r/partition_basic_symlink_myisam.result
+++ b/mysql-test/suite/parts/r/partition_basic_symlink_myisam.result
@@ -1,5 +1,5 @@
SET @max_row = 20;
-SET @@session.storage_engine = 'MyISAM';
+SET @@session.default_storage_engine = 'MyISAM';
#------------------------------------------------------------------------
# 0. Setting of auxiliary variables + Creation of an auxiliary tables
diff --git a/mysql-test/suite/parts/r/partition_debug_innodb.result b/mysql-test/suite/parts/r/partition_debug_innodb.result
index 891091efd58..7ecd746077e 100644
--- a/mysql-test/suite/parts/r/partition_debug_innodb.result
+++ b/mysql-test/suite/parts/r/partition_debug_innodb.result
@@ -1,75 +1,4 @@
DROP TABLE IF EXISTS t1;
-#
-# Bug#12696518/Bug#11766879/60106:DIFF BETWEEN # OF INDEXES IN MYSQL
-# VS INNODB, PARTITONING, ON INDEX CREATE
-#
-CREATE TABLE t1
-(a INT PRIMARY KEY,
-b VARCHAR(64))
-ENGINE = InnoDB
-PARTITION BY HASH (a) PARTITIONS 3;
-INSERT INTO t1 VALUES (0, 'first row'), (1, 'second row'), (2, 'Third row');
-INSERT INTO t1 VALUES (3, 'row id 3'), (4, '4 row'), (5, 'row5');
-INSERT INTO t1 VALUES (6, 'X 6 row'), (7, 'Seventh row'), (8, 'Last row');
-ALTER TABLE t1 ADD INDEX new_b_index (b);
-ALTER TABLE t1 DROP INDEX new_b_index;
-SET SESSION debug_dbug = "+d,ha_partition_fail_final_add_index";
-ALTER TABLE t1 ADD INDEX (b);
-ERROR HY000: Table has no partition for value 0
-SHOW CREATE TABLE t1;
-Table Create Table
-t1 CREATE TABLE `t1` (
- `a` int(11) NOT NULL,
- `b` varchar(64) DEFAULT NULL,
- PRIMARY KEY (`a`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1
-/*!50100 PARTITION BY HASH (a)
-PARTITIONS 3 */
-SELECT * FROM t1;
-a b
-0 first row
-1 second row
-2 Third row
-3 row id 3
-4 4 row
-5 row5
-6 X 6 row
-7 Seventh row
-8 Last row
-FLUSH TABLES;
-CREATE INDEX new_index ON t1 (b);
-ERROR HY000: Table has no partition for value 0
-SHOW CREATE TABLE t1;
-Table Create Table
-t1 CREATE TABLE `t1` (
- `a` int(11) NOT NULL,
- `b` varchar(64) DEFAULT NULL,
- PRIMARY KEY (`a`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1
-/*!50100 PARTITION BY HASH (a)
-PARTITIONS 3 */
-SELECT * FROM t1;
-a b
-0 first row
-1 second row
-2 Third row
-3 row id 3
-4 4 row
-5 row5
-6 X 6 row
-7 Seventh row
-8 Last row
-SET SESSION debug_dbug = "-d,ha_partition_fail_final_add_index";
-SHOW CREATE TABLE t1;
-Table Create Table
-t1 CREATE TABLE `t1` (
- `a` int(11) NOT NULL,
- `b` varchar(64) DEFAULT NULL,
- PRIMARY KEY (`a`)
-) ENGINE=InnoDB DEFAULT CHARSET=latin1
-/*!50100 PARTITION BY HASH (a)
-PARTITIONS 3 */
-DROP TABLE t1;
call mtr.add_suppression("InnoDB: Warning: allocated tablespace .*, old maximum was");
call mtr.add_suppression("InnoDB: Error: table .* does not exist in the InnoDB internal");
call mtr.add_suppression("InnoDB: Warning: MySQL is trying to drop table ");
@@ -3922,7 +3851,6 @@ t1.par
# State after crash recovery
t1#P#p0.ibd
t1#P#p10.ibd
-t1#P#p20.ibd
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -3933,8 +3861,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=InnoDB DEFAULT CHARSET=latin1
/*!50100 PARTITION BY LIST (a)
(PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = InnoDB,
- PARTITION p10 VALUES IN (10,11,12,13,14,15,16,17,18,19) ENGINE = InnoDB,
- PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29) ENGINE = InnoDB) */
+ PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = InnoDB) */
SELECT * FROM t1;
a b
1 Original from partition p0
@@ -4055,6 +3982,8 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO
PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
+#sql-t1.frm
+#sql-t1.par
t1#P#p0.ibd
t1#P#p10#TMP#.ibd
t1#P#p10.ibd
@@ -5022,7 +4951,6 @@ ERROR HY000: Unknown error
# State after failure
t1#P#p0.ibd
t1#P#p10.ibd
-t1#P#p20.ibd
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -5033,8 +4961,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=InnoDB DEFAULT CHARSET=latin1
/*!50100 PARTITION BY LIST (a)
(PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = InnoDB,
- PARTITION p10 VALUES IN (10,11,12,13,14,15,16,17,18,19) ENGINE = InnoDB,
- PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29) ENGINE = InnoDB) */
+ PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = InnoDB) */
SELECT * FROM t1;
a b
1 Original from partition p0
@@ -5085,7 +5012,6 @@ ERROR HY000: Unknown error
# State after failure
t1#P#p0.ibd
t1#P#p10.ibd
-t1#P#p20.ibd
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -5096,8 +5022,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=InnoDB DEFAULT CHARSET=latin1
/*!50100 PARTITION BY LIST (a)
(PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = InnoDB,
- PARTITION p10 VALUES IN (10,11,12,13,14,15,16,17,18,19) ENGINE = InnoDB,
- PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29) ENGINE = InnoDB) */
+ PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = InnoDB) */
SELECT * FROM t1;
a b
1 Original from partition p0
@@ -5873,3 +5798,1836 @@ a b
UNLOCK TABLES;
DROP TABLE t1;
SET SESSION debug_dbug="-d,fail_change_partition_12";
+#
+# WL#4445: EXCHANGE PARTITION WITH TABLE
+# Verify ddl_log and InnoDB in case of crashing.
+call mtr.add_suppression("InnoDB: Warning: allocated tablespace .*, old maximum was ");
+call mtr.add_suppression("Attempting backtrace. You can use the following information to find out");
+call mtr.add_suppression("table .* does not exist in the InnoDB internal");
+SET SESSION debug_dbug="+d,exchange_partition_abort_1";
+CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = InnoDB;
+INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2");
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+CREATE TABLE t1 (a INT, b VARCHAR(64))
+ENGINE = InnoDB
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10),
+PARTITION p1 VALUES LESS THAN MAXVALUE);
+INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
+# State before crash
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+1 Original from partition p0
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+2 Original from partition p0
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+3 Original from partition p0
+4 Original from partition p0
+ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
+ERROR HY000: Lost connection to MySQL server during query
+# State after crash (before recovery)
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+# State after crash recovery
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+1 Original from partition p0
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+2 Original from partition p0
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+3 Original from partition p0
+4 Original from partition p0
+DROP TABLE t1;
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+DROP TABLE t2;
+SET SESSION debug_dbug="-d,exchange_partition_abort_1";
+SET SESSION debug_dbug="+d,exchange_partition_abort_2";
+CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = InnoDB;
+INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2");
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+CREATE TABLE t1 (a INT, b VARCHAR(64))
+ENGINE = InnoDB
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10),
+PARTITION p1 VALUES LESS THAN MAXVALUE);
+INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
+# State before crash
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+1 Original from partition p0
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+2 Original from partition p0
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+3 Original from partition p0
+4 Original from partition p0
+ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
+ERROR HY000: Lost connection to MySQL server during query
+# State after crash (before recovery)
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+# State after crash recovery
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+1 Original from partition p0
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+2 Original from partition p0
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+3 Original from partition p0
+4 Original from partition p0
+DROP TABLE t1;
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+DROP TABLE t2;
+SET SESSION debug_dbug="-d,exchange_partition_abort_2";
+SET SESSION debug_dbug="+d,exchange_partition_abort_3";
+CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = InnoDB;
+INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2");
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+CREATE TABLE t1 (a INT, b VARCHAR(64))
+ENGINE = InnoDB
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10),
+PARTITION p1 VALUES LESS THAN MAXVALUE);
+INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
+# State before crash
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+1 Original from partition p0
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+2 Original from partition p0
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+3 Original from partition p0
+4 Original from partition p0
+ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
+ERROR HY000: Lost connection to MySQL server during query
+# State after crash (before recovery)
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+# State after crash recovery
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+1 Original from partition p0
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+2 Original from partition p0
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+3 Original from partition p0
+4 Original from partition p0
+DROP TABLE t1;
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+DROP TABLE t2;
+SET SESSION debug_dbug="-d,exchange_partition_abort_3";
+SET SESSION debug_dbug="+d,exchange_partition_abort_4";
+CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = InnoDB;
+INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2");
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+CREATE TABLE t1 (a INT, b VARCHAR(64))
+ENGINE = InnoDB
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10),
+PARTITION p1 VALUES LESS THAN MAXVALUE);
+INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
+# State before crash
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+1 Original from partition p0
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+2 Original from partition p0
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+3 Original from partition p0
+4 Original from partition p0
+ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
+ERROR HY000: Lost connection to MySQL server during query
+# State after crash (before recovery)
+#sqlx-nnnn_nnnn.ibd
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+# State after crash recovery
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+1 Original from partition p0
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+2 Original from partition p0
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+3 Original from partition p0
+4 Original from partition p0
+DROP TABLE t1;
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+DROP TABLE t2;
+SET SESSION debug_dbug="-d,exchange_partition_abort_4";
+SET SESSION debug_dbug="+d,exchange_partition_abort_5";
+CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = InnoDB;
+INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2");
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+CREATE TABLE t1 (a INT, b VARCHAR(64))
+ENGINE = InnoDB
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10),
+PARTITION p1 VALUES LESS THAN MAXVALUE);
+INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
+# State before crash
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+1 Original from partition p0
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+2 Original from partition p0
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+3 Original from partition p0
+4 Original from partition p0
+ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
+ERROR HY000: Lost connection to MySQL server during query
+# State after crash (before recovery)
+#sqlx-nnnn_nnnn.ibd
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+# State after crash recovery
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+1 Original from partition p0
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+2 Original from partition p0
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+3 Original from partition p0
+4 Original from partition p0
+DROP TABLE t1;
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+DROP TABLE t2;
+SET SESSION debug_dbug="-d,exchange_partition_abort_5";
+SET SESSION debug_dbug="+d,exchange_partition_abort_6";
+CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = InnoDB;
+INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2");
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+CREATE TABLE t1 (a INT, b VARCHAR(64))
+ENGINE = InnoDB
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10),
+PARTITION p1 VALUES LESS THAN MAXVALUE);
+INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
+# State before crash
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+1 Original from partition p0
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+2 Original from partition p0
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+3 Original from partition p0
+4 Original from partition p0
+ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
+ERROR HY000: Lost connection to MySQL server during query
+# State after crash (before recovery)
+#sqlx-nnnn_nnnn.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+# State after crash recovery
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+1 Original from partition p0
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+2 Original from partition p0
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+3 Original from partition p0
+4 Original from partition p0
+DROP TABLE t1;
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+DROP TABLE t2;
+SET SESSION debug_dbug="-d,exchange_partition_abort_6";
+SET SESSION debug_dbug="+d,exchange_partition_abort_7";
+CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = InnoDB;
+INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2");
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+CREATE TABLE t1 (a INT, b VARCHAR(64))
+ENGINE = InnoDB
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10),
+PARTITION p1 VALUES LESS THAN MAXVALUE);
+INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
+# State before crash
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+1 Original from partition p0
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+2 Original from partition p0
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+3 Original from partition p0
+4 Original from partition p0
+ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
+ERROR HY000: Lost connection to MySQL server during query
+# State after crash (before recovery)
+#sqlx-nnnn_nnnn.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+# State after crash recovery
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+1 Original from partition p0
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+2 Original from partition p0
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+3 Original from partition p0
+4 Original from partition p0
+DROP TABLE t1;
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+DROP TABLE t2;
+SET SESSION debug_dbug="-d,exchange_partition_abort_7";
+SET SESSION debug_dbug="+d,exchange_partition_abort_8";
+CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = InnoDB;
+INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2");
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+CREATE TABLE t1 (a INT, b VARCHAR(64))
+ENGINE = InnoDB
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10),
+PARTITION p1 VALUES LESS THAN MAXVALUE);
+INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
+# State before crash
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+1 Original from partition p0
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+2 Original from partition p0
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+3 Original from partition p0
+4 Original from partition p0
+ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
+ERROR HY000: Lost connection to MySQL server during query
+# State after crash (before recovery)
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+# State after crash recovery
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+1 Original from partition p0
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+2 Original from partition p0
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+3 Original from partition p0
+4 Original from partition p0
+DROP TABLE t1;
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+DROP TABLE t2;
+SET SESSION debug_dbug="-d,exchange_partition_abort_8";
+SET SESSION debug_dbug="+d,exchange_partition_abort_9";
+CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = InnoDB;
+INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2");
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+CREATE TABLE t1 (a INT, b VARCHAR(64))
+ENGINE = InnoDB
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10),
+PARTITION p1 VALUES LESS THAN MAXVALUE);
+INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
+# State before crash
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+1 Original from partition p0
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+2 Original from partition p0
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+3 Original from partition p0
+4 Original from partition p0
+ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
+ERROR HY000: Lost connection to MySQL server during query
+# State after crash (before recovery)
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+# State after crash recovery
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+DROP TABLE t1;
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+1 Original from partition p0
+2 Original from partition p0
+3 Original from partition p0
+4 Original from partition p0
+DROP TABLE t2;
+SET SESSION debug_dbug="-d,exchange_partition_abort_9";
+SET SESSION debug_dbug="+d,exchange_partition_fail_1";
+CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = InnoDB;
+INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2");
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+CREATE TABLE t1 (a INT, b VARCHAR(64))
+ENGINE = InnoDB
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10),
+PARTITION p1 VALUES LESS THAN MAXVALUE);
+INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
+# State before failure
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+1 Original from partition p0
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+2 Original from partition p0
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+3 Original from partition p0
+4 Original from partition p0
+ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
+ERROR HY000: Error in DDL log
+# State after failure
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+1 Original from partition p0
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+2 Original from partition p0
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+3 Original from partition p0
+4 Original from partition p0
+DROP TABLE t1;
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+DROP TABLE t2;
+SET SESSION debug_dbug="-d,exchange_partition_fail_1";
+SET SESSION debug_dbug="+d,exchange_partition_fail_2";
+CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = InnoDB;
+INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2");
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+CREATE TABLE t1 (a INT, b VARCHAR(64))
+ENGINE = InnoDB
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10),
+PARTITION p1 VALUES LESS THAN MAXVALUE);
+INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
+# State before failure
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+1 Original from partition p0
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+2 Original from partition p0
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+3 Original from partition p0
+4 Original from partition p0
+ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
+ERROR HY000: Error in DDL log
+# State after failure
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+1 Original from partition p0
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+2 Original from partition p0
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+3 Original from partition p0
+4 Original from partition p0
+DROP TABLE t1;
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+DROP TABLE t2;
+SET SESSION debug_dbug="-d,exchange_partition_fail_2";
+SET SESSION debug_dbug="+d,exchange_partition_fail_3";
+CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = InnoDB;
+INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2");
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+CREATE TABLE t1 (a INT, b VARCHAR(64))
+ENGINE = InnoDB
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10),
+PARTITION p1 VALUES LESS THAN MAXVALUE);
+INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
+# State before failure
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+1 Original from partition p0
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+2 Original from partition p0
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+3 Original from partition p0
+4 Original from partition p0
+ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
+ERROR HY000: Error on rename of './test/t2' to './test/#sqlx-nnnn_nnnn' (errno: 0 "Internal error/check (Not system error)")
+# State after failure
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+1 Original from partition p0
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+2 Original from partition p0
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+3 Original from partition p0
+4 Original from partition p0
+DROP TABLE t1;
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+DROP TABLE t2;
+SET SESSION debug_dbug="-d,exchange_partition_fail_3";
+SET SESSION debug_dbug="+d,exchange_partition_fail_4";
+CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = InnoDB;
+INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2");
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+CREATE TABLE t1 (a INT, b VARCHAR(64))
+ENGINE = InnoDB
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10),
+PARTITION p1 VALUES LESS THAN MAXVALUE);
+INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
+# State before failure
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+1 Original from partition p0
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+2 Original from partition p0
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+3 Original from partition p0
+4 Original from partition p0
+ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
+ERROR HY000: Error in DDL log
+# State after failure
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+1 Original from partition p0
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+2 Original from partition p0
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+3 Original from partition p0
+4 Original from partition p0
+DROP TABLE t1;
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+DROP TABLE t2;
+SET SESSION debug_dbug="-d,exchange_partition_fail_4";
+SET SESSION debug_dbug="+d,exchange_partition_fail_5";
+CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = InnoDB;
+INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2");
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+CREATE TABLE t1 (a INT, b VARCHAR(64))
+ENGINE = InnoDB
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10),
+PARTITION p1 VALUES LESS THAN MAXVALUE);
+INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
+# State before failure
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+1 Original from partition p0
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+2 Original from partition p0
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+3 Original from partition p0
+4 Original from partition p0
+ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
+ERROR HY000: Error on rename of './test/t1#P#p0' to './test/t2' (errno: 0 "Internal error/check (Not system error)")
+# State after failure
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+1 Original from partition p0
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+2 Original from partition p0
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+3 Original from partition p0
+4 Original from partition p0
+DROP TABLE t1;
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+DROP TABLE t2;
+SET SESSION debug_dbug="-d,exchange_partition_fail_5";
+SET SESSION debug_dbug="+d,exchange_partition_fail_6";
+CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = InnoDB;
+INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2");
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+CREATE TABLE t1 (a INT, b VARCHAR(64))
+ENGINE = InnoDB
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10),
+PARTITION p1 VALUES LESS THAN MAXVALUE);
+INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
+# State before failure
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+1 Original from partition p0
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+2 Original from partition p0
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+3 Original from partition p0
+4 Original from partition p0
+ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
+ERROR HY000: Error in DDL log
+# State after failure
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+1 Original from partition p0
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+2 Original from partition p0
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+3 Original from partition p0
+4 Original from partition p0
+DROP TABLE t1;
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+DROP TABLE t2;
+SET SESSION debug_dbug="-d,exchange_partition_fail_6";
+SET SESSION debug_dbug="+d,exchange_partition_fail_7";
+CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = InnoDB;
+INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2");
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+CREATE TABLE t1 (a INT, b VARCHAR(64))
+ENGINE = InnoDB
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10),
+PARTITION p1 VALUES LESS THAN MAXVALUE);
+INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
+# State before failure
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+1 Original from partition p0
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+2 Original from partition p0
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+3 Original from partition p0
+4 Original from partition p0
+ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
+ERROR HY000: Error on rename of './test/#sqlx-nnnn_nnnn' to './test/t1#P#p0' (errno: 0 "Internal error/check (Not system error)")
+# State after failure
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+1 Original from partition p0
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+2 Original from partition p0
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+3 Original from partition p0
+4 Original from partition p0
+DROP TABLE t1;
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+DROP TABLE t2;
+SET SESSION debug_dbug="-d,exchange_partition_fail_7";
+SET SESSION debug_dbug="+d,exchange_partition_fail_8";
+CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = InnoDB;
+INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2");
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+CREATE TABLE t1 (a INT, b VARCHAR(64))
+ENGINE = InnoDB
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10),
+PARTITION p1 VALUES LESS THAN MAXVALUE);
+INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
+# State before failure
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+1 Original from partition p0
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+2 Original from partition p0
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+3 Original from partition p0
+4 Original from partition p0
+ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
+ERROR HY000: Error in DDL log
+# State after failure
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+1 Original from partition p0
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+2 Original from partition p0
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+3 Original from partition p0
+4 Original from partition p0
+DROP TABLE t1;
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+DROP TABLE t2;
+SET SESSION debug_dbug="-d,exchange_partition_fail_8";
+SET SESSION debug_dbug="+d,exchange_partition_fail_9";
+CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = InnoDB;
+INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2");
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+CREATE TABLE t1 (a INT, b VARCHAR(64))
+ENGINE = InnoDB
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10),
+PARTITION p1 VALUES LESS THAN MAXVALUE);
+INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
+# State before failure
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+1 Original from partition p0
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+2 Original from partition p0
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+3 Original from partition p0
+4 Original from partition p0
+ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
+ERROR HY000: Error in DDL log
+# State after failure
+t1#P#p0.ibd
+t1#P#p1.ibd
+t1.frm
+t1.par
+t2.frm
+t2.ibd
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+/*!50100 PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN MAXVALUE ENGINE = InnoDB) */
+SELECT * FROM t1;
+a b
+11 Original from partition p1
+12 Original from partition p1
+13 Original from partition p1
+14 Original from partition p1
+21 Original from partition p1
+22 Original from partition p1
+23 Original from partition p1
+24 Original from partition p1
+5 Original from table t2
+6 Original from table t2
+7 Original from table t2
+8 Original from table t2
+DROP TABLE t1;
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) DEFAULT NULL,
+ `b` varchar(64) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+SELECT * FROM t2;
+a b
+1 Original from partition p0
+2 Original from partition p0
+3 Original from partition p0
+4 Original from partition p0
+DROP TABLE t2;
+SET SESSION debug_dbug="-d,exchange_partition_fail_9";
diff --git a/mysql-test/suite/parts/r/partition_debug_myisam.result b/mysql-test/suite/parts/r/partition_debug_myisam.result
index 2411d58169e..8408f166e79 100644
--- a/mysql-test/suite/parts/r/partition_debug_myisam.result
+++ b/mysql-test/suite/parts/r/partition_debug_myisam.result
@@ -4151,8 +4151,6 @@ t1#P#p0.MYD
t1#P#p0.MYI
t1#P#p10.MYD
t1#P#p10.MYI
-t1#P#p20.MYD
-t1#P#p20.MYI
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -4163,8 +4161,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=MyISAM DEFAULT CHARSET=latin1
/*!50100 PARTITION BY LIST (a)
(PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = MyISAM,
- PARTITION p10 VALUES IN (10,11,12,13,14,15,16,17,18,19) ENGINE = MyISAM,
- PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29) ENGINE = MyISAM) */
+ PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = MyISAM) */
SELECT * FROM t1;
a b
1 Original from partition p0
@@ -4296,6 +4293,8 @@ ALTER TABLE t1 REORGANIZE PARTITION p10 INTO
PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29));
ERROR HY000: Lost connection to MySQL server during query
# State after crash (before recovery)
+#sql-t1.frm
+#sql-t1.par
t1#P#p0.MYD
t1#P#p0.MYI
t1#P#p10#TMP#.MYD
@@ -5348,8 +5347,6 @@ t1#P#p0.MYD
t1#P#p0.MYI
t1#P#p10.MYD
t1#P#p10.MYI
-t1#P#p20.MYD
-t1#P#p20.MYI
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -5360,8 +5357,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=MyISAM DEFAULT CHARSET=latin1
/*!50100 PARTITION BY LIST (a)
(PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = MyISAM,
- PARTITION p10 VALUES IN (10,11,12,13,14,15,16,17,18,19) ENGINE = MyISAM,
- PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29) ENGINE = MyISAM) */
+ PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = MyISAM) */
SELECT * FROM t1;
a b
1 Original from partition p0
@@ -5416,8 +5412,6 @@ t1#P#p0.MYD
t1#P#p0.MYI
t1#P#p10.MYD
t1#P#p10.MYI
-t1#P#p20.MYD
-t1#P#p20.MYI
t1.frm
t1.par
SHOW CREATE TABLE t1;
@@ -5428,8 +5422,7 @@ t1 CREATE TABLE `t1` (
) ENGINE=MyISAM DEFAULT CHARSET=latin1
/*!50100 PARTITION BY LIST (a)
(PARTITION p0 VALUES IN (0,1,2,3,4,5,6,7,8,9) ENGINE = MyISAM,
- PARTITION p10 VALUES IN (10,11,12,13,14,15,16,17,18,19) ENGINE = MyISAM,
- PARTITION p20 VALUES IN (20,21,22,23,24,25,26,27,28,29) ENGINE = MyISAM) */
+ PARTITION p10 VALUES IN (11,12,13,14,15,16,17,18,19) ENGINE = MyISAM) */
SELECT * FROM t1;
a b
1 Original from partition p0
diff --git a/mysql-test/suite/parts/r/partition_engine_innodb.result b/mysql-test/suite/parts/r/partition_engine_innodb.result
index 5335983b560..8e5f7c43dd9 100644
--- a/mysql-test/suite/parts/r/partition_engine_innodb.result
+++ b/mysql-test/suite/parts/r/partition_engine_innodb.result
@@ -1,5 +1,5 @@
SET @max_row = 20;
-SET @@session.storage_engine = 'InnoDB';
+SET @@session.default_storage_engine = 'InnoDB';
#------------------------------------------------------------------------
# 0. Setting of auxiliary variables + Creation of an auxiliary tables
diff --git a/mysql-test/suite/parts/r/partition_engine_myisam.result b/mysql-test/suite/parts/r/partition_engine_myisam.result
index cfba8731b2f..e63f9ee2fff 100644
--- a/mysql-test/suite/parts/r/partition_engine_myisam.result
+++ b/mysql-test/suite/parts/r/partition_engine_myisam.result
@@ -1,5 +1,5 @@
SET @max_row = 20;
-SET @@session.storage_engine = 'MyISAM';
+SET @@session.default_storage_engine = 'MyISAM';
#------------------------------------------------------------------------
# 0. Setting of auxiliary variables + Creation of an auxiliary tables
diff --git a/mysql-test/suite/parts/r/partition_mgm_lc0_archive.result b/mysql-test/suite/parts/r/partition_mgm_lc0_archive.result
index f4d76874b4c..f1889289368 100644
--- a/mysql-test/suite/parts/r/partition_mgm_lc0_archive.result
+++ b/mysql-test/suite/parts/r/partition_mgm_lc0_archive.result
@@ -32,7 +32,7 @@ ALTER TABLE TableA ADD PARTITION
(PARTITION partA,
PARTITION Parta,
PARTITION PartA);
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
ALTER TABLE TableA ADD PARTITION
(PARTITION partE,
PARTITION Partf,
@@ -237,7 +237,7 @@ PARTITION BY HASH (a)
PARTITION partA ,
PARTITION Parta ,
PARTITION PartA );
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
# Creating Hash partitioned table
CREATE TABLE TableA (a INT)
ENGINE = 'Archive'
@@ -268,7 +268,7 @@ ALTER TABLE TableA ADD PARTITION
(PARTITION partA,
PARTITION Parta,
PARTITION PartA);
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
ALTER TABLE TableA ADD PARTITION
(PARTITION partE,
PARTITION Partf,
@@ -493,7 +493,7 @@ a
# expecting duplicate partition name
ALTER TABLE TableA ADD PARTITION
(PARTITION partA VALUES LESS THAN (MAXVALUE));
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
ALTER TABLE TableA ADD PARTITION
(PARTITION partE VALUES LESS THAN (16),
PARTITION Partf VALUES LESS THAN (19),
@@ -724,7 +724,7 @@ a
# expecting duplicate partition name
ALTER TABLE TableA ADD PARTITION
(PARTITION partA VALUES IN (0));
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
ALTER TABLE TableA ADD PARTITION
(PARTITION partE VALUES IN (16),
PARTITION Partf VALUES IN (19),
diff --git a/mysql-test/suite/parts/r/partition_mgm_lc0_innodb.result b/mysql-test/suite/parts/r/partition_mgm_lc0_innodb.result
index 19f16780d13..da51cb26a1b 100644
--- a/mysql-test/suite/parts/r/partition_mgm_lc0_innodb.result
+++ b/mysql-test/suite/parts/r/partition_mgm_lc0_innodb.result
@@ -32,7 +32,7 @@ ALTER TABLE TableA ADD PARTITION
(PARTITION partA,
PARTITION Parta,
PARTITION PartA);
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
ALTER TABLE TableA ADD PARTITION
(PARTITION partE,
PARTITION Partf,
@@ -237,7 +237,7 @@ PARTITION BY HASH (a)
PARTITION partA ,
PARTITION Parta ,
PARTITION PartA );
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
# Creating Hash partitioned table
CREATE TABLE TableA (a INT)
ENGINE = 'InnoDB'
@@ -268,7 +268,7 @@ ALTER TABLE TableA ADD PARTITION
(PARTITION partA,
PARTITION Parta,
PARTITION PartA);
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
ALTER TABLE TableA ADD PARTITION
(PARTITION partE,
PARTITION Partf,
@@ -493,7 +493,7 @@ a
# expecting duplicate partition name
ALTER TABLE TableA ADD PARTITION
(PARTITION partA VALUES LESS THAN (MAXVALUE));
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
ALTER TABLE TableA ADD PARTITION
(PARTITION partE VALUES LESS THAN (16),
PARTITION Partf VALUES LESS THAN (19),
@@ -724,7 +724,7 @@ a
# expecting duplicate partition name
ALTER TABLE TableA ADD PARTITION
(PARTITION partA VALUES IN (0));
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
ALTER TABLE TableA ADD PARTITION
(PARTITION partE VALUES IN (16),
PARTITION Partf VALUES IN (19),
diff --git a/mysql-test/suite/parts/r/partition_mgm_lc0_memory.result b/mysql-test/suite/parts/r/partition_mgm_lc0_memory.result
index 69a43b64d87..8e8fcfd7672 100644
--- a/mysql-test/suite/parts/r/partition_mgm_lc0_memory.result
+++ b/mysql-test/suite/parts/r/partition_mgm_lc0_memory.result
@@ -32,7 +32,7 @@ ALTER TABLE TableA ADD PARTITION
(PARTITION partA,
PARTITION Parta,
PARTITION PartA);
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
ALTER TABLE TableA ADD PARTITION
(PARTITION partE,
PARTITION Partf,
@@ -237,7 +237,7 @@ PARTITION BY HASH (a)
PARTITION partA ,
PARTITION Parta ,
PARTITION PartA );
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
# Creating Hash partitioned table
CREATE TABLE TableA (a INT)
ENGINE = 'Memory'
@@ -268,7 +268,7 @@ ALTER TABLE TableA ADD PARTITION
(PARTITION partA,
PARTITION Parta,
PARTITION PartA);
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
ALTER TABLE TableA ADD PARTITION
(PARTITION partE,
PARTITION Partf,
@@ -493,7 +493,7 @@ a
# expecting duplicate partition name
ALTER TABLE TableA ADD PARTITION
(PARTITION partA VALUES LESS THAN (MAXVALUE));
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
ALTER TABLE TableA ADD PARTITION
(PARTITION partE VALUES LESS THAN (16),
PARTITION Partf VALUES LESS THAN (19),
@@ -724,7 +724,7 @@ a
# expecting duplicate partition name
ALTER TABLE TableA ADD PARTITION
(PARTITION partA VALUES IN (0));
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
ALTER TABLE TableA ADD PARTITION
(PARTITION partE VALUES IN (16),
PARTITION Partf VALUES IN (19),
diff --git a/mysql-test/suite/parts/r/partition_mgm_lc0_myisam.result b/mysql-test/suite/parts/r/partition_mgm_lc0_myisam.result
index 9b4e85be9d0..321d10a6a37 100644
--- a/mysql-test/suite/parts/r/partition_mgm_lc0_myisam.result
+++ b/mysql-test/suite/parts/r/partition_mgm_lc0_myisam.result
@@ -32,7 +32,7 @@ ALTER TABLE TableA ADD PARTITION
(PARTITION partA,
PARTITION Parta,
PARTITION PartA);
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
ALTER TABLE TableA ADD PARTITION
(PARTITION partE,
PARTITION Partf,
@@ -237,7 +237,7 @@ PARTITION BY HASH (a)
PARTITION partA ,
PARTITION Parta ,
PARTITION PartA );
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
# Creating Hash partitioned table
CREATE TABLE TableA (a INT)
ENGINE = 'MyISAM'
@@ -268,7 +268,7 @@ ALTER TABLE TableA ADD PARTITION
(PARTITION partA,
PARTITION Parta,
PARTITION PartA);
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
ALTER TABLE TableA ADD PARTITION
(PARTITION partE,
PARTITION Partf,
@@ -493,7 +493,7 @@ a
# expecting duplicate partition name
ALTER TABLE TableA ADD PARTITION
(PARTITION partA VALUES LESS THAN (MAXVALUE));
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
ALTER TABLE TableA ADD PARTITION
(PARTITION partE VALUES LESS THAN (16),
PARTITION Partf VALUES LESS THAN (19),
@@ -724,7 +724,7 @@ a
# expecting duplicate partition name
ALTER TABLE TableA ADD PARTITION
(PARTITION partA VALUES IN (0));
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
ALTER TABLE TableA ADD PARTITION
(PARTITION partE VALUES IN (16),
PARTITION Partf VALUES IN (19),
diff --git a/mysql-test/suite/parts/r/partition_mgm_lc1_archive.result b/mysql-test/suite/parts/r/partition_mgm_lc1_archive.result
index 1113e7dcb0c..fe33a5bdcd1 100644
--- a/mysql-test/suite/parts/r/partition_mgm_lc1_archive.result
+++ b/mysql-test/suite/parts/r/partition_mgm_lc1_archive.result
@@ -32,7 +32,7 @@ ALTER TABLE TableA ADD PARTITION
(PARTITION partA,
PARTITION Parta,
PARTITION PartA);
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
ALTER TABLE TableA ADD PARTITION
(PARTITION partE,
PARTITION Partf,
@@ -228,7 +228,7 @@ PARTITION BY HASH (a)
PARTITION partA ,
PARTITION Parta ,
PARTITION PartA );
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
# Creating Hash partitioned table
CREATE TABLE TableA (a INT)
ENGINE = 'Archive'
@@ -259,7 +259,7 @@ ALTER TABLE TableA ADD PARTITION
(PARTITION partA,
PARTITION Parta,
PARTITION PartA);
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
ALTER TABLE TableA ADD PARTITION
(PARTITION partE,
PARTITION Partf,
@@ -475,7 +475,7 @@ a
# expecting duplicate partition name
ALTER TABLE TableA ADD PARTITION
(PARTITION partA VALUES LESS THAN (MAXVALUE));
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
ALTER TABLE TableA ADD PARTITION
(PARTITION partE VALUES LESS THAN (16),
PARTITION Partf VALUES LESS THAN (19),
@@ -698,7 +698,7 @@ a
# expecting duplicate partition name
ALTER TABLE TableA ADD PARTITION
(PARTITION partA VALUES IN (0));
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
ALTER TABLE TableA ADD PARTITION
(PARTITION partE VALUES IN (16),
PARTITION Partf VALUES IN (19),
diff --git a/mysql-test/suite/parts/r/partition_mgm_lc1_innodb.result b/mysql-test/suite/parts/r/partition_mgm_lc1_innodb.result
index 952f4136cb6..f2509471e9e 100644
--- a/mysql-test/suite/parts/r/partition_mgm_lc1_innodb.result
+++ b/mysql-test/suite/parts/r/partition_mgm_lc1_innodb.result
@@ -32,7 +32,7 @@ ALTER TABLE TableA ADD PARTITION
(PARTITION partA,
PARTITION Parta,
PARTITION PartA);
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
ALTER TABLE TableA ADD PARTITION
(PARTITION partE,
PARTITION Partf,
@@ -228,7 +228,7 @@ PARTITION BY HASH (a)
PARTITION partA ,
PARTITION Parta ,
PARTITION PartA );
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
# Creating Hash partitioned table
CREATE TABLE TableA (a INT)
ENGINE = 'InnoDB'
@@ -259,7 +259,7 @@ ALTER TABLE TableA ADD PARTITION
(PARTITION partA,
PARTITION Parta,
PARTITION PartA);
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
ALTER TABLE TableA ADD PARTITION
(PARTITION partE,
PARTITION Partf,
@@ -475,7 +475,7 @@ a
# expecting duplicate partition name
ALTER TABLE TableA ADD PARTITION
(PARTITION partA VALUES LESS THAN (MAXVALUE));
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
ALTER TABLE TableA ADD PARTITION
(PARTITION partE VALUES LESS THAN (16),
PARTITION Partf VALUES LESS THAN (19),
@@ -698,7 +698,7 @@ a
# expecting duplicate partition name
ALTER TABLE TableA ADD PARTITION
(PARTITION partA VALUES IN (0));
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
ALTER TABLE TableA ADD PARTITION
(PARTITION partE VALUES IN (16),
PARTITION Partf VALUES IN (19),
diff --git a/mysql-test/suite/parts/r/partition_mgm_lc1_memory.result b/mysql-test/suite/parts/r/partition_mgm_lc1_memory.result
index 435a0d8313e..c129f70ed4e 100644
--- a/mysql-test/suite/parts/r/partition_mgm_lc1_memory.result
+++ b/mysql-test/suite/parts/r/partition_mgm_lc1_memory.result
@@ -32,7 +32,7 @@ ALTER TABLE TableA ADD PARTITION
(PARTITION partA,
PARTITION Parta,
PARTITION PartA);
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
ALTER TABLE TableA ADD PARTITION
(PARTITION partE,
PARTITION Partf,
@@ -228,7 +228,7 @@ PARTITION BY HASH (a)
PARTITION partA ,
PARTITION Parta ,
PARTITION PartA );
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
# Creating Hash partitioned table
CREATE TABLE TableA (a INT)
ENGINE = 'Memory'
@@ -259,7 +259,7 @@ ALTER TABLE TableA ADD PARTITION
(PARTITION partA,
PARTITION Parta,
PARTITION PartA);
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
ALTER TABLE TableA ADD PARTITION
(PARTITION partE,
PARTITION Partf,
@@ -475,7 +475,7 @@ a
# expecting duplicate partition name
ALTER TABLE TableA ADD PARTITION
(PARTITION partA VALUES LESS THAN (MAXVALUE));
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
ALTER TABLE TableA ADD PARTITION
(PARTITION partE VALUES LESS THAN (16),
PARTITION Partf VALUES LESS THAN (19),
@@ -698,7 +698,7 @@ a
# expecting duplicate partition name
ALTER TABLE TableA ADD PARTITION
(PARTITION partA VALUES IN (0));
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
ALTER TABLE TableA ADD PARTITION
(PARTITION partE VALUES IN (16),
PARTITION Partf VALUES IN (19),
diff --git a/mysql-test/suite/parts/r/partition_mgm_lc1_myisam.result b/mysql-test/suite/parts/r/partition_mgm_lc1_myisam.result
index 3a90ce4d73c..398c0174a5d 100644
--- a/mysql-test/suite/parts/r/partition_mgm_lc1_myisam.result
+++ b/mysql-test/suite/parts/r/partition_mgm_lc1_myisam.result
@@ -32,7 +32,7 @@ ALTER TABLE TableA ADD PARTITION
(PARTITION partA,
PARTITION Parta,
PARTITION PartA);
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
ALTER TABLE TableA ADD PARTITION
(PARTITION partE,
PARTITION Partf,
@@ -228,7 +228,7 @@ PARTITION BY HASH (a)
PARTITION partA ,
PARTITION Parta ,
PARTITION PartA );
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
# Creating Hash partitioned table
CREATE TABLE TableA (a INT)
ENGINE = 'MyISAM'
@@ -259,7 +259,7 @@ ALTER TABLE TableA ADD PARTITION
(PARTITION partA,
PARTITION Parta,
PARTITION PartA);
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
ALTER TABLE TableA ADD PARTITION
(PARTITION partE,
PARTITION Partf,
@@ -475,7 +475,7 @@ a
# expecting duplicate partition name
ALTER TABLE TableA ADD PARTITION
(PARTITION partA VALUES LESS THAN (MAXVALUE));
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
ALTER TABLE TableA ADD PARTITION
(PARTITION partE VALUES LESS THAN (16),
PARTITION Partf VALUES LESS THAN (19),
@@ -698,7 +698,7 @@ a
# expecting duplicate partition name
ALTER TABLE TableA ADD PARTITION
(PARTITION partA VALUES IN (0));
-ERROR HY000: Duplicate partition name parta
+ERROR HY000: Duplicate partition name partA
ALTER TABLE TableA ADD PARTITION
(PARTITION partE VALUES IN (16),
PARTITION Partf VALUES IN (19),
diff --git a/mysql-test/suite/parts/r/partition_repair_myisam.result b/mysql-test/suite/parts/r/partition_repair_myisam.result
index a9d7ce8aeb4..1e575d985a9 100644
--- a/mysql-test/suite/parts/r/partition_repair_myisam.result
+++ b/mysql-test/suite/parts/r/partition_repair_myisam.result
@@ -209,7 +209,7 @@ PARTITIONS 7;
SELECT COUNT(*) FROM t1_will_crash;
COUNT(*)
33
-SELECT (b % 7) AS partition, COUNT(*) AS rows FROM t1_will_crash GROUP BY (b % 7);
+SELECT (b % 7) AS `partition`, COUNT(*) AS rows FROM t1_will_crash GROUP BY (b % 7);
partition rows
0 2
1 5
@@ -218,7 +218,7 @@ partition rows
4 4
5 4
6 8
-SELECT (b % 7) AS partition, b, a, length(c) FROM t1_will_crash ORDER BY partition, b, a;
+SELECT (b % 7) AS `partition`, b, a, length(c) FROM t1_will_crash ORDER BY `partition`, b, a;
partition b a length(c)
0 0 lost 64
0 7 z lost 64
@@ -282,7 +282,7 @@ test.t1_will_crash repair status OK
SELECT COUNT(*) FROM t1_will_crash;
COUNT(*)
29
-SELECT (b % 7) AS partition, COUNT(*) AS rows FROM t1_will_crash GROUP BY (b % 7);
+SELECT (b % 7) AS `partition`, COUNT(*) AS rows FROM t1_will_crash GROUP BY (b % 7);
partition rows
1 4
2 5
@@ -290,7 +290,7 @@ partition rows
4 4
5 4
6 7
-SELECT (b % 7) AS partition, b, a, length(c) FROM t1_will_crash ORDER BY partition, b, a;
+SELECT (b % 7) AS `partition`, b, a, length(c) FROM t1_will_crash ORDER BY `partition`, b, a;
partition b a length(c)
1 1 abc 64
1 8 tuw 64
@@ -366,9 +366,9 @@ Table Op Msg_type Msg_text
test.t1_will_crash repair info Delete link points outside datafile at 340
test.t1_will_crash repair info Delete link points outside datafile at 340
test.t1_will_crash repair status OK
-SELECT (b % 7) AS partition, b, a, length(c) FROM t1_will_crash
+SELECT (b % 7) AS `partition`, b, a, length(c) FROM t1_will_crash
WHERE (b % 7) = 6
-ORDER BY partition, b, a;
+ORDER BY `partition`, b, a;
partition b a length(c)
6 6 jkl 64
6 13 ooo 64
@@ -385,9 +385,9 @@ FLUSH TABLES;
# table, depending if one reads via index or direct on datafile.
# Since crash when reuse of deleted row space, CHECK MEDIUM or EXTENDED
# is required (MEDIUM is default) to verify correct behavior!
-SELECT (b % 7) AS partition, b, a, length(c) FROM t1_will_crash
+SELECT (b % 7) AS `partition`, b, a, length(c) FROM t1_will_crash
WHERE (b % 7) = 6
-ORDER BY partition, b, a;
+ORDER BY `partition`, b, a;
partition b a length(c)
6 6 jkl 64
6 13 ooo 64
@@ -396,9 +396,9 @@ partition b a length(c)
6 97 zzzzzZzzzzz 64
SET @save_optimizer_switch= @@optimizer_switch;
SET @@optimizer_switch='derived_merge=off';
-SELECT (b % 7) AS partition, b, a FROM (SELECT b,a FROM t1_will_crash) q
+SELECT (b % 7) AS `partition`, b, a FROM (SELECT b,a FROM t1_will_crash) q
WHERE (b % 7) = 6
-ORDER BY partition, b, a;
+ORDER BY `partition`, b, a;
partition b a
6 6 jkl
6 13 ooo
@@ -422,7 +422,7 @@ test.t1_will_crash repair status OK
SELECT COUNT(*) FROM t1_will_crash;
COUNT(*)
29
-SELECT (b % 7) AS partition, COUNT(*) AS rows FROM t1_will_crash GROUP BY (b % 7);
+SELECT (b % 7) AS `partition`, COUNT(*) AS rows FROM t1_will_crash GROUP BY (b % 7);
partition rows
1 4
2 4
@@ -430,7 +430,7 @@ partition rows
4 4
5 4
6 8
-SELECT (b % 7) AS partition, b, a, length(c) FROM t1_will_crash ORDER BY partition, b, a;
+SELECT (b % 7) AS `partition`, b, a, length(c) FROM t1_will_crash ORDER BY `partition`, b, a;
partition b a length(c)
1 1 abc 64
1 8 tuw 64
diff --git a/mysql-test/suite/parts/r/partition_syntax_innodb.result b/mysql-test/suite/parts/r/partition_syntax_innodb.result
index f704d902429..92af6bc733f 100644
--- a/mysql-test/suite/parts/r/partition_syntax_innodb.result
+++ b/mysql-test/suite/parts/r/partition_syntax_innodb.result
@@ -1,5 +1,5 @@
SET @max_row = 20;
-SET @@session.storage_engine = 'InnoDB';
+SET @@session.default_storage_engine = 'InnoDB';
#------------------------------------------------------------------------
# 0. Setting of auxiliary variables + Creation of an auxiliary tables
diff --git a/mysql-test/suite/parts/r/partition_syntax_myisam.result b/mysql-test/suite/parts/r/partition_syntax_myisam.result
index 556abf7ad39..ae401e7f88a 100644
--- a/mysql-test/suite/parts/r/partition_syntax_myisam.result
+++ b/mysql-test/suite/parts/r/partition_syntax_myisam.result
@@ -1,5 +1,5 @@
SET @max_row = 20;
-SET @@session.storage_engine = 'MyISAM';
+SET @@session.default_storage_engine = 'MyISAM';
#------------------------------------------------------------------------
# 0. Setting of auxiliary variables + Creation of an auxiliary tables
diff --git a/mysql-test/suite/parts/t/partition_basic_innodb.test b/mysql-test/suite/parts/t/partition_basic_innodb.test
index 398f62dab28..032db63693c 100644
--- a/mysql-test/suite/parts/t/partition_basic_innodb.test
+++ b/mysql-test/suite/parts/t/partition_basic_innodb.test
@@ -1,3 +1,7 @@
+#Considering the time taken for this test on slow solaris platforms making it a big test
+--source include/big_test.inc
+# Skiping this test from Valgrind execution as per Bug-14627884
+--source include/not_valgrind.inc
################################################################################
# t/partition_basic_innodb.test #
# #
@@ -22,8 +26,6 @@
# any of the variables.
#
---source include/big_test.inc
-
#------------------------------------------------------------------------------#
# General not engine specific settings and requirements
@@ -42,12 +44,12 @@ SET @max_row = 20;
let $more_trigger_tests= 0;
let $more_pk_ui_tests= 0;
-# This test relies on connecting externally from mysqltest, doesn't
-# work with embedded.
---source include/not_embedded.inc
# The server must support partitioning.
--source include/have_partition.inc
+# Does not work with --embedded
+--source include/not_embedded.inc
+
#------------------------------------------------------------------------------#
# Engine specific settings and requirements
diff --git a/mysql-test/suite/parts/t/partition_basic_symlink_innodb.test b/mysql-test/suite/parts/t/partition_basic_symlink_innodb.test
index 17c6a845b25..35dc2d5e004 100644
--- a/mysql-test/suite/parts/t/partition_basic_symlink_innodb.test
+++ b/mysql-test/suite/parts/t/partition_basic_symlink_innodb.test
@@ -14,16 +14,20 @@
# Change Author: mattiasj #
# Change Date: 2008-03-16 #
# Change: Replaced all test with alter -> myisam, since innodb does not support#
-# DATA/INDEX DIRECTORY #
+# Change Author: Kevin lewis #
+# Change Date: 2012-03-02 #
+# Change: WL5980 activates DATA DIRECTORY for InnoDB #
################################################################################
-# NOTE: Until InnoDB supports DATA/INDEX DIR, test that a partitioned table
-# remembers the DATA/INDEX DIR and it is used if altered to MyISAM
+# NOTE: As of WL5980, InnoDB supports DATA DIRECTORY, but not INDEX DIRECTORY.
+# See innodb.innodb-tablespace for tests using partition engine, innodb
+# and DATADIRECTORY. The purpose of this test is to show that a
+# partitioned table remembers the DATA/INDEX DIR and it is used if
+# altered to MyISAM
#
---echo # Will not run partition_basic_symlink on InnoDB, since it is the same
---echo # as partition_basic, since InnoDB does not support DATA/INDEX DIR
---echo # Will only verify that the DATA/INDEX DIR is stored and used if
---echo # ALTER to MyISAM.
+--echo #
+--echo # Verify that the DATA/INDEX DIR is stored and used if ALTER to MyISAM.
+--echo #
--source include/have_innodb.inc
# The server must support partitioning.
--source include/have_partition.inc
@@ -32,17 +36,57 @@
# windows does not support symlink for DATA/INDEX DIRECTORY.
--source include/not_windows.inc
+# Does not work with --embedded
+--source include/not_embedded.inc
+
+--disable_query_log
+let $MYSQLD_DATADIR= `select @@datadir`;
+
+# These values can change during the test
+LET $innodb_file_per_table_orig=`select @@innodb_file_per_table`;
+LET $innodb_strict_mode_orig=`select @@session.innodb_strict_mode`;
+--enable_query_log
+
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
-let $MYSQLD_DATADIR= `select @@datadir`;
-
--mkdir $MYSQLTEST_VARDIR/mysql-test-data-dir
--mkdir $MYSQLTEST_VARDIR/mysql-test-idx-dir
+
+SET SESSION innodb_strict_mode = ON;
+
+--echo #
+--echo # InnoDB only supports DATA DIRECTORY with innodb_file_per_table=ON
+--echo #
+SET GLOBAL innodb_file_per_table = OFF;
+--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
+--error ER_CANT_CREATE_TABLE
+eval CREATE TABLE t1 (c1 INT) ENGINE = InnoDB
+PARTITION BY HASH (c1) (
+ PARTITION p0
+ DATA DIRECTORY = '$MYSQLTEST_VARDIR/mysql-test-data-dir'
+ INDEX DIRECTORY = '$MYSQLTEST_VARDIR/mysql-test-idx-dir',
+ PARTITION p1
+ DATA DIRECTORY = '$MYSQLTEST_VARDIR/mysql-test-data-dir'
+ INDEX DIRECTORY = '$MYSQLTEST_VARDIR/mysql-test-idx-dir'
+);
+--replace_result ./ MYSQLD_DATADIR/ $MYSQLD_DATADIR MYSQLD_DATADIR
+SHOW WARNINGS;
+
+--echo #
+--echo # InnoDB is different from MyISAM in that it uses a text file
+--echo # with an '.isl' extension instead of a symbolic link so that
+--echo # the tablespace can be re-located on any OS. Also, instead of
+--echo # putting the file directly into the DATA DIRECTORY,
+--echo # it adds a folder under it with the name of the database.
+--echo # Since strict mode is off, InnoDB ignores the INDEX DIRECTORY
+--echo # and it is no longer part of the definition.
+--echo #
+SET SESSION innodb_strict_mode = OFF;
+SET GLOBAL innodb_file_per_table = ON;
--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
-eval CREATE TABLE t1 (c1 INT)
-ENGINE = InnoDB
+eval CREATE TABLE t1 (c1 INT) ENGINE = InnoDB
PARTITION BY HASH (c1)
(PARTITION p0
DATA DIRECTORY = '$MYSQLTEST_VARDIR/mysql-test-data-dir'
@@ -51,116 +95,70 @@ PARTITION BY HASH (c1)
DATA DIRECTORY = '$MYSQLTEST_VARDIR/mysql-test-data-dir'
INDEX DIRECTORY = '$MYSQLTEST_VARDIR/mysql-test-idx-dir'
);
---echo # Verifying .frm and .par files
---file_exists $MYSQLD_DATADIR/test/t1.frm
---file_exists $MYSQLD_DATADIR/test/t1.par
---echo # Verifying that there are no MyISAM files
---error 1
---file_exists $MYSQLD_DATADIR/test/t1#P#p0.MYD
---error 1
---file_exists $MYSQLD_DATADIR/test/t1#P#p0.MYI
---error 1
---file_exists $MYSQLD_DATADIR/test/t1#P#p1.MYD
---error 1
---file_exists $MYSQLD_DATADIR/test/t1#P#p1.MYI
---error 1
---file_exists $MYSQLTEST_VARDIR/mysql-test-data-dir/t1#P#p0.MYD
---error 1
---file_exists $MYSQLTEST_VARDIR/mysql-test-idx-dir/t1#P#p0.MYI
---error 1
---file_exists $MYSQLTEST_VARDIR/mysql-test-data-dir/t1#P#p1.MYD
---error 1
---file_exists $MYSQLTEST_VARDIR/mysql-test-idx-dir/t1#P#p1.MYI
+SHOW WARNINGS;
+
+--echo # Verifying .frm, .par, .isl & .ibd files
+--echo ---- MYSQLD_DATADIR/test
+--list_files $MYSQLD_DATADIR/test
+--echo ---- MYSQLTEST_VARDIR/mysql-test-data-dir/test
+--list_files $MYSQLTEST_VARDIR/mysql-test-data-dir/test
+--echo # The ibd tablespaces should not be directly under the DATA DIRECTORY
+--echo ---- MYSQLTEST_VARDIR/mysql-test-data-dir
+--list_files $MYSQLTEST_VARDIR/mysql-test-data-dir
+--echo ---- MYSQLTEST_VARDIR/mysql-test-idx-dir
+--list_files $MYSQLTEST_VARDIR/mysql-test-idx-dir
+
FLUSH TABLES;
--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
SHOW CREATE TABLE t1;
-ALTER TABLE t1 ENGINE = MyISAM;
+
+--echo #
+--echo # Verify that the DATA/INDEX DIRECTORY is stored and used if we
+--echo # ALTER TABLE to MyISAM.
+--echo #
+ALTER TABLE t1 engine=MyISAM;
+--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
+SHOW CREATE TABLE t1;
--echo # Verifying .frm, .par and MyISAM files (.MYD, MYI)
---file_exists $MYSQLD_DATADIR/test/t1.frm
---file_exists $MYSQLD_DATADIR/test/t1.par
---file_exists $MYSQLD_DATADIR/test/t1#P#p0.MYD
---file_exists $MYSQLD_DATADIR/test/t1#P#p0.MYI
---file_exists $MYSQLD_DATADIR/test/t1#P#p1.MYD
---file_exists $MYSQLD_DATADIR/test/t1#P#p1.MYI
---file_exists $MYSQLTEST_VARDIR/mysql-test-data-dir/t1#P#p0.MYD
---file_exists $MYSQLTEST_VARDIR/mysql-test-idx-dir/t1#P#p0.MYI
---file_exists $MYSQLTEST_VARDIR/mysql-test-data-dir/t1#P#p1.MYD
---file_exists $MYSQLTEST_VARDIR/mysql-test-idx-dir/t1#P#p1.MYI
-FLUSH TABLES;
+--echo ---- MYSQLD_DATADIR/test
+--list_files $MYSQLD_DATADIR/test
+--echo ---- MYSQLTEST_VARDIR/mysql-test-data-dir
+--list_files $MYSQLTEST_VARDIR/mysql-test-data-dir
+--echo ---- MYSQLTEST_VARDIR/mysql-test-idx-dir
+--list_files $MYSQLTEST_VARDIR/mysql-test-idx-dir
+
+--echo #
+--echo # Now verify that the DATA DIRECTORY is used again if we
+--echo # ALTER TABLE back to InnoDB.
+--echo #
+SET SESSION innodb_strict_mode = ON;
+ALTER TABLE t1 engine=InnoDB;
--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR
SHOW CREATE TABLE t1;
-DROP TABLE t1;
---rmdir $MYSQLTEST_VARDIR/mysql-test-data-dir
---rmdir $MYSQLTEST_VARDIR/mysql-test-idx-dir
---exit
-# here is the old test, which is tested by partition_basic_innodb
-
-#
-# NOTE: PLEASE DO NOT ADD NOT INNODB SPECIFIC TESTCASES HERE !
-# TESTCASES WHICH MUST BE APPLIED TO ALL STORAGE ENGINES MUST BE ADDED IN
-# THE SOURCED FILES ONLY.
-#
-# Please read the README at the end of inc/partition.pre before changing
-# any of the variables.
-#
-
-#------------------------------------------------------------------------------#
-# General not engine specific settings and requirements
-
-##### Options, for debugging support #####
-let $debug= 0;
-let $with_partitioning= 1;
-
-##### Option, for displaying files #####
-let $ls= 1;
-
-##### Number of rows for the INSERT/UPDATE/DELETE/SELECT experiments #####
-# on partioned tables
-SET @max_row = 20;
-
-##### Execute more tests #####
-let $more_trigger_tests= 0;
-let $more_pk_ui_tests= 0;
-
-# The server must support partitioning.
---source include/have_partition.inc
-# The server must support symlink for DATA/INDEX DIRECTORY.
---source include/have_symlink.inc
-# windows does not support symlink for DATA/INDEX DIRECTORY.
---source include/not_windows.inc
+--echo # Verifying .frm, .par, .isl and InnoDB .ibd files
+--echo ---- MYSQLD_DATADIR/test
+--list_files $MYSQLD_DATADIR/test
+--echo ---- MYSQLTEST_VARDIR/mysql-test-data-dir
+--list_files $MYSQLTEST_VARDIR/mysql-test-data-dir
+--echo ---- MYSQLTEST_VARDIR/mysql-test-idx-dir
+--list_files $MYSQLTEST_VARDIR/mysql-test-idx-dir
+--echo ---- MYSQLTEST_VARDIR/mysql-test-data-dir/test
+--list_files $MYSQLTEST_VARDIR/mysql-test-data-dir/test
-#------------------------------------------------------------------------------#
-# Engine specific settings and requirements
-
-##### Storage engine to be tested
---source include/have_innodb.inc
-let $engine= 'InnoDB';
-
-##### Execute the test of "table" files
-# InnoDB has no files per PK, UI, ...
-let $do_file_tests= 0;
-
-##### Execute PRIMARY KEY tests #####
-# AFAIK InnoDB clusters the table around PRIMARY KEYs.
-let $do_pk_tests= 1;
+DROP TABLE t1;
-##### Assign a big number smaller than the maximum value for partitions #####
-# and smaller than the maximum value of SIGNED INTEGER
-let $MAX_VALUE= (2147483646);
+--echo #
+--echo # Cleanup
+--echo #
-# Generate the prerequisites ($variables, @variables, tables) needed
---source suite/parts/inc/partition.pre
+--rmdir $MYSQLTEST_VARDIR/mysql-test-data-dir/test
+--rmdir $MYSQLTEST_VARDIR/mysql-test-data-dir
+--rmdir $MYSQLTEST_VARDIR/mysql-test-idx-dir
-##### Workarounds for known open engine specific bugs
-# none
+--disable_query_log
+EVAL SET GLOBAL innodb_file_per_table=$innodb_file_per_table_orig;
+EVAL SET SESSION innodb_strict_mode=$innodb_strict_mode_orig;
+--enable_query_log
-#------------------------------------------------------------------------------#
-# Execute the tests to be applied to all storage engines
---source suite/parts/inc/partition_basic_symlink.inc
-#------------------------------------------------------------------------------#
-# Execute storage engine specific tests
-#------------------------------------------------------------------------------#
-# Cleanup
---source suite/parts/inc/partition_cleanup.inc
diff --git a/mysql-test/suite/parts/t/partition_debug_innodb.test b/mysql-test/suite/parts/t/partition_debug_innodb.test
index 33cbd8e3b7b..d4cac35c0b6 100644
--- a/mysql-test/suite/parts/t/partition_debug_innodb.test
+++ b/mysql-test/suite/parts/t/partition_debug_innodb.test
@@ -1,10 +1,12 @@
-# Partitioning test that require debug features
+# Partitioning test that require debug features and InnoDB
# including crashing tests.
--source include/have_debug.inc
--source include/have_innodb.inc
--source include/have_partition.inc
+# Don't test this under valgrind, memory leaks will occur
--source include/not_valgrind.inc
+# Crash tests don't work with embedded
--source include/not_embedded.inc
--disable_warnings
@@ -13,6 +15,9 @@ DROP TABLE IF EXISTS t1;
--let $DATADIR= `SELECT @@datadir;`
+# Waiting for wl#6723
+if (0)
+{
--echo #
--echo # Bug#12696518/Bug#11766879/60106:DIFF BETWEEN # OF INDEXES IN MYSQL
--echo # VS INNODB, PARTITONING, ON INDEX CREATE
@@ -47,6 +52,7 @@ SELECT * FROM t1;
SET SESSION debug_dbug = "-d,ha_partition_fail_final_add_index";
SHOW CREATE TABLE t1;
DROP TABLE t1;
+}
# Checking with #innodb what this is...
call mtr.add_suppression("InnoDB: Warning: allocated tablespace .*, old maximum was");
@@ -60,3 +66,26 @@ let $engine= 'InnoDB';
--echo # Test crash and failure recovery in fast_alter_partition_table.
--source suite/parts/inc/partition_mgm_crash.inc
+
+--echo #
+--echo # WL#4445: EXCHANGE PARTITION WITH TABLE
+--echo # Verify ddl_log and InnoDB in case of crashing.
+# Investigating if this warning is OK when crash testing.
+call mtr.add_suppression("InnoDB: Warning: allocated tablespace .*, old maximum was ");
+#
+call mtr.add_suppression("Attempting backtrace. You can use the following information to find out");
+call mtr.add_suppression("table .* does not exist in the InnoDB internal");
+
+let $create_statement= CREATE TABLE t1 (a INT, b VARCHAR(64))
+ENGINE = InnoDB
+PARTITION BY RANGE (a)
+(PARTITION p0 VALUES LESS THAN (10),
+ PARTITION p1 VALUES LESS THAN MAXVALUE);
+let $insert_statement= INSERT INTO t1 VALUES (1, "Original from partition p0"), (2, "Original from partition p0"), (3, "Original from partition p0"), (4, "Original from partition p0"), (11, "Original from partition p1"), (12, "Original from partition p1"), (13, "Original from partition p1"), (14, "Original from partition p1"), (21, "Original from partition p1"), (22, "Original from partition p1"), (23, "Original from partition p1"), (24, "Original from partition p1");
+
+let $create_statement2= CREATE TABLE t2 (a INT, b VARCHAR(64)) ENGINE = InnoDB;
+let $insert_statement2= INSERT INTO t2 VALUES (5, "Original from table t2"), (6, "Original from table t2"), (7, "Original from table t2"), (8, "Original from table t2");
+let $crash_statement= ALTER TABLE t1 EXCHANGE PARTITION p0 WITH TABLE t2;
+--source suite/parts/inc/partition_crash_exchange.inc
+let $fail_statement= $crash_statement;
+--source suite/parts/inc/partition_fail_exchange.inc
diff --git a/mysql-test/suite/parts/t/partition_repair_myisam.test b/mysql-test/suite/parts/t/partition_repair_myisam.test
index 96d68fd7fc9..91c8ed57580 100644
--- a/mysql-test/suite/parts/t/partition_repair_myisam.test
+++ b/mysql-test/suite/parts/t/partition_repair_myisam.test
@@ -151,8 +151,8 @@ eval INSERT INTO t1_will_crash VALUES
('6 row 7 (crash before completely written to datafile)', 27, '$lt$lt');
--enable_query_log
SELECT COUNT(*) FROM t1_will_crash;
-SELECT (b % 7) AS partition, COUNT(*) AS rows FROM t1_will_crash GROUP BY (b % 7);
-SELECT (b % 7) AS partition, b, a, length(c) FROM t1_will_crash ORDER BY partition, b, a;
+SELECT (b % 7) AS `partition`, COUNT(*) AS rows FROM t1_will_crash GROUP BY (b % 7);
+SELECT (b % 7) AS `partition`, b, a, length(c) FROM t1_will_crash ORDER BY `partition`, b, a;
FLUSH TABLES;
# testing p0, p1, p3, p6(1)
--echo # truncating p0 to simulate an empty datafile (not recovered!)
@@ -177,8 +177,8 @@ OPTIMIZE TABLE t1_will_crash;
CHECK TABLE t1_will_crash;
REPAIR TABLE t1_will_crash;
SELECT COUNT(*) FROM t1_will_crash;
-SELECT (b % 7) AS partition, COUNT(*) AS rows FROM t1_will_crash GROUP BY (b % 7);
-SELECT (b % 7) AS partition, b, a, length(c) FROM t1_will_crash ORDER BY partition, b, a;
+SELECT (b % 7) AS `partition`, COUNT(*) AS rows FROM t1_will_crash GROUP BY (b % 7);
+SELECT (b % 7) AS `partition`, b, a, length(c) FROM t1_will_crash ORDER BY `partition`, b, a;
FLUSH TABLES;
# testing p2, p4, p6(2, 3)
@@ -215,9 +215,9 @@ FLUSH TABLES;
#ALTER TABLE t1_will_crash OPTIMIZE PARTITION p6;
ALTER TABLE t1_will_crash CHECK PARTITION p6;
ALTER TABLE t1_will_crash REPAIR PARTITION p6;
-SELECT (b % 7) AS partition, b, a, length(c) FROM t1_will_crash
+SELECT (b % 7) AS `partition`, b, a, length(c) FROM t1_will_crash
WHERE (b % 7) = 6
-ORDER BY partition, b, a;
+ORDER BY `partition`, b, a;
FLUSH TABLES;
--echo #
@@ -229,21 +229,21 @@ FLUSH TABLES;
--echo # table, depending if one reads via index or direct on datafile.
--echo # Since crash when reuse of deleted row space, CHECK MEDIUM or EXTENDED
--echo # is required (MEDIUM is default) to verify correct behavior!
-SELECT (b % 7) AS partition, b, a, length(c) FROM t1_will_crash
+SELECT (b % 7) AS `partition`, b, a, length(c) FROM t1_will_crash
WHERE (b % 7) = 6
-ORDER BY partition, b, a;
+ORDER BY `partition`, b, a;
SET @save_optimizer_switch= @@optimizer_switch;
SET @@optimizer_switch='derived_merge=off';
-SELECT (b % 7) AS partition, b, a FROM (SELECT b,a FROM t1_will_crash) q
+SELECT (b % 7) AS `partition`, b, a FROM (SELECT b,a FROM t1_will_crash) q
WHERE (b % 7) = 6
-ORDER BY partition, b, a;
+ORDER BY `partition`, b, a;
SET @@optimizer_switch=@save_optimizer_switch;
# NOTE: REBUILD PARTITION without CHECK before, 2 + (1) records will be lost!
#ALTER TABLE t1_will_crash REBUILD PARTITION p6;
ALTER TABLE t1_will_crash CHECK PARTITION p6;
ALTER TABLE t1_will_crash REPAIR PARTITION p6;
SELECT COUNT(*) FROM t1_will_crash;
-SELECT (b % 7) AS partition, COUNT(*) AS rows FROM t1_will_crash GROUP BY (b % 7);
-SELECT (b % 7) AS partition, b, a, length(c) FROM t1_will_crash ORDER BY partition, b, a;
+SELECT (b % 7) AS `partition`, COUNT(*) AS rows FROM t1_will_crash GROUP BY (b % 7);
+SELECT (b % 7) AS `partition`, b, a, length(c) FROM t1_will_crash ORDER BY `partition`, b, a;
ALTER TABLE t1_will_crash CHECK PARTITION all EXTENDED;
DROP TABLE t1_will_crash;
diff --git a/mysql-test/suite/perfschema/disabled.def b/mysql-test/suite/perfschema/disabled.def
index a72014a652b..888298bbb09 100644
--- a/mysql-test/suite/perfschema/disabled.def
+++ b/mysql-test/suite/perfschema/disabled.def
@@ -9,41 +9,3 @@
# Do not use any TAB characters for whitespace.
#
##############################################################################
-ddl_host_cache:
-dml_host_cache:
-hostcache_ipv4_addrinfo_again_allow:
-hostcache_ipv4_addrinfo_again_deny:
-hostcache_ipv4_addrinfo_bad_allow:
-hostcache_ipv4_addrinfo_bad_deny:
-hostcache_ipv4_addrinfo_good_allow:
-hostcache_ipv4_addrinfo_good_deny:
-hostcache_ipv4_addrinfo_noname_allow:
-hostcache_ipv4_addrinfo_noname_deny:
-hostcache_ipv4_auth_plugin:
-hostcache_ipv4_blocked:
-hostcache_ipv4_format:
-hostcache_ipv4_max_con:
-hostcache_ipv4_nameinfo_again_allow:
-hostcache_ipv4_nameinfo_again_deny:
-hostcache_ipv4_nameinfo_noname_allow:
-hostcache_ipv4_nameinfo_noname_deny:
-hostcache_ipv4_passwd:
-hostcache_ipv4_ssl:
-hostcache_ipv6_addrinfo_again_allow:
-hostcache_ipv6_addrinfo_again_deny:
-hostcache_ipv6_addrinfo_bad_allow:
-hostcache_ipv6_addrinfo_bad_deny:
-hostcache_ipv6_addrinfo_good_allow:
-hostcache_ipv6_addrinfo_good_deny:
-hostcache_ipv6_addrinfo_noname_allow:
-hostcache_ipv6_addrinfo_noname_deny:
-hostcache_ipv6_auth_plugin:
-hostcache_ipv6_blocked:
-hostcache_ipv6_max_con:
-hostcache_ipv6_nameinfo_again_allow:
-hostcache_ipv6_nameinfo_again_deny:
-hostcache_ipv6_nameinfo_noname_allow:
-hostcache_ipv6_nameinfo_noname_deny:
-hostcache_ipv6_passwd:
-hostcache_ipv6_ssl:
-hostcache_peer_addr:
diff --git a/mysql-test/suite/perfschema/include/connection_load.inc b/mysql-test/suite/perfschema/include/connection_load.inc
index 16fc136f842..02c8e2adbe1 100644
--- a/mysql-test/suite/perfschema/include/connection_load.inc
+++ b/mysql-test/suite/perfschema/include/connection_load.inc
@@ -186,10 +186,45 @@ call dump_all();
--connection default
--disconnect con4a
+
+# Wait for the disconnect to complete
+let $wait_condition=
+ select count(*) = 5 from performance_schema.threads
+ where `TYPE`='FOREGROUND' and PROCESSLIST_USER like 'user%';
+--source include/wait_condition.inc
+
--disconnect con4b
+
+# Wait for the disconnect to complete
+let $wait_condition=
+ select count(*) = 4 from performance_schema.threads
+ where `TYPE`='FOREGROUND' and PROCESSLIST_USER like 'user%';
+--source include/wait_condition.inc
+
--disconnect con4c
+
+# Wait for the disconnect to complete
+let $wait_condition=
+ select count(*) = 3 from performance_schema.threads
+ where `TYPE`='FOREGROUND' and PROCESSLIST_USER like 'user%';
+--source include/wait_condition.inc
+
--disconnect con5a
+
+# Wait for the disconnect to complete
+let $wait_condition=
+ select count(*) = 2 from performance_schema.threads
+ where `TYPE`='FOREGROUND' and PROCESSLIST_USER like 'user%';
+--source include/wait_condition.inc
+
--disconnect con5b
+
+# Wait for the disconnect to complete
+let $wait_condition=
+ select count(*) = 1 from performance_schema.threads
+ where `TYPE`='FOREGROUND' and PROCESSLIST_USER like 'user%';
+--source include/wait_condition.inc
+
--disconnect con5c
# Wait for the disconnect to complete
diff --git a/mysql-test/suite/perfschema/include/digest_cleanup.inc b/mysql-test/suite/perfschema/include/digest_cleanup.inc
index 99dc41e660f..47dd7618b43 100644
--- a/mysql-test/suite/perfschema/include/digest_cleanup.inc
+++ b/mysql-test/suite/perfschema/include/digest_cleanup.inc
@@ -7,5 +7,8 @@ DROP TABLE IF EXISTS t2;
DROP TABLE IF EXISTS t3;
DROP TABLE IF EXISTS t4;
DROP TABLE IF EXISTS t5;
+DROP TABLE IF EXISTS t6;
+DROP TABLE IF EXISTS t11;
+DROP TABLE IF EXISTS t12;
DROP DATABASE IF EXISTS statements_digest;
--enable_warnings
diff --git a/mysql-test/suite/perfschema/include/digest_execution.inc b/mysql-test/suite/perfschema/include/digest_execution.inc
index 34c49590217..5483cca61d7 100644
--- a/mysql-test/suite/perfschema/include/digest_execution.inc
+++ b/mysql-test/suite/perfschema/include/digest_execution.inc
@@ -34,6 +34,19 @@ INSERT INTO t3 VALUES (1, 2), (3, 4), (5, 6);
INSERT INTO t5 VALUES (1, 2, 3), (4, 5, 6), (7, 8, 9);
# -----------------------------------------------------------------------
+# Test case to handle NULL. If alone, not normalized otherwise normalized.
+# -----------------------------------------------------------------------
+INSERT INTO t1 VALUES (NULL);
+INSERT INTO t3 VALUES (NULL,NULL);
+INSERT INTO t3 VALUES (1,NULL);
+INSERT INTO t3 VALUES (NULL,1);
+INSERT INTO t6 VALUES (NULL, NULL, NULL, NULL);
+INSERT INTO t6 VALUES (1, NULL, NULL, NULL);
+INSERT INTO t6 VALUES (NULL, 2, NULL, NULL);
+INSERT INTO t6 VALUES (1, 2, 3, NULL);
+INSERT INTO t6 VALUES (1, 2, NULL, 4);
+
+# -----------------------------------------------------------------------
# Test case for handling spaces in statement.
# -----------------------------------------------------------------------
@@ -59,7 +72,6 @@ SELECT 1 /* This is an inline comment */ + 1;
*/
1;
-
# -----------------------------------------------------------------------
# Tests to show how the digest behaves with tokens that can have multiple
# names (such as DATABASE = "DATABASE" or "SCHEMA", SUBSTRING, STD_SYM,
@@ -79,8 +91,40 @@ DROP DATABASE statements_digest_temp;
# captured.
# -----------------------------------------------------------------------
--ERROR ER_NO_SUCH_TABLE
-SELECT 1 from t11;
-create table t11 (c char(4));
+SELECT 1 FROM no_such_table;
+CREATE TABLE dup_table (c char(4));
--ERROR ER_TABLE_EXISTS_ERROR
-create table t11 (c char(4));
-insert into t11 values("MySQL");
+CREATE TABLE dup_table (c char(4));
+DROP TABLE dup_table;
+INSERT INTO t11 VALUES("MySQL");
+
+# -----------------------------------------------------------------------
+# Tests to show sub-statements for following statements are not
+# instrumented.
+# - Prepared Statements
+# - Stored Procedures/Functions.
+# - Table Triggers
+# -----------------------------------------------------------------------
+PREPARE stmt FROM "SELECT * FROM t12";
+EXECUTE stmt;
+EXECUTE stmt;
+DEALLOCATE PREPARE stmt;
+
+DELIMITER //;
+CREATE PROCEDURE p1() BEGIN SELECT * FROM t12; END//
+DELIMITER ;//
+CALL p1();
+CALL p1();
+DROP PROCEDURE p1;
+
+DELIMITER //;
+CREATE FUNCTION `func`(a INT, b INT) RETURNS int(11) RETURN a+b //
+DELIMITER ;//
+select func(3,4);
+select func(13,42);
+DROP FUNCTION func;
+
+CREATE TRIGGER trg BEFORE INSERT ON t12 FOR EACH ROW SET @a:=1;
+INSERT INTO t12 VALUES ("abc");
+INSERT INTO t12 VALUES ("def");
+DROP TRIGGER trg;
diff --git a/mysql-test/suite/perfschema/include/digest_setup.inc b/mysql-test/suite/perfschema/include/digest_setup.inc
index 7145bcfa870..ed463f58d04 100644
--- a/mysql-test/suite/perfschema/include/digest_setup.inc
+++ b/mysql-test/suite/perfschema/include/digest_setup.inc
@@ -17,5 +17,8 @@ CREATE TABLE t2(a int);
CREATE TABLE t3(a int, b int);
CREATE TABLE t4(a int, b int);
CREATE TABLE t5(a int, b int, c int);
+CREATE TABLE t6(a int, b int, c int, d int);
+CREATE TABLE t11 (c CHAR(4));
+CREATE TABLE t12 (c CHAR(4));
--enable_warnings
diff --git a/mysql-test/suite/perfschema/include/event_aggregate_load.inc b/mysql-test/suite/perfschema/include/event_aggregate_load.inc
index 75069f4b603..4a1b3f02c03 100644
--- a/mysql-test/suite/perfschema/include/event_aggregate_load.inc
+++ b/mysql-test/suite/perfschema/include/event_aggregate_load.inc
@@ -80,6 +80,12 @@ echo "================== con1 marker ==================";
--connection default
+# Wait for the payload to complete
+let $wait_condition=
+ select count(*) = 1 from performance_schema.events_waits_current
+ where EVENT_NAME= 'idle';
+--source include/wait_condition.inc
+
echo "================== Step 3 ==================";
call dump_thread();
execute dump_waits_account;
@@ -148,6 +154,12 @@ echo "================== con2 marker ==================";
--connection default
+# Wait for the payload to complete
+let $wait_condition=
+ select count(*) = 2 from performance_schema.events_waits_current
+ where EVENT_NAME= 'idle';
+--source include/wait_condition.inc
+
echo "================== Step 5 ==================";
call dump_thread();
execute dump_waits_account;
@@ -212,6 +224,12 @@ echo "================== con3 marker ==================";
--connection default
+# Wait for the payload to complete
+let $wait_condition=
+ select count(*) = 3 from performance_schema.events_waits_current
+ where EVENT_NAME= 'idle';
+--source include/wait_condition.inc
+
echo "================== Step 7 ==================";
call dump_thread();
execute dump_waits_account;
@@ -276,6 +294,12 @@ echo "================== con4 marker ==================";
--connection default
+# Wait for the payload to complete
+let $wait_condition=
+ select count(*) = 4 from performance_schema.events_waits_current
+ where EVENT_NAME= 'idle';
+--source include/wait_condition.inc
+
echo "================== Step 9 ==================";
call dump_thread();
execute dump_waits_account;
diff --git a/mysql-test/suite/perfschema/include/event_aggregate_setup.inc b/mysql-test/suite/perfschema/include/event_aggregate_setup.inc
index 47488403c1f..a0103649c09 100644
--- a/mysql-test/suite/perfschema/include/event_aggregate_setup.inc
+++ b/mysql-test/suite/perfschema/include/event_aggregate_setup.inc
@@ -111,7 +111,8 @@ update performance_schema.setup_instruments set enabled='YES', timed='YES'
where name in ('wait/synch/mutex/sql/LOCK_connection_count',
'wait/synch/mutex/sql/LOCK_user_locks',
'wait/synch/rwlock/sql/LOCK_grant',
- 'wait/io/file/sql/query_log');
+ 'wait/io/file/sql/query_log',
+ 'idle');
update performance_schema.setup_instruments set enabled='YES', timed='YES'
where name in ('stage/sql/init',
diff --git a/mysql-test/suite/perfschema/include/hostcache_set_state.inc b/mysql-test/suite/perfschema/include/hostcache_set_state.inc
new file mode 100644
index 00000000000..25dd56a4e0d
--- /dev/null
+++ b/mysql-test/suite/perfschema/include/hostcache_set_state.inc
@@ -0,0 +1,23 @@
+# Helper for hostcache_*.test
+
+# Set a known initial state for the test
+
+flush status;
+flush hosts;
+flush user_resources;
+flush privileges;
+
+# Print critical setup
+
+select @@global.debug;
+select @@global.max_connect_errors;
+select @@global.max_user_connections;
+select @@global.max_connections;
+
+# Make sure there are no remaining records that can change the test outcome
+
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+
diff --git a/mysql-test/suite/perfschema/include/schema.inc b/mysql-test/suite/perfschema/include/schema.inc
index 6c3c391c171..f5f23893d37 100644
--- a/mysql-test/suite/perfschema/include/schema.inc
+++ b/mysql-test/suite/perfschema/include/schema.inc
@@ -21,6 +21,7 @@ show create table events_stages_summary_global_by_event_name;
show create table events_statements_current;
show create table events_statements_history;
show create table events_statements_history_long;
+show create table events_statements_summary_by_digest;
show create table events_statements_summary_by_host_by_event_name;
show create table events_statements_summary_by_thread_by_event_name;
show create table events_statements_summary_by_user_by_event_name;
@@ -38,7 +39,6 @@ show create table events_waits_summary_global_by_event_name;
show create table file_instances;
show create table file_summary_by_event_name;
show create table file_summary_by_instance;
---error 1146
show create table host_cache;
show create table hosts;
show create table mutex_instances;
diff --git a/mysql-test/suite/perfschema/include/sizing_auto.inc b/mysql-test/suite/perfschema/include/sizing_auto.inc
new file mode 100644
index 00000000000..1a9afeaf219
--- /dev/null
+++ b/mysql-test/suite/perfschema/include/sizing_auto.inc
@@ -0,0 +1,16 @@
+
+show variables like "table_definition_cache";
+show variables like "table_open_cache";
+show variables like "max_connections";
+# Results vary by platform:
+# show variables like "open_files_limit";
+show variables like "%performance_schema%";
+show status like "%performance_schema%";
+
+# Each test script should provide a different test.cnf file,
+# with different settings.
+# This output will show the sizes computed automatically.
+# Note that this output is very dependent on the platform.
+
+show engine performance_schema status;
+
diff --git a/mysql-test/suite/perfschema/include/start_server_common.inc b/mysql-test/suite/perfschema/include/start_server_common.inc
index 6a7c7698999..d15c3d48ded 100644
--- a/mysql-test/suite/perfschema/include/start_server_common.inc
+++ b/mysql-test/suite/perfschema/include/start_server_common.inc
@@ -27,6 +27,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -43,21 +44,22 @@ select * from performance_schema.events_waits_summary_global_by_event_name;
select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
---error 1146
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
diff --git a/mysql-test/suite/perfschema/include/table_aggregate_load.inc b/mysql-test/suite/perfschema/include/table_aggregate_load.inc
index b72220c58bd..242768ead9e 100644
--- a/mysql-test/suite/perfschema/include/table_aggregate_load.inc
+++ b/mysql-test/suite/perfschema/include/table_aggregate_load.inc
@@ -48,17 +48,10 @@ execute dump_objects_summary;
#
connect (con1, localhost, user1, , );
-
-echo "================== con1 connected ==================";
+select concat(current_user(), " is connected") as status;
--connection default
-# Wait for the connect to complete
-let $wait_condition=
- select count(*) = 1 from performance_schema.threads
- where `TYPE`='FOREGROUND' and PROCESSLIST_USER= 'user1';
---source include/wait_condition.inc
-
echo "================== Step 2 ==================";
call dump_thread();
execute dump_waits_account;
@@ -116,17 +109,10 @@ execute dump_objects_summary;
# select PROCESSLIST_USER, PROCESSLIST_HOST, INSTRUMENTED from performance_schema.threads;
connect (con2, localhost, user2, , );
-
-echo "================== con2 connected ==================";
+select concat(current_user(), " is connected") as status;
--connection default
-# Wait for the connect to complete
-let $wait_condition=
- select count(*) = 1 from performance_schema.threads
- where `TYPE`='FOREGROUND' and PROCESSLIST_USER= 'user2';
---source include/wait_condition.inc
-
echo "================== Step 4 ==================";
call dump_thread();
execute dump_waits_account;
@@ -180,17 +166,10 @@ execute dump_waits_table_lock;
execute dump_objects_summary;
connect (con3, localhost, user3, , );
-
-echo "================== con3 connected ==================";
+select concat(current_user(), " is connected") as status;
--connection default
-# Wait for the connect to complete
-let $wait_condition=
- select count(*) = 1 from performance_schema.threads
- where `TYPE`='FOREGROUND' and PROCESSLIST_USER= 'user3';
---source include/wait_condition.inc
-
echo "================== Step 6 ==================";
call dump_thread();
execute dump_waits_account;
@@ -244,17 +223,10 @@ execute dump_waits_table_lock;
execute dump_objects_summary;
connect (con4, localhost, user4, , );
-
-echo "================== con4 connected ==================";
+select concat(current_user(), " is connected") as status;
--connection default
-# Wait for the connect to complete
-let $wait_condition=
- select count(*) = 1 from performance_schema.threads
- where `TYPE`='FOREGROUND' and PROCESSLIST_USER= 'user4';
---source include/wait_condition.inc
-
echo "================== Step 8 ==================";
call dump_thread();
execute dump_waits_account;
diff --git a/mysql-test/suite/perfschema/r/binlog_stmt.result b/mysql-test/suite/perfschema/r/binlog_stmt.result
index 319e688f3ef..b2993e454da 100644
--- a/mysql-test/suite/perfschema/r/binlog_stmt.result
+++ b/mysql-test/suite/perfschema/r/binlog_stmt.result
@@ -7,8 +7,6 @@ count(*) > 0
update performance_schema.setup_instruments set enabled='NO'
where name like "wait/synch/rwlock/sql/%"
and name not in ("wait/synch/rwlock/sql/CRYPTO_dynlock_value::lock");
-Warnings:
-Note 1592 Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. The statement is unsafe because it uses the general log, slow query log, or performance_schema table(s). This is unsafe because system tables may differ on slaves.
select count(*) > 0 from performance_schema.events_waits_current;
count(*) > 0
1
@@ -20,28 +18,19 @@ insert into test.t1
select thread_id from performance_schema.events_waits_current;
Warnings:
Note 1592 Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. The statement is unsafe because it uses the general log, slow query log, or performance_schema table(s). This is unsafe because system tables may differ on slaves.
-Note 1592 Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. Mixing self-logging and non-self-logging engines in a statement is unsafe.
insert into test.t2
select name from performance_schema.setup_instruments
where name like "wait/synch/rwlock/sql/%"
and name not in ("wait/synch/rwlock/sql/CRYPTO_dynlock_value::lock");
Warnings:
Note 1592 Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. The statement is unsafe because it uses the general log, slow query log, or performance_schema table(s). This is unsafe because system tables may differ on slaves.
-Note 1592 Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. Mixing self-logging and non-self-logging engines in a statement is unsafe.
drop table test.t1;
drop table test.t2;
update performance_schema.setup_instruments set enabled='YES'
where name like "wait/synch/rwlock/sql/%"
and name not in ("wait/synch/rwlock/sql/CRYPTO_dynlock_value::lock");
-Warnings:
-Note 1592 Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. The statement is unsafe because it uses the general log, slow query log, or performance_schema table(s). This is unsafe because system tables may differ on slaves.
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
-master-bin.000001 # Gtid # # BEGIN GTID #-#-#
-master-bin.000001 # Query # # use `test`; update performance_schema.setup_instruments set enabled='NO'
- where name like "wait/synch/rwlock/sql/%"
- and name not in ("wait/synch/rwlock/sql/CRYPTO_dynlock_value::lock")
-master-bin.000001 # Query # # COMMIT
master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; DROP TABLE IF EXISTS `t1` /* generated by server */
master-bin.000001 # Gtid # # GTID #-#-#
@@ -64,8 +53,3 @@ master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; DROP TABLE `t1` /* generated by server */
master-bin.000001 # Gtid # # GTID #-#-#
master-bin.000001 # Query # # use `test`; DROP TABLE `t2` /* generated by server */
-master-bin.000001 # Gtid # # BEGIN GTID #-#-#
-master-bin.000001 # Query # # use `test`; update performance_schema.setup_instruments set enabled='YES'
- where name like "wait/synch/rwlock/sql/%"
- and name not in ("wait/synch/rwlock/sql/CRYPTO_dynlock_value::lock")
-master-bin.000001 # Query # # COMMIT
diff --git a/mysql-test/suite/perfschema/r/csv_table_io.result b/mysql-test/suite/perfschema/r/csv_table_io.result
index d5b39a2d90c..f0b5a6bb935 100644
--- a/mysql-test/suite/perfschema/r/csv_table_io.result
+++ b/mysql-test/suite/perfschema/r/csv_table_io.result
@@ -126,6 +126,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/ddl_session_account_connect_attrs.result b/mysql-test/suite/perfschema/r/ddl_session_account_connect_attrs.result
new file mode 100644
index 00000000000..9d8c7212f43
--- /dev/null
+++ b/mysql-test/suite/perfschema/r/ddl_session_account_connect_attrs.result
@@ -0,0 +1,9 @@
+ALTER TABLE performance_schema.session_account_connect_attrs
+ADD COLUMN foo INTEGER;
+ERROR 42000: Access denied for user 'root'@'localhost' to database 'performance_schema'
+TRUNCATE TABLE performance_schema.session_account_connect_attrs;
+ERROR HY000: Invalid performance_schema usage.
+ALTER TABLE performance_schema.session_account_connect_attrs ADD INDEX test_index(ATTR_NAME);
+ERROR 42000: Access denied for user 'root'@'localhost' to database 'performance_schema'
+CREATE UNIQUE INDEX test_index ON performance_schema.session_account_connect_attrs(ATTR_NAME);
+ERROR 42000: Access denied for user 'root'@'localhost' to database 'performance_schema'
diff --git a/mysql-test/suite/perfschema/r/ddl_session_connect_attrs.result b/mysql-test/suite/perfschema/r/ddl_session_connect_attrs.result
new file mode 100644
index 00000000000..3177d493963
--- /dev/null
+++ b/mysql-test/suite/perfschema/r/ddl_session_connect_attrs.result
@@ -0,0 +1,9 @@
+ALTER TABLE performance_schema.session_connect_attrs
+ADD COLUMN foo INTEGER;
+ERROR 42000: Access denied for user 'root'@'localhost' to database 'performance_schema'
+TRUNCATE TABLE performance_schema.session_connect_attrs;
+ERROR HY000: Invalid performance_schema usage.
+ALTER TABLE performance_schema.session_connect_attrs ADD INDEX test_index(ATTR_NAME);
+ERROR 42000: Access denied for user 'root'@'localhost' to database 'performance_schema'
+CREATE UNIQUE INDEX test_index ON performance_schema.session_connect_attrs(ATTR_NAME);
+ERROR 42000: Access denied for user 'root'@'localhost' to database 'performance_schema'
diff --git a/mysql-test/suite/perfschema/r/digest_table_full.result b/mysql-test/suite/perfschema/r/digest_table_full.result
index 695c600842c..e95f9f5f73b 100644
--- a/mysql-test/suite/perfschema/r/digest_table_full.result
+++ b/mysql-test/suite/perfschema/r/digest_table_full.result
@@ -8,6 +8,9 @@ CREATE TABLE t2(a int);
CREATE TABLE t3(a int, b int);
CREATE TABLE t4(a int, b int);
CREATE TABLE t5(a int, b int, c int);
+CREATE TABLE t6(a int, b int, c int, d int);
+CREATE TABLE t11 (c CHAR(4));
+CREATE TABLE t12 (c CHAR(4));
TRUNCATE TABLE performance_schema.events_statements_summary_by_digest;
####################################
# EXECUTION
@@ -35,6 +38,15 @@ INSERT INTO t1 VALUES (1), (2), (3);
INSERT INTO t1 VALUES (1), (2), (3), (4);
INSERT INTO t3 VALUES (1, 2), (3, 4), (5, 6);
INSERT INTO t5 VALUES (1, 2, 3), (4, 5, 6), (7, 8, 9);
+INSERT INTO t1 VALUES (NULL);
+INSERT INTO t3 VALUES (NULL,NULL);
+INSERT INTO t3 VALUES (1,NULL);
+INSERT INTO t3 VALUES (NULL,1);
+INSERT INTO t6 VALUES (NULL, NULL, NULL, NULL);
+INSERT INTO t6 VALUES (1, NULL, NULL, NULL);
+INSERT INTO t6 VALUES (NULL, 2, NULL, NULL);
+INSERT INTO t6 VALUES (1, 2, 3, NULL);
+INSERT INTO t6 VALUES (1, 2, NULL, 4);
SELECT 1 + 1;
1 + 1
2
@@ -61,22 +73,47 @@ CREATE SCHEMA statements_digest_temp;
DROP SCHEMA statements_digest_temp;
CREATE DATABASE statements_digest_temp;
DROP DATABASE statements_digest_temp;
-SELECT 1 from t11;
-ERROR 42S02: Table 'statements_digest.t11' doesn't exist
-create table t11 (c char(4));
-create table t11 (c char(4));
-ERROR 42S01: Table 't11' already exists
-insert into t11 values("MySQL");
+SELECT 1 FROM no_such_table;
+ERROR 42S02: Table 'statements_digest.no_such_table' doesn't exist
+CREATE TABLE dup_table (c char(4));
+CREATE TABLE dup_table (c char(4));
+ERROR 42S01: Table 'dup_table' already exists
+DROP TABLE dup_table;
+INSERT INTO t11 VALUES("MySQL");
Warnings:
Warning 1265 Data truncated for column 'c' at row 1
+PREPARE stmt FROM "SELECT * FROM t12";
+EXECUTE stmt;
+c
+EXECUTE stmt;
+c
+DEALLOCATE PREPARE stmt;
+CREATE PROCEDURE p1() BEGIN SELECT * FROM t12; END//
+CALL p1();
+c
+CALL p1();
+c
+DROP PROCEDURE p1;
+CREATE FUNCTION `func`(a INT, b INT) RETURNS int(11) RETURN a+b //
+select func(3,4);
+func(3,4)
+7
+select func(13,42);
+func(13,42)
+55
+DROP FUNCTION func;
+CREATE TRIGGER trg BEFORE INSERT ON t12 FOR EACH ROW SET @a:=1;
+INSERT INTO t12 VALUES ("abc");
+INSERT INTO t12 VALUES ("def");
+DROP TRIGGER trg;
####################################
# QUERYING PS STATEMENT DIGEST
####################################
-SELECT DIGEST, DIGEST_TEXT, COUNT_STAR, SUM_ROWS_AFFECTED, SUM_WARNINGS,
+SELECT SCHEMA_NAME, DIGEST, DIGEST_TEXT, COUNT_STAR, SUM_ROWS_AFFECTED, SUM_WARNINGS,
SUM_ERRORS FROM performance_schema.events_statements_summary_by_digest;
-DIGEST DIGEST_TEXT COUNT_STAR SUM_ROWS_AFFECTED SUM_WARNINGS SUM_ERRORS
-NULL NULL 29 21 1 2
-025af09b416617ee444962d35913c0ab TRUNCATE TABLE performance_schema . events_statements_summary_by_digest 1 0 0 0
+SCHEMA_NAME DIGEST DIGEST_TEXT COUNT_STAR SUM_ROWS_AFFECTED SUM_WARNINGS SUM_ERRORS
+NULL NULL NULL 55 32 1 2
+statements_digest 172976feb9113e0dc6aa2bbac59a41d7 TRUNCATE TABLE performance_schema . events_statements_summary_by_digest 1 0 0 0
SHOW VARIABLES LIKE "performance_schema_digests_size";
Variable_name Value
performance_schema_digests_size 2
@@ -91,4 +128,7 @@ DROP TABLE IF EXISTS t2;
DROP TABLE IF EXISTS t3;
DROP TABLE IF EXISTS t4;
DROP TABLE IF EXISTS t5;
+DROP TABLE IF EXISTS t6;
+DROP TABLE IF EXISTS t11;
+DROP TABLE IF EXISTS t12;
DROP DATABASE IF EXISTS statements_digest;
diff --git a/mysql-test/suite/perfschema/r/dml_esms_by_digest.result b/mysql-test/suite/perfschema/r/dml_esms_by_digest.result
index 032c19112e1..f831b4c2e86 100644
--- a/mysql-test/suite/perfschema/r/dml_esms_by_digest.result
+++ b/mysql-test/suite/perfschema/r/dml_esms_by_digest.result
@@ -1,9 +1,9 @@
select * from performance_schema.events_statements_summary_by_digest
where digest like 'XXYYZZ%' limit 1;
-DIGEST DIGEST_TEXT COUNT_STAR SUM_TIMER_WAIT MIN_TIMER_WAIT AVG_TIMER_WAIT MAX_TIMER_WAIT SUM_LOCK_TIME SUM_ERRORS SUM_WARNINGS SUM_ROWS_AFFECTED SUM_ROWS_SENT SUM_ROWS_EXAMINED SUM_CREATED_TMP_DISK_TABLES SUM_CREATED_TMP_TABLES SUM_SELECT_FULL_JOIN SUM_SELECT_FULL_RANGE_JOIN SUM_SELECT_RANGE SUM_SELECT_RANGE_CHECK SUM_SELECT_SCAN SUM_SORT_MERGE_PASSES SUM_SORT_RANGE SUM_SORT_ROWS SUM_SORT_SCAN SUM_NO_INDEX_USED SUM_NO_GOOD_INDEX_USED FIRST_SEEN LAST_SEEN
+SCHEMA_NAME DIGEST DIGEST_TEXT COUNT_STAR SUM_TIMER_WAIT MIN_TIMER_WAIT AVG_TIMER_WAIT MAX_TIMER_WAIT SUM_LOCK_TIME SUM_ERRORS SUM_WARNINGS SUM_ROWS_AFFECTED SUM_ROWS_SENT SUM_ROWS_EXAMINED SUM_CREATED_TMP_DISK_TABLES SUM_CREATED_TMP_TABLES SUM_SELECT_FULL_JOIN SUM_SELECT_FULL_RANGE_JOIN SUM_SELECT_RANGE SUM_SELECT_RANGE_CHECK SUM_SELECT_SCAN SUM_SORT_MERGE_PASSES SUM_SORT_RANGE SUM_SORT_ROWS SUM_SORT_SCAN SUM_NO_INDEX_USED SUM_NO_GOOD_INDEX_USED FIRST_SEEN LAST_SEEN
select * from performance_schema.events_statements_summary_by_digest
where digest='XXYYZZ';
-DIGEST DIGEST_TEXT COUNT_STAR SUM_TIMER_WAIT MIN_TIMER_WAIT AVG_TIMER_WAIT MAX_TIMER_WAIT SUM_LOCK_TIME SUM_ERRORS SUM_WARNINGS SUM_ROWS_AFFECTED SUM_ROWS_SENT SUM_ROWS_EXAMINED SUM_CREATED_TMP_DISK_TABLES SUM_CREATED_TMP_TABLES SUM_SELECT_FULL_JOIN SUM_SELECT_FULL_RANGE_JOIN SUM_SELECT_RANGE SUM_SELECT_RANGE_CHECK SUM_SELECT_SCAN SUM_SORT_MERGE_PASSES SUM_SORT_RANGE SUM_SORT_ROWS SUM_SORT_SCAN SUM_NO_INDEX_USED SUM_NO_GOOD_INDEX_USED FIRST_SEEN LAST_SEEN
+SCHEMA_NAME DIGEST DIGEST_TEXT COUNT_STAR SUM_TIMER_WAIT MIN_TIMER_WAIT AVG_TIMER_WAIT MAX_TIMER_WAIT SUM_LOCK_TIME SUM_ERRORS SUM_WARNINGS SUM_ROWS_AFFECTED SUM_ROWS_SENT SUM_ROWS_EXAMINED SUM_CREATED_TMP_DISK_TABLES SUM_CREATED_TMP_TABLES SUM_SELECT_FULL_JOIN SUM_SELECT_FULL_RANGE_JOIN SUM_SELECT_RANGE SUM_SELECT_RANGE_CHECK SUM_SELECT_SCAN SUM_SORT_MERGE_PASSES SUM_SORT_RANGE SUM_SORT_ROWS SUM_SORT_SCAN SUM_NO_INDEX_USED SUM_NO_GOOD_INDEX_USED FIRST_SEEN LAST_SEEN
insert into performance_schema.events_statements_summary_by_digest
set digest='XXYYZZ', count_star=1, sum_timer_wait=2, min_timer_wait=3,
avg_timer_wait=4, max_timer_wait=5;
diff --git a/mysql-test/suite/perfschema/r/dml_handler.result b/mysql-test/suite/perfschema/r/dml_handler.result
index 41ffa18799f..2a1cc7035af 100644
--- a/mysql-test/suite/perfschema/r/dml_handler.result
+++ b/mysql-test/suite/perfschema/r/dml_handler.result
@@ -9,60 +9,69 @@ SELECT COUNT(*) FROM table_list INTO @table_count;
# For each table in the performance schema, attempt HANDLER...OPEN,
# which should fail with an error 1031, ER_ILLEGAL_HA.
-SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=49;
+SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=52;
HANDLER performance_schema.users OPEN;
ERROR HY000: Storage engine PERFORMANCE_SCHEMA of the table `performance_schema`.`users` doesn't have this option
-SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=48;
+SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=51;
HANDLER performance_schema.threads OPEN;
ERROR HY000: Storage engine PERFORMANCE_SCHEMA of the table `performance_schema`.`threads` doesn't have this option
-SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=47;
+SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=50;
HANDLER performance_schema.table_lock_waits_summary_by_table OPEN;
ERROR HY000: Storage engine PERFORMANCE_SCHEMA of the table `performance_schema`.`table_lock_waits_summary_by_table` doesn't have this option
-SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=46;
+SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=49;
HANDLER performance_schema.table_io_waits_summary_by_table OPEN;
ERROR HY000: Storage engine PERFORMANCE_SCHEMA of the table `performance_schema`.`table_io_waits_summary_by_table` doesn't have this option
-SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=45;
+SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=48;
HANDLER performance_schema.table_io_waits_summary_by_index_usage OPEN;
ERROR HY000: Storage engine PERFORMANCE_SCHEMA of the table `performance_schema`.`table_io_waits_summary_by_index_usage` doesn't have this option
-SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=44;
+SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=47;
HANDLER performance_schema.socket_summary_by_instance OPEN;
ERROR HY000: Storage engine PERFORMANCE_SCHEMA of the table `performance_schema`.`socket_summary_by_instance` doesn't have this option
-SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=43;
+SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=46;
HANDLER performance_schema.socket_summary_by_event_name OPEN;
ERROR HY000: Storage engine PERFORMANCE_SCHEMA of the table `performance_schema`.`socket_summary_by_event_name` doesn't have this option
-SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=42;
+SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=45;
HANDLER performance_schema.socket_instances OPEN;
ERROR HY000: Storage engine PERFORMANCE_SCHEMA of the table `performance_schema`.`socket_instances` doesn't have this option
-SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=41;
+SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=44;
HANDLER performance_schema.setup_timers OPEN;
ERROR HY000: Storage engine PERFORMANCE_SCHEMA of the table `performance_schema`.`setup_timers` doesn't have this option
-SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=40;
+SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=43;
HANDLER performance_schema.setup_objects OPEN;
ERROR HY000: Storage engine PERFORMANCE_SCHEMA of the table `performance_schema`.`setup_objects` doesn't have this option
-SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=39;
+SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=42;
HANDLER performance_schema.setup_instruments OPEN;
ERROR HY000: Storage engine PERFORMANCE_SCHEMA of the table `performance_schema`.`setup_instruments` doesn't have this option
-SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=38;
+SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=41;
HANDLER performance_schema.setup_consumers OPEN;
ERROR HY000: Storage engine PERFORMANCE_SCHEMA of the table `performance_schema`.`setup_consumers` doesn't have this option
-SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=37;
+SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=40;
HANDLER performance_schema.setup_actors OPEN;
ERROR HY000: Storage engine PERFORMANCE_SCHEMA of the table `performance_schema`.`setup_actors` doesn't have this option
-SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=36;
+SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=39;
+HANDLER performance_schema.session_connect_attrs OPEN;
+ERROR HY000: Storage engine PERFORMANCE_SCHEMA of the table `performance_schema`.`session_connect_attrs` doesn't have this option
+SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=38;
+HANDLER performance_schema.session_account_connect_attrs OPEN;
+ERROR HY000: Storage engine PERFORMANCE_SCHEMA of the table `performance_schema`.`session_account_connect_attrs` doesn't have this option
+SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=37;
HANDLER performance_schema.rwlock_instances OPEN;
ERROR HY000: Storage engine PERFORMANCE_SCHEMA of the table `performance_schema`.`rwlock_instances` doesn't have this option
-SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=35;
+SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=36;
HANDLER performance_schema.performance_timers OPEN;
ERROR HY000: Storage engine PERFORMANCE_SCHEMA of the table `performance_schema`.`performance_timers` doesn't have this option
-SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=34;
+SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=35;
HANDLER performance_schema.objects_summary_global_by_type OPEN;
ERROR HY000: Storage engine PERFORMANCE_SCHEMA of the table `performance_schema`.`objects_summary_global_by_type` doesn't have this option
-SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=33;
+SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=34;
HANDLER performance_schema.mutex_instances OPEN;
ERROR HY000: Storage engine PERFORMANCE_SCHEMA of the table `performance_schema`.`mutex_instances` doesn't have this option
-SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=32;
+SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=33;
HANDLER performance_schema.hosts OPEN;
ERROR HY000: Storage engine PERFORMANCE_SCHEMA of the table `performance_schema`.`hosts` doesn't have this option
+SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=32;
+HANDLER performance_schema.host_cache OPEN;
+ERROR HY000: Storage engine PERFORMANCE_SCHEMA of the table `performance_schema`.`host_cache` doesn't have this option
SELECT TABLE_NAME INTO @table_name FROM table_list WHERE id=31;
HANDLER performance_schema.file_summary_by_instance OPEN;
ERROR HY000: Storage engine PERFORMANCE_SCHEMA of the table `performance_schema`.`file_summary_by_instance` doesn't have this option
diff --git a/mysql-test/suite/perfschema/r/dml_session_account_connect_attrs.result b/mysql-test/suite/perfschema/r/dml_session_account_connect_attrs.result
new file mode 100644
index 00000000000..61b1cddd513
--- /dev/null
+++ b/mysql-test/suite/perfschema/r/dml_session_account_connect_attrs.result
@@ -0,0 +1,25 @@
+SELECT * FROM performance_schema.session_account_connect_attrs
+LIMIT 1;
+SELECT * FROM performance_schema.session_account_connect_attrs
+where ATTR_NAME='FOO' OR ATTR_VALUE='BAR';
+INSERT INTO performance_schema.session_account_connect_attrs
+SET ATTR_NAME='FOO', ATTR_VALUE='BAR',
+ORDINAL_POSITION=100, PROCESS_ID=102;
+ERROR 42000: INSERT command denied to user 'root'@'localhost' for table 'session_account_connect_attrs'
+UPDATE performance_schema.session_account_connect_attrs
+SET ATTR_NAME='FOO';
+ERROR 42000: UPDATE command denied to user 'root'@'localhost' for table 'session_account_connect_attrs'
+UPDATE performance_schema.session_account_connect_attrs
+SET ATTR_NAME='FOO' WHERE ATTR_VALUE='BAR';
+ERROR 42000: UPDATE command denied to user 'root'@'localhost' for table 'session_account_connect_attrs'
+DELETE FROM performance_schema.session_account_connect_attrs
+WHERE ATTR_VALUE='BAR';
+ERROR 42000: DELETE command denied to user 'root'@'localhost' for table 'session_account_connect_attrs'
+DELETE FROM performance_schema.session_account_connect_attrs;
+ERROR 42000: DELETE command denied to user 'root'@'localhost' for table 'session_account_connect_attrs'
+LOCK TABLES performance_schema.session_account_connect_attrs READ;
+ERROR 42000: SELECT, LOCK TABLES command denied to user 'root'@'localhost' for table 'session_account_connect_attrs'
+UNLOCK TABLES;
+LOCK TABLES performance_schema.session_account_connect_attrs WRITE;
+ERROR 42000: SELECT, LOCK TABLES command denied to user 'root'@'localhost' for table 'session_account_connect_attrs'
+UNLOCK TABLES;
diff --git a/mysql-test/suite/perfschema/r/dml_session_connect_attrs.result b/mysql-test/suite/perfschema/r/dml_session_connect_attrs.result
new file mode 100644
index 00000000000..d1dea1472d4
--- /dev/null
+++ b/mysql-test/suite/perfschema/r/dml_session_connect_attrs.result
@@ -0,0 +1,25 @@
+SELECT * FROM performance_schema.session_connect_attrs
+LIMIT 1;
+SELECT * FROM performance_schema.session_connect_attrs
+where ATTR_NAME='FOO' OR ATTR_VALUE='BAR';
+INSERT INTO performance_schema.session_connect_attrs
+SET ATTR_NAME='FOO', ATTR_VALUE='BAR',
+ORDINAL_POSITION=100, PROCESS_ID=102;
+ERROR 42000: INSERT command denied to user 'root'@'localhost' for table 'session_connect_attrs'
+UPDATE performance_schema.session_connect_attrs
+SET ATTR_NAME='FOO';
+ERROR 42000: UPDATE command denied to user 'root'@'localhost' for table 'session_connect_attrs'
+UPDATE performance_schema.session_connect_attrs
+SET ATTR_NAME='FOO' WHERE ATTR_VALUE='BAR';
+ERROR 42000: UPDATE command denied to user 'root'@'localhost' for table 'session_connect_attrs'
+DELETE FROM performance_schema.session_connect_attrs
+WHERE ATTR_VALUE='BAR';
+ERROR 42000: DELETE command denied to user 'root'@'localhost' for table 'session_connect_attrs'
+DELETE FROM performance_schema.session_connect_attrs;
+ERROR 42000: DELETE command denied to user 'root'@'localhost' for table 'session_connect_attrs'
+LOCK TABLES performance_schema.session_connect_attrs READ;
+ERROR 42000: SELECT, LOCK TABLES command denied to user 'root'@'localhost' for table 'session_connect_attrs'
+UNLOCK TABLES;
+LOCK TABLES performance_schema.session_connect_attrs WRITE;
+ERROR 42000: SELECT, LOCK TABLES command denied to user 'root'@'localhost' for table 'session_connect_attrs'
+UNLOCK TABLES;
diff --git a/mysql-test/suite/perfschema/r/dml_setup_instruments.result b/mysql-test/suite/perfschema/r/dml_setup_instruments.result
index 0632e6c94bc..ffe7699bef8 100644
--- a/mysql-test/suite/perfschema/r/dml_setup_instruments.result
+++ b/mysql-test/suite/perfschema/r/dml_setup_instruments.result
@@ -72,3 +72,9 @@ LOCK TABLES performance_schema.setup_instruments READ;
UNLOCK TABLES;
LOCK TABLES performance_schema.setup_instruments WRITE;
UNLOCK TABLES;
+
+# Bug#13813193 ASSERTION `TABLE->READ_SET ==
+# &TABLE->DEF_READ_SET' FAILED / MYSQL_UPDATE
+
+UPDATE performance_schema.setup_instruments SET timed='NO'
+ORDER BY RAND();
diff --git a/mysql-test/suite/perfschema/r/func_file_io.result b/mysql-test/suite/perfschema/r/func_file_io.result
index d925463a8c2..a71c8e601aa 100644
--- a/mysql-test/suite/perfschema/r/func_file_io.result
+++ b/mysql-test/suite/perfschema/r/func_file_io.result
@@ -144,6 +144,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/func_mutex.result b/mysql-test/suite/perfschema/r/func_mutex.result
index 657f45d70e9..9f86ba49d39 100644
--- a/mysql-test/suite/perfschema/r/func_mutex.result
+++ b/mysql-test/suite/perfschema/r/func_mutex.result
@@ -119,6 +119,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_again_allow.result b/mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_again_allow.result
index d2e99912200..ffe3dc2e0eb 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_again_allow.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_again_allow.result
@@ -1,9 +1,28 @@
call mtr.add_suppression("santa.claus.ipv4.example.com");
flush status;
flush hosts;
+flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
+select @@global.max_connect_errors;
+@@global.max_connect_errors
+100
+select @@global.max_user_connections;
+@@global.max_user_connections
+0
+select @@global.max_connections;
+@@global.max_connections
+151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
grant select on test.* to 'root'@'192.0.2.4';
select "Con1 is alive";
Con1 is alive
@@ -11,7 +30,7 @@ Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_error_again";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_error_again";
select "Con2 is alive";
Con2 is alive
Con2 is alive
@@ -82,4 +101,4 @@ FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
revoke select on test.* from 'root'@'192.0.2.4';
drop user 'root'@'192.0.2.4';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_again_deny.result b/mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_again_deny.result
index 39057797b76..08f280f30a9 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_again_deny.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_again_deny.result
@@ -1,17 +1,36 @@
call mtr.add_suppression("santa.claus.ipv4.example.com");
flush status;
flush hosts;
+flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
+select @@global.max_connect_errors;
+@@global.max_connect_errors
+100
+select @@global.max_user_connections;
+@@global.max_user_connections
+0
+select @@global.max_connections;
+@@global.max_connections
+151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
select "Con1 is alive";
Con1 is alive
Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_error_again";
-ERROR HY000: Host '192.0.2.4' is not allowed to connect to this MySQL server
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_error_again";
+ERROR HY000: Host '192.0.2.4' is not allowed to connect to this MariaDB server
"Dumping performance_schema.host_cache"
IP 192.0.2.4
HOST NULL
@@ -40,7 +59,7 @@ COUNT_LOCAL_ERRORS 0
COUNT_UNKNOWN_ERRORS 0
FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
-ERROR HY000: Host '192.0.2.4' is not allowed to connect to this MySQL server
+ERROR HY000: Host '192.0.2.4' is not allowed to connect to this MariaDB server
"Dumping performance_schema.host_cache"
IP 192.0.2.4
HOST NULL
@@ -69,4 +88,4 @@ COUNT_LOCAL_ERRORS 0
COUNT_UNKNOWN_ERRORS 0
FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_bad_allow.result b/mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_bad_allow.result
index 7bddddb44a2..7c658e17c55 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_bad_allow.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_bad_allow.result
@@ -1,9 +1,28 @@
call mtr.add_suppression("santa.claus.ipv4.example.com");
flush status;
flush hosts;
+flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
+select @@global.max_connect_errors;
+@@global.max_connect_errors
+100
+select @@global.max_user_connections;
+@@global.max_user_connections
+0
+select @@global.max_connections;
+@@global.max_connections
+151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
grant select on test.* to 'root'@'192.0.2.4';
select "Con1 is alive";
Con1 is alive
@@ -11,7 +30,7 @@ Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_bad_ipv4";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_bad_ipv4";
select "Con2 is alive";
Con2 is alive
Con2 is alive
@@ -82,4 +101,4 @@ FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
revoke select on test.* from 'root'@'192.0.2.4';
drop user 'root'@'192.0.2.4';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_bad_deny.result b/mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_bad_deny.result
index 4bb67d51eb7..924b66cf726 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_bad_deny.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_bad_deny.result
@@ -1,17 +1,36 @@
call mtr.add_suppression("santa.claus.ipv4.example.com");
flush status;
flush hosts;
+flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
+select @@global.max_connect_errors;
+@@global.max_connect_errors
+100
+select @@global.max_user_connections;
+@@global.max_user_connections
+0
+select @@global.max_connections;
+@@global.max_connections
+151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
select "Con1 is alive";
Con1 is alive
Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_bad_ipv4";
-ERROR HY000: Host '192.0.2.4' is not allowed to connect to this MySQL server
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_bad_ipv4";
+ERROR HY000: Host '192.0.2.4' is not allowed to connect to this MariaDB server
"Dumping performance_schema.host_cache"
IP 192.0.2.4
HOST NULL
@@ -40,7 +59,7 @@ COUNT_LOCAL_ERRORS 0
COUNT_UNKNOWN_ERRORS 0
FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
-ERROR HY000: Host '192.0.2.4' is not allowed to connect to this MySQL server
+ERROR HY000: Host '192.0.2.4' is not allowed to connect to this MariaDB server
"Dumping performance_schema.host_cache"
IP 192.0.2.4
HOST NULL
@@ -69,4 +88,4 @@ COUNT_LOCAL_ERRORS 0
COUNT_UNKNOWN_ERRORS 0
FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_good_allow.result b/mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_good_allow.result
index dd69ff95ccd..5657887e439 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_good_allow.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_good_allow.result
@@ -1,9 +1,28 @@
call mtr.add_suppression("192.0.2.4");
flush status;
flush hosts;
+flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
+select @@global.max_connect_errors;
+@@global.max_connect_errors
+100
+select @@global.max_user_connections;
+@@global.max_user_connections
+0
+select @@global.max_connections;
+@@global.max_connections
+151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
grant select on test.* to 'root'@'santa.claus.ipv4.example.com';
select "Con1 is alive";
Con1 is alive
@@ -11,7 +30,7 @@ Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4";
select "Con2 is alive";
Con2 is alive
Con2 is alive
@@ -82,4 +101,4 @@ FIRST_ERROR_SEEN null
LAST_ERROR_SEEN null
revoke select on test.* from 'root'@'santa.claus.ipv4.example.com';
drop user 'root'@'santa.claus.ipv4.example.com';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_good_deny.result b/mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_good_deny.result
index 1443c9708a6..1846dbe0719 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_good_deny.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_good_deny.result
@@ -1,17 +1,36 @@
call mtr.add_suppression("192.0.2.4");
flush status;
flush hosts;
+flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
+select @@global.max_connect_errors;
+@@global.max_connect_errors
+100
+select @@global.max_user_connections;
+@@global.max_user_connections
+0
+select @@global.max_connections;
+@@global.max_connections
+151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
select "Con1 is alive";
Con1 is alive
Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4";
-ERROR HY000: Host 'santa.claus.ipv4.example.com' is not allowed to connect to this MySQL server
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4";
+ERROR HY000: Host 'santa.claus.ipv4.example.com' is not allowed to connect to this MariaDB server
"Dumping performance_schema.host_cache"
IP 192.0.2.4
HOST santa.claus.ipv4.example.com
@@ -40,7 +59,7 @@ COUNT_LOCAL_ERRORS 0
COUNT_UNKNOWN_ERRORS 0
FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
-ERROR HY000: Host 'santa.claus.ipv4.example.com' is not allowed to connect to this MySQL server
+ERROR HY000: Host 'santa.claus.ipv4.example.com' is not allowed to connect to this MariaDB server
"Dumping performance_schema.host_cache"
IP 192.0.2.4
HOST santa.claus.ipv4.example.com
@@ -69,4 +88,4 @@ COUNT_LOCAL_ERRORS 0
COUNT_UNKNOWN_ERRORS 0
FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_noname_allow.result b/mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_noname_allow.result
index a67937e32b2..115af4792c3 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_noname_allow.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_noname_allow.result
@@ -1,9 +1,28 @@
call mtr.add_suppression("santa.claus.ipv4.example.com");
flush status;
flush hosts;
+flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
+select @@global.max_connect_errors;
+@@global.max_connect_errors
+100
+select @@global.max_user_connections;
+@@global.max_user_connections
+0
+select @@global.max_connections;
+@@global.max_connections
+151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
grant select on test.* to 'root'@'santa.claus.ipv4.example.com';
grant select on test.* to 'root'@'192.0.2.4';
select "Con1 is alive";
@@ -12,7 +31,7 @@ Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_error_noname";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_error_noname";
select "Con2 is alive";
Con2 is alive
Con2 is alive
@@ -85,4 +104,4 @@ revoke select on test.* from 'root'@'santa.claus.ipv4.example.com';
revoke select on test.* from 'root'@'192.0.2.4';
drop user 'root'@'santa.claus.ipv4.example.com';
drop user 'root'@'192.0.2.4';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_noname_deny.result b/mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_noname_deny.result
index 0ec0b833d0e..618bcea553d 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_noname_deny.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv4_addrinfo_noname_deny.result
@@ -1,17 +1,36 @@
call mtr.add_suppression("santa.claus.ipv4.example.com");
flush status;
flush hosts;
+flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
+select @@global.max_connect_errors;
+@@global.max_connect_errors
+100
+select @@global.max_user_connections;
+@@global.max_user_connections
+0
+select @@global.max_connections;
+@@global.max_connections
+151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
select "Con1 is alive";
Con1 is alive
Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_error_noname";
-ERROR HY000: Host '192.0.2.4' is not allowed to connect to this MySQL server
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_error_noname";
+ERROR HY000: Host '192.0.2.4' is not allowed to connect to this MariaDB server
"Dumping performance_schema.host_cache"
IP 192.0.2.4
HOST NULL
@@ -40,7 +59,7 @@ COUNT_LOCAL_ERRORS 0
COUNT_UNKNOWN_ERRORS 0
FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
-ERROR HY000: Host '192.0.2.4' is not allowed to connect to this MySQL server
+ERROR HY000: Host '192.0.2.4' is not allowed to connect to this MariaDB server
"Dumping performance_schema.host_cache"
IP 192.0.2.4
HOST NULL
@@ -69,4 +88,4 @@ COUNT_LOCAL_ERRORS 0
COUNT_UNKNOWN_ERRORS 0
FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv4_auth_plugin.result b/mysql-test/suite/perfschema/r/hostcache_ipv4_auth_plugin.result
index 4e1cbacf228..553cd609b4d 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv4_auth_plugin.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv4_auth_plugin.result
@@ -1,8 +1,27 @@
flush status;
flush hosts;
+flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
+select @@global.max_connect_errors;
+@@global.max_connect_errors
+100
+select @@global.max_user_connections;
+@@global.max_user_connections
+0
+select @@global.max_connections;
+@@global.max_connections
+151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
CREATE USER 'plug'@'santa.claus.ipv4.example.com'
IDENTIFIED WITH 'test_plugin_server' AS 'plug_dest';
CREATE USER 'plug_dest'@'santa.claus.ipv4.example.com'
@@ -14,7 +33,7 @@ Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4";
uninstall plugin test_plugin_server;
ERROR HY000: Plugin 'test_plugin_server' is not loaded
"Dumping performance_schema.host_cache"
@@ -177,4 +196,4 @@ REVOKE PROXY ON 'plug_dest'@'santa.claus.ipv4.example.com'
FROM 'plug'@'santa.claus.ipv4.example.com';
DROP USER 'plug'@'santa.claus.ipv4.example.com';
DROP USER 'plug_dest'@'santa.claus.ipv4.example.com';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv4_blocked.result b/mysql-test/suite/perfschema/r/hostcache_ipv4_blocked.result
index 8e378e52bf5..88c7c2fbd3d 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv4_blocked.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv4_blocked.result
@@ -1,12 +1,27 @@
flush status;
flush hosts;
flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
select @@global.max_connect_errors;
@@global.max_connect_errors
-10
+100
+select @@global.max_user_connections;
+@@global.max_user_connections
+0
+select @@global.max_connections;
+@@global.max_connections
+151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
select @@global.max_connect_errors into @saved_max_connect_errors;
set global max_connect_errors = 2;
grant select on test.* to 'root'@'santa.claus.ipv4.example.com';
@@ -16,7 +31,7 @@ Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4,native_password_bad_reply";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4,native_password_bad_reply";
ERROR 28000: Access denied for user 'quota'@'santa.claus.ipv4.example.com' (using password: NO)
"Dumping performance_schema.host_cache"
IP 192.0.2.4
@@ -195,4 +210,4 @@ LAST_ERROR_SEEN set
revoke select on test.* from 'root'@'santa.claus.ipv4.example.com';
drop user 'root'@'santa.claus.ipv4.example.com';
set global max_connect_errors = @saved_max_connect_errors;
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv4_format.result b/mysql-test/suite/perfschema/r/hostcache_ipv4_format.result
index 5389dd43d11..baf9eae9f5c 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv4_format.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv4_format.result
@@ -1,17 +1,36 @@
call mtr.add_suppression("192.0.2.4");
flush status;
flush hosts;
+flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
+select @@global.max_connect_errors;
+@@global.max_connect_errors
+100
+select @@global.max_user_connections;
+@@global.max_user_connections
+0
+select @@global.max_connections;
+@@global.max_connections
+151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
select "Con1 is alive";
Con1 is alive
Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_format_ipv4";
-ERROR HY000: Host '192.0.2.4' is not allowed to connect to this MySQL server
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_format_ipv4";
+ERROR HY000: Host '192.0.2.4' is not allowed to connect to this MariaDB server
"Dumping performance_schema.host_cache"
IP 192.0.2.4
HOST NULL
@@ -40,7 +59,7 @@ COUNT_LOCAL_ERRORS 0
COUNT_UNKNOWN_ERRORS 0
FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
-ERROR HY000: Host '192.0.2.4' is not allowed to connect to this MySQL server
+ERROR HY000: Host '192.0.2.4' is not allowed to connect to this MariaDB server
"Dumping performance_schema.host_cache"
IP 192.0.2.4
HOST NULL
@@ -69,4 +88,4 @@ COUNT_LOCAL_ERRORS 0
COUNT_UNKNOWN_ERRORS 0
FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv4_max_con.result b/mysql-test/suite/perfschema/r/hostcache_ipv4_max_con.result
index 069fac2ddb8..31e4bc9f843 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv4_max_con.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv4_max_con.result
@@ -1,16 +1,29 @@
flush status;
flush hosts;
flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
+select @@global.max_connect_errors;
+@@global.max_connect_errors
+100
select @@global.max_user_connections;
@@global.max_user_connections
-0
+1024
select @@global.max_connections;
@@global.max_connections
151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
select @@global.max_connections into @saved_max_connections;
+select @@global.max_user_connections into @saved_max_user_connections;
create user 'quota'@'santa.claus.ipv4.example.com';
grant select on test.* to 'quota'@'santa.claus.ipv4.example.com';
grant usage on *.* to 'quota'@'santa.claus.ipv4.example.com'
@@ -21,7 +34,7 @@ Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4";
select "Con2a is alive";
Con2a is alive
Con2a is alive
@@ -641,5 +654,6 @@ COUNT_UNKNOWN_ERRORS 0
FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
set global max_connections = @saved_max_connections;
+set global max_user_connections = @saved_max_user_connections;
drop user 'quota'@'santa.claus.ipv4.example.com';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv4_nameinfo_again_allow.result b/mysql-test/suite/perfschema/r/hostcache_ipv4_nameinfo_again_allow.result
index c2163a2d5b1..6531f7e69f0 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv4_nameinfo_again_allow.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv4_nameinfo_again_allow.result
@@ -1,9 +1,28 @@
call mtr.add_suppression("192.0.2.4");
flush status;
flush hosts;
+flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
+select @@global.max_connect_errors;
+@@global.max_connect_errors
+100
+select @@global.max_user_connections;
+@@global.max_user_connections
+0
+select @@global.max_connections;
+@@global.max_connections
+151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
grant select on test.* to 'root'@'192.0.2.4';
grant select on test.* to 'root'@'santa.claus.ipv4.example.com';
select "Con1 is alive";
@@ -12,7 +31,7 @@ Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_error_again";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_error_again";
select "Con2 is alive";
Con2 is alive
Con2 is alive
@@ -81,7 +100,7 @@ COUNT_LOCAL_ERRORS 0
COUNT_UNKNOWN_ERRORS 0
FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4";
select "Con4 is alive";
Con4 is alive
Con4 is alive
@@ -154,4 +173,4 @@ revoke select on test.* from 'root'@'192.0.2.4';
revoke select on test.* from 'root'@'santa.claus.ipv4.example.com';
drop user 'root'@'192.0.2.4';
drop user 'root'@'santa.claus.ipv4.example.com';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv4_nameinfo_again_deny.result b/mysql-test/suite/perfschema/r/hostcache_ipv4_nameinfo_again_deny.result
index 048081756bb..2976f8c9e16 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv4_nameinfo_again_deny.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv4_nameinfo_again_deny.result
@@ -1,17 +1,36 @@
call mtr.add_suppression("192.0.2.4");
flush status;
flush hosts;
+flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
+select @@global.max_connect_errors;
+@@global.max_connect_errors
+100
+select @@global.max_user_connections;
+@@global.max_user_connections
+0
+select @@global.max_connections;
+@@global.max_connections
+151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
select "Con1 is alive";
Con1 is alive
Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_error_again";
-ERROR HY000: Host '192.0.2.4' is not allowed to connect to this MySQL server
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_error_again";
+ERROR HY000: Host '192.0.2.4' is not allowed to connect to this MariaDB server
"Dumping performance_schema.host_cache"
IP 192.0.2.4
HOST NULL
@@ -40,7 +59,7 @@ COUNT_LOCAL_ERRORS 0
COUNT_UNKNOWN_ERRORS 0
FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
-ERROR HY000: Host '192.0.2.4' is not allowed to connect to this MySQL server
+ERROR HY000: Host '192.0.2.4' is not allowed to connect to this MariaDB server
"Dumping performance_schema.host_cache"
IP 192.0.2.4
HOST NULL
@@ -69,8 +88,8 @@ COUNT_LOCAL_ERRORS 0
COUNT_UNKNOWN_ERRORS 0
FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4";
-ERROR HY000: Host 'santa.claus.ipv4.example.com' is not allowed to connect to this MySQL server
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4";
+ERROR HY000: Host 'santa.claus.ipv4.example.com' is not allowed to connect to this MariaDB server
"Dumping performance_schema.host_cache"
IP 192.0.2.4
HOST santa.claus.ipv4.example.com
@@ -99,7 +118,7 @@ COUNT_LOCAL_ERRORS 0
COUNT_UNKNOWN_ERRORS 0
FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
-ERROR HY000: Host 'santa.claus.ipv4.example.com' is not allowed to connect to this MySQL server
+ERROR HY000: Host 'santa.claus.ipv4.example.com' is not allowed to connect to this MariaDB server
"Dumping performance_schema.host_cache"
IP 192.0.2.4
HOST santa.claus.ipv4.example.com
@@ -128,4 +147,4 @@ COUNT_LOCAL_ERRORS 0
COUNT_UNKNOWN_ERRORS 0
FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv4_nameinfo_noname_allow.result b/mysql-test/suite/perfschema/r/hostcache_ipv4_nameinfo_noname_allow.result
index 85a825f3907..315f90ce3e5 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv4_nameinfo_noname_allow.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv4_nameinfo_noname_allow.result
@@ -1,9 +1,28 @@
call mtr.add_suppression("192.0.2.4");
flush status;
flush hosts;
+flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
+select @@global.max_connect_errors;
+@@global.max_connect_errors
+100
+select @@global.max_user_connections;
+@@global.max_user_connections
+0
+select @@global.max_connections;
+@@global.max_connections
+151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
grant select on test.* to 'root'@'192.0.2.4';
select "Con1 is alive";
Con1 is alive
@@ -11,7 +30,7 @@ Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_error_noname";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_error_noname";
select "Con2 is alive";
Con2 is alive
Con2 is alive
@@ -82,4 +101,4 @@ FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
revoke select on test.* from 'root'@'192.0.2.4';
drop user 'root'@'192.0.2.4';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv4_nameinfo_noname_deny.result b/mysql-test/suite/perfschema/r/hostcache_ipv4_nameinfo_noname_deny.result
index 6bb381888f9..f27bda9ada1 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv4_nameinfo_noname_deny.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv4_nameinfo_noname_deny.result
@@ -1,17 +1,36 @@
call mtr.add_suppression("192.0.2.4");
flush status;
flush hosts;
+flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
+select @@global.max_connect_errors;
+@@global.max_connect_errors
+100
+select @@global.max_user_connections;
+@@global.max_user_connections
+0
+select @@global.max_connections;
+@@global.max_connections
+151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
select "Con1 is alive";
Con1 is alive
Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_error_noname";
-ERROR HY000: Host '192.0.2.4' is not allowed to connect to this MySQL server
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_error_noname";
+ERROR HY000: Host '192.0.2.4' is not allowed to connect to this MariaDB server
"Dumping performance_schema.host_cache"
IP 192.0.2.4
HOST NULL
@@ -40,7 +59,7 @@ COUNT_LOCAL_ERRORS 0
COUNT_UNKNOWN_ERRORS 0
FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
-ERROR HY000: Host '192.0.2.4' is not allowed to connect to this MySQL server
+ERROR HY000: Host '192.0.2.4' is not allowed to connect to this MariaDB server
"Dumping performance_schema.host_cache"
IP 192.0.2.4
HOST NULL
@@ -69,4 +88,4 @@ COUNT_LOCAL_ERRORS 0
COUNT_UNKNOWN_ERRORS 0
FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv4_passwd.result b/mysql-test/suite/perfschema/r/hostcache_ipv4_passwd.result
index a7d34ede333..69f68de8d90 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv4_passwd.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv4_passwd.result
@@ -1,9 +1,27 @@
flush status;
flush hosts;
flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
+select @@global.max_connect_errors;
+@@global.max_connect_errors
+100
+select @@global.max_user_connections;
+@@global.max_user_connections
+0
+select @@global.max_connections;
+@@global.max_connections
+151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
create user 'user_without'@'santa.claus.ipv4.example.com';
create user 'user_with'@'santa.claus.ipv4.example.com'
identified by 'good_password';
@@ -15,7 +33,7 @@ Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4";
ERROR 28000: Access denied for user 'user_without'@'santa.claus.ipv4.example.com' (using password: YES)
"Dumping performance_schema.host_cache"
IP 192.0.2.4
@@ -192,4 +210,4 @@ FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
drop user 'user_with'@'santa.claus.ipv4.example.com';
drop user 'user_without'@'santa.claus.ipv4.example.com';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv4_ssl.result b/mysql-test/suite/perfschema/r/hostcache_ipv4_ssl.result
index e6e5b99392f..a3d2b1d273f 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv4_ssl.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv4_ssl.result
@@ -1,9 +1,27 @@
flush status;
flush hosts;
flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
+select @@global.max_connect_errors;
+@@global.max_connect_errors
+100
+select @@global.max_user_connections;
+@@global.max_user_connections
+0
+select @@global.max_connections;
+@@global.max_connections
+151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
create user 'user_ssl'@'santa.claus.ipv4.example.com';
create user 'user_ssl_x509'@'santa.claus.ipv4.example.com'
identified by 'good_password';
@@ -19,7 +37,7 @@ Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4";
ERROR 28000: Access denied for user 'user_ssl'@'santa.claus.ipv4.example.com' (using password: NO)
"Dumping performance_schema.host_cache"
IP 192.0.2.4
@@ -138,4 +156,4 @@ FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
drop user 'user_ssl'@'santa.claus.ipv4.example.com';
drop user 'user_ssl_x509'@'santa.claus.ipv4.example.com';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_again_allow.result b/mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_again_allow.result
index 599fcfe9130..59a12495352 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_again_allow.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_again_allow.result
@@ -1,9 +1,28 @@
call mtr.add_suppression("santa.claus.ipv6.example.com");
flush status;
flush hosts;
+flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
+select @@global.max_connect_errors;
+@@global.max_connect_errors
+100
+select @@global.max_user_connections;
+@@global.max_user_connections
+0
+select @@global.max_connections;
+@@global.max_connections
+151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
grant select on test.* to 'root'@'2001:db8::6:6';
select "Con1 is alive";
Con1 is alive
@@ -11,7 +30,7 @@ Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_error_again";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_error_again";
select "Con2 is alive";
Con2 is alive
Con2 is alive
@@ -82,4 +101,4 @@ FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
revoke select on test.* from 'root'@'2001:db8::6:6';
drop user 'root'@'2001:db8::6:6';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_again_deny.result b/mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_again_deny.result
index d6b579fcb57..d241bede905 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_again_deny.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_again_deny.result
@@ -1,17 +1,36 @@
call mtr.add_suppression("santa.claus.ipv6.example.com");
flush status;
flush hosts;
+flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
+select @@global.max_connect_errors;
+@@global.max_connect_errors
+100
+select @@global.max_user_connections;
+@@global.max_user_connections
+0
+select @@global.max_connections;
+@@global.max_connections
+151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
select "Con1 is alive";
Con1 is alive
Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_error_again";
-ERROR HY000: Host '2001:db8::6:6' is not allowed to connect to this MySQL server
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_error_again";
+ERROR HY000: Host '2001:db8::6:6' is not allowed to connect to this MariaDB server
"Dumping performance_schema.host_cache"
IP 2001:db8::6:6
HOST NULL
@@ -40,7 +59,7 @@ COUNT_LOCAL_ERRORS 0
COUNT_UNKNOWN_ERRORS 0
FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
-ERROR HY000: Host '2001:db8::6:6' is not allowed to connect to this MySQL server
+ERROR HY000: Host '2001:db8::6:6' is not allowed to connect to this MariaDB server
"Dumping performance_schema.host_cache"
IP 2001:db8::6:6
HOST NULL
@@ -69,4 +88,4 @@ COUNT_LOCAL_ERRORS 0
COUNT_UNKNOWN_ERRORS 0
FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_bad_allow.result b/mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_bad_allow.result
index 2a41f269159..904139875c6 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_bad_allow.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_bad_allow.result
@@ -1,9 +1,28 @@
call mtr.add_suppression("santa.claus.ipv6.example.com");
flush status;
flush hosts;
+flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
+select @@global.max_connect_errors;
+@@global.max_connect_errors
+100
+select @@global.max_user_connections;
+@@global.max_user_connections
+0
+select @@global.max_connections;
+@@global.max_connections
+151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
grant select on test.* to 'root'@'2001:db8::6:6';
select "Con1 is alive";
Con1 is alive
@@ -11,7 +30,7 @@ Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_bad_ipv6";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_bad_ipv6";
select "Con2 is alive";
Con2 is alive
Con2 is alive
@@ -82,4 +101,4 @@ FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
revoke select on test.* from 'root'@'2001:db8::6:6';
drop user 'root'@'2001:db8::6:6';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_bad_deny.result b/mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_bad_deny.result
index 786936acc20..b14fb2f955e 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_bad_deny.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_bad_deny.result
@@ -1,17 +1,36 @@
call mtr.add_suppression("santa.claus.ipv6.example.com");
flush status;
flush hosts;
+flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
+select @@global.max_connect_errors;
+@@global.max_connect_errors
+100
+select @@global.max_user_connections;
+@@global.max_user_connections
+0
+select @@global.max_connections;
+@@global.max_connections
+151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
select "Con1 is alive";
Con1 is alive
Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_bad_ipv6";
-ERROR HY000: Host '2001:db8::6:6' is not allowed to connect to this MySQL server
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_bad_ipv6";
+ERROR HY000: Host '2001:db8::6:6' is not allowed to connect to this MariaDB server
"Dumping performance_schema.host_cache"
IP 2001:db8::6:6
HOST NULL
@@ -40,7 +59,7 @@ COUNT_LOCAL_ERRORS 0
COUNT_UNKNOWN_ERRORS 0
FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
-ERROR HY000: Host '2001:db8::6:6' is not allowed to connect to this MySQL server
+ERROR HY000: Host '2001:db8::6:6' is not allowed to connect to this MariaDB server
"Dumping performance_schema.host_cache"
IP 2001:db8::6:6
HOST NULL
@@ -69,4 +88,4 @@ COUNT_LOCAL_ERRORS 0
COUNT_UNKNOWN_ERRORS 0
FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_good_allow.result b/mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_good_allow.result
index b0269234e11..f5b2f43ab91 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_good_allow.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_good_allow.result
@@ -1,9 +1,28 @@
call mtr.add_suppression("192.0.2.4");
flush status;
flush hosts;
+flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
+select @@global.max_connect_errors;
+@@global.max_connect_errors
+100
+select @@global.max_user_connections;
+@@global.max_user_connections
+0
+select @@global.max_connections;
+@@global.max_connections
+151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
grant select on test.* to 'root'@'santa.claus.ipv6.example.com';
select "Con1 is alive";
Con1 is alive
@@ -11,7 +30,7 @@ Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6";
select "Con2 is alive";
Con2 is alive
Con2 is alive
@@ -82,4 +101,4 @@ FIRST_ERROR_SEEN null
LAST_ERROR_SEEN null
revoke select on test.* from 'root'@'santa.claus.ipv6.example.com';
drop user 'root'@'santa.claus.ipv6.example.com';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_good_deny.result b/mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_good_deny.result
index 9bef9efb93c..b88bb94343f 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_good_deny.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_good_deny.result
@@ -1,16 +1,35 @@
flush status;
flush hosts;
+flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
+select @@global.max_connect_errors;
+@@global.max_connect_errors
+100
+select @@global.max_user_connections;
+@@global.max_user_connections
+0
+select @@global.max_connections;
+@@global.max_connections
+151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
select "Con1 is alive";
Con1 is alive
Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6";
-ERROR HY000: Host 'santa.claus.ipv6.example.com' is not allowed to connect to this MySQL server
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6";
+ERROR HY000: Host 'santa.claus.ipv6.example.com' is not allowed to connect to this MariaDB server
"Dumping performance_schema.host_cache"
IP 2001:db8::6:6
HOST santa.claus.ipv6.example.com
@@ -39,7 +58,7 @@ COUNT_LOCAL_ERRORS 0
COUNT_UNKNOWN_ERRORS 0
FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
-ERROR HY000: Host 'santa.claus.ipv6.example.com' is not allowed to connect to this MySQL server
+ERROR HY000: Host 'santa.claus.ipv6.example.com' is not allowed to connect to this MariaDB server
"Dumping performance_schema.host_cache"
IP 2001:db8::6:6
HOST santa.claus.ipv6.example.com
@@ -68,4 +87,4 @@ COUNT_LOCAL_ERRORS 0
COUNT_UNKNOWN_ERRORS 0
FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_noname_allow.result b/mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_noname_allow.result
index 432a0b4eaaf..9cc8a675fcc 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_noname_allow.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_noname_allow.result
@@ -1,9 +1,28 @@
call mtr.add_suppression("santa.claus.ipv6.example.com");
flush status;
flush hosts;
+flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
+select @@global.max_connect_errors;
+@@global.max_connect_errors
+100
+select @@global.max_user_connections;
+@@global.max_user_connections
+0
+select @@global.max_connections;
+@@global.max_connections
+151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
grant select on test.* to 'root'@'santa.claus.ipv6.example.com';
grant select on test.* to 'root'@'2001:db8::6:6';
select "Con1 is alive";
@@ -12,7 +31,7 @@ Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_error_noname";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_error_noname";
select "Con2 is alive";
Con2 is alive
Con2 is alive
@@ -85,4 +104,4 @@ revoke select on test.* from 'root'@'santa.claus.ipv6.example.com';
revoke select on test.* from 'root'@'2001:db8::6:6';
drop user 'root'@'santa.claus.ipv6.example.com';
drop user 'root'@'2001:db8::6:6';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_noname_deny.result b/mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_noname_deny.result
index ea5c6e357ef..73741cf42ed 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_noname_deny.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv6_addrinfo_noname_deny.result
@@ -1,17 +1,36 @@
call mtr.add_suppression("santa.claus.ipv6.example.com");
flush status;
flush hosts;
+flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
+select @@global.max_connect_errors;
+@@global.max_connect_errors
+100
+select @@global.max_user_connections;
+@@global.max_user_connections
+0
+select @@global.max_connections;
+@@global.max_connections
+151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
select "Con1 is alive";
Con1 is alive
Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_error_noname";
-ERROR HY000: Host '2001:db8::6:6' is not allowed to connect to this MySQL server
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_error_noname";
+ERROR HY000: Host '2001:db8::6:6' is not allowed to connect to this MariaDB server
"Dumping performance_schema.host_cache"
IP 2001:db8::6:6
HOST NULL
@@ -40,7 +59,7 @@ COUNT_LOCAL_ERRORS 0
COUNT_UNKNOWN_ERRORS 0
FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
-ERROR HY000: Host '2001:db8::6:6' is not allowed to connect to this MySQL server
+ERROR HY000: Host '2001:db8::6:6' is not allowed to connect to this MariaDB server
"Dumping performance_schema.host_cache"
IP 2001:db8::6:6
HOST NULL
@@ -69,4 +88,4 @@ COUNT_LOCAL_ERRORS 0
COUNT_UNKNOWN_ERRORS 0
FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv6_auth_plugin.result b/mysql-test/suite/perfschema/r/hostcache_ipv6_auth_plugin.result
index 61fd1573067..b0764faf525 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv6_auth_plugin.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv6_auth_plugin.result
@@ -1,8 +1,27 @@
flush status;
flush hosts;
+flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
+select @@global.max_connect_errors;
+@@global.max_connect_errors
+100
+select @@global.max_user_connections;
+@@global.max_user_connections
+0
+select @@global.max_connections;
+@@global.max_connections
+151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
CREATE USER 'plug'@'santa.claus.ipv6.example.com'
IDENTIFIED WITH 'test_plugin_server' AS 'plug_dest';
CREATE USER 'plug_dest'@'santa.claus.ipv6.example.com'
@@ -14,7 +33,7 @@ Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6";
uninstall plugin test_plugin_server;
ERROR HY000: Plugin 'test_plugin_server' is not loaded
"Dumping performance_schema.host_cache"
@@ -177,4 +196,4 @@ REVOKE PROXY ON 'plug_dest'@'santa.claus.ipv6.example.com'
FROM 'plug'@'santa.claus.ipv6.example.com';
DROP USER 'plug'@'santa.claus.ipv6.example.com';
DROP USER 'plug_dest'@'santa.claus.ipv6.example.com';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv6_blocked.result b/mysql-test/suite/perfschema/r/hostcache_ipv6_blocked.result
index 07121b6031f..0097c680c70 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv6_blocked.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv6_blocked.result
@@ -1,12 +1,27 @@
flush status;
flush hosts;
flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
select @@global.max_connect_errors;
@@global.max_connect_errors
-10
+100
+select @@global.max_user_connections;
+@@global.max_user_connections
+0
+select @@global.max_connections;
+@@global.max_connections
+151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
select @@global.max_connect_errors into @saved_max_connect_errors;
set global max_connect_errors = 2;
grant select on test.* to 'root'@'santa.claus.ipv6.example.com';
@@ -16,7 +31,7 @@ Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6,native_password_bad_reply";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6,native_password_bad_reply";
ERROR 28000: Access denied for user 'quota'@'santa.claus.ipv6.example.com' (using password: NO)
"Dumping performance_schema.host_cache"
IP 2001:db8::6:6
@@ -195,4 +210,4 @@ LAST_ERROR_SEEN set
revoke select on test.* from 'root'@'santa.claus.ipv6.example.com';
drop user 'root'@'santa.claus.ipv6.example.com';
set global max_connect_errors = @saved_max_connect_errors;
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv6_max_con.result b/mysql-test/suite/perfschema/r/hostcache_ipv6_max_con.result
index 3bf09f19843..4416b78d009 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv6_max_con.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv6_max_con.result
@@ -1,16 +1,29 @@
flush status;
flush hosts;
flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
+select @@global.max_connect_errors;
+@@global.max_connect_errors
+100
select @@global.max_user_connections;
@@global.max_user_connections
-0
+1024
select @@global.max_connections;
@@global.max_connections
151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
select @@global.max_connections into @saved_max_connections;
+select @@global.max_user_connections into @saved_max_user_connections;
create user 'quota'@'santa.claus.ipv6.example.com';
grant select on test.* to 'quota'@'santa.claus.ipv6.example.com';
grant usage on *.* to 'quota'@'santa.claus.ipv6.example.com'
@@ -21,7 +34,7 @@ Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6";
select "Con2a is alive";
Con2a is alive
Con2a is alive
@@ -641,5 +654,6 @@ COUNT_UNKNOWN_ERRORS 0
FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
set global max_connections = @saved_max_connections;
+set global max_user_connections = @saved_max_user_connections;
drop user 'quota'@'santa.claus.ipv6.example.com';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv6_nameinfo_again_allow.result b/mysql-test/suite/perfschema/r/hostcache_ipv6_nameinfo_again_allow.result
index 755f5b2037d..a2107324c65 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv6_nameinfo_again_allow.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv6_nameinfo_again_allow.result
@@ -1,9 +1,28 @@
call mtr.add_suppression("2001:db8::6:6");
flush status;
flush hosts;
+flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
+select @@global.max_connect_errors;
+@@global.max_connect_errors
+100
+select @@global.max_user_connections;
+@@global.max_user_connections
+0
+select @@global.max_connections;
+@@global.max_connections
+151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
grant select on test.* to 'root'@'2001:db8::6:6';
grant select on test.* to 'root'@'santa.claus.ipv6.example.com';
select "Con1 is alive";
@@ -12,7 +31,7 @@ Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_error_again";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_error_again";
select "Con2 is alive";
Con2 is alive
Con2 is alive
@@ -81,7 +100,7 @@ COUNT_LOCAL_ERRORS 0
COUNT_UNKNOWN_ERRORS 0
FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6";
select "Con4 is alive";
Con4 is alive
Con4 is alive
@@ -154,4 +173,4 @@ revoke select on test.* from 'root'@'2001:db8::6:6';
revoke select on test.* from 'root'@'santa.claus.ipv6.example.com';
drop user 'root'@'2001:db8::6:6';
drop user 'root'@'santa.claus.ipv6.example.com';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv6_nameinfo_again_deny.result b/mysql-test/suite/perfschema/r/hostcache_ipv6_nameinfo_again_deny.result
index da240b24ca6..373ed10677a 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv6_nameinfo_again_deny.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv6_nameinfo_again_deny.result
@@ -1,17 +1,36 @@
call mtr.add_suppression("2001:db8::6:6");
flush status;
flush hosts;
+flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
+select @@global.max_connect_errors;
+@@global.max_connect_errors
+100
+select @@global.max_user_connections;
+@@global.max_user_connections
+0
+select @@global.max_connections;
+@@global.max_connections
+151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
select "Con1 is alive";
Con1 is alive
Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_error_again";
-ERROR HY000: Host '2001:db8::6:6' is not allowed to connect to this MySQL server
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_error_again";
+ERROR HY000: Host '2001:db8::6:6' is not allowed to connect to this MariaDB server
"Dumping performance_schema.host_cache"
IP 2001:db8::6:6
HOST NULL
@@ -40,7 +59,7 @@ COUNT_LOCAL_ERRORS 0
COUNT_UNKNOWN_ERRORS 0
FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
-ERROR HY000: Host '2001:db8::6:6' is not allowed to connect to this MySQL server
+ERROR HY000: Host '2001:db8::6:6' is not allowed to connect to this MariaDB server
"Dumping performance_schema.host_cache"
IP 2001:db8::6:6
HOST NULL
@@ -69,8 +88,8 @@ COUNT_LOCAL_ERRORS 0
COUNT_UNKNOWN_ERRORS 0
FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6";
-ERROR HY000: Host 'santa.claus.ipv6.example.com' is not allowed to connect to this MySQL server
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6";
+ERROR HY000: Host 'santa.claus.ipv6.example.com' is not allowed to connect to this MariaDB server
"Dumping performance_schema.host_cache"
IP 2001:db8::6:6
HOST santa.claus.ipv6.example.com
@@ -99,7 +118,7 @@ COUNT_LOCAL_ERRORS 0
COUNT_UNKNOWN_ERRORS 0
FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
-ERROR HY000: Host 'santa.claus.ipv6.example.com' is not allowed to connect to this MySQL server
+ERROR HY000: Host 'santa.claus.ipv6.example.com' is not allowed to connect to this MariaDB server
"Dumping performance_schema.host_cache"
IP 2001:db8::6:6
HOST santa.claus.ipv6.example.com
@@ -128,4 +147,4 @@ COUNT_LOCAL_ERRORS 0
COUNT_UNKNOWN_ERRORS 0
FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv6_nameinfo_noname_allow.result b/mysql-test/suite/perfschema/r/hostcache_ipv6_nameinfo_noname_allow.result
index 8a4dee22aa2..809644cbe08 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv6_nameinfo_noname_allow.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv6_nameinfo_noname_allow.result
@@ -1,9 +1,28 @@
call mtr.add_suppression("2001:db8::6:6");
flush status;
flush hosts;
+flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
+select @@global.max_connect_errors;
+@@global.max_connect_errors
+100
+select @@global.max_user_connections;
+@@global.max_user_connections
+0
+select @@global.max_connections;
+@@global.max_connections
+151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
grant select on test.* to 'root'@'2001:db8::6:6';
select "Con1 is alive";
Con1 is alive
@@ -11,7 +30,7 @@ Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_error_noname";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_error_noname";
select "Con2 is alive";
Con2 is alive
Con2 is alive
@@ -82,4 +101,4 @@ FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
revoke select on test.* from 'root'@'2001:db8::6:6';
drop user 'root'@'2001:db8::6:6';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv6_nameinfo_noname_deny.result b/mysql-test/suite/perfschema/r/hostcache_ipv6_nameinfo_noname_deny.result
index 63b8a33b8bf..32a06e8105f 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv6_nameinfo_noname_deny.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv6_nameinfo_noname_deny.result
@@ -1,17 +1,36 @@
call mtr.add_suppression("2001:db8::6:6");
flush status;
flush hosts;
+flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
+select @@global.max_connect_errors;
+@@global.max_connect_errors
+100
+select @@global.max_user_connections;
+@@global.max_user_connections
+0
+select @@global.max_connections;
+@@global.max_connections
+151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
select "Con1 is alive";
Con1 is alive
Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_error_noname";
-ERROR HY000: Host '2001:db8::6:6' is not allowed to connect to this MySQL server
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_error_noname";
+ERROR HY000: Host '2001:db8::6:6' is not allowed to connect to this MariaDB server
"Dumping performance_schema.host_cache"
IP 2001:db8::6:6
HOST NULL
@@ -40,7 +59,7 @@ COUNT_LOCAL_ERRORS 0
COUNT_UNKNOWN_ERRORS 0
FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
-ERROR HY000: Host '2001:db8::6:6' is not allowed to connect to this MySQL server
+ERROR HY000: Host '2001:db8::6:6' is not allowed to connect to this MariaDB server
"Dumping performance_schema.host_cache"
IP 2001:db8::6:6
HOST NULL
@@ -69,4 +88,4 @@ COUNT_LOCAL_ERRORS 0
COUNT_UNKNOWN_ERRORS 0
FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv6_passwd.result b/mysql-test/suite/perfschema/r/hostcache_ipv6_passwd.result
index 5d67dc8d943..5c3b363c5cd 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv6_passwd.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv6_passwd.result
@@ -1,9 +1,27 @@
flush status;
flush hosts;
flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
+select @@global.max_connect_errors;
+@@global.max_connect_errors
+100
+select @@global.max_user_connections;
+@@global.max_user_connections
+0
+select @@global.max_connections;
+@@global.max_connections
+151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
create user 'user_without'@'santa.claus.ipv6.example.com';
create user 'user_with'@'santa.claus.ipv6.example.com'
identified by 'good_password';
@@ -15,7 +33,7 @@ Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6";
ERROR 28000: Access denied for user 'user_without'@'santa.claus.ipv6.example.com' (using password: YES)
"Dumping performance_schema.host_cache"
IP 2001:db8::6:6
@@ -192,4 +210,4 @@ FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
drop user 'user_with'@'santa.claus.ipv6.example.com';
drop user 'user_without'@'santa.claus.ipv6.example.com';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_ipv6_ssl.result b/mysql-test/suite/perfschema/r/hostcache_ipv6_ssl.result
index 29c151d24c1..422db77b5ae 100644
--- a/mysql-test/suite/perfschema/r/hostcache_ipv6_ssl.result
+++ b/mysql-test/suite/perfschema/r/hostcache_ipv6_ssl.result
@@ -1,9 +1,27 @@
flush status;
flush hosts;
flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
+select @@global.max_connect_errors;
+@@global.max_connect_errors
+100
+select @@global.max_user_connections;
+@@global.max_user_connections
+0
+select @@global.max_connections;
+@@global.max_connections
+151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
create user 'user_ssl'@'santa.claus.ipv6.example.com';
create user 'user_ssl_x509'@'santa.claus.ipv6.example.com'
identified by 'good_password';
@@ -19,7 +37,7 @@ Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6";
ERROR 28000: Access denied for user 'user_ssl'@'santa.claus.ipv6.example.com' (using password: NO)
"Dumping performance_schema.host_cache"
IP 2001:db8::6:6
@@ -138,4 +156,4 @@ FIRST_ERROR_SEEN set
LAST_ERROR_SEEN set
drop user 'user_ssl'@'santa.claus.ipv6.example.com';
drop user 'user_ssl_x509'@'santa.claus.ipv6.example.com';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/r/hostcache_peer_addr.result b/mysql-test/suite/perfschema/r/hostcache_peer_addr.result
index 0eff14b50f0..70618be6145 100644
--- a/mysql-test/suite/perfschema/r/hostcache_peer_addr.result
+++ b/mysql-test/suite/perfschema/r/hostcache_peer_addr.result
@@ -1,8 +1,27 @@
flush status;
flush hosts;
+flush user_resources;
+flush privileges;
select @@global.debug;
@@global.debug
+select @@global.max_connect_errors;
+@@global.max_connect_errors
+100
+select @@global.max_user_connections;
+@@global.max_user_connections
+0
+select @@global.max_connections;
+@@global.max_connections
+151
+select `User`, `Host` from mysql.`user` where `host` like '%\\%%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '192.%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like '2001:%';
+User Host
+select `User`, `Host` from mysql.`user` where `user` like 'santa.claus.%';
+User Host
show global status like "connection_errors_%";
Variable_name Value
Connection_errors_accept 0
@@ -17,7 +36,7 @@ Con1 is alive
select current_user();
current_user()
root@localhost
-set global debug= "+d,vio_peer_addr_error";
+set global debug_dbug= "+d,vio_peer_addr_error";
ERROR HY000: Can't get hostname for your address
show global status like "connection_errors_%";
Variable_name Value
@@ -38,7 +57,7 @@ Connection_errors_peer_address 2
Connection_errors_select 0
Connection_errors_tcpwrap 0
"Dumping performance_schema.host_cache"
-set global debug= default;
+set global debug_dbug= default;
flush status;
show global status like "connection_errors_%";
Variable_name Value
diff --git a/mysql-test/suite/perfschema/r/indexed_table_io.result b/mysql-test/suite/perfschema/r/indexed_table_io.result
index 6b420323b74..57518673491 100644
--- a/mysql-test/suite/perfschema/r/indexed_table_io.result
+++ b/mysql-test/suite/perfschema/r/indexed_table_io.result
@@ -139,6 +139,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/information_schema.result b/mysql-test/suite/perfschema/r/information_schema.result
index 8497f7ea40d..5e5ce57fb2e 100644
--- a/mysql-test/suite/perfschema/r/information_schema.result
+++ b/mysql-test/suite/perfschema/r/information_schema.result
@@ -33,11 +33,14 @@ performance_schema events_waits_summary_global_by_event_name def
performance_schema file_instances def
performance_schema file_summary_by_event_name def
performance_schema file_summary_by_instance def
+performance_schema host_cache def
performance_schema hosts def
performance_schema mutex_instances def
performance_schema objects_summary_global_by_type def
performance_schema performance_timers def
performance_schema rwlock_instances def
+performance_schema session_account_connect_attrs def
+performance_schema session_connect_attrs def
performance_schema setup_actors def
performance_schema setup_consumers def
performance_schema setup_instruments def
@@ -86,11 +89,14 @@ events_waits_summary_global_by_event_name BASE TABLE PERFORMANCE_SCHEMA
file_instances BASE TABLE PERFORMANCE_SCHEMA
file_summary_by_event_name BASE TABLE PERFORMANCE_SCHEMA
file_summary_by_instance BASE TABLE PERFORMANCE_SCHEMA
+host_cache BASE TABLE PERFORMANCE_SCHEMA
hosts BASE TABLE PERFORMANCE_SCHEMA
mutex_instances BASE TABLE PERFORMANCE_SCHEMA
objects_summary_global_by_type BASE TABLE PERFORMANCE_SCHEMA
performance_timers BASE TABLE PERFORMANCE_SCHEMA
rwlock_instances BASE TABLE PERFORMANCE_SCHEMA
+session_account_connect_attrs BASE TABLE PERFORMANCE_SCHEMA
+session_connect_attrs BASE TABLE PERFORMANCE_SCHEMA
setup_actors BASE TABLE PERFORMANCE_SCHEMA
setup_consumers BASE TABLE PERFORMANCE_SCHEMA
setup_instruments BASE TABLE PERFORMANCE_SCHEMA
@@ -139,11 +145,14 @@ events_waits_summary_global_by_event_name 10 Dynamic
file_instances 10 Dynamic
file_summary_by_event_name 10 Dynamic
file_summary_by_instance 10 Dynamic
+host_cache 10 Dynamic
hosts 10 Fixed
mutex_instances 10 Dynamic
objects_summary_global_by_type 10 Dynamic
performance_timers 10 Fixed
rwlock_instances 10 Dynamic
+session_account_connect_attrs 10 Dynamic
+session_connect_attrs 10 Dynamic
setup_actors 10 Fixed
setup_consumers 10 Dynamic
setup_instruments 10 Dynamic
@@ -192,11 +201,14 @@ events_waits_summary_global_by_event_name 1000 0
file_instances 1000 0
file_summary_by_event_name 1000 0
file_summary_by_instance 1000 0
+host_cache 1000 0
hosts 1000 0
mutex_instances 1000 0
objects_summary_global_by_type 1000 0
performance_timers 5 0
rwlock_instances 1000 0
+session_account_connect_attrs 1000 0
+session_connect_attrs 1000 0
setup_actors 1 0
setup_consumers 12 0
setup_instruments 1000 0
@@ -245,11 +257,14 @@ events_waits_summary_global_by_event_name 0 0
file_instances 0 0
file_summary_by_event_name 0 0
file_summary_by_instance 0 0
+host_cache 0 0
hosts 0 0
mutex_instances 0 0
objects_summary_global_by_type 0 0
performance_timers 0 0
rwlock_instances 0 0
+session_account_connect_attrs 0 0
+session_connect_attrs 0 0
setup_actors 0 0
setup_consumers 0 0
setup_instruments 0 0
@@ -298,11 +313,14 @@ events_waits_summary_global_by_event_name 0 0 NULL
file_instances 0 0 NULL
file_summary_by_event_name 0 0 NULL
file_summary_by_instance 0 0 NULL
+host_cache 0 0 NULL
hosts 0 0 NULL
mutex_instances 0 0 NULL
objects_summary_global_by_type 0 0 NULL
performance_timers 0 0 NULL
rwlock_instances 0 0 NULL
+session_account_connect_attrs 0 0 NULL
+session_connect_attrs 0 0 NULL
setup_actors 0 0 NULL
setup_consumers 0 0 NULL
setup_instruments 0 0 NULL
@@ -351,11 +369,14 @@ events_waits_summary_global_by_event_name NULL NULL NULL
file_instances NULL NULL NULL
file_summary_by_event_name NULL NULL NULL
file_summary_by_instance NULL NULL NULL
+host_cache NULL NULL NULL
hosts NULL NULL NULL
mutex_instances NULL NULL NULL
objects_summary_global_by_type NULL NULL NULL
performance_timers NULL NULL NULL
rwlock_instances NULL NULL NULL
+session_account_connect_attrs NULL NULL NULL
+session_connect_attrs NULL NULL NULL
setup_actors NULL NULL NULL
setup_consumers NULL NULL NULL
setup_instruments NULL NULL NULL
@@ -404,11 +425,14 @@ events_waits_summary_global_by_event_name utf8_general_ci NULL
file_instances utf8_general_ci NULL
file_summary_by_event_name utf8_general_ci NULL
file_summary_by_instance utf8_general_ci NULL
+host_cache utf8_general_ci NULL
hosts utf8_general_ci NULL
mutex_instances utf8_general_ci NULL
objects_summary_global_by_type utf8_general_ci NULL
performance_timers utf8_general_ci NULL
rwlock_instances utf8_general_ci NULL
+session_account_connect_attrs utf8_bin NULL
+session_connect_attrs utf8_bin NULL
setup_actors utf8_general_ci NULL
setup_consumers utf8_general_ci NULL
setup_instruments utf8_general_ci NULL
@@ -457,11 +481,14 @@ events_waits_summary_global_by_event_name
file_instances
file_summary_by_event_name
file_summary_by_instance
+host_cache
hosts
mutex_instances
objects_summary_global_by_type
performance_timers
rwlock_instances
+session_account_connect_attrs
+session_connect_attrs
setup_actors
setup_consumers
setup_instruments
diff --git a/mysql-test/suite/perfschema/r/innodb_table_io.result b/mysql-test/suite/perfschema/r/innodb_table_io.result
index d6dd7e3b122..9b5bd783f88 100644
--- a/mysql-test/suite/perfschema/r/innodb_table_io.result
+++ b/mysql-test/suite/perfschema/r/innodb_table_io.result
@@ -130,6 +130,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/memory_table_io.result b/mysql-test/suite/perfschema/r/memory_table_io.result
index 8d314d52ec3..230de713846 100644
--- a/mysql-test/suite/perfschema/r/memory_table_io.result
+++ b/mysql-test/suite/perfschema/r/memory_table_io.result
@@ -128,6 +128,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/merge_table_io.result b/mysql-test/suite/perfschema/r/merge_table_io.result
index 8d93d3798a4..7f0b602778c 100644
--- a/mysql-test/suite/perfschema/r/merge_table_io.result
+++ b/mysql-test/suite/perfschema/r/merge_table_io.result
@@ -158,6 +158,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/multi_table_io.result b/mysql-test/suite/perfschema/r/multi_table_io.result
index b72cc6b164f..74c8b94c1d5 100644
--- a/mysql-test/suite/perfschema/r/multi_table_io.result
+++ b/mysql-test/suite/perfschema/r/multi_table_io.result
@@ -87,6 +87,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/myisam_file_io.result b/mysql-test/suite/perfschema/r/myisam_file_io.result
index b433d1391b7..826c4563932 100644
--- a/mysql-test/suite/perfschema/r/myisam_file_io.result
+++ b/mysql-test/suite/perfschema/r/myisam_file_io.result
@@ -57,6 +57,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/myisam_table_io.result b/mysql-test/suite/perfschema/r/myisam_table_io.result
index 517252a9dd6..432e5964802 100644
--- a/mysql-test/suite/perfschema/r/myisam_table_io.result
+++ b/mysql-test/suite/perfschema/r/myisam_table_io.result
@@ -126,6 +126,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/nesting.result b/mysql-test/suite/perfschema/r/nesting.result
index cac92025a97..9f449fc3739 100644
--- a/mysql-test/suite/perfschema/r/nesting.result
+++ b/mysql-test/suite/perfschema/r/nesting.result
@@ -55,6 +55,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/ortho_iter.result b/mysql-test/suite/perfschema/r/ortho_iter.result
index 0498fbca25f..8c8cb918025 100644
--- a/mysql-test/suite/perfschema/r/ortho_iter.result
+++ b/mysql-test/suite/perfschema/r/ortho_iter.result
@@ -120,6 +120,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -138,6 +139,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/part_table_io.result b/mysql-test/suite/perfschema/r/part_table_io.result
index 1fa91e82ae0..2aa12851679 100644
--- a/mysql-test/suite/perfschema/r/part_table_io.result
+++ b/mysql-test/suite/perfschema/r/part_table_io.result
@@ -77,70 +77,40 @@ wait/io/table/sql/handler handler.cc: TABLE test marker insert NULL
wait/io/table/sql/handler handler.cc: TABLE test marker insert NULL
wait/io/table/sql/handler handler.cc: TABLE test marker insert NULL
wait/io/table/sql/handler handler.cc: TABLE test no_index_tab insert NULL
-wait/io/table/sql/handler handler.cc: TABLE test no_index_tab insert NULL
wait/io/table/sql/handler handler.cc: TABLE test marker insert NULL
wait/io/table/sql/handler handler.cc: TABLE test no_index_tab insert NULL
-wait/io/table/sql/handler handler.cc: TABLE test no_index_tab insert NULL
wait/io/table/sql/handler handler.cc: TABLE test marker insert NULL
wait/io/table/sql/handler handler.cc: TABLE test no_index_tab insert NULL
-wait/io/table/sql/handler handler.cc: TABLE test no_index_tab insert NULL
wait/io/table/sql/handler handler.cc: TABLE test marker insert NULL
wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
-wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
-wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
-wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
-wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
-wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
wait/io/table/sql/handler handler.cc: TABLE test marker insert NULL
wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
-wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
-wait/io/table/sql/handler handler.cc: TABLE test no_index_tab update NULL
wait/io/table/sql/handler handler.cc: TABLE test no_index_tab update NULL
wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
-wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
-wait/io/table/sql/handler handler.cc: TABLE test no_index_tab update NULL
wait/io/table/sql/handler handler.cc: TABLE test no_index_tab update NULL
wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
-wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
-wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
-wait/io/table/sql/handler handler.cc: TABLE test no_index_tab update NULL
wait/io/table/sql/handler handler.cc: TABLE test no_index_tab update NULL
wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
-wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
wait/io/table/sql/handler handler.cc: TABLE test marker insert NULL
wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
-wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
-wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
wait/io/table/sql/handler handler.cc: TABLE test marker insert NULL
wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
-wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
-wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
-wait/io/table/sql/handler handler.cc: TABLE test no_index_tab delete NULL
wait/io/table/sql/handler handler.cc: TABLE test no_index_tab delete NULL
wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
-wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
wait/io/table/sql/handler handler.cc: TABLE test marker insert NULL
wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
-wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
-wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
-wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
-wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
wait/io/table/sql/handler handler.cc: TABLE test marker insert NULL
wait/io/table/sql/handler handler.cc: TABLE test marker insert NULL
wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
-wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
-wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
-wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
-wait/io/table/sql/handler handler.cc: TABLE test no_index_tab fetch NULL
wait/io/table/sql/handler handler.cc: TABLE test marker insert NULL
wait/io/table/sql/handler handler.cc: TABLE test marker insert NULL
show status like 'performance_schema_%';
@@ -158,6 +128,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/pfs_upgrade.result b/mysql-test/suite/perfschema/r/pfs_upgrade.result
deleted file mode 100644
index 9a2b6524c83..00000000000
--- a/mysql-test/suite/perfschema/r/pfs_upgrade.result
+++ /dev/null
@@ -1,303 +0,0 @@
-drop table if exists test.user_table;
-drop procedure if exists test.user_proc;
-drop function if exists test.user_func;
-drop event if exists test.user_event;
-"Testing mysql_upgrade with TABLE performance_schema.user_table"
-create table test.user_table(a int);
-use performance_schema;
-show tables like "user_table";
-Tables_in_performance_schema (user_table)
-user_table
-ERROR 1050 (42S01) at line ###: Table 'cond_instances' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_current' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_history' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_history_long' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_instance' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_host_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_user_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_account_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_thread_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_global_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'file_instances' already exists
-ERROR 1050 (42S01) at line ###: Table 'file_summary_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'file_summary_by_instance' already exists
-ERROR 1050 (42S01) at line ###: Table 'socket_instances' already exists
-ERROR 1050 (42S01) at line ###: Table 'socket_summary_by_instance' already exists
-ERROR 1050 (42S01) at line ###: Table 'socket_summary_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'mutex_instances' already exists
-ERROR 1050 (42S01) at line ###: Table 'objects_summary_global_by_type' already exists
-ERROR 1050 (42S01) at line ###: Table 'performance_timers' already exists
-ERROR 1050 (42S01) at line ###: Table 'rwlock_instances' already exists
-ERROR 1050 (42S01) at line ###: Table 'setup_actors' already exists
-ERROR 1050 (42S01) at line ###: Table 'setup_consumers' already exists
-ERROR 1050 (42S01) at line ###: Table 'setup_instruments' already exists
-ERROR 1050 (42S01) at line ###: Table 'setup_objects' already exists
-ERROR 1050 (42S01) at line ###: Table 'setup_timers' already exists
-ERROR 1050 (42S01) at line ###: Table 'table_io_waits_summary_by_index_usage' already exists
-ERROR 1050 (42S01) at line ###: Table 'table_io_waits_summary_by_table' already exists
-ERROR 1050 (42S01) at line ###: Table 'table_lock_waits_summary_by_table' already exists
-ERROR 1050 (42S01) at line ###: Table 'threads' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_current' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_history' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_history_long' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_thread_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_host_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_user_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_account_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_global_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_current' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_history' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_history_long' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_thread_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_host_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_user_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_account_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_global_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'hosts' already exists
-ERROR 1050 (42S01) at line ###: Table 'users' already exists
-ERROR 1050 (42S01) at line ###: Table 'accounts' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_digest' already exists
-FATAL ERROR: Upgrade failed
-show tables like "user_table";
-Tables_in_performance_schema (user_table)
-user_table
-use test;
-drop table test.user_table;
-"Testing mysql_upgrade with VIEW performance_schema.user_view"
-create view test.user_view as select "Not supposed to be here";
-use performance_schema;
-show tables like "user_view";
-Tables_in_performance_schema (user_view)
-user_view
-ERROR 1050 (42S01) at line ###: Table 'cond_instances' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_current' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_history' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_history_long' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_instance' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_host_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_user_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_account_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_thread_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_global_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'file_instances' already exists
-ERROR 1050 (42S01) at line ###: Table 'file_summary_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'file_summary_by_instance' already exists
-ERROR 1050 (42S01) at line ###: Table 'socket_instances' already exists
-ERROR 1050 (42S01) at line ###: Table 'socket_summary_by_instance' already exists
-ERROR 1050 (42S01) at line ###: Table 'socket_summary_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'mutex_instances' already exists
-ERROR 1050 (42S01) at line ###: Table 'objects_summary_global_by_type' already exists
-ERROR 1050 (42S01) at line ###: Table 'performance_timers' already exists
-ERROR 1050 (42S01) at line ###: Table 'rwlock_instances' already exists
-ERROR 1050 (42S01) at line ###: Table 'setup_actors' already exists
-ERROR 1050 (42S01) at line ###: Table 'setup_consumers' already exists
-ERROR 1050 (42S01) at line ###: Table 'setup_instruments' already exists
-ERROR 1050 (42S01) at line ###: Table 'setup_objects' already exists
-ERROR 1050 (42S01) at line ###: Table 'setup_timers' already exists
-ERROR 1050 (42S01) at line ###: Table 'table_io_waits_summary_by_index_usage' already exists
-ERROR 1050 (42S01) at line ###: Table 'table_io_waits_summary_by_table' already exists
-ERROR 1050 (42S01) at line ###: Table 'table_lock_waits_summary_by_table' already exists
-ERROR 1050 (42S01) at line ###: Table 'threads' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_current' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_history' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_history_long' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_thread_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_host_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_user_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_account_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_global_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_current' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_history' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_history_long' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_thread_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_host_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_user_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_account_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_global_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'hosts' already exists
-ERROR 1050 (42S01) at line ###: Table 'users' already exists
-ERROR 1050 (42S01) at line ###: Table 'accounts' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_digest' already exists
-FATAL ERROR: Upgrade failed
-show tables like "user_view";
-Tables_in_performance_schema (user_view)
-user_view
-use test;
-drop view test.user_view;
-"Testing mysql_upgrade with PROCEDURE performance_schema.user_proc"
-create procedure test.user_proc()
-select "Not supposed to be here";
-update mysql.proc set db='performance_schema' where name='user_proc';
-ERROR 1050 (42S01) at line ###: Table 'cond_instances' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_current' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_history' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_history_long' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_instance' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_host_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_user_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_account_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_thread_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_global_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'file_instances' already exists
-ERROR 1050 (42S01) at line ###: Table 'file_summary_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'file_summary_by_instance' already exists
-ERROR 1050 (42S01) at line ###: Table 'socket_instances' already exists
-ERROR 1050 (42S01) at line ###: Table 'socket_summary_by_instance' already exists
-ERROR 1050 (42S01) at line ###: Table 'socket_summary_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'mutex_instances' already exists
-ERROR 1050 (42S01) at line ###: Table 'objects_summary_global_by_type' already exists
-ERROR 1050 (42S01) at line ###: Table 'performance_timers' already exists
-ERROR 1050 (42S01) at line ###: Table 'rwlock_instances' already exists
-ERROR 1050 (42S01) at line ###: Table 'setup_actors' already exists
-ERROR 1050 (42S01) at line ###: Table 'setup_consumers' already exists
-ERROR 1050 (42S01) at line ###: Table 'setup_instruments' already exists
-ERROR 1050 (42S01) at line ###: Table 'setup_objects' already exists
-ERROR 1050 (42S01) at line ###: Table 'setup_timers' already exists
-ERROR 1050 (42S01) at line ###: Table 'table_io_waits_summary_by_index_usage' already exists
-ERROR 1050 (42S01) at line ###: Table 'table_io_waits_summary_by_table' already exists
-ERROR 1050 (42S01) at line ###: Table 'table_lock_waits_summary_by_table' already exists
-ERROR 1050 (42S01) at line ###: Table 'threads' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_current' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_history' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_history_long' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_thread_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_host_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_user_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_account_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_global_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_current' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_history' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_history_long' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_thread_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_host_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_user_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_account_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_global_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'hosts' already exists
-ERROR 1050 (42S01) at line ###: Table 'users' already exists
-ERROR 1050 (42S01) at line ###: Table 'accounts' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_digest' already exists
-FATAL ERROR: Upgrade failed
-select name from mysql.proc where db='performance_schema';
-name
-user_proc
-update mysql.proc set db='test' where name='user_proc';
-drop procedure test.user_proc;
-"Testing mysql_upgrade with FUNCTION performance_schema.user_func"
-create function test.user_func() returns integer
-return 0;
-update mysql.proc set db='performance_schema' where name='user_func';
-ERROR 1050 (42S01) at line ###: Table 'cond_instances' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_current' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_history' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_history_long' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_instance' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_host_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_user_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_account_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_thread_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_global_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'file_instances' already exists
-ERROR 1050 (42S01) at line ###: Table 'file_summary_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'file_summary_by_instance' already exists
-ERROR 1050 (42S01) at line ###: Table 'socket_instances' already exists
-ERROR 1050 (42S01) at line ###: Table 'socket_summary_by_instance' already exists
-ERROR 1050 (42S01) at line ###: Table 'socket_summary_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'mutex_instances' already exists
-ERROR 1050 (42S01) at line ###: Table 'objects_summary_global_by_type' already exists
-ERROR 1050 (42S01) at line ###: Table 'performance_timers' already exists
-ERROR 1050 (42S01) at line ###: Table 'rwlock_instances' already exists
-ERROR 1050 (42S01) at line ###: Table 'setup_actors' already exists
-ERROR 1050 (42S01) at line ###: Table 'setup_consumers' already exists
-ERROR 1050 (42S01) at line ###: Table 'setup_instruments' already exists
-ERROR 1050 (42S01) at line ###: Table 'setup_objects' already exists
-ERROR 1050 (42S01) at line ###: Table 'setup_timers' already exists
-ERROR 1050 (42S01) at line ###: Table 'table_io_waits_summary_by_index_usage' already exists
-ERROR 1050 (42S01) at line ###: Table 'table_io_waits_summary_by_table' already exists
-ERROR 1050 (42S01) at line ###: Table 'table_lock_waits_summary_by_table' already exists
-ERROR 1050 (42S01) at line ###: Table 'threads' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_current' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_history' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_history_long' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_thread_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_host_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_user_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_account_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_global_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_current' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_history' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_history_long' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_thread_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_host_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_user_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_account_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_global_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'hosts' already exists
-ERROR 1050 (42S01) at line ###: Table 'users' already exists
-ERROR 1050 (42S01) at line ###: Table 'accounts' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_digest' already exists
-FATAL ERROR: Upgrade failed
-select name from mysql.proc where db='performance_schema';
-name
-user_func
-update mysql.proc set db='test' where name='user_func';
-drop function test.user_func;
-"Testing mysql_upgrade with EVENT performance_schema.user_event"
-create event test.user_event on schedule every 1 day do
-select "not supposed to be here";
-update mysql.event set db='performance_schema' where name='user_event';
-ERROR 1050 (42S01) at line ###: Table 'cond_instances' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_current' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_history' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_history_long' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_instance' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_host_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_user_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_account_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_thread_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_global_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'file_instances' already exists
-ERROR 1050 (42S01) at line ###: Table 'file_summary_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'file_summary_by_instance' already exists
-ERROR 1050 (42S01) at line ###: Table 'socket_instances' already exists
-ERROR 1050 (42S01) at line ###: Table 'socket_summary_by_instance' already exists
-ERROR 1050 (42S01) at line ###: Table 'socket_summary_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'mutex_instances' already exists
-ERROR 1050 (42S01) at line ###: Table 'objects_summary_global_by_type' already exists
-ERROR 1050 (42S01) at line ###: Table 'performance_timers' already exists
-ERROR 1050 (42S01) at line ###: Table 'rwlock_instances' already exists
-ERROR 1050 (42S01) at line ###: Table 'setup_actors' already exists
-ERROR 1050 (42S01) at line ###: Table 'setup_consumers' already exists
-ERROR 1050 (42S01) at line ###: Table 'setup_instruments' already exists
-ERROR 1050 (42S01) at line ###: Table 'setup_objects' already exists
-ERROR 1050 (42S01) at line ###: Table 'setup_timers' already exists
-ERROR 1050 (42S01) at line ###: Table 'table_io_waits_summary_by_index_usage' already exists
-ERROR 1050 (42S01) at line ###: Table 'table_io_waits_summary_by_table' already exists
-ERROR 1050 (42S01) at line ###: Table 'table_lock_waits_summary_by_table' already exists
-ERROR 1050 (42S01) at line ###: Table 'threads' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_current' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_history' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_history_long' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_thread_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_host_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_user_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_account_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_global_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_current' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_history' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_history_long' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_thread_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_host_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_user_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_account_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_global_by_event_name' already exists
-ERROR 1050 (42S01) at line ###: Table 'hosts' already exists
-ERROR 1050 (42S01) at line ###: Table 'users' already exists
-ERROR 1050 (42S01) at line ###: Table 'accounts' already exists
-ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_digest' already exists
-FATAL ERROR: Upgrade failed
-select name from mysql.event where db='performance_schema';
-name
-user_event
-update mysql.event set db='test' where name='user_event';
-drop event test.user_event;
diff --git a/mysql-test/suite/perfschema/r/pfs_upgrade_event.result b/mysql-test/suite/perfschema/r/pfs_upgrade_event.result
new file mode 100644
index 00000000000..cbd684a6232
--- /dev/null
+++ b/mysql-test/suite/perfschema/r/pfs_upgrade_event.result
@@ -0,0 +1,63 @@
+drop event if exists test.user_event;
+"Testing mysql_upgrade with EVENT performance_schema.user_event"
+create event test.user_event on schedule every 1 day do
+select "not supposed to be here";
+update mysql.event set db='performance_schema' where name='user_event';
+ERROR 1050 (42S01) at line ###: Table 'cond_instances' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_current' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_history' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_history_long' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_instance' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_host_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_user_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_account_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_thread_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_global_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'file_instances' already exists
+ERROR 1050 (42S01) at line ###: Table 'file_summary_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'file_summary_by_instance' already exists
+ERROR 1050 (42S01) at line ###: Table 'socket_instances' already exists
+ERROR 1050 (42S01) at line ###: Table 'socket_summary_by_instance' already exists
+ERROR 1050 (42S01) at line ###: Table 'socket_summary_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'host_cache' already exists
+ERROR 1050 (42S01) at line ###: Table 'mutex_instances' already exists
+ERROR 1050 (42S01) at line ###: Table 'objects_summary_global_by_type' already exists
+ERROR 1050 (42S01) at line ###: Table 'performance_timers' already exists
+ERROR 1050 (42S01) at line ###: Table 'rwlock_instances' already exists
+ERROR 1050 (42S01) at line ###: Table 'setup_actors' already exists
+ERROR 1050 (42S01) at line ###: Table 'setup_consumers' already exists
+ERROR 1050 (42S01) at line ###: Table 'setup_instruments' already exists
+ERROR 1050 (42S01) at line ###: Table 'setup_objects' already exists
+ERROR 1050 (42S01) at line ###: Table 'setup_timers' already exists
+ERROR 1050 (42S01) at line ###: Table 'table_io_waits_summary_by_index_usage' already exists
+ERROR 1050 (42S01) at line ###: Table 'table_io_waits_summary_by_table' already exists
+ERROR 1050 (42S01) at line ###: Table 'table_lock_waits_summary_by_table' already exists
+ERROR 1050 (42S01) at line ###: Table 'threads' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_current' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_history' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_history_long' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_thread_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_host_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_user_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_account_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_global_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_current' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_history' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_history_long' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_thread_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_host_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_user_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_account_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_global_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'hosts' already exists
+ERROR 1050 (42S01) at line ###: Table 'users' already exists
+ERROR 1050 (42S01) at line ###: Table 'accounts' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_digest' already exists
+ERROR 1050 (42S01) at line ###: Table 'session_connect_attrs' already exists
+ERROR 1050 (42S01) at line ###: Table 'session_account_connect_attrs' already exists
+FATAL ERROR: Upgrade failed
+select name from mysql.event where db='performance_schema';
+name
+user_event
+update mysql.event set db='test' where name='user_event';
+drop event test.user_event;
diff --git a/mysql-test/suite/perfschema/r/pfs_upgrade_func.result b/mysql-test/suite/perfschema/r/pfs_upgrade_func.result
new file mode 100644
index 00000000000..6978e1ed0a8
--- /dev/null
+++ b/mysql-test/suite/perfschema/r/pfs_upgrade_func.result
@@ -0,0 +1,63 @@
+drop function if exists test.user_func;
+"Testing mysql_upgrade with FUNCTION performance_schema.user_func"
+create function test.user_func() returns integer
+return 0;
+update mysql.proc set db='performance_schema' where name='user_func';
+ERROR 1050 (42S01) at line ###: Table 'cond_instances' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_current' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_history' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_history_long' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_instance' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_host_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_user_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_account_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_thread_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_global_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'file_instances' already exists
+ERROR 1050 (42S01) at line ###: Table 'file_summary_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'file_summary_by_instance' already exists
+ERROR 1050 (42S01) at line ###: Table 'socket_instances' already exists
+ERROR 1050 (42S01) at line ###: Table 'socket_summary_by_instance' already exists
+ERROR 1050 (42S01) at line ###: Table 'socket_summary_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'host_cache' already exists
+ERROR 1050 (42S01) at line ###: Table 'mutex_instances' already exists
+ERROR 1050 (42S01) at line ###: Table 'objects_summary_global_by_type' already exists
+ERROR 1050 (42S01) at line ###: Table 'performance_timers' already exists
+ERROR 1050 (42S01) at line ###: Table 'rwlock_instances' already exists
+ERROR 1050 (42S01) at line ###: Table 'setup_actors' already exists
+ERROR 1050 (42S01) at line ###: Table 'setup_consumers' already exists
+ERROR 1050 (42S01) at line ###: Table 'setup_instruments' already exists
+ERROR 1050 (42S01) at line ###: Table 'setup_objects' already exists
+ERROR 1050 (42S01) at line ###: Table 'setup_timers' already exists
+ERROR 1050 (42S01) at line ###: Table 'table_io_waits_summary_by_index_usage' already exists
+ERROR 1050 (42S01) at line ###: Table 'table_io_waits_summary_by_table' already exists
+ERROR 1050 (42S01) at line ###: Table 'table_lock_waits_summary_by_table' already exists
+ERROR 1050 (42S01) at line ###: Table 'threads' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_current' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_history' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_history_long' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_thread_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_host_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_user_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_account_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_global_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_current' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_history' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_history_long' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_thread_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_host_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_user_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_account_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_global_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'hosts' already exists
+ERROR 1050 (42S01) at line ###: Table 'users' already exists
+ERROR 1050 (42S01) at line ###: Table 'accounts' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_digest' already exists
+ERROR 1050 (42S01) at line ###: Table 'session_connect_attrs' already exists
+ERROR 1050 (42S01) at line ###: Table 'session_account_connect_attrs' already exists
+FATAL ERROR: Upgrade failed
+select name from mysql.proc where db='performance_schema';
+name
+user_func
+update mysql.proc set db='test' where name='user_func';
+drop function test.user_func;
diff --git a/mysql-test/suite/perfschema/r/pfs_upgrade_proc.result b/mysql-test/suite/perfschema/r/pfs_upgrade_proc.result
new file mode 100644
index 00000000000..f5a13fb445d
--- /dev/null
+++ b/mysql-test/suite/perfschema/r/pfs_upgrade_proc.result
@@ -0,0 +1,63 @@
+drop procedure if exists test.user_proc;
+"Testing mysql_upgrade with PROCEDURE performance_schema.user_proc"
+create procedure test.user_proc()
+select "Not supposed to be here";
+update mysql.proc set db='performance_schema' where name='user_proc';
+ERROR 1050 (42S01) at line ###: Table 'cond_instances' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_current' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_history' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_history_long' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_instance' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_host_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_user_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_account_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_thread_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_global_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'file_instances' already exists
+ERROR 1050 (42S01) at line ###: Table 'file_summary_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'file_summary_by_instance' already exists
+ERROR 1050 (42S01) at line ###: Table 'socket_instances' already exists
+ERROR 1050 (42S01) at line ###: Table 'socket_summary_by_instance' already exists
+ERROR 1050 (42S01) at line ###: Table 'socket_summary_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'host_cache' already exists
+ERROR 1050 (42S01) at line ###: Table 'mutex_instances' already exists
+ERROR 1050 (42S01) at line ###: Table 'objects_summary_global_by_type' already exists
+ERROR 1050 (42S01) at line ###: Table 'performance_timers' already exists
+ERROR 1050 (42S01) at line ###: Table 'rwlock_instances' already exists
+ERROR 1050 (42S01) at line ###: Table 'setup_actors' already exists
+ERROR 1050 (42S01) at line ###: Table 'setup_consumers' already exists
+ERROR 1050 (42S01) at line ###: Table 'setup_instruments' already exists
+ERROR 1050 (42S01) at line ###: Table 'setup_objects' already exists
+ERROR 1050 (42S01) at line ###: Table 'setup_timers' already exists
+ERROR 1050 (42S01) at line ###: Table 'table_io_waits_summary_by_index_usage' already exists
+ERROR 1050 (42S01) at line ###: Table 'table_io_waits_summary_by_table' already exists
+ERROR 1050 (42S01) at line ###: Table 'table_lock_waits_summary_by_table' already exists
+ERROR 1050 (42S01) at line ###: Table 'threads' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_current' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_history' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_history_long' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_thread_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_host_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_user_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_account_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_global_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_current' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_history' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_history_long' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_thread_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_host_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_user_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_account_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_global_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'hosts' already exists
+ERROR 1050 (42S01) at line ###: Table 'users' already exists
+ERROR 1050 (42S01) at line ###: Table 'accounts' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_digest' already exists
+ERROR 1050 (42S01) at line ###: Table 'session_connect_attrs' already exists
+ERROR 1050 (42S01) at line ###: Table 'session_account_connect_attrs' already exists
+FATAL ERROR: Upgrade failed
+select name from mysql.proc where db='performance_schema';
+name
+user_proc
+update mysql.proc set db='test' where name='user_proc';
+drop procedure test.user_proc;
diff --git a/mysql-test/suite/perfschema/r/pfs_upgrade_table.result b/mysql-test/suite/perfschema/r/pfs_upgrade_table.result
new file mode 100644
index 00000000000..bb5ba7060ae
--- /dev/null
+++ b/mysql-test/suite/perfschema/r/pfs_upgrade_table.result
@@ -0,0 +1,65 @@
+drop table if exists test.user_table;
+"Testing mysql_upgrade with TABLE performance_schema.user_table"
+create table test.user_table(a int);
+use performance_schema;
+show tables like "user_table";
+Tables_in_performance_schema (user_table)
+user_table
+ERROR 1050 (42S01) at line ###: Table 'cond_instances' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_current' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_history' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_history_long' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_instance' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_host_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_user_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_account_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_thread_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_global_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'file_instances' already exists
+ERROR 1050 (42S01) at line ###: Table 'file_summary_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'file_summary_by_instance' already exists
+ERROR 1050 (42S01) at line ###: Table 'socket_instances' already exists
+ERROR 1050 (42S01) at line ###: Table 'socket_summary_by_instance' already exists
+ERROR 1050 (42S01) at line ###: Table 'socket_summary_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'host_cache' already exists
+ERROR 1050 (42S01) at line ###: Table 'mutex_instances' already exists
+ERROR 1050 (42S01) at line ###: Table 'objects_summary_global_by_type' already exists
+ERROR 1050 (42S01) at line ###: Table 'performance_timers' already exists
+ERROR 1050 (42S01) at line ###: Table 'rwlock_instances' already exists
+ERROR 1050 (42S01) at line ###: Table 'setup_actors' already exists
+ERROR 1050 (42S01) at line ###: Table 'setup_consumers' already exists
+ERROR 1050 (42S01) at line ###: Table 'setup_instruments' already exists
+ERROR 1050 (42S01) at line ###: Table 'setup_objects' already exists
+ERROR 1050 (42S01) at line ###: Table 'setup_timers' already exists
+ERROR 1050 (42S01) at line ###: Table 'table_io_waits_summary_by_index_usage' already exists
+ERROR 1050 (42S01) at line ###: Table 'table_io_waits_summary_by_table' already exists
+ERROR 1050 (42S01) at line ###: Table 'table_lock_waits_summary_by_table' already exists
+ERROR 1050 (42S01) at line ###: Table 'threads' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_current' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_history' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_history_long' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_thread_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_host_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_user_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_account_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_global_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_current' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_history' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_history_long' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_thread_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_host_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_user_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_account_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_global_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'hosts' already exists
+ERROR 1050 (42S01) at line ###: Table 'users' already exists
+ERROR 1050 (42S01) at line ###: Table 'accounts' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_digest' already exists
+ERROR 1050 (42S01) at line ###: Table 'session_connect_attrs' already exists
+ERROR 1050 (42S01) at line ###: Table 'session_account_connect_attrs' already exists
+FATAL ERROR: Upgrade failed
+show tables like "user_table";
+Tables_in_performance_schema (user_table)
+user_table
+use test;
+drop table test.user_table;
diff --git a/mysql-test/suite/perfschema/r/pfs_upgrade_view.result b/mysql-test/suite/perfschema/r/pfs_upgrade_view.result
new file mode 100644
index 00000000000..f9541680b6d
--- /dev/null
+++ b/mysql-test/suite/perfschema/r/pfs_upgrade_view.result
@@ -0,0 +1,65 @@
+drop view if exists test.user_view;
+"Testing mysql_upgrade with VIEW performance_schema.user_view"
+create view test.user_view as select "Not supposed to be here";
+use performance_schema;
+show tables like "user_view";
+Tables_in_performance_schema (user_view)
+user_view
+ERROR 1050 (42S01) at line ###: Table 'cond_instances' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_current' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_history' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_history_long' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_instance' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_host_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_user_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_account_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_by_thread_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_waits_summary_global_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'file_instances' already exists
+ERROR 1050 (42S01) at line ###: Table 'file_summary_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'file_summary_by_instance' already exists
+ERROR 1050 (42S01) at line ###: Table 'socket_instances' already exists
+ERROR 1050 (42S01) at line ###: Table 'socket_summary_by_instance' already exists
+ERROR 1050 (42S01) at line ###: Table 'socket_summary_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'host_cache' already exists
+ERROR 1050 (42S01) at line ###: Table 'mutex_instances' already exists
+ERROR 1050 (42S01) at line ###: Table 'objects_summary_global_by_type' already exists
+ERROR 1050 (42S01) at line ###: Table 'performance_timers' already exists
+ERROR 1050 (42S01) at line ###: Table 'rwlock_instances' already exists
+ERROR 1050 (42S01) at line ###: Table 'setup_actors' already exists
+ERROR 1050 (42S01) at line ###: Table 'setup_consumers' already exists
+ERROR 1050 (42S01) at line ###: Table 'setup_instruments' already exists
+ERROR 1050 (42S01) at line ###: Table 'setup_objects' already exists
+ERROR 1050 (42S01) at line ###: Table 'setup_timers' already exists
+ERROR 1050 (42S01) at line ###: Table 'table_io_waits_summary_by_index_usage' already exists
+ERROR 1050 (42S01) at line ###: Table 'table_io_waits_summary_by_table' already exists
+ERROR 1050 (42S01) at line ###: Table 'table_lock_waits_summary_by_table' already exists
+ERROR 1050 (42S01) at line ###: Table 'threads' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_current' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_history' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_history_long' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_thread_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_host_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_user_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_by_account_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_stages_summary_global_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_current' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_history' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_history_long' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_thread_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_host_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_user_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_account_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_global_by_event_name' already exists
+ERROR 1050 (42S01) at line ###: Table 'hosts' already exists
+ERROR 1050 (42S01) at line ###: Table 'users' already exists
+ERROR 1050 (42S01) at line ###: Table 'accounts' already exists
+ERROR 1050 (42S01) at line ###: Table 'events_statements_summary_by_digest' already exists
+ERROR 1050 (42S01) at line ###: Table 'session_connect_attrs' already exists
+ERROR 1050 (42S01) at line ###: Table 'session_account_connect_attrs' already exists
+FATAL ERROR: Upgrade failed
+show tables like "user_view";
+Tables_in_performance_schema (user_view)
+user_view
+use test;
+drop view test.user_view;
diff --git a/mysql-test/suite/perfschema/r/privilege_table_io.result b/mysql-test/suite/perfschema/r/privilege_table_io.result
index 3881a2ca6b7..2bc96f21cfc 100644
--- a/mysql-test/suite/perfschema/r/privilege_table_io.result
+++ b/mysql-test/suite/perfschema/r/privilege_table_io.result
@@ -8,7 +8,13 @@ truncate table performance_schema.events_waits_history_long;
flush status;
flush tables;
# We are forced to suppress here the server response.
+optimize table mysql.host;
+optimize table mysql.user;
optimize table mysql.db;
+optimize table mysql.proxies_priv;
+optimize table mysql.tables_priv;
+optimize table mysql.procs_priv;
+optimize table mysql.servers;
update performance_schema.setup_consumers set enabled='YES';
update performance_schema.setup_objects set enabled='YES'
where object_type='TABLE' and object_schema= 'mysql';
@@ -45,6 +51,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -63,6 +70,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -117,6 +125,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/rollback_table_io.result b/mysql-test/suite/perfschema/r/rollback_table_io.result
index ec1c1b86fb4..f08d11e21d8 100644
--- a/mysql-test/suite/perfschema/r/rollback_table_io.result
+++ b/mysql-test/suite/perfschema/r/rollback_table_io.result
@@ -69,6 +69,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/schema.result b/mysql-test/suite/perfschema/r/schema.result
index d3b5572c383..050c4461105 100644
--- a/mysql-test/suite/perfschema/r/schema.result
+++ b/mysql-test/suite/perfschema/r/schema.result
@@ -38,11 +38,14 @@ events_waits_summary_global_by_event_name
file_instances
file_summary_by_event_name
file_summary_by_instance
+host_cache
hosts
mutex_instances
objects_summary_global_by_type
performance_timers
rwlock_instances
+session_account_connect_attrs
+session_connect_attrs
setup_actors
setup_consumers
setup_instruments
@@ -73,7 +76,7 @@ cond_instances CREATE TABLE `cond_instances` (
show create table events_stages_current;
Table Create Table
events_stages_current CREATE TABLE `events_stages_current` (
- `THREAD_ID` int(11) NOT NULL,
+ `THREAD_ID` bigint(20) unsigned NOT NULL,
`EVENT_ID` bigint(20) unsigned NOT NULL,
`END_EVENT_ID` bigint(20) unsigned DEFAULT NULL,
`EVENT_NAME` varchar(128) NOT NULL,
@@ -87,7 +90,7 @@ events_stages_current CREATE TABLE `events_stages_current` (
show create table events_stages_history;
Table Create Table
events_stages_history CREATE TABLE `events_stages_history` (
- `THREAD_ID` int(11) NOT NULL,
+ `THREAD_ID` bigint(20) unsigned NOT NULL,
`EVENT_ID` bigint(20) unsigned NOT NULL,
`END_EVENT_ID` bigint(20) unsigned DEFAULT NULL,
`EVENT_NAME` varchar(128) NOT NULL,
@@ -101,7 +104,7 @@ events_stages_history CREATE TABLE `events_stages_history` (
show create table events_stages_history_long;
Table Create Table
events_stages_history_long CREATE TABLE `events_stages_history_long` (
- `THREAD_ID` int(11) NOT NULL,
+ `THREAD_ID` bigint(20) unsigned NOT NULL,
`EVENT_ID` bigint(20) unsigned NOT NULL,
`END_EVENT_ID` bigint(20) unsigned DEFAULT NULL,
`EVENT_NAME` varchar(128) NOT NULL,
@@ -126,7 +129,7 @@ events_stages_summary_by_host_by_event_name CREATE TABLE `events_stages_summary_
show create table events_stages_summary_by_thread_by_event_name;
Table Create Table
events_stages_summary_by_thread_by_event_name CREATE TABLE `events_stages_summary_by_thread_by_event_name` (
- `THREAD_ID` int(11) NOT NULL,
+ `THREAD_ID` bigint(20) unsigned NOT NULL,
`EVENT_NAME` varchar(128) NOT NULL,
`COUNT_STAR` bigint(20) unsigned NOT NULL,
`SUM_TIMER_WAIT` bigint(20) unsigned NOT NULL,
@@ -170,7 +173,7 @@ events_stages_summary_global_by_event_name CREATE TABLE `events_stages_summary_g
show create table events_statements_current;
Table Create Table
events_statements_current CREATE TABLE `events_statements_current` (
- `THREAD_ID` int(11) NOT NULL,
+ `THREAD_ID` bigint(20) unsigned NOT NULL,
`EVENT_ID` bigint(20) unsigned NOT NULL,
`END_EVENT_ID` bigint(20) unsigned DEFAULT NULL,
`EVENT_NAME` varchar(128) NOT NULL,
@@ -214,7 +217,7 @@ events_statements_current CREATE TABLE `events_statements_current` (
show create table events_statements_history;
Table Create Table
events_statements_history CREATE TABLE `events_statements_history` (
- `THREAD_ID` int(11) NOT NULL,
+ `THREAD_ID` bigint(20) unsigned NOT NULL,
`EVENT_ID` bigint(20) unsigned NOT NULL,
`END_EVENT_ID` bigint(20) unsigned DEFAULT NULL,
`EVENT_NAME` varchar(128) NOT NULL,
@@ -258,7 +261,7 @@ events_statements_history CREATE TABLE `events_statements_history` (
show create table events_statements_history_long;
Table Create Table
events_statements_history_long CREATE TABLE `events_statements_history_long` (
- `THREAD_ID` int(11) NOT NULL,
+ `THREAD_ID` bigint(20) unsigned NOT NULL,
`EVENT_ID` bigint(20) unsigned NOT NULL,
`END_EVENT_ID` bigint(20) unsigned DEFAULT NULL,
`EVENT_NAME` varchar(128) NOT NULL,
@@ -299,6 +302,39 @@ events_statements_history_long CREATE TABLE `events_statements_history_long` (
`NESTING_EVENT_ID` bigint(20) unsigned DEFAULT NULL,
`NESTING_EVENT_TYPE` enum('STATEMENT','STAGE','WAIT') DEFAULT NULL
) ENGINE=PERFORMANCE_SCHEMA DEFAULT CHARSET=utf8
+show create table events_statements_summary_by_digest;
+Table Create Table
+events_statements_summary_by_digest CREATE TABLE `events_statements_summary_by_digest` (
+ `SCHEMA_NAME` varchar(64) DEFAULT NULL,
+ `DIGEST` varchar(32) DEFAULT NULL,
+ `DIGEST_TEXT` longtext,
+ `COUNT_STAR` bigint(20) unsigned NOT NULL,
+ `SUM_TIMER_WAIT` bigint(20) unsigned NOT NULL,
+ `MIN_TIMER_WAIT` bigint(20) unsigned NOT NULL,
+ `AVG_TIMER_WAIT` bigint(20) unsigned NOT NULL,
+ `MAX_TIMER_WAIT` bigint(20) unsigned NOT NULL,
+ `SUM_LOCK_TIME` bigint(20) unsigned NOT NULL,
+ `SUM_ERRORS` bigint(20) unsigned NOT NULL,
+ `SUM_WARNINGS` bigint(20) unsigned NOT NULL,
+ `SUM_ROWS_AFFECTED` bigint(20) unsigned NOT NULL,
+ `SUM_ROWS_SENT` bigint(20) unsigned NOT NULL,
+ `SUM_ROWS_EXAMINED` bigint(20) unsigned NOT NULL,
+ `SUM_CREATED_TMP_DISK_TABLES` bigint(20) unsigned NOT NULL,
+ `SUM_CREATED_TMP_TABLES` bigint(20) unsigned NOT NULL,
+ `SUM_SELECT_FULL_JOIN` bigint(20) unsigned NOT NULL,
+ `SUM_SELECT_FULL_RANGE_JOIN` bigint(20) unsigned NOT NULL,
+ `SUM_SELECT_RANGE` bigint(20) unsigned NOT NULL,
+ `SUM_SELECT_RANGE_CHECK` bigint(20) unsigned NOT NULL,
+ `SUM_SELECT_SCAN` bigint(20) unsigned NOT NULL,
+ `SUM_SORT_MERGE_PASSES` bigint(20) unsigned NOT NULL,
+ `SUM_SORT_RANGE` bigint(20) unsigned NOT NULL,
+ `SUM_SORT_ROWS` bigint(20) unsigned NOT NULL,
+ `SUM_SORT_SCAN` bigint(20) unsigned NOT NULL,
+ `SUM_NO_INDEX_USED` bigint(20) unsigned NOT NULL,
+ `SUM_NO_GOOD_INDEX_USED` bigint(20) unsigned NOT NULL,
+ `FIRST_SEEN` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00',
+ `LAST_SEEN` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00'
+) ENGINE=PERFORMANCE_SCHEMA DEFAULT CHARSET=utf8
show create table events_statements_summary_by_host_by_event_name;
Table Create Table
events_statements_summary_by_host_by_event_name CREATE TABLE `events_statements_summary_by_host_by_event_name` (
@@ -332,7 +368,7 @@ events_statements_summary_by_host_by_event_name CREATE TABLE `events_statements_
show create table events_statements_summary_by_thread_by_event_name;
Table Create Table
events_statements_summary_by_thread_by_event_name CREATE TABLE `events_statements_summary_by_thread_by_event_name` (
- `THREAD_ID` int(11) NOT NULL,
+ `THREAD_ID` bigint(20) unsigned NOT NULL,
`EVENT_NAME` varchar(128) NOT NULL,
`COUNT_STAR` bigint(20) unsigned NOT NULL,
`SUM_TIMER_WAIT` bigint(20) unsigned NOT NULL,
@@ -452,7 +488,7 @@ events_statements_summary_global_by_event_name CREATE TABLE `events_statements_s
show create table events_waits_current;
Table Create Table
events_waits_current CREATE TABLE `events_waits_current` (
- `THREAD_ID` int(11) NOT NULL,
+ `THREAD_ID` bigint(20) unsigned NOT NULL,
`EVENT_ID` bigint(20) unsigned NOT NULL,
`END_EVENT_ID` bigint(20) unsigned DEFAULT NULL,
`EVENT_NAME` varchar(128) NOT NULL,
@@ -475,7 +511,7 @@ events_waits_current CREATE TABLE `events_waits_current` (
show create table events_waits_history;
Table Create Table
events_waits_history CREATE TABLE `events_waits_history` (
- `THREAD_ID` int(11) NOT NULL,
+ `THREAD_ID` bigint(20) unsigned NOT NULL,
`EVENT_ID` bigint(20) unsigned NOT NULL,
`END_EVENT_ID` bigint(20) unsigned DEFAULT NULL,
`EVENT_NAME` varchar(128) NOT NULL,
@@ -498,7 +534,7 @@ events_waits_history CREATE TABLE `events_waits_history` (
show create table events_waits_history_long;
Table Create Table
events_waits_history_long CREATE TABLE `events_waits_history_long` (
- `THREAD_ID` int(11) NOT NULL,
+ `THREAD_ID` bigint(20) unsigned NOT NULL,
`EVENT_ID` bigint(20) unsigned NOT NULL,
`END_EVENT_ID` bigint(20) unsigned DEFAULT NULL,
`EVENT_NAME` varchar(128) NOT NULL,
@@ -543,7 +579,7 @@ events_waits_summary_by_instance CREATE TABLE `events_waits_summary_by_instance`
show create table events_waits_summary_by_thread_by_event_name;
Table Create Table
events_waits_summary_by_thread_by_event_name CREATE TABLE `events_waits_summary_by_thread_by_event_name` (
- `THREAD_ID` int(11) NOT NULL,
+ `THREAD_ID` bigint(20) unsigned NOT NULL,
`EVENT_NAME` varchar(128) NOT NULL,
`COUNT_STAR` bigint(20) unsigned NOT NULL,
`SUM_TIMER_WAIT` bigint(20) unsigned NOT NULL,
@@ -648,7 +684,38 @@ file_summary_by_instance CREATE TABLE `file_summary_by_instance` (
`MAX_TIMER_MISC` bigint(20) unsigned NOT NULL
) ENGINE=PERFORMANCE_SCHEMA DEFAULT CHARSET=utf8
show create table host_cache;
-ERROR 42S02: Table 'performance_schema.host_cache' doesn't exist
+Table Create Table
+host_cache CREATE TABLE `host_cache` (
+ `IP` varchar(64) NOT NULL,
+ `HOST` varchar(255) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,
+ `HOST_VALIDATED` enum('YES','NO') NOT NULL,
+ `SUM_CONNECT_ERRORS` bigint(20) NOT NULL,
+ `COUNT_HOST_BLOCKED_ERRORS` bigint(20) NOT NULL,
+ `COUNT_NAMEINFO_TRANSIENT_ERRORS` bigint(20) NOT NULL,
+ `COUNT_NAMEINFO_PERMANENT_ERRORS` bigint(20) NOT NULL,
+ `COUNT_FORMAT_ERRORS` bigint(20) NOT NULL,
+ `COUNT_ADDRINFO_TRANSIENT_ERRORS` bigint(20) NOT NULL,
+ `COUNT_ADDRINFO_PERMANENT_ERRORS` bigint(20) NOT NULL,
+ `COUNT_FCRDNS_ERRORS` bigint(20) NOT NULL,
+ `COUNT_HOST_ACL_ERRORS` bigint(20) NOT NULL,
+ `COUNT_NO_AUTH_PLUGIN_ERRORS` bigint(20) NOT NULL,
+ `COUNT_AUTH_PLUGIN_ERRORS` bigint(20) NOT NULL,
+ `COUNT_HANDSHAKE_ERRORS` bigint(20) NOT NULL,
+ `COUNT_PROXY_USER_ERRORS` bigint(20) NOT NULL,
+ `COUNT_PROXY_USER_ACL_ERRORS` bigint(20) NOT NULL,
+ `COUNT_AUTHENTICATION_ERRORS` bigint(20) NOT NULL,
+ `COUNT_SSL_ERRORS` bigint(20) NOT NULL,
+ `COUNT_MAX_USER_CONNECTIONS_ERRORS` bigint(20) NOT NULL,
+ `COUNT_MAX_USER_CONNECTIONS_PER_HOUR_ERRORS` bigint(20) NOT NULL,
+ `COUNT_DEFAULT_DATABASE_ERRORS` bigint(20) NOT NULL,
+ `COUNT_INIT_CONNECT_ERRORS` bigint(20) NOT NULL,
+ `COUNT_LOCAL_ERRORS` bigint(20) NOT NULL,
+ `COUNT_UNKNOWN_ERRORS` bigint(20) NOT NULL,
+ `FIRST_SEEN` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00',
+ `LAST_SEEN` timestamp NOT NULL DEFAULT '0000-00-00 00:00:00',
+ `FIRST_ERROR_SEEN` timestamp NULL DEFAULT '0000-00-00 00:00:00',
+ `LAST_ERROR_SEEN` timestamp NULL DEFAULT '0000-00-00 00:00:00'
+) ENGINE=PERFORMANCE_SCHEMA DEFAULT CHARSET=utf8
show create table hosts;
Table Create Table
hosts CREATE TABLE `hosts` (
@@ -661,7 +728,7 @@ Table Create Table
mutex_instances CREATE TABLE `mutex_instances` (
`NAME` varchar(128) NOT NULL,
`OBJECT_INSTANCE_BEGIN` bigint(20) unsigned NOT NULL,
- `LOCKED_BY_THREAD_ID` int(11) DEFAULT NULL
+ `LOCKED_BY_THREAD_ID` bigint(20) unsigned DEFAULT NULL
) ENGINE=PERFORMANCE_SCHEMA DEFAULT CHARSET=utf8
show create table objects_summary_global_by_type;
Table Create Table
@@ -688,7 +755,7 @@ Table Create Table
rwlock_instances CREATE TABLE `rwlock_instances` (
`NAME` varchar(128) NOT NULL,
`OBJECT_INSTANCE_BEGIN` bigint(20) unsigned NOT NULL,
- `WRITE_LOCKED_BY_THREAD_ID` int(11) DEFAULT NULL,
+ `WRITE_LOCKED_BY_THREAD_ID` bigint(20) unsigned DEFAULT NULL,
`READ_LOCKED_BY_COUNT` int(10) unsigned NOT NULL
) ENGINE=PERFORMANCE_SCHEMA DEFAULT CHARSET=utf8
show create table setup_actors;
@@ -731,7 +798,7 @@ Table Create Table
socket_instances CREATE TABLE `socket_instances` (
`EVENT_NAME` varchar(128) NOT NULL,
`OBJECT_INSTANCE_BEGIN` bigint(20) unsigned NOT NULL,
- `THREAD_ID` int(11) DEFAULT NULL,
+ `THREAD_ID` bigint(20) unsigned DEFAULT NULL,
`SOCKET_ID` int(11) NOT NULL,
`IP` varchar(64) NOT NULL,
`PORT` int(11) NOT NULL,
@@ -957,10 +1024,10 @@ table_lock_waits_summary_by_table CREATE TABLE `table_lock_waits_summary_by_tabl
show create table threads;
Table Create Table
threads CREATE TABLE `threads` (
- `THREAD_ID` int(11) NOT NULL,
+ `THREAD_ID` bigint(20) unsigned NOT NULL,
`NAME` varchar(128) NOT NULL,
`TYPE` varchar(10) NOT NULL,
- `PROCESSLIST_ID` int(11) DEFAULT NULL,
+ `PROCESSLIST_ID` bigint(20) unsigned DEFAULT NULL,
`PROCESSLIST_USER` varchar(16) DEFAULT NULL,
`PROCESSLIST_HOST` varchar(60) DEFAULT NULL,
`PROCESSLIST_DB` varchar(64) DEFAULT NULL,
@@ -968,7 +1035,7 @@ threads CREATE TABLE `threads` (
`PROCESSLIST_TIME` bigint(20) DEFAULT NULL,
`PROCESSLIST_STATE` varchar(64) DEFAULT NULL,
`PROCESSLIST_INFO` longtext,
- `PARENT_THREAD_ID` int(11) DEFAULT NULL,
+ `PARENT_THREAD_ID` bigint(20) unsigned DEFAULT NULL,
`ROLE` varchar(64) DEFAULT NULL,
`INSTRUMENTED` enum('YES','NO') NOT NULL
) ENGINE=PERFORMANCE_SCHEMA DEFAULT CHARSET=utf8
diff --git a/mysql-test/suite/perfschema/r/short_option_1.result b/mysql-test/suite/perfschema/r/short_option_1.result
index 6004dc96327..d97ece1f67a 100644
--- a/mysql-test/suite/perfschema/r/short_option_1.result
+++ b/mysql-test/suite/perfschema/r/short_option_1.result
@@ -19,6 +19,8 @@ log ON
show variables like 'general_log';
Variable_name Value
general_log ON
+show variables like 'new';
+Variable_name Value
show variables like 'log_warnings';
Variable_name Value
log_warnings 3
diff --git a/mysql-test/suite/perfschema/r/socket_summary_by_instance_func_win.result b/mysql-test/suite/perfschema/r/socket_summary_by_instance_func_win.result
index 2bdc2524bbc..2c5a9e9284f 100644
--- a/mysql-test/suite/perfschema/r/socket_summary_by_instance_func_win.result
+++ b/mysql-test/suite/perfschema/r/socket_summary_by_instance_func_win.result
@@ -59,7 +59,7 @@ ERROR 42S02: Table 'mysqltest.does_not_exist' doesn't exist
# The statement has the same length like in 3.2 but the error
# message is now different and much longer.
SELECT col2 FROM does_not_exist WHERE col1 A 0;
-ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'A 0' at line 1
+ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'A 0' at line 1
# 3.4 SELECT ending with server sending an error message.
# Statement and error message are a bit longer than in 3.1
# because the table name is longer.
diff --git a/mysql-test/suite/perfschema/r/stage_mdl_table.result b/mysql-test/suite/perfschema/r/stage_mdl_table.result
index b64d94f406c..20bb91159da 100644
--- a/mysql-test/suite/perfschema/r/stage_mdl_table.result
+++ b/mysql-test/suite/perfschema/r/stage_mdl_table.result
@@ -34,4 +34,7 @@ user2 stage/sql/checking permissions STATEMENT
user2 stage/sql/checking permissions STATEMENT
user2 stage/sql/init STATEMENT
user2 stage/sql/Opening tables STATEMENT
+user2 stage/sql/setup STATEMENT
+user2 stage/sql/creating table STATEMENT
+user2 stage/sql/After create STATEMENT
commit;
diff --git a/mysql-test/suite/perfschema/r/start_server_disable_idle.result b/mysql-test/suite/perfschema/r/start_server_disable_idle.result
index bde23fff83e..a04e50083b1 100644
--- a/mysql-test/suite/perfschema/r/start_server_disable_idle.result
+++ b/mysql-test/suite/perfschema/r/start_server_disable_idle.result
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
diff --git a/mysql-test/suite/perfschema/r/start_server_disable_stages.result b/mysql-test/suite/perfschema/r/start_server_disable_stages.result
index 94d9efd8beb..61bab26b6b6 100644
--- a/mysql-test/suite/perfschema/r/start_server_disable_stages.result
+++ b/mysql-test/suite/perfschema/r/start_server_disable_stages.result
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
diff --git a/mysql-test/suite/perfschema/r/start_server_disable_statements.result b/mysql-test/suite/perfschema/r/start_server_disable_statements.result
index 9b7bebed018..fb4e3adb3bf 100644
--- a/mysql-test/suite/perfschema/r/start_server_disable_statements.result
+++ b/mysql-test/suite/perfschema/r/start_server_disable_statements.result
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
diff --git a/mysql-test/suite/perfschema/r/start_server_disable_waits.result b/mysql-test/suite/perfschema/r/start_server_disable_waits.result
index 1a5394e41d5..2ce9fe0f265 100644
--- a/mysql-test/suite/perfschema/r/start_server_disable_waits.result
+++ b/mysql-test/suite/perfschema/r/start_server_disable_waits.result
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
diff --git a/mysql-test/suite/perfschema/r/start_server_innodb.result b/mysql-test/suite/perfschema/r/start_server_innodb.result
index 0f2019a6e78..319485d1a55 100644
--- a/mysql-test/suite/perfschema/r/start_server_innodb.result
+++ b/mysql-test/suite/perfschema/r/start_server_innodb.result
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -115,6 +119,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/start_server_no_account.result b/mysql-test/suite/perfschema/r/start_server_no_account.result
index bd07bab8e2f..bf4bb7568af 100644
--- a/mysql-test/suite/perfschema/r/start_server_no_account.result
+++ b/mysql-test/suite/perfschema/r/start_server_no_account.result
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
diff --git a/mysql-test/suite/perfschema/r/start_server_no_cond_class.result b/mysql-test/suite/perfschema/r/start_server_no_cond_class.result
index a210a97a6a6..2b3aefc819b 100644
--- a/mysql-test/suite/perfschema/r/start_server_no_cond_class.result
+++ b/mysql-test/suite/perfschema/r/start_server_no_cond_class.result
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
diff --git a/mysql-test/suite/perfschema/r/start_server_no_cond_inst.result b/mysql-test/suite/perfschema/r/start_server_no_cond_inst.result
index b2c3eedcdea..6e4b90147b4 100644
--- a/mysql-test/suite/perfschema/r/start_server_no_cond_inst.result
+++ b/mysql-test/suite/perfschema/r/start_server_no_cond_inst.result
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
diff --git a/mysql-test/suite/perfschema/r/start_server_no_digests.result b/mysql-test/suite/perfschema/r/start_server_no_digests.result
index 38d6d2750f6..4f6fa9bc5da 100644
--- a/mysql-test/suite/perfschema/r/start_server_no_digests.result
+++ b/mysql-test/suite/perfschema/r/start_server_no_digests.result
@@ -8,6 +8,9 @@ CREATE TABLE t2(a int);
CREATE TABLE t3(a int, b int);
CREATE TABLE t4(a int, b int);
CREATE TABLE t5(a int, b int, c int);
+CREATE TABLE t6(a int, b int, c int, d int);
+CREATE TABLE t11 (c CHAR(4));
+CREATE TABLE t12 (c CHAR(4));
TRUNCATE TABLE performance_schema.events_statements_summary_by_digest;
####################################
# EXECUTION
@@ -35,6 +38,15 @@ INSERT INTO t1 VALUES (1), (2), (3);
INSERT INTO t1 VALUES (1), (2), (3), (4);
INSERT INTO t3 VALUES (1, 2), (3, 4), (5, 6);
INSERT INTO t5 VALUES (1, 2, 3), (4, 5, 6), (7, 8, 9);
+INSERT INTO t1 VALUES (NULL);
+INSERT INTO t3 VALUES (NULL,NULL);
+INSERT INTO t3 VALUES (1,NULL);
+INSERT INTO t3 VALUES (NULL,1);
+INSERT INTO t6 VALUES (NULL, NULL, NULL, NULL);
+INSERT INTO t6 VALUES (1, NULL, NULL, NULL);
+INSERT INTO t6 VALUES (NULL, 2, NULL, NULL);
+INSERT INTO t6 VALUES (1, 2, 3, NULL);
+INSERT INTO t6 VALUES (1, 2, NULL, 4);
SELECT 1 + 1;
1 + 1
2
@@ -61,20 +73,45 @@ CREATE SCHEMA statements_digest_temp;
DROP SCHEMA statements_digest_temp;
CREATE DATABASE statements_digest_temp;
DROP DATABASE statements_digest_temp;
-SELECT 1 from t11;
-ERROR 42S02: Table 'statements_digest.t11' doesn't exist
-create table t11 (c char(4));
-create table t11 (c char(4));
-ERROR 42S01: Table 't11' already exists
-insert into t11 values("MySQL");
+SELECT 1 FROM no_such_table;
+ERROR 42S02: Table 'statements_digest.no_such_table' doesn't exist
+CREATE TABLE dup_table (c char(4));
+CREATE TABLE dup_table (c char(4));
+ERROR 42S01: Table 'dup_table' already exists
+DROP TABLE dup_table;
+INSERT INTO t11 VALUES("MySQL");
Warnings:
Warning 1265 Data truncated for column 'c' at row 1
+PREPARE stmt FROM "SELECT * FROM t12";
+EXECUTE stmt;
+c
+EXECUTE stmt;
+c
+DEALLOCATE PREPARE stmt;
+CREATE PROCEDURE p1() BEGIN SELECT * FROM t12; END//
+CALL p1();
+c
+CALL p1();
+c
+DROP PROCEDURE p1;
+CREATE FUNCTION `func`(a INT, b INT) RETURNS int(11) RETURN a+b //
+select func(3,4);
+func(3,4)
+7
+select func(13,42);
+func(13,42)
+55
+DROP FUNCTION func;
+CREATE TRIGGER trg BEFORE INSERT ON t12 FOR EACH ROW SET @a:=1;
+INSERT INTO t12 VALUES ("abc");
+INSERT INTO t12 VALUES ("def");
+DROP TRIGGER trg;
####################################
# QUERYING PS STATEMENT DIGEST
####################################
-SELECT DIGEST, DIGEST_TEXT, COUNT_STAR, SUM_ROWS_AFFECTED, SUM_WARNINGS,
+SELECT SCHEMA_NAME, DIGEST, DIGEST_TEXT, COUNT_STAR, SUM_ROWS_AFFECTED, SUM_WARNINGS,
SUM_ERRORS FROM performance_schema.events_statements_summary_by_digest;
-DIGEST DIGEST_TEXT COUNT_STAR SUM_ROWS_AFFECTED SUM_WARNINGS SUM_ERRORS
+SCHEMA_NAME DIGEST DIGEST_TEXT COUNT_STAR SUM_ROWS_AFFECTED SUM_WARNINGS SUM_ERRORS
SHOW VARIABLES LIKE "performance_schema_digests_size";
Variable_name Value
performance_schema_digests_size 0
@@ -89,4 +126,7 @@ DROP TABLE IF EXISTS t2;
DROP TABLE IF EXISTS t3;
DROP TABLE IF EXISTS t4;
DROP TABLE IF EXISTS t5;
+DROP TABLE IF EXISTS t6;
+DROP TABLE IF EXISTS t11;
+DROP TABLE IF EXISTS t12;
DROP DATABASE IF EXISTS statements_digest;
diff --git a/mysql-test/suite/perfschema/r/start_server_no_file_class.result b/mysql-test/suite/perfschema/r/start_server_no_file_class.result
index 8720de80017..4e093920917 100644
--- a/mysql-test/suite/perfschema/r/start_server_no_file_class.result
+++ b/mysql-test/suite/perfschema/r/start_server_no_file_class.result
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
diff --git a/mysql-test/suite/perfschema/r/start_server_no_file_inst.result b/mysql-test/suite/perfschema/r/start_server_no_file_inst.result
index f57609092d0..040b3edcb43 100644
--- a/mysql-test/suite/perfschema/r/start_server_no_file_inst.result
+++ b/mysql-test/suite/perfschema/r/start_server_no_file_inst.result
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
diff --git a/mysql-test/suite/perfschema/r/start_server_no_host.result b/mysql-test/suite/perfschema/r/start_server_no_host.result
index 51dab043968..5b5195ff795 100644
--- a/mysql-test/suite/perfschema/r/start_server_no_host.result
+++ b/mysql-test/suite/perfschema/r/start_server_no_host.result
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
diff --git a/mysql-test/suite/perfschema/r/start_server_no_mutex_class.result b/mysql-test/suite/perfschema/r/start_server_no_mutex_class.result
index 870e5524b8c..235174f5b87 100644
--- a/mysql-test/suite/perfschema/r/start_server_no_mutex_class.result
+++ b/mysql-test/suite/perfschema/r/start_server_no_mutex_class.result
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
diff --git a/mysql-test/suite/perfschema/r/start_server_no_mutex_inst.result b/mysql-test/suite/perfschema/r/start_server_no_mutex_inst.result
index 2a26112e693..3ea5aafaed3 100644
--- a/mysql-test/suite/perfschema/r/start_server_no_mutex_inst.result
+++ b/mysql-test/suite/perfschema/r/start_server_no_mutex_inst.result
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
diff --git a/mysql-test/suite/perfschema/r/start_server_no_rwlock_class.result b/mysql-test/suite/perfschema/r/start_server_no_rwlock_class.result
index 397365a9755..355b174bb5b 100644
--- a/mysql-test/suite/perfschema/r/start_server_no_rwlock_class.result
+++ b/mysql-test/suite/perfschema/r/start_server_no_rwlock_class.result
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
diff --git a/mysql-test/suite/perfschema/r/start_server_no_rwlock_inst.result b/mysql-test/suite/perfschema/r/start_server_no_rwlock_inst.result
index f3557a1cf9e..8e13b3ae51d 100644
--- a/mysql-test/suite/perfschema/r/start_server_no_rwlock_inst.result
+++ b/mysql-test/suite/perfschema/r/start_server_no_rwlock_inst.result
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
diff --git a/mysql-test/suite/perfschema/r/start_server_no_setup_actors.result b/mysql-test/suite/perfschema/r/start_server_no_setup_actors.result
index a65f90977af..3808b799fdb 100644
--- a/mysql-test/suite/perfschema/r/start_server_no_setup_actors.result
+++ b/mysql-test/suite/perfschema/r/start_server_no_setup_actors.result
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 0
performance_schema_setup_objects_size 100
performance_schema_users_size 100
diff --git a/mysql-test/suite/perfschema/r/start_server_no_setup_objects.result b/mysql-test/suite/perfschema/r/start_server_no_setup_objects.result
index b08fc537dcd..b83dfd1ba4b 100644
--- a/mysql-test/suite/perfschema/r/start_server_no_setup_objects.result
+++ b/mysql-test/suite/perfschema/r/start_server_no_setup_objects.result
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 0
performance_schema_users_size 100
diff --git a/mysql-test/suite/perfschema/r/start_server_no_socket_class.result b/mysql-test/suite/perfschema/r/start_server_no_socket_class.result
index 2d9b36583a5..37e0c755ca9 100644
--- a/mysql-test/suite/perfschema/r/start_server_no_socket_class.result
+++ b/mysql-test/suite/perfschema/r/start_server_no_socket_class.result
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
diff --git a/mysql-test/suite/perfschema/r/start_server_no_socket_inst.result b/mysql-test/suite/perfschema/r/start_server_no_socket_inst.result
index 1679fba7ac3..585cc1d5281 100644
--- a/mysql-test/suite/perfschema/r/start_server_no_socket_inst.result
+++ b/mysql-test/suite/perfschema/r/start_server_no_socket_inst.result
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
diff --git a/mysql-test/suite/perfschema/r/start_server_no_stage_class.result b/mysql-test/suite/perfschema/r/start_server_no_stage_class.result
index a8d763afa0c..2f80e999077 100644
--- a/mysql-test/suite/perfschema/r/start_server_no_stage_class.result
+++ b/mysql-test/suite/perfschema/r/start_server_no_stage_class.result
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
diff --git a/mysql-test/suite/perfschema/r/start_server_no_stages_history.result b/mysql-test/suite/perfschema/r/start_server_no_stages_history.result
index 40f7e9fa1c6..0c1d772434c 100644
--- a/mysql-test/suite/perfschema/r/start_server_no_stages_history.result
+++ b/mysql-test/suite/perfschema/r/start_server_no_stages_history.result
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
diff --git a/mysql-test/suite/perfschema/r/start_server_no_stages_history_long.result b/mysql-test/suite/perfschema/r/start_server_no_stages_history_long.result
index 48093277aac..3b7653cd4f7 100644
--- a/mysql-test/suite/perfschema/r/start_server_no_stages_history_long.result
+++ b/mysql-test/suite/perfschema/r/start_server_no_stages_history_long.result
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
diff --git a/mysql-test/suite/perfschema/r/start_server_no_statement_class.result b/mysql-test/suite/perfschema/r/start_server_no_statement_class.result
index f918d5a47f1..11113ef5d7d 100644
--- a/mysql-test/suite/perfschema/r/start_server_no_statement_class.result
+++ b/mysql-test/suite/perfschema/r/start_server_no_statement_class.result
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
diff --git a/mysql-test/suite/perfschema/r/start_server_no_statements_history.result b/mysql-test/suite/perfschema/r/start_server_no_statements_history.result
index b6be8047d31..6b6546e3155 100644
--- a/mysql-test/suite/perfschema/r/start_server_no_statements_history.result
+++ b/mysql-test/suite/perfschema/r/start_server_no_statements_history.result
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
diff --git a/mysql-test/suite/perfschema/r/start_server_no_statements_history_long.result b/mysql-test/suite/perfschema/r/start_server_no_statements_history_long.result
index 740d4ad74ef..3955e70d6ed 100644
--- a/mysql-test/suite/perfschema/r/start_server_no_statements_history_long.result
+++ b/mysql-test/suite/perfschema/r/start_server_no_statements_history_long.result
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
diff --git a/mysql-test/suite/perfschema/r/start_server_no_table_hdl.result b/mysql-test/suite/perfschema/r/start_server_no_table_hdl.result
index 6f83d396458..4b037a188bb 100644
--- a/mysql-test/suite/perfschema/r/start_server_no_table_hdl.result
+++ b/mysql-test/suite/perfschema/r/start_server_no_table_hdl.result
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 0
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
diff --git a/mysql-test/suite/perfschema/r/start_server_no_table_inst.result b/mysql-test/suite/perfschema/r/start_server_no_table_inst.result
index 667b28dae88..5bb2e9c7ecf 100644
--- a/mysql-test/suite/perfschema/r/start_server_no_table_inst.result
+++ b/mysql-test/suite/perfschema/r/start_server_no_table_inst.result
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 0
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
diff --git a/mysql-test/suite/perfschema/r/start_server_no_thread_class.result b/mysql-test/suite/perfschema/r/start_server_no_thread_class.result
index e6db3e23483..588e7ef63fa 100644
--- a/mysql-test/suite/perfschema/r/start_server_no_thread_class.result
+++ b/mysql-test/suite/perfschema/r/start_server_no_thread_class.result
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 0
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
diff --git a/mysql-test/suite/perfschema/r/start_server_no_thread_inst.result b/mysql-test/suite/perfschema/r/start_server_no_thread_inst.result
index cc3cd9b1ed3..31003b3070a 100644
--- a/mysql-test/suite/perfschema/r/start_server_no_thread_inst.result
+++ b/mysql-test/suite/perfschema/r/start_server_no_thread_inst.result
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 0
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
diff --git a/mysql-test/suite/perfschema/r/start_server_no_user.result b/mysql-test/suite/perfschema/r/start_server_no_user.result
index d3b69586def..a6eb3bac16e 100644
--- a/mysql-test/suite/perfschema/r/start_server_no_user.result
+++ b/mysql-test/suite/perfschema/r/start_server_no_user.result
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 0
diff --git a/mysql-test/suite/perfschema/r/start_server_no_waits_history.result b/mysql-test/suite/perfschema/r/start_server_no_waits_history.result
index 155ab82c0d4..8906341bc8f 100644
--- a/mysql-test/suite/perfschema/r/start_server_no_waits_history.result
+++ b/mysql-test/suite/perfschema/r/start_server_no_waits_history.result
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
diff --git a/mysql-test/suite/perfschema/r/start_server_no_waits_history_long.result b/mysql-test/suite/perfschema/r/start_server_no_waits_history_long.result
index 1a64bb98d65..d669bf50b2f 100644
--- a/mysql-test/suite/perfschema/r/start_server_no_waits_history_long.result
+++ b/mysql-test/suite/perfschema/r/start_server_no_waits_history_long.result
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
diff --git a/mysql-test/suite/perfschema/r/start_server_nothing.result b/mysql-test/suite/perfschema/r/start_server_nothing.result
index 6d81d08d30e..4e6994d670e 100644
--- a/mysql-test/suite/perfschema/r/start_server_nothing.result
+++ b/mysql-test/suite/perfschema/r/start_server_nothing.result
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 0
performance_schema_max_table_instances 0
performance_schema_max_thread_classes 0
performance_schema_max_thread_instances 0
+performance_schema_session_connect_attrs_size 0
performance_schema_setup_actors_size 0
performance_schema_setup_objects_size 0
performance_schema_users_size 0
@@ -129,6 +133,7 @@ performance_schema_max_table_handles 0
performance_schema_max_table_instances 0
performance_schema_max_thread_classes 0
performance_schema_max_thread_instances 0
+performance_schema_session_connect_attrs_size 0
performance_schema_setup_actors_size 0
performance_schema_setup_objects_size 0
performance_schema_users_size 0
@@ -158,12 +163,12 @@ events_waits_history_long YES
global_instrumentation YES
thread_instrumentation YES
statements_digest YES
-select * from performance_schema.setup_timers;
-NAME TIMER_NAME
-idle MICROSECOND
-wait CYCLE
-stage NANOSECOND
-statement NANOSECOND
+select NAME from performance_schema.setup_timers;
+NAME
+idle
+wait
+stage
+statement
select * from performance_schema.accounts;
USER HOST CURRENT_CONNECTIONS TOTAL_CONNECTIONS
select * from performance_schema.cond_instances;
@@ -227,6 +232,10 @@ select * from performance_schema.file_summary_by_event_name;
EVENT_NAME COUNT_STAR SUM_TIMER_WAIT MIN_TIMER_WAIT AVG_TIMER_WAIT MAX_TIMER_WAIT COUNT_READ SUM_TIMER_READ MIN_TIMER_READ AVG_TIMER_READ MAX_TIMER_READ SUM_NUMBER_OF_BYTES_READ COUNT_WRITE SUM_TIMER_WRITE MIN_TIMER_WRITE AVG_TIMER_WRITE MAX_TIMER_WRITE SUM_NUMBER_OF_BYTES_WRITE COUNT_MISC SUM_TIMER_MISC MIN_TIMER_MISC AVG_TIMER_MISC MAX_TIMER_MISC
select * from performance_schema.file_summary_by_instance;
FILE_NAME EVENT_NAME OBJECT_INSTANCE_BEGIN COUNT_STAR SUM_TIMER_WAIT MIN_TIMER_WAIT AVG_TIMER_WAIT MAX_TIMER_WAIT COUNT_READ SUM_TIMER_READ MIN_TIMER_READ AVG_TIMER_READ MAX_TIMER_READ SUM_NUMBER_OF_BYTES_READ COUNT_WRITE SUM_TIMER_WRITE MIN_TIMER_WRITE AVG_TIMER_WRITE MAX_TIMER_WRITE SUM_NUMBER_OF_BYTES_WRITE COUNT_MISC SUM_TIMER_MISC MIN_TIMER_MISC AVG_TIMER_MISC MAX_TIMER_MISC
+select * from performance_schema.session_account_connect_attrs;
+PROCESSLIST_ID ATTR_NAME ATTR_VALUE ORDINAL_POSITION
+select * from performance_schema.session_connect_attrs;
+PROCESSLIST_ID ATTR_NAME ATTR_VALUE ORDINAL_POSITION
select * from performance_schema.socket_instances;
EVENT_NAME OBJECT_INSTANCE_BEGIN THREAD_ID SOCKET_ID IP PORT STATE
select * from performance_schema.socket_summary_by_instance;
diff --git a/mysql-test/suite/perfschema/r/start_server_off.result b/mysql-test/suite/perfschema/r/start_server_off.result
index 5db8896faca..57fa6444318 100644
--- a/mysql-test/suite/perfschema/r/start_server_off.result
+++ b/mysql-test/suite/perfschema/r/start_server_off.result
@@ -7,16 +7,16 @@ performance_schema
test
select count(*) from performance_schema.performance_timers;
count(*)
-5
+0
select count(*) from performance_schema.setup_consumers;
count(*)
-12
+0
select count(*) > 3 from performance_schema.setup_instruments;
count(*) > 3
0
select count(*) from performance_schema.setup_timers;
count(*)
-4
+0
select * from performance_schema.accounts;
select * from performance_schema.cond_instances;
select * from performance_schema.events_stages_current;
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -115,6 +119,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -126,18 +131,6 @@ Performance_schema_thread_instances_lost 0
Performance_schema_users_lost 0
select * from performance_schema.setup_consumers;
NAME ENABLED
-events_stages_current NO
-events_stages_history NO
-events_stages_history_long NO
-events_statements_current NO
-events_statements_history NO
-events_statements_history_long NO
-events_waits_current NO
-events_waits_history NO
-events_waits_history_long NO
-global_instrumentation NO
-thread_instrumentation NO
-statements_digest YES
select * from performance_schema.setup_instruments;
NAME ENABLED TIMED
select * from performance_schema.setup_actors;
@@ -146,7 +139,20 @@ select * from performance_schema.setup_objects;
OBJECT_TYPE OBJECT_SCHEMA OBJECT_NAME ENABLED TIMED
select * from performance_schema.setup_timers;
NAME TIMER_NAME
-idle MICROSECOND
-wait CYCLE
-stage NANOSECOND
-statement NANOSECOND
+insert into performance_schema.setup_objects values ('TABLE', 'myschema', 'myobject', 'YES', 'YES');
+ERROR HY000: Invalid performance_schema usage.
+insert into performance_schema.setup_actors values ('myhost', 'mysuser', 'myrole');
+ERROR HY000: Invalid performance_schema usage.
+select * from performance_schema.setup_objects;
+OBJECT_TYPE OBJECT_SCHEMA OBJECT_NAME ENABLED TIMED
+update performance_schema.setup_objects set OBJECT_NAME = 'myobject';
+delete from performance_schema.setup_objects;
+select * from performance_schema.setup_actors;
+HOST USER ROLE
+update performance_schema.setup_actors set HOST = 'myhost';
+delete from performance_schema.setup_actors;
+truncate performance_schema.events_stages_history_long;
+truncate performance_schema.events_statements_history_long;
+truncate performance_schema.events_waits_history_long;
+truncate performance_schema.setup_objects;
+truncate performance_schema.setup_actors;
diff --git a/mysql-test/suite/perfschema/r/start_server_on.result b/mysql-test/suite/perfschema/r/start_server_on.result
index 0f2019a6e78..319485d1a55 100644
--- a/mysql-test/suite/perfschema/r/start_server_on.result
+++ b/mysql-test/suite/perfschema/r/start_server_on.result
@@ -31,6 +31,7 @@ select * from performance_schema.events_statements_current;
select * from performance_schema.events_statements_history;
select * from performance_schema.events_statements_history_long;
select * from performance_schema.events_statements_summary_by_account_by_event_name;
+select * from performance_schema.events_statements_summary_by_digest;
select * from performance_schema.events_statements_summary_by_host_by_event_name;
select * from performance_schema.events_statements_summary_by_thread_by_event_name;
select * from performance_schema.events_statements_summary_by_user_by_event_name;
@@ -48,19 +49,21 @@ select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
select * from performance_schema.host_cache;
-select * from performance_schema.socket_instances;
-select * from performance_schema.socket_summary_by_instance;
-select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.hosts;
select * from performance_schema.mutex_instances;
select * from performance_schema.objects_summary_global_by_type;
select * from performance_schema.performance_timers;
select * from performance_schema.rwlock_instances;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_consumers;
select * from performance_schema.setup_instruments;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+select * from performance_schema.socket_instances;
+select * from performance_schema.socket_summary_by_instance;
+select * from performance_schema.socket_summary_by_event_name;
select * from performance_schema.table_io_waits_summary_by_index_usage;
select * from performance_schema.table_io_waits_summary_by_table;
select * from performance_schema.table_lock_waits_summary_by_table;
@@ -95,6 +98,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -115,6 +119,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/statement_digest.result b/mysql-test/suite/perfschema/r/statement_digest.result
index 628775781e6..876ff059ef5 100644
--- a/mysql-test/suite/perfschema/r/statement_digest.result
+++ b/mysql-test/suite/perfschema/r/statement_digest.result
@@ -8,6 +8,9 @@ CREATE TABLE t2(a int);
CREATE TABLE t3(a int, b int);
CREATE TABLE t4(a int, b int);
CREATE TABLE t5(a int, b int, c int);
+CREATE TABLE t6(a int, b int, c int, d int);
+CREATE TABLE t11 (c CHAR(4));
+CREATE TABLE t12 (c CHAR(4));
TRUNCATE TABLE performance_schema.events_statements_summary_by_digest;
####################################
# EXECUTION
@@ -35,6 +38,15 @@ INSERT INTO t1 VALUES (1), (2), (3);
INSERT INTO t1 VALUES (1), (2), (3), (4);
INSERT INTO t3 VALUES (1, 2), (3, 4), (5, 6);
INSERT INTO t5 VALUES (1, 2, 3), (4, 5, 6), (7, 8, 9);
+INSERT INTO t1 VALUES (NULL);
+INSERT INTO t3 VALUES (NULL,NULL);
+INSERT INTO t3 VALUES (1,NULL);
+INSERT INTO t3 VALUES (NULL,1);
+INSERT INTO t6 VALUES (NULL, NULL, NULL, NULL);
+INSERT INTO t6 VALUES (1, NULL, NULL, NULL);
+INSERT INTO t6 VALUES (NULL, 2, NULL, NULL);
+INSERT INTO t6 VALUES (1, 2, 3, NULL);
+INSERT INTO t6 VALUES (1, 2, NULL, 4);
SELECT 1 + 1;
1 + 1
2
@@ -61,42 +73,82 @@ CREATE SCHEMA statements_digest_temp;
DROP SCHEMA statements_digest_temp;
CREATE DATABASE statements_digest_temp;
DROP DATABASE statements_digest_temp;
-SELECT 1 from t11;
-ERROR 42S02: Table 'statements_digest.t11' doesn't exist
-create table t11 (c char(4));
-create table t11 (c char(4));
-ERROR 42S01: Table 't11' already exists
-insert into t11 values("MySQL");
+SELECT 1 FROM no_such_table;
+ERROR 42S02: Table 'statements_digest.no_such_table' doesn't exist
+CREATE TABLE dup_table (c char(4));
+CREATE TABLE dup_table (c char(4));
+ERROR 42S01: Table 'dup_table' already exists
+DROP TABLE dup_table;
+INSERT INTO t11 VALUES("MySQL");
Warnings:
Warning 1265 Data truncated for column 'c' at row 1
+PREPARE stmt FROM "SELECT * FROM t12";
+EXECUTE stmt;
+c
+EXECUTE stmt;
+c
+DEALLOCATE PREPARE stmt;
+CREATE PROCEDURE p1() BEGIN SELECT * FROM t12; END//
+CALL p1();
+c
+CALL p1();
+c
+DROP PROCEDURE p1;
+CREATE FUNCTION `func`(a INT, b INT) RETURNS int(11) RETURN a+b //
+select func(3,4);
+func(3,4)
+7
+select func(13,42);
+func(13,42)
+55
+DROP FUNCTION func;
+CREATE TRIGGER trg BEFORE INSERT ON t12 FOR EACH ROW SET @a:=1;
+INSERT INTO t12 VALUES ("abc");
+INSERT INTO t12 VALUES ("def");
+DROP TRIGGER trg;
####################################
# QUERYING PS STATEMENT DIGEST
####################################
-SELECT DIGEST, DIGEST_TEXT, COUNT_STAR, SUM_ROWS_AFFECTED, SUM_WARNINGS,
+SELECT SCHEMA_NAME, DIGEST, DIGEST_TEXT, COUNT_STAR, SUM_ROWS_AFFECTED, SUM_WARNINGS,
SUM_ERRORS FROM performance_schema.events_statements_summary_by_digest;
-DIGEST DIGEST_TEXT COUNT_STAR SUM_ROWS_AFFECTED SUM_WARNINGS SUM_ERRORS
-025af09b416617ee444962d35913c0ab TRUNCATE TABLE performance_schema . events_statements_summary_by_digest 1 0 0 0
-2448fef9bf02af329f4379caa8311a29 SELECT ? FROM t1 1 0 0 0
-8ffffc32710da95472a3eae27f5a4138 SELECT ? FROM `t1` 1 0 0 0
-0fb24bb23dd5e0781c8be24d072a4e0d SELECT ?, ... FROM t1 2 0 0 0
-bfb5a2acdbc5fce89461e691841090d8 SELECT ? FROM t2 1 0 0 0
-320290df27bd6c85764c3d9293087f6d SELECT ?, ... FROM t2 2 0 0 0
-e71851702cece9c252fe03e12e065471 INSERT INTO t1 VALUES (?) 1 1 0 0
-2bfe58b981242b825ff30e0a23610c01 INSERT INTO t2 VALUES (?) 1 1 0 0
-7dffbc5052092965f9d2739569afbb89 INSERT INTO t3 VALUES (...) 1 1 0 0
-22d4d66694b4eaffa0b2037d9312aae0 INSERT INTO t4 VALUES (...) 1 1 0 0
-d8b582fde31cf51cd5d0ee4e565c5eee INSERT INTO t5 VALUES (...) 1 1 0 0
-c45c72afb3fbdbec45f98072a4ecf6f5 INSERT INTO t1 VALUES (?) /* , ... */ 2 7 0 0
-9a49ff059861b8b0fac613a205c80fcd INSERT INTO t3 VALUES (...) /* , ... */ 1 3 0 0
-bb9851b80e774365eadd37ae7e6efb7f INSERT INTO t5 VALUES (...) /* , ... */ 1 3 0 0
-f130568315c6ad1d3e9804f1877b5f1e SELECT ? + ? 3 0 0 0
-6de2f55944526286ad0a812d0c546851 SELECT ? 1 0 0 0
-00c7b29a063ecaa8b0986db7fb2226a8 CREATE SCHEMA statements_digest_temp 2 2 0 0
-ab15c731548dc40ff43f9bff0ad94c80 DROP SCHEMA statements_digest_temp 2 0 0 0
-42f722a57efba27876a0124a5be1ab5b SELECT ? FROM t11 1 0 0 1
-d98c529e915c29f2244a14921a990335 CREATE TABLE t11 ( c CHARACTER (?) ) 2 0 0 1
-dc1241f077d462bb4d6d096b0e7b2b1a INSERT INTO t11 VALUES (?) 1 1 1 0
-043fc5cdadb7f0300fc8e9c83d768f13 SHOW WARNINGS 1 0 0 0
+SCHEMA_NAME DIGEST DIGEST_TEXT COUNT_STAR SUM_ROWS_AFFECTED SUM_WARNINGS SUM_ERRORS
+statements_digest 172976feb9113e0dc6aa2bbac59a41d7 TRUNCATE TABLE performance_schema . events_statements_summary_by_digest 1 0 0 0
+statements_digest 246bfb7a2c115a3857247496f0a06e08 SELECT ? FROM t1 1 0 0 0
+statements_digest 907d22a5ac65e1b0bfc82abcdf84a8d8 SELECT ? FROM `t1` 1 0 0 0
+statements_digest 3f0d7f4f2bd4a19524543fc56a624c83 SELECT ?, ... FROM t1 2 0 0 0
+statements_digest aac3102cf9419f1651994d4aaafa8c20 SELECT ? FROM t2 1 0 0 0
+statements_digest 279d5b0e21a9c9c646c4b465da5dcd44 SELECT ?, ... FROM t2 2 0 0 0
+statements_digest a036d5e6499bf4645f6a64340c2d8438 INSERT INTO t1 VALUES (?) 1 1 0 0
+statements_digest 159174fa24dc866247d5dced9c722e02 INSERT INTO t2 VALUES (?) 1 1 0 0
+statements_digest a3be3a6183a1e4cb444b4d5a66cdd2b2 INSERT INTO t3 VALUES (...) 4 4 0 0
+statements_digest 0941de9adae9af8685915d60ebed1683 INSERT INTO t4 VALUES (...) 1 1 0 0
+statements_digest aee3244eac2c1faff982545129512427 INSERT INTO t5 VALUES (...) 1 1 0 0
+statements_digest 4cde588fe902029f8988148ceed0cbfd INSERT INTO t1 VALUES (?) /* , ... */ 2 7 0 0
+statements_digest 7cf6e181ccf6a9b09ee2d2f51f736d80 INSERT INTO t3 VALUES (...) /* , ... */ 1 3 0 0
+statements_digest 987ef81982bd577fdc2af56fe921929f INSERT INTO t5 VALUES (...) /* , ... */ 1 3 0 0
+statements_digest 5d5e2ef70ec7fe7d032f355108196a54 INSERT INTO t1 VALUES ( NULL ) 1 1 0 0
+statements_digest 2b4cd139a8a9ad1cb63b947283c8dd67 INSERT INTO t6 VALUES (...) 5 5 0 0
+statements_digest 7c4b227b74c9a1ca5e2beca9b32259ba SELECT ? + ? 3 0 0 0
+statements_digest dd253e864da5a0da21a8123f1ae62884 SELECT ? 1 0 0 0
+statements_digest 4eadbe94b51e0d5fda438dcef8df01a2 CREATE SCHEMA statements_digest_temp 2 2 0 0
+statements_digest bfedee4db6e9a7a04da672d6fa34715a DROP SCHEMA statements_digest_temp 2 0 0 0
+statements_digest c4eeebe7dc1822f01e7b451ba4621cc7 SELECT ? FROM no_such_table 1 0 0 1
+statements_digest a08b576290907ef16f2aa465b73fac27 CREATE TABLE dup_table ( c CHARACTER (?) ) 2 0 0 1
+statements_digest a8c52b07321246d4da76fb363035febb DROP TABLE dup_table 1 0 0 0
+statements_digest 7d1828b42909655dddee855f03bc838a INSERT INTO t11 VALUES (?) 1 1 1 0
+statements_digest 9ba09d3e124eee1b713313da9fff39b6 SHOW WARNINGS 1 0 0 0
+statements_digest 4c0f5e01e17fb5890796cbb748d3a794 PREPARE stmt FROM ? 1 0 0 0
+statements_digest c26d5948e897228bdd2d079bcf94bf6d EXECUTE stmt 2 0 0 0
+statements_digest 59dbac4c2a79eda07b2580ec00de6e61 DEALLOCATE PREPARE stmt 1 0 0 0
+statements_digest 615b2b6dbac1151d115c0ecb7e4f21ae CREATE PROCEDURE p1 ( ) BEGIN SELECT * FROM t12 ; END 1 0 0 0
+statements_digest 409b182c50d83079ecdfdb38aa5eec91 CALL p1 ( ) 2 0 0 0
+statements_digest 41700a1312963051979149240eabd249 DROP PROCEDURE p1 1 0 0 0
+statements_digest 62ff7dd6ae64d2f692ddae8bbdf8f9bc CREATE FUNCTION `func` ( a INTEGER , b INTEGER ) RETURNS INTEGER (?) RETURN a + b 1 0 0 0
+statements_digest d8ad5d7a1d04081b952dd069c06cdb52 SELECT func (...) 2 0 0 0
+statements_digest 1e7ef179d2dbf6ab4dfbcc38c82f818a DROP FUNCTION func 1 0 0 0
+statements_digest bd575ad35bc8b3e3defaf15492f8c6a4 CREATE TRIGGER trg BEFORE INSERT ON t12 FOR EACH ROW SET @ ? := ? 1 0 0 0
+statements_digest 8b8adfe407b0cb1f777ad2f6988d91ee INSERT INTO t12 VALUES (?) 2 2 0 0
+statements_digest 3a4c46884c810a7536a4b6515102d9f9 DROP TRIGGER trg 1 0 0 0
####################################
# CLEANUP
####################################
@@ -105,4 +157,7 @@ DROP TABLE IF EXISTS t2;
DROP TABLE IF EXISTS t3;
DROP TABLE IF EXISTS t4;
DROP TABLE IF EXISTS t5;
+DROP TABLE IF EXISTS t6;
+DROP TABLE IF EXISTS t11;
+DROP TABLE IF EXISTS t12;
DROP DATABASE IF EXISTS statements_digest;
diff --git a/mysql-test/suite/perfschema/r/statement_digest_consumers.result b/mysql-test/suite/perfschema/r/statement_digest_consumers.result
index 3b96736ba17..ff987ecd5f3 100644
--- a/mysql-test/suite/perfschema/r/statement_digest_consumers.result
+++ b/mysql-test/suite/perfschema/r/statement_digest_consumers.result
@@ -8,6 +8,9 @@ CREATE TABLE t2(a int);
CREATE TABLE t3(a int, b int);
CREATE TABLE t4(a int, b int);
CREATE TABLE t5(a int, b int, c int);
+CREATE TABLE t6(a int, b int, c int, d int);
+CREATE TABLE t11 (c CHAR(4));
+CREATE TABLE t12 (c CHAR(4));
SELECT * FROM performance_schema.setup_consumers;
NAME ENABLED
events_stages_current YES
@@ -49,6 +52,15 @@ INSERT INTO t1 VALUES (1), (2), (3);
INSERT INTO t1 VALUES (1), (2), (3), (4);
INSERT INTO t3 VALUES (1, 2), (3, 4), (5, 6);
INSERT INTO t5 VALUES (1, 2, 3), (4, 5, 6), (7, 8, 9);
+INSERT INTO t1 VALUES (NULL);
+INSERT INTO t3 VALUES (NULL,NULL);
+INSERT INTO t3 VALUES (1,NULL);
+INSERT INTO t3 VALUES (NULL,1);
+INSERT INTO t6 VALUES (NULL, NULL, NULL, NULL);
+INSERT INTO t6 VALUES (1, NULL, NULL, NULL);
+INSERT INTO t6 VALUES (NULL, 2, NULL, NULL);
+INSERT INTO t6 VALUES (1, 2, 3, NULL);
+INSERT INTO t6 VALUES (1, 2, NULL, 4);
SELECT 1 + 1;
1 + 1
2
@@ -75,41 +87,81 @@ CREATE SCHEMA statements_digest_temp;
DROP SCHEMA statements_digest_temp;
CREATE DATABASE statements_digest_temp;
DROP DATABASE statements_digest_temp;
-SELECT 1 from t11;
-ERROR 42S02: Table 'statements_digest.t11' doesn't exist
-create table t11 (c char(4));
-create table t11 (c char(4));
-ERROR 42S01: Table 't11' already exists
-insert into t11 values("MySQL");
+SELECT 1 FROM no_such_table;
+ERROR 42S02: Table 'statements_digest.no_such_table' doesn't exist
+CREATE TABLE dup_table (c char(4));
+CREATE TABLE dup_table (c char(4));
+ERROR 42S01: Table 'dup_table' already exists
+DROP TABLE dup_table;
+INSERT INTO t11 VALUES("MySQL");
Warnings:
Warning 1265 Data truncated for column 'c' at row 1
+PREPARE stmt FROM "SELECT * FROM t12";
+EXECUTE stmt;
+c
+EXECUTE stmt;
+c
+DEALLOCATE PREPARE stmt;
+CREATE PROCEDURE p1() BEGIN SELECT * FROM t12; END//
+CALL p1();
+c
+CALL p1();
+c
+DROP PROCEDURE p1;
+CREATE FUNCTION `func`(a INT, b INT) RETURNS int(11) RETURN a+b //
+select func(3,4);
+func(3,4)
+7
+select func(13,42);
+func(13,42)
+55
+DROP FUNCTION func;
+CREATE TRIGGER trg BEFORE INSERT ON t12 FOR EACH ROW SET @a:=1;
+INSERT INTO t12 VALUES ("abc");
+INSERT INTO t12 VALUES ("def");
+DROP TRIGGER trg;
####################################
# QUERYING PS STATEMENT DIGEST
####################################
-SELECT digest, digest_text, count_star FROM performance_schema.events_statements_summary_by_digest;
-digest digest_text count_star
-025af09b416617ee444962d35913c0ab TRUNCATE TABLE performance_schema . events_statements_summary_by_digest 1
-2448fef9bf02af329f4379caa8311a29 SELECT ? FROM t1 1
-8ffffc32710da95472a3eae27f5a4138 SELECT ? FROM `t1` 1
-0fb24bb23dd5e0781c8be24d072a4e0d SELECT ?, ... FROM t1 2
-bfb5a2acdbc5fce89461e691841090d8 SELECT ? FROM t2 1
-320290df27bd6c85764c3d9293087f6d SELECT ?, ... FROM t2 2
-e71851702cece9c252fe03e12e065471 INSERT INTO t1 VALUES (?) 1
-2bfe58b981242b825ff30e0a23610c01 INSERT INTO t2 VALUES (?) 1
-7dffbc5052092965f9d2739569afbb89 INSERT INTO t3 VALUES (...) 1
-22d4d66694b4eaffa0b2037d9312aae0 INSERT INTO t4 VALUES (...) 1
-d8b582fde31cf51cd5d0ee4e565c5eee INSERT INTO t5 VALUES (...) 1
-c45c72afb3fbdbec45f98072a4ecf6f5 INSERT INTO t1 VALUES (?) /* , ... */ 2
-9a49ff059861b8b0fac613a205c80fcd INSERT INTO t3 VALUES (...) /* , ... */ 1
-bb9851b80e774365eadd37ae7e6efb7f INSERT INTO t5 VALUES (...) /* , ... */ 1
-f130568315c6ad1d3e9804f1877b5f1e SELECT ? + ? 3
-6de2f55944526286ad0a812d0c546851 SELECT ? 1
-00c7b29a063ecaa8b0986db7fb2226a8 CREATE SCHEMA statements_digest_temp 2
-ab15c731548dc40ff43f9bff0ad94c80 DROP SCHEMA statements_digest_temp 2
-42f722a57efba27876a0124a5be1ab5b SELECT ? FROM t11 1
-d98c529e915c29f2244a14921a990335 CREATE TABLE t11 ( c CHARACTER (?) ) 2
-dc1241f077d462bb4d6d096b0e7b2b1a INSERT INTO t11 VALUES (?) 1
-043fc5cdadb7f0300fc8e9c83d768f13 SHOW WARNINGS 1
+SELECT schema_name, digest, digest_text, count_star FROM performance_schema.events_statements_summary_by_digest;
+schema_name digest digest_text count_star
+statements_digest 172976feb9113e0dc6aa2bbac59a41d7 TRUNCATE TABLE performance_schema . events_statements_summary_by_digest 1
+statements_digest 246bfb7a2c115a3857247496f0a06e08 SELECT ? FROM t1 1
+statements_digest 907d22a5ac65e1b0bfc82abcdf84a8d8 SELECT ? FROM `t1` 1
+statements_digest 3f0d7f4f2bd4a19524543fc56a624c83 SELECT ?, ... FROM t1 2
+statements_digest aac3102cf9419f1651994d4aaafa8c20 SELECT ? FROM t2 1
+statements_digest 279d5b0e21a9c9c646c4b465da5dcd44 SELECT ?, ... FROM t2 2
+statements_digest a036d5e6499bf4645f6a64340c2d8438 INSERT INTO t1 VALUES (?) 1
+statements_digest 159174fa24dc866247d5dced9c722e02 INSERT INTO t2 VALUES (?) 1
+statements_digest a3be3a6183a1e4cb444b4d5a66cdd2b2 INSERT INTO t3 VALUES (...) 4
+statements_digest 0941de9adae9af8685915d60ebed1683 INSERT INTO t4 VALUES (...) 1
+statements_digest aee3244eac2c1faff982545129512427 INSERT INTO t5 VALUES (...) 1
+statements_digest 4cde588fe902029f8988148ceed0cbfd INSERT INTO t1 VALUES (?) /* , ... */ 2
+statements_digest 7cf6e181ccf6a9b09ee2d2f51f736d80 INSERT INTO t3 VALUES (...) /* , ... */ 1
+statements_digest 987ef81982bd577fdc2af56fe921929f INSERT INTO t5 VALUES (...) /* , ... */ 1
+statements_digest 5d5e2ef70ec7fe7d032f355108196a54 INSERT INTO t1 VALUES ( NULL ) 1
+statements_digest 2b4cd139a8a9ad1cb63b947283c8dd67 INSERT INTO t6 VALUES (...) 5
+statements_digest 7c4b227b74c9a1ca5e2beca9b32259ba SELECT ? + ? 3
+statements_digest dd253e864da5a0da21a8123f1ae62884 SELECT ? 1
+statements_digest 4eadbe94b51e0d5fda438dcef8df01a2 CREATE SCHEMA statements_digest_temp 2
+statements_digest bfedee4db6e9a7a04da672d6fa34715a DROP SCHEMA statements_digest_temp 2
+statements_digest c4eeebe7dc1822f01e7b451ba4621cc7 SELECT ? FROM no_such_table 1
+statements_digest a08b576290907ef16f2aa465b73fac27 CREATE TABLE dup_table ( c CHARACTER (?) ) 2
+statements_digest a8c52b07321246d4da76fb363035febb DROP TABLE dup_table 1
+statements_digest 7d1828b42909655dddee855f03bc838a INSERT INTO t11 VALUES (?) 1
+statements_digest 9ba09d3e124eee1b713313da9fff39b6 SHOW WARNINGS 1
+statements_digest 4c0f5e01e17fb5890796cbb748d3a794 PREPARE stmt FROM ? 1
+statements_digest c26d5948e897228bdd2d079bcf94bf6d EXECUTE stmt 2
+statements_digest 59dbac4c2a79eda07b2580ec00de6e61 DEALLOCATE PREPARE stmt 1
+statements_digest 615b2b6dbac1151d115c0ecb7e4f21ae CREATE PROCEDURE p1 ( ) BEGIN SELECT * FROM t12 ; END 1
+statements_digest 409b182c50d83079ecdfdb38aa5eec91 CALL p1 ( ) 2
+statements_digest 41700a1312963051979149240eabd249 DROP PROCEDURE p1 1
+statements_digest 62ff7dd6ae64d2f692ddae8bbdf8f9bc CREATE FUNCTION `func` ( a INTEGER , b INTEGER ) RETURNS INTEGER (?) RETURN a + b 1
+statements_digest d8ad5d7a1d04081b952dd069c06cdb52 SELECT func (...) 2
+statements_digest 1e7ef179d2dbf6ab4dfbcc38c82f818a DROP FUNCTION func 1
+statements_digest bd575ad35bc8b3e3defaf15492f8c6a4 CREATE TRIGGER trg BEFORE INSERT ON t12 FOR EACH ROW SET @ ? := ? 1
+statements_digest 8b8adfe407b0cb1f777ad2f6988d91ee INSERT INTO t12 VALUES (?) 2
+statements_digest 3a4c46884c810a7536a4b6515102d9f9 DROP TRIGGER trg 1
SELECT digest, digest_text FROM performance_schema.events_statements_current;
digest digest_text
####################################
@@ -120,4 +172,7 @@ DROP TABLE IF EXISTS t2;
DROP TABLE IF EXISTS t3;
DROP TABLE IF EXISTS t4;
DROP TABLE IF EXISTS t5;
+DROP TABLE IF EXISTS t6;
+DROP TABLE IF EXISTS t11;
+DROP TABLE IF EXISTS t12;
DROP DATABASE IF EXISTS statements_digest;
diff --git a/mysql-test/suite/perfschema/r/statement_digest_consumers2.result b/mysql-test/suite/perfschema/r/statement_digest_consumers2.result
index 57c92aca988..434914c65e7 100644
--- a/mysql-test/suite/perfschema/r/statement_digest_consumers2.result
+++ b/mysql-test/suite/perfschema/r/statement_digest_consumers2.result
@@ -8,6 +8,9 @@ CREATE TABLE t2(a int);
CREATE TABLE t3(a int, b int);
CREATE TABLE t4(a int, b int);
CREATE TABLE t5(a int, b int, c int);
+CREATE TABLE t6(a int, b int, c int, d int);
+CREATE TABLE t11 (c CHAR(4));
+CREATE TABLE t12 (c CHAR(4));
SELECT * FROM performance_schema.setup_consumers;
NAME ENABLED
events_stages_current YES
@@ -49,6 +52,15 @@ INSERT INTO t1 VALUES (1), (2), (3);
INSERT INTO t1 VALUES (1), (2), (3), (4);
INSERT INTO t3 VALUES (1, 2), (3, 4), (5, 6);
INSERT INTO t5 VALUES (1, 2, 3), (4, 5, 6), (7, 8, 9);
+INSERT INTO t1 VALUES (NULL);
+INSERT INTO t3 VALUES (NULL,NULL);
+INSERT INTO t3 VALUES (1,NULL);
+INSERT INTO t3 VALUES (NULL,1);
+INSERT INTO t6 VALUES (NULL, NULL, NULL, NULL);
+INSERT INTO t6 VALUES (1, NULL, NULL, NULL);
+INSERT INTO t6 VALUES (NULL, 2, NULL, NULL);
+INSERT INTO t6 VALUES (1, 2, 3, NULL);
+INSERT INTO t6 VALUES (1, 2, NULL, 4);
SELECT 1 + 1;
1 + 1
2
@@ -75,19 +87,44 @@ CREATE SCHEMA statements_digest_temp;
DROP SCHEMA statements_digest_temp;
CREATE DATABASE statements_digest_temp;
DROP DATABASE statements_digest_temp;
-SELECT 1 from t11;
-ERROR 42S02: Table 'statements_digest.t11' doesn't exist
-create table t11 (c char(4));
-create table t11 (c char(4));
-ERROR 42S01: Table 't11' already exists
-insert into t11 values("MySQL");
+SELECT 1 FROM no_such_table;
+ERROR 42S02: Table 'statements_digest.no_such_table' doesn't exist
+CREATE TABLE dup_table (c char(4));
+CREATE TABLE dup_table (c char(4));
+ERROR 42S01: Table 'dup_table' already exists
+DROP TABLE dup_table;
+INSERT INTO t11 VALUES("MySQL");
Warnings:
Warning 1265 Data truncated for column 'c' at row 1
+PREPARE stmt FROM "SELECT * FROM t12";
+EXECUTE stmt;
+c
+EXECUTE stmt;
+c
+DEALLOCATE PREPARE stmt;
+CREATE PROCEDURE p1() BEGIN SELECT * FROM t12; END//
+CALL p1();
+c
+CALL p1();
+c
+DROP PROCEDURE p1;
+CREATE FUNCTION `func`(a INT, b INT) RETURNS int(11) RETURN a+b //
+select func(3,4);
+func(3,4)
+7
+select func(13,42);
+func(13,42)
+55
+DROP FUNCTION func;
+CREATE TRIGGER trg BEFORE INSERT ON t12 FOR EACH ROW SET @a:=1;
+INSERT INTO t12 VALUES ("abc");
+INSERT INTO t12 VALUES ("def");
+DROP TRIGGER trg;
####################################
# QUERYING PS STATEMENT DIGEST
####################################
-SELECT digest, digest_text, count_star FROM performance_schema.events_statements_summary_by_digest;
-digest digest_text count_star
+SELECT schema_name, digest, digest_text, count_star FROM performance_schema.events_statements_summary_by_digest;
+schema_name digest digest_text count_star
SELECT digest, digest_text FROM performance_schema.events_statements_current;
digest digest_text
NULL NULL
@@ -99,4 +136,7 @@ DROP TABLE IF EXISTS t2;
DROP TABLE IF EXISTS t3;
DROP TABLE IF EXISTS t4;
DROP TABLE IF EXISTS t5;
+DROP TABLE IF EXISTS t6;
+DROP TABLE IF EXISTS t11;
+DROP TABLE IF EXISTS t12;
DROP DATABASE IF EXISTS statements_digest;
diff --git a/mysql-test/suite/perfschema/r/statement_digest_long_query.result b/mysql-test/suite/perfschema/r/statement_digest_long_query.result
index a7c28822185..0d832402e89 100644
--- a/mysql-test/suite/perfschema/r/statement_digest_long_query.result
+++ b/mysql-test/suite/perfschema/r/statement_digest_long_query.result
@@ -6,7 +6,7 @@ SELECT 1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1
####################################
# QUERYING PS STATEMENT DIGEST
####################################
-SELECT digest, digest_text, count_star FROM events_statements_summary_by_digest;
-digest digest_text count_star
-5c6b2b48a2a39d5dce7ab6ff18ba12d7 TRUNCATE TABLE events_statements_summary_by_digest 1
-3eec87a1ca63856db1629def2300543e SELECT ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ... 1
+SELECT schema_name, digest, digest_text, count_star FROM events_statements_summary_by_digest;
+schema_name digest digest_text count_star
+performance_schema b94b1531dc75a46d0674ff4a7c95abf7 TRUNCATE TABLE events_statements_summary_by_digest 1
+performance_schema 1c412ba09370b4dbafb9aabf2fe7aa51 SELECT ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ? + ... 1
diff --git a/mysql-test/suite/perfschema/r/table_aggregate_global_2u_2t.result b/mysql-test/suite/perfschema/r/table_aggregate_global_2u_2t.result
index 5db83182106..b1cd921ee35 100644
--- a/mysql-test/suite/perfschema/r/table_aggregate_global_2u_2t.result
+++ b/mysql-test/suite/perfschema/r/table_aggregate_global_2u_2t.result
@@ -68,6 +68,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -86,6 +87,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -148,7 +150,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -297,7 +301,9 @@ object_type object_schema object_name count_star
TABLE test t1 18
TABLE test t2 0
TABLE test t3 31
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -463,7 +469,9 @@ object_type object_schema object_name count_star
TABLE test t1 40
TABLE test t2 0
TABLE test t3 72
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -646,7 +654,9 @@ object_type object_schema object_name count_star
TABLE test t1 65
TABLE test t2 0
TABLE test t3 123
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1731,6 +1741,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_aggregate_global_2u_3t.result b/mysql-test/suite/perfschema/r/table_aggregate_global_2u_3t.result
index 5e31faf740b..156d2644afb 100644
--- a/mysql-test/suite/perfschema/r/table_aggregate_global_2u_3t.result
+++ b/mysql-test/suite/perfschema/r/table_aggregate_global_2u_3t.result
@@ -67,6 +67,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -85,6 +86,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -147,7 +149,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -296,7 +300,9 @@ object_type object_schema object_name count_star
TABLE test t1 18
TABLE test t2 24
TABLE test t3 31
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -462,7 +468,9 @@ object_type object_schema object_name count_star
TABLE test t1 40
TABLE test t2 54
TABLE test t3 72
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -645,7 +653,9 @@ object_type object_schema object_name count_star
TABLE test t1 65
TABLE test t2 90
TABLE test t3 123
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1730,6 +1740,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_aggregate_global_4u_2t.result b/mysql-test/suite/perfschema/r/table_aggregate_global_4u_2t.result
index eacd331f5a8..35a3a2ebb29 100644
--- a/mysql-test/suite/perfschema/r/table_aggregate_global_4u_2t.result
+++ b/mysql-test/suite/perfschema/r/table_aggregate_global_4u_2t.result
@@ -68,6 +68,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -86,6 +87,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -148,7 +150,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -297,7 +301,9 @@ object_type object_schema object_name count_star
TABLE test t1 18
TABLE test t2 0
TABLE test t3 31
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -463,7 +469,9 @@ object_type object_schema object_name count_star
TABLE test t1 40
TABLE test t2 0
TABLE test t3 72
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -646,7 +654,9 @@ object_type object_schema object_name count_star
TABLE test t1 65
TABLE test t2 0
TABLE test t3 123
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1731,6 +1741,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_aggregate_global_4u_3t.result b/mysql-test/suite/perfschema/r/table_aggregate_global_4u_3t.result
index 2035cdbe84f..ab67bade127 100644
--- a/mysql-test/suite/perfschema/r/table_aggregate_global_4u_3t.result
+++ b/mysql-test/suite/perfschema/r/table_aggregate_global_4u_3t.result
@@ -67,6 +67,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -85,6 +86,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -147,7 +149,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -296,7 +300,9 @@ object_type object_schema object_name count_star
TABLE test t1 18
TABLE test t2 24
TABLE test t3 31
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -462,7 +468,9 @@ object_type object_schema object_name count_star
TABLE test t1 40
TABLE test t2 54
TABLE test t3 72
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -645,7 +653,9 @@ object_type object_schema object_name count_star
TABLE test t1 65
TABLE test t2 90
TABLE test t3 123
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1730,6 +1740,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_aggregate_hist_2u_2t.result b/mysql-test/suite/perfschema/r/table_aggregate_hist_2u_2t.result
index 5496d6bc755..1980aba6db9 100644
--- a/mysql-test/suite/perfschema/r/table_aggregate_hist_2u_2t.result
+++ b/mysql-test/suite/perfschema/r/table_aggregate_hist_2u_2t.result
@@ -66,6 +66,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -84,6 +85,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -146,7 +148,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -299,7 +303,9 @@ object_type object_schema object_name count_star
TABLE test t1 18
TABLE test t2 0
TABLE test t3 31
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -473,7 +479,9 @@ object_type object_schema object_name count_star
TABLE test t1 18
TABLE test t2 0
TABLE test t3 31
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -664,7 +672,9 @@ object_type object_schema object_name count_star
TABLE test t1 43
TABLE test t2 0
TABLE test t3 82
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1805,6 +1815,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_aggregate_hist_2u_3t.result b/mysql-test/suite/perfschema/r/table_aggregate_hist_2u_3t.result
index 83ebe3ccad0..3e2f2dba337 100644
--- a/mysql-test/suite/perfschema/r/table_aggregate_hist_2u_3t.result
+++ b/mysql-test/suite/perfschema/r/table_aggregate_hist_2u_3t.result
@@ -65,6 +65,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -83,6 +84,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -145,7 +147,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -300,7 +304,9 @@ object_type object_schema object_name count_star
TABLE test t1 18
TABLE test t2 24
TABLE test t3 31
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -478,7 +484,9 @@ object_type object_schema object_name count_star
TABLE test t1 18
TABLE test t2 24
TABLE test t3 31
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -673,7 +681,9 @@ object_type object_schema object_name count_star
TABLE test t1 43
TABLE test t2 60
TABLE test t3 82
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1842,6 +1852,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_aggregate_hist_4u_2t.result b/mysql-test/suite/perfschema/r/table_aggregate_hist_4u_2t.result
index 985a3cee16f..fd74760c784 100644
--- a/mysql-test/suite/perfschema/r/table_aggregate_hist_4u_2t.result
+++ b/mysql-test/suite/perfschema/r/table_aggregate_hist_4u_2t.result
@@ -66,6 +66,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -84,6 +85,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -146,7 +148,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -299,7 +303,9 @@ object_type object_schema object_name count_star
TABLE test t1 18
TABLE test t2 0
TABLE test t3 31
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -473,7 +479,9 @@ object_type object_schema object_name count_star
TABLE test t1 40
TABLE test t2 0
TABLE test t3 72
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -664,7 +672,9 @@ object_type object_schema object_name count_star
TABLE test t1 65
TABLE test t2 0
TABLE test t3 123
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1805,6 +1815,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_aggregate_hist_4u_3t.result b/mysql-test/suite/perfschema/r/table_aggregate_hist_4u_3t.result
index d3c6e468c5a..329080013e5 100644
--- a/mysql-test/suite/perfschema/r/table_aggregate_hist_4u_3t.result
+++ b/mysql-test/suite/perfschema/r/table_aggregate_hist_4u_3t.result
@@ -65,6 +65,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -83,6 +84,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -145,7 +147,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -300,7 +304,9 @@ object_type object_schema object_name count_star
TABLE test t1 18
TABLE test t2 24
TABLE test t3 31
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -478,7 +484,9 @@ object_type object_schema object_name count_star
TABLE test t1 40
TABLE test t2 54
TABLE test t3 72
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -673,7 +681,9 @@ object_type object_schema object_name count_star
TABLE test t1 65
TABLE test t2 90
TABLE test t3 123
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1842,6 +1852,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_aggregate_off.result b/mysql-test/suite/perfschema/r/table_aggregate_off.result
index 0b36f7f559d..ec4963b516d 100644
--- a/mysql-test/suite/perfschema/r/table_aggregate_off.result
+++ b/mysql-test/suite/perfschema/r/table_aggregate_off.result
@@ -67,6 +67,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -85,6 +86,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -147,7 +149,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -296,7 +300,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -462,7 +468,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -645,7 +653,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1730,6 +1740,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_aggregate_thread_2u_2t.result b/mysql-test/suite/perfschema/r/table_aggregate_thread_2u_2t.result
index 1898ee90d72..bc65143f461 100644
--- a/mysql-test/suite/perfschema/r/table_aggregate_thread_2u_2t.result
+++ b/mysql-test/suite/perfschema/r/table_aggregate_thread_2u_2t.result
@@ -68,6 +68,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -86,6 +87,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -148,7 +150,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -297,7 +301,9 @@ object_type object_schema object_name count_star
TABLE test t1 18
TABLE test t2 0
TABLE test t3 31
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -463,7 +469,9 @@ object_type object_schema object_name count_star
TABLE test t1 18
TABLE test t2 0
TABLE test t3 31
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -646,7 +654,9 @@ object_type object_schema object_name count_star
TABLE test t1 43
TABLE test t2 0
TABLE test t3 82
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1731,6 +1741,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_aggregate_thread_2u_3t.result b/mysql-test/suite/perfschema/r/table_aggregate_thread_2u_3t.result
index 72665ace56e..1d89fa9fa67 100644
--- a/mysql-test/suite/perfschema/r/table_aggregate_thread_2u_3t.result
+++ b/mysql-test/suite/perfschema/r/table_aggregate_thread_2u_3t.result
@@ -67,6 +67,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -85,6 +86,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -147,7 +149,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -296,7 +300,9 @@ object_type object_schema object_name count_star
TABLE test t1 18
TABLE test t2 24
TABLE test t3 31
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -462,7 +468,9 @@ object_type object_schema object_name count_star
TABLE test t1 18
TABLE test t2 24
TABLE test t3 31
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -645,7 +653,9 @@ object_type object_schema object_name count_star
TABLE test t1 43
TABLE test t2 60
TABLE test t3 82
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1730,6 +1740,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_aggregate_thread_4u_2t.result b/mysql-test/suite/perfschema/r/table_aggregate_thread_4u_2t.result
index 0f552bf578d..342acd7bc11 100644
--- a/mysql-test/suite/perfschema/r/table_aggregate_thread_4u_2t.result
+++ b/mysql-test/suite/perfschema/r/table_aggregate_thread_4u_2t.result
@@ -68,6 +68,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -86,6 +87,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -148,7 +150,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -297,7 +301,9 @@ object_type object_schema object_name count_star
TABLE test t1 18
TABLE test t2 0
TABLE test t3 31
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -463,7 +469,9 @@ object_type object_schema object_name count_star
TABLE test t1 40
TABLE test t2 0
TABLE test t3 72
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -646,7 +654,9 @@ object_type object_schema object_name count_star
TABLE test t1 65
TABLE test t2 0
TABLE test t3 123
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1731,6 +1741,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_aggregate_thread_4u_3t.result b/mysql-test/suite/perfschema/r/table_aggregate_thread_4u_3t.result
index ac9c539430b..114aaa7010f 100644
--- a/mysql-test/suite/perfschema/r/table_aggregate_thread_4u_3t.result
+++ b/mysql-test/suite/perfschema/r/table_aggregate_thread_4u_3t.result
@@ -67,6 +67,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -85,6 +86,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -147,7 +149,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -296,7 +300,9 @@ object_type object_schema object_name count_star
TABLE test t1 18
TABLE test t2 24
TABLE test t3 31
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -462,7 +468,9 @@ object_type object_schema object_name count_star
TABLE test t1 40
TABLE test t2 54
TABLE test t3 72
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -645,7 +653,9 @@ object_type object_schema object_name count_star
TABLE test t1 65
TABLE test t2 90
TABLE test t3 123
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1730,6 +1740,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_io_aggregate_global_2u_2t.result b/mysql-test/suite/perfschema/r/table_io_aggregate_global_2u_2t.result
index 278d0051a04..5438b8c5158 100644
--- a/mysql-test/suite/perfschema/r/table_io_aggregate_global_2u_2t.result
+++ b/mysql-test/suite/perfschema/r/table_io_aggregate_global_2u_2t.result
@@ -70,6 +70,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -88,6 +89,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -150,7 +152,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -299,7 +303,9 @@ object_type object_schema object_name count_star
TABLE test t1 8
TABLE test t2 0
TABLE test t3 17
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -465,7 +471,9 @@ object_type object_schema object_name count_star
TABLE test t1 20
TABLE test t2 0
TABLE test t3 44
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -648,7 +656,9 @@ object_type object_schema object_name count_star
TABLE test t1 35
TABLE test t2 0
TABLE test t3 81
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1733,6 +1743,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_io_aggregate_global_2u_3t.result b/mysql-test/suite/perfschema/r/table_io_aggregate_global_2u_3t.result
index d6725a70b68..9c05c78eb9a 100644
--- a/mysql-test/suite/perfschema/r/table_io_aggregate_global_2u_3t.result
+++ b/mysql-test/suite/perfschema/r/table_io_aggregate_global_2u_3t.result
@@ -69,6 +69,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -87,6 +88,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -149,7 +151,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -298,7 +302,9 @@ object_type object_schema object_name count_star
TABLE test t1 8
TABLE test t2 12
TABLE test t3 17
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -464,7 +470,9 @@ object_type object_schema object_name count_star
TABLE test t1 20
TABLE test t2 30
TABLE test t3 44
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -647,7 +655,9 @@ object_type object_schema object_name count_star
TABLE test t1 35
TABLE test t2 54
TABLE test t3 81
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1732,6 +1742,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_io_aggregate_global_4u_2t.result b/mysql-test/suite/perfschema/r/table_io_aggregate_global_4u_2t.result
index fdaef3f147b..82bef33c9a1 100644
--- a/mysql-test/suite/perfschema/r/table_io_aggregate_global_4u_2t.result
+++ b/mysql-test/suite/perfschema/r/table_io_aggregate_global_4u_2t.result
@@ -70,6 +70,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -88,6 +89,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -150,7 +152,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -299,7 +303,9 @@ object_type object_schema object_name count_star
TABLE test t1 8
TABLE test t2 0
TABLE test t3 17
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -465,7 +471,9 @@ object_type object_schema object_name count_star
TABLE test t1 20
TABLE test t2 0
TABLE test t3 44
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -648,7 +656,9 @@ object_type object_schema object_name count_star
TABLE test t1 35
TABLE test t2 0
TABLE test t3 81
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1733,6 +1743,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_io_aggregate_global_4u_3t.result b/mysql-test/suite/perfschema/r/table_io_aggregate_global_4u_3t.result
index 4adb2eaf89d..28a37f8a4a0 100644
--- a/mysql-test/suite/perfschema/r/table_io_aggregate_global_4u_3t.result
+++ b/mysql-test/suite/perfschema/r/table_io_aggregate_global_4u_3t.result
@@ -69,6 +69,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -87,6 +88,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -149,7 +151,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -298,7 +302,9 @@ object_type object_schema object_name count_star
TABLE test t1 8
TABLE test t2 12
TABLE test t3 17
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -464,7 +470,9 @@ object_type object_schema object_name count_star
TABLE test t1 20
TABLE test t2 30
TABLE test t3 44
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -647,7 +655,9 @@ object_type object_schema object_name count_star
TABLE test t1 35
TABLE test t2 54
TABLE test t3 81
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1732,6 +1742,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_io_aggregate_hist_2u_2t.result b/mysql-test/suite/perfschema/r/table_io_aggregate_hist_2u_2t.result
index 0af737eee96..fc75720abe6 100644
--- a/mysql-test/suite/perfschema/r/table_io_aggregate_hist_2u_2t.result
+++ b/mysql-test/suite/perfschema/r/table_io_aggregate_hist_2u_2t.result
@@ -68,6 +68,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -86,6 +87,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -148,7 +150,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -299,7 +303,9 @@ object_type object_schema object_name count_star
TABLE test t1 8
TABLE test t2 0
TABLE test t3 17
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -469,7 +475,9 @@ object_type object_schema object_name count_star
TABLE test t1 8
TABLE test t2 0
TABLE test t3 17
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -656,7 +664,9 @@ object_type object_schema object_name count_star
TABLE test t1 23
TABLE test t2 0
TABLE test t3 54
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1769,6 +1779,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_io_aggregate_hist_2u_3t.result b/mysql-test/suite/perfschema/r/table_io_aggregate_hist_2u_3t.result
index 6fab82b38df..739380507c5 100644
--- a/mysql-test/suite/perfschema/r/table_io_aggregate_hist_2u_3t.result
+++ b/mysql-test/suite/perfschema/r/table_io_aggregate_hist_2u_3t.result
@@ -67,6 +67,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -85,6 +86,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -147,7 +149,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -299,7 +303,9 @@ object_type object_schema object_name count_star
TABLE test t1 8
TABLE test t2 12
TABLE test t3 17
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -471,7 +477,9 @@ object_type object_schema object_name count_star
TABLE test t1 8
TABLE test t2 12
TABLE test t3 17
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -660,7 +668,9 @@ object_type object_schema object_name count_star
TABLE test t1 23
TABLE test t2 36
TABLE test t3 54
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1787,6 +1797,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_io_aggregate_hist_4u_2t.result b/mysql-test/suite/perfschema/r/table_io_aggregate_hist_4u_2t.result
index 0338bde63a2..789761490e9 100644
--- a/mysql-test/suite/perfschema/r/table_io_aggregate_hist_4u_2t.result
+++ b/mysql-test/suite/perfschema/r/table_io_aggregate_hist_4u_2t.result
@@ -68,6 +68,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -86,6 +87,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -148,7 +150,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -299,7 +303,9 @@ object_type object_schema object_name count_star
TABLE test t1 8
TABLE test t2 0
TABLE test t3 17
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -469,7 +475,9 @@ object_type object_schema object_name count_star
TABLE test t1 20
TABLE test t2 0
TABLE test t3 44
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -656,7 +664,9 @@ object_type object_schema object_name count_star
TABLE test t1 35
TABLE test t2 0
TABLE test t3 81
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1769,6 +1779,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_io_aggregate_hist_4u_3t.result b/mysql-test/suite/perfschema/r/table_io_aggregate_hist_4u_3t.result
index 29ee1bda675..2ec1c21e634 100644
--- a/mysql-test/suite/perfschema/r/table_io_aggregate_hist_4u_3t.result
+++ b/mysql-test/suite/perfschema/r/table_io_aggregate_hist_4u_3t.result
@@ -67,6 +67,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -85,6 +86,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -147,7 +149,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -299,7 +303,9 @@ object_type object_schema object_name count_star
TABLE test t1 8
TABLE test t2 12
TABLE test t3 17
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -471,7 +477,9 @@ object_type object_schema object_name count_star
TABLE test t1 20
TABLE test t2 30
TABLE test t3 44
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -660,7 +668,9 @@ object_type object_schema object_name count_star
TABLE test t1 35
TABLE test t2 54
TABLE test t3 81
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1787,6 +1797,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_io_aggregate_thread_2u_2t.result b/mysql-test/suite/perfschema/r/table_io_aggregate_thread_2u_2t.result
index 232e8af1017..51ea3f14986 100644
--- a/mysql-test/suite/perfschema/r/table_io_aggregate_thread_2u_2t.result
+++ b/mysql-test/suite/perfschema/r/table_io_aggregate_thread_2u_2t.result
@@ -70,6 +70,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -88,6 +89,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -150,7 +152,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -299,7 +303,9 @@ object_type object_schema object_name count_star
TABLE test t1 8
TABLE test t2 0
TABLE test t3 17
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -465,7 +471,9 @@ object_type object_schema object_name count_star
TABLE test t1 8
TABLE test t2 0
TABLE test t3 17
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -648,7 +656,9 @@ object_type object_schema object_name count_star
TABLE test t1 23
TABLE test t2 0
TABLE test t3 54
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1733,6 +1743,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_io_aggregate_thread_2u_3t.result b/mysql-test/suite/perfschema/r/table_io_aggregate_thread_2u_3t.result
index a97e6f693ac..6b565e8f00d 100644
--- a/mysql-test/suite/perfschema/r/table_io_aggregate_thread_2u_3t.result
+++ b/mysql-test/suite/perfschema/r/table_io_aggregate_thread_2u_3t.result
@@ -69,6 +69,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -87,6 +88,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -149,7 +151,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -298,7 +302,9 @@ object_type object_schema object_name count_star
TABLE test t1 8
TABLE test t2 12
TABLE test t3 17
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -464,7 +470,9 @@ object_type object_schema object_name count_star
TABLE test t1 8
TABLE test t2 12
TABLE test t3 17
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -647,7 +655,9 @@ object_type object_schema object_name count_star
TABLE test t1 23
TABLE test t2 36
TABLE test t3 54
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1732,6 +1742,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_io_aggregate_thread_4u_2t.result b/mysql-test/suite/perfschema/r/table_io_aggregate_thread_4u_2t.result
index fdcdda4d6f0..72822fd30cd 100644
--- a/mysql-test/suite/perfschema/r/table_io_aggregate_thread_4u_2t.result
+++ b/mysql-test/suite/perfschema/r/table_io_aggregate_thread_4u_2t.result
@@ -70,6 +70,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -88,6 +89,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -150,7 +152,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -299,7 +303,9 @@ object_type object_schema object_name count_star
TABLE test t1 8
TABLE test t2 0
TABLE test t3 17
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -465,7 +471,9 @@ object_type object_schema object_name count_star
TABLE test t1 20
TABLE test t2 0
TABLE test t3 44
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -648,7 +656,9 @@ object_type object_schema object_name count_star
TABLE test t1 35
TABLE test t2 0
TABLE test t3 81
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1733,6 +1743,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_io_aggregate_thread_4u_3t.result b/mysql-test/suite/perfschema/r/table_io_aggregate_thread_4u_3t.result
index 09f4e3aff5e..948ca570723 100644
--- a/mysql-test/suite/perfschema/r/table_io_aggregate_thread_4u_3t.result
+++ b/mysql-test/suite/perfschema/r/table_io_aggregate_thread_4u_3t.result
@@ -69,6 +69,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -87,6 +88,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -149,7 +151,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -298,7 +302,9 @@ object_type object_schema object_name count_star
TABLE test t1 8
TABLE test t2 12
TABLE test t3 17
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -464,7 +470,9 @@ object_type object_schema object_name count_star
TABLE test t1 20
TABLE test t2 30
TABLE test t3 44
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -647,7 +655,9 @@ object_type object_schema object_name count_star
TABLE test t1 35
TABLE test t2 54
TABLE test t3 81
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1732,6 +1742,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_lock_aggregate_global_2u_2t.result b/mysql-test/suite/perfschema/r/table_lock_aggregate_global_2u_2t.result
index 3e13281e0d9..b584afc2f2e 100644
--- a/mysql-test/suite/perfschema/r/table_lock_aggregate_global_2u_2t.result
+++ b/mysql-test/suite/perfschema/r/table_lock_aggregate_global_2u_2t.result
@@ -70,6 +70,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -88,6 +89,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -150,7 +152,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -299,7 +303,9 @@ object_type object_schema object_name count_star
TABLE test t1 10
TABLE test t2 0
TABLE test t3 14
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -465,7 +471,9 @@ object_type object_schema object_name count_star
TABLE test t1 20
TABLE test t2 0
TABLE test t3 28
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -648,7 +656,9 @@ object_type object_schema object_name count_star
TABLE test t1 30
TABLE test t2 0
TABLE test t3 42
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1733,6 +1743,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_lock_aggregate_global_2u_3t.result b/mysql-test/suite/perfschema/r/table_lock_aggregate_global_2u_3t.result
index e1264576c56..43de94add0d 100644
--- a/mysql-test/suite/perfschema/r/table_lock_aggregate_global_2u_3t.result
+++ b/mysql-test/suite/perfschema/r/table_lock_aggregate_global_2u_3t.result
@@ -69,6 +69,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -87,6 +88,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -149,7 +151,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -298,7 +302,9 @@ object_type object_schema object_name count_star
TABLE test t1 10
TABLE test t2 12
TABLE test t3 14
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -464,7 +470,9 @@ object_type object_schema object_name count_star
TABLE test t1 20
TABLE test t2 24
TABLE test t3 28
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -647,7 +655,9 @@ object_type object_schema object_name count_star
TABLE test t1 30
TABLE test t2 36
TABLE test t3 42
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1732,6 +1742,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_lock_aggregate_global_4u_2t.result b/mysql-test/suite/perfschema/r/table_lock_aggregate_global_4u_2t.result
index ccdcc67156c..453fd2c1478 100644
--- a/mysql-test/suite/perfschema/r/table_lock_aggregate_global_4u_2t.result
+++ b/mysql-test/suite/perfschema/r/table_lock_aggregate_global_4u_2t.result
@@ -70,6 +70,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -88,6 +89,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -150,7 +152,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -299,7 +303,9 @@ object_type object_schema object_name count_star
TABLE test t1 10
TABLE test t2 0
TABLE test t3 14
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -465,7 +471,9 @@ object_type object_schema object_name count_star
TABLE test t1 20
TABLE test t2 0
TABLE test t3 28
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -648,7 +656,9 @@ object_type object_schema object_name count_star
TABLE test t1 30
TABLE test t2 0
TABLE test t3 42
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1733,6 +1743,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_lock_aggregate_global_4u_3t.result b/mysql-test/suite/perfschema/r/table_lock_aggregate_global_4u_3t.result
index d36a8a1a279..2a43dc1f2dc 100644
--- a/mysql-test/suite/perfschema/r/table_lock_aggregate_global_4u_3t.result
+++ b/mysql-test/suite/perfschema/r/table_lock_aggregate_global_4u_3t.result
@@ -69,6 +69,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -87,6 +88,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -149,7 +151,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -298,7 +302,9 @@ object_type object_schema object_name count_star
TABLE test t1 10
TABLE test t2 12
TABLE test t3 14
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -464,7 +470,9 @@ object_type object_schema object_name count_star
TABLE test t1 20
TABLE test t2 24
TABLE test t3 28
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -647,7 +655,9 @@ object_type object_schema object_name count_star
TABLE test t1 30
TABLE test t2 36
TABLE test t3 42
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1732,6 +1742,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_lock_aggregate_hist_2u_2t.result b/mysql-test/suite/perfschema/r/table_lock_aggregate_hist_2u_2t.result
index 396df8f69d2..2afe89ecdd7 100644
--- a/mysql-test/suite/perfschema/r/table_lock_aggregate_hist_2u_2t.result
+++ b/mysql-test/suite/perfschema/r/table_lock_aggregate_hist_2u_2t.result
@@ -68,6 +68,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -86,6 +87,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -148,7 +150,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -299,7 +303,9 @@ object_type object_schema object_name count_star
TABLE test t1 10
TABLE test t2 0
TABLE test t3 14
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -469,7 +475,9 @@ object_type object_schema object_name count_star
TABLE test t1 10
TABLE test t2 0
TABLE test t3 14
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -656,7 +664,9 @@ object_type object_schema object_name count_star
TABLE test t1 20
TABLE test t2 0
TABLE test t3 28
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1769,6 +1779,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_lock_aggregate_hist_2u_3t.result b/mysql-test/suite/perfschema/r/table_lock_aggregate_hist_2u_3t.result
index 09b59ef8c31..d01ec751a88 100644
--- a/mysql-test/suite/perfschema/r/table_lock_aggregate_hist_2u_3t.result
+++ b/mysql-test/suite/perfschema/r/table_lock_aggregate_hist_2u_3t.result
@@ -67,6 +67,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -85,6 +86,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -147,7 +149,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -299,7 +303,9 @@ object_type object_schema object_name count_star
TABLE test t1 10
TABLE test t2 12
TABLE test t3 14
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -471,7 +477,9 @@ object_type object_schema object_name count_star
TABLE test t1 10
TABLE test t2 12
TABLE test t3 14
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -660,7 +668,9 @@ object_type object_schema object_name count_star
TABLE test t1 20
TABLE test t2 24
TABLE test t3 28
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1787,6 +1797,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_lock_aggregate_hist_4u_2t.result b/mysql-test/suite/perfschema/r/table_lock_aggregate_hist_4u_2t.result
index d2e7c407cc3..295db1332c7 100644
--- a/mysql-test/suite/perfschema/r/table_lock_aggregate_hist_4u_2t.result
+++ b/mysql-test/suite/perfschema/r/table_lock_aggregate_hist_4u_2t.result
@@ -68,6 +68,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -86,6 +87,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -148,7 +150,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -299,7 +303,9 @@ object_type object_schema object_name count_star
TABLE test t1 10
TABLE test t2 0
TABLE test t3 14
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -469,7 +475,9 @@ object_type object_schema object_name count_star
TABLE test t1 20
TABLE test t2 0
TABLE test t3 28
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -656,7 +664,9 @@ object_type object_schema object_name count_star
TABLE test t1 30
TABLE test t2 0
TABLE test t3 42
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1769,6 +1779,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_lock_aggregate_hist_4u_3t.result b/mysql-test/suite/perfschema/r/table_lock_aggregate_hist_4u_3t.result
index fd9bd38a81a..e0601a1c9bb 100644
--- a/mysql-test/suite/perfschema/r/table_lock_aggregate_hist_4u_3t.result
+++ b/mysql-test/suite/perfschema/r/table_lock_aggregate_hist_4u_3t.result
@@ -67,6 +67,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -85,6 +86,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -147,7 +149,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -299,7 +303,9 @@ object_type object_schema object_name count_star
TABLE test t1 10
TABLE test t2 12
TABLE test t3 14
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -471,7 +477,9 @@ object_type object_schema object_name count_star
TABLE test t1 20
TABLE test t2 24
TABLE test t3 28
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -660,7 +668,9 @@ object_type object_schema object_name count_star
TABLE test t1 30
TABLE test t2 36
TABLE test t3 42
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1787,6 +1797,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_lock_aggregate_thread_2u_2t.result b/mysql-test/suite/perfschema/r/table_lock_aggregate_thread_2u_2t.result
index 5f13b37b262..d7a63c6dd64 100644
--- a/mysql-test/suite/perfschema/r/table_lock_aggregate_thread_2u_2t.result
+++ b/mysql-test/suite/perfschema/r/table_lock_aggregate_thread_2u_2t.result
@@ -70,6 +70,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -88,6 +89,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -150,7 +152,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -299,7 +303,9 @@ object_type object_schema object_name count_star
TABLE test t1 10
TABLE test t2 0
TABLE test t3 14
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -465,7 +471,9 @@ object_type object_schema object_name count_star
TABLE test t1 10
TABLE test t2 0
TABLE test t3 14
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -648,7 +656,9 @@ object_type object_schema object_name count_star
TABLE test t1 20
TABLE test t2 0
TABLE test t3 28
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1733,6 +1743,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_lock_aggregate_thread_2u_3t.result b/mysql-test/suite/perfschema/r/table_lock_aggregate_thread_2u_3t.result
index 456c570a34f..986d2b251ff 100644
--- a/mysql-test/suite/perfschema/r/table_lock_aggregate_thread_2u_3t.result
+++ b/mysql-test/suite/perfschema/r/table_lock_aggregate_thread_2u_3t.result
@@ -69,6 +69,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -87,6 +88,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -149,7 +151,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -298,7 +302,9 @@ object_type object_schema object_name count_star
TABLE test t1 10
TABLE test t2 12
TABLE test t3 14
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -464,7 +470,9 @@ object_type object_schema object_name count_star
TABLE test t1 10
TABLE test t2 12
TABLE test t3 14
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -647,7 +655,9 @@ object_type object_schema object_name count_star
TABLE test t1 20
TABLE test t2 24
TABLE test t3 28
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1732,6 +1742,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_lock_aggregate_thread_4u_2t.result b/mysql-test/suite/perfschema/r/table_lock_aggregate_thread_4u_2t.result
index 7a2df579687..cb3c5ed0c4b 100644
--- a/mysql-test/suite/perfschema/r/table_lock_aggregate_thread_4u_2t.result
+++ b/mysql-test/suite/perfschema/r/table_lock_aggregate_thread_4u_2t.result
@@ -70,6 +70,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -88,6 +89,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -150,7 +152,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -299,7 +303,9 @@ object_type object_schema object_name count_star
TABLE test t1 10
TABLE test t2 0
TABLE test t3 14
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -465,7 +471,9 @@ object_type object_schema object_name count_star
TABLE test t1 20
TABLE test t2 0
TABLE test t3 28
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -648,7 +656,9 @@ object_type object_schema object_name count_star
TABLE test t1 30
TABLE test t2 0
TABLE test t3 42
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1733,6 +1743,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_lock_aggregate_thread_4u_3t.result b/mysql-test/suite/perfschema/r/table_lock_aggregate_thread_4u_3t.result
index 1f5dc009e67..e05d4a06449 100644
--- a/mysql-test/suite/perfschema/r/table_lock_aggregate_thread_4u_3t.result
+++ b/mysql-test/suite/perfschema/r/table_lock_aggregate_thread_4u_3t.result
@@ -69,6 +69,7 @@ performance_schema_max_table_handles 1000
performance_schema_max_table_instances 500
performance_schema_max_thread_classes 50
performance_schema_max_thread_instances 200
+performance_schema_session_connect_attrs_size 2048
performance_schema_setup_actors_size 100
performance_schema_setup_objects_size 100
performance_schema_users_size 100
@@ -87,6 +88,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
@@ -149,7 +151,9 @@ object_type object_schema object_name count_star
TABLE test t1 0
TABLE test t2 0
TABLE test t3 0
-"================== con1 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user1@localhost is connected
"================== Step 2 =================="
call dump_thread();
username event_name count_star
@@ -298,7 +302,9 @@ object_type object_schema object_name count_star
TABLE test t1 10
TABLE test t2 12
TABLE test t3 14
-"================== con2 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user2@localhost is connected
"================== Step 4 =================="
call dump_thread();
username event_name count_star
@@ -464,7 +470,9 @@ object_type object_schema object_name count_star
TABLE test t1 20
TABLE test t2 24
TABLE test t3 28
-"================== con3 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user3@localhost is connected
"================== Step 6 =================="
call dump_thread();
username event_name count_star
@@ -647,7 +655,9 @@ object_type object_schema object_name count_star
TABLE test t1 30
TABLE test t2 36
TABLE test t3 42
-"================== con4 connected =================="
+select concat(current_user(), " is connected") as status;
+status
+user4@localhost is connected
"================== Step 8 =================="
call dump_thread();
username event_name count_star
@@ -1732,6 +1742,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/table_schema.result b/mysql-test/suite/perfschema/r/table_schema.result
index 2580b9eeff8..faf0a384503 100644
--- a/mysql-test/suite/perfschema/r/table_schema.result
+++ b/mysql-test/suite/perfschema/r/table_schema.result
@@ -7,7 +7,7 @@ def performance_schema accounts CURRENT_CONNECTIONS 3 NULL NO bigint NULL NULL 1
def performance_schema accounts TOTAL_CONNECTIONS 4 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references
def performance_schema cond_instances NAME 1 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references
def performance_schema cond_instances OBJECT_INSTANCE_BEGIN 2 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema events_stages_current THREAD_ID 1 NULL NO int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references
+def performance_schema events_stages_current THREAD_ID 1 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_stages_current EVENT_ID 2 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_stages_current END_EVENT_ID 3 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_stages_current EVENT_NAME 4 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references
@@ -17,7 +17,7 @@ def performance_schema events_stages_current TIMER_END 7 NULL YES bigint NULL NU
def performance_schema events_stages_current TIMER_WAIT 8 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_stages_current NESTING_EVENT_ID 9 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_stages_current NESTING_EVENT_TYPE 10 NULL YES enum 9 27 NULL NULL NULL utf8 utf8_general_ci enum('STATEMENT','STAGE','WAIT') select,insert,update,references
-def performance_schema events_stages_history THREAD_ID 1 NULL NO int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references
+def performance_schema events_stages_history THREAD_ID 1 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_stages_history EVENT_ID 2 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_stages_history END_EVENT_ID 3 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_stages_history EVENT_NAME 4 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references
@@ -27,7 +27,7 @@ def performance_schema events_stages_history TIMER_END 7 NULL YES bigint NULL NU
def performance_schema events_stages_history TIMER_WAIT 8 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_stages_history NESTING_EVENT_ID 9 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_stages_history NESTING_EVENT_TYPE 10 NULL YES enum 9 27 NULL NULL NULL utf8 utf8_general_ci enum('STATEMENT','STAGE','WAIT') select,insert,update,references
-def performance_schema events_stages_history_long THREAD_ID 1 NULL NO int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references
+def performance_schema events_stages_history_long THREAD_ID 1 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_stages_history_long EVENT_ID 2 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_stages_history_long END_EVENT_ID 3 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_stages_history_long EVENT_NAME 4 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references
@@ -52,7 +52,7 @@ def performance_schema events_stages_summary_by_host_by_event_name SUM_TIMER_WAI
def performance_schema events_stages_summary_by_host_by_event_name MIN_TIMER_WAIT 5 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_stages_summary_by_host_by_event_name AVG_TIMER_WAIT 6 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_stages_summary_by_host_by_event_name MAX_TIMER_WAIT 7 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema events_stages_summary_by_thread_by_event_name THREAD_ID 1 NULL NO int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references
+def performance_schema events_stages_summary_by_thread_by_event_name THREAD_ID 1 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_stages_summary_by_thread_by_event_name EVENT_NAME 2 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references
def performance_schema events_stages_summary_by_thread_by_event_name COUNT_STAR 3 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_stages_summary_by_thread_by_event_name SUM_TIMER_WAIT 4 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
@@ -72,7 +72,7 @@ def performance_schema events_stages_summary_global_by_event_name SUM_TIMER_WAIT
def performance_schema events_stages_summary_global_by_event_name MIN_TIMER_WAIT 4 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_stages_summary_global_by_event_name AVG_TIMER_WAIT 5 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_stages_summary_global_by_event_name MAX_TIMER_WAIT 6 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema events_statements_current THREAD_ID 1 NULL NO int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references
+def performance_schema events_statements_current THREAD_ID 1 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_statements_current EVENT_ID 2 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_statements_current END_EVENT_ID 3 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_statements_current EVENT_NAME 4 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references
@@ -112,7 +112,7 @@ def performance_schema events_statements_current NO_INDEX_USED 37 NULL NO bigint
def performance_schema events_statements_current NO_GOOD_INDEX_USED 38 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_statements_current NESTING_EVENT_ID 39 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_statements_current NESTING_EVENT_TYPE 40 NULL YES enum 9 27 NULL NULL NULL utf8 utf8_general_ci enum('STATEMENT','STAGE','WAIT') select,insert,update,references
-def performance_schema events_statements_history THREAD_ID 1 NULL NO int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references
+def performance_schema events_statements_history THREAD_ID 1 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_statements_history EVENT_ID 2 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_statements_history END_EVENT_ID 3 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_statements_history EVENT_NAME 4 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references
@@ -152,7 +152,7 @@ def performance_schema events_statements_history NO_INDEX_USED 37 NULL NO bigint
def performance_schema events_statements_history NO_GOOD_INDEX_USED 38 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_statements_history NESTING_EVENT_ID 39 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_statements_history NESTING_EVENT_TYPE 40 NULL YES enum 9 27 NULL NULL NULL utf8 utf8_general_ci enum('STATEMENT','STAGE','WAIT') select,insert,update,references
-def performance_schema events_statements_history_long THREAD_ID 1 NULL NO int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references
+def performance_schema events_statements_history_long THREAD_ID 1 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_statements_history_long EVENT_ID 2 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_statements_history_long END_EVENT_ID 3 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_statements_history_long EVENT_NAME 4 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references
@@ -219,34 +219,35 @@ def performance_schema events_statements_summary_by_account_by_event_name SUM_SO
def performance_schema events_statements_summary_by_account_by_event_name SUM_SORT_SCAN 25 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_statements_summary_by_account_by_event_name SUM_NO_INDEX_USED 26 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_statements_summary_by_account_by_event_name SUM_NO_GOOD_INDEX_USED 27 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema events_statements_summary_by_digest DIGEST 1 NULL YES varchar 32 96 NULL NULL NULL utf8 utf8_general_ci varchar(32) select,insert,update,references
-def performance_schema events_statements_summary_by_digest DIGEST_TEXT 2 NULL YES longtext 4294967295 4294967295 NULL NULL NULL utf8 utf8_general_ci longtext select,insert,update,references
-def performance_schema events_statements_summary_by_digest COUNT_STAR 3 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema events_statements_summary_by_digest SUM_TIMER_WAIT 4 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema events_statements_summary_by_digest MIN_TIMER_WAIT 5 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema events_statements_summary_by_digest AVG_TIMER_WAIT 6 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema events_statements_summary_by_digest MAX_TIMER_WAIT 7 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema events_statements_summary_by_digest SUM_LOCK_TIME 8 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema events_statements_summary_by_digest SUM_ERRORS 9 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema events_statements_summary_by_digest SUM_WARNINGS 10 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema events_statements_summary_by_digest SUM_ROWS_AFFECTED 11 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema events_statements_summary_by_digest SUM_ROWS_SENT 12 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema events_statements_summary_by_digest SUM_ROWS_EXAMINED 13 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema events_statements_summary_by_digest SUM_CREATED_TMP_DISK_TABLES 14 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema events_statements_summary_by_digest SUM_CREATED_TMP_TABLES 15 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema events_statements_summary_by_digest SUM_SELECT_FULL_JOIN 16 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema events_statements_summary_by_digest SUM_SELECT_FULL_RANGE_JOIN 17 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema events_statements_summary_by_digest SUM_SELECT_RANGE 18 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema events_statements_summary_by_digest SUM_SELECT_RANGE_CHECK 19 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema events_statements_summary_by_digest SUM_SELECT_SCAN 20 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema events_statements_summary_by_digest SUM_SORT_MERGE_PASSES 21 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema events_statements_summary_by_digest SUM_SORT_RANGE 22 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema events_statements_summary_by_digest SUM_SORT_ROWS 23 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema events_statements_summary_by_digest SUM_SORT_SCAN 24 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema events_statements_summary_by_digest SUM_NO_INDEX_USED 25 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema events_statements_summary_by_digest SUM_NO_GOOD_INDEX_USED 26 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema events_statements_summary_by_digest FIRST_SEEN 27 0000-00-00 00:00:00 NO timestamp NULL NULL NULL NULL 0 NULL NULL timestamp select,insert,update,references
-def performance_schema events_statements_summary_by_digest LAST_SEEN 28 0000-00-00 00:00:00 NO timestamp NULL NULL NULL NULL 0 NULL NULL timestamp select,insert,update,references
+def performance_schema events_statements_summary_by_digest SCHEMA_NAME 1 NULL YES varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) select,insert,update,references
+def performance_schema events_statements_summary_by_digest DIGEST 2 NULL YES varchar 32 96 NULL NULL NULL utf8 utf8_general_ci varchar(32) select,insert,update,references
+def performance_schema events_statements_summary_by_digest DIGEST_TEXT 3 NULL YES longtext 4294967295 4294967295 NULL NULL NULL utf8 utf8_general_ci longtext select,insert,update,references
+def performance_schema events_statements_summary_by_digest COUNT_STAR 4 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
+def performance_schema events_statements_summary_by_digest SUM_TIMER_WAIT 5 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
+def performance_schema events_statements_summary_by_digest MIN_TIMER_WAIT 6 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
+def performance_schema events_statements_summary_by_digest AVG_TIMER_WAIT 7 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
+def performance_schema events_statements_summary_by_digest MAX_TIMER_WAIT 8 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
+def performance_schema events_statements_summary_by_digest SUM_LOCK_TIME 9 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
+def performance_schema events_statements_summary_by_digest SUM_ERRORS 10 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
+def performance_schema events_statements_summary_by_digest SUM_WARNINGS 11 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
+def performance_schema events_statements_summary_by_digest SUM_ROWS_AFFECTED 12 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
+def performance_schema events_statements_summary_by_digest SUM_ROWS_SENT 13 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
+def performance_schema events_statements_summary_by_digest SUM_ROWS_EXAMINED 14 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
+def performance_schema events_statements_summary_by_digest SUM_CREATED_TMP_DISK_TABLES 15 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
+def performance_schema events_statements_summary_by_digest SUM_CREATED_TMP_TABLES 16 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
+def performance_schema events_statements_summary_by_digest SUM_SELECT_FULL_JOIN 17 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
+def performance_schema events_statements_summary_by_digest SUM_SELECT_FULL_RANGE_JOIN 18 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
+def performance_schema events_statements_summary_by_digest SUM_SELECT_RANGE 19 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
+def performance_schema events_statements_summary_by_digest SUM_SELECT_RANGE_CHECK 20 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
+def performance_schema events_statements_summary_by_digest SUM_SELECT_SCAN 21 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
+def performance_schema events_statements_summary_by_digest SUM_SORT_MERGE_PASSES 22 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
+def performance_schema events_statements_summary_by_digest SUM_SORT_RANGE 23 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
+def performance_schema events_statements_summary_by_digest SUM_SORT_ROWS 24 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
+def performance_schema events_statements_summary_by_digest SUM_SORT_SCAN 25 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
+def performance_schema events_statements_summary_by_digest SUM_NO_INDEX_USED 26 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
+def performance_schema events_statements_summary_by_digest SUM_NO_GOOD_INDEX_USED 27 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
+def performance_schema events_statements_summary_by_digest FIRST_SEEN 28 0000-00-00 00:00:00 NO timestamp NULL NULL NULL NULL 0 NULL NULL timestamp select,insert,update,references
+def performance_schema events_statements_summary_by_digest LAST_SEEN 29 0000-00-00 00:00:00 NO timestamp NULL NULL NULL NULL 0 NULL NULL timestamp select,insert,update,references
def performance_schema events_statements_summary_by_host_by_event_name HOST 1 NULL YES char 60 180 NULL NULL NULL utf8 utf8_bin char(60) select,insert,update,references
def performance_schema events_statements_summary_by_host_by_event_name EVENT_NAME 2 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references
def performance_schema events_statements_summary_by_host_by_event_name COUNT_STAR 3 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
@@ -273,7 +274,7 @@ def performance_schema events_statements_summary_by_host_by_event_name SUM_SORT_
def performance_schema events_statements_summary_by_host_by_event_name SUM_SORT_SCAN 24 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_statements_summary_by_host_by_event_name SUM_NO_INDEX_USED 25 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_statements_summary_by_host_by_event_name SUM_NO_GOOD_INDEX_USED 26 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema events_statements_summary_by_thread_by_event_name THREAD_ID 1 NULL NO int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references
+def performance_schema events_statements_summary_by_thread_by_event_name THREAD_ID 1 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_statements_summary_by_thread_by_event_name EVENT_NAME 2 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references
def performance_schema events_statements_summary_by_thread_by_event_name COUNT_STAR 3 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_statements_summary_by_thread_by_event_name SUM_TIMER_WAIT 4 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
@@ -350,7 +351,7 @@ def performance_schema events_statements_summary_global_by_event_name SUM_SORT_R
def performance_schema events_statements_summary_global_by_event_name SUM_SORT_SCAN 23 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_statements_summary_global_by_event_name SUM_NO_INDEX_USED 24 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_statements_summary_global_by_event_name SUM_NO_GOOD_INDEX_USED 25 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema events_waits_current THREAD_ID 1 NULL NO int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references
+def performance_schema events_waits_current THREAD_ID 1 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_waits_current EVENT_ID 2 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_waits_current END_EVENT_ID 3 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_waits_current EVENT_NAME 4 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references
@@ -369,7 +370,7 @@ def performance_schema events_waits_current NESTING_EVENT_TYPE 16 NULL YES enum
def performance_schema events_waits_current OPERATION 17 NULL NO varchar 32 96 NULL NULL NULL utf8 utf8_general_ci varchar(32) select,insert,update,references
def performance_schema events_waits_current NUMBER_OF_BYTES 18 NULL YES bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references
def performance_schema events_waits_current FLAGS 19 NULL YES int NULL NULL 10 0 NULL NULL NULL int(10) unsigned select,insert,update,references
-def performance_schema events_waits_history THREAD_ID 1 NULL NO int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references
+def performance_schema events_waits_history THREAD_ID 1 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_waits_history EVENT_ID 2 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_waits_history END_EVENT_ID 3 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_waits_history EVENT_NAME 4 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references
@@ -388,7 +389,7 @@ def performance_schema events_waits_history NESTING_EVENT_TYPE 16 NULL YES enum
def performance_schema events_waits_history OPERATION 17 NULL NO varchar 32 96 NULL NULL NULL utf8 utf8_general_ci varchar(32) select,insert,update,references
def performance_schema events_waits_history NUMBER_OF_BYTES 18 NULL YES bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references
def performance_schema events_waits_history FLAGS 19 NULL YES int NULL NULL 10 0 NULL NULL NULL int(10) unsigned select,insert,update,references
-def performance_schema events_waits_history_long THREAD_ID 1 NULL NO int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references
+def performance_schema events_waits_history_long THREAD_ID 1 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_waits_history_long EVENT_ID 2 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_waits_history_long END_EVENT_ID 3 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_waits_history_long EVENT_NAME 4 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references
@@ -429,7 +430,7 @@ def performance_schema events_waits_summary_by_instance SUM_TIMER_WAIT 4 NULL NO
def performance_schema events_waits_summary_by_instance MIN_TIMER_WAIT 5 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_waits_summary_by_instance AVG_TIMER_WAIT 6 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_waits_summary_by_instance MAX_TIMER_WAIT 7 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema events_waits_summary_by_thread_by_event_name THREAD_ID 1 NULL NO int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references
+def performance_schema events_waits_summary_by_thread_by_event_name THREAD_ID 1 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_waits_summary_by_thread_by_event_name EVENT_NAME 2 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references
def performance_schema events_waits_summary_by_thread_by_event_name COUNT_STAR 3 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema events_waits_summary_by_thread_by_event_name SUM_TIMER_WAIT 4 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
@@ -503,9 +504,38 @@ def performance_schema file_summary_by_instance MAX_TIMER_MISC 25 NULL NO bigint
def performance_schema hosts HOST 1 NULL YES char 60 180 NULL NULL NULL utf8 utf8_bin char(60) select,insert,update,references
def performance_schema hosts CURRENT_CONNECTIONS 2 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references
def performance_schema hosts TOTAL_CONNECTIONS 3 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references
+def performance_schema host_cache IP 1 NULL NO varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) select,insert,update,references
+def performance_schema host_cache HOST 2 NULL YES varchar 255 765 NULL NULL NULL utf8 utf8_bin varchar(255) select,insert,update,references
+def performance_schema host_cache HOST_VALIDATED 3 NULL NO enum 3 9 NULL NULL NULL utf8 utf8_general_ci enum('YES','NO') select,insert,update,references
+def performance_schema host_cache SUM_CONNECT_ERRORS 4 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references
+def performance_schema host_cache COUNT_HOST_BLOCKED_ERRORS 5 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references
+def performance_schema host_cache COUNT_NAMEINFO_TRANSIENT_ERRORS 6 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references
+def performance_schema host_cache COUNT_NAMEINFO_PERMANENT_ERRORS 7 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references
+def performance_schema host_cache COUNT_FORMAT_ERRORS 8 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references
+def performance_schema host_cache COUNT_ADDRINFO_TRANSIENT_ERRORS 9 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references
+def performance_schema host_cache COUNT_ADDRINFO_PERMANENT_ERRORS 10 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references
+def performance_schema host_cache COUNT_FCRDNS_ERRORS 11 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references
+def performance_schema host_cache COUNT_HOST_ACL_ERRORS 12 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references
+def performance_schema host_cache COUNT_NO_AUTH_PLUGIN_ERRORS 13 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references
+def performance_schema host_cache COUNT_AUTH_PLUGIN_ERRORS 14 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references
+def performance_schema host_cache COUNT_HANDSHAKE_ERRORS 15 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references
+def performance_schema host_cache COUNT_PROXY_USER_ERRORS 16 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references
+def performance_schema host_cache COUNT_PROXY_USER_ACL_ERRORS 17 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references
+def performance_schema host_cache COUNT_AUTHENTICATION_ERRORS 18 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references
+def performance_schema host_cache COUNT_SSL_ERRORS 19 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references
+def performance_schema host_cache COUNT_MAX_USER_CONNECTIONS_ERRORS 20 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references
+def performance_schema host_cache COUNT_MAX_USER_CONNECTIONS_PER_HOUR_ERRORS 21 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references
+def performance_schema host_cache COUNT_DEFAULT_DATABASE_ERRORS 22 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references
+def performance_schema host_cache COUNT_INIT_CONNECT_ERRORS 23 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references
+def performance_schema host_cache COUNT_LOCAL_ERRORS 24 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references
+def performance_schema host_cache COUNT_UNKNOWN_ERRORS 25 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references
+def performance_schema host_cache FIRST_SEEN 26 0000-00-00 00:00:00 NO timestamp NULL NULL NULL NULL 0 NULL NULL timestamp select,insert,update,references
+def performance_schema host_cache LAST_SEEN 27 0000-00-00 00:00:00 NO timestamp NULL NULL NULL NULL 0 NULL NULL timestamp select,insert,update,references
+def performance_schema host_cache FIRST_ERROR_SEEN 28 0000-00-00 00:00:00 YES timestamp NULL NULL NULL NULL 0 NULL NULL timestamp select,insert,update,references
+def performance_schema host_cache LAST_ERROR_SEEN 29 0000-00-00 00:00:00 YES timestamp NULL NULL NULL NULL 0 NULL NULL timestamp select,insert,update,references
def performance_schema mutex_instances NAME 1 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references
def performance_schema mutex_instances OBJECT_INSTANCE_BEGIN 2 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema mutex_instances LOCKED_BY_THREAD_ID 3 NULL YES int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references
+def performance_schema mutex_instances LOCKED_BY_THREAD_ID 3 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema objects_summary_global_by_type OBJECT_TYPE 1 NULL YES varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) select,insert,update,references
def performance_schema objects_summary_global_by_type OBJECT_SCHEMA 2 NULL YES varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) select,insert,update,references
def performance_schema objects_summary_global_by_type OBJECT_NAME 3 NULL YES varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) select,insert,update,references
@@ -520,8 +550,16 @@ def performance_schema performance_timers TIMER_RESOLUTION 3 NULL YES bigint NUL
def performance_schema performance_timers TIMER_OVERHEAD 4 NULL YES bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references
def performance_schema rwlock_instances NAME 1 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references
def performance_schema rwlock_instances OBJECT_INSTANCE_BEGIN 2 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema rwlock_instances WRITE_LOCKED_BY_THREAD_ID 3 NULL YES int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references
+def performance_schema rwlock_instances WRITE_LOCKED_BY_THREAD_ID 3 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema rwlock_instances READ_LOCKED_BY_COUNT 4 NULL NO int NULL NULL 10 0 NULL NULL NULL int(10) unsigned select,insert,update,references
+def performance_schema session_account_connect_attrs PROCESSLIST_ID 1 NULL NO int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references
+def performance_schema session_account_connect_attrs ATTR_NAME 2 NULL NO varchar 32 96 NULL NULL NULL utf8 utf8_bin varchar(32) select,insert,update,references
+def performance_schema session_account_connect_attrs ATTR_VALUE 3 NULL YES varchar 1024 3072 NULL NULL NULL utf8 utf8_bin varchar(1024) select,insert,update,references
+def performance_schema session_account_connect_attrs ORDINAL_POSITION 4 NULL YES int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references
+def performance_schema session_connect_attrs PROCESSLIST_ID 1 NULL NO int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references
+def performance_schema session_connect_attrs ATTR_NAME 2 NULL NO varchar 32 96 NULL NULL NULL utf8 utf8_bin varchar(32) select,insert,update,references
+def performance_schema session_connect_attrs ATTR_VALUE 3 NULL YES varchar 1024 3072 NULL NULL NULL utf8 utf8_bin varchar(1024) select,insert,update,references
+def performance_schema session_connect_attrs ORDINAL_POSITION 4 NULL YES int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references
def performance_schema setup_actors HOST 1 % NO char 60 180 NULL NULL NULL utf8 utf8_bin char(60) select,insert,update,references
def performance_schema setup_actors USER 2 % NO char 16 48 NULL NULL NULL utf8 utf8_bin char(16) select,insert,update,references
def performance_schema setup_actors ROLE 3 % NO char 16 48 NULL NULL NULL utf8 utf8_bin char(16) select,insert,update,references
@@ -539,7 +577,7 @@ def performance_schema setup_timers NAME 1 NULL NO varchar 64 192 NULL NULL NULL
def performance_schema setup_timers TIMER_NAME 2 NULL NO enum 11 33 NULL NULL NULL utf8 utf8_general_ci enum('CYCLE','NANOSECOND','MICROSECOND','MILLISECOND','TICK') select,insert,update,references
def performance_schema socket_instances EVENT_NAME 1 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references
def performance_schema socket_instances OBJECT_INSTANCE_BEGIN 2 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema socket_instances THREAD_ID 3 NULL YES int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references
+def performance_schema socket_instances THREAD_ID 3 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema socket_instances SOCKET_ID 4 NULL NO int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references
def performance_schema socket_instances IP 5 NULL NO varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) select,insert,update,references
def performance_schema socket_instances PORT 6 NULL NO int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references
@@ -741,10 +779,10 @@ def performance_schema table_lock_waits_summary_by_table SUM_TIMER_WRITE_EXTERNA
def performance_schema table_lock_waits_summary_by_table MIN_TIMER_WRITE_EXTERNAL 71 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema table_lock_waits_summary_by_table AVG_TIMER_WRITE_EXTERNAL 72 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema table_lock_waits_summary_by_table MAX_TIMER_WRITE_EXTERNAL 73 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
-def performance_schema threads THREAD_ID 1 NULL NO int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references
+def performance_schema threads THREAD_ID 1 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema threads NAME 2 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references
def performance_schema threads TYPE 3 NULL NO varchar 10 30 NULL NULL NULL utf8 utf8_general_ci varchar(10) select,insert,update,references
-def performance_schema threads PROCESSLIST_ID 4 NULL YES int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references
+def performance_schema threads PROCESSLIST_ID 4 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema threads PROCESSLIST_USER 5 NULL YES varchar 16 48 NULL NULL NULL utf8 utf8_general_ci varchar(16) select,insert,update,references
def performance_schema threads PROCESSLIST_HOST 6 NULL YES varchar 60 180 NULL NULL NULL utf8 utf8_general_ci varchar(60) select,insert,update,references
def performance_schema threads PROCESSLIST_DB 7 NULL YES varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) select,insert,update,references
@@ -752,7 +790,7 @@ def performance_schema threads PROCESSLIST_COMMAND 8 NULL YES varchar 16 48 NULL
def performance_schema threads PROCESSLIST_TIME 9 NULL YES bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references
def performance_schema threads PROCESSLIST_STATE 10 NULL YES varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) select,insert,update,references
def performance_schema threads PROCESSLIST_INFO 11 NULL YES longtext 4294967295 4294967295 NULL NULL NULL utf8 utf8_general_ci longtext select,insert,update,references
-def performance_schema threads PARENT_THREAD_ID 12 NULL YES int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references
+def performance_schema threads PARENT_THREAD_ID 12 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references
def performance_schema threads ROLE 13 NULL YES varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) select,insert,update,references
def performance_schema threads INSTRUMENTED 14 NULL NO enum 3 9 NULL NULL NULL utf8 utf8_general_ci enum('YES','NO') select,insert,update,references
def performance_schema users USER 1 NULL YES char 16 48 NULL NULL NULL utf8 utf8_bin char(16) select,insert,update,references
diff --git a/mysql-test/suite/perfschema/r/temp_table_io.result b/mysql-test/suite/perfschema/r/temp_table_io.result
index 732d1acac6a..c5de365dbf4 100644
--- a/mysql-test/suite/perfschema/r/temp_table_io.result
+++ b/mysql-test/suite/perfschema/r/temp_table_io.result
@@ -99,6 +99,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/trigger_table_io.result b/mysql-test/suite/perfschema/r/trigger_table_io.result
index a8207a8b32a..e77e7d864f1 100644
--- a/mysql-test/suite/perfschema/r/trigger_table_io.result
+++ b/mysql-test/suite/perfschema/r/trigger_table_io.result
@@ -184,6 +184,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/r/view_table_io.result b/mysql-test/suite/perfschema/r/view_table_io.result
index 1d05ad38f56..5f17b5ae9e1 100644
--- a/mysql-test/suite/perfschema/r/view_table_io.result
+++ b/mysql-test/suite/perfschema/r/view_table_io.result
@@ -135,6 +135,7 @@ Performance_schema_mutex_classes_lost 0
Performance_schema_mutex_instances_lost 0
Performance_schema_rwlock_classes_lost 0
Performance_schema_rwlock_instances_lost 0
+Performance_schema_session_connect_attrs_lost 0
Performance_schema_socket_classes_lost 0
Performance_schema_socket_instances_lost 0
Performance_schema_stage_classes_lost 0
diff --git a/mysql-test/suite/perfschema/t/ddl_session_account_connect_attrs.test b/mysql-test/suite/perfschema/t/ddl_session_account_connect_attrs.test
new file mode 100644
index 00000000000..5d472d383ae
--- /dev/null
+++ b/mysql-test/suite/perfschema/t/ddl_session_account_connect_attrs.test
@@ -0,0 +1,15 @@
+--source include/not_embedded.inc
+--source include/have_perfschema.inc
+
+-- error ER_DBACCESS_DENIED_ERROR
+ALTER TABLE performance_schema.session_account_connect_attrs
+ ADD COLUMN foo INTEGER;
+
+-- error ER_WRONG_PERFSCHEMA_USAGE
+TRUNCATE TABLE performance_schema.session_account_connect_attrs;
+
+-- error ER_DBACCESS_DENIED_ERROR
+ALTER TABLE performance_schema.session_account_connect_attrs ADD INDEX test_index(ATTR_NAME);
+
+-- error ER_DBACCESS_DENIED_ERROR
+CREATE UNIQUE INDEX test_index ON performance_schema.session_account_connect_attrs(ATTR_NAME);
diff --git a/mysql-test/suite/perfschema/t/ddl_session_connect_attrs.test b/mysql-test/suite/perfschema/t/ddl_session_connect_attrs.test
new file mode 100644
index 00000000000..8f3e10ca2a8
--- /dev/null
+++ b/mysql-test/suite/perfschema/t/ddl_session_connect_attrs.test
@@ -0,0 +1,15 @@
+--source include/not_embedded.inc
+--source include/have_perfschema.inc
+
+-- error ER_DBACCESS_DENIED_ERROR
+ALTER TABLE performance_schema.session_connect_attrs
+ ADD COLUMN foo INTEGER;
+
+-- error ER_WRONG_PERFSCHEMA_USAGE
+TRUNCATE TABLE performance_schema.session_connect_attrs;
+
+-- error ER_DBACCESS_DENIED_ERROR
+ALTER TABLE performance_schema.session_connect_attrs ADD INDEX test_index(ATTR_NAME);
+
+-- error ER_DBACCESS_DENIED_ERROR
+CREATE UNIQUE INDEX test_index ON performance_schema.session_connect_attrs(ATTR_NAME);
diff --git a/mysql-test/suite/perfschema/t/digest_table_full.test b/mysql-test/suite/perfschema/t/digest_table_full.test
index b0ff0ab6f21..cb9d7ea4ea8 100644
--- a/mysql-test/suite/perfschema/t/digest_table_full.test
+++ b/mysql-test/suite/perfschema/t/digest_table_full.test
@@ -19,7 +19,7 @@ TRUNCATE TABLE performance_schema.events_statements_summary_by_digest;
--echo ####################################
--echo # QUERYING PS STATEMENT DIGEST
--echo ####################################
-SELECT DIGEST, DIGEST_TEXT, COUNT_STAR, SUM_ROWS_AFFECTED, SUM_WARNINGS,
+SELECT SCHEMA_NAME, DIGEST, DIGEST_TEXT, COUNT_STAR, SUM_ROWS_AFFECTED, SUM_WARNINGS,
SUM_ERRORS FROM performance_schema.events_statements_summary_by_digest;
diff --git a/mysql-test/suite/perfschema/t/disabled.def b/mysql-test/suite/perfschema/t/disabled.def
index 888298bbb09..79b31af3066 100644
--- a/mysql-test/suite/perfschema/t/disabled.def
+++ b/mysql-test/suite/perfschema/t/disabled.def
@@ -9,3 +9,5 @@
# Do not use any TAB characters for whitespace.
#
##############################################################################
+hostcache_ipv4_max_con : BUG#14627287 27th Sept, 2012 Mayank
+hostcache_ipv6_max_con : BUG#14627287 27th Sept, 2012 Mayank
diff --git a/mysql-test/suite/perfschema/t/dml_session_account_connect_attrs.test b/mysql-test/suite/perfschema/t/dml_session_account_connect_attrs.test
new file mode 100644
index 00000000000..82768cb0d6b
--- /dev/null
+++ b/mysql-test/suite/perfschema/t/dml_session_account_connect_attrs.test
@@ -0,0 +1,38 @@
+--source include/not_embedded.inc
+--source include/have_perfschema.inc
+
+--disable_result_log
+SELECT * FROM performance_schema.session_account_connect_attrs
+ LIMIT 1;
+
+SELECT * FROM performance_schema.session_account_connect_attrs
+ where ATTR_NAME='FOO' OR ATTR_VALUE='BAR';
+--enable_result_log
+
+--error ER_TABLEACCESS_DENIED_ERROR
+INSERT INTO performance_schema.session_account_connect_attrs
+ SET ATTR_NAME='FOO', ATTR_VALUE='BAR',
+ ORDINAL_POSITION=100, PROCESS_ID=102;
+
+--error ER_TABLEACCESS_DENIED_ERROR
+UPDATE performance_schema.session_account_connect_attrs
+ SET ATTR_NAME='FOO';
+
+--error ER_TABLEACCESS_DENIED_ERROR
+UPDATE performance_schema.session_account_connect_attrs
+ SET ATTR_NAME='FOO' WHERE ATTR_VALUE='BAR';
+
+--error ER_TABLEACCESS_DENIED_ERROR
+DELETE FROM performance_schema.session_account_connect_attrs
+ WHERE ATTR_VALUE='BAR';
+
+--error ER_TABLEACCESS_DENIED_ERROR
+DELETE FROM performance_schema.session_account_connect_attrs;
+
+-- error ER_TABLEACCESS_DENIED_ERROR
+LOCK TABLES performance_schema.session_account_connect_attrs READ;
+UNLOCK TABLES;
+
+-- error ER_TABLEACCESS_DENIED_ERROR
+LOCK TABLES performance_schema.session_account_connect_attrs WRITE;
+UNLOCK TABLES;
diff --git a/mysql-test/suite/perfschema/t/dml_session_connect_attrs.test b/mysql-test/suite/perfschema/t/dml_session_connect_attrs.test
new file mode 100644
index 00000000000..db2bcb7afd6
--- /dev/null
+++ b/mysql-test/suite/perfschema/t/dml_session_connect_attrs.test
@@ -0,0 +1,38 @@
+--source include/not_embedded.inc
+--source include/have_perfschema.inc
+
+--disable_result_log
+SELECT * FROM performance_schema.session_connect_attrs
+ LIMIT 1;
+
+SELECT * FROM performance_schema.session_connect_attrs
+ where ATTR_NAME='FOO' OR ATTR_VALUE='BAR';
+--enable_result_log
+
+--error ER_TABLEACCESS_DENIED_ERROR
+INSERT INTO performance_schema.session_connect_attrs
+ SET ATTR_NAME='FOO', ATTR_VALUE='BAR',
+ ORDINAL_POSITION=100, PROCESS_ID=102;
+
+--error ER_TABLEACCESS_DENIED_ERROR
+UPDATE performance_schema.session_connect_attrs
+ SET ATTR_NAME='FOO';
+
+--error ER_TABLEACCESS_DENIED_ERROR
+UPDATE performance_schema.session_connect_attrs
+ SET ATTR_NAME='FOO' WHERE ATTR_VALUE='BAR';
+
+--error ER_TABLEACCESS_DENIED_ERROR
+DELETE FROM performance_schema.session_connect_attrs
+ WHERE ATTR_VALUE='BAR';
+
+--error ER_TABLEACCESS_DENIED_ERROR
+DELETE FROM performance_schema.session_connect_attrs;
+
+-- error ER_TABLEACCESS_DENIED_ERROR
+LOCK TABLES performance_schema.session_connect_attrs READ;
+UNLOCK TABLES;
+
+-- error ER_TABLEACCESS_DENIED_ERROR
+LOCK TABLES performance_schema.session_connect_attrs WRITE;
+UNLOCK TABLES;
diff --git a/mysql-test/suite/perfschema/t/dml_setup_instruments.test b/mysql-test/suite/perfschema/t/dml_setup_instruments.test
index 5582d559664..8a4f11ba51f 100644
--- a/mysql-test/suite/perfschema/t/dml_setup_instruments.test
+++ b/mysql-test/suite/perfschema/t/dml_setup_instruments.test
@@ -82,3 +82,9 @@ UNLOCK TABLES;
LOCK TABLES performance_schema.setup_instruments WRITE;
UNLOCK TABLES;
+--echo
+--echo # Bug#13813193 ASSERTION `TABLE->READ_SET ==
+--echo # &TABLE->DEF_READ_SET' FAILED / MYSQL_UPDATE
+--echo
+UPDATE performance_schema.setup_instruments SET timed='NO'
+ORDER BY RAND();
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_again_allow.test b/mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_again_allow.test
index 4113618b0cf..a96ccf86315 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_again_allow.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_again_allow.test
@@ -14,9 +14,9 @@
# Temporary failure in name resolution
call mtr.add_suppression("santa.claus.ipv4.example.com");
-flush status;
-flush hosts;
-select @@global.debug;
+# Enforce a clean state
+--source ../include/wait_for_pfs_thread_count.inc
+--source ../include/hostcache_set_state.inc
grant select on test.* to 'root'@'192.0.2.4';
@@ -26,7 +26,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_error_again";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_error_again";
connect (con2,"127.0.0.1",root,,test,$MASTER_MYPORT,);
select "Con2 is alive";
@@ -47,5 +47,5 @@ disconnect con3;
revoke select on test.* from 'root'@'192.0.2.4';
drop user 'root'@'192.0.2.4';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_again_deny.test b/mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_again_deny.test
index 0c3bc365ae1..f99cbc80577 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_again_deny.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_again_deny.test
@@ -14,9 +14,9 @@
# Temporary failure in name resolution
call mtr.add_suppression("santa.claus.ipv4.example.com");
-flush status;
-flush hosts;
-select @@global.debug;
+# Enforce a clean state
+--source ../include/wait_for_pfs_thread_count.inc
+--source ../include/hostcache_set_state.inc
connect (con1,"127.0.0.1",root,,test,$MASTER_MYPORT,);
select "Con1 is alive";
@@ -24,7 +24,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_error_again";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_error_again";
--disable_query_log
--error ER_HOST_NOT_PRIVILEGED
@@ -42,5 +42,5 @@ connect (con3,"127.0.0.1",root,,test,$MASTER_MYPORT,);
--connection default
--source ../include/hostcache_dump.inc
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_bad_allow.test b/mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_bad_allow.test
index 6de091eae67..4bf041b1ada 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_bad_allow.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_bad_allow.test
@@ -16,9 +16,9 @@
# [Note] - 192.0.2.127
call mtr.add_suppression("santa.claus.ipv4.example.com");
-flush status;
-flush hosts;
-select @@global.debug;
+# Enforce a clean state
+--source ../include/wait_for_pfs_thread_count.inc
+--source ../include/hostcache_set_state.inc
grant select on test.* to 'root'@'192.0.2.4';
@@ -28,7 +28,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_bad_ipv4";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_bad_ipv4";
connect (con2,"127.0.0.1",root,,test,$MASTER_MYPORT,);
select "Con2 is alive";
@@ -49,5 +49,5 @@ disconnect con3;
revoke select on test.* from 'root'@'192.0.2.4';
drop user 'root'@'192.0.2.4';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_bad_deny.test b/mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_bad_deny.test
index c5a9f33177d..2f5f12fe0ab 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_bad_deny.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_bad_deny.test
@@ -16,9 +16,9 @@
# [Note] - 192.0.2.127
call mtr.add_suppression("santa.claus.ipv4.example.com");
-flush status;
-flush hosts;
-select @@global.debug;
+# Enforce a clean state
+--source ../include/wait_for_pfs_thread_count.inc
+--source ../include/hostcache_set_state.inc
connect (con1,"127.0.0.1",root,,test,$MASTER_MYPORT,);
select "Con1 is alive";
@@ -26,7 +26,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_bad_ipv4";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_bad_ipv4";
--disable_query_log
--error ER_HOST_NOT_PRIVILEGED
@@ -44,5 +44,5 @@ connect (con3,"127.0.0.1",root,,test,$MASTER_MYPORT,);
--connection default
--source ../include/hostcache_dump.inc
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_good_allow.test b/mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_good_allow.test
index e479944183b..9931fa5ffba 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_good_allow.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_good_allow.test
@@ -14,9 +14,9 @@
# no reverse address mapping.
call mtr.add_suppression("192.0.2.4");
-flush status;
-flush hosts;
-select @@global.debug;
+# Enforce a clean state
+--source ../include/wait_for_pfs_thread_count.inc
+--source ../include/hostcache_set_state.inc
grant select on test.* to 'root'@'santa.claus.ipv4.example.com';
@@ -26,7 +26,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4";
connect (con2,"127.0.0.1",root,,test,$MASTER_MYPORT,);
select "Con2 is alive";
@@ -47,5 +47,5 @@ disconnect con3;
revoke select on test.* from 'root'@'santa.claus.ipv4.example.com';
drop user 'root'@'santa.claus.ipv4.example.com';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_good_deny.test b/mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_good_deny.test
index cf31acf1da2..b46661c6852 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_good_deny.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_good_deny.test
@@ -14,9 +14,9 @@
# no reverse address mapping.
call mtr.add_suppression("192.0.2.4");
-flush status;
-flush hosts;
-select @@global.debug;
+# Enforce a clean state
+--source ../include/wait_for_pfs_thread_count.inc
+--source ../include/hostcache_set_state.inc
connect (con1,"127.0.0.1",root,,test,$MASTER_MYPORT,);
select "Con1 is alive";
@@ -24,7 +24,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4";
--disable_query_log
--error ER_HOST_NOT_PRIVILEGED
@@ -42,5 +42,5 @@ connect (con3,"127.0.0.1",root,,test,$MASTER_MYPORT,);
--connection default
--source ../include/hostcache_dump.inc
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_noname_allow.test b/mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_noname_allow.test
index 4932dd0a924..283d3a5841d 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_noname_allow.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_noname_allow.test
@@ -14,9 +14,9 @@
# Name or service not known
call mtr.add_suppression("santa.claus.ipv4.example.com");
-flush status;
-flush hosts;
-select @@global.debug;
+# Enforce a clean state
+--source ../include/wait_for_pfs_thread_count.inc
+--source ../include/hostcache_set_state.inc
grant select on test.* to 'root'@'santa.claus.ipv4.example.com';
grant select on test.* to 'root'@'192.0.2.4';
@@ -27,7 +27,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_error_noname";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_error_noname";
connect (con2,"127.0.0.1",root,,test,$MASTER_MYPORT,);
select "Con2 is alive";
@@ -50,5 +50,5 @@ revoke select on test.* from 'root'@'192.0.2.4';
drop user 'root'@'santa.claus.ipv4.example.com';
drop user 'root'@'192.0.2.4';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_noname_deny.test b/mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_noname_deny.test
index 0c10c53d933..eb69c2a5365 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_noname_deny.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv4_addrinfo_noname_deny.test
@@ -14,9 +14,9 @@
# Name or service not known
call mtr.add_suppression("santa.claus.ipv4.example.com");
-flush status;
-flush hosts;
-select @@global.debug;
+# Enforce a clean state
+--source ../include/wait_for_pfs_thread_count.inc
+--source ../include/hostcache_set_state.inc
connect (con1,"127.0.0.1",root,,test,$MASTER_MYPORT,);
select "Con1 is alive";
@@ -24,7 +24,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_error_noname";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_error_noname";
--disable_query_log
--error ER_HOST_NOT_PRIVILEGED
@@ -42,5 +42,5 @@ connect (con3,"127.0.0.1",root,,test,$MASTER_MYPORT,);
--connection default
--source ../include/hostcache_dump.inc
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv4_auth_plugin.test b/mysql-test/suite/perfschema/t/hostcache_ipv4_auth_plugin.test
index 2ce251bb77b..e4903f0aa33 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv4_auth_plugin.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv4_auth_plugin.test
@@ -13,9 +13,9 @@
--source include/have_perfschema.inc
--source include/have_plugin_auth.inc
-flush status;
-flush hosts;
-select @@global.debug;
+# Enforce a clean state
+--source ../include/wait_for_pfs_thread_count.inc
+--source ../include/hostcache_set_state.inc
CREATE USER 'plug'@'santa.claus.ipv4.example.com'
IDENTIFIED WITH 'test_plugin_server' AS 'plug_dest';
@@ -29,7 +29,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4";
uninstall plugin test_plugin_server;
--disable_query_log
@@ -48,8 +48,8 @@ connect (con3,"127.0.0.1",plug,plug_dest,test,$MASTER_MYPORT,,,auth_test_plugin)
--connection default
--source ../include/hostcache_dump.inc
---replace_result $PLUGIN_AUTH PLUGIN_AUTH
-eval install plugin test_plugin_server soname '$PLUGIN_AUTH';
+--replace_result $AUTH_TEST_PLUGIN_SO PLUGIN_AUTH
+eval install plugin test_plugin_server soname '$AUTH_TEST_PLUGIN_SO';
--disable_query_log
--error ER_ACCESS_DENIED_ERROR
@@ -85,5 +85,5 @@ REVOKE PROXY ON 'plug_dest'@'santa.claus.ipv4.example.com'
DROP USER 'plug'@'santa.claus.ipv4.example.com';
DROP USER 'plug_dest'@'santa.claus.ipv4.example.com';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv4_blocked.test b/mysql-test/suite/perfschema/t/hostcache_ipv4_blocked.test
index fcfb1b9631b..0917fbf2aa8 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv4_blocked.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv4_blocked.test
@@ -12,11 +12,10 @@
--source include/have_debug.inc
--source include/have_perfschema.inc
-flush status;
-flush hosts;
-flush user_resources;
-select @@global.debug;
-select @@global.max_connect_errors;
+# Enforce a clean state
+--source ../include/wait_for_pfs_thread_count.inc
+--source ../include/hostcache_set_state.inc
+
select @@global.max_connect_errors into @saved_max_connect_errors;
set global max_connect_errors = 2;
@@ -29,7 +28,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4,native_password_bad_reply";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4,native_password_bad_reply";
--disable_query_log
--error ER_ACCESS_DENIED_ERROR
@@ -86,5 +85,5 @@ revoke select on test.* from 'root'@'santa.claus.ipv4.example.com';
drop user 'root'@'santa.claus.ipv4.example.com';
set global max_connect_errors = @saved_max_connect_errors;
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv4_format.test b/mysql-test/suite/perfschema/t/hostcache_ipv4_format.test
index a5a06171be7..4766dbef778 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv4_format.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv4_format.test
@@ -13,9 +13,9 @@
# Name or service not known
call mtr.add_suppression("192.0.2.4");
-flush status;
-flush hosts;
-select @@global.debug;
+# Enforce a clean state
+--source ../include/wait_for_pfs_thread_count.inc
+--source ../include/hostcache_set_state.inc
connect (con1,"127.0.0.1",root,,test,$MASTER_MYPORT,);
select "Con1 is alive";
@@ -23,7 +23,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_format_ipv4";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_format_ipv4";
--disable_query_log
--error ER_HOST_NOT_PRIVILEGED
@@ -41,5 +41,5 @@ connect (con3,"127.0.0.1",root,,test,$MASTER_MYPORT,);
--connection default
--source ../include/hostcache_dump.inc
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv4_max_con-master.opt b/mysql-test/suite/perfschema/t/hostcache_ipv4_max_con-master.opt
new file mode 100644
index 00000000000..87ad0bc3f2d
--- /dev/null
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv4_max_con-master.opt
@@ -0,0 +1 @@
+--max-user-connections=1024
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv4_max_con.test b/mysql-test/suite/perfschema/t/hostcache_ipv4_max_con.test
index 98d9ccbe83b..3bf5ef3b68d 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv4_max_con.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv4_max_con.test
@@ -14,14 +14,10 @@
# Enforce a clean state
--source ../include/wait_for_pfs_thread_count.inc
+--source ../include/hostcache_set_state.inc
-flush status;
-flush hosts;
-flush user_resources;
-select @@global.debug;
-select @@global.max_user_connections;
-select @@global.max_connections;
select @@global.max_connections into @saved_max_connections;
+select @@global.max_user_connections into @saved_max_user_connections;
create user 'quota'@'santa.claus.ipv4.example.com';
grant select on test.* to 'quota'@'santa.claus.ipv4.example.com';
@@ -34,7 +30,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4";
connect (con2a,"127.0.0.1",quota,,test,$MASTER_MYPORT,);
select "Con2a is alive";
@@ -242,9 +238,10 @@ disconnect con5b;
--source ../include/wait_for_pfs_thread_count.inc
set global max_connections = @saved_max_connections;
+set global max_user_connections = @saved_max_user_connections;
# revoke all privileges on test.* from 'quota'@'santa.claus.ipv4.example.com';
drop user 'quota'@'santa.claus.ipv4.example.com';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv4_nameinfo_again_allow.test b/mysql-test/suite/perfschema/t/hostcache_ipv4_nameinfo_again_allow.test
index de30b4a98d3..bce65b61cfc 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv4_nameinfo_again_allow.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv4_nameinfo_again_allow.test
@@ -14,9 +14,9 @@
# Temporary failure in name resolution
call mtr.add_suppression("192.0.2.4");
-flush status;
-flush hosts;
-select @@global.debug;
+# Enforce a clean state
+--source ../include/wait_for_pfs_thread_count.inc
+--source ../include/hostcache_set_state.inc
grant select on test.* to 'root'@'192.0.2.4';
grant select on test.* to 'root'@'santa.claus.ipv4.example.com';
@@ -27,7 +27,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_error_again";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_error_again";
connect (con2,"127.0.0.1",root,,test,$MASTER_MYPORT,);
select "Con2 is alive";
@@ -45,7 +45,7 @@ disconnect con3;
--connection default
--source ../include/hostcache_dump.inc
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4";
connect (con4,"127.0.0.1",root,,test,$MASTER_MYPORT,);
select "Con4 is alive";
@@ -68,5 +68,5 @@ revoke select on test.* from 'root'@'santa.claus.ipv4.example.com';
drop user 'root'@'192.0.2.4';
drop user 'root'@'santa.claus.ipv4.example.com';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv4_nameinfo_again_deny.test b/mysql-test/suite/perfschema/t/hostcache_ipv4_nameinfo_again_deny.test
index 4498d5921ac..6152399503b 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv4_nameinfo_again_deny.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv4_nameinfo_again_deny.test
@@ -14,9 +14,9 @@
# Temporary failure in name resolution
call mtr.add_suppression("192.0.2.4");
-flush status;
-flush hosts;
-select @@global.debug;
+# Enforce a clean state
+--source ../include/wait_for_pfs_thread_count.inc
+--source ../include/hostcache_set_state.inc
connect (con1,"127.0.0.1",root,,test,$MASTER_MYPORT,);
select "Con1 is alive";
@@ -24,7 +24,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_error_again";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_error_again";
--disable_query_log
--error ER_HOST_NOT_PRIVILEGED
@@ -42,7 +42,7 @@ connect (con3,"127.0.0.1",root,,test,$MASTER_MYPORT,);
--connection default
--source ../include/hostcache_dump.inc
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4";
--disable_query_log
--error ER_HOST_NOT_PRIVILEGED
@@ -60,5 +60,5 @@ connect (con5,"127.0.0.1",root,,test,$MASTER_MYPORT,);
--connection default
--source ../include/hostcache_dump.inc
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv4_nameinfo_noname_allow.test b/mysql-test/suite/perfschema/t/hostcache_ipv4_nameinfo_noname_allow.test
index c5a5e4d76a0..52b696831af 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv4_nameinfo_noname_allow.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv4_nameinfo_noname_allow.test
@@ -14,9 +14,9 @@
# no reverse address mapping.
call mtr.add_suppression("192.0.2.4");
-flush status;
-flush hosts;
-select @@global.debug;
+# Enforce a clean state
+--source ../include/wait_for_pfs_thread_count.inc
+--source ../include/hostcache_set_state.inc
grant select on test.* to 'root'@'192.0.2.4';
@@ -26,7 +26,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_error_noname";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_error_noname";
connect (con2,"127.0.0.1",root,,test,$MASTER_MYPORT,);
select "Con2 is alive";
@@ -47,5 +47,5 @@ disconnect con3;
revoke select on test.* from 'root'@'192.0.2.4';
drop user 'root'@'192.0.2.4';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv4_nameinfo_noname_deny.test b/mysql-test/suite/perfschema/t/hostcache_ipv4_nameinfo_noname_deny.test
index 9684f676818..72bb7fac6af 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv4_nameinfo_noname_deny.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv4_nameinfo_noname_deny.test
@@ -14,9 +14,9 @@
# no reverse address mapping.
call mtr.add_suppression("192.0.2.4");
-flush status;
-flush hosts;
-select @@global.debug;
+# Enforce a clean state
+--source ../include/wait_for_pfs_thread_count.inc
+--source ../include/hostcache_set_state.inc
connect (con1,"127.0.0.1",root,,test,$MASTER_MYPORT,);
select "Con1 is alive";
@@ -24,7 +24,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_error_noname";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_error_noname";
--disable_query_log
--error ER_HOST_NOT_PRIVILEGED
@@ -42,5 +42,5 @@ connect (con3,"127.0.0.1",root,,test,$MASTER_MYPORT,);
--connection default
--source ../include/hostcache_dump.inc
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv4_passwd.test b/mysql-test/suite/perfschema/t/hostcache_ipv4_passwd.test
index c77fc625954..b39f3478146 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv4_passwd.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv4_passwd.test
@@ -12,10 +12,9 @@
# Enforce a clean state
--source ../include/wait_for_pfs_thread_count.inc
-flush status;
-flush hosts;
-flush user_resources;
-select @@global.debug;
+# Enforce a clean state
+--source ../include/wait_for_pfs_thread_count.inc
+--source ../include/hostcache_set_state.inc
create user 'user_without'@'santa.claus.ipv4.example.com';
create user 'user_with'@'santa.claus.ipv4.example.com'
@@ -29,7 +28,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4";
--disable_query_log
--error ER_ACCESS_DENIED_ERROR
@@ -82,5 +81,5 @@ connect (con2f,"127.0.0.1",user_with,wrong_password,test,$MASTER_MYPORT,);
drop user 'user_with'@'santa.claus.ipv4.example.com';
drop user 'user_without'@'santa.claus.ipv4.example.com';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv4_ssl.test b/mysql-test/suite/perfschema/t/hostcache_ipv4_ssl.test
index 3e9161302d1..de58219d490 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv4_ssl.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv4_ssl.test
@@ -11,11 +11,7 @@
# Enforce a clean state
--source ../include/wait_for_pfs_thread_count.inc
-
-flush status;
-flush hosts;
-flush user_resources;
-select @@global.debug;
+--source ../include/hostcache_set_state.inc
create user 'user_ssl'@'santa.claus.ipv4.example.com';
create user 'user_ssl_x509'@'santa.claus.ipv4.example.com'
@@ -33,7 +29,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv4,getnameinfo_fake_ipv4,getaddrinfo_fake_good_ipv4";
--disable_query_log
--error ER_ACCESS_DENIED_ERROR
@@ -70,5 +66,5 @@ connect (con2d,"127.0.0.1",user_ssl_x509,good_password,test,$MASTER_MYPORT,,SSL)
drop user 'user_ssl'@'santa.claus.ipv4.example.com';
drop user 'user_ssl_x509'@'santa.claus.ipv4.example.com';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_again_allow.test b/mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_again_allow.test
index 14c92597f95..ae58f4089ed 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_again_allow.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_again_allow.test
@@ -15,9 +15,9 @@
# Temporary failure in name resolution
call mtr.add_suppression("santa.claus.ipv6.example.com");
-flush status;
-flush hosts;
-select @@global.debug;
+# Enforce a clean state
+--source ../include/wait_for_pfs_thread_count.inc
+--source ../include/hostcache_set_state.inc
grant select on test.* to 'root'@'2001:db8::6:6';
@@ -27,7 +27,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_error_again";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_error_again";
connect (con2,"::1",root,,test,$MASTER_MYPORT,);
select "Con2 is alive";
@@ -48,5 +48,5 @@ disconnect con3;
revoke select on test.* from 'root'@'2001:db8::6:6';
drop user 'root'@'2001:db8::6:6';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_again_deny.test b/mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_again_deny.test
index 1b2f3f865dc..8c408b160a6 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_again_deny.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_again_deny.test
@@ -15,9 +15,9 @@
# Temporary failure in name resolution
call mtr.add_suppression("santa.claus.ipv6.example.com");
-flush status;
-flush hosts;
-select @@global.debug;
+# Enforce a clean state
+--source ../include/wait_for_pfs_thread_count.inc
+--source ../include/hostcache_set_state.inc
connect (con1,"::1",root,,test,$MASTER_MYPORT,);
select "Con1 is alive";
@@ -25,7 +25,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_error_again";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_error_again";
--disable_query_log
--error ER_HOST_NOT_PRIVILEGED
@@ -43,5 +43,5 @@ connect (con3,"::1",root,,test,$MASTER_MYPORT,);
--connection default
--source ../include/hostcache_dump.inc
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_bad_allow.test b/mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_bad_allow.test
index ad478a763e5..ccd0ae383ee 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_bad_allow.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_bad_allow.test
@@ -17,9 +17,9 @@
# [Note] - 2001:db8::6:7f
call mtr.add_suppression("santa.claus.ipv6.example.com");
-flush status;
-flush hosts;
-select @@global.debug;
+# Enforce a clean state
+--source ../include/wait_for_pfs_thread_count.inc
+--source ../include/hostcache_set_state.inc
grant select on test.* to 'root'@'2001:db8::6:6';
@@ -29,7 +29,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_bad_ipv6";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_bad_ipv6";
connect (con2,"::1",root,,test,$MASTER_MYPORT,);
select "Con2 is alive";
@@ -50,5 +50,5 @@ disconnect con3;
revoke select on test.* from 'root'@'2001:db8::6:6';
drop user 'root'@'2001:db8::6:6';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_bad_deny.test b/mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_bad_deny.test
index 758fc973a34..72d5d693a1b 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_bad_deny.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_bad_deny.test
@@ -17,10 +17,9 @@
# [Note] - 2001:db8::6:7f
call mtr.add_suppression("santa.claus.ipv6.example.com");
-
-flush status;
-flush hosts;
-select @@global.debug;
+# Enforce a clean state
+--source ../include/wait_for_pfs_thread_count.inc
+--source ../include/hostcache_set_state.inc
connect (con1,"::1",root,,test,$MASTER_MYPORT,);
select "Con1 is alive";
@@ -28,7 +27,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_bad_ipv6";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_bad_ipv6";
--disable_query_log
--error ER_HOST_NOT_PRIVILEGED
@@ -46,5 +45,5 @@ connect (con3,"::1",root,,test,$MASTER_MYPORT,);
--connection default
--source ../include/hostcache_dump.inc
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_good_allow.test b/mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_good_allow.test
index 19a3c2d11f3..c253e4b77d5 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_good_allow.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_good_allow.test
@@ -15,9 +15,9 @@
# no reverse address mapping.
call mtr.add_suppression("192.0.2.4");
-flush status;
-flush hosts;
-select @@global.debug;
+# Enforce a clean state
+--source ../include/wait_for_pfs_thread_count.inc
+--source ../include/hostcache_set_state.inc
grant select on test.* to 'root'@'santa.claus.ipv6.example.com';
@@ -27,7 +27,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6";
connect (con2,"::1",root,,test,$MASTER_MYPORT,);
select "Con2 is alive";
@@ -48,5 +48,5 @@ disconnect con3;
revoke select on test.* from 'root'@'santa.claus.ipv6.example.com';
drop user 'root'@'santa.claus.ipv6.example.com';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_good_deny.test b/mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_good_deny.test
index 543f056534a..096b4b11eb4 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_good_deny.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_good_deny.test
@@ -11,9 +11,9 @@
--source include/have_ipv6.inc
--source include/have_perfschema.inc
-flush status;
-flush hosts;
-select @@global.debug;
+# Enforce a clean state
+--source ../include/wait_for_pfs_thread_count.inc
+--source ../include/hostcache_set_state.inc
connect (con1,"::1",root,,test,$MASTER_MYPORT,);
select "Con1 is alive";
@@ -21,7 +21,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6";
--disable_query_log
--error ER_HOST_NOT_PRIVILEGED
@@ -39,5 +39,5 @@ connect (con3,"::1",root,,test,$MASTER_MYPORT,);
--connection default
--source ../include/hostcache_dump.inc
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_noname_allow.test b/mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_noname_allow.test
index be304140ba2..575dab9a337 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_noname_allow.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_noname_allow.test
@@ -15,9 +15,9 @@
# Name or service not known
call mtr.add_suppression("santa.claus.ipv6.example.com");
-flush status;
-flush hosts;
-select @@global.debug;
+# Enforce a clean state
+--source ../include/wait_for_pfs_thread_count.inc
+--source ../include/hostcache_set_state.inc
grant select on test.* to 'root'@'santa.claus.ipv6.example.com';
grant select on test.* to 'root'@'2001:db8::6:6';
@@ -28,7 +28,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_error_noname";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_error_noname";
connect (con2,"::1",root,,test,$MASTER_MYPORT,);
select "Con2 is alive";
@@ -51,5 +51,5 @@ revoke select on test.* from 'root'@'2001:db8::6:6';
drop user 'root'@'santa.claus.ipv6.example.com';
drop user 'root'@'2001:db8::6:6';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_noname_deny.test b/mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_noname_deny.test
index 101abc86a20..f6e5fa118df 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_noname_deny.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv6_addrinfo_noname_deny.test
@@ -15,9 +15,9 @@
# Name or service not known
call mtr.add_suppression("santa.claus.ipv6.example.com");
-flush status;
-flush hosts;
-select @@global.debug;
+# Enforce a clean state
+--source ../include/wait_for_pfs_thread_count.inc
+--source ../include/hostcache_set_state.inc
connect (con1,"::1",root,,test,$MASTER_MYPORT,);
select "Con1 is alive";
@@ -25,7 +25,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_error_noname";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_error_noname";
--disable_query_log
--error ER_HOST_NOT_PRIVILEGED
@@ -43,5 +43,5 @@ connect (con3,"::1",root,,test,$MASTER_MYPORT,);
--connection default
--source ../include/hostcache_dump.inc
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv6_auth_plugin.test b/mysql-test/suite/perfschema/t/hostcache_ipv6_auth_plugin.test
index 89bbc016224..1d5c7c546e9 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv6_auth_plugin.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv6_auth_plugin.test
@@ -14,9 +14,9 @@
--source include/have_perfschema.inc
--source include/have_plugin_auth.inc
-flush status;
-flush hosts;
-select @@global.debug;
+# Enforce a clean state
+--source ../include/wait_for_pfs_thread_count.inc
+--source ../include/hostcache_set_state.inc
CREATE USER 'plug'@'santa.claus.ipv6.example.com'
IDENTIFIED WITH 'test_plugin_server' AS 'plug_dest';
@@ -30,7 +30,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6";
uninstall plugin test_plugin_server;
--disable_query_log
@@ -49,9 +49,8 @@ connect (con3,"::1",plug,plug_dest,test,$MASTER_MYPORT,,,auth_test_plugin);
--connection default
--source ../include/hostcache_dump.inc
-#--replace_result $PLUGIN_AUTH PLUGIN_AUTH
-#eval install plugin test_plugin_server soname '$PLUGIN_AUTH';
-install plugin test_plugin_server soname 'auth_test_plugin';
+--replace_result $AUTH_TEST_PLUGIN_SO PLUGIN_AUTH
+eval install plugin test_plugin_server soname '$AUTH_TEST_PLUGIN_SO';
--disable_query_log
--error ER_ACCESS_DENIED_ERROR
@@ -87,5 +86,5 @@ REVOKE PROXY ON 'plug_dest'@'santa.claus.ipv6.example.com'
DROP USER 'plug'@'santa.claus.ipv6.example.com';
DROP USER 'plug_dest'@'santa.claus.ipv6.example.com';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv6_blocked.test b/mysql-test/suite/perfschema/t/hostcache_ipv6_blocked.test
index ef276db982e..9ed365bf3c8 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv6_blocked.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv6_blocked.test
@@ -13,11 +13,10 @@
--source include/have_ipv6.inc
--source include/have_perfschema.inc
-flush status;
-flush hosts;
-flush user_resources;
-select @@global.debug;
-select @@global.max_connect_errors;
+# Enforce a clean state
+--source ../include/wait_for_pfs_thread_count.inc
+--source ../include/hostcache_set_state.inc
+
select @@global.max_connect_errors into @saved_max_connect_errors;
set global max_connect_errors = 2;
@@ -30,7 +29,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6,native_password_bad_reply";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6,native_password_bad_reply";
--disable_query_log
--error ER_ACCESS_DENIED_ERROR
@@ -87,5 +86,5 @@ revoke select on test.* from 'root'@'santa.claus.ipv6.example.com';
drop user 'root'@'santa.claus.ipv6.example.com';
set global max_connect_errors = @saved_max_connect_errors;
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv6_max_con-master.opt b/mysql-test/suite/perfschema/t/hostcache_ipv6_max_con-master.opt
index a0a6079ec16..143ad44c2f2 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv6_max_con-master.opt
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv6_max_con-master.opt
@@ -1 +1,2 @@
--bind-addr=::
+--max-user-connections=1024
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv6_max_con.test b/mysql-test/suite/perfschema/t/hostcache_ipv6_max_con.test
index c8d6d524cb0..d4adc3e0d00 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv6_max_con.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv6_max_con.test
@@ -15,14 +15,10 @@
# Enforce a clean state
--source ../include/wait_for_pfs_thread_count.inc
+--source ../include/hostcache_set_state.inc
-flush status;
-flush hosts;
-flush user_resources;
-select @@global.debug;
-select @@global.max_user_connections;
-select @@global.max_connections;
select @@global.max_connections into @saved_max_connections;
+select @@global.max_user_connections into @saved_max_user_connections;
create user 'quota'@'santa.claus.ipv6.example.com';
grant select on test.* to 'quota'@'santa.claus.ipv6.example.com';
@@ -35,7 +31,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6";
connect (con2a,"::1",quota,,test,$MASTER_MYPORT,);
select "Con2a is alive";
@@ -224,9 +220,10 @@ disconnect con5b;
--source ../include/wait_for_pfs_thread_count.inc
set global max_connections = @saved_max_connections;
+set global max_user_connections = @saved_max_user_connections;
# revoke all privileges on test.* from 'quota'@'santa.claus.ipv6.example.com';
drop user 'quota'@'santa.claus.ipv6.example.com';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv6_nameinfo_again_allow.test b/mysql-test/suite/perfschema/t/hostcache_ipv6_nameinfo_again_allow.test
index ecf18ab55f4..e396dbbad3c 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv6_nameinfo_again_allow.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv6_nameinfo_again_allow.test
@@ -15,9 +15,9 @@
# Temporary failure in name resolution
call mtr.add_suppression("2001:db8::6:6");
-flush status;
-flush hosts;
-select @@global.debug;
+# Enforce a clean state
+--source ../include/wait_for_pfs_thread_count.inc
+--source ../include/hostcache_set_state.inc
grant select on test.* to 'root'@'2001:db8::6:6';
grant select on test.* to 'root'@'santa.claus.ipv6.example.com';
@@ -28,7 +28,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_error_again";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_error_again";
connect (con2,"::1",root,,test,$MASTER_MYPORT,);
select "Con2 is alive";
@@ -46,7 +46,7 @@ disconnect con3;
--connection default
--source ../include/hostcache_dump.inc
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6";
connect (con4,"::1",root,,test,$MASTER_MYPORT,);
select "Con4 is alive";
@@ -69,5 +69,5 @@ revoke select on test.* from 'root'@'santa.claus.ipv6.example.com';
drop user 'root'@'2001:db8::6:6';
drop user 'root'@'santa.claus.ipv6.example.com';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv6_nameinfo_again_deny.test b/mysql-test/suite/perfschema/t/hostcache_ipv6_nameinfo_again_deny.test
index 510318fae53..cc7eb0b566f 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv6_nameinfo_again_deny.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv6_nameinfo_again_deny.test
@@ -15,9 +15,9 @@
# Temporary failure in name resolution
call mtr.add_suppression("2001:db8::6:6");
-flush status;
-flush hosts;
-select @@global.debug;
+# Enforce a clean state
+--source ../include/wait_for_pfs_thread_count.inc
+--source ../include/hostcache_set_state.inc
connect (con1,"::1",root,,test,$MASTER_MYPORT,);
select "Con1 is alive";
@@ -25,7 +25,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_error_again";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_error_again";
--disable_query_log
--error ER_HOST_NOT_PRIVILEGED
@@ -43,7 +43,7 @@ connect (con3,"::1",root,,test,$MASTER_MYPORT,);
--connection default
--source ../include/hostcache_dump.inc
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6";
--disable_query_log
--error ER_HOST_NOT_PRIVILEGED
@@ -61,5 +61,5 @@ connect (con5,"::1",root,,test,$MASTER_MYPORT,);
--connection default
--source ../include/hostcache_dump.inc
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv6_nameinfo_noname_allow.test b/mysql-test/suite/perfschema/t/hostcache_ipv6_nameinfo_noname_allow.test
index 00b279aabe8..80f07989212 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv6_nameinfo_noname_allow.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv6_nameinfo_noname_allow.test
@@ -15,9 +15,9 @@
# Name or service not known
call mtr.add_suppression("2001:db8::6:6");
-flush status;
-flush hosts;
-select @@global.debug;
+# Enforce a clean state
+--source ../include/wait_for_pfs_thread_count.inc
+--source ../include/hostcache_set_state.inc
grant select on test.* to 'root'@'2001:db8::6:6';
@@ -27,7 +27,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_error_noname";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_error_noname";
connect (con2,"::1",root,,test,$MASTER_MYPORT,);
select "Con2 is alive";
@@ -48,5 +48,5 @@ disconnect con3;
revoke select on test.* from 'root'@'2001:db8::6:6';
drop user 'root'@'2001:db8::6:6';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv6_nameinfo_noname_deny.test b/mysql-test/suite/perfschema/t/hostcache_ipv6_nameinfo_noname_deny.test
index 03a207fccfb..0d11e433b58 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv6_nameinfo_noname_deny.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv6_nameinfo_noname_deny.test
@@ -15,9 +15,9 @@
# Name or service not known
call mtr.add_suppression("2001:db8::6:6");
-flush status;
-flush hosts;
-select @@global.debug;
+# Enforce a clean state
+--source ../include/wait_for_pfs_thread_count.inc
+--source ../include/hostcache_set_state.inc
connect (con1,"::1",root,,test,$MASTER_MYPORT,);
select "Con1 is alive";
@@ -25,7 +25,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_error_noname";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_error_noname";
--disable_query_log
--error ER_HOST_NOT_PRIVILEGED
@@ -43,5 +43,5 @@ connect (con3,"::1",root,,test,$MASTER_MYPORT,);
--connection default
--source ../include/hostcache_dump.inc
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv6_passwd.test b/mysql-test/suite/perfschema/t/hostcache_ipv6_passwd.test
index 9de453544bb..6dd33b9bb5b 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv6_passwd.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv6_passwd.test
@@ -12,11 +12,7 @@
# Enforce a clean state
--source ../include/wait_for_pfs_thread_count.inc
-
-flush status;
-flush hosts;
-flush user_resources;
-select @@global.debug;
+--source ../include/hostcache_set_state.inc
create user 'user_without'@'santa.claus.ipv6.example.com';
create user 'user_with'@'santa.claus.ipv6.example.com'
@@ -30,7 +26,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6";
--disable_query_log
--error ER_ACCESS_DENIED_ERROR
@@ -83,5 +79,5 @@ connect (con2f,"::1",user_with,wrong_password,test,$MASTER_MYPORT,);
drop user 'user_with'@'santa.claus.ipv6.example.com';
drop user 'user_without'@'santa.claus.ipv6.example.com';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_ipv6_ssl.test b/mysql-test/suite/perfschema/t/hostcache_ipv6_ssl.test
index ce6f825f4fb..2b30a4eaf41 100644
--- a/mysql-test/suite/perfschema/t/hostcache_ipv6_ssl.test
+++ b/mysql-test/suite/perfschema/t/hostcache_ipv6_ssl.test
@@ -12,11 +12,7 @@
# Enforce a clean state
--source ../include/wait_for_pfs_thread_count.inc
-
-flush status;
-flush hosts;
-flush user_resources;
-select @@global.debug;
+--source ../include/hostcache_set_state.inc
create user 'user_ssl'@'santa.claus.ipv6.example.com';
create user 'user_ssl_x509'@'santa.claus.ipv6.example.com'
@@ -34,7 +30,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6";
+set global debug_dbug= "+d,vio_peer_addr_fake_ipv6,getnameinfo_fake_ipv6,getaddrinfo_fake_good_ipv6";
--disable_query_log
--error ER_ACCESS_DENIED_ERROR
@@ -71,5 +67,5 @@ connect (con2d,"::1",user_ssl_x509,good_password,test,$MASTER_MYPORT,,SSL);
drop user 'user_ssl'@'santa.claus.ipv6.example.com';
drop user 'user_ssl_x509'@'santa.claus.ipv6.example.com';
-set global debug= default;
+set global debug_dbug= default;
diff --git a/mysql-test/suite/perfschema/t/hostcache_peer_addr.test b/mysql-test/suite/perfschema/t/hostcache_peer_addr.test
index 022e20a9a64..9e5c88b8bd5 100644
--- a/mysql-test/suite/perfschema/t/hostcache_peer_addr.test
+++ b/mysql-test/suite/perfschema/t/hostcache_peer_addr.test
@@ -9,9 +9,10 @@
--source include/have_debug.inc
--source include/have_perfschema.inc
-flush status;
-flush hosts;
-select @@global.debug;
+# Enforce a clean state
+--source ../include/wait_for_pfs_thread_count.inc
+--source ../include/hostcache_set_state.inc
+
show global status like "connection_errors_%";
connect (con1,"127.0.0.1",root,,test,$MASTER_MYPORT,);
@@ -20,7 +21,7 @@ select current_user();
disconnect con1;
--connection default
-set global debug= "+d,vio_peer_addr_error";
+set global debug_dbug= "+d,vio_peer_addr_error";
--disable_query_log
--error ER_BAD_HOST_ERROR
@@ -40,7 +41,7 @@ connect (con3,"127.0.0.1",root,,test,$MASTER_MYPORT,);
show global status like "connection_errors_%";
--source ../include/hostcache_dump.inc
-set global debug= default;
+set global debug_dbug= default;
flush status;
show global status like "connection_errors_%";
diff --git a/mysql-test/suite/perfschema/t/pfs_upgrade.test b/mysql-test/suite/perfschema/t/pfs_upgrade.test
deleted file mode 100644
index b4a19a66c3b..00000000000
--- a/mysql-test/suite/perfschema/t/pfs_upgrade.test
+++ /dev/null
@@ -1,10 +0,0 @@
-# Tests for PERFORMANCE_SCHEMA
-# Make sure mysql_upgrade does not destroy data in a 'performance_schema'
-# database.
-#
-
---source include/not_embedded.inc
---source include/have_perfschema.inc
---source include/have_innodb.inc
---source ../include/pfs_upgrade.inc
-
diff --git a/mysql-test/suite/perfschema/t/pfs_upgrade_event.test b/mysql-test/suite/perfschema/t/pfs_upgrade_event.test
new file mode 100644
index 00000000000..db7613052d4
--- /dev/null
+++ b/mysql-test/suite/perfschema/t/pfs_upgrade_event.test
@@ -0,0 +1,36 @@
+# Tests for PERFORMANCE_SCHEMA
+# Make sure mysql_upgrade does not destroy data in a 'performance_schema'
+# database.
+#
+
+--source include/not_embedded.inc
+--source include/have_perfschema.inc
+--source include/have_innodb.inc
+
+# Some initial settings + Preemptive cleanup
+let $MYSQLD_DATADIR= `SELECT @@datadir`;
+let $err_file= $MYSQLTEST_VARDIR/tmp/pfs_upgrade_event.err;
+let $out_file= $MYSQLTEST_VARDIR/tmp/pfs_upgrade_event.out;
+--error 0,1
+--remove_file $out_file
+--error 0,1
+--remove_file $err_file
+
+--disable_warnings
+drop event if exists test.user_event;
+--enable_warnings
+
+--echo "Testing mysql_upgrade with EVENT performance_schema.user_event"
+
+create event test.user_event on schedule every 1 day do
+ select "not supposed to be here";
+
+update mysql.event set db='performance_schema' where name='user_event';
+
+--source suite/perfschema/include/upgrade_check.inc
+
+select name from mysql.event where db='performance_schema';
+
+update mysql.event set db='test' where name='user_event';
+drop event test.user_event;
+
diff --git a/mysql-test/suite/perfschema/t/pfs_upgrade_func.test b/mysql-test/suite/perfschema/t/pfs_upgrade_func.test
new file mode 100644
index 00000000000..4f53aa1bdd1
--- /dev/null
+++ b/mysql-test/suite/perfschema/t/pfs_upgrade_func.test
@@ -0,0 +1,36 @@
+# Tests for PERFORMANCE_SCHEMA
+# Make sure mysql_upgrade does not destroy data in a 'performance_schema'
+# database.
+#
+
+--source include/not_embedded.inc
+--source include/have_perfschema.inc
+--source include/have_innodb.inc
+
+# Some initial settings + Preemptive cleanup
+let $MYSQLD_DATADIR= `SELECT @@datadir`;
+let $err_file= $MYSQLTEST_VARDIR/tmp/pfs_upgrade_func.err;
+let $out_file= $MYSQLTEST_VARDIR/tmp/pfs_upgrade_func.out;
+--error 0,1
+--remove_file $out_file
+--error 0,1
+--remove_file $err_file
+
+--disable_warnings
+drop function if exists test.user_func;
+--enable_warnings
+
+--echo "Testing mysql_upgrade with FUNCTION performance_schema.user_func"
+
+create function test.user_func() returns integer
+ return 0;
+
+update mysql.proc set db='performance_schema' where name='user_func';
+
+--source suite/perfschema/include/upgrade_check.inc
+
+select name from mysql.proc where db='performance_schema';
+
+update mysql.proc set db='test' where name='user_func';
+drop function test.user_func;
+
diff --git a/mysql-test/suite/perfschema/t/pfs_upgrade_proc.test b/mysql-test/suite/perfschema/t/pfs_upgrade_proc.test
new file mode 100644
index 00000000000..432cee6fb1a
--- /dev/null
+++ b/mysql-test/suite/perfschema/t/pfs_upgrade_proc.test
@@ -0,0 +1,36 @@
+# Tests for PERFORMANCE_SCHEMA
+# Make sure mysql_upgrade does not destroy data in a 'performance_schema'
+# database.
+#
+
+--source include/not_embedded.inc
+--source include/have_perfschema.inc
+--source include/have_innodb.inc
+
+# Some initial settings + Preemptive cleanup
+let $MYSQLD_DATADIR= `SELECT @@datadir`;
+let $err_file= $MYSQLTEST_VARDIR/tmp/pfs_upgrade_proc.err;
+let $out_file= $MYSQLTEST_VARDIR/tmp/pfs_upgrade_proc.out;
+--error 0,1
+--remove_file $out_file
+--error 0,1
+--remove_file $err_file
+
+--disable_warnings
+drop procedure if exists test.user_proc;
+--enable_warnings
+
+--echo "Testing mysql_upgrade with PROCEDURE performance_schema.user_proc"
+
+create procedure test.user_proc()
+ select "Not supposed to be here";
+
+update mysql.proc set db='performance_schema' where name='user_proc';
+
+--source suite/perfschema/include/upgrade_check.inc
+
+select name from mysql.proc where db='performance_schema';
+
+update mysql.proc set db='test' where name='user_proc';
+drop procedure test.user_proc;
+
diff --git a/mysql-test/suite/perfschema/t/pfs_upgrade_table.test b/mysql-test/suite/perfschema/t/pfs_upgrade_table.test
new file mode 100644
index 00000000000..c68e156a2db
--- /dev/null
+++ b/mysql-test/suite/perfschema/t/pfs_upgrade_table.test
@@ -0,0 +1,44 @@
+# Tests for PERFORMANCE_SCHEMA
+# Make sure mysql_upgrade does not destroy data in a 'performance_schema'
+# database.
+#
+
+--source include/not_embedded.inc
+--source include/have_perfschema.inc
+--source include/have_innodb.inc
+
+# Some initial settings + Preemptive cleanup
+let $MYSQLD_DATADIR= `SELECT @@datadir`;
+let $err_file= $MYSQLTEST_VARDIR/tmp/pfs_upgrade_table.err;
+let $out_file= $MYSQLTEST_VARDIR/tmp/pfs_upgrade_table.out;
+--error 0,1
+--remove_file $out_file
+--error 0,1
+--remove_file $err_file
+
+--disable_warnings
+drop table if exists test.user_table;
+--enable_warnings
+
+--echo "Testing mysql_upgrade with TABLE performance_schema.user_table"
+
+create table test.user_table(a int);
+
+--error 0,1
+--remove_file $MYSQLD_DATADIR/performance_schema/user_table.frm
+--copy_file $MYSQLD_DATADIR/test/user_table.frm $MYSQLD_DATADIR/performance_schema/user_table.frm
+
+# Make sure the table is visible
+use performance_schema;
+show tables like "user_table";
+
+--source suite/perfschema/include/upgrade_check.inc
+
+# Make sure the table is still visible
+show tables like "user_table";
+
+use test;
+
+--remove_file $MYSQLD_DATADIR/performance_schema/user_table.frm
+drop table test.user_table;
+
diff --git a/mysql-test/suite/perfschema/t/pfs_upgrade_view.test b/mysql-test/suite/perfschema/t/pfs_upgrade_view.test
new file mode 100644
index 00000000000..38ce377f235
--- /dev/null
+++ b/mysql-test/suite/perfschema/t/pfs_upgrade_view.test
@@ -0,0 +1,44 @@
+# Tests for PERFORMANCE_SCHEMA
+# Make sure mysql_upgrade does not destroy data in a 'performance_schema'
+# database.
+#
+
+--source include/not_embedded.inc
+--source include/have_perfschema.inc
+--source include/have_innodb.inc
+
+# Some initial settings + Preemptive cleanup
+let $MYSQLD_DATADIR= `SELECT @@datadir`;
+let $err_file= $MYSQLTEST_VARDIR/tmp/pfs_upgrade_view.err;
+let $out_file= $MYSQLTEST_VARDIR/tmp/pfs_upgrade_view.out;
+--error 0,1
+--remove_file $out_file
+--error 0,1
+--remove_file $err_file
+
+--disable_warnings
+drop view if exists test.user_view;
+--enable_warnings
+
+--echo "Testing mysql_upgrade with VIEW performance_schema.user_view"
+
+create view test.user_view as select "Not supposed to be here";
+
+--error 0,1
+--remove_file $MYSQLD_DATADIR/performance_schema/user_view.frm
+--copy_file $MYSQLD_DATADIR/test/user_view.frm $MYSQLD_DATADIR/performance_schema/user_view.frm
+
+# Make sure the view is visible
+use performance_schema;
+show tables like "user_view";
+
+--source suite/perfschema/include/upgrade_check.inc
+
+# Make sure the view is still visible
+show tables like "user_view";
+
+use test;
+
+--remove_file $MYSQLD_DATADIR/performance_schema/user_view.frm
+drop view test.user_view;
+
diff --git a/mysql-test/suite/perfschema/t/privilege_table_io.test b/mysql-test/suite/perfschema/t/privilege_table_io.test
index aee8221dfc5..dc17fbcf67e 100644
--- a/mysql-test/suite/perfschema/t/privilege_table_io.test
+++ b/mysql-test/suite/perfschema/t/privilege_table_io.test
@@ -14,7 +14,13 @@
# Therefore we suppress the query_log here.
--echo # We are forced to suppress here the server response.
--disable_result_log
+optimize table mysql.host;
+optimize table mysql.user;
optimize table mysql.db;
+optimize table mysql.proxies_priv;
+optimize table mysql.tables_priv;
+optimize table mysql.procs_priv;
+optimize table mysql.servers;
--enable_result_log
# Start recording events
diff --git a/mysql-test/suite/perfschema/t/short_option_1.test b/mysql-test/suite/perfschema/t/short_option_1.test
index 3877e7b7277..b94d1685867 100644
--- a/mysql-test/suite/perfschema/t/short_option_1.test
+++ b/mysql-test/suite/perfschema/t/short_option_1.test
@@ -14,4 +14,6 @@ show variables like 'character_set_system';
show variables like 'log';
show variables like 'general_log';
+show variables like 'new';
+
show variables like 'log_warnings';
diff --git a/mysql-test/suite/perfschema/t/socket_connect.test b/mysql-test/suite/perfschema/t/socket_connect.test
index 94625d2ed06..909840144ef 100644
--- a/mysql-test/suite/perfschema/t/socket_connect.test
+++ b/mysql-test/suite/perfschema/t/socket_connect.test
@@ -9,7 +9,7 @@
--source ../include/wait_for_pfs_thread_count.inc
# Set this to enable debugging output
-let $my_socket_debug=0;
+let $my_socket_debug_dbug=0;
--echo #==============================================================================
--echo # Establish the level of IPV6 support
diff --git a/mysql-test/suite/perfschema/t/socket_instances_func.test b/mysql-test/suite/perfschema/t/socket_instances_func.test
index 5ed687a483b..12411103579 100644
--- a/mysql-test/suite/perfschema/t/socket_instances_func.test
+++ b/mysql-test/suite/perfschema/t/socket_instances_func.test
@@ -7,7 +7,7 @@
--source include/have_perfschema.inc
# Set this to enable debugging output
-let $my_socket_debug= 0;
+let $my_socket_debug_dbug= 0;
#
# Set IP address defaults with respect to IPV6 support
diff --git a/mysql-test/suite/perfschema/t/socket_instances_func_win.test b/mysql-test/suite/perfschema/t/socket_instances_func_win.test
index 4b5fc07a85a..5faf0d24ec4 100644
--- a/mysql-test/suite/perfschema/t/socket_instances_func_win.test
+++ b/mysql-test/suite/perfschema/t/socket_instances_func_win.test
@@ -7,7 +7,7 @@
--source include/have_perfschema.inc
# Set this to enable debugging output
-let $my_socket_debug= 0;
+let $my_socket_debug_dbug= 0;
#
# Set IP address defaults with respect to IPV6 support
diff --git a/mysql-test/suite/perfschema/t/socket_summary_by_event_name_func.test b/mysql-test/suite/perfschema/t/socket_summary_by_event_name_func.test
index 1c317cedcf5..5ba3af1e408 100644
--- a/mysql-test/suite/perfschema/t/socket_summary_by_event_name_func.test
+++ b/mysql-test/suite/perfschema/t/socket_summary_by_event_name_func.test
@@ -29,7 +29,7 @@
--source ../include/no_protocol.inc
# Set this to enable debugging output
-let $my_socket_debug= 0;
+let $my_socket_debug_dbug= 0;
--echo #==============================================================================
--echo # Establish the level of IPV6 support
diff --git a/mysql-test/suite/perfschema/t/socket_summary_by_instance_func.test b/mysql-test/suite/perfschema/t/socket_summary_by_instance_func.test
index b039023dc6f..ef5096d85ff 100644
--- a/mysql-test/suite/perfschema/t/socket_summary_by_instance_func.test
+++ b/mysql-test/suite/perfschema/t/socket_summary_by_instance_func.test
@@ -43,7 +43,7 @@
# Set IP address defaults with respect to IPV6 support
#
# Set this to enable debugging output
-let $my_socket_debug= 0;
+let $my_socket_debug_dbug= 0;
#
# Determine if IPV6 supported
#
@@ -66,7 +66,7 @@ if($check_ipv4_mapped_supported)
let $my_localhost=::ffff:127.0.0.1;
}
#
-let $my_socket_debug= 0;
+let $my_socket_debug_dbug= 0;
if($my_socket_debug)
{
--echo IPV6=$check_ipv6_supported, IPV4_MAPPED = $check_ipv4_mapped_supported, LOCALHOST = $my_localhost
diff --git a/mysql-test/suite/perfschema/t/socket_summary_by_instance_func_win.test b/mysql-test/suite/perfschema/t/socket_summary_by_instance_func_win.test
index 80b7f3c2be7..5d4b4209b6e 100644
--- a/mysql-test/suite/perfschema/t/socket_summary_by_instance_func_win.test
+++ b/mysql-test/suite/perfschema/t/socket_summary_by_instance_func_win.test
@@ -45,7 +45,7 @@
# Set IP address defaults with respect to IPV6 support
#
# Set this to enable debugging output
-let $my_socket_debug= 0;
+let $my_socket_debug_dbug= 0;
#
# Determine if IPV6 supported
#
@@ -68,7 +68,7 @@ if($check_ipv4_mapped_supported)
let $my_localhost=::ffff:127.0.0.1;
}
#
-let $my_socket_debug= 0;
+let $my_socket_debug_dbug= 0;
if($my_socket_debug)
{
--echo IPV6=$check_ipv6_supported, IPV4_MAPPED = $check_ipv4_mapped_supported, LOCALHOST = $my_localhost
diff --git a/mysql-test/suite/perfschema/t/start_server_no_digests.test b/mysql-test/suite/perfschema/t/start_server_no_digests.test
index b0ff0ab6f21..cb9d7ea4ea8 100644
--- a/mysql-test/suite/perfschema/t/start_server_no_digests.test
+++ b/mysql-test/suite/perfschema/t/start_server_no_digests.test
@@ -19,7 +19,7 @@ TRUNCATE TABLE performance_schema.events_statements_summary_by_digest;
--echo ####################################
--echo # QUERYING PS STATEMENT DIGEST
--echo ####################################
-SELECT DIGEST, DIGEST_TEXT, COUNT_STAR, SUM_ROWS_AFFECTED, SUM_WARNINGS,
+SELECT SCHEMA_NAME, DIGEST, DIGEST_TEXT, COUNT_STAR, SUM_ROWS_AFFECTED, SUM_WARNINGS,
SUM_ERRORS FROM performance_schema.events_statements_summary_by_digest;
diff --git a/mysql-test/suite/perfschema/t/start_server_nothing-master.opt b/mysql-test/suite/perfschema/t/start_server_nothing-master.opt
index 0203918849b..d0cd757cac1 100644
--- a/mysql-test/suite/perfschema/t/start_server_nothing-master.opt
+++ b/mysql-test/suite/perfschema/t/start_server_nothing-master.opt
@@ -35,3 +35,5 @@
--loose-performance_schema_events_statements_history_long_size=0
--loose-performance_schema_events_statements_history_size=0
+--loose-performance_schema_session_connect_attrs=0
+
diff --git a/mysql-test/suite/perfschema/t/start_server_nothing.test b/mysql-test/suite/perfschema/t/start_server_nothing.test
index 61f70dcfdb5..c337ef604f6 100644
--- a/mysql-test/suite/perfschema/t/start_server_nothing.test
+++ b/mysql-test/suite/perfschema/t/start_server_nothing.test
@@ -11,7 +11,7 @@ show variables like "performance_schema%";
select * from performance_schema.setup_instruments;
select TIMER_NAME from performance_schema.performance_timers;
select * from performance_schema.setup_consumers;
-select * from performance_schema.setup_timers;
+select NAME from performance_schema.setup_timers;
# All empty
select * from performance_schema.accounts;
@@ -44,6 +44,8 @@ select * from performance_schema.events_waits_summary_global_by_event_name;
select * from performance_schema.file_instances;
select * from performance_schema.file_summary_by_event_name;
select * from performance_schema.file_summary_by_instance;
+select * from performance_schema.session_account_connect_attrs;
+select * from performance_schema.session_connect_attrs;
select * from performance_schema.socket_instances;
select * from performance_schema.socket_summary_by_instance;
select * from performance_schema.socket_summary_by_event_name;
diff --git a/mysql-test/suite/perfschema/t/start_server_off.test b/mysql-test/suite/perfschema/t/start_server_off.test
index a46c2898865..8b58f8c0814 100644
--- a/mysql-test/suite/perfschema/t/start_server_off.test
+++ b/mysql-test/suite/perfschema/t/start_server_off.test
@@ -19,3 +19,30 @@ select * from performance_schema.setup_instruments;
select * from performance_schema.setup_actors;
select * from performance_schema.setup_objects;
select * from performance_schema.setup_timers;
+
+# Expect INSERT to fail with an error
+
+--error ER_WRONG_PERFSCHEMA_USAGE
+insert into performance_schema.setup_objects values ('TABLE', 'myschema', 'myobject', 'YES', 'YES');
+
+--error ER_WRONG_PERFSCHEMA_USAGE
+insert into performance_schema.setup_actors values ('myhost', 'mysuser', 'myrole');
+
+# Expect SELECT, UPDATE, DELETE and TRUNCATE to affect 0 rows, but with
+# no error because the target row(s) will not be found
+
+select * from performance_schema.setup_objects;
+update performance_schema.setup_objects set OBJECT_NAME = 'myobject';
+delete from performance_schema.setup_objects;
+
+select * from performance_schema.setup_actors;
+update performance_schema.setup_actors set HOST = 'myhost';
+delete from performance_schema.setup_actors;
+
+truncate performance_schema.events_stages_history_long;
+truncate performance_schema.events_statements_history_long;
+truncate performance_schema.events_waits_history_long;
+truncate performance_schema.setup_objects;
+truncate performance_schema.setup_actors;
+
+
diff --git a/mysql-test/suite/perfschema/t/statement_digest.test b/mysql-test/suite/perfschema/t/statement_digest.test
index 24c43cdbac4..ed1f99e4318 100644
--- a/mysql-test/suite/perfschema/t/statement_digest.test
+++ b/mysql-test/suite/perfschema/t/statement_digest.test
@@ -16,7 +16,7 @@ TRUNCATE TABLE performance_schema.events_statements_summary_by_digest;
--echo ####################################
--echo # QUERYING PS STATEMENT DIGEST
--echo ####################################
-SELECT DIGEST, DIGEST_TEXT, COUNT_STAR, SUM_ROWS_AFFECTED, SUM_WARNINGS,
+SELECT SCHEMA_NAME, DIGEST, DIGEST_TEXT, COUNT_STAR, SUM_ROWS_AFFECTED, SUM_WARNINGS,
SUM_ERRORS FROM performance_schema.events_statements_summary_by_digest;
# Cleanup for Digest
diff --git a/mysql-test/suite/perfschema/t/statement_digest_consumers.test b/mysql-test/suite/perfschema/t/statement_digest_consumers.test
index e7721d770ea..e7510e32049 100644
--- a/mysql-test/suite/perfschema/t/statement_digest_consumers.test
+++ b/mysql-test/suite/perfschema/t/statement_digest_consumers.test
@@ -28,7 +28,7 @@ TRUNCATE TABLE performance_schema.events_statements_summary_by_digest;
--echo ####################################
--echo # QUERYING PS STATEMENT DIGEST
--echo ####################################
-SELECT digest, digest_text, count_star FROM performance_schema.events_statements_summary_by_digest;
+SELECT schema_name, digest, digest_text, count_star FROM performance_schema.events_statements_summary_by_digest;
SELECT digest, digest_text FROM performance_schema.events_statements_current;
# Cleanup for Digest
diff --git a/mysql-test/suite/perfschema/t/statement_digest_consumers2.test b/mysql-test/suite/perfschema/t/statement_digest_consumers2.test
index 72bf5241ee8..98442349927 100644
--- a/mysql-test/suite/perfschema/t/statement_digest_consumers2.test
+++ b/mysql-test/suite/perfschema/t/statement_digest_consumers2.test
@@ -30,7 +30,7 @@ TRUNCATE TABLE performance_schema.events_statements_summary_by_digest;
--echo ####################################
--echo # QUERYING PS STATEMENT DIGEST
--echo ####################################
-SELECT digest, digest_text, count_star FROM performance_schema.events_statements_summary_by_digest;
+SELECT schema_name, digest, digest_text, count_star FROM performance_schema.events_statements_summary_by_digest;
SELECT digest, digest_text FROM performance_schema.events_statements_current;
# Cleanup for Digest
diff --git a/mysql-test/suite/perfschema/t/statement_digest_long_query.test b/mysql-test/suite/perfschema/t/statement_digest_long_query.test
index fdc9496c4f1..3969383a6fb 100644
--- a/mysql-test/suite/perfschema/t/statement_digest_long_query.test
+++ b/mysql-test/suite/perfschema/t/statement_digest_long_query.test
@@ -20,4 +20,4 @@ SELECT 1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1+1
--echo ####################################
--echo # QUERYING PS STATEMENT DIGEST
--echo ####################################
-SELECT digest, digest_text, count_star FROM events_statements_summary_by_digest;
+SELECT schema_name, digest, digest_text, count_star FROM events_statements_summary_by_digest;
diff --git a/mysql-test/suite/rpl/r/rpl_000013.result b/mysql-test/suite/rpl/r/rpl_000013.result
index e94a469e970..a45f11c4b5e 100644
--- a/mysql-test/suite/rpl/r/rpl_000013.result
+++ b/mysql-test/suite/rpl/r/rpl_000013.result
@@ -21,5 +21,5 @@ Variable_name Value
Slave_open_temp_tables 0
drop table if exists t1,t2;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/r/rpl_drop.result b/mysql-test/suite/rpl/r/rpl_drop.result
index bdba42e1d34..b91400b2296 100644
--- a/mysql-test/suite/rpl/r/rpl_drop.result
+++ b/mysql-test/suite/rpl/r/rpl_drop.result
@@ -2,5 +2,5 @@ include/master-slave.inc
[connection master]
create table t1 (a int);
drop table t1, t2;
-ERROR 42S02: Unknown table 't2'
+ERROR 42S02: Unknown table 'test.t2'
include/rpl_end.inc
diff --git a/mysql-test/suite/rpl/r/rpl_drop_temp.result b/mysql-test/suite/rpl/r/rpl_drop_temp.result
index 9ec3a62ccc1..44b5f44b4a2 100644
--- a/mysql-test/suite/rpl/r/rpl_drop_temp.result
+++ b/mysql-test/suite/rpl/r/rpl_drop_temp.result
@@ -10,12 +10,12 @@ Slave_open_temp_tables 0
drop database mysqltest;
DROP TEMPORARY TABLE IF EXISTS tmp1;
Warnings:
-Note 1051 Unknown table 'tmp1'
+Note 1051 Unknown table 'test.tmp1'
CREATE TEMPORARY TABLE t1 ( a int );
DROP TEMPORARY TABLE t1, t2;
-ERROR 42S02: Unknown table 't2'
+ERROR 42S02: Unknown table 'test.t2'
DROP TEMPORARY TABLE tmp2;
-ERROR 42S02: Unknown table 'tmp2'
+ERROR 42S02: Unknown table 'test.tmp2'
stop slave;
**** On Master ****
CREATE TEMPORARY TABLE tmp3 (a int);
diff --git a/mysql-test/suite/rpl/r/rpl_mixed_drop_create_temp_table.result b/mysql-test/suite/rpl/r/rpl_mixed_drop_create_temp_table.result
index 755a799a740..4d16b1eb781 100644
--- a/mysql-test/suite/rpl/r/rpl_mixed_drop_create_temp_table.result
+++ b/mysql-test/suite/rpl/r/rpl_mixed_drop_create_temp_table.result
@@ -20,35 +20,35 @@ CREATE TEMPORARY TABLE nt_tmp_xx_1 ( id INT ) ENGINE = MyIsam;
CREATE TEMPORARY TABLE tt_tmp_xx_1 ( id INT ) ENGINE = Innodb;
DROP TEMPORARY TABLE IF EXISTS nt_tmp_2;
Warnings:
-Note 1051 Unknown table 'nt_tmp_2'
+Note 1051 Unknown table 'test.nt_tmp_2'
CREATE TEMPORARY TABLE nt_tmp_2 ( id INT ) ENGINE = MyIsam;
DROP TEMPORARY TABLE IF EXISTS nt_tmp_1;
Warnings:
-Note 1051 Unknown table 'nt_tmp_1'
+Note 1051 Unknown table 'test.nt_tmp_1'
CREATE TEMPORARY TABLE nt_tmp_1 ( id INT ) ENGINE = MyIsam;
DROP TEMPORARY TABLE IF EXISTS tt_tmp_2;
Warnings:
-Note 1051 Unknown table 'tt_tmp_2'
+Note 1051 Unknown table 'test.tt_tmp_2'
CREATE TEMPORARY TABLE tt_tmp_2 ( id INT ) ENGINE = Innodb;
DROP TEMPORARY TABLE IF EXISTS tt_tmp_1;
Warnings:
-Note 1051 Unknown table 'tt_tmp_1'
+Note 1051 Unknown table 'test.tt_tmp_1'
CREATE TEMPORARY TABLE tt_tmp_1 ( id INT ) ENGINE = Innodb;
DROP TABLE IF EXISTS nt_2;
Warnings:
-Note 1051 Unknown table 'nt_2'
+Note 1051 Unknown table 'test.nt_2'
CREATE TABLE nt_2 ( id INT ) ENGINE = MyIsam;
DROP TABLE IF EXISTS nt_1;
Warnings:
-Note 1051 Unknown table 'nt_1'
+Note 1051 Unknown table 'test.nt_1'
CREATE TABLE nt_1 ( id INT ) ENGINE = MyIsam;
DROP TABLE IF EXISTS tt_2;
Warnings:
-Note 1051 Unknown table 'tt_2'
+Note 1051 Unknown table 'test.tt_2'
CREATE TABLE tt_2 ( id INT ) ENGINE = Innodb;
DROP TABLE IF EXISTS tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
CREATE TABLE tt_1 ( id INT ) ENGINE = Innodb;
SET @commands= '';
#########################################################################
@@ -78,7 +78,7 @@ master-bin.000001 # Query # # use `test`; DROP TEMPORARY TABLE `nt_tmp_2` /* gen
SET @commands= 'Drop-Temp-Xe-Temp';
DROP TEMPORARY TABLE tt_xx_1;
-ERROR 42S02: Unknown table 'tt_xx_1'
+ERROR 42S02: Unknown table 'test.tt_xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-Temp-Xe-Temp << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
-e-e-e-e-e-e-e-e-e-e-e- >> Drop-Temp-Xe-Temp << -e-e-e-e-e-e-e-e-e-e-e-
@@ -86,7 +86,7 @@ include/show_binlog_events.inc
SET @commands= 'Drop-Temp-If-Xe-Temp';
DROP TEMPORARY TABLE IF EXISTS tt_xx_1;
Warnings:
-Note 1051 Unknown table 'tt_xx_1'
+Note 1051 Unknown table 'test.tt_xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-Temp-If-Xe-Temp << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -96,7 +96,7 @@ master-bin.000001 # Query # # use `test`; DROP TEMPORARY TABLE IF EXISTS `tt_xx_
SET @commands= 'Drop-Temp-TXe-Temp';
DROP TEMPORARY TABLE tt_tmp_2, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-Temp-TXe-Temp << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -107,7 +107,7 @@ master-bin.000001 # Query # # use `test`; DROP TEMPORARY TABLE `tt_tmp_2` /* gen
SET @commands= 'Drop-Temp-If-TXe-Temp';
DROP TEMPORARY TABLE IF EXISTS tt_tmp_2, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-Temp-If-TXe-Temp << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -117,7 +117,7 @@ master-bin.000001 # Query # # use `test`; DROP TEMPORARY TABLE IF EXISTS `tt_tmp
SET @commands= 'Drop-Temp-NXe-Temp';
DROP TEMPORARY TABLE nt_tmp_2, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-Temp-NXe-Temp << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -128,7 +128,7 @@ master-bin.000001 # Query # # use `test`; DROP TEMPORARY TABLE `nt_tmp_2` /* gen
SET @commands= 'Drop-Temp-If-NXe-Temp';
DROP TEMPORARY TABLE IF EXISTS nt_tmp_2, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-Temp-If-NXe-Temp << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -249,7 +249,7 @@ SET @commands= 'B T Drop-Temp-Xe-Temp C';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_xx_1;
-ERROR 42S02: Unknown table 'tt_xx_1'
+ERROR 42S02: Unknown table 'test.tt_xx_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-Xe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -263,10 +263,10 @@ SET @commands= 'B T Drop-Temp-Xe-Temp N Drop-Temp-Xe-Temp C';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_xx_1;
-ERROR 42S02: Unknown table 'tt_xx_1'
+ERROR 42S02: Unknown table 'test.tt_xx_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_xx_1;
-ERROR 42S02: Unknown table 'tt_xx_1'
+ERROR 42S02: Unknown table 'test.tt_xx_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-Xe-Temp N Drop-Temp-Xe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -284,7 +284,7 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_xx_1;
Warnings:
-Note 1051 Unknown table 'tt_xx_1'
+Note 1051 Unknown table 'test.tt_xx_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-If-Xe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -300,11 +300,11 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_xx_1;
Warnings:
-Note 1051 Unknown table 'tt_xx_1'
+Note 1051 Unknown table 'test.tt_xx_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_xx_1;
Warnings:
-Note 1051 Unknown table 'tt_xx_1'
+Note 1051 Unknown table 'test.tt_xx_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-If-Xe-Temp N Drop-Temp-If-Xe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -323,7 +323,7 @@ SET @commands= 'B T Drop-Temp-TXe-Temp C';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_tmp_2, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-TXe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -338,10 +338,10 @@ SET @commands= 'B T Drop-Temp-TXe-Temp N Drop-Temp-TXe-Temp C';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_tmp_2, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_tmp_1, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-TXe-Temp N Drop-Temp-TXe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -361,7 +361,7 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_tmp_2, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-If-TXe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -377,11 +377,11 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_tmp_2, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_tmp_1, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-If-TXe-Temp N Drop-Temp-If-TXe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -400,7 +400,7 @@ SET @commands= 'B T Drop-Temp-NXe-Temp C';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE nt_tmp_2, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-NXe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -417,10 +417,10 @@ SET @commands= 'B T Drop-Temp-NXe-Temp N Drop-Temp-NXe-Temp C';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE nt_tmp_2, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE nt_tmp_1, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-NXe-Temp N Drop-Temp-NXe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -444,7 +444,7 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS nt_tmp_2, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-If-NXe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -463,11 +463,11 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS nt_tmp_2, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS nt_tmp_1, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-If-NXe-Temp N Drop-Temp-If-NXe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -757,7 +757,7 @@ SET @commands= 'B T Drop-Temp-Xe-Temp R';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_xx_1;
-ERROR 42S02: Unknown table 'tt_xx_1'
+ERROR 42S02: Unknown table 'test.tt_xx_1'
ROLLBACK;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-Xe-Temp R << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -771,10 +771,10 @@ SET @commands= 'B T Drop-Temp-Xe-Temp N Drop-Temp-Xe-Temp R';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_xx_1;
-ERROR 42S02: Unknown table 'tt_xx_1'
+ERROR 42S02: Unknown table 'test.tt_xx_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_xx_1;
-ERROR 42S02: Unknown table 'tt_xx_1'
+ERROR 42S02: Unknown table 'test.tt_xx_1'
ROLLBACK;
Warnings:
Warning # Some non-transactional changed tables couldn't be rolled back
@@ -794,7 +794,7 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_xx_1;
Warnings:
-Note 1051 Unknown table 'tt_xx_1'
+Note 1051 Unknown table 'test.tt_xx_1'
ROLLBACK;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-If-Xe-Temp R << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -810,11 +810,11 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_xx_1;
Warnings:
-Note 1051 Unknown table 'tt_xx_1'
+Note 1051 Unknown table 'test.tt_xx_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_xx_1;
Warnings:
-Note 1051 Unknown table 'tt_xx_1'
+Note 1051 Unknown table 'test.tt_xx_1'
ROLLBACK;
Warnings:
Warning # Some non-transactional changed tables couldn't be rolled back
@@ -835,7 +835,7 @@ SET @commands= 'B T Drop-Temp-TXe-Temp R';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_tmp_2, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
ROLLBACK;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-TXe-Temp R << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -850,10 +850,10 @@ SET @commands= 'B T Drop-Temp-TXe-Temp N Drop-Temp-TXe-Temp R';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_tmp_2, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_tmp_1, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
ROLLBACK;
Warnings:
Warning # Some non-transactional changed tables couldn't be rolled back
@@ -875,7 +875,7 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_tmp_2, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
ROLLBACK;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-If-TXe-Temp R << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -891,11 +891,11 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_tmp_2, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_tmp_1, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
ROLLBACK;
Warnings:
Warning # Some non-transactional changed tables couldn't be rolled back
@@ -916,7 +916,7 @@ SET @commands= 'B T Drop-Temp-NXe-Temp R';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE nt_tmp_2, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
ROLLBACK;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-NXe-Temp R << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -933,10 +933,10 @@ SET @commands= 'B T Drop-Temp-NXe-Temp N Drop-Temp-NXe-Temp R';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE nt_tmp_2, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE nt_tmp_1, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
ROLLBACK;
Warnings:
Warning # Some non-transactional changed tables couldn't be rolled back
@@ -962,7 +962,7 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS nt_tmp_2, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
ROLLBACK;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-If-NXe-Temp R << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -981,11 +981,11 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS nt_tmp_2, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS nt_tmp_1, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
ROLLBACK;
Warnings:
Warning # Some non-transactional changed tables couldn't be rolled back
@@ -1230,7 +1230,7 @@ master-bin.000001 # Query # # use `test`; DROP TABLE `nt_2` /* generated by serv
SET @commands= 'Drop-Xe';
DROP TABLE xx_1;
-ERROR 42S02: Unknown table 'xx_1'
+ERROR 42S02: Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-Xe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
-e-e-e-e-e-e-e-e-e-e-e- >> Drop-Xe << -e-e-e-e-e-e-e-e-e-e-e-
@@ -1238,7 +1238,7 @@ include/show_binlog_events.inc
SET @commands= 'Drop-If-Xe';
DROP TABLE IF EXISTS xx_1;
Warnings:
-Note 1051 Unknown table 'xx_1'
+Note 1051 Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-If-Xe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -1248,7 +1248,7 @@ master-bin.000001 # Query # # use `test`; DROP TABLE IF EXISTS `xx_1` /* generat
SET @commands= 'Drop-TXe';
DROP TABLE tt_2, xx_1;
-ERROR 42S02: Unknown table 'xx_1'
+ERROR 42S02: Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-TXe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -1259,7 +1259,7 @@ master-bin.000001 # Query # # use `test`; DROP TABLE `tt_2`,`xx_1` /* generated
SET @commands= 'Drop-If-TXe';
DROP TABLE IF EXISTS tt_2, xx_1;
Warnings:
-Note 1051 Unknown table 'xx_1'
+Note 1051 Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-If-TXe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -1269,7 +1269,7 @@ master-bin.000001 # Query # # use `test`; DROP TABLE IF EXISTS `tt_2`,`xx_1` /*
SET @commands= 'Drop-NXe';
DROP TABLE nt_2, xx_1;
-ERROR 42S02: Unknown table 'xx_1'
+ERROR 42S02: Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-NXe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -1280,7 +1280,7 @@ master-bin.000001 # Query # # use `test`; DROP TABLE `nt_2`,`xx_1` /* generated
SET @commands= 'Drop-If-NXe';
DROP TABLE IF EXISTS nt_2, xx_1;
Warnings:
-Note 1051 Unknown table 'xx_1'
+Note 1051 Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-If-NXe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -1375,7 +1375,7 @@ SET @commands= 'B T Drop-Xe';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TABLE xx_1;
-ERROR 42S02: Unknown table 'xx_1'
+ERROR 42S02: Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Xe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -1389,7 +1389,7 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TABLE IF EXISTS xx_1;
Warnings:
-Note 1051 Unknown table 'xx_1'
+Note 1051 Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-If-Xe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -1404,7 +1404,7 @@ SET @commands= 'B T Drop-TXe';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TABLE tt_2, xx_1;
-ERROR 42S02: Unknown table 'xx_1'
+ERROR 42S02: Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-TXe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -1420,7 +1420,7 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TABLE IF EXISTS tt_2, xx_1;
Warnings:
-Note 1051 Unknown table 'xx_1'
+Note 1051 Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-If-TXe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -1435,7 +1435,7 @@ SET @commands= 'B T Drop-NXe';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TABLE nt_2, xx_1;
-ERROR 42S02: Unknown table 'xx_1'
+ERROR 42S02: Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-NXe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -1451,7 +1451,7 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TABLE IF EXISTS nt_2, xx_1;
Warnings:
-Note 1051 Unknown table 'xx_1'
+Note 1051 Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-If-NXe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
diff --git a/mysql-test/suite/rpl/r/rpl_mixed_implicit_commit_binlog.result b/mysql-test/suite/rpl/r/rpl_mixed_implicit_commit_binlog.result
index 2afd910f367..12dfa9ecc7c 100644
--- a/mysql-test/suite/rpl/r/rpl_mixed_implicit_commit_binlog.result
+++ b/mysql-test/suite/rpl/r/rpl_mixed_implicit_commit_binlog.result
@@ -551,7 +551,7 @@ master-bin.000001 # Query # # use `test`; TRUNCATE TABLE new_tt_xx
INSERT INTO tt_1(ddl_case) VALUES (3);
DROP TABLE IF EXISTS tt_xx, new_tt_xx;
Warnings:
-Note 1051 Unknown table 'tt_xx'
+Note 1051 Unknown table 'test.tt_xx'
-e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e-
-b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
diff --git a/mysql-test/suite/rpl/r/rpl_multi_update3.result b/mysql-test/suite/rpl/r/rpl_multi_update3.result
index bf9946f219f..273352fe5ed 100644
--- a/mysql-test/suite/rpl/r/rpl_multi_update3.result
+++ b/mysql-test/suite/rpl/r/rpl_multi_update3.result
@@ -121,10 +121,10 @@ i j x y z
DROP TABLE t1, t2, t3;
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
DROP TABLE IF EXISTS t2;
Warnings:
-Note 1051 Unknown table 't2'
+Note 1051 Unknown table 'test.t2'
CREATE TABLE t1 (
idp int(11) NOT NULL default '0',
idpro int(11) default NULL,
diff --git a/mysql-test/suite/rpl/r/rpl_password_boundaries.result b/mysql-test/suite/rpl/r/rpl_password_boundaries.result
index a0ccc0ce33c..462ab154a55 100644
--- a/mysql-test/suite/rpl/r/rpl_password_boundaries.result
+++ b/mysql-test/suite/rpl/r/rpl_password_boundaries.result
@@ -12,7 +12,7 @@ include/start_slave.inc
[ on master ]
drop table if exists t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
create table t1 (i int);
insert into t1 values (1);
[ on slave: synchronized ]
diff --git a/mysql-test/suite/rpl/r/rpl_row_colSize.result b/mysql-test/suite/rpl/r/rpl_row_colSize.result
index 8db9aa6059f..d147f416c69 100644
--- a/mysql-test/suite/rpl/r/rpl_row_colSize.result
+++ b/mysql-test/suite/rpl/r/rpl_row_colSize.result
@@ -6,7 +6,7 @@ DROP TABLE IF EXISTS t1;
Checking MYSQL_TYPE_NEWDECIMAL fields
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
STOP SLAVE;
RESET SLAVE;
CREATE TABLE t1 (a DECIMAL(5,2));
diff --git a/mysql-test/suite/rpl/r/rpl_row_drop_create_temp_table.result b/mysql-test/suite/rpl/r/rpl_row_drop_create_temp_table.result
index 914caa99c17..020200f2ef7 100644
--- a/mysql-test/suite/rpl/r/rpl_row_drop_create_temp_table.result
+++ b/mysql-test/suite/rpl/r/rpl_row_drop_create_temp_table.result
@@ -20,35 +20,35 @@ CREATE TEMPORARY TABLE nt_tmp_xx_1 ( id INT ) ENGINE = MyIsam;
CREATE TEMPORARY TABLE tt_tmp_xx_1 ( id INT ) ENGINE = Innodb;
DROP TEMPORARY TABLE IF EXISTS nt_tmp_2;
Warnings:
-Note 1051 Unknown table 'nt_tmp_2'
+Note 1051 Unknown table 'test.nt_tmp_2'
CREATE TEMPORARY TABLE nt_tmp_2 ( id INT ) ENGINE = MyIsam;
DROP TEMPORARY TABLE IF EXISTS nt_tmp_1;
Warnings:
-Note 1051 Unknown table 'nt_tmp_1'
+Note 1051 Unknown table 'test.nt_tmp_1'
CREATE TEMPORARY TABLE nt_tmp_1 ( id INT ) ENGINE = MyIsam;
DROP TEMPORARY TABLE IF EXISTS tt_tmp_2;
Warnings:
-Note 1051 Unknown table 'tt_tmp_2'
+Note 1051 Unknown table 'test.tt_tmp_2'
CREATE TEMPORARY TABLE tt_tmp_2 ( id INT ) ENGINE = Innodb;
DROP TEMPORARY TABLE IF EXISTS tt_tmp_1;
Warnings:
-Note 1051 Unknown table 'tt_tmp_1'
+Note 1051 Unknown table 'test.tt_tmp_1'
CREATE TEMPORARY TABLE tt_tmp_1 ( id INT ) ENGINE = Innodb;
DROP TABLE IF EXISTS nt_2;
Warnings:
-Note 1051 Unknown table 'nt_2'
+Note 1051 Unknown table 'test.nt_2'
CREATE TABLE nt_2 ( id INT ) ENGINE = MyIsam;
DROP TABLE IF EXISTS nt_1;
Warnings:
-Note 1051 Unknown table 'nt_1'
+Note 1051 Unknown table 'test.nt_1'
CREATE TABLE nt_1 ( id INT ) ENGINE = MyIsam;
DROP TABLE IF EXISTS tt_2;
Warnings:
-Note 1051 Unknown table 'tt_2'
+Note 1051 Unknown table 'test.tt_2'
CREATE TABLE tt_2 ( id INT ) ENGINE = Innodb;
DROP TABLE IF EXISTS tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
CREATE TABLE tt_1 ( id INT ) ENGINE = Innodb;
SET @commands= '';
#########################################################################
@@ -78,7 +78,7 @@ master-bin.000001 # Query # # use `test`; DROP TEMPORARY TABLE IF EXISTS `nt_tmp
SET @commands= 'Drop-Temp-Xe-Temp';
DROP TEMPORARY TABLE tt_xx_1;
-ERROR 42S02: Unknown table 'tt_xx_1'
+ERROR 42S02: Unknown table 'test.tt_xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-Temp-Xe-Temp << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
-e-e-e-e-e-e-e-e-e-e-e- >> Drop-Temp-Xe-Temp << -e-e-e-e-e-e-e-e-e-e-e-
@@ -86,7 +86,7 @@ include/show_binlog_events.inc
SET @commands= 'Drop-Temp-If-Xe-Temp';
DROP TEMPORARY TABLE IF EXISTS tt_xx_1;
Warnings:
-Note 1051 Unknown table 'tt_xx_1'
+Note 1051 Unknown table 'test.tt_xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-Temp-If-Xe-Temp << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -96,7 +96,7 @@ master-bin.000001 # Query # # use `test`; DROP TEMPORARY TABLE IF EXISTS `tt_xx_
SET @commands= 'Drop-Temp-TXe-Temp';
DROP TEMPORARY TABLE tt_tmp_2, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-Temp-TXe-Temp << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -107,7 +107,7 @@ master-bin.000001 # Query # # use `test`; DROP TEMPORARY TABLE IF EXISTS `tt_tmp
SET @commands= 'Drop-Temp-If-TXe-Temp';
DROP TEMPORARY TABLE IF EXISTS tt_tmp_2, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-Temp-If-TXe-Temp << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -117,7 +117,7 @@ master-bin.000001 # Query # # use `test`; DROP TEMPORARY TABLE IF EXISTS `tt_tmp
SET @commands= 'Drop-Temp-NXe-Temp';
DROP TEMPORARY TABLE nt_tmp_2, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-Temp-NXe-Temp << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -128,7 +128,7 @@ master-bin.000001 # Query # # use `test`; DROP TEMPORARY TABLE IF EXISTS `nt_tmp
SET @commands= 'Drop-Temp-If-NXe-Temp';
DROP TEMPORARY TABLE IF EXISTS nt_tmp_2, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-Temp-If-NXe-Temp << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -255,7 +255,7 @@ SET @commands= 'B T Drop-Temp-Xe-Temp C';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_xx_1;
-ERROR 42S02: Unknown table 'tt_xx_1'
+ERROR 42S02: Unknown table 'test.tt_xx_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-Xe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -270,10 +270,10 @@ SET @commands= 'B T Drop-Temp-Xe-Temp N Drop-Temp-Xe-Temp C';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_xx_1;
-ERROR 42S02: Unknown table 'tt_xx_1'
+ERROR 42S02: Unknown table 'test.tt_xx_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_xx_1;
-ERROR 42S02: Unknown table 'tt_xx_1'
+ERROR 42S02: Unknown table 'test.tt_xx_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-Xe-Temp N Drop-Temp-Xe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -293,7 +293,7 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_xx_1;
Warnings:
-Note 1051 Unknown table 'tt_xx_1'
+Note 1051 Unknown table 'test.tt_xx_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-If-Xe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -310,11 +310,11 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_xx_1;
Warnings:
-Note 1051 Unknown table 'tt_xx_1'
+Note 1051 Unknown table 'test.tt_xx_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_xx_1;
Warnings:
-Note 1051 Unknown table 'tt_xx_1'
+Note 1051 Unknown table 'test.tt_xx_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-If-Xe-Temp N Drop-Temp-If-Xe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -335,7 +335,7 @@ SET @commands= 'B T Drop-Temp-TXe-Temp C';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_tmp_2, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-TXe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -351,10 +351,10 @@ SET @commands= 'B T Drop-Temp-TXe-Temp N Drop-Temp-TXe-Temp C';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_tmp_2, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_tmp_1, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-TXe-Temp N Drop-Temp-TXe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -376,7 +376,7 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_tmp_2, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-If-TXe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -393,11 +393,11 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_tmp_2, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_tmp_1, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-If-TXe-Temp N Drop-Temp-If-TXe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -418,7 +418,7 @@ SET @commands= 'B T Drop-Temp-NXe-Temp C';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE nt_tmp_2, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-NXe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -436,10 +436,10 @@ SET @commands= 'B T Drop-Temp-NXe-Temp N Drop-Temp-NXe-Temp C';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE nt_tmp_2, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE nt_tmp_1, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-NXe-Temp N Drop-Temp-NXe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -465,7 +465,7 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS nt_tmp_2, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-If-NXe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -485,11 +485,11 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS nt_tmp_2, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS nt_tmp_1, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-If-NXe-Temp N Drop-Temp-If-NXe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -806,7 +806,7 @@ SET @commands= 'B T Drop-Temp-Xe-Temp R';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_xx_1;
-ERROR 42S02: Unknown table 'tt_xx_1'
+ERROR 42S02: Unknown table 'test.tt_xx_1'
ROLLBACK;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-Xe-Temp R << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -821,10 +821,10 @@ SET @commands= 'B T Drop-Temp-Xe-Temp N Drop-Temp-Xe-Temp R';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_xx_1;
-ERROR 42S02: Unknown table 'tt_xx_1'
+ERROR 42S02: Unknown table 'test.tt_xx_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_xx_1;
-ERROR 42S02: Unknown table 'tt_xx_1'
+ERROR 42S02: Unknown table 'test.tt_xx_1'
ROLLBACK;
Warnings:
Warning # Some non-transactional changed tables couldn't be rolled back
@@ -846,7 +846,7 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_xx_1;
Warnings:
-Note 1051 Unknown table 'tt_xx_1'
+Note 1051 Unknown table 'test.tt_xx_1'
ROLLBACK;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-If-Xe-Temp R << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -863,11 +863,11 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_xx_1;
Warnings:
-Note 1051 Unknown table 'tt_xx_1'
+Note 1051 Unknown table 'test.tt_xx_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_xx_1;
Warnings:
-Note 1051 Unknown table 'tt_xx_1'
+Note 1051 Unknown table 'test.tt_xx_1'
ROLLBACK;
Warnings:
Warning # Some non-transactional changed tables couldn't be rolled back
@@ -890,7 +890,7 @@ SET @commands= 'B T Drop-Temp-TXe-Temp R';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_tmp_2, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
ROLLBACK;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-TXe-Temp R << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -906,10 +906,10 @@ SET @commands= 'B T Drop-Temp-TXe-Temp N Drop-Temp-TXe-Temp R';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_tmp_2, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_tmp_1, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
ROLLBACK;
Warnings:
Warning # Some non-transactional changed tables couldn't be rolled back
@@ -933,7 +933,7 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_tmp_2, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
ROLLBACK;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-If-TXe-Temp R << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -950,11 +950,11 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_tmp_2, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_tmp_1, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
ROLLBACK;
Warnings:
Warning # Some non-transactional changed tables couldn't be rolled back
@@ -977,7 +977,7 @@ SET @commands= 'B T Drop-Temp-NXe-Temp R';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE nt_tmp_2, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
ROLLBACK;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-NXe-Temp R << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -995,10 +995,10 @@ SET @commands= 'B T Drop-Temp-NXe-Temp N Drop-Temp-NXe-Temp R';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE nt_tmp_2, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE nt_tmp_1, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
ROLLBACK;
Warnings:
Warning # Some non-transactional changed tables couldn't be rolled back
@@ -1026,7 +1026,7 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS nt_tmp_2, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
ROLLBACK;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-If-NXe-Temp R << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -1046,11 +1046,11 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS nt_tmp_2, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS nt_tmp_1, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
ROLLBACK;
Warnings:
Warning # Some non-transactional changed tables couldn't be rolled back
@@ -1316,7 +1316,7 @@ master-bin.000001 # Query # # use `test`; DROP TABLE `nt_2` /* generated by serv
SET @commands= 'Drop-Xe';
DROP TABLE xx_1;
-ERROR 42S02: Unknown table 'xx_1'
+ERROR 42S02: Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-Xe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
-e-e-e-e-e-e-e-e-e-e-e- >> Drop-Xe << -e-e-e-e-e-e-e-e-e-e-e-
@@ -1324,7 +1324,7 @@ include/show_binlog_events.inc
SET @commands= 'Drop-If-Xe';
DROP TABLE IF EXISTS xx_1;
Warnings:
-Note 1051 Unknown table 'xx_1'
+Note 1051 Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-If-Xe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -1334,7 +1334,7 @@ master-bin.000001 # Query # # use `test`; DROP TABLE IF EXISTS `xx_1` /* generat
SET @commands= 'Drop-TXe';
DROP TABLE tt_2, xx_1;
-ERROR 42S02: Unknown table 'xx_1'
+ERROR 42S02: Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-TXe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -1345,7 +1345,7 @@ master-bin.000001 # Query # # use `test`; DROP TABLE `tt_2`,`xx_1` /* generated
SET @commands= 'Drop-If-TXe';
DROP TABLE IF EXISTS tt_2, xx_1;
Warnings:
-Note 1051 Unknown table 'xx_1'
+Note 1051 Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-If-TXe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -1355,7 +1355,7 @@ master-bin.000001 # Query # # use `test`; DROP TABLE IF EXISTS `tt_2`,`xx_1` /*
SET @commands= 'Drop-NXe';
DROP TABLE nt_2, xx_1;
-ERROR 42S02: Unknown table 'xx_1'
+ERROR 42S02: Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-NXe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -1366,7 +1366,7 @@ master-bin.000001 # Query # # use `test`; DROP TABLE `nt_2`,`xx_1` /* generated
SET @commands= 'Drop-If-NXe';
DROP TABLE IF EXISTS nt_2, xx_1;
Warnings:
-Note 1051 Unknown table 'xx_1'
+Note 1051 Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-If-NXe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -1463,7 +1463,7 @@ SET @commands= 'B T Drop-Xe';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TABLE xx_1;
-ERROR 42S02: Unknown table 'xx_1'
+ERROR 42S02: Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Xe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -1478,7 +1478,7 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TABLE IF EXISTS xx_1;
Warnings:
-Note 1051 Unknown table 'xx_1'
+Note 1051 Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-If-Xe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -1494,7 +1494,7 @@ SET @commands= 'B T Drop-TXe';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TABLE tt_2, xx_1;
-ERROR 42S02: Unknown table 'xx_1'
+ERROR 42S02: Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-TXe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -1511,7 +1511,7 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TABLE IF EXISTS tt_2, xx_1;
Warnings:
-Note 1051 Unknown table 'xx_1'
+Note 1051 Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-If-TXe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -1527,7 +1527,7 @@ SET @commands= 'B T Drop-NXe';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TABLE nt_2, xx_1;
-ERROR 42S02: Unknown table 'xx_1'
+ERROR 42S02: Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-NXe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -1544,7 +1544,7 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TABLE IF EXISTS nt_2, xx_1;
Warnings:
-Note 1051 Unknown table 'xx_1'
+Note 1051 Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-If-NXe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
diff --git a/mysql-test/suite/rpl/r/rpl_row_implicit_commit_binlog.result b/mysql-test/suite/rpl/r/rpl_row_implicit_commit_binlog.result
index 3daae3bf260..28697dc0bdc 100644
--- a/mysql-test/suite/rpl/r/rpl_row_implicit_commit_binlog.result
+++ b/mysql-test/suite/rpl/r/rpl_row_implicit_commit_binlog.result
@@ -586,7 +586,7 @@ master-bin.000001 # Query # # use `test`; TRUNCATE TABLE new_tt_xx
INSERT INTO tt_1(ddl_case) VALUES (3);
DROP TABLE IF EXISTS tt_xx, new_tt_xx;
Warnings:
-Note 1051 Unknown table 'tt_xx'
+Note 1051 Unknown table 'test.tt_xx'
-e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e-
-b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
diff --git a/mysql-test/suite/rpl/r/rpl_row_sp005.result b/mysql-test/suite/rpl/r/rpl_row_sp005.result
index 0496fd6fe46..df877233e94 100644
--- a/mysql-test/suite/rpl/r/rpl_row_sp005.result
+++ b/mysql-test/suite/rpl/r/rpl_row_sp005.result
@@ -73,8 +73,6 @@ id2
< ---- Master selects-- >
-------------------------
CALL test.p1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
SELECT * FROM test.t3 ORDER BY id3;
id3 c
1 MySQL
diff --git a/mysql-test/suite/rpl/r/rpl_row_sp006_InnoDB.result b/mysql-test/suite/rpl/r/rpl_row_sp006_InnoDB.result
index 2b3348270f1..da196bb3615 100644
--- a/mysql-test/suite/rpl/r/rpl_row_sp006_InnoDB.result
+++ b/mysql-test/suite/rpl/r/rpl_row_sp006_InnoDB.result
@@ -34,8 +34,6 @@ INSERT INTO t1 VALUES ('MySQL','1993-02-04'),('ROCKS', '1990-08-27'),('Texas', '
END|
CALL p2();
CALL p1();
-Warnings:
-Error 1329 No data - zero rows fetched, selected, or processed
DROP TABLE t1;
DROP TABLE t2;
DROP PROCEDURE p1;
diff --git a/mysql-test/suite/rpl/r/rpl_session_var.result b/mysql-test/suite/rpl/r/rpl_session_var.result
index 030ae161b22..f58744c5d8f 100644
--- a/mysql-test/suite/rpl/r/rpl_session_var.result
+++ b/mysql-test/suite/rpl/r/rpl_session_var.result
@@ -2,7 +2,7 @@ include/master-slave.inc
[connection master]
drop table if exists t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
create table t1(a varchar(100),b int);
set @@session.sql_mode=pipes_as_concat;
insert into t1 values('My'||'SQL', 1);
diff --git a/mysql-test/suite/rpl/r/rpl_stm_drop_create_temp_table.result b/mysql-test/suite/rpl/r/rpl_stm_drop_create_temp_table.result
index 4ed2f6d52b2..1c0cad621c5 100644
--- a/mysql-test/suite/rpl/r/rpl_stm_drop_create_temp_table.result
+++ b/mysql-test/suite/rpl/r/rpl_stm_drop_create_temp_table.result
@@ -20,35 +20,35 @@ CREATE TEMPORARY TABLE nt_tmp_xx_1 ( id INT ) ENGINE = MyIsam;
CREATE TEMPORARY TABLE tt_tmp_xx_1 ( id INT ) ENGINE = Innodb;
DROP TEMPORARY TABLE IF EXISTS nt_tmp_2;
Warnings:
-Note 1051 Unknown table 'nt_tmp_2'
+Note 1051 Unknown table 'test.nt_tmp_2'
CREATE TEMPORARY TABLE nt_tmp_2 ( id INT ) ENGINE = MyIsam;
DROP TEMPORARY TABLE IF EXISTS nt_tmp_1;
Warnings:
-Note 1051 Unknown table 'nt_tmp_1'
+Note 1051 Unknown table 'test.nt_tmp_1'
CREATE TEMPORARY TABLE nt_tmp_1 ( id INT ) ENGINE = MyIsam;
DROP TEMPORARY TABLE IF EXISTS tt_tmp_2;
Warnings:
-Note 1051 Unknown table 'tt_tmp_2'
+Note 1051 Unknown table 'test.tt_tmp_2'
CREATE TEMPORARY TABLE tt_tmp_2 ( id INT ) ENGINE = Innodb;
DROP TEMPORARY TABLE IF EXISTS tt_tmp_1;
Warnings:
-Note 1051 Unknown table 'tt_tmp_1'
+Note 1051 Unknown table 'test.tt_tmp_1'
CREATE TEMPORARY TABLE tt_tmp_1 ( id INT ) ENGINE = Innodb;
DROP TABLE IF EXISTS nt_2;
Warnings:
-Note 1051 Unknown table 'nt_2'
+Note 1051 Unknown table 'test.nt_2'
CREATE TABLE nt_2 ( id INT ) ENGINE = MyIsam;
DROP TABLE IF EXISTS nt_1;
Warnings:
-Note 1051 Unknown table 'nt_1'
+Note 1051 Unknown table 'test.nt_1'
CREATE TABLE nt_1 ( id INT ) ENGINE = MyIsam;
DROP TABLE IF EXISTS tt_2;
Warnings:
-Note 1051 Unknown table 'tt_2'
+Note 1051 Unknown table 'test.tt_2'
CREATE TABLE tt_2 ( id INT ) ENGINE = Innodb;
DROP TABLE IF EXISTS tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
CREATE TABLE tt_1 ( id INT ) ENGINE = Innodb;
SET @commands= '';
#########################################################################
@@ -78,7 +78,7 @@ master-bin.000001 # Query # # use `test`; DROP TEMPORARY TABLE `nt_tmp_2` /* gen
SET @commands= 'Drop-Temp-Xe-Temp';
DROP TEMPORARY TABLE tt_xx_1;
-ERROR 42S02: Unknown table 'tt_xx_1'
+ERROR 42S02: Unknown table 'test.tt_xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-Temp-Xe-Temp << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
-e-e-e-e-e-e-e-e-e-e-e- >> Drop-Temp-Xe-Temp << -e-e-e-e-e-e-e-e-e-e-e-
@@ -86,7 +86,7 @@ include/show_binlog_events.inc
SET @commands= 'Drop-Temp-If-Xe-Temp';
DROP TEMPORARY TABLE IF EXISTS tt_xx_1;
Warnings:
-Note 1051 Unknown table 'tt_xx_1'
+Note 1051 Unknown table 'test.tt_xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-Temp-If-Xe-Temp << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -96,7 +96,7 @@ master-bin.000001 # Query # # use `test`; DROP TEMPORARY TABLE IF EXISTS `tt_xx_
SET @commands= 'Drop-Temp-TXe-Temp';
DROP TEMPORARY TABLE tt_tmp_2, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-Temp-TXe-Temp << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -107,7 +107,7 @@ master-bin.000001 # Query # # use `test`; DROP TEMPORARY TABLE `tt_tmp_2` /* gen
SET @commands= 'Drop-Temp-If-TXe-Temp';
DROP TEMPORARY TABLE IF EXISTS tt_tmp_2, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-Temp-If-TXe-Temp << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -117,7 +117,7 @@ master-bin.000001 # Query # # use `test`; DROP TEMPORARY TABLE IF EXISTS `tt_tmp
SET @commands= 'Drop-Temp-NXe-Temp';
DROP TEMPORARY TABLE nt_tmp_2, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-Temp-NXe-Temp << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -128,7 +128,7 @@ master-bin.000001 # Query # # use `test`; DROP TEMPORARY TABLE `nt_tmp_2` /* gen
SET @commands= 'Drop-Temp-If-NXe-Temp';
DROP TEMPORARY TABLE IF EXISTS nt_tmp_2, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-Temp-If-NXe-Temp << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -249,7 +249,7 @@ SET @commands= 'B T Drop-Temp-Xe-Temp C';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_xx_1;
-ERROR 42S02: Unknown table 'tt_xx_1'
+ERROR 42S02: Unknown table 'test.tt_xx_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-Xe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -263,10 +263,10 @@ SET @commands= 'B T Drop-Temp-Xe-Temp N Drop-Temp-Xe-Temp C';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_xx_1;
-ERROR 42S02: Unknown table 'tt_xx_1'
+ERROR 42S02: Unknown table 'test.tt_xx_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_xx_1;
-ERROR 42S02: Unknown table 'tt_xx_1'
+ERROR 42S02: Unknown table 'test.tt_xx_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-Xe-Temp N Drop-Temp-Xe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -284,7 +284,7 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_xx_1;
Warnings:
-Note 1051 Unknown table 'tt_xx_1'
+Note 1051 Unknown table 'test.tt_xx_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-If-Xe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -300,11 +300,11 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_xx_1;
Warnings:
-Note 1051 Unknown table 'tt_xx_1'
+Note 1051 Unknown table 'test.tt_xx_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_xx_1;
Warnings:
-Note 1051 Unknown table 'tt_xx_1'
+Note 1051 Unknown table 'test.tt_xx_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-If-Xe-Temp N Drop-Temp-If-Xe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -323,7 +323,7 @@ SET @commands= 'B T Drop-Temp-TXe-Temp C';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_tmp_2, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-TXe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -338,10 +338,10 @@ SET @commands= 'B T Drop-Temp-TXe-Temp N Drop-Temp-TXe-Temp C';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_tmp_2, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_tmp_1, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-TXe-Temp N Drop-Temp-TXe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -361,7 +361,7 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_tmp_2, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-If-TXe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -377,11 +377,11 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_tmp_2, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_tmp_1, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-If-TXe-Temp N Drop-Temp-If-TXe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -400,7 +400,7 @@ SET @commands= 'B T Drop-Temp-NXe-Temp C';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE nt_tmp_2, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-NXe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -417,10 +417,10 @@ SET @commands= 'B T Drop-Temp-NXe-Temp N Drop-Temp-NXe-Temp C';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE nt_tmp_2, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE nt_tmp_1, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-NXe-Temp N Drop-Temp-NXe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -444,7 +444,7 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS nt_tmp_2, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-If-NXe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -463,11 +463,11 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS nt_tmp_2, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS nt_tmp_1, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
COMMIT;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-If-NXe-Temp N Drop-Temp-If-NXe-Temp C << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -757,7 +757,7 @@ SET @commands= 'B T Drop-Temp-Xe-Temp R';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_xx_1;
-ERROR 42S02: Unknown table 'tt_xx_1'
+ERROR 42S02: Unknown table 'test.tt_xx_1'
ROLLBACK;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-Xe-Temp R << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -771,10 +771,10 @@ SET @commands= 'B T Drop-Temp-Xe-Temp N Drop-Temp-Xe-Temp R';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_xx_1;
-ERROR 42S02: Unknown table 'tt_xx_1'
+ERROR 42S02: Unknown table 'test.tt_xx_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_xx_1;
-ERROR 42S02: Unknown table 'tt_xx_1'
+ERROR 42S02: Unknown table 'test.tt_xx_1'
ROLLBACK;
Warnings:
Warning # Some non-transactional changed tables couldn't be rolled back
@@ -794,7 +794,7 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_xx_1;
Warnings:
-Note 1051 Unknown table 'tt_xx_1'
+Note 1051 Unknown table 'test.tt_xx_1'
ROLLBACK;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-If-Xe-Temp R << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -810,11 +810,11 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_xx_1;
Warnings:
-Note 1051 Unknown table 'tt_xx_1'
+Note 1051 Unknown table 'test.tt_xx_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_xx_1;
Warnings:
-Note 1051 Unknown table 'tt_xx_1'
+Note 1051 Unknown table 'test.tt_xx_1'
ROLLBACK;
Warnings:
Warning # Some non-transactional changed tables couldn't be rolled back
@@ -835,7 +835,7 @@ SET @commands= 'B T Drop-Temp-TXe-Temp R';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_tmp_2, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
ROLLBACK;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-TXe-Temp R << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -850,10 +850,10 @@ SET @commands= 'B T Drop-Temp-TXe-Temp N Drop-Temp-TXe-Temp R';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_tmp_2, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE tt_tmp_1, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
ROLLBACK;
Warnings:
Warning # Some non-transactional changed tables couldn't be rolled back
@@ -875,7 +875,7 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_tmp_2, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
ROLLBACK;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-If-TXe-Temp R << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -891,11 +891,11 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_tmp_2, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS tt_tmp_1, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
ROLLBACK;
Warnings:
Warning # Some non-transactional changed tables couldn't be rolled back
@@ -916,7 +916,7 @@ SET @commands= 'B T Drop-Temp-NXe-Temp R';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE nt_tmp_2, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
ROLLBACK;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-NXe-Temp R << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -933,10 +933,10 @@ SET @commands= 'B T Drop-Temp-NXe-Temp N Drop-Temp-NXe-Temp R';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE nt_tmp_2, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE nt_tmp_1, tt_1;
-ERROR 42S02: Unknown table 'tt_1'
+ERROR 42S02: Unknown table 'test.tt_1'
ROLLBACK;
Warnings:
Warning # Some non-transactional changed tables couldn't be rolled back
@@ -962,7 +962,7 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS nt_tmp_2, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
ROLLBACK;
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Temp-If-NXe-Temp R << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
@@ -981,11 +981,11 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS nt_tmp_2, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
INSERT INTO nt_xx_1() VALUES (1);
DROP TEMPORARY TABLE IF EXISTS nt_tmp_1, tt_1;
Warnings:
-Note 1051 Unknown table 'tt_1'
+Note 1051 Unknown table 'test.tt_1'
ROLLBACK;
Warnings:
Warning # Some non-transactional changed tables couldn't be rolled back
@@ -1230,7 +1230,7 @@ master-bin.000001 # Query # # use `test`; DROP TABLE `nt_2` /* generated by serv
SET @commands= 'Drop-Xe';
DROP TABLE xx_1;
-ERROR 42S02: Unknown table 'xx_1'
+ERROR 42S02: Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-Xe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
-e-e-e-e-e-e-e-e-e-e-e- >> Drop-Xe << -e-e-e-e-e-e-e-e-e-e-e-
@@ -1238,7 +1238,7 @@ include/show_binlog_events.inc
SET @commands= 'Drop-If-Xe';
DROP TABLE IF EXISTS xx_1;
Warnings:
-Note 1051 Unknown table 'xx_1'
+Note 1051 Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-If-Xe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -1248,7 +1248,7 @@ master-bin.000001 # Query # # use `test`; DROP TABLE IF EXISTS `xx_1` /* generat
SET @commands= 'Drop-TXe';
DROP TABLE tt_2, xx_1;
-ERROR 42S02: Unknown table 'xx_1'
+ERROR 42S02: Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-TXe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -1259,7 +1259,7 @@ master-bin.000001 # Query # # use `test`; DROP TABLE `tt_2`,`xx_1` /* generated
SET @commands= 'Drop-If-TXe';
DROP TABLE IF EXISTS tt_2, xx_1;
Warnings:
-Note 1051 Unknown table 'xx_1'
+Note 1051 Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-If-TXe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -1269,7 +1269,7 @@ master-bin.000001 # Query # # use `test`; DROP TABLE IF EXISTS `tt_2`,`xx_1` /*
SET @commands= 'Drop-NXe';
DROP TABLE nt_2, xx_1;
-ERROR 42S02: Unknown table 'xx_1'
+ERROR 42S02: Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-NXe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -1280,7 +1280,7 @@ master-bin.000001 # Query # # use `test`; DROP TABLE `nt_2`,`xx_1` /* generated
SET @commands= 'Drop-If-NXe';
DROP TABLE IF EXISTS nt_2, xx_1;
Warnings:
-Note 1051 Unknown table 'xx_1'
+Note 1051 Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> Drop-If-NXe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -1375,7 +1375,7 @@ SET @commands= 'B T Drop-Xe';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TABLE xx_1;
-ERROR 42S02: Unknown table 'xx_1'
+ERROR 42S02: Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-Xe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -1389,7 +1389,7 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TABLE IF EXISTS xx_1;
Warnings:
-Note 1051 Unknown table 'xx_1'
+Note 1051 Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-If-Xe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -1404,7 +1404,7 @@ SET @commands= 'B T Drop-TXe';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TABLE tt_2, xx_1;
-ERROR 42S02: Unknown table 'xx_1'
+ERROR 42S02: Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-TXe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -1420,7 +1420,7 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TABLE IF EXISTS tt_2, xx_1;
Warnings:
-Note 1051 Unknown table 'xx_1'
+Note 1051 Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-If-TXe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -1435,7 +1435,7 @@ SET @commands= 'B T Drop-NXe';
BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TABLE nt_2, xx_1;
-ERROR 42S02: Unknown table 'xx_1'
+ERROR 42S02: Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-NXe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
@@ -1451,7 +1451,7 @@ BEGIN;
INSERT INTO tt_xx_1() VALUES (1);
DROP TABLE IF EXISTS nt_2, xx_1;
Warnings:
-Note 1051 Unknown table 'xx_1'
+Note 1051 Unknown table 'test.xx_1'
-b-b-b-b-b-b-b-b-b-b-b- >> B T Drop-If-NXe << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info
diff --git a/mysql-test/suite/rpl/r/rpl_stm_implicit_commit_binlog.result b/mysql-test/suite/rpl/r/rpl_stm_implicit_commit_binlog.result
index 65b65d4a21f..3d5bbea8e93 100644
--- a/mysql-test/suite/rpl/r/rpl_stm_implicit_commit_binlog.result
+++ b/mysql-test/suite/rpl/r/rpl_stm_implicit_commit_binlog.result
@@ -552,7 +552,7 @@ master-bin.000001 # Query # # use `test`; TRUNCATE TABLE new_tt_xx
INSERT INTO tt_1(ddl_case) VALUES (3);
DROP TABLE IF EXISTS tt_xx, new_tt_xx;
Warnings:
-Note 1051 Unknown table 'tt_xx'
+Note 1051 Unknown table 'test.tt_xx'
-e-e-e-e-e-e-e-e-e-e-e- >> << -e-e-e-e-e-e-e-e-e-e-e-
-b-b-b-b-b-b-b-b-b-b-b- >> << -b-b-b-b-b-b-b-b-b-b-b-
include/show_binlog_events.inc
diff --git a/mysql-test/suite/rpl/r/rpl_stm_no_op.result b/mysql-test/suite/rpl/r/rpl_stm_no_op.result
index cf5b03cdfa6..52e893d616b 100644
--- a/mysql-test/suite/rpl/r/rpl_stm_no_op.result
+++ b/mysql-test/suite/rpl/r/rpl_stm_no_op.result
@@ -9,7 +9,7 @@ ERROR 42000: Unknown database 'mysqltest'
create table t1 (a int);
drop table if exists t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
select * from t1;
ERROR 42S02: Table 'test.t1' doesn't exist
create table t1 (a int, b int);
diff --git a/mysql-test/suite/rpl/r/rpl_temp_table.result b/mysql-test/suite/rpl/r/rpl_temp_table.result
index e7df070874a..504b0471748 100644
--- a/mysql-test/suite/rpl/r/rpl_temp_table.result
+++ b/mysql-test/suite/rpl/r/rpl_temp_table.result
@@ -41,5 +41,5 @@ Variable_name Value
Slave_open_temp_tables 0
drop table if exists t1,t2;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
include/rpl_end.inc
diff --git a/mysql-test/suite/sys_vars/r/host_cache_size_basic.result b/mysql-test/suite/sys_vars/r/host_cache_size_basic.result
new file mode 100644
index 00000000000..0a25202e7e4
--- /dev/null
+++ b/mysql-test/suite/sys_vars/r/host_cache_size_basic.result
@@ -0,0 +1,37 @@
+select @@global.host_cache_size;
+@@global.host_cache_size
+123
+select @@session.host_cache_size;
+ERROR HY000: Variable 'host_cache_size' is a GLOBAL variable
+show global variables like 'host_cache_size';
+Variable_name Value
+host_cache_size 123
+show session variables like 'host_cache_size';
+Variable_name Value
+host_cache_size 123
+select * from information_schema.global_variables
+where variable_name='host_cache_size';
+VARIABLE_NAME VARIABLE_VALUE
+HOST_CACHE_SIZE 123
+select * from information_schema.session_variables
+where variable_name='host_cache_size';
+VARIABLE_NAME VARIABLE_VALUE
+HOST_CACHE_SIZE 123
+set global host_cache_size=1;
+select @@global.host_cache_size;
+@@global.host_cache_size
+1
+set global host_cache_size=12;
+select @@global.host_cache_size;
+@@global.host_cache_size
+12
+set global host_cache_size=0;
+select @@global.host_cache_size;
+@@global.host_cache_size
+0
+set session host_cache_size=1;
+ERROR HY000: Variable 'host_cache_size' is a GLOBAL variable and should be set with SET GLOBAL
+set global host_cache_size=123;
+select @@global.host_cache_size;
+@@global.host_cache_size
+123
diff --git a/mysql-test/suite/sys_vars/r/innodb_adaptive_flushing_lwm_basic.result b/mysql-test/suite/sys_vars/r/innodb_adaptive_flushing_lwm_basic.result
new file mode 100644
index 00000000000..1797845def2
--- /dev/null
+++ b/mysql-test/suite/sys_vars/r/innodb_adaptive_flushing_lwm_basic.result
@@ -0,0 +1,96 @@
+SET @global_start_value = @@global.innodb_adaptive_flushing_lwm;
+SELECT @global_start_value;
+@global_start_value
+10
+'#--------------------FN_DYNVARS_046_01------------------------#'
+SET @@global.innodb_adaptive_flushing_lwm = 1;
+SET @@global.innodb_adaptive_flushing_lwm = DEFAULT;
+SELECT @@global.innodb_adaptive_flushing_lwm;
+@@global.innodb_adaptive_flushing_lwm
+10
+'#---------------------FN_DYNVARS_046_02-------------------------#'
+SET innodb_adaptive_flushing_lwm = 1;
+ERROR HY000: Variable 'innodb_adaptive_flushing_lwm' is a GLOBAL variable and should be set with SET GLOBAL
+SELECT @@innodb_adaptive_flushing_lwm;
+@@innodb_adaptive_flushing_lwm
+10
+SELECT local.innodb_adaptive_flushing_lwm;
+ERROR 42S02: Unknown table 'local' in field list
+SET global innodb_adaptive_flushing_lwm = 1;
+SELECT @@global.innodb_adaptive_flushing_lwm;
+@@global.innodb_adaptive_flushing_lwm
+1
+'#--------------------FN_DYNVARS_046_03------------------------#'
+SET @@global.innodb_adaptive_flushing_lwm = 1;
+SELECT @@global.innodb_adaptive_flushing_lwm;
+@@global.innodb_adaptive_flushing_lwm
+1
+SET @@global.innodb_adaptive_flushing_lwm = 60;
+SELECT @@global.innodb_adaptive_flushing_lwm;
+@@global.innodb_adaptive_flushing_lwm
+60
+SET @@global.innodb_adaptive_flushing_lwm = 70;
+SELECT @@global.innodb_adaptive_flushing_lwm;
+@@global.innodb_adaptive_flushing_lwm
+70
+'#--------------------FN_DYNVARS_046_04-------------------------#'
+SET @@global.innodb_adaptive_flushing_lwm = -1;
+Warnings:
+Warning 1292 Truncated incorrect innodb_adaptive_flushing_lwm value: '-1'
+SELECT @@global.innodb_adaptive_flushing_lwm;
+@@global.innodb_adaptive_flushing_lwm
+0
+SET @@global.innodb_adaptive_flushing_lwm = "T";
+ERROR 42000: Incorrect argument type to variable 'innodb_adaptive_flushing_lwm'
+SELECT @@global.innodb_adaptive_flushing_lwm;
+@@global.innodb_adaptive_flushing_lwm
+0
+SET @@global.innodb_adaptive_flushing_lwm = "Y";
+ERROR 42000: Incorrect argument type to variable 'innodb_adaptive_flushing_lwm'
+SELECT @@global.innodb_adaptive_flushing_lwm;
+@@global.innodb_adaptive_flushing_lwm
+0
+SET @@global.innodb_adaptive_flushing_lwm = 71;
+Warnings:
+Warning 1292 Truncated incorrect innodb_adaptive_flushing_lwm value: '71'
+SELECT @@global.innodb_adaptive_flushing_lwm;
+@@global.innodb_adaptive_flushing_lwm
+70
+'#----------------------FN_DYNVARS_046_05------------------------#'
+SELECT @@global.innodb_adaptive_flushing_lwm =
+VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+WHERE VARIABLE_NAME='innodb_adaptive_flushing_lwm';
+@@global.innodb_adaptive_flushing_lwm =
+VARIABLE_VALUE
+1
+SELECT @@global.innodb_adaptive_flushing_lwm;
+@@global.innodb_adaptive_flushing_lwm
+70
+SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+WHERE VARIABLE_NAME='innodb_adaptive_flushing_lwm';
+VARIABLE_VALUE
+70
+'#---------------------FN_DYNVARS_046_06-------------------------#'
+SET @@global.innodb_adaptive_flushing_lwm = OFF;
+ERROR 42000: Incorrect argument type to variable 'innodb_adaptive_flushing_lwm'
+SELECT @@global.innodb_adaptive_flushing_lwm;
+@@global.innodb_adaptive_flushing_lwm
+70
+SET @@global.innodb_adaptive_flushing_lwm = ON;
+ERROR 42000: Incorrect argument type to variable 'innodb_adaptive_flushing_lwm'
+SELECT @@global.innodb_adaptive_flushing_lwm;
+@@global.innodb_adaptive_flushing_lwm
+70
+'#---------------------FN_DYNVARS_046_07----------------------#'
+SET @@global.innodb_adaptive_flushing_lwm = TRUE;
+SELECT @@global.innodb_adaptive_flushing_lwm;
+@@global.innodb_adaptive_flushing_lwm
+1
+SET @@global.innodb_adaptive_flushing_lwm = FALSE;
+SELECT @@global.innodb_adaptive_flushing_lwm;
+@@global.innodb_adaptive_flushing_lwm
+0
+SET @@global.innodb_adaptive_flushing_lwm = @global_start_value;
+SELECT @@global.innodb_adaptive_flushing_lwm;
+@@global.innodb_adaptive_flushing_lwm
+10
diff --git a/mysql-test/suite/sys_vars/r/innodb_analyze_is_persistent_basic.result b/mysql-test/suite/sys_vars/r/innodb_analyze_is_persistent_basic.result
deleted file mode 100644
index db3deb560ec..00000000000
--- a/mysql-test/suite/sys_vars/r/innodb_analyze_is_persistent_basic.result
+++ /dev/null
@@ -1,103 +0,0 @@
-SET @start_global_value = @@global.innodb_analyze_is_persistent;
-SELECT @start_global_value;
-@start_global_value
-0
-Valid values are 'ON' and 'OFF'
-SELECT @@global.innodb_analyze_is_persistent in (0, 1);
-@@global.innodb_analyze_is_persistent in (0, 1)
-1
-SELECT @@global.innodb_analyze_is_persistent;
-@@global.innodb_analyze_is_persistent
-0
-SELECT @@session.innodb_analyze_is_persistent;
-@@session.innodb_analyze_is_persistent
-0
-SHOW global variables LIKE 'innodb_analyze_is_persistent';
-Variable_name Value
-innodb_analyze_is_persistent OFF
-SHOW session variables LIKE 'innodb_analyze_is_persistent';
-Variable_name Value
-innodb_analyze_is_persistent OFF
-SELECT * FROM information_schema.global_variables
-WHERE variable_name='innodb_analyze_is_persistent';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_ANALYZE_IS_PERSISTENT OFF
-SELECT * FROM information_schema.session_variables
-WHERE variable_name='innodb_analyze_is_persistent';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_ANALYZE_IS_PERSISTENT OFF
-SET global innodb_analyze_is_persistent='OFF';
-SELECT @@global.innodb_analyze_is_persistent;
-@@global.innodb_analyze_is_persistent
-0
-SELECT * FROM information_schema.global_variables
-WHERE variable_name='innodb_analyze_is_persistent';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_ANALYZE_IS_PERSISTENT OFF
-SELECT * FROM information_schema.session_variables
-WHERE variable_name='innodb_analyze_is_persistent';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_ANALYZE_IS_PERSISTENT OFF
-SET @@global.innodb_analyze_is_persistent=1;
-SELECT @@global.innodb_analyze_is_persistent;
-@@global.innodb_analyze_is_persistent
-1
-SELECT * FROM information_schema.global_variables
-WHERE variable_name='innodb_analyze_is_persistent';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_ANALYZE_IS_PERSISTENT ON
-SELECT * FROM information_schema.session_variables
-WHERE variable_name='innodb_analyze_is_persistent';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_ANALYZE_IS_PERSISTENT OFF
-SET global innodb_analyze_is_persistent=0;
-SELECT @@global.innodb_analyze_is_persistent;
-@@global.innodb_analyze_is_persistent
-0
-SELECT * FROM information_schema.global_variables
-WHERE variable_name='innodb_analyze_is_persistent';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_ANALYZE_IS_PERSISTENT OFF
-SELECT * FROM information_schema.session_variables
-WHERE variable_name='innodb_analyze_is_persistent';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_ANALYZE_IS_PERSISTENT OFF
-SET @@global.innodb_analyze_is_persistent='ON';
-SELECT @@global.innodb_analyze_is_persistent;
-@@global.innodb_analyze_is_persistent
-1
-SELECT * FROM information_schema.global_variables
-WHERE variable_name='innodb_analyze_is_persistent';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_ANALYZE_IS_PERSISTENT ON
-SELECT * FROM information_schema.session_variables
-WHERE variable_name='innodb_analyze_is_persistent';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_ANALYZE_IS_PERSISTENT OFF
-SET session innodb_analyze_is_persistent='OFF';
-SET @@session.innodb_analyze_is_persistent='ON';
-SET global innodb_analyze_is_persistent=1.1;
-ERROR 42000: Incorrect argument type to variable 'innodb_analyze_is_persistent'
-SET global innodb_analyze_is_persistent=1e1;
-ERROR 42000: Incorrect argument type to variable 'innodb_analyze_is_persistent'
-SET global innodb_analyze_is_persistent=2;
-ERROR 42000: Variable 'innodb_analyze_is_persistent' can't be set to the value of '2'
-SET global innodb_analyze_is_persistent=-3;
-ERROR 42000: Variable 'innodb_analyze_is_persistent' can't be set to the value of '-3'
-SELECT @@global.innodb_analyze_is_persistent;
-@@global.innodb_analyze_is_persistent
-1
-SELECT * FROM information_schema.global_variables
-WHERE variable_name='innodb_analyze_is_persistent';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_ANALYZE_IS_PERSISTENT ON
-SELECT * FROM information_schema.session_variables
-WHERE variable_name='innodb_analyze_is_persistent';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_ANALYZE_IS_PERSISTENT ON
-SET global innodb_analyze_is_persistent='AUTO';
-ERROR 42000: Variable 'innodb_analyze_is_persistent' can't be set to the value of 'AUTO'
-SET @@global.innodb_analyze_is_persistent = @start_global_value;
-SELECT @@global.innodb_analyze_is_persistent;
-@@global.innodb_analyze_is_persistent
-0
diff --git a/mysql-test/suite/sys_vars/r/innodb_api_bk_commit_interval_basic.result b/mysql-test/suite/sys_vars/r/innodb_api_bk_commit_interval_basic.result
new file mode 100644
index 00000000000..d2773b7da69
--- /dev/null
+++ b/mysql-test/suite/sys_vars/r/innodb_api_bk_commit_interval_basic.result
@@ -0,0 +1,64 @@
+SET @start_global_value = @@global.innodb_api_bk_commit_interval;
+SELECT @start_global_value;
+@start_global_value
+5
+Valid values are positive number
+SELECT @@global.innodb_api_bk_commit_interval > 0;
+@@global.innodb_api_bk_commit_interval > 0
+1
+SELECT @@global.innodb_api_bk_commit_interval <= 1024*1024*1024;
+@@global.innodb_api_bk_commit_interval <= 1024*1024*1024
+1
+SELECT @@global.innodb_api_bk_commit_interval;
+@@global.innodb_api_bk_commit_interval
+5
+SELECT @@session.innodb_api_bk_commit_interval;
+ERROR HY000: Variable 'innodb_api_bk_commit_interval' is a GLOBAL variable
+SHOW global variables LIKE 'innodb_api_bk_commit_interval';
+Variable_name Value
+innodb_api_bk_commit_interval 5
+SHOW session variables LIKE 'innodb_api_bk_commit_interval';
+Variable_name Value
+innodb_api_bk_commit_interval 5
+SELECT * FROM information_schema.global_variables
+WHERE variable_name='innodb_api_bk_commit_interval';
+VARIABLE_NAME VARIABLE_VALUE
+INNODB_API_BK_COMMIT_INTERVAL 5
+SELECT * FROM information_schema.session_variables
+WHERE variable_name='innodb_api_bk_commit_interval';
+VARIABLE_NAME VARIABLE_VALUE
+INNODB_API_BK_COMMIT_INTERVAL 5
+SET global innodb_api_bk_commit_interval=100;
+SELECT @@global.innodb_api_bk_commit_interval;
+@@global.innodb_api_bk_commit_interval
+100
+SELECT * FROM information_schema.global_variables
+WHERE variable_name='innodb_api_bk_commit_interval';
+VARIABLE_NAME VARIABLE_VALUE
+INNODB_API_BK_COMMIT_INTERVAL 100
+SELECT * FROM information_schema.session_variables
+WHERE variable_name='innodb_api_bk_commit_interval';
+VARIABLE_NAME VARIABLE_VALUE
+INNODB_API_BK_COMMIT_INTERVAL 100
+SET session innodb_api_bk_commit_interval=1;
+ERROR HY000: Variable 'innodb_api_bk_commit_interval' is a GLOBAL variable and should be set with SET GLOBAL
+SET global innodb_api_bk_commit_interval=1.1;
+ERROR 42000: Incorrect argument type to variable 'innodb_api_bk_commit_interval'
+SET global innodb_api_bk_commit_interval=1e1;
+ERROR 42000: Incorrect argument type to variable 'innodb_api_bk_commit_interval'
+SET global innodb_api_bk_commit_interval="foo";
+ERROR 42000: Incorrect argument type to variable 'innodb_api_bk_commit_interval'
+SET global innodb_api_bk_commit_interval=-7;
+Warnings:
+Warning 1292 Truncated incorrect innodb_api_bk_commit_interval value: '-7'
+SELECT @@global.innodb_api_bk_commit_interval;
+@@global.innodb_api_bk_commit_interval
+1
+SELECT * FROM information_schema.global_variables
+WHERE variable_name='innodb_api_bk_commit_interval';
+VARIABLE_NAME VARIABLE_VALUE
+INNODB_API_BK_COMMIT_INTERVAL 1
+SET @@global.innodb_api_bk_commit_interval = @start_global_value;
+SELECT @@global.innodb_api_bk_commit_interval;
+@@global.innodb_api_bk_commit_interval
+5
diff --git a/mysql-test/suite/sys_vars/r/innodb_api_disable_rowlock_basic.result b/mysql-test/suite/sys_vars/r/innodb_api_disable_rowlock_basic.result
new file mode 100644
index 00000000000..5256c1ece8a
--- /dev/null
+++ b/mysql-test/suite/sys_vars/r/innodb_api_disable_rowlock_basic.result
@@ -0,0 +1,53 @@
+'#---------------------BS_STVARS_035_01----------------------#'
+SELECT COUNT(@@GLOBAL.innodb_api_disable_rowlock);
+COUNT(@@GLOBAL.innodb_api_disable_rowlock)
+1
+1 Expected
+'#---------------------BS_STVARS_035_02----------------------#'
+SET @@GLOBAL.innodb_api_disable_rowlock=1;
+ERROR HY000: Variable 'innodb_api_disable_rowlock' is a read only variable
+Expected error 'Read only variable'
+SELECT COUNT(@@GLOBAL.innodb_api_disable_rowlock);
+COUNT(@@GLOBAL.innodb_api_disable_rowlock)
+1
+1 Expected
+'#---------------------BS_STVARS_035_03----------------------#'
+SELECT IF(@@GLOBAL.innodb_api_disable_rowlock, 'ON', 'OFF') = VARIABLE_VALUE
+FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+WHERE VARIABLE_NAME='innodb_api_disable_rowlock';
+IF(@@GLOBAL.innodb_api_disable_rowlock, 'ON', 'OFF') = VARIABLE_VALUE
+1
+1 Expected
+SELECT COUNT(@@GLOBAL.innodb_api_disable_rowlock);
+COUNT(@@GLOBAL.innodb_api_disable_rowlock)
+1
+1 Expected
+SELECT COUNT(VARIABLE_VALUE)
+FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+WHERE VARIABLE_NAME='innodb_api_disable_rowlock';
+COUNT(VARIABLE_VALUE)
+1
+1 Expected
+'#---------------------BS_STVARS_035_04----------------------#'
+SELECT @@innodb_api_disable_rowlock = @@GLOBAL.innodb_api_enable_binlog;
+@@innodb_api_disable_rowlock = @@GLOBAL.innodb_api_enable_binlog
+1
+1 Expected
+'#---------------------BS_STVARS_035_05----------------------#'
+SELECT COUNT(@@innodb_api_disable_rowlock);
+COUNT(@@innodb_api_disable_rowlock)
+1
+1 Expected
+SELECT COUNT(@@local.innodb_api_disable_rowlock);
+ERROR HY000: Variable 'innodb_api_disable_rowlock' is a GLOBAL variable
+Expected error 'Variable is a GLOBAL variable'
+SELECT COUNT(@@SESSION.innodb_api_disable_rowlock);
+ERROR HY000: Variable 'innodb_api_disable_rowlock' is a GLOBAL variable
+Expected error 'Variable is a GLOBAL variable'
+SELECT COUNT(@@GLOBAL.innodb_api_disable_rowlock);
+COUNT(@@GLOBAL.innodb_api_disable_rowlock)
+1
+1 Expected
+SELECT innodb_api_disable_rowlock = @@SESSION.innodb_api_enable_binlog;
+ERROR 42S22: Unknown column 'innodb_api_disable_rowlock' in 'field list'
+Expected error 'Readonly variable'
diff --git a/mysql-test/suite/sys_vars/r/innodb_api_enable_binlog_basic.result b/mysql-test/suite/sys_vars/r/innodb_api_enable_binlog_basic.result
new file mode 100644
index 00000000000..f06fbeb8da7
--- /dev/null
+++ b/mysql-test/suite/sys_vars/r/innodb_api_enable_binlog_basic.result
@@ -0,0 +1,53 @@
+'#---------------------BS_STVARS_035_01----------------------#'
+SELECT COUNT(@@GLOBAL.innodb_api_enable_binlog);
+COUNT(@@GLOBAL.innodb_api_enable_binlog)
+1
+1 Expected
+'#---------------------BS_STVARS_035_02----------------------#'
+SET @@GLOBAL.innodb_api_enable_binlog=1;
+ERROR HY000: Variable 'innodb_api_enable_binlog' is a read only variable
+Expected error 'Read only variable'
+SELECT COUNT(@@GLOBAL.innodb_api_enable_binlog);
+COUNT(@@GLOBAL.innodb_api_enable_binlog)
+1
+1 Expected
+'#---------------------BS_STVARS_035_03----------------------#'
+SELECT IF(@@GLOBAL.innodb_api_enable_binlog, 'ON', 'OFF') = VARIABLE_VALUE
+FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+WHERE VARIABLE_NAME='innodb_api_enable_binlog';
+IF(@@GLOBAL.innodb_api_enable_binlog, 'ON', 'OFF') = VARIABLE_VALUE
+1
+1 Expected
+SELECT COUNT(@@GLOBAL.innodb_api_enable_binlog);
+COUNT(@@GLOBAL.innodb_api_enable_binlog)
+1
+1 Expected
+SELECT COUNT(VARIABLE_VALUE)
+FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+WHERE VARIABLE_NAME='innodb_api_enable_binlog';
+COUNT(VARIABLE_VALUE)
+1
+1 Expected
+'#---------------------BS_STVARS_035_04----------------------#'
+SELECT @@innodb_api_enable_binlog = @@GLOBAL.innodb_api_enable_binlog;
+@@innodb_api_enable_binlog = @@GLOBAL.innodb_api_enable_binlog
+1
+1 Expected
+'#---------------------BS_STVARS_035_05----------------------#'
+SELECT COUNT(@@innodb_api_enable_binlog);
+COUNT(@@innodb_api_enable_binlog)
+1
+1 Expected
+SELECT COUNT(@@local.innodb_api_enable_binlog);
+ERROR HY000: Variable 'innodb_api_enable_binlog' is a GLOBAL variable
+Expected error 'Variable is a GLOBAL variable'
+SELECT COUNT(@@SESSION.innodb_api_enable_binlog);
+ERROR HY000: Variable 'innodb_api_enable_binlog' is a GLOBAL variable
+Expected error 'Variable is a GLOBAL variable'
+SELECT COUNT(@@GLOBAL.innodb_api_enable_binlog);
+COUNT(@@GLOBAL.innodb_api_enable_binlog)
+1
+1 Expected
+SELECT innodb_api_enable_binlog = @@SESSION.innodb_api_enable_binlog;
+ERROR 42S22: Unknown column 'innodb_api_enable_binlog' in 'field list'
+Expected error 'Readonly variable'
diff --git a/mysql-test/suite/sys_vars/r/innodb_api_enable_mdl_basic.result b/mysql-test/suite/sys_vars/r/innodb_api_enable_mdl_basic.result
new file mode 100644
index 00000000000..4484b151396
--- /dev/null
+++ b/mysql-test/suite/sys_vars/r/innodb_api_enable_mdl_basic.result
@@ -0,0 +1,53 @@
+'#---------------------BS_STVARS_035_01----------------------#'
+SELECT COUNT(@@GLOBAL.innodb_api_enable_mdl);
+COUNT(@@GLOBAL.innodb_api_enable_mdl)
+1
+1 Expected
+'#---------------------BS_STVARS_035_02----------------------#'
+SET @@GLOBAL.innodb_api_enable_mdl=1;
+ERROR HY000: Variable 'innodb_api_enable_mdl' is a read only variable
+Expected error 'Read only variable'
+SELECT COUNT(@@GLOBAL.innodb_api_enable_mdl);
+COUNT(@@GLOBAL.innodb_api_enable_mdl)
+1
+1 Expected
+'#---------------------BS_STVARS_035_03----------------------#'
+SELECT IF(@@GLOBAL.innodb_api_enable_mdl, 'ON', 'OFF') = VARIABLE_VALUE
+FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+WHERE VARIABLE_NAME='innodb_api_enable_mdl';
+IF(@@GLOBAL.innodb_api_enable_mdl, 'ON', 'OFF') = VARIABLE_VALUE
+1
+1 Expected
+SELECT COUNT(@@GLOBAL.innodb_api_enable_mdl);
+COUNT(@@GLOBAL.innodb_api_enable_mdl)
+1
+1 Expected
+SELECT COUNT(VARIABLE_VALUE)
+FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+WHERE VARIABLE_NAME='innodb_api_enable_mdl';
+COUNT(VARIABLE_VALUE)
+1
+1 Expected
+'#---------------------BS_STVARS_035_04----------------------#'
+SELECT @@innodb_api_enable_mdl = @@GLOBAL.innodb_api_enable_mdl;
+@@innodb_api_enable_mdl = @@GLOBAL.innodb_api_enable_mdl
+1
+1 Expected
+'#---------------------BS_STVARS_035_05----------------------#'
+SELECT COUNT(@@innodb_api_enable_mdl);
+COUNT(@@innodb_api_enable_mdl)
+1
+1 Expected
+SELECT COUNT(@@local.innodb_api_enable_mdl);
+ERROR HY000: Variable 'innodb_api_enable_mdl' is a GLOBAL variable
+Expected error 'Variable is a GLOBAL variable'
+SELECT COUNT(@@SESSION.innodb_api_enable_mdl);
+ERROR HY000: Variable 'innodb_api_enable_mdl' is a GLOBAL variable
+Expected error 'Variable is a GLOBAL variable'
+SELECT COUNT(@@GLOBAL.innodb_api_enable_mdl);
+COUNT(@@GLOBAL.innodb_api_enable_mdl)
+1
+1 Expected
+SELECT innodb_api_enable_mdl = @@SESSION.innodb_api_enable_mdl;
+ERROR 42S22: Unknown column 'innodb_api_enable_mdl' in 'field list'
+Expected error 'Readonly variable'
diff --git a/mysql-test/suite/sys_vars/r/innodb_api_trx_level_basic.result b/mysql-test/suite/sys_vars/r/innodb_api_trx_level_basic.result
new file mode 100644
index 00000000000..41071799883
--- /dev/null
+++ b/mysql-test/suite/sys_vars/r/innodb_api_trx_level_basic.result
@@ -0,0 +1,66 @@
+SET @start_global_value = @@global.innodb_api_trx_level;
+SELECT @start_global_value;
+@start_global_value
+0
+Valid values are zero or above
+SELECT @@global.innodb_api_trx_level >=0;
+@@global.innodb_api_trx_level >=0
+1
+SELECT @@global.innodb_api_trx_level <=3;
+@@global.innodb_api_trx_level <=3
+1
+SELECT @@global.innodb_api_trx_level;
+@@global.innodb_api_trx_level
+0
+SELECT @@session.innodb_api_trx_level;
+ERROR HY000: Variable 'innodb_api_trx_level' is a GLOBAL variable
+SHOW global variables LIKE 'innodb_api_trx_level';
+Variable_name Value
+innodb_api_trx_level 0
+SHOW session variables LIKE 'innodb_api_trx_level';
+Variable_name Value
+innodb_api_trx_level 0
+SELECT * FROM information_schema.global_variables
+WHERE variable_name='innodb_api_trx_level';
+VARIABLE_NAME VARIABLE_VALUE
+INNODB_API_TRX_LEVEL 0
+SELECT * FROM information_schema.session_variables
+WHERE variable_name='innodb_api_trx_level';
+VARIABLE_NAME VARIABLE_VALUE
+INNODB_API_TRX_LEVEL 0
+SET global innodb_api_trx_level=100;
+Warnings:
+Warning 1292 Truncated incorrect innodb_api_trx_level value: '100'
+SELECT @@global.innodb_api_trx_level;
+@@global.innodb_api_trx_level
+3
+SELECT * FROM information_schema.global_variables
+WHERE variable_name='innodb_api_trx_level';
+VARIABLE_NAME VARIABLE_VALUE
+INNODB_API_TRX_LEVEL 3
+SELECT * FROM information_schema.session_variables
+WHERE variable_name='innodb_api_trx_level';
+VARIABLE_NAME VARIABLE_VALUE
+INNODB_API_TRX_LEVEL 3
+SET session innodb_api_trx_level=1;
+ERROR HY000: Variable 'innodb_api_trx_level' is a GLOBAL variable and should be set with SET GLOBAL
+SET global innodb_api_trx_level=1.1;
+ERROR 42000: Incorrect argument type to variable 'innodb_api_trx_level'
+SET global innodb_api_trx_level=1e1;
+ERROR 42000: Incorrect argument type to variable 'innodb_api_trx_level'
+SET global innodb_api_trx_level="foo";
+ERROR 42000: Incorrect argument type to variable 'innodb_api_trx_level'
+SET global innodb_api_trx_level=-7;
+Warnings:
+Warning 1292 Truncated incorrect innodb_api_trx_level value: '-7'
+SELECT @@global.innodb_api_trx_level;
+@@global.innodb_api_trx_level
+0
+SELECT * FROM information_schema.global_variables
+WHERE variable_name='innodb_api_trx_level';
+VARIABLE_NAME VARIABLE_VALUE
+INNODB_API_TRX_LEVEL 0
+SET @@global.innodb_api_trx_level = @start_global_value;
+SELECT @@global.innodb_api_trx_level;
+@@global.innodb_api_trx_level
+0
diff --git a/mysql-test/suite/sys_vars/r/innodb_autoextend_increment_basic.result b/mysql-test/suite/sys_vars/r/innodb_autoextend_increment_basic.result
index 1f458a8e1fd..900f0167261 100644
--- a/mysql-test/suite/sys_vars/r/innodb_autoextend_increment_basic.result
+++ b/mysql-test/suite/sys_vars/r/innodb_autoextend_increment_basic.result
@@ -6,13 +6,13 @@ Warning 1292 Truncated incorrect innodb_autoextend_increment value: '0'
SET @@global.innodb_autoextend_increment = DEFAULT;
SELECT @@global.innodb_autoextend_increment ;
@@global.innodb_autoextend_increment
-8
+64
'#---------------------FN_DYNVARS_046_02-------------------------#'
SET innodb_autoextend_increment = 1;
ERROR HY000: Variable 'innodb_autoextend_increment' is a GLOBAL variable and should be set with SET GLOBAL
SELECT @@innodb_autoextend_increment ;
@@innodb_autoextend_increment
-8
+64
SELECT local.innodb_autoextend_increment ;
ERROR 42S02: Unknown table 'local' in field list
SET global innodb_autoextend_increment = 0;
diff --git a/mysql-test/suite/sys_vars/r/innodb_cmp_per_index_enabled_basic.result b/mysql-test/suite/sys_vars/r/innodb_cmp_per_index_enabled_basic.result
new file mode 100644
index 00000000000..3ee9448bdab
--- /dev/null
+++ b/mysql-test/suite/sys_vars/r/innodb_cmp_per_index_enabled_basic.result
@@ -0,0 +1,65 @@
+SELECT @@global.innodb_cmp_per_index_enabled;
+@@global.innodb_cmp_per_index_enabled
+0
+SET GLOBAL innodb_stats_persistent=123;
+ERROR 42000: Variable 'innodb_stats_persistent' can't be set to the value of '123'
+SET GLOBAL innodb_stats_persistent='foo';
+ERROR 42000: Variable 'innodb_stats_persistent' can't be set to the value of 'foo'
+SET GLOBAL innodb_cmp_per_index_enabled=ON;
+SELECT @@global.innodb_cmp_per_index_enabled;
+@@global.innodb_cmp_per_index_enabled
+1
+SET GLOBAL innodb_cmp_per_index_enabled=ON;
+SELECT @@global.innodb_cmp_per_index_enabled;
+@@global.innodb_cmp_per_index_enabled
+1
+SET GLOBAL innodb_cmp_per_index_enabled=OFF;
+SELECT @@global.innodb_cmp_per_index_enabled;
+@@global.innodb_cmp_per_index_enabled
+0
+SET GLOBAL innodb_cmp_per_index_enabled=OFF;
+SELECT @@global.innodb_cmp_per_index_enabled;
+@@global.innodb_cmp_per_index_enabled
+0
+SET GLOBAL innodb_file_format=Barracuda;
+SET GLOBAL innodb_cmp_per_index_enabled=ON;
+CREATE TABLE t (a INT) ENGINE=INNODB KEY_BLOCK_SIZE=8;
+INSERT INTO t VALUES (1);
+SELECT * FROM information_schema.innodb_cmp_per_index;
+database_name test
+table_name t
+index_name GEN_CLUST_INDEX
+compress_ops 1
+compress_ops_ok 1
+compress_time 0
+uncompress_ops 0
+uncompress_time 0
+SET GLOBAL innodb_cmp_per_index_enabled=OFF;
+SET GLOBAL innodb_cmp_per_index_enabled=ON;
+SELECT * FROM information_schema.innodb_cmp_per_index;
+DROP TABLE t;
+SET GLOBAL innodb_cmp_per_index_enabled=ON;
+CREATE TABLE t (a INT) ENGINE=INNODB KEY_BLOCK_SIZE=8;
+INSERT INTO t VALUES (1);
+SELECT * FROM information_schema.innodb_cmp_per_index;
+database_name test
+table_name t
+index_name GEN_CLUST_INDEX
+compress_ops 1
+compress_ops_ok 1
+compress_time 0
+uncompress_ops 0
+uncompress_time 0
+SET GLOBAL innodb_cmp_per_index_enabled=ON;
+SELECT * FROM information_schema.innodb_cmp_per_index;
+database_name test
+table_name t
+index_name GEN_CLUST_INDEX
+compress_ops 1
+compress_ops_ok 1
+compress_time 0
+uncompress_ops 0
+uncompress_time 0
+DROP TABLE t;
+SET GLOBAL innodb_file_format=default;
+SET GLOBAL innodb_cmp_per_index_enabled=default;
diff --git a/mysql-test/suite/sys_vars/r/innodb_compression_failure_threshold_pct_basic.result b/mysql-test/suite/sys_vars/r/innodb_compression_failure_threshold_pct_basic.result
new file mode 100644
index 00000000000..9f85eccdb7a
--- /dev/null
+++ b/mysql-test/suite/sys_vars/r/innodb_compression_failure_threshold_pct_basic.result
@@ -0,0 +1,96 @@
+SET @global_start_value = @@global.innodb_compression_failure_threshold_pct;
+SELECT @global_start_value;
+@global_start_value
+5
+'#--------------------FN_DYNVARS_046_01------------------------#'
+SET @@global.innodb_compression_failure_threshold_pct = 0;
+SET @@global.innodb_compression_failure_threshold_pct = DEFAULT;
+SELECT @@global.innodb_compression_failure_threshold_pct;
+@@global.innodb_compression_failure_threshold_pct
+5
+'#---------------------FN_DYNVARS_046_02-------------------------#'
+SET innodb_compression_failure_threshold_pct = 1;
+ERROR HY000: Variable 'innodb_compression_failure_threshold_pct' is a GLOBAL variable and should be set with SET GLOBAL
+SELECT @@innodb_compression_failure_threshold_pct;
+@@innodb_compression_failure_threshold_pct
+5
+SELECT local.innodb_compression_failure_threshold_pct;
+ERROR 42S02: Unknown table 'local' in field list
+SET global innodb_compression_failure_threshold_pct = 0;
+SELECT @@global.innodb_compression_failure_threshold_pct;
+@@global.innodb_compression_failure_threshold_pct
+0
+'#--------------------FN_DYNVARS_046_03------------------------#'
+SET @@global.innodb_compression_failure_threshold_pct = 0;
+SELECT @@global.innodb_compression_failure_threshold_pct;
+@@global.innodb_compression_failure_threshold_pct
+0
+SET @@global.innodb_compression_failure_threshold_pct = 1;
+SELECT @@global.innodb_compression_failure_threshold_pct;
+@@global.innodb_compression_failure_threshold_pct
+1
+SET @@global.innodb_compression_failure_threshold_pct = 100;
+SELECT @@global.innodb_compression_failure_threshold_pct;
+@@global.innodb_compression_failure_threshold_pct
+100
+'#--------------------FN_DYNVARS_046_04-------------------------#'
+SET @@global.innodb_compression_failure_threshold_pct = -1;
+Warnings:
+Warning 1292 Truncated incorrect innodb_compression_failure_thres value: '-1'
+SELECT @@global.innodb_compression_failure_threshold_pct;
+@@global.innodb_compression_failure_threshold_pct
+0
+SET @@global.innodb_compression_failure_threshold_pct = "T";
+ERROR 42000: Incorrect argument type to variable 'innodb_compression_failure_threshold_pct'
+SELECT @@global.innodb_compression_failure_threshold_pct;
+@@global.innodb_compression_failure_threshold_pct
+0
+SET @@global.innodb_compression_failure_threshold_pct = "Y";
+ERROR 42000: Incorrect argument type to variable 'innodb_compression_failure_threshold_pct'
+SELECT @@global.innodb_compression_failure_threshold_pct;
+@@global.innodb_compression_failure_threshold_pct
+0
+SET @@global.innodb_compression_failure_threshold_pct = 101;
+Warnings:
+Warning 1292 Truncated incorrect innodb_compression_failure_thres value: '101'
+SELECT @@global.innodb_compression_failure_threshold_pct;
+@@global.innodb_compression_failure_threshold_pct
+100
+'#----------------------FN_DYNVARS_046_05------------------------#'
+SELECT @@global.innodb_compression_failure_threshold_pct =
+VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+WHERE VARIABLE_NAME='innodb_compression_failure_threshold_pct';
+@@global.innodb_compression_failure_threshold_pct =
+VARIABLE_VALUE
+1
+SELECT @@global.innodb_compression_failure_threshold_pct;
+@@global.innodb_compression_failure_threshold_pct
+100
+SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+WHERE VARIABLE_NAME='innodb_compression_failure_threshold_pct';
+VARIABLE_VALUE
+100
+'#---------------------FN_DYNVARS_046_06-------------------------#'
+SET @@global.innodb_compression_failure_threshold_pct = OFF;
+ERROR 42000: Incorrect argument type to variable 'innodb_compression_failure_threshold_pct'
+SELECT @@global.innodb_compression_failure_threshold_pct;
+@@global.innodb_compression_failure_threshold_pct
+100
+SET @@global.innodb_compression_failure_threshold_pct = ON;
+ERROR 42000: Incorrect argument type to variable 'innodb_compression_failure_threshold_pct'
+SELECT @@global.innodb_compression_failure_threshold_pct;
+@@global.innodb_compression_failure_threshold_pct
+100
+'#---------------------FN_DYNVARS_046_07----------------------#'
+SET @@global.innodb_compression_failure_threshold_pct = TRUE;
+SELECT @@global.innodb_compression_failure_threshold_pct;
+@@global.innodb_compression_failure_threshold_pct
+1
+SET @@global.innodb_compression_failure_threshold_pct = FALSE;
+SELECT @@global.innodb_compression_failure_threshold_pct;
+@@global.innodb_compression_failure_threshold_pct
+0
+SET @@global.innodb_compression_failure_threshold_pct = @global_start_value;
+SELECT @@global.innodb_compression_failure_threshold_pct;
+@@global.innodb_compression_failure_threshold_pct
+5
diff --git a/mysql-test/suite/sys_vars/r/innodb_compression_level_basic.result b/mysql-test/suite/sys_vars/r/innodb_compression_level_basic.result
new file mode 100644
index 00000000000..b9d1a2e4953
--- /dev/null
+++ b/mysql-test/suite/sys_vars/r/innodb_compression_level_basic.result
@@ -0,0 +1,73 @@
+SET @start_global_value = @@global.innodb_compression_level;
+SELECT @start_global_value;
+@start_global_value
+6
+Valid value 0-9
+select @@global.innodb_compression_level <= 9;
+@@global.innodb_compression_level <= 9
+1
+select @@global.innodb_compression_level;
+@@global.innodb_compression_level
+6
+select @@session.innodb_compression_level;
+ERROR HY000: Variable 'innodb_compression_level' is a GLOBAL variable
+show global variables like 'innodb_compression_level';
+Variable_name Value
+innodb_compression_level 6
+show session variables like 'innodb_compression_level';
+Variable_name Value
+innodb_compression_level 6
+select * from information_schema.global_variables where variable_name='innodb_compression_level';
+VARIABLE_NAME VARIABLE_VALUE
+INNODB_COMPRESSION_LEVEL 6
+select * from information_schema.session_variables where variable_name='innodb_compression_level';
+VARIABLE_NAME VARIABLE_VALUE
+INNODB_COMPRESSION_LEVEL 6
+set global innodb_compression_level=2;
+select @@global.innodb_compression_level;
+@@global.innodb_compression_level
+2
+select * from information_schema.global_variables where variable_name='innodb_compression_level';
+VARIABLE_NAME VARIABLE_VALUE
+INNODB_COMPRESSION_LEVEL 2
+select * from information_schema.session_variables where variable_name='innodb_compression_level';
+VARIABLE_NAME VARIABLE_VALUE
+INNODB_COMPRESSION_LEVEL 2
+set session innodb_compression_level=4;
+ERROR HY000: Variable 'innodb_compression_level' is a GLOBAL variable and should be set with SET GLOBAL
+set global innodb_compression_level=1.1;
+ERROR 42000: Incorrect argument type to variable 'innodb_compression_level'
+set global innodb_compression_level=1e1;
+ERROR 42000: Incorrect argument type to variable 'innodb_compression_level'
+set global innodb_compression_level="foo";
+ERROR 42000: Incorrect argument type to variable 'innodb_compression_level'
+set global innodb_compression_level=10;
+Warnings:
+Warning 1292 Truncated incorrect innodb_compression_level value: '10'
+select @@global.innodb_compression_level;
+@@global.innodb_compression_level
+9
+select * from information_schema.global_variables where variable_name='innodb_compression_level';
+VARIABLE_NAME VARIABLE_VALUE
+INNODB_COMPRESSION_LEVEL 9
+set global innodb_compression_level=-7;
+Warnings:
+Warning 1292 Truncated incorrect innodb_compression_level value: '-7'
+select @@global.innodb_compression_level;
+@@global.innodb_compression_level
+0
+select * from information_schema.global_variables where variable_name='innodb_compression_level';
+VARIABLE_NAME VARIABLE_VALUE
+INNODB_COMPRESSION_LEVEL 0
+set global innodb_compression_level=0;
+select @@global.innodb_compression_level;
+@@global.innodb_compression_level
+0
+set global innodb_compression_level=9;
+select @@global.innodb_compression_level;
+@@global.innodb_compression_level
+9
+SET @@global.innodb_compression_level = @start_global_value;
+SELECT @@global.innodb_compression_level;
+@@global.innodb_compression_level
+6
diff --git a/mysql-test/suite/sys_vars/r/innodb_compression_pad_pct_max_basic.result b/mysql-test/suite/sys_vars/r/innodb_compression_pad_pct_max_basic.result
new file mode 100644
index 00000000000..628993ef873
--- /dev/null
+++ b/mysql-test/suite/sys_vars/r/innodb_compression_pad_pct_max_basic.result
@@ -0,0 +1,86 @@
+SET @global_start_value = @@global.innodb_compression_pad_pct_max;
+SELECT @global_start_value;
+@global_start_value
+50
+'#--------------------FN_DYNVARS_046_01------------------------#'
+SET @@global.innodb_compression_pad_pct_max = DEFAULT;
+SELECT @@global.innodb_compression_pad_pct_max;
+@@global.innodb_compression_pad_pct_max
+50
+'#---------------------FN_DYNVARS_046_02-------------------------#'
+SET innodb_compression_pad_pct_max = 1;
+ERROR HY000: Variable 'innodb_compression_pad_pct_max' is a GLOBAL variable and should be set with SET GLOBAL
+SELECT @@innodb_compression_pad_pct_max;
+@@innodb_compression_pad_pct_max
+50
+SELECT local.innodb_compression_pad_pct_max;
+ERROR 42S02: Unknown table 'local' in field list
+SET global innodb_compression_pad_pct_max = 0;
+SELECT @@global.innodb_compression_pad_pct_max;
+@@global.innodb_compression_pad_pct_max
+0
+'#--------------------FN_DYNVARS_046_03------------------------#'
+SET @@global.innodb_compression_pad_pct_max = 0;
+SELECT @@global.innodb_compression_pad_pct_max;
+@@global.innodb_compression_pad_pct_max
+0
+SET @@global.innodb_compression_pad_pct_max = 75;
+SELECT @@global.innodb_compression_pad_pct_max;
+@@global.innodb_compression_pad_pct_max
+75
+'#--------------------FN_DYNVARS_046_04-------------------------#'
+SET @@global.innodb_compression_pad_pct_max = -1;
+Warnings:
+Warning 1292 Truncated incorrect innodb_compression_pad_pct_max value: '-1'
+SELECT @@global.innodb_compression_pad_pct_max;
+@@global.innodb_compression_pad_pct_max
+0
+SET @@global.innodb_compression_pad_pct_max = "T";
+ERROR 42000: Incorrect argument type to variable 'innodb_compression_pad_pct_max'
+SELECT @@global.innodb_compression_pad_pct_max;
+@@global.innodb_compression_pad_pct_max
+0
+SET @@global.innodb_compression_pad_pct_max = 76;
+Warnings:
+Warning 1292 Truncated incorrect innodb_compression_pad_pct_max value: '76'
+SELECT @@global.innodb_compression_pad_pct_max;
+@@global.innodb_compression_pad_pct_max
+75
+'#----------------------FN_DYNVARS_046_05------------------------#'
+SELECT @@global.innodb_compression_pad_pct_max =
+VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+WHERE VARIABLE_NAME='innodb_compression_pad_pct_max';
+@@global.innodb_compression_pad_pct_max =
+VARIABLE_VALUE
+1
+SELECT @@global.innodb_compression_pad_pct_max;
+@@global.innodb_compression_pad_pct_max
+75
+SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+WHERE VARIABLE_NAME='innodb_compression_pad_pct_max';
+VARIABLE_VALUE
+75
+'#---------------------FN_DYNVARS_046_06-------------------------#'
+SET @@global.innodb_compression_pad_pct_max = OFF;
+ERROR 42000: Incorrect argument type to variable 'innodb_compression_pad_pct_max'
+SELECT @@global.innodb_compression_pad_pct_max;
+@@global.innodb_compression_pad_pct_max
+75
+SET @@global.innodb_compression_pad_pct_max = ON;
+ERROR 42000: Incorrect argument type to variable 'innodb_compression_pad_pct_max'
+SELECT @@global.innodb_compression_pad_pct_max;
+@@global.innodb_compression_pad_pct_max
+75
+'#---------------------FN_DYNVARS_046_07----------------------#'
+SET @@global.innodb_compression_pad_pct_max = TRUE;
+SELECT @@global.innodb_compression_pad_pct_max;
+@@global.innodb_compression_pad_pct_max
+1
+SET @@global.innodb_compression_pad_pct_max = FALSE;
+SELECT @@global.innodb_compression_pad_pct_max;
+@@global.innodb_compression_pad_pct_max
+0
+SET @@global.innodb_compression_pad_pct_max = @global_start_value;
+SELECT @@global.innodb_compression_pad_pct_max;
+@@global.innodb_compression_pad_pct_max
+50
diff --git a/mysql-test/suite/sys_vars/r/innodb_concurrency_tickets_basic.result b/mysql-test/suite/sys_vars/r/innodb_concurrency_tickets_basic.result
index cd27de0cb0b..0b790fb3557 100644
--- a/mysql-test/suite/sys_vars/r/innodb_concurrency_tickets_basic.result
+++ b/mysql-test/suite/sys_vars/r/innodb_concurrency_tickets_basic.result
@@ -1,7 +1,7 @@
SET @global_start_value = @@global.innodb_concurrency_tickets;
SELECT @global_start_value;
@global_start_value
-500
+5000
'#--------------------FN_DYNVARS_046_01------------------------#'
SET @@global.innodb_concurrency_tickets = 0;
Warnings:
@@ -9,13 +9,13 @@ Warning 1292 Truncated incorrect innodb_concurrency_tickets value: '0'
SET @@global.innodb_concurrency_tickets = DEFAULT;
SELECT @@global.innodb_concurrency_tickets;
@@global.innodb_concurrency_tickets
-500
+5000
'#---------------------FN_DYNVARS_046_02-------------------------#'
SET innodb_concurrency_tickets = 1;
ERROR HY000: Variable 'innodb_concurrency_tickets' is a GLOBAL variable and should be set with SET GLOBAL
SELECT @@innodb_concurrency_tickets;
@@innodb_concurrency_tickets
-500
+5000
SELECT local.innodb_concurrency_tickets;
ERROR 42S02: Unknown table 'local' in field list
SET global innodb_concurrency_tickets = 0;
@@ -97,4 +97,4 @@ SELECT @@global.innodb_concurrency_tickets;
SET @@global.innodb_concurrency_tickets = @global_start_value;
SELECT @@global.innodb_concurrency_tickets;
@@global.innodb_concurrency_tickets
-500
+5000
diff --git a/mysql-test/suite/sys_vars/r/innodb_disable_background_merge_basic.result b/mysql-test/suite/sys_vars/r/innodb_disable_background_merge_basic.result
new file mode 100644
index 00000000000..c4bf621a33d
--- /dev/null
+++ b/mysql-test/suite/sys_vars/r/innodb_disable_background_merge_basic.result
@@ -0,0 +1,4 @@
+SET @orig = @@global.innodb_disable_background_merge;
+SELECT @orig;
+@orig
+0
diff --git a/mysql-test/suite/sys_vars/r/innodb_file_per_table_basic.result b/mysql-test/suite/sys_vars/r/innodb_file_per_table_basic.result
index ecf11351cd9..9780357e69f 100644
--- a/mysql-test/suite/sys_vars/r/innodb_file_per_table_basic.result
+++ b/mysql-test/suite/sys_vars/r/innodb_file_per_table_basic.result
@@ -1,7 +1,7 @@
SET @start_global_value = @@global.innodb_file_per_table;
SELECT @start_global_value;
@start_global_value
-0
+1
'#---------------------BS_STVARS_028_01----------------------#'
SELECT COUNT(@@GLOBAL.innodb_file_per_table);
COUNT(@@GLOBAL.innodb_file_per_table)
@@ -66,4 +66,4 @@ ERROR 42S22: Unknown column 'innodb_file_per_table' in 'field list'
SET @@global.innodb_file_per_table = @start_global_value;
SELECT @@global.innodb_file_per_table;
@@global.innodb_file_per_table
-0
+1
diff --git a/mysql-test/suite/sys_vars/r/innodb_flush_log_at_timeout_basic.result b/mysql-test/suite/sys_vars/r/innodb_flush_log_at_timeout_basic.result
new file mode 100644
index 00000000000..60a4081849f
--- /dev/null
+++ b/mysql-test/suite/sys_vars/r/innodb_flush_log_at_timeout_basic.result
@@ -0,0 +1,96 @@
+SET @global_start_value = @@global.innodb_flush_log_at_timeout;
+SELECT @global_start_value;
+@global_start_value
+1
+'#--------------------FN_DYNVARS_046_01------------------------#'
+SET @@global.innodb_flush_log_at_timeout = 0;
+SET @@global.innodb_flush_log_at_timeout = DEFAULT;
+SELECT @@global.innodb_flush_log_at_timeout;
+@@global.innodb_flush_log_at_timeout
+1
+'#---------------------FN_DYNVARS_046_02-------------------------#'
+SET innodb_flush_log_at_timeout = 1;
+ERROR HY000: Variable 'innodb_flush_log_at_timeout' is a GLOBAL variable and should be set with SET GLOBAL
+SELECT @@innodb_flush_log_at_timeout;
+@@innodb_flush_log_at_timeout
+1
+SELECT local.innodb_flush_log_at_timeout;
+ERROR 42S02: Unknown table 'local' in field list
+SET global innodb_flush_log_at_timeout = 0;
+SELECT @@global.innodb_flush_log_at_timeout;
+@@global.innodb_flush_log_at_timeout
+0
+'#--------------------FN_DYNVARS_046_03------------------------#'
+SET @@global.innodb_flush_log_at_timeout = 0;
+SELECT @@global.innodb_flush_log_at_timeout;
+@@global.innodb_flush_log_at_timeout
+0
+SET @@global.innodb_flush_log_at_timeout = 10;
+SELECT @@global.innodb_flush_log_at_timeout;
+@@global.innodb_flush_log_at_timeout
+10
+SET @@global.innodb_flush_log_at_timeout = 2700;
+SELECT @@global.innodb_flush_log_at_timeout;
+@@global.innodb_flush_log_at_timeout
+2700
+'#--------------------FN_DYNVARS_046_04-------------------------#'
+SET @@global.innodb_flush_log_at_timeout = -1;
+Warnings:
+Warning 1292 Truncated incorrect innodb_flush_log_at_timeout value: '-1'
+SELECT @@global.innodb_flush_log_at_timeout;
+@@global.innodb_flush_log_at_timeout
+0
+SET @@global.innodb_flush_log_at_timeout = "T";
+ERROR 42000: Incorrect argument type to variable 'innodb_flush_log_at_timeout'
+SELECT @@global.innodb_flush_log_at_timeout;
+@@global.innodb_flush_log_at_timeout
+0
+SET @@global.innodb_flush_log_at_timeout = "Y";
+ERROR 42000: Incorrect argument type to variable 'innodb_flush_log_at_timeout'
+SELECT @@global.innodb_flush_log_at_timeout;
+@@global.innodb_flush_log_at_timeout
+0
+SET @@global.innodb_flush_log_at_timeout = 2701;
+Warnings:
+Warning 1292 Truncated incorrect innodb_flush_log_at_timeout value: '2701'
+SELECT @@global.innodb_flush_log_at_timeout;
+@@global.innodb_flush_log_at_timeout
+2700
+'#----------------------FN_DYNVARS_046_05------------------------#'
+SELECT @@global.innodb_flush_log_at_timeout =
+VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+WHERE VARIABLE_NAME='innodb_flush_log_at_timeout';
+@@global.innodb_flush_log_at_timeout =
+VARIABLE_VALUE
+1
+SELECT @@global.innodb_flush_log_at_timeout;
+@@global.innodb_flush_log_at_timeout
+2700
+SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+WHERE VARIABLE_NAME='innodb_flush_log_at_timeout';
+VARIABLE_VALUE
+2700
+'#---------------------FN_DYNVARS_046_06-------------------------#'
+SET @@global.innodb_flush_log_at_timeout = OFF;
+ERROR 42000: Incorrect argument type to variable 'innodb_flush_log_at_timeout'
+SELECT @@global.innodb_flush_log_at_timeout;
+@@global.innodb_flush_log_at_timeout
+2700
+SET @@global.innodb_flush_log_at_timeout = ON;
+ERROR 42000: Incorrect argument type to variable 'innodb_flush_log_at_timeout'
+SELECT @@global.innodb_flush_log_at_timeout;
+@@global.innodb_flush_log_at_timeout
+2700
+'#---------------------FN_DYNVARS_046_07----------------------#'
+SET @@global.innodb_flush_log_at_timeout = TRUE;
+SELECT @@global.innodb_flush_log_at_timeout;
+@@global.innodb_flush_log_at_timeout
+1
+SET @@global.innodb_flush_log_at_timeout = FALSE;
+SELECT @@global.innodb_flush_log_at_timeout;
+@@global.innodb_flush_log_at_timeout
+0
+SET @@global.innodb_flush_log_at_timeout = @global_start_value;
+SELECT @@global.innodb_flush_log_at_timeout;
+@@global.innodb_flush_log_at_timeout
+1
diff --git a/mysql-test/suite/sys_vars/r/innodb_flush_neighbors_basic.result b/mysql-test/suite/sys_vars/r/innodb_flush_neighbors_basic.result
index 70cccb6fdfd..167c613135d 100644
--- a/mysql-test/suite/sys_vars/r/innodb_flush_neighbors_basic.result
+++ b/mysql-test/suite/sys_vars/r/innodb_flush_neighbors_basic.result
@@ -2,10 +2,6 @@ SET @start_global_value = @@global.innodb_flush_neighbors;
SELECT @start_global_value;
@start_global_value
1
-Valid values are 'ON' and 'OFF'
-select @@global.innodb_flush_neighbors in (0, 1);
-@@global.innodb_flush_neighbors in (0, 1)
-1
select @@global.innodb_flush_neighbors;
@@global.innodb_flush_neighbors
1
@@ -13,79 +9,92 @@ select @@session.innodb_flush_neighbors;
ERROR HY000: Variable 'innodb_flush_neighbors' is a GLOBAL variable
show global variables like 'innodb_flush_neighbors';
Variable_name Value
-innodb_flush_neighbors ON
+innodb_flush_neighbors 1
show session variables like 'innodb_flush_neighbors';
Variable_name Value
-innodb_flush_neighbors ON
+innodb_flush_neighbors 1
select * from information_schema.global_variables where variable_name='innodb_flush_neighbors';
VARIABLE_NAME VARIABLE_VALUE
-INNODB_FLUSH_NEIGHBORS ON
+INNODB_FLUSH_NEIGHBORS 1
select * from information_schema.session_variables where variable_name='innodb_flush_neighbors';
VARIABLE_NAME VARIABLE_VALUE
-INNODB_FLUSH_NEIGHBORS ON
-set global innodb_flush_neighbors='OFF';
+INNODB_FLUSH_NEIGHBORS 1
+set global innodb_flush_neighbors=0;
select @@global.innodb_flush_neighbors;
@@global.innodb_flush_neighbors
0
select * from information_schema.global_variables where variable_name='innodb_flush_neighbors';
VARIABLE_NAME VARIABLE_VALUE
-INNODB_FLUSH_NEIGHBORS OFF
+INNODB_FLUSH_NEIGHBORS 0
select * from information_schema.session_variables where variable_name='innodb_flush_neighbors';
VARIABLE_NAME VARIABLE_VALUE
-INNODB_FLUSH_NEIGHBORS OFF
-set @@global.innodb_flush_neighbors=1;
+INNODB_FLUSH_NEIGHBORS 0
+set @@global.innodb_flush_neighbors=TRUE;
select @@global.innodb_flush_neighbors;
@@global.innodb_flush_neighbors
1
select * from information_schema.global_variables where variable_name='innodb_flush_neighbors';
VARIABLE_NAME VARIABLE_VALUE
-INNODB_FLUSH_NEIGHBORS ON
+INNODB_FLUSH_NEIGHBORS 1
select * from information_schema.session_variables where variable_name='innodb_flush_neighbors';
VARIABLE_NAME VARIABLE_VALUE
-INNODB_FLUSH_NEIGHBORS ON
+INNODB_FLUSH_NEIGHBORS 1
set global innodb_flush_neighbors=0;
select @@global.innodb_flush_neighbors;
@@global.innodb_flush_neighbors
0
select * from information_schema.global_variables where variable_name='innodb_flush_neighbors';
VARIABLE_NAME VARIABLE_VALUE
-INNODB_FLUSH_NEIGHBORS OFF
+INNODB_FLUSH_NEIGHBORS 0
select * from information_schema.session_variables where variable_name='innodb_flush_neighbors';
VARIABLE_NAME VARIABLE_VALUE
-INNODB_FLUSH_NEIGHBORS OFF
-set @@global.innodb_flush_neighbors='ON';
+INNODB_FLUSH_NEIGHBORS 0
+set @@global.innodb_flush_neighbors=2;
+select @@global.innodb_flush_neighbors;
+@@global.innodb_flush_neighbors
+2
+select * from information_schema.global_variables where variable_name='innodb_flush_neighbors';
+VARIABLE_NAME VARIABLE_VALUE
+INNODB_FLUSH_NEIGHBORS 2
+select * from information_schema.session_variables where variable_name='innodb_flush_neighbors';
+VARIABLE_NAME VARIABLE_VALUE
+INNODB_FLUSH_NEIGHBORS 2
+set @@global.innodb_flush_neighbors=DEFAULT;
select @@global.innodb_flush_neighbors;
@@global.innodb_flush_neighbors
1
select * from information_schema.global_variables where variable_name='innodb_flush_neighbors';
VARIABLE_NAME VARIABLE_VALUE
-INNODB_FLUSH_NEIGHBORS ON
+INNODB_FLUSH_NEIGHBORS 1
select * from information_schema.session_variables where variable_name='innodb_flush_neighbors';
VARIABLE_NAME VARIABLE_VALUE
-INNODB_FLUSH_NEIGHBORS ON
-set session innodb_flush_neighbors='OFF';
+INNODB_FLUSH_NEIGHBORS 1
+set session innodb_flush_neighbors=0;
ERROR HY000: Variable 'innodb_flush_neighbors' is a GLOBAL variable and should be set with SET GLOBAL
-set @@session.innodb_flush_neighbors='ON';
+set @@session.innodb_flush_neighbors=1;
ERROR HY000: Variable 'innodb_flush_neighbors' is a GLOBAL variable and should be set with SET GLOBAL
+set global innodb_flush_neighbors='OFF';
+ERROR 42000: Incorrect argument type to variable 'innodb_flush_neighbors'
+set global innodb_flush_neighbors='ON';
+ERROR 42000: Incorrect argument type to variable 'innodb_flush_neighbors'
set global innodb_flush_neighbors=1.1;
ERROR 42000: Incorrect argument type to variable 'innodb_flush_neighbors'
set global innodb_flush_neighbors=1e1;
ERROR 42000: Incorrect argument type to variable 'innodb_flush_neighbors'
-set global innodb_flush_neighbors=2;
-ERROR 42000: Variable 'innodb_flush_neighbors' can't be set to the value of '2'
+set global innodb_flush_neighbors=3;
+Warnings:
+Warning 1292 Truncated incorrect innodb_flush_neighbors value: '3'
+select @@global.innodb_flush_neighbors;
+@@global.innodb_flush_neighbors
+2
set global innodb_flush_neighbors=-3;
-ERROR 42000: Variable 'innodb_flush_neighbors' can't be set to the value of '-3'
+Warnings:
+Warning 1292 Truncated incorrect innodb_flush_neighbors value: '-3'
select @@global.innodb_flush_neighbors;
@@global.innodb_flush_neighbors
-1
-select * from information_schema.global_variables where variable_name='innodb_flush_neighbors';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_FLUSH_NEIGHBORS ON
-select * from information_schema.session_variables where variable_name='innodb_flush_neighbors';
-VARIABLE_NAME VARIABLE_VALUE
-INNODB_FLUSH_NEIGHBORS ON
+0
set global innodb_flush_neighbors='AUTO';
-ERROR 42000: Variable 'innodb_flush_neighbors' can't be set to the value of 'AUTO'
+ERROR 42000: Incorrect argument type to variable 'innodb_flush_neighbors'
SET @@global.innodb_flush_neighbors = @start_global_value;
SELECT @@global.innodb_flush_neighbors;
@@global.innodb_flush_neighbors
diff --git a/mysql-test/suite/sys_vars/r/innodb_flushing_avg_loops_basic.result b/mysql-test/suite/sys_vars/r/innodb_flushing_avg_loops_basic.result
new file mode 100644
index 00000000000..caa41bd64ed
--- /dev/null
+++ b/mysql-test/suite/sys_vars/r/innodb_flushing_avg_loops_basic.result
@@ -0,0 +1,98 @@
+SET @global_start_value = @@global.innodb_flushing_avg_loops;
+SELECT @global_start_value;
+@global_start_value
+30
+'#--------------------FN_DYNVARS_046_01------------------------#'
+SET @@global.innodb_flushing_avg_loops = 1;
+SET @@global.innodb_flushing_avg_loops = DEFAULT;
+SELECT @@global.innodb_flushing_avg_loops;
+@@global.innodb_flushing_avg_loops
+30
+'#---------------------FN_DYNVARS_046_02-------------------------#'
+SET innodb_flushing_avg_loops = 1;
+ERROR HY000: Variable 'innodb_flushing_avg_loops' is a GLOBAL variable and should be set with SET GLOBAL
+SELECT @@innodb_flushing_avg_loops;
+@@innodb_flushing_avg_loops
+30
+SELECT local.innodb_flushing_avg_loops;
+ERROR 42S02: Unknown table 'local' in field list
+SET global innodb_flushing_avg_loops = 1;
+SELECT @@global.innodb_flushing_avg_loops;
+@@global.innodb_flushing_avg_loops
+1
+'#--------------------FN_DYNVARS_046_03------------------------#'
+SET @@global.innodb_flushing_avg_loops = 1;
+SELECT @@global.innodb_flushing_avg_loops;
+@@global.innodb_flushing_avg_loops
+1
+SET @@global.innodb_flushing_avg_loops = 60;
+SELECT @@global.innodb_flushing_avg_loops;
+@@global.innodb_flushing_avg_loops
+60
+SET @@global.innodb_flushing_avg_loops = 1000;
+SELECT @@global.innodb_flushing_avg_loops;
+@@global.innodb_flushing_avg_loops
+1000
+'#--------------------FN_DYNVARS_046_04-------------------------#'
+SET @@global.innodb_flushing_avg_loops = -1;
+Warnings:
+Warning 1292 Truncated incorrect innodb_flushing_avg_loops value: '-1'
+SELECT @@global.innodb_flushing_avg_loops;
+@@global.innodb_flushing_avg_loops
+1
+SET @@global.innodb_flushing_avg_loops = "T";
+ERROR 42000: Incorrect argument type to variable 'innodb_flushing_avg_loops'
+SELECT @@global.innodb_flushing_avg_loops;
+@@global.innodb_flushing_avg_loops
+1
+SET @@global.innodb_flushing_avg_loops = "Y";
+ERROR 42000: Incorrect argument type to variable 'innodb_flushing_avg_loops'
+SELECT @@global.innodb_flushing_avg_loops;
+@@global.innodb_flushing_avg_loops
+1
+SET @@global.innodb_flushing_avg_loops = 1001;
+Warnings:
+Warning 1292 Truncated incorrect innodb_flushing_avg_loops value: '1001'
+SELECT @@global.innodb_flushing_avg_loops;
+@@global.innodb_flushing_avg_loops
+1000
+'#----------------------FN_DYNVARS_046_05------------------------#'
+SELECT @@global.innodb_flushing_avg_loops =
+VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+WHERE VARIABLE_NAME='innodb_flushing_avg_loops';
+@@global.innodb_flushing_avg_loops =
+VARIABLE_VALUE
+1
+SELECT @@global.innodb_flushing_avg_loops;
+@@global.innodb_flushing_avg_loops
+1000
+SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+WHERE VARIABLE_NAME='innodb_flushing_avg_loops';
+VARIABLE_VALUE
+1000
+'#---------------------FN_DYNVARS_046_06-------------------------#'
+SET @@global.innodb_flushing_avg_loops = OFF;
+ERROR 42000: Incorrect argument type to variable 'innodb_flushing_avg_loops'
+SELECT @@global.innodb_flushing_avg_loops;
+@@global.innodb_flushing_avg_loops
+1000
+SET @@global.innodb_flushing_avg_loops = ON;
+ERROR 42000: Incorrect argument type to variable 'innodb_flushing_avg_loops'
+SELECT @@global.innodb_flushing_avg_loops;
+@@global.innodb_flushing_avg_loops
+1000
+'#---------------------FN_DYNVARS_046_07----------------------#'
+SET @@global.innodb_flushing_avg_loops = TRUE;
+SELECT @@global.innodb_flushing_avg_loops;
+@@global.innodb_flushing_avg_loops
+1
+SET @@global.innodb_flushing_avg_loops = FALSE;
+Warnings:
+Warning 1292 Truncated incorrect innodb_flushing_avg_loops value: '0'
+SELECT @@global.innodb_flushing_avg_loops;
+@@global.innodb_flushing_avg_loops
+1
+SET @@global.innodb_flushing_avg_loops = @global_start_value;
+SELECT @@global.innodb_flushing_avg_loops;
+@@global.innodb_flushing_avg_loops
+30
diff --git a/mysql-test/suite/sys_vars/r/innodb_force_recovery_crash_basic.result b/mysql-test/suite/sys_vars/r/innodb_force_recovery_crash_basic.result
new file mode 100644
index 00000000000..5af00f21c74
--- /dev/null
+++ b/mysql-test/suite/sys_vars/r/innodb_force_recovery_crash_basic.result
@@ -0,0 +1,33 @@
+select @@global.innodb_force_recovery_crash in (0, 1);
+@@global.innodb_force_recovery_crash in (0, 1)
+1
+select @@global.innodb_force_recovery_crash;
+@@global.innodb_force_recovery_crash
+0
+select @@session.innodb_force_recovery_crash;
+ERROR HY000: Variable 'innodb_force_recovery_crash' is a GLOBAL variable
+show global variables like 'innodb_force_recovery_crash';
+Variable_name Value
+innodb_force_recovery_crash 0
+show session variables like 'innodb_force_recovery_crash';
+Variable_name Value
+innodb_force_recovery_crash 0
+select * from information_schema.global_variables where variable_name='innodb_force_recovery_crash';
+VARIABLE_NAME VARIABLE_VALUE
+INNODB_FORCE_RECOVERY_CRASH 0
+select * from information_schema.session_variables where variable_name='innodb_force_recovery_crash';
+VARIABLE_NAME VARIABLE_VALUE
+INNODB_FORCE_RECOVERY_CRASH 0
+set global innodb_force_recovery_crash=1;
+ERROR HY000: Variable 'innodb_force_recovery_crash' is a read only variable
+set global innodb_force_recovery_crash=0;
+ERROR HY000: Variable 'innodb_force_recovery_crash' is a read only variable
+select @@global.innodb_force_recovery_crash;
+@@global.innodb_force_recovery_crash
+0
+set session innodb_force_recovery_crash='some';
+ERROR HY000: Variable 'innodb_force_recovery_crash' is a read only variable
+set @@session.innodb_force_recovery_crash='some';
+ERROR HY000: Variable 'innodb_force_recovery_crash' is a read only variable
+set global innodb_force_recovery_crash='some';
+ERROR HY000: Variable 'innodb_force_recovery_crash' is a read only variable
diff --git a/mysql-test/suite/sys_vars/r/innodb_ft_cache_size_basic.result b/mysql-test/suite/sys_vars/r/innodb_ft_cache_size_basic.result
index dc68ceed211..f50b6d4180c 100644
--- a/mysql-test/suite/sys_vars/r/innodb_ft_cache_size_basic.result
+++ b/mysql-test/suite/sys_vars/r/innodb_ft_cache_size_basic.result
@@ -1,20 +1,20 @@
select @@global.innodb_ft_cache_size;
@@global.innodb_ft_cache_size
-32000000
+8000000
select @@session.innodb_ft_cache_size;
ERROR HY000: Variable 'innodb_ft_cache_size' is a GLOBAL variable
show global variables like 'innodb_ft_cache_size';
Variable_name Value
-innodb_ft_cache_size 32000000
+innodb_ft_cache_size 8000000
show session variables like 'innodb_ft_cache_size';
Variable_name Value
-innodb_ft_cache_size 32000000
+innodb_ft_cache_size 8000000
select * from information_schema.global_variables where variable_name='innodb_ft_cache_size';
VARIABLE_NAME VARIABLE_VALUE
-INNODB_FT_CACHE_SIZE 32000000
+INNODB_FT_CACHE_SIZE 8000000
select * from information_schema.session_variables where variable_name='innodb_ft_cache_size';
VARIABLE_NAME VARIABLE_VALUE
-INNODB_FT_CACHE_SIZE 32000000
+INNODB_FT_CACHE_SIZE 8000000
set global innodb_ft_cache_size=1;
ERROR HY000: Variable 'innodb_ft_cache_size' is a read only variable
set session innodb_ft_cache_size=1;
diff --git a/mysql-test/suite/sys_vars/r/innodb_ft_enable_diag_print_basic.result b/mysql-test/suite/sys_vars/r/innodb_ft_enable_diag_print_basic.result
index a4af92591b3..5401e26d8a5 100644
--- a/mysql-test/suite/sys_vars/r/innodb_ft_enable_diag_print_basic.result
+++ b/mysql-test/suite/sys_vars/r/innodb_ft_enable_diag_print_basic.result
@@ -1,28 +1,28 @@
SET @start_global_value = @@global.innodb_ft_enable_diag_print;
SELECT @start_global_value;
@start_global_value
-1
+0
Valid values are 'ON' and 'OFF'
select @@global.innodb_ft_enable_diag_print in (0, 1);
@@global.innodb_ft_enable_diag_print in (0, 1)
1
select @@global.innodb_ft_enable_diag_print;
@@global.innodb_ft_enable_diag_print
-1
+0
select @@session.innodb_ft_enable_diag_print;
ERROR HY000: Variable 'innodb_ft_enable_diag_print' is a GLOBAL variable
show global variables like 'innodb_ft_enable_diag_print';
Variable_name Value
-innodb_ft_enable_diag_print ON
+innodb_ft_enable_diag_print OFF
show session variables like 'innodb_ft_enable_diag_print';
Variable_name Value
-innodb_ft_enable_diag_print ON
+innodb_ft_enable_diag_print OFF
select * from information_schema.global_variables where variable_name='innodb_ft_enable_diag_print';
VARIABLE_NAME VARIABLE_VALUE
-INNODB_FT_ENABLE_DIAG_PRINT ON
+INNODB_FT_ENABLE_DIAG_PRINT OFF
select * from information_schema.session_variables where variable_name='innodb_ft_enable_diag_print';
VARIABLE_NAME VARIABLE_VALUE
-INNODB_FT_ENABLE_DIAG_PRINT ON
+INNODB_FT_ENABLE_DIAG_PRINT OFF
set global innodb_ft_enable_diag_print='OFF';
select @@global.innodb_ft_enable_diag_print;
@@global.innodb_ft_enable_diag_print
@@ -89,4 +89,4 @@ ERROR 42000: Variable 'innodb_ft_enable_diag_print' can't be set to the value of
SET @@global.innodb_ft_enable_diag_print = @start_global_value;
SELECT @@global.innodb_ft_enable_diag_print;
@@global.innodb_ft_enable_diag_print
-1
+0
diff --git a/mysql-test/suite/sys_vars/r/innodb_io_capacity_max_basic.result b/mysql-test/suite/sys_vars/r/innodb_io_capacity_max_basic.result
new file mode 100644
index 00000000000..ebc934acf6e
--- /dev/null
+++ b/mysql-test/suite/sys_vars/r/innodb_io_capacity_max_basic.result
@@ -0,0 +1,81 @@
+SET @start_innodb_max_capacity = @@global.innodb_io_capacity_max;
+SELECT @start_innodb_max_capacity;
+@start_innodb_max_capacity
+2000
+SET @start_innodb_capacity = @@global.innodb_io_capacity;
+SELECT @start_innodb_capacity;
+@start_innodb_capacity
+200
+Valid value 100 or more
+select @@global.innodb_io_capacity_max > 99;
+@@global.innodb_io_capacity_max > 99
+1
+select @@global.innodb_io_capacity_max;
+@@global.innodb_io_capacity_max
+2000
+select @@session.innodb_io_capacity_max;
+ERROR HY000: Variable 'innodb_io_capacity_max' is a GLOBAL variable
+show global variables like 'innodb_io_capacity_max';
+Variable_name Value
+innodb_io_capacity_max 2000
+show session variables like 'innodb_io_capacity_max';
+Variable_name Value
+innodb_io_capacity_max 2000
+select * from information_schema.global_variables where variable_name='innodb_io_capacity_max';
+VARIABLE_NAME VARIABLE_VALUE
+INNODB_IO_CAPACITY_MAX 2000
+select * from information_schema.session_variables where variable_name='innodb_io_capacity_max';
+VARIABLE_NAME VARIABLE_VALUE
+INNODB_IO_CAPACITY_MAX 2000
+set global innodb_io_capacity_max=@start_innodb_capacity + 1;
+select @@global.innodb_io_capacity_max;
+@@global.innodb_io_capacity_max
+201
+select * from information_schema.global_variables where variable_name='innodb_io_capacity_max';
+VARIABLE_NAME VARIABLE_VALUE
+INNODB_IO_CAPACITY_MAX 201
+select * from information_schema.session_variables where variable_name='innodb_io_capacity_max';
+VARIABLE_NAME VARIABLE_VALUE
+INNODB_IO_CAPACITY_MAX 201
+set session innodb_io_capacity_max=444;
+ERROR HY000: Variable 'innodb_io_capacity_max' is a GLOBAL variable and should be set with SET GLOBAL
+set global innodb_io_capacity_max=1.1;
+ERROR 42000: Incorrect argument type to variable 'innodb_io_capacity_max'
+set global innodb_io_capacity_max=1e1;
+ERROR 42000: Incorrect argument type to variable 'innodb_io_capacity_max'
+set global innodb_io_capacity_max="foo";
+ERROR 42000: Incorrect argument type to variable 'innodb_io_capacity_max'
+set global innodb_io_capacity_max=@start_innodb_capacity - 1;
+Warnings:
+Warning 1210 innodb_io_capacity_max cannot be set lower than innodb_io_capacity.
+Warning 1210 Setting innodb_io_capacity_max to 200
+select @@global.innodb_io_capacity_max;
+@@global.innodb_io_capacity_max
+200
+select * from information_schema.global_variables where variable_name='innodb_io_capacity_max';
+VARIABLE_NAME VARIABLE_VALUE
+INNODB_IO_CAPACITY_MAX 200
+set global innodb_io_capacity_max=-7;
+Warnings:
+Warning 1292 Truncated incorrect innodb_io_capacity_max value: '-7'
+Warning 1210 innodb_io_capacity_max cannot be set lower than innodb_io_capacity.
+Warning 1210 Setting innodb_io_capacity_max to 200
+select @@global.innodb_io_capacity_max;
+@@global.innodb_io_capacity_max
+200
+select * from information_schema.global_variables where variable_name='innodb_io_capacity_max';
+VARIABLE_NAME VARIABLE_VALUE
+INNODB_IO_CAPACITY_MAX 200
+set global innodb_io_capacity=100;
+set global innodb_io_capacity_max=100;
+select @@global.innodb_io_capacity_max;
+@@global.innodb_io_capacity_max
+100
+SET @@global.innodb_io_capacity_max = @start_innodb_max_capacity;
+SELECT @@global.innodb_io_capacity_max;
+@@global.innodb_io_capacity_max
+2000
+SET @@global.innodb_io_capacity = @start_innodb_capacity;
+SELECT @@global.innodb_io_capacity;
+@@global.innodb_io_capacity
+200
diff --git a/mysql-test/suite/sys_vars/r/innodb_max_dirty_pages_pct_lwm_basic.result b/mysql-test/suite/sys_vars/r/innodb_max_dirty_pages_pct_lwm_basic.result
new file mode 100644
index 00000000000..82388cebc82
--- /dev/null
+++ b/mysql-test/suite/sys_vars/r/innodb_max_dirty_pages_pct_lwm_basic.result
@@ -0,0 +1,109 @@
+SET @pct_lwm_start_value = @@global.innodb_max_dirty_pages_pct_lwm;
+SELECT @pct_lwm_start_value;
+@pct_lwm_start_value
+0
+SET @pct_start_value = @@global.innodb_max_dirty_pages_pct;
+SELECT @pct_start_value;
+@pct_start_value
+75
+'#--------------------FN_DYNVARS_046_01------------------------#'
+SET @@global.innodb_max_dirty_pages_pct_lwm = 0;
+SET @@global.innodb_max_dirty_pages_pct_lwm = DEFAULT;
+SELECT @@global.innodb_max_dirty_pages_pct_lwm;
+@@global.innodb_max_dirty_pages_pct_lwm
+0
+'#---------------------FN_DYNVARS_046_02-------------------------#'
+SET innodb_max_dirty_pages_pct_lwm = 1;
+ERROR HY000: Variable 'innodb_max_dirty_pages_pct_lwm' is a GLOBAL variable and should be set with SET GLOBAL
+SELECT @@innodb_max_dirty_pages_pct_lwm;
+@@innodb_max_dirty_pages_pct_lwm
+0
+SELECT local.innodb_max_dirty_pages_pct_lwm;
+ERROR 42S02: Unknown table 'local' in field list
+SET global innodb_max_dirty_pages_pct_lwm = 0;
+SELECT @@global.innodb_max_dirty_pages_pct_lwm;
+@@global.innodb_max_dirty_pages_pct_lwm
+0
+'#--------------------FN_DYNVARS_046_03------------------------#'
+SET @@global.innodb_max_dirty_pages_pct_lwm = 0;
+SELECT @@global.innodb_max_dirty_pages_pct_lwm;
+@@global.innodb_max_dirty_pages_pct_lwm
+0
+SET @@global.innodb_max_dirty_pages_pct_lwm = @pct_start_value;
+SELECT @@global.innodb_max_dirty_pages_pct_lwm;
+@@global.innodb_max_dirty_pages_pct_lwm
+75
+'#--------------------FN_DYNVARS_046_04-------------------------#'
+SET @@global.innodb_max_dirty_pages_pct_lwm = -1;
+Warnings:
+Warning 1292 Truncated incorrect innodb_max_dirty_pages_pct_lwm value: '-1'
+SELECT @@global.innodb_max_dirty_pages_pct_lwm;
+@@global.innodb_max_dirty_pages_pct_lwm
+0
+SET @@global.innodb_max_dirty_pages_pct_lwm = "T";
+ERROR 42000: Incorrect argument type to variable 'innodb_max_dirty_pages_pct_lwm'
+SELECT @@global.innodb_max_dirty_pages_pct_lwm;
+@@global.innodb_max_dirty_pages_pct_lwm
+0
+SET @@global.innodb_max_dirty_pages_pct_lwm = "Y";
+ERROR 42000: Incorrect argument type to variable 'innodb_max_dirty_pages_pct_lwm'
+SELECT @@global.innodb_max_dirty_pages_pct_lwm;
+@@global.innodb_max_dirty_pages_pct_lwm
+0
+SET @@global.innodb_max_dirty_pages_pct_lwm = @pct_start_value + 1;
+Warnings:
+Warning 1210 innodb_max_dirty_pages_pct_lwm cannot be set higher than innodb_max_dirty_pages_pct.
+Warning 1210 Setting innodb_max_dirty_page_pct_lwm to 75
+SELECT @@global.innodb_max_dirty_pages_pct_lwm;
+@@global.innodb_max_dirty_pages_pct_lwm
+75
+SET @@global.innodb_max_dirty_pages_pct_lwm = 100;
+Warnings:
+Warning 1292 Truncated incorrect innodb_max_dirty_pages_pct_lwm value: '100'
+Warning 1210 innodb_max_dirty_pages_pct_lwm cannot be set higher than innodb_max_dirty_pages_pct.
+Warning 1210 Setting innodb_max_dirty_page_pct_lwm to 75
+SELECT @@global.innodb_max_dirty_pages_pct_lwm;
+@@global.innodb_max_dirty_pages_pct_lwm
+75
+'#----------------------FN_DYNVARS_046_05------------------------#'
+SELECT @@global.innodb_max_dirty_pages_pct_lwm =
+VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+WHERE VARIABLE_NAME='innodb_max_dirty_pages_pct_lwm';
+@@global.innodb_max_dirty_pages_pct_lwm =
+VARIABLE_VALUE
+1
+SELECT @@global.innodb_max_dirty_pages_pct_lwm;
+@@global.innodb_max_dirty_pages_pct_lwm
+75
+SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+WHERE VARIABLE_NAME='innodb_max_dirty_pages_pct_lwm';
+VARIABLE_VALUE
+75
+'#---------------------FN_DYNVARS_046_06-------------------------#'
+SET @@global.innodb_max_dirty_pages_pct_lwm = OFF;
+ERROR 42000: Incorrect argument type to variable 'innodb_max_dirty_pages_pct_lwm'
+SELECT @@global.innodb_max_dirty_pages_pct_lwm;
+@@global.innodb_max_dirty_pages_pct_lwm
+75
+SET @@global.innodb_max_dirty_pages_pct_lwm = ON;
+ERROR 42000: Incorrect argument type to variable 'innodb_max_dirty_pages_pct_lwm'
+SELECT @@global.innodb_max_dirty_pages_pct_lwm;
+@@global.innodb_max_dirty_pages_pct_lwm
+75
+'#---------------------FN_DYNVARS_046_07----------------------#'
+SET @@global.innodb_max_dirty_pages_pct_lwm = TRUE;
+SELECT @@global.innodb_max_dirty_pages_pct_lwm;
+@@global.innodb_max_dirty_pages_pct_lwm
+1
+SET @@global.innodb_max_dirty_pages_pct_lwm = FALSE;
+SELECT @@global.innodb_max_dirty_pages_pct_lwm;
+@@global.innodb_max_dirty_pages_pct_lwm
+0
+SET @@global.innodb_max_dirty_pages_pct = @pct_start_value;
+SELECT @@global.innodb_max_dirty_pages_pct;
+@@global.innodb_max_dirty_pages_pct
+75
+SET @@global.innodb_max_dirty_pages_pct_lwm = @pct_lwm_start_value;
+SELECT @@global.innodb_max_dirty_pages_pct_lwm;
+@@global.innodb_max_dirty_pages_pct_lwm
+0
diff --git a/mysql-test/suite/sys_vars/r/innodb_monitor_disable_basic.result b/mysql-test/suite/sys_vars/r/innodb_monitor_disable_basic.result
index 8bb508c877e..7a7c0a6b6a2 100644
--- a/mysql-test/suite/sys_vars/r/innodb_monitor_disable_basic.result
+++ b/mysql-test/suite/sys_vars/r/innodb_monitor_disable_basic.result
@@ -25,14 +25,15 @@ buffer_pool_size disabled
buffer_pool_reads disabled
buffer_pool_read_requests disabled
buffer_pool_write_requests disabled
-buffer_pool_pages_in_flush disabled
buffer_pool_wait_free disabled
buffer_pool_read_ahead disabled
buffer_pool_read_ahead_evicted disabled
buffer_pool_pages_total disabled
buffer_pool_pages_misc disabled
buffer_pool_pages_data disabled
+buffer_pool_bytes_data disabled
buffer_pool_pages_dirty disabled
+buffer_pool_bytes_dirty disabled
buffer_pool_pages_free disabled
buffer_pages_created disabled
buffer_pages_written disabled
@@ -48,15 +49,15 @@ buffer_flush_batch_pages disabled
buffer_flush_neighbor_total_pages disabled
buffer_flush_neighbor disabled
buffer_flush_neighbor_pages disabled
-buffer_flush_max_dirty_total_pages disabled
-buffer_flush_max_dirty disabled
-buffer_flush_max_dirty_pages disabled
+buffer_flush_n_to_flush_requested disabled
+buffer_flush_avg_page_rate disabled
+buffer_flush_lsn_avg_rate disabled
+buffer_flush_pct_for_dirty disabled
+buffer_flush_pct_for_lsn disabled
+buffer_flush_sync_waits disabled
buffer_flush_adaptive_total_pages disabled
buffer_flush_adaptive disabled
buffer_flush_adaptive_pages disabled
-buffer_flush_async_total_pages disabled
-buffer_flush_async disabled
-buffer_flush_async_pages disabled
buffer_flush_sync_total_pages disabled
buffer_flush_sync disabled
buffer_flush_sync_pages disabled
@@ -156,6 +157,8 @@ log_write_requests disabled
log_writes disabled
compress_pages_compressed disabled
compress_pages_decompressed disabled
+compression_pad_increments disabled
+compression_pad_decrements disabled
index_splits disabled
index_merges disabled
adaptive_hash_searches disabled
@@ -199,657 +202,29 @@ dml_reads disabled
dml_inserts disabled
dml_deletes disabled
dml_updates disabled
+ddl_background_drop_indexes disabled
ddl_background_drop_tables disabled
+ddl_online_create_index disabled
+ddl_pending_alter_table disabled
icp_attempts disabled
icp_no_match disabled
icp_out_of_range disabled
icp_match disabled
set global innodb_monitor_enable = all;
-select name, status from information_schema.innodb_metrics;
-name status
-metadata_table_handles_opened enabled
-metadata_table_handles_closed enabled
-metadata_table_reference_count enabled
-metadata_mem_pool_size enabled
-lock_deadlocks enabled
-lock_timeouts enabled
-lock_rec_lock_waits enabled
-lock_table_lock_waits enabled
-lock_rec_lock_requests enabled
-lock_rec_lock_created enabled
-lock_rec_lock_removed enabled
-lock_rec_locks enabled
-lock_table_lock_created enabled
-lock_table_lock_removed enabled
-lock_table_locks enabled
-lock_row_lock_current_waits enabled
-lock_row_lock_time enabled
-lock_row_lock_time_max enabled
-lock_row_lock_waits enabled
-lock_row_lock_time_avg enabled
-buffer_pool_size enabled
-buffer_pool_reads enabled
-buffer_pool_read_requests enabled
-buffer_pool_write_requests enabled
-buffer_pool_pages_in_flush enabled
-buffer_pool_wait_free enabled
-buffer_pool_read_ahead enabled
-buffer_pool_read_ahead_evicted enabled
-buffer_pool_pages_total enabled
-buffer_pool_pages_misc enabled
-buffer_pool_pages_data enabled
-buffer_pool_pages_dirty enabled
-buffer_pool_pages_free enabled
-buffer_pages_created enabled
-buffer_pages_written enabled
-buffer_pages_read enabled
-buffer_data_reads enabled
-buffer_data_written enabled
-buffer_flush_batch_scanned enabled
-buffer_flush_batch_num_scan enabled
-buffer_flush_batch_scanned_per_call enabled
-buffer_flush_batch_total_pages enabled
-buffer_flush_batches enabled
-buffer_flush_batch_pages enabled
-buffer_flush_neighbor_total_pages enabled
-buffer_flush_neighbor enabled
-buffer_flush_neighbor_pages enabled
-buffer_flush_max_dirty_total_pages enabled
-buffer_flush_max_dirty enabled
-buffer_flush_max_dirty_pages enabled
-buffer_flush_adaptive_total_pages enabled
-buffer_flush_adaptive enabled
-buffer_flush_adaptive_pages enabled
-buffer_flush_async_total_pages enabled
-buffer_flush_async enabled
-buffer_flush_async_pages enabled
-buffer_flush_sync_total_pages enabled
-buffer_flush_sync enabled
-buffer_flush_sync_pages enabled
-buffer_flush_background_total_pages enabled
-buffer_flush_background enabled
-buffer_flush_background_pages enabled
-buffer_LRU_batch_scanned enabled
-buffer_LRU_batch_num_scan enabled
-buffer_LRU_batch_scanned_per_call enabled
-buffer_LRU_batch_total_pages enabled
-buffer_LRU_batches enabled
-buffer_LRU_batch_pages enabled
-buffer_LRU_single_flush_scanned enabled
-buffer_LRU_single_flush_num_scan enabled
-buffer_LRU_single_flush_scanned_per_call enabled
-buffer_LRU_single_flush_failure_count enabled
-buffer_LRU_get_free_search enabled
-buffer_LRU_search_scanned enabled
-buffer_LRU_search_num_scan enabled
-buffer_LRU_search_scanned_per_call enabled
-buffer_LRU_unzip_search_scanned enabled
-buffer_LRU_unzip_search_num_scan enabled
-buffer_LRU_unzip_search_scanned_per_call enabled
-buffer_page_read_index_leaf enabled
-buffer_page_read_index_non_leaf enabled
-buffer_page_read_index_ibuf_leaf enabled
-buffer_page_read_index_ibuf_non_leaf enabled
-buffer_page_read_undo_log enabled
-buffer_page_read_index_inode enabled
-buffer_page_read_ibuf_free_list enabled
-buffer_page_read_ibuf_bitmap enabled
-buffer_page_read_system_page enabled
-buffer_page_read_trx_system enabled
-buffer_page_read_fsp_hdr enabled
-buffer_page_read_xdes enabled
-buffer_page_read_blob enabled
-buffer_page_read_zblob enabled
-buffer_page_read_zblob2 enabled
-buffer_page_read_other enabled
-buffer_page_written_index_leaf enabled
-buffer_page_written_index_non_leaf enabled
-buffer_page_written_index_ibuf_leaf enabled
-buffer_page_written_index_ibuf_non_leaf enabled
-buffer_page_written_undo_log enabled
-buffer_page_written_index_inode enabled
-buffer_page_written_ibuf_free_list enabled
-buffer_page_written_ibuf_bitmap enabled
-buffer_page_written_system_page enabled
-buffer_page_written_trx_system enabled
-buffer_page_written_fsp_hdr enabled
-buffer_page_written_xdes enabled
-buffer_page_written_blob enabled
-buffer_page_written_zblob enabled
-buffer_page_written_zblob2 enabled
-buffer_page_written_other enabled
-os_data_reads enabled
-os_data_writes enabled
-os_data_fsyncs enabled
-os_pending_reads enabled
-os_pending_writes enabled
-os_log_bytes_written enabled
-os_log_fsyncs enabled
-os_log_pending_fsyncs enabled
-os_log_pending_writes enabled
-trx_rw_commits enabled
-trx_ro_commits enabled
-trx_nl_ro_commits enabled
-trx_commits_insert_update enabled
-trx_rollbacks enabled
-trx_rollbacks_savepoint enabled
-trx_rollback_active enabled
-trx_active_transactions enabled
-trx_rseg_history_len enabled
-trx_undo_slots_used enabled
-trx_undo_slots_cached enabled
-trx_rseg_current_size enabled
-purge_del_mark_records enabled
-purge_upd_exist_or_extern_records enabled
-purge_invoked enabled
-purge_undo_log_pages enabled
-purge_dml_delay_usec enabled
-purge_stop_count enabled
-purge_resume_count enabled
-log_checkpoints enabled
-log_lsn_last_flush enabled
-log_lsn_last_checkpoint enabled
-log_lsn_current enabled
-log_lsn_checkpoint_age enabled
-log_lsn_buf_pool_oldest enabled
-log_max_modified_age_async enabled
-log_max_modified_age_sync enabled
-log_pending_log_writes enabled
-log_pending_checkpoint_writes enabled
-log_num_log_io enabled
-log_waits enabled
-log_write_requests enabled
-log_writes enabled
-compress_pages_compressed enabled
-compress_pages_decompressed enabled
-index_splits enabled
-index_merges enabled
-adaptive_hash_searches enabled
-adaptive_hash_searches_btree enabled
-adaptive_hash_pages_added enabled
-adaptive_hash_pages_removed enabled
-adaptive_hash_rows_added enabled
-adaptive_hash_rows_removed enabled
-adaptive_hash_rows_deleted_no_hash_entry enabled
-adaptive_hash_rows_updated enabled
-file_num_open_files enabled
-ibuf_merges_insert enabled
-ibuf_merges_delete_mark enabled
-ibuf_merges_delete enabled
-ibuf_merges_discard_insert enabled
-ibuf_merges_discard_delete_mark enabled
-ibuf_merges_discard_delete enabled
-ibuf_merges enabled
-ibuf_size enabled
-innodb_master_thread_sleeps enabled
-innodb_activity_count enabled
-innodb_master_active_loops enabled
-innodb_master_idle_loops enabled
-innodb_background_drop_table_usec enabled
-innodb_ibuf_merge_usec enabled
-innodb_log_flush_usec enabled
-innodb_mem_validate_usec enabled
-innodb_master_purge_usec enabled
-innodb_dict_lru_usec enabled
-innodb_checkpoint_usec enabled
-innodb_dblwr_writes enabled
-innodb_dblwr_pages_written enabled
-innodb_page_size enabled
-innodb_rwlock_s_spin_waits enabled
-innodb_rwlock_x_spin_waits enabled
-innodb_rwlock_s_spin_rounds enabled
-innodb_rwlock_x_spin_rounds enabled
-innodb_rwlock_s_os_waits enabled
-innodb_rwlock_x_os_waits enabled
-dml_reads enabled
-dml_inserts enabled
-dml_deletes enabled
-dml_updates enabled
-ddl_background_drop_tables enabled
-icp_attempts enabled
-icp_no_match enabled
-icp_out_of_range enabled
-icp_match enabled
+select name from information_schema.innodb_metrics where status!='enabled';
+name
set global innodb_monitor_enable = aaa;
ERROR 42000: Variable 'innodb_monitor_enable' can't be set to the value of 'aaa'
set global innodb_monitor_disable = All;
-select name, status from information_schema.innodb_metrics;
-name status
-metadata_table_handles_opened disabled
-metadata_table_handles_closed disabled
-metadata_table_reference_count disabled
-metadata_mem_pool_size disabled
-lock_deadlocks disabled
-lock_timeouts disabled
-lock_rec_lock_waits disabled
-lock_table_lock_waits disabled
-lock_rec_lock_requests disabled
-lock_rec_lock_created disabled
-lock_rec_lock_removed disabled
-lock_rec_locks disabled
-lock_table_lock_created disabled
-lock_table_lock_removed disabled
-lock_table_locks disabled
-lock_row_lock_current_waits disabled
-lock_row_lock_time disabled
-lock_row_lock_time_max disabled
-lock_row_lock_waits disabled
-lock_row_lock_time_avg disabled
-buffer_pool_size disabled
-buffer_pool_reads disabled
-buffer_pool_read_requests disabled
-buffer_pool_write_requests disabled
-buffer_pool_pages_in_flush disabled
-buffer_pool_wait_free disabled
-buffer_pool_read_ahead disabled
-buffer_pool_read_ahead_evicted disabled
-buffer_pool_pages_total disabled
-buffer_pool_pages_misc disabled
-buffer_pool_pages_data disabled
-buffer_pool_pages_dirty disabled
-buffer_pool_pages_free disabled
-buffer_pages_created disabled
-buffer_pages_written disabled
-buffer_pages_read disabled
-buffer_data_reads disabled
-buffer_data_written disabled
-buffer_flush_batch_scanned disabled
-buffer_flush_batch_num_scan disabled
-buffer_flush_batch_scanned_per_call disabled
-buffer_flush_batch_total_pages disabled
-buffer_flush_batches disabled
-buffer_flush_batch_pages disabled
-buffer_flush_neighbor_total_pages disabled
-buffer_flush_neighbor disabled
-buffer_flush_neighbor_pages disabled
-buffer_flush_max_dirty_total_pages disabled
-buffer_flush_max_dirty disabled
-buffer_flush_max_dirty_pages disabled
-buffer_flush_adaptive_total_pages disabled
-buffer_flush_adaptive disabled
-buffer_flush_adaptive_pages disabled
-buffer_flush_async_total_pages disabled
-buffer_flush_async disabled
-buffer_flush_async_pages disabled
-buffer_flush_sync_total_pages disabled
-buffer_flush_sync disabled
-buffer_flush_sync_pages disabled
-buffer_flush_background_total_pages disabled
-buffer_flush_background disabled
-buffer_flush_background_pages disabled
-buffer_LRU_batch_scanned disabled
-buffer_LRU_batch_num_scan disabled
-buffer_LRU_batch_scanned_per_call disabled
-buffer_LRU_batch_total_pages disabled
-buffer_LRU_batches disabled
-buffer_LRU_batch_pages disabled
-buffer_LRU_single_flush_scanned disabled
-buffer_LRU_single_flush_num_scan disabled
-buffer_LRU_single_flush_scanned_per_call disabled
-buffer_LRU_single_flush_failure_count disabled
-buffer_LRU_get_free_search disabled
-buffer_LRU_search_scanned disabled
-buffer_LRU_search_num_scan disabled
-buffer_LRU_search_scanned_per_call disabled
-buffer_LRU_unzip_search_scanned disabled
-buffer_LRU_unzip_search_num_scan disabled
-buffer_LRU_unzip_search_scanned_per_call disabled
-buffer_page_read_index_leaf disabled
-buffer_page_read_index_non_leaf disabled
-buffer_page_read_index_ibuf_leaf disabled
-buffer_page_read_index_ibuf_non_leaf disabled
-buffer_page_read_undo_log disabled
-buffer_page_read_index_inode disabled
-buffer_page_read_ibuf_free_list disabled
-buffer_page_read_ibuf_bitmap disabled
-buffer_page_read_system_page disabled
-buffer_page_read_trx_system disabled
-buffer_page_read_fsp_hdr disabled
-buffer_page_read_xdes disabled
-buffer_page_read_blob disabled
-buffer_page_read_zblob disabled
-buffer_page_read_zblob2 disabled
-buffer_page_read_other disabled
-buffer_page_written_index_leaf disabled
-buffer_page_written_index_non_leaf disabled
-buffer_page_written_index_ibuf_leaf disabled
-buffer_page_written_index_ibuf_non_leaf disabled
-buffer_page_written_undo_log disabled
-buffer_page_written_index_inode disabled
-buffer_page_written_ibuf_free_list disabled
-buffer_page_written_ibuf_bitmap disabled
-buffer_page_written_system_page disabled
-buffer_page_written_trx_system disabled
-buffer_page_written_fsp_hdr disabled
-buffer_page_written_xdes disabled
-buffer_page_written_blob disabled
-buffer_page_written_zblob disabled
-buffer_page_written_zblob2 disabled
-buffer_page_written_other disabled
-os_data_reads disabled
-os_data_writes disabled
-os_data_fsyncs disabled
-os_pending_reads disabled
-os_pending_writes disabled
-os_log_bytes_written disabled
-os_log_fsyncs disabled
-os_log_pending_fsyncs disabled
-os_log_pending_writes disabled
-trx_rw_commits disabled
-trx_ro_commits disabled
-trx_nl_ro_commits disabled
-trx_commits_insert_update disabled
-trx_rollbacks disabled
-trx_rollbacks_savepoint disabled
-trx_rollback_active disabled
-trx_active_transactions disabled
-trx_rseg_history_len disabled
-trx_undo_slots_used disabled
-trx_undo_slots_cached disabled
-trx_rseg_current_size disabled
-purge_del_mark_records disabled
-purge_upd_exist_or_extern_records disabled
-purge_invoked disabled
-purge_undo_log_pages disabled
-purge_dml_delay_usec disabled
-purge_stop_count disabled
-purge_resume_count disabled
-log_checkpoints disabled
-log_lsn_last_flush disabled
-log_lsn_last_checkpoint disabled
-log_lsn_current disabled
-log_lsn_checkpoint_age disabled
-log_lsn_buf_pool_oldest disabled
-log_max_modified_age_async disabled
-log_max_modified_age_sync disabled
-log_pending_log_writes disabled
-log_pending_checkpoint_writes disabled
-log_num_log_io disabled
-log_waits disabled
-log_write_requests disabled
-log_writes disabled
-compress_pages_compressed disabled
-compress_pages_decompressed disabled
-index_splits disabled
-index_merges disabled
-adaptive_hash_searches disabled
-adaptive_hash_searches_btree disabled
-adaptive_hash_pages_added disabled
-adaptive_hash_pages_removed disabled
-adaptive_hash_rows_added disabled
-adaptive_hash_rows_removed disabled
-adaptive_hash_rows_deleted_no_hash_entry disabled
-adaptive_hash_rows_updated disabled
-file_num_open_files disabled
-ibuf_merges_insert disabled
-ibuf_merges_delete_mark disabled
-ibuf_merges_delete disabled
-ibuf_merges_discard_insert disabled
-ibuf_merges_discard_delete_mark disabled
-ibuf_merges_discard_delete disabled
-ibuf_merges disabled
-ibuf_size disabled
-innodb_master_thread_sleeps disabled
-innodb_activity_count disabled
-innodb_master_active_loops disabled
-innodb_master_idle_loops disabled
-innodb_background_drop_table_usec disabled
-innodb_ibuf_merge_usec disabled
-innodb_log_flush_usec disabled
-innodb_mem_validate_usec disabled
-innodb_master_purge_usec disabled
-innodb_dict_lru_usec disabled
-innodb_checkpoint_usec disabled
-innodb_dblwr_writes disabled
-innodb_dblwr_pages_written disabled
-innodb_page_size disabled
-innodb_rwlock_s_spin_waits disabled
-innodb_rwlock_x_spin_waits disabled
-innodb_rwlock_s_spin_rounds disabled
-innodb_rwlock_x_spin_rounds disabled
-innodb_rwlock_s_os_waits disabled
-innodb_rwlock_x_os_waits disabled
-dml_reads disabled
-dml_inserts disabled
-dml_deletes disabled
-dml_updates disabled
-ddl_background_drop_tables disabled
-icp_attempts disabled
-icp_no_match disabled
-icp_out_of_range disabled
-icp_match disabled
+select name from information_schema.innodb_metrics where status!='disabled';
+name
set global innodb_monitor_reset_all = all;
-select name, count, status from information_schema.innodb_metrics;
-name count status
-metadata_table_handles_opened 0 disabled
-metadata_table_handles_closed 0 disabled
-metadata_table_reference_count 0 disabled
-metadata_mem_pool_size 0 disabled
-lock_deadlocks 0 disabled
-lock_timeouts 0 disabled
-lock_rec_lock_waits 0 disabled
-lock_table_lock_waits 0 disabled
-lock_rec_lock_requests 0 disabled
-lock_rec_lock_created 0 disabled
-lock_rec_lock_removed 0 disabled
-lock_rec_locks 0 disabled
-lock_table_lock_created 0 disabled
-lock_table_lock_removed 0 disabled
-lock_table_locks 0 disabled
-lock_row_lock_current_waits 0 disabled
-lock_row_lock_time 0 disabled
-lock_row_lock_time_max 0 disabled
-lock_row_lock_waits 0 disabled
-lock_row_lock_time_avg 0 disabled
-buffer_pool_size 0 disabled
-buffer_pool_reads 0 disabled
-buffer_pool_read_requests 0 disabled
-buffer_pool_write_requests 0 disabled
-buffer_pool_pages_in_flush 0 disabled
-buffer_pool_wait_free 0 disabled
-buffer_pool_read_ahead 0 disabled
-buffer_pool_read_ahead_evicted 0 disabled
-buffer_pool_pages_total 0 disabled
-buffer_pool_pages_misc 0 disabled
-buffer_pool_pages_data 0 disabled
-buffer_pool_pages_dirty 0 disabled
-buffer_pool_pages_free 0 disabled
-buffer_pages_created 0 disabled
-buffer_pages_written 0 disabled
-buffer_pages_read 0 disabled
-buffer_data_reads 0 disabled
-buffer_data_written 0 disabled
-buffer_flush_batch_scanned 0 disabled
-buffer_flush_batch_num_scan 0 disabled
-buffer_flush_batch_scanned_per_call 0 disabled
-buffer_flush_batch_total_pages 0 disabled
-buffer_flush_batches 0 disabled
-buffer_flush_batch_pages 0 disabled
-buffer_flush_neighbor_total_pages 0 disabled
-buffer_flush_neighbor 0 disabled
-buffer_flush_neighbor_pages 0 disabled
-buffer_flush_max_dirty_total_pages 0 disabled
-buffer_flush_max_dirty 0 disabled
-buffer_flush_max_dirty_pages 0 disabled
-buffer_flush_adaptive_total_pages 0 disabled
-buffer_flush_adaptive 0 disabled
-buffer_flush_adaptive_pages 0 disabled
-buffer_flush_async_total_pages 0 disabled
-buffer_flush_async 0 disabled
-buffer_flush_async_pages 0 disabled
-buffer_flush_sync_total_pages 0 disabled
-buffer_flush_sync 0 disabled
-buffer_flush_sync_pages 0 disabled
-buffer_flush_background_total_pages 0 disabled
-buffer_flush_background 0 disabled
-buffer_flush_background_pages 0 disabled
-buffer_LRU_batch_scanned 0 disabled
-buffer_LRU_batch_num_scan 0 disabled
-buffer_LRU_batch_scanned_per_call 0 disabled
-buffer_LRU_batch_total_pages 0 disabled
-buffer_LRU_batches 0 disabled
-buffer_LRU_batch_pages 0 disabled
-buffer_LRU_single_flush_scanned 0 disabled
-buffer_LRU_single_flush_num_scan 0 disabled
-buffer_LRU_single_flush_scanned_per_call 0 disabled
-buffer_LRU_single_flush_failure_count 0 disabled
-buffer_LRU_get_free_search 0 disabled
-buffer_LRU_search_scanned 0 disabled
-buffer_LRU_search_num_scan 0 disabled
-buffer_LRU_search_scanned_per_call 0 disabled
-buffer_LRU_unzip_search_scanned 0 disabled
-buffer_LRU_unzip_search_num_scan 0 disabled
-buffer_LRU_unzip_search_scanned_per_call 0 disabled
-buffer_page_read_index_leaf 0 disabled
-buffer_page_read_index_non_leaf 0 disabled
-buffer_page_read_index_ibuf_leaf 0 disabled
-buffer_page_read_index_ibuf_non_leaf 0 disabled
-buffer_page_read_undo_log 0 disabled
-buffer_page_read_index_inode 0 disabled
-buffer_page_read_ibuf_free_list 0 disabled
-buffer_page_read_ibuf_bitmap 0 disabled
-buffer_page_read_system_page 0 disabled
-buffer_page_read_trx_system 0 disabled
-buffer_page_read_fsp_hdr 0 disabled
-buffer_page_read_xdes 0 disabled
-buffer_page_read_blob 0 disabled
-buffer_page_read_zblob 0 disabled
-buffer_page_read_zblob2 0 disabled
-buffer_page_read_other 0 disabled
-buffer_page_written_index_leaf 0 disabled
-buffer_page_written_index_non_leaf 0 disabled
-buffer_page_written_index_ibuf_leaf 0 disabled
-buffer_page_written_index_ibuf_non_leaf 0 disabled
-buffer_page_written_undo_log 0 disabled
-buffer_page_written_index_inode 0 disabled
-buffer_page_written_ibuf_free_list 0 disabled
-buffer_page_written_ibuf_bitmap 0 disabled
-buffer_page_written_system_page 0 disabled
-buffer_page_written_trx_system 0 disabled
-buffer_page_written_fsp_hdr 0 disabled
-buffer_page_written_xdes 0 disabled
-buffer_page_written_blob 0 disabled
-buffer_page_written_zblob 0 disabled
-buffer_page_written_zblob2 0 disabled
-buffer_page_written_other 0 disabled
-os_data_reads 0 disabled
-os_data_writes 0 disabled
-os_data_fsyncs 0 disabled
-os_pending_reads 0 disabled
-os_pending_writes 0 disabled
-os_log_bytes_written 0 disabled
-os_log_fsyncs 0 disabled
-os_log_pending_fsyncs 0 disabled
-os_log_pending_writes 0 disabled
-trx_rw_commits 0 disabled
-trx_ro_commits 0 disabled
-trx_nl_ro_commits 0 disabled
-trx_commits_insert_update 0 disabled
-trx_rollbacks 0 disabled
-trx_rollbacks_savepoint 0 disabled
-trx_rollback_active 0 disabled
-trx_active_transactions 0 disabled
-trx_rseg_history_len 0 disabled
-trx_undo_slots_used 0 disabled
-trx_undo_slots_cached 0 disabled
-trx_rseg_current_size 0 disabled
-purge_del_mark_records 0 disabled
-purge_upd_exist_or_extern_records 0 disabled
-purge_invoked 0 disabled
-purge_undo_log_pages 0 disabled
-purge_dml_delay_usec 0 disabled
-purge_stop_count 0 disabled
-purge_resume_count 0 disabled
-log_checkpoints 0 disabled
-log_lsn_last_flush 0 disabled
-log_lsn_last_checkpoint 0 disabled
-log_lsn_current 0 disabled
-log_lsn_checkpoint_age 0 disabled
-log_lsn_buf_pool_oldest 0 disabled
-log_max_modified_age_async 0 disabled
-log_max_modified_age_sync 0 disabled
-log_pending_log_writes 0 disabled
-log_pending_checkpoint_writes 0 disabled
-log_num_log_io 0 disabled
-log_waits 0 disabled
-log_write_requests 0 disabled
-log_writes 0 disabled
-compress_pages_compressed 0 disabled
-compress_pages_decompressed 0 disabled
-index_splits 0 disabled
-index_merges 0 disabled
-adaptive_hash_searches 0 disabled
-adaptive_hash_searches_btree 0 disabled
-adaptive_hash_pages_added 0 disabled
-adaptive_hash_pages_removed 0 disabled
-adaptive_hash_rows_added 0 disabled
-adaptive_hash_rows_removed 0 disabled
-adaptive_hash_rows_deleted_no_hash_entry 0 disabled
-adaptive_hash_rows_updated 0 disabled
-file_num_open_files 0 disabled
-ibuf_merges_insert 0 disabled
-ibuf_merges_delete_mark 0 disabled
-ibuf_merges_delete 0 disabled
-ibuf_merges_discard_insert 0 disabled
-ibuf_merges_discard_delete_mark 0 disabled
-ibuf_merges_discard_delete 0 disabled
-ibuf_merges 0 disabled
-ibuf_size 0 disabled
-innodb_master_thread_sleeps 0 disabled
-innodb_activity_count 0 disabled
-innodb_master_active_loops 0 disabled
-innodb_master_idle_loops 0 disabled
-innodb_background_drop_table_usec 0 disabled
-innodb_ibuf_merge_usec 0 disabled
-innodb_log_flush_usec 0 disabled
-innodb_mem_validate_usec 0 disabled
-innodb_master_purge_usec 0 disabled
-innodb_dict_lru_usec 0 disabled
-innodb_checkpoint_usec 0 disabled
-innodb_dblwr_writes 0 disabled
-innodb_dblwr_pages_written 0 disabled
-innodb_page_size 0 disabled
-innodb_rwlock_s_spin_waits 0 disabled
-innodb_rwlock_x_spin_waits 0 disabled
-innodb_rwlock_s_spin_rounds 0 disabled
-innodb_rwlock_x_spin_rounds 0 disabled
-innodb_rwlock_s_os_waits 0 disabled
-innodb_rwlock_x_os_waits 0 disabled
-dml_reads 0 disabled
-dml_inserts 0 disabled
-dml_deletes 0 disabled
-dml_updates 0 disabled
-ddl_background_drop_tables 0 disabled
-icp_attempts 0 disabled
-icp_no_match 0 disabled
-icp_out_of_range 0 disabled
-icp_match 0 disabled
+select name from information_schema.innodb_metrics where count!=0;
+name
set global innodb_monitor_enable = "%lock%";
-select name, status from information_schema.innodb_metrics
-where name like "%lock%";
-name status
-lock_deadlocks enabled
-lock_timeouts enabled
-lock_rec_lock_waits enabled
-lock_table_lock_waits enabled
-lock_rec_lock_requests enabled
-lock_rec_lock_created enabled
-lock_rec_lock_removed enabled
-lock_rec_locks enabled
-lock_table_lock_created enabled
-lock_table_lock_removed enabled
-lock_table_locks enabled
-lock_row_lock_current_waits enabled
-lock_row_lock_time enabled
-lock_row_lock_time_max enabled
-lock_row_lock_waits enabled
-lock_row_lock_time_avg enabled
-innodb_rwlock_s_spin_waits enabled
-innodb_rwlock_x_spin_waits enabled
-innodb_rwlock_s_spin_rounds enabled
-innodb_rwlock_x_spin_rounds enabled
-innodb_rwlock_s_os_waits enabled
-innodb_rwlock_x_os_waits enabled
+select name from information_schema.innodb_metrics
+where status != IF(name like "%lock%", 'enabled', 'disabled');
+name
set global innodb_monitor_disable = "%lock%";
select name, status from information_schema.innodb_metrics
where name like "%lock%";
@@ -879,1035 +254,21 @@ innodb_rwlock_x_os_waits disabled
set global innodb_monitor_enable = "%lock*";
ERROR 42000: Variable 'innodb_monitor_enable' can't be set to the value of '%lock*'
set global innodb_monitor_enable="%%%%%%%%%%%%%%%%%%%%%%%%%%%";
-select name, status from information_schema.innodb_metrics;
-name status
-metadata_table_handles_opened enabled
-metadata_table_handles_closed enabled
-metadata_table_reference_count enabled
-metadata_mem_pool_size enabled
-lock_deadlocks enabled
-lock_timeouts enabled
-lock_rec_lock_waits enabled
-lock_table_lock_waits enabled
-lock_rec_lock_requests enabled
-lock_rec_lock_created enabled
-lock_rec_lock_removed enabled
-lock_rec_locks enabled
-lock_table_lock_created enabled
-lock_table_lock_removed enabled
-lock_table_locks enabled
-lock_row_lock_current_waits enabled
-lock_row_lock_time enabled
-lock_row_lock_time_max enabled
-lock_row_lock_waits enabled
-lock_row_lock_time_avg enabled
-buffer_pool_size enabled
-buffer_pool_reads enabled
-buffer_pool_read_requests enabled
-buffer_pool_write_requests enabled
-buffer_pool_pages_in_flush enabled
-buffer_pool_wait_free enabled
-buffer_pool_read_ahead enabled
-buffer_pool_read_ahead_evicted enabled
-buffer_pool_pages_total enabled
-buffer_pool_pages_misc enabled
-buffer_pool_pages_data enabled
-buffer_pool_pages_dirty enabled
-buffer_pool_pages_free enabled
-buffer_pages_created enabled
-buffer_pages_written enabled
-buffer_pages_read enabled
-buffer_data_reads enabled
-buffer_data_written enabled
-buffer_flush_batch_scanned enabled
-buffer_flush_batch_num_scan enabled
-buffer_flush_batch_scanned_per_call enabled
-buffer_flush_batch_total_pages enabled
-buffer_flush_batches enabled
-buffer_flush_batch_pages enabled
-buffer_flush_neighbor_total_pages enabled
-buffer_flush_neighbor enabled
-buffer_flush_neighbor_pages enabled
-buffer_flush_max_dirty_total_pages enabled
-buffer_flush_max_dirty enabled
-buffer_flush_max_dirty_pages enabled
-buffer_flush_adaptive_total_pages enabled
-buffer_flush_adaptive enabled
-buffer_flush_adaptive_pages enabled
-buffer_flush_async_total_pages enabled
-buffer_flush_async enabled
-buffer_flush_async_pages enabled
-buffer_flush_sync_total_pages enabled
-buffer_flush_sync enabled
-buffer_flush_sync_pages enabled
-buffer_flush_background_total_pages enabled
-buffer_flush_background enabled
-buffer_flush_background_pages enabled
-buffer_LRU_batch_scanned enabled
-buffer_LRU_batch_num_scan enabled
-buffer_LRU_batch_scanned_per_call enabled
-buffer_LRU_batch_total_pages enabled
-buffer_LRU_batches enabled
-buffer_LRU_batch_pages enabled
-buffer_LRU_single_flush_scanned enabled
-buffer_LRU_single_flush_num_scan enabled
-buffer_LRU_single_flush_scanned_per_call enabled
-buffer_LRU_single_flush_failure_count enabled
-buffer_LRU_get_free_search enabled
-buffer_LRU_search_scanned enabled
-buffer_LRU_search_num_scan enabled
-buffer_LRU_search_scanned_per_call enabled
-buffer_LRU_unzip_search_scanned enabled
-buffer_LRU_unzip_search_num_scan enabled
-buffer_LRU_unzip_search_scanned_per_call enabled
-buffer_page_read_index_leaf enabled
-buffer_page_read_index_non_leaf enabled
-buffer_page_read_index_ibuf_leaf enabled
-buffer_page_read_index_ibuf_non_leaf enabled
-buffer_page_read_undo_log enabled
-buffer_page_read_index_inode enabled
-buffer_page_read_ibuf_free_list enabled
-buffer_page_read_ibuf_bitmap enabled
-buffer_page_read_system_page enabled
-buffer_page_read_trx_system enabled
-buffer_page_read_fsp_hdr enabled
-buffer_page_read_xdes enabled
-buffer_page_read_blob enabled
-buffer_page_read_zblob enabled
-buffer_page_read_zblob2 enabled
-buffer_page_read_other enabled
-buffer_page_written_index_leaf enabled
-buffer_page_written_index_non_leaf enabled
-buffer_page_written_index_ibuf_leaf enabled
-buffer_page_written_index_ibuf_non_leaf enabled
-buffer_page_written_undo_log enabled
-buffer_page_written_index_inode enabled
-buffer_page_written_ibuf_free_list enabled
-buffer_page_written_ibuf_bitmap enabled
-buffer_page_written_system_page enabled
-buffer_page_written_trx_system enabled
-buffer_page_written_fsp_hdr enabled
-buffer_page_written_xdes enabled
-buffer_page_written_blob enabled
-buffer_page_written_zblob enabled
-buffer_page_written_zblob2 enabled
-buffer_page_written_other enabled
-os_data_reads enabled
-os_data_writes enabled
-os_data_fsyncs enabled
-os_pending_reads enabled
-os_pending_writes enabled
-os_log_bytes_written enabled
-os_log_fsyncs enabled
-os_log_pending_fsyncs enabled
-os_log_pending_writes enabled
-trx_rw_commits enabled
-trx_ro_commits enabled
-trx_nl_ro_commits enabled
-trx_commits_insert_update enabled
-trx_rollbacks enabled
-trx_rollbacks_savepoint enabled
-trx_rollback_active enabled
-trx_active_transactions enabled
-trx_rseg_history_len enabled
-trx_undo_slots_used enabled
-trx_undo_slots_cached enabled
-trx_rseg_current_size enabled
-purge_del_mark_records enabled
-purge_upd_exist_or_extern_records enabled
-purge_invoked enabled
-purge_undo_log_pages enabled
-purge_dml_delay_usec enabled
-purge_stop_count enabled
-purge_resume_count enabled
-log_checkpoints enabled
-log_lsn_last_flush enabled
-log_lsn_last_checkpoint enabled
-log_lsn_current enabled
-log_lsn_checkpoint_age enabled
-log_lsn_buf_pool_oldest enabled
-log_max_modified_age_async enabled
-log_max_modified_age_sync enabled
-log_pending_log_writes enabled
-log_pending_checkpoint_writes enabled
-log_num_log_io enabled
-log_waits enabled
-log_write_requests enabled
-log_writes enabled
-compress_pages_compressed enabled
-compress_pages_decompressed enabled
-index_splits enabled
-index_merges enabled
-adaptive_hash_searches enabled
-adaptive_hash_searches_btree enabled
-adaptive_hash_pages_added enabled
-adaptive_hash_pages_removed enabled
-adaptive_hash_rows_added enabled
-adaptive_hash_rows_removed enabled
-adaptive_hash_rows_deleted_no_hash_entry enabled
-adaptive_hash_rows_updated enabled
-file_num_open_files enabled
-ibuf_merges_insert enabled
-ibuf_merges_delete_mark enabled
-ibuf_merges_delete enabled
-ibuf_merges_discard_insert enabled
-ibuf_merges_discard_delete_mark enabled
-ibuf_merges_discard_delete enabled
-ibuf_merges enabled
-ibuf_size enabled
-innodb_master_thread_sleeps enabled
-innodb_activity_count enabled
-innodb_master_active_loops enabled
-innodb_master_idle_loops enabled
-innodb_background_drop_table_usec enabled
-innodb_ibuf_merge_usec enabled
-innodb_log_flush_usec enabled
-innodb_mem_validate_usec enabled
-innodb_master_purge_usec enabled
-innodb_dict_lru_usec enabled
-innodb_checkpoint_usec enabled
-innodb_dblwr_writes enabled
-innodb_dblwr_pages_written enabled
-innodb_page_size enabled
-innodb_rwlock_s_spin_waits enabled
-innodb_rwlock_x_spin_waits enabled
-innodb_rwlock_s_spin_rounds enabled
-innodb_rwlock_x_spin_rounds enabled
-innodb_rwlock_s_os_waits enabled
-innodb_rwlock_x_os_waits enabled
-dml_reads enabled
-dml_inserts enabled
-dml_deletes enabled
-dml_updates enabled
-ddl_background_drop_tables enabled
-icp_attempts enabled
-icp_no_match enabled
-icp_out_of_range enabled
-icp_match enabled
+select name from information_schema.innodb_metrics where status!='enabled';
+name
set global innodb_monitor_disable="%%%%%";
-select name, status from information_schema.innodb_metrics;
-name status
-metadata_table_handles_opened disabled
-metadata_table_handles_closed disabled
-metadata_table_reference_count disabled
-metadata_mem_pool_size disabled
-lock_deadlocks disabled
-lock_timeouts disabled
-lock_rec_lock_waits disabled
-lock_table_lock_waits disabled
-lock_rec_lock_requests disabled
-lock_rec_lock_created disabled
-lock_rec_lock_removed disabled
-lock_rec_locks disabled
-lock_table_lock_created disabled
-lock_table_lock_removed disabled
-lock_table_locks disabled
-lock_row_lock_current_waits disabled
-lock_row_lock_time disabled
-lock_row_lock_time_max disabled
-lock_row_lock_waits disabled
-lock_row_lock_time_avg disabled
-buffer_pool_size disabled
-buffer_pool_reads disabled
-buffer_pool_read_requests disabled
-buffer_pool_write_requests disabled
-buffer_pool_pages_in_flush disabled
-buffer_pool_wait_free disabled
-buffer_pool_read_ahead disabled
-buffer_pool_read_ahead_evicted disabled
-buffer_pool_pages_total disabled
-buffer_pool_pages_misc disabled
-buffer_pool_pages_data disabled
-buffer_pool_pages_dirty disabled
-buffer_pool_pages_free disabled
-buffer_pages_created disabled
-buffer_pages_written disabled
-buffer_pages_read disabled
-buffer_data_reads disabled
-buffer_data_written disabled
-buffer_flush_batch_scanned disabled
-buffer_flush_batch_num_scan disabled
-buffer_flush_batch_scanned_per_call disabled
-buffer_flush_batch_total_pages disabled
-buffer_flush_batches disabled
-buffer_flush_batch_pages disabled
-buffer_flush_neighbor_total_pages disabled
-buffer_flush_neighbor disabled
-buffer_flush_neighbor_pages disabled
-buffer_flush_max_dirty_total_pages disabled
-buffer_flush_max_dirty disabled
-buffer_flush_max_dirty_pages disabled
-buffer_flush_adaptive_total_pages disabled
-buffer_flush_adaptive disabled
-buffer_flush_adaptive_pages disabled
-buffer_flush_async_total_pages disabled
-buffer_flush_async disabled
-buffer_flush_async_pages disabled
-buffer_flush_sync_total_pages disabled
-buffer_flush_sync disabled
-buffer_flush_sync_pages disabled
-buffer_flush_background_total_pages disabled
-buffer_flush_background disabled
-buffer_flush_background_pages disabled
-buffer_LRU_batch_scanned disabled
-buffer_LRU_batch_num_scan disabled
-buffer_LRU_batch_scanned_per_call disabled
-buffer_LRU_batch_total_pages disabled
-buffer_LRU_batches disabled
-buffer_LRU_batch_pages disabled
-buffer_LRU_single_flush_scanned disabled
-buffer_LRU_single_flush_num_scan disabled
-buffer_LRU_single_flush_scanned_per_call disabled
-buffer_LRU_single_flush_failure_count disabled
-buffer_LRU_get_free_search disabled
-buffer_LRU_search_scanned disabled
-buffer_LRU_search_num_scan disabled
-buffer_LRU_search_scanned_per_call disabled
-buffer_LRU_unzip_search_scanned disabled
-buffer_LRU_unzip_search_num_scan disabled
-buffer_LRU_unzip_search_scanned_per_call disabled
-buffer_page_read_index_leaf disabled
-buffer_page_read_index_non_leaf disabled
-buffer_page_read_index_ibuf_leaf disabled
-buffer_page_read_index_ibuf_non_leaf disabled
-buffer_page_read_undo_log disabled
-buffer_page_read_index_inode disabled
-buffer_page_read_ibuf_free_list disabled
-buffer_page_read_ibuf_bitmap disabled
-buffer_page_read_system_page disabled
-buffer_page_read_trx_system disabled
-buffer_page_read_fsp_hdr disabled
-buffer_page_read_xdes disabled
-buffer_page_read_blob disabled
-buffer_page_read_zblob disabled
-buffer_page_read_zblob2 disabled
-buffer_page_read_other disabled
-buffer_page_written_index_leaf disabled
-buffer_page_written_index_non_leaf disabled
-buffer_page_written_index_ibuf_leaf disabled
-buffer_page_written_index_ibuf_non_leaf disabled
-buffer_page_written_undo_log disabled
-buffer_page_written_index_inode disabled
-buffer_page_written_ibuf_free_list disabled
-buffer_page_written_ibuf_bitmap disabled
-buffer_page_written_system_page disabled
-buffer_page_written_trx_system disabled
-buffer_page_written_fsp_hdr disabled
-buffer_page_written_xdes disabled
-buffer_page_written_blob disabled
-buffer_page_written_zblob disabled
-buffer_page_written_zblob2 disabled
-buffer_page_written_other disabled
-os_data_reads disabled
-os_data_writes disabled
-os_data_fsyncs disabled
-os_pending_reads disabled
-os_pending_writes disabled
-os_log_bytes_written disabled
-os_log_fsyncs disabled
-os_log_pending_fsyncs disabled
-os_log_pending_writes disabled
-trx_rw_commits disabled
-trx_ro_commits disabled
-trx_nl_ro_commits disabled
-trx_commits_insert_update disabled
-trx_rollbacks disabled
-trx_rollbacks_savepoint disabled
-trx_rollback_active disabled
-trx_active_transactions disabled
-trx_rseg_history_len disabled
-trx_undo_slots_used disabled
-trx_undo_slots_cached disabled
-trx_rseg_current_size disabled
-purge_del_mark_records disabled
-purge_upd_exist_or_extern_records disabled
-purge_invoked disabled
-purge_undo_log_pages disabled
-purge_dml_delay_usec disabled
-purge_stop_count disabled
-purge_resume_count disabled
-log_checkpoints disabled
-log_lsn_last_flush disabled
-log_lsn_last_checkpoint disabled
-log_lsn_current disabled
-log_lsn_checkpoint_age disabled
-log_lsn_buf_pool_oldest disabled
-log_max_modified_age_async disabled
-log_max_modified_age_sync disabled
-log_pending_log_writes disabled
-log_pending_checkpoint_writes disabled
-log_num_log_io disabled
-log_waits disabled
-log_write_requests disabled
-log_writes disabled
-compress_pages_compressed disabled
-compress_pages_decompressed disabled
-index_splits disabled
-index_merges disabled
-adaptive_hash_searches disabled
-adaptive_hash_searches_btree disabled
-adaptive_hash_pages_added disabled
-adaptive_hash_pages_removed disabled
-adaptive_hash_rows_added disabled
-adaptive_hash_rows_removed disabled
-adaptive_hash_rows_deleted_no_hash_entry disabled
-adaptive_hash_rows_updated disabled
-file_num_open_files disabled
-ibuf_merges_insert disabled
-ibuf_merges_delete_mark disabled
-ibuf_merges_delete disabled
-ibuf_merges_discard_insert disabled
-ibuf_merges_discard_delete_mark disabled
-ibuf_merges_discard_delete disabled
-ibuf_merges disabled
-ibuf_size disabled
-innodb_master_thread_sleeps disabled
-innodb_activity_count disabled
-innodb_master_active_loops disabled
-innodb_master_idle_loops disabled
-innodb_background_drop_table_usec disabled
-innodb_ibuf_merge_usec disabled
-innodb_log_flush_usec disabled
-innodb_mem_validate_usec disabled
-innodb_master_purge_usec disabled
-innodb_dict_lru_usec disabled
-innodb_checkpoint_usec disabled
-innodb_dblwr_writes disabled
-innodb_dblwr_pages_written disabled
-innodb_page_size disabled
-innodb_rwlock_s_spin_waits disabled
-innodb_rwlock_x_spin_waits disabled
-innodb_rwlock_s_spin_rounds disabled
-innodb_rwlock_x_spin_rounds disabled
-innodb_rwlock_s_os_waits disabled
-innodb_rwlock_x_os_waits disabled
-dml_reads disabled
-dml_inserts disabled
-dml_deletes disabled
-dml_updates disabled
-ddl_background_drop_tables disabled
-icp_attempts disabled
-icp_no_match disabled
-icp_out_of_range disabled
-icp_match disabled
+select name from information_schema.innodb_metrics where status!='disabled';
+name
set global innodb_monitor_enable="%";
-select name, status from information_schema.innodb_metrics;
-name status
-metadata_table_handles_opened enabled
-metadata_table_handles_closed enabled
-metadata_table_reference_count enabled
-metadata_mem_pool_size enabled
-lock_deadlocks enabled
-lock_timeouts enabled
-lock_rec_lock_waits enabled
-lock_table_lock_waits enabled
-lock_rec_lock_requests enabled
-lock_rec_lock_created enabled
-lock_rec_lock_removed enabled
-lock_rec_locks enabled
-lock_table_lock_created enabled
-lock_table_lock_removed enabled
-lock_table_locks enabled
-lock_row_lock_current_waits enabled
-lock_row_lock_time enabled
-lock_row_lock_time_max enabled
-lock_row_lock_waits enabled
-lock_row_lock_time_avg enabled
-buffer_pool_size enabled
-buffer_pool_reads enabled
-buffer_pool_read_requests enabled
-buffer_pool_write_requests enabled
-buffer_pool_pages_in_flush enabled
-buffer_pool_wait_free enabled
-buffer_pool_read_ahead enabled
-buffer_pool_read_ahead_evicted enabled
-buffer_pool_pages_total enabled
-buffer_pool_pages_misc enabled
-buffer_pool_pages_data enabled
-buffer_pool_pages_dirty enabled
-buffer_pool_pages_free enabled
-buffer_pages_created enabled
-buffer_pages_written enabled
-buffer_pages_read enabled
-buffer_data_reads enabled
-buffer_data_written enabled
-buffer_flush_batch_scanned enabled
-buffer_flush_batch_num_scan enabled
-buffer_flush_batch_scanned_per_call enabled
-buffer_flush_batch_total_pages enabled
-buffer_flush_batches enabled
-buffer_flush_batch_pages enabled
-buffer_flush_neighbor_total_pages enabled
-buffer_flush_neighbor enabled
-buffer_flush_neighbor_pages enabled
-buffer_flush_max_dirty_total_pages enabled
-buffer_flush_max_dirty enabled
-buffer_flush_max_dirty_pages enabled
-buffer_flush_adaptive_total_pages enabled
-buffer_flush_adaptive enabled
-buffer_flush_adaptive_pages enabled
-buffer_flush_async_total_pages enabled
-buffer_flush_async enabled
-buffer_flush_async_pages enabled
-buffer_flush_sync_total_pages enabled
-buffer_flush_sync enabled
-buffer_flush_sync_pages enabled
-buffer_flush_background_total_pages enabled
-buffer_flush_background enabled
-buffer_flush_background_pages enabled
-buffer_LRU_batch_scanned enabled
-buffer_LRU_batch_num_scan enabled
-buffer_LRU_batch_scanned_per_call enabled
-buffer_LRU_batch_total_pages enabled
-buffer_LRU_batches enabled
-buffer_LRU_batch_pages enabled
-buffer_LRU_single_flush_scanned enabled
-buffer_LRU_single_flush_num_scan enabled
-buffer_LRU_single_flush_scanned_per_call enabled
-buffer_LRU_single_flush_failure_count enabled
-buffer_LRU_get_free_search enabled
-buffer_LRU_search_scanned enabled
-buffer_LRU_search_num_scan enabled
-buffer_LRU_search_scanned_per_call enabled
-buffer_LRU_unzip_search_scanned enabled
-buffer_LRU_unzip_search_num_scan enabled
-buffer_LRU_unzip_search_scanned_per_call enabled
-buffer_page_read_index_leaf enabled
-buffer_page_read_index_non_leaf enabled
-buffer_page_read_index_ibuf_leaf enabled
-buffer_page_read_index_ibuf_non_leaf enabled
-buffer_page_read_undo_log enabled
-buffer_page_read_index_inode enabled
-buffer_page_read_ibuf_free_list enabled
-buffer_page_read_ibuf_bitmap enabled
-buffer_page_read_system_page enabled
-buffer_page_read_trx_system enabled
-buffer_page_read_fsp_hdr enabled
-buffer_page_read_xdes enabled
-buffer_page_read_blob enabled
-buffer_page_read_zblob enabled
-buffer_page_read_zblob2 enabled
-buffer_page_read_other enabled
-buffer_page_written_index_leaf enabled
-buffer_page_written_index_non_leaf enabled
-buffer_page_written_index_ibuf_leaf enabled
-buffer_page_written_index_ibuf_non_leaf enabled
-buffer_page_written_undo_log enabled
-buffer_page_written_index_inode enabled
-buffer_page_written_ibuf_free_list enabled
-buffer_page_written_ibuf_bitmap enabled
-buffer_page_written_system_page enabled
-buffer_page_written_trx_system enabled
-buffer_page_written_fsp_hdr enabled
-buffer_page_written_xdes enabled
-buffer_page_written_blob enabled
-buffer_page_written_zblob enabled
-buffer_page_written_zblob2 enabled
-buffer_page_written_other enabled
-os_data_reads enabled
-os_data_writes enabled
-os_data_fsyncs enabled
-os_pending_reads enabled
-os_pending_writes enabled
-os_log_bytes_written enabled
-os_log_fsyncs enabled
-os_log_pending_fsyncs enabled
-os_log_pending_writes enabled
-trx_rw_commits enabled
-trx_ro_commits enabled
-trx_nl_ro_commits enabled
-trx_commits_insert_update enabled
-trx_rollbacks enabled
-trx_rollbacks_savepoint enabled
-trx_rollback_active enabled
-trx_active_transactions enabled
-trx_rseg_history_len enabled
-trx_undo_slots_used enabled
-trx_undo_slots_cached enabled
-trx_rseg_current_size enabled
-purge_del_mark_records enabled
-purge_upd_exist_or_extern_records enabled
-purge_invoked enabled
-purge_undo_log_pages enabled
-purge_dml_delay_usec enabled
-purge_stop_count enabled
-purge_resume_count enabled
-log_checkpoints enabled
-log_lsn_last_flush enabled
-log_lsn_last_checkpoint enabled
-log_lsn_current enabled
-log_lsn_checkpoint_age enabled
-log_lsn_buf_pool_oldest enabled
-log_max_modified_age_async enabled
-log_max_modified_age_sync enabled
-log_pending_log_writes enabled
-log_pending_checkpoint_writes enabled
-log_num_log_io enabled
-log_waits enabled
-log_write_requests enabled
-log_writes enabled
-compress_pages_compressed enabled
-compress_pages_decompressed enabled
-index_splits enabled
-index_merges enabled
-adaptive_hash_searches enabled
-adaptive_hash_searches_btree enabled
-adaptive_hash_pages_added enabled
-adaptive_hash_pages_removed enabled
-adaptive_hash_rows_added enabled
-adaptive_hash_rows_removed enabled
-adaptive_hash_rows_deleted_no_hash_entry enabled
-adaptive_hash_rows_updated enabled
-file_num_open_files enabled
-ibuf_merges_insert enabled
-ibuf_merges_delete_mark enabled
-ibuf_merges_delete enabled
-ibuf_merges_discard_insert enabled
-ibuf_merges_discard_delete_mark enabled
-ibuf_merges_discard_delete enabled
-ibuf_merges enabled
-ibuf_size enabled
-innodb_master_thread_sleeps enabled
-innodb_activity_count enabled
-innodb_master_active_loops enabled
-innodb_master_idle_loops enabled
-innodb_background_drop_table_usec enabled
-innodb_ibuf_merge_usec enabled
-innodb_log_flush_usec enabled
-innodb_mem_validate_usec enabled
-innodb_master_purge_usec enabled
-innodb_dict_lru_usec enabled
-innodb_checkpoint_usec enabled
-innodb_dblwr_writes enabled
-innodb_dblwr_pages_written enabled
-innodb_page_size enabled
-innodb_rwlock_s_spin_waits enabled
-innodb_rwlock_x_spin_waits enabled
-innodb_rwlock_s_spin_rounds enabled
-innodb_rwlock_x_spin_rounds enabled
-innodb_rwlock_s_os_waits enabled
-innodb_rwlock_x_os_waits enabled
-dml_reads enabled
-dml_inserts enabled
-dml_deletes enabled
-dml_updates enabled
-ddl_background_drop_tables enabled
-icp_attempts enabled
-icp_no_match enabled
-icp_out_of_range enabled
-icp_match enabled
+select name from information_schema.innodb_metrics where status!='enabled';
+name
set global innodb_monitor_disable="%_%";
-select name, status from information_schema.innodb_metrics;
-name status
-metadata_table_handles_opened disabled
-metadata_table_handles_closed disabled
-metadata_table_reference_count disabled
-metadata_mem_pool_size disabled
-lock_deadlocks disabled
-lock_timeouts disabled
-lock_rec_lock_waits disabled
-lock_table_lock_waits disabled
-lock_rec_lock_requests disabled
-lock_rec_lock_created disabled
-lock_rec_lock_removed disabled
-lock_rec_locks disabled
-lock_table_lock_created disabled
-lock_table_lock_removed disabled
-lock_table_locks disabled
-lock_row_lock_current_waits disabled
-lock_row_lock_time disabled
-lock_row_lock_time_max disabled
-lock_row_lock_waits disabled
-lock_row_lock_time_avg disabled
-buffer_pool_size disabled
-buffer_pool_reads disabled
-buffer_pool_read_requests disabled
-buffer_pool_write_requests disabled
-buffer_pool_pages_in_flush disabled
-buffer_pool_wait_free disabled
-buffer_pool_read_ahead disabled
-buffer_pool_read_ahead_evicted disabled
-buffer_pool_pages_total disabled
-buffer_pool_pages_misc disabled
-buffer_pool_pages_data disabled
-buffer_pool_pages_dirty disabled
-buffer_pool_pages_free disabled
-buffer_pages_created disabled
-buffer_pages_written disabled
-buffer_pages_read disabled
-buffer_data_reads disabled
-buffer_data_written disabled
-buffer_flush_batch_scanned disabled
-buffer_flush_batch_num_scan disabled
-buffer_flush_batch_scanned_per_call disabled
-buffer_flush_batch_total_pages disabled
-buffer_flush_batches disabled
-buffer_flush_batch_pages disabled
-buffer_flush_neighbor_total_pages disabled
-buffer_flush_neighbor disabled
-buffer_flush_neighbor_pages disabled
-buffer_flush_max_dirty_total_pages disabled
-buffer_flush_max_dirty disabled
-buffer_flush_max_dirty_pages disabled
-buffer_flush_adaptive_total_pages disabled
-buffer_flush_adaptive disabled
-buffer_flush_adaptive_pages disabled
-buffer_flush_async_total_pages disabled
-buffer_flush_async disabled
-buffer_flush_async_pages disabled
-buffer_flush_sync_total_pages disabled
-buffer_flush_sync disabled
-buffer_flush_sync_pages disabled
-buffer_flush_background_total_pages disabled
-buffer_flush_background disabled
-buffer_flush_background_pages disabled
-buffer_LRU_batch_scanned disabled
-buffer_LRU_batch_num_scan disabled
-buffer_LRU_batch_scanned_per_call disabled
-buffer_LRU_batch_total_pages disabled
-buffer_LRU_batches disabled
-buffer_LRU_batch_pages disabled
-buffer_LRU_single_flush_scanned disabled
-buffer_LRU_single_flush_num_scan disabled
-buffer_LRU_single_flush_scanned_per_call disabled
-buffer_LRU_single_flush_failure_count disabled
-buffer_LRU_get_free_search disabled
-buffer_LRU_search_scanned disabled
-buffer_LRU_search_num_scan disabled
-buffer_LRU_search_scanned_per_call disabled
-buffer_LRU_unzip_search_scanned disabled
-buffer_LRU_unzip_search_num_scan disabled
-buffer_LRU_unzip_search_scanned_per_call disabled
-buffer_page_read_index_leaf disabled
-buffer_page_read_index_non_leaf disabled
-buffer_page_read_index_ibuf_leaf disabled
-buffer_page_read_index_ibuf_non_leaf disabled
-buffer_page_read_undo_log disabled
-buffer_page_read_index_inode disabled
-buffer_page_read_ibuf_free_list disabled
-buffer_page_read_ibuf_bitmap disabled
-buffer_page_read_system_page disabled
-buffer_page_read_trx_system disabled
-buffer_page_read_fsp_hdr disabled
-buffer_page_read_xdes disabled
-buffer_page_read_blob disabled
-buffer_page_read_zblob disabled
-buffer_page_read_zblob2 disabled
-buffer_page_read_other disabled
-buffer_page_written_index_leaf disabled
-buffer_page_written_index_non_leaf disabled
-buffer_page_written_index_ibuf_leaf disabled
-buffer_page_written_index_ibuf_non_leaf disabled
-buffer_page_written_undo_log disabled
-buffer_page_written_index_inode disabled
-buffer_page_written_ibuf_free_list disabled
-buffer_page_written_ibuf_bitmap disabled
-buffer_page_written_system_page disabled
-buffer_page_written_trx_system disabled
-buffer_page_written_fsp_hdr disabled
-buffer_page_written_xdes disabled
-buffer_page_written_blob disabled
-buffer_page_written_zblob disabled
-buffer_page_written_zblob2 disabled
-buffer_page_written_other disabled
-os_data_reads disabled
-os_data_writes disabled
-os_data_fsyncs disabled
-os_pending_reads disabled
-os_pending_writes disabled
-os_log_bytes_written disabled
-os_log_fsyncs disabled
-os_log_pending_fsyncs disabled
-os_log_pending_writes disabled
-trx_rw_commits disabled
-trx_ro_commits disabled
-trx_nl_ro_commits disabled
-trx_commits_insert_update disabled
-trx_rollbacks disabled
-trx_rollbacks_savepoint disabled
-trx_rollback_active disabled
-trx_active_transactions disabled
-trx_rseg_history_len disabled
-trx_undo_slots_used disabled
-trx_undo_slots_cached disabled
-trx_rseg_current_size disabled
-purge_del_mark_records disabled
-purge_upd_exist_or_extern_records disabled
-purge_invoked disabled
-purge_undo_log_pages disabled
-purge_dml_delay_usec disabled
-purge_stop_count disabled
-purge_resume_count disabled
-log_checkpoints disabled
-log_lsn_last_flush disabled
-log_lsn_last_checkpoint disabled
-log_lsn_current disabled
-log_lsn_checkpoint_age disabled
-log_lsn_buf_pool_oldest disabled
-log_max_modified_age_async disabled
-log_max_modified_age_sync disabled
-log_pending_log_writes disabled
-log_pending_checkpoint_writes disabled
-log_num_log_io disabled
-log_waits disabled
-log_write_requests disabled
-log_writes disabled
-compress_pages_compressed disabled
-compress_pages_decompressed disabled
-index_splits disabled
-index_merges disabled
-adaptive_hash_searches disabled
-adaptive_hash_searches_btree disabled
-adaptive_hash_pages_added disabled
-adaptive_hash_pages_removed disabled
-adaptive_hash_rows_added disabled
-adaptive_hash_rows_removed disabled
-adaptive_hash_rows_deleted_no_hash_entry disabled
-adaptive_hash_rows_updated disabled
-file_num_open_files disabled
-ibuf_merges_insert disabled
-ibuf_merges_delete_mark disabled
-ibuf_merges_delete disabled
-ibuf_merges_discard_insert disabled
-ibuf_merges_discard_delete_mark disabled
-ibuf_merges_discard_delete disabled
-ibuf_merges disabled
-ibuf_size disabled
-innodb_master_thread_sleeps disabled
-innodb_activity_count disabled
-innodb_master_active_loops disabled
-innodb_master_idle_loops disabled
-innodb_background_drop_table_usec disabled
-innodb_ibuf_merge_usec disabled
-innodb_log_flush_usec disabled
-innodb_mem_validate_usec disabled
-innodb_master_purge_usec disabled
-innodb_dict_lru_usec disabled
-innodb_checkpoint_usec disabled
-innodb_dblwr_writes disabled
-innodb_dblwr_pages_written disabled
-innodb_page_size disabled
-innodb_rwlock_s_spin_waits disabled
-innodb_rwlock_x_spin_waits disabled
-innodb_rwlock_s_spin_rounds disabled
-innodb_rwlock_x_spin_rounds disabled
-innodb_rwlock_s_os_waits disabled
-innodb_rwlock_x_os_waits disabled
-dml_reads disabled
-dml_inserts disabled
-dml_deletes disabled
-dml_updates disabled
-ddl_background_drop_tables disabled
-icp_attempts disabled
-icp_no_match disabled
-icp_out_of_range disabled
-icp_match disabled
+select name from information_schema.innodb_metrics where status!='disabled';
+name
set global innodb_monitor_enable="log%%%%";
-select name, status from information_schema.innodb_metrics;
-name status
-metadata_table_handles_opened disabled
-metadata_table_handles_closed disabled
-metadata_table_reference_count disabled
-metadata_mem_pool_size disabled
-lock_deadlocks disabled
-lock_timeouts disabled
-lock_rec_lock_waits disabled
-lock_table_lock_waits disabled
-lock_rec_lock_requests disabled
-lock_rec_lock_created disabled
-lock_rec_lock_removed disabled
-lock_rec_locks disabled
-lock_table_lock_created disabled
-lock_table_lock_removed disabled
-lock_table_locks disabled
-lock_row_lock_current_waits disabled
-lock_row_lock_time disabled
-lock_row_lock_time_max disabled
-lock_row_lock_waits disabled
-lock_row_lock_time_avg disabled
-buffer_pool_size disabled
-buffer_pool_reads disabled
-buffer_pool_read_requests disabled
-buffer_pool_write_requests disabled
-buffer_pool_pages_in_flush disabled
-buffer_pool_wait_free disabled
-buffer_pool_read_ahead disabled
-buffer_pool_read_ahead_evicted disabled
-buffer_pool_pages_total disabled
-buffer_pool_pages_misc disabled
-buffer_pool_pages_data disabled
-buffer_pool_pages_dirty disabled
-buffer_pool_pages_free disabled
-buffer_pages_created disabled
-buffer_pages_written disabled
-buffer_pages_read disabled
-buffer_data_reads disabled
-buffer_data_written disabled
-buffer_flush_batch_scanned disabled
-buffer_flush_batch_num_scan disabled
-buffer_flush_batch_scanned_per_call disabled
-buffer_flush_batch_total_pages disabled
-buffer_flush_batches disabled
-buffer_flush_batch_pages disabled
-buffer_flush_neighbor_total_pages disabled
-buffer_flush_neighbor disabled
-buffer_flush_neighbor_pages disabled
-buffer_flush_max_dirty_total_pages disabled
-buffer_flush_max_dirty disabled
-buffer_flush_max_dirty_pages disabled
-buffer_flush_adaptive_total_pages disabled
-buffer_flush_adaptive disabled
-buffer_flush_adaptive_pages disabled
-buffer_flush_async_total_pages disabled
-buffer_flush_async disabled
-buffer_flush_async_pages disabled
-buffer_flush_sync_total_pages disabled
-buffer_flush_sync disabled
-buffer_flush_sync_pages disabled
-buffer_flush_background_total_pages disabled
-buffer_flush_background disabled
-buffer_flush_background_pages disabled
-buffer_LRU_batch_scanned disabled
-buffer_LRU_batch_num_scan disabled
-buffer_LRU_batch_scanned_per_call disabled
-buffer_LRU_batch_total_pages disabled
-buffer_LRU_batches disabled
-buffer_LRU_batch_pages disabled
-buffer_LRU_single_flush_scanned disabled
-buffer_LRU_single_flush_num_scan disabled
-buffer_LRU_single_flush_scanned_per_call disabled
-buffer_LRU_single_flush_failure_count disabled
-buffer_LRU_get_free_search disabled
-buffer_LRU_search_scanned disabled
-buffer_LRU_search_num_scan disabled
-buffer_LRU_search_scanned_per_call disabled
-buffer_LRU_unzip_search_scanned disabled
-buffer_LRU_unzip_search_num_scan disabled
-buffer_LRU_unzip_search_scanned_per_call disabled
-buffer_page_read_index_leaf disabled
-buffer_page_read_index_non_leaf disabled
-buffer_page_read_index_ibuf_leaf disabled
-buffer_page_read_index_ibuf_non_leaf disabled
-buffer_page_read_undo_log disabled
-buffer_page_read_index_inode disabled
-buffer_page_read_ibuf_free_list disabled
-buffer_page_read_ibuf_bitmap disabled
-buffer_page_read_system_page disabled
-buffer_page_read_trx_system disabled
-buffer_page_read_fsp_hdr disabled
-buffer_page_read_xdes disabled
-buffer_page_read_blob disabled
-buffer_page_read_zblob disabled
-buffer_page_read_zblob2 disabled
-buffer_page_read_other disabled
-buffer_page_written_index_leaf disabled
-buffer_page_written_index_non_leaf disabled
-buffer_page_written_index_ibuf_leaf disabled
-buffer_page_written_index_ibuf_non_leaf disabled
-buffer_page_written_undo_log disabled
-buffer_page_written_index_inode disabled
-buffer_page_written_ibuf_free_list disabled
-buffer_page_written_ibuf_bitmap disabled
-buffer_page_written_system_page disabled
-buffer_page_written_trx_system disabled
-buffer_page_written_fsp_hdr disabled
-buffer_page_written_xdes disabled
-buffer_page_written_blob disabled
-buffer_page_written_zblob disabled
-buffer_page_written_zblob2 disabled
-buffer_page_written_other disabled
-os_data_reads disabled
-os_data_writes disabled
-os_data_fsyncs disabled
-os_pending_reads disabled
-os_pending_writes disabled
-os_log_bytes_written disabled
-os_log_fsyncs disabled
-os_log_pending_fsyncs disabled
-os_log_pending_writes disabled
-trx_rw_commits disabled
-trx_ro_commits disabled
-trx_nl_ro_commits disabled
-trx_commits_insert_update disabled
-trx_rollbacks disabled
-trx_rollbacks_savepoint disabled
-trx_rollback_active disabled
-trx_active_transactions disabled
-trx_rseg_history_len disabled
-trx_undo_slots_used disabled
-trx_undo_slots_cached disabled
-trx_rseg_current_size disabled
-purge_del_mark_records disabled
-purge_upd_exist_or_extern_records disabled
-purge_invoked disabled
-purge_undo_log_pages disabled
-purge_dml_delay_usec disabled
-purge_stop_count disabled
-purge_resume_count disabled
-log_checkpoints enabled
-log_lsn_last_flush enabled
-log_lsn_last_checkpoint enabled
-log_lsn_current enabled
-log_lsn_checkpoint_age enabled
-log_lsn_buf_pool_oldest enabled
-log_max_modified_age_async enabled
-log_max_modified_age_sync enabled
-log_pending_log_writes enabled
-log_pending_checkpoint_writes enabled
-log_num_log_io enabled
-log_waits enabled
-log_write_requests enabled
-log_writes enabled
-compress_pages_compressed disabled
-compress_pages_decompressed disabled
-index_splits disabled
-index_merges disabled
-adaptive_hash_searches disabled
-adaptive_hash_searches_btree disabled
-adaptive_hash_pages_added disabled
-adaptive_hash_pages_removed disabled
-adaptive_hash_rows_added disabled
-adaptive_hash_rows_removed disabled
-adaptive_hash_rows_deleted_no_hash_entry disabled
-adaptive_hash_rows_updated disabled
-file_num_open_files disabled
-ibuf_merges_insert disabled
-ibuf_merges_delete_mark disabled
-ibuf_merges_delete disabled
-ibuf_merges_discard_insert disabled
-ibuf_merges_discard_delete_mark disabled
-ibuf_merges_discard_delete disabled
-ibuf_merges disabled
-ibuf_size disabled
-innodb_master_thread_sleeps disabled
-innodb_activity_count disabled
-innodb_master_active_loops disabled
-innodb_master_idle_loops disabled
-innodb_background_drop_table_usec disabled
-innodb_ibuf_merge_usec disabled
-innodb_log_flush_usec disabled
-innodb_mem_validate_usec disabled
-innodb_master_purge_usec disabled
-innodb_dict_lru_usec disabled
-innodb_checkpoint_usec disabled
-innodb_dblwr_writes disabled
-innodb_dblwr_pages_written disabled
-innodb_page_size disabled
-innodb_rwlock_s_spin_waits disabled
-innodb_rwlock_x_spin_waits disabled
-innodb_rwlock_s_spin_rounds disabled
-innodb_rwlock_x_spin_rounds disabled
-innodb_rwlock_s_os_waits disabled
-innodb_rwlock_x_os_waits disabled
-dml_reads disabled
-dml_inserts disabled
-dml_deletes disabled
-dml_updates disabled
-ddl_background_drop_tables disabled
-icp_attempts disabled
-icp_no_match disabled
-icp_out_of_range disabled
-icp_match disabled
+select name from information_schema.innodb_metrics
+where status != IF(name like "log%", 'enabled', 'disabled');
+name
set global innodb_monitor_enable="os_%a_fs_ncs";
set global innodb_monitor_enable="os%pending%";
select name, status from information_schema.innodb_metrics
@@ -1989,7 +350,7 @@ name max_count min_count count max_count_reset min_count_reset count_reset statu
metadata_table_handles_opened NULL NULL 0 NULL NULL 0 disabled
set global innodb_monitor_enable = metadata_table_handles_opened;
drop table monitor_test;
-create table monitor_test(col int) engine = innodb;
+create table monitor_test(col int) engine = innodb stats_persistent=0;
select * from monitor_test;
col
select name, max_count, min_count, count,
@@ -2146,7 +507,7 @@ max_count_reset, min_count_reset, count_reset, status
from information_schema.innodb_metrics
where name like "file_num_open_files";
name max_count min_count count max_count_reset min_count_reset count_reset status
-file_num_open_files 3 3 3 3 3 3 enabled
+file_num_open_files # # # # # # enabled
set global innodb_monitor_disable = file_num_open_files;
set global innodb_monitor_enable = "icp%";
create table monitor_test(a char(3), b int, c char(2),
diff --git a/mysql-test/suite/sys_vars/r/innodb_monitor_enable_basic.result b/mysql-test/suite/sys_vars/r/innodb_monitor_enable_basic.result
index 8bb508c877e..7a7c0a6b6a2 100644
--- a/mysql-test/suite/sys_vars/r/innodb_monitor_enable_basic.result
+++ b/mysql-test/suite/sys_vars/r/innodb_monitor_enable_basic.result
@@ -25,14 +25,15 @@ buffer_pool_size disabled
buffer_pool_reads disabled
buffer_pool_read_requests disabled
buffer_pool_write_requests disabled
-buffer_pool_pages_in_flush disabled
buffer_pool_wait_free disabled
buffer_pool_read_ahead disabled
buffer_pool_read_ahead_evicted disabled
buffer_pool_pages_total disabled
buffer_pool_pages_misc disabled
buffer_pool_pages_data disabled
+buffer_pool_bytes_data disabled
buffer_pool_pages_dirty disabled
+buffer_pool_bytes_dirty disabled
buffer_pool_pages_free disabled
buffer_pages_created disabled
buffer_pages_written disabled
@@ -48,15 +49,15 @@ buffer_flush_batch_pages disabled
buffer_flush_neighbor_total_pages disabled
buffer_flush_neighbor disabled
buffer_flush_neighbor_pages disabled
-buffer_flush_max_dirty_total_pages disabled
-buffer_flush_max_dirty disabled
-buffer_flush_max_dirty_pages disabled
+buffer_flush_n_to_flush_requested disabled
+buffer_flush_avg_page_rate disabled
+buffer_flush_lsn_avg_rate disabled
+buffer_flush_pct_for_dirty disabled
+buffer_flush_pct_for_lsn disabled
+buffer_flush_sync_waits disabled
buffer_flush_adaptive_total_pages disabled
buffer_flush_adaptive disabled
buffer_flush_adaptive_pages disabled
-buffer_flush_async_total_pages disabled
-buffer_flush_async disabled
-buffer_flush_async_pages disabled
buffer_flush_sync_total_pages disabled
buffer_flush_sync disabled
buffer_flush_sync_pages disabled
@@ -156,6 +157,8 @@ log_write_requests disabled
log_writes disabled
compress_pages_compressed disabled
compress_pages_decompressed disabled
+compression_pad_increments disabled
+compression_pad_decrements disabled
index_splits disabled
index_merges disabled
adaptive_hash_searches disabled
@@ -199,657 +202,29 @@ dml_reads disabled
dml_inserts disabled
dml_deletes disabled
dml_updates disabled
+ddl_background_drop_indexes disabled
ddl_background_drop_tables disabled
+ddl_online_create_index disabled
+ddl_pending_alter_table disabled
icp_attempts disabled
icp_no_match disabled
icp_out_of_range disabled
icp_match disabled
set global innodb_monitor_enable = all;
-select name, status from information_schema.innodb_metrics;
-name status
-metadata_table_handles_opened enabled
-metadata_table_handles_closed enabled
-metadata_table_reference_count enabled
-metadata_mem_pool_size enabled
-lock_deadlocks enabled
-lock_timeouts enabled
-lock_rec_lock_waits enabled
-lock_table_lock_waits enabled
-lock_rec_lock_requests enabled
-lock_rec_lock_created enabled
-lock_rec_lock_removed enabled
-lock_rec_locks enabled
-lock_table_lock_created enabled
-lock_table_lock_removed enabled
-lock_table_locks enabled
-lock_row_lock_current_waits enabled
-lock_row_lock_time enabled
-lock_row_lock_time_max enabled
-lock_row_lock_waits enabled
-lock_row_lock_time_avg enabled
-buffer_pool_size enabled
-buffer_pool_reads enabled
-buffer_pool_read_requests enabled
-buffer_pool_write_requests enabled
-buffer_pool_pages_in_flush enabled
-buffer_pool_wait_free enabled
-buffer_pool_read_ahead enabled
-buffer_pool_read_ahead_evicted enabled
-buffer_pool_pages_total enabled
-buffer_pool_pages_misc enabled
-buffer_pool_pages_data enabled
-buffer_pool_pages_dirty enabled
-buffer_pool_pages_free enabled
-buffer_pages_created enabled
-buffer_pages_written enabled
-buffer_pages_read enabled
-buffer_data_reads enabled
-buffer_data_written enabled
-buffer_flush_batch_scanned enabled
-buffer_flush_batch_num_scan enabled
-buffer_flush_batch_scanned_per_call enabled
-buffer_flush_batch_total_pages enabled
-buffer_flush_batches enabled
-buffer_flush_batch_pages enabled
-buffer_flush_neighbor_total_pages enabled
-buffer_flush_neighbor enabled
-buffer_flush_neighbor_pages enabled
-buffer_flush_max_dirty_total_pages enabled
-buffer_flush_max_dirty enabled
-buffer_flush_max_dirty_pages enabled
-buffer_flush_adaptive_total_pages enabled
-buffer_flush_adaptive enabled
-buffer_flush_adaptive_pages enabled
-buffer_flush_async_total_pages enabled
-buffer_flush_async enabled
-buffer_flush_async_pages enabled
-buffer_flush_sync_total_pages enabled
-buffer_flush_sync enabled
-buffer_flush_sync_pages enabled
-buffer_flush_background_total_pages enabled
-buffer_flush_background enabled
-buffer_flush_background_pages enabled
-buffer_LRU_batch_scanned enabled
-buffer_LRU_batch_num_scan enabled
-buffer_LRU_batch_scanned_per_call enabled
-buffer_LRU_batch_total_pages enabled
-buffer_LRU_batches enabled
-buffer_LRU_batch_pages enabled
-buffer_LRU_single_flush_scanned enabled
-buffer_LRU_single_flush_num_scan enabled
-buffer_LRU_single_flush_scanned_per_call enabled
-buffer_LRU_single_flush_failure_count enabled
-buffer_LRU_get_free_search enabled
-buffer_LRU_search_scanned enabled
-buffer_LRU_search_num_scan enabled
-buffer_LRU_search_scanned_per_call enabled
-buffer_LRU_unzip_search_scanned enabled
-buffer_LRU_unzip_search_num_scan enabled
-buffer_LRU_unzip_search_scanned_per_call enabled
-buffer_page_read_index_leaf enabled
-buffer_page_read_index_non_leaf enabled
-buffer_page_read_index_ibuf_leaf enabled
-buffer_page_read_index_ibuf_non_leaf enabled
-buffer_page_read_undo_log enabled
-buffer_page_read_index_inode enabled
-buffer_page_read_ibuf_free_list enabled
-buffer_page_read_ibuf_bitmap enabled
-buffer_page_read_system_page enabled
-buffer_page_read_trx_system enabled
-buffer_page_read_fsp_hdr enabled
-buffer_page_read_xdes enabled
-buffer_page_read_blob enabled
-buffer_page_read_zblob enabled
-buffer_page_read_zblob2 enabled
-buffer_page_read_other enabled
-buffer_page_written_index_leaf enabled
-buffer_page_written_index_non_leaf enabled
-buffer_page_written_index_ibuf_leaf enabled
-buffer_page_written_index_ibuf_non_leaf enabled
-buffer_page_written_undo_log enabled
-buffer_page_written_index_inode enabled
-buffer_page_written_ibuf_free_list enabled
-buffer_page_written_ibuf_bitmap enabled
-buffer_page_written_system_page enabled
-buffer_page_written_trx_system enabled
-buffer_page_written_fsp_hdr enabled
-buffer_page_written_xdes enabled
-buffer_page_written_blob enabled
-buffer_page_written_zblob enabled
-buffer_page_written_zblob2 enabled
-buffer_page_written_other enabled
-os_data_reads enabled
-os_data_writes enabled
-os_data_fsyncs enabled
-os_pending_reads enabled
-os_pending_writes enabled
-os_log_bytes_written enabled
-os_log_fsyncs enabled
-os_log_pending_fsyncs enabled
-os_log_pending_writes enabled
-trx_rw_commits enabled
-trx_ro_commits enabled
-trx_nl_ro_commits enabled
-trx_commits_insert_update enabled
-trx_rollbacks enabled
-trx_rollbacks_savepoint enabled
-trx_rollback_active enabled
-trx_active_transactions enabled
-trx_rseg_history_len enabled
-trx_undo_slots_used enabled
-trx_undo_slots_cached enabled
-trx_rseg_current_size enabled
-purge_del_mark_records enabled
-purge_upd_exist_or_extern_records enabled
-purge_invoked enabled
-purge_undo_log_pages enabled
-purge_dml_delay_usec enabled
-purge_stop_count enabled
-purge_resume_count enabled
-log_checkpoints enabled
-log_lsn_last_flush enabled
-log_lsn_last_checkpoint enabled
-log_lsn_current enabled
-log_lsn_checkpoint_age enabled
-log_lsn_buf_pool_oldest enabled
-log_max_modified_age_async enabled
-log_max_modified_age_sync enabled
-log_pending_log_writes enabled
-log_pending_checkpoint_writes enabled
-log_num_log_io enabled
-log_waits enabled
-log_write_requests enabled
-log_writes enabled
-compress_pages_compressed enabled
-compress_pages_decompressed enabled
-index_splits enabled
-index_merges enabled
-adaptive_hash_searches enabled
-adaptive_hash_searches_btree enabled
-adaptive_hash_pages_added enabled
-adaptive_hash_pages_removed enabled
-adaptive_hash_rows_added enabled
-adaptive_hash_rows_removed enabled
-adaptive_hash_rows_deleted_no_hash_entry enabled
-adaptive_hash_rows_updated enabled
-file_num_open_files enabled
-ibuf_merges_insert enabled
-ibuf_merges_delete_mark enabled
-ibuf_merges_delete enabled
-ibuf_merges_discard_insert enabled
-ibuf_merges_discard_delete_mark enabled
-ibuf_merges_discard_delete enabled
-ibuf_merges enabled
-ibuf_size enabled
-innodb_master_thread_sleeps enabled
-innodb_activity_count enabled
-innodb_master_active_loops enabled
-innodb_master_idle_loops enabled
-innodb_background_drop_table_usec enabled
-innodb_ibuf_merge_usec enabled
-innodb_log_flush_usec enabled
-innodb_mem_validate_usec enabled
-innodb_master_purge_usec enabled
-innodb_dict_lru_usec enabled
-innodb_checkpoint_usec enabled
-innodb_dblwr_writes enabled
-innodb_dblwr_pages_written enabled
-innodb_page_size enabled
-innodb_rwlock_s_spin_waits enabled
-innodb_rwlock_x_spin_waits enabled
-innodb_rwlock_s_spin_rounds enabled
-innodb_rwlock_x_spin_rounds enabled
-innodb_rwlock_s_os_waits enabled
-innodb_rwlock_x_os_waits enabled
-dml_reads enabled
-dml_inserts enabled
-dml_deletes enabled
-dml_updates enabled
-ddl_background_drop_tables enabled
-icp_attempts enabled
-icp_no_match enabled
-icp_out_of_range enabled
-icp_match enabled
+select name from information_schema.innodb_metrics where status!='enabled';
+name
set global innodb_monitor_enable = aaa;
ERROR 42000: Variable 'innodb_monitor_enable' can't be set to the value of 'aaa'
set global innodb_monitor_disable = All;
-select name, status from information_schema.innodb_metrics;
-name status
-metadata_table_handles_opened disabled
-metadata_table_handles_closed disabled
-metadata_table_reference_count disabled
-metadata_mem_pool_size disabled
-lock_deadlocks disabled
-lock_timeouts disabled
-lock_rec_lock_waits disabled
-lock_table_lock_waits disabled
-lock_rec_lock_requests disabled
-lock_rec_lock_created disabled
-lock_rec_lock_removed disabled
-lock_rec_locks disabled
-lock_table_lock_created disabled
-lock_table_lock_removed disabled
-lock_table_locks disabled
-lock_row_lock_current_waits disabled
-lock_row_lock_time disabled
-lock_row_lock_time_max disabled
-lock_row_lock_waits disabled
-lock_row_lock_time_avg disabled
-buffer_pool_size disabled
-buffer_pool_reads disabled
-buffer_pool_read_requests disabled
-buffer_pool_write_requests disabled
-buffer_pool_pages_in_flush disabled
-buffer_pool_wait_free disabled
-buffer_pool_read_ahead disabled
-buffer_pool_read_ahead_evicted disabled
-buffer_pool_pages_total disabled
-buffer_pool_pages_misc disabled
-buffer_pool_pages_data disabled
-buffer_pool_pages_dirty disabled
-buffer_pool_pages_free disabled
-buffer_pages_created disabled
-buffer_pages_written disabled
-buffer_pages_read disabled
-buffer_data_reads disabled
-buffer_data_written disabled
-buffer_flush_batch_scanned disabled
-buffer_flush_batch_num_scan disabled
-buffer_flush_batch_scanned_per_call disabled
-buffer_flush_batch_total_pages disabled
-buffer_flush_batches disabled
-buffer_flush_batch_pages disabled
-buffer_flush_neighbor_total_pages disabled
-buffer_flush_neighbor disabled
-buffer_flush_neighbor_pages disabled
-buffer_flush_max_dirty_total_pages disabled
-buffer_flush_max_dirty disabled
-buffer_flush_max_dirty_pages disabled
-buffer_flush_adaptive_total_pages disabled
-buffer_flush_adaptive disabled
-buffer_flush_adaptive_pages disabled
-buffer_flush_async_total_pages disabled
-buffer_flush_async disabled
-buffer_flush_async_pages disabled
-buffer_flush_sync_total_pages disabled
-buffer_flush_sync disabled
-buffer_flush_sync_pages disabled
-buffer_flush_background_total_pages disabled
-buffer_flush_background disabled
-buffer_flush_background_pages disabled
-buffer_LRU_batch_scanned disabled
-buffer_LRU_batch_num_scan disabled
-buffer_LRU_batch_scanned_per_call disabled
-buffer_LRU_batch_total_pages disabled
-buffer_LRU_batches disabled
-buffer_LRU_batch_pages disabled
-buffer_LRU_single_flush_scanned disabled
-buffer_LRU_single_flush_num_scan disabled
-buffer_LRU_single_flush_scanned_per_call disabled
-buffer_LRU_single_flush_failure_count disabled
-buffer_LRU_get_free_search disabled
-buffer_LRU_search_scanned disabled
-buffer_LRU_search_num_scan disabled
-buffer_LRU_search_scanned_per_call disabled
-buffer_LRU_unzip_search_scanned disabled
-buffer_LRU_unzip_search_num_scan disabled
-buffer_LRU_unzip_search_scanned_per_call disabled
-buffer_page_read_index_leaf disabled
-buffer_page_read_index_non_leaf disabled
-buffer_page_read_index_ibuf_leaf disabled
-buffer_page_read_index_ibuf_non_leaf disabled
-buffer_page_read_undo_log disabled
-buffer_page_read_index_inode disabled
-buffer_page_read_ibuf_free_list disabled
-buffer_page_read_ibuf_bitmap disabled
-buffer_page_read_system_page disabled
-buffer_page_read_trx_system disabled
-buffer_page_read_fsp_hdr disabled
-buffer_page_read_xdes disabled
-buffer_page_read_blob disabled
-buffer_page_read_zblob disabled
-buffer_page_read_zblob2 disabled
-buffer_page_read_other disabled
-buffer_page_written_index_leaf disabled
-buffer_page_written_index_non_leaf disabled
-buffer_page_written_index_ibuf_leaf disabled
-buffer_page_written_index_ibuf_non_leaf disabled
-buffer_page_written_undo_log disabled
-buffer_page_written_index_inode disabled
-buffer_page_written_ibuf_free_list disabled
-buffer_page_written_ibuf_bitmap disabled
-buffer_page_written_system_page disabled
-buffer_page_written_trx_system disabled
-buffer_page_written_fsp_hdr disabled
-buffer_page_written_xdes disabled
-buffer_page_written_blob disabled
-buffer_page_written_zblob disabled
-buffer_page_written_zblob2 disabled
-buffer_page_written_other disabled
-os_data_reads disabled
-os_data_writes disabled
-os_data_fsyncs disabled
-os_pending_reads disabled
-os_pending_writes disabled
-os_log_bytes_written disabled
-os_log_fsyncs disabled
-os_log_pending_fsyncs disabled
-os_log_pending_writes disabled
-trx_rw_commits disabled
-trx_ro_commits disabled
-trx_nl_ro_commits disabled
-trx_commits_insert_update disabled
-trx_rollbacks disabled
-trx_rollbacks_savepoint disabled
-trx_rollback_active disabled
-trx_active_transactions disabled
-trx_rseg_history_len disabled
-trx_undo_slots_used disabled
-trx_undo_slots_cached disabled
-trx_rseg_current_size disabled
-purge_del_mark_records disabled
-purge_upd_exist_or_extern_records disabled
-purge_invoked disabled
-purge_undo_log_pages disabled
-purge_dml_delay_usec disabled
-purge_stop_count disabled
-purge_resume_count disabled
-log_checkpoints disabled
-log_lsn_last_flush disabled
-log_lsn_last_checkpoint disabled
-log_lsn_current disabled
-log_lsn_checkpoint_age disabled
-log_lsn_buf_pool_oldest disabled
-log_max_modified_age_async disabled
-log_max_modified_age_sync disabled
-log_pending_log_writes disabled
-log_pending_checkpoint_writes disabled
-log_num_log_io disabled
-log_waits disabled
-log_write_requests disabled
-log_writes disabled
-compress_pages_compressed disabled
-compress_pages_decompressed disabled
-index_splits disabled
-index_merges disabled
-adaptive_hash_searches disabled
-adaptive_hash_searches_btree disabled
-adaptive_hash_pages_added disabled
-adaptive_hash_pages_removed disabled
-adaptive_hash_rows_added disabled
-adaptive_hash_rows_removed disabled
-adaptive_hash_rows_deleted_no_hash_entry disabled
-adaptive_hash_rows_updated disabled
-file_num_open_files disabled
-ibuf_merges_insert disabled
-ibuf_merges_delete_mark disabled
-ibuf_merges_delete disabled
-ibuf_merges_discard_insert disabled
-ibuf_merges_discard_delete_mark disabled
-ibuf_merges_discard_delete disabled
-ibuf_merges disabled
-ibuf_size disabled
-innodb_master_thread_sleeps disabled
-innodb_activity_count disabled
-innodb_master_active_loops disabled
-innodb_master_idle_loops disabled
-innodb_background_drop_table_usec disabled
-innodb_ibuf_merge_usec disabled
-innodb_log_flush_usec disabled
-innodb_mem_validate_usec disabled
-innodb_master_purge_usec disabled
-innodb_dict_lru_usec disabled
-innodb_checkpoint_usec disabled
-innodb_dblwr_writes disabled
-innodb_dblwr_pages_written disabled
-innodb_page_size disabled
-innodb_rwlock_s_spin_waits disabled
-innodb_rwlock_x_spin_waits disabled
-innodb_rwlock_s_spin_rounds disabled
-innodb_rwlock_x_spin_rounds disabled
-innodb_rwlock_s_os_waits disabled
-innodb_rwlock_x_os_waits disabled
-dml_reads disabled
-dml_inserts disabled
-dml_deletes disabled
-dml_updates disabled
-ddl_background_drop_tables disabled
-icp_attempts disabled
-icp_no_match disabled
-icp_out_of_range disabled
-icp_match disabled
+select name from information_schema.innodb_metrics where status!='disabled';
+name
set global innodb_monitor_reset_all = all;
-select name, count, status from information_schema.innodb_metrics;
-name count status
-metadata_table_handles_opened 0 disabled
-metadata_table_handles_closed 0 disabled
-metadata_table_reference_count 0 disabled
-metadata_mem_pool_size 0 disabled
-lock_deadlocks 0 disabled
-lock_timeouts 0 disabled
-lock_rec_lock_waits 0 disabled
-lock_table_lock_waits 0 disabled
-lock_rec_lock_requests 0 disabled
-lock_rec_lock_created 0 disabled
-lock_rec_lock_removed 0 disabled
-lock_rec_locks 0 disabled
-lock_table_lock_created 0 disabled
-lock_table_lock_removed 0 disabled
-lock_table_locks 0 disabled
-lock_row_lock_current_waits 0 disabled
-lock_row_lock_time 0 disabled
-lock_row_lock_time_max 0 disabled
-lock_row_lock_waits 0 disabled
-lock_row_lock_time_avg 0 disabled
-buffer_pool_size 0 disabled
-buffer_pool_reads 0 disabled
-buffer_pool_read_requests 0 disabled
-buffer_pool_write_requests 0 disabled
-buffer_pool_pages_in_flush 0 disabled
-buffer_pool_wait_free 0 disabled
-buffer_pool_read_ahead 0 disabled
-buffer_pool_read_ahead_evicted 0 disabled
-buffer_pool_pages_total 0 disabled
-buffer_pool_pages_misc 0 disabled
-buffer_pool_pages_data 0 disabled
-buffer_pool_pages_dirty 0 disabled
-buffer_pool_pages_free 0 disabled
-buffer_pages_created 0 disabled
-buffer_pages_written 0 disabled
-buffer_pages_read 0 disabled
-buffer_data_reads 0 disabled
-buffer_data_written 0 disabled
-buffer_flush_batch_scanned 0 disabled
-buffer_flush_batch_num_scan 0 disabled
-buffer_flush_batch_scanned_per_call 0 disabled
-buffer_flush_batch_total_pages 0 disabled
-buffer_flush_batches 0 disabled
-buffer_flush_batch_pages 0 disabled
-buffer_flush_neighbor_total_pages 0 disabled
-buffer_flush_neighbor 0 disabled
-buffer_flush_neighbor_pages 0 disabled
-buffer_flush_max_dirty_total_pages 0 disabled
-buffer_flush_max_dirty 0 disabled
-buffer_flush_max_dirty_pages 0 disabled
-buffer_flush_adaptive_total_pages 0 disabled
-buffer_flush_adaptive 0 disabled
-buffer_flush_adaptive_pages 0 disabled
-buffer_flush_async_total_pages 0 disabled
-buffer_flush_async 0 disabled
-buffer_flush_async_pages 0 disabled
-buffer_flush_sync_total_pages 0 disabled
-buffer_flush_sync 0 disabled
-buffer_flush_sync_pages 0 disabled
-buffer_flush_background_total_pages 0 disabled
-buffer_flush_background 0 disabled
-buffer_flush_background_pages 0 disabled
-buffer_LRU_batch_scanned 0 disabled
-buffer_LRU_batch_num_scan 0 disabled
-buffer_LRU_batch_scanned_per_call 0 disabled
-buffer_LRU_batch_total_pages 0 disabled
-buffer_LRU_batches 0 disabled
-buffer_LRU_batch_pages 0 disabled
-buffer_LRU_single_flush_scanned 0 disabled
-buffer_LRU_single_flush_num_scan 0 disabled
-buffer_LRU_single_flush_scanned_per_call 0 disabled
-buffer_LRU_single_flush_failure_count 0 disabled
-buffer_LRU_get_free_search 0 disabled
-buffer_LRU_search_scanned 0 disabled
-buffer_LRU_search_num_scan 0 disabled
-buffer_LRU_search_scanned_per_call 0 disabled
-buffer_LRU_unzip_search_scanned 0 disabled
-buffer_LRU_unzip_search_num_scan 0 disabled
-buffer_LRU_unzip_search_scanned_per_call 0 disabled
-buffer_page_read_index_leaf 0 disabled
-buffer_page_read_index_non_leaf 0 disabled
-buffer_page_read_index_ibuf_leaf 0 disabled
-buffer_page_read_index_ibuf_non_leaf 0 disabled
-buffer_page_read_undo_log 0 disabled
-buffer_page_read_index_inode 0 disabled
-buffer_page_read_ibuf_free_list 0 disabled
-buffer_page_read_ibuf_bitmap 0 disabled
-buffer_page_read_system_page 0 disabled
-buffer_page_read_trx_system 0 disabled
-buffer_page_read_fsp_hdr 0 disabled
-buffer_page_read_xdes 0 disabled
-buffer_page_read_blob 0 disabled
-buffer_page_read_zblob 0 disabled
-buffer_page_read_zblob2 0 disabled
-buffer_page_read_other 0 disabled
-buffer_page_written_index_leaf 0 disabled
-buffer_page_written_index_non_leaf 0 disabled
-buffer_page_written_index_ibuf_leaf 0 disabled
-buffer_page_written_index_ibuf_non_leaf 0 disabled
-buffer_page_written_undo_log 0 disabled
-buffer_page_written_index_inode 0 disabled
-buffer_page_written_ibuf_free_list 0 disabled
-buffer_page_written_ibuf_bitmap 0 disabled
-buffer_page_written_system_page 0 disabled
-buffer_page_written_trx_system 0 disabled
-buffer_page_written_fsp_hdr 0 disabled
-buffer_page_written_xdes 0 disabled
-buffer_page_written_blob 0 disabled
-buffer_page_written_zblob 0 disabled
-buffer_page_written_zblob2 0 disabled
-buffer_page_written_other 0 disabled
-os_data_reads 0 disabled
-os_data_writes 0 disabled
-os_data_fsyncs 0 disabled
-os_pending_reads 0 disabled
-os_pending_writes 0 disabled
-os_log_bytes_written 0 disabled
-os_log_fsyncs 0 disabled
-os_log_pending_fsyncs 0 disabled
-os_log_pending_writes 0 disabled
-trx_rw_commits 0 disabled
-trx_ro_commits 0 disabled
-trx_nl_ro_commits 0 disabled
-trx_commits_insert_update 0 disabled
-trx_rollbacks 0 disabled
-trx_rollbacks_savepoint 0 disabled
-trx_rollback_active 0 disabled
-trx_active_transactions 0 disabled
-trx_rseg_history_len 0 disabled
-trx_undo_slots_used 0 disabled
-trx_undo_slots_cached 0 disabled
-trx_rseg_current_size 0 disabled
-purge_del_mark_records 0 disabled
-purge_upd_exist_or_extern_records 0 disabled
-purge_invoked 0 disabled
-purge_undo_log_pages 0 disabled
-purge_dml_delay_usec 0 disabled
-purge_stop_count 0 disabled
-purge_resume_count 0 disabled
-log_checkpoints 0 disabled
-log_lsn_last_flush 0 disabled
-log_lsn_last_checkpoint 0 disabled
-log_lsn_current 0 disabled
-log_lsn_checkpoint_age 0 disabled
-log_lsn_buf_pool_oldest 0 disabled
-log_max_modified_age_async 0 disabled
-log_max_modified_age_sync 0 disabled
-log_pending_log_writes 0 disabled
-log_pending_checkpoint_writes 0 disabled
-log_num_log_io 0 disabled
-log_waits 0 disabled
-log_write_requests 0 disabled
-log_writes 0 disabled
-compress_pages_compressed 0 disabled
-compress_pages_decompressed 0 disabled
-index_splits 0 disabled
-index_merges 0 disabled
-adaptive_hash_searches 0 disabled
-adaptive_hash_searches_btree 0 disabled
-adaptive_hash_pages_added 0 disabled
-adaptive_hash_pages_removed 0 disabled
-adaptive_hash_rows_added 0 disabled
-adaptive_hash_rows_removed 0 disabled
-adaptive_hash_rows_deleted_no_hash_entry 0 disabled
-adaptive_hash_rows_updated 0 disabled
-file_num_open_files 0 disabled
-ibuf_merges_insert 0 disabled
-ibuf_merges_delete_mark 0 disabled
-ibuf_merges_delete 0 disabled
-ibuf_merges_discard_insert 0 disabled
-ibuf_merges_discard_delete_mark 0 disabled
-ibuf_merges_discard_delete 0 disabled
-ibuf_merges 0 disabled
-ibuf_size 0 disabled
-innodb_master_thread_sleeps 0 disabled
-innodb_activity_count 0 disabled
-innodb_master_active_loops 0 disabled
-innodb_master_idle_loops 0 disabled
-innodb_background_drop_table_usec 0 disabled
-innodb_ibuf_merge_usec 0 disabled
-innodb_log_flush_usec 0 disabled
-innodb_mem_validate_usec 0 disabled
-innodb_master_purge_usec 0 disabled
-innodb_dict_lru_usec 0 disabled
-innodb_checkpoint_usec 0 disabled
-innodb_dblwr_writes 0 disabled
-innodb_dblwr_pages_written 0 disabled
-innodb_page_size 0 disabled
-innodb_rwlock_s_spin_waits 0 disabled
-innodb_rwlock_x_spin_waits 0 disabled
-innodb_rwlock_s_spin_rounds 0 disabled
-innodb_rwlock_x_spin_rounds 0 disabled
-innodb_rwlock_s_os_waits 0 disabled
-innodb_rwlock_x_os_waits 0 disabled
-dml_reads 0 disabled
-dml_inserts 0 disabled
-dml_deletes 0 disabled
-dml_updates 0 disabled
-ddl_background_drop_tables 0 disabled
-icp_attempts 0 disabled
-icp_no_match 0 disabled
-icp_out_of_range 0 disabled
-icp_match 0 disabled
+select name from information_schema.innodb_metrics where count!=0;
+name
set global innodb_monitor_enable = "%lock%";
-select name, status from information_schema.innodb_metrics
-where name like "%lock%";
-name status
-lock_deadlocks enabled
-lock_timeouts enabled
-lock_rec_lock_waits enabled
-lock_table_lock_waits enabled
-lock_rec_lock_requests enabled
-lock_rec_lock_created enabled
-lock_rec_lock_removed enabled
-lock_rec_locks enabled
-lock_table_lock_created enabled
-lock_table_lock_removed enabled
-lock_table_locks enabled
-lock_row_lock_current_waits enabled
-lock_row_lock_time enabled
-lock_row_lock_time_max enabled
-lock_row_lock_waits enabled
-lock_row_lock_time_avg enabled
-innodb_rwlock_s_spin_waits enabled
-innodb_rwlock_x_spin_waits enabled
-innodb_rwlock_s_spin_rounds enabled
-innodb_rwlock_x_spin_rounds enabled
-innodb_rwlock_s_os_waits enabled
-innodb_rwlock_x_os_waits enabled
+select name from information_schema.innodb_metrics
+where status != IF(name like "%lock%", 'enabled', 'disabled');
+name
set global innodb_monitor_disable = "%lock%";
select name, status from information_schema.innodb_metrics
where name like "%lock%";
@@ -879,1035 +254,21 @@ innodb_rwlock_x_os_waits disabled
set global innodb_monitor_enable = "%lock*";
ERROR 42000: Variable 'innodb_monitor_enable' can't be set to the value of '%lock*'
set global innodb_monitor_enable="%%%%%%%%%%%%%%%%%%%%%%%%%%%";
-select name, status from information_schema.innodb_metrics;
-name status
-metadata_table_handles_opened enabled
-metadata_table_handles_closed enabled
-metadata_table_reference_count enabled
-metadata_mem_pool_size enabled
-lock_deadlocks enabled
-lock_timeouts enabled
-lock_rec_lock_waits enabled
-lock_table_lock_waits enabled
-lock_rec_lock_requests enabled
-lock_rec_lock_created enabled
-lock_rec_lock_removed enabled
-lock_rec_locks enabled
-lock_table_lock_created enabled
-lock_table_lock_removed enabled
-lock_table_locks enabled
-lock_row_lock_current_waits enabled
-lock_row_lock_time enabled
-lock_row_lock_time_max enabled
-lock_row_lock_waits enabled
-lock_row_lock_time_avg enabled
-buffer_pool_size enabled
-buffer_pool_reads enabled
-buffer_pool_read_requests enabled
-buffer_pool_write_requests enabled
-buffer_pool_pages_in_flush enabled
-buffer_pool_wait_free enabled
-buffer_pool_read_ahead enabled
-buffer_pool_read_ahead_evicted enabled
-buffer_pool_pages_total enabled
-buffer_pool_pages_misc enabled
-buffer_pool_pages_data enabled
-buffer_pool_pages_dirty enabled
-buffer_pool_pages_free enabled
-buffer_pages_created enabled
-buffer_pages_written enabled
-buffer_pages_read enabled
-buffer_data_reads enabled
-buffer_data_written enabled
-buffer_flush_batch_scanned enabled
-buffer_flush_batch_num_scan enabled
-buffer_flush_batch_scanned_per_call enabled
-buffer_flush_batch_total_pages enabled
-buffer_flush_batches enabled
-buffer_flush_batch_pages enabled
-buffer_flush_neighbor_total_pages enabled
-buffer_flush_neighbor enabled
-buffer_flush_neighbor_pages enabled
-buffer_flush_max_dirty_total_pages enabled
-buffer_flush_max_dirty enabled
-buffer_flush_max_dirty_pages enabled
-buffer_flush_adaptive_total_pages enabled
-buffer_flush_adaptive enabled
-buffer_flush_adaptive_pages enabled
-buffer_flush_async_total_pages enabled
-buffer_flush_async enabled
-buffer_flush_async_pages enabled
-buffer_flush_sync_total_pages enabled
-buffer_flush_sync enabled
-buffer_flush_sync_pages enabled
-buffer_flush_background_total_pages enabled
-buffer_flush_background enabled
-buffer_flush_background_pages enabled
-buffer_LRU_batch_scanned enabled
-buffer_LRU_batch_num_scan enabled
-buffer_LRU_batch_scanned_per_call enabled
-buffer_LRU_batch_total_pages enabled
-buffer_LRU_batches enabled
-buffer_LRU_batch_pages enabled
-buffer_LRU_single_flush_scanned enabled
-buffer_LRU_single_flush_num_scan enabled
-buffer_LRU_single_flush_scanned_per_call enabled
-buffer_LRU_single_flush_failure_count enabled
-buffer_LRU_get_free_search enabled
-buffer_LRU_search_scanned enabled
-buffer_LRU_search_num_scan enabled
-buffer_LRU_search_scanned_per_call enabled
-buffer_LRU_unzip_search_scanned enabled
-buffer_LRU_unzip_search_num_scan enabled
-buffer_LRU_unzip_search_scanned_per_call enabled
-buffer_page_read_index_leaf enabled
-buffer_page_read_index_non_leaf enabled
-buffer_page_read_index_ibuf_leaf enabled
-buffer_page_read_index_ibuf_non_leaf enabled
-buffer_page_read_undo_log enabled
-buffer_page_read_index_inode enabled
-buffer_page_read_ibuf_free_list enabled
-buffer_page_read_ibuf_bitmap enabled
-buffer_page_read_system_page enabled
-buffer_page_read_trx_system enabled
-buffer_page_read_fsp_hdr enabled
-buffer_page_read_xdes enabled
-buffer_page_read_blob enabled
-buffer_page_read_zblob enabled
-buffer_page_read_zblob2 enabled
-buffer_page_read_other enabled
-buffer_page_written_index_leaf enabled
-buffer_page_written_index_non_leaf enabled
-buffer_page_written_index_ibuf_leaf enabled
-buffer_page_written_index_ibuf_non_leaf enabled
-buffer_page_written_undo_log enabled
-buffer_page_written_index_inode enabled
-buffer_page_written_ibuf_free_list enabled
-buffer_page_written_ibuf_bitmap enabled
-buffer_page_written_system_page enabled
-buffer_page_written_trx_system enabled
-buffer_page_written_fsp_hdr enabled
-buffer_page_written_xdes enabled
-buffer_page_written_blob enabled
-buffer_page_written_zblob enabled
-buffer_page_written_zblob2 enabled
-buffer_page_written_other enabled
-os_data_reads enabled
-os_data_writes enabled
-os_data_fsyncs enabled
-os_pending_reads enabled
-os_pending_writes enabled
-os_log_bytes_written enabled
-os_log_fsyncs enabled
-os_log_pending_fsyncs enabled
-os_log_pending_writes enabled
-trx_rw_commits enabled
-trx_ro_commits enabled
-trx_nl_ro_commits enabled
-trx_commits_insert_update enabled
-trx_rollbacks enabled
-trx_rollbacks_savepoint enabled
-trx_rollback_active enabled
-trx_active_transactions enabled
-trx_rseg_history_len enabled
-trx_undo_slots_used enabled
-trx_undo_slots_cached enabled
-trx_rseg_current_size enabled
-purge_del_mark_records enabled
-purge_upd_exist_or_extern_records enabled
-purge_invoked enabled
-purge_undo_log_pages enabled
-purge_dml_delay_usec enabled
-purge_stop_count enabled
-purge_resume_count enabled
-log_checkpoints enabled
-log_lsn_last_flush enabled
-log_lsn_last_checkpoint enabled
-log_lsn_current enabled
-log_lsn_checkpoint_age enabled
-log_lsn_buf_pool_oldest enabled
-log_max_modified_age_async enabled
-log_max_modified_age_sync enabled
-log_pending_log_writes enabled
-log_pending_checkpoint_writes enabled
-log_num_log_io enabled
-log_waits enabled
-log_write_requests enabled
-log_writes enabled
-compress_pages_compressed enabled
-compress_pages_decompressed enabled
-index_splits enabled
-index_merges enabled
-adaptive_hash_searches enabled
-adaptive_hash_searches_btree enabled
-adaptive_hash_pages_added enabled
-adaptive_hash_pages_removed enabled
-adaptive_hash_rows_added enabled
-adaptive_hash_rows_removed enabled
-adaptive_hash_rows_deleted_no_hash_entry enabled
-adaptive_hash_rows_updated enabled
-file_num_open_files enabled
-ibuf_merges_insert enabled
-ibuf_merges_delete_mark enabled
-ibuf_merges_delete enabled
-ibuf_merges_discard_insert enabled
-ibuf_merges_discard_delete_mark enabled
-ibuf_merges_discard_delete enabled
-ibuf_merges enabled
-ibuf_size enabled
-innodb_master_thread_sleeps enabled
-innodb_activity_count enabled
-innodb_master_active_loops enabled
-innodb_master_idle_loops enabled
-innodb_background_drop_table_usec enabled
-innodb_ibuf_merge_usec enabled
-innodb_log_flush_usec enabled
-innodb_mem_validate_usec enabled
-innodb_master_purge_usec enabled
-innodb_dict_lru_usec enabled
-innodb_checkpoint_usec enabled
-innodb_dblwr_writes enabled
-innodb_dblwr_pages_written enabled
-innodb_page_size enabled
-innodb_rwlock_s_spin_waits enabled
-innodb_rwlock_x_spin_waits enabled
-innodb_rwlock_s_spin_rounds enabled
-innodb_rwlock_x_spin_rounds enabled
-innodb_rwlock_s_os_waits enabled
-innodb_rwlock_x_os_waits enabled
-dml_reads enabled
-dml_inserts enabled
-dml_deletes enabled
-dml_updates enabled
-ddl_background_drop_tables enabled
-icp_attempts enabled
-icp_no_match enabled
-icp_out_of_range enabled
-icp_match enabled
+select name from information_schema.innodb_metrics where status!='enabled';
+name
set global innodb_monitor_disable="%%%%%";
-select name, status from information_schema.innodb_metrics;
-name status
-metadata_table_handles_opened disabled
-metadata_table_handles_closed disabled
-metadata_table_reference_count disabled
-metadata_mem_pool_size disabled
-lock_deadlocks disabled
-lock_timeouts disabled
-lock_rec_lock_waits disabled
-lock_table_lock_waits disabled
-lock_rec_lock_requests disabled
-lock_rec_lock_created disabled
-lock_rec_lock_removed disabled
-lock_rec_locks disabled
-lock_table_lock_created disabled
-lock_table_lock_removed disabled
-lock_table_locks disabled
-lock_row_lock_current_waits disabled
-lock_row_lock_time disabled
-lock_row_lock_time_max disabled
-lock_row_lock_waits disabled
-lock_row_lock_time_avg disabled
-buffer_pool_size disabled
-buffer_pool_reads disabled
-buffer_pool_read_requests disabled
-buffer_pool_write_requests disabled
-buffer_pool_pages_in_flush disabled
-buffer_pool_wait_free disabled
-buffer_pool_read_ahead disabled
-buffer_pool_read_ahead_evicted disabled
-buffer_pool_pages_total disabled
-buffer_pool_pages_misc disabled
-buffer_pool_pages_data disabled
-buffer_pool_pages_dirty disabled
-buffer_pool_pages_free disabled
-buffer_pages_created disabled
-buffer_pages_written disabled
-buffer_pages_read disabled
-buffer_data_reads disabled
-buffer_data_written disabled
-buffer_flush_batch_scanned disabled
-buffer_flush_batch_num_scan disabled
-buffer_flush_batch_scanned_per_call disabled
-buffer_flush_batch_total_pages disabled
-buffer_flush_batches disabled
-buffer_flush_batch_pages disabled
-buffer_flush_neighbor_total_pages disabled
-buffer_flush_neighbor disabled
-buffer_flush_neighbor_pages disabled
-buffer_flush_max_dirty_total_pages disabled
-buffer_flush_max_dirty disabled
-buffer_flush_max_dirty_pages disabled
-buffer_flush_adaptive_total_pages disabled
-buffer_flush_adaptive disabled
-buffer_flush_adaptive_pages disabled
-buffer_flush_async_total_pages disabled
-buffer_flush_async disabled
-buffer_flush_async_pages disabled
-buffer_flush_sync_total_pages disabled
-buffer_flush_sync disabled
-buffer_flush_sync_pages disabled
-buffer_flush_background_total_pages disabled
-buffer_flush_background disabled
-buffer_flush_background_pages disabled
-buffer_LRU_batch_scanned disabled
-buffer_LRU_batch_num_scan disabled
-buffer_LRU_batch_scanned_per_call disabled
-buffer_LRU_batch_total_pages disabled
-buffer_LRU_batches disabled
-buffer_LRU_batch_pages disabled
-buffer_LRU_single_flush_scanned disabled
-buffer_LRU_single_flush_num_scan disabled
-buffer_LRU_single_flush_scanned_per_call disabled
-buffer_LRU_single_flush_failure_count disabled
-buffer_LRU_get_free_search disabled
-buffer_LRU_search_scanned disabled
-buffer_LRU_search_num_scan disabled
-buffer_LRU_search_scanned_per_call disabled
-buffer_LRU_unzip_search_scanned disabled
-buffer_LRU_unzip_search_num_scan disabled
-buffer_LRU_unzip_search_scanned_per_call disabled
-buffer_page_read_index_leaf disabled
-buffer_page_read_index_non_leaf disabled
-buffer_page_read_index_ibuf_leaf disabled
-buffer_page_read_index_ibuf_non_leaf disabled
-buffer_page_read_undo_log disabled
-buffer_page_read_index_inode disabled
-buffer_page_read_ibuf_free_list disabled
-buffer_page_read_ibuf_bitmap disabled
-buffer_page_read_system_page disabled
-buffer_page_read_trx_system disabled
-buffer_page_read_fsp_hdr disabled
-buffer_page_read_xdes disabled
-buffer_page_read_blob disabled
-buffer_page_read_zblob disabled
-buffer_page_read_zblob2 disabled
-buffer_page_read_other disabled
-buffer_page_written_index_leaf disabled
-buffer_page_written_index_non_leaf disabled
-buffer_page_written_index_ibuf_leaf disabled
-buffer_page_written_index_ibuf_non_leaf disabled
-buffer_page_written_undo_log disabled
-buffer_page_written_index_inode disabled
-buffer_page_written_ibuf_free_list disabled
-buffer_page_written_ibuf_bitmap disabled
-buffer_page_written_system_page disabled
-buffer_page_written_trx_system disabled
-buffer_page_written_fsp_hdr disabled
-buffer_page_written_xdes disabled
-buffer_page_written_blob disabled
-buffer_page_written_zblob disabled
-buffer_page_written_zblob2 disabled
-buffer_page_written_other disabled
-os_data_reads disabled
-os_data_writes disabled
-os_data_fsyncs disabled
-os_pending_reads disabled
-os_pending_writes disabled
-os_log_bytes_written disabled
-os_log_fsyncs disabled
-os_log_pending_fsyncs disabled
-os_log_pending_writes disabled
-trx_rw_commits disabled
-trx_ro_commits disabled
-trx_nl_ro_commits disabled
-trx_commits_insert_update disabled
-trx_rollbacks disabled
-trx_rollbacks_savepoint disabled
-trx_rollback_active disabled
-trx_active_transactions disabled
-trx_rseg_history_len disabled
-trx_undo_slots_used disabled
-trx_undo_slots_cached disabled
-trx_rseg_current_size disabled
-purge_del_mark_records disabled
-purge_upd_exist_or_extern_records disabled
-purge_invoked disabled
-purge_undo_log_pages disabled
-purge_dml_delay_usec disabled
-purge_stop_count disabled
-purge_resume_count disabled
-log_checkpoints disabled
-log_lsn_last_flush disabled
-log_lsn_last_checkpoint disabled
-log_lsn_current disabled
-log_lsn_checkpoint_age disabled
-log_lsn_buf_pool_oldest disabled
-log_max_modified_age_async disabled
-log_max_modified_age_sync disabled
-log_pending_log_writes disabled
-log_pending_checkpoint_writes disabled
-log_num_log_io disabled
-log_waits disabled
-log_write_requests disabled
-log_writes disabled
-compress_pages_compressed disabled
-compress_pages_decompressed disabled
-index_splits disabled
-index_merges disabled
-adaptive_hash_searches disabled
-adaptive_hash_searches_btree disabled
-adaptive_hash_pages_added disabled
-adaptive_hash_pages_removed disabled
-adaptive_hash_rows_added disabled
-adaptive_hash_rows_removed disabled
-adaptive_hash_rows_deleted_no_hash_entry disabled
-adaptive_hash_rows_updated disabled
-file_num_open_files disabled
-ibuf_merges_insert disabled
-ibuf_merges_delete_mark disabled
-ibuf_merges_delete disabled
-ibuf_merges_discard_insert disabled
-ibuf_merges_discard_delete_mark disabled
-ibuf_merges_discard_delete disabled
-ibuf_merges disabled
-ibuf_size disabled
-innodb_master_thread_sleeps disabled
-innodb_activity_count disabled
-innodb_master_active_loops disabled
-innodb_master_idle_loops disabled
-innodb_background_drop_table_usec disabled
-innodb_ibuf_merge_usec disabled
-innodb_log_flush_usec disabled
-innodb_mem_validate_usec disabled
-innodb_master_purge_usec disabled
-innodb_dict_lru_usec disabled
-innodb_checkpoint_usec disabled
-innodb_dblwr_writes disabled
-innodb_dblwr_pages_written disabled
-innodb_page_size disabled
-innodb_rwlock_s_spin_waits disabled
-innodb_rwlock_x_spin_waits disabled
-innodb_rwlock_s_spin_rounds disabled
-innodb_rwlock_x_spin_rounds disabled
-innodb_rwlock_s_os_waits disabled
-innodb_rwlock_x_os_waits disabled
-dml_reads disabled
-dml_inserts disabled
-dml_deletes disabled
-dml_updates disabled
-ddl_background_drop_tables disabled
-icp_attempts disabled
-icp_no_match disabled
-icp_out_of_range disabled
-icp_match disabled
+select name from information_schema.innodb_metrics where status!='disabled';
+name
set global innodb_monitor_enable="%";
-select name, status from information_schema.innodb_metrics;
-name status
-metadata_table_handles_opened enabled
-metadata_table_handles_closed enabled
-metadata_table_reference_count enabled
-metadata_mem_pool_size enabled
-lock_deadlocks enabled
-lock_timeouts enabled
-lock_rec_lock_waits enabled
-lock_table_lock_waits enabled
-lock_rec_lock_requests enabled
-lock_rec_lock_created enabled
-lock_rec_lock_removed enabled
-lock_rec_locks enabled
-lock_table_lock_created enabled
-lock_table_lock_removed enabled
-lock_table_locks enabled
-lock_row_lock_current_waits enabled
-lock_row_lock_time enabled
-lock_row_lock_time_max enabled
-lock_row_lock_waits enabled
-lock_row_lock_time_avg enabled
-buffer_pool_size enabled
-buffer_pool_reads enabled
-buffer_pool_read_requests enabled
-buffer_pool_write_requests enabled
-buffer_pool_pages_in_flush enabled
-buffer_pool_wait_free enabled
-buffer_pool_read_ahead enabled
-buffer_pool_read_ahead_evicted enabled
-buffer_pool_pages_total enabled
-buffer_pool_pages_misc enabled
-buffer_pool_pages_data enabled
-buffer_pool_pages_dirty enabled
-buffer_pool_pages_free enabled
-buffer_pages_created enabled
-buffer_pages_written enabled
-buffer_pages_read enabled
-buffer_data_reads enabled
-buffer_data_written enabled
-buffer_flush_batch_scanned enabled
-buffer_flush_batch_num_scan enabled
-buffer_flush_batch_scanned_per_call enabled
-buffer_flush_batch_total_pages enabled
-buffer_flush_batches enabled
-buffer_flush_batch_pages enabled
-buffer_flush_neighbor_total_pages enabled
-buffer_flush_neighbor enabled
-buffer_flush_neighbor_pages enabled
-buffer_flush_max_dirty_total_pages enabled
-buffer_flush_max_dirty enabled
-buffer_flush_max_dirty_pages enabled
-buffer_flush_adaptive_total_pages enabled
-buffer_flush_adaptive enabled
-buffer_flush_adaptive_pages enabled
-buffer_flush_async_total_pages enabled
-buffer_flush_async enabled
-buffer_flush_async_pages enabled
-buffer_flush_sync_total_pages enabled
-buffer_flush_sync enabled
-buffer_flush_sync_pages enabled
-buffer_flush_background_total_pages enabled
-buffer_flush_background enabled
-buffer_flush_background_pages enabled
-buffer_LRU_batch_scanned enabled
-buffer_LRU_batch_num_scan enabled
-buffer_LRU_batch_scanned_per_call enabled
-buffer_LRU_batch_total_pages enabled
-buffer_LRU_batches enabled
-buffer_LRU_batch_pages enabled
-buffer_LRU_single_flush_scanned enabled
-buffer_LRU_single_flush_num_scan enabled
-buffer_LRU_single_flush_scanned_per_call enabled
-buffer_LRU_single_flush_failure_count enabled
-buffer_LRU_get_free_search enabled
-buffer_LRU_search_scanned enabled
-buffer_LRU_search_num_scan enabled
-buffer_LRU_search_scanned_per_call enabled
-buffer_LRU_unzip_search_scanned enabled
-buffer_LRU_unzip_search_num_scan enabled
-buffer_LRU_unzip_search_scanned_per_call enabled
-buffer_page_read_index_leaf enabled
-buffer_page_read_index_non_leaf enabled
-buffer_page_read_index_ibuf_leaf enabled
-buffer_page_read_index_ibuf_non_leaf enabled
-buffer_page_read_undo_log enabled
-buffer_page_read_index_inode enabled
-buffer_page_read_ibuf_free_list enabled
-buffer_page_read_ibuf_bitmap enabled
-buffer_page_read_system_page enabled
-buffer_page_read_trx_system enabled
-buffer_page_read_fsp_hdr enabled
-buffer_page_read_xdes enabled
-buffer_page_read_blob enabled
-buffer_page_read_zblob enabled
-buffer_page_read_zblob2 enabled
-buffer_page_read_other enabled
-buffer_page_written_index_leaf enabled
-buffer_page_written_index_non_leaf enabled
-buffer_page_written_index_ibuf_leaf enabled
-buffer_page_written_index_ibuf_non_leaf enabled
-buffer_page_written_undo_log enabled
-buffer_page_written_index_inode enabled
-buffer_page_written_ibuf_free_list enabled
-buffer_page_written_ibuf_bitmap enabled
-buffer_page_written_system_page enabled
-buffer_page_written_trx_system enabled
-buffer_page_written_fsp_hdr enabled
-buffer_page_written_xdes enabled
-buffer_page_written_blob enabled
-buffer_page_written_zblob enabled
-buffer_page_written_zblob2 enabled
-buffer_page_written_other enabled
-os_data_reads enabled
-os_data_writes enabled
-os_data_fsyncs enabled
-os_pending_reads enabled
-os_pending_writes enabled
-os_log_bytes_written enabled
-os_log_fsyncs enabled
-os_log_pending_fsyncs enabled
-os_log_pending_writes enabled
-trx_rw_commits enabled
-trx_ro_commits enabled
-trx_nl_ro_commits enabled
-trx_commits_insert_update enabled
-trx_rollbacks enabled
-trx_rollbacks_savepoint enabled
-trx_rollback_active enabled
-trx_active_transactions enabled
-trx_rseg_history_len enabled
-trx_undo_slots_used enabled
-trx_undo_slots_cached enabled
-trx_rseg_current_size enabled
-purge_del_mark_records enabled
-purge_upd_exist_or_extern_records enabled
-purge_invoked enabled
-purge_undo_log_pages enabled
-purge_dml_delay_usec enabled
-purge_stop_count enabled
-purge_resume_count enabled
-log_checkpoints enabled
-log_lsn_last_flush enabled
-log_lsn_last_checkpoint enabled
-log_lsn_current enabled
-log_lsn_checkpoint_age enabled
-log_lsn_buf_pool_oldest enabled
-log_max_modified_age_async enabled
-log_max_modified_age_sync enabled
-log_pending_log_writes enabled
-log_pending_checkpoint_writes enabled
-log_num_log_io enabled
-log_waits enabled
-log_write_requests enabled
-log_writes enabled
-compress_pages_compressed enabled
-compress_pages_decompressed enabled
-index_splits enabled
-index_merges enabled
-adaptive_hash_searches enabled
-adaptive_hash_searches_btree enabled
-adaptive_hash_pages_added enabled
-adaptive_hash_pages_removed enabled
-adaptive_hash_rows_added enabled
-adaptive_hash_rows_removed enabled
-adaptive_hash_rows_deleted_no_hash_entry enabled
-adaptive_hash_rows_updated enabled
-file_num_open_files enabled
-ibuf_merges_insert enabled
-ibuf_merges_delete_mark enabled
-ibuf_merges_delete enabled
-ibuf_merges_discard_insert enabled
-ibuf_merges_discard_delete_mark enabled
-ibuf_merges_discard_delete enabled
-ibuf_merges enabled
-ibuf_size enabled
-innodb_master_thread_sleeps enabled
-innodb_activity_count enabled
-innodb_master_active_loops enabled
-innodb_master_idle_loops enabled
-innodb_background_drop_table_usec enabled
-innodb_ibuf_merge_usec enabled
-innodb_log_flush_usec enabled
-innodb_mem_validate_usec enabled
-innodb_master_purge_usec enabled
-innodb_dict_lru_usec enabled
-innodb_checkpoint_usec enabled
-innodb_dblwr_writes enabled
-innodb_dblwr_pages_written enabled
-innodb_page_size enabled
-innodb_rwlock_s_spin_waits enabled
-innodb_rwlock_x_spin_waits enabled
-innodb_rwlock_s_spin_rounds enabled
-innodb_rwlock_x_spin_rounds enabled
-innodb_rwlock_s_os_waits enabled
-innodb_rwlock_x_os_waits enabled
-dml_reads enabled
-dml_inserts enabled
-dml_deletes enabled
-dml_updates enabled
-ddl_background_drop_tables enabled
-icp_attempts enabled
-icp_no_match enabled
-icp_out_of_range enabled
-icp_match enabled
+select name from information_schema.innodb_metrics where status!='enabled';
+name
set global innodb_monitor_disable="%_%";
-select name, status from information_schema.innodb_metrics;
-name status
-metadata_table_handles_opened disabled
-metadata_table_handles_closed disabled
-metadata_table_reference_count disabled
-metadata_mem_pool_size disabled
-lock_deadlocks disabled
-lock_timeouts disabled
-lock_rec_lock_waits disabled
-lock_table_lock_waits disabled
-lock_rec_lock_requests disabled
-lock_rec_lock_created disabled
-lock_rec_lock_removed disabled
-lock_rec_locks disabled
-lock_table_lock_created disabled
-lock_table_lock_removed disabled
-lock_table_locks disabled
-lock_row_lock_current_waits disabled
-lock_row_lock_time disabled
-lock_row_lock_time_max disabled
-lock_row_lock_waits disabled
-lock_row_lock_time_avg disabled
-buffer_pool_size disabled
-buffer_pool_reads disabled
-buffer_pool_read_requests disabled
-buffer_pool_write_requests disabled
-buffer_pool_pages_in_flush disabled
-buffer_pool_wait_free disabled
-buffer_pool_read_ahead disabled
-buffer_pool_read_ahead_evicted disabled
-buffer_pool_pages_total disabled
-buffer_pool_pages_misc disabled
-buffer_pool_pages_data disabled
-buffer_pool_pages_dirty disabled
-buffer_pool_pages_free disabled
-buffer_pages_created disabled
-buffer_pages_written disabled
-buffer_pages_read disabled
-buffer_data_reads disabled
-buffer_data_written disabled
-buffer_flush_batch_scanned disabled
-buffer_flush_batch_num_scan disabled
-buffer_flush_batch_scanned_per_call disabled
-buffer_flush_batch_total_pages disabled
-buffer_flush_batches disabled
-buffer_flush_batch_pages disabled
-buffer_flush_neighbor_total_pages disabled
-buffer_flush_neighbor disabled
-buffer_flush_neighbor_pages disabled
-buffer_flush_max_dirty_total_pages disabled
-buffer_flush_max_dirty disabled
-buffer_flush_max_dirty_pages disabled
-buffer_flush_adaptive_total_pages disabled
-buffer_flush_adaptive disabled
-buffer_flush_adaptive_pages disabled
-buffer_flush_async_total_pages disabled
-buffer_flush_async disabled
-buffer_flush_async_pages disabled
-buffer_flush_sync_total_pages disabled
-buffer_flush_sync disabled
-buffer_flush_sync_pages disabled
-buffer_flush_background_total_pages disabled
-buffer_flush_background disabled
-buffer_flush_background_pages disabled
-buffer_LRU_batch_scanned disabled
-buffer_LRU_batch_num_scan disabled
-buffer_LRU_batch_scanned_per_call disabled
-buffer_LRU_batch_total_pages disabled
-buffer_LRU_batches disabled
-buffer_LRU_batch_pages disabled
-buffer_LRU_single_flush_scanned disabled
-buffer_LRU_single_flush_num_scan disabled
-buffer_LRU_single_flush_scanned_per_call disabled
-buffer_LRU_single_flush_failure_count disabled
-buffer_LRU_get_free_search disabled
-buffer_LRU_search_scanned disabled
-buffer_LRU_search_num_scan disabled
-buffer_LRU_search_scanned_per_call disabled
-buffer_LRU_unzip_search_scanned disabled
-buffer_LRU_unzip_search_num_scan disabled
-buffer_LRU_unzip_search_scanned_per_call disabled
-buffer_page_read_index_leaf disabled
-buffer_page_read_index_non_leaf disabled
-buffer_page_read_index_ibuf_leaf disabled
-buffer_page_read_index_ibuf_non_leaf disabled
-buffer_page_read_undo_log disabled
-buffer_page_read_index_inode disabled
-buffer_page_read_ibuf_free_list disabled
-buffer_page_read_ibuf_bitmap disabled
-buffer_page_read_system_page disabled
-buffer_page_read_trx_system disabled
-buffer_page_read_fsp_hdr disabled
-buffer_page_read_xdes disabled
-buffer_page_read_blob disabled
-buffer_page_read_zblob disabled
-buffer_page_read_zblob2 disabled
-buffer_page_read_other disabled
-buffer_page_written_index_leaf disabled
-buffer_page_written_index_non_leaf disabled
-buffer_page_written_index_ibuf_leaf disabled
-buffer_page_written_index_ibuf_non_leaf disabled
-buffer_page_written_undo_log disabled
-buffer_page_written_index_inode disabled
-buffer_page_written_ibuf_free_list disabled
-buffer_page_written_ibuf_bitmap disabled
-buffer_page_written_system_page disabled
-buffer_page_written_trx_system disabled
-buffer_page_written_fsp_hdr disabled
-buffer_page_written_xdes disabled
-buffer_page_written_blob disabled
-buffer_page_written_zblob disabled
-buffer_page_written_zblob2 disabled
-buffer_page_written_other disabled
-os_data_reads disabled
-os_data_writes disabled
-os_data_fsyncs disabled
-os_pending_reads disabled
-os_pending_writes disabled
-os_log_bytes_written disabled
-os_log_fsyncs disabled
-os_log_pending_fsyncs disabled
-os_log_pending_writes disabled
-trx_rw_commits disabled
-trx_ro_commits disabled
-trx_nl_ro_commits disabled
-trx_commits_insert_update disabled
-trx_rollbacks disabled
-trx_rollbacks_savepoint disabled
-trx_rollback_active disabled
-trx_active_transactions disabled
-trx_rseg_history_len disabled
-trx_undo_slots_used disabled
-trx_undo_slots_cached disabled
-trx_rseg_current_size disabled
-purge_del_mark_records disabled
-purge_upd_exist_or_extern_records disabled
-purge_invoked disabled
-purge_undo_log_pages disabled
-purge_dml_delay_usec disabled
-purge_stop_count disabled
-purge_resume_count disabled
-log_checkpoints disabled
-log_lsn_last_flush disabled
-log_lsn_last_checkpoint disabled
-log_lsn_current disabled
-log_lsn_checkpoint_age disabled
-log_lsn_buf_pool_oldest disabled
-log_max_modified_age_async disabled
-log_max_modified_age_sync disabled
-log_pending_log_writes disabled
-log_pending_checkpoint_writes disabled
-log_num_log_io disabled
-log_waits disabled
-log_write_requests disabled
-log_writes disabled
-compress_pages_compressed disabled
-compress_pages_decompressed disabled
-index_splits disabled
-index_merges disabled
-adaptive_hash_searches disabled
-adaptive_hash_searches_btree disabled
-adaptive_hash_pages_added disabled
-adaptive_hash_pages_removed disabled
-adaptive_hash_rows_added disabled
-adaptive_hash_rows_removed disabled
-adaptive_hash_rows_deleted_no_hash_entry disabled
-adaptive_hash_rows_updated disabled
-file_num_open_files disabled
-ibuf_merges_insert disabled
-ibuf_merges_delete_mark disabled
-ibuf_merges_delete disabled
-ibuf_merges_discard_insert disabled
-ibuf_merges_discard_delete_mark disabled
-ibuf_merges_discard_delete disabled
-ibuf_merges disabled
-ibuf_size disabled
-innodb_master_thread_sleeps disabled
-innodb_activity_count disabled
-innodb_master_active_loops disabled
-innodb_master_idle_loops disabled
-innodb_background_drop_table_usec disabled
-innodb_ibuf_merge_usec disabled
-innodb_log_flush_usec disabled
-innodb_mem_validate_usec disabled
-innodb_master_purge_usec disabled
-innodb_dict_lru_usec disabled
-innodb_checkpoint_usec disabled
-innodb_dblwr_writes disabled
-innodb_dblwr_pages_written disabled
-innodb_page_size disabled
-innodb_rwlock_s_spin_waits disabled
-innodb_rwlock_x_spin_waits disabled
-innodb_rwlock_s_spin_rounds disabled
-innodb_rwlock_x_spin_rounds disabled
-innodb_rwlock_s_os_waits disabled
-innodb_rwlock_x_os_waits disabled
-dml_reads disabled
-dml_inserts disabled
-dml_deletes disabled
-dml_updates disabled
-ddl_background_drop_tables disabled
-icp_attempts disabled
-icp_no_match disabled
-icp_out_of_range disabled
-icp_match disabled
+select name from information_schema.innodb_metrics where status!='disabled';
+name
set global innodb_monitor_enable="log%%%%";
-select name, status from information_schema.innodb_metrics;
-name status
-metadata_table_handles_opened disabled
-metadata_table_handles_closed disabled
-metadata_table_reference_count disabled
-metadata_mem_pool_size disabled
-lock_deadlocks disabled
-lock_timeouts disabled
-lock_rec_lock_waits disabled
-lock_table_lock_waits disabled
-lock_rec_lock_requests disabled
-lock_rec_lock_created disabled
-lock_rec_lock_removed disabled
-lock_rec_locks disabled
-lock_table_lock_created disabled
-lock_table_lock_removed disabled
-lock_table_locks disabled
-lock_row_lock_current_waits disabled
-lock_row_lock_time disabled
-lock_row_lock_time_max disabled
-lock_row_lock_waits disabled
-lock_row_lock_time_avg disabled
-buffer_pool_size disabled
-buffer_pool_reads disabled
-buffer_pool_read_requests disabled
-buffer_pool_write_requests disabled
-buffer_pool_pages_in_flush disabled
-buffer_pool_wait_free disabled
-buffer_pool_read_ahead disabled
-buffer_pool_read_ahead_evicted disabled
-buffer_pool_pages_total disabled
-buffer_pool_pages_misc disabled
-buffer_pool_pages_data disabled
-buffer_pool_pages_dirty disabled
-buffer_pool_pages_free disabled
-buffer_pages_created disabled
-buffer_pages_written disabled
-buffer_pages_read disabled
-buffer_data_reads disabled
-buffer_data_written disabled
-buffer_flush_batch_scanned disabled
-buffer_flush_batch_num_scan disabled
-buffer_flush_batch_scanned_per_call disabled
-buffer_flush_batch_total_pages disabled
-buffer_flush_batches disabled
-buffer_flush_batch_pages disabled
-buffer_flush_neighbor_total_pages disabled
-buffer_flush_neighbor disabled
-buffer_flush_neighbor_pages disabled
-buffer_flush_max_dirty_total_pages disabled
-buffer_flush_max_dirty disabled
-buffer_flush_max_dirty_pages disabled
-buffer_flush_adaptive_total_pages disabled
-buffer_flush_adaptive disabled
-buffer_flush_adaptive_pages disabled
-buffer_flush_async_total_pages disabled
-buffer_flush_async disabled
-buffer_flush_async_pages disabled
-buffer_flush_sync_total_pages disabled
-buffer_flush_sync disabled
-buffer_flush_sync_pages disabled
-buffer_flush_background_total_pages disabled
-buffer_flush_background disabled
-buffer_flush_background_pages disabled
-buffer_LRU_batch_scanned disabled
-buffer_LRU_batch_num_scan disabled
-buffer_LRU_batch_scanned_per_call disabled
-buffer_LRU_batch_total_pages disabled
-buffer_LRU_batches disabled
-buffer_LRU_batch_pages disabled
-buffer_LRU_single_flush_scanned disabled
-buffer_LRU_single_flush_num_scan disabled
-buffer_LRU_single_flush_scanned_per_call disabled
-buffer_LRU_single_flush_failure_count disabled
-buffer_LRU_get_free_search disabled
-buffer_LRU_search_scanned disabled
-buffer_LRU_search_num_scan disabled
-buffer_LRU_search_scanned_per_call disabled
-buffer_LRU_unzip_search_scanned disabled
-buffer_LRU_unzip_search_num_scan disabled
-buffer_LRU_unzip_search_scanned_per_call disabled
-buffer_page_read_index_leaf disabled
-buffer_page_read_index_non_leaf disabled
-buffer_page_read_index_ibuf_leaf disabled
-buffer_page_read_index_ibuf_non_leaf disabled
-buffer_page_read_undo_log disabled
-buffer_page_read_index_inode disabled
-buffer_page_read_ibuf_free_list disabled
-buffer_page_read_ibuf_bitmap disabled
-buffer_page_read_system_page disabled
-buffer_page_read_trx_system disabled
-buffer_page_read_fsp_hdr disabled
-buffer_page_read_xdes disabled
-buffer_page_read_blob disabled
-buffer_page_read_zblob disabled
-buffer_page_read_zblob2 disabled
-buffer_page_read_other disabled
-buffer_page_written_index_leaf disabled
-buffer_page_written_index_non_leaf disabled
-buffer_page_written_index_ibuf_leaf disabled
-buffer_page_written_index_ibuf_non_leaf disabled
-buffer_page_written_undo_log disabled
-buffer_page_written_index_inode disabled
-buffer_page_written_ibuf_free_list disabled
-buffer_page_written_ibuf_bitmap disabled
-buffer_page_written_system_page disabled
-buffer_page_written_trx_system disabled
-buffer_page_written_fsp_hdr disabled
-buffer_page_written_xdes disabled
-buffer_page_written_blob disabled
-buffer_page_written_zblob disabled
-buffer_page_written_zblob2 disabled
-buffer_page_written_other disabled
-os_data_reads disabled
-os_data_writes disabled
-os_data_fsyncs disabled
-os_pending_reads disabled
-os_pending_writes disabled
-os_log_bytes_written disabled
-os_log_fsyncs disabled
-os_log_pending_fsyncs disabled
-os_log_pending_writes disabled
-trx_rw_commits disabled
-trx_ro_commits disabled
-trx_nl_ro_commits disabled
-trx_commits_insert_update disabled
-trx_rollbacks disabled
-trx_rollbacks_savepoint disabled
-trx_rollback_active disabled
-trx_active_transactions disabled
-trx_rseg_history_len disabled
-trx_undo_slots_used disabled
-trx_undo_slots_cached disabled
-trx_rseg_current_size disabled
-purge_del_mark_records disabled
-purge_upd_exist_or_extern_records disabled
-purge_invoked disabled
-purge_undo_log_pages disabled
-purge_dml_delay_usec disabled
-purge_stop_count disabled
-purge_resume_count disabled
-log_checkpoints enabled
-log_lsn_last_flush enabled
-log_lsn_last_checkpoint enabled
-log_lsn_current enabled
-log_lsn_checkpoint_age enabled
-log_lsn_buf_pool_oldest enabled
-log_max_modified_age_async enabled
-log_max_modified_age_sync enabled
-log_pending_log_writes enabled
-log_pending_checkpoint_writes enabled
-log_num_log_io enabled
-log_waits enabled
-log_write_requests enabled
-log_writes enabled
-compress_pages_compressed disabled
-compress_pages_decompressed disabled
-index_splits disabled
-index_merges disabled
-adaptive_hash_searches disabled
-adaptive_hash_searches_btree disabled
-adaptive_hash_pages_added disabled
-adaptive_hash_pages_removed disabled
-adaptive_hash_rows_added disabled
-adaptive_hash_rows_removed disabled
-adaptive_hash_rows_deleted_no_hash_entry disabled
-adaptive_hash_rows_updated disabled
-file_num_open_files disabled
-ibuf_merges_insert disabled
-ibuf_merges_delete_mark disabled
-ibuf_merges_delete disabled
-ibuf_merges_discard_insert disabled
-ibuf_merges_discard_delete_mark disabled
-ibuf_merges_discard_delete disabled
-ibuf_merges disabled
-ibuf_size disabled
-innodb_master_thread_sleeps disabled
-innodb_activity_count disabled
-innodb_master_active_loops disabled
-innodb_master_idle_loops disabled
-innodb_background_drop_table_usec disabled
-innodb_ibuf_merge_usec disabled
-innodb_log_flush_usec disabled
-innodb_mem_validate_usec disabled
-innodb_master_purge_usec disabled
-innodb_dict_lru_usec disabled
-innodb_checkpoint_usec disabled
-innodb_dblwr_writes disabled
-innodb_dblwr_pages_written disabled
-innodb_page_size disabled
-innodb_rwlock_s_spin_waits disabled
-innodb_rwlock_x_spin_waits disabled
-innodb_rwlock_s_spin_rounds disabled
-innodb_rwlock_x_spin_rounds disabled
-innodb_rwlock_s_os_waits disabled
-innodb_rwlock_x_os_waits disabled
-dml_reads disabled
-dml_inserts disabled
-dml_deletes disabled
-dml_updates disabled
-ddl_background_drop_tables disabled
-icp_attempts disabled
-icp_no_match disabled
-icp_out_of_range disabled
-icp_match disabled
+select name from information_schema.innodb_metrics
+where status != IF(name like "log%", 'enabled', 'disabled');
+name
set global innodb_monitor_enable="os_%a_fs_ncs";
set global innodb_monitor_enable="os%pending%";
select name, status from information_schema.innodb_metrics
@@ -1989,7 +350,7 @@ name max_count min_count count max_count_reset min_count_reset count_reset statu
metadata_table_handles_opened NULL NULL 0 NULL NULL 0 disabled
set global innodb_monitor_enable = metadata_table_handles_opened;
drop table monitor_test;
-create table monitor_test(col int) engine = innodb;
+create table monitor_test(col int) engine = innodb stats_persistent=0;
select * from monitor_test;
col
select name, max_count, min_count, count,
@@ -2146,7 +507,7 @@ max_count_reset, min_count_reset, count_reset, status
from information_schema.innodb_metrics
where name like "file_num_open_files";
name max_count min_count count max_count_reset min_count_reset count_reset status
-file_num_open_files 3 3 3 3 3 3 enabled
+file_num_open_files # # # # # # enabled
set global innodb_monitor_disable = file_num_open_files;
set global innodb_monitor_enable = "icp%";
create table monitor_test(a char(3), b int, c char(2),
diff --git a/mysql-test/suite/sys_vars/r/innodb_monitor_reset_all_basic.result b/mysql-test/suite/sys_vars/r/innodb_monitor_reset_all_basic.result
index 8bb508c877e..7a7c0a6b6a2 100644
--- a/mysql-test/suite/sys_vars/r/innodb_monitor_reset_all_basic.result
+++ b/mysql-test/suite/sys_vars/r/innodb_monitor_reset_all_basic.result
@@ -25,14 +25,15 @@ buffer_pool_size disabled
buffer_pool_reads disabled
buffer_pool_read_requests disabled
buffer_pool_write_requests disabled
-buffer_pool_pages_in_flush disabled
buffer_pool_wait_free disabled
buffer_pool_read_ahead disabled
buffer_pool_read_ahead_evicted disabled
buffer_pool_pages_total disabled
buffer_pool_pages_misc disabled
buffer_pool_pages_data disabled
+buffer_pool_bytes_data disabled
buffer_pool_pages_dirty disabled
+buffer_pool_bytes_dirty disabled
buffer_pool_pages_free disabled
buffer_pages_created disabled
buffer_pages_written disabled
@@ -48,15 +49,15 @@ buffer_flush_batch_pages disabled
buffer_flush_neighbor_total_pages disabled
buffer_flush_neighbor disabled
buffer_flush_neighbor_pages disabled
-buffer_flush_max_dirty_total_pages disabled
-buffer_flush_max_dirty disabled
-buffer_flush_max_dirty_pages disabled
+buffer_flush_n_to_flush_requested disabled
+buffer_flush_avg_page_rate disabled
+buffer_flush_lsn_avg_rate disabled
+buffer_flush_pct_for_dirty disabled
+buffer_flush_pct_for_lsn disabled
+buffer_flush_sync_waits disabled
buffer_flush_adaptive_total_pages disabled
buffer_flush_adaptive disabled
buffer_flush_adaptive_pages disabled
-buffer_flush_async_total_pages disabled
-buffer_flush_async disabled
-buffer_flush_async_pages disabled
buffer_flush_sync_total_pages disabled
buffer_flush_sync disabled
buffer_flush_sync_pages disabled
@@ -156,6 +157,8 @@ log_write_requests disabled
log_writes disabled
compress_pages_compressed disabled
compress_pages_decompressed disabled
+compression_pad_increments disabled
+compression_pad_decrements disabled
index_splits disabled
index_merges disabled
adaptive_hash_searches disabled
@@ -199,657 +202,29 @@ dml_reads disabled
dml_inserts disabled
dml_deletes disabled
dml_updates disabled
+ddl_background_drop_indexes disabled
ddl_background_drop_tables disabled
+ddl_online_create_index disabled
+ddl_pending_alter_table disabled
icp_attempts disabled
icp_no_match disabled
icp_out_of_range disabled
icp_match disabled
set global innodb_monitor_enable = all;
-select name, status from information_schema.innodb_metrics;
-name status
-metadata_table_handles_opened enabled
-metadata_table_handles_closed enabled
-metadata_table_reference_count enabled
-metadata_mem_pool_size enabled
-lock_deadlocks enabled
-lock_timeouts enabled
-lock_rec_lock_waits enabled
-lock_table_lock_waits enabled
-lock_rec_lock_requests enabled
-lock_rec_lock_created enabled
-lock_rec_lock_removed enabled
-lock_rec_locks enabled
-lock_table_lock_created enabled
-lock_table_lock_removed enabled
-lock_table_locks enabled
-lock_row_lock_current_waits enabled
-lock_row_lock_time enabled
-lock_row_lock_time_max enabled
-lock_row_lock_waits enabled
-lock_row_lock_time_avg enabled
-buffer_pool_size enabled
-buffer_pool_reads enabled
-buffer_pool_read_requests enabled
-buffer_pool_write_requests enabled
-buffer_pool_pages_in_flush enabled
-buffer_pool_wait_free enabled
-buffer_pool_read_ahead enabled
-buffer_pool_read_ahead_evicted enabled
-buffer_pool_pages_total enabled
-buffer_pool_pages_misc enabled
-buffer_pool_pages_data enabled
-buffer_pool_pages_dirty enabled
-buffer_pool_pages_free enabled
-buffer_pages_created enabled
-buffer_pages_written enabled
-buffer_pages_read enabled
-buffer_data_reads enabled
-buffer_data_written enabled
-buffer_flush_batch_scanned enabled
-buffer_flush_batch_num_scan enabled
-buffer_flush_batch_scanned_per_call enabled
-buffer_flush_batch_total_pages enabled
-buffer_flush_batches enabled
-buffer_flush_batch_pages enabled
-buffer_flush_neighbor_total_pages enabled
-buffer_flush_neighbor enabled
-buffer_flush_neighbor_pages enabled
-buffer_flush_max_dirty_total_pages enabled
-buffer_flush_max_dirty enabled
-buffer_flush_max_dirty_pages enabled
-buffer_flush_adaptive_total_pages enabled
-buffer_flush_adaptive enabled
-buffer_flush_adaptive_pages enabled
-buffer_flush_async_total_pages enabled
-buffer_flush_async enabled
-buffer_flush_async_pages enabled
-buffer_flush_sync_total_pages enabled
-buffer_flush_sync enabled
-buffer_flush_sync_pages enabled
-buffer_flush_background_total_pages enabled
-buffer_flush_background enabled
-buffer_flush_background_pages enabled
-buffer_LRU_batch_scanned enabled
-buffer_LRU_batch_num_scan enabled
-buffer_LRU_batch_scanned_per_call enabled
-buffer_LRU_batch_total_pages enabled
-buffer_LRU_batches enabled
-buffer_LRU_batch_pages enabled
-buffer_LRU_single_flush_scanned enabled
-buffer_LRU_single_flush_num_scan enabled
-buffer_LRU_single_flush_scanned_per_call enabled
-buffer_LRU_single_flush_failure_count enabled
-buffer_LRU_get_free_search enabled
-buffer_LRU_search_scanned enabled
-buffer_LRU_search_num_scan enabled
-buffer_LRU_search_scanned_per_call enabled
-buffer_LRU_unzip_search_scanned enabled
-buffer_LRU_unzip_search_num_scan enabled
-buffer_LRU_unzip_search_scanned_per_call enabled
-buffer_page_read_index_leaf enabled
-buffer_page_read_index_non_leaf enabled
-buffer_page_read_index_ibuf_leaf enabled
-buffer_page_read_index_ibuf_non_leaf enabled
-buffer_page_read_undo_log enabled
-buffer_page_read_index_inode enabled
-buffer_page_read_ibuf_free_list enabled
-buffer_page_read_ibuf_bitmap enabled
-buffer_page_read_system_page enabled
-buffer_page_read_trx_system enabled
-buffer_page_read_fsp_hdr enabled
-buffer_page_read_xdes enabled
-buffer_page_read_blob enabled
-buffer_page_read_zblob enabled
-buffer_page_read_zblob2 enabled
-buffer_page_read_other enabled
-buffer_page_written_index_leaf enabled
-buffer_page_written_index_non_leaf enabled
-buffer_page_written_index_ibuf_leaf enabled
-buffer_page_written_index_ibuf_non_leaf enabled
-buffer_page_written_undo_log enabled
-buffer_page_written_index_inode enabled
-buffer_page_written_ibuf_free_list enabled
-buffer_page_written_ibuf_bitmap enabled
-buffer_page_written_system_page enabled
-buffer_page_written_trx_system enabled
-buffer_page_written_fsp_hdr enabled
-buffer_page_written_xdes enabled
-buffer_page_written_blob enabled
-buffer_page_written_zblob enabled
-buffer_page_written_zblob2 enabled
-buffer_page_written_other enabled
-os_data_reads enabled
-os_data_writes enabled
-os_data_fsyncs enabled
-os_pending_reads enabled
-os_pending_writes enabled
-os_log_bytes_written enabled
-os_log_fsyncs enabled
-os_log_pending_fsyncs enabled
-os_log_pending_writes enabled
-trx_rw_commits enabled
-trx_ro_commits enabled
-trx_nl_ro_commits enabled
-trx_commits_insert_update enabled
-trx_rollbacks enabled
-trx_rollbacks_savepoint enabled
-trx_rollback_active enabled
-trx_active_transactions enabled
-trx_rseg_history_len enabled
-trx_undo_slots_used enabled
-trx_undo_slots_cached enabled
-trx_rseg_current_size enabled
-purge_del_mark_records enabled
-purge_upd_exist_or_extern_records enabled
-purge_invoked enabled
-purge_undo_log_pages enabled
-purge_dml_delay_usec enabled
-purge_stop_count enabled
-purge_resume_count enabled
-log_checkpoints enabled
-log_lsn_last_flush enabled
-log_lsn_last_checkpoint enabled
-log_lsn_current enabled
-log_lsn_checkpoint_age enabled
-log_lsn_buf_pool_oldest enabled
-log_max_modified_age_async enabled
-log_max_modified_age_sync enabled
-log_pending_log_writes enabled
-log_pending_checkpoint_writes enabled
-log_num_log_io enabled
-log_waits enabled
-log_write_requests enabled
-log_writes enabled
-compress_pages_compressed enabled
-compress_pages_decompressed enabled
-index_splits enabled
-index_merges enabled
-adaptive_hash_searches enabled
-adaptive_hash_searches_btree enabled
-adaptive_hash_pages_added enabled
-adaptive_hash_pages_removed enabled
-adaptive_hash_rows_added enabled
-adaptive_hash_rows_removed enabled
-adaptive_hash_rows_deleted_no_hash_entry enabled
-adaptive_hash_rows_updated enabled
-file_num_open_files enabled
-ibuf_merges_insert enabled
-ibuf_merges_delete_mark enabled
-ibuf_merges_delete enabled
-ibuf_merges_discard_insert enabled
-ibuf_merges_discard_delete_mark enabled
-ibuf_merges_discard_delete enabled
-ibuf_merges enabled
-ibuf_size enabled
-innodb_master_thread_sleeps enabled
-innodb_activity_count enabled
-innodb_master_active_loops enabled
-innodb_master_idle_loops enabled
-innodb_background_drop_table_usec enabled
-innodb_ibuf_merge_usec enabled
-innodb_log_flush_usec enabled
-innodb_mem_validate_usec enabled
-innodb_master_purge_usec enabled
-innodb_dict_lru_usec enabled
-innodb_checkpoint_usec enabled
-innodb_dblwr_writes enabled
-innodb_dblwr_pages_written enabled
-innodb_page_size enabled
-innodb_rwlock_s_spin_waits enabled
-innodb_rwlock_x_spin_waits enabled
-innodb_rwlock_s_spin_rounds enabled
-innodb_rwlock_x_spin_rounds enabled
-innodb_rwlock_s_os_waits enabled
-innodb_rwlock_x_os_waits enabled
-dml_reads enabled
-dml_inserts enabled
-dml_deletes enabled
-dml_updates enabled
-ddl_background_drop_tables enabled
-icp_attempts enabled
-icp_no_match enabled
-icp_out_of_range enabled
-icp_match enabled
+select name from information_schema.innodb_metrics where status!='enabled';
+name
set global innodb_monitor_enable = aaa;
ERROR 42000: Variable 'innodb_monitor_enable' can't be set to the value of 'aaa'
set global innodb_monitor_disable = All;
-select name, status from information_schema.innodb_metrics;
-name status
-metadata_table_handles_opened disabled
-metadata_table_handles_closed disabled
-metadata_table_reference_count disabled
-metadata_mem_pool_size disabled
-lock_deadlocks disabled
-lock_timeouts disabled
-lock_rec_lock_waits disabled
-lock_table_lock_waits disabled
-lock_rec_lock_requests disabled
-lock_rec_lock_created disabled
-lock_rec_lock_removed disabled
-lock_rec_locks disabled
-lock_table_lock_created disabled
-lock_table_lock_removed disabled
-lock_table_locks disabled
-lock_row_lock_current_waits disabled
-lock_row_lock_time disabled
-lock_row_lock_time_max disabled
-lock_row_lock_waits disabled
-lock_row_lock_time_avg disabled
-buffer_pool_size disabled
-buffer_pool_reads disabled
-buffer_pool_read_requests disabled
-buffer_pool_write_requests disabled
-buffer_pool_pages_in_flush disabled
-buffer_pool_wait_free disabled
-buffer_pool_read_ahead disabled
-buffer_pool_read_ahead_evicted disabled
-buffer_pool_pages_total disabled
-buffer_pool_pages_misc disabled
-buffer_pool_pages_data disabled
-buffer_pool_pages_dirty disabled
-buffer_pool_pages_free disabled
-buffer_pages_created disabled
-buffer_pages_written disabled
-buffer_pages_read disabled
-buffer_data_reads disabled
-buffer_data_written disabled
-buffer_flush_batch_scanned disabled
-buffer_flush_batch_num_scan disabled
-buffer_flush_batch_scanned_per_call disabled
-buffer_flush_batch_total_pages disabled
-buffer_flush_batches disabled
-buffer_flush_batch_pages disabled
-buffer_flush_neighbor_total_pages disabled
-buffer_flush_neighbor disabled
-buffer_flush_neighbor_pages disabled
-buffer_flush_max_dirty_total_pages disabled
-buffer_flush_max_dirty disabled
-buffer_flush_max_dirty_pages disabled
-buffer_flush_adaptive_total_pages disabled
-buffer_flush_adaptive disabled
-buffer_flush_adaptive_pages disabled
-buffer_flush_async_total_pages disabled
-buffer_flush_async disabled
-buffer_flush_async_pages disabled
-buffer_flush_sync_total_pages disabled
-buffer_flush_sync disabled
-buffer_flush_sync_pages disabled
-buffer_flush_background_total_pages disabled
-buffer_flush_background disabled
-buffer_flush_background_pages disabled
-buffer_LRU_batch_scanned disabled
-buffer_LRU_batch_num_scan disabled
-buffer_LRU_batch_scanned_per_call disabled
-buffer_LRU_batch_total_pages disabled
-buffer_LRU_batches disabled
-buffer_LRU_batch_pages disabled
-buffer_LRU_single_flush_scanned disabled
-buffer_LRU_single_flush_num_scan disabled
-buffer_LRU_single_flush_scanned_per_call disabled
-buffer_LRU_single_flush_failure_count disabled
-buffer_LRU_get_free_search disabled
-buffer_LRU_search_scanned disabled
-buffer_LRU_search_num_scan disabled
-buffer_LRU_search_scanned_per_call disabled
-buffer_LRU_unzip_search_scanned disabled
-buffer_LRU_unzip_search_num_scan disabled
-buffer_LRU_unzip_search_scanned_per_call disabled
-buffer_page_read_index_leaf disabled
-buffer_page_read_index_non_leaf disabled
-buffer_page_read_index_ibuf_leaf disabled
-buffer_page_read_index_ibuf_non_leaf disabled
-buffer_page_read_undo_log disabled
-buffer_page_read_index_inode disabled
-buffer_page_read_ibuf_free_list disabled
-buffer_page_read_ibuf_bitmap disabled
-buffer_page_read_system_page disabled
-buffer_page_read_trx_system disabled
-buffer_page_read_fsp_hdr disabled
-buffer_page_read_xdes disabled
-buffer_page_read_blob disabled
-buffer_page_read_zblob disabled
-buffer_page_read_zblob2 disabled
-buffer_page_read_other disabled
-buffer_page_written_index_leaf disabled
-buffer_page_written_index_non_leaf disabled
-buffer_page_written_index_ibuf_leaf disabled
-buffer_page_written_index_ibuf_non_leaf disabled
-buffer_page_written_undo_log disabled
-buffer_page_written_index_inode disabled
-buffer_page_written_ibuf_free_list disabled
-buffer_page_written_ibuf_bitmap disabled
-buffer_page_written_system_page disabled
-buffer_page_written_trx_system disabled
-buffer_page_written_fsp_hdr disabled
-buffer_page_written_xdes disabled
-buffer_page_written_blob disabled
-buffer_page_written_zblob disabled
-buffer_page_written_zblob2 disabled
-buffer_page_written_other disabled
-os_data_reads disabled
-os_data_writes disabled
-os_data_fsyncs disabled
-os_pending_reads disabled
-os_pending_writes disabled
-os_log_bytes_written disabled
-os_log_fsyncs disabled
-os_log_pending_fsyncs disabled
-os_log_pending_writes disabled
-trx_rw_commits disabled
-trx_ro_commits disabled
-trx_nl_ro_commits disabled
-trx_commits_insert_update disabled
-trx_rollbacks disabled
-trx_rollbacks_savepoint disabled
-trx_rollback_active disabled
-trx_active_transactions disabled
-trx_rseg_history_len disabled
-trx_undo_slots_used disabled
-trx_undo_slots_cached disabled
-trx_rseg_current_size disabled
-purge_del_mark_records disabled
-purge_upd_exist_or_extern_records disabled
-purge_invoked disabled
-purge_undo_log_pages disabled
-purge_dml_delay_usec disabled
-purge_stop_count disabled
-purge_resume_count disabled
-log_checkpoints disabled
-log_lsn_last_flush disabled
-log_lsn_last_checkpoint disabled
-log_lsn_current disabled
-log_lsn_checkpoint_age disabled
-log_lsn_buf_pool_oldest disabled
-log_max_modified_age_async disabled
-log_max_modified_age_sync disabled
-log_pending_log_writes disabled
-log_pending_checkpoint_writes disabled
-log_num_log_io disabled
-log_waits disabled
-log_write_requests disabled
-log_writes disabled
-compress_pages_compressed disabled
-compress_pages_decompressed disabled
-index_splits disabled
-index_merges disabled
-adaptive_hash_searches disabled
-adaptive_hash_searches_btree disabled
-adaptive_hash_pages_added disabled
-adaptive_hash_pages_removed disabled
-adaptive_hash_rows_added disabled
-adaptive_hash_rows_removed disabled
-adaptive_hash_rows_deleted_no_hash_entry disabled
-adaptive_hash_rows_updated disabled
-file_num_open_files disabled
-ibuf_merges_insert disabled
-ibuf_merges_delete_mark disabled
-ibuf_merges_delete disabled
-ibuf_merges_discard_insert disabled
-ibuf_merges_discard_delete_mark disabled
-ibuf_merges_discard_delete disabled
-ibuf_merges disabled
-ibuf_size disabled
-innodb_master_thread_sleeps disabled
-innodb_activity_count disabled
-innodb_master_active_loops disabled
-innodb_master_idle_loops disabled
-innodb_background_drop_table_usec disabled
-innodb_ibuf_merge_usec disabled
-innodb_log_flush_usec disabled
-innodb_mem_validate_usec disabled
-innodb_master_purge_usec disabled
-innodb_dict_lru_usec disabled
-innodb_checkpoint_usec disabled
-innodb_dblwr_writes disabled
-innodb_dblwr_pages_written disabled
-innodb_page_size disabled
-innodb_rwlock_s_spin_waits disabled
-innodb_rwlock_x_spin_waits disabled
-innodb_rwlock_s_spin_rounds disabled
-innodb_rwlock_x_spin_rounds disabled
-innodb_rwlock_s_os_waits disabled
-innodb_rwlock_x_os_waits disabled
-dml_reads disabled
-dml_inserts disabled
-dml_deletes disabled
-dml_updates disabled
-ddl_background_drop_tables disabled
-icp_attempts disabled
-icp_no_match disabled
-icp_out_of_range disabled
-icp_match disabled
+select name from information_schema.innodb_metrics where status!='disabled';
+name
set global innodb_monitor_reset_all = all;
-select name, count, status from information_schema.innodb_metrics;
-name count status
-metadata_table_handles_opened 0 disabled
-metadata_table_handles_closed 0 disabled
-metadata_table_reference_count 0 disabled
-metadata_mem_pool_size 0 disabled
-lock_deadlocks 0 disabled
-lock_timeouts 0 disabled
-lock_rec_lock_waits 0 disabled
-lock_table_lock_waits 0 disabled
-lock_rec_lock_requests 0 disabled
-lock_rec_lock_created 0 disabled
-lock_rec_lock_removed 0 disabled
-lock_rec_locks 0 disabled
-lock_table_lock_created 0 disabled
-lock_table_lock_removed 0 disabled
-lock_table_locks 0 disabled
-lock_row_lock_current_waits 0 disabled
-lock_row_lock_time 0 disabled
-lock_row_lock_time_max 0 disabled
-lock_row_lock_waits 0 disabled
-lock_row_lock_time_avg 0 disabled
-buffer_pool_size 0 disabled
-buffer_pool_reads 0 disabled
-buffer_pool_read_requests 0 disabled
-buffer_pool_write_requests 0 disabled
-buffer_pool_pages_in_flush 0 disabled
-buffer_pool_wait_free 0 disabled
-buffer_pool_read_ahead 0 disabled
-buffer_pool_read_ahead_evicted 0 disabled
-buffer_pool_pages_total 0 disabled
-buffer_pool_pages_misc 0 disabled
-buffer_pool_pages_data 0 disabled
-buffer_pool_pages_dirty 0 disabled
-buffer_pool_pages_free 0 disabled
-buffer_pages_created 0 disabled
-buffer_pages_written 0 disabled
-buffer_pages_read 0 disabled
-buffer_data_reads 0 disabled
-buffer_data_written 0 disabled
-buffer_flush_batch_scanned 0 disabled
-buffer_flush_batch_num_scan 0 disabled
-buffer_flush_batch_scanned_per_call 0 disabled
-buffer_flush_batch_total_pages 0 disabled
-buffer_flush_batches 0 disabled
-buffer_flush_batch_pages 0 disabled
-buffer_flush_neighbor_total_pages 0 disabled
-buffer_flush_neighbor 0 disabled
-buffer_flush_neighbor_pages 0 disabled
-buffer_flush_max_dirty_total_pages 0 disabled
-buffer_flush_max_dirty 0 disabled
-buffer_flush_max_dirty_pages 0 disabled
-buffer_flush_adaptive_total_pages 0 disabled
-buffer_flush_adaptive 0 disabled
-buffer_flush_adaptive_pages 0 disabled
-buffer_flush_async_total_pages 0 disabled
-buffer_flush_async 0 disabled
-buffer_flush_async_pages 0 disabled
-buffer_flush_sync_total_pages 0 disabled
-buffer_flush_sync 0 disabled
-buffer_flush_sync_pages 0 disabled
-buffer_flush_background_total_pages 0 disabled
-buffer_flush_background 0 disabled
-buffer_flush_background_pages 0 disabled
-buffer_LRU_batch_scanned 0 disabled
-buffer_LRU_batch_num_scan 0 disabled
-buffer_LRU_batch_scanned_per_call 0 disabled
-buffer_LRU_batch_total_pages 0 disabled
-buffer_LRU_batches 0 disabled
-buffer_LRU_batch_pages 0 disabled
-buffer_LRU_single_flush_scanned 0 disabled
-buffer_LRU_single_flush_num_scan 0 disabled
-buffer_LRU_single_flush_scanned_per_call 0 disabled
-buffer_LRU_single_flush_failure_count 0 disabled
-buffer_LRU_get_free_search 0 disabled
-buffer_LRU_search_scanned 0 disabled
-buffer_LRU_search_num_scan 0 disabled
-buffer_LRU_search_scanned_per_call 0 disabled
-buffer_LRU_unzip_search_scanned 0 disabled
-buffer_LRU_unzip_search_num_scan 0 disabled
-buffer_LRU_unzip_search_scanned_per_call 0 disabled
-buffer_page_read_index_leaf 0 disabled
-buffer_page_read_index_non_leaf 0 disabled
-buffer_page_read_index_ibuf_leaf 0 disabled
-buffer_page_read_index_ibuf_non_leaf 0 disabled
-buffer_page_read_undo_log 0 disabled
-buffer_page_read_index_inode 0 disabled
-buffer_page_read_ibuf_free_list 0 disabled
-buffer_page_read_ibuf_bitmap 0 disabled
-buffer_page_read_system_page 0 disabled
-buffer_page_read_trx_system 0 disabled
-buffer_page_read_fsp_hdr 0 disabled
-buffer_page_read_xdes 0 disabled
-buffer_page_read_blob 0 disabled
-buffer_page_read_zblob 0 disabled
-buffer_page_read_zblob2 0 disabled
-buffer_page_read_other 0 disabled
-buffer_page_written_index_leaf 0 disabled
-buffer_page_written_index_non_leaf 0 disabled
-buffer_page_written_index_ibuf_leaf 0 disabled
-buffer_page_written_index_ibuf_non_leaf 0 disabled
-buffer_page_written_undo_log 0 disabled
-buffer_page_written_index_inode 0 disabled
-buffer_page_written_ibuf_free_list 0 disabled
-buffer_page_written_ibuf_bitmap 0 disabled
-buffer_page_written_system_page 0 disabled
-buffer_page_written_trx_system 0 disabled
-buffer_page_written_fsp_hdr 0 disabled
-buffer_page_written_xdes 0 disabled
-buffer_page_written_blob 0 disabled
-buffer_page_written_zblob 0 disabled
-buffer_page_written_zblob2 0 disabled
-buffer_page_written_other 0 disabled
-os_data_reads 0 disabled
-os_data_writes 0 disabled
-os_data_fsyncs 0 disabled
-os_pending_reads 0 disabled
-os_pending_writes 0 disabled
-os_log_bytes_written 0 disabled
-os_log_fsyncs 0 disabled
-os_log_pending_fsyncs 0 disabled
-os_log_pending_writes 0 disabled
-trx_rw_commits 0 disabled
-trx_ro_commits 0 disabled
-trx_nl_ro_commits 0 disabled
-trx_commits_insert_update 0 disabled
-trx_rollbacks 0 disabled
-trx_rollbacks_savepoint 0 disabled
-trx_rollback_active 0 disabled
-trx_active_transactions 0 disabled
-trx_rseg_history_len 0 disabled
-trx_undo_slots_used 0 disabled
-trx_undo_slots_cached 0 disabled
-trx_rseg_current_size 0 disabled
-purge_del_mark_records 0 disabled
-purge_upd_exist_or_extern_records 0 disabled
-purge_invoked 0 disabled
-purge_undo_log_pages 0 disabled
-purge_dml_delay_usec 0 disabled
-purge_stop_count 0 disabled
-purge_resume_count 0 disabled
-log_checkpoints 0 disabled
-log_lsn_last_flush 0 disabled
-log_lsn_last_checkpoint 0 disabled
-log_lsn_current 0 disabled
-log_lsn_checkpoint_age 0 disabled
-log_lsn_buf_pool_oldest 0 disabled
-log_max_modified_age_async 0 disabled
-log_max_modified_age_sync 0 disabled
-log_pending_log_writes 0 disabled
-log_pending_checkpoint_writes 0 disabled
-log_num_log_io 0 disabled
-log_waits 0 disabled
-log_write_requests 0 disabled
-log_writes 0 disabled
-compress_pages_compressed 0 disabled
-compress_pages_decompressed 0 disabled
-index_splits 0 disabled
-index_merges 0 disabled
-adaptive_hash_searches 0 disabled
-adaptive_hash_searches_btree 0 disabled
-adaptive_hash_pages_added 0 disabled
-adaptive_hash_pages_removed 0 disabled
-adaptive_hash_rows_added 0 disabled
-adaptive_hash_rows_removed 0 disabled
-adaptive_hash_rows_deleted_no_hash_entry 0 disabled
-adaptive_hash_rows_updated 0 disabled
-file_num_open_files 0 disabled
-ibuf_merges_insert 0 disabled
-ibuf_merges_delete_mark 0 disabled
-ibuf_merges_delete 0 disabled
-ibuf_merges_discard_insert 0 disabled
-ibuf_merges_discard_delete_mark 0 disabled
-ibuf_merges_discard_delete 0 disabled
-ibuf_merges 0 disabled
-ibuf_size 0 disabled
-innodb_master_thread_sleeps 0 disabled
-innodb_activity_count 0 disabled
-innodb_master_active_loops 0 disabled
-innodb_master_idle_loops 0 disabled
-innodb_background_drop_table_usec 0 disabled
-innodb_ibuf_merge_usec 0 disabled
-innodb_log_flush_usec 0 disabled
-innodb_mem_validate_usec 0 disabled
-innodb_master_purge_usec 0 disabled
-innodb_dict_lru_usec 0 disabled
-innodb_checkpoint_usec 0 disabled
-innodb_dblwr_writes 0 disabled
-innodb_dblwr_pages_written 0 disabled
-innodb_page_size 0 disabled
-innodb_rwlock_s_spin_waits 0 disabled
-innodb_rwlock_x_spin_waits 0 disabled
-innodb_rwlock_s_spin_rounds 0 disabled
-innodb_rwlock_x_spin_rounds 0 disabled
-innodb_rwlock_s_os_waits 0 disabled
-innodb_rwlock_x_os_waits 0 disabled
-dml_reads 0 disabled
-dml_inserts 0 disabled
-dml_deletes 0 disabled
-dml_updates 0 disabled
-ddl_background_drop_tables 0 disabled
-icp_attempts 0 disabled
-icp_no_match 0 disabled
-icp_out_of_range 0 disabled
-icp_match 0 disabled
+select name from information_schema.innodb_metrics where count!=0;
+name
set global innodb_monitor_enable = "%lock%";
-select name, status from information_schema.innodb_metrics
-where name like "%lock%";
-name status
-lock_deadlocks enabled
-lock_timeouts enabled
-lock_rec_lock_waits enabled
-lock_table_lock_waits enabled
-lock_rec_lock_requests enabled
-lock_rec_lock_created enabled
-lock_rec_lock_removed enabled
-lock_rec_locks enabled
-lock_table_lock_created enabled
-lock_table_lock_removed enabled
-lock_table_locks enabled
-lock_row_lock_current_waits enabled
-lock_row_lock_time enabled
-lock_row_lock_time_max enabled
-lock_row_lock_waits enabled
-lock_row_lock_time_avg enabled
-innodb_rwlock_s_spin_waits enabled
-innodb_rwlock_x_spin_waits enabled
-innodb_rwlock_s_spin_rounds enabled
-innodb_rwlock_x_spin_rounds enabled
-innodb_rwlock_s_os_waits enabled
-innodb_rwlock_x_os_waits enabled
+select name from information_schema.innodb_metrics
+where status != IF(name like "%lock%", 'enabled', 'disabled');
+name
set global innodb_monitor_disable = "%lock%";
select name, status from information_schema.innodb_metrics
where name like "%lock%";
@@ -879,1035 +254,21 @@ innodb_rwlock_x_os_waits disabled
set global innodb_monitor_enable = "%lock*";
ERROR 42000: Variable 'innodb_monitor_enable' can't be set to the value of '%lock*'
set global innodb_monitor_enable="%%%%%%%%%%%%%%%%%%%%%%%%%%%";
-select name, status from information_schema.innodb_metrics;
-name status
-metadata_table_handles_opened enabled
-metadata_table_handles_closed enabled
-metadata_table_reference_count enabled
-metadata_mem_pool_size enabled
-lock_deadlocks enabled
-lock_timeouts enabled
-lock_rec_lock_waits enabled
-lock_table_lock_waits enabled
-lock_rec_lock_requests enabled
-lock_rec_lock_created enabled
-lock_rec_lock_removed enabled
-lock_rec_locks enabled
-lock_table_lock_created enabled
-lock_table_lock_removed enabled
-lock_table_locks enabled
-lock_row_lock_current_waits enabled
-lock_row_lock_time enabled
-lock_row_lock_time_max enabled
-lock_row_lock_waits enabled
-lock_row_lock_time_avg enabled
-buffer_pool_size enabled
-buffer_pool_reads enabled
-buffer_pool_read_requests enabled
-buffer_pool_write_requests enabled
-buffer_pool_pages_in_flush enabled
-buffer_pool_wait_free enabled
-buffer_pool_read_ahead enabled
-buffer_pool_read_ahead_evicted enabled
-buffer_pool_pages_total enabled
-buffer_pool_pages_misc enabled
-buffer_pool_pages_data enabled
-buffer_pool_pages_dirty enabled
-buffer_pool_pages_free enabled
-buffer_pages_created enabled
-buffer_pages_written enabled
-buffer_pages_read enabled
-buffer_data_reads enabled
-buffer_data_written enabled
-buffer_flush_batch_scanned enabled
-buffer_flush_batch_num_scan enabled
-buffer_flush_batch_scanned_per_call enabled
-buffer_flush_batch_total_pages enabled
-buffer_flush_batches enabled
-buffer_flush_batch_pages enabled
-buffer_flush_neighbor_total_pages enabled
-buffer_flush_neighbor enabled
-buffer_flush_neighbor_pages enabled
-buffer_flush_max_dirty_total_pages enabled
-buffer_flush_max_dirty enabled
-buffer_flush_max_dirty_pages enabled
-buffer_flush_adaptive_total_pages enabled
-buffer_flush_adaptive enabled
-buffer_flush_adaptive_pages enabled
-buffer_flush_async_total_pages enabled
-buffer_flush_async enabled
-buffer_flush_async_pages enabled
-buffer_flush_sync_total_pages enabled
-buffer_flush_sync enabled
-buffer_flush_sync_pages enabled
-buffer_flush_background_total_pages enabled
-buffer_flush_background enabled
-buffer_flush_background_pages enabled
-buffer_LRU_batch_scanned enabled
-buffer_LRU_batch_num_scan enabled
-buffer_LRU_batch_scanned_per_call enabled
-buffer_LRU_batch_total_pages enabled
-buffer_LRU_batches enabled
-buffer_LRU_batch_pages enabled
-buffer_LRU_single_flush_scanned enabled
-buffer_LRU_single_flush_num_scan enabled
-buffer_LRU_single_flush_scanned_per_call enabled
-buffer_LRU_single_flush_failure_count enabled
-buffer_LRU_get_free_search enabled
-buffer_LRU_search_scanned enabled
-buffer_LRU_search_num_scan enabled
-buffer_LRU_search_scanned_per_call enabled
-buffer_LRU_unzip_search_scanned enabled
-buffer_LRU_unzip_search_num_scan enabled
-buffer_LRU_unzip_search_scanned_per_call enabled
-buffer_page_read_index_leaf enabled
-buffer_page_read_index_non_leaf enabled
-buffer_page_read_index_ibuf_leaf enabled
-buffer_page_read_index_ibuf_non_leaf enabled
-buffer_page_read_undo_log enabled
-buffer_page_read_index_inode enabled
-buffer_page_read_ibuf_free_list enabled
-buffer_page_read_ibuf_bitmap enabled
-buffer_page_read_system_page enabled
-buffer_page_read_trx_system enabled
-buffer_page_read_fsp_hdr enabled
-buffer_page_read_xdes enabled
-buffer_page_read_blob enabled
-buffer_page_read_zblob enabled
-buffer_page_read_zblob2 enabled
-buffer_page_read_other enabled
-buffer_page_written_index_leaf enabled
-buffer_page_written_index_non_leaf enabled
-buffer_page_written_index_ibuf_leaf enabled
-buffer_page_written_index_ibuf_non_leaf enabled
-buffer_page_written_undo_log enabled
-buffer_page_written_index_inode enabled
-buffer_page_written_ibuf_free_list enabled
-buffer_page_written_ibuf_bitmap enabled
-buffer_page_written_system_page enabled
-buffer_page_written_trx_system enabled
-buffer_page_written_fsp_hdr enabled
-buffer_page_written_xdes enabled
-buffer_page_written_blob enabled
-buffer_page_written_zblob enabled
-buffer_page_written_zblob2 enabled
-buffer_page_written_other enabled
-os_data_reads enabled
-os_data_writes enabled
-os_data_fsyncs enabled
-os_pending_reads enabled
-os_pending_writes enabled
-os_log_bytes_written enabled
-os_log_fsyncs enabled
-os_log_pending_fsyncs enabled
-os_log_pending_writes enabled
-trx_rw_commits enabled
-trx_ro_commits enabled
-trx_nl_ro_commits enabled
-trx_commits_insert_update enabled
-trx_rollbacks enabled
-trx_rollbacks_savepoint enabled
-trx_rollback_active enabled
-trx_active_transactions enabled
-trx_rseg_history_len enabled
-trx_undo_slots_used enabled
-trx_undo_slots_cached enabled
-trx_rseg_current_size enabled
-purge_del_mark_records enabled
-purge_upd_exist_or_extern_records enabled
-purge_invoked enabled
-purge_undo_log_pages enabled
-purge_dml_delay_usec enabled
-purge_stop_count enabled
-purge_resume_count enabled
-log_checkpoints enabled
-log_lsn_last_flush enabled
-log_lsn_last_checkpoint enabled
-log_lsn_current enabled
-log_lsn_checkpoint_age enabled
-log_lsn_buf_pool_oldest enabled
-log_max_modified_age_async enabled
-log_max_modified_age_sync enabled
-log_pending_log_writes enabled
-log_pending_checkpoint_writes enabled
-log_num_log_io enabled
-log_waits enabled
-log_write_requests enabled
-log_writes enabled
-compress_pages_compressed enabled
-compress_pages_decompressed enabled
-index_splits enabled
-index_merges enabled
-adaptive_hash_searches enabled
-adaptive_hash_searches_btree enabled
-adaptive_hash_pages_added enabled
-adaptive_hash_pages_removed enabled
-adaptive_hash_rows_added enabled
-adaptive_hash_rows_removed enabled
-adaptive_hash_rows_deleted_no_hash_entry enabled
-adaptive_hash_rows_updated enabled
-file_num_open_files enabled
-ibuf_merges_insert enabled
-ibuf_merges_delete_mark enabled
-ibuf_merges_delete enabled
-ibuf_merges_discard_insert enabled
-ibuf_merges_discard_delete_mark enabled
-ibuf_merges_discard_delete enabled
-ibuf_merges enabled
-ibuf_size enabled
-innodb_master_thread_sleeps enabled
-innodb_activity_count enabled
-innodb_master_active_loops enabled
-innodb_master_idle_loops enabled
-innodb_background_drop_table_usec enabled
-innodb_ibuf_merge_usec enabled
-innodb_log_flush_usec enabled
-innodb_mem_validate_usec enabled
-innodb_master_purge_usec enabled
-innodb_dict_lru_usec enabled
-innodb_checkpoint_usec enabled
-innodb_dblwr_writes enabled
-innodb_dblwr_pages_written enabled
-innodb_page_size enabled
-innodb_rwlock_s_spin_waits enabled
-innodb_rwlock_x_spin_waits enabled
-innodb_rwlock_s_spin_rounds enabled
-innodb_rwlock_x_spin_rounds enabled
-innodb_rwlock_s_os_waits enabled
-innodb_rwlock_x_os_waits enabled
-dml_reads enabled
-dml_inserts enabled
-dml_deletes enabled
-dml_updates enabled
-ddl_background_drop_tables enabled
-icp_attempts enabled
-icp_no_match enabled
-icp_out_of_range enabled
-icp_match enabled
+select name from information_schema.innodb_metrics where status!='enabled';
+name
set global innodb_monitor_disable="%%%%%";
-select name, status from information_schema.innodb_metrics;
-name status
-metadata_table_handles_opened disabled
-metadata_table_handles_closed disabled
-metadata_table_reference_count disabled
-metadata_mem_pool_size disabled
-lock_deadlocks disabled
-lock_timeouts disabled
-lock_rec_lock_waits disabled
-lock_table_lock_waits disabled
-lock_rec_lock_requests disabled
-lock_rec_lock_created disabled
-lock_rec_lock_removed disabled
-lock_rec_locks disabled
-lock_table_lock_created disabled
-lock_table_lock_removed disabled
-lock_table_locks disabled
-lock_row_lock_current_waits disabled
-lock_row_lock_time disabled
-lock_row_lock_time_max disabled
-lock_row_lock_waits disabled
-lock_row_lock_time_avg disabled
-buffer_pool_size disabled
-buffer_pool_reads disabled
-buffer_pool_read_requests disabled
-buffer_pool_write_requests disabled
-buffer_pool_pages_in_flush disabled
-buffer_pool_wait_free disabled
-buffer_pool_read_ahead disabled
-buffer_pool_read_ahead_evicted disabled
-buffer_pool_pages_total disabled
-buffer_pool_pages_misc disabled
-buffer_pool_pages_data disabled
-buffer_pool_pages_dirty disabled
-buffer_pool_pages_free disabled
-buffer_pages_created disabled
-buffer_pages_written disabled
-buffer_pages_read disabled
-buffer_data_reads disabled
-buffer_data_written disabled
-buffer_flush_batch_scanned disabled
-buffer_flush_batch_num_scan disabled
-buffer_flush_batch_scanned_per_call disabled
-buffer_flush_batch_total_pages disabled
-buffer_flush_batches disabled
-buffer_flush_batch_pages disabled
-buffer_flush_neighbor_total_pages disabled
-buffer_flush_neighbor disabled
-buffer_flush_neighbor_pages disabled
-buffer_flush_max_dirty_total_pages disabled
-buffer_flush_max_dirty disabled
-buffer_flush_max_dirty_pages disabled
-buffer_flush_adaptive_total_pages disabled
-buffer_flush_adaptive disabled
-buffer_flush_adaptive_pages disabled
-buffer_flush_async_total_pages disabled
-buffer_flush_async disabled
-buffer_flush_async_pages disabled
-buffer_flush_sync_total_pages disabled
-buffer_flush_sync disabled
-buffer_flush_sync_pages disabled
-buffer_flush_background_total_pages disabled
-buffer_flush_background disabled
-buffer_flush_background_pages disabled
-buffer_LRU_batch_scanned disabled
-buffer_LRU_batch_num_scan disabled
-buffer_LRU_batch_scanned_per_call disabled
-buffer_LRU_batch_total_pages disabled
-buffer_LRU_batches disabled
-buffer_LRU_batch_pages disabled
-buffer_LRU_single_flush_scanned disabled
-buffer_LRU_single_flush_num_scan disabled
-buffer_LRU_single_flush_scanned_per_call disabled
-buffer_LRU_single_flush_failure_count disabled
-buffer_LRU_get_free_search disabled
-buffer_LRU_search_scanned disabled
-buffer_LRU_search_num_scan disabled
-buffer_LRU_search_scanned_per_call disabled
-buffer_LRU_unzip_search_scanned disabled
-buffer_LRU_unzip_search_num_scan disabled
-buffer_LRU_unzip_search_scanned_per_call disabled
-buffer_page_read_index_leaf disabled
-buffer_page_read_index_non_leaf disabled
-buffer_page_read_index_ibuf_leaf disabled
-buffer_page_read_index_ibuf_non_leaf disabled
-buffer_page_read_undo_log disabled
-buffer_page_read_index_inode disabled
-buffer_page_read_ibuf_free_list disabled
-buffer_page_read_ibuf_bitmap disabled
-buffer_page_read_system_page disabled
-buffer_page_read_trx_system disabled
-buffer_page_read_fsp_hdr disabled
-buffer_page_read_xdes disabled
-buffer_page_read_blob disabled
-buffer_page_read_zblob disabled
-buffer_page_read_zblob2 disabled
-buffer_page_read_other disabled
-buffer_page_written_index_leaf disabled
-buffer_page_written_index_non_leaf disabled
-buffer_page_written_index_ibuf_leaf disabled
-buffer_page_written_index_ibuf_non_leaf disabled
-buffer_page_written_undo_log disabled
-buffer_page_written_index_inode disabled
-buffer_page_written_ibuf_free_list disabled
-buffer_page_written_ibuf_bitmap disabled
-buffer_page_written_system_page disabled
-buffer_page_written_trx_system disabled
-buffer_page_written_fsp_hdr disabled
-buffer_page_written_xdes disabled
-buffer_page_written_blob disabled
-buffer_page_written_zblob disabled
-buffer_page_written_zblob2 disabled
-buffer_page_written_other disabled
-os_data_reads disabled
-os_data_writes disabled
-os_data_fsyncs disabled
-os_pending_reads disabled
-os_pending_writes disabled
-os_log_bytes_written disabled
-os_log_fsyncs disabled
-os_log_pending_fsyncs disabled
-os_log_pending_writes disabled
-trx_rw_commits disabled
-trx_ro_commits disabled
-trx_nl_ro_commits disabled
-trx_commits_insert_update disabled
-trx_rollbacks disabled
-trx_rollbacks_savepoint disabled
-trx_rollback_active disabled
-trx_active_transactions disabled
-trx_rseg_history_len disabled
-trx_undo_slots_used disabled
-trx_undo_slots_cached disabled
-trx_rseg_current_size disabled
-purge_del_mark_records disabled
-purge_upd_exist_or_extern_records disabled
-purge_invoked disabled
-purge_undo_log_pages disabled
-purge_dml_delay_usec disabled
-purge_stop_count disabled
-purge_resume_count disabled
-log_checkpoints disabled
-log_lsn_last_flush disabled
-log_lsn_last_checkpoint disabled
-log_lsn_current disabled
-log_lsn_checkpoint_age disabled
-log_lsn_buf_pool_oldest disabled
-log_max_modified_age_async disabled
-log_max_modified_age_sync disabled
-log_pending_log_writes disabled
-log_pending_checkpoint_writes disabled
-log_num_log_io disabled
-log_waits disabled
-log_write_requests disabled
-log_writes disabled
-compress_pages_compressed disabled
-compress_pages_decompressed disabled
-index_splits disabled
-index_merges disabled
-adaptive_hash_searches disabled
-adaptive_hash_searches_btree disabled
-adaptive_hash_pages_added disabled
-adaptive_hash_pages_removed disabled
-adaptive_hash_rows_added disabled
-adaptive_hash_rows_removed disabled
-adaptive_hash_rows_deleted_no_hash_entry disabled
-adaptive_hash_rows_updated disabled
-file_num_open_files disabled
-ibuf_merges_insert disabled
-ibuf_merges_delete_mark disabled
-ibuf_merges_delete disabled
-ibuf_merges_discard_insert disabled
-ibuf_merges_discard_delete_mark disabled
-ibuf_merges_discard_delete disabled
-ibuf_merges disabled
-ibuf_size disabled
-innodb_master_thread_sleeps disabled
-innodb_activity_count disabled
-innodb_master_active_loops disabled
-innodb_master_idle_loops disabled
-innodb_background_drop_table_usec disabled
-innodb_ibuf_merge_usec disabled
-innodb_log_flush_usec disabled
-innodb_mem_validate_usec disabled
-innodb_master_purge_usec disabled
-innodb_dict_lru_usec disabled
-innodb_checkpoint_usec disabled
-innodb_dblwr_writes disabled
-innodb_dblwr_pages_written disabled
-innodb_page_size disabled
-innodb_rwlock_s_spin_waits disabled
-innodb_rwlock_x_spin_waits disabled
-innodb_rwlock_s_spin_rounds disabled
-innodb_rwlock_x_spin_rounds disabled
-innodb_rwlock_s_os_waits disabled
-innodb_rwlock_x_os_waits disabled
-dml_reads disabled
-dml_inserts disabled
-dml_deletes disabled
-dml_updates disabled
-ddl_background_drop_tables disabled
-icp_attempts disabled
-icp_no_match disabled
-icp_out_of_range disabled
-icp_match disabled
+select name from information_schema.innodb_metrics where status!='disabled';
+name
set global innodb_monitor_enable="%";
-select name, status from information_schema.innodb_metrics;
-name status
-metadata_table_handles_opened enabled
-metadata_table_handles_closed enabled
-metadata_table_reference_count enabled
-metadata_mem_pool_size enabled
-lock_deadlocks enabled
-lock_timeouts enabled
-lock_rec_lock_waits enabled
-lock_table_lock_waits enabled
-lock_rec_lock_requests enabled
-lock_rec_lock_created enabled
-lock_rec_lock_removed enabled
-lock_rec_locks enabled
-lock_table_lock_created enabled
-lock_table_lock_removed enabled
-lock_table_locks enabled
-lock_row_lock_current_waits enabled
-lock_row_lock_time enabled
-lock_row_lock_time_max enabled
-lock_row_lock_waits enabled
-lock_row_lock_time_avg enabled
-buffer_pool_size enabled
-buffer_pool_reads enabled
-buffer_pool_read_requests enabled
-buffer_pool_write_requests enabled
-buffer_pool_pages_in_flush enabled
-buffer_pool_wait_free enabled
-buffer_pool_read_ahead enabled
-buffer_pool_read_ahead_evicted enabled
-buffer_pool_pages_total enabled
-buffer_pool_pages_misc enabled
-buffer_pool_pages_data enabled
-buffer_pool_pages_dirty enabled
-buffer_pool_pages_free enabled
-buffer_pages_created enabled
-buffer_pages_written enabled
-buffer_pages_read enabled
-buffer_data_reads enabled
-buffer_data_written enabled
-buffer_flush_batch_scanned enabled
-buffer_flush_batch_num_scan enabled
-buffer_flush_batch_scanned_per_call enabled
-buffer_flush_batch_total_pages enabled
-buffer_flush_batches enabled
-buffer_flush_batch_pages enabled
-buffer_flush_neighbor_total_pages enabled
-buffer_flush_neighbor enabled
-buffer_flush_neighbor_pages enabled
-buffer_flush_max_dirty_total_pages enabled
-buffer_flush_max_dirty enabled
-buffer_flush_max_dirty_pages enabled
-buffer_flush_adaptive_total_pages enabled
-buffer_flush_adaptive enabled
-buffer_flush_adaptive_pages enabled
-buffer_flush_async_total_pages enabled
-buffer_flush_async enabled
-buffer_flush_async_pages enabled
-buffer_flush_sync_total_pages enabled
-buffer_flush_sync enabled
-buffer_flush_sync_pages enabled
-buffer_flush_background_total_pages enabled
-buffer_flush_background enabled
-buffer_flush_background_pages enabled
-buffer_LRU_batch_scanned enabled
-buffer_LRU_batch_num_scan enabled
-buffer_LRU_batch_scanned_per_call enabled
-buffer_LRU_batch_total_pages enabled
-buffer_LRU_batches enabled
-buffer_LRU_batch_pages enabled
-buffer_LRU_single_flush_scanned enabled
-buffer_LRU_single_flush_num_scan enabled
-buffer_LRU_single_flush_scanned_per_call enabled
-buffer_LRU_single_flush_failure_count enabled
-buffer_LRU_get_free_search enabled
-buffer_LRU_search_scanned enabled
-buffer_LRU_search_num_scan enabled
-buffer_LRU_search_scanned_per_call enabled
-buffer_LRU_unzip_search_scanned enabled
-buffer_LRU_unzip_search_num_scan enabled
-buffer_LRU_unzip_search_scanned_per_call enabled
-buffer_page_read_index_leaf enabled
-buffer_page_read_index_non_leaf enabled
-buffer_page_read_index_ibuf_leaf enabled
-buffer_page_read_index_ibuf_non_leaf enabled
-buffer_page_read_undo_log enabled
-buffer_page_read_index_inode enabled
-buffer_page_read_ibuf_free_list enabled
-buffer_page_read_ibuf_bitmap enabled
-buffer_page_read_system_page enabled
-buffer_page_read_trx_system enabled
-buffer_page_read_fsp_hdr enabled
-buffer_page_read_xdes enabled
-buffer_page_read_blob enabled
-buffer_page_read_zblob enabled
-buffer_page_read_zblob2 enabled
-buffer_page_read_other enabled
-buffer_page_written_index_leaf enabled
-buffer_page_written_index_non_leaf enabled
-buffer_page_written_index_ibuf_leaf enabled
-buffer_page_written_index_ibuf_non_leaf enabled
-buffer_page_written_undo_log enabled
-buffer_page_written_index_inode enabled
-buffer_page_written_ibuf_free_list enabled
-buffer_page_written_ibuf_bitmap enabled
-buffer_page_written_system_page enabled
-buffer_page_written_trx_system enabled
-buffer_page_written_fsp_hdr enabled
-buffer_page_written_xdes enabled
-buffer_page_written_blob enabled
-buffer_page_written_zblob enabled
-buffer_page_written_zblob2 enabled
-buffer_page_written_other enabled
-os_data_reads enabled
-os_data_writes enabled
-os_data_fsyncs enabled
-os_pending_reads enabled
-os_pending_writes enabled
-os_log_bytes_written enabled
-os_log_fsyncs enabled
-os_log_pending_fsyncs enabled
-os_log_pending_writes enabled
-trx_rw_commits enabled
-trx_ro_commits enabled
-trx_nl_ro_commits enabled
-trx_commits_insert_update enabled
-trx_rollbacks enabled
-trx_rollbacks_savepoint enabled
-trx_rollback_active enabled
-trx_active_transactions enabled
-trx_rseg_history_len enabled
-trx_undo_slots_used enabled
-trx_undo_slots_cached enabled
-trx_rseg_current_size enabled
-purge_del_mark_records enabled
-purge_upd_exist_or_extern_records enabled
-purge_invoked enabled
-purge_undo_log_pages enabled
-purge_dml_delay_usec enabled
-purge_stop_count enabled
-purge_resume_count enabled
-log_checkpoints enabled
-log_lsn_last_flush enabled
-log_lsn_last_checkpoint enabled
-log_lsn_current enabled
-log_lsn_checkpoint_age enabled
-log_lsn_buf_pool_oldest enabled
-log_max_modified_age_async enabled
-log_max_modified_age_sync enabled
-log_pending_log_writes enabled
-log_pending_checkpoint_writes enabled
-log_num_log_io enabled
-log_waits enabled
-log_write_requests enabled
-log_writes enabled
-compress_pages_compressed enabled
-compress_pages_decompressed enabled
-index_splits enabled
-index_merges enabled
-adaptive_hash_searches enabled
-adaptive_hash_searches_btree enabled
-adaptive_hash_pages_added enabled
-adaptive_hash_pages_removed enabled
-adaptive_hash_rows_added enabled
-adaptive_hash_rows_removed enabled
-adaptive_hash_rows_deleted_no_hash_entry enabled
-adaptive_hash_rows_updated enabled
-file_num_open_files enabled
-ibuf_merges_insert enabled
-ibuf_merges_delete_mark enabled
-ibuf_merges_delete enabled
-ibuf_merges_discard_insert enabled
-ibuf_merges_discard_delete_mark enabled
-ibuf_merges_discard_delete enabled
-ibuf_merges enabled
-ibuf_size enabled
-innodb_master_thread_sleeps enabled
-innodb_activity_count enabled
-innodb_master_active_loops enabled
-innodb_master_idle_loops enabled
-innodb_background_drop_table_usec enabled
-innodb_ibuf_merge_usec enabled
-innodb_log_flush_usec enabled
-innodb_mem_validate_usec enabled
-innodb_master_purge_usec enabled
-innodb_dict_lru_usec enabled
-innodb_checkpoint_usec enabled
-innodb_dblwr_writes enabled
-innodb_dblwr_pages_written enabled
-innodb_page_size enabled
-innodb_rwlock_s_spin_waits enabled
-innodb_rwlock_x_spin_waits enabled
-innodb_rwlock_s_spin_rounds enabled
-innodb_rwlock_x_spin_rounds enabled
-innodb_rwlock_s_os_waits enabled
-innodb_rwlock_x_os_waits enabled
-dml_reads enabled
-dml_inserts enabled
-dml_deletes enabled
-dml_updates enabled
-ddl_background_drop_tables enabled
-icp_attempts enabled
-icp_no_match enabled
-icp_out_of_range enabled
-icp_match enabled
+select name from information_schema.innodb_metrics where status!='enabled';
+name
set global innodb_monitor_disable="%_%";
-select name, status from information_schema.innodb_metrics;
-name status
-metadata_table_handles_opened disabled
-metadata_table_handles_closed disabled
-metadata_table_reference_count disabled
-metadata_mem_pool_size disabled
-lock_deadlocks disabled
-lock_timeouts disabled
-lock_rec_lock_waits disabled
-lock_table_lock_waits disabled
-lock_rec_lock_requests disabled
-lock_rec_lock_created disabled
-lock_rec_lock_removed disabled
-lock_rec_locks disabled
-lock_table_lock_created disabled
-lock_table_lock_removed disabled
-lock_table_locks disabled
-lock_row_lock_current_waits disabled
-lock_row_lock_time disabled
-lock_row_lock_time_max disabled
-lock_row_lock_waits disabled
-lock_row_lock_time_avg disabled
-buffer_pool_size disabled
-buffer_pool_reads disabled
-buffer_pool_read_requests disabled
-buffer_pool_write_requests disabled
-buffer_pool_pages_in_flush disabled
-buffer_pool_wait_free disabled
-buffer_pool_read_ahead disabled
-buffer_pool_read_ahead_evicted disabled
-buffer_pool_pages_total disabled
-buffer_pool_pages_misc disabled
-buffer_pool_pages_data disabled
-buffer_pool_pages_dirty disabled
-buffer_pool_pages_free disabled
-buffer_pages_created disabled
-buffer_pages_written disabled
-buffer_pages_read disabled
-buffer_data_reads disabled
-buffer_data_written disabled
-buffer_flush_batch_scanned disabled
-buffer_flush_batch_num_scan disabled
-buffer_flush_batch_scanned_per_call disabled
-buffer_flush_batch_total_pages disabled
-buffer_flush_batches disabled
-buffer_flush_batch_pages disabled
-buffer_flush_neighbor_total_pages disabled
-buffer_flush_neighbor disabled
-buffer_flush_neighbor_pages disabled
-buffer_flush_max_dirty_total_pages disabled
-buffer_flush_max_dirty disabled
-buffer_flush_max_dirty_pages disabled
-buffer_flush_adaptive_total_pages disabled
-buffer_flush_adaptive disabled
-buffer_flush_adaptive_pages disabled
-buffer_flush_async_total_pages disabled
-buffer_flush_async disabled
-buffer_flush_async_pages disabled
-buffer_flush_sync_total_pages disabled
-buffer_flush_sync disabled
-buffer_flush_sync_pages disabled
-buffer_flush_background_total_pages disabled
-buffer_flush_background disabled
-buffer_flush_background_pages disabled
-buffer_LRU_batch_scanned disabled
-buffer_LRU_batch_num_scan disabled
-buffer_LRU_batch_scanned_per_call disabled
-buffer_LRU_batch_total_pages disabled
-buffer_LRU_batches disabled
-buffer_LRU_batch_pages disabled
-buffer_LRU_single_flush_scanned disabled
-buffer_LRU_single_flush_num_scan disabled
-buffer_LRU_single_flush_scanned_per_call disabled
-buffer_LRU_single_flush_failure_count disabled
-buffer_LRU_get_free_search disabled
-buffer_LRU_search_scanned disabled
-buffer_LRU_search_num_scan disabled
-buffer_LRU_search_scanned_per_call disabled
-buffer_LRU_unzip_search_scanned disabled
-buffer_LRU_unzip_search_num_scan disabled
-buffer_LRU_unzip_search_scanned_per_call disabled
-buffer_page_read_index_leaf disabled
-buffer_page_read_index_non_leaf disabled
-buffer_page_read_index_ibuf_leaf disabled
-buffer_page_read_index_ibuf_non_leaf disabled
-buffer_page_read_undo_log disabled
-buffer_page_read_index_inode disabled
-buffer_page_read_ibuf_free_list disabled
-buffer_page_read_ibuf_bitmap disabled
-buffer_page_read_system_page disabled
-buffer_page_read_trx_system disabled
-buffer_page_read_fsp_hdr disabled
-buffer_page_read_xdes disabled
-buffer_page_read_blob disabled
-buffer_page_read_zblob disabled
-buffer_page_read_zblob2 disabled
-buffer_page_read_other disabled
-buffer_page_written_index_leaf disabled
-buffer_page_written_index_non_leaf disabled
-buffer_page_written_index_ibuf_leaf disabled
-buffer_page_written_index_ibuf_non_leaf disabled
-buffer_page_written_undo_log disabled
-buffer_page_written_index_inode disabled
-buffer_page_written_ibuf_free_list disabled
-buffer_page_written_ibuf_bitmap disabled
-buffer_page_written_system_page disabled
-buffer_page_written_trx_system disabled
-buffer_page_written_fsp_hdr disabled
-buffer_page_written_xdes disabled
-buffer_page_written_blob disabled
-buffer_page_written_zblob disabled
-buffer_page_written_zblob2 disabled
-buffer_page_written_other disabled
-os_data_reads disabled
-os_data_writes disabled
-os_data_fsyncs disabled
-os_pending_reads disabled
-os_pending_writes disabled
-os_log_bytes_written disabled
-os_log_fsyncs disabled
-os_log_pending_fsyncs disabled
-os_log_pending_writes disabled
-trx_rw_commits disabled
-trx_ro_commits disabled
-trx_nl_ro_commits disabled
-trx_commits_insert_update disabled
-trx_rollbacks disabled
-trx_rollbacks_savepoint disabled
-trx_rollback_active disabled
-trx_active_transactions disabled
-trx_rseg_history_len disabled
-trx_undo_slots_used disabled
-trx_undo_slots_cached disabled
-trx_rseg_current_size disabled
-purge_del_mark_records disabled
-purge_upd_exist_or_extern_records disabled
-purge_invoked disabled
-purge_undo_log_pages disabled
-purge_dml_delay_usec disabled
-purge_stop_count disabled
-purge_resume_count disabled
-log_checkpoints disabled
-log_lsn_last_flush disabled
-log_lsn_last_checkpoint disabled
-log_lsn_current disabled
-log_lsn_checkpoint_age disabled
-log_lsn_buf_pool_oldest disabled
-log_max_modified_age_async disabled
-log_max_modified_age_sync disabled
-log_pending_log_writes disabled
-log_pending_checkpoint_writes disabled
-log_num_log_io disabled
-log_waits disabled
-log_write_requests disabled
-log_writes disabled
-compress_pages_compressed disabled
-compress_pages_decompressed disabled
-index_splits disabled
-index_merges disabled
-adaptive_hash_searches disabled
-adaptive_hash_searches_btree disabled
-adaptive_hash_pages_added disabled
-adaptive_hash_pages_removed disabled
-adaptive_hash_rows_added disabled
-adaptive_hash_rows_removed disabled
-adaptive_hash_rows_deleted_no_hash_entry disabled
-adaptive_hash_rows_updated disabled
-file_num_open_files disabled
-ibuf_merges_insert disabled
-ibuf_merges_delete_mark disabled
-ibuf_merges_delete disabled
-ibuf_merges_discard_insert disabled
-ibuf_merges_discard_delete_mark disabled
-ibuf_merges_discard_delete disabled
-ibuf_merges disabled
-ibuf_size disabled
-innodb_master_thread_sleeps disabled
-innodb_activity_count disabled
-innodb_master_active_loops disabled
-innodb_master_idle_loops disabled
-innodb_background_drop_table_usec disabled
-innodb_ibuf_merge_usec disabled
-innodb_log_flush_usec disabled
-innodb_mem_validate_usec disabled
-innodb_master_purge_usec disabled
-innodb_dict_lru_usec disabled
-innodb_checkpoint_usec disabled
-innodb_dblwr_writes disabled
-innodb_dblwr_pages_written disabled
-innodb_page_size disabled
-innodb_rwlock_s_spin_waits disabled
-innodb_rwlock_x_spin_waits disabled
-innodb_rwlock_s_spin_rounds disabled
-innodb_rwlock_x_spin_rounds disabled
-innodb_rwlock_s_os_waits disabled
-innodb_rwlock_x_os_waits disabled
-dml_reads disabled
-dml_inserts disabled
-dml_deletes disabled
-dml_updates disabled
-ddl_background_drop_tables disabled
-icp_attempts disabled
-icp_no_match disabled
-icp_out_of_range disabled
-icp_match disabled
+select name from information_schema.innodb_metrics where status!='disabled';
+name
set global innodb_monitor_enable="log%%%%";
-select name, status from information_schema.innodb_metrics;
-name status
-metadata_table_handles_opened disabled
-metadata_table_handles_closed disabled
-metadata_table_reference_count disabled
-metadata_mem_pool_size disabled
-lock_deadlocks disabled
-lock_timeouts disabled
-lock_rec_lock_waits disabled
-lock_table_lock_waits disabled
-lock_rec_lock_requests disabled
-lock_rec_lock_created disabled
-lock_rec_lock_removed disabled
-lock_rec_locks disabled
-lock_table_lock_created disabled
-lock_table_lock_removed disabled
-lock_table_locks disabled
-lock_row_lock_current_waits disabled
-lock_row_lock_time disabled
-lock_row_lock_time_max disabled
-lock_row_lock_waits disabled
-lock_row_lock_time_avg disabled
-buffer_pool_size disabled
-buffer_pool_reads disabled
-buffer_pool_read_requests disabled
-buffer_pool_write_requests disabled
-buffer_pool_pages_in_flush disabled
-buffer_pool_wait_free disabled
-buffer_pool_read_ahead disabled
-buffer_pool_read_ahead_evicted disabled
-buffer_pool_pages_total disabled
-buffer_pool_pages_misc disabled
-buffer_pool_pages_data disabled
-buffer_pool_pages_dirty disabled
-buffer_pool_pages_free disabled
-buffer_pages_created disabled
-buffer_pages_written disabled
-buffer_pages_read disabled
-buffer_data_reads disabled
-buffer_data_written disabled
-buffer_flush_batch_scanned disabled
-buffer_flush_batch_num_scan disabled
-buffer_flush_batch_scanned_per_call disabled
-buffer_flush_batch_total_pages disabled
-buffer_flush_batches disabled
-buffer_flush_batch_pages disabled
-buffer_flush_neighbor_total_pages disabled
-buffer_flush_neighbor disabled
-buffer_flush_neighbor_pages disabled
-buffer_flush_max_dirty_total_pages disabled
-buffer_flush_max_dirty disabled
-buffer_flush_max_dirty_pages disabled
-buffer_flush_adaptive_total_pages disabled
-buffer_flush_adaptive disabled
-buffer_flush_adaptive_pages disabled
-buffer_flush_async_total_pages disabled
-buffer_flush_async disabled
-buffer_flush_async_pages disabled
-buffer_flush_sync_total_pages disabled
-buffer_flush_sync disabled
-buffer_flush_sync_pages disabled
-buffer_flush_background_total_pages disabled
-buffer_flush_background disabled
-buffer_flush_background_pages disabled
-buffer_LRU_batch_scanned disabled
-buffer_LRU_batch_num_scan disabled
-buffer_LRU_batch_scanned_per_call disabled
-buffer_LRU_batch_total_pages disabled
-buffer_LRU_batches disabled
-buffer_LRU_batch_pages disabled
-buffer_LRU_single_flush_scanned disabled
-buffer_LRU_single_flush_num_scan disabled
-buffer_LRU_single_flush_scanned_per_call disabled
-buffer_LRU_single_flush_failure_count disabled
-buffer_LRU_get_free_search disabled
-buffer_LRU_search_scanned disabled
-buffer_LRU_search_num_scan disabled
-buffer_LRU_search_scanned_per_call disabled
-buffer_LRU_unzip_search_scanned disabled
-buffer_LRU_unzip_search_num_scan disabled
-buffer_LRU_unzip_search_scanned_per_call disabled
-buffer_page_read_index_leaf disabled
-buffer_page_read_index_non_leaf disabled
-buffer_page_read_index_ibuf_leaf disabled
-buffer_page_read_index_ibuf_non_leaf disabled
-buffer_page_read_undo_log disabled
-buffer_page_read_index_inode disabled
-buffer_page_read_ibuf_free_list disabled
-buffer_page_read_ibuf_bitmap disabled
-buffer_page_read_system_page disabled
-buffer_page_read_trx_system disabled
-buffer_page_read_fsp_hdr disabled
-buffer_page_read_xdes disabled
-buffer_page_read_blob disabled
-buffer_page_read_zblob disabled
-buffer_page_read_zblob2 disabled
-buffer_page_read_other disabled
-buffer_page_written_index_leaf disabled
-buffer_page_written_index_non_leaf disabled
-buffer_page_written_index_ibuf_leaf disabled
-buffer_page_written_index_ibuf_non_leaf disabled
-buffer_page_written_undo_log disabled
-buffer_page_written_index_inode disabled
-buffer_page_written_ibuf_free_list disabled
-buffer_page_written_ibuf_bitmap disabled
-buffer_page_written_system_page disabled
-buffer_page_written_trx_system disabled
-buffer_page_written_fsp_hdr disabled
-buffer_page_written_xdes disabled
-buffer_page_written_blob disabled
-buffer_page_written_zblob disabled
-buffer_page_written_zblob2 disabled
-buffer_page_written_other disabled
-os_data_reads disabled
-os_data_writes disabled
-os_data_fsyncs disabled
-os_pending_reads disabled
-os_pending_writes disabled
-os_log_bytes_written disabled
-os_log_fsyncs disabled
-os_log_pending_fsyncs disabled
-os_log_pending_writes disabled
-trx_rw_commits disabled
-trx_ro_commits disabled
-trx_nl_ro_commits disabled
-trx_commits_insert_update disabled
-trx_rollbacks disabled
-trx_rollbacks_savepoint disabled
-trx_rollback_active disabled
-trx_active_transactions disabled
-trx_rseg_history_len disabled
-trx_undo_slots_used disabled
-trx_undo_slots_cached disabled
-trx_rseg_current_size disabled
-purge_del_mark_records disabled
-purge_upd_exist_or_extern_records disabled
-purge_invoked disabled
-purge_undo_log_pages disabled
-purge_dml_delay_usec disabled
-purge_stop_count disabled
-purge_resume_count disabled
-log_checkpoints enabled
-log_lsn_last_flush enabled
-log_lsn_last_checkpoint enabled
-log_lsn_current enabled
-log_lsn_checkpoint_age enabled
-log_lsn_buf_pool_oldest enabled
-log_max_modified_age_async enabled
-log_max_modified_age_sync enabled
-log_pending_log_writes enabled
-log_pending_checkpoint_writes enabled
-log_num_log_io enabled
-log_waits enabled
-log_write_requests enabled
-log_writes enabled
-compress_pages_compressed disabled
-compress_pages_decompressed disabled
-index_splits disabled
-index_merges disabled
-adaptive_hash_searches disabled
-adaptive_hash_searches_btree disabled
-adaptive_hash_pages_added disabled
-adaptive_hash_pages_removed disabled
-adaptive_hash_rows_added disabled
-adaptive_hash_rows_removed disabled
-adaptive_hash_rows_deleted_no_hash_entry disabled
-adaptive_hash_rows_updated disabled
-file_num_open_files disabled
-ibuf_merges_insert disabled
-ibuf_merges_delete_mark disabled
-ibuf_merges_delete disabled
-ibuf_merges_discard_insert disabled
-ibuf_merges_discard_delete_mark disabled
-ibuf_merges_discard_delete disabled
-ibuf_merges disabled
-ibuf_size disabled
-innodb_master_thread_sleeps disabled
-innodb_activity_count disabled
-innodb_master_active_loops disabled
-innodb_master_idle_loops disabled
-innodb_background_drop_table_usec disabled
-innodb_ibuf_merge_usec disabled
-innodb_log_flush_usec disabled
-innodb_mem_validate_usec disabled
-innodb_master_purge_usec disabled
-innodb_dict_lru_usec disabled
-innodb_checkpoint_usec disabled
-innodb_dblwr_writes disabled
-innodb_dblwr_pages_written disabled
-innodb_page_size disabled
-innodb_rwlock_s_spin_waits disabled
-innodb_rwlock_x_spin_waits disabled
-innodb_rwlock_s_spin_rounds disabled
-innodb_rwlock_x_spin_rounds disabled
-innodb_rwlock_s_os_waits disabled
-innodb_rwlock_x_os_waits disabled
-dml_reads disabled
-dml_inserts disabled
-dml_deletes disabled
-dml_updates disabled
-ddl_background_drop_tables disabled
-icp_attempts disabled
-icp_no_match disabled
-icp_out_of_range disabled
-icp_match disabled
+select name from information_schema.innodb_metrics
+where status != IF(name like "log%", 'enabled', 'disabled');
+name
set global innodb_monitor_enable="os_%a_fs_ncs";
set global innodb_monitor_enable="os%pending%";
select name, status from information_schema.innodb_metrics
@@ -1989,7 +350,7 @@ name max_count min_count count max_count_reset min_count_reset count_reset statu
metadata_table_handles_opened NULL NULL 0 NULL NULL 0 disabled
set global innodb_monitor_enable = metadata_table_handles_opened;
drop table monitor_test;
-create table monitor_test(col int) engine = innodb;
+create table monitor_test(col int) engine = innodb stats_persistent=0;
select * from monitor_test;
col
select name, max_count, min_count, count,
@@ -2146,7 +507,7 @@ max_count_reset, min_count_reset, count_reset, status
from information_schema.innodb_metrics
where name like "file_num_open_files";
name max_count min_count count max_count_reset min_count_reset count_reset status
-file_num_open_files 3 3 3 3 3 3 enabled
+file_num_open_files # # # # # # enabled
set global innodb_monitor_disable = file_num_open_files;
set global innodb_monitor_enable = "icp%";
create table monitor_test(a char(3), b int, c char(2),
diff --git a/mysql-test/suite/sys_vars/r/innodb_monitor_reset_basic.result b/mysql-test/suite/sys_vars/r/innodb_monitor_reset_basic.result
index 8bb508c877e..7a7c0a6b6a2 100644
--- a/mysql-test/suite/sys_vars/r/innodb_monitor_reset_basic.result
+++ b/mysql-test/suite/sys_vars/r/innodb_monitor_reset_basic.result
@@ -25,14 +25,15 @@ buffer_pool_size disabled
buffer_pool_reads disabled
buffer_pool_read_requests disabled
buffer_pool_write_requests disabled
-buffer_pool_pages_in_flush disabled
buffer_pool_wait_free disabled
buffer_pool_read_ahead disabled
buffer_pool_read_ahead_evicted disabled
buffer_pool_pages_total disabled
buffer_pool_pages_misc disabled
buffer_pool_pages_data disabled
+buffer_pool_bytes_data disabled
buffer_pool_pages_dirty disabled
+buffer_pool_bytes_dirty disabled
buffer_pool_pages_free disabled
buffer_pages_created disabled
buffer_pages_written disabled
@@ -48,15 +49,15 @@ buffer_flush_batch_pages disabled
buffer_flush_neighbor_total_pages disabled
buffer_flush_neighbor disabled
buffer_flush_neighbor_pages disabled
-buffer_flush_max_dirty_total_pages disabled
-buffer_flush_max_dirty disabled
-buffer_flush_max_dirty_pages disabled
+buffer_flush_n_to_flush_requested disabled
+buffer_flush_avg_page_rate disabled
+buffer_flush_lsn_avg_rate disabled
+buffer_flush_pct_for_dirty disabled
+buffer_flush_pct_for_lsn disabled
+buffer_flush_sync_waits disabled
buffer_flush_adaptive_total_pages disabled
buffer_flush_adaptive disabled
buffer_flush_adaptive_pages disabled
-buffer_flush_async_total_pages disabled
-buffer_flush_async disabled
-buffer_flush_async_pages disabled
buffer_flush_sync_total_pages disabled
buffer_flush_sync disabled
buffer_flush_sync_pages disabled
@@ -156,6 +157,8 @@ log_write_requests disabled
log_writes disabled
compress_pages_compressed disabled
compress_pages_decompressed disabled
+compression_pad_increments disabled
+compression_pad_decrements disabled
index_splits disabled
index_merges disabled
adaptive_hash_searches disabled
@@ -199,657 +202,29 @@ dml_reads disabled
dml_inserts disabled
dml_deletes disabled
dml_updates disabled
+ddl_background_drop_indexes disabled
ddl_background_drop_tables disabled
+ddl_online_create_index disabled
+ddl_pending_alter_table disabled
icp_attempts disabled
icp_no_match disabled
icp_out_of_range disabled
icp_match disabled
set global innodb_monitor_enable = all;
-select name, status from information_schema.innodb_metrics;
-name status
-metadata_table_handles_opened enabled
-metadata_table_handles_closed enabled
-metadata_table_reference_count enabled
-metadata_mem_pool_size enabled
-lock_deadlocks enabled
-lock_timeouts enabled
-lock_rec_lock_waits enabled
-lock_table_lock_waits enabled
-lock_rec_lock_requests enabled
-lock_rec_lock_created enabled
-lock_rec_lock_removed enabled
-lock_rec_locks enabled
-lock_table_lock_created enabled
-lock_table_lock_removed enabled
-lock_table_locks enabled
-lock_row_lock_current_waits enabled
-lock_row_lock_time enabled
-lock_row_lock_time_max enabled
-lock_row_lock_waits enabled
-lock_row_lock_time_avg enabled
-buffer_pool_size enabled
-buffer_pool_reads enabled
-buffer_pool_read_requests enabled
-buffer_pool_write_requests enabled
-buffer_pool_pages_in_flush enabled
-buffer_pool_wait_free enabled
-buffer_pool_read_ahead enabled
-buffer_pool_read_ahead_evicted enabled
-buffer_pool_pages_total enabled
-buffer_pool_pages_misc enabled
-buffer_pool_pages_data enabled
-buffer_pool_pages_dirty enabled
-buffer_pool_pages_free enabled
-buffer_pages_created enabled
-buffer_pages_written enabled
-buffer_pages_read enabled
-buffer_data_reads enabled
-buffer_data_written enabled
-buffer_flush_batch_scanned enabled
-buffer_flush_batch_num_scan enabled
-buffer_flush_batch_scanned_per_call enabled
-buffer_flush_batch_total_pages enabled
-buffer_flush_batches enabled
-buffer_flush_batch_pages enabled
-buffer_flush_neighbor_total_pages enabled
-buffer_flush_neighbor enabled
-buffer_flush_neighbor_pages enabled
-buffer_flush_max_dirty_total_pages enabled
-buffer_flush_max_dirty enabled
-buffer_flush_max_dirty_pages enabled
-buffer_flush_adaptive_total_pages enabled
-buffer_flush_adaptive enabled
-buffer_flush_adaptive_pages enabled
-buffer_flush_async_total_pages enabled
-buffer_flush_async enabled
-buffer_flush_async_pages enabled
-buffer_flush_sync_total_pages enabled
-buffer_flush_sync enabled
-buffer_flush_sync_pages enabled
-buffer_flush_background_total_pages enabled
-buffer_flush_background enabled
-buffer_flush_background_pages enabled
-buffer_LRU_batch_scanned enabled
-buffer_LRU_batch_num_scan enabled
-buffer_LRU_batch_scanned_per_call enabled
-buffer_LRU_batch_total_pages enabled
-buffer_LRU_batches enabled
-buffer_LRU_batch_pages enabled
-buffer_LRU_single_flush_scanned enabled
-buffer_LRU_single_flush_num_scan enabled
-buffer_LRU_single_flush_scanned_per_call enabled
-buffer_LRU_single_flush_failure_count enabled
-buffer_LRU_get_free_search enabled
-buffer_LRU_search_scanned enabled
-buffer_LRU_search_num_scan enabled
-buffer_LRU_search_scanned_per_call enabled
-buffer_LRU_unzip_search_scanned enabled
-buffer_LRU_unzip_search_num_scan enabled
-buffer_LRU_unzip_search_scanned_per_call enabled
-buffer_page_read_index_leaf enabled
-buffer_page_read_index_non_leaf enabled
-buffer_page_read_index_ibuf_leaf enabled
-buffer_page_read_index_ibuf_non_leaf enabled
-buffer_page_read_undo_log enabled
-buffer_page_read_index_inode enabled
-buffer_page_read_ibuf_free_list enabled
-buffer_page_read_ibuf_bitmap enabled
-buffer_page_read_system_page enabled
-buffer_page_read_trx_system enabled
-buffer_page_read_fsp_hdr enabled
-buffer_page_read_xdes enabled
-buffer_page_read_blob enabled
-buffer_page_read_zblob enabled
-buffer_page_read_zblob2 enabled
-buffer_page_read_other enabled
-buffer_page_written_index_leaf enabled
-buffer_page_written_index_non_leaf enabled
-buffer_page_written_index_ibuf_leaf enabled
-buffer_page_written_index_ibuf_non_leaf enabled
-buffer_page_written_undo_log enabled
-buffer_page_written_index_inode enabled
-buffer_page_written_ibuf_free_list enabled
-buffer_page_written_ibuf_bitmap enabled
-buffer_page_written_system_page enabled
-buffer_page_written_trx_system enabled
-buffer_page_written_fsp_hdr enabled
-buffer_page_written_xdes enabled
-buffer_page_written_blob enabled
-buffer_page_written_zblob enabled
-buffer_page_written_zblob2 enabled
-buffer_page_written_other enabled
-os_data_reads enabled
-os_data_writes enabled
-os_data_fsyncs enabled
-os_pending_reads enabled
-os_pending_writes enabled
-os_log_bytes_written enabled
-os_log_fsyncs enabled
-os_log_pending_fsyncs enabled
-os_log_pending_writes enabled
-trx_rw_commits enabled
-trx_ro_commits enabled
-trx_nl_ro_commits enabled
-trx_commits_insert_update enabled
-trx_rollbacks enabled
-trx_rollbacks_savepoint enabled
-trx_rollback_active enabled
-trx_active_transactions enabled
-trx_rseg_history_len enabled
-trx_undo_slots_used enabled
-trx_undo_slots_cached enabled
-trx_rseg_current_size enabled
-purge_del_mark_records enabled
-purge_upd_exist_or_extern_records enabled
-purge_invoked enabled
-purge_undo_log_pages enabled
-purge_dml_delay_usec enabled
-purge_stop_count enabled
-purge_resume_count enabled
-log_checkpoints enabled
-log_lsn_last_flush enabled
-log_lsn_last_checkpoint enabled
-log_lsn_current enabled
-log_lsn_checkpoint_age enabled
-log_lsn_buf_pool_oldest enabled
-log_max_modified_age_async enabled
-log_max_modified_age_sync enabled
-log_pending_log_writes enabled
-log_pending_checkpoint_writes enabled
-log_num_log_io enabled
-log_waits enabled
-log_write_requests enabled
-log_writes enabled
-compress_pages_compressed enabled
-compress_pages_decompressed enabled
-index_splits enabled
-index_merges enabled
-adaptive_hash_searches enabled
-adaptive_hash_searches_btree enabled
-adaptive_hash_pages_added enabled
-adaptive_hash_pages_removed enabled
-adaptive_hash_rows_added enabled
-adaptive_hash_rows_removed enabled
-adaptive_hash_rows_deleted_no_hash_entry enabled
-adaptive_hash_rows_updated enabled
-file_num_open_files enabled
-ibuf_merges_insert enabled
-ibuf_merges_delete_mark enabled
-ibuf_merges_delete enabled
-ibuf_merges_discard_insert enabled
-ibuf_merges_discard_delete_mark enabled
-ibuf_merges_discard_delete enabled
-ibuf_merges enabled
-ibuf_size enabled
-innodb_master_thread_sleeps enabled
-innodb_activity_count enabled
-innodb_master_active_loops enabled
-innodb_master_idle_loops enabled
-innodb_background_drop_table_usec enabled
-innodb_ibuf_merge_usec enabled
-innodb_log_flush_usec enabled
-innodb_mem_validate_usec enabled
-innodb_master_purge_usec enabled
-innodb_dict_lru_usec enabled
-innodb_checkpoint_usec enabled
-innodb_dblwr_writes enabled
-innodb_dblwr_pages_written enabled
-innodb_page_size enabled
-innodb_rwlock_s_spin_waits enabled
-innodb_rwlock_x_spin_waits enabled
-innodb_rwlock_s_spin_rounds enabled
-innodb_rwlock_x_spin_rounds enabled
-innodb_rwlock_s_os_waits enabled
-innodb_rwlock_x_os_waits enabled
-dml_reads enabled
-dml_inserts enabled
-dml_deletes enabled
-dml_updates enabled
-ddl_background_drop_tables enabled
-icp_attempts enabled
-icp_no_match enabled
-icp_out_of_range enabled
-icp_match enabled
+select name from information_schema.innodb_metrics where status!='enabled';
+name
set global innodb_monitor_enable = aaa;
ERROR 42000: Variable 'innodb_monitor_enable' can't be set to the value of 'aaa'
set global innodb_monitor_disable = All;
-select name, status from information_schema.innodb_metrics;
-name status
-metadata_table_handles_opened disabled
-metadata_table_handles_closed disabled
-metadata_table_reference_count disabled
-metadata_mem_pool_size disabled
-lock_deadlocks disabled
-lock_timeouts disabled
-lock_rec_lock_waits disabled
-lock_table_lock_waits disabled
-lock_rec_lock_requests disabled
-lock_rec_lock_created disabled
-lock_rec_lock_removed disabled
-lock_rec_locks disabled
-lock_table_lock_created disabled
-lock_table_lock_removed disabled
-lock_table_locks disabled
-lock_row_lock_current_waits disabled
-lock_row_lock_time disabled
-lock_row_lock_time_max disabled
-lock_row_lock_waits disabled
-lock_row_lock_time_avg disabled
-buffer_pool_size disabled
-buffer_pool_reads disabled
-buffer_pool_read_requests disabled
-buffer_pool_write_requests disabled
-buffer_pool_pages_in_flush disabled
-buffer_pool_wait_free disabled
-buffer_pool_read_ahead disabled
-buffer_pool_read_ahead_evicted disabled
-buffer_pool_pages_total disabled
-buffer_pool_pages_misc disabled
-buffer_pool_pages_data disabled
-buffer_pool_pages_dirty disabled
-buffer_pool_pages_free disabled
-buffer_pages_created disabled
-buffer_pages_written disabled
-buffer_pages_read disabled
-buffer_data_reads disabled
-buffer_data_written disabled
-buffer_flush_batch_scanned disabled
-buffer_flush_batch_num_scan disabled
-buffer_flush_batch_scanned_per_call disabled
-buffer_flush_batch_total_pages disabled
-buffer_flush_batches disabled
-buffer_flush_batch_pages disabled
-buffer_flush_neighbor_total_pages disabled
-buffer_flush_neighbor disabled
-buffer_flush_neighbor_pages disabled
-buffer_flush_max_dirty_total_pages disabled
-buffer_flush_max_dirty disabled
-buffer_flush_max_dirty_pages disabled
-buffer_flush_adaptive_total_pages disabled
-buffer_flush_adaptive disabled
-buffer_flush_adaptive_pages disabled
-buffer_flush_async_total_pages disabled
-buffer_flush_async disabled
-buffer_flush_async_pages disabled
-buffer_flush_sync_total_pages disabled
-buffer_flush_sync disabled
-buffer_flush_sync_pages disabled
-buffer_flush_background_total_pages disabled
-buffer_flush_background disabled
-buffer_flush_background_pages disabled
-buffer_LRU_batch_scanned disabled
-buffer_LRU_batch_num_scan disabled
-buffer_LRU_batch_scanned_per_call disabled
-buffer_LRU_batch_total_pages disabled
-buffer_LRU_batches disabled
-buffer_LRU_batch_pages disabled
-buffer_LRU_single_flush_scanned disabled
-buffer_LRU_single_flush_num_scan disabled
-buffer_LRU_single_flush_scanned_per_call disabled
-buffer_LRU_single_flush_failure_count disabled
-buffer_LRU_get_free_search disabled
-buffer_LRU_search_scanned disabled
-buffer_LRU_search_num_scan disabled
-buffer_LRU_search_scanned_per_call disabled
-buffer_LRU_unzip_search_scanned disabled
-buffer_LRU_unzip_search_num_scan disabled
-buffer_LRU_unzip_search_scanned_per_call disabled
-buffer_page_read_index_leaf disabled
-buffer_page_read_index_non_leaf disabled
-buffer_page_read_index_ibuf_leaf disabled
-buffer_page_read_index_ibuf_non_leaf disabled
-buffer_page_read_undo_log disabled
-buffer_page_read_index_inode disabled
-buffer_page_read_ibuf_free_list disabled
-buffer_page_read_ibuf_bitmap disabled
-buffer_page_read_system_page disabled
-buffer_page_read_trx_system disabled
-buffer_page_read_fsp_hdr disabled
-buffer_page_read_xdes disabled
-buffer_page_read_blob disabled
-buffer_page_read_zblob disabled
-buffer_page_read_zblob2 disabled
-buffer_page_read_other disabled
-buffer_page_written_index_leaf disabled
-buffer_page_written_index_non_leaf disabled
-buffer_page_written_index_ibuf_leaf disabled
-buffer_page_written_index_ibuf_non_leaf disabled
-buffer_page_written_undo_log disabled
-buffer_page_written_index_inode disabled
-buffer_page_written_ibuf_free_list disabled
-buffer_page_written_ibuf_bitmap disabled
-buffer_page_written_system_page disabled
-buffer_page_written_trx_system disabled
-buffer_page_written_fsp_hdr disabled
-buffer_page_written_xdes disabled
-buffer_page_written_blob disabled
-buffer_page_written_zblob disabled
-buffer_page_written_zblob2 disabled
-buffer_page_written_other disabled
-os_data_reads disabled
-os_data_writes disabled
-os_data_fsyncs disabled
-os_pending_reads disabled
-os_pending_writes disabled
-os_log_bytes_written disabled
-os_log_fsyncs disabled
-os_log_pending_fsyncs disabled
-os_log_pending_writes disabled
-trx_rw_commits disabled
-trx_ro_commits disabled
-trx_nl_ro_commits disabled
-trx_commits_insert_update disabled
-trx_rollbacks disabled
-trx_rollbacks_savepoint disabled
-trx_rollback_active disabled
-trx_active_transactions disabled
-trx_rseg_history_len disabled
-trx_undo_slots_used disabled
-trx_undo_slots_cached disabled
-trx_rseg_current_size disabled
-purge_del_mark_records disabled
-purge_upd_exist_or_extern_records disabled
-purge_invoked disabled
-purge_undo_log_pages disabled
-purge_dml_delay_usec disabled
-purge_stop_count disabled
-purge_resume_count disabled
-log_checkpoints disabled
-log_lsn_last_flush disabled
-log_lsn_last_checkpoint disabled
-log_lsn_current disabled
-log_lsn_checkpoint_age disabled
-log_lsn_buf_pool_oldest disabled
-log_max_modified_age_async disabled
-log_max_modified_age_sync disabled
-log_pending_log_writes disabled
-log_pending_checkpoint_writes disabled
-log_num_log_io disabled
-log_waits disabled
-log_write_requests disabled
-log_writes disabled
-compress_pages_compressed disabled
-compress_pages_decompressed disabled
-index_splits disabled
-index_merges disabled
-adaptive_hash_searches disabled
-adaptive_hash_searches_btree disabled
-adaptive_hash_pages_added disabled
-adaptive_hash_pages_removed disabled
-adaptive_hash_rows_added disabled
-adaptive_hash_rows_removed disabled
-adaptive_hash_rows_deleted_no_hash_entry disabled
-adaptive_hash_rows_updated disabled
-file_num_open_files disabled
-ibuf_merges_insert disabled
-ibuf_merges_delete_mark disabled
-ibuf_merges_delete disabled
-ibuf_merges_discard_insert disabled
-ibuf_merges_discard_delete_mark disabled
-ibuf_merges_discard_delete disabled
-ibuf_merges disabled
-ibuf_size disabled
-innodb_master_thread_sleeps disabled
-innodb_activity_count disabled
-innodb_master_active_loops disabled
-innodb_master_idle_loops disabled
-innodb_background_drop_table_usec disabled
-innodb_ibuf_merge_usec disabled
-innodb_log_flush_usec disabled
-innodb_mem_validate_usec disabled
-innodb_master_purge_usec disabled
-innodb_dict_lru_usec disabled
-innodb_checkpoint_usec disabled
-innodb_dblwr_writes disabled
-innodb_dblwr_pages_written disabled
-innodb_page_size disabled
-innodb_rwlock_s_spin_waits disabled
-innodb_rwlock_x_spin_waits disabled
-innodb_rwlock_s_spin_rounds disabled
-innodb_rwlock_x_spin_rounds disabled
-innodb_rwlock_s_os_waits disabled
-innodb_rwlock_x_os_waits disabled
-dml_reads disabled
-dml_inserts disabled
-dml_deletes disabled
-dml_updates disabled
-ddl_background_drop_tables disabled
-icp_attempts disabled
-icp_no_match disabled
-icp_out_of_range disabled
-icp_match disabled
+select name from information_schema.innodb_metrics where status!='disabled';
+name
set global innodb_monitor_reset_all = all;
-select name, count, status from information_schema.innodb_metrics;
-name count status
-metadata_table_handles_opened 0 disabled
-metadata_table_handles_closed 0 disabled
-metadata_table_reference_count 0 disabled
-metadata_mem_pool_size 0 disabled
-lock_deadlocks 0 disabled
-lock_timeouts 0 disabled
-lock_rec_lock_waits 0 disabled
-lock_table_lock_waits 0 disabled
-lock_rec_lock_requests 0 disabled
-lock_rec_lock_created 0 disabled
-lock_rec_lock_removed 0 disabled
-lock_rec_locks 0 disabled
-lock_table_lock_created 0 disabled
-lock_table_lock_removed 0 disabled
-lock_table_locks 0 disabled
-lock_row_lock_current_waits 0 disabled
-lock_row_lock_time 0 disabled
-lock_row_lock_time_max 0 disabled
-lock_row_lock_waits 0 disabled
-lock_row_lock_time_avg 0 disabled
-buffer_pool_size 0 disabled
-buffer_pool_reads 0 disabled
-buffer_pool_read_requests 0 disabled
-buffer_pool_write_requests 0 disabled
-buffer_pool_pages_in_flush 0 disabled
-buffer_pool_wait_free 0 disabled
-buffer_pool_read_ahead 0 disabled
-buffer_pool_read_ahead_evicted 0 disabled
-buffer_pool_pages_total 0 disabled
-buffer_pool_pages_misc 0 disabled
-buffer_pool_pages_data 0 disabled
-buffer_pool_pages_dirty 0 disabled
-buffer_pool_pages_free 0 disabled
-buffer_pages_created 0 disabled
-buffer_pages_written 0 disabled
-buffer_pages_read 0 disabled
-buffer_data_reads 0 disabled
-buffer_data_written 0 disabled
-buffer_flush_batch_scanned 0 disabled
-buffer_flush_batch_num_scan 0 disabled
-buffer_flush_batch_scanned_per_call 0 disabled
-buffer_flush_batch_total_pages 0 disabled
-buffer_flush_batches 0 disabled
-buffer_flush_batch_pages 0 disabled
-buffer_flush_neighbor_total_pages 0 disabled
-buffer_flush_neighbor 0 disabled
-buffer_flush_neighbor_pages 0 disabled
-buffer_flush_max_dirty_total_pages 0 disabled
-buffer_flush_max_dirty 0 disabled
-buffer_flush_max_dirty_pages 0 disabled
-buffer_flush_adaptive_total_pages 0 disabled
-buffer_flush_adaptive 0 disabled
-buffer_flush_adaptive_pages 0 disabled
-buffer_flush_async_total_pages 0 disabled
-buffer_flush_async 0 disabled
-buffer_flush_async_pages 0 disabled
-buffer_flush_sync_total_pages 0 disabled
-buffer_flush_sync 0 disabled
-buffer_flush_sync_pages 0 disabled
-buffer_flush_background_total_pages 0 disabled
-buffer_flush_background 0 disabled
-buffer_flush_background_pages 0 disabled
-buffer_LRU_batch_scanned 0 disabled
-buffer_LRU_batch_num_scan 0 disabled
-buffer_LRU_batch_scanned_per_call 0 disabled
-buffer_LRU_batch_total_pages 0 disabled
-buffer_LRU_batches 0 disabled
-buffer_LRU_batch_pages 0 disabled
-buffer_LRU_single_flush_scanned 0 disabled
-buffer_LRU_single_flush_num_scan 0 disabled
-buffer_LRU_single_flush_scanned_per_call 0 disabled
-buffer_LRU_single_flush_failure_count 0 disabled
-buffer_LRU_get_free_search 0 disabled
-buffer_LRU_search_scanned 0 disabled
-buffer_LRU_search_num_scan 0 disabled
-buffer_LRU_search_scanned_per_call 0 disabled
-buffer_LRU_unzip_search_scanned 0 disabled
-buffer_LRU_unzip_search_num_scan 0 disabled
-buffer_LRU_unzip_search_scanned_per_call 0 disabled
-buffer_page_read_index_leaf 0 disabled
-buffer_page_read_index_non_leaf 0 disabled
-buffer_page_read_index_ibuf_leaf 0 disabled
-buffer_page_read_index_ibuf_non_leaf 0 disabled
-buffer_page_read_undo_log 0 disabled
-buffer_page_read_index_inode 0 disabled
-buffer_page_read_ibuf_free_list 0 disabled
-buffer_page_read_ibuf_bitmap 0 disabled
-buffer_page_read_system_page 0 disabled
-buffer_page_read_trx_system 0 disabled
-buffer_page_read_fsp_hdr 0 disabled
-buffer_page_read_xdes 0 disabled
-buffer_page_read_blob 0 disabled
-buffer_page_read_zblob 0 disabled
-buffer_page_read_zblob2 0 disabled
-buffer_page_read_other 0 disabled
-buffer_page_written_index_leaf 0 disabled
-buffer_page_written_index_non_leaf 0 disabled
-buffer_page_written_index_ibuf_leaf 0 disabled
-buffer_page_written_index_ibuf_non_leaf 0 disabled
-buffer_page_written_undo_log 0 disabled
-buffer_page_written_index_inode 0 disabled
-buffer_page_written_ibuf_free_list 0 disabled
-buffer_page_written_ibuf_bitmap 0 disabled
-buffer_page_written_system_page 0 disabled
-buffer_page_written_trx_system 0 disabled
-buffer_page_written_fsp_hdr 0 disabled
-buffer_page_written_xdes 0 disabled
-buffer_page_written_blob 0 disabled
-buffer_page_written_zblob 0 disabled
-buffer_page_written_zblob2 0 disabled
-buffer_page_written_other 0 disabled
-os_data_reads 0 disabled
-os_data_writes 0 disabled
-os_data_fsyncs 0 disabled
-os_pending_reads 0 disabled
-os_pending_writes 0 disabled
-os_log_bytes_written 0 disabled
-os_log_fsyncs 0 disabled
-os_log_pending_fsyncs 0 disabled
-os_log_pending_writes 0 disabled
-trx_rw_commits 0 disabled
-trx_ro_commits 0 disabled
-trx_nl_ro_commits 0 disabled
-trx_commits_insert_update 0 disabled
-trx_rollbacks 0 disabled
-trx_rollbacks_savepoint 0 disabled
-trx_rollback_active 0 disabled
-trx_active_transactions 0 disabled
-trx_rseg_history_len 0 disabled
-trx_undo_slots_used 0 disabled
-trx_undo_slots_cached 0 disabled
-trx_rseg_current_size 0 disabled
-purge_del_mark_records 0 disabled
-purge_upd_exist_or_extern_records 0 disabled
-purge_invoked 0 disabled
-purge_undo_log_pages 0 disabled
-purge_dml_delay_usec 0 disabled
-purge_stop_count 0 disabled
-purge_resume_count 0 disabled
-log_checkpoints 0 disabled
-log_lsn_last_flush 0 disabled
-log_lsn_last_checkpoint 0 disabled
-log_lsn_current 0 disabled
-log_lsn_checkpoint_age 0 disabled
-log_lsn_buf_pool_oldest 0 disabled
-log_max_modified_age_async 0 disabled
-log_max_modified_age_sync 0 disabled
-log_pending_log_writes 0 disabled
-log_pending_checkpoint_writes 0 disabled
-log_num_log_io 0 disabled
-log_waits 0 disabled
-log_write_requests 0 disabled
-log_writes 0 disabled
-compress_pages_compressed 0 disabled
-compress_pages_decompressed 0 disabled
-index_splits 0 disabled
-index_merges 0 disabled
-adaptive_hash_searches 0 disabled
-adaptive_hash_searches_btree 0 disabled
-adaptive_hash_pages_added 0 disabled
-adaptive_hash_pages_removed 0 disabled
-adaptive_hash_rows_added 0 disabled
-adaptive_hash_rows_removed 0 disabled
-adaptive_hash_rows_deleted_no_hash_entry 0 disabled
-adaptive_hash_rows_updated 0 disabled
-file_num_open_files 0 disabled
-ibuf_merges_insert 0 disabled
-ibuf_merges_delete_mark 0 disabled
-ibuf_merges_delete 0 disabled
-ibuf_merges_discard_insert 0 disabled
-ibuf_merges_discard_delete_mark 0 disabled
-ibuf_merges_discard_delete 0 disabled
-ibuf_merges 0 disabled
-ibuf_size 0 disabled
-innodb_master_thread_sleeps 0 disabled
-innodb_activity_count 0 disabled
-innodb_master_active_loops 0 disabled
-innodb_master_idle_loops 0 disabled
-innodb_background_drop_table_usec 0 disabled
-innodb_ibuf_merge_usec 0 disabled
-innodb_log_flush_usec 0 disabled
-innodb_mem_validate_usec 0 disabled
-innodb_master_purge_usec 0 disabled
-innodb_dict_lru_usec 0 disabled
-innodb_checkpoint_usec 0 disabled
-innodb_dblwr_writes 0 disabled
-innodb_dblwr_pages_written 0 disabled
-innodb_page_size 0 disabled
-innodb_rwlock_s_spin_waits 0 disabled
-innodb_rwlock_x_spin_waits 0 disabled
-innodb_rwlock_s_spin_rounds 0 disabled
-innodb_rwlock_x_spin_rounds 0 disabled
-innodb_rwlock_s_os_waits 0 disabled
-innodb_rwlock_x_os_waits 0 disabled
-dml_reads 0 disabled
-dml_inserts 0 disabled
-dml_deletes 0 disabled
-dml_updates 0 disabled
-ddl_background_drop_tables 0 disabled
-icp_attempts 0 disabled
-icp_no_match 0 disabled
-icp_out_of_range 0 disabled
-icp_match 0 disabled
+select name from information_schema.innodb_metrics where count!=0;
+name
set global innodb_monitor_enable = "%lock%";
-select name, status from information_schema.innodb_metrics
-where name like "%lock%";
-name status
-lock_deadlocks enabled
-lock_timeouts enabled
-lock_rec_lock_waits enabled
-lock_table_lock_waits enabled
-lock_rec_lock_requests enabled
-lock_rec_lock_created enabled
-lock_rec_lock_removed enabled
-lock_rec_locks enabled
-lock_table_lock_created enabled
-lock_table_lock_removed enabled
-lock_table_locks enabled
-lock_row_lock_current_waits enabled
-lock_row_lock_time enabled
-lock_row_lock_time_max enabled
-lock_row_lock_waits enabled
-lock_row_lock_time_avg enabled
-innodb_rwlock_s_spin_waits enabled
-innodb_rwlock_x_spin_waits enabled
-innodb_rwlock_s_spin_rounds enabled
-innodb_rwlock_x_spin_rounds enabled
-innodb_rwlock_s_os_waits enabled
-innodb_rwlock_x_os_waits enabled
+select name from information_schema.innodb_metrics
+where status != IF(name like "%lock%", 'enabled', 'disabled');
+name
set global innodb_monitor_disable = "%lock%";
select name, status from information_schema.innodb_metrics
where name like "%lock%";
@@ -879,1035 +254,21 @@ innodb_rwlock_x_os_waits disabled
set global innodb_monitor_enable = "%lock*";
ERROR 42000: Variable 'innodb_monitor_enable' can't be set to the value of '%lock*'
set global innodb_monitor_enable="%%%%%%%%%%%%%%%%%%%%%%%%%%%";
-select name, status from information_schema.innodb_metrics;
-name status
-metadata_table_handles_opened enabled
-metadata_table_handles_closed enabled
-metadata_table_reference_count enabled
-metadata_mem_pool_size enabled
-lock_deadlocks enabled
-lock_timeouts enabled
-lock_rec_lock_waits enabled
-lock_table_lock_waits enabled
-lock_rec_lock_requests enabled
-lock_rec_lock_created enabled
-lock_rec_lock_removed enabled
-lock_rec_locks enabled
-lock_table_lock_created enabled
-lock_table_lock_removed enabled
-lock_table_locks enabled
-lock_row_lock_current_waits enabled
-lock_row_lock_time enabled
-lock_row_lock_time_max enabled
-lock_row_lock_waits enabled
-lock_row_lock_time_avg enabled
-buffer_pool_size enabled
-buffer_pool_reads enabled
-buffer_pool_read_requests enabled
-buffer_pool_write_requests enabled
-buffer_pool_pages_in_flush enabled
-buffer_pool_wait_free enabled
-buffer_pool_read_ahead enabled
-buffer_pool_read_ahead_evicted enabled
-buffer_pool_pages_total enabled
-buffer_pool_pages_misc enabled
-buffer_pool_pages_data enabled
-buffer_pool_pages_dirty enabled
-buffer_pool_pages_free enabled
-buffer_pages_created enabled
-buffer_pages_written enabled
-buffer_pages_read enabled
-buffer_data_reads enabled
-buffer_data_written enabled
-buffer_flush_batch_scanned enabled
-buffer_flush_batch_num_scan enabled
-buffer_flush_batch_scanned_per_call enabled
-buffer_flush_batch_total_pages enabled
-buffer_flush_batches enabled
-buffer_flush_batch_pages enabled
-buffer_flush_neighbor_total_pages enabled
-buffer_flush_neighbor enabled
-buffer_flush_neighbor_pages enabled
-buffer_flush_max_dirty_total_pages enabled
-buffer_flush_max_dirty enabled
-buffer_flush_max_dirty_pages enabled
-buffer_flush_adaptive_total_pages enabled
-buffer_flush_adaptive enabled
-buffer_flush_adaptive_pages enabled
-buffer_flush_async_total_pages enabled
-buffer_flush_async enabled
-buffer_flush_async_pages enabled
-buffer_flush_sync_total_pages enabled
-buffer_flush_sync enabled
-buffer_flush_sync_pages enabled
-buffer_flush_background_total_pages enabled
-buffer_flush_background enabled
-buffer_flush_background_pages enabled
-buffer_LRU_batch_scanned enabled
-buffer_LRU_batch_num_scan enabled
-buffer_LRU_batch_scanned_per_call enabled
-buffer_LRU_batch_total_pages enabled
-buffer_LRU_batches enabled
-buffer_LRU_batch_pages enabled
-buffer_LRU_single_flush_scanned enabled
-buffer_LRU_single_flush_num_scan enabled
-buffer_LRU_single_flush_scanned_per_call enabled
-buffer_LRU_single_flush_failure_count enabled
-buffer_LRU_get_free_search enabled
-buffer_LRU_search_scanned enabled
-buffer_LRU_search_num_scan enabled
-buffer_LRU_search_scanned_per_call enabled
-buffer_LRU_unzip_search_scanned enabled
-buffer_LRU_unzip_search_num_scan enabled
-buffer_LRU_unzip_search_scanned_per_call enabled
-buffer_page_read_index_leaf enabled
-buffer_page_read_index_non_leaf enabled
-buffer_page_read_index_ibuf_leaf enabled
-buffer_page_read_index_ibuf_non_leaf enabled
-buffer_page_read_undo_log enabled
-buffer_page_read_index_inode enabled
-buffer_page_read_ibuf_free_list enabled
-buffer_page_read_ibuf_bitmap enabled
-buffer_page_read_system_page enabled
-buffer_page_read_trx_system enabled
-buffer_page_read_fsp_hdr enabled
-buffer_page_read_xdes enabled
-buffer_page_read_blob enabled
-buffer_page_read_zblob enabled
-buffer_page_read_zblob2 enabled
-buffer_page_read_other enabled
-buffer_page_written_index_leaf enabled
-buffer_page_written_index_non_leaf enabled
-buffer_page_written_index_ibuf_leaf enabled
-buffer_page_written_index_ibuf_non_leaf enabled
-buffer_page_written_undo_log enabled
-buffer_page_written_index_inode enabled
-buffer_page_written_ibuf_free_list enabled
-buffer_page_written_ibuf_bitmap enabled
-buffer_page_written_system_page enabled
-buffer_page_written_trx_system enabled
-buffer_page_written_fsp_hdr enabled
-buffer_page_written_xdes enabled
-buffer_page_written_blob enabled
-buffer_page_written_zblob enabled
-buffer_page_written_zblob2 enabled
-buffer_page_written_other enabled
-os_data_reads enabled
-os_data_writes enabled
-os_data_fsyncs enabled
-os_pending_reads enabled
-os_pending_writes enabled
-os_log_bytes_written enabled
-os_log_fsyncs enabled
-os_log_pending_fsyncs enabled
-os_log_pending_writes enabled
-trx_rw_commits enabled
-trx_ro_commits enabled
-trx_nl_ro_commits enabled
-trx_commits_insert_update enabled
-trx_rollbacks enabled
-trx_rollbacks_savepoint enabled
-trx_rollback_active enabled
-trx_active_transactions enabled
-trx_rseg_history_len enabled
-trx_undo_slots_used enabled
-trx_undo_slots_cached enabled
-trx_rseg_current_size enabled
-purge_del_mark_records enabled
-purge_upd_exist_or_extern_records enabled
-purge_invoked enabled
-purge_undo_log_pages enabled
-purge_dml_delay_usec enabled
-purge_stop_count enabled
-purge_resume_count enabled
-log_checkpoints enabled
-log_lsn_last_flush enabled
-log_lsn_last_checkpoint enabled
-log_lsn_current enabled
-log_lsn_checkpoint_age enabled
-log_lsn_buf_pool_oldest enabled
-log_max_modified_age_async enabled
-log_max_modified_age_sync enabled
-log_pending_log_writes enabled
-log_pending_checkpoint_writes enabled
-log_num_log_io enabled
-log_waits enabled
-log_write_requests enabled
-log_writes enabled
-compress_pages_compressed enabled
-compress_pages_decompressed enabled
-index_splits enabled
-index_merges enabled
-adaptive_hash_searches enabled
-adaptive_hash_searches_btree enabled
-adaptive_hash_pages_added enabled
-adaptive_hash_pages_removed enabled
-adaptive_hash_rows_added enabled
-adaptive_hash_rows_removed enabled
-adaptive_hash_rows_deleted_no_hash_entry enabled
-adaptive_hash_rows_updated enabled
-file_num_open_files enabled
-ibuf_merges_insert enabled
-ibuf_merges_delete_mark enabled
-ibuf_merges_delete enabled
-ibuf_merges_discard_insert enabled
-ibuf_merges_discard_delete_mark enabled
-ibuf_merges_discard_delete enabled
-ibuf_merges enabled
-ibuf_size enabled
-innodb_master_thread_sleeps enabled
-innodb_activity_count enabled
-innodb_master_active_loops enabled
-innodb_master_idle_loops enabled
-innodb_background_drop_table_usec enabled
-innodb_ibuf_merge_usec enabled
-innodb_log_flush_usec enabled
-innodb_mem_validate_usec enabled
-innodb_master_purge_usec enabled
-innodb_dict_lru_usec enabled
-innodb_checkpoint_usec enabled
-innodb_dblwr_writes enabled
-innodb_dblwr_pages_written enabled
-innodb_page_size enabled
-innodb_rwlock_s_spin_waits enabled
-innodb_rwlock_x_spin_waits enabled
-innodb_rwlock_s_spin_rounds enabled
-innodb_rwlock_x_spin_rounds enabled
-innodb_rwlock_s_os_waits enabled
-innodb_rwlock_x_os_waits enabled
-dml_reads enabled
-dml_inserts enabled
-dml_deletes enabled
-dml_updates enabled
-ddl_background_drop_tables enabled
-icp_attempts enabled
-icp_no_match enabled
-icp_out_of_range enabled
-icp_match enabled
+select name from information_schema.innodb_metrics where status!='enabled';
+name
set global innodb_monitor_disable="%%%%%";
-select name, status from information_schema.innodb_metrics;
-name status
-metadata_table_handles_opened disabled
-metadata_table_handles_closed disabled
-metadata_table_reference_count disabled
-metadata_mem_pool_size disabled
-lock_deadlocks disabled
-lock_timeouts disabled
-lock_rec_lock_waits disabled
-lock_table_lock_waits disabled
-lock_rec_lock_requests disabled
-lock_rec_lock_created disabled
-lock_rec_lock_removed disabled
-lock_rec_locks disabled
-lock_table_lock_created disabled
-lock_table_lock_removed disabled
-lock_table_locks disabled
-lock_row_lock_current_waits disabled
-lock_row_lock_time disabled
-lock_row_lock_time_max disabled
-lock_row_lock_waits disabled
-lock_row_lock_time_avg disabled
-buffer_pool_size disabled
-buffer_pool_reads disabled
-buffer_pool_read_requests disabled
-buffer_pool_write_requests disabled
-buffer_pool_pages_in_flush disabled
-buffer_pool_wait_free disabled
-buffer_pool_read_ahead disabled
-buffer_pool_read_ahead_evicted disabled
-buffer_pool_pages_total disabled
-buffer_pool_pages_misc disabled
-buffer_pool_pages_data disabled
-buffer_pool_pages_dirty disabled
-buffer_pool_pages_free disabled
-buffer_pages_created disabled
-buffer_pages_written disabled
-buffer_pages_read disabled
-buffer_data_reads disabled
-buffer_data_written disabled
-buffer_flush_batch_scanned disabled
-buffer_flush_batch_num_scan disabled
-buffer_flush_batch_scanned_per_call disabled
-buffer_flush_batch_total_pages disabled
-buffer_flush_batches disabled
-buffer_flush_batch_pages disabled
-buffer_flush_neighbor_total_pages disabled
-buffer_flush_neighbor disabled
-buffer_flush_neighbor_pages disabled
-buffer_flush_max_dirty_total_pages disabled
-buffer_flush_max_dirty disabled
-buffer_flush_max_dirty_pages disabled
-buffer_flush_adaptive_total_pages disabled
-buffer_flush_adaptive disabled
-buffer_flush_adaptive_pages disabled
-buffer_flush_async_total_pages disabled
-buffer_flush_async disabled
-buffer_flush_async_pages disabled
-buffer_flush_sync_total_pages disabled
-buffer_flush_sync disabled
-buffer_flush_sync_pages disabled
-buffer_flush_background_total_pages disabled
-buffer_flush_background disabled
-buffer_flush_background_pages disabled
-buffer_LRU_batch_scanned disabled
-buffer_LRU_batch_num_scan disabled
-buffer_LRU_batch_scanned_per_call disabled
-buffer_LRU_batch_total_pages disabled
-buffer_LRU_batches disabled
-buffer_LRU_batch_pages disabled
-buffer_LRU_single_flush_scanned disabled
-buffer_LRU_single_flush_num_scan disabled
-buffer_LRU_single_flush_scanned_per_call disabled
-buffer_LRU_single_flush_failure_count disabled
-buffer_LRU_get_free_search disabled
-buffer_LRU_search_scanned disabled
-buffer_LRU_search_num_scan disabled
-buffer_LRU_search_scanned_per_call disabled
-buffer_LRU_unzip_search_scanned disabled
-buffer_LRU_unzip_search_num_scan disabled
-buffer_LRU_unzip_search_scanned_per_call disabled
-buffer_page_read_index_leaf disabled
-buffer_page_read_index_non_leaf disabled
-buffer_page_read_index_ibuf_leaf disabled
-buffer_page_read_index_ibuf_non_leaf disabled
-buffer_page_read_undo_log disabled
-buffer_page_read_index_inode disabled
-buffer_page_read_ibuf_free_list disabled
-buffer_page_read_ibuf_bitmap disabled
-buffer_page_read_system_page disabled
-buffer_page_read_trx_system disabled
-buffer_page_read_fsp_hdr disabled
-buffer_page_read_xdes disabled
-buffer_page_read_blob disabled
-buffer_page_read_zblob disabled
-buffer_page_read_zblob2 disabled
-buffer_page_read_other disabled
-buffer_page_written_index_leaf disabled
-buffer_page_written_index_non_leaf disabled
-buffer_page_written_index_ibuf_leaf disabled
-buffer_page_written_index_ibuf_non_leaf disabled
-buffer_page_written_undo_log disabled
-buffer_page_written_index_inode disabled
-buffer_page_written_ibuf_free_list disabled
-buffer_page_written_ibuf_bitmap disabled
-buffer_page_written_system_page disabled
-buffer_page_written_trx_system disabled
-buffer_page_written_fsp_hdr disabled
-buffer_page_written_xdes disabled
-buffer_page_written_blob disabled
-buffer_page_written_zblob disabled
-buffer_page_written_zblob2 disabled
-buffer_page_written_other disabled
-os_data_reads disabled
-os_data_writes disabled
-os_data_fsyncs disabled
-os_pending_reads disabled
-os_pending_writes disabled
-os_log_bytes_written disabled
-os_log_fsyncs disabled
-os_log_pending_fsyncs disabled
-os_log_pending_writes disabled
-trx_rw_commits disabled
-trx_ro_commits disabled
-trx_nl_ro_commits disabled
-trx_commits_insert_update disabled
-trx_rollbacks disabled
-trx_rollbacks_savepoint disabled
-trx_rollback_active disabled
-trx_active_transactions disabled
-trx_rseg_history_len disabled
-trx_undo_slots_used disabled
-trx_undo_slots_cached disabled
-trx_rseg_current_size disabled
-purge_del_mark_records disabled
-purge_upd_exist_or_extern_records disabled
-purge_invoked disabled
-purge_undo_log_pages disabled
-purge_dml_delay_usec disabled
-purge_stop_count disabled
-purge_resume_count disabled
-log_checkpoints disabled
-log_lsn_last_flush disabled
-log_lsn_last_checkpoint disabled
-log_lsn_current disabled
-log_lsn_checkpoint_age disabled
-log_lsn_buf_pool_oldest disabled
-log_max_modified_age_async disabled
-log_max_modified_age_sync disabled
-log_pending_log_writes disabled
-log_pending_checkpoint_writes disabled
-log_num_log_io disabled
-log_waits disabled
-log_write_requests disabled
-log_writes disabled
-compress_pages_compressed disabled
-compress_pages_decompressed disabled
-index_splits disabled
-index_merges disabled
-adaptive_hash_searches disabled
-adaptive_hash_searches_btree disabled
-adaptive_hash_pages_added disabled
-adaptive_hash_pages_removed disabled
-adaptive_hash_rows_added disabled
-adaptive_hash_rows_removed disabled
-adaptive_hash_rows_deleted_no_hash_entry disabled
-adaptive_hash_rows_updated disabled
-file_num_open_files disabled
-ibuf_merges_insert disabled
-ibuf_merges_delete_mark disabled
-ibuf_merges_delete disabled
-ibuf_merges_discard_insert disabled
-ibuf_merges_discard_delete_mark disabled
-ibuf_merges_discard_delete disabled
-ibuf_merges disabled
-ibuf_size disabled
-innodb_master_thread_sleeps disabled
-innodb_activity_count disabled
-innodb_master_active_loops disabled
-innodb_master_idle_loops disabled
-innodb_background_drop_table_usec disabled
-innodb_ibuf_merge_usec disabled
-innodb_log_flush_usec disabled
-innodb_mem_validate_usec disabled
-innodb_master_purge_usec disabled
-innodb_dict_lru_usec disabled
-innodb_checkpoint_usec disabled
-innodb_dblwr_writes disabled
-innodb_dblwr_pages_written disabled
-innodb_page_size disabled
-innodb_rwlock_s_spin_waits disabled
-innodb_rwlock_x_spin_waits disabled
-innodb_rwlock_s_spin_rounds disabled
-innodb_rwlock_x_spin_rounds disabled
-innodb_rwlock_s_os_waits disabled
-innodb_rwlock_x_os_waits disabled
-dml_reads disabled
-dml_inserts disabled
-dml_deletes disabled
-dml_updates disabled
-ddl_background_drop_tables disabled
-icp_attempts disabled
-icp_no_match disabled
-icp_out_of_range disabled
-icp_match disabled
+select name from information_schema.innodb_metrics where status!='disabled';
+name
set global innodb_monitor_enable="%";
-select name, status from information_schema.innodb_metrics;
-name status
-metadata_table_handles_opened enabled
-metadata_table_handles_closed enabled
-metadata_table_reference_count enabled
-metadata_mem_pool_size enabled
-lock_deadlocks enabled
-lock_timeouts enabled
-lock_rec_lock_waits enabled
-lock_table_lock_waits enabled
-lock_rec_lock_requests enabled
-lock_rec_lock_created enabled
-lock_rec_lock_removed enabled
-lock_rec_locks enabled
-lock_table_lock_created enabled
-lock_table_lock_removed enabled
-lock_table_locks enabled
-lock_row_lock_current_waits enabled
-lock_row_lock_time enabled
-lock_row_lock_time_max enabled
-lock_row_lock_waits enabled
-lock_row_lock_time_avg enabled
-buffer_pool_size enabled
-buffer_pool_reads enabled
-buffer_pool_read_requests enabled
-buffer_pool_write_requests enabled
-buffer_pool_pages_in_flush enabled
-buffer_pool_wait_free enabled
-buffer_pool_read_ahead enabled
-buffer_pool_read_ahead_evicted enabled
-buffer_pool_pages_total enabled
-buffer_pool_pages_misc enabled
-buffer_pool_pages_data enabled
-buffer_pool_pages_dirty enabled
-buffer_pool_pages_free enabled
-buffer_pages_created enabled
-buffer_pages_written enabled
-buffer_pages_read enabled
-buffer_data_reads enabled
-buffer_data_written enabled
-buffer_flush_batch_scanned enabled
-buffer_flush_batch_num_scan enabled
-buffer_flush_batch_scanned_per_call enabled
-buffer_flush_batch_total_pages enabled
-buffer_flush_batches enabled
-buffer_flush_batch_pages enabled
-buffer_flush_neighbor_total_pages enabled
-buffer_flush_neighbor enabled
-buffer_flush_neighbor_pages enabled
-buffer_flush_max_dirty_total_pages enabled
-buffer_flush_max_dirty enabled
-buffer_flush_max_dirty_pages enabled
-buffer_flush_adaptive_total_pages enabled
-buffer_flush_adaptive enabled
-buffer_flush_adaptive_pages enabled
-buffer_flush_async_total_pages enabled
-buffer_flush_async enabled
-buffer_flush_async_pages enabled
-buffer_flush_sync_total_pages enabled
-buffer_flush_sync enabled
-buffer_flush_sync_pages enabled
-buffer_flush_background_total_pages enabled
-buffer_flush_background enabled
-buffer_flush_background_pages enabled
-buffer_LRU_batch_scanned enabled
-buffer_LRU_batch_num_scan enabled
-buffer_LRU_batch_scanned_per_call enabled
-buffer_LRU_batch_total_pages enabled
-buffer_LRU_batches enabled
-buffer_LRU_batch_pages enabled
-buffer_LRU_single_flush_scanned enabled
-buffer_LRU_single_flush_num_scan enabled
-buffer_LRU_single_flush_scanned_per_call enabled
-buffer_LRU_single_flush_failure_count enabled
-buffer_LRU_get_free_search enabled
-buffer_LRU_search_scanned enabled
-buffer_LRU_search_num_scan enabled
-buffer_LRU_search_scanned_per_call enabled
-buffer_LRU_unzip_search_scanned enabled
-buffer_LRU_unzip_search_num_scan enabled
-buffer_LRU_unzip_search_scanned_per_call enabled
-buffer_page_read_index_leaf enabled
-buffer_page_read_index_non_leaf enabled
-buffer_page_read_index_ibuf_leaf enabled
-buffer_page_read_index_ibuf_non_leaf enabled
-buffer_page_read_undo_log enabled
-buffer_page_read_index_inode enabled
-buffer_page_read_ibuf_free_list enabled
-buffer_page_read_ibuf_bitmap enabled
-buffer_page_read_system_page enabled
-buffer_page_read_trx_system enabled
-buffer_page_read_fsp_hdr enabled
-buffer_page_read_xdes enabled
-buffer_page_read_blob enabled
-buffer_page_read_zblob enabled
-buffer_page_read_zblob2 enabled
-buffer_page_read_other enabled
-buffer_page_written_index_leaf enabled
-buffer_page_written_index_non_leaf enabled
-buffer_page_written_index_ibuf_leaf enabled
-buffer_page_written_index_ibuf_non_leaf enabled
-buffer_page_written_undo_log enabled
-buffer_page_written_index_inode enabled
-buffer_page_written_ibuf_free_list enabled
-buffer_page_written_ibuf_bitmap enabled
-buffer_page_written_system_page enabled
-buffer_page_written_trx_system enabled
-buffer_page_written_fsp_hdr enabled
-buffer_page_written_xdes enabled
-buffer_page_written_blob enabled
-buffer_page_written_zblob enabled
-buffer_page_written_zblob2 enabled
-buffer_page_written_other enabled
-os_data_reads enabled
-os_data_writes enabled
-os_data_fsyncs enabled
-os_pending_reads enabled
-os_pending_writes enabled
-os_log_bytes_written enabled
-os_log_fsyncs enabled
-os_log_pending_fsyncs enabled
-os_log_pending_writes enabled
-trx_rw_commits enabled
-trx_ro_commits enabled
-trx_nl_ro_commits enabled
-trx_commits_insert_update enabled
-trx_rollbacks enabled
-trx_rollbacks_savepoint enabled
-trx_rollback_active enabled
-trx_active_transactions enabled
-trx_rseg_history_len enabled
-trx_undo_slots_used enabled
-trx_undo_slots_cached enabled
-trx_rseg_current_size enabled
-purge_del_mark_records enabled
-purge_upd_exist_or_extern_records enabled
-purge_invoked enabled
-purge_undo_log_pages enabled
-purge_dml_delay_usec enabled
-purge_stop_count enabled
-purge_resume_count enabled
-log_checkpoints enabled
-log_lsn_last_flush enabled
-log_lsn_last_checkpoint enabled
-log_lsn_current enabled
-log_lsn_checkpoint_age enabled
-log_lsn_buf_pool_oldest enabled
-log_max_modified_age_async enabled
-log_max_modified_age_sync enabled
-log_pending_log_writes enabled
-log_pending_checkpoint_writes enabled
-log_num_log_io enabled
-log_waits enabled
-log_write_requests enabled
-log_writes enabled
-compress_pages_compressed enabled
-compress_pages_decompressed enabled
-index_splits enabled
-index_merges enabled
-adaptive_hash_searches enabled
-adaptive_hash_searches_btree enabled
-adaptive_hash_pages_added enabled
-adaptive_hash_pages_removed enabled
-adaptive_hash_rows_added enabled
-adaptive_hash_rows_removed enabled
-adaptive_hash_rows_deleted_no_hash_entry enabled
-adaptive_hash_rows_updated enabled
-file_num_open_files enabled
-ibuf_merges_insert enabled
-ibuf_merges_delete_mark enabled
-ibuf_merges_delete enabled
-ibuf_merges_discard_insert enabled
-ibuf_merges_discard_delete_mark enabled
-ibuf_merges_discard_delete enabled
-ibuf_merges enabled
-ibuf_size enabled
-innodb_master_thread_sleeps enabled
-innodb_activity_count enabled
-innodb_master_active_loops enabled
-innodb_master_idle_loops enabled
-innodb_background_drop_table_usec enabled
-innodb_ibuf_merge_usec enabled
-innodb_log_flush_usec enabled
-innodb_mem_validate_usec enabled
-innodb_master_purge_usec enabled
-innodb_dict_lru_usec enabled
-innodb_checkpoint_usec enabled
-innodb_dblwr_writes enabled
-innodb_dblwr_pages_written enabled
-innodb_page_size enabled
-innodb_rwlock_s_spin_waits enabled
-innodb_rwlock_x_spin_waits enabled
-innodb_rwlock_s_spin_rounds enabled
-innodb_rwlock_x_spin_rounds enabled
-innodb_rwlock_s_os_waits enabled
-innodb_rwlock_x_os_waits enabled
-dml_reads enabled
-dml_inserts enabled
-dml_deletes enabled
-dml_updates enabled
-ddl_background_drop_tables enabled
-icp_attempts enabled
-icp_no_match enabled
-icp_out_of_range enabled
-icp_match enabled
+select name from information_schema.innodb_metrics where status!='enabled';
+name
set global innodb_monitor_disable="%_%";
-select name, status from information_schema.innodb_metrics;
-name status
-metadata_table_handles_opened disabled
-metadata_table_handles_closed disabled
-metadata_table_reference_count disabled
-metadata_mem_pool_size disabled
-lock_deadlocks disabled
-lock_timeouts disabled
-lock_rec_lock_waits disabled
-lock_table_lock_waits disabled
-lock_rec_lock_requests disabled
-lock_rec_lock_created disabled
-lock_rec_lock_removed disabled
-lock_rec_locks disabled
-lock_table_lock_created disabled
-lock_table_lock_removed disabled
-lock_table_locks disabled
-lock_row_lock_current_waits disabled
-lock_row_lock_time disabled
-lock_row_lock_time_max disabled
-lock_row_lock_waits disabled
-lock_row_lock_time_avg disabled
-buffer_pool_size disabled
-buffer_pool_reads disabled
-buffer_pool_read_requests disabled
-buffer_pool_write_requests disabled
-buffer_pool_pages_in_flush disabled
-buffer_pool_wait_free disabled
-buffer_pool_read_ahead disabled
-buffer_pool_read_ahead_evicted disabled
-buffer_pool_pages_total disabled
-buffer_pool_pages_misc disabled
-buffer_pool_pages_data disabled
-buffer_pool_pages_dirty disabled
-buffer_pool_pages_free disabled
-buffer_pages_created disabled
-buffer_pages_written disabled
-buffer_pages_read disabled
-buffer_data_reads disabled
-buffer_data_written disabled
-buffer_flush_batch_scanned disabled
-buffer_flush_batch_num_scan disabled
-buffer_flush_batch_scanned_per_call disabled
-buffer_flush_batch_total_pages disabled
-buffer_flush_batches disabled
-buffer_flush_batch_pages disabled
-buffer_flush_neighbor_total_pages disabled
-buffer_flush_neighbor disabled
-buffer_flush_neighbor_pages disabled
-buffer_flush_max_dirty_total_pages disabled
-buffer_flush_max_dirty disabled
-buffer_flush_max_dirty_pages disabled
-buffer_flush_adaptive_total_pages disabled
-buffer_flush_adaptive disabled
-buffer_flush_adaptive_pages disabled
-buffer_flush_async_total_pages disabled
-buffer_flush_async disabled
-buffer_flush_async_pages disabled
-buffer_flush_sync_total_pages disabled
-buffer_flush_sync disabled
-buffer_flush_sync_pages disabled
-buffer_flush_background_total_pages disabled
-buffer_flush_background disabled
-buffer_flush_background_pages disabled
-buffer_LRU_batch_scanned disabled
-buffer_LRU_batch_num_scan disabled
-buffer_LRU_batch_scanned_per_call disabled
-buffer_LRU_batch_total_pages disabled
-buffer_LRU_batches disabled
-buffer_LRU_batch_pages disabled
-buffer_LRU_single_flush_scanned disabled
-buffer_LRU_single_flush_num_scan disabled
-buffer_LRU_single_flush_scanned_per_call disabled
-buffer_LRU_single_flush_failure_count disabled
-buffer_LRU_get_free_search disabled
-buffer_LRU_search_scanned disabled
-buffer_LRU_search_num_scan disabled
-buffer_LRU_search_scanned_per_call disabled
-buffer_LRU_unzip_search_scanned disabled
-buffer_LRU_unzip_search_num_scan disabled
-buffer_LRU_unzip_search_scanned_per_call disabled
-buffer_page_read_index_leaf disabled
-buffer_page_read_index_non_leaf disabled
-buffer_page_read_index_ibuf_leaf disabled
-buffer_page_read_index_ibuf_non_leaf disabled
-buffer_page_read_undo_log disabled
-buffer_page_read_index_inode disabled
-buffer_page_read_ibuf_free_list disabled
-buffer_page_read_ibuf_bitmap disabled
-buffer_page_read_system_page disabled
-buffer_page_read_trx_system disabled
-buffer_page_read_fsp_hdr disabled
-buffer_page_read_xdes disabled
-buffer_page_read_blob disabled
-buffer_page_read_zblob disabled
-buffer_page_read_zblob2 disabled
-buffer_page_read_other disabled
-buffer_page_written_index_leaf disabled
-buffer_page_written_index_non_leaf disabled
-buffer_page_written_index_ibuf_leaf disabled
-buffer_page_written_index_ibuf_non_leaf disabled
-buffer_page_written_undo_log disabled
-buffer_page_written_index_inode disabled
-buffer_page_written_ibuf_free_list disabled
-buffer_page_written_ibuf_bitmap disabled
-buffer_page_written_system_page disabled
-buffer_page_written_trx_system disabled
-buffer_page_written_fsp_hdr disabled
-buffer_page_written_xdes disabled
-buffer_page_written_blob disabled
-buffer_page_written_zblob disabled
-buffer_page_written_zblob2 disabled
-buffer_page_written_other disabled
-os_data_reads disabled
-os_data_writes disabled
-os_data_fsyncs disabled
-os_pending_reads disabled
-os_pending_writes disabled
-os_log_bytes_written disabled
-os_log_fsyncs disabled
-os_log_pending_fsyncs disabled
-os_log_pending_writes disabled
-trx_rw_commits disabled
-trx_ro_commits disabled
-trx_nl_ro_commits disabled
-trx_commits_insert_update disabled
-trx_rollbacks disabled
-trx_rollbacks_savepoint disabled
-trx_rollback_active disabled
-trx_active_transactions disabled
-trx_rseg_history_len disabled
-trx_undo_slots_used disabled
-trx_undo_slots_cached disabled
-trx_rseg_current_size disabled
-purge_del_mark_records disabled
-purge_upd_exist_or_extern_records disabled
-purge_invoked disabled
-purge_undo_log_pages disabled
-purge_dml_delay_usec disabled
-purge_stop_count disabled
-purge_resume_count disabled
-log_checkpoints disabled
-log_lsn_last_flush disabled
-log_lsn_last_checkpoint disabled
-log_lsn_current disabled
-log_lsn_checkpoint_age disabled
-log_lsn_buf_pool_oldest disabled
-log_max_modified_age_async disabled
-log_max_modified_age_sync disabled
-log_pending_log_writes disabled
-log_pending_checkpoint_writes disabled
-log_num_log_io disabled
-log_waits disabled
-log_write_requests disabled
-log_writes disabled
-compress_pages_compressed disabled
-compress_pages_decompressed disabled
-index_splits disabled
-index_merges disabled
-adaptive_hash_searches disabled
-adaptive_hash_searches_btree disabled
-adaptive_hash_pages_added disabled
-adaptive_hash_pages_removed disabled
-adaptive_hash_rows_added disabled
-adaptive_hash_rows_removed disabled
-adaptive_hash_rows_deleted_no_hash_entry disabled
-adaptive_hash_rows_updated disabled
-file_num_open_files disabled
-ibuf_merges_insert disabled
-ibuf_merges_delete_mark disabled
-ibuf_merges_delete disabled
-ibuf_merges_discard_insert disabled
-ibuf_merges_discard_delete_mark disabled
-ibuf_merges_discard_delete disabled
-ibuf_merges disabled
-ibuf_size disabled
-innodb_master_thread_sleeps disabled
-innodb_activity_count disabled
-innodb_master_active_loops disabled
-innodb_master_idle_loops disabled
-innodb_background_drop_table_usec disabled
-innodb_ibuf_merge_usec disabled
-innodb_log_flush_usec disabled
-innodb_mem_validate_usec disabled
-innodb_master_purge_usec disabled
-innodb_dict_lru_usec disabled
-innodb_checkpoint_usec disabled
-innodb_dblwr_writes disabled
-innodb_dblwr_pages_written disabled
-innodb_page_size disabled
-innodb_rwlock_s_spin_waits disabled
-innodb_rwlock_x_spin_waits disabled
-innodb_rwlock_s_spin_rounds disabled
-innodb_rwlock_x_spin_rounds disabled
-innodb_rwlock_s_os_waits disabled
-innodb_rwlock_x_os_waits disabled
-dml_reads disabled
-dml_inserts disabled
-dml_deletes disabled
-dml_updates disabled
-ddl_background_drop_tables disabled
-icp_attempts disabled
-icp_no_match disabled
-icp_out_of_range disabled
-icp_match disabled
+select name from information_schema.innodb_metrics where status!='disabled';
+name
set global innodb_monitor_enable="log%%%%";
-select name, status from information_schema.innodb_metrics;
-name status
-metadata_table_handles_opened disabled
-metadata_table_handles_closed disabled
-metadata_table_reference_count disabled
-metadata_mem_pool_size disabled
-lock_deadlocks disabled
-lock_timeouts disabled
-lock_rec_lock_waits disabled
-lock_table_lock_waits disabled
-lock_rec_lock_requests disabled
-lock_rec_lock_created disabled
-lock_rec_lock_removed disabled
-lock_rec_locks disabled
-lock_table_lock_created disabled
-lock_table_lock_removed disabled
-lock_table_locks disabled
-lock_row_lock_current_waits disabled
-lock_row_lock_time disabled
-lock_row_lock_time_max disabled
-lock_row_lock_waits disabled
-lock_row_lock_time_avg disabled
-buffer_pool_size disabled
-buffer_pool_reads disabled
-buffer_pool_read_requests disabled
-buffer_pool_write_requests disabled
-buffer_pool_pages_in_flush disabled
-buffer_pool_wait_free disabled
-buffer_pool_read_ahead disabled
-buffer_pool_read_ahead_evicted disabled
-buffer_pool_pages_total disabled
-buffer_pool_pages_misc disabled
-buffer_pool_pages_data disabled
-buffer_pool_pages_dirty disabled
-buffer_pool_pages_free disabled
-buffer_pages_created disabled
-buffer_pages_written disabled
-buffer_pages_read disabled
-buffer_data_reads disabled
-buffer_data_written disabled
-buffer_flush_batch_scanned disabled
-buffer_flush_batch_num_scan disabled
-buffer_flush_batch_scanned_per_call disabled
-buffer_flush_batch_total_pages disabled
-buffer_flush_batches disabled
-buffer_flush_batch_pages disabled
-buffer_flush_neighbor_total_pages disabled
-buffer_flush_neighbor disabled
-buffer_flush_neighbor_pages disabled
-buffer_flush_max_dirty_total_pages disabled
-buffer_flush_max_dirty disabled
-buffer_flush_max_dirty_pages disabled
-buffer_flush_adaptive_total_pages disabled
-buffer_flush_adaptive disabled
-buffer_flush_adaptive_pages disabled
-buffer_flush_async_total_pages disabled
-buffer_flush_async disabled
-buffer_flush_async_pages disabled
-buffer_flush_sync_total_pages disabled
-buffer_flush_sync disabled
-buffer_flush_sync_pages disabled
-buffer_flush_background_total_pages disabled
-buffer_flush_background disabled
-buffer_flush_background_pages disabled
-buffer_LRU_batch_scanned disabled
-buffer_LRU_batch_num_scan disabled
-buffer_LRU_batch_scanned_per_call disabled
-buffer_LRU_batch_total_pages disabled
-buffer_LRU_batches disabled
-buffer_LRU_batch_pages disabled
-buffer_LRU_single_flush_scanned disabled
-buffer_LRU_single_flush_num_scan disabled
-buffer_LRU_single_flush_scanned_per_call disabled
-buffer_LRU_single_flush_failure_count disabled
-buffer_LRU_get_free_search disabled
-buffer_LRU_search_scanned disabled
-buffer_LRU_search_num_scan disabled
-buffer_LRU_search_scanned_per_call disabled
-buffer_LRU_unzip_search_scanned disabled
-buffer_LRU_unzip_search_num_scan disabled
-buffer_LRU_unzip_search_scanned_per_call disabled
-buffer_page_read_index_leaf disabled
-buffer_page_read_index_non_leaf disabled
-buffer_page_read_index_ibuf_leaf disabled
-buffer_page_read_index_ibuf_non_leaf disabled
-buffer_page_read_undo_log disabled
-buffer_page_read_index_inode disabled
-buffer_page_read_ibuf_free_list disabled
-buffer_page_read_ibuf_bitmap disabled
-buffer_page_read_system_page disabled
-buffer_page_read_trx_system disabled
-buffer_page_read_fsp_hdr disabled
-buffer_page_read_xdes disabled
-buffer_page_read_blob disabled
-buffer_page_read_zblob disabled
-buffer_page_read_zblob2 disabled
-buffer_page_read_other disabled
-buffer_page_written_index_leaf disabled
-buffer_page_written_index_non_leaf disabled
-buffer_page_written_index_ibuf_leaf disabled
-buffer_page_written_index_ibuf_non_leaf disabled
-buffer_page_written_undo_log disabled
-buffer_page_written_index_inode disabled
-buffer_page_written_ibuf_free_list disabled
-buffer_page_written_ibuf_bitmap disabled
-buffer_page_written_system_page disabled
-buffer_page_written_trx_system disabled
-buffer_page_written_fsp_hdr disabled
-buffer_page_written_xdes disabled
-buffer_page_written_blob disabled
-buffer_page_written_zblob disabled
-buffer_page_written_zblob2 disabled
-buffer_page_written_other disabled
-os_data_reads disabled
-os_data_writes disabled
-os_data_fsyncs disabled
-os_pending_reads disabled
-os_pending_writes disabled
-os_log_bytes_written disabled
-os_log_fsyncs disabled
-os_log_pending_fsyncs disabled
-os_log_pending_writes disabled
-trx_rw_commits disabled
-trx_ro_commits disabled
-trx_nl_ro_commits disabled
-trx_commits_insert_update disabled
-trx_rollbacks disabled
-trx_rollbacks_savepoint disabled
-trx_rollback_active disabled
-trx_active_transactions disabled
-trx_rseg_history_len disabled
-trx_undo_slots_used disabled
-trx_undo_slots_cached disabled
-trx_rseg_current_size disabled
-purge_del_mark_records disabled
-purge_upd_exist_or_extern_records disabled
-purge_invoked disabled
-purge_undo_log_pages disabled
-purge_dml_delay_usec disabled
-purge_stop_count disabled
-purge_resume_count disabled
-log_checkpoints enabled
-log_lsn_last_flush enabled
-log_lsn_last_checkpoint enabled
-log_lsn_current enabled
-log_lsn_checkpoint_age enabled
-log_lsn_buf_pool_oldest enabled
-log_max_modified_age_async enabled
-log_max_modified_age_sync enabled
-log_pending_log_writes enabled
-log_pending_checkpoint_writes enabled
-log_num_log_io enabled
-log_waits enabled
-log_write_requests enabled
-log_writes enabled
-compress_pages_compressed disabled
-compress_pages_decompressed disabled
-index_splits disabled
-index_merges disabled
-adaptive_hash_searches disabled
-adaptive_hash_searches_btree disabled
-adaptive_hash_pages_added disabled
-adaptive_hash_pages_removed disabled
-adaptive_hash_rows_added disabled
-adaptive_hash_rows_removed disabled
-adaptive_hash_rows_deleted_no_hash_entry disabled
-adaptive_hash_rows_updated disabled
-file_num_open_files disabled
-ibuf_merges_insert disabled
-ibuf_merges_delete_mark disabled
-ibuf_merges_delete disabled
-ibuf_merges_discard_insert disabled
-ibuf_merges_discard_delete_mark disabled
-ibuf_merges_discard_delete disabled
-ibuf_merges disabled
-ibuf_size disabled
-innodb_master_thread_sleeps disabled
-innodb_activity_count disabled
-innodb_master_active_loops disabled
-innodb_master_idle_loops disabled
-innodb_background_drop_table_usec disabled
-innodb_ibuf_merge_usec disabled
-innodb_log_flush_usec disabled
-innodb_mem_validate_usec disabled
-innodb_master_purge_usec disabled
-innodb_dict_lru_usec disabled
-innodb_checkpoint_usec disabled
-innodb_dblwr_writes disabled
-innodb_dblwr_pages_written disabled
-innodb_page_size disabled
-innodb_rwlock_s_spin_waits disabled
-innodb_rwlock_x_spin_waits disabled
-innodb_rwlock_s_spin_rounds disabled
-innodb_rwlock_x_spin_rounds disabled
-innodb_rwlock_s_os_waits disabled
-innodb_rwlock_x_os_waits disabled
-dml_reads disabled
-dml_inserts disabled
-dml_deletes disabled
-dml_updates disabled
-ddl_background_drop_tables disabled
-icp_attempts disabled
-icp_no_match disabled
-icp_out_of_range disabled
-icp_match disabled
+select name from information_schema.innodb_metrics
+where status != IF(name like "log%", 'enabled', 'disabled');
+name
set global innodb_monitor_enable="os_%a_fs_ncs";
set global innodb_monitor_enable="os%pending%";
select name, status from information_schema.innodb_metrics
@@ -1989,7 +350,7 @@ name max_count min_count count max_count_reset min_count_reset count_reset statu
metadata_table_handles_opened NULL NULL 0 NULL NULL 0 disabled
set global innodb_monitor_enable = metadata_table_handles_opened;
drop table monitor_test;
-create table monitor_test(col int) engine = innodb;
+create table monitor_test(col int) engine = innodb stats_persistent=0;
select * from monitor_test;
col
select name, max_count, min_count, count,
@@ -2146,7 +507,7 @@ max_count_reset, min_count_reset, count_reset, status
from information_schema.innodb_metrics
where name like "file_num_open_files";
name max_count min_count count max_count_reset min_count_reset count_reset status
-file_num_open_files 3 3 3 3 3 3 enabled
+file_num_open_files # # # # # # enabled
set global innodb_monitor_disable = file_num_open_files;
set global innodb_monitor_enable = "icp%";
create table monitor_test(a char(3), b int, c char(2),
diff --git a/mysql-test/suite/sys_vars/r/innodb_old_blocks_time_basic.result b/mysql-test/suite/sys_vars/r/innodb_old_blocks_time_basic.result
index a285cc14a01..add91e56039 100644
--- a/mysql-test/suite/sys_vars/r/innodb_old_blocks_time_basic.result
+++ b/mysql-test/suite/sys_vars/r/innodb_old_blocks_time_basic.result
@@ -1,28 +1,28 @@
SET @start_global_value = @@global.innodb_old_blocks_time;
SELECT @start_global_value;
@start_global_value
-0
+1000
Valid values are zero or above
select @@global.innodb_old_blocks_time >=0;
@@global.innodb_old_blocks_time >=0
1
select @@global.innodb_old_blocks_time;
@@global.innodb_old_blocks_time
-0
+1000
select @@session.innodb_old_blocks_time;
ERROR HY000: Variable 'innodb_old_blocks_time' is a GLOBAL variable
show global variables like 'innodb_old_blocks_time';
Variable_name Value
-innodb_old_blocks_time 0
+innodb_old_blocks_time 1000
show session variables like 'innodb_old_blocks_time';
Variable_name Value
-innodb_old_blocks_time 0
+innodb_old_blocks_time 1000
select * from information_schema.global_variables where variable_name='innodb_old_blocks_time';
VARIABLE_NAME VARIABLE_VALUE
-INNODB_OLD_BLOCKS_TIME 0
+INNODB_OLD_BLOCKS_TIME 1000
select * from information_schema.session_variables where variable_name='innodb_old_blocks_time';
VARIABLE_NAME VARIABLE_VALUE
-INNODB_OLD_BLOCKS_TIME 0
+INNODB_OLD_BLOCKS_TIME 1000
set global innodb_old_blocks_time=10;
select @@global.innodb_old_blocks_time;
@@global.innodb_old_blocks_time
@@ -53,4 +53,4 @@ INNODB_OLD_BLOCKS_TIME 0
SET @@global.innodb_old_blocks_time = @start_global_value;
SELECT @@global.innodb_old_blocks_time;
@@global.innodb_old_blocks_time
-0
+1000
diff --git a/mysql-test/suite/sys_vars/r/innodb_online_alter_log_max_size_basic.result b/mysql-test/suite/sys_vars/r/innodb_online_alter_log_max_size_basic.result
new file mode 100644
index 00000000000..99f315eb1f9
--- /dev/null
+++ b/mysql-test/suite/sys_vars/r/innodb_online_alter_log_max_size_basic.result
@@ -0,0 +1,64 @@
+SET @start_global_value = @@global.innodb_online_alter_log_max_size;
+SELECT @start_global_value;
+@start_global_value
+134217728
+select @@global.innodb_online_alter_log_max_size >= 524288;
+@@global.innodb_online_alter_log_max_size >= 524288
+1
+select @@global.innodb_online_alter_log_max_size;
+@@global.innodb_online_alter_log_max_size
+134217728
+select @@session.innodb_online_alter_log_max_size;
+ERROR HY000: Variable 'innodb_online_alter_log_max_size' is a GLOBAL variable
+show global variables like 'innodb_online_alter_log_max_size';
+Variable_name Value
+innodb_online_alter_log_max_size 134217728
+show session variables like 'innodb_online_alter_log_max_size';
+Variable_name Value
+innodb_online_alter_log_max_size 134217728
+select * from information_schema.global_variables where variable_name='innodb_online_alter_log_max_size';
+VARIABLE_NAME VARIABLE_VALUE
+INNODB_ONLINE_ALTER_LOG_MAX_SIZE 134217728
+select * from information_schema.session_variables where variable_name='innodb_online_alter_log_max_size';
+VARIABLE_NAME VARIABLE_VALUE
+INNODB_ONLINE_ALTER_LOG_MAX_SIZE 134217728
+set global innodb_online_alter_log_max_size=1048576;
+select @@global.innodb_online_alter_log_max_size;
+@@global.innodb_online_alter_log_max_size
+1048576
+select * from information_schema.global_variables where variable_name='innodb_online_alter_log_max_size';
+VARIABLE_NAME VARIABLE_VALUE
+INNODB_ONLINE_ALTER_LOG_MAX_SIZE 1048576
+select * from information_schema.session_variables where variable_name='innodb_online_alter_log_max_size';
+VARIABLE_NAME VARIABLE_VALUE
+INNODB_ONLINE_ALTER_LOG_MAX_SIZE 1048576
+set @@global.innodb_online_alter_log_max_size=524288;
+select @@global.innodb_online_alter_log_max_size;
+@@global.innodb_online_alter_log_max_size
+524288
+select * from information_schema.global_variables where variable_name='innodb_online_alter_log_max_size';
+VARIABLE_NAME VARIABLE_VALUE
+INNODB_ONLINE_ALTER_LOG_MAX_SIZE 524288
+select * from information_schema.session_variables where variable_name='innodb_online_alter_log_max_size';
+VARIABLE_NAME VARIABLE_VALUE
+INNODB_ONLINE_ALTER_LOG_MAX_SIZE 524288
+set session innodb_online_alter_log_max_size='some';
+ERROR HY000: Variable 'innodb_online_alter_log_max_size' is a GLOBAL variable and should be set with SET GLOBAL
+set @@session.innodb_online_alter_log_max_size='some';
+ERROR HY000: Variable 'innodb_online_alter_log_max_size' is a GLOBAL variable and should be set with SET GLOBAL
+set global innodb_online_alter_log_max_size=1.1;
+ERROR 42000: Incorrect argument type to variable 'innodb_online_alter_log_max_size'
+set global innodb_online_alter_log_max_size='foo';
+ERROR 42000: Incorrect argument type to variable 'innodb_online_alter_log_max_size'
+set global innodb_online_alter_log_max_size=-2;
+Warnings:
+Warning 1292 Truncated incorrect innodb_online_alter_log_max_size value: '-2'
+set global innodb_online_alter_log_max_size=1e1;
+ERROR 42000: Incorrect argument type to variable 'innodb_online_alter_log_max_size'
+set global innodb_online_alter_log_max_size=2;
+Warnings:
+Warning 1292 Truncated incorrect innodb_online_alter_log_max_size value: '2'
+SET @@global.innodb_online_alter_log_max_size = @start_global_value;
+SELECT @@global.innodb_online_alter_log_max_size;
+@@global.innodb_online_alter_log_max_size
+134217728
diff --git a/mysql-test/suite/sys_vars/r/innodb_purge_run_now_basic.result b/mysql-test/suite/sys_vars/r/innodb_purge_run_now_basic.result
new file mode 100644
index 00000000000..41204422767
--- /dev/null
+++ b/mysql-test/suite/sys_vars/r/innodb_purge_run_now_basic.result
@@ -0,0 +1,27 @@
+SELECT name, count
+FROM information_schema.innodb_metrics
+WHERE name = 'purge_stop_count' OR name = 'purge_resume_count';
+name count
+purge_stop_count 0
+purge_resume_count 0
+SET @orig = @@global.innodb_purge_run_now;
+SELECT @orig;
+@orig
+0
+SET GLOBAL innodb_purge_stop_now = ON;
+SELECT name, count
+FROM information_schema.innodb_metrics
+WHERE name = 'purge_stop_count' OR name = 'purge_resume_count';
+name count
+purge_stop_count 1
+purge_resume_count 0
+SET GLOBAL innodb_purge_run_now = ON;
+SELECT @@global.innodb_purge_run_now;
+@@global.innodb_purge_run_now
+0
+SELECT name, count
+FROM information_schema.innodb_metrics
+WHERE name = 'purge_stop_count' OR name = 'purge_resume_count';
+name count
+purge_stop_count 1
+purge_resume_count 1
diff --git a/mysql-test/suite/sys_vars/r/innodb_purge_stop_now_basic.result b/mysql-test/suite/sys_vars/r/innodb_purge_stop_now_basic.result
new file mode 100644
index 00000000000..41204422767
--- /dev/null
+++ b/mysql-test/suite/sys_vars/r/innodb_purge_stop_now_basic.result
@@ -0,0 +1,27 @@
+SELECT name, count
+FROM information_schema.innodb_metrics
+WHERE name = 'purge_stop_count' OR name = 'purge_resume_count';
+name count
+purge_stop_count 0
+purge_resume_count 0
+SET @orig = @@global.innodb_purge_run_now;
+SELECT @orig;
+@orig
+0
+SET GLOBAL innodb_purge_stop_now = ON;
+SELECT name, count
+FROM information_schema.innodb_metrics
+WHERE name = 'purge_stop_count' OR name = 'purge_resume_count';
+name count
+purge_stop_count 1
+purge_resume_count 0
+SET GLOBAL innodb_purge_run_now = ON;
+SELECT @@global.innodb_purge_run_now;
+@@global.innodb_purge_run_now
+0
+SELECT name, count
+FROM information_schema.innodb_metrics
+WHERE name = 'purge_stop_count' OR name = 'purge_resume_count';
+name count
+purge_stop_count 1
+purge_resume_count 1
diff --git a/mysql-test/suite/sys_vars/r/innodb_read_only_basic.result b/mysql-test/suite/sys_vars/r/innodb_read_only_basic.result
new file mode 100644
index 00000000000..a97f1ddc698
--- /dev/null
+++ b/mysql-test/suite/sys_vars/r/innodb_read_only_basic.result
@@ -0,0 +1,22 @@
+Valid values are 'ON' and 'OFF'
+select @@global.innodb_read_only;
+@@global.innodb_read_only
+0
+select @@session.innodb_read_only;
+ERROR HY000: Variable 'innodb_read_only' is a GLOBAL variable
+show global variables like 'innodb_read_only';
+Variable_name Value
+innodb_read_only OFF
+show session variables like 'innodb_read_only';
+Variable_name Value
+innodb_read_only OFF
+select * from information_schema.global_variables where variable_name='innodb_read_only';
+VARIABLE_NAME VARIABLE_VALUE
+INNODB_READ_ONLY OFF
+select * from information_schema.session_variables where variable_name='innodb_read_only';
+VARIABLE_NAME VARIABLE_VALUE
+INNODB_READ_ONLY OFF
+set global innodb_read_only=1;
+ERROR HY000: Variable 'innodb_read_only' is a read only variable
+set session innodb_read_only=1;
+ERROR HY000: Variable 'innodb_read_only' is a read only variable
diff --git a/mysql-test/suite/sys_vars/r/innodb_stats_auto_recalc_basic.result b/mysql-test/suite/sys_vars/r/innodb_stats_auto_recalc_basic.result
new file mode 100644
index 00000000000..dd4970cbdb0
--- /dev/null
+++ b/mysql-test/suite/sys_vars/r/innodb_stats_auto_recalc_basic.result
@@ -0,0 +1,24 @@
+SELECT @@innodb_stats_auto_recalc;
+@@innodb_stats_auto_recalc
+1
+SET GLOBAL innodb_stats_auto_recalc=ON;
+SELECT @@innodb_stats_auto_recalc;
+@@innodb_stats_auto_recalc
+1
+SET GLOBAL innodb_stats_auto_recalc=OFF;
+SELECT @@innodb_stats_auto_recalc;
+@@innodb_stats_auto_recalc
+0
+SET GLOBAL innodb_stats_auto_recalc=1;
+SELECT @@innodb_stats_auto_recalc;
+@@innodb_stats_auto_recalc
+1
+SET GLOBAL innodb_stats_auto_recalc=0;
+SELECT @@innodb_stats_auto_recalc;
+@@innodb_stats_auto_recalc
+0
+SET GLOBAL innodb_stats_auto_recalc=123;
+ERROR 42000: Variable 'innodb_stats_auto_recalc' can't be set to the value of '123'
+SET GLOBAL innodb_stats_auto_recalc='foo';
+ERROR 42000: Variable 'innodb_stats_auto_recalc' can't be set to the value of 'foo'
+SET GLOBAL innodb_stats_auto_recalc=default;
diff --git a/mysql-test/suite/sys_vars/r/innodb_stats_on_metadata_basic.result b/mysql-test/suite/sys_vars/r/innodb_stats_on_metadata_basic.result
index 852ef8a353d..19e4a8a67d2 100644
--- a/mysql-test/suite/sys_vars/r/innodb_stats_on_metadata_basic.result
+++ b/mysql-test/suite/sys_vars/r/innodb_stats_on_metadata_basic.result
@@ -1,28 +1,28 @@
SET @start_global_value = @@global.innodb_stats_on_metadata;
SELECT @start_global_value;
@start_global_value
-1
+0
Valid values are 'ON' and 'OFF'
select @@global.innodb_stats_on_metadata in (0, 1);
@@global.innodb_stats_on_metadata in (0, 1)
1
select @@global.innodb_stats_on_metadata;
@@global.innodb_stats_on_metadata
-1
+0
select @@session.innodb_stats_on_metadata;
ERROR HY000: Variable 'innodb_stats_on_metadata' is a GLOBAL variable
show global variables like 'innodb_stats_on_metadata';
Variable_name Value
-innodb_stats_on_metadata ON
+innodb_stats_on_metadata OFF
show session variables like 'innodb_stats_on_metadata';
Variable_name Value
-innodb_stats_on_metadata ON
+innodb_stats_on_metadata OFF
select * from information_schema.global_variables where variable_name='innodb_stats_on_metadata';
VARIABLE_NAME VARIABLE_VALUE
-INNODB_STATS_ON_METADATA ON
+INNODB_STATS_ON_METADATA OFF
select * from information_schema.session_variables where variable_name='innodb_stats_on_metadata';
VARIABLE_NAME VARIABLE_VALUE
-INNODB_STATS_ON_METADATA ON
+INNODB_STATS_ON_METADATA OFF
set global innodb_stats_on_metadata='OFF';
select @@global.innodb_stats_on_metadata;
@@global.innodb_stats_on_metadata
@@ -89,4 +89,4 @@ ERROR 42000: Variable 'innodb_stats_on_metadata' can't be set to the value of 'A
SET @@global.innodb_stats_on_metadata = @start_global_value;
SELECT @@global.innodb_stats_on_metadata;
@@global.innodb_stats_on_metadata
-1
+0
diff --git a/mysql-test/suite/sys_vars/r/innodb_stats_persistent_basic.result b/mysql-test/suite/sys_vars/r/innodb_stats_persistent_basic.result
new file mode 100644
index 00000000000..1cbdd16afdf
--- /dev/null
+++ b/mysql-test/suite/sys_vars/r/innodb_stats_persistent_basic.result
@@ -0,0 +1,24 @@
+SELECT @@innodb_stats_persistent;
+@@innodb_stats_persistent
+0
+SET GLOBAL innodb_stats_persistent=ON;
+SELECT @@innodb_stats_persistent;
+@@innodb_stats_persistent
+1
+SET GLOBAL innodb_stats_persistent=OFF;
+SELECT @@innodb_stats_persistent;
+@@innodb_stats_persistent
+0
+SET GLOBAL innodb_stats_persistent=1;
+SELECT @@innodb_stats_persistent;
+@@innodb_stats_persistent
+1
+SET GLOBAL innodb_stats_persistent=0;
+SELECT @@innodb_stats_persistent;
+@@innodb_stats_persistent
+0
+SET GLOBAL innodb_stats_persistent=123;
+ERROR 42000: Variable 'innodb_stats_persistent' can't be set to the value of '123'
+SET GLOBAL innodb_stats_persistent='foo';
+ERROR 42000: Variable 'innodb_stats_persistent' can't be set to the value of 'foo'
+SET GLOBAL innodb_stats_persistent=off;
diff --git a/mysql-test/suite/sys_vars/r/max_connect_errors_basic.result b/mysql-test/suite/sys_vars/r/max_connect_errors_basic.result
index be1a58394b8..99bc0727ad8 100644
--- a/mysql-test/suite/sys_vars/r/max_connect_errors_basic.result
+++ b/mysql-test/suite/sys_vars/r/max_connect_errors_basic.result
@@ -1,18 +1,18 @@
SET @start_value = @@global.max_connect_errors;
SELECT @start_value;
@start_value
-10
+100
'#--------------------FN_DYNVARS_073_01------------------------#'
SET @@global.max_connect_errors = 5000;
SET @@global.max_connect_errors = DEFAULT;
SELECT @@global.max_connect_errors;
@@global.max_connect_errors
-10
+100
'#---------------------FN_DYNVARS_073_02-------------------------#'
SET @@global.max_connect_errors = @start_value;
SELECT @@global.max_connect_errors = 10;
@@global.max_connect_errors = 10
-1
+0
'#--------------------FN_DYNVARS_073_03------------------------#'
SET @@global.max_connect_errors = 4096;
SELECT @@global.max_connect_errors;
@@ -131,4 +131,4 @@ ERROR 42S22: Unknown column 'max_connect_errors' in 'field list'
SET @@global.max_connect_errors = @start_value;
SELECT @@global.max_connect_errors;
@@global.max_connect_errors
-10
+100
diff --git a/mysql-test/suite/sys_vars/r/metadata_locks_hash_instances_basic.result b/mysql-test/suite/sys_vars/r/metadata_locks_hash_instances_basic.result
new file mode 100644
index 00000000000..46e65fbc003
--- /dev/null
+++ b/mysql-test/suite/sys_vars/r/metadata_locks_hash_instances_basic.result
@@ -0,0 +1,51 @@
+####################################################################
+# Displaying default value #
+####################################################################
+SELECT @@GLOBAL.metadata_locks_hash_instances;
+@@GLOBAL.metadata_locks_hash_instances
+8
+####################################################################
+# Check that value cannot be set (this variable is settable only #
+# at start-up). #
+####################################################################
+SET @@GLOBAL.metadata_locks_hash_instances=1;
+ERROR HY000: Variable 'metadata_locks_hash_instances' is a read only variable
+SELECT @@GLOBAL.metadata_locks_hash_instances;
+@@GLOBAL.metadata_locks_hash_instances
+8
+#################################################################
+# Check if the value in GLOBAL Table matches value in variable #
+#################################################################
+SELECT @@GLOBAL.metadata_locks_hash_instances = VARIABLE_VALUE
+FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+WHERE VARIABLE_NAME='metadata_locks_hash_instances';
+@@GLOBAL.metadata_locks_hash_instances = VARIABLE_VALUE
+1
+SELECT @@GLOBAL.metadata_locks_hash_instances;
+@@GLOBAL.metadata_locks_hash_instances
+8
+SELECT VARIABLE_VALUE
+FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+WHERE VARIABLE_NAME='metadata_locks_hash_instances';
+VARIABLE_VALUE
+8
+######################################################################
+# Check if accessing variable with and without GLOBAL point to same #
+# variable #
+######################################################################
+SELECT @@metadata_locks_hash_instances = @@GLOBAL.metadata_locks_hash_instances;
+@@metadata_locks_hash_instances = @@GLOBAL.metadata_locks_hash_instances
+1
+######################################################################
+# Check if variable has only the GLOBAL scope #
+######################################################################
+SELECT @@metadata_locks_hash_instances;
+@@metadata_locks_hash_instances
+8
+SELECT @@GLOBAL.metadata_locks_hash_instances;
+@@GLOBAL.metadata_locks_hash_instances
+8
+SELECT @@local.metadata_locks_hash_instances;
+ERROR HY000: Variable 'metadata_locks_hash_instances' is a GLOBAL variable
+SELECT @@SESSION.metadata_locks_hash_instances;
+ERROR HY000: Variable 'metadata_locks_hash_instances' is a GLOBAL variable
diff --git a/mysql-test/suite/sys_vars/r/pfs_session_connect_attrs_size_basic.result b/mysql-test/suite/sys_vars/r/pfs_session_connect_attrs_size_basic.result
new file mode 100644
index 00000000000..a65a71ff8a0
--- /dev/null
+++ b/mysql-test/suite/sys_vars/r/pfs_session_connect_attrs_size_basic.result
@@ -0,0 +1,23 @@
+select @@global.performance_schema_session_connect_attrs_size;
+@@global.performance_schema_session_connect_attrs_size
+2048
+select @@session.performance_schema_session_connect_attrs_size;
+ERROR HY000: Variable 'performance_schema_session_connect_attrs_size' is a GLOBAL variable
+show global variables like 'performance_schema_session_connect_attrs_size';
+Variable_name Value
+performance_schema_session_connect_attrs_size 2048
+show session variables like 'performance_schema_session_connect_attrs_size';
+Variable_name Value
+performance_schema_session_connect_attrs_size 2048
+select * from information_schema.global_variables
+where variable_name='performance_schema_session_connect_attrs_size';
+VARIABLE_NAME VARIABLE_VALUE
+PERFORMANCE_SCHEMA_SESSION_CONNECT_ATTRS_SIZE 2048
+select * from information_schema.session_variables
+where variable_name='performance_schema_session_connect_attrs_size';
+VARIABLE_NAME VARIABLE_VALUE
+PERFORMANCE_SCHEMA_SESSION_CONNECT_ATTRS_SIZE 2048
+set global performance_schema_session_connect_attrs_size=1;
+ERROR HY000: Variable 'performance_schema_session_connect_attrs_size' is a read only variable
+set session performance_schema_session_connect_attrs_size=1;
+ERROR HY000: Variable 'performance_schema_session_connect_attrs_size' is a read only variable
diff --git a/mysql-test/suite/sys_vars/r/slow_query_log_func.result b/mysql-test/suite/sys_vars/r/slow_query_log_func.result
index fb650399597..f01b2c4c48b 100644
--- a/mysql-test/suite/sys_vars/r/slow_query_log_func.result
+++ b/mysql-test/suite/sys_vars/r/slow_query_log_func.result
@@ -81,7 +81,7 @@ DROP FUNCTION f_slow_current_time;
DROP TABLE t1;
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
CREATE TABLE t1(c1 INT) ENGINE=MyISAM;
DROP PROCEDURE IF EXISTS p1;
CREATE PROCEDURE p1()
diff --git a/mysql-test/suite/sys_vars/r/sql_notes_func.result b/mysql-test/suite/sys_vars/r/sql_notes_func.result
index 87c4ecb8431..28510f027da 100644
--- a/mysql-test/suite/sys_vars/r/sql_notes_func.result
+++ b/mysql-test/suite/sys_vars/r/sql_notes_func.result
@@ -13,7 +13,7 @@ SELECT @@warning_count;
0 Expected
DROP TABLE IF EXISTS t1;
Warnings:
-Note 1051 Unknown table 't1'
+Note 1051 Unknown table 'test.t1'
SELECT @@warning_count;
@@warning_count
1
diff --git a/mysql-test/suite/sys_vars/t/host_cache_size_basic-master.opt b/mysql-test/suite/sys_vars/t/host_cache_size_basic-master.opt
new file mode 100644
index 00000000000..7fb505c5572
--- /dev/null
+++ b/mysql-test/suite/sys_vars/t/host_cache_size_basic-master.opt
@@ -0,0 +1 @@
+--host-cache-size=123
diff --git a/mysql-test/suite/sys_vars/t/host_cache_size_basic.test b/mysql-test/suite/sys_vars/t/host_cache_size_basic.test
new file mode 100644
index 00000000000..38713d2f963
--- /dev/null
+++ b/mysql-test/suite/sys_vars/t/host_cache_size_basic.test
@@ -0,0 +1,41 @@
+--source include/not_embedded.inc
+
+#
+# Only global
+#
+
+select @@global.host_cache_size;
+
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+select @@session.host_cache_size;
+
+show global variables like 'host_cache_size';
+
+show session variables like 'host_cache_size';
+
+select * from information_schema.global_variables
+ where variable_name='host_cache_size';
+
+select * from information_schema.session_variables
+ where variable_name='host_cache_size';
+
+#
+# Read-Write
+#
+
+set global host_cache_size=1;
+select @@global.host_cache_size;
+
+set global host_cache_size=12;
+select @@global.host_cache_size;
+
+set global host_cache_size=0;
+select @@global.host_cache_size;
+
+--error ER_GLOBAL_VARIABLE
+set session host_cache_size=1;
+
+# Restore default
+set global host_cache_size=123;
+select @@global.host_cache_size;
+
diff --git a/mysql-test/suite/sys_vars/t/innodb_adaptive_flushing_lwm_basic.test b/mysql-test/suite/sys_vars/t/innodb_adaptive_flushing_lwm_basic.test
new file mode 100644
index 00000000000..04f07cdd3df
--- /dev/null
+++ b/mysql-test/suite/sys_vars/t/innodb_adaptive_flushing_lwm_basic.test
@@ -0,0 +1,142 @@
+############ mysql-test\t\innodb_adaptive_flushing_lwm_basic.test #############
+# #
+# Variable Name: innodb_adaptive_flushing_lwm #
+# Scope: GLOBAL #
+# Access Type: Dynamic #
+# Data Type: Numeric #
+# Default Value: 10 #
+# Range: 0-70 #
+# #
+# #
+# Creation Date: 2008-02-07 #
+# Author: Rizwan #
+# #
+#Description:Test Cases of Dynamic System Variable innodb_adaptive_flushing_lwm #
+# that checks the behavior of this variable in the following ways #
+# * Default Value #
+# * Valid & Invalid values #
+# * Scope & Access method #
+# * Data Integrity #
+# #
+# Reference: http://dev.mysql.com/doc/refman/5.1/en/ #
+# server-system-variables.html #
+# #
+###############################################################################
+
+--source include/have_innodb.inc
+--source include/load_sysvars.inc
+
+########################################################################
+# START OF innodb_adaptive_flushing_lwm TESTS #
+########################################################################
+
+
+###############################################################################
+#Saving initial value of innodb_adaptive_flushing_lwm in a temporary variable #
+###############################################################################
+
+SET @global_start_value = @@global.innodb_adaptive_flushing_lwm;
+SELECT @global_start_value;
+
+--echo '#--------------------FN_DYNVARS_046_01------------------------#'
+########################################################################
+# Display the DEFAULT value of innodb_adaptive_flushing_lwm #
+########################################################################
+
+SET @@global.innodb_adaptive_flushing_lwm = 1;
+SET @@global.innodb_adaptive_flushing_lwm = DEFAULT;
+SELECT @@global.innodb_adaptive_flushing_lwm;
+
+--echo '#---------------------FN_DYNVARS_046_02-------------------------#'
+###################################################################################
+# Check if innodb_adaptive_flushing_lwm can be accessed with and without @@ sign #
+###################################################################################
+
+--Error ER_GLOBAL_VARIABLE
+SET innodb_adaptive_flushing_lwm = 1;
+SELECT @@innodb_adaptive_flushing_lwm;
+
+--Error ER_UNKNOWN_TABLE
+SELECT local.innodb_adaptive_flushing_lwm;
+
+SET global innodb_adaptive_flushing_lwm = 1;
+SELECT @@global.innodb_adaptive_flushing_lwm;
+
+--echo '#--------------------FN_DYNVARS_046_03------------------------#'
+###############################################################################
+# change the value of innodb_adaptive_flushing_lwm to a valid value #
+###############################################################################
+
+SET @@global.innodb_adaptive_flushing_lwm = 1;
+SELECT @@global.innodb_adaptive_flushing_lwm;
+
+SET @@global.innodb_adaptive_flushing_lwm = 60;
+SELECT @@global.innodb_adaptive_flushing_lwm;
+SET @@global.innodb_adaptive_flushing_lwm = 70;
+SELECT @@global.innodb_adaptive_flushing_lwm;
+
+--echo '#--------------------FN_DYNVARS_046_04-------------------------#'
+###########################################################################
+# Change the value of innodb_adaptive_flushing_lwm to invalid value #
+###########################################################################
+
+SET @@global.innodb_adaptive_flushing_lwm = -1;
+SELECT @@global.innodb_adaptive_flushing_lwm;
+
+--Error ER_WRONG_TYPE_FOR_VAR
+SET @@global.innodb_adaptive_flushing_lwm = "T";
+SELECT @@global.innodb_adaptive_flushing_lwm;
+
+--Error ER_WRONG_TYPE_FOR_VAR
+SET @@global.innodb_adaptive_flushing_lwm = "Y";
+SELECT @@global.innodb_adaptive_flushing_lwm;
+
+SET @@global.innodb_adaptive_flushing_lwm = 71;
+SELECT @@global.innodb_adaptive_flushing_lwm;
+
+--echo '#----------------------FN_DYNVARS_046_05------------------------#'
+#########################################################################
+# Check if the value in GLOBAL Table matches value in variable #
+#########################################################################
+
+SELECT @@global.innodb_adaptive_flushing_lwm =
+ VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+ WHERE VARIABLE_NAME='innodb_adaptive_flushing_lwm';
+SELECT @@global.innodb_adaptive_flushing_lwm;
+SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+ WHERE VARIABLE_NAME='innodb_adaptive_flushing_lwm';
+
+--echo '#---------------------FN_DYNVARS_046_06-------------------------#'
+###################################################################
+# Check if ON and OFF values can be used on variable #
+###################################################################
+
+--ERROR ER_WRONG_TYPE_FOR_VAR
+SET @@global.innodb_adaptive_flushing_lwm = OFF;
+SELECT @@global.innodb_adaptive_flushing_lwm;
+
+--ERROR ER_WRONG_TYPE_FOR_VAR
+SET @@global.innodb_adaptive_flushing_lwm = ON;
+SELECT @@global.innodb_adaptive_flushing_lwm;
+
+--echo '#---------------------FN_DYNVARS_046_07----------------------#'
+###################################################################
+# Check if TRUE and FALSE values can be used on variable #
+###################################################################
+
+
+SET @@global.innodb_adaptive_flushing_lwm = TRUE;
+SELECT @@global.innodb_adaptive_flushing_lwm;
+SET @@global.innodb_adaptive_flushing_lwm = FALSE;
+SELECT @@global.innodb_adaptive_flushing_lwm;
+
+##############################
+# Restore initial value #
+##############################
+
+SET @@global.innodb_adaptive_flushing_lwm = @global_start_value;
+SELECT @@global.innodb_adaptive_flushing_lwm;
+
+###############################################################
+# END OF innodb_adaptive_flushing_lwm TESTS #
+###############################################################
diff --git a/mysql-test/suite/sys_vars/t/innodb_analyze_is_persistent_basic.test b/mysql-test/suite/sys_vars/t/innodb_analyze_is_persistent_basic.test
deleted file mode 100644
index 1283678b2b1..00000000000
--- a/mysql-test/suite/sys_vars/t/innodb_analyze_is_persistent_basic.test
+++ /dev/null
@@ -1,81 +0,0 @@
-
-# 2010-01-25 - Added
-#
-
---source include/have_innodb.inc
-
-SET @start_global_value = @@global.innodb_analyze_is_persistent;
-SELECT @start_global_value;
-
-#
-# exists as global only
-#
---echo Valid values are 'ON' and 'OFF'
-SELECT @@global.innodb_analyze_is_persistent in (0, 1);
-SELECT @@global.innodb_analyze_is_persistent;
-#--error ER_INCORRECT_GLOBAL_LOCAL_VAR
-SELECT @@session.innodb_analyze_is_persistent;
-SHOW global variables LIKE 'innodb_analyze_is_persistent';
-SHOW session variables LIKE 'innodb_analyze_is_persistent';
-SELECT * FROM information_schema.global_variables
-WHERE variable_name='innodb_analyze_is_persistent';
-SELECT * FROM information_schema.session_variables
-WHERE variable_name='innodb_analyze_is_persistent';
-
-#
-# SHOW that it's writable
-#
-SET global innodb_analyze_is_persistent='OFF';
-SELECT @@global.innodb_analyze_is_persistent;
-SELECT * FROM information_schema.global_variables
-WHERE variable_name='innodb_analyze_is_persistent';
-SELECT * FROM information_schema.session_variables
-WHERE variable_name='innodb_analyze_is_persistent';
-SET @@global.innodb_analyze_is_persistent=1;
-SELECT @@global.innodb_analyze_is_persistent;
-SELECT * FROM information_schema.global_variables
-WHERE variable_name='innodb_analyze_is_persistent';
-SELECT * FROM information_schema.session_variables
-WHERE variable_name='innodb_analyze_is_persistent';
-SET global innodb_analyze_is_persistent=0;
-SELECT @@global.innodb_analyze_is_persistent;
-SELECT * FROM information_schema.global_variables
-WHERE variable_name='innodb_analyze_is_persistent';
-SELECT * FROM information_schema.session_variables
-WHERE variable_name='innodb_analyze_is_persistent';
-SET @@global.innodb_analyze_is_persistent='ON';
-SELECT @@global.innodb_analyze_is_persistent;
-SELECT * FROM information_schema.global_variables
-WHERE variable_name='innodb_analyze_is_persistent';
-SELECT * FROM information_schema.session_variables
-WHERE variable_name='innodb_analyze_is_persistent';
-#--error ER_GLOBAL_VARIABLE
-SET session innodb_analyze_is_persistent='OFF';
-#--error ER_GLOBAL_VARIABLE
-SET @@session.innodb_analyze_is_persistent='ON';
-
-#
-# incorrect types
-#
---error ER_WRONG_TYPE_FOR_VAR
-SET global innodb_analyze_is_persistent=1.1;
---error ER_WRONG_TYPE_FOR_VAR
-SET global innodb_analyze_is_persistent=1e1;
---error ER_WRONG_VALUE_FOR_VAR
-SET global innodb_analyze_is_persistent=2;
---error ER_WRONG_VALUE_FOR_VAR
-SET global innodb_analyze_is_persistent=-3;
-SELECT @@global.innodb_analyze_is_persistent;
-SELECT * FROM information_schema.global_variables
-WHERE variable_name='innodb_analyze_is_persistent';
-SELECT * FROM information_schema.session_variables
-WHERE variable_name='innodb_analyze_is_persistent';
---error ER_WRONG_VALUE_FOR_VAR
-SET global innodb_analyze_is_persistent='AUTO';
-
-#
-# Cleanup
-#
-
-SET @@global.innodb_analyze_is_persistent = @start_global_value;
-SELECT @@global.innodb_analyze_is_persistent;
diff --git a/mysql-test/suite/sys_vars/t/innodb_api_bk_commit_interval_basic.test b/mysql-test/suite/sys_vars/t/innodb_api_bk_commit_interval_basic.test
new file mode 100644
index 00000000000..b3a7aebce4e
--- /dev/null
+++ b/mysql-test/suite/sys_vars/t/innodb_api_bk_commit_interval_basic.test
@@ -0,0 +1,58 @@
+#
+# 2012-08-01 Added
+#
+
+--source include/have_innodb.inc
+
+SET @start_global_value = @@global.innodb_api_bk_commit_interval;
+SELECT @start_global_value;
+
+#
+# exists as global only
+#
+--echo Valid values are positive number
+SELECT @@global.innodb_api_bk_commit_interval > 0;
+SELECT @@global.innodb_api_bk_commit_interval <= 1024*1024*1024;
+SELECT @@global.innodb_api_bk_commit_interval;
+
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+SELECT @@session.innodb_api_bk_commit_interval;
+SHOW global variables LIKE 'innodb_api_bk_commit_interval';
+SHOW session variables LIKE 'innodb_api_bk_commit_interval';
+SELECT * FROM information_schema.global_variables
+WHERE variable_name='innodb_api_bk_commit_interval';
+SELECT * FROM information_schema.session_variables
+WHERE variable_name='innodb_api_bk_commit_interval';
+
+#
+# show that it's writable
+#
+SET global innodb_api_bk_commit_interval=100;
+SELECT @@global.innodb_api_bk_commit_interval;
+SELECT * FROM information_schema.global_variables
+WHERE variable_name='innodb_api_bk_commit_interval';
+SELECT * FROM information_schema.session_variables
+WHERE variable_name='innodb_api_bk_commit_interval';
+--error ER_GLOBAL_VARIABLE
+SET session innodb_api_bk_commit_interval=1;
+
+#
+# incorrect types
+#
+--error ER_WRONG_TYPE_FOR_VAR
+SET global innodb_api_bk_commit_interval=1.1;
+--error ER_WRONG_TYPE_FOR_VAR
+SET global innodb_api_bk_commit_interval=1e1;
+--error ER_WRONG_TYPE_FOR_VAR
+SET global innodb_api_bk_commit_interval="foo";
+SET global innodb_api_bk_commit_interval=-7;
+SELECT @@global.innodb_api_bk_commit_interval;
+SELECT * FROM information_schema.global_variables
+WHERE variable_name='innodb_api_bk_commit_interval';
+
+#
+# cleanup
+#
+
+SET @@global.innodb_api_bk_commit_interval = @start_global_value;
+SELECT @@global.innodb_api_bk_commit_interval;
diff --git a/mysql-test/suite/sys_vars/t/innodb_api_disable_rowlock_basic.test b/mysql-test/suite/sys_vars/t/innodb_api_disable_rowlock_basic.test
new file mode 100644
index 00000000000..c9c04a27229
--- /dev/null
+++ b/mysql-test/suite/sys_vars/t/innodb_api_disable_rowlock_basic.test
@@ -0,0 +1,102 @@
+
+
+################## mysql-test\t\innodb_api_disable_rowlock_basic.test ##############
+# #
+# Variable Name: innodb_api_disable_rowlock #
+# Scope: Global #
+# Access Type: Static #
+# Data Type: numeric #
+# #
+# #
+# Creation Date: 2008-02-07 #
+# Author : Sharique Abdullah #
+# #
+# #
+# Description:Test Cases of Dynamic System Variable innodb_api_disable_rowlock #
+# that checks the behavior of this variable in the following ways #
+# * Value Check #
+# * Scope Check #
+# #
+# Reference: http://dev.mysql.com/doc/refman/5.1/en/ #
+# server-system-variables.html #
+# #
+###############################################################################
+
+--source include/have_innodb.inc
+
+--echo '#---------------------BS_STVARS_035_01----------------------#'
+####################################################################
+# Displaying default value #
+####################################################################
+SELECT COUNT(@@GLOBAL.innodb_api_disable_rowlock);
+--echo 1 Expected
+
+
+--echo '#---------------------BS_STVARS_035_02----------------------#'
+####################################################################
+# Check if Value can set #
+####################################################################
+
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+SET @@GLOBAL.innodb_api_disable_rowlock=1;
+--echo Expected error 'Read only variable'
+
+SELECT COUNT(@@GLOBAL.innodb_api_disable_rowlock);
+--echo 1 Expected
+
+
+
+
+--echo '#---------------------BS_STVARS_035_03----------------------#'
+#################################################################
+# Check if the value in GLOBAL Table matches value in variable #
+#################################################################
+
+SELECT IF(@@GLOBAL.innodb_api_disable_rowlock, 'ON', 'OFF') = VARIABLE_VALUE
+FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+WHERE VARIABLE_NAME='innodb_api_disable_rowlock';
+--echo 1 Expected
+
+SELECT COUNT(@@GLOBAL.innodb_api_disable_rowlock);
+--echo 1 Expected
+
+SELECT COUNT(VARIABLE_VALUE)
+FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+WHERE VARIABLE_NAME='innodb_api_disable_rowlock';
+--echo 1 Expected
+
+
+
+--echo '#---------------------BS_STVARS_035_04----------------------#'
+################################################################################
+# Check if accessing variable with and without GLOBAL point to same variable #
+################################################################################
+SELECT @@innodb_api_disable_rowlock = @@GLOBAL.innodb_api_enable_binlog;
+--echo 1 Expected
+
+
+
+--echo '#---------------------BS_STVARS_035_05----------------------#'
+################################################################################
+# Check if innodb_log_file_size can be accessed with and without @@ sign #
+################################################################################
+
+SELECT COUNT(@@innodb_api_disable_rowlock);
+--echo 1 Expected
+
+--Error ER_INCORRECT_GLOBAL_LOCAL_VAR
+SELECT COUNT(@@local.innodb_api_disable_rowlock);
+--echo Expected error 'Variable is a GLOBAL variable'
+
+--Error ER_INCORRECT_GLOBAL_LOCAL_VAR
+SELECT COUNT(@@SESSION.innodb_api_disable_rowlock);
+--echo Expected error 'Variable is a GLOBAL variable'
+
+SELECT COUNT(@@GLOBAL.innodb_api_disable_rowlock);
+--echo 1 Expected
+
+--Error ER_BAD_FIELD_ERROR
+SELECT innodb_api_disable_rowlock = @@SESSION.innodb_api_enable_binlog;
+--echo Expected error 'Readonly variable'
+
+
diff --git a/mysql-test/suite/sys_vars/t/innodb_api_enable_binlog_basic.test b/mysql-test/suite/sys_vars/t/innodb_api_enable_binlog_basic.test
new file mode 100644
index 00000000000..637541ef621
--- /dev/null
+++ b/mysql-test/suite/sys_vars/t/innodb_api_enable_binlog_basic.test
@@ -0,0 +1,102 @@
+
+
+################## mysql-test\t\innodb_api_enable_binlog_basic.test ##############
+# #
+# Variable Name: innodb_api_enable_binlog #
+# Scope: Global #
+# Access Type: Static #
+# Data Type: numeric #
+# #
+# #
+# Creation Date: 2008-02-07 #
+# Author : Sharique Abdullah #
+# #
+# #
+# Description:Test Cases of Static System Variable innodb_api_enable_binlog  #
+# that checks the behavior of this variable in the following ways #
+# * Value Check #
+# * Scope Check #
+# #
+# Reference: http://dev.mysql.com/doc/refman/5.1/en/ #
+# server-system-variables.html #
+# #
+###############################################################################
+
+--source include/have_innodb.inc
+
+--echo '#---------------------BS_STVARS_035_01----------------------#'
+####################################################################
+# Displaying default value #
+####################################################################
+SELECT COUNT(@@GLOBAL.innodb_api_enable_binlog);
+--echo 1 Expected
+
+
+--echo '#---------------------BS_STVARS_035_02----------------------#'
+####################################################################
+# Check if Value can set #
+####################################################################
+
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+SET @@GLOBAL.innodb_api_enable_binlog=1;
+--echo Expected error 'Read only variable'
+
+SELECT COUNT(@@GLOBAL.innodb_api_enable_binlog);
+--echo 1 Expected
+
+
+
+
+--echo '#---------------------BS_STVARS_035_03----------------------#'
+#################################################################
+# Check if the value in GLOBAL Table matches value in variable #
+#################################################################
+
+SELECT IF(@@GLOBAL.innodb_api_enable_binlog, 'ON', 'OFF') = VARIABLE_VALUE
+FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+WHERE VARIABLE_NAME='innodb_api_enable_binlog';
+--echo 1 Expected
+
+SELECT COUNT(@@GLOBAL.innodb_api_enable_binlog);
+--echo 1 Expected
+
+SELECT COUNT(VARIABLE_VALUE)
+FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+WHERE VARIABLE_NAME='innodb_api_enable_binlog';
+--echo 1 Expected
+
+
+
+--echo '#---------------------BS_STVARS_035_04----------------------#'
+################################################################################
+# Check if accessing variable with and without GLOBAL point to same variable #
+################################################################################
+SELECT @@innodb_api_enable_binlog = @@GLOBAL.innodb_api_enable_binlog;
+--echo 1 Expected
+
+
+
+--echo '#---------------------BS_STVARS_035_05----------------------#'
+################################################################################
+# Check if innodb_api_enable_binlog can be accessed with and without @@ sign     #
+################################################################################
+
+SELECT COUNT(@@innodb_api_enable_binlog);
+--echo 1 Expected
+
+--Error ER_INCORRECT_GLOBAL_LOCAL_VAR
+SELECT COUNT(@@local.innodb_api_enable_binlog);
+--echo Expected error 'Variable is a GLOBAL variable'
+
+--Error ER_INCORRECT_GLOBAL_LOCAL_VAR
+SELECT COUNT(@@SESSION.innodb_api_enable_binlog);
+--echo Expected error 'Variable is a GLOBAL variable'
+
+SELECT COUNT(@@GLOBAL.innodb_api_enable_binlog);
+--echo 1 Expected
+
+--Error ER_BAD_FIELD_ERROR
+SELECT innodb_api_enable_binlog = @@SESSION.innodb_api_enable_binlog;
+--echo Expected error 'Readonly variable'
+
+
diff --git a/mysql-test/suite/sys_vars/t/innodb_api_enable_mdl_basic.test b/mysql-test/suite/sys_vars/t/innodb_api_enable_mdl_basic.test
new file mode 100644
index 00000000000..0e440a72cce
--- /dev/null
+++ b/mysql-test/suite/sys_vars/t/innodb_api_enable_mdl_basic.test
@@ -0,0 +1,102 @@
+
+
+################## mysql-test\t\innodb_api_enable_mdl_basic.test ##############
+# #
+# Variable Name: innodb_api_enable_mdl #
+# Scope: Global #
+# Access Type: Static #
+# Data Type: numeric #
+# #
+# #
+# Creation Date: 2008-02-07 #
+# Author : Sharique Abdullah #
+# #
+# #
+# Description:Test Cases of Static System Variable innodb_api_enable_mdl     #
+# that checks the behavior of this variable in the following ways #
+# * Value Check #
+# * Scope Check #
+# #
+# Reference: http://dev.mysql.com/doc/refman/5.1/en/ #
+# server-system-variables.html #
+# #
+###############################################################################
+
+--source include/have_innodb.inc
+
+--echo '#---------------------BS_STVARS_035_01----------------------#'
+####################################################################
+# Displaying default value #
+####################################################################
+SELECT COUNT(@@GLOBAL.innodb_api_enable_mdl);
+--echo 1 Expected
+
+
+--echo '#---------------------BS_STVARS_035_02----------------------#'
+####################################################################
+# Check if Value can set #
+####################################################################
+
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+SET @@GLOBAL.innodb_api_enable_mdl=1;
+--echo Expected error 'Read only variable'
+
+SELECT COUNT(@@GLOBAL.innodb_api_enable_mdl);
+--echo 1 Expected
+
+
+
+
+--echo '#---------------------BS_STVARS_035_03----------------------#'
+#################################################################
+# Check if the value in GLOBAL Table matches value in variable #
+#################################################################
+
+SELECT IF(@@GLOBAL.innodb_api_enable_mdl, 'ON', 'OFF') = VARIABLE_VALUE
+FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+WHERE VARIABLE_NAME='innodb_api_enable_mdl';
+--echo 1 Expected
+
+SELECT COUNT(@@GLOBAL.innodb_api_enable_mdl);
+--echo 1 Expected
+
+SELECT COUNT(VARIABLE_VALUE)
+FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+WHERE VARIABLE_NAME='innodb_api_enable_mdl';
+--echo 1 Expected
+
+
+
+--echo '#---------------------BS_STVARS_035_04----------------------#'
+################################################################################
+# Check if accessing variable with and without GLOBAL point to same variable #
+################################################################################
+SELECT @@innodb_api_enable_mdl = @@GLOBAL.innodb_api_enable_mdl;
+--echo 1 Expected
+
+
+
+--echo '#---------------------BS_STVARS_035_05----------------------#'
+################################################################################
+# Check if innodb_api_enable_mdl can be accessed with and without @@ sign        #
+################################################################################
+
+SELECT COUNT(@@innodb_api_enable_mdl);
+--echo 1 Expected
+
+--Error ER_INCORRECT_GLOBAL_LOCAL_VAR
+SELECT COUNT(@@local.innodb_api_enable_mdl);
+--echo Expected error 'Variable is a GLOBAL variable'
+
+--Error ER_INCORRECT_GLOBAL_LOCAL_VAR
+SELECT COUNT(@@SESSION.innodb_api_enable_mdl);
+--echo Expected error 'Variable is a GLOBAL variable'
+
+SELECT COUNT(@@GLOBAL.innodb_api_enable_mdl);
+--echo 1 Expected
+
+--Error ER_BAD_FIELD_ERROR
+SELECT innodb_api_enable_mdl = @@SESSION.innodb_api_enable_mdl;
+--echo Expected error 'Readonly variable'
+
+
diff --git a/mysql-test/suite/sys_vars/t/innodb_api_trx_level_basic.test b/mysql-test/suite/sys_vars/t/innodb_api_trx_level_basic.test
new file mode 100644
index 00000000000..49c34b647fd
--- /dev/null
+++ b/mysql-test/suite/sys_vars/t/innodb_api_trx_level_basic.test
@@ -0,0 +1,58 @@
+#
+# 2011-08-01 Added
+#
+
+--source include/have_innodb.inc
+
+SET @start_global_value = @@global.innodb_api_trx_level;
+SELECT @start_global_value;
+
+#
+# exists as global only
+#
+--echo Valid values are zero or above
+SELECT @@global.innodb_api_trx_level >=0;
+SELECT @@global.innodb_api_trx_level <=3;
+SELECT @@global.innodb_api_trx_level;
+
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+SELECT @@session.innodb_api_trx_level;
+SHOW global variables LIKE 'innodb_api_trx_level';
+SHOW session variables LIKE 'innodb_api_trx_level';
+SELECT * FROM information_schema.global_variables
+WHERE variable_name='innodb_api_trx_level';
+SELECT * FROM information_schema.session_variables
+WHERE variable_name='innodb_api_trx_level';
+
+#
+# show that it's writable
+#
+SET global innodb_api_trx_level=100;
+SELECT @@global.innodb_api_trx_level;
+SELECT * FROM information_schema.global_variables
+WHERE variable_name='innodb_api_trx_level';
+SELECT * FROM information_schema.session_variables
+WHERE variable_name='innodb_api_trx_level';
+--error ER_GLOBAL_VARIABLE
+SET session innodb_api_trx_level=1;
+
+#
+# incorrect types
+#
+--error ER_WRONG_TYPE_FOR_VAR
+SET global innodb_api_trx_level=1.1;
+--error ER_WRONG_TYPE_FOR_VAR
+SET global innodb_api_trx_level=1e1;
+--error ER_WRONG_TYPE_FOR_VAR
+SET global innodb_api_trx_level="foo";
+SET global innodb_api_trx_level=-7;
+SELECT @@global.innodb_api_trx_level;
+SELECT * FROM information_schema.global_variables
+WHERE variable_name='innodb_api_trx_level';
+
+#
+# cleanup
+#
+
+SET @@global.innodb_api_trx_level = @start_global_value;
+SELECT @@global.innodb_api_trx_level;
diff --git a/mysql-test/suite/sys_vars/t/innodb_autoextend_increment_basic.test b/mysql-test/suite/sys_vars/t/innodb_autoextend_increment_basic.test
index eb208e7197f..cbe62a105ff 100644
--- a/mysql-test/suite/sys_vars/t/innodb_autoextend_increment_basic.test
+++ b/mysql-test/suite/sys_vars/t/innodb_autoextend_increment_basic.test
@@ -4,7 +4,7 @@
# Scope: GLOBAL #
# Access Type: Dynamic #
# Data Type: Numeric #
-# Default Value: 8 #
+# Default Value: 64 #
# Range: 0,1 #
# #
# #
diff --git a/mysql-test/suite/sys_vars/t/innodb_cmp_per_index_enabled_basic.test b/mysql-test/suite/sys_vars/t/innodb_cmp_per_index_enabled_basic.test
new file mode 100644
index 00000000000..d729acea02c
--- /dev/null
+++ b/mysql-test/suite/sys_vars/t/innodb_cmp_per_index_enabled_basic.test
@@ -0,0 +1,69 @@
+-- source include/have_innodb.inc
+# CREATE TABLE ... KEY_BLOCK_SIZE=8; does not work with page size = 4k
+-- source include/have_innodb_16k.inc
+
+# Check the default value
+SELECT @@global.innodb_cmp_per_index_enabled;
+
+# should be a boolean
+-- error ER_WRONG_VALUE_FOR_VAR
+SET GLOBAL innodb_cmp_per_index_enabled=123;
+
+-- error ER_WRONG_VALUE_FOR_VAR
+SET GLOBAL innodb_cmp_per_index_enabled='foo';
+
+# Check that changing value works and that setting the same value again
+# is as expected
+SET GLOBAL innodb_cmp_per_index_enabled=ON;
+SELECT @@global.innodb_cmp_per_index_enabled;
+
+SET GLOBAL innodb_cmp_per_index_enabled=ON;
+SELECT @@global.innodb_cmp_per_index_enabled;
+
+SET GLOBAL innodb_cmp_per_index_enabled=OFF;
+SELECT @@global.innodb_cmp_per_index_enabled;
+
+SET GLOBAL innodb_cmp_per_index_enabled=OFF;
+SELECT @@global.innodb_cmp_per_index_enabled;
+
+SET GLOBAL innodb_file_format=Barracuda;
+
+-- vertical_results
+
+# Check that enabling after being disabled resets the stats
+
+SET GLOBAL innodb_cmp_per_index_enabled=ON;
+
+CREATE TABLE t (a INT) ENGINE=INNODB KEY_BLOCK_SIZE=8;
+
+INSERT INTO t VALUES (1);
+
+SELECT * FROM information_schema.innodb_cmp_per_index;
+
+SET GLOBAL innodb_cmp_per_index_enabled=OFF;
+SET GLOBAL innodb_cmp_per_index_enabled=ON;
+
+SELECT * FROM information_schema.innodb_cmp_per_index;
+
+DROP TABLE t;
+
+# Check that enabling after being enabled does not reset the stats
+
+SET GLOBAL innodb_cmp_per_index_enabled=ON;
+
+CREATE TABLE t (a INT) ENGINE=INNODB KEY_BLOCK_SIZE=8;
+
+INSERT INTO t VALUES (1);
+
+SELECT * FROM information_schema.innodb_cmp_per_index;
+
+SET GLOBAL innodb_cmp_per_index_enabled=ON;
+
+SELECT * FROM information_schema.innodb_cmp_per_index;
+
+DROP TABLE t;
+
+#
+
+SET GLOBAL innodb_file_format=default;
+SET GLOBAL innodb_cmp_per_index_enabled=default;
diff --git a/mysql-test/suite/sys_vars/t/innodb_compression_failure_threshold_pct_basic.test b/mysql-test/suite/sys_vars/t/innodb_compression_failure_threshold_pct_basic.test
new file mode 100644
index 00000000000..1cdfaa6b31d
--- /dev/null
+++ b/mysql-test/suite/sys_vars/t/innodb_compression_failure_threshold_pct_basic.test
@@ -0,0 +1,143 @@
+##### mysql-test\t\innodb_compression_failure_threshold_pct_basic.test ########
+# #
+# Variable Name: innodb_compression_failure_threshold_pct #
+# Scope: GLOBAL #
+# Access Type: Dynamic #
+# Data Type: Numeric #
+# Default Value: 5 #
+# Range: 0-100 #
+# #
+# #
+# Creation Date: 2008-02-07 #
+# Author: Rizwan #
+# #
+#Description: Test Cases of Dynamic System Variable #
+# innodb_compression_failure_threshold_pct #
+# that checks the behavior of #
+# this variable in the following ways #
+# * Default Value #
+# * Valid & Invalid values #
+# * Scope & Access method #
+# * Data Integrity #
+# #
+# Reference: http://dev.mysql.com/doc/refman/5.1/en/ #
+# server-system-variables.html #
+# #
+###############################################################################
+--source include/have_innodb.inc
+--source include/load_sysvars.inc
+
+######################################################################
+# START OF innodb_compression_failure_threshold_pct TESTS #
+######################################################################
+
+
+############################################################################################
+# Saving initial value of innodb_compression_failure_threshold_pct in a temporary variable #
+############################################################################################
+
+SET @global_start_value = @@global.innodb_compression_failure_threshold_pct;
+SELECT @global_start_value;
+
+--echo '#--------------------FN_DYNVARS_046_01------------------------#'
+########################################################################
+# Display the DEFAULT value of innodb_compression_failure_threshold_pct#
+########################################################################
+
+SET @@global.innodb_compression_failure_threshold_pct = 0;
+SET @@global.innodb_compression_failure_threshold_pct = DEFAULT;
+SELECT @@global.innodb_compression_failure_threshold_pct;
+
+--echo '#---------------------FN_DYNVARS_046_02-------------------------#'
+##############################################################################################
+# check if innodb_compression_failure_threshold_pct can be accessed with and without @@ sign #
+##############################################################################################
+
+--Error ER_GLOBAL_VARIABLE
+SET innodb_compression_failure_threshold_pct = 1;
+SELECT @@innodb_compression_failure_threshold_pct;
+
+--Error ER_UNKNOWN_TABLE
+SELECT local.innodb_compression_failure_threshold_pct;
+
+SET global innodb_compression_failure_threshold_pct = 0;
+SELECT @@global.innodb_compression_failure_threshold_pct;
+
+--echo '#--------------------FN_DYNVARS_046_03------------------------#'
+#################################################################################
+# change the value of innodb_compression_failure_threshold_pct to a valid value #
+#################################################################################
+
+SET @@global.innodb_compression_failure_threshold_pct = 0;
+SELECT @@global.innodb_compression_failure_threshold_pct;
+
+SET @@global.innodb_compression_failure_threshold_pct = 1;
+SELECT @@global.innodb_compression_failure_threshold_pct;
+SET @@global.innodb_compression_failure_threshold_pct = 100;
+SELECT @@global.innodb_compression_failure_threshold_pct;
+
+--echo '#--------------------FN_DYNVARS_046_04-------------------------#'
+################################################################################
+# Change the value of innodb_compression_failure_threshold_pct to invalid value#
+################################################################################
+
+SET @@global.innodb_compression_failure_threshold_pct = -1;
+SELECT @@global.innodb_compression_failure_threshold_pct;
+
+--Error ER_WRONG_TYPE_FOR_VAR
+SET @@global.innodb_compression_failure_threshold_pct = "T";
+SELECT @@global.innodb_compression_failure_threshold_pct;
+
+--Error ER_WRONG_TYPE_FOR_VAR
+SET @@global.innodb_compression_failure_threshold_pct = "Y";
+SELECT @@global.innodb_compression_failure_threshold_pct;
+
+SET @@global.innodb_compression_failure_threshold_pct = 101;
+SELECT @@global.innodb_compression_failure_threshold_pct;
+
+
+--echo '#----------------------FN_DYNVARS_046_05------------------------#'
+#########################################################################
+# Check if the value in GLOBAL Table matches value in variable #
+#########################################################################
+
+SELECT @@global.innodb_compression_failure_threshold_pct =
+ VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+ WHERE VARIABLE_NAME='innodb_compression_failure_threshold_pct';
+SELECT @@global.innodb_compression_failure_threshold_pct;
+SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+ WHERE VARIABLE_NAME='innodb_compression_failure_threshold_pct';
+
+--echo '#---------------------FN_DYNVARS_046_06-------------------------#'
+###################################################################
+# Check if ON and OFF values can be used on variable #
+###################################################################
+
+--ERROR ER_WRONG_TYPE_FOR_VAR
+SET @@global.innodb_compression_failure_threshold_pct = OFF;
+SELECT @@global.innodb_compression_failure_threshold_pct;
+
+--ERROR ER_WRONG_TYPE_FOR_VAR
+SET @@global.innodb_compression_failure_threshold_pct = ON;
+SELECT @@global.innodb_compression_failure_threshold_pct;
+
+--echo '#---------------------FN_DYNVARS_046_07----------------------#'
+###################################################################
+# Check if TRUE and FALSE values can be used on variable #
+###################################################################
+
+SET @@global.innodb_compression_failure_threshold_pct = TRUE;
+SELECT @@global.innodb_compression_failure_threshold_pct;
+SET @@global.innodb_compression_failure_threshold_pct = FALSE;
+SELECT @@global.innodb_compression_failure_threshold_pct;
+
+##############################
+# Restore initial value #
+##############################
+
+SET @@global.innodb_compression_failure_threshold_pct = @global_start_value;
+SELECT @@global.innodb_compression_failure_threshold_pct;
+
+###############################################################
+# END OF innodb_compression_failure_threshold_pct TESTS #
+###############################################################
diff --git a/mysql-test/suite/sys_vars/t/innodb_compression_level_basic.test b/mysql-test/suite/sys_vars/t/innodb_compression_level_basic.test
new file mode 100644
index 00000000000..a90abdde2f1
--- /dev/null
+++ b/mysql-test/suite/sys_vars/t/innodb_compression_level_basic.test
@@ -0,0 +1,64 @@
+
+
+# 2012-05-29 - Added
+#
+
+--source include/have_innodb.inc
+
+SET @start_global_value = @@global.innodb_compression_level;
+SELECT @start_global_value;
+
+#
+# exists as global only
+#
+--echo Valid value 0-9
+select @@global.innodb_compression_level <= 9;
+select @@global.innodb_compression_level;
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+select @@session.innodb_compression_level;
+show global variables like 'innodb_compression_level';
+show session variables like 'innodb_compression_level';
+select * from information_schema.global_variables where variable_name='innodb_compression_level';
+select * from information_schema.session_variables where variable_name='innodb_compression_level';
+
+#
+# show that it's writable
+#
+set global innodb_compression_level=2;
+select @@global.innodb_compression_level;
+select * from information_schema.global_variables where variable_name='innodb_compression_level';
+select * from information_schema.session_variables where variable_name='innodb_compression_level';
+--error ER_GLOBAL_VARIABLE
+set session innodb_compression_level=4;
+
+#
+# incorrect types
+#
+--error ER_WRONG_TYPE_FOR_VAR
+set global innodb_compression_level=1.1;
+--error ER_WRONG_TYPE_FOR_VAR
+set global innodb_compression_level=1e1;
+--error ER_WRONG_TYPE_FOR_VAR
+set global innodb_compression_level="foo";
+
+set global innodb_compression_level=10;
+select @@global.innodb_compression_level;
+select * from information_schema.global_variables where variable_name='innodb_compression_level';
+set global innodb_compression_level=-7;
+select @@global.innodb_compression_level;
+select * from information_schema.global_variables where variable_name='innodb_compression_level';
+
+#
+# min/max values
+#
+set global innodb_compression_level=0;
+select @@global.innodb_compression_level;
+set global innodb_compression_level=9;
+select @@global.innodb_compression_level;
+
+#
+# cleanup
+#
+
+SET @@global.innodb_compression_level = @start_global_value;
+SELECT @@global.innodb_compression_level;
diff --git a/mysql-test/suite/sys_vars/t/innodb_compression_pad_pct_max_basic.test b/mysql-test/suite/sys_vars/t/innodb_compression_pad_pct_max_basic.test
new file mode 100644
index 00000000000..3ca566956ef
--- /dev/null
+++ b/mysql-test/suite/sys_vars/t/innodb_compression_pad_pct_max_basic.test
@@ -0,0 +1,136 @@
+############# mysql-test\t\innodb_compression_pad_pct_max_basic.test ##########
+# #
+# Variable Name: innodb_compression_pad_pct_max #
+# Scope: GLOBAL #
+# Access Type: Dynamic #
+# Data Type: Numeric #
+# Default Value: 50 #
+# Range: 0-75 #
+# #
+# #
+# Creation Date: 2008-02-07 #
+# Author: Rizwan #
+# #
+#Description: Test Cases of Dynamic System Variable #
+# innodb_compression_pad_pct_max #
+# that checks the behavior of #
+# this variable in the following ways #
+# * Default Value #
+# * Valid & Invalid values #
+# * Scope & Access method #
+# * Data Integrity #
+# #
+# Reference: http://dev.mysql.com/doc/refman/5.1/en/ #
+# server-system-variables.html #
+# #
+###############################################################################
+--source include/have_innodb.inc
+--source include/load_sysvars.inc
+
+######################################################################
+# START OF innodb_compression_pad_pct_max TESTS #
+######################################################################
+
+
+############################################################################################
+# Saving initial value of innodb_compression_pad_pct_max in a temporary variable #
+############################################################################################
+
+SET @global_start_value = @@global.innodb_compression_pad_pct_max;
+SELECT @global_start_value;
+
+--echo '#--------------------FN_DYNVARS_046_01------------------------#'
+########################################################################
+# Display the DEFAULT value of innodb_compression_pad_pct_max #
+########################################################################
+
+SET @@global.innodb_compression_pad_pct_max = DEFAULT;
+SELECT @@global.innodb_compression_pad_pct_max;
+
+--echo '#---------------------FN_DYNVARS_046_02-------------------------#'
+##############################################################################################
+# check if innodb_compression_pad_pct_max can be accessed with and without @@ sign #
+##############################################################################################
+
+--Error ER_GLOBAL_VARIABLE
+SET innodb_compression_pad_pct_max = 1;
+SELECT @@innodb_compression_pad_pct_max;
+
+--Error ER_UNKNOWN_TABLE
+SELECT local.innodb_compression_pad_pct_max;
+
+SET global innodb_compression_pad_pct_max = 0;
+SELECT @@global.innodb_compression_pad_pct_max;
+
+--echo '#--------------------FN_DYNVARS_046_03------------------------#'
+#################################################################################
+# change the value of innodb_compression_pad_pct_max to a valid value #
+#################################################################################
+
+SET @@global.innodb_compression_pad_pct_max = 0;
+SELECT @@global.innodb_compression_pad_pct_max;
+
+SET @@global.innodb_compression_pad_pct_max = 75;
+SELECT @@global.innodb_compression_pad_pct_max;
+
+--echo '#--------------------FN_DYNVARS_046_04-------------------------#'
+################################################################################
+# Change the value of innodb_compression_pad_pct_max to invalid value          #
+################################################################################
+
+SET @@global.innodb_compression_pad_pct_max = -1;
+SELECT @@global.innodb_compression_pad_pct_max;
+
+--Error ER_WRONG_TYPE_FOR_VAR
+SET @@global.innodb_compression_pad_pct_max = "T";
+SELECT @@global.innodb_compression_pad_pct_max;
+
+SET @@global.innodb_compression_pad_pct_max = 76;
+SELECT @@global.innodb_compression_pad_pct_max;
+
+
+--echo '#----------------------FN_DYNVARS_046_05------------------------#'
+#########################################################################
+# Check if the value in GLOBAL Table matches value in variable #
+#########################################################################
+
+SELECT @@global.innodb_compression_pad_pct_max =
+ VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+ WHERE VARIABLE_NAME='innodb_compression_pad_pct_max';
+SELECT @@global.innodb_compression_pad_pct_max;
+SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+ WHERE VARIABLE_NAME='innodb_compression_pad_pct_max';
+
+--echo '#---------------------FN_DYNVARS_046_06-------------------------#'
+###################################################################
+# Check if ON and OFF values can be used on variable #
+###################################################################
+
+--ERROR ER_WRONG_TYPE_FOR_VAR
+SET @@global.innodb_compression_pad_pct_max = OFF;
+SELECT @@global.innodb_compression_pad_pct_max;
+
+--ERROR ER_WRONG_TYPE_FOR_VAR
+SET @@global.innodb_compression_pad_pct_max = ON;
+SELECT @@global.innodb_compression_pad_pct_max;
+
+--echo '#---------------------FN_DYNVARS_046_07----------------------#'
+###################################################################
+# Check if TRUE and FALSE values can be used on variable #
+###################################################################
+
+SET @@global.innodb_compression_pad_pct_max = TRUE;
+SELECT @@global.innodb_compression_pad_pct_max;
+SET @@global.innodb_compression_pad_pct_max = FALSE;
+SELECT @@global.innodb_compression_pad_pct_max;
+
+##############################
+# Restore initial value #
+##############################
+
+SET @@global.innodb_compression_pad_pct_max = @global_start_value;
+SELECT @@global.innodb_compression_pad_pct_max;
+
+###############################################################
+# END OF innodb_compression_pad_pct_max TESTS #
+###############################################################
diff --git a/mysql-test/suite/sys_vars/t/innodb_concurrency_tickets_basic.test b/mysql-test/suite/sys_vars/t/innodb_concurrency_tickets_basic.test
index 67b0247d169..f73e25179ba 100644
--- a/mysql-test/suite/sys_vars/t/innodb_concurrency_tickets_basic.test
+++ b/mysql-test/suite/sys_vars/t/innodb_concurrency_tickets_basic.test
@@ -4,7 +4,7 @@
# Scope: GLOBAL #
# Access Type: Dynamic #
# Data Type: Numeric #
-# Default Value: 500 #
+# Default Value: 5000 #
# Range: 1-4294967295 #
# #
# #
diff --git a/mysql-test/suite/sys_vars/t/innodb_disable_background_merge_basic.test b/mysql-test/suite/sys_vars/t/innodb_disable_background_merge_basic.test
new file mode 100644
index 00000000000..9ab1a90efe1
--- /dev/null
+++ b/mysql-test/suite/sys_vars/t/innodb_disable_background_merge_basic.test
@@ -0,0 +1,12 @@
+#
+# Basic test for innodb_disable_background_merge.
+#
+
+-- source include/have_innodb.inc
+
+# The config variable is a debug variable
+-- source include/have_debug.inc
+
+# Check the default value
+SET @orig = @@global.innodb_disable_background_merge;
+SELECT @orig;
diff --git a/mysql-test/suite/sys_vars/t/innodb_flush_log_at_timeout_basic.test b/mysql-test/suite/sys_vars/t/innodb_flush_log_at_timeout_basic.test
new file mode 100644
index 00000000000..0ab079adaa8
--- /dev/null
+++ b/mysql-test/suite/sys_vars/t/innodb_flush_log_at_timeout_basic.test
@@ -0,0 +1,151 @@
+################# mysql-test\t\innodb_flush_log_at_timeout_basic.test #########
+# #
+# Variable Name: innodb_flush_log_at_timeout #
+# Scope: GLOBAL #
+# Access Type: Dynamic #
+# Data Type: Numeric #
+# Default Value: 1 #
+# Range: 0-2700 #
+# #
+# #
+# Creation Date: 2012-05-10 #
+# Author: Nuno Carvalho #
+# #
+#Description: Test Cases of Dynamic System Variable #
+# innodb_flush_log_at_timeout that checks the behavior of #
+# this variable in the following ways #
+# * Default Value #
+# * Valid & Invalid values #
+# * Scope & Access method #
+# * Data Integrity #
+# #
+# Reference: http://dev.mysql.com/doc/refman/5.1/en/ #
+# server-system-variables.html #
+# #
+###############################################################################
+
+--source include/have_innodb.inc
+--source include/load_sysvars.inc
+
+########################################################################
+# START OF innodb_flush_log_at_timeout TESTS #
+########################################################################
+
+
+#############################################################################
+# Saving initial value of innodb_flush_log_at_timeout #
+#############################################################################
+
+
+SET @global_start_value = @@global.innodb_flush_log_at_timeout;
+SELECT @global_start_value;
+
+
+
+--echo '#--------------------FN_DYNVARS_046_01------------------------#'
+##########################################################################
+# Display the DEFAULT value of innodb_flush_log_at_timeout #
+##########################################################################
+
+SET @@global.innodb_flush_log_at_timeout = 0;
+SET @@global.innodb_flush_log_at_timeout = DEFAULT;
+SELECT @@global.innodb_flush_log_at_timeout;
+
+--echo '#---------------------FN_DYNVARS_046_02-------------------------#'
+###############################################################################
+# Check if variable can be accessed with and without @@ sign #
+###############################################################################
+
+--Error ER_GLOBAL_VARIABLE
+SET innodb_flush_log_at_timeout = 1;
+SELECT @@innodb_flush_log_at_timeout;
+
+
+--Error ER_UNKNOWN_TABLE
+SELECT local.innodb_flush_log_at_timeout;
+
+
+SET global innodb_flush_log_at_timeout = 0;
+SELECT @@global.innodb_flush_log_at_timeout;
+
+
+--echo '#--------------------FN_DYNVARS_046_03------------------------#'
+#############################################################################
+# change the value of innodb_flush_log_at_timeout to a valid value #
+#############################################################################
+
+
+SET @@global.innodb_flush_log_at_timeout = 0;
+SELECT @@global.innodb_flush_log_at_timeout;
+
+SET @@global.innodb_flush_log_at_timeout = 10;
+SELECT @@global.innodb_flush_log_at_timeout;
+SET @@global.innodb_flush_log_at_timeout = 2700;
+SELECT @@global.innodb_flush_log_at_timeout;
+
+--echo '#--------------------FN_DYNVARS_046_04-------------------------#'
+###############################################################################
+# Change the value of innodb_flush_log_at_timeout to invalid value #
+###############################################################################
+
+SET @@global.innodb_flush_log_at_timeout = -1;
+SELECT @@global.innodb_flush_log_at_timeout;
+
+--Error ER_WRONG_TYPE_FOR_VAR
+SET @@global.innodb_flush_log_at_timeout = "T";
+SELECT @@global.innodb_flush_log_at_timeout;
+
+--Error ER_WRONG_TYPE_FOR_VAR
+SET @@global.innodb_flush_log_at_timeout = "Y";
+SELECT @@global.innodb_flush_log_at_timeout;
+
+SET @@global.innodb_flush_log_at_timeout = 2701;
+SELECT @@global.innodb_flush_log_at_timeout;
+
+--echo '#----------------------FN_DYNVARS_046_05------------------------#'
+#########################################################################
+# Check if the value in GLOBAL Table matches value in variable #
+#########################################################################
+
+SELECT @@global.innodb_flush_log_at_timeout =
+ VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+ WHERE VARIABLE_NAME='innodb_flush_log_at_timeout';
+SELECT @@global.innodb_flush_log_at_timeout;
+SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+ WHERE VARIABLE_NAME='innodb_flush_log_at_timeout';
+
+--echo '#---------------------FN_DYNVARS_046_06-------------------------#'
+###################################################################
+# Check if ON and OFF values can be used on variable #
+###################################################################
+
+--ERROR ER_WRONG_TYPE_FOR_VAR
+SET @@global.innodb_flush_log_at_timeout = OFF;
+SELECT @@global.innodb_flush_log_at_timeout;
+
+--ERROR ER_WRONG_TYPE_FOR_VAR
+SET @@global.innodb_flush_log_at_timeout = ON;
+SELECT @@global.innodb_flush_log_at_timeout;
+
+--echo '#---------------------FN_DYNVARS_046_07----------------------#'
+###################################################################
+# Check if TRUE and FALSE values can be used on variable #
+###################################################################
+
+
+SET @@global.innodb_flush_log_at_timeout = TRUE;
+SELECT @@global.innodb_flush_log_at_timeout;
+SET @@global.innodb_flush_log_at_timeout = FALSE;
+SELECT @@global.innodb_flush_log_at_timeout;
+
+##############################
+# Restore initial value #
+##############################
+
+
+SET @@global.innodb_flush_log_at_timeout = @global_start_value;
+SELECT @@global.innodb_flush_log_at_timeout;
+
+###############################################################
+# END OF innodb_flush_log_at_timeout TESTS #
+###############################################################
diff --git a/mysql-test/suite/sys_vars/t/innodb_flush_neighbors_basic.test b/mysql-test/suite/sys_vars/t/innodb_flush_neighbors_basic.test
index 2f28809de3e..698e30b6669 100644
--- a/mysql-test/suite/sys_vars/t/innodb_flush_neighbors_basic.test
+++ b/mysql-test/suite/sys_vars/t/innodb_flush_neighbors_basic.test
@@ -11,8 +11,6 @@ SELECT @start_global_value;
#
# exists as global only
#
---echo Valid values are 'ON' and 'OFF'
-select @@global.innodb_flush_neighbors in (0, 1);
select @@global.innodb_flush_neighbors;
--error ER_INCORRECT_GLOBAL_LOCAL_VAR
select @@session.innodb_flush_neighbors;
@@ -24,11 +22,11 @@ select * from information_schema.session_variables where variable_name='innodb_f
#
# show that it's writable
#
-set global innodb_flush_neighbors='OFF';
+set global innodb_flush_neighbors=0;
select @@global.innodb_flush_neighbors;
select * from information_schema.global_variables where variable_name='innodb_flush_neighbors';
select * from information_schema.session_variables where variable_name='innodb_flush_neighbors';
-set @@global.innodb_flush_neighbors=1;
+set @@global.innodb_flush_neighbors=TRUE;
select @@global.innodb_flush_neighbors;
select * from information_schema.global_variables where variable_name='innodb_flush_neighbors';
select * from information_schema.session_variables where variable_name='innodb_flush_neighbors';
@@ -36,30 +34,35 @@ set global innodb_flush_neighbors=0;
select @@global.innodb_flush_neighbors;
select * from information_schema.global_variables where variable_name='innodb_flush_neighbors';
select * from information_schema.session_variables where variable_name='innodb_flush_neighbors';
-set @@global.innodb_flush_neighbors='ON';
+set @@global.innodb_flush_neighbors=2;
+select @@global.innodb_flush_neighbors;
+select * from information_schema.global_variables where variable_name='innodb_flush_neighbors';
+select * from information_schema.session_variables where variable_name='innodb_flush_neighbors';
+set @@global.innodb_flush_neighbors=DEFAULT;
select @@global.innodb_flush_neighbors;
select * from information_schema.global_variables where variable_name='innodb_flush_neighbors';
select * from information_schema.session_variables where variable_name='innodb_flush_neighbors';
--error ER_GLOBAL_VARIABLE
-set session innodb_flush_neighbors='OFF';
+set session innodb_flush_neighbors=0;
--error ER_GLOBAL_VARIABLE
-set @@session.innodb_flush_neighbors='ON';
+set @@session.innodb_flush_neighbors=1;
#
# incorrect types
#
--error ER_WRONG_TYPE_FOR_VAR
+set global innodb_flush_neighbors='OFF';
+--error ER_WRONG_TYPE_FOR_VAR
+set global innodb_flush_neighbors='ON';
+--error ER_WRONG_TYPE_FOR_VAR
set global innodb_flush_neighbors=1.1;
--error ER_WRONG_TYPE_FOR_VAR
set global innodb_flush_neighbors=1e1;
---error ER_WRONG_VALUE_FOR_VAR
-set global innodb_flush_neighbors=2;
---error ER_WRONG_VALUE_FOR_VAR
+set global innodb_flush_neighbors=3;
+select @@global.innodb_flush_neighbors;
set global innodb_flush_neighbors=-3;
select @@global.innodb_flush_neighbors;
-select * from information_schema.global_variables where variable_name='innodb_flush_neighbors';
-select * from information_schema.session_variables where variable_name='innodb_flush_neighbors';
---error ER_WRONG_VALUE_FOR_VAR
+--error ER_WRONG_TYPE_FOR_VAR
set global innodb_flush_neighbors='AUTO';
#
diff --git a/mysql-test/suite/sys_vars/t/innodb_flushing_avg_loops_basic.test b/mysql-test/suite/sys_vars/t/innodb_flushing_avg_loops_basic.test
new file mode 100644
index 00000000000..a84e623f2c3
--- /dev/null
+++ b/mysql-test/suite/sys_vars/t/innodb_flushing_avg_loops_basic.test
@@ -0,0 +1,143 @@
+############ mysql-test\t\innodb_flushing_avg_loops_basic.test ################
+# #
+# Variable Name: innodb_flushing_avg_loops #
+# Scope: GLOBAL #
+# Access Type: Dynamic #
+# Data Type: Numeric #
+# Default Value: 10 #
+# Range: 0-70 #
+# #
+# #
+# Creation Date: 2008-02-07 #
+# Author: Rizwan #
+# #
+#Description:Test Cases of Dynamic System Variable innodb_flushing_avg_loops #
+# that checks the behavior of this variable in the following ways #
+# * Default Value #
+# * Valid & Invalid values #
+# * Scope & Access method #
+# * Data Integrity #
+# #
+# Reference: http://dev.mysql.com/doc/refman/5.1/en/ #
+# server-system-variables.html #
+# #
+###############################################################################
+
+--source include/have_innodb.inc
+--source include/load_sysvars.inc
+
+########################################################################
+# START OF innodb_flushing_avg_loops TESTS #
+########################################################################
+
+
+###############################################################################
+#Saving initial value of innodb_flushing_avg_loops in a temporary variable #
+###############################################################################
+
+SET @global_start_value = @@global.innodb_flushing_avg_loops;
+SELECT @global_start_value;
+
+--echo '#--------------------FN_DYNVARS_046_01------------------------#'
+########################################################################
+# Display the DEFAULT value of innodb_flushing_avg_loops #
+########################################################################
+
+SET @@global.innodb_flushing_avg_loops = 1;
+SET @@global.innodb_flushing_avg_loops = DEFAULT;
+SELECT @@global.innodb_flushing_avg_loops;
+
+--echo '#---------------------FN_DYNVARS_046_02-------------------------#'
+##################################################################################
+# Check if innodb_flushing_avg_loops can be accessed with and without @@ sign #
+##################################################################################
+
+--Error ER_GLOBAL_VARIABLE
+SET innodb_flushing_avg_loops = 1;
+SELECT @@innodb_flushing_avg_loops;
+
+--Error ER_UNKNOWN_TABLE
+SELECT local.innodb_flushing_avg_loops;
+
+SET global innodb_flushing_avg_loops = 1;
+SELECT @@global.innodb_flushing_avg_loops;
+
+--echo '#--------------------FN_DYNVARS_046_03------------------------#'
+###############################################################################
+# change the value of innodb_flushing_avg_loops to a valid value #
+###############################################################################
+
+SET @@global.innodb_flushing_avg_loops = 1;
+SELECT @@global.innodb_flushing_avg_loops;
+
+SET @@global.innodb_flushing_avg_loops = 60;
+SELECT @@global.innodb_flushing_avg_loops;
+
+SET @@global.innodb_flushing_avg_loops = 1000;
+SELECT @@global.innodb_flushing_avg_loops;
+
+--echo '#--------------------FN_DYNVARS_046_04-------------------------#'
+###########################################################################
+# Change the value of innodb_flushing_avg_loops to invalid value #
+###########################################################################
+
+SET @@global.innodb_flushing_avg_loops = -1;
+SELECT @@global.innodb_flushing_avg_loops;
+
+--Error ER_WRONG_TYPE_FOR_VAR
+SET @@global.innodb_flushing_avg_loops = "T";
+SELECT @@global.innodb_flushing_avg_loops;
+
+--Error ER_WRONG_TYPE_FOR_VAR
+SET @@global.innodb_flushing_avg_loops = "Y";
+SELECT @@global.innodb_flushing_avg_loops;
+
+SET @@global.innodb_flushing_avg_loops = 1001;
+SELECT @@global.innodb_flushing_avg_loops;
+
+--echo '#----------------------FN_DYNVARS_046_05------------------------#'
+#########################################################################
+# Check if the value in GLOBAL Table matches value in variable #
+#########################################################################
+
+SELECT @@global.innodb_flushing_avg_loops =
+ VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+ WHERE VARIABLE_NAME='innodb_flushing_avg_loops';
+SELECT @@global.innodb_flushing_avg_loops;
+SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+ WHERE VARIABLE_NAME='innodb_flushing_avg_loops';
+
+--echo '#---------------------FN_DYNVARS_046_06-------------------------#'
+###################################################################
+# Check if ON and OFF values can be used on variable #
+###################################################################
+
+--ERROR ER_WRONG_TYPE_FOR_VAR
+SET @@global.innodb_flushing_avg_loops = OFF;
+SELECT @@global.innodb_flushing_avg_loops;
+
+--ERROR ER_WRONG_TYPE_FOR_VAR
+SET @@global.innodb_flushing_avg_loops = ON;
+SELECT @@global.innodb_flushing_avg_loops;
+
+--echo '#---------------------FN_DYNVARS_046_07----------------------#'
+###################################################################
+# Check if TRUE and FALSE values can be used on variable #
+###################################################################
+
+
+SET @@global.innodb_flushing_avg_loops = TRUE;
+SELECT @@global.innodb_flushing_avg_loops;
+SET @@global.innodb_flushing_avg_loops = FALSE;
+SELECT @@global.innodb_flushing_avg_loops;
+
+##############################
+# Restore initial value #
+##############################
+
+SET @@global.innodb_flushing_avg_loops = @global_start_value;
+SELECT @@global.innodb_flushing_avg_loops;
+
+###############################################################
+# END OF innodb_flushing_avg_loops TESTS #
+###############################################################
diff --git a/mysql-test/suite/sys_vars/t/innodb_force_recovery_crash_basic.test b/mysql-test/suite/sys_vars/t/innodb_force_recovery_crash_basic.test
new file mode 100644
index 00000000000..5eefe1b9219
--- /dev/null
+++ b/mysql-test/suite/sys_vars/t/innodb_force_recovery_crash_basic.test
@@ -0,0 +1,28 @@
+--source include/have_innodb.inc
+--source include/have_debug.inc
+
+#
+# exists as global only
+#
+select @@global.innodb_force_recovery_crash in (0, 1);
+select @@global.innodb_force_recovery_crash;
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+select @@session.innodb_force_recovery_crash;
+show global variables like 'innodb_force_recovery_crash';
+show session variables like 'innodb_force_recovery_crash';
+select * from information_schema.global_variables where variable_name='innodb_force_recovery_crash';
+select * from information_schema.session_variables where variable_name='innodb_force_recovery_crash';
+
+# show that it's read-only
+#
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+set global innodb_force_recovery_crash=1;
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+set global innodb_force_recovery_crash=0;
+select @@global.innodb_force_recovery_crash;
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+set session innodb_force_recovery_crash='some';
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+set @@session.innodb_force_recovery_crash='some';
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+set global innodb_force_recovery_crash='some';
diff --git a/mysql-test/suite/sys_vars/t/innodb_io_capacity_max_basic.test b/mysql-test/suite/sys_vars/t/innodb_io_capacity_max_basic.test
new file mode 100644
index 00000000000..125ceaa1c30
--- /dev/null
+++ b/mysql-test/suite/sys_vars/t/innodb_io_capacity_max_basic.test
@@ -0,0 +1,74 @@
+
+
+# 2012-04-02 - Added
+#
+
+--source include/have_innodb.inc
+
+SET @start_innodb_max_capacity = @@global.innodb_io_capacity_max;
+SELECT @start_innodb_max_capacity;
+
+SET @start_innodb_capacity = @@global.innodb_io_capacity;
+SELECT @start_innodb_capacity;
+
+#
+# exists as global only
+#
+--echo Valid value 100 or more
+select @@global.innodb_io_capacity_max > 99;
+select @@global.innodb_io_capacity_max;
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+select @@session.innodb_io_capacity_max;
+show global variables like 'innodb_io_capacity_max';
+show session variables like 'innodb_io_capacity_max';
+select * from information_schema.global_variables where variable_name='innodb_io_capacity_max';
+select * from information_schema.session_variables where variable_name='innodb_io_capacity_max';
+
+#
+# show that it's writable. Allowed value cannot be lower than innodb_io_capacity
+#
+set global innodb_io_capacity_max=@start_innodb_capacity + 1;
+select @@global.innodb_io_capacity_max;
+select * from information_schema.global_variables where variable_name='innodb_io_capacity_max';
+select * from information_schema.session_variables where variable_name='innodb_io_capacity_max';
+--error ER_GLOBAL_VARIABLE
+set session innodb_io_capacity_max=444;
+
+#
+# incorrect types
+#
+--error ER_WRONG_TYPE_FOR_VAR
+set global innodb_io_capacity_max=1.1;
+--error ER_WRONG_TYPE_FOR_VAR
+set global innodb_io_capacity_max=1e1;
+--error ER_WRONG_TYPE_FOR_VAR
+set global innodb_io_capacity_max="foo";
+
+#
+# can't set it below innodb_io_capacity
+#
+set global innodb_io_capacity_max=@start_innodb_capacity - 1;
+select @@global.innodb_io_capacity_max;
+select * from information_schema.global_variables where variable_name='innodb_io_capacity_max';
+set global innodb_io_capacity_max=-7;
+select @@global.innodb_io_capacity_max;
+select * from information_schema.global_variables where variable_name='innodb_io_capacity_max';
+
+#
+# min/max values
+#
+# first set innodb_io_capacity lower
+set global innodb_io_capacity=100;
+set global innodb_io_capacity_max=100;
+select @@global.innodb_io_capacity_max;
+
+#
+# cleanup
+#
+
+SET @@global.innodb_io_capacity_max = @start_innodb_max_capacity;
+SELECT @@global.innodb_io_capacity_max;
+
+SET @@global.innodb_io_capacity = @start_innodb_capacity;
+SELECT @@global.innodb_io_capacity;
+
diff --git a/mysql-test/suite/sys_vars/t/innodb_max_dirty_pages_pct_lwm_basic.test b/mysql-test/suite/sys_vars/t/innodb_max_dirty_pages_pct_lwm_basic.test
new file mode 100644
index 00000000000..7a6da2e6a08
--- /dev/null
+++ b/mysql-test/suite/sys_vars/t/innodb_max_dirty_pages_pct_lwm_basic.test
@@ -0,0 +1,151 @@
+############# mysql-test\t\innodb_max_dirty_pages_pct_lwm_basic.test ##########
+# #
+# Variable Name: innodb_max_dirty_pages_pct_lwm #
+# Scope: GLOBAL #
+# Access Type: Dynamic #
+# Data Type: Numeric #
+# Default Value: 75 #
+# Range: 0-99 #
+# #
+# #
+# Creation Date: 2008-02-07 #
+# Author: Rizwan #
+# #
+#Description: Test Cases of Dynamic System Variable #
+# innodb_max_dirty_pages_pct_lwm that checks the behavior of #
+# this variable in the following ways #
+# * Default Value #
+# * Valid & Invalid values #
+# * Scope & Access method #
+# * Data Integrity #
+# #
+# Reference: http://dev.mysql.com/doc/refman/5.1/en/ #
+# server-system-variables.html #
+# #
+###############################################################################
+--source include/have_innodb.inc
+--source include/load_sysvars.inc
+
+######################################################################
+# START OF innodb_max_dirty_pages_pct_lwm TESTS #
+######################################################################
+
+
+#################################################################################
+#Saving initial value of innodb_max_dirty_pages_pct_lwm in a temporary variable #
+#################################################################################
+
+SET @pct_lwm_start_value = @@global.innodb_max_dirty_pages_pct_lwm;
+SELECT @pct_lwm_start_value;
+
+SET @pct_start_value = @@global.innodb_max_dirty_pages_pct;
+SELECT @pct_start_value;
+
+--echo '#--------------------FN_DYNVARS_046_01------------------------#'
+########################################################################
+# Display the DEFAULT value of innodb_max_dirty_pages_pct_lwm #
+########################################################################
+
+SET @@global.innodb_max_dirty_pages_pct_lwm = 0;
+SET @@global.innodb_max_dirty_pages_pct_lwm = DEFAULT;
+SELECT @@global.innodb_max_dirty_pages_pct_lwm;
+
+--echo '#---------------------FN_DYNVARS_046_02-------------------------#'
+###################################################################################
+#check if innodb_max_dirty_pages_pct_lwm can be accessed with and without @@ sign #
+###################################################################################
+
+--Error ER_GLOBAL_VARIABLE
+SET innodb_max_dirty_pages_pct_lwm = 1;
+SELECT @@innodb_max_dirty_pages_pct_lwm;
+
+--Error ER_UNKNOWN_TABLE
+SELECT local.innodb_max_dirty_pages_pct_lwm;
+
+SET global innodb_max_dirty_pages_pct_lwm = 0;
+SELECT @@global.innodb_max_dirty_pages_pct_lwm;
+
+--echo '#--------------------FN_DYNVARS_046_03------------------------#'
+##########################################################################
+# change the value of innodb_max_dirty_pages_pct_lwm to a valid value #
+# Allowed values are <= innodb_max_dirty_pages_pct value          #
+##########################################################################
+
+SET @@global.innodb_max_dirty_pages_pct_lwm = 0;
+SELECT @@global.innodb_max_dirty_pages_pct_lwm;
+
+SET @@global.innodb_max_dirty_pages_pct_lwm = @pct_start_value;
+SELECT @@global.innodb_max_dirty_pages_pct_lwm;
+
+--echo '#--------------------FN_DYNVARS_046_04-------------------------#'
+###########################################################################
+# Change the value of innodb_max_dirty_pages_pct_lwm to invalid value #
+###########################################################################
+
+SET @@global.innodb_max_dirty_pages_pct_lwm = -1;
+SELECT @@global.innodb_max_dirty_pages_pct_lwm;
+
+--Error ER_WRONG_TYPE_FOR_VAR
+SET @@global.innodb_max_dirty_pages_pct_lwm = "T";
+SELECT @@global.innodb_max_dirty_pages_pct_lwm;
+
+--Error ER_WRONG_TYPE_FOR_VAR
+SET @@global.innodb_max_dirty_pages_pct_lwm = "Y";
+SELECT @@global.innodb_max_dirty_pages_pct_lwm;
+
+SET @@global.innodb_max_dirty_pages_pct_lwm = @pct_start_value + 1;
+SELECT @@global.innodb_max_dirty_pages_pct_lwm;
+
+SET @@global.innodb_max_dirty_pages_pct_lwm = 100;
+SELECT @@global.innodb_max_dirty_pages_pct_lwm;
+
+
+
+--echo '#----------------------FN_DYNVARS_046_05------------------------#'
+#########################################################################
+# Check if the value in GLOBAL Table matches value in variable #
+#########################################################################
+
+SELECT @@global.innodb_max_dirty_pages_pct_lwm =
+ VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+ WHERE VARIABLE_NAME='innodb_max_dirty_pages_pct_lwm';
+SELECT @@global.innodb_max_dirty_pages_pct_lwm;
+SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+ WHERE VARIABLE_NAME='innodb_max_dirty_pages_pct_lwm';
+
+--echo '#---------------------FN_DYNVARS_046_06-------------------------#'
+###################################################################
+# Check if ON and OFF values can be used on variable #
+###################################################################
+
+--ERROR ER_WRONG_TYPE_FOR_VAR
+SET @@global.innodb_max_dirty_pages_pct_lwm = OFF;
+SELECT @@global.innodb_max_dirty_pages_pct_lwm;
+
+--ERROR ER_WRONG_TYPE_FOR_VAR
+SET @@global.innodb_max_dirty_pages_pct_lwm = ON;
+SELECT @@global.innodb_max_dirty_pages_pct_lwm;
+
+--echo '#---------------------FN_DYNVARS_046_07----------------------#'
+###################################################################
+# Check if TRUE and FALSE values can be used on variable #
+###################################################################
+
+SET @@global.innodb_max_dirty_pages_pct_lwm = TRUE;
+SELECT @@global.innodb_max_dirty_pages_pct_lwm;
+SET @@global.innodb_max_dirty_pages_pct_lwm = FALSE;
+SELECT @@global.innodb_max_dirty_pages_pct_lwm;
+
+##############################
+# Restore initial value #
+##############################
+
+SET @@global.innodb_max_dirty_pages_pct = @pct_start_value;
+SELECT @@global.innodb_max_dirty_pages_pct;
+
+SET @@global.innodb_max_dirty_pages_pct_lwm = @pct_lwm_start_value;
+SELECT @@global.innodb_max_dirty_pages_pct_lwm;
+
+###############################################################
+# END OF innodb_max_dirty_pages_pct_lwm TESTS #
+###############################################################
diff --git a/mysql-test/suite/sys_vars/t/innodb_monitor_disable_basic.test b/mysql-test/suite/sys_vars/t/innodb_monitor_disable_basic.test
index 8afbcac80df..1b23ae14e49 100644
--- a/mysql-test/suite/sys_vars/t/innodb_monitor_disable_basic.test
+++ b/mysql-test/suite/sys_vars/t/innodb_monitor_disable_basic.test
@@ -11,8 +11,8 @@ select name, status from information_schema.innodb_metrics;
# Turn on all monitor counters
set global innodb_monitor_enable = all;
-# status should all change to "started"
-select name, status from information_schema.innodb_metrics;
+# status should all change to "enabled"
+select name from information_schema.innodb_metrics where status!='enabled';
# Test wrong argument to the global configure option
--error ER_WRONG_VALUE_FOR_VAR
@@ -29,21 +29,21 @@ set global innodb_monitor_enable = aaa;
# insensitive
set global innodb_monitor_disable = All;
-# status should all change to "stopped"
-select name, status from information_schema.innodb_metrics;
+# status should all change to "disabled"
+select name from information_schema.innodb_metrics where status!='disabled';
# Reset all counter values
set global innodb_monitor_reset_all = all;
# count should all change to 0
-select name, count, status from information_schema.innodb_metrics;
+select name from information_schema.innodb_metrics where count!=0;
# Test wildcard match, turn on all counters contain string "lock"
set global innodb_monitor_enable = "%lock%";
# All lock related counter should be enabled
-select name, status from information_schema.innodb_metrics
-where name like "%lock%";
+select name from information_schema.innodb_metrics
+where status != IF(name like "%lock%", 'enabled', 'disabled');
# Disable them
set global innodb_monitor_disable = "%lock%";
@@ -59,28 +59,29 @@ set global innodb_monitor_enable = "%lock*";
# All counters will be turned on with wildcard match string with all "%"
set global innodb_monitor_enable="%%%%%%%%%%%%%%%%%%%%%%%%%%%";
-select name, status from information_schema.innodb_metrics;
+select name from information_schema.innodb_metrics where status!='enabled';
# Turn off all counters
set global innodb_monitor_disable="%%%%%";
-select name, status from information_schema.innodb_metrics;
+select name from information_schema.innodb_metrics where status!='disabled';
# One more round testing. All counters will be turned on with
# single wildcard character "%"
set global innodb_monitor_enable="%";
-select name, status from information_schema.innodb_metrics;
+select name from information_schema.innodb_metrics where status!='enabled';
# Turn off all the counters with "%_%"
set global innodb_monitor_disable="%_%";
-select name, status from information_schema.innodb_metrics;
+select name from information_schema.innodb_metrics where status!='disabled';
# Turn on all counters start with "log"
set global innodb_monitor_enable="log%%%%";
-select name, status from information_schema.innodb_metrics;
+select name from information_schema.innodb_metrics
+where status != IF(name like "log%", 'enabled', 'disabled');
# Turn on counters "os_data_fsync" with wildcard match "os_%a_fs_ncs", "_"
# is single character wildcard match word
@@ -153,7 +154,7 @@ where name = "metadata_table_handles_opened";
# Turn off the counter "metadata_table_handles_opened"
set global innodb_monitor_disable = metadata_table_handles_opened;
-# Reset the counter value while counter is off (stopped)
+# Reset the counter value while counter is off (disabled)
set global innodb_monitor_reset = metadata_table_handles_opened;
select name, max_count, min_count, count,
@@ -192,7 +193,7 @@ set global innodb_monitor_enable = metadata_table_handles_opened;
drop table monitor_test;
# Create a new table to test "metadata_table_handles_opened" counter
-create table monitor_test(col int) engine = innodb;
+create table monitor_test(col int) engine = innodb stats_persistent=0;
select * from monitor_test;
@@ -336,6 +337,8 @@ drop table monitor_test;
set global innodb_monitor_enable = file_num_open_files;
+# Counters are unpredictable when innodb-file-per-table is on
+--replace_column 2 # 3 # 4 # 5 # 6 # 7 #
select name, max_count, min_count, count,
max_count_reset, min_count_reset, count_reset, status
from information_schema.innodb_metrics
diff --git a/mysql-test/suite/sys_vars/t/innodb_monitor_enable_basic.test b/mysql-test/suite/sys_vars/t/innodb_monitor_enable_basic.test
index 8afbcac80df..1b23ae14e49 100644
--- a/mysql-test/suite/sys_vars/t/innodb_monitor_enable_basic.test
+++ b/mysql-test/suite/sys_vars/t/innodb_monitor_enable_basic.test
@@ -11,8 +11,8 @@ select name, status from information_schema.innodb_metrics;
# Turn on all monitor counters
set global innodb_monitor_enable = all;
-# status should all change to "started"
-select name, status from information_schema.innodb_metrics;
+# status should all change to "enabled"
+select name from information_schema.innodb_metrics where status!='enabled';
# Test wrong argument to the global configure option
--error ER_WRONG_VALUE_FOR_VAR
@@ -29,21 +29,21 @@ set global innodb_monitor_enable = aaa;
# insensitive
set global innodb_monitor_disable = All;
-# status should all change to "stopped"
-select name, status from information_schema.innodb_metrics;
+# status should all change to "disabled"
+select name from information_schema.innodb_metrics where status!='disabled';
# Reset all counter values
set global innodb_monitor_reset_all = all;
# count should all change to 0
-select name, count, status from information_schema.innodb_metrics;
+select name from information_schema.innodb_metrics where count!=0;
# Test wildcard match, turn on all counters contain string "lock"
set global innodb_monitor_enable = "%lock%";
# All lock related counter should be enabled
-select name, status from information_schema.innodb_metrics
-where name like "%lock%";
+select name from information_schema.innodb_metrics
+where status != IF(name like "%lock%", 'enabled', 'disabled');
# Disable them
set global innodb_monitor_disable = "%lock%";
@@ -59,28 +59,29 @@ set global innodb_monitor_enable = "%lock*";
# All counters will be turned on with wildcard match string with all "%"
set global innodb_monitor_enable="%%%%%%%%%%%%%%%%%%%%%%%%%%%";
-select name, status from information_schema.innodb_metrics;
+select name from information_schema.innodb_metrics where status!='enabled';
# Turn off all counters
set global innodb_monitor_disable="%%%%%";
-select name, status from information_schema.innodb_metrics;
+select name from information_schema.innodb_metrics where status!='disabled';
# One more round testing. All counters will be turned on with
# single wildcard character "%"
set global innodb_monitor_enable="%";
-select name, status from information_schema.innodb_metrics;
+select name from information_schema.innodb_metrics where status!='enabled';
# Turn off all the counters with "%_%"
set global innodb_monitor_disable="%_%";
-select name, status from information_schema.innodb_metrics;
+select name from information_schema.innodb_metrics where status!='disabled';
# Turn on all counters start with "log"
set global innodb_monitor_enable="log%%%%";
-select name, status from information_schema.innodb_metrics;
+select name from information_schema.innodb_metrics
+where status != IF(name like "log%", 'enabled', 'disabled');
# Turn on counters "os_data_fsync" with wildcard match "os_%a_fs_ncs", "_"
# is single character wildcard match word
@@ -153,7 +154,7 @@ where name = "metadata_table_handles_opened";
# Turn off the counter "metadata_table_handles_opened"
set global innodb_monitor_disable = metadata_table_handles_opened;
-# Reset the counter value while counter is off (stopped)
+# Reset the counter value while counter is off (disabled)
set global innodb_monitor_reset = metadata_table_handles_opened;
select name, max_count, min_count, count,
@@ -192,7 +193,7 @@ set global innodb_monitor_enable = metadata_table_handles_opened;
drop table monitor_test;
# Create a new table to test "metadata_table_handles_opened" counter
-create table monitor_test(col int) engine = innodb;
+create table monitor_test(col int) engine = innodb stats_persistent=0;
select * from monitor_test;
@@ -336,6 +337,8 @@ drop table monitor_test;
set global innodb_monitor_enable = file_num_open_files;
+# Counters are unpredictable when innodb-file-per-table is on
+--replace_column 2 # 3 # 4 # 5 # 6 # 7 #
select name, max_count, min_count, count,
max_count_reset, min_count_reset, count_reset, status
from information_schema.innodb_metrics
diff --git a/mysql-test/suite/sys_vars/t/innodb_monitor_reset_all_basic.test b/mysql-test/suite/sys_vars/t/innodb_monitor_reset_all_basic.test
index 8afbcac80df..1b23ae14e49 100644
--- a/mysql-test/suite/sys_vars/t/innodb_monitor_reset_all_basic.test
+++ b/mysql-test/suite/sys_vars/t/innodb_monitor_reset_all_basic.test
@@ -11,8 +11,8 @@ select name, status from information_schema.innodb_metrics;
# Turn on all monitor counters
set global innodb_monitor_enable = all;
-# status should all change to "started"
-select name, status from information_schema.innodb_metrics;
+# status should all change to "enabled"
+select name from information_schema.innodb_metrics where status!='enabled';
# Test wrong argument to the global configure option
--error ER_WRONG_VALUE_FOR_VAR
@@ -29,21 +29,21 @@ set global innodb_monitor_enable = aaa;
# insensitive
set global innodb_monitor_disable = All;
-# status should all change to "stopped"
-select name, status from information_schema.innodb_metrics;
+# status should all change to "disabled"
+select name from information_schema.innodb_metrics where status!='disabled';
# Reset all counter values
set global innodb_monitor_reset_all = all;
# count should all change to 0
-select name, count, status from information_schema.innodb_metrics;
+select name from information_schema.innodb_metrics where count!=0;
# Test wildcard match, turn on all counters contain string "lock"
set global innodb_monitor_enable = "%lock%";
# All lock related counter should be enabled
-select name, status from information_schema.innodb_metrics
-where name like "%lock%";
+select name from information_schema.innodb_metrics
+where status != IF(name like "%lock%", 'enabled', 'disabled');
# Disable them
set global innodb_monitor_disable = "%lock%";
@@ -59,28 +59,29 @@ set global innodb_monitor_enable = "%lock*";
# All counters will be turned on with wildcard match string with all "%"
set global innodb_monitor_enable="%%%%%%%%%%%%%%%%%%%%%%%%%%%";
-select name, status from information_schema.innodb_metrics;
+select name from information_schema.innodb_metrics where status!='enabled';
# Turn off all counters
set global innodb_monitor_disable="%%%%%";
-select name, status from information_schema.innodb_metrics;
+select name from information_schema.innodb_metrics where status!='disabled';
# One more round testing. All counters will be turned on with
# single wildcard character "%"
set global innodb_monitor_enable="%";
-select name, status from information_schema.innodb_metrics;
+select name from information_schema.innodb_metrics where status!='enabled';
# Turn off all the counters with "%_%"
set global innodb_monitor_disable="%_%";
-select name, status from information_schema.innodb_metrics;
+select name from information_schema.innodb_metrics where status!='disabled';
# Turn on all counters start with "log"
set global innodb_monitor_enable="log%%%%";
-select name, status from information_schema.innodb_metrics;
+select name from information_schema.innodb_metrics
+where status != IF(name like "log%", 'enabled', 'disabled');
# Turn on counters "os_data_fsync" with wildcard match "os_%a_fs_ncs", "_"
# is single character wildcard match word
@@ -153,7 +154,7 @@ where name = "metadata_table_handles_opened";
# Turn off the counter "metadata_table_handles_opened"
set global innodb_monitor_disable = metadata_table_handles_opened;
-# Reset the counter value while counter is off (stopped)
+# Reset the counter value while counter is off (disabled)
set global innodb_monitor_reset = metadata_table_handles_opened;
select name, max_count, min_count, count,
@@ -192,7 +193,7 @@ set global innodb_monitor_enable = metadata_table_handles_opened;
drop table monitor_test;
# Create a new table to test "metadata_table_handles_opened" counter
-create table monitor_test(col int) engine = innodb;
+create table monitor_test(col int) engine = innodb stats_persistent=0;
select * from monitor_test;
@@ -336,6 +337,8 @@ drop table monitor_test;
set global innodb_monitor_enable = file_num_open_files;
+# Counters are unpredictable when innodb-file-per-table is on
+--replace_column 2 # 3 # 4 # 5 # 6 # 7 #
select name, max_count, min_count, count,
max_count_reset, min_count_reset, count_reset, status
from information_schema.innodb_metrics
diff --git a/mysql-test/suite/sys_vars/t/innodb_monitor_reset_basic.test b/mysql-test/suite/sys_vars/t/innodb_monitor_reset_basic.test
index 8afbcac80df..1b23ae14e49 100644
--- a/mysql-test/suite/sys_vars/t/innodb_monitor_reset_basic.test
+++ b/mysql-test/suite/sys_vars/t/innodb_monitor_reset_basic.test
@@ -11,8 +11,8 @@ select name, status from information_schema.innodb_metrics;
# Turn on all monitor counters
set global innodb_monitor_enable = all;
-# status should all change to "started"
-select name, status from information_schema.innodb_metrics;
+# status should all change to "enabled"
+select name from information_schema.innodb_metrics where status!='enabled';
# Test wrong argument to the global configure option
--error ER_WRONG_VALUE_FOR_VAR
@@ -29,21 +29,21 @@ set global innodb_monitor_enable = aaa;
# insensitive
set global innodb_monitor_disable = All;
-# status should all change to "stopped"
-select name, status from information_schema.innodb_metrics;
+# status should all change to "disabled"
+select name from information_schema.innodb_metrics where status!='disabled';
# Reset all counter values
set global innodb_monitor_reset_all = all;
# count should all change to 0
-select name, count, status from information_schema.innodb_metrics;
+select name from information_schema.innodb_metrics where count!=0;
# Test wildcard match, turn on all counters contain string "lock"
set global innodb_monitor_enable = "%lock%";
# All lock related counter should be enabled
-select name, status from information_schema.innodb_metrics
-where name like "%lock%";
+select name from information_schema.innodb_metrics
+where status != IF(name like "%lock%", 'enabled', 'disabled');
# Disable them
set global innodb_monitor_disable = "%lock%";
@@ -59,28 +59,29 @@ set global innodb_monitor_enable = "%lock*";
# All counters will be turned on with wildcard match string with all "%"
set global innodb_monitor_enable="%%%%%%%%%%%%%%%%%%%%%%%%%%%";
-select name, status from information_schema.innodb_metrics;
+select name from information_schema.innodb_metrics where status!='enabled';
# Turn off all counters
set global innodb_monitor_disable="%%%%%";
-select name, status from information_schema.innodb_metrics;
+select name from information_schema.innodb_metrics where status!='disabled';
# One more round testing. All counters will be turned on with
# single wildcard character "%"
set global innodb_monitor_enable="%";
-select name, status from information_schema.innodb_metrics;
+select name from information_schema.innodb_metrics where status!='enabled';
# Turn off all the counters with "%_%"
set global innodb_monitor_disable="%_%";
-select name, status from information_schema.innodb_metrics;
+select name from information_schema.innodb_metrics where status!='disabled';
# Turn on all counters start with "log"
set global innodb_monitor_enable="log%%%%";
-select name, status from information_schema.innodb_metrics;
+select name from information_schema.innodb_metrics
+where status != IF(name like "log%", 'enabled', 'disabled');
# Turn on counters "os_data_fsync" with wildcard match "os_%a_fs_ncs", "_"
# is single character wildcard match word
@@ -153,7 +154,7 @@ where name = "metadata_table_handles_opened";
# Turn off the counter "metadata_table_handles_opened"
set global innodb_monitor_disable = metadata_table_handles_opened;
-# Reset the counter value while counter is off (stopped)
+# Reset the counter value while counter is off (disabled)
set global innodb_monitor_reset = metadata_table_handles_opened;
select name, max_count, min_count, count,
@@ -192,7 +193,7 @@ set global innodb_monitor_enable = metadata_table_handles_opened;
drop table monitor_test;
# Create a new table to test "metadata_table_handles_opened" counter
-create table monitor_test(col int) engine = innodb;
+create table monitor_test(col int) engine = innodb stats_persistent=0;
select * from monitor_test;
@@ -336,6 +337,8 @@ drop table monitor_test;
set global innodb_monitor_enable = file_num_open_files;
+# Counters are unpredictable when innodb-file-per-table is on
+--replace_column 2 # 3 # 4 # 5 # 6 # 7 #
select name, max_count, min_count, count,
max_count_reset, min_count_reset, count_reset, status
from information_schema.innodb_metrics
diff --git a/mysql-test/suite/sys_vars/t/innodb_online_alter_log_max_size_basic.test b/mysql-test/suite/sys_vars/t/innodb_online_alter_log_max_size_basic.test
new file mode 100644
index 00000000000..aa1cc83819e
--- /dev/null
+++ b/mysql-test/suite/sys_vars/t/innodb_online_alter_log_max_size_basic.test
@@ -0,0 +1,51 @@
+--source include/have_innodb.inc
+
+SET @start_global_value = @@global.innodb_online_alter_log_max_size;
+SELECT @start_global_value;
+
+#
+# exists as global only
+#
+select @@global.innodb_online_alter_log_max_size >= 524288;
+select @@global.innodb_online_alter_log_max_size;
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+select @@session.innodb_online_alter_log_max_size;
+show global variables like 'innodb_online_alter_log_max_size';
+show session variables like 'innodb_online_alter_log_max_size';
+select * from information_schema.global_variables where variable_name='innodb_online_alter_log_max_size';
+select * from information_schema.session_variables where variable_name='innodb_online_alter_log_max_size';
+
+#
+# show that it's writable
+#
+set global innodb_online_alter_log_max_size=1048576;
+select @@global.innodb_online_alter_log_max_size;
+select * from information_schema.global_variables where variable_name='innodb_online_alter_log_max_size';
+select * from information_schema.session_variables where variable_name='innodb_online_alter_log_max_size';
+set @@global.innodb_online_alter_log_max_size=524288;
+select @@global.innodb_online_alter_log_max_size;
+select * from information_schema.global_variables where variable_name='innodb_online_alter_log_max_size';
+select * from information_schema.session_variables where variable_name='innodb_online_alter_log_max_size';
+--error ER_GLOBAL_VARIABLE
+set session innodb_online_alter_log_max_size='some';
+--error ER_GLOBAL_VARIABLE
+set @@session.innodb_online_alter_log_max_size='some';
+
+#
+# incorrect types
+#
+--error ER_WRONG_TYPE_FOR_VAR
+set global innodb_online_alter_log_max_size=1.1;
+--error ER_WRONG_TYPE_FOR_VAR
+set global innodb_online_alter_log_max_size='foo';
+set global innodb_online_alter_log_max_size=-2;
+--error ER_WRONG_TYPE_FOR_VAR
+set global innodb_online_alter_log_max_size=1e1;
+set global innodb_online_alter_log_max_size=2;
+
+#
+# Cleanup
+#
+
+SET @@global.innodb_online_alter_log_max_size = @start_global_value;
+SELECT @@global.innodb_online_alter_log_max_size;
diff --git a/mysql-test/suite/sys_vars/t/innodb_purge_run_now_basic.test b/mysql-test/suite/sys_vars/t/innodb_purge_run_now_basic.test
new file mode 100644
index 00000000000..0704784dbcc
--- /dev/null
+++ b/mysql-test/suite/sys_vars/t/innodb_purge_run_now_basic.test
@@ -0,0 +1,53 @@
+#
+# Basic test for innodb_purge_run_now, note it is a duplicate of
+# innodb_purge_stop_now.
+#
+
+-- source include/have_innodb.inc
+
+# The config variable is a debug variable for now
+-- source include/have_debug.inc
+
+--disable_query_log
+# Enable metrics for the counters we are going to use
+set global innodb_monitor_enable = purge_stop_count;
+set global innodb_monitor_enable = purge_resume_count;
+--enable_query_log
+
+# Should be 0 for both
+SELECT name, count
+ FROM information_schema.innodb_metrics
+ WHERE name = 'purge_stop_count' OR name = 'purge_resume_count';
+
+# Check the default value
+SET @orig = @@global.innodb_purge_run_now;
+SELECT @orig;
+
+# Stop of purge
+SET GLOBAL innodb_purge_stop_now = ON;
+
+# Stop count should now be 1
+SELECT name, count
+ FROM information_schema.innodb_metrics
+ WHERE name = 'purge_stop_count' OR name = 'purge_resume_count';
+
+SET GLOBAL innodb_purge_run_now = ON;
+
+# Should always be OFF
+SELECT @@global.innodb_purge_run_now;
+
+# Both should be 1 now
+SELECT name, count
+ FROM information_schema.innodb_metrics
+ WHERE name = 'purge_stop_count' OR name = 'purge_resume_count';
+
+--disable_query_log
+set global innodb_monitor_disable = all;
+set global innodb_monitor_reset_all = all;
+
+-- disable_warnings
+set global innodb_monitor_enable = default;
+set global innodb_monitor_disable = default;
+set global innodb_monitor_reset = default;
+set global innodb_monitor_reset_all = default;
+-- enable_warnings
diff --git a/mysql-test/suite/sys_vars/t/innodb_purge_stop_now_basic.test b/mysql-test/suite/sys_vars/t/innodb_purge_stop_now_basic.test
new file mode 100644
index 00000000000..0704784dbcc
--- /dev/null
+++ b/mysql-test/suite/sys_vars/t/innodb_purge_stop_now_basic.test
@@ -0,0 +1,53 @@
+#
+# Basic test for innodb_purge_run_now, note it is a duplicate of
+# innodb_purge_stop_now.
+#
+
+-- source include/have_innodb.inc
+
+# The config variable is a debug variable for now
+-- source include/have_debug.inc
+
+--disable_query_log
+# Enable metrics for the counters we are going to use
+set global innodb_monitor_enable = purge_stop_count;
+set global innodb_monitor_enable = purge_resume_count;
+--enable_query_log
+
+# Should be 0 for both
+SELECT name, count
+ FROM information_schema.innodb_metrics
+ WHERE name = 'purge_stop_count' OR name = 'purge_resume_count';
+
+# Check the default value
+SET @orig = @@global.innodb_purge_run_now;
+SELECT @orig;
+
+# Stop of purge
+SET GLOBAL innodb_purge_stop_now = ON;
+
+# Stop count should now be 1
+SELECT name, count
+ FROM information_schema.innodb_metrics
+ WHERE name = 'purge_stop_count' OR name = 'purge_resume_count';
+
+SET GLOBAL innodb_purge_run_now = ON;
+
+# Should always be OFF
+SELECT @@global.innodb_purge_run_now;
+
+# Both should be 1 now
+SELECT name, count
+ FROM information_schema.innodb_metrics
+ WHERE name = 'purge_stop_count' OR name = 'purge_resume_count';
+
+--disable_query_log
+set global innodb_monitor_disable = all;
+set global innodb_monitor_reset_all = all;
+
+-- disable_warnings
+set global innodb_monitor_enable = default;
+set global innodb_monitor_disable = default;
+set global innodb_monitor_reset = default;
+set global innodb_monitor_reset_all = default;
+-- enable_warnings
diff --git a/mysql-test/suite/sys_vars/t/innodb_read_only_basic.test b/mysql-test/suite/sys_vars/t/innodb_read_only_basic.test
new file mode 100644
index 00000000000..581eb3538b8
--- /dev/null
+++ b/mysql-test/suite/sys_vars/t/innodb_read_only_basic.test
@@ -0,0 +1,20 @@
+--source include/have_innodb.inc
+
+# Can only be set from the command line.
+# show the global and session values;
+
+--echo Valid values are 'ON' and 'OFF'
+select @@global.innodb_read_only;
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+select @@session.innodb_read_only;
+show global variables like 'innodb_read_only';
+show session variables like 'innodb_read_only';
+select * from information_schema.global_variables where variable_name='innodb_read_only';
+select * from information_schema.session_variables where variable_name='innodb_read_only';
+
+# Show that it's read-only
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+set global innodb_read_only=1;
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+set session innodb_read_only=1;
+
diff --git a/mysql-test/suite/sys_vars/t/innodb_stats_auto_recalc_basic.test b/mysql-test/suite/sys_vars/t/innodb_stats_auto_recalc_basic.test
new file mode 100644
index 00000000000..0020c493091
--- /dev/null
+++ b/mysql-test/suite/sys_vars/t/innodb_stats_auto_recalc_basic.test
@@ -0,0 +1,31 @@
+#
+# innodb_stats_auto_recalc
+#
+
+-- source include/have_innodb.inc
+
+# show the default value
+SELECT @@innodb_stats_auto_recalc;
+
+# check that it is writeable
+SET GLOBAL innodb_stats_auto_recalc=ON;
+SELECT @@innodb_stats_auto_recalc;
+
+SET GLOBAL innodb_stats_auto_recalc=OFF;
+SELECT @@innodb_stats_auto_recalc;
+
+SET GLOBAL innodb_stats_auto_recalc=1;
+SELECT @@innodb_stats_auto_recalc;
+
+SET GLOBAL innodb_stats_auto_recalc=0;
+SELECT @@innodb_stats_auto_recalc;
+
+# should be a boolean
+-- error ER_WRONG_VALUE_FOR_VAR
+SET GLOBAL innodb_stats_auto_recalc=123;
+
+-- error ER_WRONG_VALUE_FOR_VAR
+SET GLOBAL innodb_stats_auto_recalc='foo';
+
+# restore the environment
+SET GLOBAL innodb_stats_auto_recalc=default;
diff --git a/mysql-test/suite/sys_vars/t/innodb_stats_persistent_basic.test b/mysql-test/suite/sys_vars/t/innodb_stats_persistent_basic.test
new file mode 100644
index 00000000000..4277b58de00
--- /dev/null
+++ b/mysql-test/suite/sys_vars/t/innodb_stats_persistent_basic.test
@@ -0,0 +1,31 @@
+#
+# innodb_stats_persistent
+#
+
+-- source include/have_innodb.inc
+
+# show the default value
+SELECT @@innodb_stats_persistent;
+
+# check that it is writeable
+SET GLOBAL innodb_stats_persistent=ON;
+SELECT @@innodb_stats_persistent;
+
+SET GLOBAL innodb_stats_persistent=OFF;
+SELECT @@innodb_stats_persistent;
+
+SET GLOBAL innodb_stats_persistent=1;
+SELECT @@innodb_stats_persistent;
+
+SET GLOBAL innodb_stats_persistent=0;
+SELECT @@innodb_stats_persistent;
+
+# should be a boolean
+-- error ER_WRONG_VALUE_FOR_VAR
+SET GLOBAL innodb_stats_persistent=123;
+
+-- error ER_WRONG_VALUE_FOR_VAR
+SET GLOBAL innodb_stats_persistent='foo';
+
+# restore the environment
+SET GLOBAL innodb_stats_persistent=off;
diff --git a/mysql-test/suite/sys_vars/t/metadata_locks_hash_instances_basic.test b/mysql-test/suite/sys_vars/t/metadata_locks_hash_instances_basic.test
new file mode 100644
index 00000000000..5fcf1d956d0
--- /dev/null
+++ b/mysql-test/suite/sys_vars/t/metadata_locks_hash_instances_basic.test
@@ -0,0 +1,60 @@
+########## mysql-test\t\metadata_locks_hash_instances_basic.test ##############
+# #
+# Variable Name: metadata_locks_hash_instances #
+# Scope: Global #
+# Access Type: Static #
+# Data Type: Integer #
+# #
+###############################################################################
+
+
+--echo ####################################################################
+--echo # Displaying default value #
+--echo ####################################################################
+SELECT @@GLOBAL.metadata_locks_hash_instances;
+
+
+--echo ####################################################################
+--echo # Check that value cannot be set (this variable is settable only #
+--echo # at start-up). #
+--echo ####################################################################
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+SET @@GLOBAL.metadata_locks_hash_instances=1;
+
+SELECT @@GLOBAL.metadata_locks_hash_instances;
+
+
+--echo #################################################################
+--echo # Check if the value in GLOBAL Table matches value in variable #
+--echo #################################################################
+SELECT @@GLOBAL.metadata_locks_hash_instances = VARIABLE_VALUE
+FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+WHERE VARIABLE_NAME='metadata_locks_hash_instances';
+
+SELECT @@GLOBAL.metadata_locks_hash_instances;
+
+SELECT VARIABLE_VALUE
+FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
+WHERE VARIABLE_NAME='metadata_locks_hash_instances';
+
+
+--echo ######################################################################
+--echo # Check if accessing variable with and without GLOBAL point to same #
+--echo # variable #
+--echo ######################################################################
+SELECT @@metadata_locks_hash_instances = @@GLOBAL.metadata_locks_hash_instances;
+
+
+--echo ######################################################################
+--echo # Check if variable has only the GLOBAL scope #
+--echo ######################################################################
+
+SELECT @@metadata_locks_hash_instances;
+
+SELECT @@GLOBAL.metadata_locks_hash_instances;
+
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+SELECT @@local.metadata_locks_hash_instances;
+
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+SELECT @@SESSION.metadata_locks_hash_instances;
diff --git a/mysql-test/suite/sys_vars/t/pfs_session_connect_attrs_size_basic-master.opt b/mysql-test/suite/sys_vars/t/pfs_session_connect_attrs_size_basic-master.opt
new file mode 100644
index 00000000000..dcbb8a0bef2
--- /dev/null
+++ b/mysql-test/suite/sys_vars/t/pfs_session_connect_attrs_size_basic-master.opt
@@ -0,0 +1,2 @@
+--loose-enable-performance-schema
+--loose-performance-schema-session-connect-attrs-size=2048
diff --git a/mysql-test/suite/sys_vars/t/pfs_session_connect_attrs_size_basic.test b/mysql-test/suite/sys_vars/t/pfs_session_connect_attrs_size_basic.test
new file mode 100644
index 00000000000..c10700b8903
--- /dev/null
+++ b/mysql-test/suite/sys_vars/t/pfs_session_connect_attrs_size_basic.test
@@ -0,0 +1,47 @@
+# Copyright (C) 2012 Sun Microsystems, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
+
+--source include/not_embedded.inc
+--source include/have_perfschema.inc
+
+#
+# Only global
+#
+
+select @@global.performance_schema_session_connect_attrs_size;
+
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+select @@session.performance_schema_session_connect_attrs_size;
+
+show global variables like 'performance_schema_session_connect_attrs_size';
+
+show session variables like 'performance_schema_session_connect_attrs_size';
+
+select * from information_schema.global_variables
+ where variable_name='performance_schema_session_connect_attrs_size';
+
+select * from information_schema.session_variables
+ where variable_name='performance_schema_session_connect_attrs_size';
+
+#
+# Read-only
+#
+
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+set global performance_schema_session_connect_attrs_size=1;
+
+--error ER_INCORRECT_GLOBAL_LOCAL_VAR
+set session performance_schema_session_connect_attrs_size=1;
+
diff --git a/mysql-test/suite/vcol/t/rpl_vcol.test b/mysql-test/suite/vcol/t/rpl_vcol.test
index 43003f80ee9..03837df1b99 100644
--- a/mysql-test/suite/vcol/t/rpl_vcol.test
+++ b/mysql-test/suite/vcol/t/rpl_vcol.test
@@ -27,7 +27,7 @@
##### Storage engine to be tested
# Set the session storage engine
---source include/have_xtradb.inc
+--source include/have_innodb.inc
SET @@session.storage_engine = 'InnoDB';
#------------------------------------------------------------------------------#
diff --git a/mysql-test/suite/vcol/t/vcol_blocked_sql_funcs_innodb.test b/mysql-test/suite/vcol/t/vcol_blocked_sql_funcs_innodb.test
index baefddc0fd1..516e121a2aa 100644
--- a/mysql-test/suite/vcol/t/vcol_blocked_sql_funcs_innodb.test
+++ b/mysql-test/suite/vcol/t/vcol_blocked_sql_funcs_innodb.test
@@ -32,7 +32,7 @@
##### Storage engine to be tested
# Set the session storage engine
---source include/have_xtradb.inc
+--source include/have_innodb.inc
eval SET @@session.storage_engine = 'InnoDB';
let $skip_full_text_checks = 1;
diff --git a/mysql-test/suite/vcol/t/vcol_column_def_options_innodb.test b/mysql-test/suite/vcol/t/vcol_column_def_options_innodb.test
index e11618163cc..38baa2b3024 100644
--- a/mysql-test/suite/vcol/t/vcol_column_def_options_innodb.test
+++ b/mysql-test/suite/vcol/t/vcol_column_def_options_innodb.test
@@ -33,7 +33,7 @@
##### Storage engine to be tested
# Set the session storage engine
---source include/have_xtradb.inc
+--source include/have_innodb.inc
eval SET @@session.storage_engine = 'InnoDB';
##### Workarounds for known open engine specific bugs
diff --git a/mysql-test/suite/vcol/t/vcol_handler_innodb.test b/mysql-test/suite/vcol/t/vcol_handler_innodb.test
index 1a50aeaaa86..bf443c6bbd3 100644
--- a/mysql-test/suite/vcol/t/vcol_handler_innodb.test
+++ b/mysql-test/suite/vcol/t/vcol_handler_innodb.test
@@ -33,7 +33,7 @@
##### Storage engine to be tested
# Set the session storage engine
---source include/have_xtradb.inc
+--source include/have_innodb.inc
eval SET @@session.storage_engine = 'InnoDB';
##### Workarounds for known open engine specific bugs
diff --git a/mysql-test/suite/vcol/t/vcol_ins_upd_innodb.test b/mysql-test/suite/vcol/t/vcol_ins_upd_innodb.test
index 3b83c7f4565..5d9ac12e930 100644
--- a/mysql-test/suite/vcol/t/vcol_ins_upd_innodb.test
+++ b/mysql-test/suite/vcol/t/vcol_ins_upd_innodb.test
@@ -33,7 +33,7 @@
##### Storage engine to be tested
# Set the session storage engine
---source include/have_xtradb.inc
+--source include/have_innodb.inc
eval SET @@session.storage_engine = 'InnoDB';
##### Workarounds for known open engine specific bugs
diff --git a/mysql-test/suite/vcol/t/vcol_keys_innodb.test b/mysql-test/suite/vcol/t/vcol_keys_innodb.test
index d44d2f701cf..e408672ac07 100644
--- a/mysql-test/suite/vcol/t/vcol_keys_innodb.test
+++ b/mysql-test/suite/vcol/t/vcol_keys_innodb.test
@@ -33,7 +33,7 @@
##### Storage engine to be tested
# Set the session storage engine
---source include/have_xtradb.inc
+--source include/have_innodb.inc
eval SET @@session.storage_engine = 'InnoDB';
##### Workarounds for known open engine specific bugs
diff --git a/mysql-test/suite/vcol/t/vcol_non_stored_columns_innodb.test b/mysql-test/suite/vcol/t/vcol_non_stored_columns_innodb.test
index 42834d5c0bb..88ed6157294 100644
--- a/mysql-test/suite/vcol/t/vcol_non_stored_columns_innodb.test
+++ b/mysql-test/suite/vcol/t/vcol_non_stored_columns_innodb.test
@@ -35,7 +35,7 @@
##### Storage engine to be tested
# Set the session storage engine
---source include/have_xtradb.inc
+--source include/have_innodb.inc
eval SET @@session.storage_engine = 'InnoDB';
##### Workarounds for known open engine specific bugs
diff --git a/mysql-test/suite/vcol/t/vcol_partition_innodb.test b/mysql-test/suite/vcol/t/vcol_partition_innodb.test
index ab90bbf303a..7790a82800c 100644
--- a/mysql-test/suite/vcol/t/vcol_partition_innodb.test
+++ b/mysql-test/suite/vcol/t/vcol_partition_innodb.test
@@ -33,7 +33,7 @@
##### Storage engine to be tested
# Set the session storage engine
---source include/have_xtradb.inc
+--source include/have_innodb.inc
eval SET @@session.storage_engine = 'InnoDB';
##### Workarounds for known open engine specific bugs
diff --git a/mysql-test/suite/vcol/t/vcol_select_innodb.test b/mysql-test/suite/vcol/t/vcol_select_innodb.test
index 787f5fe77a7..314aecb75b9 100644
--- a/mysql-test/suite/vcol/t/vcol_select_innodb.test
+++ b/mysql-test/suite/vcol/t/vcol_select_innodb.test
@@ -33,7 +33,7 @@
##### Storage engine to be tested
# Set the session storage engine
---source include/have_xtradb.inc
+--source include/have_innodb.inc
eval SET @@session.storage_engine = 'InnoDB';
##### Workarounds for known open engine specific bugs
diff --git a/mysql-test/suite/vcol/t/vcol_supported_sql_funcs_innodb.test b/mysql-test/suite/vcol/t/vcol_supported_sql_funcs_innodb.test
index 32e2600c2fc..53826a460a7 100644
--- a/mysql-test/suite/vcol/t/vcol_supported_sql_funcs_innodb.test
+++ b/mysql-test/suite/vcol/t/vcol_supported_sql_funcs_innodb.test
@@ -32,7 +32,7 @@
##### Storage engine to be tested
# Set the session storage engine
---source include/have_xtradb.inc
+--source include/have_innodb.inc
SET @@session.storage_engine = 'InnoDB';
##### Workarounds for known open engine specific bugs
diff --git a/mysql-test/suite/vcol/t/vcol_trigger_sp_innodb.test b/mysql-test/suite/vcol/t/vcol_trigger_sp_innodb.test
index 57655d6d3fe..5a36fb1c06d 100644
--- a/mysql-test/suite/vcol/t/vcol_trigger_sp_innodb.test
+++ b/mysql-test/suite/vcol/t/vcol_trigger_sp_innodb.test
@@ -34,7 +34,7 @@
##### Storage engine to be tested
# Set the session storage engine
---source include/have_xtradb.inc
+--source include/have_innodb.inc
eval SET @@session.storage_engine = 'InnoDB';
##### Workarounds for known open engine specific bugs
diff --git a/mysql-test/suite/vcol/t/vcol_view_innodb.test b/mysql-test/suite/vcol/t/vcol_view_innodb.test
index 322fb122436..01fced8e4c3 100644
--- a/mysql-test/suite/vcol/t/vcol_view_innodb.test
+++ b/mysql-test/suite/vcol/t/vcol_view_innodb.test
@@ -33,7 +33,7 @@
##### Storage engine to be tested
# Set the session storage engine
---source include/have_xtradb.inc
+--source include/have_innodb.inc
eval SET @@session.storage_engine = 'InnoDB';
##### Workarounds for known open engine specific bugs
diff --git a/mysql-test/t/alter_table.test b/mysql-test/t/alter_table.test
index d48b1687fa0..f40f8c11fac 100644
--- a/mysql-test/t/alter_table.test
+++ b/mysql-test/t/alter_table.test
@@ -1,3 +1,4 @@
+--source include/have_innodb.inc
#
# Test of alter table
#
@@ -1215,6 +1216,24 @@ ALTER TABLE db1.t1 ADD baz INT;
DROP DATABASE db1;
+--echo # Additional coverage for refactoring which is made as part
+--echo # of fix for bug #27480 "Extend CREATE TEMPORARY TABLES privilege
+--echo # to allow temp table operations".
+--echo #
+--echo # At some point the below test case failed on assertion.
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TEMPORARY TABLE t1 (i int) ENGINE=MyISAM;
+
+--error ER_ILLEGAL_HA
+ALTER TABLE t1 DISCARD TABLESPACE;
+
+DROP TABLE t1;
+
+
--echo #
--echo # Bug#11938039 RE-EXECUTION OF FRM-ONLY ALTER TABLE WITH RENAME
--echo # CLAUSE FAILS OR ABORTS SERVER.
@@ -1258,3 +1277,360 @@ CREATE INDEX IF NOT EXISTS x_param1 ON t1(x_param);
SHOW CREATE TABLE t1;
DROP TABLE t1;
+--echo #
+--echo # Bug#11938817 ALTER BEHAVIOR DIFFERENT THEN DOCUMENTED
+--echo #
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1(a INT) engine=innodb;
+INSERT INTO t1 VALUES (1), (2);
+
+--enable_info
+--echo # This should not do anything
+ALTER TABLE t1;
+--echo # Check that we rebuild the table
+ALTER TABLE t1 engine=innodb;
+--echo # This should also rebuild the table
+ALTER TABLE t1 FORCE;
+--disable_info
+
+DROP TABLE t1;
+
+--echo # Bug#11748057 (formerly known as 34972): ALTER TABLE statement doesn't
+--echo # identify correct column name.
+--echo #
+
+CREATE TABLE t1 (c1 int unsigned , c2 char(100) not null default '');
+ALTER TABLE t1 ADD c3 char(16) NOT NULL DEFAULT '' AFTER c2,
+ MODIFY c2 char(100) NOT NULL DEFAULT '' AFTER c1;
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+
+--echo #
+--echo # WL#5534 Online ALTER, Phase 1
+--echo #
+
+--echo # Single thread tests.
+--echo # See innodb_mysql_sync.test for multi thread tests.
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1(a INT PRIMARY KEY, b INT) engine=InnoDB;
+CREATE TABLE m1(a INT PRIMARY KEY, b INT) engine=MyISAM;
+INSERT INTO t1 VALUES (1,1), (2,2);
+INSERT INTO m1 VALUES (1,1), (2,2);
+
+--echo #
+--echo # 1: Test ALGORITHM keyword
+--echo #
+
+--echo # --enable_info allows us to see how many rows were updated
+--echo # by ALTER TABLE. in-place will show 0 rows, while copy > 0.
+
+--enable_info
+ALTER TABLE t1 ADD INDEX i1(b);
+ALTER TABLE t1 ADD INDEX i2(b), ALGORITHM= DEFAULT;
+ALTER TABLE t1 ADD INDEX i3(b), ALGORITHM= COPY;
+ALTER TABLE t1 ADD INDEX i4(b), ALGORITHM= INPLACE;
+--error ER_UNKNOWN_ALTER_ALGORITHM
+ALTER TABLE t1 ADD INDEX i5(b), ALGORITHM= INVALID;
+
+ALTER TABLE m1 ENABLE KEYS;
+ALTER TABLE m1 ENABLE KEYS, ALGORITHM= DEFAULT;
+ALTER TABLE m1 ENABLE KEYS, ALGORITHM= COPY;
+ALTER TABLE m1 ENABLE KEYS, ALGORITHM= INPLACE;
+--disable_info
+
+ALTER TABLE t1 DROP INDEX i1, DROP INDEX i2, DROP INDEX i3, DROP INDEX i4;
+
+--echo #
+--echo # 2: Test ALGORITHM + old_alter_table
+--echo #
+
+--enable_info
+SET SESSION old_alter_table= 1;
+ALTER TABLE t1 ADD INDEX i1(b);
+ALTER TABLE t1 ADD INDEX i2(b), ALGORITHM= DEFAULT;
+ALTER TABLE t1 ADD INDEX i3(b), ALGORITHM= COPY;
+ALTER TABLE t1 ADD INDEX i4(b), ALGORITHM= INPLACE;
+SET SESSION old_alter_table= 0;
+--disable_info
+
+ALTER TABLE t1 DROP INDEX i1, DROP INDEX i2, DROP INDEX i3, DROP INDEX i4;
+
+--echo #
+--echo # 3: Test unsupported in-place operation
+--echo #
+
+ALTER TABLE t1 ADD COLUMN (c1 INT);
+ALTER TABLE t1 ADD COLUMN (c2 INT), ALGORITHM= DEFAULT;
+ALTER TABLE t1 ADD COLUMN (c3 INT), ALGORITHM= COPY;
+ALTER TABLE t1 ADD COLUMN (c4 INT), ALGORITHM= INPLACE;
+
+ALTER TABLE t1 DROP COLUMN c1, DROP COLUMN c2, DROP COLUMN c3, DROP COLUMN c4;
+
+--echo #
+--echo # 4: Test LOCK keyword
+--echo #
+
+--enable_info
+ALTER TABLE t1 ADD INDEX i1(b), LOCK= DEFAULT;
+ALTER TABLE t1 ADD INDEX i2(b), LOCK= NONE;
+ALTER TABLE t1 ADD INDEX i3(b), LOCK= SHARED;
+ALTER TABLE t1 ADD INDEX i4(b), LOCK= EXCLUSIVE;
+--error ER_UNKNOWN_ALTER_LOCK
+ALTER TABLE t1 ADD INDEX i5(b), LOCK= INVALID;
+--disable_info
+
+ALTER TABLE m1 ENABLE KEYS, LOCK= DEFAULT;
+--error ER_ALTER_OPERATION_NOT_SUPPORTED
+ALTER TABLE m1 ENABLE KEYS, LOCK= NONE;
+--error ER_ALTER_OPERATION_NOT_SUPPORTED
+ALTER TABLE m1 ENABLE KEYS, LOCK= SHARED;
+ALTER TABLE m1 ENABLE KEYS, LOCK= EXCLUSIVE;
+
+ALTER TABLE t1 DROP INDEX i1, DROP INDEX i2, DROP INDEX i3, DROP INDEX i4;
+
+--echo #
+--echo # 5: Test ALGORITHM + LOCK
+--echo #
+
+--enable_info
+ALTER TABLE t1 ADD INDEX i1(b), ALGORITHM= INPLACE, LOCK= NONE;
+ALTER TABLE t1 ADD INDEX i2(b), ALGORITHM= INPLACE, LOCK= SHARED;
+ALTER TABLE t1 ADD INDEX i3(b), ALGORITHM= INPLACE, LOCK= EXCLUSIVE;
+--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+ALTER TABLE t1 ADD INDEX i4(b), ALGORITHM= COPY, LOCK= NONE;
+ALTER TABLE t1 ADD INDEX i5(b), ALGORITHM= COPY, LOCK= SHARED;
+ALTER TABLE t1 ADD INDEX i6(b), ALGORITHM= COPY, LOCK= EXCLUSIVE;
+
+--error ER_ALTER_OPERATION_NOT_SUPPORTED
+ALTER TABLE m1 ENABLE KEYS, ALGORITHM= INPLACE, LOCK= NONE;
+--error ER_ALTER_OPERATION_NOT_SUPPORTED
+ALTER TABLE m1 ENABLE KEYS, ALGORITHM= INPLACE, LOCK= SHARED;
+ALTER TABLE m1 ENABLE KEYS, ALGORITHM= INPLACE, LOCK= EXCLUSIVE;
+--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+ALTER TABLE m1 ENABLE KEYS, ALGORITHM= COPY, LOCK= NONE;
+# This works because the lock will be SNW for the copy phase.
+# It will still require exclusive lock for actually enabling keys.
+ALTER TABLE m1 ENABLE KEYS, ALGORITHM= COPY, LOCK= SHARED;
+ALTER TABLE m1 ENABLE KEYS, ALGORITHM= COPY, LOCK= EXCLUSIVE;
+--disable_info
+
+DROP TABLE t1, m1;
+
+--echo #
+--echo # 6: Possible deadlock involving thr_lock.c
+--echo #
+
+CREATE TABLE t1(a INT PRIMARY KEY, b INT);
+INSERT INTO t1 VALUES (1,1), (2,2);
+
+START TRANSACTION;
+INSERT INTO t1 VALUES (3,3);
+
+--echo # Connection con1
+connect (con1, localhost, root);
+--echo # Sending:
+--send ALTER TABLE t1 DISABLE KEYS
+
+--echo # Connection default
+connection default;
+--echo # Waiting until ALTER TABLE is blocked.
+let $wait_condition=
+ SELECT COUNT(*) = 1 FROM information_schema.processlist
+ WHERE state = "Waiting for table metadata lock" AND
+ info = "ALTER TABLE t1 DISABLE KEYS";
+--source include/wait_condition.inc
+UPDATE t1 SET b = 4;
+COMMIT;
+
+--echo # Connection con1
+connection con1;
+--echo # Reaping: ALTER TABLE t1 DISABLE KEYS
+--reap
+disconnect con1;
+--source include/wait_until_disconnected.inc
+
+--echo # Connection default
+connection default;
+DROP TABLE t1;
+
+--echo #
+--echo # 7: Which operations require copy and which can be done in-place?
+--echo #
+--echo # Test which ALTER TABLE operations are done in-place and
+--echo # which operations are done using temporary table copy.
+--echo #
+--echo # --enable_info allows us to see how many rows were updated
+--echo # by ALTER TABLE. in-place will show 0 rows, while copy > 0.
+--echo #
+
+--disable_warnings
+DROP TABLE IF EXISTS ti1, ti2, ti3, tm1, tm2, tm3;
+--enable_warnings
+
+--echo # Single operation tests
+
+CREATE TABLE ti1(a INT NOT NULL, b INT, c INT) engine=InnoDB;
+CREATE TABLE tm1(a INT NOT NULL, b INT, c INT) engine=MyISAM;
+CREATE TABLE ti2(a INT PRIMARY KEY AUTO_INCREMENT, b INT, c INT) engine=InnoDB;
+CREATE TABLE tm2(a INT PRIMARY KEY AUTO_INCREMENT, b INT, c INT) engine=MyISAM;
+INSERT INTO ti1 VALUES (1,1,1), (2,2,2);
+INSERT INTO ti2 VALUES (1,1,1), (2,2,2);
+INSERT INTO tm1 VALUES (1,1,1), (2,2,2);
+INSERT INTO tm2 VALUES (1,1,1), (2,2,2);
+
+--enable_info
+ALTER TABLE ti1;
+ALTER TABLE tm1;
+
+ALTER TABLE ti1 ADD COLUMN d VARCHAR(200);
+ALTER TABLE tm1 ADD COLUMN d VARCHAR(200);
+ALTER TABLE ti1 ADD COLUMN d2 VARCHAR(200);
+ALTER TABLE tm1 ADD COLUMN d2 VARCHAR(200);
+ALTER TABLE ti1 ADD COLUMN e ENUM('a', 'b') FIRST;
+ALTER TABLE tm1 ADD COLUMN e ENUM('a', 'b') FIRST;
+ALTER TABLE ti1 ADD COLUMN f INT AFTER a;
+ALTER TABLE tm1 ADD COLUMN f INT AFTER a;
+
+ALTER TABLE ti1 ADD INDEX ii1(b);
+ALTER TABLE tm1 ADD INDEX im1(b);
+ALTER TABLE ti1 ADD UNIQUE INDEX ii2 (c);
+ALTER TABLE tm1 ADD UNIQUE INDEX im2 (c);
+ALTER TABLE ti1 ADD FULLTEXT INDEX ii3 (d);
+ALTER TABLE tm1 ADD FULLTEXT INDEX im3 (d);
+ALTER TABLE ti1 ADD FULLTEXT INDEX ii4 (d2);
+ALTER TABLE tm1 ADD FULLTEXT INDEX im4 (d2);
+
+# Bug#14140038 INCONSISTENT HANDLING OF FULLTEXT INDEXES IN ALTER TABLE
+--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
+ALTER TABLE ti1 ADD PRIMARY KEY(a), ALGORITHM=INPLACE;
+ALTER TABLE ti1 ADD PRIMARY KEY(a);
+ALTER TABLE tm1 ADD PRIMARY KEY(a);
+
+ALTER TABLE ti1 DROP INDEX ii3;
+ALTER TABLE tm1 DROP INDEX im3;
+
+ALTER TABLE ti1 DROP COLUMN d2;
+ALTER TABLE tm1 DROP COLUMN d2;
+
+ALTER TABLE ti1 ADD CONSTRAINT fi1 FOREIGN KEY (b) REFERENCES ti2(a);
+ALTER TABLE tm1 ADD CONSTRAINT fm1 FOREIGN KEY (b) REFERENCES tm2(a);
+
+ALTER TABLE ti1 ALTER COLUMN b SET DEFAULT 1;
+ALTER TABLE tm1 ALTER COLUMN b SET DEFAULT 1;
+ALTER TABLE ti1 ALTER COLUMN b DROP DEFAULT;
+ALTER TABLE tm1 ALTER COLUMN b DROP DEFAULT;
+
+# This will set both ALTER_COLUMN_NAME and COLUMN_DEFAULT_VALUE
+ALTER TABLE ti1 CHANGE COLUMN f g INT;
+ALTER TABLE tm1 CHANGE COLUMN f g INT;
+ALTER TABLE ti1 CHANGE COLUMN g h VARCHAR(20);
+ALTER TABLE tm1 CHANGE COLUMN g h VARCHAR(20);
+ALTER TABLE ti1 MODIFY COLUMN e ENUM('a', 'b', 'c');
+ALTER TABLE tm1 MODIFY COLUMN e ENUM('a', 'b', 'c');
+ALTER TABLE ti1 MODIFY COLUMN e INT;
+ALTER TABLE tm1 MODIFY COLUMN e INT;
+# This will set both ALTER_COLUMN_ORDER and COLUMN_DEFAULT_VALUE
+ALTER TABLE ti1 MODIFY COLUMN e INT AFTER h;
+ALTER TABLE tm1 MODIFY COLUMN e INT AFTER h;
+ALTER TABLE ti1 MODIFY COLUMN e INT FIRST;
+ALTER TABLE tm1 MODIFY COLUMN e INT FIRST;
+# This will set both ALTER_COLUMN_NOT_NULLABLE and COLUMN_DEFAULT_VALUE
+--disable_info
+# NULL -> NOT NULL only allowed INPLACE if strict sql_mode is on.
+SET @orig_sql_mode = @@sql_mode;
+SET @@sql_mode = 'STRICT_TRANS_TABLES';
+--enable_info
+ALTER TABLE ti1 MODIFY COLUMN c INT NOT NULL;
+--disable_info
+SET @@sql_mode = @orig_sql_mode;
+--enable_info
+ALTER TABLE tm1 MODIFY COLUMN c INT NOT NULL;
+# This will set both ALTER_COLUMN_NULLABLE and COLUMN_DEFAULT_VALUE
+ALTER TABLE ti1 MODIFY COLUMN c INT NULL;
+ALTER TABLE tm1 MODIFY COLUMN c INT NULL;
+# This will set both ALTER_COLUMN_EQUAL_PACK_LENGTH and COLUMN_DEFAULT_VALUE
+ALTER TABLE ti1 MODIFY COLUMN h VARCHAR(30);
+ALTER TABLE tm1 MODIFY COLUMN h VARCHAR(30);
+ALTER TABLE ti1 MODIFY COLUMN h VARCHAR(30) AFTER d;
+ALTER TABLE tm1 MODIFY COLUMN h VARCHAR(30) AFTER d;
+
+ALTER TABLE ti1 DROP COLUMN h;
+ALTER TABLE tm1 DROP COLUMN h;
+
+ALTER TABLE ti1 DROP INDEX ii2;
+ALTER TABLE tm1 DROP INDEX im2;
+ALTER TABLE ti1 DROP PRIMARY KEY;
+ALTER TABLE tm1 DROP PRIMARY KEY;
+
+ALTER TABLE ti1 DROP FOREIGN KEY fi1;
+ALTER TABLE tm1 DROP FOREIGN KEY fm1;
+
+ALTER TABLE ti1 RENAME TO ti3;
+ALTER TABLE tm1 RENAME TO tm3;
+ALTER TABLE ti3 RENAME TO ti1;
+ALTER TABLE tm3 RENAME TO tm1;
+
+ALTER TABLE ti1 ORDER BY b;
+ALTER TABLE tm1 ORDER BY b;
+
+ALTER TABLE ti1 CONVERT TO CHARACTER SET utf16;
+ALTER TABLE tm1 CONVERT TO CHARACTER SET utf16;
+ALTER TABLE ti1 DEFAULT CHARACTER SET utf8;
+ALTER TABLE tm1 DEFAULT CHARACTER SET utf8;
+
+ALTER TABLE ti1 FORCE;
+ALTER TABLE tm1 FORCE;
+
+ALTER TABLE ti1 AUTO_INCREMENT 3;
+ALTER TABLE tm1 AUTO_INCREMENT 3;
+ALTER TABLE ti1 AVG_ROW_LENGTH 10;
+ALTER TABLE tm1 AVG_ROW_LENGTH 10;
+ALTER TABLE ti1 CHECKSUM 1;
+ALTER TABLE tm1 CHECKSUM 1;
+ALTER TABLE ti1 COMMENT 'test';
+ALTER TABLE tm1 COMMENT 'test';
+ALTER TABLE ti1 MAX_ROWS 100;
+ALTER TABLE tm1 MAX_ROWS 100;
+ALTER TABLE ti1 MIN_ROWS 1;
+ALTER TABLE tm1 MIN_ROWS 1;
+ALTER TABLE ti1 PACK_KEYS 1;
+ALTER TABLE tm1 PACK_KEYS 1;
+
+--disable_info
+DROP TABLE ti1, ti2, tm1, tm2;
+
+--echo # Tests of >1 operation (InnoDB)
+
+CREATE TABLE ti1(a INT PRIMARY KEY AUTO_INCREMENT, b INT) engine=InnoDB;
+INSERT INTO ti1(b) VALUES (1), (2);
+
+--enable_info
+ALTER TABLE ti1 RENAME TO ti3, ADD INDEX ii1(b);
+
+ALTER TABLE ti3 DROP INDEX ii1, AUTO_INCREMENT 5;
+--disable_info
+INSERT INTO ti3(b) VALUES (5);
+--enable_info
+ALTER TABLE ti3 ADD INDEX ii1(b), AUTO_INCREMENT 7;
+--disable_info
+INSERT INTO ti3(b) VALUES (7);
+SELECT * FROM ti3;
+
+DROP TABLE ti3;
+
+--echo #
+--echo # 8: Scenario in which ALTER TABLE was returning an unwarranted
+--echo # ER_ILLEGAL_HA error at some point during work on this WL.
+--echo #
+
+CREATE TABLE tm1(i INT DEFAULT 1) engine=MyISAM;
+ALTER TABLE tm1 ADD INDEX ii1(i), ALTER COLUMN i DROP DEFAULT;
+DROP TABLE tm1;
diff --git a/mysql-test/t/alter_table_online.test b/mysql-test/t/alter_table_online.test
index 19096efe0fa..a9ce77d1445 100644
--- a/mysql-test/t/alter_table_online.test
+++ b/mysql-test/t/alter_table_online.test
@@ -1,5 +1,5 @@
#
-# Test of alter online table
+# Test of ALTER ONLINE TABLE syntax
#
--source include/have_innodb.inc
@@ -29,15 +29,15 @@ drop table t1;
create temporary table t1 (a int not null primary key, b int, c varchar(80), e enum('a','b'));
insert into t1 (a) values (1),(2),(3);
---error ER_CANT_DO_ONLINE
+--error ER_ALTER_OPERATION_NOT_SUPPORTED
alter online table t1 modify b int default 5;
---error ER_CANT_DO_ONLINE
+--error ER_ALTER_OPERATION_NOT_SUPPORTED
alter online table t1 change b new_name int;
---error ER_CANT_DO_ONLINE
+--error ER_ALTER_OPERATION_NOT_SUPPORTED
alter online table t1 modify e enum('a','b','c');
---error ER_CANT_DO_ONLINE
+--error ER_ALTER_OPERATION_NOT_SUPPORTED
alter online table t1 comment "new comment";
---error ER_CANT_DO_ONLINE
+--error ER_ALTER_OPERATION_NOT_SUPPORTED
alter online table t1 rename to t2;
drop table t1;
@@ -49,52 +49,50 @@ drop table t1;
create table t1 (a int not null primary key, b int, c varchar(80), e enum('a','b'));
insert into t1 (a) values (1),(2),(3);
---error ER_CANT_DO_ONLINE
+--error ER_ALTER_OPERATION_NOT_SUPPORTED
alter online table t1 drop column b, add b int;
---error ER_CANT_DO_ONLINE
+--error ER_ALTER_OPERATION_NOT_SUPPORTED
alter online table t1 modify b bigint;
---error ER_CANT_DO_ONLINE
+--error ER_ALTER_OPERATION_NOT_SUPPORTED
alter online table t1 modify e enum('c','a','b');
---error ER_CANT_DO_ONLINE
+--error ER_ALTER_OPERATION_NOT_SUPPORTED
alter online table t1 modify c varchar(50);
---error ER_CANT_DO_ONLINE
+--error ER_ALTER_OPERATION_NOT_SUPPORTED
alter online table t1 modify c varchar(100);
---error ER_CANT_DO_ONLINE
+--error ER_ALTER_OPERATION_NOT_SUPPORTED
alter online table t1 add f int;
---error ER_CANT_DO_ONLINE
+--error ER_ALTER_OPERATION_NOT_SUPPORTED
alter online table t1 engine=memory;
alter table t1 engine=innodb;
alter table t1 add index (b);
---error ER_CANT_DO_ONLINE
alter online table t1 add index c (c);
---error ER_CANT_DO_ONLINE
alter online table t1 drop index b;
drop table t1;
create temporary table t1 (a int not null primary key, b int, c varchar(80), e enum('a','b'));
insert into t1 (a) values (1),(2),(3);
---error ER_CANT_DO_ONLINE
+--error ER_ALTER_OPERATION_NOT_SUPPORTED
alter online table t1 drop column b, add b int;
---error ER_CANT_DO_ONLINE
+--error ER_ALTER_OPERATION_NOT_SUPPORTED
alter online table t1 modify b bigint;
---error ER_CANT_DO_ONLINE
+--error ER_ALTER_OPERATION_NOT_SUPPORTED
alter online table t1 modify e enum('c','a','b');
---error ER_CANT_DO_ONLINE
+--error ER_ALTER_OPERATION_NOT_SUPPORTED
alter online table t1 modify c varchar(50);
---error ER_CANT_DO_ONLINE
+--error ER_ALTER_OPERATION_NOT_SUPPORTED
alter online table t1 modify c varchar(100);
---error ER_CANT_DO_ONLINE
+--error ER_ALTER_OPERATION_NOT_SUPPORTED
alter online table t1 add f int;
---error ER_CANT_DO_ONLINE
+--error ER_ALTER_OPERATION_NOT_SUPPORTED
alter online table t1 engine=memory;
alter table t1 engine=innodb;
alter table t1 add index (b);
---error ER_CANT_DO_ONLINE
+--error ER_ALTER_OPERATION_NOT_SUPPORTED
alter online table t1 add index c (c);
---error ER_CANT_DO_ONLINE
+--error ER_ALTER_OPERATION_NOT_SUPPORTED
alter online table t1 drop index b;
drop table t1;
diff --git a/mysql-test/t/ctype_utf8mb4.test b/mysql-test/t/ctype_utf8mb4.test
index fda20ca0ec5..e4067245622 100644
--- a/mysql-test/t/ctype_utf8mb4.test
+++ b/mysql-test/t/ctype_utf8mb4.test
@@ -1812,8 +1812,7 @@ SHOW CREATE TABLE t2;
DROP TABLE t1, t2;
--echo #
---echo # Bug#13581962 HIGH MEMORY USAGE ATTEMPT, THEN CRASH WITH
---echo # LONGTEXT, UNION, USER VARIABLE
+--echo # Bug#13581962 HIGH MEMORY USAGE ATTEMPT, THEN CRASH WITH LONGTEXT, UNION, USER VARIABLE
--echo # Bug#14096619 UNABLE TO RESTORE DATABASE DUMP
--echo #
diff --git a/mysql-test/t/events_restart.test b/mysql-test/t/events_restart.test
index 83d28c0812d..c6152e5d961 100644
--- a/mysql-test/t/events_restart.test
+++ b/mysql-test/t/events_restart.test
@@ -107,3 +107,25 @@ let $wait_condition=
where db='events_test' and command = 'Connect' and user=current_user();
--source include/wait_condition.inc
+--echo #
+--echo # Test for bug#11748899 -- EVENT SET TO DISABLED AND ON COMPLETION
+--echo # NOT PRESERVE IS DELETED AT SERVER
+--echo #
+SELECT @@event_scheduler;
+USE test;
+--disable_warnings
+DROP EVENT IF EXISTS e1;
+--enable_warnings
+CREATE EVENT e1 ON SCHEDULE EVERY 1 SECOND DISABLE DO SELECT 1;
+--replace_column 6 # 9 # 10 #
+SHOW EVENTS;
+
+--echo "Now we restart the server"
+--source include/restart_mysqld.inc
+USE test;
+SELECT @@event_scheduler;
+--replace_column 6 # 9 # 10 #
+SHOW EVENTS;
+DROP EVENT e1;
+
+--echo # end test for bug#11748899
diff --git a/mysql-test/t/flush_read_lock.test b/mysql-test/t/flush_read_lock.test
index f024bff0af7..e8ec07392b5 100644
--- a/mysql-test/t/flush_read_lock.test
+++ b/mysql-test/t/flush_read_lock.test
@@ -708,11 +708,10 @@ let $cleanup_stmt1= create table t2_base(j int);
--source include/check_ftwrl_incompatible.inc
--echo # 13.1.b) DROP TABLES which affects only temporary tables
---echo # in theory can be compatible with FTWRL.
---echo # In practice it is not yet.
+--echo # is compatible with FTWRL.
let $statement= drop table t2_temp;
-let $cleanup_stmt1= create temporary table t2_temp(j int);
---source include/check_ftwrl_incompatible.inc
+let $cleanup_stmt= create temporary table t2_temp(j int);
+--source include/check_ftwrl_compatible.inc
--echo #
--echo # 13.1.c) DROP TEMPORARY TABLES should be compatible with FTWRL.
@@ -1902,35 +1901,10 @@ let $statement= analyze table t3_temp_trans;
let $cleanup_stmt= ;
--source include/check_ftwrl_compatible.inc
--echo #
---echo # 39.2.c) Some statements do implicit commit and not
---echo # considered read-only. As result they are
---echo # not compatible with FTWRL.
---echo #
-flush tables with read lock;
---echo # Implicit commits are allowed under FTWRL.
-alter table t3_temp_trans add column c1 int;
-unlock tables;
---echo #
---echo # Switching to connection '$con_aux1'.
-connection $con_aux1;
-flush tables with read lock;
---echo # Switching to connection 'default'.
-connection default;
---send alter table t3_temp_trans drop column c1
---echo # Switching to connection '$con_aux1'.
-connection $con_aux1;
---echo # Check that ALTER TABLE is blocked.
-let $wait_condition=
- select count(*) = 1 from information_schema.processlist
- where state = "Waiting for commit lock" and
- info = "alter table t3_temp_trans drop column c1";
---source include/wait_condition.inc
-unlock tables;
---echo # Switching to connection 'default'.
-connection default;
---echo # Reap ALTER TABLE
---reap
-
+--echo # And ALTER TABLE:
+let $statement= alter table t3_temp_trans add column c1 int;
+let $cleanup_stmt= alter table t3_temp_trans drop column c1;
+--source include/check_ftwrl_compatible.inc
--echo #
--echo # 40) Test effect of implicit commit for DDL which is otherwise
diff --git a/mysql-test/t/innodb_mysql_sync.test b/mysql-test/t/innodb_mysql_sync.test
index 2f3bd643837..b1e21837404 100644
--- a/mysql-test/t/innodb_mysql_sync.test
+++ b/mysql-test/t/innodb_mysql_sync.test
@@ -168,7 +168,7 @@ connection default;
CREATE DATABASE db1;
CREATE TABLE db1.t1(id INT NOT NULL AUTO_INCREMENT PRIMARY KEY, value INT) engine=innodb;
INSERT INTO db1.t1(value) VALUES (1), (2);
-SET DEBUG_SYNC= "alter_table_manage_keys SIGNAL manage WAIT_FOR query";
+SET DEBUG_SYNC= "alter_table_inplace_after_lock_downgrade SIGNAL manage WAIT_FOR query";
--echo # Sending:
--send ALTER TABLE db1.t1 ADD INDEX(value)
@@ -186,26 +186,27 @@ connection default;
--reap
DROP DATABASE db1;
---echo # Test 2: Primary index (implicit), should block reads.
+--echo # Test 2: Primary index (implicit), should block writes.
CREATE TABLE t1(a INT NOT NULL, b INT NOT NULL) engine=innodb;
-SET DEBUG_SYNC= "alter_table_manage_keys SIGNAL manage WAIT_FOR query";
+SET DEBUG_SYNC= "alter_table_inplace_after_lock_downgrade SIGNAL manage WAIT_FOR query";
--echo # Sending:
---send ALTER TABLE t1 ADD UNIQUE INDEX(a)
+--send ALTER TABLE t1 ADD UNIQUE INDEX(a), LOCK=SHARED
--echo # Connection con1
connection con1;
SET DEBUG_SYNC= "now WAIT_FOR manage";
USE test;
+SELECT * FROM t1;
--echo # Sending:
---send SELECT * FROM t1
+--send UPDATE t1 SET a=NULL
--echo # Connection con2
connection con2;
--echo # Waiting for SELECT to be blocked by the metadata lock on t1
let $wait_condition= SELECT COUNT(*)= 1 FROM information_schema.processlist
WHERE state= 'Waiting for table metadata lock'
- AND info='SELECT * FROM t1';
+ AND info='UPDATE t1 SET a=NULL';
--source include/wait_condition.inc
SET DEBUG_SYNC= "now SIGNAL query";
@@ -216,30 +217,31 @@ connection default;
--echo # Connection con1
connection con1;
---echo # Reaping: SELECT * FROM t1
+--echo # Reaping: UPDATE t1 SET a=NULL
--reap
---echo # Test 3: Primary index (explicit), should block reads.
+--echo # Test 3: Primary index (explicit), should block writes.
--echo # Connection default
connection default;
ALTER TABLE t1 DROP INDEX a;
-SET DEBUG_SYNC= "alter_table_manage_keys SIGNAL manage WAIT_FOR query";
+SET DEBUG_SYNC= "alter_table_inplace_after_lock_downgrade SIGNAL manage WAIT_FOR query";
--echo # Sending:
---send ALTER TABLE t1 ADD PRIMARY KEY (a)
+--send ALTER TABLE t1 ADD PRIMARY KEY (a), LOCK=SHARED
--echo # Connection con1
connection con1;
SET DEBUG_SYNC= "now WAIT_FOR manage";
+SELECT * FROM t1;
--echo # Sending:
---send SELECT * FROM t1
+--send UPDATE t1 SET a=NULL
--echo # Connection con2
connection con2;
--echo # Waiting for SELECT to be blocked by the metadata lock on t1
let $wait_condition= SELECT COUNT(*)= 1 FROM information_schema.processlist
WHERE state= 'Waiting for table metadata lock'
- AND info='SELECT * FROM t1';
+ AND info='UPDATE t1 SET a=NULL';
--source include/wait_condition.inc
SET DEBUG_SYNC= "now SIGNAL query";
@@ -250,14 +252,14 @@ connection default;
--echo # Connection con1
connection con1;
---echo # Reaping: SELECT * FROM t1
+--echo # Reaping: UPDATE t1 SET a=NULL
--reap
--echo # Test 4: Secondary unique index, should not block reads.
--echo # Connection default
connection default;
-SET DEBUG_SYNC= "alter_table_manage_keys SIGNAL manage WAIT_FOR query";
+SET DEBUG_SYNC= "alter_table_inplace_after_lock_downgrade SIGNAL manage WAIT_FOR query";
--echo # Sending:
--send ALTER TABLE t1 ADD UNIQUE (b)
@@ -304,6 +306,292 @@ DROP TABLE t1;
disconnect con1;
+--echo #
+--echo # Bug#13417754 ASSERT IN ROW_DROP_DATABASE_FOR_MYSQL DURING DROP SCHEMA
+--echo #
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+DROP DATABASE IF EXISTS db1;
+--enable_warnings
+
+CREATE TABLE t1(a int) engine=InnoDB;
+CREATE DATABASE db1;
+
+connect(con1, localhost, root);
+connect(con2, localhost, root);
+
+--echo # Connection con1
+connection con1;
+SET DEBUG_SYNC= 'after_innobase_rename_table SIGNAL locked WAIT_FOR continue';
+--echo # Sending:
+--send ALTER TABLE t1 RENAME db1.t1
+
+--echo # Connection con2
+connection con2;
+SET DEBUG_SYNC= 'now WAIT_FOR locked';
+--echo # DROP DATABASE db1 should now be blocked by ALTER TABLE
+--echo # Sending:
+--send DROP DATABASE db1
+
+--echo # Connection default
+connection default;
+--echo # Check that DROP DATABASE is blocked by IX lock on db1
+let $wait_condition=
+ SELECT COUNT(*) = 1 FROM information_schema.processlist
+ WHERE state = "Waiting for schema metadata lock" and
+ info = "DROP DATABASE db1";
+--source include/wait_condition.inc
+--echo # Resume ALTER TABLE
+SET DEBUG_SYNC= 'now SIGNAL continue';
+
+--echo # Connection con1
+connection con1;
+--echo # Reaping: ALTER TABLE t1 RENAME db1.t1;
+--reap
+
+--echo # Connection con2
+connection con2;
+--echo # Reaping: DROP DATABASE db1
+--reap
+
+--echo # Connection default;
+connection default;
+SET DEBUG_SYNC= 'RESET';
+disconnect con1;
+disconnect con2;
+
+
+--echo #
+--echo # WL#5534 Online ALTER, Phase 1
+--echo #
+
+--echo # Multi thread tests.
+--echo # See alter_table.test for single thread tests.
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1(a INT PRIMARY KEY, b INT) engine=InnoDB;
+INSERT INTO t1 VALUES (1,1), (2,2);
+SET DEBUG_SYNC= 'RESET';
+connect (con1, localhost, root);
+SET SESSION lock_wait_timeout= 1;
+
+--echo #
+--echo # 1: In-place + writes blocked.
+--echo #
+
+--echo # Connection default
+--connection default
+SET DEBUG_SYNC= 'alter_opened_table SIGNAL opened WAIT_FOR continue1';
+SET DEBUG_SYNC= 'alter_table_inplace_after_lock_upgrade SIGNAL upgraded WAIT_FOR continue2';
+SET DEBUG_SYNC= 'alter_table_inplace_before_commit SIGNAL beforecommit WAIT_FOR continue3';
+SET DEBUG_SYNC= 'alter_table_before_main_binlog SIGNAL binlog WAIT_FOR continue4';
+--echo # Sending:
+--send ALTER TABLE t1 ADD INDEX i1(b), ALGORITHM= INPLACE, LOCK= SHARED
+
+--echo # Connection con1;
+--connection con1
+SET DEBUG_SYNC= 'now WAIT_FOR opened';
+--echo # At this point, neither reads nor writes should be blocked.
+SELECT * FROM t1;
+INSERT INTO t1 VALUES (3,3);
+
+SET DEBUG_SYNC= 'now SIGNAL continue1';
+SET DEBUG_SYNC= 'now WAIT_FOR upgraded';
+--echo # Now both reads and writes should be blocked
+--error ER_LOCK_WAIT_TIMEOUT
+SELECT * FROM t1;
+--error ER_LOCK_WAIT_TIMEOUT
+INSERT INTO t1 VALUES (4,4);
+
+SET DEBUG_SYNC= 'now SIGNAL continue2';
+SET DEBUG_SYNC= 'now WAIT_FOR beforecommit';
+--echo # Still both reads and writes should be blocked.
+--error ER_LOCK_WAIT_TIMEOUT
+SELECT * FROM t1;
+--error ER_LOCK_WAIT_TIMEOUT
+INSERT INTO t1 VALUES (5,5);
+
+SET DEBUG_SYNC= 'now SIGNAL continue3';
+SET DEBUG_SYNC= 'now WAIT_FOR binlog';
+--echo # Same here.
+--error ER_LOCK_WAIT_TIMEOUT
+SELECT * FROM t1;
+--error ER_LOCK_WAIT_TIMEOUT
+INSERT INTO t1 VALUES (6,6);
+
+SET DEBUG_SYNC= 'now SIGNAL continue4';
+--echo # Connection default
+--connection default
+--echo # Reaping ALTER TABLE ...
+--reap
+SET DEBUG_SYNC= 'RESET';
+DELETE FROM t1 WHERE a= 3;
+
+--echo #
+--echo # 2: Copy + writes blocked.
+--echo #
+
+SET DEBUG_SYNC= 'alter_opened_table SIGNAL opened WAIT_FOR continue1';
+SET DEBUG_SYNC= 'alter_table_copy_after_lock_upgrade SIGNAL upgraded WAIT_FOR continue2';
+SET DEBUG_SYNC= 'alter_table_before_main_binlog SIGNAL binlog WAIT_FOR continue3';
+--echo # Sending:
+--send ALTER TABLE t1 ADD INDEX i2(b), ALGORITHM= COPY, LOCK= SHARED
+
+--echo # Connection con1;
+--connection con1
+SET DEBUG_SYNC= 'now WAIT_FOR opened';
+--echo # At this point, neither reads nor writes should be blocked.
+SELECT * FROM t1;
+INSERT INTO t1 VALUES (3,3);
+
+SET DEBUG_SYNC= 'now SIGNAL continue1';
+SET DEBUG_SYNC= 'now WAIT_FOR upgraded';
+--echo # Now writes should be blocked, reads still allowed.
+SELECT * FROM t1;
+--error ER_LOCK_WAIT_TIMEOUT
+INSERT INTO t1 VALUES (4,4);
+
+SET DEBUG_SYNC= 'now SIGNAL continue2';
+SET DEBUG_SYNC= 'now WAIT_FOR binlog';
+--echo # Now both reads and writes should be blocked.
+--error ER_LOCK_WAIT_TIMEOUT
+SELECT * FROM t1 limit 1;
+--error ER_LOCK_WAIT_TIMEOUT
+INSERT INTO t1 VALUES (5,5);
+
+SET DEBUG_SYNC= 'now SIGNAL continue3';
+--echo # Connection default
+--connection default
+--echo # Reaping ALTER TABLE ...
+--reap
+SET DEBUG_SYNC= 'RESET';
+DELETE FROM t1 WHERE a= 3;
+
+--echo #
+--echo # 3: In-place + writes allowed.
+--echo #
+
+--echo # TODO: Enable this test once WL#5526 is pushed
+--disable_parsing
+
+--echo # Connection default
+--connection default
+SET DEBUG_SYNC= 'alter_opened_table SIGNAL opened WAIT_FOR continue1';
+SET DEBUG_SYNC= 'alter_table_inplace_after_lock_upgrade SIGNAL upgraded WAIT_FOR continue2';
+SET DEBUG_SYNC= 'alter_table_inplace_after_lock_downgrade SIGNAL downgraded WAIT_FOR continue3';
+SET DEBUG_SYNC= 'alter_table_inplace_before_commit SIGNAL beforecommit WAIT_FOR continue4';
+SET DEBUG_SYNC= 'alter_table_before_main_binlog SIGNAL binlog WAIT_FOR continue5';
+--echo # Sending:
+--send ALTER TABLE t1 ADD INDEX i3(b), ALGORITHM= INPLACE, LOCK= NONE
+
+--echo # Connection con1;
+--connection con1
+SET DEBUG_SYNC= 'now WAIT_FOR opened';
+--echo # At this point, neither reads nor writes should be blocked.
+SELECT * FROM t1;
+INSERT INTO t1 VALUES (3,3);
+
+SET DEBUG_SYNC= 'now SIGNAL continue1';
+SET DEBUG_SYNC= 'now WAIT_FOR upgraded';
+--echo # Now writes should be blocked, reads still allowed.
+SELECT * FROM t1;
+--error ER_LOCK_WAIT_TIMEOUT
+INSERT INTO t1 VALUES (4,4);
+
+SET DEBUG_SYNC= 'now SIGNAL continue2';
+SET DEBUG_SYNC= 'now WAIT_FOR downgraded';
+--echo # Now writes should be allowed again.
+SELECT * FROM t1;
+INSERT INTO t1 VALUES (5,5);
+
+SET DEBUG_SYNC= 'now SIGNAL continue3';
+SET DEBUG_SYNC= 'now WAIT_FOR beforecommit';
+--echo # Now both reads and writes should be blocked.
+--error ER_LOCK_WAIT_TIMEOUT
+SELECT * FROM t1;
+--error ER_LOCK_WAIT_TIMEOUT
+INSERT INTO t1 VALUES (6,6);
+
+SET DEBUG_SYNC= 'now SIGNAL continue4';
+SET DEBUG_SYNC= 'now WAIT_FOR binlog';
+--echo # Same here.
+--error ER_LOCK_WAIT_TIMEOUT
+SELECT * FROM t1;
+--error ER_LOCK_WAIT_TIMEOUT
+INSERT INTO t1 VALUES (7,7);
+
+SET DEBUG_SYNC= 'now SIGNAL continue5';
+--echo # Connection default
+--connection default
+--echo # Reaping ALTER TABLE ...
+--reap
+SET DEBUG_SYNC= 'RESET';
+DELETE FROM t1 WHERE a= 3 OR a= 4;
+
+--echo # TODO: Enable this test once WL#5526 is pushed
+--enable_parsing
+
+--echo #
+--echo # 4: In-place + reads and writes blocked.
+--echo #
+
+--echo # Connection default
+--connection default
+SET DEBUG_SYNC= 'alter_opened_table SIGNAL opened WAIT_FOR continue1';
+SET DEBUG_SYNC= 'alter_table_inplace_after_lock_upgrade SIGNAL upgraded WAIT_FOR continue2';
+SET DEBUG_SYNC= 'alter_table_inplace_before_commit SIGNAL beforecommit WAIT_FOR continue3';
+SET DEBUG_SYNC= 'alter_table_before_main_binlog SIGNAL binlog WAIT_FOR continue4';
+--echo # Sending:
+--send ALTER TABLE t1 ADD INDEX i4(b), ALGORITHM= INPLACE, LOCK= EXCLUSIVE
+
+--echo # Connection con1;
+--connection con1
+SET DEBUG_SYNC= 'now WAIT_FOR opened';
+--echo # At this point, neither reads nor writes should be blocked.
+SELECT * FROM t1;
+INSERT INTO t1 VALUES (3,3);
+
+SET DEBUG_SYNC= 'now SIGNAL continue1';
+SET DEBUG_SYNC= 'now WAIT_FOR upgraded';
+--echo # Now both reads and writes should be blocked.
+--error ER_LOCK_WAIT_TIMEOUT
+SELECT * FROM t1;
+--error ER_LOCK_WAIT_TIMEOUT
+INSERT INTO t1 VALUES (4,4);
+
+SET DEBUG_SYNC= 'now SIGNAL continue2';
+SET DEBUG_SYNC= 'now WAIT_FOR beforecommit';
+--echo # Same here.
+--error ER_LOCK_WAIT_TIMEOUT
+SELECT * FROM t1;
+--error ER_LOCK_WAIT_TIMEOUT
+INSERT INTO t1 VALUES (5,5);
+
+SET DEBUG_SYNC= 'now SIGNAL continue3';
+SET DEBUG_SYNC= 'now WAIT_FOR binlog';
+--echo # Same here.
+--error ER_LOCK_WAIT_TIMEOUT
+SELECT * FROM t1;
+--error ER_LOCK_WAIT_TIMEOUT
+INSERT INTO t1 VALUES (6,6);
+
+SET DEBUG_SYNC= 'now SIGNAL continue4';
+--echo # Connection default
+--connection default
+--echo # Reaping ALTER TABLE ...
+--reap
+SET DEBUG_SYNC= 'RESET';
+
+--connection default
+--disconnect con1
+DROP TABLE t1;
+SET DEBUG_SYNC= 'RESET';
+
+
# Check that all connections opened by test cases in this file are really
# gone so execution of other tests won't be affected by their presence.
--source include/wait_until_count_sessions.inc
diff --git a/mysql-test/t/log_state.test b/mysql-test/t/log_state.test
index e8f0bf8c511..0b900b14b0b 100644
--- a/mysql-test/t/log_state.test
+++ b/mysql-test/t/log_state.test
@@ -48,7 +48,7 @@ connection con1;
set @long_query_time = 2;
set session long_query_time = @long_query_time;
select sleep(@long_query_time + 1);
---replace_column 1 TIMESTAMP 2 USER_HOST 3 QUERY_TIME
+--replace_column 1 TIMESTAMP 2 USER_HOST 3 QUERY_TIME 12 THREAD_ID
select * from mysql.slow_log where sql_text NOT LIKE '%slow_log%';
--echo # Switch to connection default
connection default;
@@ -58,7 +58,7 @@ set global slow_query_log= ON;
connection con1;
set session long_query_time = @long_query_time;
select sleep(@long_query_time + 1);
---replace_column 1 TIMESTAMP 2 USER_HOST 3 QUERY_TIME
+--replace_column 1 TIMESTAMP 2 USER_HOST 3 QUERY_TIME 12 THREAD_ID
select * from mysql.slow_log where sql_text NOT LIKE '%slow_log%';
--echo # Switch to connection default
connection default;
diff --git a/mysql-test/t/log_tables.test b/mysql-test/t/log_tables.test
index eb652946672..0b9932c2c4d 100644
--- a/mysql-test/t/log_tables.test
+++ b/mysql-test/t/log_tables.test
@@ -180,7 +180,7 @@ drop table bug16905;
truncate table mysql.slow_log;
set session long_query_time=1;
select sleep(2);
---replace_column 1 TIMESTAMP 2 USER_HOST 3 QUERY_TIME
+--replace_column 1 TIMESTAMP 2 USER_HOST 3 QUERY_TIME 12 THREAD_ID
select * from mysql.slow_log;
set @@session.long_query_time = @saved_long_query_time;
@@ -290,10 +290,10 @@ drop table mysql.slow_log;
use mysql;
CREATE TABLE `general_log` (
- `event_time` timestamp(6) NOT NULL DEFAULT CURRENT_TIMESTAMP
+ `event_time` TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP
ON UPDATE CURRENT_TIMESTAMP,
`user_host` mediumtext NOT NULL,
- `thread_id` int(11) NOT NULL,
+ `thread_id` BIGINT(21) UNSIGNED NOT NULL,
`server_id` int(10) unsigned NOT NULL,
`command_type` varchar(64) NOT NULL,
`argument` mediumtext NOT NULL
@@ -311,7 +311,8 @@ CREATE TABLE `slow_log` (
`last_insert_id` int(11) NOT NULL,
`insert_id` int(11) NOT NULL,
`server_id` int(10) unsigned NOT NULL,
- `sql_text` mediumtext NOT NULL
+ `sql_text` mediumtext NOT NULL,
+ `thread_id` BIGINT(21) UNSIGNED NOT NULL
) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='Slow log';
set global general_log='ON';
@@ -746,6 +747,7 @@ BEGIN
DECLARE start_time, query_time, lock_time CHAR(28);
DECLARE user_host MEDIUMTEXT;
DECLARE rows_set, rows_examined, last_insert_id, insert_id, server_id INT;
+ DECLARE thread_id BIGINT UNSIGNED;
DECLARE dbname MEDIUMTEXT;
DECLARE sql_text BLOB;
DECLARE done INT DEFAULT 0;
@@ -763,7 +765,7 @@ BEGIN
FETCH cur1 INTO
start_time, user_host, query_time, lock_time,
rows_set, rows_examined, dbname, last_insert_id,
- insert_id, server_id, sql_text;
+ insert_id, server_id, sql_text, thread_id;
END;
IF NOT done THEN
@@ -771,7 +773,7 @@ BEGIN
INSERT INTO
`db_17876.slow_log_data`
VALUES(start_time, user_host, query_time, lock_time, rows_set, rows_examined,
- dbname, last_insert_id, insert_id, server_id, sql_text);
+ dbname, last_insert_id, insert_id, server_id, sql_text, thread_id);
END;
END IF;
END;
diff --git a/mysql-test/t/lowercase_table4.test b/mysql-test/t/lowercase_table4.test
index 3758ab62214..d13b1a16be1 100644
--- a/mysql-test/t/lowercase_table4.test
+++ b/mysql-test/t/lowercase_table4.test
@@ -72,7 +72,7 @@ CREATE TABLE `Table1`(c1 INT PRIMARY KEY) ENGINE=InnoDB;
CREATE TABLE `Table2`(c1 INT PRIMARY KEY, c2 INT) ENGINE=InnoDB;
ALTER TABLE `Table2` ADD CONSTRAINT fk1 FOREIGN KEY(c2) REFERENCES `Table1`(c1);
query_vertical SHOW CREATE TABLE `Table2`;
-query_vertical SELECT * FROM INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS;
+query_vertical SELECT * FROM INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS WHERE CONSTRAINT_SCHEMA='test';
DROP TABLE `Table2`;
DROP TABLE `Table1`;
@@ -101,7 +101,7 @@ CREATE TABLE Product_Order (No INT NOT NULL AUTO_INCREMENT,
query_vertical SHOW CREATE TABLE Product_Order;
query_vertical SHOW CREATE TABLE Product;
query_vertical SHOW CREATE TABLE Customer;
-query_vertical SELECT * FROM INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS;
+query_vertical SELECT * FROM INFORMATION_SCHEMA.REFERENTIAL_CONSTRAINTS WHERE CONSTRAINT_SCHEMA='test';
DROP TABLE Product_Order;
DROP TABLE Product;
DROP TABLE Customer;
diff --git a/mysql-test/t/mdl_sync.test b/mysql-test/t/mdl_sync.test
index fef8a9848ca..57eea6c4fc1 100644
--- a/mysql-test/t/mdl_sync.test
+++ b/mysql-test/t/mdl_sync.test
@@ -38,7 +38,7 @@ lock tables t2 read;
connection con1;
--echo connection: con1
-set debug_sync='mdl_upgrade_shared_lock_to_exclusive SIGNAL parked WAIT_FOR go';
+set debug_sync='mdl_upgrade_lock SIGNAL parked WAIT_FOR go';
--send alter table t1 rename t3
connection default;
@@ -110,8 +110,13 @@ select column_name from information_schema.columns where
table_schema='test' and table_name='t1';
select count(*) from t1;
insert into t1 values (1), (1);
+--echo # Check that SU lock is compatible with it. To do this use ALTER TABLE
+--echo # which will fail when constructing .frm and thus obtaining SU metadata
+--echo # lock.
+--error ER_KEY_COLUMN_DOES_NOT_EXITS
+alter table t1 add index (not_exist);
--echo # Check that SNW lock is compatible with it. To do this use ALTER TABLE
---echo # which will fail after opening the table and thus obtaining SNW metadata
+--echo # which will fail during copying the table and thus obtaining SNW metadata
--echo # lock.
--error ER_DUP_ENTRY
alter table t1 add primary key (c1);
@@ -230,8 +235,13 @@ select column_name from information_schema.columns where
table_schema='test' and table_name='t1';
select count(*) from t1;
insert into t1 values (1);
+--echo # Check that SU lock is compatible with it. To do this use ALTER TABLE
+--echo # which will fail when constructing .frm and thus obtaining SU metadata
+--echo # lock.
+--error ER_KEY_COLUMN_DOES_NOT_EXITS
+alter table t1 add index (not_exist);
--echo # Check that SNW lock is compatible with it. To do this use ALTER TABLE
---echo # which will fail after opening the table and thus obtaining SNW metadata
+--echo # which will fail during copying the table and thus obtaining SNW metadata
--echo # lock.
--error ER_DUP_ENTRY
alter table t1 add primary key (c1);
@@ -359,8 +369,13 @@ select column_name from information_schema.columns where
table_schema='test' and table_name='t1';
select count(*) from t1;
insert into t1 values (1);
+--echo # Check that SU lock is compatible with it. To do this use ALTER TABLE
+--echo # which will fail when constructing .frm and thus obtaining SU metadata
+--echo # lock.
+--error ER_KEY_COLUMN_DOES_NOT_EXITS
+alter table t1 add index (not_exist);
--echo # Check that SNW lock is compatible with it. To do this use ALTER TABLE
---echo # which will fail after opening the table and thus obtaining SNW metadata
+--echo # which will fail during copying the table and thus obtaining SNW metadata
--echo # lock.
--error ER_DUP_ENTRY
alter table t1 add primary key (c1);
@@ -477,8 +492,13 @@ select column_name from information_schema.columns where
select * from t1;
--enable_result_log
insert into t1 values (1);
+--echo # Check that SU lock is compatible with it. To do this use ALTER TABLE
+--echo # which will fail when constructing .frm and thus obtaining SU metadata
+--echo # lock.
+--error ER_KEY_COLUMN_DOES_NOT_EXITS
+alter table t1 add index (not_exist);
--echo # Check that SNW lock is not compatible with SW lock.
---echo # Again we use ALTER TABLE which fails after opening
+--echo # Again we use ALTER TABLE which fails during copying
--echo # the table to avoid upgrade of SNW -> X.
--echo # Sending:
--send alter table t1 add primary key (c1);
@@ -570,16 +590,144 @@ rename table t2 to t1;
connection default;
--echo #
--echo #
---echo # 5) Acquire SNW lock on the table. We have to use DEBUG_SYNC for
---echo # this, to prevent SNW from being immediately upgraded to X.
+--echo # 5) Acquire SU lock on the table. We have to use DEBUG_SYNC for
+--echo # this, to prevent SU from being immediately upgraded to X.
--echo #
-set debug_sync= 'after_open_table_mdl_shared SIGNAL locked WAIT_FOR finish';
+set debug_sync= 'alter_opened_table SIGNAL locked WAIT_FOR finish';
+--echo # Sending:
+--send alter table t1 add primary key (c1);
+--echo #
+--echo # Switching to connection 'mdl_con1'.
+connection mdl_con1;
+set debug_sync= 'now WAIT_FOR locked';
+--echo # Check that S, SH, SR and SW locks are compatible with it.
+handler t1 open;
+handler t1 close;
+select column_name from information_schema.columns where
+ table_schema='test' and table_name='t1';
+select count(*) from t1;
+delete from t1 limit 1;
+--echo # Check that SU lock is incompatible with SU lock.
+--echo # Sending:
+--send alter table t1 add primary key (c1);
+--echo #
+--echo # Switching to connection 'mdl_con2'.
+connection mdl_con2;
+--echo # Check that the above ALTER is blocked because of SU lock.
+let $wait_condition=
+ select count(*) = 1 from information_schema.processlist
+ where state = "Waiting for table metadata lock" and
+ info = "alter table t1 add primary key (c1)";
+--source include/wait_condition.inc
+--echo # Unblock ALTERs.
+set debug_sync= 'now SIGNAL finish';
+--echo #
+--echo # Switching to connection 'default'.
+connection default;
+--echo # Reaping first ALTER TABLE.
+--error ER_DUP_ENTRY
+--reap
+--echo #
+--echo # Switching to connection 'mdl_con1'.
+connection mdl_con1;
+--echo # Reaping another ALTER TABLE.
+--error ER_DUP_ENTRY
+--reap
+--echo #
+--echo # Switching to connection 'default'.
+connection default;
+set debug_sync= 'alter_opened_table SIGNAL locked WAIT_FOR finish';
--echo # Sending:
--send alter table t1 add primary key (c1);
--echo #
--echo # Switching to connection 'mdl_con1'.
connection mdl_con1;
set debug_sync= 'now WAIT_FOR locked';
+--echo # Check that SNRW lock is incompatible with SU lock.
+--echo # Sending:
+--send lock table t1 write;
+--echo #
+--echo # Switching to connection 'mdl_con2'.
+connection mdl_con2;
+--echo # Check that the above LOCK TABLES is blocked because of SU lock.
+let $wait_condition=
+ select count(*) = 1 from information_schema.processlist
+ where state = "Waiting for table metadata lock" and
+ info = "lock table t1 write";
+--source include/wait_condition.inc
+--echo # Unblock ALTER and thus LOCK TABLES.
+set debug_sync= 'now SIGNAL finish';
+--echo #
+--echo # Switching to connection 'default'.
+connection default;
+--echo # Reaping ALTER TABLE.
+--error ER_DUP_ENTRY
+--reap
+--echo #
+--echo # Switching to connection 'mdl_con1'.
+connection mdl_con1;
+--echo # Reaping LOCK TABLES
+--reap
+insert into t1 values (1);
+unlock tables;
+--echo #
+--echo # Switching to connection 'default'.
+connection default;
+set debug_sync= 'alter_opened_table SIGNAL locked WAIT_FOR finish';
+--echo # Sending:
+--send alter table t1 add primary key (c1);
+--echo #
+--echo # Switching to connection 'mdl_con1'.
+connection mdl_con1;
+set debug_sync= 'now WAIT_FOR locked';
+--echo # Check that X lock is incompatible with SU lock.
+--echo # Sending:
+--send rename table t1 to t2;
+--echo #
+--echo # Switching to connection 'mdl_con2'.
+connection mdl_con2;
+--echo # Check that the above RENAME is blocked because of SU lock.
+let $wait_condition=
+ select count(*) = 1 from information_schema.processlist
+ where state = "Waiting for table metadata lock" and
+ info = "rename table t1 to t2";
+--source include/wait_condition.inc
+--echo # Unblock ALTER and thus RENAME TABLE.
+set debug_sync= 'now SIGNAL finish';
+--echo #
+--echo # Switching to connection 'default'.
+connection default;
+--echo # Now we have ALTER TABLE with SU->SNW and RENAME TABLE with pending
+--echo # X-lock. In this case ALTER TABLE should be chosen as victim.
+--echo # Reaping ALTER TABLE.
+--error ER_LOCK_DEADLOCK
+--reap
+--echo #
+--echo # Switching to connection 'mdl_con1'.
+connection mdl_con1;
+--echo # Reaping RENAME TABLE
+--reap
+--echo # Revert back to original state of things.
+rename table t2 to t1;
+--echo #
+--echo # There is no need to check that upgrade from SNW/SNRW to X is
+--echo # blocked by presence of another SU lock because SNW/SNRW is
+--echo # incompatible with SU anyway.
+--echo #
+--echo # Switching to connection 'default'.
+connection default;
+--echo #
+--echo #
+--echo # 6) Acquire SNW lock on the table. We have to use DEBUG_SYNC for
+--echo # this, to prevent SNW from being immediately upgraded to X.
+--echo #
+set debug_sync= 'alter_table_copy_after_lock_upgrade SIGNAL locked WAIT_FOR finish';
+--echo # Sending:
+--send alter table t1 add primary key (c1), lock=shared, algorithm=copy;
+--echo #
+--echo # Switching to connection 'mdl_con1'.
+connection mdl_con1;
+set debug_sync= 'now WAIT_FOR locked';
--echo # Check that S, SH and SR locks are compatible with it.
handler t1 open;
handler t1 close;
@@ -614,14 +762,14 @@ connection mdl_con1;
--echo #
--echo # Switching to connection 'default'.
connection default;
-set debug_sync= 'after_open_table_mdl_shared SIGNAL locked WAIT_FOR finish';
+set debug_sync= 'alter_table_copy_after_lock_upgrade SIGNAL locked WAIT_FOR finish';
--echo # Sending:
---send alter table t1 add primary key (c1);
+--send alter table t1 add primary key (c1), lock=shared, algorithm=copy;
--echo #
--echo # Switching to connection 'mdl_con1'.
connection mdl_con1;
set debug_sync= 'now WAIT_FOR locked';
---echo # Check that SNW lock is incompatible with SNW lock.
+--echo # Check that SU lock is incompatible with SNW lock.
--echo # Sending:
--send alter table t1 add primary key (c1);
--echo #
@@ -648,11 +796,15 @@ connection mdl_con1;
--error ER_DUP_ENTRY
--reap
--echo #
+--echo # Note that we can't easily check SNW vs SNW locks since
+--echo # SNW is only used by ALTER TABLE after upgrading from SU
+--echo # and SU is also incompatible with SNW.
+--echo #
--echo # Switching to connection 'default'.
connection default;
-set debug_sync= 'after_open_table_mdl_shared SIGNAL locked WAIT_FOR finish';
+set debug_sync= 'alter_table_copy_after_lock_upgrade SIGNAL locked WAIT_FOR finish';
--echo # Sending:
---send alter table t1 add primary key (c1);
+--send alter table t1 add primary key (c1), lock=shared, algorithm=copy;
--echo #
--echo # Switching to connection 'mdl_con1'.
connection mdl_con1;
@@ -687,9 +839,9 @@ unlock tables;
--echo #
--echo # Switching to connection 'default'.
connection default;
-set debug_sync= 'after_open_table_mdl_shared SIGNAL locked WAIT_FOR finish';
+set debug_sync= 'alter_table_copy_after_lock_upgrade SIGNAL locked WAIT_FOR finish';
--echo # Sending:
---send alter table t1 add primary key (c1);
+--send alter table t1 add primary key (c1), algorithm=copy, lock=shared;
--echo #
--echo # Switching to connection 'mdl_con1'.
connection mdl_con1;
@@ -730,7 +882,7 @@ rename table t2 to t1;
connection default;
--echo #
--echo #
---echo # 6) Acquire SNRW lock on the table.
+--echo # 7) Acquire SNRW lock on the table.
--echo #
--echo #
lock table t1 write;
@@ -794,13 +946,13 @@ lock table t1 write;
--echo #
--echo # Switching to connection 'mdl_con1'.
connection mdl_con1;
---echo # Check that SNW lock is incompatible with SNRW lock.
+--echo # Check that SU lock is incompatible with SNRW lock.
--echo # Sending:
--send alter table t1 add primary key (c1);
--echo #
--echo # Switching to connection 'default'.
connection default;
---echo # Check that the above ALTER is blocked because of UNWR lock.
+--echo # Check that the above ALTER is blocked because of SNRW lock.
let $wait_condition=
select count(*) = 1 from information_schema.processlist
where state = "Waiting for table metadata lock" and
@@ -815,6 +967,10 @@ connection mdl_con1;
--error ER_DUP_ENTRY
--reap
--echo #
+--echo # Note that we can't easily check SNW vs SNRW locks since
+--echo # SNW is only used by ALTER TABLE after upgrading from SU
+--echo # and SU is also incompatible with SNRW.
+--echo #
--echo # Switching to connection 'default'.
connection default;
lock table t1 write;
@@ -879,7 +1035,7 @@ rename table t2 to t1;
connection default;
--echo #
--echo #
---echo # 7) Now do the same round of tests for X lock. We use additional
+--echo # 8) Now do the same round of tests for X lock. We use additional
--echo # table to get long-lived lock of this type.
--echo #
create table t2 (c1 int);
@@ -1083,7 +1239,7 @@ select count(*) = 1 from information_schema.processlist
where state = "Waiting for table metadata lock" and
info = "rename table t1 to t2";
--source include/wait_condition.inc
---echo # Check that SNW lock is incompatible with X lock.
+--echo # Check that SU lock is incompatible with X lock.
--echo # Sending:
--send alter table t1 add primary key (c1);
--echo #
@@ -1110,7 +1266,11 @@ connection mdl_con1;
--echo # Reaping ALTER.
--error ER_DUP_ENTRY
--reap
---echo #
+--echo #
+--echo # Note that we can't easily check SNW vs X locks since
+--echo # SNW is only used by ALTER TABLE after upgrading from SU
+--echo # and SU is also incompatible with X.
+--echo #
--echo # Switching to connection 'mdl_con2'.
connection mdl_con2;
--echo # Prepare for blocking RENAME TABLE.
@@ -1208,6 +1368,9 @@ rename table t3 to t1;
--echo # are pending. I.e. let us test rules for priorities between
--echo # different types of metadata locks.
--echo #
+--echo # Note: No tests for pending SU lock as this lock requires
+--echo # even stronger active or pending lock.
+--echo #
--echo #
--echo # Switching to connection 'mdl_con2'.
@@ -1657,6 +1820,9 @@ connection default;
--echo # transactional context. Obviously we are mostly interested
--echo # in conflicting types of locks.
--echo #
+--echo # Note: No tests for active/pending SU lock since
+--echo # ALTER TABLE is in its own transaction.
+--echo #
--echo #
--echo # 1) Let us check how various locks used within transactional
@@ -1673,9 +1839,9 @@ connection mdl_con1;
--echo # We have to use DEBUG_SYNC facility as otherwise SNW lock
--echo # will be immediately released (or upgraded to X lock).
insert into t2 values (1), (1);
-set debug_sync= 'after_open_table_mdl_shared SIGNAL locked WAIT_FOR finish';
+set debug_sync= 'alter_table_copy_after_lock_upgrade SIGNAL locked WAIT_FOR finish';
--echo # Sending:
---send alter table t2 add primary key (c1);
+--send alter table t2 add primary key (c1), algorithm=copy, lock=shared;
--echo #
--echo # Switching to connection 'default'.
connection default;
@@ -1724,9 +1890,9 @@ select count(*) from t1;
--echo # Switching to connection 'mdl_con1'.
connection mdl_con1;
--echo # Create an active SNW lock on t1.
-set debug_sync= 'after_open_table_mdl_shared SIGNAL locked WAIT_FOR finish';
+set debug_sync= 'alter_table_copy_after_lock_upgrade SIGNAL locked WAIT_FOR finish';
--echo # Sending:
---send alter table t1 add primary key (c1);
+--send alter table t1 add primary key (c1), algorithm=copy, lock=shared;
--echo #
--echo # Switching to connection 'default'.
connection default;
@@ -2845,7 +3011,7 @@ drop tables t1, t2;
create table t1 (i int);
--echo # Ensure that ALTER waits once it has acquired SNW lock.
-set debug_sync='after_open_table_mdl_shared SIGNAL parked1 WAIT_FOR go1';
+set debug_sync='alter_table_copy_after_lock_upgrade SIGNAL parked1 WAIT_FOR go1';
--echo # Sending:
--send alter table t1 add column j int
@@ -3345,18 +3511,33 @@ drop tables if exists t1, t2;
--enable_warnings
connect (con46044, localhost, root,,);
connect (con46044_2, localhost, root,,);
+connect (con46044_3, localhost, root,,);
connection default;
create table t1 (i int);
+insert into t1 values(1);
--echo # Let us check that we won't deadlock if during filling
--echo # of I_S table we encounter conflicting metadata lock
--echo # which owner is in its turn waiting for our connection.
lock tables t1 read;
+--echo # Switching to connection 'con46044_2'.
+connection con46044_2;
+--echo # Sending:
+--send update t1 set i = 2
+
--echo # Switching to connection 'con46044'.
connection con46044;
+
+--echo # Waiting until UPDATE t1 SET ... is blocked.
+let $wait_condition=
+ select count(*) = 1 from information_schema.processlist
+ where state = "Waiting for table level lock" and
+ info = "update t1 set i = 2";
+--source include/wait_condition.inc
+
--echo # Sending:
---send create table t2 select * from t1 for update;
+--send create table t2 select * from t1;
--echo # Switching to connection 'default'.
connection default;
@@ -3364,7 +3545,7 @@ connection default;
let $wait_condition=
select count(*) = 1 from information_schema.processlist
where state = "Waiting for table level lock" and
- info = "create table t2 select * from t1 for update";
+ info = "create table t2 select * from t1";
--source include/wait_condition.inc
--echo # First let us check that SHOW FIELDS/DESCRIBE doesn't
@@ -3395,6 +3576,10 @@ connection con46044;
--reap
drop table t2;
+connection con46044_2;
+--echo # Reaping UPDATE t1 statement
+--reap
+
--echo #
--echo # Let us also check that queries to I_S wait for conflicting metadata
--echo # locks to go away instead of skipping table with a warning in cases
@@ -3407,10 +3592,23 @@ drop table t2;
connection con46044_2;
lock tables t1 read;
+--echo # Switching to connection 'con46044_3'.
+connection con46044_3;
+--echo # Sending:
+send update t1 set i = 3;
+
--echo # Switching to connection 'con46044'.
connection con46044;
+
+--echo # Waiting until UPDATE t1 SET ... is blocked.
+let $wait_condition=
+ select count(*) = 1 from information_schema.processlist
+ where state = "Waiting for table level lock" and
+ info = "update t1 set i = 3";
+--source include/wait_condition.inc
+
--echo # Sending:
---send create table t2 select * from t1 for update;
+--send create table t2 select * from t1;
--echo # Switching to connection 'default'.
connection default;
@@ -3418,7 +3616,7 @@ connection default;
let $wait_condition=
select count(*) = 1 from information_schema.processlist
where state = "Waiting for table level lock" and
- info = "create table t2 select * from t1 for update";
+ info = "create table t2 select * from t1";
--source include/wait_condition.inc
--echo # Let us check that SHOW FIELDS/DESCRIBE gets blocked.
@@ -3447,14 +3645,31 @@ connection default;
--reap
drop table t2;
+connection con46044_3;
+--echo # Reaping UPDATE t1 statement
+--reap
+
--echo # Switching to connection 'con46044_2'.
connection con46044_2;
lock tables t1 read;
+--echo # Switching to connection 'con46044_3'.
+connection con46044_3;
+--echo # Sending:
+--send update t1 set i = 4
+
--echo # Switching to connection 'con46044'.
connection con46044;
+
+--echo # Waiting until UPDATE t1 SET ... is blocked.
+let $wait_condition=
+ select count(*) = 1 from information_schema.processlist
+ where state = "Waiting for table level lock" and
+ info = "update t1 set i = 4";
+--source include/wait_condition.inc
+
--echo # Sending:
---send create table t2 select * from t1 for update;
+--send create table t2 select * from t1;
--echo # Switching to connection 'default'.
connection default;
@@ -3462,7 +3677,7 @@ connection default;
let $wait_condition=
select count(*) = 1 from information_schema.processlist
where state = "Waiting for table level lock" and
- info = "create table t2 select * from t1 for update";
+ info = "create table t2 select * from t1";
--source include/wait_condition.inc
--echo # Check that I_S query which reads only .FRMs gets blocked.
@@ -3491,14 +3706,31 @@ connection default;
--reap
drop table t2;
+connection con46044_3;
+--echo # Reaping UPDATE t1 statement
+--reap
+
--echo # Switching to connection 'con46044_2'.
connection con46044_2;
lock tables t1 read;
+--echo # Switching to connection 'con46044_3'.
+connection con46044_3;
+--echo # Sending:
+--send update t1 set i = 5
+
--echo # Switching to connection 'con46044'.
connection con46044;
+
+--echo # Waiting until UPDATE t1 SET ... is blocked.
+let $wait_condition=
+ select count(*) = 1 from information_schema.processlist
+ where state = "Waiting for table level lock" and
+ info = "update t1 set i = 5";
+--source include/wait_condition.inc
+
--echo # Sending:
---send create table t2 select * from t1 for update;
+--send create table t2 select * from t1;
--echo # Switching to connection 'default'.
connection default;
@@ -3506,7 +3738,7 @@ connection default;
let $wait_condition=
select count(*) = 1 from information_schema.processlist
where state = "Waiting for table level lock" and
- info = "create table t2 select * from t1 for update";
+ info = "create table t2 select * from t1";
--source include/wait_condition.inc
--echo # Finally, check that I_S query which does full-blown table open
@@ -3536,11 +3768,16 @@ connection default;
--reap
drop table t2;
+connection con46044_3;
+--echo # Reaping UPDATE t1 statement
+--reap
+
--echo # Switching to connection 'default'.
connection default;
--echo # Clean-up.
disconnect con46044;
disconnect con46044_2;
+disconnect con46044_3;
drop table t1;
@@ -3563,7 +3800,7 @@ select * from t1 where c2 = 3;
--echo #
--echo # Switching to connection 'con46273'.
connection con46273;
-set debug_sync='after_lock_tables_takes_lock SIGNAL alter_table_locked WAIT_FOR alter_go';
+set debug_sync='alter_table_copy_after_lock_upgrade SIGNAL alter_table_locked WAIT_FOR alter_go';
--send alter table t1 add column e int, rename to t2;
--echo #
@@ -3826,9 +4063,9 @@ create table t1 (i int) engine=InnoDB;
--echo # Switching to connection 'con50913_1'.
connection con50913_1;
-set debug_sync= 'thr_multi_lock_after_thr_lock SIGNAL parked WAIT_FOR go';
+set debug_sync= 'alter_table_copy_after_lock_upgrade SIGNAL parked WAIT_FOR go';
--echo # Sending:
---send alter table t1 add column j int
+--send alter table t1 add column j int, ALGORITHM=COPY
--echo # Switching to connection 'default'.
connection default;
@@ -3897,7 +4134,7 @@ select * from t1;
connection default;
--echo # Start ALTER TABLE which will acquire SNW lock and
--echo # table lock and get blocked on sync point.
-set debug_sync= 'thr_multi_lock_after_thr_lock SIGNAL parked WAIT_FOR go';
+set debug_sync= 'alter_table_copy_after_lock_upgrade SIGNAL parked WAIT_FOR go';
--echo # Sending:
--send alter table t1 add column j int
@@ -4562,7 +4799,9 @@ connect(con2, localhost, root);
--echo # Connection con1
connection con1;
-SET DEBUG_SYNC= 'mdl_upgrade_shared_lock_to_exclusive SIGNAL upgrade WAIT_FOR continue';
+--echo # We need EXECUTE 2 since ALTER TABLE does SU => SNW => X and we want
+--echo # to stop at the second upgrade.
+SET DEBUG_SYNC= 'mdl_upgrade_lock SIGNAL upgrade WAIT_FOR continue EXECUTE 2';
--echo # Sending:
--send ALTER TABLE m1 engine=MERGE UNION=(t2, t1)
@@ -4570,6 +4809,8 @@ SET DEBUG_SYNC= 'mdl_upgrade_shared_lock_to_exclusive SIGNAL upgrade WAIT_FOR co
connection con2;
--echo # Waiting for ALTER TABLE to try lock upgrade
SET DEBUG_SYNC= 'now WAIT_FOR upgrade';
+SET DEBUG_SYNC= 'now SIGNAL continue';
+SET DEBUG_SYNC= 'now WAIT_FOR upgrade';
--echo # Sending:
--send DELETE FROM t2 WHERE a = 3
diff --git a/mysql-test/t/partition.test b/mysql-test/t/partition.test
index bad59ff09c3..713b3ed7347 100644
--- a/mysql-test/t/partition.test
+++ b/mysql-test/t/partition.test
@@ -89,6 +89,16 @@ AND A.c = 343;
DROP TABLE t1;
--echo #
+--echo # Bug#59503: explain extended crash in get_mm_leaf
+--echo #
+CREATE TABLE t1 (a VARCHAR(51) CHARACTER SET latin1)
+ENGINE=MyISAM
+PARTITION BY KEY (a) PARTITIONS 1;
+INSERT INTO t1 VALUES ('a'),('b'),('c');
+EXPLAIN EXTENDED SELECT 1 FROM t1 WHERE a > 1;
+DROP TABLE t1;
+
+--echo #
--echo # Bug#57778: failed primary key add to partitioned innodb table
--echo # inconsistent and crashes
--echo #
@@ -303,6 +313,31 @@ DROP TABLE t1;
#
# Bug#35765: ALTER TABLE produces wrong error when non-existent storage engine
# used
+SET sql_mode=no_engine_substitution;
+--error ER_UNKNOWN_STORAGE_ENGINE
+CREATE TABLE t1 (a INT)
+ENGINE=NonExistentEngine;
+--error ER_UNKNOWN_STORAGE_ENGINE
+CREATE TABLE t1 (a INT)
+ENGINE=NonExistentEngine
+PARTITION BY HASH (a);
+CREATE TABLE t1 (a INT)
+ENGINE=Memory;
+--error ER_UNKNOWN_STORAGE_ENGINE
+ALTER TABLE t1 ENGINE=NonExistentEngine;
+# OK to only specify one partitions engine, since it is already assigned at
+# table level (after create, it is specified on all levels and all parts).
+--error ER_UNKNOWN_STORAGE_ENGINE
+ALTER TABLE t1
+PARTITION BY HASH (a)
+(PARTITION p0 ENGINE=Memory,
+ PARTITION p1 ENGINE=NonExistentEngine);
+--error ER_UNKNOWN_STORAGE_ENGINE
+ALTER TABLE t1 ENGINE=NonExistentEngine;
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+
+SET sql_mode='';
CREATE TABLE t1 (a INT)
ENGINE=NonExistentEngine;
DROP TABLE t1;
@@ -322,6 +357,7 @@ PARTITION BY HASH (a)
ALTER TABLE t1 ENGINE=NonExistentEngine;
SHOW CREATE TABLE t1;
DROP TABLE t1;
+SET sql_mode=DEFAULT;
#
# Bug#40494: Crash MYSQL server crashes on range access with partitioning
@@ -990,13 +1026,13 @@ drop table t1;
#
# Bug #16775: Wrong engine type stored for subpartition
#
-set session storage_engine= 'memory';
+set session default_storage_engine= 'memory';
create table t1 (f_int1 int(11) default null) engine = memory
partition by range (f_int1) subpartition by hash (f_int1)
(partition part1 values less than (1000)
(subpartition subpart11 engine = memory));
drop table t1;
-set session storage_engine='myisam';
+set session default_storage_engine='myisam';
#
# Bug #16782: Crash using REPLACE on table with primary key
@@ -1840,8 +1876,7 @@ WHERE t1.id IN (
SELECT distinct id
FROM t4
WHERE taken BETWEEN @f_date AND date_add(@t_date, INTERVAL 1 DAY))
-ORDER BY t1.id
-;
+ORDER BY t1.id;
drop table t1, t2, t4;
diff --git a/mysql-test/t/partition_binlog.test b/mysql-test/t/partition_binlog.test
index 9869be75759..d6986c86ebe 100644
--- a/mysql-test/t/partition_binlog.test
+++ b/mysql-test/t/partition_binlog.test
@@ -20,7 +20,7 @@ INSERT INTO t1 VALUES (1), (10), (100), (1000);
--let $binlog_file=query_get_value(SHOW MASTER STATUS, File, 1)
--let $binlog_start=query_get_value(SHOW MASTER STATUS, Position, 1)
---error ER_WRONG_PARTITION_NAME
+--error ER_UNKNOWN_PARTITION
ALTER TABLE t1 TRUNCATE PARTITION p1;
--error ER_DROP_PARTITION_NON_EXISTENT
ALTER TABLE t1 DROP PARTITION p1;
diff --git a/mysql-test/t/partition_debug_sync.test b/mysql-test/t/partition_debug_sync.test
index bcec5503e6f..3ca21c2185a 100644
--- a/mysql-test/t/partition_debug_sync.test
+++ b/mysql-test/t/partition_debug_sync.test
@@ -5,6 +5,8 @@
#
--source include/have_partition.inc
--source include/have_debug_sync.inc
+--source include/have_debug.inc
+--source include/have_innodb.inc
--disable_warnings
DROP TABLE IF EXISTS t1, t2;
@@ -16,7 +18,9 @@ SET DEBUG_SYNC= 'RESET';
--echo # Test when remove partitioning is done while drop table is waiting
--echo # for the table.
--echo # After MDL was introduced, there is no longer any race, so test is done
---echo # by adding a small sleep to verify that the delete waits.
+--echo # by adding a small sleep to verify that the delete waits. This happens
+--echo # only until ALTER tries to upgrade its MDL lock, which ends up in MDL
+--echo # deadlock which is correctly reported.
connect(con1, localhost, root,,);
--echo # Con 1
SET DEBUG_SYNC= 'RESET';
@@ -31,16 +35,17 @@ ENGINE = MYISAM
PARTITION p2 VALUES LESS THAN (100),
PARTITION p3 VALUES LESS THAN MAXVALUE ) */;
SET DEBUG_SYNC= 'alter_table_before_create_table_no_lock SIGNAL removing_partitioning WAIT_FOR waiting_for_alter';
-SET DEBUG_SYNC= 'alter_table_before_main_binlog SIGNAL partitioning_removed';
+SET DEBUG_SYNC= 'mdl_acquire_lock_wait SIGNAL waiting_for_upgrade';
--send ALTER TABLE t1 REMOVE PARTITIONING
connection default;
--echo # Con default
SET DEBUG_SYNC= 'now WAIT_FOR removing_partitioning';
SET DEBUG_SYNC= 'mdl_acquire_lock_wait SIGNAL waiting_for_alter';
-SET DEBUG_SYNC= 'rm_table_no_locks_before_delete_table WAIT_FOR partitioning_removed';
+SET DEBUG_SYNC= 'rm_table_no_locks_before_delete_table WAIT_FOR waiting_for_upgrade';
DROP TABLE IF EXISTS t1;
--echo # Con 1
connection con1;
+--error ER_LOCK_DEADLOCK
--reap
connection default;
SET DEBUG_SYNC= 'RESET';
@@ -77,8 +82,54 @@ connection con1;
--reap
SET DEBUG_SYNC= 'RESET';
disconnect con1;
+--source include/wait_until_disconnected.inc
connection default;
--echo # Con default
SET DEBUG_SYNC= 'RESET';
--echo End of 5.1 tests
+
+--echo #
+--echo # Coverage test for non pruned ha_partition::store_lock()
+--echo #
+CREATE TABLE t1 (a int) ENGINE = InnoDB;
+CREATE TABLE t2 (a int PRIMARY KEY)
+ENGINE = InnoDB PARTITION BY HASH (a) PARTITIONS 3;
+
+HANDLER t1 OPEN;
+
+--echo # Con1
+connect (con1, localhost, root,,);
+
+LOCK TABLES t1 WRITE, t2 READ;
+
+--echo # Default
+connection default;
+
+SET DEBUG_SYNC="wait_for_lock SIGNAL locking";
+send INSERT INTO t2 VALUES (1), (2), (3);
+
+--echo # Con1
+connection con1;
+SET DEBUG_SYNC="now WAIT_FOR locking";
+
+send ALTER TABLE t1 ADD COLUMN b int;
+
+--echo # Default
+connection default;
+--error ER_LOCK_ABORTED
+--reap
+
+SELECT 1;
+
+--echo # Con1
+connection con1;
+--reap
+
+UNLOCK TABLES;
+--disconnect con1
+
+--echo # Default
+connection default;
+
+DROP TABLE t1, t2;
diff --git a/mysql-test/t/partition_innodb.test b/mysql-test/t/partition_innodb.test
index ea8faec0d51..7d903fa1165 100644
--- a/mysql-test/t/partition_innodb.test
+++ b/mysql-test/t/partition_innodb.test
@@ -41,6 +41,17 @@ EXPLAIN SELECT b FROM t1 WHERE b between 'L' and 'N' AND a > -100;
DROP TABLE t1;
--echo #
+--echo # Bug#13007154: Crash in keys_to_use_for_scanning with ORDER BY
+--echo # and PARTITIONING
+--echo #
+CREATE TABLE t1 (a INT, KEY(a))
+ENGINE = InnoDB
+PARTITION BY KEY (a) PARTITIONS 1;
+SELECT 1 FROM t1 WHERE a > (SELECT LAST_INSERT_ID() FROM t1 LIMIT 0)
+ORDER BY a;
+DROP TABLE t1;
+
+--echo #
--echo # Bug#56287: crash when using Partition datetime in sub in query
--echo #
@@ -71,7 +82,7 @@ DROP TABLE t1;
--echo # SELECT is not detected
--echo #
-SET @old_innodb_thread_concurrency:= @@innodb_thread_concurrency;
+SET @old_innodb_thread_concurrency := @@innodb_thread_concurrency;
SET @old_innodb_thread_sleep_delay := @@innodb_thread_sleep_delay;
SET GLOBAL innodb_thread_concurrency = 1;
diff --git a/mysql-test/t/partition_mgm_err.test b/mysql-test/t/partition_mgm_err.test
index f921fa8ebca..0987c427fc7 100644
--- a/mysql-test/t/partition_mgm_err.test
+++ b/mysql-test/t/partition_mgm_err.test
@@ -147,7 +147,7 @@ PARTITION BY KEY (a)
ALTER TABLE t1 ADD PARTITION PARTITIONS 0;
--error ER_TOO_MANY_PARTITIONS_ERROR
-ALTER TABLE t1 ADD PARTITION PARTITIONS 1024;
+ALTER TABLE t1 ADD PARTITION PARTITIONS 8192;
--error ER_ONLY_ON_RANGE_LIST_PARTITION
ALTER TABLE t1 DROP PARTITION x0;
diff --git a/mysql-test/t/partition_myisam.test b/mysql-test/t/partition_myisam.test
index 49c5d793169..a33b9e19fbf 100644
--- a/mysql-test/t/partition_myisam.test
+++ b/mysql-test/t/partition_myisam.test
@@ -1,5 +1,4 @@
--- source include/have_partition.inc
-
+--source include/have_partition.inc
--disable_warnings
DROP TABLE IF EXISTS t1, t2;
--enable_warnings
@@ -10,53 +9,88 @@ DROP TABLE IF EXISTS t1, t2;
let $MYSQLD_DATADIR= `SELECT @@datadir`;
-
--echo #
---echo # Bug#50036: Inconsistent errors when using TIMESTAMP
---echo # columns/expressions
-
---echo # Added test with existing TIMESTAMP partitioning (when it was allowed).
-CREATE TABLE t1 (a TIMESTAMP)
-ENGINE = MyISAM
-PARTITION BY HASH (UNIX_TIMESTAMP(a));
-INSERT INTO t1 VALUES ('2000-01-02 03:04:05');
---sorted_result
-SELECT * FROM t1;
-FLUSH TABLES;
---echo # replacing t1.frm with TO_DAYS(a) which was allowed earlier.
---remove_file $MYSQLD_DATADIR/test/t1.frm
---copy_file std_data/parts/t1TIMESTAMP.frm $MYSQLD_DATADIR/test/t1.frm
---echo # Disable warnings, since the result would differ when running with
---echo # --ps-protocol (only for the 'SELECT * FROM t1' statement).
---disable_warnings
---sorted_result
-SELECT * FROM t1;
---enable_warnings
---replace_result MyISAM <curr_engine> InnoDB <curr_engine>
-SHOW CREATE TABLE t1;
-INSERT INTO t1 VALUES ('2001-02-03 04:05:06');
---sorted_result
-SELECT * FROM t1;
-ALTER TABLE t1 ADD PARTITION PARTITIONS 2;
---error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR
-ALTER TABLE t1
-PARTITION BY RANGE (TO_DAYS(a))
-(PARTITION p0 VALUES LESS THAN (10000),
- PARTITION p1 VALUES LESS THAN (MAXVALUE));
+--echo # BUG#11933226 - 60681: CHECKSUM TABLE RETURNS 0 FOR PARTITIONED TABLE
+--echo #
+CREATE TABLE t1 (
+ i INT
+)
+ENGINE=MyISAM
+PARTITION BY RANGE (i)
+(PARTITION p3 VALUES LESS THAN (3),
+ PARTITION p5 VALUES LESS THAN (5),
+ PARTITION pMax VALUES LESS THAN MAXVALUE);
+INSERT INTO t1 VALUES (1), (2), (3), (4), (5), (6);
+CHECKSUM TABLE t1;
+ALTER TABLE t1 CHECKSUM = 1;
+CHECKSUM TABLE t1 EXTENDED;
+--echo # Before patch this returned 0!
+CHECKSUM TABLE t1;
SHOW CREATE TABLE t1;
-CREATE TABLE t2 LIKE t1;
-SHOW CREATE TABLE t2;
-DROP TABLE t2;
-CREATE TABLE t2 SELECT * FROM t1;
-DROP TABLE t2;
-ALTER TABLE t1 PARTITION BY HASH (UNIX_TIMESTAMP(a));
+DROP TABLE t1;
+
+--echo # Same test without partitioning
+CREATE TABLE t1 (
+ i INT
+) ENGINE=MyISAM;
SHOW CREATE TABLE t1;
-ALTER TABLE t1 ADD PARTITION PARTITIONS 2;
+INSERT INTO t1 VALUES (1), (2), (3), (4), (5), (6);
+CHECKSUM TABLE t1;
+ALTER TABLE t1 CHECKSUM = 1;
+CHECKSUM TABLE t1 EXTENDED;
+CHECKSUM TABLE t1;
SHOW CREATE TABLE t1;
---sorted_result
-SELECT * FROM t1;
DROP TABLE t1;
+#
+# Disabled by WL#946: binary format for timestamp column is not compatible.
+# So the trick with replacing FRM file does not work any more.
+#--echo #
+#--echo # Bug#50036: Inconsistent errors when using TIMESTAMP
+#--echo # columns/expressions
+#
+#--echo # Added test with existing TIMESTAMP partitioning (when it was allowed).
+#CREATE TABLE t1 (a TIMESTAMP)
+#ENGINE = MyISAM
+#PARTITION BY HASH (UNIX_TIMESTAMP(a));
+#INSERT INTO t1 VALUES ('2000-01-02 03:04:05');
+#--sorted_result
+#SELECT * FROM t1;
+#FLUSH TABLES;
+#--echo # replacing t1.frm with TO_DAYS(a) which was allowed earlier.
+#--remove_file $MYSQLD_DATADIR/test/t1.frm
+#--copy_file std_data/parts/t1TIMESTAMP.frm $MYSQLD_DATADIR/test/t1.frm
+#--echo # Disable warnings, since the result would differ when running with
+#--echo # --ps-protocol (only for the 'SELECT * FROM t1' statement).
+#--disable_warnings
+#--sorted_result
+#SELECT * FROM t1;
+#--enable_warnings
+#--replace_result MyISAM <curr_engine> InnoDB <curr_engine>
+#SHOW CREATE TABLE t1;
+#INSERT INTO t1 VALUES ('2001-02-03 04:05:06');
+#--sorted_result
+#SELECT * FROM t1;
+#SELECT a, hex(weight_string(a)) FROM t1;
+#ALTER TABLE t1 ADD PARTITION PARTITIONS 2;
+#--error ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR
+#ALTER TABLE t1
+#PARTITION BY RANGE (TO_DAYS(a))
+#(PARTITION p0 VALUES LESS THAN (10000),
+# PARTITION p1 VALUES LESS THAN (MAXVALUE));
+#SHOW CREATE TABLE t1;
+#CREATE TABLE t2 LIKE t1;
+#SHOW CREATE TABLE t2;
+#DROP TABLE t2;
+#CREATE TABLE t2 SELECT * FROM t1;
+#DROP TABLE t2;
+#ALTER TABLE t1 PARTITION BY HASH (UNIX_TIMESTAMP(a));
+#SHOW CREATE TABLE t1;
+#ALTER TABLE t1 ADD PARTITION PARTITIONS 2;
+#SHOW CREATE TABLE t1;
+#--sorted_result
+#SELECT * FROM t1;
+#DROP TABLE t1;
--echo #
--echo # Bug#31931: Mix of handlers error message
@@ -181,3 +215,18 @@ PARTITION BY RANGE (a)
PARTITION pMax VALUES LESS THAN MAXVALUE);
INSERT INTO t1 VALUES (1, "Partition p1, first row");
DROP TABLE t1;
+--echo #
+--echo # bug#11760213-52599: ALTER TABLE REMOVE PARTITIONING ON NON-PARTITIONED
+--echo # TABLE CORRUPTS MYISAM
+--disable_warnings
+DROP TABLE if exists `t1`;
+--enable_warnings
+CREATE TABLE `t1`(`a` INT)ENGINE=myisam;
+ALTER TABLE `t1` ADD COLUMN `b` INT;
+CREATE UNIQUE INDEX `i1` ON `t1`(`b`);
+CREATE UNIQUE INDEX `i2` ON `t1`(`a`);
+ALTER TABLE `t1` ADD PRIMARY KEY (`a`);
+--error ER_PARTITION_MGMT_ON_NONPARTITIONED
+ALTER TABLE `t1` REMOVE PARTITIONING;
+CHECK TABLE `t1` EXTENDED;
+DROP TABLE t1;
diff --git a/mysql-test/t/partition_pruning.test b/mysql-test/t/partition_pruning.test
index 1c8a4d254a8..e7e764ce138 100644
--- a/mysql-test/t/partition_pruning.test
+++ b/mysql-test/t/partition_pruning.test
@@ -622,7 +622,6 @@ insert into t3 values (5),(15);
explain partitions select * from t3 where a=11;
explain partitions select * from t3 where a=10;
explain partitions select * from t3 where a=20;
-
explain partitions select * from t3 where a=30;
# LIST(expr) partitioning
diff --git a/mysql-test/t/partition_truncate.test b/mysql-test/t/partition_truncate.test
index 165213d204c..62ed548808f 100644
--- a/mysql-test/t/partition_truncate.test
+++ b/mysql-test/t/partition_truncate.test
@@ -11,7 +11,7 @@ partition by list (a)
(partition p1 values in (0));
--error ER_WRONG_PARTITION_NAME
alter table t1 truncate partition p1,p1;
---error ER_WRONG_PARTITION_NAME
+--error ER_UNKNOWN_PARTITION
alter table t1 truncate partition p0;
drop table t1;
diff --git a/mysql-test/t/query_cache.test b/mysql-test/t/query_cache.test
index 6e68b57bb3c..a8a71dcf8b3 100644
--- a/mysql-test/t/query_cache.test
+++ b/mysql-test/t/query_cache.test
@@ -1628,6 +1628,28 @@ DROP TABLE t1;
SET GLOBAL query_cache_size= @qc;
--echo #
+--echo End of 5.5 tests
+
+--echo #
+--echo # MDEV-617 LP:671189 - Query cache is not used for tables or
+--echo # databases with dots in their names
+--echo #
+CREATE DATABASE `foo.bar`;
+use `foo.bar`;
+flush status;
+CREATE TABLE moocow (a int);
+INSERT INTO moocow VALUES (1), (2), (3);
+SHOW STATUS LIKE 'Qcache_inserts';
+SELECT * FROM moocow;
+SHOW STATUS LIKE 'Qcache_inserts';
+SHOW STATUS LIKE 'Qcache_hits';
+SELECT * FROM moocow;
+SHOW STATUS LIKE 'Qcache_hits';
+use test;
+drop database `foo.bar`;
+
+--echo End of 10.0 tests
+
--echo restore defaults
SET GLOBAL query_cache_type= default;
SET GLOBAL query_cache_size= default;
diff --git a/mysql-test/t/signal.test b/mysql-test/t/signal.test
index 13a0db2029b..31bc7bc9633 100644
--- a/mysql-test/t/signal.test
+++ b/mysql-test/t/signal.test
@@ -1551,15 +1551,24 @@ drop procedure test_signal $$
--echo # Test where SIGNAL can be used
--echo #
+--echo
+--echo # RETURN statement clears Diagnostics Area, thus
+--echo # the warnings raised in a stored function are not
+--echo # visible outsidef the stored function. So, we're using
+--echo # @@warning_count variable to check that SIGNAL succeeded.
+--echo
+
create function test_signal_func() returns integer
begin
+ DECLARE v INT;
DECLARE warn CONDITION FOR SQLSTATE "01XXX";
SIGNAL warn SET
MESSAGE_TEXT = "This function SIGNAL a warning",
MYSQL_ERRNO = 1012;
- return 5;
+ SELECT @@warning_count INTO v;
+ return v;
end $$
select test_signal_func() $$
diff --git a/mysql-test/t/sp-bugs.test b/mysql-test/t/sp-bugs.test
index 3ab1689e8b2..1ec154f1c69 100644
--- a/mysql-test/t/sp-bugs.test
+++ b/mysql-test/t/sp-bugs.test
@@ -167,6 +167,15 @@ USE test;
--echo End of 5.1 tests
--echo #
+--echo # BUG#13489996 valgrind:conditional jump or move depends on
+--echo # uninitialised values-field_blob
+--echo #
+
+CREATE FUNCTION sf() RETURNS BLOB RETURN "";
+SELECT sf();
+DROP FUNCTION sf;
+
+--echo #
--echo # Bug#11763507 - 56224: FUNCTION NAME IS CASE-SENSITIVE
--echo #
SET @@SQL_MODE = '';
@@ -228,3 +237,4 @@ DROP PROCEDURE testp_bug11763507;
DROP FUNCTION testf_bug11763507;
--echo #END OF BUG#11763507 test.
+
diff --git a/mysql-test/t/sp-error.test b/mysql-test/t/sp-error.test
index 063b30c01cb..711e639191e 100644
--- a/mysql-test/t/sp-error.test
+++ b/mysql-test/t/sp-error.test
@@ -2881,3 +2881,973 @@ SHOW WARNINGS;
DROP TABLE t1;
DROP TABLE t2;
DROP PROCEDURE p1;
+
+--echo
+--echo ###################################################################
+--echo # Tests for the following bugs:
+--echo # - Bug#11763171: 55852 - Possibly inappropriate handler activation.
+--echo # - Bug#11749343: 38806 - Wrong scope for SQL HANDLERS in SP.
+--echo ###################################################################
+--echo
+
+#
+# Structure of SQL-block:
+# BEGIN
+# <Handler declaration block>
+# <Statement block>
+# END
+#
+# Scope of Handler-decl-block is Statement-block.
+# I.e. SQL-conditions thrown in the Handler-decl-block can not be handled by
+# the same block, only by outer SQL-blocks.
+#
+# This rule is recursive, i.e. if a Handler-decl-block has nested SQL-blocks,
+# the SQL-conditions from those nested blocks can not be handled by this
+# Handler-decl-block, only by outer SQL-blocks.
+#
+
+delimiter |;
+
+--echo
+--echo # -- Check that SQL-conditions thrown by Statement-blocks are
+--echo # -- handled by Handler-decl blocks properly.
+--echo
+
+CREATE PROCEDURE p1()
+BEGIN
+ DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+ SELECT 'H1' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLWARNING
+ SELECT 'H2' AS HandlerId;
+
+ SIGNAL SQLSTATE '01000'; # Should be handled by H2.
+END|
+
+--echo
+CALL p1()|
+
+--echo
+--echo # -- Check that SQL-conditions thrown by Statement-blocks are
+--echo # -- handled by Handler-decl blocks properly in case of nested
+--echo # -- SQL-blocks.
+--echo
+
+CREATE PROCEDURE p2()
+BEGIN
+ DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+ SELECT 'H1' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLWARNING
+ SELECT 'H2' AS HandlerId;
+
+ BEGIN
+
+ SELECT 'B1' AS BlockId;
+ BEGIN
+
+ SELECT 'B2' AS BlockId;
+ BEGIN
+ SELECT 'B3' AS BlockId;
+ SIGNAL SQLSTATE '01000'; # Should be handled by H2.
+ END;
+
+ END;
+
+ END;
+
+END|
+
+--echo
+CALL p2()|
+
+--echo
+--echo # -- Check SQL-handler resolution rules.
+--echo
+
+CREATE PROCEDURE p3()
+BEGIN
+ DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+ SELECT 'H1' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLWARNING
+ SELECT 'H2' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLSTATE '01000'
+ SELECT 'H3' AS HandlerId;
+
+ SIGNAL SQLSTATE '01000'; # Should be handled by H3.
+END|
+
+--echo
+CALL p3()|
+--echo
+
+CREATE PROCEDURE p4()
+BEGIN
+ DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+ SELECT 'H1' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLSTATE '01000'
+ SELECT 'H2' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLWARNING
+ SELECT 'H3' AS HandlerId;
+
+ SIGNAL SQLSTATE '01000'; # Should be handled by H2.
+END|
+
+--echo
+CALL p4()|
+--echo
+
+CREATE PROCEDURE p5()
+BEGIN
+ DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+ SELECT 'H1' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLSTATE '01000'
+ SELECT 'H2' AS HandlerId;
+
+ BEGIN
+ DECLARE CONTINUE HANDLER FOR SQLWARNING
+ SELECT 'H3' AS HandlerId;
+
+ SIGNAL SQLSTATE '01000'; # Should be handled by H3.
+ END;
+END|
+
+--echo
+CALL p5()|
+
+--echo
+--echo # -- Check that handlers don't handle its own exceptions.
+--echo
+
+CREATE PROCEDURE p6()
+BEGIN
+ DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+ BEGIN
+ SELECT 'H1' AS HandlerId;
+ SIGNAL SQLSTATE 'HY000'; # Should *not* be handled by H1.
+ END;
+
+ SELECT 'S1' AS SignalId;
+ SIGNAL SQLSTATE 'HY000'; # Should be handled by H1.
+END|
+
+--echo
+--error ER_SIGNAL_EXCEPTION
+CALL p6()|
+
+--echo
+--echo # -- Check that handlers don't handle its own warnings.
+--echo
+
+CREATE PROCEDURE p7()
+BEGIN
+ DECLARE CONTINUE HANDLER FOR SQLWARNING
+ BEGIN
+ SELECT 'H1' AS HandlerId;
+ SIGNAL SQLSTATE '01000'; # Should *not* be handled by H1.
+ END;
+
+ SELECT 'S1' AS SignalId;
+ SIGNAL SQLSTATE '01000'; # Should be handled by H1.
+END|
+
+--echo
+CALL p7()|
+
+--echo
+--echo # -- Check that conditions for handlers are not handled by the handlers
+--echo # -- from the same block.
+--echo
+
+CREATE PROCEDURE p8()
+BEGIN
+ DECLARE CONTINUE HANDLER FOR SQLWARNING
+ SELECT 'H1' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+ BEGIN
+ SELECT 'H2' AS HandlerId;
+ SIGNAL SQLSTATE '01000'; # Should *not* be handled by H1.
+ END;
+
+ SELECT 'S1' AS SignalId;
+ SIGNAL SQLSTATE 'HY000'; # Should be handled by H2.
+END|
+
+--echo
+CALL p8()|
+
+--echo
+--echo # -- Check that conditions for handlers are not handled by the handlers
+--echo # -- from the same block even if they are thrown deep down the stack.
+--echo
+
+CREATE PROCEDURE p9()
+BEGIN
+
+ DECLARE CONTINUE HANDLER FOR SQLSTATE '01000'
+ SELECT 'Wrong:H1:1' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLWARNING
+ SELECT 'Wrong:H1:2' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+ BEGIN
+
+ DECLARE CONTINUE HANDLER FOR SQLSTATE '01000'
+ SELECT 'Wrong:H2:1' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLWARNING
+ SELECT 'Wrong:H2:2' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+ BEGIN
+
+ DECLARE CONTINUE HANDLER FOR SQLSTATE '01000'
+ SELECT 'Wrong:H3:1' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLWARNING
+ SELECT 'Wrong:H3:2' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+ BEGIN
+
+ DECLARE CONTINUE HANDLER FOR SQLSTATE '01000'
+ SELECT 'Wrong:H4:1' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLWARNING
+ SELECT 'Wrong:H4:2' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+ BEGIN
+
+ DECLARE CONTINUE HANDLER FOR SQLSTATE '01000'
+ SELECT 'Wrong:H5:1' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLWARNING
+ SELECT 'Wrong:H5:2' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+ BEGIN
+
+ DECLARE CONTINUE HANDLER FOR SQLSTATE '01000'
+ SELECT 'Wrong:H6:1' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLWARNING
+ SELECT 'Wrong:H6:2' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+ BEGIN
+ SELECT 'H2' AS HandlerId;
+ SIGNAL SQLSTATE '01000'; # Should *not* be handled by H1.
+ END;
+
+ SELECT 'S6' AS SignalId;
+ SIGNAL SQLSTATE 'HY000';
+ END;
+
+ SELECT 'S5' AS SignalId;
+ SIGNAL SQLSTATE 'HY000';
+
+ END;
+
+ SELECT 'S4' AS SignalId;
+ SIGNAL SQLSTATE 'HY000';
+
+ END;
+
+ SELECT 'S3' AS SignalId;
+ SIGNAL SQLSTATE 'HY000';
+
+ END;
+
+ SELECT 'S2' AS SignalId;
+ SIGNAL SQLSTATE 'HY000';
+
+ END;
+
+ SELECT 'S1' AS SignalId;
+ SIGNAL SQLSTATE 'HY000'; # Should be handled by H2.
+
+END|
+
+--echo
+CALL p9()|
+
+--echo
+--echo # -- Check that handlers are choosen properly in case of deep stack and
+--echo # -- nested SQL-blocks.
+--echo
+
+CREATE PROCEDURE p10()
+BEGIN
+ DECLARE CONTINUE HANDLER FOR SQLSTATE '01000'
+ SELECT 'H1' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLWARNING
+ SELECT 'H2' AS HandlerId;
+
+ BEGIN
+ BEGIN
+ BEGIN
+
+ DECLARE CONTINUE HANDLER FOR SQLSTATE '01000'
+ SELECT 'Wrong:H1:1' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLWARNING
+ SELECT 'Wrong:H1:2' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+ BEGIN
+
+ DECLARE CONTINUE HANDLER FOR SQLSTATE '01000'
+ SELECT 'Wrong:H2:1' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLWARNING
+ SELECT 'Wrong:H2:2' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+ BEGIN
+
+ DECLARE CONTINUE HANDLER FOR SQLSTATE '01000'
+ SELECT 'Wrong:H3:1' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLWARNING
+ SELECT 'Wrong:H3:2' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+ BEGIN
+
+ DECLARE CONTINUE HANDLER FOR SQLSTATE '01000'
+ SELECT 'Wrong:H4:1' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLWARNING
+ SELECT 'Wrong:H4:2' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+ BEGIN
+
+ DECLARE CONTINUE HANDLER FOR SQLSTATE '01000'
+ SELECT 'Wrong:H5:1' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLWARNING
+ SELECT 'Wrong:H5:2' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+ BEGIN
+
+ DECLARE CONTINUE HANDLER FOR SQLSTATE '01000'
+ SELECT 'Wrong:H6:1' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLWARNING
+ SELECT 'Wrong:H6:2' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+ BEGIN
+ SELECT 'H2' AS HandlerId;
+ SIGNAL SQLSTATE '01000'; # Should be handled by H1.
+ END;
+
+ SELECT 'S6' AS SignalId;
+ SIGNAL SQLSTATE 'HY000';
+ END;
+
+ SELECT 'S5' AS SignalId;
+ SIGNAL SQLSTATE 'HY000';
+
+ END;
+
+ SELECT 'S4' AS SignalId;
+ SIGNAL SQLSTATE 'HY000';
+
+ END;
+
+ SELECT 'S3' AS SignalId;
+ SIGNAL SQLSTATE 'HY000';
+
+ END;
+
+ SELECT 'S2' AS SignalId;
+ SIGNAL SQLSTATE 'HY000';
+
+ END;
+
+ SELECT 'S1' AS SignalId;
+ SIGNAL SQLSTATE 'HY000'; # Should be handled by H2.
+
+ END;
+ END;
+ END;
+END|
+
+--echo
+CALL p10()|
+
+--echo
+--echo # -- Test stored procedure from Peter's mail.
+--echo
+
+CREATE PROCEDURE p11()
+BEGIN
+ DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+ SELECT 'H1' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLWARNING
+ SELECT 'H2' AS HandlerId;
+
+ BEGIN
+ DECLARE CONTINUE HANDLER FOR SQLSTATE '01000', 1249
+ BEGIN
+ DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
+ SELECT 'H3' AS HandlerId;
+
+ DECLARE CONTINUE HANDLER FOR SQLWARNING
+ SELECT 'H4' AS HandlerId;
+
+ BEGIN
+ SELECT 'H5' AS HandlerId;
+
+ SELECT 'S3' AS SignalId;
+ SIGNAL SQLSTATE 'HY000'; # H3
+
+ SELECT 'S4' AS SignalId;
+ SIGNAL SQLSTATE '22003'; # H3
+
+ SELECT 'S5' AS SignalId;
+ SIGNAL SQLSTATE '01000' SET MYSQL_ERRNO = 1249; # H4
+ END;
+ END;
+
+ SELECT 'S6' AS SignalId;
+ SIGNAL SQLSTATE 'HY000'; # H1
+
+ SELECT 'S7' AS SignalId;
+ SIGNAL SQLSTATE '22003'; # H1
+
+ SELECT 'S8' AS SignalId;
+ SIGNAL SQLSTATE '01000' SET MYSQL_ERRNO = 1249; # H5
+ END;
+
+ SELECT 'S1' AS SignalId;
+ SIGNAL SQLSTATE 'HY000'; # H1
+
+ SELECT 'S2' AS SignalId;
+ SIGNAL SQLSTATE '01000' SET MYSQL_ERRNO = 1249; # H2
+END|
+
+--echo
+CALL p11()|
+
+--echo
+--echo # -- Check that runtime stack-trace can be deeper than parsing-time one.
+--echo
+
+CREATE PROCEDURE p12()
+BEGIN
+ DECLARE CONTINUE HANDLER FOR SQLSTATE '01001'
+ BEGIN
+ DECLARE CONTINUE HANDLER FOR SQLSTATE '01001'
+ BEGIN
+ DECLARE CONTINUE HANDLER FOR SQLSTATE '01001'
+ BEGIN
+ DECLARE CONTINUE HANDLER FOR SQLSTATE '01001'
+ BEGIN
+ DECLARE CONTINUE HANDLER FOR SQLSTATE '01001'
+ BEGIN
+ SELECT 'H1:5' AS HandlerId;
+ SIGNAL SQLSTATE '01002';
+ END;
+ SELECT 'H1:4' AS HandlerId;
+ SIGNAL SQLSTATE '01001';
+ END;
+ SELECT 'H1:3' AS HandlerId;
+ SIGNAL SQLSTATE '01001';
+ END;
+ SELECT 'H1:2' AS HandlerId;
+ SIGNAL SQLSTATE '01001';
+ END;
+ SELECT 'H1:1' AS HandlerId;
+ SIGNAL SQLSTATE '01001';
+ END;
+
+ #########################################################
+
+ DECLARE CONTINUE HANDLER FOR SQLSTATE '01002'
+ SELECT 'OK' AS Msg;
+
+ #########################################################
+
+ BEGIN
+
+ DECLARE CONTINUE HANDLER FOR SQLWARNING
+ BEGIN
+ DECLARE CONTINUE HANDLER FOR SQLWARNING
+ BEGIN
+ DECLARE CONTINUE HANDLER FOR SQLWARNING
+ BEGIN
+ DECLARE CONTINUE HANDLER FOR SQLWARNING
+ BEGIN
+ DECLARE CONTINUE HANDLER FOR SQLWARNING
+ BEGIN
+ SELECT 'H2:5' AS HandlerId;
+ SIGNAL SQLSTATE '01001';
+ END;
+ SELECT 'H2:4' AS HandlerId;
+ SIGNAL SQLSTATE '01000';
+ END;
+ SELECT 'H2:3' AS HandlerId;
+ SIGNAL SQLSTATE '01000';
+ END;
+ SELECT 'H2:2' AS HandlerId;
+ SIGNAL SQLSTATE '01000';
+ END;
+ SELECT 'H2:1' AS HandlerId;
+ SIGNAL SQLSTATE '01000';
+ END;
+
+ #######################################################
+
+ SELECT 'Throw 01000' AS Msg;
+ SIGNAL SQLSTATE '01000';
+ END;
+
+END|
+
+--echo
+CALL p12()|
+
+--echo
+--echo # -- Check that handler-call-frames are removed properly for EXIT
+--echo # -- handlers.
+--echo
+
+CREATE PROCEDURE p13()
+BEGIN
+
+ DECLARE CONTINUE HANDLER FOR SQLWARNING
+ BEGIN
+ DECLARE CONTINUE HANDLER FOR SQLWARNING
+ BEGIN
+ DECLARE EXIT HANDLER FOR SQLWARNING
+ BEGIN
+ SELECT 'EXIT handler 3' AS Msg;
+ END;
+
+ SELECT 'CONTINUE handler 2: 1' AS Msg;
+ SIGNAL SQLSTATE '01000';
+ SELECT 'CONTINUE handler 2: 2' AS Msg;
+ END;
+
+ SELECT 'CONTINUE handler 1: 1' AS Msg;
+ SIGNAL SQLSTATE '01000';
+ SELECT 'CONTINUE handler 1: 2' AS Msg;
+ END;
+
+ SELECT 'Throw 01000' AS Msg;
+ SIGNAL SQLSTATE '01000';
+END|
+
+--echo
+CALL p13()|
+
+delimiter ;|
+
+--echo
+--echo # That's it. Cleanup.
+--echo
+
+DROP PROCEDURE p1;
+DROP PROCEDURE p2;
+DROP PROCEDURE p3;
+DROP PROCEDURE p4;
+DROP PROCEDURE p5;
+DROP PROCEDURE p6;
+DROP PROCEDURE p7;
+DROP PROCEDURE p8;
+DROP PROCEDURE p9;
+DROP PROCEDURE p10;
+DROP PROCEDURE p11;
+DROP PROCEDURE p12;
+DROP PROCEDURE p13;
+
+--echo
+--echo # Bug#12731619: NESTED SP HANDLERS CAN TRIGGER ASSERTION
+--echo
+
+--disable_warnings
+DROP FUNCTION IF EXISTS f1;
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+CREATE TABLE t1(msg VARCHAR(255));
+
+delimiter |;
+CREATE FUNCTION f1() RETURNS INT
+BEGIN
+
+ DECLARE CONTINUE HANDLER FOR SQLEXCEPTION # handler 1
+ BEGIN
+ DECLARE CONTINUE HANDLER FOR SQLEXCEPTION # handler 2
+ BEGIN
+ INSERT INTO t1 VALUE('WRONG: Inside H2');
+ RETURN 2;
+ END;
+
+ INSERT INTO t1 VALUE('CORRECT: Inside H1');
+ RETURN 1;
+ END;
+
+ BEGIN
+ DECLARE CONTINUE HANDLER FOR SQLWARNING # handler 3
+ BEGIN
+ INSERT INTO t1 VALUE('WRONG: Inside H3');
+ RETURN 3;
+ END;
+
+ INSERT INTO t1 VALUE('CORRECT: Calling f1()');
+ RETURN f1(); # -- exception here
+ END;
+
+ INSERT INTO t1 VALUE('WRONG: Returning 10');
+ RETURN 10;
+
+END|
+
+delimiter ;|
+
+--echo
+SELECT f1();
+--echo
+SELECT * FROM t1;
+--echo
+
+DROP FUNCTION f1;
+DROP TABLE t1;
+
+
+--echo
+--echo # Check that handled SQL-conditions are properly cleared from DA.
+--echo
+
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+DROP TABLE IF EXISTS t2;
+DROP PROCEDURE IF EXISTS p1;
+DROP PROCEDURE IF EXISTS p2;
+DROP PROCEDURE IF EXISTS p3;
+DROP PROCEDURE IF EXISTS p4;
+DROP PROCEDURE IF EXISTS p5;
+--enable_warnings
+
+CREATE TABLE t1(a CHAR, b CHAR, c CHAR);
+CREATE TABLE t2(a SMALLINT, b SMALLINT, c SMALLINT);
+
+delimiter |;
+
+--echo
+--echo # Check that SQL-conditions for which SQL-handler has been invoked,
+--echo # are cleared from the Diagnostics Area. Note, there might be several
+--echo # SQL-conditions, but SQL-handler must be invoked only once.
+--echo
+
+CREATE PROCEDURE p1()
+BEGIN
+ DECLARE EXIT HANDLER FOR SQLWARNING
+ SELECT 'Warning caught' AS msg;
+
+ # The INSERT below raises 3 SQL-conditions (warnings). The EXIT HANDLER
+ # above must be invoked once (for one condition), but all three conditions
+ # must be cleared from the Diagnostics Area.
+
+ INSERT INTO t1 VALUES('qqqq', 'ww', 'eee');
+
+ # The following INSERT will not be executed, because of the EXIT HANDLER.
+
+ INSERT INTO t1 VALUES('zzz', 'xx', 'yyyy');
+END|
+
+--echo
+CALL p1()|
+--echo
+SELECT * FROM t1|
+
+--echo
+--echo # Check that SQL-conditions for which SQL-handler has *not* been
+--echo # invoked, are *still* cleared from the Diagnostics Area.
+--echo
+
+CREATE PROCEDURE p2()
+BEGIN
+ DECLARE CONTINUE HANDLER FOR 1292
+ SELECT 'Warning 1292 caught' AS msg;
+
+ # The following INSERT raises 6 SQL-warnings with code 1292,
+ # and 3 SQL-warnings with code 1264. The CONTINUE HANDLER above must be
+ # invoked once, and all nine SQL-warnings must be cleared from
+ # the Diagnostics Area.
+
+ INSERT INTO t2
+ SELECT
+ CAST(CONCAT(CAST('1 ' AS UNSIGNED INTEGER), '999999 ') AS SIGNED INTEGER),
+ CAST(CONCAT(CAST('2 ' AS UNSIGNED INTEGER), '999999 ') AS SIGNED INTEGER),
+ CAST(CONCAT(CAST('3 ' AS UNSIGNED INTEGER), '999999 ') AS SIGNED INTEGER);
+END|
+
+--echo
+CALL p2()|
+
+--echo
+--echo # Check that if there are two equally ranked SQL-handlers to handle
+--echo # SQL-conditions from SQL-statement, only one of them will be invoked.
+--echo
+
+CREATE PROCEDURE p3()
+BEGIN
+ DECLARE CONTINUE HANDLER FOR 1292
+ SELECT 'Warning 1292 caught' AS msg;
+
+ DECLARE CONTINUE HANDLER FOR 1264
+ SELECT 'Warning 1264 caught' AS msg;
+
+ # The following INSERT raises 6 SQL-warnings with code 1292,
+ # and 3 SQL-warnings with code 1264. Only one of the CONTINUE HANDLERs above
+ # must be called, and only once. The SQL Standard does not define, which one
+ # should be invoked.
+
+ INSERT INTO t2
+ SELECT
+ CAST(CONCAT(CAST('1 ' AS UNSIGNED INTEGER), '999999 ') AS SIGNED INTEGER),
+ CAST(CONCAT(CAST('2 ' AS UNSIGNED INTEGER), '999999 ') AS SIGNED INTEGER),
+ CAST(CONCAT(CAST('3 ' AS UNSIGNED INTEGER), '999999 ') AS SIGNED INTEGER);
+END|
+
+--echo
+CALL p3()|
+
+--echo
+--echo # The same as p3, but 1264 comes first.
+--echo
+
+CREATE PROCEDURE p4()
+BEGIN
+ DECLARE CONTINUE HANDLER FOR 1292
+ SELECT 'Warning 1292 caught' AS msg;
+
+ DECLARE CONTINUE HANDLER FOR 1264
+ SELECT 'Warning 1264 caught' AS msg;
+
+ # The following INSERT raises 4 SQL-warnings with code 1292,
+ # and 3 SQL-warnings with code 1264. Only one of the CONTINUE HANDLERs above
+ # must be called, and only once. The SQL Standard does not define, which one
+ # should be invoked.
+
+ INSERT INTO t2
+ SELECT
+ CAST(999999 AS SIGNED INTEGER),
+ CAST(CONCAT(CAST('2 ' AS UNSIGNED INTEGER), '999999 ') AS SIGNED INTEGER),
+ CAST(CONCAT(CAST('3 ' AS UNSIGNED INTEGER), '999999 ') AS SIGNED INTEGER);
+END|
+
+--echo
+CALL p4()|
+
+--echo
+--echo # Check that if a SQL-handler raised its own SQL-conditions, there are
+--echo # preserved after handler exit.
+--echo
+
+CREATE PROCEDURE p5()
+BEGIN
+ DECLARE EXIT HANDLER FOR 1292
+ BEGIN
+ SELECT 'Handler for 1292 (1)' AS Msg;
+ SIGNAL SQLSTATE '01000' SET MYSQL_ERRNO = 1234;
+ SHOW WARNINGS;
+ SELECT 'Handler for 1292 (2)' AS Msg;
+ END;
+
+ INSERT INTO t2
+ SELECT
+ CAST(999999 AS SIGNED INTEGER),
+ CAST(CONCAT(CAST('2 ' AS UNSIGNED INTEGER), '999999 ') AS SIGNED INTEGER),
+ CAST(CONCAT(CAST('3 ' AS UNSIGNED INTEGER), '999999 ') AS SIGNED INTEGER);
+END|
+
+--echo
+CALL p5()|
+
+--echo
+--echo # Check that SQL-conditions are available inside the handler, but
+--echo # cleared after the handler exits.
+--echo
+
+CREATE PROCEDURE p6()
+BEGIN
+ DECLARE CONTINUE HANDLER FOR 1292
+ BEGIN
+ SHOW WARNINGS;
+ SELECT 'Handler for 1292' Msg;
+ END;
+
+ INSERT INTO t2
+ SELECT
+ CAST(CONCAT(CAST('1 ' AS UNSIGNED INTEGER), '999999 ') AS SIGNED INTEGER),
+ CAST(CONCAT(CAST('2 ' AS UNSIGNED INTEGER), '999999 ') AS SIGNED INTEGER),
+ CAST(CONCAT(CAST('3 ' AS UNSIGNED INTEGER), '999999 ') AS SIGNED INTEGER);
+END|
+
+--echo
+CALL p6()|
+
+delimiter ;|
+
+--echo
+DROP PROCEDURE p1;
+DROP PROCEDURE p2;
+DROP PROCEDURE p3;
+DROP PROCEDURE p4;
+DROP PROCEDURE p5;
+DROP PROCEDURE p6;
+DROP TABLE t1;
+DROP TABLE t2;
+
+--echo
+--echo # Bug#13059316: ASSERTION FAILURE IN SP_RCONTEXT.CC
+--echo # Check DECLARE statements that raise conditions before handlers
+--echo # are declared.
+--echo
+
+--disable_warnings
+DROP PROCEDURE IF EXISTS p1;
+DROP PROCEDURE IF EXISTS p2;
+--enable_warnings
+
+delimiter |;
+
+CREATE PROCEDURE p1()
+BEGIN
+ DECLARE var1 INTEGER DEFAULT 'string';
+ DECLARE EXIT HANDLER FOR SQLWARNING SELECT 'H1';
+END|
+
+--echo
+CALL p1()|
+--echo
+
+CREATE PROCEDURE p2()
+BEGIN
+ DECLARE EXIT HANDLER FOR SQLWARNING SELECT 'H2';
+ CALL p1();
+END|
+
+--echo
+CALL p2()|
+
+delimiter ;|
+
+--echo
+DROP PROCEDURE p1;
+DROP PROCEDURE p2;
+
+
+--echo #
+--echo # Bug#13113222 RQG_SIGNAL_RESIGNAL FAILED WITH ASSERTION.
+--echo #
+
+--disable_warnings
+DROP PROCEDURE IF EXISTS p1;
+DROP PROCEDURE IF EXISTS p2;
+--enable_warnings
+
+delimiter |;
+CREATE PROCEDURE p1()
+BEGIN
+ DECLARE CONTINUE HANDLER FOR SQLEXCEPTION SELECT 'triggered p1';
+ # This will trigger an error.
+ SIGNAL SQLSTATE 'HY000';
+END|
+
+CREATE PROCEDURE p2()
+BEGIN
+ DECLARE CONTINUE HANDLER FOR SQLWARNING SELECT 'triggered p2';
+ # This will trigger a warning.
+ SIGNAL SQLSTATE '01000';
+END|
+delimiter ;|
+
+SET @old_max_error_count= @@session.max_error_count;
+SET SESSION max_error_count= 0;
+CALL p1();
+CALL p2();
+SET SESSION max_error_count= @old_max_error_count;
+
+DROP PROCEDURE p1;
+DROP PROCEDURE p2;
+
+--echo
+--echo # Bug#12652873: 61392: Continue handler for NOT FOUND being triggered
+--echo # from internal stored function.
+--echo
+
+--disable_warnings
+DROP FUNCTION IF EXISTS f1;
+DROP FUNCTION IF EXISTS f2;
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+--echo
+
+CREATE TABLE t1 (a INT, b INT);
+INSERT INTO t1 VALUES (1, 2);
+
+delimiter |;
+
+--echo
+--echo # f1() raises NOT_FOUND condition.
+--echo # Raising NOT_FOUND can not be simulated by SIGNAL,
+--echo # because SIGNAL would raise SQL-error in that case.
+--echo
+
+CREATE FUNCTION f1() RETURNS INTEGER
+BEGIN
+ DECLARE v VARCHAR(5) DEFAULT -1;
+ SELECT b FROM t1 WHERE a = 2 INTO v;
+ RETURN v;
+END|
+
+--echo
+--echo # Here we check that the NOT_FOUND condition raised in f1()
+--echo # is not visible in the outer function (f2), i.e. the continue
+--echo # handler in f2() will not be called.
+--echo
+
+CREATE FUNCTION f2() RETURNS INTEGER
+BEGIN
+ DECLARE v INTEGER;
+
+ DECLARE CONTINUE HANDLER FOR NOT FOUND
+ SET @msg = 'Handler activated.';
+
+ SELECT f1() INTO v;
+
+ RETURN v;
+END|
+
+delimiter ;|
+
+SET @msg = '';
+
+--echo
+SELECT f2();
+--echo
+SELECT @msg;
+--echo
+
+DROP FUNCTION f1;
+DROP FUNCTION f2;
+DROP TABLE t1;
diff --git a/mysql-test/t/strict.test b/mysql-test/t/strict.test
index c429e9cfe5b..71b625e0843 100644
--- a/mysql-test/t/strict.test
+++ b/mysql-test/t/strict.test
@@ -1350,3 +1350,23 @@ select count(*) from t1 where a is null;
drop table t1;
--echo End of 5.0 tests
+
+--echo #
+--echo # Start of 5.6 tests
+--echo #
+
+--echo #
+--echo # WL#946 TIME/TIMESTAMP/DATETIME with fractional seconds: CAST to DATETIME
+--echo #
+
+--echo #
+--echo # STR_TO_DATE with NO_ZERO_DATE did not return NULL (with warning)
+--echo # in get_date(). Only did in val_str() and val_int().
+SET sql_mode='NO_ZERO_DATE';
+SELECT STR_TO_DATE('2001','%Y'),CONCAT(STR_TO_DATE('2001','%Y')), STR_TO_DATE('2001','%Y')+1, STR_TO_DATE('0','%Y')+1, STR_TO_DATE('0000','%Y')+1;
+SET sql_mode='NO_ZERO_IN_DATE';
+SELECT STR_TO_DATE('2001','%Y'),CONCAT(STR_TO_DATE('2001','%Y')), STR_TO_DATE('2001','%Y')+1, STR_TO_DATE('0000','%Y')+1;
+
+--echo #
+--echo # End of 5.6 tests
+--echo #
diff --git a/mysql-test/t/system_mysql_db_fix40123.test b/mysql-test/t/system_mysql_db_fix40123.test
index 1b89ea2001b..18df3adb8b1 100644
--- a/mysql-test/t/system_mysql_db_fix40123.test
+++ b/mysql-test/t/system_mysql_db_fix40123.test
@@ -78,7 +78,7 @@ CREATE TABLE index_stats (db_name varchar(64) NOT NULL, table_name varchar(64) N
-- disable_query_log
# Drop all tables created by this test
-DROP TABLE db, host, user, func, plugin, tables_priv, columns_priv, procs_priv, gtid_slave_pos, servers, help_category, help_keyword, help_relation, help_topic, proc, time_zone, time_zone_leap_second, time_zone_name, time_zone_transition, time_zone_transition_type, general_log, slow_log, event, ndb_binlog_index, proxies_priv, slave_master_info, slave_relay_log_info, innodb_index_stats, innodb_table_stats, slave_worker_info, table_stats, column_stats, index_stats;
+DROP TABLE db, host, user, func, plugin, tables_priv, columns_priv, procs_priv, servers, help_category, help_keyword, help_relation, help_topic, proc, time_zone, time_zone_leap_second, time_zone_name, time_zone_transition, time_zone_transition_type, general_log, slow_log, event, proxies_priv, innodb_index_stats, innodb_table_stats, table_stats, column_stats, index_stats;
-- enable_query_log
diff --git a/mysql-test/t/system_mysql_db_fix50030.test b/mysql-test/t/system_mysql_db_fix50030.test
index 454711a93e6..36fa6285060 100644
--- a/mysql-test/t/system_mysql_db_fix50030.test
+++ b/mysql-test/t/system_mysql_db_fix50030.test
@@ -85,7 +85,7 @@ CREATE TABLE index_stats (db_name varchar(64) NOT NULL, table_name varchar(64) N
-- disable_query_log
# Drop all tables created by this test
-DROP TABLE db, host, user, func, plugin, tables_priv, columns_priv, procs_priv, gtid_slave_pos, servers, help_category, help_keyword, help_relation, help_topic, proc, time_zone, time_zone_leap_second, time_zone_name, time_zone_transition, time_zone_transition_type, general_log, slow_log, event, ndb_binlog_index, proxies_priv, slave_master_info, slave_relay_log_info, innodb_index_stats, innodb_table_stats, slave_worker_info, table_stats, column_stats, index_stats;
+DROP TABLE db, host, user, func, plugin, tables_priv, columns_priv, procs_priv, servers, help_category, help_keyword, help_relation, help_topic, proc, time_zone, time_zone_leap_second, time_zone_name, time_zone_transition, time_zone_transition_type, general_log, slow_log, event, proxies_priv, innodb_index_stats, innodb_table_stats, table_stats, column_stats, index_stats;
-- enable_query_log
diff --git a/mysql-test/t/system_mysql_db_fix50117.test b/mysql-test/t/system_mysql_db_fix50117.test
index af1bcf2a6e0..6ee7d0fe989 100644
--- a/mysql-test/t/system_mysql_db_fix50117.test
+++ b/mysql-test/t/system_mysql_db_fix50117.test
@@ -84,7 +84,8 @@ CREATE TABLE IF NOT EXISTS procs_priv ( Host char(60) binary DEFAULT '' NOT NULL
CREATE TABLE IF NOT EXISTS event ( db char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', name char(64) CHARACTER SET utf8 NOT NULL default '', body longblob NOT NULL, definer char(77) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', execute_at DATETIME default NULL, interval_value int(11) default NULL, interval_field ENUM('YEAR','QUARTER','MONTH','DAY','HOUR','MINUTE','WEEK','SECOND','MICROSECOND','YEAR_MONTH','DAY_HOUR','DAY_MINUTE','DAY_SECOND','HOUR_MINUTE','HOUR_SECOND','MINUTE_SECOND','DAY_MICROSECOND','HOUR_MICROSECOND','MINUTE_MICROSECOND','SECOND_MICROSECOND') default NULL, created TIMESTAMP NOT NULL, modified TIMESTAMP NOT NULL, last_executed DATETIME default NULL, starts DATETIME default NULL, ends DATETIME default NULL, status ENUM('ENABLED','DISABLED') NOT NULL default 'ENABLED', on_completion ENUM('DROP','PRESERVE') NOT NULL default 'DROP', sql_mode set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','NOT_USED','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH') DEFAULT '' NOT NULL, comment char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', time_zone char(64) CHARACTER SET latin1 NOT NULL DEFAULT 'SYSTEM', PRIMARY KEY (db, name) ) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT 'Events';
-CREATE TABLE IF NOT EXISTS ndb_binlog_index (Position BIGINT UNSIGNED NOT NULL, File VARCHAR(255) NOT NULL, epoch BIGINT UNSIGNED NOT NULL, inserts BIGINT UNSIGNED NOT NULL, updates BIGINT UNSIGNED NOT NULL, deletes BIGINT UNSIGNED NOT NULL, schemaops BIGINT UNSIGNED NOT NULL, PRIMARY KEY(epoch)) ENGINE=MYISAM;
+# MariaDB: don't:
+# CREATE TABLE IF NOT EXISTS ndb_binlog_index (Position BIGINT UNSIGNED NOT NULL, File VARCHAR(255) NOT NULL, epoch BIGINT UNSIGNED NOT NULL, inserts BIGINT UNSIGNED NOT NULL, updates BIGINT UNSIGNED NOT NULL, deletes BIGINT UNSIGNED NOT NULL, schemaops BIGINT UNSIGNED NOT NULL, PRIMARY KEY(epoch)) ENGINE=MYISAM;
CREATE TABLE table_stats (db_name varchar(64) NOT NULL, table_name varchar(64) NOT NULL, cardinality bigint(21) unsigned DEFAULT NULL, PRIMARY KEY (db_name,table_name) ) ENGINE=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Statistics on Tables';
@@ -104,7 +105,7 @@ CREATE TABLE index_stats (db_name varchar(64) NOT NULL, table_name varchar(64) N
-- disable_query_log
# Drop all tables created by this test
-DROP TABLE db, host, user, func, plugin, tables_priv, columns_priv, procs_priv, gtid_slave_pos, servers, help_category, help_keyword, help_relation, help_topic, proc, time_zone, time_zone_leap_second, time_zone_name, time_zone_transition, time_zone_transition_type, general_log, slow_log, event, ndb_binlog_index, proxies_priv, slave_master_info, slave_relay_log_info, innodb_index_stats, innodb_table_stats, slave_worker_info, table_stats, column_stats, index_stats;
+DROP TABLE db, host, user, func, plugin, tables_priv, columns_priv, procs_priv, servers, help_category, help_keyword, help_relation, help_topic, proc, time_zone, time_zone_leap_second, time_zone_name, time_zone_transition, time_zone_transition_type, general_log, slow_log, event, proxies_priv, innodb_index_stats, innodb_table_stats, table_stats, column_stats, index_stats;
-- enable_query_log
diff --git a/mysql-test/t/temp_table.test b/mysql-test/t/temp_table.test
index 92c22242cdb..dd4ee2f6676 100644
--- a/mysql-test/t/temp_table.test
+++ b/mysql-test/t/temp_table.test
@@ -1,5 +1,6 @@
# mysqltest should be fixed
-- source include/not_embedded.inc
+
#
# Test of temporary tables
#
@@ -9,6 +10,30 @@ drop table if exists t1,t2;
drop view if exists v1;
--enable_warnings
+--echo #
+--echo # test basic creation of temporary tables together with normal table
+--echo #
+
+create table t1 (a int);
+create temporary table t1 AS SELECT 1;
+--error 1050
+create temporary table t1 AS SELECT 1;
+--error 1050
+create temporary table t1 (a int);
+drop temporary table t1;
+drop table t1;
+
+create temporary table t1 AS SELECT 1;
+--error 1050
+create temporary table t1 AS SELECT 1;
+--error 1050
+create temporary table t1 (a int);
+drop temporary table t1;
+
+--echo #
+--echo # Test with rename
+--echo #
+
CREATE TABLE t1 (c int not null, d char (10) not null);
insert into t1 values(1,""),(2,"a"),(3,"b");
CREATE TEMPORARY TABLE t1 (a int not null, b char (10) not null);
diff --git a/mysql-test/t/truncate_coverage.test b/mysql-test/t/truncate_coverage.test
index 135935b53b3..6f5c773ac6a 100644
--- a/mysql-test/t/truncate_coverage.test
+++ b/mysql-test/t/truncate_coverage.test
@@ -40,7 +40,7 @@ HANDLER t1 OPEN;
--connection default
let $ID= `SELECT @id := CONNECTION_ID()`;
LOCK TABLE t1 WRITE;
-SET DEBUG_SYNC='mdl_upgrade_shared_lock_to_exclusive SIGNAL waiting';
+SET DEBUG_SYNC='mdl_upgrade_lock SIGNAL waiting';
send TRUNCATE TABLE t1;
#
# Get the default connection ID into a variable in an invisible statement.
@@ -92,7 +92,7 @@ HANDLER t1 OPEN;
--echo # connection default
--connection default
LOCK TABLE t1 WRITE;
-SET DEBUG_SYNC='mdl_upgrade_shared_lock_to_exclusive SIGNAL waiting';
+SET DEBUG_SYNC='mdl_upgrade_lock SIGNAL waiting';
send TRUNCATE TABLE t1;
#
# Remove datafile.
diff --git a/mysql-test/t/upgrade.test b/mysql-test/t/upgrade.test
index c6d01a16f49..6025fc31415 100644
--- a/mysql-test/t/upgrade.test
+++ b/mysql-test/t/upgrade.test
@@ -39,6 +39,8 @@ insert into `txu#p#p1` values (1);
select * from `txu@0023p@0023p1`;
create table `txu@0023p@0023p1` (s1 int);
show tables;
+insert into `txu@0023p@0023p1` values (2);
+select * from `txu@0023p@0023p1`;
select * from `txu#p#p1`;
drop table `txu#p#p1`;
drop table `txu@0023p@0023p1`;
diff --git a/mysys/CMakeLists.txt b/mysys/CMakeLists.txt
index 05606942d8e..a1aa5e38580 100644
--- a/mysys/CMakeLists.txt
+++ b/mysys/CMakeLists.txt
@@ -15,15 +15,14 @@
INCLUDE_DIRECTORIES(${ZLIB_INCLUDE_DIR} ${CMAKE_SOURCE_DIR}/include ${CMAKE_SOURCE_DIR}/mysys)
-SET(MYSYS_SOURCES array.c charset-def.c charset.c checksum.c default.c
+SET(MYSYS_SOURCES array.c charset-def.c charset.c checksum.c my_default.c
errors.c hash.c list.c
- md5.c md5_compute.cc
mf_cache.c mf_dirname.c mf_fn_ext.c
mf_format.c mf_getdate.c mf_iocache.c mf_iocache2.c mf_keycache.c
mf_keycaches.c mf_loadpath.c mf_pack.c mf_path.c mf_qsort.c mf_qsort2.c
mf_radix.c mf_same.c mf_sort.c mf_soundex.c mf_arr_appstr.c mf_tempdir.c
mf_tempfile.c mf_unixpath.c mf_wcomp.c mulalloc.c my_access.c
- my_aes.c my_alloc.c my_bit.c my_bitmap.c my_chsize.c
+ my_alloc.c my_bit.c my_bitmap.c my_chsize.c
my_compress.c my_copy.c my_create.c my_delete.c
my_div.c my_error.c my_file.c my_fopen.c my_fstream.c
my_gethwaddr.c my_getopt.c my_getsystime.c my_getwd.c my_compare.c my_init.c
@@ -33,7 +32,7 @@ SET(MYSYS_SOURCES array.c charset-def.c charset.c checksum.c default.c
my_static.c my_symlink.c my_symlink2.c my_sync.c my_thr_init.c
my_basename.c
my_write.c ptr_cmp.c queues.c stacktrace.c
- rijndael.c sha1.c string.c thr_alarm.c thr_lock.c thr_mutex.c
+ string.c thr_alarm.c thr_lock.c thr_mutex.c
thr_rwlock.c tree.c typelib.c base64.c my_memmem.c my_getpagesize.c
lf_alloc-pin.c lf_dynarray.c lf_hash.c
safemalloc.c my_new.cc
diff --git a/mysys/array.c b/mysys/array.c
index 60f2202f5b3..cf377f77676 100644
--- a/mysys/array.c
+++ b/mysys/array.c
@@ -48,7 +48,7 @@ my_bool my_init_dynamic_array2(DYNAMIC_ARRAY *array, uint element_size,
DBUG_ENTER("my_init_dynamic_array2");
if (!alloc_increment)
{
- alloc_increment=max((8192-MALLOC_OVERHEAD)/element_size,16);
+ alloc_increment=MY_MAX((8192-MALLOC_OVERHEAD)/element_size,16);
if (init_alloc > 8 && alloc_increment > init_alloc * 2)
alloc_increment=init_alloc*2;
}
@@ -333,7 +333,7 @@ void delete_dynamic_element(DYNAMIC_ARRAY *array, uint idx)
void freeze_size(DYNAMIC_ARRAY *array)
{
- uint elements=max(array->elements,1);
+ uint elements=MY_MAX(array->elements,1);
/*
Do nothing if we are using a static buffer
diff --git a/mysys/hash.c b/mysys/hash.c
index aca6be4eb80..25210d3fcfe 100644
--- a/mysys/hash.c
+++ b/mysys/hash.c
@@ -69,7 +69,8 @@ static my_hash_value_type calc_hash(const HASH *hash,
@param[in] get_key get the key for the hash
@param[in] free_element pointer to the function that
does cleanup
- @return inidicates success or failure of initialization
+ @param[in] flags flags set in the hash
+ @return indicates success or failure of initialization
@retval 0 success
@retval 1 failure
*/
diff --git a/mysys/lf_alloc-pin.c b/mysys/lf_alloc-pin.c
index 88d5382947f..b599b455ff5 100644
--- a/mysys/lf_alloc-pin.c
+++ b/mysys/lf_alloc-pin.c
@@ -287,7 +287,7 @@ struct st_harvester {
static int harvest_pins(LF_PINS *el, struct st_harvester *hv)
{
int i;
- LF_PINS *el_end= el+min(hv->npins, LF_DYNARRAY_LEVEL_LENGTH);
+ LF_PINS *el_end= el+MY_MIN(hv->npins, LF_DYNARRAY_LEVEL_LENGTH);
for (; el < el_end; el++)
{
for (i= 0; i < LF_PINBOX_PINS; i++)
diff --git a/mysys/lf_dynarray.c b/mysys/lf_dynarray.c
index 3d072fd063e..16a77c0fa1a 100644
--- a/mysys/lf_dynarray.c
+++ b/mysys/lf_dynarray.c
@@ -124,7 +124,7 @@ void *_lf_dynarray_lvalue(LF_DYNARRAY *array, uint idx)
{
uchar *alloc, *data;
alloc= my_malloc(LF_DYNARRAY_LEVEL_LENGTH * array->size_of_element +
- max(array->size_of_element, sizeof(void *)),
+ MY_MAX(array->size_of_element, sizeof(void *)),
MYF(MY_WME|MY_ZEROFILL));
if (unlikely(!alloc))
return(NULL);
diff --git a/mysys/ma_dyncol.c b/mysys/ma_dyncol.c
index 71ceceaf162..1d297d918da 100644
--- a/mysys/ma_dyncol.c
+++ b/mysys/ma_dyncol.c
@@ -3853,20 +3853,19 @@ mariadb_dyncol_val_str(DYNAMIC_STRING *str, DYNAMIC_COLUMN_VALUE *val,
if (!quote)
{
/* convert to the destination */
- str->length+= copy_and_convert_extended(str->str, bufflen,
- cs,
- from, (uint32)len,
- val->x.string.charset,
- &dummy_errors);
+ str->length+= my_convert(str->str, bufflen,
+ cs,
+ from, (uint32)len,
+ val->x.string.charset,
+ &dummy_errors);
return ER_DYNCOL_OK;
}
if ((alloc= (char *)my_malloc(bufflen, MYF(0))))
{
- len=
- copy_and_convert_extended(alloc, bufflen, cs,
- from, (uint32)len,
- val->x.string.charset,
- &dummy_errors);
+ len= my_convert(alloc, bufflen, cs,
+ from, (uint32)len,
+ val->x.string.charset,
+ &dummy_errors);
from= alloc;
}
else
diff --git a/mysys/md5.c b/mysys/md5.c.THIS
index b4c2cb569fb..b4c2cb569fb 100644
--- a/mysys/md5.c
+++ b/mysys/md5.c.THIS
diff --git a/mysys/mf_dirname.c b/mysys/mf_dirname.c
index 569293f5401..bc827f60d44 100644
--- a/mysys/mf_dirname.c
+++ b/mysys/mf_dirname.c
@@ -78,7 +78,7 @@ size_t dirname_part(char *to, const char *name, size_t *to_res_length)
SYNPOSIS
convert_dirname()
to Store result here. Must be at least of size
- min(FN_REFLEN, strlen(from) + 1) to make room
+ MY_MIN(FN_REFLEN, strlen(from) + 1) to make room
for adding FN_LIBCHAR at the end.
from Original filename. May be == to
from_end Pointer at end of filename (normally end \0)
diff --git a/mysys/mf_format.c b/mysys/mf_format.c
index d20ce882459..91354db0b64 100644
--- a/mysys/mf_format.c
+++ b/mysys/mf_format.c
@@ -85,7 +85,7 @@ char * fn_format(char * to, const char *name, const char *dir,
tmp_length= strlength(startpos);
DBUG_PRINT("error",("dev: '%s' ext: '%s' length: %u",dev,ext,
(uint) length));
- (void) strmake(to,startpos,min(tmp_length,FN_REFLEN-1));
+ (void) strmake(to,startpos,MY_MIN(tmp_length,FN_REFLEN-1));
}
else
{
diff --git a/mysys/mf_iocache.c b/mysys/mf_iocache.c
index 02e5c5373ae..3fa6ec28f7d 100644
--- a/mysys/mf_iocache.c
+++ b/mysys/mf_iocache.c
@@ -1127,7 +1127,7 @@ static void copy_to_read_buffer(IO_CACHE *write_cache,
*/
while (write_length)
{
- size_t copy_length= min(write_length, write_cache->buffer_length);
+ size_t copy_length= MY_MIN(write_length, write_cache->buffer_length);
int __attribute__((unused)) rc;
rc= lock_io_cache(write_cache, write_cache->pos_in_file);
@@ -1285,7 +1285,7 @@ read_append_buffer:
TODO: figure out if the assert below is needed or correct.
*/
DBUG_ASSERT(pos_in_file == info->end_of_file);
- copy_len=min(Count, len_in_buff);
+ copy_len=MY_MIN(Count, len_in_buff);
memcpy(Buffer, info->append_read_pos, copy_len);
info->append_read_pos += copy_len;
Count -= copy_len;
@@ -1394,7 +1394,7 @@ int _my_b_async_read(register IO_CACHE *info, uchar *Buffer, size_t Count)
}
#endif
/* Copy found bytes to buffer */
- length=min(Count,read_length);
+ length=MY_MIN(Count,read_length);
memcpy(Buffer,info->read_pos,(size_t) length);
Buffer+=length;
Count-=length;
@@ -1428,7 +1428,7 @@ int _my_b_async_read(register IO_CACHE *info, uchar *Buffer, size_t Count)
if ((read_length=mysql_file_read(info->file,info->request_pos,
read_length, info->myflags)) == (size_t) -1)
return info->error= -1;
- use_length=min(Count,read_length);
+ use_length=MY_MIN(Count,read_length);
memcpy(Buffer,info->request_pos,(size_t) use_length);
info->read_pos=info->request_pos+Count;
info->read_end=info->request_pos+read_length;
diff --git a/mysys/mf_iocache2.c b/mysys/mf_iocache2.c
index efb1bcc9569..9a7ed0e01d2 100644
--- a/mysys/mf_iocache2.c
+++ b/mysys/mf_iocache2.c
@@ -497,7 +497,7 @@ process_flags:
if (my_b_write(info, (uchar*) buff, length2))
goto err;
}
- else if ((*fmt == 'l' && fmt[1] == 'd') || fmt[1] == 'u')
+ else if ((*fmt == 'l' && (fmt[1] == 'd' || fmt[1] == 'u')))
/* long parameter */
{
register long iarg;
diff --git a/mysys/my_access.c b/mysys/my_access.c
index b96e11d9809..1b63b827592 100644
--- a/mysys/my_access.c
+++ b/mysys/my_access.c
@@ -150,6 +150,66 @@ int check_if_legal_tablename(const char *name)
}
+#ifdef __WIN__
+/**
+ Checks if the drive letter supplied is valid or not. Valid drive
+ letters are A to Z, both lower case and upper case.
+
+ @param drive_letter : The drive letter to validate.
+
+ @return TRUE if the drive exists, FALSE otherwise.
+*/
+static my_bool does_drive_exists(char drive_letter)
+{
+ DWORD drive_mask= GetLogicalDrives();
+ drive_letter= toupper(drive_letter);
+
+ return (drive_letter >= 'A' && drive_letter <= 'Z') &&
+ (drive_mask & (0x1 << (drive_letter - 'A')));
+}
+
+/**
+ Verifies if the file name supplied is allowed or not. On Windows
+ file names with a colon (:) are not allowed because such file names
+ store data in Alternate Data Streams which can be used to hide
+ the data.
+
+ @param name contains the file name with or without path
+ @param length contains the length of file name
+ @param allow_current_dir TRUE if paths like C:foobar are allowed,
+ FALSE otherwise
+
+ @return TRUE if the file name is allowed, FALSE otherwise.
+*/
+my_bool is_filename_allowed(const char *name __attribute__((unused)),
+ size_t length __attribute__((unused)),
+ my_bool allow_current_dir __attribute__((unused)))
+{
+ /*
+ For Windows, check if the file name contains : character.
+ Start from end of path and search if the file name contains :
+ */
+ const char* ch = NULL;
+ for (ch= name + length - 1; ch >= name; --ch)
+ {
+ if (FN_LIBCHAR == *ch || '/' == *ch)
+ break;
+ else if (':' == *ch)
+ {
+ /*
+ File names like C:foobar.txt are allowed since the syntax means
+ file foobar.txt in current directory of C drive. However file
+ names likes CC:foobar are not allowed since this syntax means ADS
+ foobar in file CC.
+ */
+ return (allow_current_dir && (ch - name == 1) &&
+ does_drive_exists(*name));
+ }
+ }
+ return TRUE;
+} /* is_filename_allowed */
+#endif /* __WIN__ */
+
#if defined(__WIN__) || defined(__EMX__)
@@ -171,6 +231,9 @@ int check_if_legal_filename(const char *path)
const char **reserved_name;
DBUG_ENTER("check_if_legal_filename");
+ if (!is_filename_allowed(path, strlen(path), TRUE))
+ DBUG_RETURN(1);
+
path+= dirname_length(path); /* To start of filename */
if (!(end= strchr(path, FN_EXTCHAR)))
end= strend(path);
diff --git a/mysys/my_aes.c b/mysys/my_aes.c.THIS
index 5c52a0b1ab5..5c52a0b1ab5 100644
--- a/mysys/my_aes.c
+++ b/mysys/my_aes.c.THIS
diff --git a/mysys/my_alloc.c b/mysys/my_alloc.c
index 6c8a73df4a7..d61c7e171d0 100644
--- a/mysys/my_alloc.c
+++ b/mysys/my_alloc.c
@@ -228,7 +228,7 @@ void *alloc_root(MEM_ROOT *mem_root, size_t length)
{ /* Time to alloc new block */
block_size= (mem_root->block_size & ~1) * (mem_root->block_num >> 2);
get_size= length+ALIGN_SIZE(sizeof(USED_MEM));
- get_size= max(get_size, block_size);
+ get_size= MY_MAX(get_size, block_size);
if (!(next = (USED_MEM*) my_malloc(get_size,
MYF(MY_WME | ME_FATALERROR |
diff --git a/mysys/my_bitmap.c b/mysys/my_bitmap.c
index 851fe2b1026..3105f4b1daf 100644
--- a/mysys/my_bitmap.c
+++ b/mysys/my_bitmap.c
@@ -147,6 +147,26 @@ static inline void bitmap_unlock(MY_BITMAP *map __attribute__((unused)))
}
+static inline uint get_first_set(my_bitmap_map value, uint word_pos)
+{
+ uchar *byte_ptr= (uchar*)&value;
+ uchar byte_value;
+ uint byte_pos, bit_pos;
+
+ DBUG_ASSERT(value);
+ for (byte_pos=0; ; byte_pos++, byte_ptr++)
+ {
+ if ((byte_value= *byte_ptr))
+ {
+ for (bit_pos=0; ; bit_pos++)
+ if (byte_value & (1 << bit_pos))
+ return (word_pos*32) + (byte_pos*8) + bit_pos;
+ }
+ }
+ return MY_BIT_NONE; /* Impossible */
+}
+
+
my_bool bitmap_init(MY_BITMAP *map, my_bitmap_map *buf, uint n_bits,
my_bool thread_safe __attribute__((unused)))
{
@@ -405,7 +425,7 @@ void bitmap_intersect(MY_BITMAP *map, const MY_BITMAP *map2)
DBUG_ASSERT(map->bitmap && map2->bitmap);
- end= to+min(len,len2);
+ end= to+MY_MIN(len,len2);
while (to < end)
*to++ &= *from++;
@@ -597,12 +617,10 @@ void bitmap_copy(MY_BITMAP *map, const MY_BITMAP *map2)
uint bitmap_get_first_set(const MY_BITMAP *map)
{
- uchar *byte_ptr;
- uint i,j,k;
- my_bitmap_map *data_ptr, *end= map->last_word_ptr;
+ uint i;
+ my_bitmap_map *data_ptr= map->bitmap, *end= map->last_word_ptr;
DBUG_ASSERT(map->bitmap);
- data_ptr= map->bitmap;
for (i=0; data_ptr < end; data_ptr++, i++)
if (*data_ptr)
@@ -611,25 +629,66 @@ uint bitmap_get_first_set(const MY_BITMAP *map)
return MY_BIT_NONE;
found:
+ return get_first_set(*data_ptr, i);
+}
+
+
+/**
+ Get the next set bit.
+
+ @param map Bitmap
+ @param bitmap_bit Bit to start search from
+
+ @return Index to first bit set after bitmap_bit
+*/
+
+uint bitmap_get_next_set(const MY_BITMAP *map, uint bitmap_bit)
+{
+ uint word_pos, byte_to_mask, i;
+ union { my_bitmap_map bitmap ; uchar bitmap_buff[sizeof(my_bitmap_map)]; }
+ first_word;
+ uchar *ptr= &first_word.bitmap_buff[0];
+ my_bitmap_map *data_ptr, *end= map->last_word_ptr;
+
+ DBUG_ASSERT(map->bitmap);
+
+ /* Look for the next bit */
+ bitmap_bit++;
+ if (bitmap_bit >= map->n_bits)
+ return MY_BIT_NONE;
+ word_pos= bitmap_bit / 32;
+ data_ptr= map->bitmap + word_pos;
+ first_word.bitmap= *data_ptr;
+
+ /* Mask out previous bits from first_word */
+ byte_to_mask= (bitmap_bit % 32) / 8;
+ for (i= 0; i < byte_to_mask; i++)
+ ptr[i]= 0;
+ ptr[byte_to_mask]&= 0xFFU << (bitmap_bit & 7);
+
+ if (data_ptr == end)
{
- byte_ptr= (uchar*)data_ptr;
- for (j=0; ; j++, byte_ptr++)
- {
- if (*byte_ptr)
- {
- for (k=0; ; k++)
- {
- if (*byte_ptr & (1 << k))
- return (i*32) + (j*8) + k;
- }
- }
- }
+ if (first_word.bitmap & ~map->last_word_mask)
+ return get_first_set(first_word.bitmap, word_pos);
+ else
+ return MY_BIT_NONE;
}
- DBUG_ASSERT(0);
- return MY_BIT_NONE; /* Impossible */
+
+ if (first_word.bitmap)
+ return get_first_set(first_word.bitmap, word_pos);
+
+ for (data_ptr++, word_pos++; data_ptr < end; data_ptr++, word_pos++)
+ if (*data_ptr)
+ return get_first_set(*data_ptr, word_pos);
+
+ if (!(*end & ~map->last_word_mask))
+ return MY_BIT_NONE;
+ return get_first_set(*end, word_pos);
}
+/* Get first free bit */
+
uint bitmap_get_first(const MY_BITMAP *map)
{
uchar *byte_ptr;
@@ -647,17 +706,15 @@ uint bitmap_get_first(const MY_BITMAP *map)
return MY_BIT_NONE;
found:
+ byte_ptr= (uchar*)data_ptr;
+ for (j=0; ; j++, byte_ptr++)
{
- byte_ptr= (uchar*)data_ptr;
- for (j=0; ; j++, byte_ptr++)
+ if (*byte_ptr != 0xFF)
{
- if (*byte_ptr != 0xFF)
+ for (k=0; ; k++)
{
- for (k=0; ; k++)
- {
- if (!(*byte_ptr & (1 << k)))
- return (i*32) + (j*8) + k;
- }
+ if (!(*byte_ptr & (1 << k)))
+ return (i*32) + (j*8) + k;
}
}
}
diff --git a/mysys/my_compare.c b/mysys/my_compare.c
index 82b30ab3ed3..6de7ff774c0 100644
--- a/mysys/my_compare.c
+++ b/mysys/my_compare.c
@@ -36,7 +36,7 @@ static int compare_bin(const uchar *a, uint a_length,
const uchar *b, uint b_length,
my_bool part_key, my_bool skip_end_space)
{
- uint length= min(a_length,b_length);
+ uint length= MY_MIN(a_length,b_length);
const uchar *end= a+ length;
int flag;
@@ -171,7 +171,7 @@ int ha_key_cmp(HA_KEYSEG *keyseg, const uchar *a,
continue; /* To next key part */
}
}
- end= a+ min(keyseg->length,key_length);
+ end= a+ MY_MIN(keyseg->length,key_length);
next_key_length=key_length-keyseg->length;
switch ((enum ha_base_keytype) keyseg->type) {
diff --git a/mysys/my_compress.c b/mysys/my_compress.c
index 10c1903c163..4cd43596031 100644
--- a/mysys/my_compress.c
+++ b/mysys/my_compress.c
@@ -311,7 +311,7 @@ int unpackfrm(uchar **unpack_data, size_t *unpack_len,
if (ver != 1)
DBUG_RETURN(1);
- if (!(data= my_malloc(max(orglen, complen), MYF(MY_WME))))
+ if (!(data= my_malloc(MY_MAX(orglen, complen), MYF(MY_WME))))
DBUG_RETURN(2);
memcpy(data, pack_data + BLOB_HEADER, complen);
diff --git a/mysys/my_conio.c b/mysys/my_conio.c
index dc87b83f6b4..85ea99196a4 100644
--- a/mysys/my_conio.c
+++ b/mysys/my_conio.c
@@ -165,13 +165,13 @@ char* my_cgets(char *buffer, size_t clen, size_t* plen)
though it is known it should not be more than 64K
so we cut 64K and try first size of screen buffer
if it is still to large we cut half of it and try again
- later we may want to cycle from min(clen, 65535) to allowed size
+ later we may want to cycle from MY_MIN(clen, 65535) to allowed size
with small decrement to determine exact allowed buffer
*/
- clen= min(clen, 65535);
+ clen= MY_MIN(clen, 65535);
do
{
- clen= min(clen, (size_t) csbi.dwSize.X*csbi.dwSize.Y);
+ clen= MY_MIN(clen, (size_t) csbi.dwSize.X*csbi.dwSize.Y);
if (!ReadConsole((HANDLE)my_coninpfh, (LPVOID)buffer, (DWORD) clen - 1, &plen_res,
NULL))
{
diff --git a/mysys/default.c b/mysys/my_default.c
index 046b1445c51..1e4038d17fb 100644
--- a/mysys/default.c
+++ b/mysys/my_default.c
@@ -34,8 +34,9 @@
****************************************************************************/
#include "mysys_priv.h"
-#include "m_string.h"
-#include "m_ctype.h"
+#include <my_default.h>
+#include <m_string.h>
+#include <m_ctype.h>
#include <my_dir.h>
#ifdef __WIN__
#include <winbase.h>
@@ -899,7 +900,7 @@ static int search_default_file_with_ext(Process_option_func opt_handler,
for ( ; my_isspace(&my_charset_latin1,end[-1]) ; end--) ;
end[0]=0;
- strmake(curr_gr, ptr, min((size_t) (end-ptr)+1, sizeof(curr_gr)-1));
+ strmake(curr_gr, ptr, MY_MIN((size_t) (end-ptr)+1, sizeof(curr_gr)-1));
/* signal that a new group is found */
opt_handler(handler_ctx, curr_gr, NULL);
diff --git a/mysys/my_error.c b/mysys/my_error.c
index 08c67412fe1..1200385a43d 100644
--- a/mysys/my_error.c
+++ b/mysys/my_error.c
@@ -48,44 +48,73 @@
*/
static struct my_err_head
{
- struct my_err_head *meh_next; /* chain link */
- const char** (*get_errmsgs) (); /* returns error message format */
- int meh_first; /* error number matching array slot 0 */
- int meh_last; /* error number matching last slot */
-} my_errmsgs_globerrs = {NULL, get_global_errmsgs, EE_ERROR_FIRST, EE_ERROR_LAST};
+ struct my_err_head *meh_next; /* chain link */
+ const char** (*get_errmsgs)(); /* returns error message format */
+ uint meh_first; /* error number matching array slot 0 */
+ uint meh_last; /* error number matching last slot */
+} my_errmsgs_globerrs=
+{NULL, get_global_errmsgs, EE_ERROR_FIRST, EE_ERROR_LAST};
static struct my_err_head *my_errmsgs_list= &my_errmsgs_globerrs;
-/*
- Error message to user
+/**
+ @brief Get an error format string from one of the my_error_register()ed sets
+
+ @note
+ NULL values are possible even within a registered range.
- SYNOPSIS
- my_error()
- nr Errno
- MyFlags Flags
- ... variable list
+ @param nr Errno
+ @retval NULL if no message is registered for this error number
+ @retval str C-string
*/
-void my_error(int nr, myf MyFlags, ...)
+const char *my_get_err_msg(uint nr)
{
const char *format;
struct my_err_head *meh_p;
- va_list args;
- char ebuff[ERRMSGSIZE];
- DBUG_ENTER("my_error");
- DBUG_PRINT("my", ("nr: %d MyFlags: %lu errno: %d", nr, MyFlags, errno));
- /* Search for the error messages array, which could contain the message. */
+ /* Search for the range this error is in. */
for (meh_p= my_errmsgs_list; meh_p; meh_p= meh_p->meh_next)
if (nr <= meh_p->meh_last)
break;
- /* get the error message string. Default, if NULL or empty string (""). */
- if (! (format= (meh_p && (nr >= meh_p->meh_first)) ?
- meh_p->get_errmsgs()[nr - meh_p->meh_first] : NULL) || ! *format)
- (void) my_snprintf (ebuff, sizeof(ebuff), "Unknown error %d", nr);
+ /*
+ If we found the range this error number is in, get the format string.
+    If the string is empty, or a NULL pointer, or if we're out of range,
+ we return NULL.
+ */
+ if (!(format= (meh_p && (nr >= meh_p->meh_first)) ?
+ meh_p->get_errmsgs()[nr - meh_p->meh_first] : NULL) ||
+ !*format)
+ return NULL;
+
+ return format;
+}
+
+
+/**
+ Fill in and print a previously registered error message.
+
+ @note
+ Goes through the (sole) function registered in error_handler_hook
+
+ @param nr error number
+ @param MyFlags Flags
+ @param ... variable list matching that error format string
+*/
+
+void my_error(uint nr, myf MyFlags, ...)
+{
+ const char *format;
+ va_list args;
+ char ebuff[ERRMSGSIZE];
+ DBUG_ENTER("my_error");
+ DBUG_PRINT("my", ("nr: %d MyFlags: %lu errno: %d", nr, MyFlags, errno));
+
+ if (!(format = my_get_err_msg(nr)))
+ (void) my_snprintf(ebuff, sizeof(ebuff), "Unknown error %d", nr);
else
{
va_start(args,MyFlags);
@@ -98,15 +127,16 @@ void my_error(int nr, myf MyFlags, ...)
}
-/*
- Error as printf
-
- SYNOPSIS
- my_printf_error()
- error Errno
- format Format string
- MyFlags Flags
- ... variable list
+/**
+ Print an error message.
+
+ @note
+ Goes through the (sole) function registered in error_handler_hook
+
+ @param error error number
+ @param format format string
+ @param MyFlags Flags
+ @param ... variable list matching that error format string
*/
void my_printf_error(uint error, const char *format, myf MyFlags, ...)
@@ -125,15 +155,16 @@ void my_printf_error(uint error, const char *format, myf MyFlags, ...)
DBUG_VOID_RETURN;
}
-/*
- Error with va_list
-
- SYNOPSIS
- my_printv_error()
- error Errno
- format Format string
- MyFlags Flags
- ... variable list
+/**
+ Print an error message.
+
+ @note
+ Goes through the (sole) function registered in error_handler_hook
+
+ @param error error number
+ @param format format string
+ @param MyFlags Flags
+ @param ap variable list matching that error format string
*/
void my_printv_error(uint error, const char *format, myf MyFlags, va_list ap)
@@ -149,14 +180,15 @@ void my_printv_error(uint error, const char *format, myf MyFlags, va_list ap)
}
-/*
- Give message using error_handler_hook
+/**
+ Print an error message.
- SYNOPSIS
- my_message()
- error Errno
- str Error message
- MyFlags Flags
+ @note
+ Goes through the (sole) function registered in error_handler_hook
+
+ @param error error number
+ @param str error message
+ @param MyFlags Flags
*/
void my_message(uint error, const char *str, register myf MyFlags)
@@ -165,16 +197,11 @@ void my_message(uint error, const char *str, register myf MyFlags)
}
-/*
+/**
Register error messages for use with my_error().
- SYNOPSIS
- my_error_register()
- errmsgs array of pointers to error messages
- first error number of first message in the array
- last error number of last message in the array
+ @description
- DESCRIPTION
The pointer array is expected to contain addresses to NUL-terminated
C character strings. The array contains (last - first + 1) pointers.
NULL pointers and empty strings ("") are allowed. These will be mapped to
@@ -182,12 +209,15 @@ void my_message(uint error, const char *str, register myf MyFlags)
This function registers the error numbers 'first' to 'last'.
No overlapping with previously registered error numbers is allowed.
- RETURN
- 0 OK
- != 0 Error
+ @param errmsgs array of pointers to error messages
+ @param first error number of first message in the array
+ @param last error number of last message in the array
+
+ @retval 0 OK
+ @retval != 0 Error
*/
-int my_error_register(const char** (*get_errmsgs) (), int first, int last)
+int my_error_register(const char** (*get_errmsgs) (), uint first, uint last)
{
struct my_err_head *meh_p;
struct my_err_head **search_meh_pp;
@@ -223,28 +253,27 @@ int my_error_register(const char** (*get_errmsgs) (), int first, int last)
}
-/*
+/**
Unregister formerly registered error messages.
- SYNOPSIS
- my_error_unregister()
- first error number of first message
- last error number of last message
+ @description
- DESCRIPTION
This function unregisters the error numbers 'first' to 'last'.
These must have been previously registered by my_error_register().
'first' and 'last' must exactly match the registration.
If a matching registration is present, the header is removed from the
list and the pointer to the error messages pointers array is returned.
+ (The messages themselves are not released here as they may be static.)
Otherwise, NULL is returned.
- RETURN
- non-NULL OK, returns address of error messages pointers array.
- NULL Error, no such number range registered.
+ @param first error number of first message
+ @param last error number of last message
+
+ @retval NULL Error, no such number range registered.
+ @retval non-NULL OK, returns address of error messages pointers array.
*/
-const char **my_error_unregister(int first, int last)
+const char **my_error_unregister(uint first, uint last)
{
struct my_err_head *meh_p;
struct my_err_head **search_meh_pp;
@@ -274,6 +303,17 @@ const char **my_error_unregister(int first, int last)
}
+/**
+ Unregister all formerly registered error messages.
+
+ @description
+
+  This function unregisters all error numbers that have previously
+  been registered by my_error_register().
+ All headers are removed from the list; the messages themselves are
+ not released here as they may be static.
+*/
+
void my_error_unregister_all(void)
{
struct my_err_head *cursor, *saved_next;
diff --git a/mysys/my_file.c b/mysys/my_file.c
index 8d01285a94b..a23ab487d00 100644
--- a/mysys/my_file.c
+++ b/mysys/my_file.c
@@ -76,7 +76,7 @@ static uint set_max_open_files(uint max_file_limit)
static uint set_max_open_files(uint max_file_limit)
{
/* We don't know the limit. Return best guess */
- return min(max_file_limit, OS_FILE_LIMIT);
+ return MY_MIN(max_file_limit, OS_FILE_LIMIT);
}
#endif
@@ -99,7 +99,7 @@ uint my_set_max_open_files(uint files)
DBUG_PRINT("enter",("files: %u my_file_limit: %u", files, my_file_limit));
files+= MY_FILE_MIN;
- files= set_max_open_files(min(files, OS_FILE_LIMIT));
+ files= set_max_open_files(MY_MIN(files, OS_FILE_LIMIT));
if (files <= MY_NFILE)
DBUG_RETURN(files);
@@ -109,9 +109,9 @@ uint my_set_max_open_files(uint files)
/* Copy any initialized files */
memcpy((char*) tmp, (char*) my_file_info,
- sizeof(*tmp) * min(my_file_limit, files));
+ sizeof(*tmp) * MY_MIN(my_file_limit, files));
bzero((char*) (tmp + my_file_limit),
- max((int) (files- my_file_limit), 0)*sizeof(*tmp));
+ MY_MAX((int) (files- my_file_limit), 0)*sizeof(*tmp));
my_free_open_file_info(); /* Free if already allocated */
my_file_info= tmp;
my_file_limit= files;
diff --git a/mysys/my_getopt.c b/mysys/my_getopt.c
index 0645e413672..261e150d342 100644
--- a/mysys/my_getopt.c
+++ b/mysys/my_getopt.c
@@ -15,6 +15,7 @@
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */
#include <my_global.h>
+#include <my_default.h>
#include <m_string.h>
#include <stdlib.h>
#include <my_sys.h>
diff --git a/mysys/my_rnd.c b/mysys/my_rnd.c
index d043c8529ad..14f212e2f32 100644
--- a/mysys/my_rnd.c
+++ b/mysys/my_rnd.c
@@ -14,6 +14,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include "mysys_priv.h"
+#include <my_rnd.h>
#include <m_string.h>
/*
@@ -62,3 +63,39 @@ double my_rnd(struct my_rnd_struct *rand_st)
rand_st->seed1= seed1;
return (((double) seed1)/rand_st->max_value_dbl);
}
+
+
+/**
+ Generate a random number using the OpenSSL/yaSSL supplied
+ random number generator if available.
+
+ @param rand_st [INOUT] Structure used for number generation
+ only if none of the SSL libraries are
+ available.
+
+ @retval Generated random number.
+*/
+
+double my_rnd_ssl(struct my_rnd_struct *rand_st)
+{
+
+#if defined(HAVE_YASSL) || defined(HAVE_OPENSSL)
+ int rc;
+ unsigned int res;
+
+#if defined(HAVE_YASSL)
+ rc= yaSSL::RAND_bytes((unsigned char *) &res, sizeof (unsigned int));
+#else
+ rc= RAND_bytes((unsigned char *) &res, sizeof (unsigned int));
+#endif /* HAVE_YASSL */
+
+ if (rc)
+ return (double)res / (double)UINT_MAX;
+#endif /* defined(HAVE_YASSL) || defined(HAVE_OPENSSL) */
+
+ return my_rnd(rand_st);
+}
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/mysys/my_thr_init.c b/mysys/my_thr_init.c
index 270cbc2f882..bdbf67455e1 100644
--- a/mysys/my_thr_init.c
+++ b/mysys/my_thr_init.c
@@ -376,12 +376,16 @@ void my_thread_end(void)
This must be done before trashing st_my_thread_var,
because the LF_HASH depends on it.
*/
- if (PSI_server)
- PSI_server->delete_current_thread();
+ PSI_THREAD_CALL(delete_current_thread)();
#endif
+ /*
+    We need to disable DBUG early for this thread to ensure that
+    the mutex calls don't enable it again.
+    To do this we have to both do DBUG_POP() and also reset THR_KEY_mysys
+ as the key is used by DBUG.
+ */
DBUG_POP();
-
pthread_setspecific(THR_KEY_mysys,0);
if (tmp && tmp->init)
@@ -418,6 +422,10 @@ struct st_my_thread_var *_my_thread_var(void)
return my_pthread_getspecific(struct st_my_thread_var*,THR_KEY_mysys);
}
+int set_mysys_var(struct st_my_thread_var *mysys_var)
+{
+ return my_pthread_setspecific_ptr(THR_KEY_mysys, mysys_var);
+}
/****************************************************************************
Get name of current thread.
diff --git a/mysys/my_uuid.c b/mysys/my_uuid.c
index 01c59e42f2e..a8614afef2c 100644
--- a/mysys/my_uuid.c
+++ b/mysys/my_uuid.c
@@ -40,6 +40,7 @@
*/
#include "mysys_priv.h"
+#include <my_rnd.h>
#include <m_string.h>
#include <myisampack.h> /* mi_int2store, mi_int4store */
@@ -151,7 +152,7 @@ void my_uuid(uchar *to)
/*
-1 so we won't make tv= uuid_time for nanoseq >= (tv - uuid_time)
*/
- delta= min(nanoseq, (ulong)(tv - uuid_time -1));
+ delta= MY_MIN(nanoseq, (ulong)(tv - uuid_time -1));
tv-= delta;
nanoseq-= delta;
}
diff --git a/mysys/psi_noop.c b/mysys/psi_noop.c
index 78629ca16d7..8c9f2773170 100644
--- a/mysys/psi_noop.c
+++ b/mysys/psi_noop.c
@@ -119,7 +119,8 @@ static void destroy_cond_noop(PSI_cond* cond NNN)
}
static PSI_socket*
-init_socket_noop(PSI_socket_key key NNN, const my_socket *fd NNN)
+init_socket_noop(PSI_socket_key key NNN, const my_socket *fd NNN,
+ const struct sockaddr *addr NNN, socklen_t addr_len NNN)
{
return NULL;
}
@@ -188,12 +189,12 @@ static int spawn_thread_noop(PSI_thread_key key NNN,
static PSI_thread*
new_thread_noop(PSI_thread_key key NNN,
- const void *identity NNN, ulong thread_id NNN)
+ const void *identity NNN, ulonglong thread_id NNN)
{
return NULL;
}
-static void set_thread_id_noop(PSI_thread *thread NNN, unsigned long id NNN)
+static void set_thread_id_noop(PSI_thread *thread NNN, ulonglong id NNN)
{
return;
}
@@ -401,16 +402,17 @@ static void end_table_lock_wait_noop(PSI_table_locker* locker NNN)
return;
}
-static PSI_file* start_file_open_wait_noop(PSI_file_locker *locker NNN,
- const char *src_file NNN,
- uint src_line NNN)
+static void start_file_open_wait_noop(PSI_file_locker *locker NNN,
+ const char *src_file NNN,
+ uint src_line NNN)
{
- return NULL;
+ return;
}
-static void end_file_open_wait_noop(PSI_file_locker *locker NNN)
+static PSI_file* end_file_open_wait_noop(PSI_file_locker *locker NNN,
+ void *result NNN)
{
- return;
+ return NULL;
}
static void end_file_open_wait_and_bind_to_descriptor_noop
@@ -433,6 +435,19 @@ static void end_file_wait_noop(PSI_file_locker *locker NNN,
return;
}
+static void start_file_close_wait_noop(PSI_file_locker *locker NNN,
+ const char *src_file NNN,
+ uint src_line NNN)
+{
+ return;
+}
+
+static void end_file_close_wait_noop(PSI_file_locker *locker NNN,
+ int result NNN)
+{
+ return;
+}
+
static void start_stage_noop(PSI_stage_key key NNN,
const char *src_file NNN, int src_line NNN)
{
@@ -446,7 +461,8 @@ static void end_stage_noop(void)
static PSI_statement_locker*
get_thread_statement_locker_noop(PSI_statement_locker_state *state NNN,
- PSI_statement_key key NNN)
+ PSI_statement_key key NNN,
+ const void *charset NNN)
{
return NULL;
}
@@ -621,6 +637,14 @@ digest_add_token_noop(PSI_digest_locker *locker NNN,
return NULL;
}
+static int
+set_thread_connect_attrs_noop(const char *buffer __attribute__((unused)),
+ uint length __attribute__((unused)),
+ const void *from_cs __attribute__((unused)))
+{
+ return 0;
+}
+
static PSI PSI_noop=
{
register_mutex_noop,
@@ -687,6 +711,8 @@ static PSI PSI_noop=
end_file_open_wait_and_bind_to_descriptor_noop,
start_file_wait_noop,
end_file_wait_noop,
+ start_file_close_wait_noop,
+ end_file_close_wait_noop,
start_stage_noop,
end_stage_noop,
get_thread_statement_locker_noop,
@@ -716,7 +742,8 @@ static PSI PSI_noop=
set_socket_info_noop,
set_socket_thread_owner_noop,
digest_start_noop,
- digest_add_token_noop
+ digest_add_token_noop,
+ set_thread_connect_attrs_noop
};
/**
diff --git a/mysys/rijndael.c b/mysys/rijndael.c
deleted file mode 100644
index e893a886726..00000000000
--- a/mysys/rijndael.c
+++ /dev/null
@@ -1,1379 +0,0 @@
-/* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
-
-
-/*
- Based on version 3.0 (December 2000)
-
- Optimised ANSI C code for the Rijndael cipher (now AES)
-
- author Vincent Rijmen <vincent.rijmen@esat.kuleuven.ac.be>
- author Antoon Bosselaers <antoon.bosselaers@esat.kuleuven.ac.be>
- author Paulo Barreto <paulo.barreto@terra.com.br>
-*/
-
-#include <my_global.h>
-#include "rijndael.h"
-
-/*
- Define the following to use fastest and much larger code (~10K extra code)
- #define FULL_UNROLL
-*/
-
-static const uint32 Te0[256]=
-{
- 0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU,
- 0xfff2f20dU, 0xd66b6bbdU, 0xde6f6fb1U, 0x91c5c554U,
- 0x60303050U, 0x02010103U, 0xce6767a9U, 0x562b2b7dU,
- 0xe7fefe19U, 0xb5d7d762U, 0x4dababe6U, 0xec76769aU,
- 0x8fcaca45U, 0x1f82829dU, 0x89c9c940U, 0xfa7d7d87U,
- 0xeffafa15U, 0xb25959ebU, 0x8e4747c9U, 0xfbf0f00bU,
- 0x41adadecU, 0xb3d4d467U, 0x5fa2a2fdU, 0x45afafeaU,
- 0x239c9cbfU, 0x53a4a4f7U, 0xe4727296U, 0x9bc0c05bU,
- 0x75b7b7c2U, 0xe1fdfd1cU, 0x3d9393aeU, 0x4c26266aU,
- 0x6c36365aU, 0x7e3f3f41U, 0xf5f7f702U, 0x83cccc4fU,
- 0x6834345cU, 0x51a5a5f4U, 0xd1e5e534U, 0xf9f1f108U,
- 0xe2717193U, 0xabd8d873U, 0x62313153U, 0x2a15153fU,
- 0x0804040cU, 0x95c7c752U, 0x46232365U, 0x9dc3c35eU,
- 0x30181828U, 0x379696a1U, 0x0a05050fU, 0x2f9a9ab5U,
- 0x0e070709U, 0x24121236U, 0x1b80809bU, 0xdfe2e23dU,
- 0xcdebeb26U, 0x4e272769U, 0x7fb2b2cdU, 0xea75759fU,
- 0x1209091bU, 0x1d83839eU, 0x582c2c74U, 0x341a1a2eU,
- 0x361b1b2dU, 0xdc6e6eb2U, 0xb45a5aeeU, 0x5ba0a0fbU,
- 0xa45252f6U, 0x763b3b4dU, 0xb7d6d661U, 0x7db3b3ceU,
- 0x5229297bU, 0xdde3e33eU, 0x5e2f2f71U, 0x13848497U,
- 0xa65353f5U, 0xb9d1d168U, 0x00000000U, 0xc1eded2cU,
- 0x40202060U, 0xe3fcfc1fU, 0x79b1b1c8U, 0xb65b5bedU,
- 0xd46a6abeU, 0x8dcbcb46U, 0x67bebed9U, 0x7239394bU,
- 0x944a4adeU, 0x984c4cd4U, 0xb05858e8U, 0x85cfcf4aU,
- 0xbbd0d06bU, 0xc5efef2aU, 0x4faaaae5U, 0xedfbfb16U,
- 0x864343c5U, 0x9a4d4dd7U, 0x66333355U, 0x11858594U,
- 0x8a4545cfU, 0xe9f9f910U, 0x04020206U, 0xfe7f7f81U,
- 0xa05050f0U, 0x783c3c44U, 0x259f9fbaU, 0x4ba8a8e3U,
- 0xa25151f3U, 0x5da3a3feU, 0x804040c0U, 0x058f8f8aU,
- 0x3f9292adU, 0x219d9dbcU, 0x70383848U, 0xf1f5f504U,
- 0x63bcbcdfU, 0x77b6b6c1U, 0xafdada75U, 0x42212163U,
- 0x20101030U, 0xe5ffff1aU, 0xfdf3f30eU, 0xbfd2d26dU,
- 0x81cdcd4cU, 0x180c0c14U, 0x26131335U, 0xc3ecec2fU,
- 0xbe5f5fe1U, 0x359797a2U, 0x884444ccU, 0x2e171739U,
- 0x93c4c457U, 0x55a7a7f2U, 0xfc7e7e82U, 0x7a3d3d47U,
- 0xc86464acU, 0xba5d5de7U, 0x3219192bU, 0xe6737395U,
- 0xc06060a0U, 0x19818198U, 0x9e4f4fd1U, 0xa3dcdc7fU,
- 0x44222266U, 0x542a2a7eU, 0x3b9090abU, 0x0b888883U,
- 0x8c4646caU, 0xc7eeee29U, 0x6bb8b8d3U, 0x2814143cU,
- 0xa7dede79U, 0xbc5e5ee2U, 0x160b0b1dU, 0xaddbdb76U,
- 0xdbe0e03bU, 0x64323256U, 0x743a3a4eU, 0x140a0a1eU,
- 0x924949dbU, 0x0c06060aU, 0x4824246cU, 0xb85c5ce4U,
- 0x9fc2c25dU, 0xbdd3d36eU, 0x43acacefU, 0xc46262a6U,
- 0x399191a8U, 0x319595a4U, 0xd3e4e437U, 0xf279798bU,
- 0xd5e7e732U, 0x8bc8c843U, 0x6e373759U, 0xda6d6db7U,
- 0x018d8d8cU, 0xb1d5d564U, 0x9c4e4ed2U, 0x49a9a9e0U,
- 0xd86c6cb4U, 0xac5656faU, 0xf3f4f407U, 0xcfeaea25U,
- 0xca6565afU, 0xf47a7a8eU, 0x47aeaee9U, 0x10080818U,
- 0x6fbabad5U, 0xf0787888U, 0x4a25256fU, 0x5c2e2e72U,
- 0x381c1c24U, 0x57a6a6f1U, 0x73b4b4c7U, 0x97c6c651U,
- 0xcbe8e823U, 0xa1dddd7cU, 0xe874749cU, 0x3e1f1f21U,
- 0x964b4bddU, 0x61bdbddcU, 0x0d8b8b86U, 0x0f8a8a85U,
- 0xe0707090U, 0x7c3e3e42U, 0x71b5b5c4U, 0xcc6666aaU,
- 0x904848d8U, 0x06030305U, 0xf7f6f601U, 0x1c0e0e12U,
- 0xc26161a3U, 0x6a35355fU, 0xae5757f9U, 0x69b9b9d0U,
- 0x17868691U, 0x99c1c158U, 0x3a1d1d27U, 0x279e9eb9U,
- 0xd9e1e138U, 0xebf8f813U, 0x2b9898b3U, 0x22111133U,
- 0xd26969bbU, 0xa9d9d970U, 0x078e8e89U, 0x339494a7U,
- 0x2d9b9bb6U, 0x3c1e1e22U, 0x15878792U, 0xc9e9e920U,
- 0x87cece49U, 0xaa5555ffU, 0x50282878U, 0xa5dfdf7aU,
- 0x038c8c8fU, 0x59a1a1f8U, 0x09898980U, 0x1a0d0d17U,
- 0x65bfbfdaU, 0xd7e6e631U, 0x844242c6U, 0xd06868b8U,
- 0x824141c3U, 0x299999b0U, 0x5a2d2d77U, 0x1e0f0f11U,
- 0x7bb0b0cbU, 0xa85454fcU, 0x6dbbbbd6U, 0x2c16163aU,
-};
-
-static const uint32 Te1[256]=
-{
- 0xa5c66363U, 0x84f87c7cU, 0x99ee7777U, 0x8df67b7bU,
- 0x0dfff2f2U, 0xbdd66b6bU, 0xb1de6f6fU, 0x5491c5c5U,
- 0x50603030U, 0x03020101U, 0xa9ce6767U, 0x7d562b2bU,
- 0x19e7fefeU, 0x62b5d7d7U, 0xe64dababU, 0x9aec7676U,
- 0x458fcacaU, 0x9d1f8282U, 0x4089c9c9U, 0x87fa7d7dU,
- 0x15effafaU, 0xebb25959U, 0xc98e4747U, 0x0bfbf0f0U,
- 0xec41adadU, 0x67b3d4d4U, 0xfd5fa2a2U, 0xea45afafU,
- 0xbf239c9cU, 0xf753a4a4U, 0x96e47272U, 0x5b9bc0c0U,
- 0xc275b7b7U, 0x1ce1fdfdU, 0xae3d9393U, 0x6a4c2626U,
- 0x5a6c3636U, 0x417e3f3fU, 0x02f5f7f7U, 0x4f83ccccU,
- 0x5c683434U, 0xf451a5a5U, 0x34d1e5e5U, 0x08f9f1f1U,
- 0x93e27171U, 0x73abd8d8U, 0x53623131U, 0x3f2a1515U,
- 0x0c080404U, 0x5295c7c7U, 0x65462323U, 0x5e9dc3c3U,
- 0x28301818U, 0xa1379696U, 0x0f0a0505U, 0xb52f9a9aU,
- 0x090e0707U, 0x36241212U, 0x9b1b8080U, 0x3ddfe2e2U,
- 0x26cdebebU, 0x694e2727U, 0xcd7fb2b2U, 0x9fea7575U,
- 0x1b120909U, 0x9e1d8383U, 0x74582c2cU, 0x2e341a1aU,
- 0x2d361b1bU, 0xb2dc6e6eU, 0xeeb45a5aU, 0xfb5ba0a0U,
- 0xf6a45252U, 0x4d763b3bU, 0x61b7d6d6U, 0xce7db3b3U,
- 0x7b522929U, 0x3edde3e3U, 0x715e2f2fU, 0x97138484U,
- 0xf5a65353U, 0x68b9d1d1U, 0x00000000U, 0x2cc1ededU,
- 0x60402020U, 0x1fe3fcfcU, 0xc879b1b1U, 0xedb65b5bU,
- 0xbed46a6aU, 0x468dcbcbU, 0xd967bebeU, 0x4b723939U,
- 0xde944a4aU, 0xd4984c4cU, 0xe8b05858U, 0x4a85cfcfU,
- 0x6bbbd0d0U, 0x2ac5efefU, 0xe54faaaaU, 0x16edfbfbU,
- 0xc5864343U, 0xd79a4d4dU, 0x55663333U, 0x94118585U,
- 0xcf8a4545U, 0x10e9f9f9U, 0x06040202U, 0x81fe7f7fU,
- 0xf0a05050U, 0x44783c3cU, 0xba259f9fU, 0xe34ba8a8U,
- 0xf3a25151U, 0xfe5da3a3U, 0xc0804040U, 0x8a058f8fU,
- 0xad3f9292U, 0xbc219d9dU, 0x48703838U, 0x04f1f5f5U,
- 0xdf63bcbcU, 0xc177b6b6U, 0x75afdadaU, 0x63422121U,
- 0x30201010U, 0x1ae5ffffU, 0x0efdf3f3U, 0x6dbfd2d2U,
- 0x4c81cdcdU, 0x14180c0cU, 0x35261313U, 0x2fc3ececU,
- 0xe1be5f5fU, 0xa2359797U, 0xcc884444U, 0x392e1717U,
- 0x5793c4c4U, 0xf255a7a7U, 0x82fc7e7eU, 0x477a3d3dU,
- 0xacc86464U, 0xe7ba5d5dU, 0x2b321919U, 0x95e67373U,
- 0xa0c06060U, 0x98198181U, 0xd19e4f4fU, 0x7fa3dcdcU,
- 0x66442222U, 0x7e542a2aU, 0xab3b9090U, 0x830b8888U,
- 0xca8c4646U, 0x29c7eeeeU, 0xd36bb8b8U, 0x3c281414U,
- 0x79a7dedeU, 0xe2bc5e5eU, 0x1d160b0bU, 0x76addbdbU,
- 0x3bdbe0e0U, 0x56643232U, 0x4e743a3aU, 0x1e140a0aU,
- 0xdb924949U, 0x0a0c0606U, 0x6c482424U, 0xe4b85c5cU,
- 0x5d9fc2c2U, 0x6ebdd3d3U, 0xef43acacU, 0xa6c46262U,
- 0xa8399191U, 0xa4319595U, 0x37d3e4e4U, 0x8bf27979U,
- 0x32d5e7e7U, 0x438bc8c8U, 0x596e3737U, 0xb7da6d6dU,
- 0x8c018d8dU, 0x64b1d5d5U, 0xd29c4e4eU, 0xe049a9a9U,
- 0xb4d86c6cU, 0xfaac5656U, 0x07f3f4f4U, 0x25cfeaeaU,
- 0xafca6565U, 0x8ef47a7aU, 0xe947aeaeU, 0x18100808U,
- 0xd56fbabaU, 0x88f07878U, 0x6f4a2525U, 0x725c2e2eU,
- 0x24381c1cU, 0xf157a6a6U, 0xc773b4b4U, 0x5197c6c6U,
- 0x23cbe8e8U, 0x7ca1ddddU, 0x9ce87474U, 0x213e1f1fU,
- 0xdd964b4bU, 0xdc61bdbdU, 0x860d8b8bU, 0x850f8a8aU,
- 0x90e07070U, 0x427c3e3eU, 0xc471b5b5U, 0xaacc6666U,
- 0xd8904848U, 0x05060303U, 0x01f7f6f6U, 0x121c0e0eU,
- 0xa3c26161U, 0x5f6a3535U, 0xf9ae5757U, 0xd069b9b9U,
- 0x91178686U, 0x5899c1c1U, 0x273a1d1dU, 0xb9279e9eU,
- 0x38d9e1e1U, 0x13ebf8f8U, 0xb32b9898U, 0x33221111U,
- 0xbbd26969U, 0x70a9d9d9U, 0x89078e8eU, 0xa7339494U,
- 0xb62d9b9bU, 0x223c1e1eU, 0x92158787U, 0x20c9e9e9U,
- 0x4987ceceU, 0xffaa5555U, 0x78502828U, 0x7aa5dfdfU,
- 0x8f038c8cU, 0xf859a1a1U, 0x80098989U, 0x171a0d0dU,
- 0xda65bfbfU, 0x31d7e6e6U, 0xc6844242U, 0xb8d06868U,
- 0xc3824141U, 0xb0299999U, 0x775a2d2dU, 0x111e0f0fU,
- 0xcb7bb0b0U, 0xfca85454U, 0xd66dbbbbU, 0x3a2c1616U,
-};
-
-static const uint32 Te2[256]=
-{
- 0x63a5c663U, 0x7c84f87cU, 0x7799ee77U, 0x7b8df67bU,
- 0xf20dfff2U, 0x6bbdd66bU, 0x6fb1de6fU, 0xc55491c5U,
- 0x30506030U, 0x01030201U, 0x67a9ce67U, 0x2b7d562bU,
- 0xfe19e7feU, 0xd762b5d7U, 0xabe64dabU, 0x769aec76U,
- 0xca458fcaU, 0x829d1f82U, 0xc94089c9U, 0x7d87fa7dU,
- 0xfa15effaU, 0x59ebb259U, 0x47c98e47U, 0xf00bfbf0U,
- 0xadec41adU, 0xd467b3d4U, 0xa2fd5fa2U, 0xafea45afU,
- 0x9cbf239cU, 0xa4f753a4U, 0x7296e472U, 0xc05b9bc0U,
- 0xb7c275b7U, 0xfd1ce1fdU, 0x93ae3d93U, 0x266a4c26U,
- 0x365a6c36U, 0x3f417e3fU, 0xf702f5f7U, 0xcc4f83ccU,
- 0x345c6834U, 0xa5f451a5U, 0xe534d1e5U, 0xf108f9f1U,
- 0x7193e271U, 0xd873abd8U, 0x31536231U, 0x153f2a15U,
- 0x040c0804U, 0xc75295c7U, 0x23654623U, 0xc35e9dc3U,
- 0x18283018U, 0x96a13796U, 0x050f0a05U, 0x9ab52f9aU,
- 0x07090e07U, 0x12362412U, 0x809b1b80U, 0xe23ddfe2U,
- 0xeb26cdebU, 0x27694e27U, 0xb2cd7fb2U, 0x759fea75U,
- 0x091b1209U, 0x839e1d83U, 0x2c74582cU, 0x1a2e341aU,
- 0x1b2d361bU, 0x6eb2dc6eU, 0x5aeeb45aU, 0xa0fb5ba0U,
- 0x52f6a452U, 0x3b4d763bU, 0xd661b7d6U, 0xb3ce7db3U,
- 0x297b5229U, 0xe33edde3U, 0x2f715e2fU, 0x84971384U,
- 0x53f5a653U, 0xd168b9d1U, 0x00000000U, 0xed2cc1edU,
- 0x20604020U, 0xfc1fe3fcU, 0xb1c879b1U, 0x5bedb65bU,
- 0x6abed46aU, 0xcb468dcbU, 0xbed967beU, 0x394b7239U,
- 0x4ade944aU, 0x4cd4984cU, 0x58e8b058U, 0xcf4a85cfU,
- 0xd06bbbd0U, 0xef2ac5efU, 0xaae54faaU, 0xfb16edfbU,
- 0x43c58643U, 0x4dd79a4dU, 0x33556633U, 0x85941185U,
- 0x45cf8a45U, 0xf910e9f9U, 0x02060402U, 0x7f81fe7fU,
- 0x50f0a050U, 0x3c44783cU, 0x9fba259fU, 0xa8e34ba8U,
- 0x51f3a251U, 0xa3fe5da3U, 0x40c08040U, 0x8f8a058fU,
- 0x92ad3f92U, 0x9dbc219dU, 0x38487038U, 0xf504f1f5U,
- 0xbcdf63bcU, 0xb6c177b6U, 0xda75afdaU, 0x21634221U,
- 0x10302010U, 0xff1ae5ffU, 0xf30efdf3U, 0xd26dbfd2U,
- 0xcd4c81cdU, 0x0c14180cU, 0x13352613U, 0xec2fc3ecU,
- 0x5fe1be5fU, 0x97a23597U, 0x44cc8844U, 0x17392e17U,
- 0xc45793c4U, 0xa7f255a7U, 0x7e82fc7eU, 0x3d477a3dU,
- 0x64acc864U, 0x5de7ba5dU, 0x192b3219U, 0x7395e673U,
- 0x60a0c060U, 0x81981981U, 0x4fd19e4fU, 0xdc7fa3dcU,
- 0x22664422U, 0x2a7e542aU, 0x90ab3b90U, 0x88830b88U,
- 0x46ca8c46U, 0xee29c7eeU, 0xb8d36bb8U, 0x143c2814U,
- 0xde79a7deU, 0x5ee2bc5eU, 0x0b1d160bU, 0xdb76addbU,
- 0xe03bdbe0U, 0x32566432U, 0x3a4e743aU, 0x0a1e140aU,
- 0x49db9249U, 0x060a0c06U, 0x246c4824U, 0x5ce4b85cU,
- 0xc25d9fc2U, 0xd36ebdd3U, 0xacef43acU, 0x62a6c462U,
- 0x91a83991U, 0x95a43195U, 0xe437d3e4U, 0x798bf279U,
- 0xe732d5e7U, 0xc8438bc8U, 0x37596e37U, 0x6db7da6dU,
- 0x8d8c018dU, 0xd564b1d5U, 0x4ed29c4eU, 0xa9e049a9U,
- 0x6cb4d86cU, 0x56faac56U, 0xf407f3f4U, 0xea25cfeaU,
- 0x65afca65U, 0x7a8ef47aU, 0xaee947aeU, 0x08181008U,
- 0xbad56fbaU, 0x7888f078U, 0x256f4a25U, 0x2e725c2eU,
- 0x1c24381cU, 0xa6f157a6U, 0xb4c773b4U, 0xc65197c6U,
- 0xe823cbe8U, 0xdd7ca1ddU, 0x749ce874U, 0x1f213e1fU,
- 0x4bdd964bU, 0xbddc61bdU, 0x8b860d8bU, 0x8a850f8aU,
- 0x7090e070U, 0x3e427c3eU, 0xb5c471b5U, 0x66aacc66U,
- 0x48d89048U, 0x03050603U, 0xf601f7f6U, 0x0e121c0eU,
- 0x61a3c261U, 0x355f6a35U, 0x57f9ae57U, 0xb9d069b9U,
- 0x86911786U, 0xc15899c1U, 0x1d273a1dU, 0x9eb9279eU,
- 0xe138d9e1U, 0xf813ebf8U, 0x98b32b98U, 0x11332211U,
- 0x69bbd269U, 0xd970a9d9U, 0x8e89078eU, 0x94a73394U,
- 0x9bb62d9bU, 0x1e223c1eU, 0x87921587U, 0xe920c9e9U,
- 0xce4987ceU, 0x55ffaa55U, 0x28785028U, 0xdf7aa5dfU,
- 0x8c8f038cU, 0xa1f859a1U, 0x89800989U, 0x0d171a0dU,
- 0xbfda65bfU, 0xe631d7e6U, 0x42c68442U, 0x68b8d068U,
- 0x41c38241U, 0x99b02999U, 0x2d775a2dU, 0x0f111e0fU,
- 0xb0cb7bb0U, 0x54fca854U, 0xbbd66dbbU, 0x163a2c16U,
-};
-
-static const uint32 Te3[256]=
-{
- 0x6363a5c6U, 0x7c7c84f8U, 0x777799eeU, 0x7b7b8df6U,
- 0xf2f20dffU, 0x6b6bbdd6U, 0x6f6fb1deU, 0xc5c55491U,
- 0x30305060U, 0x01010302U, 0x6767a9ceU, 0x2b2b7d56U,
- 0xfefe19e7U, 0xd7d762b5U, 0xababe64dU, 0x76769aecU,
- 0xcaca458fU, 0x82829d1fU, 0xc9c94089U, 0x7d7d87faU,
- 0xfafa15efU, 0x5959ebb2U, 0x4747c98eU, 0xf0f00bfbU,
- 0xadadec41U, 0xd4d467b3U, 0xa2a2fd5fU, 0xafafea45U,
- 0x9c9cbf23U, 0xa4a4f753U, 0x727296e4U, 0xc0c05b9bU,
- 0xb7b7c275U, 0xfdfd1ce1U, 0x9393ae3dU, 0x26266a4cU,
- 0x36365a6cU, 0x3f3f417eU, 0xf7f702f5U, 0xcccc4f83U,
- 0x34345c68U, 0xa5a5f451U, 0xe5e534d1U, 0xf1f108f9U,
- 0x717193e2U, 0xd8d873abU, 0x31315362U, 0x15153f2aU,
- 0x04040c08U, 0xc7c75295U, 0x23236546U, 0xc3c35e9dU,
- 0x18182830U, 0x9696a137U, 0x05050f0aU, 0x9a9ab52fU,
- 0x0707090eU, 0x12123624U, 0x80809b1bU, 0xe2e23ddfU,
- 0xebeb26cdU, 0x2727694eU, 0xb2b2cd7fU, 0x75759feaU,
- 0x09091b12U, 0x83839e1dU, 0x2c2c7458U, 0x1a1a2e34U,
- 0x1b1b2d36U, 0x6e6eb2dcU, 0x5a5aeeb4U, 0xa0a0fb5bU,
- 0x5252f6a4U, 0x3b3b4d76U, 0xd6d661b7U, 0xb3b3ce7dU,
- 0x29297b52U, 0xe3e33eddU, 0x2f2f715eU, 0x84849713U,
- 0x5353f5a6U, 0xd1d168b9U, 0x00000000U, 0xeded2cc1U,
- 0x20206040U, 0xfcfc1fe3U, 0xb1b1c879U, 0x5b5bedb6U,
- 0x6a6abed4U, 0xcbcb468dU, 0xbebed967U, 0x39394b72U,
- 0x4a4ade94U, 0x4c4cd498U, 0x5858e8b0U, 0xcfcf4a85U,
- 0xd0d06bbbU, 0xefef2ac5U, 0xaaaae54fU, 0xfbfb16edU,
- 0x4343c586U, 0x4d4dd79aU, 0x33335566U, 0x85859411U,
- 0x4545cf8aU, 0xf9f910e9U, 0x02020604U, 0x7f7f81feU,
- 0x5050f0a0U, 0x3c3c4478U, 0x9f9fba25U, 0xa8a8e34bU,
- 0x5151f3a2U, 0xa3a3fe5dU, 0x4040c080U, 0x8f8f8a05U,
- 0x9292ad3fU, 0x9d9dbc21U, 0x38384870U, 0xf5f504f1U,
- 0xbcbcdf63U, 0xb6b6c177U, 0xdada75afU, 0x21216342U,
- 0x10103020U, 0xffff1ae5U, 0xf3f30efdU, 0xd2d26dbfU,
- 0xcdcd4c81U, 0x0c0c1418U, 0x13133526U, 0xecec2fc3U,
- 0x5f5fe1beU, 0x9797a235U, 0x4444cc88U, 0x1717392eU,
- 0xc4c45793U, 0xa7a7f255U, 0x7e7e82fcU, 0x3d3d477aU,
- 0x6464acc8U, 0x5d5de7baU, 0x19192b32U, 0x737395e6U,
- 0x6060a0c0U, 0x81819819U, 0x4f4fd19eU, 0xdcdc7fa3U,
- 0x22226644U, 0x2a2a7e54U, 0x9090ab3bU, 0x8888830bU,
- 0x4646ca8cU, 0xeeee29c7U, 0xb8b8d36bU, 0x14143c28U,
- 0xdede79a7U, 0x5e5ee2bcU, 0x0b0b1d16U, 0xdbdb76adU,
- 0xe0e03bdbU, 0x32325664U, 0x3a3a4e74U, 0x0a0a1e14U,
- 0x4949db92U, 0x06060a0cU, 0x24246c48U, 0x5c5ce4b8U,
- 0xc2c25d9fU, 0xd3d36ebdU, 0xacacef43U, 0x6262a6c4U,
- 0x9191a839U, 0x9595a431U, 0xe4e437d3U, 0x79798bf2U,
- 0xe7e732d5U, 0xc8c8438bU, 0x3737596eU, 0x6d6db7daU,
- 0x8d8d8c01U, 0xd5d564b1U, 0x4e4ed29cU, 0xa9a9e049U,
- 0x6c6cb4d8U, 0x5656faacU, 0xf4f407f3U, 0xeaea25cfU,
- 0x6565afcaU, 0x7a7a8ef4U, 0xaeaee947U, 0x08081810U,
- 0xbabad56fU, 0x787888f0U, 0x25256f4aU, 0x2e2e725cU,
- 0x1c1c2438U, 0xa6a6f157U, 0xb4b4c773U, 0xc6c65197U,
- 0xe8e823cbU, 0xdddd7ca1U, 0x74749ce8U, 0x1f1f213eU,
- 0x4b4bdd96U, 0xbdbddc61U, 0x8b8b860dU, 0x8a8a850fU,
- 0x707090e0U, 0x3e3e427cU, 0xb5b5c471U, 0x6666aaccU,
- 0x4848d890U, 0x03030506U, 0xf6f601f7U, 0x0e0e121cU,
- 0x6161a3c2U, 0x35355f6aU, 0x5757f9aeU, 0xb9b9d069U,
- 0x86869117U, 0xc1c15899U, 0x1d1d273aU, 0x9e9eb927U,
- 0xe1e138d9U, 0xf8f813ebU, 0x9898b32bU, 0x11113322U,
- 0x6969bbd2U, 0xd9d970a9U, 0x8e8e8907U, 0x9494a733U,
- 0x9b9bb62dU, 0x1e1e223cU, 0x87879215U, 0xe9e920c9U,
- 0xcece4987U, 0x5555ffaaU, 0x28287850U, 0xdfdf7aa5U,
- 0x8c8c8f03U, 0xa1a1f859U, 0x89898009U, 0x0d0d171aU,
- 0xbfbfda65U, 0xe6e631d7U, 0x4242c684U, 0x6868b8d0U,
- 0x4141c382U, 0x9999b029U, 0x2d2d775aU, 0x0f0f111eU,
- 0xb0b0cb7bU, 0x5454fca8U, 0xbbbbd66dU, 0x16163a2cU,
-};
-
-static const uint32 Te4[256]=
-{
- 0x63636363U, 0x7c7c7c7cU, 0x77777777U, 0x7b7b7b7bU,
- 0xf2f2f2f2U, 0x6b6b6b6bU, 0x6f6f6f6fU, 0xc5c5c5c5U,
- 0x30303030U, 0x01010101U, 0x67676767U, 0x2b2b2b2bU,
- 0xfefefefeU, 0xd7d7d7d7U, 0xababababU, 0x76767676U,
- 0xcacacacaU, 0x82828282U, 0xc9c9c9c9U, 0x7d7d7d7dU,
- 0xfafafafaU, 0x59595959U, 0x47474747U, 0xf0f0f0f0U,
- 0xadadadadU, 0xd4d4d4d4U, 0xa2a2a2a2U, 0xafafafafU,
- 0x9c9c9c9cU, 0xa4a4a4a4U, 0x72727272U, 0xc0c0c0c0U,
- 0xb7b7b7b7U, 0xfdfdfdfdU, 0x93939393U, 0x26262626U,
- 0x36363636U, 0x3f3f3f3fU, 0xf7f7f7f7U, 0xccccccccU,
- 0x34343434U, 0xa5a5a5a5U, 0xe5e5e5e5U, 0xf1f1f1f1U,
- 0x71717171U, 0xd8d8d8d8U, 0x31313131U, 0x15151515U,
- 0x04040404U, 0xc7c7c7c7U, 0x23232323U, 0xc3c3c3c3U,
- 0x18181818U, 0x96969696U, 0x05050505U, 0x9a9a9a9aU,
- 0x07070707U, 0x12121212U, 0x80808080U, 0xe2e2e2e2U,
- 0xebebebebU, 0x27272727U, 0xb2b2b2b2U, 0x75757575U,
- 0x09090909U, 0x83838383U, 0x2c2c2c2cU, 0x1a1a1a1aU,
- 0x1b1b1b1bU, 0x6e6e6e6eU, 0x5a5a5a5aU, 0xa0a0a0a0U,
- 0x52525252U, 0x3b3b3b3bU, 0xd6d6d6d6U, 0xb3b3b3b3U,
- 0x29292929U, 0xe3e3e3e3U, 0x2f2f2f2fU, 0x84848484U,
- 0x53535353U, 0xd1d1d1d1U, 0x00000000U, 0xededededU,
- 0x20202020U, 0xfcfcfcfcU, 0xb1b1b1b1U, 0x5b5b5b5bU,
- 0x6a6a6a6aU, 0xcbcbcbcbU, 0xbebebebeU, 0x39393939U,
- 0x4a4a4a4aU, 0x4c4c4c4cU, 0x58585858U, 0xcfcfcfcfU,
- 0xd0d0d0d0U, 0xefefefefU, 0xaaaaaaaaU, 0xfbfbfbfbU,
- 0x43434343U, 0x4d4d4d4dU, 0x33333333U, 0x85858585U,
- 0x45454545U, 0xf9f9f9f9U, 0x02020202U, 0x7f7f7f7fU,
- 0x50505050U, 0x3c3c3c3cU, 0x9f9f9f9fU, 0xa8a8a8a8U,
- 0x51515151U, 0xa3a3a3a3U, 0x40404040U, 0x8f8f8f8fU,
- 0x92929292U, 0x9d9d9d9dU, 0x38383838U, 0xf5f5f5f5U,
- 0xbcbcbcbcU, 0xb6b6b6b6U, 0xdadadadaU, 0x21212121U,
- 0x10101010U, 0xffffffffU, 0xf3f3f3f3U, 0xd2d2d2d2U,
- 0xcdcdcdcdU, 0x0c0c0c0cU, 0x13131313U, 0xececececU,
- 0x5f5f5f5fU, 0x97979797U, 0x44444444U, 0x17171717U,
- 0xc4c4c4c4U, 0xa7a7a7a7U, 0x7e7e7e7eU, 0x3d3d3d3dU,
- 0x64646464U, 0x5d5d5d5dU, 0x19191919U, 0x73737373U,
- 0x60606060U, 0x81818181U, 0x4f4f4f4fU, 0xdcdcdcdcU,
- 0x22222222U, 0x2a2a2a2aU, 0x90909090U, 0x88888888U,
- 0x46464646U, 0xeeeeeeeeU, 0xb8b8b8b8U, 0x14141414U,
- 0xdedededeU, 0x5e5e5e5eU, 0x0b0b0b0bU, 0xdbdbdbdbU,
- 0xe0e0e0e0U, 0x32323232U, 0x3a3a3a3aU, 0x0a0a0a0aU,
- 0x49494949U, 0x06060606U, 0x24242424U, 0x5c5c5c5cU,
- 0xc2c2c2c2U, 0xd3d3d3d3U, 0xacacacacU, 0x62626262U,
- 0x91919191U, 0x95959595U, 0xe4e4e4e4U, 0x79797979U,
- 0xe7e7e7e7U, 0xc8c8c8c8U, 0x37373737U, 0x6d6d6d6dU,
- 0x8d8d8d8dU, 0xd5d5d5d5U, 0x4e4e4e4eU, 0xa9a9a9a9U,
- 0x6c6c6c6cU, 0x56565656U, 0xf4f4f4f4U, 0xeaeaeaeaU,
- 0x65656565U, 0x7a7a7a7aU, 0xaeaeaeaeU, 0x08080808U,
- 0xbabababaU, 0x78787878U, 0x25252525U, 0x2e2e2e2eU,
- 0x1c1c1c1cU, 0xa6a6a6a6U, 0xb4b4b4b4U, 0xc6c6c6c6U,
- 0xe8e8e8e8U, 0xddddddddU, 0x74747474U, 0x1f1f1f1fU,
- 0x4b4b4b4bU, 0xbdbdbdbdU, 0x8b8b8b8bU, 0x8a8a8a8aU,
- 0x70707070U, 0x3e3e3e3eU, 0xb5b5b5b5U, 0x66666666U,
- 0x48484848U, 0x03030303U, 0xf6f6f6f6U, 0x0e0e0e0eU,
- 0x61616161U, 0x35353535U, 0x57575757U, 0xb9b9b9b9U,
- 0x86868686U, 0xc1c1c1c1U, 0x1d1d1d1dU, 0x9e9e9e9eU,
- 0xe1e1e1e1U, 0xf8f8f8f8U, 0x98989898U, 0x11111111U,
- 0x69696969U, 0xd9d9d9d9U, 0x8e8e8e8eU, 0x94949494U,
- 0x9b9b9b9bU, 0x1e1e1e1eU, 0x87878787U, 0xe9e9e9e9U,
- 0xcecececeU, 0x55555555U, 0x28282828U, 0xdfdfdfdfU,
- 0x8c8c8c8cU, 0xa1a1a1a1U, 0x89898989U, 0x0d0d0d0dU,
- 0xbfbfbfbfU, 0xe6e6e6e6U, 0x42424242U, 0x68686868U,
- 0x41414141U, 0x99999999U, 0x2d2d2d2dU, 0x0f0f0f0fU,
- 0xb0b0b0b0U, 0x54545454U, 0xbbbbbbbbU, 0x16161616U,
-};
-
-static const uint32 Td0[256]=
-{
- 0x51f4a750U, 0x7e416553U, 0x1a17a4c3U, 0x3a275e96U,
- 0x3bab6bcbU, 0x1f9d45f1U, 0xacfa58abU, 0x4be30393U,
- 0x2030fa55U, 0xad766df6U, 0x88cc7691U, 0xf5024c25U,
- 0x4fe5d7fcU, 0xc52acbd7U, 0x26354480U, 0xb562a38fU,
- 0xdeb15a49U, 0x25ba1b67U, 0x45ea0e98U, 0x5dfec0e1U,
- 0xc32f7502U, 0x814cf012U, 0x8d4697a3U, 0x6bd3f9c6U,
- 0x038f5fe7U, 0x15929c95U, 0xbf6d7aebU, 0x955259daU,
- 0xd4be832dU, 0x587421d3U, 0x49e06929U, 0x8ec9c844U,
- 0x75c2896aU, 0xf48e7978U, 0x99583e6bU, 0x27b971ddU,
- 0xbee14fb6U, 0xf088ad17U, 0xc920ac66U, 0x7dce3ab4U,
- 0x63df4a18U, 0xe51a3182U, 0x97513360U, 0x62537f45U,
- 0xb16477e0U, 0xbb6bae84U, 0xfe81a01cU, 0xf9082b94U,
- 0x70486858U, 0x8f45fd19U, 0x94de6c87U, 0x527bf8b7U,
- 0xab73d323U, 0x724b02e2U, 0xe31f8f57U, 0x6655ab2aU,
- 0xb2eb2807U, 0x2fb5c203U, 0x86c57b9aU, 0xd33708a5U,
- 0x302887f2U, 0x23bfa5b2U, 0x02036abaU, 0xed16825cU,
- 0x8acf1c2bU, 0xa779b492U, 0xf307f2f0U, 0x4e69e2a1U,
- 0x65daf4cdU, 0x0605bed5U, 0xd134621fU, 0xc4a6fe8aU,
- 0x342e539dU, 0xa2f355a0U, 0x058ae132U, 0xa4f6eb75U,
- 0x0b83ec39U, 0x4060efaaU, 0x5e719f06U, 0xbd6e1051U,
- 0x3e218af9U, 0x96dd063dU, 0xdd3e05aeU, 0x4de6bd46U,
- 0x91548db5U, 0x71c45d05U, 0x0406d46fU, 0x605015ffU,
- 0x1998fb24U, 0xd6bde997U, 0x894043ccU, 0x67d99e77U,
- 0xb0e842bdU, 0x07898b88U, 0xe7195b38U, 0x79c8eedbU,
- 0xa17c0a47U, 0x7c420fe9U, 0xf8841ec9U, 0x00000000U,
- 0x09808683U, 0x322bed48U, 0x1e1170acU, 0x6c5a724eU,
- 0xfd0efffbU, 0x0f853856U, 0x3daed51eU, 0x362d3927U,
- 0x0a0fd964U, 0x685ca621U, 0x9b5b54d1U, 0x24362e3aU,
- 0x0c0a67b1U, 0x9357e70fU, 0xb4ee96d2U, 0x1b9b919eU,
- 0x80c0c54fU, 0x61dc20a2U, 0x5a774b69U, 0x1c121a16U,
- 0xe293ba0aU, 0xc0a02ae5U, 0x3c22e043U, 0x121b171dU,
- 0x0e090d0bU, 0xf28bc7adU, 0x2db6a8b9U, 0x141ea9c8U,
- 0x57f11985U, 0xaf75074cU, 0xee99ddbbU, 0xa37f60fdU,
- 0xf701269fU, 0x5c72f5bcU, 0x44663bc5U, 0x5bfb7e34U,
- 0x8b432976U, 0xcb23c6dcU, 0xb6edfc68U, 0xb8e4f163U,
- 0xd731dccaU, 0x42638510U, 0x13972240U, 0x84c61120U,
- 0x854a247dU, 0xd2bb3df8U, 0xaef93211U, 0xc729a16dU,
- 0x1d9e2f4bU, 0xdcb230f3U, 0x0d8652ecU, 0x77c1e3d0U,
- 0x2bb3166cU, 0xa970b999U, 0x119448faU, 0x47e96422U,
- 0xa8fc8cc4U, 0xa0f03f1aU, 0x567d2cd8U, 0x223390efU,
- 0x87494ec7U, 0xd938d1c1U, 0x8ccaa2feU, 0x98d40b36U,
- 0xa6f581cfU, 0xa57ade28U, 0xdab78e26U, 0x3fadbfa4U,
- 0x2c3a9de4U, 0x5078920dU, 0x6a5fcc9bU, 0x547e4662U,
- 0xf68d13c2U, 0x90d8b8e8U, 0x2e39f75eU, 0x82c3aff5U,
- 0x9f5d80beU, 0x69d0937cU, 0x6fd52da9U, 0xcf2512b3U,
- 0xc8ac993bU, 0x10187da7U, 0xe89c636eU, 0xdb3bbb7bU,
- 0xcd267809U, 0x6e5918f4U, 0xec9ab701U, 0x834f9aa8U,
- 0xe6956e65U, 0xaaffe67eU, 0x21bccf08U, 0xef15e8e6U,
- 0xbae79bd9U, 0x4a6f36ceU, 0xea9f09d4U, 0x29b07cd6U,
- 0x31a4b2afU, 0x2a3f2331U, 0xc6a59430U, 0x35a266c0U,
- 0x744ebc37U, 0xfc82caa6U, 0xe090d0b0U, 0x33a7d815U,
- 0xf104984aU, 0x41ecdaf7U, 0x7fcd500eU, 0x1791f62fU,
- 0x764dd68dU, 0x43efb04dU, 0xccaa4d54U, 0xe49604dfU,
- 0x9ed1b5e3U, 0x4c6a881bU, 0xc12c1fb8U, 0x4665517fU,
- 0x9d5eea04U, 0x018c355dU, 0xfa877473U, 0xfb0b412eU,
- 0xb3671d5aU, 0x92dbd252U, 0xe9105633U, 0x6dd64713U,
- 0x9ad7618cU, 0x37a10c7aU, 0x59f8148eU, 0xeb133c89U,
- 0xcea927eeU, 0xb761c935U, 0xe11ce5edU, 0x7a47b13cU,
- 0x9cd2df59U, 0x55f2733fU, 0x1814ce79U, 0x73c737bfU,
- 0x53f7cdeaU, 0x5ffdaa5bU, 0xdf3d6f14U, 0x7844db86U,
- 0xcaaff381U, 0xb968c43eU, 0x3824342cU, 0xc2a3405fU,
- 0x161dc372U, 0xbce2250cU, 0x283c498bU, 0xff0d9541U,
- 0x39a80171U, 0x080cb3deU, 0xd8b4e49cU, 0x6456c190U,
- 0x7bcb8461U, 0xd532b670U, 0x486c5c74U, 0xd0b85742U,
-};
-
-static const uint32 Td1[256]=
-{
- 0x5051f4a7U, 0x537e4165U, 0xc31a17a4U, 0x963a275eU,
- 0xcb3bab6bU, 0xf11f9d45U, 0xabacfa58U, 0x934be303U,
- 0x552030faU, 0xf6ad766dU, 0x9188cc76U, 0x25f5024cU,
- 0xfc4fe5d7U, 0xd7c52acbU, 0x80263544U, 0x8fb562a3U,
- 0x49deb15aU, 0x6725ba1bU, 0x9845ea0eU, 0xe15dfec0U,
- 0x02c32f75U, 0x12814cf0U, 0xa38d4697U, 0xc66bd3f9U,
- 0xe7038f5fU, 0x9515929cU, 0xebbf6d7aU, 0xda955259U,
- 0x2dd4be83U, 0xd3587421U, 0x2949e069U, 0x448ec9c8U,
- 0x6a75c289U, 0x78f48e79U, 0x6b99583eU, 0xdd27b971U,
- 0xb6bee14fU, 0x17f088adU, 0x66c920acU, 0xb47dce3aU,
- 0x1863df4aU, 0x82e51a31U, 0x60975133U, 0x4562537fU,
- 0xe0b16477U, 0x84bb6baeU, 0x1cfe81a0U, 0x94f9082bU,
- 0x58704868U, 0x198f45fdU, 0x8794de6cU, 0xb7527bf8U,
- 0x23ab73d3U, 0xe2724b02U, 0x57e31f8fU, 0x2a6655abU,
- 0x07b2eb28U, 0x032fb5c2U, 0x9a86c57bU, 0xa5d33708U,
- 0xf2302887U, 0xb223bfa5U, 0xba02036aU, 0x5ced1682U,
- 0x2b8acf1cU, 0x92a779b4U, 0xf0f307f2U, 0xa14e69e2U,
- 0xcd65daf4U, 0xd50605beU, 0x1fd13462U, 0x8ac4a6feU,
- 0x9d342e53U, 0xa0a2f355U, 0x32058ae1U, 0x75a4f6ebU,
- 0x390b83ecU, 0xaa4060efU, 0x065e719fU, 0x51bd6e10U,
- 0xf93e218aU, 0x3d96dd06U, 0xaedd3e05U, 0x464de6bdU,
- 0xb591548dU, 0x0571c45dU, 0x6f0406d4U, 0xff605015U,
- 0x241998fbU, 0x97d6bde9U, 0xcc894043U, 0x7767d99eU,
- 0xbdb0e842U, 0x8807898bU, 0x38e7195bU, 0xdb79c8eeU,
- 0x47a17c0aU, 0xe97c420fU, 0xc9f8841eU, 0x00000000U,
- 0x83098086U, 0x48322bedU, 0xac1e1170U, 0x4e6c5a72U,
- 0xfbfd0effU, 0x560f8538U, 0x1e3daed5U, 0x27362d39U,
- 0x640a0fd9U, 0x21685ca6U, 0xd19b5b54U, 0x3a24362eU,
- 0xb10c0a67U, 0x0f9357e7U, 0xd2b4ee96U, 0x9e1b9b91U,
- 0x4f80c0c5U, 0xa261dc20U, 0x695a774bU, 0x161c121aU,
- 0x0ae293baU, 0xe5c0a02aU, 0x433c22e0U, 0x1d121b17U,
- 0x0b0e090dU, 0xadf28bc7U, 0xb92db6a8U, 0xc8141ea9U,
- 0x8557f119U, 0x4caf7507U, 0xbbee99ddU, 0xfda37f60U,
- 0x9ff70126U, 0xbc5c72f5U, 0xc544663bU, 0x345bfb7eU,
- 0x768b4329U, 0xdccb23c6U, 0x68b6edfcU, 0x63b8e4f1U,
- 0xcad731dcU, 0x10426385U, 0x40139722U, 0x2084c611U,
- 0x7d854a24U, 0xf8d2bb3dU, 0x11aef932U, 0x6dc729a1U,
- 0x4b1d9e2fU, 0xf3dcb230U, 0xec0d8652U, 0xd077c1e3U,
- 0x6c2bb316U, 0x99a970b9U, 0xfa119448U, 0x2247e964U,
- 0xc4a8fc8cU, 0x1aa0f03fU, 0xd8567d2cU, 0xef223390U,
- 0xc787494eU, 0xc1d938d1U, 0xfe8ccaa2U, 0x3698d40bU,
- 0xcfa6f581U, 0x28a57adeU, 0x26dab78eU, 0xa43fadbfU,
- 0xe42c3a9dU, 0x0d507892U, 0x9b6a5fccU, 0x62547e46U,
- 0xc2f68d13U, 0xe890d8b8U, 0x5e2e39f7U, 0xf582c3afU,
- 0xbe9f5d80U, 0x7c69d093U, 0xa96fd52dU, 0xb3cf2512U,
- 0x3bc8ac99U, 0xa710187dU, 0x6ee89c63U, 0x7bdb3bbbU,
- 0x09cd2678U, 0xf46e5918U, 0x01ec9ab7U, 0xa8834f9aU,
- 0x65e6956eU, 0x7eaaffe6U, 0x0821bccfU, 0xe6ef15e8U,
- 0xd9bae79bU, 0xce4a6f36U, 0xd4ea9f09U, 0xd629b07cU,
- 0xaf31a4b2U, 0x312a3f23U, 0x30c6a594U, 0xc035a266U,
- 0x37744ebcU, 0xa6fc82caU, 0xb0e090d0U, 0x1533a7d8U,
- 0x4af10498U, 0xf741ecdaU, 0x0e7fcd50U, 0x2f1791f6U,
- 0x8d764dd6U, 0x4d43efb0U, 0x54ccaa4dU, 0xdfe49604U,
- 0xe39ed1b5U, 0x1b4c6a88U, 0xb8c12c1fU, 0x7f466551U,
- 0x049d5eeaU, 0x5d018c35U, 0x73fa8774U, 0x2efb0b41U,
- 0x5ab3671dU, 0x5292dbd2U, 0x33e91056U, 0x136dd647U,
- 0x8c9ad761U, 0x7a37a10cU, 0x8e59f814U, 0x89eb133cU,
- 0xeecea927U, 0x35b761c9U, 0xede11ce5U, 0x3c7a47b1U,
- 0x599cd2dfU, 0x3f55f273U, 0x791814ceU, 0xbf73c737U,
- 0xea53f7cdU, 0x5b5ffdaaU, 0x14df3d6fU, 0x867844dbU,
- 0x81caaff3U, 0x3eb968c4U, 0x2c382434U, 0x5fc2a340U,
- 0x72161dc3U, 0x0cbce225U, 0x8b283c49U, 0x41ff0d95U,
- 0x7139a801U, 0xde080cb3U, 0x9cd8b4e4U, 0x906456c1U,
- 0x617bcb84U, 0x70d532b6U, 0x74486c5cU, 0x42d0b857U,
-};
-
-static const uint32 Td2[256]=
-{
- 0xa75051f4U, 0x65537e41U, 0xa4c31a17U, 0x5e963a27U,
- 0x6bcb3babU, 0x45f11f9dU, 0x58abacfaU, 0x03934be3U,
- 0xfa552030U, 0x6df6ad76U, 0x769188ccU, 0x4c25f502U,
- 0xd7fc4fe5U, 0xcbd7c52aU, 0x44802635U, 0xa38fb562U,
- 0x5a49deb1U, 0x1b6725baU, 0x0e9845eaU, 0xc0e15dfeU,
- 0x7502c32fU, 0xf012814cU, 0x97a38d46U, 0xf9c66bd3U,
- 0x5fe7038fU, 0x9c951592U, 0x7aebbf6dU, 0x59da9552U,
- 0x832dd4beU, 0x21d35874U, 0x692949e0U, 0xc8448ec9U,
- 0x896a75c2U, 0x7978f48eU, 0x3e6b9958U, 0x71dd27b9U,
- 0x4fb6bee1U, 0xad17f088U, 0xac66c920U, 0x3ab47dceU,
- 0x4a1863dfU, 0x3182e51aU, 0x33609751U, 0x7f456253U,
- 0x77e0b164U, 0xae84bb6bU, 0xa01cfe81U, 0x2b94f908U,
- 0x68587048U, 0xfd198f45U, 0x6c8794deU, 0xf8b7527bU,
- 0xd323ab73U, 0x02e2724bU, 0x8f57e31fU, 0xab2a6655U,
- 0x2807b2ebU, 0xc2032fb5U, 0x7b9a86c5U, 0x08a5d337U,
- 0x87f23028U, 0xa5b223bfU, 0x6aba0203U, 0x825ced16U,
- 0x1c2b8acfU, 0xb492a779U, 0xf2f0f307U, 0xe2a14e69U,
- 0xf4cd65daU, 0xbed50605U, 0x621fd134U, 0xfe8ac4a6U,
- 0x539d342eU, 0x55a0a2f3U, 0xe132058aU, 0xeb75a4f6U,
- 0xec390b83U, 0xefaa4060U, 0x9f065e71U, 0x1051bd6eU,
-
- 0x8af93e21U, 0x063d96ddU, 0x05aedd3eU, 0xbd464de6U,
- 0x8db59154U, 0x5d0571c4U, 0xd46f0406U, 0x15ff6050U,
- 0xfb241998U, 0xe997d6bdU, 0x43cc8940U, 0x9e7767d9U,
- 0x42bdb0e8U, 0x8b880789U, 0x5b38e719U, 0xeedb79c8U,
- 0x0a47a17cU, 0x0fe97c42U, 0x1ec9f884U, 0x00000000U,
- 0x86830980U, 0xed48322bU, 0x70ac1e11U, 0x724e6c5aU,
- 0xfffbfd0eU, 0x38560f85U, 0xd51e3daeU, 0x3927362dU,
- 0xd9640a0fU, 0xa621685cU, 0x54d19b5bU, 0x2e3a2436U,
- 0x67b10c0aU, 0xe70f9357U, 0x96d2b4eeU, 0x919e1b9bU,
- 0xc54f80c0U, 0x20a261dcU, 0x4b695a77U, 0x1a161c12U,
- 0xba0ae293U, 0x2ae5c0a0U, 0xe0433c22U, 0x171d121bU,
- 0x0d0b0e09U, 0xc7adf28bU, 0xa8b92db6U, 0xa9c8141eU,
- 0x198557f1U, 0x074caf75U, 0xddbbee99U, 0x60fda37fU,
- 0x269ff701U, 0xf5bc5c72U, 0x3bc54466U, 0x7e345bfbU,
- 0x29768b43U, 0xc6dccb23U, 0xfc68b6edU, 0xf163b8e4U,
- 0xdccad731U, 0x85104263U, 0x22401397U, 0x112084c6U,
- 0x247d854aU, 0x3df8d2bbU, 0x3211aef9U, 0xa16dc729U,
- 0x2f4b1d9eU, 0x30f3dcb2U, 0x52ec0d86U, 0xe3d077c1U,
- 0x166c2bb3U, 0xb999a970U, 0x48fa1194U, 0x642247e9U,
- 0x8cc4a8fcU, 0x3f1aa0f0U, 0x2cd8567dU, 0x90ef2233U,
- 0x4ec78749U, 0xd1c1d938U, 0xa2fe8ccaU, 0x0b3698d4U,
- 0x81cfa6f5U, 0xde28a57aU, 0x8e26dab7U, 0xbfa43fadU,
- 0x9de42c3aU, 0x920d5078U, 0xcc9b6a5fU, 0x4662547eU,
- 0x13c2f68dU, 0xb8e890d8U, 0xf75e2e39U, 0xaff582c3U,
- 0x80be9f5dU, 0x937c69d0U, 0x2da96fd5U, 0x12b3cf25U,
- 0x993bc8acU, 0x7da71018U, 0x636ee89cU, 0xbb7bdb3bU,
- 0x7809cd26U, 0x18f46e59U, 0xb701ec9aU, 0x9aa8834fU,
- 0x6e65e695U, 0xe67eaaffU, 0xcf0821bcU, 0xe8e6ef15U,
- 0x9bd9bae7U, 0x36ce4a6fU, 0x09d4ea9fU, 0x7cd629b0U,
- 0xb2af31a4U, 0x23312a3fU, 0x9430c6a5U, 0x66c035a2U,
- 0xbc37744eU, 0xcaa6fc82U, 0xd0b0e090U, 0xd81533a7U,
- 0x984af104U, 0xdaf741ecU, 0x500e7fcdU, 0xf62f1791U,
- 0xd68d764dU, 0xb04d43efU, 0x4d54ccaaU, 0x04dfe496U,
- 0xb5e39ed1U, 0x881b4c6aU, 0x1fb8c12cU, 0x517f4665U,
- 0xea049d5eU, 0x355d018cU, 0x7473fa87U, 0x412efb0bU,
- 0x1d5ab367U, 0xd25292dbU, 0x5633e910U, 0x47136dd6U,
- 0x618c9ad7U, 0x0c7a37a1U, 0x148e59f8U, 0x3c89eb13U,
- 0x27eecea9U, 0xc935b761U, 0xe5ede11cU, 0xb13c7a47U,
- 0xdf599cd2U, 0x733f55f2U, 0xce791814U, 0x37bf73c7U,
- 0xcdea53f7U, 0xaa5b5ffdU, 0x6f14df3dU, 0xdb867844U,
- 0xf381caafU, 0xc43eb968U, 0x342c3824U, 0x405fc2a3U,
- 0xc372161dU, 0x250cbce2U, 0x498b283cU, 0x9541ff0dU,
- 0x017139a8U, 0xb3de080cU, 0xe49cd8b4U, 0xc1906456U,
- 0x84617bcbU, 0xb670d532U, 0x5c74486cU, 0x5742d0b8U,
-};
-
-static const uint32 Td3[256]=
-{
- 0xf4a75051U, 0x4165537eU, 0x17a4c31aU, 0x275e963aU,
- 0xab6bcb3bU, 0x9d45f11fU, 0xfa58abacU, 0xe303934bU,
- 0x30fa5520U, 0x766df6adU, 0xcc769188U, 0x024c25f5U,
- 0xe5d7fc4fU, 0x2acbd7c5U, 0x35448026U, 0x62a38fb5U,
- 0xb15a49deU, 0xba1b6725U, 0xea0e9845U, 0xfec0e15dU,
- 0x2f7502c3U, 0x4cf01281U, 0x4697a38dU, 0xd3f9c66bU,
- 0x8f5fe703U, 0x929c9515U, 0x6d7aebbfU, 0x5259da95U,
- 0xbe832dd4U, 0x7421d358U, 0xe0692949U, 0xc9c8448eU,
- 0xc2896a75U, 0x8e7978f4U, 0x583e6b99U, 0xb971dd27U,
- 0xe14fb6beU, 0x88ad17f0U, 0x20ac66c9U, 0xce3ab47dU,
- 0xdf4a1863U, 0x1a3182e5U, 0x51336097U, 0x537f4562U,
- 0x6477e0b1U, 0x6bae84bbU, 0x81a01cfeU, 0x082b94f9U,
- 0x48685870U, 0x45fd198fU, 0xde6c8794U, 0x7bf8b752U,
- 0x73d323abU, 0x4b02e272U, 0x1f8f57e3U, 0x55ab2a66U,
- 0xeb2807b2U, 0xb5c2032fU, 0xc57b9a86U, 0x3708a5d3U,
- 0x2887f230U, 0xbfa5b223U, 0x036aba02U, 0x16825cedU,
- 0xcf1c2b8aU, 0x79b492a7U, 0x07f2f0f3U, 0x69e2a14eU,
- 0xdaf4cd65U, 0x05bed506U, 0x34621fd1U, 0xa6fe8ac4U,
- 0x2e539d34U, 0xf355a0a2U, 0x8ae13205U, 0xf6eb75a4U,
- 0x83ec390bU, 0x60efaa40U, 0x719f065eU, 0x6e1051bdU,
- 0x218af93eU, 0xdd063d96U, 0x3e05aeddU, 0xe6bd464dU,
- 0x548db591U, 0xc45d0571U, 0x06d46f04U, 0x5015ff60U,
- 0x98fb2419U, 0xbde997d6U, 0x4043cc89U, 0xd99e7767U,
- 0xe842bdb0U, 0x898b8807U, 0x195b38e7U, 0xc8eedb79U,
- 0x7c0a47a1U, 0x420fe97cU, 0x841ec9f8U, 0x00000000U,
- 0x80868309U, 0x2bed4832U, 0x1170ac1eU, 0x5a724e6cU,
- 0x0efffbfdU, 0x8538560fU, 0xaed51e3dU, 0x2d392736U,
- 0x0fd9640aU, 0x5ca62168U, 0x5b54d19bU, 0x362e3a24U,
- 0x0a67b10cU, 0x57e70f93U, 0xee96d2b4U, 0x9b919e1bU,
- 0xc0c54f80U, 0xdc20a261U, 0x774b695aU, 0x121a161cU,
- 0x93ba0ae2U, 0xa02ae5c0U, 0x22e0433cU, 0x1b171d12U,
- 0x090d0b0eU, 0x8bc7adf2U, 0xb6a8b92dU, 0x1ea9c814U,
- 0xf1198557U, 0x75074cafU, 0x99ddbbeeU, 0x7f60fda3U,
- 0x01269ff7U, 0x72f5bc5cU, 0x663bc544U, 0xfb7e345bU,
- 0x4329768bU, 0x23c6dccbU, 0xedfc68b6U, 0xe4f163b8U,
- 0x31dccad7U, 0x63851042U, 0x97224013U, 0xc6112084U,
- 0x4a247d85U, 0xbb3df8d2U, 0xf93211aeU, 0x29a16dc7U,
- 0x9e2f4b1dU, 0xb230f3dcU, 0x8652ec0dU, 0xc1e3d077U,
- 0xb3166c2bU, 0x70b999a9U, 0x9448fa11U, 0xe9642247U,
- 0xfc8cc4a8U, 0xf03f1aa0U, 0x7d2cd856U, 0x3390ef22U,
- 0x494ec787U, 0x38d1c1d9U, 0xcaa2fe8cU, 0xd40b3698U,
- 0xf581cfa6U, 0x7ade28a5U, 0xb78e26daU, 0xadbfa43fU,
- 0x3a9de42cU, 0x78920d50U, 0x5fcc9b6aU, 0x7e466254U,
- 0x8d13c2f6U, 0xd8b8e890U, 0x39f75e2eU, 0xc3aff582U,
- 0x5d80be9fU, 0xd0937c69U, 0xd52da96fU, 0x2512b3cfU,
- 0xac993bc8U, 0x187da710U, 0x9c636ee8U, 0x3bbb7bdbU,
- 0x267809cdU, 0x5918f46eU, 0x9ab701ecU, 0x4f9aa883U,
- 0x956e65e6U, 0xffe67eaaU, 0xbccf0821U, 0x15e8e6efU,
- 0xe79bd9baU, 0x6f36ce4aU, 0x9f09d4eaU, 0xb07cd629U,
- 0xa4b2af31U, 0x3f23312aU, 0xa59430c6U, 0xa266c035U,
- 0x4ebc3774U, 0x82caa6fcU, 0x90d0b0e0U, 0xa7d81533U,
- 0x04984af1U, 0xecdaf741U, 0xcd500e7fU, 0x91f62f17U,
- 0x4dd68d76U, 0xefb04d43U, 0xaa4d54ccU, 0x9604dfe4U,
- 0xd1b5e39eU, 0x6a881b4cU, 0x2c1fb8c1U, 0x65517f46U,
- 0x5eea049dU, 0x8c355d01U, 0x877473faU, 0x0b412efbU,
- 0x671d5ab3U, 0xdbd25292U, 0x105633e9U, 0xd647136dU,
- 0xd7618c9aU, 0xa10c7a37U, 0xf8148e59U, 0x133c89ebU,
- 0xa927eeceU, 0x61c935b7U, 0x1ce5ede1U, 0x47b13c7aU,
- 0xd2df599cU, 0xf2733f55U, 0x14ce7918U, 0xc737bf73U,
- 0xf7cdea53U, 0xfdaa5b5fU, 0x3d6f14dfU, 0x44db8678U,
- 0xaff381caU, 0x68c43eb9U, 0x24342c38U, 0xa3405fc2U,
- 0x1dc37216U, 0xe2250cbcU, 0x3c498b28U, 0x0d9541ffU,
- 0xa8017139U, 0x0cb3de08U, 0xb4e49cd8U, 0x56c19064U,
- 0xcb84617bU, 0x32b670d5U, 0x6c5c7448U, 0xb85742d0U,
-};
-
-static const uint32 Td4[256]=
-{
- 0x52525252U, 0x09090909U, 0x6a6a6a6aU, 0xd5d5d5d5U,
- 0x30303030U, 0x36363636U, 0xa5a5a5a5U, 0x38383838U,
- 0xbfbfbfbfU, 0x40404040U, 0xa3a3a3a3U, 0x9e9e9e9eU,
- 0x81818181U, 0xf3f3f3f3U, 0xd7d7d7d7U, 0xfbfbfbfbU,
- 0x7c7c7c7cU, 0xe3e3e3e3U, 0x39393939U, 0x82828282U,
- 0x9b9b9b9bU, 0x2f2f2f2fU, 0xffffffffU, 0x87878787U,
- 0x34343434U, 0x8e8e8e8eU, 0x43434343U, 0x44444444U,
- 0xc4c4c4c4U, 0xdedededeU, 0xe9e9e9e9U, 0xcbcbcbcbU,
- 0x54545454U, 0x7b7b7b7bU, 0x94949494U, 0x32323232U,
- 0xa6a6a6a6U, 0xc2c2c2c2U, 0x23232323U, 0x3d3d3d3dU,
- 0xeeeeeeeeU, 0x4c4c4c4cU, 0x95959595U, 0x0b0b0b0bU,
- 0x42424242U, 0xfafafafaU, 0xc3c3c3c3U, 0x4e4e4e4eU,
- 0x08080808U, 0x2e2e2e2eU, 0xa1a1a1a1U, 0x66666666U,
- 0x28282828U, 0xd9d9d9d9U, 0x24242424U, 0xb2b2b2b2U,
- 0x76767676U, 0x5b5b5b5bU, 0xa2a2a2a2U, 0x49494949U,
- 0x6d6d6d6dU, 0x8b8b8b8bU, 0xd1d1d1d1U, 0x25252525U,
- 0x72727272U, 0xf8f8f8f8U, 0xf6f6f6f6U, 0x64646464U,
- 0x86868686U, 0x68686868U, 0x98989898U, 0x16161616U,
- 0xd4d4d4d4U, 0xa4a4a4a4U, 0x5c5c5c5cU, 0xccccccccU,
- 0x5d5d5d5dU, 0x65656565U, 0xb6b6b6b6U, 0x92929292U,
- 0x6c6c6c6cU, 0x70707070U, 0x48484848U, 0x50505050U,
- 0xfdfdfdfdU, 0xededededU, 0xb9b9b9b9U, 0xdadadadaU,
- 0x5e5e5e5eU, 0x15151515U, 0x46464646U, 0x57575757U,
- 0xa7a7a7a7U, 0x8d8d8d8dU, 0x9d9d9d9dU, 0x84848484U,
- 0x90909090U, 0xd8d8d8d8U, 0xababababU, 0x00000000U,
- 0x8c8c8c8cU, 0xbcbcbcbcU, 0xd3d3d3d3U, 0x0a0a0a0aU,
- 0xf7f7f7f7U, 0xe4e4e4e4U, 0x58585858U, 0x05050505U,
- 0xb8b8b8b8U, 0xb3b3b3b3U, 0x45454545U, 0x06060606U,
- 0xd0d0d0d0U, 0x2c2c2c2cU, 0x1e1e1e1eU, 0x8f8f8f8fU,
- 0xcacacacaU, 0x3f3f3f3fU, 0x0f0f0f0fU, 0x02020202U,
- 0xc1c1c1c1U, 0xafafafafU, 0xbdbdbdbdU, 0x03030303U,
- 0x01010101U, 0x13131313U, 0x8a8a8a8aU, 0x6b6b6b6bU,
- 0x3a3a3a3aU, 0x91919191U, 0x11111111U, 0x41414141U,
- 0x4f4f4f4fU, 0x67676767U, 0xdcdcdcdcU, 0xeaeaeaeaU,
- 0x97979797U, 0xf2f2f2f2U, 0xcfcfcfcfU, 0xcecececeU,
- 0xf0f0f0f0U, 0xb4b4b4b4U, 0xe6e6e6e6U, 0x73737373U,
- 0x96969696U, 0xacacacacU, 0x74747474U, 0x22222222U,
- 0xe7e7e7e7U, 0xadadadadU, 0x35353535U, 0x85858585U,
- 0xe2e2e2e2U, 0xf9f9f9f9U, 0x37373737U, 0xe8e8e8e8U,
- 0x1c1c1c1cU, 0x75757575U, 0xdfdfdfdfU, 0x6e6e6e6eU,
- 0x47474747U, 0xf1f1f1f1U, 0x1a1a1a1aU, 0x71717171U,
- 0x1d1d1d1dU, 0x29292929U, 0xc5c5c5c5U, 0x89898989U,
- 0x6f6f6f6fU, 0xb7b7b7b7U, 0x62626262U, 0x0e0e0e0eU,
- 0xaaaaaaaaU, 0x18181818U, 0xbebebebeU, 0x1b1b1b1bU,
- 0xfcfcfcfcU, 0x56565656U, 0x3e3e3e3eU, 0x4b4b4b4bU,
- 0xc6c6c6c6U, 0xd2d2d2d2U, 0x79797979U, 0x20202020U,
- 0x9a9a9a9aU, 0xdbdbdbdbU, 0xc0c0c0c0U, 0xfefefefeU,
- 0x78787878U, 0xcdcdcdcdU, 0x5a5a5a5aU, 0xf4f4f4f4U,
- 0x1f1f1f1fU, 0xddddddddU, 0xa8a8a8a8U, 0x33333333U,
- 0x88888888U, 0x07070707U, 0xc7c7c7c7U, 0x31313131U,
- 0xb1b1b1b1U, 0x12121212U, 0x10101010U, 0x59595959U,
- 0x27272727U, 0x80808080U, 0xececececU, 0x5f5f5f5fU,
- 0x60606060U, 0x51515151U, 0x7f7f7f7fU, 0xa9a9a9a9U,
- 0x19191919U, 0xb5b5b5b5U, 0x4a4a4a4aU, 0x0d0d0d0dU,
- 0x2d2d2d2dU, 0xe5e5e5e5U, 0x7a7a7a7aU, 0x9f9f9f9fU,
- 0x93939393U, 0xc9c9c9c9U, 0x9c9c9c9cU, 0xefefefefU,
- 0xa0a0a0a0U, 0xe0e0e0e0U, 0x3b3b3b3bU, 0x4d4d4d4dU,
- 0xaeaeaeaeU, 0x2a2a2a2aU, 0xf5f5f5f5U, 0xb0b0b0b0U,
- 0xc8c8c8c8U, 0xebebebebU, 0xbbbbbbbbU, 0x3c3c3c3cU,
- 0x83838383U, 0x53535353U, 0x99999999U, 0x61616161U,
- 0x17171717U, 0x2b2b2b2bU, 0x04040404U, 0x7e7e7e7eU,
- 0xbabababaU, 0x77777777U, 0xd6d6d6d6U, 0x26262626U,
- 0xe1e1e1e1U, 0x69696969U, 0x14141414U, 0x63636363U,
- 0x55555555U, 0x21212121U, 0x0c0c0c0cU, 0x7d7d7d7dU,
-};
-
-
-/* for 128-bit blocks, Rijndael never uses more than 10 rcon values */
-static const uint32 rcon[]=
-{
- 0x01000000, 0x02000000, 0x04000000, 0x08000000,
- 0x10000000, 0x20000000, 0x40000000, 0x80000000,
- 0x1B000000, 0x36000000,
-};
-
-#if defined(_MSC_VER) && defined(__i386__)
-
-#define RJ_SWAP(x) (_lrotl(x, 8) & 0x00ff00ff | _lrotr(x, 8) & 0xff00ff00)
-#define GETuint32(p) RJ_SWAP(*((uint32 *)(p)))
-#define PUTuint32(ct, st) { *((uint32 *)(ct)) = RJ_SWAP((st)); }
-
-#else
-
-#define GETuint32(pt) (((uint32)(pt)[0] << 24) ^ ((uint32)(pt)[1] << 16)\
- ^ ((uint32)(pt)[2] << 8) ^ ((uint32)(pt)[3]))
-#define PUTuint32(ct, st) { (ct)[0] = (uint8)((st) >> 24); (ct)[1]\
-= (uint8)((st) >> 16); (ct)[2] = (uint8)((st) >> 8); (ct)[3] = (uint8)(st); }
-
-#endif /* defined(_MSC_VER) && defined(__i386__) */
-
-
-/*
- Expand the cipher key into the encryption key schedule.
-
- RETURN
- The number of rounds for the given cipher key size.
-*/
-
-int rijndaelKeySetupEnc(uint32 rk[/*4*(Nr + 1)*/], const uint8 cipherKey[],
- int keyBits)
-{
- int i = 0;
- uint32 temp;
-
- rk[0] = GETuint32(cipherKey );
- rk[1] = GETuint32(cipherKey + 4);
- rk[2] = GETuint32(cipherKey + 8);
- rk[3] = GETuint32(cipherKey + 12);
- if (keyBits == 128)
- {
- for (;;)
- {
- temp = rk[3];
- rk[4] = (rk[0] ^
- (Te4[(temp >> 16) & 0xff] & 0xff000000) ^
- (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^
- (Te4[(temp ) & 0xff] & 0x0000ff00) ^
- (Te4[(temp >> 24) ] & 0x000000ff) ^
- rcon[i]);
- rk[5] = rk[1] ^ rk[4];
- rk[6] = rk[2] ^ rk[5];
- rk[7] = rk[3] ^ rk[6];
- if (++i == 10)
- return 10;
- rk += 4;
- }
- }
- rk[4] = GETuint32(cipherKey + 16);
- rk[5] = GETuint32(cipherKey + 20);
- if (keyBits == 192)
- {
- for (;;)
- {
- temp = rk[ 5];
- rk[ 6] = (rk[ 0] ^
- (Te4[(temp >> 16) & 0xff] & 0xff000000) ^
- (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^
- (Te4[(temp ) & 0xff] & 0x0000ff00) ^
- (Te4[(temp >> 24) ] & 0x000000ff) ^
- rcon[i]);
- rk[ 7] = rk[ 1] ^ rk[ 6];
- rk[ 8] = rk[ 2] ^ rk[ 7];
- rk[ 9] = rk[ 3] ^ rk[ 8];
- if (++i == 8)
- {
- return 12;
- }
- rk[10] = rk[ 4] ^ rk[ 9];
- rk[11] = rk[ 5] ^ rk[10];
- rk += 6;
- }
- }
- rk[6] = GETuint32(cipherKey + 24);
- rk[7] = GETuint32(cipherKey + 28);
- if (keyBits == 256)
- {
- for (;;)
- {
- temp = rk[ 7];
- rk[ 8] = (rk[ 0] ^
- (Te4[(temp >> 16) & 0xff] & 0xff000000) ^
- (Te4[(temp >> 8) & 0xff] & 0x00ff0000) ^
- (Te4[(temp ) & 0xff] & 0x0000ff00) ^
- (Te4[(temp >> 24) ] & 0x000000ff) ^
- rcon[i]);
- rk[ 9] = rk[ 1] ^ rk[ 8];
- rk[10] = rk[ 2] ^ rk[ 9];
- rk[11] = rk[ 3] ^ rk[10];
- if (++i == 7)
- {
- return 14;
- }
- temp = rk[11];
- rk[12] = (rk[ 4] ^
- (Te4[(temp >> 24) ] & 0xff000000) ^
- (Te4[(temp >> 16) & 0xff] & 0x00ff0000) ^
- (Te4[(temp >> 8) & 0xff] & 0x0000ff00) ^
- (Te4[(temp ) & 0xff] & 0x000000ff));
- rk[13] = rk[ 5] ^ rk[12];
- rk[14] = rk[ 6] ^ rk[13];
- rk[15] = rk[ 7] ^ rk[14];
- rk += 8;
- }
- }
- return 0;
-}
-
-
-/*
- Expand the cipher key into the decryption key schedule.
-
- RETURN
- The number of rounds for the given cipher key size.
-*/
-
-int rijndaelKeySetupDec(uint32 rk[/*4*(Nr + 1)*/], const uint8 cipherKey[],
- int keyBits)
-{
- int nr, i, j;
- uint32 temp;
-
- /* expand the cipher key: */
- nr = rijndaelKeySetupEnc(rk, cipherKey, keyBits);
- /* invert the order of the round keys: */
- for (i = 0, j = 4*nr; i < j; i += 4, j -= 4)
- {
- temp = rk[i ]; rk[i ] = rk[j ]; rk[j ] = temp;
- temp = rk[i + 1]; rk[i + 1] = rk[j + 1]; rk[j + 1] = temp;
- temp = rk[i + 2]; rk[i + 2] = rk[j + 2]; rk[j + 2] = temp;
- temp = rk[i + 3]; rk[i + 3] = rk[j + 3]; rk[j + 3] = temp;
- }
- /*
- Apply the inverse MixColumn transform to all round keys but the first
- and the last:
- */
- for (i = 1; i < nr; i++)
- {
- rk += 4;
-
- rk[0]= (
- Td0[Te4[(rk[0] >> 24) ] & 0xff] ^
- Td1[Te4[(rk[0] >> 16) & 0xff] & 0xff] ^
- Td2[Te4[(rk[0] >> 8) & 0xff] & 0xff] ^
- Td3[Te4[(rk[0] ) & 0xff] & 0xff]);
-
- rk[1]= (Td0[Te4[(rk[1] >> 24) ] & 0xff] ^
- Td1[Te4[(rk[1] >> 16) & 0xff] & 0xff] ^
- Td2[Te4[(rk[1] >> 8) & 0xff] & 0xff] ^
- Td3[Te4[(rk[1] ) & 0xff] & 0xff]);
-
- rk[2]= (Td0[Te4[(rk[2] >> 24) ] & 0xff] ^
- Td1[Te4[(rk[2] >> 16) & 0xff] & 0xff] ^
- Td2[Te4[(rk[2] >> 8) & 0xff] & 0xff] ^
- Td3[Te4[(rk[2] ) & 0xff] & 0xff]);
-
- rk[3]= (Td0[Te4[(rk[3] >> 24) ] & 0xff] ^
- Td1[Te4[(rk[3] >> 16) & 0xff] & 0xff] ^
- Td2[Te4[(rk[3] >> 8) & 0xff] & 0xff] ^
- Td3[Te4[(rk[3] ) & 0xff] & 0xff]);
- }
- return nr;
-}
-
-
-void rijndaelEncrypt(const uint32 rk[/*4*(Nr + 1)*/], int Nr,
- const uint8 pt[16], uint8 ct[16])
-{
- uint32 s0, s1, s2, s3, t0, t1, t2, t3;
-#ifndef FULL_UNROLL
- int r;
-#endif /* FULL_UNROLL */
-
- /* map byte array block to cipher state and add initial round key: */
- s0 = GETuint32(pt ) ^ rk[0];
- s1 = GETuint32(pt + 4) ^ rk[1];
- s2 = GETuint32(pt + 8) ^ rk[2];
- s3 = GETuint32(pt + 12) ^ rk[3];
-
-#ifdef FULL_UNROLL
- /* round 1: */
- t0= (Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff]
- ^ Te3[s3 & 0xff] ^ rk[ 4]);
- t1= (Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff]
- ^ Te3[s0 & 0xff] ^ rk[ 5]);
- t2= (Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff]
- ^ Te3[s1 & 0xff] ^ rk[ 6]);
- t3= (Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff]
- ^ Te3[s2 & 0xff] ^ rk[ 7]);
-
- /* round 2: */
- s0= (Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff]
- ^ Te3[t3 & 0xff] ^ rk[ 8]);
- s1= (Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff]
- ^ Te3[t0 & 0xff] ^ rk[ 9]);
- s2= (Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff]
- ^ Te3[t1 & 0xff] ^ rk[10]);
- s3= (Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff]
- ^ Te3[t2 & 0xff] ^ rk[11]);
-
- /* round 3: */
- t0= (Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff]
- ^ Te3[s3 & 0xff] ^ rk[12]);
- t1= (Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff]
- ^ Te3[s0 & 0xff] ^ rk[13]);
- t2= (Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff]
- ^ Te3[s1 & 0xff] ^ rk[14]);
- t3= (Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff]
- ^ Te3[s2 & 0xff] ^ rk[15]);
-
- /* round 4: */
- s0= (Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff]
- ^ Te3[t3 & 0xff] ^ rk[16]);
- s1= (Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff]
- ^ Te3[t0 & 0xff] ^ rk[17]);
- s2= (Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff]
- ^ Te3[t1 & 0xff] ^ rk[18]);
- s3= (Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff]
- ^ Te3[t2 & 0xff] ^ rk[19]);
-
- /* round 5: */
- t0= (Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff]
- ^ Te3[s3 & 0xff] ^ rk[20]);
- t1= (Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff]
- ^ Te3[s0 & 0xff] ^ rk[21]);
- t2= (Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff]
- ^ Te3[s1 & 0xff] ^ rk[22]);
- t3= (Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff]
- ^ Te3[s2 & 0xff] ^ rk[23]);
-
- /* round 6: */
- s0= (Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff]
- ^ Te3[t3 & 0xff] ^ rk[24]);
- s1= (Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff]
- ^ Te3[t0 & 0xff] ^ rk[25]);
- s2= (Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff]
- ^ Te3[t1 & 0xff] ^ rk[26]);
- s3= (Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff]
- ^ Te3[t2 & 0xff] ^ rk[27]);
-
- /* round 7: */
- t0= (Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff]
- ^ Te3[s3 & 0xff] ^ rk[28]);
- t1= (Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff]
- ^ Te3[s0 & 0xff] ^ rk[29]);
- t2= (Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff]
- ^ Te3[s1 & 0xff] ^ rk[30]);
- t3= (Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff]
- ^ Te3[s2 & 0xff] ^ rk[31]);
-
- /* round 8: */
- s0= (Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff]
- ^ Te3[t3 & 0xff] ^ rk[32]);
- s1= (Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff]
- ^ Te3[t0 & 0xff] ^ rk[33]);
- s2= (Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff]
- ^ Te3[t1 & 0xff] ^ rk[34]);
- s3= (Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff]
- ^ Te3[t2 & 0xff] ^ rk[35]);
-
- /* round 9: */
- t0= (Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff]
- ^ Te3[s3 & 0xff] ^ rk[36]);
- t1= (Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff]
- ^ Te3[s0 & 0xff] ^ rk[37]);
- t2= (Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff]
- ^ Te3[s1 & 0xff] ^ rk[38]);
- t3= (Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff]
- ^ Te3[s2 & 0xff] ^ rk[39]);
-
- if (Nr > 10)
- {
- /* round 10: */
- s0= (Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff]
- ^ Te3[t3 & 0xff] ^ rk[40]);
- s1= (Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff]
- ^ Te3[t0 & 0xff] ^ rk[41]);
- s2= (Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff]
- ^ Te3[t1 & 0xff] ^ rk[42]);
- s3= (Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff]
- ^ Te3[t2 & 0xff] ^ rk[43]);
-
- /* round 11: */
- t0= (Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff]
- ^ Te3[s3 & 0xff] ^ rk[44]);
- t1= (Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff]
- ^ Te3[s0 & 0xff] ^ rk[45]);
- t2= (Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff]
- ^ Te3[s1 & 0xff] ^ rk[46]);
- t3= (Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff]
- ^ Te3[s2 & 0xff] ^ rk[47]);
-
- if (Nr > 12)
- {
- /* round 12: */
- s0= (Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff]
- ^ Te3[t3 & 0xff] ^ rk[48]);
- s1= (Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff]
- ^ Te3[t0 & 0xff] ^ rk[49]);
- s2= (Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff]
- ^ Te3[t1 & 0xff] ^ rk[50]);
- s3= (Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff]
- ^ Te3[t2 & 0xff] ^ rk[51]);
-
- /* round 13: */
- t0= (Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff]
- ^ Te3[s3 & 0xff] ^ rk[52]);
- t1= (Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff]
- ^ Te3[s0 & 0xff] ^ rk[53]);
- t2= (Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff]
- ^ Te3[s1 & 0xff] ^ rk[54]);
- t3= (Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff]
- ^ Te3[s2 & 0xff] ^ rk[55]);
- }
- }
- rk += Nr << 2;
-#else /* !FULL_UNROLL */
-
- /* Nr - 1 full rounds: */
-
- r = Nr >> 1;
- for (;;)
- {
- t0= (Te0[(s0 >> 24) ] ^
- Te1[(s1 >> 16) & 0xff] ^
- Te2[(s2 >> 8) & 0xff] ^
- Te3[(s3 ) & 0xff] ^
- rk[4]);
-
- t1= (Te0[(s1 >> 24) ] ^
- Te1[(s2 >> 16) & 0xff] ^
- Te2[(s3 >> 8) & 0xff] ^
- Te3[(s0 ) & 0xff] ^
- rk[5]);
-
- t2= (Te0[(s2 >> 24) ] ^
- Te1[(s3 >> 16) & 0xff] ^
- Te2[(s0 >> 8) & 0xff] ^
- Te3[(s1 ) & 0xff] ^
- rk[6]);
-
- t3= (Te0[(s3 >> 24) ] ^
- Te1[(s0 >> 16) & 0xff] ^
- Te2[(s1 >> 8) & 0xff] ^
- Te3[(s2 ) & 0xff] ^
- rk[7]);
-
- rk+= 8;
- if (--r == 0)
- break;
-
- s0= (Te0[(t0 >> 24) ] ^
- Te1[(t1 >> 16) & 0xff] ^
- Te2[(t2 >> 8) & 0xff] ^
- Te3[(t3 ) & 0xff] ^
- rk[0]);
-
- s1= (Te0[(t1 >> 24) ] ^
- Te1[(t2 >> 16) & 0xff] ^
- Te2[(t3 >> 8) & 0xff] ^
- Te3[(t0 ) & 0xff] ^
- rk[1]);
-
- s2= (Te0[(t2 >> 24) ] ^
- Te1[(t3 >> 16) & 0xff] ^
- Te2[(t0 >> 8) & 0xff] ^
- Te3[(t1 ) & 0xff] ^
- rk[2]);
-
- s3= (Te0[(t3 >> 24) ] ^
- Te1[(t0 >> 16) & 0xff] ^
- Te2[(t1 >> 8) & 0xff] ^
- Te3[(t2 ) & 0xff] ^
- rk[3]);
- }
-#endif /* FULL_UNROLL */
-
- /* Apply last round and map cipher state to byte array block: */
- s0= ((Te4[(t0 >> 24) ] & 0xff000000) ^
- (Te4[(t1 >> 16) & 0xff] & 0x00ff0000) ^
- (Te4[(t2 >> 8) & 0xff] & 0x0000ff00) ^
- (Te4[(t3 ) & 0xff] & 0x000000ff) ^
- rk[0]);
- PUTuint32(ct , s0);
-
- s1= ((Te4[(t1 >> 24) ] & 0xff000000) ^
- (Te4[(t2 >> 16) & 0xff] & 0x00ff0000) ^
- (Te4[(t3 >> 8) & 0xff] & 0x0000ff00) ^
- (Te4[(t0 ) & 0xff] & 0x000000ff) ^
- rk[1]);
- PUTuint32(ct + 4, s1);
-
- s2= ((Te4[(t2 >> 24) ] & 0xff000000) ^
- (Te4[(t3 >> 16) & 0xff] & 0x00ff0000) ^
- (Te4[(t0 >> 8) & 0xff] & 0x0000ff00) ^
- (Te4[(t1 ) & 0xff] & 0x000000ff) ^
- rk[2]);
- PUTuint32(ct + 8, s2);
-
- s3= ((Te4[(t3 >> 24) ] & 0xff000000) ^
- (Te4[(t0 >> 16) & 0xff] & 0x00ff0000) ^
- (Te4[(t1 >> 8) & 0xff] & 0x0000ff00) ^
- (Te4[(t2 ) & 0xff] & 0x000000ff) ^
- rk[3]);
- PUTuint32(ct + 12, s3);
-}
-
-
-void rijndaelDecrypt(const uint32 rk[/*4*(Nr + 1)*/], int Nr,
- const uint8 ct[16], uint8 pt[16])
-{
- uint32 s0, s1, s2, s3, t0, t1, t2, t3;
-#ifndef FULL_UNROLL
- int r;
-#endif /* FULL_UNROLL */
-
- /* Map byte array block to cipher state and add initial round key: */
-
- s0 = GETuint32(ct ) ^ rk[0];
- s1 = GETuint32(ct + 4) ^ rk[1];
- s2 = GETuint32(ct + 8) ^ rk[2];
- s3 = GETuint32(ct + 12) ^ rk[3];
-
-#ifdef FULL_UNROLL
- /* round 1: */
- t0= (Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff]
- ^ Td3[s1 & 0xff] ^ rk[ 4]);
- t1= (Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff]
- ^ Td3[s2 & 0xff] ^ rk[ 5]);
- t2= (Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff]
- ^ Td3[s3 & 0xff] ^ rk[ 6]);
- t3= (Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff]
- ^ Td3[s0 & 0xff] ^ rk[ 7]);
-
- /* round 2: */
- s0= (Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff]
- ^ Td3[t1 & 0xff] ^ rk[ 8]);
- s1= (Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff]
- ^ Td3[t2 & 0xff] ^ rk[ 9]);
- s2= (Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff]
- ^ Td3[t3 & 0xff] ^ rk[10]);
- s3= (Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff]
- ^ Td3[t0 & 0xff] ^ rk[11]);
-
- /* round 3: */
- t0= (Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff]
- ^ Td3[s1 & 0xff] ^ rk[12]);
- t1= (Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff]
- ^ Td3[s2 & 0xff] ^ rk[13]);
- t2= (Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff]
- ^ Td3[s3 & 0xff] ^ rk[14]);
- t3= (Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff]
- ^ Td3[s0 & 0xff] ^ rk[15]);
-
- /* round 4: */
- s0= (Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff]
- ^ Td3[t1 & 0xff] ^ rk[16]);
- s1= (Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff]
- ^ Td3[t2 & 0xff] ^ rk[17]);
- s2= (Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff]
- ^ Td3[t3 & 0xff] ^ rk[18]);
- s3= (Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff]
- ^ Td3[t0 & 0xff] ^ rk[19]);
-
- /* round 5: */
- t0= (Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff]
- ^ Td3[s1 & 0xff] ^ rk[20]);
- t1= (Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff]
- ^ Td3[s2 & 0xff] ^ rk[21]);
- t2= (Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff]
- ^ Td3[s3 & 0xff] ^ rk[22]);
- t3= (Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff]
- ^ Td3[s0 & 0xff] ^ rk[23]);
-
- /* round 6: */
- s0= (Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff]
- ^ Td3[t1 & 0xff] ^ rk[24]);
- s1= (Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff]
- ^ Td3[t2 & 0xff] ^ rk[25]);
- s2= (Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff]
- ^ Td3[t3 & 0xff] ^ rk[26]);
- s3= (Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff]
- ^ Td3[t0 & 0xff] ^ rk[27]);
-
- /* round 7: */
- t0= (Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff]
- ^ Td3[s1 & 0xff] ^ rk[28]);
- t1= (Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff]
- ^ Td3[s2 & 0xff] ^ rk[29]);
- t2= (Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff]
- ^ Td3[s3 & 0xff] ^ rk[30]);
- t3= (Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff]
- ^ Td3[s0 & 0xff] ^ rk[31]);
-
- /* round 8: */
- s0= (Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff]
- ^ Td3[t1 & 0xff] ^ rk[32]);
- s1= (Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff]
- ^ Td3[t2 & 0xff] ^ rk[33]);
- s2= (Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff]
- ^ Td3[t3 & 0xff] ^ rk[34]);
- s3= (Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff]
- ^ Td3[t0 & 0xff] ^ rk[35]);
-
- /* round 9: */
- t0= (Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff]
- ^ Td3[s1 & 0xff] ^ rk[36]);
- t1= (Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff]
- ^ Td3[s2 & 0xff] ^ rk[37]);
- t2= (Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff]
- ^ Td3[s3 & 0xff] ^ rk[38]);
- t3= (Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff]
- ^ Td3[s0 & 0xff] ^ rk[39]);
-
- if (Nr > 10)
- {
- /* round 10: */
- s0= (Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff]
- ^ Td3[t1 & 0xff] ^ rk[40]);
- s1= (Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff]
- ^ Td3[t2 & 0xff] ^ rk[41]);
- s2= (Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff]
- ^ Td3[t3 & 0xff] ^ rk[42]);
- s3= (Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff]
- ^ Td3[t0 & 0xff] ^ rk[43]);
-
- /* round 11: */
- t0= (Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff]
- ^ Td3[s1 & 0xff] ^ rk[44]);
- t1= (Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff]
- ^ Td3[s2 & 0xff] ^ rk[45]);
- t2= (Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff]
- ^ Td3[s3 & 0xff] ^ rk[46]);
- t3= (Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff]
- ^ Td3[s0 & 0xff] ^ rk[47]);
-
- if (Nr > 12)
- {
- /* round 12: */
- s0= (Td0[t0 >> 24] ^ Td1[(t3 >> 16) & 0xff] ^ Td2[(t2 >> 8) & 0xff]
- ^ Td3[t1 & 0xff] ^ rk[48]);
- s1= (Td0[t1 >> 24] ^ Td1[(t0 >> 16) & 0xff] ^ Td2[(t3 >> 8) & 0xff]
- ^ Td3[t2 & 0xff] ^ rk[49]);
- s2= (Td0[t2 >> 24] ^ Td1[(t1 >> 16) & 0xff] ^ Td2[(t0 >> 8) & 0xff]
- ^ Td3[t3 & 0xff] ^ rk[50]);
- s3= (Td0[t3 >> 24] ^ Td1[(t2 >> 16) & 0xff] ^ Td2[(t1 >> 8) & 0xff]
- ^ Td3[t0 & 0xff] ^ rk[51]);
-
- /* round 13: */
- t0= (Td0[s0 >> 24] ^ Td1[(s3 >> 16) & 0xff] ^ Td2[(s2 >> 8) & 0xff]
- ^ Td3[s1 & 0xff] ^ rk[52]);
- t1= (Td0[s1 >> 24] ^ Td1[(s0 >> 16) & 0xff] ^ Td2[(s3 >> 8) & 0xff]
- ^ Td3[s2 & 0xff] ^ rk[53]);
- t2= (Td0[s2 >> 24] ^ Td1[(s1 >> 16) & 0xff] ^ Td2[(s0 >> 8) & 0xff]
- ^ Td3[s3 & 0xff] ^ rk[54]);
- t3= (Td0[s3 >> 24] ^ Td1[(s2 >> 16) & 0xff] ^ Td2[(s1 >> 8) & 0xff]
- ^ Td3[s0 & 0xff] ^ rk[55]);
- }
- }
- rk += Nr << 2;
-#else /* !FULL_UNROLL */
-
- /* Nr - 1 full rounds: */
- r= (Nr >> 1);
- for (;;)
- {
- t0= (Td0[(s0 >> 24) ] ^
- Td1[(s3 >> 16) & 0xff] ^
- Td2[(s2 >> 8) & 0xff] ^
- Td3[(s1 ) & 0xff] ^
- rk[4]);
-
- t1= (Td0[(s1 >> 24) ] ^
- Td1[(s0 >> 16) & 0xff] ^
- Td2[(s3 >> 8) & 0xff] ^
- Td3[(s2 ) & 0xff] ^
- rk[5]);
-
- t2= (Td0[(s2 >> 24) ] ^
- Td1[(s1 >> 16) & 0xff] ^
- Td2[(s0 >> 8) & 0xff] ^
- Td3[(s3 ) & 0xff] ^
- rk[6]);
-
- t3= (Td0[(s3 >> 24) ] ^
- Td1[(s2 >> 16) & 0xff] ^
- Td2[(s1 >> 8) & 0xff] ^
- Td3[(s0 ) & 0xff] ^
- rk[7]);
-
- rk+= 8;
- if (--r == 0)
- break;
-
- s0= (Td0[(t0 >> 24) ] ^
- Td1[(t3 >> 16) & 0xff] ^
- Td2[(t2 >> 8) & 0xff] ^
- Td3[(t1 ) & 0xff] ^
- rk[0]);
-
- s1= (Td0[(t1 >> 24) ] ^
- Td1[(t0 >> 16) & 0xff] ^
- Td2[(t3 >> 8) & 0xff] ^
- Td3[(t2 ) & 0xff] ^
- rk[1]);
-
- s2= (Td0[(t2 >> 24) ] ^
- Td1[(t1 >> 16) & 0xff] ^
- Td2[(t0 >> 8) & 0xff] ^
- Td3[(t3 ) & 0xff] ^
- rk[2]);
-
- s3= (Td0[(t3 >> 24) ] ^
- Td1[(t2 >> 16) & 0xff] ^
- Td2[(t1 >> 8) & 0xff] ^
- Td3[(t0 ) & 0xff] ^
- rk[3]);
- }
-
-#endif /* FULL_UNROLL */
-
- /* Apply last round and map cipher state to byte array block: */
-
- s0= ((Td4[(t0 >> 24) ] & 0xff000000) ^
- (Td4[(t3 >> 16) & 0xff] & 0x00ff0000) ^
- (Td4[(t2 >> 8) & 0xff] & 0x0000ff00) ^
- (Td4[(t1 ) & 0xff] & 0x000000ff) ^
- rk[0]);
- PUTuint32(pt , s0);
-
- s1= ((Td4[(t1 >> 24) ] & 0xff000000) ^
- (Td4[(t0 >> 16) & 0xff] & 0x00ff0000) ^
- (Td4[(t3 >> 8) & 0xff] & 0x0000ff00) ^
- (Td4[(t2 ) & 0xff] & 0x000000ff) ^
- rk[1]);
- PUTuint32(pt + 4, s1);
-
- s2= ((Td4[(t2 >> 24) ] & 0xff000000) ^
- (Td4[(t1 >> 16) & 0xff] & 0x00ff0000) ^
- (Td4[(t0 >> 8) & 0xff] & 0x0000ff00) ^
- (Td4[(t3 ) & 0xff] & 0x000000ff) ^
- rk[2]);
- PUTuint32(pt + 8, s2);
-
- s3= ((Td4[(t3 >> 24) ] & 0xff000000) ^
- (Td4[(t2 >> 16) & 0xff] & 0x00ff0000) ^
- (Td4[(t1 >> 8) & 0xff] & 0x0000ff00) ^
- (Td4[(t0 ) & 0xff] & 0x000000ff) ^
- rk[3]);
- PUTuint32(pt + 12, s3);
-}
diff --git a/mysys/safemalloc.c b/mysys/safemalloc.c
index bdcfe4480db..f0447da42f6 100644
--- a/mysys/safemalloc.c
+++ b/mysys/safemalloc.c
@@ -280,7 +280,9 @@ static void free_memory(void *ptr)
static void warn(const char *format,...)
{
va_list args;
+ DBUG_PRINT("error", ("%s", format));
va_start(args,format);
+ fflush(stderr);
vfprintf(stderr, format, args);
va_end(args);
@@ -373,8 +375,8 @@ void sf_report_leaked_memory(my_thread_id id)
{
my_thread_id tid = irem->thread_id && irem->flags & MY_THREAD_SPECIFIC ?
irem->thread_id : 0;
- fprintf(stderr, "Warning: %4lu bytes lost, allocated by T@%lu at ",
- (ulong) irem->datasize,tid);
+ fprintf(stderr, "Warning: %4lu bytes lost at %p, allocated by T@%lu at ",
+ (ulong) irem->datasize, (char*) (irem + 1), tid);
print_stack(irem->frame);
total+= irem->datasize;
}
diff --git a/mysys/sha1.c b/mysys/sha1.c
deleted file mode 100644
index e5b33a9ad13..00000000000
--- a/mysys/sha1.c
+++ /dev/null
@@ -1,422 +0,0 @@
-/* Copyright (c) 2002, 2004, 2006 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-*/
-
-/*
- Original Source from: http://www.faqs.org/rfcs/rfc3174.html
-
- Copyright (C) The Internet Society (2001). All Rights Reserved.
-
- This document and translations of it may be copied and furnished to
- others, and derivative works that comment on or otherwise explain it
- or assist in its implementation may be prepared, copied, published
- and distributed, in whole or in part, without restriction of any
- kind, provided that the above copyright notice and this paragraph are
- included on all such copies and derivative works. However, this
- document itself may not be modified in any way, such as by removing
- the copyright notice or references to the Internet Society or other
- Internet organizations, except as needed for the purpose of
- developing Internet standards in which case the procedures for
- copyrights defined in the Internet Standards process must be
- followed, or as required to translate it into languages other than
- English.
-
- The limited permissions granted above are perpetual and will not be
- revoked by the Internet Society or its successors or assigns.
-
- This document and the information contained herein is provided on an
- "AS IS" basis and THE INTERNET SOCIETY AND THE INTERNET ENGINEERING
- TASK FORCE DISCLAIMS ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING
- BUT NOT LIMITED TO ANY WARRANTY THAT THE USE OF THE INFORMATION
- HEREIN WILL NOT INFRINGE ANY RIGHTS OR ANY IMPLIED WARRANTIES OF
- MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
-
- Acknowledgement
- Funding for the RFC Editor function is currently provided by the
- Internet Society.
-
- DESCRIPTION
- This file implements the Secure Hashing Algorithm 1 as
- defined in FIPS PUB 180-1 published April 17, 1995.
-
- The SHA-1, produces a 160-bit message digest for a given data
- stream. It should take about 2**n steps to find a message with the
- same digest as a given message and 2**(n/2) to find any two
- messages with the same digest, when n is the digest size in bits.
- Therefore, this algorithm can serve as a means of providing a
- "fingerprint" for a message.
-
- PORTABILITY ISSUES
- SHA-1 is defined in terms of 32-bit "words". This code uses
- <stdint.h> (included via "sha1.h" to define 32 and 8 bit unsigned
- integer types. If your C compiler does not support 32 bit unsigned
- integers, this code is not appropriate.
-
- CAVEATS
- SHA-1 is designed to work with messages less than 2^64 bits long.
- Although SHA-1 allows a message digest to be generated for messages
- of any number of bits less than 2^64, this implementation only
- works with messages with a length that is a multiple of the size of
- an 8-bit character.
-
- CHANGES
- 2002 by Peter Zaitsev to
- - fit to new prototypes according to MySQL standard
- - Some optimizations
- - All checking is now done in debug only mode
- - More comments
-*/
-
-#include "my_global.h"
-#include "m_string.h"
-#include "sha1.h"
-
-/*
- Define the SHA1 circular left shift macro
-*/
-
-#define SHA1CircularShift(bits,word) \
- (((word) << (bits)) | ((word) >> (32-(bits))))
-
-/* Local Function Prototyptes */
-static void SHA1PadMessage(SHA1_CONTEXT*);
-static void SHA1ProcessMessageBlock(SHA1_CONTEXT*);
-
-
-/*
- Initialize SHA1Context
-
- SYNOPSIS
- mysql_sha1_reset()
- context [in/out] The context to reset.
-
- DESCRIPTION
- This function will initialize the SHA1Context in preparation
- for computing a new SHA1 message digest.
-
- RETURN
- SHA_SUCCESS ok
- != SHA_SUCCESS sha Error Code.
-*/
-
-
-const uint32 sha_const_key[5]=
-{
- 0x67452301,
- 0xEFCDAB89,
- 0x98BADCFE,
- 0x10325476,
- 0xC3D2E1F0
-};
-
-
-int mysql_sha1_reset(SHA1_CONTEXT *context)
-{
-#ifndef DBUG_OFF
- if (!context)
- return SHA_NULL;
-#endif
-
- context->Length = 0;
- context->Message_Block_Index = 0;
-
- context->Intermediate_Hash[0] = sha_const_key[0];
- context->Intermediate_Hash[1] = sha_const_key[1];
- context->Intermediate_Hash[2] = sha_const_key[2];
- context->Intermediate_Hash[3] = sha_const_key[3];
- context->Intermediate_Hash[4] = sha_const_key[4];
-
- context->Computed = 0;
- context->Corrupted = 0;
-
- return SHA_SUCCESS;
-}
-
-
-/*
- Return the 160-bit message digest into the array provided by the caller
-
- SYNOPSIS
- mysql_sha1_result()
- context [in/out] The context to use to calculate the SHA-1 hash.
- Message_Digest: [out] Where the digest is returned.
-
- DESCRIPTION
- NOTE: The first octet of hash is stored in the 0th element,
- the last octet of hash in the 19th element.
-
- RETURN
- SHA_SUCCESS ok
- != SHA_SUCCESS sha Error Code.
-*/
-
-int mysql_sha1_result(SHA1_CONTEXT *context,
- uint8 Message_Digest[SHA1_HASH_SIZE])
-{
- int i;
-
-#ifndef DBUG_OFF
- if (!context || !Message_Digest)
- return SHA_NULL;
-
- if (context->Corrupted)
- return context->Corrupted;
-#endif
-
- if (!context->Computed)
- {
- SHA1PadMessage(context);
- /* message may be sensitive, clear it out */
- bzero((char*) context->Message_Block,64);
- context->Length = 0; /* and clear length */
- context->Computed = 1;
- }
-
- for (i = 0; i < SHA1_HASH_SIZE; i++)
- Message_Digest[i] = (int8)((context->Intermediate_Hash[i>>2] >> 8
- * ( 3 - ( i & 0x03 ) )));
- return SHA_SUCCESS;
-}
-
-
-/*
- Accepts an array of octets as the next portion of the message.
-
- SYNOPSIS
- mysql_sha1_input()
- context [in/out] The SHA context to update
- message_array An array of characters representing the next portion
- of the message.
- length The length of the message in message_array
-
- RETURN
- SHA_SUCCESS ok
- != SHA_SUCCESS sha Error Code.
-*/
-
-int mysql_sha1_input(SHA1_CONTEXT *context, const uint8 *message_array,
- unsigned length)
-{
- if (!length)
- return SHA_SUCCESS;
-
-#ifndef DBUG_OFF
- /* We assume client konows what it is doing in non-debug mode */
- if (!context || !message_array)
- return SHA_NULL;
- if (context->Computed)
- return (context->Corrupted= SHA_STATE_ERROR);
- if (context->Corrupted)
- return context->Corrupted;
-#endif
-
- while (length--)
- {
- context->Message_Block[context->Message_Block_Index++]=
- (*message_array & 0xFF);
- context->Length += 8; /* Length is in bits */
-
-#ifndef DBUG_OFF
- /*
- Then we're not debugging we assume we never will get message longer
- 2^64 bits.
- */
- if (context->Length == 0)
- return (context->Corrupted= 1); /* Message is too long */
-#endif
-
- if (context->Message_Block_Index == 64)
- {
- SHA1ProcessMessageBlock(context);
- }
- message_array++;
- }
- return SHA_SUCCESS;
-}
-
-
-/*
- Process the next 512 bits of the message stored in the Message_Block array.
-
- SYNOPSIS
- SHA1ProcessMessageBlock()
-
- DESCRIPTION
- Many of the variable names in this code, especially the single
- character names, were used because those were the names used in
- the publication.
-*/
-
-/* Constants defined in SHA-1 */
-static const uint32 K[]=
-{
- 0x5A827999,
- 0x6ED9EBA1,
- 0x8F1BBCDC,
- 0xCA62C1D6
-};
-
-
-static void SHA1ProcessMessageBlock(SHA1_CONTEXT *context)
-{
- int t; /* Loop counter */
- uint32 temp; /* Temporary word value */
- uint32 W[80]; /* Word sequence */
- uint32 A, B, C, D, E; /* Word buffers */
- int idx;
-
- /*
- Initialize the first 16 words in the array W
- */
-
- for (t = 0; t < 16; t++)
- {
- idx=t*4;
- W[t] = context->Message_Block[idx] << 24;
- W[t] |= context->Message_Block[idx + 1] << 16;
- W[t] |= context->Message_Block[idx + 2] << 8;
- W[t] |= context->Message_Block[idx + 3];
- }
-
-
- for (t = 16; t < 80; t++)
- {
- W[t] = SHA1CircularShift(1,W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]);
- }
-
- A = context->Intermediate_Hash[0];
- B = context->Intermediate_Hash[1];
- C = context->Intermediate_Hash[2];
- D = context->Intermediate_Hash[3];
- E = context->Intermediate_Hash[4];
-
- for (t = 0; t < 20; t++)
- {
- temp= SHA1CircularShift(5,A) + ((B & C) | ((~B) & D)) + E + W[t] + K[0];
- E = D;
- D = C;
- C = SHA1CircularShift(30,B);
- B = A;
- A = temp;
- }
-
- for (t = 20; t < 40; t++)
- {
- temp = SHA1CircularShift(5,A) + (B ^ C ^ D) + E + W[t] + K[1];
- E = D;
- D = C;
- C = SHA1CircularShift(30,B);
- B = A;
- A = temp;
- }
-
- for (t = 40; t < 60; t++)
- {
- temp= (SHA1CircularShift(5,A) + ((B & C) | (B & D) | (C & D)) + E + W[t] +
- K[2]);
- E = D;
- D = C;
- C = SHA1CircularShift(30,B);
- B = A;
- A = temp;
- }
-
- for (t = 60; t < 80; t++)
- {
- temp = SHA1CircularShift(5,A) + (B ^ C ^ D) + E + W[t] + K[3];
- E = D;
- D = C;
- C = SHA1CircularShift(30,B);
- B = A;
- A = temp;
- }
-
- context->Intermediate_Hash[0] += A;
- context->Intermediate_Hash[1] += B;
- context->Intermediate_Hash[2] += C;
- context->Intermediate_Hash[3] += D;
- context->Intermediate_Hash[4] += E;
-
- context->Message_Block_Index = 0;
-}
-
-
-/*
- Pad message
-
- SYNOPSIS
- SHA1PadMessage()
- context: [in/out] The context to pad
-
- DESCRIPTION
- According to the standard, the message must be padded to an even
- 512 bits. The first padding bit must be a '1'. The last 64 bits
- represent the length of the original message. All bits in between
- should be 0. This function will pad the message according to
- those rules by filling the Message_Block array accordingly. It
- will also call the ProcessMessageBlock function provided
- appropriately. When it returns, it can be assumed that the message
- digest has been computed.
-
-*/
-
-static void SHA1PadMessage(SHA1_CONTEXT *context)
-{
- /*
- Check to see if the current message block is too small to hold
- the initial padding bits and length. If so, we will pad the
- block, process it, and then continue padding into a second
- block.
- */
-
- int i=context->Message_Block_Index;
-
- if (i > 55)
- {
- context->Message_Block[i++] = 0x80;
- bzero((char*) &context->Message_Block[i],
- sizeof(context->Message_Block[0])*(64-i));
- context->Message_Block_Index=64;
-
- /* This function sets context->Message_Block_Index to zero */
- SHA1ProcessMessageBlock(context);
-
- bzero((char*) &context->Message_Block[0],
- sizeof(context->Message_Block[0])*56);
- context->Message_Block_Index=56;
- }
- else
- {
- context->Message_Block[i++] = 0x80;
- bzero((char*) &context->Message_Block[i],
- sizeof(context->Message_Block[0])*(56-i));
- context->Message_Block_Index=56;
- }
-
- /*
- Store the message length as the last 8 octets
- */
-
- context->Message_Block[56] = (int8) (context->Length >> 56);
- context->Message_Block[57] = (int8) (context->Length >> 48);
- context->Message_Block[58] = (int8) (context->Length >> 40);
- context->Message_Block[59] = (int8) (context->Length >> 32);
- context->Message_Block[60] = (int8) (context->Length >> 24);
- context->Message_Block[61] = (int8) (context->Length >> 16);
- context->Message_Block[62] = (int8) (context->Length >> 8);
- context->Message_Block[63] = (int8) (context->Length);
-
- SHA1ProcessMessageBlock(context);
-}
diff --git a/mysys/stacktrace.c b/mysys/stacktrace.c
index 402520990b6..613911e4495 100644
--- a/mysys/stacktrace.c
+++ b/mysys/stacktrace.c
@@ -95,7 +95,7 @@ static int safe_print_str(const char *addr, int max_len)
/* Read up to the maximum number of bytes. */
while (total)
{
- count= min(sizeof(buf), total);
+ count= MY_MIN(sizeof(buf), total);
if ((nbytes= pread(fd, buf, count, offset)) < 0)
{
@@ -348,7 +348,7 @@ void my_print_stacktrace(uchar* stack_bottom, ulong thread_stack)
if (!stack_bottom || (uchar*) stack_bottom > (uchar*) &fp)
{
- ulong tmp= min(0x10000,thread_stack);
+ ulong tmp= MY_MIN(0x10000,thread_stack);
/* Assume that the stack starts at the previous even 65K */
stack_bottom= (uchar*) (((ulong) &fp + tmp) & ~(ulong) 0xFFFF);
my_safe_printf_stderr("Cannot determine thread, fp=%p, "
diff --git a/mysys/string.c b/mysys/string.c
index 1263e7824f9..42fe83ed4e1 100644
--- a/mysys/string.c
+++ b/mysys/string.c
@@ -223,77 +223,3 @@ void dynstr_reassociate(DYNAMIC_STRING *str, char **ptr, size_t *length,
*alloc_length= str->max_length;
str->str=0;
}
-
-
-/*
- copy a string from one character set to another
-
- SYNOPSIS
- copy_and_convert()
- to Store result here
- to_cs Character set of result string
- from Copy from here
- from_length Length of from string
- from_cs From character set
-
- NOTES
- 'to' must be big enough as form_length * to_cs->mbmaxlen
-
- RETURN
- length of bytes copied to 'to'
-*/
-
-uint32
-copy_and_convert_extended(char *to, uint32 to_length, CHARSET_INFO *to_cs,
- const char *from, uint32 from_length,
- CHARSET_INFO *from_cs,
- uint *errors)
-{
- int cnvres;
- my_wc_t wc;
- const uchar *from_end= (const uchar*) from+from_length;
- char *to_start= to;
- uchar *to_end= (uchar*) to+to_length;
- my_charset_conv_mb_wc mb_wc= from_cs->cset->mb_wc;
- my_charset_conv_wc_mb wc_mb= to_cs->cset->wc_mb;
- uint error_count= 0;
-
- while (1)
- {
- if ((cnvres= (*mb_wc)(from_cs, &wc, (uchar*) from,
- from_end)) > 0)
- from+= cnvres;
- else if (cnvres == MY_CS_ILSEQ)
- {
- error_count++;
- from++;
- wc= '?';
- }
- else if (cnvres > MY_CS_TOOSMALL)
- {
- /*
- A correct multibyte sequence detected
- But it doesn't have Unicode mapping.
- */
- error_count++;
- from+= (-cnvres);
- wc= '?';
- }
- else
- break; // Not enough characters
-
-outp:
- if ((cnvres= (*wc_mb)(to_cs, wc, (uchar*) to, to_end)) > 0)
- to+= cnvres;
- else if (cnvres == MY_CS_ILUNI && wc != '?')
- {
- error_count++;
- wc= '?';
- goto outp;
- }
- else
- break;
- }
- *errors= error_count;
- return (uint32) (to - to_start);
-}
diff --git a/mysys/testhash.c b/mysys/testhash.c
index ffdaaece770..3359b5dce29 100644
--- a/mysys/testhash.c
+++ b/mysys/testhash.c
@@ -79,7 +79,7 @@ static int do_test()
for (i=0 ; i < recant ; i++)
{
- n1=rnd(1000); n2=rnd(100); n3=rnd(min(recant*5,MAX_RECORDS));
+ n1=rnd(1000); n2=rnd(100); n3=rnd(MY_MIN(recant*5,MAX_RECORDS));
record= (char*) my_malloc(reclength,MYF(MY_FAE));
sprintf(record,"%6d:%4d:%8d:Pos: %4d ",n1,n2,n3,write_count);
if (my_hash_insert(&hash,record))
@@ -133,7 +133,7 @@ static int do_test()
printf("- Update\n");
for (i=0 ; i < write_count/10 ; i++)
{
- n1=rnd(1000); n2=rnd(100); n3=rnd(min(recant*2,MAX_RECORDS));
+ n1=rnd(1000); n2=rnd(100); n3=rnd(MY_MIN(recant*2,MAX_RECORDS));
for (j=rnd(1000) ; j>0 && key1[j] == 0 ; j--) ;
if (j)
{
diff --git a/mysys_ssl/CMakeLists.txt b/mysys_ssl/CMakeLists.txt
new file mode 100644
index 00000000000..b91988d1c8b
--- /dev/null
+++ b/mysys_ssl/CMakeLists.txt
@@ -0,0 +1,48 @@
+# Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include
+ ${CMAKE_SOURCE_DIR}/mysys_ssl
+ ${SSL_INCLUDE_DIRS})
+
+IF(SSL_DEFINES)
+ADD_DEFINITIONS(${SSL_DEFINES})
+ENDIF()
+
+# We do RESTRICT_SYMBOL_EXPORTS(yassl) elsewhere.
+# In order to get correct symbol visibility, these files
+# must be compiled with "-fvisibility=hidden"
+IF(WITH_SSL STREQUAL "bundled" AND HAVE_VISIBILITY_HIDDEN)
+ SET_SOURCE_FILES_PROPERTIES(
+ crypt_genhash_impl.cc
+ my_aes.cc
+ my_md5.cc
+ my_sha1.cc
+ my_sha2.cc
+ PROPERTIES COMPILE_FLAGS "-fvisibility=hidden")
+ENDIF()
+
+SET(MYSYS_SSL_SOURCES
+ crypt_genhash_impl.cc
+ my_aes.cc
+ my_sha1.cc
+ my_sha2.cc
+ my_md5.cc
+ my_rnd.cc
+ )
+
+ADD_CONVENIENCE_LIBRARY(mysys_ssl ${MYSYS_SSL_SOURCES})
+TARGET_LINK_LIBRARIES(mysys_ssl dbug strings ${SSL_LIBRARIES})
+DTRACE_INSTRUMENT(mysys_ssl)
diff --git a/mysys_ssl/crypt_genhash_impl.cc b/mysys_ssl/crypt_genhash_impl.cc
new file mode 100644
index 00000000000..ab7fdec46b9
--- /dev/null
+++ b/mysys_ssl/crypt_genhash_impl.cc
@@ -0,0 +1,454 @@
+/* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1307 USA */
+
+/* We always should include my_global first */
+
+#include <my_global.h>
+
+#ifdef HAVE_OPENSSL
+
+#ifdef HAVE_YASSL
+#include <sha.hpp>
+#include <openssl/ssl.h>
+#else
+#include <openssl/sha.h>
+#include <openssl/rand.h>
+#endif
+#include "crypt_genhash_impl.h"
+#include <string.h>
+
+#ifndef HAVE_YASSL
+#define DIGEST_CTX SHA256_CTX
+#define DIGESTInit SHA256_Init
+#define DIGESTUpdate SHA256_Update
+#define DIGESTFinal SHA256_Final
+#define DIGEST_LEN SHA256_DIGEST_LENGTH
+#else
+#define DIGEST_CTX TaoCrypt::SHA256
+#define DIGEST_LEN 32
+void DIGESTInit(DIGEST_CTX *ctx)
+{
+ ctx->Init();
+}
+
+void DIGESTUpdate(DIGEST_CTX *ctx, const void *plaintext, int len)
+{
+ ctx->Update((const TaoCrypt::byte *)plaintext, len);
+}
+
+void DIGESTFinal(void *txt, DIGEST_CTX *ctx)
+{
+ ctx->Final((TaoCrypt::byte *)txt);
+}
+
+#endif // HAVE_YASSL
+
+static const char crypt_alg_magic[] = "$5";
+
+#ifndef MAX
+#define MAX(a, b) (((a) > (b)) ? (a) : (b))
+#endif
+#ifndef MIN
+#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+#endif
+
+
+/**
+ Size-bounded string copying and concatenation
+ This is a replacement for STRLCPY(3)
+*/
+
+size_t
+strlcat(char *dst, const char *src, size_t siz)
+{
+ char *d= dst;
+ const char *s= src;
+ size_t n= siz;
+ size_t dlen;
+ /* Find the end of dst and adjust bytes left but don't go past end */
+ while (n-- != 0 && *d != '\0')
+ d++;
+ dlen= d - dst;
+ n= siz - dlen;
+ if (n == 0)
+ return(dlen + siz);
+ while (*s != '\0')
+ {
+ if (n != 1)
+ {
+ *d++= *s;
+ n--;
+ }
+ s++;
+ }
+ *d= '\0';
+ return(dlen + (s - src)); /* count does not include NUL */
+}
+
+static const int crypt_alg_magic_len = sizeof (crypt_alg_magic) - 1;
+
+static unsigned char b64t[] = /* 0 ... 63 => ascii - 64 */
+ "./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
+
+#define b64_from_24bit(B2, B1, B0, N) \
+{ \
+ uint32 w = ((B2) << 16) | ((B1) << 8) | (B0); \
+ int n = (N); \
+ while (--n >= 0 && ctbufflen > 0) { \
+ *p++ = b64t[w & 0x3f]; \
+ w >>= 6; \
+ ctbufflen--; \
+} \
+}
+
+#define ROUNDS "rounds="
+#define ROUNDSLEN (sizeof (ROUNDS) - 1)
+
+/**
+ Get the integer value after rounds= where ever it occurs in the string.
+ if the last char after the int is a , or $ that is fine anything else is an
+ error.
+*/
+static uint32 getrounds(const char *s)
+{
+ const char *r;
+ const char *p;
+ char *e;
+ long val;
+
+ if (s == NULL)
+ return (0);
+
+ if ((r = strstr(s, ROUNDS)) == NULL)
+ {
+ return (0);
+ }
+
+ if (strncmp(r, ROUNDS, ROUNDSLEN) != 0)
+ {
+ return (0);
+ }
+
+ p= r + ROUNDSLEN;
+ errno= 0;
+ val= strtol(p, &e, 10);
+ /*
+ An error occurred or there is non-numeric stuff at the end
+ which isn't one of the crypt(3c) special chars ',' or '$'
+ */
+ if (errno != 0 || val < 0 || !(*e == '\0' || *e == ',' || *e == '$'))
+ {
+ return (0);
+ }
+
+ return ((uint32) val);
+}
+
+/**
+ Finds the interval which envelopes the user salt in a crypt password
+ The crypt format is assumed to be $a$bbbb$cccccc\0 and the salt is found
+ by counting the delimiters and marking begin and end.
+
+ @param salt_being[in] Pointer to start of crypt passwd
+ @param salt_being[out] Pointer to first byte of the salt
+ @param salt_end[in] Pointer to the last byte in passwd
+ @param salt_end[out] Pointer to the byte immediatly following the salt ($)
+
+ @return The size of the salt identified
+*/
+
+int extract_user_salt(char **salt_begin,
+ char **salt_end)
+{
+ char *it= *salt_begin;
+ int delimiter_count= 0;
+ while(it != *salt_end)
+ {
+ if (*it == '$')
+ {
+ ++delimiter_count;
+ if (delimiter_count == 2)
+ {
+ *salt_begin= it + 1;
+ }
+ if (delimiter_count == 3)
+ break;
+ }
+ ++it;
+ }
+ *salt_end= it;
+ return *salt_end - *salt_begin;
+}
+
+const char *sha256_find_digest(char *pass)
+{
+ int sz= strlen(pass);
+ return pass + sz - SHA256_HASH_LENGTH;
+}
+
+/*
+ * Portions of the below code come from crypt_bsdmd5.so (bsdmd5.c) :
+ * ----------------------------------------------------------------------------
+ * "THE BEER-WARE LICENSE" (Revision 42):
+ * <phk@login.dknet.dk> wrote this file. As long as you retain this notice you
+ * can do whatever you want with this stuff. If we meet some day, and you think
+ * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp
+ * ----------------------------------------------------------------------------
+ *
+ * $FreeBSD: crypt.c,v 1.5 1996/10/14 08:34:02 phk Exp $
+ *
+ */
+
+/*
+ * The below code implements the specification from:
+ *
+ * From http://people.redhat.com/drepper/SHA-crypt.txt
+ *
+ * Portions of the code taken from inspired by or verified against the
+ * source in the above document which is licensed as:
+ *
+ * "Released into the Public Domain by Ulrich Drepper <drepper@redhat.com>."
+ */
+
+/*
+ Due to a Solaris namespace bug DS is a reserved word. To work around this
+ DS is undefined.
+*/
+#undef DS
+
+/* ARGSUSED4 */
+extern "C"
+char *
+my_crypt_genhash(char *ctbuffer,
+ size_t ctbufflen,
+ const char *plaintext,
+ int plaintext_len,
+ const char *switchsalt,
+ const char **params)
+{
+ int salt_len, i;
+ char *salt;
+ unsigned char A[DIGEST_LEN];
+ unsigned char B[DIGEST_LEN];
+ unsigned char DP[DIGEST_LEN];
+ unsigned char DS[DIGEST_LEN];
+ DIGEST_CTX ctxA, ctxB, ctxC, ctxDP, ctxDS;
+ int rounds = ROUNDS_DEFAULT;
+ int srounds = 0;
+ bool custom_rounds= false;
+ char *p;
+ char *P, *Pp;
+ char *S, *Sp;
+
+ /* Refine the salt */
+ salt = (char *)switchsalt;
+
+ /* skip our magic string */
+ if (strncmp((char *)salt, crypt_alg_magic, crypt_alg_magic_len) == 0)
+ {
+ salt += crypt_alg_magic_len + 1;
+ }
+
+ srounds = getrounds(salt);
+ if (srounds != 0) {
+ rounds = MAX(ROUNDS_MIN, MIN(srounds, ROUNDS_MAX));
+ custom_rounds= true;
+ p = strchr(salt, '$');
+ if (p != NULL)
+ salt = p + 1;
+ }
+
+ salt_len = MIN(strcspn(salt, "$"), CRYPT_SALT_LENGTH);
+ //plaintext_len = strlen(plaintext);
+
+ /* 1. */
+ DIGESTInit(&ctxA);
+
+ /* 2. The password first, since that is what is most unknown */
+ DIGESTUpdate(&ctxA, plaintext, plaintext_len);
+
+ /* 3. Then the raw salt */
+ DIGESTUpdate(&ctxA, salt, salt_len);
+
+ /* 4. - 8. */
+ DIGESTInit(&ctxB);
+ DIGESTUpdate(&ctxB, plaintext, plaintext_len);
+ DIGESTUpdate(&ctxB, salt, salt_len);
+ DIGESTUpdate(&ctxB, plaintext, plaintext_len);
+ DIGESTFinal(B, &ctxB);
+
+ /* 9. - 10. */
+ for (i= plaintext_len; i > MIXCHARS; i -= MIXCHARS)
+ DIGESTUpdate(&ctxA, B, MIXCHARS);
+ DIGESTUpdate(&ctxA, B, i);
+
+ /* 11. */
+ for (i= plaintext_len; i > 0; i >>= 1) {
+ if ((i & 1) != 0)
+ {
+ DIGESTUpdate(&ctxA, B, MIXCHARS);
+ }
+ else
+ {
+ DIGESTUpdate(&ctxA, plaintext, plaintext_len);
+ }
+ }
+
+ /* 12. */
+ DIGESTFinal(A, &ctxA);
+
+ /* 13. - 15. */
+ DIGESTInit(&ctxDP);
+ for (i= 0; i < plaintext_len; i++)
+ DIGESTUpdate(&ctxDP, plaintext, plaintext_len);
+ DIGESTFinal(DP, &ctxDP);
+
+ /* 16. */
+ Pp= P= (char *)alloca(plaintext_len);
+ for (i= plaintext_len; i >= MIXCHARS; i -= MIXCHARS)
+ {
+ Pp= (char *)(memcpy(Pp, DP, MIXCHARS)) + MIXCHARS;
+ }
+ (void) memcpy(Pp, DP, i);
+
+ /* 17. - 19. */
+ DIGESTInit(&ctxDS);
+ for (i= 0; i < 16 + (uint8)A[0]; i++)
+ DIGESTUpdate(&ctxDS, salt, salt_len);
+ DIGESTFinal(DS, &ctxDS);
+
+ /* 20. */
+ Sp= S= (char *)alloca(salt_len);
+ for (i= salt_len; i >= MIXCHARS; i -= MIXCHARS)
+ {
+ Sp= (char *)(memcpy(Sp, DS, MIXCHARS)) + MIXCHARS;
+ }
+ (void) memcpy(Sp, DS, i);
+
+ /* 21. */
+ for (i= 0; i < rounds; i++)
+ {
+ DIGESTInit(&ctxC);
+
+ if ((i & 1) != 0)
+ {
+ DIGESTUpdate(&ctxC, P, plaintext_len);
+ }
+ else
+ {
+ if (i == 0)
+ DIGESTUpdate(&ctxC, A, MIXCHARS);
+ else
+ DIGESTUpdate(&ctxC, DP, MIXCHARS);
+ }
+
+ if (i % 3 != 0) {
+ DIGESTUpdate(&ctxC, S, salt_len);
+ }
+
+ if (i % 7 != 0) {
+ DIGESTUpdate(&ctxC, P, plaintext_len);
+ }
+
+ if ((i & 1) != 0)
+ {
+ if (i == 0)
+ DIGESTUpdate(&ctxC, A, MIXCHARS);
+ else
+ DIGESTUpdate(&ctxC, DP, MIXCHARS);
+ }
+ else
+ {
+ DIGESTUpdate(&ctxC, P, plaintext_len);
+ }
+ DIGESTFinal(DP, &ctxC);
+ }
+
+ /* 22. Now make the output string */
+ if (custom_rounds)
+ {
+ (void) snprintf(ctbuffer, ctbufflen,
+ "%s$rounds=%zu$", crypt_alg_magic, (size_t)rounds);
+ }
+ else
+ {
+ (void) snprintf(ctbuffer, ctbufflen,
+ "%s$", crypt_alg_magic);
+ }
+ (void) strncat(ctbuffer, (const char *)salt, salt_len);
+ (void) strlcat(ctbuffer, "$", ctbufflen);
+
+ p= ctbuffer + strlen(ctbuffer);
+ ctbufflen -= strlen(ctbuffer);
+
+ b64_from_24bit(DP[ 0], DP[10], DP[20], 4);
+ b64_from_24bit(DP[21], DP[ 1], DP[11], 4);
+ b64_from_24bit(DP[12], DP[22], DP[ 2], 4);
+ b64_from_24bit(DP[ 3], DP[13], DP[23], 4);
+ b64_from_24bit(DP[24], DP[ 4], DP[14], 4);
+ b64_from_24bit(DP[15], DP[25], DP[ 5], 4);
+ b64_from_24bit(DP[ 6], DP[16], DP[26], 4);
+ b64_from_24bit(DP[27], DP[ 7], DP[17], 4);
+ b64_from_24bit(DP[18], DP[28], DP[ 8], 4);
+ b64_from_24bit(DP[ 9], DP[19], DP[29], 4);
+ b64_from_24bit(0, DP[31], DP[30], 3);
+ *p= '\0';
+
+ (void) memset(A, 0, sizeof (A));
+ (void) memset(B, 0, sizeof (B));
+ (void) memset(DP, 0, sizeof (DP));
+ (void) memset(DS, 0, sizeof (DS));
+
+ return (ctbuffer);
+}
+
+
+/**
+ Generate a random string using ASCII characters but avoid seperator character.
+ Stdlib rand and srand are used to produce pseudo random numbers between
+ with about 7 bit worth of entropty between 1-127.
+*/
+extern "C"
+void generate_user_salt(char *buffer, int buffer_len)
+{
+ char *end= buffer + buffer_len - 1;
+#ifdef HAVE_YASSL
+ yaSSL::RAND_bytes((unsigned char *) buffer, buffer_len);
+#else
+ RAND_bytes((unsigned char *) buffer, buffer_len);
+#endif
+
+ /* Sequence must be a legal UTF8 string */
+ for (; buffer < end; buffer++)
+ {
+ *buffer &= 0x7f;
+ if (*buffer == '\0' || *buffer == '$')
+ *buffer= *buffer + 1;
+ }
+ /* Make sure the buffer is terminated properly */
+ *end= '\0';
+}
+
+void xor_string(char *to, int to_len, char *pattern, int pattern_len)
+{
+ int loop= 0;
+ while(loop <= to_len)
+ {
+ *(to + loop) ^= *(pattern + loop % pattern_len);
+ ++loop;
+ }
+}
+
+#endif // HAVE_OPENSSL
diff --git a/mysys_ssl/my_aes.cc b/mysys_ssl/my_aes.cc
new file mode 100644
index 00000000000..9327bc32a3b
--- /dev/null
+++ b/mysys_ssl/my_aes.cc
@@ -0,0 +1,278 @@
+/* Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+
+#include <my_global.h>
+#include <m_string.h>
+#include <my_aes.h>
+
+#if defined(HAVE_YASSL)
+#include "aes.hpp"
+#include "openssl/ssl.h"
+#elif defined(HAVE_OPENSSL)
+#include <openssl/aes.h>
+#include <openssl/evp.h>
+
+// Wrap C struct, to ensure resources are released.
+struct MyCipherCtx
+{
+ MyCipherCtx() { memset(&ctx, 0, sizeof(ctx)); }
+ ~MyCipherCtx() { EVP_CIPHER_CTX_cleanup(&ctx); }
+
+ EVP_CIPHER_CTX ctx;
+};
+#endif
+
+enum encrypt_dir { MY_AES_ENCRYPT, MY_AES_DECRYPT };
+
+#define MY_AES_BLOCK_SIZE 16 /* Block size in bytes */
+
+/* If bad data discovered during decoding */
+#define AES_BAD_DATA -1
+
+/**
+ This is internal function just keeps joint code of Key generation
+
+ SYNOPSIS
+ my_aes_create_key()
+ @param key [in] Key to use for real key creation
+ @param key_length [in] Length of the key
+ @param rkey [out] Real key (used by OpenSSL/YaSSL)
+
+ @return
+ 0 Ok
+ -1 Error; Note: The current impementation never returns this
+*/
+
+static int my_aes_create_key(const char *key, int key_length, uint8 *rkey)
+{
+ uint8 *rkey_end= rkey + AES_KEY_LENGTH / 8; /* Real key boundary */
+ uint8 *ptr; /* Start of the real key*/
+ const char *sptr; /* Start of the working key */
+ const char *key_end= key + key_length; /* Working key boundary*/
+
+ memset(rkey, 0, AES_KEY_LENGTH / 8); /* Set initial key */
+
+ for (ptr= rkey, sptr= key; sptr < key_end; ptr ++, sptr ++)
+ {
+ if (ptr == rkey_end)
+ /* Just loop over tmp_key until we used all key */
+ ptr= rkey;
+ *ptr ^= (uint8) *sptr;
+ }
+#ifdef AES_USE_KEY_BITS
+ /*
+ This block is intended to allow more weak encryption if application
+ build with libmysqld needs to correspond to export regulations
+ It should be never used in normal distribution as does not give
+ any speed improvement.
+ To get worse security define AES_USE_KEY_BITS to number of bits
+ you want key to be. It should be divisible by 8
+
+ WARNING: Changing this value results in changing of enryption for
+ all key lengths so altering this value will result in impossibility
+ to decrypt data encrypted with previous value
+ */
+#define AES_USE_KEY_BYTES (AES_USE_KEY_BITS/8)
+ /*
+ To get weaker key we use first AES_USE_KEY_BYTES bytes of created key
+ and cyclically copy them until we created all required key length
+ */
+ for (ptr= rkey+AES_USE_KEY_BYTES, sptr=rkey ; ptr < rkey_end;
+ ptr ++, sptr ++)
+ {
+ if (sptr == rkey + AES_USE_KEY_BYTES)
+ sptr= rkey;
+ *ptr= *sptr;
+ }
+#endif
+ return 0;
+}
+
+
+/**
+ Crypt buffer with AES encryption algorithm.
+
+ SYNOPSIS
+ my_aes_encrypt()
+ @param source [in] Pointer to data for encryption
+ @param source_length [in] Size of encryption data
+ @param dest [out] Buffer to place encrypted data (must be large enough)
+ @param key [in] Key to be used for encryption
+ @param key_length [in] Length of the key. Will handle keys of any length
+
+ @return
+ >= 0 Size of encrypted data
+ < 0 Error
+*/
+
+int my_aes_encrypt(const char* source, int source_length, char* dest,
+ const char* key, int key_length)
+{
+#if defined(HAVE_YASSL)
+ TaoCrypt::AES_ECB_Encryption enc;
+ /* 128 bit block used for padding */
+ uint8 block[MY_AES_BLOCK_SIZE];
+ int num_blocks; /* number of complete blocks */
+ int i;
+#elif defined(HAVE_OPENSSL)
+ MyCipherCtx ctx;
+ int u_len, f_len;
+#endif
+
+ /* The real key to be used for encryption */
+ uint8 rkey[AES_KEY_LENGTH / 8];
+ int rc; /* result codes */
+
+ if ((rc= my_aes_create_key(key, key_length, rkey)))
+ return rc;
+
+#if defined(HAVE_YASSL)
+ enc.SetKey((const TaoCrypt::byte *) rkey, MY_AES_BLOCK_SIZE);
+
+ num_blocks = source_length / MY_AES_BLOCK_SIZE;
+
+ for (i = num_blocks; i > 0; i--) /* Encode complete blocks */
+ {
+ enc.Process((TaoCrypt::byte *) dest, (const TaoCrypt::byte *) source,
+ MY_AES_BLOCK_SIZE);
+ source += MY_AES_BLOCK_SIZE;
+ dest += MY_AES_BLOCK_SIZE;
+ }
+
+ /* Encode the rest. We always have incomplete block */
+ char pad_len = MY_AES_BLOCK_SIZE - (source_length -
+ MY_AES_BLOCK_SIZE * num_blocks);
+ memcpy(block, source, 16 - pad_len);
+ memset(block + MY_AES_BLOCK_SIZE - pad_len, pad_len, pad_len);
+
+ enc.Process((TaoCrypt::byte *) dest, (const TaoCrypt::byte *) block,
+ MY_AES_BLOCK_SIZE);
+
+ return MY_AES_BLOCK_SIZE * (num_blocks + 1);
+#elif defined(HAVE_OPENSSL)
+ if (! EVP_EncryptInit(&ctx.ctx, EVP_aes_128_ecb(),
+ (const unsigned char *) rkey, NULL))
+ return AES_BAD_DATA; /* Error */
+ if (! EVP_EncryptUpdate(&ctx.ctx, (unsigned char *) dest, &u_len,
+ (unsigned const char *) source, source_length))
+ return AES_BAD_DATA; /* Error */
+ if (! EVP_EncryptFinal(&ctx.ctx, (unsigned char *) dest + u_len, &f_len))
+ return AES_BAD_DATA; /* Error */
+
+ return u_len + f_len;
+#endif
+}
+
+
+/**
+ DeCrypt buffer with AES encryption algorithm.
+
+ SYNOPSIS
+ my_aes_decrypt()
+ @param source [in] Pointer to data for decryption
+ @param source_length [in] Size of encrypted data
+ @param dest [out] Buffer to place decrypted data (must
+ be large enough)
+ @param key [in] Key to be used for decryption
+ @param key_length [in] Length of the key. Will handle keys of any length
+
+ @return
+ >= 0 Size of encrypted data
+ < 0 Error
+*/
+
+int my_aes_decrypt(const char *source, int source_length, char *dest,
+ const char *key, int key_length)
+{
+#if defined(HAVE_YASSL)
+ TaoCrypt::AES_ECB_Decryption dec;
+ /* 128 bit block used for padding */
+ uint8 block[MY_AES_BLOCK_SIZE];
+ int num_blocks; /* Number of complete blocks */
+ int i;
+#elif defined(HAVE_OPENSSL)
+ MyCipherCtx ctx;
+ int u_len, f_len;
+#endif
+
+ /* The real key to be used for decryption */
+ uint8 rkey[AES_KEY_LENGTH / 8];
+ int rc; /* Result codes */
+
+ if ((rc= my_aes_create_key(key, key_length, rkey)))
+ return rc;
+
+#if defined(HAVE_YASSL)
+ dec.SetKey((const TaoCrypt::byte *) rkey, MY_AES_BLOCK_SIZE);
+
+ num_blocks = source_length / MY_AES_BLOCK_SIZE;
+
+ if ((source_length != num_blocks * MY_AES_BLOCK_SIZE) || num_blocks == 0 )
+ /* Input size has to be even and at least one block */
+ return AES_BAD_DATA;
+
+ /* Decode all but last blocks */
+ for (i = num_blocks - 1; i > 0; i--)
+ {
+ dec.Process((TaoCrypt::byte *) dest, (const TaoCrypt::byte *) source,
+ MY_AES_BLOCK_SIZE);
+ source += MY_AES_BLOCK_SIZE;
+ dest += MY_AES_BLOCK_SIZE;
+ }
+
+ dec.Process((TaoCrypt::byte *) block, (const TaoCrypt::byte *) source,
+ MY_AES_BLOCK_SIZE);
+
+ /* Use last char in the block as size */
+ uint pad_len = (uint) (uchar) block[MY_AES_BLOCK_SIZE - 1];
+
+ if (pad_len > MY_AES_BLOCK_SIZE)
+ return AES_BAD_DATA;
+ /* We could also check whole padding but we do not really need this */
+
+ memcpy(dest, block, MY_AES_BLOCK_SIZE - pad_len);
+ return MY_AES_BLOCK_SIZE * num_blocks - pad_len;
+#elif defined(HAVE_OPENSSL)
+ if (! EVP_DecryptInit(&ctx.ctx, EVP_aes_128_ecb(),
+ (const unsigned char *) rkey, NULL))
+ return AES_BAD_DATA; /* Error */
+ if (! EVP_DecryptUpdate(&ctx.ctx, (unsigned char *) dest, &u_len,
+ (unsigned const char *) source, source_length))
+ return AES_BAD_DATA; /* Error */
+ if (! EVP_DecryptFinal(&ctx.ctx, (unsigned char *) dest + u_len, &f_len))
+ return AES_BAD_DATA; /* Error */
+ return u_len + f_len;
+#endif
+}
+
+
+/**
+ Get size of buffer which will be large enough for encrypted data
+
+ SYNOPSIS
+ my_aes_get_size()
+ @param source_length [in] Length of data to be encrypted
+
+ @return
+ Size of buffer required to store encrypted data
+*/
+
+int my_aes_get_size(int source_length)
+{
+ return MY_AES_BLOCK_SIZE * (source_length / MY_AES_BLOCK_SIZE)
+ + MY_AES_BLOCK_SIZE;
+}
+
diff --git a/mysys_ssl/my_md5.cc b/mysys_ssl/my_md5.cc
new file mode 100644
index 00000000000..4c14366a4e3
--- /dev/null
+++ b/mysys_ssl/my_md5.cc
@@ -0,0 +1,68 @@
+/* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software Foundation,
+ 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */
+
+
+/**
+ @file
+
+ @brief
+ Wrapper functions for OpenSSL and YaSSL. Also provides a Compatibility layer
+ to make available YaSSL's MD5 implementation.
+*/
+
+#include <my_global.h>
+#include <my_md5.h>
+
+#if defined(HAVE_YASSL)
+#include "my_config.h"
+#include "md5.hpp"
+
+static void my_md5_hash(char *digest, const char *buf, int len)
+{
+ TaoCrypt::MD5 hasher;
+ hasher.Update((TaoCrypt::byte *) buf, len);
+ hasher.Final((TaoCrypt::byte *) digest);
+}
+
+#elif defined(HAVE_OPENSSL)
+#include <openssl/md5.h>
+
+static void my_md5_hash(unsigned char* digest, unsigned const char *buf, int len)
+{
+ MD5_CTX ctx;
+ MD5_Init (&ctx);
+ MD5_Update (&ctx, buf, len);
+ MD5_Final (digest, &ctx);
+}
+
+#endif /* HAVE_YASSL */
+
+/**
+ Wrapper function to compute MD5 message digest.
+
+ @param digest [out] Computed MD5 digest
+ @param buf [in] Message to be computed
+ @param len [in] Length of the message
+
+ @return void
+*/
+void compute_md5_hash(char *digest, const char *buf, int len)
+{
+#if defined(HAVE_YASSL)
+ my_md5_hash(digest, buf, len);
+#elif defined(HAVE_OPENSSL)
+ my_md5_hash((unsigned char*)digest, (unsigned const char*)buf, len);
+#endif /* HAVE_YASSL */
+}
diff --git a/mysys_ssl/my_rnd.cc b/mysys_ssl/my_rnd.cc
new file mode 100644
index 00000000000..aa8fb63cd4d
--- /dev/null
+++ b/mysys_ssl/my_rnd.cc
@@ -0,0 +1,103 @@
+/*
+ Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+#include <my_global.h>
+#include <my_rnd.h>
+#include <m_string.h>
+
+#if defined(HAVE_YASSL)
+#if defined(YASSL_PREFIX)
+#define RAND_bytes yaRAND_bytes
+#endif /* YASSL_PREFIX */
+
+#include <openssl/ssl.h>
+
+#elif defined(HAVE_OPENSSL)
+#include <openssl/rand.h>
+#endif /* HAVE_YASSL */
+
+
+/*
+ A wrapper to use OpenSSL/yaSSL PRNGs.
+*/
+
+extern "C" {
+
+/*
+ Initialize random generator
+
+ NOTES
+ MySQL's password checks depends on this, so don't do any changes
+ that changes the random numbers that are generated!
+*/
+
+void my_rnd_init(struct my_rnd_struct *rand_st, ulong seed1, ulong seed2)
+{
+#ifdef HAVE_valgrind
+ bzero((char*) rand_st,sizeof(*rand_st)); /* Avoid UMC varnings */
+#endif
+ rand_st->max_value= 0x3FFFFFFFL;
+ rand_st->max_value_dbl=(double) rand_st->max_value;
+ rand_st->seed1=seed1%rand_st->max_value ;
+ rand_st->seed2=seed2%rand_st->max_value;
+}
+
+/**
+ Generate random number.
+
+ @param rand_st [INOUT] Structure used for number generation.
+
+ @retval Generated pseudo random number.
+*/
+
+double my_rnd(struct my_rnd_struct *rand_st)
+{
+ rand_st->seed1= (rand_st->seed1*3+rand_st->seed2) % rand_st->max_value;
+ rand_st->seed2= (rand_st->seed1+rand_st->seed2+33) % rand_st->max_value;
+ return (((double) rand_st->seed1) / rand_st->max_value_dbl);
+}
+
+/**
+ Generate a random number using the OpenSSL/yaSSL supplied
+ random number generator if available.
+
+ @param rand_st [INOUT] Structure used for number generation
+ only if none of the SSL libraries are
+ available.
+
+ @retval Generated random number.
+*/
+
+double my_rnd_ssl(struct my_rnd_struct *rand_st)
+{
+
+#if defined(HAVE_YASSL) || defined(HAVE_OPENSSL)
+ int rc;
+ unsigned int res;
+
+#if defined(HAVE_YASSL)
+ rc= yaSSL::RAND_bytes((unsigned char *) &res, sizeof (unsigned int));
+#else
+ rc= RAND_bytes((unsigned char *) &res, sizeof (unsigned int));
+#endif /* HAVE_YASSL */
+ if (rc)
+ return (double)res / (double)UINT_MAX;
+
+#endif /* defined(HAVE_YASSL) || defined(HAVE_OPENSSL) */
+ return my_rnd(rand_st);
+}
+
+}
diff --git a/mysys_ssl/my_sha1.cc b/mysys_ssl/my_sha1.cc
new file mode 100644
index 00000000000..fc8f88856bb
--- /dev/null
+++ b/mysys_ssl/my_sha1.cc
@@ -0,0 +1,151 @@
+/* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software Foundation,
+ 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */
+
+
+/**
+ @file
+
+ @brief
+ Wrapper functions for OpenSSL, YaSSL implementations. Also provides a
+ Compatibility layer to make available YaSSL's SHA1 implementation.
+*/
+
+#include <my_global.h>
+#include <sha1.h>
+#include <stdarg.h>
+
+#if defined(HAVE_YASSL)
+#include "sha.hpp"
+
+/**
+ Compute SHA1 message digest using YaSSL.
+
+ @param digest [out] Computed SHA1 digest
+ @param buf [in] Message to be computed
+ @param len [in] Length of the message
+
+ @return void
+*/
+void mysql_sha1_yassl(uint8 *digest, const char *buf, int len)
+{
+ TaoCrypt::SHA hasher;
+ hasher.Update((const TaoCrypt::byte *) buf, len);
+ hasher.Final ((TaoCrypt::byte *) digest);
+}
+
+/**
+ Compute SHA1 message digest for two messages in order to
+ emulate sha1(msg1, msg2) using YaSSL.
+
+ @param digest [out] Computed SHA1 digest
+ @param buf1 [in] First message
+ @param len1 [in] Length of first message
+ @param buf2 [in] Second message
+ @param len2 [in] Length of second message
+
+ @return void
+*/
+void mysql_sha1_multi_yassl(uint8 *digest, va_list args)
+{
+ const char *str;
+ TaoCrypt::SHA hasher;
+
+ for (str= va_arg(args, const char*); str; str= va_arg(args, const char*))
+ {
+ hasher.Update((const TaoCrypt::byte *) str, va_arg(args, size_t));
+ }
+ hasher.Final((TaoCrypt::byte *) digest);
+}
+
+#elif defined(HAVE_OPENSSL)
+#include <openssl/sha.h>
+
+int mysql_sha1_reset(SHA_CTX *context)
+{
+ return SHA1_Init(context);
+}
+
+
+int mysql_sha1_input(SHA_CTX *context, const uint8 *message_array,
+ unsigned length)
+{
+ return SHA1_Update(context, message_array, length);
+}
+
+
+int mysql_sha1_result(SHA_CTX *context,
+ uint8 Message_Digest[SHA1_HASH_SIZE])
+{
+ return SHA1_Final(Message_Digest, context);
+}
+
+#endif /* HAVE_YASSL */
+
+/**
+ Wrapper function to compute SHA1 message digest.
+
+ @param digest [out] Computed SHA1 digest
+ @param buf [in] Message to be computed
+ @param len [in] Length of the message
+
+ @return void
+*/
+void my_sha1(uint8 *digest, const char *buf, size_t len)
+{
+#if defined(HAVE_YASSL)
+ mysql_sha1_yassl(digest, buf, len);
+#elif defined(HAVE_OPENSSL)
+ SHA_CTX sha1_context;
+
+ mysql_sha1_reset(&sha1_context);
+ mysql_sha1_input(&sha1_context, (const uint8 *) buf, len);
+ mysql_sha1_result(&sha1_context, digest);
+#endif /* HAVE_YASSL */
+}
+
+
+/**
+ Wrapper function to compute SHA1 message digest for
+ two messages in order to emulate sha1(msg1, msg2).
+
+ @param digest [out] Computed SHA1 digest
+ @param buf1 [in] First message
+ @param len1 [in] Length of first message
+ @param buf2 [in] Second message
+ @param len2 [in] Length of second message
+
+ @return void
+*/
+void my_sha1_multi(uint8 *digest, ...)
+{
+ va_list args;
+ va_start(args, digest);
+
+#if defined(HAVE_YASSL)
+ mysql_sha1_multi_yassl(digest, args);
+#elif defined(HAVE_OPENSSL)
+ SHA_CTX sha1_context;
+ const char *str;
+
+ mysql_sha1_reset(&sha1_context);
+ for (str= va_arg(args, const char*); str; str= va_arg(args, const char*))
+ {
+ mysql_sha1_input(&sha1_context, (const uint8 *) str, va_arg(args, size_t));
+ }
+ mysql_sha1_result(&sha1_context, digest);
+#endif /* HAVE_YASSL */
+ va_end(args);
+}
+
diff --git a/mysys_ssl/my_sha2.cc b/mysys_ssl/my_sha2.cc
new file mode 100644
index 00000000000..00200337f08
--- /dev/null
+++ b/mysys_ssl/my_sha2.cc
@@ -0,0 +1,68 @@
+/* Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software Foundation,
+ 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */
+
+
+/**
+ @file
+ A compatibility layer to our built-in SSL implementation, to mimic the
+ oft-used external library, OpenSSL.
+*/
+
+#include <my_global.h>
+#include <sha2.h>
+
+#ifdef HAVE_YASSL
+
+/*
+ If TaoCrypt::SHA512 or ::SHA384 are not defined (but ::SHA256 is), it's
+ probably that neither of config.h's SIZEOF_LONG or SIZEOF_LONG_LONG are
+ 64 bits long. At present, both OpenSSL and YaSSL require 64-bit integers
+ for SHA-512. (The SIZEOF_* definitions come from autoconf's config.h .)
+*/
+
+# define GEN_YASSL_SHA2_BRIDGE(size) \
+unsigned char* SHA##size(const unsigned char *input_ptr, size_t input_length, \
+ char unsigned *output_ptr) { \
+ TaoCrypt::SHA##size hasher; \
+ \
+ hasher.Update(input_ptr, input_length); \
+ hasher.Final(output_ptr); \
+ return(output_ptr); \
+}
+
+
+/**
+ @fn SHA512
+ @fn SHA384
+ @fn SHA256
+ @fn SHA224
+
+ Instantiate an hash object, fill in the cleartext value, compute the digest,
+ and extract the result from the object.
+
+ (Generate the functions. See similar .h code for the prototypes.)
+*/
+# ifndef OPENSSL_NO_SHA512
+GEN_YASSL_SHA2_BRIDGE(512);
+GEN_YASSL_SHA2_BRIDGE(384);
+# else
+# warning Some SHA2 functionality is missing. See OPENSSL_NO_SHA512.
+# endif
+GEN_YASSL_SHA2_BRIDGE(256);
+GEN_YASSL_SHA2_BRIDGE(224);
+
+# undef GEN_YASSL_SHA2_BRIDGE
+
+#endif /* HAVE_YASSL */
diff --git a/plugin/feedback/utils.cc b/plugin/feedback/utils.cc
index f7f962deaca..c0227cf1292 100644
--- a/plugin/feedback/utils.cc
+++ b/plugin/feedback/utils.cc
@@ -389,7 +389,6 @@ int calculate_server_uid(char *dest)
{
uchar rawbuf[2 + 6];
uchar shabuf[SHA1_HASH_SIZE];
- SHA1_CONTEXT ctx;
int2store(rawbuf, mysqld_port);
if (my_gethwaddr(rawbuf + 2))
@@ -398,9 +397,7 @@ int calculate_server_uid(char *dest)
return 1;
}
- mysql_sha1_reset(&ctx);
- mysql_sha1_input(&ctx, rawbuf, sizeof(rawbuf));
- mysql_sha1_result(&ctx, shabuf);
+ compute_sha1_hash((uint8*) shabuf, (char*) rawbuf, sizeof(rawbuf));
assert(base64_needed_encoded_length(sizeof(shabuf)) <= SERVER_UID_SIZE);
base64_encode(shabuf, sizeof(shabuf), dest);
diff --git a/plugin/handler_socket/handlersocket/database.cpp b/plugin/handler_socket/handlersocket/database.cpp
index beb28ef708c..a15c18a4c70 100644
--- a/plugin/handler_socket/handlersocket/database.cpp
+++ b/plugin/handler_socket/handlersocket/database.cpp
@@ -762,7 +762,7 @@ dbcontext::cmd_find_internal(dbcallback_i& cb, const prep_stmt& pst,
return cb.dbcb_resp_short(2, "idxnum");
}
KEY& kinfo = table->key_info[pst.get_idxnum()];
- if (args.kvalslen > kinfo.key_parts) {
+ if (args.kvalslen > kinfo.user_defined_key_parts) {
return cb.dbcb_resp_short(2, "kpnum");
}
uchar *const key_buf = DENA_ALLOCA_ALLOCATE(uchar, kinfo.key_length);
diff --git a/plugin/qc_info/qc_info.cc b/plugin/qc_info/qc_info.cc
index 8489b14c5db..717c54d548b 100644
--- a/plugin/qc_info/qc_info.cc
+++ b/plugin/qc_info/qc_info.cc
@@ -113,7 +113,7 @@ static int qc_info_fill_table(THD *thd, TABLE_LIST *tables,
statement_text_length = strlen(statement_text);
/* We truncate SQL statements up to MAX_STATEMENT_TEXT_LENGTH in our I_S table */
table->field[COLUMN_STATEMENT_TEXT]->store((char*)statement_text,
- min(statement_text_length, MAX_STATEMENT_TEXT_LENGTH), scs);
+ MY_MIN(statement_text_length, MAX_STATEMENT_TEXT_LENGTH), scs);
/* get the entire key that identifies this query cache query */
key = (const char*)query_cache_query_get_key(query_cache_block_raw,
diff --git a/plugin/query_response_time/query_response_time.h b/plugin/query_response_time/query_response_time.h
index b19833a6570..35b01b0db81 100644
--- a/plugin/query_response_time/query_response_time.h
+++ b/plugin/query_response_time/query_response_time.h
@@ -44,11 +44,11 @@
#define QRT_DEFAULT_BASE 10
#define QRT_TIME_STRING_LENGTH \
- max( (QRT_TIME_STRING_POSITIVE_POWER_LENGTH + 1 /* '.' */ + 6 /*QRT_TIME_STRING_NEGATIVE_POWER_LENGTH*/), \
+ MY_MAX( (QRT_TIME_STRING_POSITIVE_POWER_LENGTH + 1 /* '.' */ + 6 /*QRT_TIME_STRING_NEGATIVE_POWER_LENGTH*/), \
(sizeof(QRT_TIME_OVERFLOW) - 1) )
#define QRT_TOTAL_STRING_LENGTH \
- max( (QRT_TOTAL_STRING_POSITIVE_POWER_LENGTH + 1 /* '.' */ + 6 /*QRT_TOTAL_STRING_NEGATIVE_POWER_LENGTH*/), \
+ MY_MAX( (QRT_TOTAL_STRING_POSITIVE_POWER_LENGTH + 1 /* '.' */ + 6 /*QRT_TOTAL_STRING_NEGATIVE_POWER_LENGTH*/), \
(sizeof(QRT_TIME_OVERFLOW) - 1) )
extern ST_SCHEMA_TABLE query_response_time_table;
diff --git a/scripts/CMakeLists.txt b/scripts/CMakeLists.txt
index 76e92899c2a..126c8a548fb 100644
--- a/scripts/CMakeLists.txt
+++ b/scripts/CMakeLists.txt
@@ -20,7 +20,7 @@ IF(NOT CMAKE_CROSSCOMPILING)
ENDIF()
-# Build mysql_fix_privilege_tables.sql (concatenate 2 sql scripts)
+# Build mysql_fix_privilege_tables.sql (concatenate 3 sql scripts)
IF(NOT WIN32 OR CMAKE_CROSSCOMPILING)
FIND_PROGRAM(CAT_EXECUTABLE cat DOC "path to the executable")
MARK_AS_ADVANCED(CAT_EXECUTABLE)
diff --git a/scripts/mysql_performance_tables.sql b/scripts/mysql_performance_tables.sql
index d08ef13a8a9..d4955782e4d 100644
--- a/scripts/mysql_performance_tables.sql
+++ b/scripts/mysql_performance_tables.sql
@@ -80,7 +80,7 @@ DROP PREPARE stmt;
--
SET @cmd="CREATE TABLE performance_schema.events_waits_current("
- "THREAD_ID INTEGER not null,"
+ "THREAD_ID BIGINT unsigned not null,"
"EVENT_ID BIGINT unsigned not null,"
"END_EVENT_ID BIGINT unsigned,"
"EVENT_NAME VARCHAR(128) not null,"
@@ -111,7 +111,7 @@ DROP PREPARE stmt;
--
SET @cmd="CREATE TABLE performance_schema.events_waits_history("
- "THREAD_ID INTEGER not null,"
+ "THREAD_ID BIGINT unsigned not null,"
"EVENT_ID BIGINT unsigned not null,"
"END_EVENT_ID BIGINT unsigned,"
"EVENT_NAME VARCHAR(128) not null,"
@@ -142,7 +142,7 @@ DROP PREPARE stmt;
--
SET @cmd="CREATE TABLE performance_schema.events_waits_history_long("
- "THREAD_ID INTEGER not null,"
+ "THREAD_ID BIGINT unsigned not null,"
"EVENT_ID BIGINT unsigned not null,"
"END_EVENT_ID BIGINT unsigned,"
"EVENT_NAME VARCHAR(128) not null,"
@@ -250,7 +250,7 @@ DROP PREPARE stmt;
--
SET @cmd="CREATE TABLE performance_schema.events_waits_summary_by_thread_by_event_name("
- "THREAD_ID INTEGER not null,"
+ "THREAD_ID BIGINT unsigned not null,"
"EVENT_NAME VARCHAR(128) not null,"
"COUNT_STAR BIGINT unsigned not null,"
"SUM_TIMER_WAIT BIGINT unsigned not null,"
@@ -377,7 +377,7 @@ DROP PREPARE stmt;
SET @cmd="CREATE TABLE performance_schema.socket_instances("
"EVENT_NAME VARCHAR(128) not null,"
"OBJECT_INSTANCE_BEGIN BIGINT unsigned not null,"
- "THREAD_ID INTEGER,"
+ "THREAD_ID BIGINT unsigned,"
"SOCKET_ID INTEGER not null,"
"IP VARCHAR(64) not null,"
"PORT INTEGER not null,"
@@ -490,16 +490,16 @@ SET @cmd="CREATE TABLE performance_schema.host_cache("
"COUNT_INIT_CONNECT_ERRORS BIGINT not null,"
"COUNT_LOCAL_ERRORS BIGINT not null,"
"COUNT_UNKNOWN_ERRORS BIGINT not null,"
- "FIRST_SEEN TIMESTAMP(0) default 0,"
- "LAST_SEEN TIMESTAMP(0) default 0,"
+ "FIRST_SEEN TIMESTAMP(0) NOT NULL default 0,"
+ "LAST_SEEN TIMESTAMP(0) NOT NULL default 0,"
"FIRST_ERROR_SEEN TIMESTAMP(0) null default 0,"
"LAST_ERROR_SEEN TIMESTAMP(0) null default 0"
")ENGINE=PERFORMANCE_SCHEMA;";
SET @str = IF(@have_pfs = 1, @cmd, 'SET @dummy = 0');
-#PREPARE stmt FROM @str;
-#EXECUTE stmt;
-#DROP PREPARE stmt;
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
--
-- TABLE MUTEX_INSTANCES
@@ -508,7 +508,7 @@ SET @str = IF(@have_pfs = 1, @cmd, 'SET @dummy = 0');
SET @cmd="CREATE TABLE performance_schema.mutex_instances("
"NAME VARCHAR(128) not null,"
"OBJECT_INSTANCE_BEGIN BIGINT unsigned not null,"
- "LOCKED_BY_THREAD_ID INTEGER"
+ "LOCKED_BY_THREAD_ID BIGINT unsigned"
")ENGINE=PERFORMANCE_SCHEMA;";
SET @str = IF(@have_pfs = 1, @cmd, 'SET @dummy = 0');
@@ -559,7 +559,7 @@ DROP PREPARE stmt;
SET @cmd="CREATE TABLE performance_schema.rwlock_instances("
"NAME VARCHAR(128) not null,"
"OBJECT_INSTANCE_BEGIN BIGINT unsigned not null,"
- "WRITE_LOCKED_BY_THREAD_ID INTEGER,"
+ "WRITE_LOCKED_BY_THREAD_ID BIGINT unsigned,"
"READ_LOCKED_BY_COUNT INTEGER unsigned not null"
")ENGINE=PERFORMANCE_SCHEMA;";
@@ -834,10 +834,10 @@ DROP PREPARE stmt;
--
SET @cmd="CREATE TABLE performance_schema.threads("
- "THREAD_ID INTEGER not null,"
+ "THREAD_ID BIGINT unsigned not null,"
"NAME VARCHAR(128) not null,"
"TYPE VARCHAR(10) not null,"
- "PROCESSLIST_ID INTEGER,"
+ "PROCESSLIST_ID BIGINT unsigned,"
"PROCESSLIST_USER VARCHAR(16),"
"PROCESSLIST_HOST VARCHAR(60),"
"PROCESSLIST_DB VARCHAR(64),"
@@ -845,7 +845,7 @@ SET @cmd="CREATE TABLE performance_schema.threads("
"PROCESSLIST_TIME BIGINT,"
"PROCESSLIST_STATE VARCHAR(64),"
"PROCESSLIST_INFO LONGTEXT,"
- "PARENT_THREAD_ID INTEGER,"
+ "PARENT_THREAD_ID BIGINT unsigned,"
"ROLE VARCHAR(64),"
"INSTRUMENTED ENUM ('YES', 'NO') not null"
")ENGINE=PERFORMANCE_SCHEMA;";
@@ -860,7 +860,7 @@ DROP PREPARE stmt;
--
SET @cmd="CREATE TABLE performance_schema.events_stages_current("
- "THREAD_ID INTEGER not null,"
+ "THREAD_ID BIGINT unsigned not null,"
"EVENT_ID BIGINT unsigned not null,"
"END_EVENT_ID BIGINT unsigned,"
"EVENT_NAME VARCHAR(128) not null,"
@@ -882,7 +882,7 @@ DROP PREPARE stmt;
--
SET @cmd="CREATE TABLE performance_schema.events_stages_history("
- "THREAD_ID INTEGER not null,"
+ "THREAD_ID BIGINT unsigned not null,"
"EVENT_ID BIGINT unsigned not null,"
"END_EVENT_ID BIGINT unsigned,"
"EVENT_NAME VARCHAR(128) not null,"
@@ -904,7 +904,7 @@ DROP PREPARE stmt;
--
SET @cmd="CREATE TABLE performance_schema.events_stages_history_long("
- "THREAD_ID INTEGER not null,"
+ "THREAD_ID BIGINT unsigned not null,"
"EVENT_ID BIGINT unsigned not null,"
"END_EVENT_ID BIGINT unsigned,"
"EVENT_NAME VARCHAR(128) not null,"
@@ -926,7 +926,7 @@ DROP PREPARE stmt;
--
SET @cmd="CREATE TABLE performance_schema.events_stages_summary_by_thread_by_event_name("
- "THREAD_ID INTEGER not null,"
+ "THREAD_ID BIGINT unsigned not null,"
"EVENT_NAME VARCHAR(128) not null,"
"COUNT_STAR BIGINT unsigned not null,"
"SUM_TIMER_WAIT BIGINT unsigned not null,"
@@ -1021,7 +1021,7 @@ DROP PREPARE stmt;
--
SET @cmd="CREATE TABLE performance_schema.events_statements_current("
- "THREAD_ID INTEGER not null,"
+ "THREAD_ID BIGINT unsigned not null,"
"EVENT_ID BIGINT unsigned not null,"
"END_EVENT_ID BIGINT unsigned,"
"EVENT_NAME VARCHAR(128) not null,"
@@ -1073,7 +1073,7 @@ DROP PREPARE stmt;
--
SET @cmd="CREATE TABLE performance_schema.events_statements_history("
- "THREAD_ID INTEGER not null,"
+ "THREAD_ID BIGINT unsigned not null,"
"EVENT_ID BIGINT unsigned not null,"
"END_EVENT_ID BIGINT unsigned,"
"EVENT_NAME VARCHAR(128) not null,"
@@ -1125,7 +1125,7 @@ DROP PREPARE stmt;
--
SET @cmd="CREATE TABLE performance_schema.events_statements_history_long("
- "THREAD_ID INTEGER not null,"
+ "THREAD_ID BIGINT unsigned not null,"
"EVENT_ID BIGINT unsigned not null,"
"END_EVENT_ID BIGINT unsigned,"
"EVENT_NAME VARCHAR(128) not null,"
@@ -1177,7 +1177,7 @@ DROP PREPARE stmt;
--
SET @cmd="CREATE TABLE performance_schema.events_statements_summary_by_thread_by_event_name("
- "THREAD_ID INTEGER not null,"
+ "THREAD_ID BIGINT unsigned not null,"
"EVENT_NAME VARCHAR(128) not null,"
"COUNT_STAR BIGINT unsigned not null,"
"SUM_TIMER_WAIT BIGINT unsigned not null,"
@@ -1413,6 +1413,7 @@ DROP PREPARE stmt;
--
SET @cmd="CREATE TABLE performance_schema.events_statements_summary_by_digest("
+ "SCHEMA_NAME VARCHAR(64),"
"DIGEST VARCHAR(32),"
"DIGEST_TEXT LONGTEXT,"
"COUNT_STAR BIGINT unsigned not null,"
@@ -1439,8 +1440,8 @@ SET @cmd="CREATE TABLE performance_schema.events_statements_summary_by_digest("
"SUM_SORT_SCAN BIGINT unsigned not null,"
"SUM_NO_INDEX_USED BIGINT unsigned not null,"
"SUM_NO_GOOD_INDEX_USED BIGINT unsigned not null,"
- "FIRST_SEEN TIMESTAMP(0) default 0,"
- "LAST_SEEN TIMESTAMP(0) default 0"
+ "FIRST_SEEN TIMESTAMP(0) NOT NULL default 0,"
+ "LAST_SEEN TIMESTAMP(0) NOT NULL default 0"
")ENGINE=PERFORMANCE_SCHEMA;";
@@ -1449,3 +1450,30 @@ PREPARE stmt FROM @str;
EXECUTE stmt;
DROP PREPARE stmt;
+--
+-- TABLE SESSION_CONNECT_ATTRS
+--
+
+SET @cmd="CREATE TABLE performance_schema.session_connect_attrs("
+ "PROCESSLIST_ID INT NOT NULL,"
+ "ATTR_NAME VARCHAR(32) NOT NULL,"
+ "ATTR_VALUE VARCHAR(1024),"
+ "ORDINAL_POSITION INT"
+ ")ENGINE=PERFORMANCE_SCHEMA CHARACTER SET utf8 COLLATE utf8_bin;";
+
+SET @str = IF(@have_pfs = 1, @cmd, 'SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
+
+--
+-- TABLE SESSION_ACCOUNT_CONNECT_ATTRS
+--
+
+SET @cmd="CREATE TABLE performance_schema.session_account_connect_attrs "
+ " LIKE performance_schema.session_connect_attrs;";
+
+SET @str = IF(@have_pfs = 1, @cmd, 'SET @dummy = 0');
+PREPARE stmt FROM @str;
+EXECUTE stmt;
+DROP PREPARE stmt;
diff --git a/scripts/mysql_system_tables.sql b/scripts/mysql_system_tables.sql
index 7b3803b05bc..ea36c7691b0 100644
--- a/scripts/mysql_system_tables.sql
+++ b/scripts/mysql_system_tables.sql
@@ -1,6 +1,6 @@
-- Copyright (c) 2007, 2011, Oracle and/or its affiliates.
-- Copyright (c) 2007, 2008 MySQL AB, 2009 Sun Microsystems, Inc.
--- Copyright (c) 2008-2012 Monty Program Ab
+-- Copyright (c) 2008-2013 Monty Program Ab & SkySQL Ab
--
-- This program is free software; you can redistribute it and/or modify
-- it under the terms of the GNU General Public License as published by
@@ -27,7 +27,6 @@ set sql_mode='';
CREATE TABLE IF NOT EXISTS gtid_slave_pos (domain_id INT UNSIGNED NOT NULL, sub_id BIGINT UNSIGNED NOT NULL, server_id INT UNSIGNED NOT NULL, seq_no BIGINT UNSIGNED NOT NULL, PRIMARY KEY (domain_id, sub_id)) comment='Replication slave GTID position';
set storage_engine=myisam;
-flush tables;
CREATE TABLE IF NOT EXISTS db ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Event_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db,User), KEY User (User) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Database privileges';
@@ -36,7 +35,7 @@ set @had_db_table= @@warning_count != 0;
CREATE TABLE IF NOT EXISTS host ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,Db) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Host privileges; Merged with database privileges';
-CREATE TABLE IF NOT EXISTS user ( Host char(60) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Password char(41) character set latin1 collate latin1_bin DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Reload_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Shutdown_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Process_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, File_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_db_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Super_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Repl_slave_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Repl_client_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') 
COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_user_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Event_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tablespace_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, ssl_type enum('','ANY','X509', 'SPECIFIED') COLLATE utf8_general_ci DEFAULT '' NOT NULL, ssl_cipher BLOB NOT NULL, x509_issuer BLOB NOT NULL, x509_subject BLOB NOT NULL, max_questions int(11) unsigned DEFAULT 0 NOT NULL, max_updates int(11) unsigned DEFAULT 0 NOT NULL, max_connections int(11) unsigned DEFAULT 0 NOT NULL, max_user_connections int(11) DEFAULT 0 NOT NULL, plugin char(64) CHARACTER SET latin1 DEFAULT '' NOT NULL, authentication_string TEXT NOT NULL, PRIMARY KEY Host (Host,User) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Users and global privileges';
+CREATE TABLE IF NOT EXISTS user ( Host char(60) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Password char(41) character set latin1 collate latin1_bin DEFAULT '' NOT NULL, Select_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Insert_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Update_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Delete_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Drop_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Reload_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Shutdown_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Process_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, File_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Grant_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, References_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Index_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_db_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Super_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tmp_table_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Lock_tables_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Execute_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Repl_slave_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Repl_client_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Show_view_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_routine_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Alter_routine_priv enum('N','Y') 
COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_user_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Event_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Trigger_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, Create_tablespace_priv enum('N','Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, ssl_type enum('','ANY','X509', 'SPECIFIED') COLLATE utf8_general_ci DEFAULT '' NOT NULL, ssl_cipher BLOB NOT NULL, x509_issuer BLOB NOT NULL, x509_subject BLOB NOT NULL, max_questions int(11) unsigned DEFAULT 0 NOT NULL, max_updates int(11) unsigned DEFAULT 0 NOT NULL, max_connections int(11) unsigned DEFAULT 0 NOT NULL, max_user_connections int(11) DEFAULT 0 NOT NULL, plugin char(64) CHARACTER SET latin1 DEFAULT '' NOT NULL, authentication_string TEXT NOT NULL, password_expired ENUM('N', 'Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL, PRIMARY KEY Host (Host,User) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Users and global privileges';
-- Remember for later if user table already existed
set @had_user_table= @@warning_count != 0;
@@ -51,9 +50,9 @@ CREATE TABLE IF NOT EXISTS plugin ( name varchar(64) DEFAULT '' NOT NULL, dl var
CREATE TABLE IF NOT EXISTS servers ( Server_name char(64) NOT NULL DEFAULT '', Host char(64) NOT NULL DEFAULT '', Db char(64) NOT NULL DEFAULT '', Username char(64) NOT NULL DEFAULT '', Password char(64) NOT NULL DEFAULT '', Port INT(4) NOT NULL DEFAULT '0', Socket char(64) NOT NULL DEFAULT '', Wrapper char(64) NOT NULL DEFAULT '', Owner char(64) NOT NULL DEFAULT '', PRIMARY KEY (Server_name)) CHARACTER SET utf8 comment='MySQL Foreign Servers table';
-CREATE TABLE IF NOT EXISTS tables_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Timestamp timestamp, Table_priv set('Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter','Create View','Show view','Trigger') COLLATE utf8_general_ci DEFAULT '' NOT NULL, Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Table privileges';
+CREATE TABLE IF NOT EXISTS tables_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, Table_priv set('Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter','Create View','Show view','Trigger') COLLATE utf8_general_ci DEFAULT '' NOT NULL, Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Table privileges';
-CREATE TABLE IF NOT EXISTS columns_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Column_name char(64) binary DEFAULT '' NOT NULL, Timestamp timestamp, Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name,Column_name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Column privileges';
+CREATE TABLE IF NOT EXISTS columns_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Table_name char(64) binary DEFAULT '' NOT NULL, Column_name char(64) binary DEFAULT '' NOT NULL, Timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, Column_priv set('Select','Insert','Update','References') COLLATE utf8_general_ci DEFAULT '' NOT NULL, PRIMARY KEY (Host,Db,User,Table_name,Column_name) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Column privileges';
CREATE TABLE IF NOT EXISTS help_topic ( help_topic_id int unsigned not null, name char(64) not null, help_category_id smallint unsigned not null, description text not null, example text not null, url text not null, primary key (help_topic_id), unique index (name) ) engine=MyISAM CHARACTER SET utf8 comment='help topics';
@@ -82,13 +81,15 @@ CREATE TABLE IF NOT EXISTS time_zone_transition_type ( Time_zone_id int unsign
CREATE TABLE IF NOT EXISTS time_zone_leap_second ( Transition_time bigint signed NOT NULL, Correction int signed NOT NULL, PRIMARY KEY TranTime (Transition_time) ) engine=MyISAM CHARACTER SET utf8 comment='Leap seconds information for time zones';
-CREATE TABLE IF NOT EXISTS proc (db char(64) collate utf8_bin DEFAULT '' NOT NULL, name char(64) DEFAULT '' NOT NULL, type enum('FUNCTION','PROCEDURE') NOT NULL, specific_name char(64) DEFAULT '' NOT NULL, language enum('SQL') DEFAULT 'SQL' NOT NULL, sql_data_access enum( 'CONTAINS_SQL', 'NO_SQL', 'READS_SQL_DATA', 'MODIFIES_SQL_DATA') DEFAULT 'CONTAINS_SQL' NOT NULL, is_deterministic enum('YES','NO') DEFAULT 'NO' NOT NULL, security_type enum('INVOKER','DEFINER') DEFAULT 'DEFINER' NOT NULL, param_list blob NOT NULL, returns longblob DEFAULT '' NOT NULL, body longblob NOT NULL, definer char(77) collate utf8_bin DEFAULT '' NOT NULL, created timestamp, modified timestamp, sql_mode set( 'REAL_AS_FLOAT', 'PIPES_AS_CONCAT', 'ANSI_QUOTES', 'IGNORE_SPACE', 'IGNORE_BAD_TABLE_OPTIONS', 'ONLY_FULL_GROUP_BY', 'NO_UNSIGNED_SUBTRACTION', 'NO_DIR_IN_CREATE', 'POSTGRESQL', 'ORACLE', 'MSSQL', 'DB2', 'MAXDB', 'NO_KEY_OPTIONS', 'NO_TABLE_OPTIONS', 'NO_FIELD_OPTIONS', 'MYSQL323', 'MYSQL40', 'ANSI', 'NO_AUTO_VALUE_ON_ZERO', 'NO_BACKSLASH_ESCAPES', 'STRICT_TRANS_TABLES', 'STRICT_ALL_TABLES', 'NO_ZERO_IN_DATE', 'NO_ZERO_DATE', 'INVALID_DATES', 'ERROR_FOR_DIVISION_BY_ZERO', 'TRADITIONAL', 'NO_AUTO_CREATE_USER', 'HIGH_NOT_PRECEDENCE', 'NO_ENGINE_SUBSTITUTION', 'PAD_CHAR_TO_FULL_LENGTH') DEFAULT '' NOT NULL, comment text collate utf8_bin NOT NULL, character_set_client char(32) collate utf8_bin, collation_connection char(32) collate utf8_bin, db_collation char(32) collate utf8_bin, body_utf8 longblob, PRIMARY KEY (db,name,type)) engine=MyISAM character set utf8 comment='Stored Procedures';
+CREATE TABLE IF NOT EXISTS proc (db char(64) collate utf8_bin DEFAULT '' NOT NULL, name char(64) DEFAULT '' NOT NULL, type enum('FUNCTION','PROCEDURE') NOT NULL, specific_name char(64) DEFAULT '' NOT NULL, language enum('SQL') DEFAULT 'SQL' NOT NULL, sql_data_access enum( 'CONTAINS_SQL', 'NO_SQL', 'READS_SQL_DATA', 'MODIFIES_SQL_DATA') DEFAULT 'CONTAINS_SQL' NOT NULL, is_deterministic enum('YES','NO') DEFAULT 'NO' NOT NULL, security_type enum('INVOKER','DEFINER') DEFAULT 'DEFINER' NOT NULL, param_list blob NOT NULL, returns longblob DEFAULT '' NOT NULL, body longblob NOT NULL, definer char(77) collate utf8_bin DEFAULT '' NOT NULL, created timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, modified timestamp NOT NULL DEFAULT '0000-00-00 00:00:00', sql_mode set( 'REAL_AS_FLOAT', 'PIPES_AS_CONCAT', 'ANSI_QUOTES', 'IGNORE_SPACE', 'IGNORE_BAD_TABLE_OPTIONS', 'ONLY_FULL_GROUP_BY', 'NO_UNSIGNED_SUBTRACTION', 'NO_DIR_IN_CREATE', 'POSTGRESQL', 'ORACLE', 'MSSQL', 'DB2', 'MAXDB', 'NO_KEY_OPTIONS', 'NO_TABLE_OPTIONS', 'NO_FIELD_OPTIONS', 'MYSQL323', 'MYSQL40', 'ANSI', 'NO_AUTO_VALUE_ON_ZERO', 'NO_BACKSLASH_ESCAPES', 'STRICT_TRANS_TABLES', 'STRICT_ALL_TABLES', 'NO_ZERO_IN_DATE', 'NO_ZERO_DATE', 'INVALID_DATES', 'ERROR_FOR_DIVISION_BY_ZERO', 'TRADITIONAL', 'NO_AUTO_CREATE_USER', 'HIGH_NOT_PRECEDENCE', 'NO_ENGINE_SUBSTITUTION', 'PAD_CHAR_TO_FULL_LENGTH') DEFAULT '' NOT NULL, comment text collate utf8_bin NOT NULL, character_set_client char(32) collate utf8_bin, collation_connection char(32) collate utf8_bin, db_collation char(32) collate utf8_bin, body_utf8 longblob, PRIMARY KEY (db,name,type)) engine=MyISAM character set utf8 comment='Stored Procedures';
-CREATE TABLE IF NOT EXISTS procs_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Routine_name char(64) COLLATE utf8_general_ci DEFAULT '' NOT NULL, Routine_type enum('FUNCTION','PROCEDURE') NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Proc_priv set('Execute','Alter Routine','Grant') COLLATE utf8_general_ci DEFAULT '' NOT NULL, Timestamp timestamp, PRIMARY KEY (Host,Db,User,Routine_name,Routine_type), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Procedure privileges';
+CREATE TABLE IF NOT EXISTS procs_priv ( Host char(60) binary DEFAULT '' NOT NULL, Db char(64) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Routine_name char(64) COLLATE utf8_general_ci DEFAULT '' NOT NULL, Routine_type enum('FUNCTION','PROCEDURE') NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Proc_priv set('Execute','Alter Routine','Grant') COLLATE utf8_general_ci DEFAULT '' NOT NULL, Timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY (Host,Db,User,Routine_name,Routine_type), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Procedure privileges';
--- Create general_log if CSV is enabled.
-SET @str = IF (@@have_csv = 'YES', 'CREATE TABLE IF NOT EXISTS general_log (event_time TIMESTAMP(6) NOT NULL, user_host MEDIUMTEXT NOT NULL, thread_id INTEGER NOT NULL, server_id INTEGER UNSIGNED NOT NULL, command_type VARCHAR(64) NOT NULL, argument MEDIUMTEXT NOT NULL) engine=CSV CHARACTER SET utf8 comment="General log"', 'SET @dummy = 0');
+-- Create general_log if CSV is enabled.
+SET @have_csv = 'NO';
+SET @have_csv = (SELECT @@have_csv);
+SET @str = IF (@have_csv = 'YES', 'CREATE TABLE IF NOT EXISTS general_log (event_time TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, user_host MEDIUMTEXT NOT NULL, thread_id BIGINT(21) UNSIGNED NOT NULL, server_id INTEGER UNSIGNED NOT NULL, command_type VARCHAR(64) NOT NULL, argument MEDIUMTEXT NOT NULL) engine=CSV CHARACTER SET utf8 comment="General log"', 'SET @dummy = 0');
PREPARE stmt FROM @str;
EXECUTE stmt;
@@ -96,22 +97,13 @@ DROP PREPARE stmt;
-- Create slow_log if CSV is enabled.
-SET @str = IF (@@have_csv = 'YES', 'CREATE TABLE IF NOT EXISTS slow_log (start_time TIMESTAMP(6) NOT NULL, user_host MEDIUMTEXT NOT NULL, query_time TIME(6) NOT NULL, lock_time TIME(6) NOT NULL, rows_sent INTEGER NOT NULL, rows_examined INTEGER NOT NULL, db VARCHAR(512) NOT NULL, last_insert_id INTEGER NOT NULL, insert_id INTEGER NOT NULL, server_id INTEGER UNSIGNED NOT NULL, sql_text MEDIUMTEXT NOT NULL) engine=CSV CHARACTER SET utf8 comment="Slow log"', 'SET @dummy = 0');
+SET @str = IF (@have_csv = 'YES', 'CREATE TABLE IF NOT EXISTS slow_log (start_time TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, user_host MEDIUMTEXT NOT NULL, query_time TIME(6) NOT NULL, lock_time TIME(6) NOT NULL, rows_sent INTEGER NOT NULL, rows_examined INTEGER NOT NULL, db VARCHAR(512) NOT NULL, last_insert_id INTEGER NOT NULL, insert_id INTEGER NOT NULL, server_id INTEGER UNSIGNED NOT NULL, sql_text MEDIUMTEXT NOT NULL, thread_id BIGINT(21) UNSIGNED NOT NULL) engine=CSV CHARACTER SET utf8 comment="Slow log"', 'SET @dummy = 0');
PREPARE stmt FROM @str;
EXECUTE stmt;
DROP PREPARE stmt;
-CREATE TABLE IF NOT EXISTS event ( db char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', name char(64) CHARACTER SET utf8 NOT NULL default '', body longblob NOT NULL, definer char(77) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', execute_at DATETIME default NULL, interval_value int(11) default NULL, interval_field ENUM('YEAR','QUARTER','MONTH','DAY','HOUR','MINUTE','WEEK','SECOND','MICROSECOND','YEAR_MONTH','DAY_HOUR','DAY_MINUTE','DAY_SECOND','HOUR_MINUTE','HOUR_SECOND','MINUTE_SECOND','DAY_MICROSECOND','HOUR_MICROSECOND','MINUTE_MICROSECOND','SECOND_MICROSECOND') default NULL, created TIMESTAMP NOT NULL, modified TIMESTAMP NOT NULL, last_executed DATETIME default NULL, starts DATETIME default NULL, ends DATETIME default NULL, status ENUM('ENABLED','DISABLED','SLAVESIDE_DISABLED') NOT NULL default 'ENABLED', on_completion ENUM('DROP','PRESERVE') NOT NULL default 'DROP', sql_mode set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH') DEFAULT '' NOT NULL, comment char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', originator INTEGER UNSIGNED NOT NULL, time_zone char(64) CHARACTER SET latin1 NOT NULL DEFAULT 'SYSTEM', character_set_client char(32) collate utf8_bin, collation_connection char(32) collate utf8_bin, db_collation char(32) collate utf8_bin, body_utf8 longblob, PRIMARY KEY (db, name) ) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT 'Events';
-
-
-CREATE TABLE IF NOT EXISTS ndb_binlog_index (Position BIGINT UNSIGNED NOT NULL, File VARCHAR(255) NOT NULL, epoch BIGINT UNSIGNED NOT NULL, inserts BIGINT UNSIGNED NOT NULL, updates BIGINT UNSIGNED NOT NULL, deletes BIGINT UNSIGNED NOT NULL, schemaops BIGINT UNSIGNED NOT NULL, PRIMARY KEY(epoch)) ENGINE=MYISAM;
-
-CREATE TABLE IF NOT EXISTS slave_relay_log_info (Master_id INTEGER UNSIGNED NOT NULL, Number_of_lines INTEGER UNSIGNED NOT NULL COMMENT 'Number of lines in the file or rows in the table. Used to version table definitions.', Relay_log_name TEXT CHARACTER SET utf8 COLLATE utf8_bin NOT NULL COMMENT 'The name of the current relay log file.', Relay_log_pos BIGINT UNSIGNED NOT NULL COMMENT 'The relay log position of the last executed event.', Master_log_name TEXT CHARACTER SET utf8 COLLATE utf8_bin NOT NULL COMMENT 'The name of the master binary log file from which the events in the relay log file were read.', Master_log_pos BIGINT UNSIGNED NOT NULL COMMENT 'The master log position of the last executed event.', Sql_delay INTEGER NOT NULL COMMENT 'The number of seconds that the slave must lag behind the master.', Number_of_workers INTEGER UNSIGNED NOT NULL, PRIMARY KEY(Master_id)) ENGINE=MYISAM DEFAULT CHARSET=utf8 COMMENT 'Relay Log Information';
-
-CREATE TABLE IF NOT EXISTS slave_master_info (Master_id INTEGER UNSIGNED NOT NULL, Number_of_lines INTEGER UNSIGNED NOT NULL COMMENT 'Number of lines in the file.', Master_log_name TEXT CHARACTER SET utf8 COLLATE utf8_bin NOT NULL COMMENT 'The name of the master binary log currently being read from the master.', Master_log_pos BIGINT UNSIGNED NOT NULL COMMENT 'The master log position of the last read event.', Host TEXT CHARACTER SET utf8 COLLATE utf8_bin COMMENT 'The host name of the master.', User_name TEXT CHARACTER SET utf8 COLLATE utf8_bin COMMENT 'The user name used to connect to the master.', User_password TEXT CHARACTER SET utf8 COLLATE utf8_bin COMMENT 'The password used to connect to the master.', Port INTEGER UNSIGNED NOT NULL COMMENT 'The network port used to connect to the master.', Connect_retry INTEGER UNSIGNED NOT NULL COMMENT 'The period (in seconds) that the slave will wait before trying to reconnect to the master.', Enabled_ssl BOOLEAN NOT NULL COMMENT 'Indicates whether the server supports SSL connections.', Ssl_ca TEXT CHARACTER SET utf8 COLLATE utf8_bin COMMENT 'The file used for the Certificate Authority (CA) certificate.', Ssl_capath TEXT CHARACTER SET utf8 COLLATE utf8_bin COMMENT 'The path to the Certificate Authority (CA) certificates.', Ssl_cert TEXT CHARACTER SET utf8 COLLATE utf8_bin COMMENT 'The name of the SSL certificate file.', Ssl_cipher TEXT CHARACTER SET utf8 COLLATE utf8_bin COMMENT 'The name of the cipher in use for the SSL connection.', Ssl_key TEXT CHARACTER SET utf8 COLLATE utf8_bin COMMENT 'The name of the SSL key file.', Ssl_verify_server_cert BOOLEAN NOT NULL COMMENT 'Whether to verify the server certificate.', Heartbeat FLOAT NOT NULL COMMENT '', Bind TEXT CHARACTER SET utf8 COLLATE utf8_bin COMMENT 'Displays which interface is employed when connecting to the MySQL server', Ignored_server_ids TEXT CHARACTER SET utf8 COLLATE utf8_bin COMMENT 'The number of server IDs to be ignored, followed by the actual server IDs', 
Uuid TEXT CHARACTER SET utf8 COLLATE utf8_bin COMMENT 'The master server uuid.', Retry_count BIGINT UNSIGNED NOT NULL COMMENT 'Number of reconnect attempts, to the master, before giving up.', Ssl_crl TEXT CHARACTER SET utf8 COLLATE utf8_bin COMMENT 'The file used for the Certificate Revocation List (CRL)', Ssl_crlpath TEXT CHARACTER SET utf8 COLLATE utf8_bin COMMENT 'The path used for Certificate Revocation List (CRL) files', PRIMARY KEY(Master_id)) ENGINE=MYISAM DEFAULT CHARSET=utf8 COMMENT 'Master Information';
-
-CREATE TABLE IF NOT EXISTS slave_worker_info (Master_id INTEGER UNSIGNED NOT NULL, Worker_id INTEGER UNSIGNED NOT NULL, Relay_log_name TEXT CHARACTER SET utf8 COLLATE utf8_bin NOT NULL, Relay_log_pos BIGINT UNSIGNED NOT NULL, Master_log_name TEXT CHARACTER SET utf8 COLLATE utf8_bin NOT NULL, Master_log_pos BIGINT UNSIGNED NOT NULL, Checkpoint_relay_log_name TEXT CHARACTER SET utf8 COLLATE utf8_bin NOT NULL, Checkpoint_relay_log_pos BIGINT UNSIGNED NOT NULL, Checkpoint_master_log_name TEXT CHARACTER SET utf8 COLLATE utf8_bin NOT NULL, Checkpoint_master_log_pos BIGINT UNSIGNED NOT NULL, Checkpoint_seqno INT UNSIGNED NOT NULL, Checkpoint_group_size INTEGER UNSIGNED NOT NULL, Checkpoint_group_bitmap BLOB NOT NULL, PRIMARY KEY(Master_id, Worker_id)) ENGINE=MYISAM DEFAULT CHARSET=utf8 COMMENT 'Worker Information';
+CREATE TABLE IF NOT EXISTS event ( db char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', name char(64) CHARACTER SET utf8 NOT NULL default '', body longblob NOT NULL, definer char(77) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', execute_at DATETIME default NULL, interval_value int(11) default NULL, interval_field ENUM('YEAR','QUARTER','MONTH','DAY','HOUR','MINUTE','WEEK','SECOND','MICROSECOND','YEAR_MONTH','DAY_HOUR','DAY_MINUTE','DAY_SECOND','HOUR_MINUTE','HOUR_SECOND','MINUTE_SECOND','DAY_MICROSECOND','HOUR_MICROSECOND','MINUTE_MICROSECOND','SECOND_MICROSECOND') default NULL, created TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, modified TIMESTAMP NOT NULL DEFAULT '0000-00-00 00:00:00', last_executed DATETIME default NULL, starts DATETIME default NULL, ends DATETIME default NULL, status ENUM('ENABLED','DISABLED','SLAVESIDE_DISABLED') NOT NULL default 'ENABLED', on_completion ENUM('DROP','PRESERVE') NOT NULL default 'DROP', sql_mode set('REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','IGNORE_BAD_TABLE_OPTIONS','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','POSTGRESQL','ORACLE','MSSQL','DB2','MAXDB','NO_KEY_OPTIONS','NO_TABLE_OPTIONS','NO_FIELD_OPTIONS','MYSQL323','MYSQL40','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NO_AUTO_CREATE_USER','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH') DEFAULT '' NOT NULL, comment char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '', originator INTEGER UNSIGNED NOT NULL, time_zone char(64) CHARACTER SET latin1 NOT NULL DEFAULT 'SYSTEM', character_set_client char(32) collate utf8_bin, collation_connection char(32) collate utf8_bin, db_collation char(32) collate utf8_bin, body_utf8 longblob, PRIMARY KEY (db, name) ) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT 
'Events';
SET @sql_mode_orig=@@SESSION.sql_mode;
SET SESSION sql_mode='NO_ENGINE_SUBSTITUTION';
@@ -119,18 +111,18 @@ SET SESSION sql_mode='NO_ENGINE_SUBSTITUTION';
CREATE TABLE IF NOT EXISTS innodb_table_stats (
database_name VARCHAR(64) NOT NULL,
table_name VARCHAR(64) NOT NULL,
- last_update TIMESTAMP NOT NULL,
+ last_update TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
n_rows BIGINT UNSIGNED NOT NULL,
clustered_index_size BIGINT UNSIGNED NOT NULL,
sum_of_other_index_sizes BIGINT UNSIGNED NOT NULL,
PRIMARY KEY (database_name, table_name)
-) ENGINE=INNODB DEFAULT CHARSET=utf8 COLLATE=utf8_bin;
+) ENGINE=INNODB DEFAULT CHARSET=utf8 COLLATE=utf8_bin STATS_PERSISTENT=0;
CREATE TABLE IF NOT EXISTS innodb_index_stats (
database_name VARCHAR(64) NOT NULL,
table_name VARCHAR(64) NOT NULL,
index_name VARCHAR(64) NOT NULL,
- last_update TIMESTAMP NOT NULL,
+ last_update TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
/* there are at least:
stat_name='size'
stat_name='n_leaf_pages'
@@ -139,18 +131,93 @@ CREATE TABLE IF NOT EXISTS innodb_index_stats (
stat_value BIGINT UNSIGNED NOT NULL,
sample_size BIGINT UNSIGNED,
stat_description VARCHAR(1024) NOT NULL,
- PRIMARY KEY (database_name, table_name, index_name, stat_name),
- FOREIGN KEY (database_name, table_name)
- REFERENCES innodb_table_stats (database_name, table_name)
-) ENGINE=INNODB DEFAULT CHARSET=utf8 COLLATE=utf8_bin;
+ PRIMARY KEY (database_name, table_name, index_name, stat_name)
+) ENGINE=INNODB DEFAULT CHARSET=utf8 COLLATE=utf8_bin STATS_PERSISTENT=0;
SET SESSION sql_mode=@sql_mode_orig;
-CREATE TABLE IF NOT EXISTS proxies_priv (Host char(60) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Proxied_host char(60) binary DEFAULT '' NOT NULL, Proxied_user char(16) binary DEFAULT '' NOT NULL, With_grant BOOL DEFAULT 0 NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Timestamp timestamp, PRIMARY KEY Host (Host,User,Proxied_host,Proxied_user), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='User proxy privileges';
+SET @have_innodb = 'NO';
+SET @have_innodb = (SELECT @@have_innodb);
+
+SET @cmd="CREATE TABLE IF NOT EXISTS slave_relay_log_info (
+ Number_of_lines INTEGER UNSIGNED NOT NULL COMMENT 'Number of lines in the file or rows in the table. Used to version table definitions.',
+ Relay_log_name TEXT CHARACTER SET utf8 COLLATE utf8_bin NOT NULL COMMENT 'The name of the current relay log file.',
+ Relay_log_pos BIGINT UNSIGNED NOT NULL COMMENT 'The relay log position of the last executed event.',
+ Master_log_name TEXT CHARACTER SET utf8 COLLATE utf8_bin NOT NULL COMMENT 'The name of the master binary log file from which the events in the relay log file were read.',
+ Master_log_pos BIGINT UNSIGNED NOT NULL COMMENT 'The master log position of the last executed event.',
+ Sql_delay INTEGER NOT NULL COMMENT 'The number of seconds that the slave must lag behind the master.',
+ Number_of_workers INTEGER UNSIGNED NOT NULL,
+ Id INTEGER UNSIGNED NOT NULL COMMENT 'Internal Id that uniquely identifies this record.',
+ PRIMARY KEY(Id)) DEFAULT CHARSET=utf8 STATS_PERSISTENT=0 COMMENT 'Relay Log Information'";
+
+SET @str=IF(@have_innodb <> 0, CONCAT(@cmd, ' ENGINE= INNODB;'), CONCAT(@cmd, ' ENGINE= MYISAM;'));
+-- Don't create the table; MariaDB will have another implementation
+#PREPARE stmt FROM @str;
+#EXECUTE stmt;
+#DROP PREPARE stmt;
+
+SET @cmd= "CREATE TABLE IF NOT EXISTS slave_master_info (
+ Number_of_lines INTEGER UNSIGNED NOT NULL COMMENT 'Number of lines in the file.',
+ Master_log_name TEXT CHARACTER SET utf8 COLLATE utf8_bin NOT NULL COMMENT 'The name of the master binary log currently being read from the master.',
+ Master_log_pos BIGINT UNSIGNED NOT NULL COMMENT 'The master log position of the last read event.',
+ Host CHAR(64) CHARACTER SET utf8 COLLATE utf8_bin COMMENT 'The host name of the master.',
+ User_name TEXT CHARACTER SET utf8 COLLATE utf8_bin COMMENT 'The user name used to connect to the master.',
+ User_password TEXT CHARACTER SET utf8 COLLATE utf8_bin COMMENT 'The password used to connect to the master.',
+ Port INTEGER UNSIGNED NOT NULL COMMENT 'The network port used to connect to the master.',
+ Connect_retry INTEGER UNSIGNED NOT NULL COMMENT 'The period (in seconds) that the slave will wait before trying to reconnect to the master.',
+ Enabled_ssl BOOLEAN NOT NULL COMMENT 'Indicates whether the server supports SSL connections.',
+ Ssl_ca TEXT CHARACTER SET utf8 COLLATE utf8_bin COMMENT 'The file used for the Certificate Authority (CA) certificate.',
+ Ssl_capath TEXT CHARACTER SET utf8 COLLATE utf8_bin COMMENT 'The path to the Certificate Authority (CA) certificates.',
+ Ssl_cert TEXT CHARACTER SET utf8 COLLATE utf8_bin COMMENT 'The name of the SSL certificate file.',
+ Ssl_cipher TEXT CHARACTER SET utf8 COLLATE utf8_bin COMMENT 'The name of the cipher in use for the SSL connection.',
+ Ssl_key TEXT CHARACTER SET utf8 COLLATE utf8_bin COMMENT 'The name of the SSL key file.',
+ Ssl_verify_server_cert BOOLEAN NOT NULL COMMENT 'Whether to verify the server certificate.',
+ Heartbeat FLOAT NOT NULL COMMENT '',
+ Bind TEXT CHARACTER SET utf8 COLLATE utf8_bin COMMENT 'Displays which interface is employed when connecting to the MySQL server',
+ Ignored_server_ids TEXT CHARACTER SET utf8 COLLATE utf8_bin COMMENT 'The number of server IDs to be ignored, followed by the actual server IDs',
+ Uuid TEXT CHARACTER SET utf8 COLLATE utf8_bin COMMENT 'The master server uuid.',
+ Retry_count BIGINT UNSIGNED NOT NULL COMMENT 'Number of reconnect attempts, to the master, before giving up.',
+ Ssl_crl TEXT CHARACTER SET utf8 COLLATE utf8_bin COMMENT 'The file used for the Certificate Revocation List (CRL)',
+ Ssl_crlpath TEXT CHARACTER SET utf8 COLLATE utf8_bin COMMENT 'The path used for Certificate Revocation List (CRL) files',
+ Enabled_auto_position BOOLEAN NOT NULL COMMENT 'Indicates whether GTIDs will be used to retrieve events from the master.',
+ PRIMARY KEY(Host, Port)) DEFAULT CHARSET=utf8 STATS_PERSISTENT=0 COMMENT 'Master Information'";
+
+SET @str=IF(@have_innodb <> 0, CONCAT(@cmd, ' ENGINE= INNODB;'), CONCAT(@cmd, ' ENGINE= MYISAM;'));
+-- Don't create the table; MariaDB will have another implementation
+#PREPARE stmt FROM @str;
+#EXECUTE stmt;
+#DROP PREPARE stmt;
+
+SET @cmd= "CREATE TABLE IF NOT EXISTS slave_worker_info (
+ Id INTEGER UNSIGNED NOT NULL,
+ Relay_log_name TEXT CHARACTER SET utf8 COLLATE utf8_bin NOT NULL,
+ Relay_log_pos BIGINT UNSIGNED NOT NULL,
+ Master_log_name TEXT CHARACTER SET utf8 COLLATE utf8_bin NOT NULL,
+ Master_log_pos BIGINT UNSIGNED NOT NULL,
+ Checkpoint_relay_log_name TEXT CHARACTER SET utf8 COLLATE utf8_bin NOT NULL,
+ Checkpoint_relay_log_pos BIGINT UNSIGNED NOT NULL,
+ Checkpoint_master_log_name TEXT CHARACTER SET utf8 COLLATE utf8_bin NOT NULL,
+ Checkpoint_master_log_pos BIGINT UNSIGNED NOT NULL,
+ Checkpoint_seqno INT UNSIGNED NOT NULL,
+ Checkpoint_group_size INTEGER UNSIGNED NOT NULL,
+ Checkpoint_group_bitmap BLOB NOT NULL,
+ PRIMARY KEY(Id)) DEFAULT CHARSET=utf8 STATS_PERSISTENT=0 COMMENT 'Worker Information'";
+
+SET @str=IF(@have_innodb <> 0, CONCAT(@cmd, ' ENGINE= INNODB;'), CONCAT(@cmd, ' ENGINE= MYISAM;'));
+-- Don't create the table; MariaDB will have another implementation
+#PREPARE stmt FROM @str;
+#EXECUTE stmt;
+#DROP PREPARE stmt;
+
+CREATE TABLE IF NOT EXISTS proxies_priv (Host char(60) binary DEFAULT '' NOT NULL, User char(16) binary DEFAULT '' NOT NULL, Proxied_host char(60) binary DEFAULT '' NOT NULL, Proxied_user char(16) binary DEFAULT '' NOT NULL, With_grant BOOL DEFAULT 0 NOT NULL, Grantor char(77) DEFAULT '' NOT NULL, Timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, PRIMARY KEY Host (Host,User,Proxied_host,Proxied_user), KEY Grantor (Grantor) ) engine=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='User proxy privileges';
-- Remember for later if proxies_priv table already existed
set @had_proxies_priv_table= @@warning_count != 0;
+--
+-- Tables unique for MariaDB
+--
+
CREATE TABLE IF NOT EXISTS table_stats (db_name varchar(64) NOT NULL, table_name varchar(64) NOT NULL, cardinality bigint(21) unsigned DEFAULT NULL, PRIMARY KEY (db_name,table_name) ) ENGINE=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Statistics on Tables';
CREATE TABLE IF NOT EXISTS column_stats (db_name varchar(64) NOT NULL, table_name varchar(64) NOT NULL, column_name varchar(64) NOT NULL, min_value varchar(255) DEFAULT NULL, max_value varchar(255) DEFAULT NULL, nulls_ratio decimal(12,4) DEFAULT NULL, avg_length decimal(12,4) DEFAULT NULL, avg_frequency decimal(12,4) DEFAULT NULL, hist_size tinyint unsigned, hist_type enum('SINGLE_PREC_HB','DOUBLE_PREC_HB'), histogram varbinary(255), PRIMARY KEY (db_name,table_name,column_name) ) ENGINE=MyISAM CHARACTER SET utf8 COLLATE utf8_bin comment='Statistics on Columns';
diff --git a/scripts/mysql_system_tables_data.sql b/scripts/mysql_system_tables_data.sql
index a222d22b670..334c4adacc6 100644
--- a/scripts/mysql_system_tables_data.sql
+++ b/scripts/mysql_system_tables_data.sql
@@ -40,10 +40,10 @@ DROP TABLE tmp_db;
-- Fill "user" table with default users allowing root access
-- from local machine if "user" table didn't exist before
CREATE TEMPORARY TABLE tmp_user LIKE user;
-INSERT INTO tmp_user VALUES ('localhost','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0,'','');
-REPLACE INTO tmp_user SELECT @current_hostname,'root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0,'','' FROM dual WHERE LOWER( @current_hostname) != 'localhost';
-REPLACE INTO tmp_user VALUES ('127.0.0.1','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0,'','');
-REPLACE INTO tmp_user VALUES ('::1','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0,'','');
+INSERT INTO tmp_user VALUES ('localhost','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0,'','','');
+REPLACE INTO tmp_user SELECT @current_hostname,'root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0,'','','' FROM dual WHERE LOWER( @current_hostname) != 'localhost';
+REPLACE INTO tmp_user VALUES ('127.0.0.1','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0,'','','');
+REPLACE INTO tmp_user VALUES ('::1','root','','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','Y','','','','',0,0,0,0,'','','');
INSERT INTO tmp_user (host,user) VALUES ('localhost','');
INSERT INTO tmp_user (host,user) SELECT @current_hostname,'' FROM dual WHERE LOWER(@current_hostname ) != 'localhost';
INSERT INTO user SELECT * FROM tmp_user WHERE @had_user_table=0;
diff --git a/scripts/mysql_system_tables_fix.sql b/scripts/mysql_system_tables_fix.sql
index abe1f487915..24ded350d5f 100644
--- a/scripts/mysql_system_tables_fix.sql
+++ b/scripts/mysql_system_tables_fix.sql
@@ -1,5 +1,5 @@
-- Copyright (C) 2003, 2011 Oracle and/or its affiliates.
--- Copyright (C) 2010, 2011 Monty Program Ab
+-- Copyright (C) 2010-2013 Monty Program Ab & SkySQL Ab
--
-- This program is free software; you can redistribute it and/or modify
-- it under the terms of the GNU General Public License as published by
@@ -21,6 +21,9 @@
# because these just mean that your tables are already up to date.
# This script is safe to run even if your tables are already up to date!
+# Warning message(s) produced for a statement can be printed by explicitly
+# adding a 'SHOW WARNINGS' after the statement.
+
set sql_mode='';
set storage_engine=MyISAM;
@@ -234,18 +237,21 @@ ALTER TABLE func
SET @old_log_state = @@global.general_log;
SET GLOBAL general_log = 'OFF';
ALTER TABLE general_log
- MODIFY event_time TIMESTAMP(6) NOT NULL,
+ MODIFY event_time TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
MODIFY user_host MEDIUMTEXT NOT NULL,
MODIFY thread_id INTEGER NOT NULL,
MODIFY server_id INTEGER UNSIGNED NOT NULL,
MODIFY command_type VARCHAR(64) NOT NULL,
- MODIFY argument MEDIUMTEXT NOT NULL;
+ MODIFY argument MEDIUMTEXT NOT NULL,
+ MODIFY thread_id BIGINT(21) UNSIGNED NOT NULL;
SET GLOBAL general_log = @old_log_state;
SET @old_log_state = @@global.slow_query_log;
SET GLOBAL slow_query_log = 'OFF';
ALTER TABLE slow_log
- MODIFY start_time TIMESTAMP(6) NOT NULL,
+ ADD COLUMN thread_id BIGINT(21) UNSIGNED NOT NULL AFTER sql_text;
+ALTER TABLE slow_log
+ MODIFY start_time TIMESTAMP(6) NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
MODIFY user_host MEDIUMTEXT NOT NULL,
MODIFY query_time TIME(6) NOT NULL,
MODIFY lock_time TIME(6) NOT NULL,
@@ -255,7 +261,8 @@ ALTER TABLE slow_log
MODIFY last_insert_id INTEGER NOT NULL,
MODIFY insert_id INTEGER NOT NULL,
MODIFY server_id INTEGER UNSIGNED NOT NULL,
- MODIFY sql_text MEDIUMTEXT NOT NULL;
+ MODIFY sql_text MEDIUMTEXT NOT NULL,
+ MODIFY thread_id BIGINT(21) UNSIGNED NOT NULL;
SET GLOBAL slow_query_log = @old_log_state;
ALTER TABLE plugin
@@ -383,7 +390,7 @@ ALTER TABLE procs_priv
COLLATE utf8_general_ci NOT NULL AFTER Routine_name;
ALTER TABLE procs_priv
- MODIFY Timestamp timestamp AFTER Proc_priv;
+ MODIFY Timestamp timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP AFTER Proc_priv;
#
# proc
@@ -622,8 +629,11 @@ ALTER TABLE user MODIFY Create_tablespace_priv enum('N','Y') COLLATE utf8_genera
UPDATE user SET Create_tablespace_priv = Super_priv WHERE @hadCreateTablespacePriv = 0;
ALTER TABLE user ADD plugin char(64) DEFAULT '', ADD authentication_string TEXT;
-ALTER TABLE user MODIFY plugin char(64) CHARACTER SET latin1 DEFAULT '' NOT NULL;
-ALTER TABLE user MODIFY authentication_string TEXT NOT NULL;
+ALTER TABLE user ADD password_expired ENUM('N', 'Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL;
+ALTER TABLE user MODIFY plugin char(64) CHARACTER SET latin1 DEFAULT '' NOT NULL, MODIFY authentication_string TEXT NOT NULL;
+-- Somewhere above, we ran ALTER TABLE user .... CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin.
+-- we want password_expired column to have collation utf8_general_ci.
+ALTER TABLE user MODIFY password_expired ENUM('N', 'Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL;
-- Need to pre-fill mysql.proxies_priv with access for root even when upgrading from
-- older versions
diff --git a/sql-common/client.c b/sql-common/client.c
index d9d0f2fd095..6942822f889 100644
--- a/sql-common/client.c
+++ b/sql-common/client.c
@@ -35,7 +35,7 @@
*/
#include <my_global.h>
-
+#include <my_default.h>
#include "mysql.h"
/* Remove client convenience wrappers */
diff --git a/sql-common/client_authentication.cc b/sql-common/client_authentication.cc
new file mode 100644
index 00000000000..195f37bcc59
--- /dev/null
+++ b/sql-common/client_authentication.cc
@@ -0,0 +1,253 @@
+/* Copyright (c) 2012, Oracle and/or its affiliates.
+ Copyright (c) 2013, Monty Program Ab
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1307 USA */
+
+#include <my_global.h>
+
+#if defined(HAVE_OPENSSL)
+#include "crypt_genhash_impl.h"
+#include "mysql/client_authentication.h"
+#include "m_ctype.h"
+#include "sql_common.h"
+#include "errmsg.h"
+#include "m_string.h"
+#include <string.h>
+
+#if !defined(HAVE_YASSL)
+#include <openssl/rsa.h>
+#include <openssl/pem.h>
+#include <openssl/err.h>
+#if defined(_WIN32) && !defined(_OPENSSL_Applink) && defined(HAVE_OPENSSL_APPLINK_C)
+#include <openssl/applink.c>
+#endif
+#endif
+#include "mysql/service_my_plugin_log.h"
+
+#define MAX_CIPHER_LENGTH 1024
+
+#if !defined(HAVE_YASSL)
+mysql_mutex_t g_public_key_mutex;
+#endif
+
+int sha256_password_init(char *a, size_t b, int c, va_list d)
+{
+#if !defined(HAVE_YASSL)
+ mysql_mutex_init(0,&g_public_key_mutex, MY_MUTEX_INIT_SLOW);
+#endif
+ return 0;
+}
+
+int sha256_password_deinit(void)
+{
+#if !defined(HAVE_YASSL)
+ mysql_mutex_destroy(&g_public_key_mutex);
+#endif
+ return 0;
+}
+
+
+#if !defined(HAVE_YASSL)
+/**
+ Reads and parse RSA public key data from a file.
+
+ @param mysql connection handle with file path data
+
+ @return Pointer to the RSA public key storage buffer
+*/
+
+RSA *rsa_init(MYSQL *mysql)
+{
+ static RSA *g_public_key= NULL;
+ RSA *key= NULL;
+
+ mysql_mutex_lock(&g_public_key_mutex);
+ key= g_public_key;
+ mysql_mutex_unlock(&g_public_key_mutex);
+
+ if (key != NULL)
+ return key;
+
+ FILE *pub_key_file= NULL;
+
+ if (mysql->options.extension != NULL &&
+ mysql->options.extension->server_public_key_path != NULL &&
+ mysql->options.extension->server_public_key_path != '\0')
+ {
+ pub_key_file= fopen(mysql->options.extension->server_public_key_path,
+ "r");
+ }
+ /* No public key is used; return 0 without errors to indicate this. */
+ else
+ return 0;
+
+ if (pub_key_file == NULL)
+ {
+ /*
+ If a key path was submitted but no key located then we print an error
+ message. Else we just report that there is no public key.
+ */
+ fprintf(stderr,"Can't locate server public key '%s'\n",
+ mysql->options.extension->server_public_key_path);
+
+ return 0;
+ }
+
+ mysql_mutex_lock(&g_public_key_mutex);
+ key= g_public_key= PEM_read_RSA_PUBKEY(pub_key_file, 0, 0, 0);
+ mysql_mutex_unlock(&g_public_key_mutex);
+ fclose(pub_key_file);
+ if (g_public_key == NULL)
+ {
+ fprintf(stderr, "Public key is not in PEM format: '%s'\n",
+ mysql->options.extension->server_public_key_path);
+ return 0;
+ }
+
+ return key;
+}
+#endif // !defined(HAVE_YASSL)
+
+/**
+ Authenticate the client using the RSA or TLS and a SHA256 salted password.
+
+ @param vio Provides plugin access to communication channel
+ @param mysql Client connection handler
+
+ @return Error status
+ @retval CR_ERROR An error occurred.
+ @retval CR_OK Authentication succeeded.
+*/
+
+extern "C"
+int sha256_password_auth_client(MYSQL_PLUGIN_VIO *vio, MYSQL *mysql)
+{
+ bool uses_password= mysql->passwd[0] != 0;
+#if !defined(HAVE_YASSL)
+ unsigned char encrypted_password[MAX_CIPHER_LENGTH];
+ static char request_public_key= '\1';
+ RSA *public_key= NULL;
+ bool got_public_key_from_server= false;
+#endif
+ bool connection_is_secure= false;
+ unsigned char scramble_pkt[20];
+ unsigned char *pkt;
+
+
+ DBUG_ENTER("sha256_password_auth_client");
+
+ /*
+ Get the scramble from the server because we need it when sending encrypted
+ password.
+ */
+ if (vio->read_packet(vio, &pkt) != SCRAMBLE_LENGTH)
+ {
+ DBUG_PRINT("info",("Scramble is not of correct length."));
+ DBUG_RETURN(CR_ERROR);
+ }
+ /*
+ Copy the scramble to the stack or it will be lost on the next use of the
+ net buffer.
+ */
+ memcpy(scramble_pkt, pkt, SCRAMBLE_LENGTH);
+
+ if (mysql_get_ssl_cipher(mysql) != NULL)
+ connection_is_secure= true;
+
+ /* If connection isn't secure attempt to get the RSA public key file */
+ if (!connection_is_secure)
+ {
+ #if !defined(HAVE_YASSL)
+ public_key= rsa_init(mysql);
+#endif
+ }
+
+ if (!uses_password)
+ {
+ /* We're not using a password */
+ static const unsigned char zero_byte= '\0';
+ if (vio->write_packet(vio, (const unsigned char *) &zero_byte, 1))
+ DBUG_RETURN(CR_ERROR);
+ }
+ else
+ {
+ /* Password is a 0-terminated byte array ('\0' character included) */
+ unsigned int passwd_len= strlen(mysql->passwd) + 1;
+ if (!connection_is_secure)
+ {
+#if !defined(HAVE_YASSL)
+ /*
+ If no public key; request one from the server.
+ */
+ if (public_key == NULL)
+ {
+ if (vio->write_packet(vio, (const unsigned char *) &request_public_key,
+ 1))
+ DBUG_RETURN(CR_ERROR);
+
+ int pkt_len= 0;
+ unsigned char *pkt;
+ if ((pkt_len= vio->read_packet(vio, &pkt)) == -1)
+ DBUG_RETURN(CR_ERROR);
+ BIO* bio= BIO_new_mem_buf(pkt, pkt_len);
+ public_key= PEM_read_bio_RSA_PUBKEY(bio, NULL, NULL, NULL);
+ BIO_free(bio);
+ if (public_key == 0)
+ DBUG_RETURN(CR_ERROR);
+ got_public_key_from_server= true;
+ }
+
+ /* Obfuscate the plain text password with the session scramble */
+ xor_string(mysql->passwd, strlen(mysql->passwd), (char *) scramble_pkt,
+ SCRAMBLE_LENGTH);
+ /* Encrypt the password and send it to the server */
+ int cipher_length= RSA_size(public_key);
+ /*
+ When using RSA_PKCS1_OAEP_PADDING the password length must be less
+ than RSA_size(rsa) - 41.
+ */
+ if (passwd_len + 41 >= (unsigned) cipher_length)
+ {
+ /* password message is to long */
+ DBUG_RETURN(CR_ERROR);
+ }
+ RSA_public_encrypt(passwd_len, (unsigned char *) mysql->passwd,
+ encrypted_password,
+ public_key, RSA_PKCS1_OAEP_PADDING);
+ if (got_public_key_from_server)
+ RSA_free(public_key);
+
+ if (vio->write_packet(vio, (uchar*) encrypted_password, cipher_length))
+ DBUG_RETURN(CR_ERROR);
+#else
+ set_mysql_extended_error(mysql, CR_AUTH_PLUGIN_ERR, unknown_sqlstate,
+ ER(CR_AUTH_PLUGIN_ERR), "sha256_password",
+ "Authentication requires SSL encryption");
+ DBUG_RETURN(CR_ERROR); // If no openssl support
+#endif
+ }
+ else
+ {
+ /* The vio is encrypted already; just send the plain text passwd */
+ if (vio->write_packet(vio, (uchar*) mysql->passwd, passwd_len))
+ DBUG_RETURN(CR_ERROR);
+ }
+
+ memset(mysql->passwd, 0, passwd_len);
+ }
+
+ DBUG_RETURN(CR_OK);
+}
+
+#endif
diff --git a/sql-common/my_time.c b/sql-common/my_time.c
index 109bea7b4dd..75b94d14ac4 100644
--- a/sql-common/my_time.c
+++ b/sql-common/my_time.c
@@ -127,7 +127,7 @@ static int get_number(uint *val, uint *number_of_fields, const char **str,
static int get_digits(uint *val, uint *number_of_fields, const char **str,
const char *end, uint length)
{
- return get_number(val, number_of_fields, str, min(end, *str + length));
+ return get_number(val, number_of_fields, str, MY_MIN(end, *str + length));
}
static int get_punct(const char **str, const char *end)
diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt
index dc6ba20811d..b43474224ee 100644
--- a/sql/CMakeLists.txt
+++ b/sql/CMakeLists.txt
@@ -59,7 +59,8 @@ SET (SQL_SOURCE
sp_rcontext.cc spatial.cc sql_acl.cc sql_analyse.cc sql_base.cc
sql_cache.cc sql_class.cc sql_client.cc sql_crypt.cc sql_crypt.h
sql_cursor.cc sql_db.cc sql_delete.cc sql_derived.cc sql_do.cc
- sql_error.cc sql_handler.cc sql_help.cc sql_insert.cc sql_lex.cc
+ sql_error.cc sql_handler.cc sql_get_diagnostics.cc
+ sql_help.cc sql_insert.cc sql_lex.cc
sql_list.cc sql_load.cc sql_manager.cc
sql_parse.cc sql_bootstrap.cc sql_bootstrap.h
sql_partition.cc sql_plugin.cc sql_prepare.cc sql_rename.cc
@@ -107,7 +108,7 @@ ADD_LIBRARY(sql STATIC ${SQL_SOURCE})
ADD_DEPENDENCIES(sql GenServerSource)
DTRACE_INSTRUMENT(sql)
TARGET_LINK_LIBRARIES(sql ${MYSQLD_STATIC_PLUGIN_LIBS}
- mysys dbug strings vio regex
+ mysys mysys_ssl dbug strings vio regex
${LIBWRAP} ${LIBCRYPT} ${LIBDL}
${SSL_LIBRARIES})
@@ -142,7 +143,7 @@ IF(NOT WITHOUT_DYNAMIC_PLUGINS)
# incremental appears to crash from time to time,if used with /DEF option
SET_TARGET_PROPERTIES(mysqld PROPERTIES LINK_FLAGS "${mysqld_link_flags} /DEF:mysqld.def /INCREMENTAL:NO")
- FOREACH (CORELIB sql mysys dbug strings)
+ FOREACH (CORELIB sql mysys mysys_ssl dbug strings)
GET_TARGET_PROPERTY(LOC ${CORELIB} LOCATION)
FILE(TO_NATIVE_PATH ${LOC} LOC)
SET (LIB_LOCATIONS ${LIB_LOCATIONS} ${LOC})
@@ -172,7 +173,7 @@ ENDIF()
# On Solaris, some extra effort is required in order to get dtrace probes
# from static libraries
DTRACE_INSTRUMENT_STATIC_LIBS(mysqld
- "sql;mysys;${MYSQLD_STATIC_PLUGIN_LIBS}")
+ "sql;mysys;mysys_ssl;${MYSQLD_STATIC_PLUGIN_LIBS}")
SET(WITH_MYSQLD_LDFLAGS "" CACHE STRING "Additional linker flags for mysqld")
@@ -225,7 +226,7 @@ ADD_CUSTOM_COMMAND(
MYSQL_ADD_EXECUTABLE(mysql_tzinfo_to_sql tztime.cc COMPONENT Server)
SET_TARGET_PROPERTIES(mysql_tzinfo_to_sql PROPERTIES COMPILE_FLAGS "-DTZINFO2SQL")
-TARGET_LINK_LIBRARIES(mysql_tzinfo_to_sql mysys)
+TARGET_LINK_LIBRARIES(mysql_tzinfo_to_sql mysys mysys_ssl)
ADD_CUSTOM_TARGET(
GenServerSource
diff --git a/sql/create_options.cc b/sql/create_options.cc
index f12120bd0a1..d956d01aa66 100644
--- a/sql/create_options.cc
+++ b/sql/create_options.cc
@@ -87,7 +87,7 @@ static bool report_wrong_value(THD *thd, const char *name, const char *val,
return 1;
}
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_BAD_OPTION_VALUE,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_BAD_OPTION_VALUE,
ER(ER_BAD_OPTION_VALUE), val, name);
return 0;
}
@@ -110,7 +110,7 @@ static bool report_unknown_option(THD *thd, engine_option_value *val,
DBUG_RETURN(TRUE);
}
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_UNKNOWN_OPTION, ER(ER_UNKNOWN_OPTION), val->name.str);
DBUG_RETURN(FALSE);
}
diff --git a/sql/debug_sync.cc b/sql/debug_sync.cc
index 25f028e5451..750f770552e 100644
--- a/sql/debug_sync.cc
+++ b/sql/debug_sync.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009, 2010, Oracle and/or its affiliates.
+/* Copyright (c) 2009, 2011, Oracle and/or its affiliates.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -10,8 +10,8 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+ along with this program; if not, write to the Free Software Foundation,
+ 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */
/* see include/mysql/service_debug_sync.h for debug sync documentation */
@@ -38,7 +38,7 @@
*/
struct st_debug_sync_action
{
- ulong activation_count; /* max(hit_limit, execute) */
+ ulong activation_count; /* MY_MAX(hit_limit, execute) */
ulong hit_limit; /* hits before kill query */
ulong execute; /* executes before self-clear */
ulong timeout; /* wait_for timeout */
@@ -82,8 +82,6 @@ struct st_debug_sync_globals
};
static st_debug_sync_globals debug_sync_global; /* All globals in one object */
-extern uint opt_debug_sync_timeout;
-
/**
Callbacks from C files.
*/
@@ -112,14 +110,11 @@ static void init_debug_sync_psi_keys(void)
const char* category= "sql";
int count;
- if (PSI_server == NULL)
- return;
-
count= array_elements(all_debug_sync_mutexes);
- PSI_server->register_mutex(category, all_debug_sync_mutexes, count);
+ mysql_mutex_register(category, all_debug_sync_mutexes, count);
count= array_elements(all_debug_sync_conds);
- PSI_server->register_cond(category, all_debug_sync_conds, count);
+ mysql_cond_register(category, all_debug_sync_conds, count);
}
#endif /* HAVE_PSI_INTERFACE */
@@ -741,7 +736,7 @@ static bool debug_sync_set_action(THD *thd, st_debug_sync_action *action)
DBUG_ASSERT(action);
DBUG_ASSERT(ds_control);
- action->activation_count= max(action->hit_limit, action->execute);
+ action->activation_count= MY_MAX(action->hit_limit, action->execute);
if (!action->activation_count)
{
debug_sync_remove_action(ds_control, action);
@@ -783,7 +778,7 @@ static bool debug_sync_set_action(THD *thd, st_debug_sync_action *action)
point decremented it to 0. In this case the following happened:
- an error message was reported with my_error() and
- - the statement was killed with thd->killed= KILL_QUERY.
+ - the statement was killed with thd->killed= THD::KILL_QUERY.
If a statement reports an error, it must not call send_ok().
The calling functions will not call send_ok(), if we return TRUE
@@ -985,7 +980,7 @@ static bool debug_sync_eval_action(THD *thd, char *action_str)
DBUG_ENTER("debug_sync_eval_action");
DBUG_ASSERT(thd);
DBUG_ASSERT(action_str);
- DBUG_PRINT("debug_sync", ("action_str='%s'", action_str));
+ DBUG_PRINT("debug_sync", ("action_str: '%s'", action_str));
/*
Get debug sync point name. Or a special command.
@@ -1450,8 +1445,13 @@ static void debug_sync_execute(THD *thd, st_debug_sync_action *action)
sig_wait, sig_glob, error));});
if (error == ETIMEDOUT || error == ETIME)
{
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ // We should not make the statement fail, even if in strict mode.
+ const bool save_abort_on_warning= thd->abort_on_warning;
+ thd->abort_on_warning= false;
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
ER_DEBUG_SYNC_TIMEOUT, ER(ER_DEBUG_SYNC_TIMEOUT));
+ thd->abort_on_warning= save_abort_on_warning;
+ DBUG_EXECUTE_IF("debug_sync_abort_on_timeout", DBUG_ABORT(););
break;
}
error= 0;
@@ -1521,9 +1521,10 @@ static void debug_sync_execute(THD *thd, st_debug_sync_action *action)
static void debug_sync(THD *thd, const char *sync_point_name, size_t name_len)
{
if (!thd)
- thd= current_thd;
- if (!thd)
- return;
+ {
+ if (!(thd= current_thd))
+ return;
+ }
st_debug_sync_control *ds_control= thd->debug_sync_control;
st_debug_sync_action *action;
diff --git a/sql/debug_sync.h b/sql/debug_sync.h
index 4d29d6e7508..bf1b3167dbc 100644
--- a/sql/debug_sync.h
+++ b/sql/debug_sync.h
@@ -32,6 +32,9 @@ class THD;
#if defined(ENABLED_DEBUG_SYNC)
+/* Command line option --debug-sync-timeout. See mysqld.cc. */
+extern MYSQL_PLUGIN_IMPORT uint opt_debug_sync_timeout;
+
/* Default WAIT_FOR timeout if command line option is given without argument. */
#define DEBUG_SYNC_DEFAULT_WAIT_TIMEOUT 300
diff --git a/sql/derror.cc b/sql/derror.cc
index 665427f45bc..74e8209496b 100644
--- a/sql/derror.cc
+++ b/sql/derror.cc
@@ -76,7 +76,7 @@ bool init_errmessage(void)
&errmsgs, ER_ERROR_LAST - ER_ERROR_FIRST + 1) &&
!errmsgs)
{
- free(errmsgs);
+ my_free(errmsgs);
if (org_errmsgs)
{
@@ -99,7 +99,7 @@ bool init_errmessage(void)
}
}
else
- free(org_errmsgs); // Free old language
+ my_free(org_errmsgs); // Free old language
/* Register messages for use with my_error(). */
if (my_error_register(get_server_errmsgs, ER_ERROR_FIRST, ER_ERROR_LAST))
@@ -146,8 +146,8 @@ bool read_texts(const char *file_name, const char *language,
const char ***point, uint error_messages)
{
register uint i;
- uint count,funktpos,textcount;
- size_t length;
+ uint count,funktpos;
+ size_t offset, length;
File file;
char name[FN_REFLEN];
char lang_path[FN_REFLEN];
@@ -186,9 +186,8 @@ bool read_texts(const char *file_name, const char *language,
goto err;
funktpos=2;
if (head[0] != (uchar) 254 || head[1] != (uchar) 254 ||
- head[2] != 2 || head[3] != 2)
+ head[2] != 2 || head[3] != 3)
goto err; /* purecov: inspected */
- textcount=head[4];
error_message_charset_info= system_charset_info;
length=uint4korr(head+6); count=uint2korr(head+10);
@@ -203,7 +202,7 @@ Error message file '%s' had only %d error messages, but it should contain at lea
}
if (!(*point= (const char**)
- my_malloc((size_t) (max(length,count*2)+count*sizeof(char*)),MYF(0))))
+ my_malloc((size_t) (MY_MAX(length,count*2)+count*sizeof(char*)),MYF(0))))
{
funktpos=3; /* purecov: inspected */
goto err; /* purecov: inspected */
@@ -212,18 +211,15 @@ Error message file '%s' had only %d error messages, but it should contain at lea
if (mysql_file_read(file, buff, (size_t) count*2, MYF(MY_NABP)))
goto err;
- for (i=0, pos= buff ; i< count ; i++)
+ for (i=0, offset=0, pos= buff ; i< count ; i++)
{
- (*point)[i]= (char*) buff+uint2korr(pos);
+ (*point)[i]= (char*) buff+offset;
+ offset+= uint2korr(pos);
pos+=2;
}
if (mysql_file_read(file, buff, length, MYF(MY_NABP)))
goto err;
- for (i=1 ; i < textcount ; i++)
- {
- point[i]= *point +uint2korr(head+10+i+i);
- }
(void) mysql_file_close(file, MYF(0));
i= check_error_mesg(file_name, *point);
diff --git a/sql/discover.cc b/sql/discover.cc
index cc0dece031a..9351cf034ab 100644
--- a/sql/discover.cc
+++ b/sql/discover.cc
@@ -70,7 +70,7 @@ int readfrm(const char *name, const uchar **frmdata, size_t *len)
error= 2;
if (mysql_file_fstat(file, &state, MYF(0)))
goto err;
- read_len= (size_t)min(FRM_MAX_SIZE, state.st_size); // safety
+ read_len= (size_t)MY_MIN(FRM_MAX_SIZE, state.st_size); // safety
// Read whole frm file
error= 3;
diff --git a/sql/discover.h b/sql/discover.h
index fbf94891c74..e1508107235 100644
--- a/sql/discover.h
+++ b/sql/discover.h
@@ -26,6 +26,14 @@ int readfrm(const char *name, const uchar **data, size_t *length);
int writefrm(const char *path, const char *db, const char *table,
bool tmp_table, const uchar *frmdata, size_t len);
+/* a helper to delete an frm file, given a path w/o .frm extension */
+inline void deletefrm(const char *path)
+{
+ char frm_name[FN_REFLEN];
+ strxmov(frm_name, path, reg_ext, NullS);
+ mysql_file_delete(key_file_frm, frm_name, MYF(0));
+}
+
int ext_table_discovery_simple(MY_DIR *dirp,
handlerton::discovered_list *result);
#endif
diff --git a/sql/event_data_objects.cc b/sql/event_data_objects.cc
index 2e31d20d54e..e236319d757 100644
--- a/sql/event_data_objects.cc
+++ b/sql/event_data_objects.cc
@@ -608,7 +608,7 @@ Event_timed::load_from_row(THD *thd, TABLE *table)
table, &creation_ctx))
{
push_warning_printf(thd,
- MYSQL_ERROR::WARN_LEVEL_WARN,
+ Sql_condition::WARN_LEVEL_WARN,
ER_EVENT_INVALID_CREATION_CTX,
ER(ER_EVENT_INVALID_CREATION_CTX),
(const char *) dbname.str,
diff --git a/sql/event_db_repository.cc b/sql/event_db_repository.cc
index 5c77456d907..34658ab51ac 100644
--- a/sql/event_db_repository.cc
+++ b/sql/event_db_repository.cc
@@ -429,7 +429,7 @@ Event_db_repository::index_read_for_db_for_i_s(THD *thd, TABLE *schema_table,
key_info= event_table->key_info;
- if (key_info->key_parts == 0 ||
+ if (key_info->user_defined_key_parts == 0 ||
key_info->key_part[0].field != event_table->field[ET_FIELD_DB])
{
/* Corrupted table: no index or index on a wrong column */
@@ -687,7 +687,7 @@ Event_db_repository::create_event(THD *thd, Event_parse_data *parse_data,
if (create_if_not)
{
*event_already_exists= true;
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_EVENT_ALREADY_EXISTS, ER(ER_EVENT_ALREADY_EXISTS),
parse_data->name.str);
ret= 0;
@@ -912,7 +912,7 @@ Event_db_repository::drop_event(THD *thd, LEX_STRING db, LEX_STRING name,
goto end;
}
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_SP_DOES_NOT_EXIST, ER(ER_SP_DOES_NOT_EXIST),
"Event", name.str);
ret= 0;
@@ -958,7 +958,7 @@ Event_db_repository::find_named_event(LEX_STRING db, LEX_STRING name,
if (db.length > table->field[ET_FIELD_DB]->field_length ||
name.length > table->field[ET_FIELD_NAME]->field_length ||
table->s->keys == 0 ||
- table->key_info[0].key_parts != 2 ||
+ table->key_info[0].user_defined_key_parts != 2 ||
table->key_info[0].key_part[0].fieldnr != ET_FIELD_DB+1 ||
table->key_info[0].key_part[1].fieldnr != ET_FIELD_NAME+1)
DBUG_RETURN(TRUE);
diff --git a/sql/event_parse_data.cc b/sql/event_parse_data.cc
index 4316a9f1fb8..7647419aff9 100644
--- a/sql/event_parse_data.cc
+++ b/sql/event_parse_data.cc
@@ -126,7 +126,7 @@ Event_parse_data::check_if_in_the_past(THD *thd, my_time_t ltime_utc)
{
switch (thd->lex->sql_command) {
case SQLCOM_CREATE_EVENT:
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_EVENT_CANNOT_CREATE_IN_THE_PAST,
ER(ER_EVENT_CANNOT_CREATE_IN_THE_PAST));
break;
@@ -143,7 +143,7 @@ Event_parse_data::check_if_in_the_past(THD *thd, my_time_t ltime_utc)
{
status= Event_parse_data::DISABLED;
status_changed= true;
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_EVENT_EXEC_TIME_IN_THE_PAST,
ER(ER_EVENT_EXEC_TIME_IN_THE_PAST));
}
diff --git a/sql/event_scheduler.cc b/sql/event_scheduler.cc
index 6317af1eac3..f75a8abc835 100644
--- a/sql/event_scheduler.cc
+++ b/sql/event_scheduler.cc
@@ -75,9 +75,9 @@ struct scheduler_param {
void
Event_worker_thread::print_warnings(THD *thd, Event_job_data *et)
{
- MYSQL_ERROR *err;
+ const Sql_condition *err;
DBUG_ENTER("evex_print_warnings");
- if (thd->warning_info->is_empty())
+ if (thd->get_stmt_da()->is_warning_info_empty())
DBUG_VOID_RETURN;
char msg_buf[10 * STRING_BUFFER_USUAL_SIZE];
@@ -93,7 +93,8 @@ Event_worker_thread::print_warnings(THD *thd, Event_job_data *et)
prefix.append(et->name.str, et->name.length, system_charset_info);
prefix.append("] ", 2);
- List_iterator_fast<MYSQL_ERROR> it(thd->warning_info->warn_list());
+ Diagnostics_area::Sql_condition_iterator it=
+ thd->get_stmt_da()->sql_conditions();
while ((err= it++))
{
String err_msg(msg_buf, sizeof(msg_buf), system_charset_info);
diff --git a/sql/events.cc b/sql/events.cc
index b9c51b77f05..acf842dea44 100644
--- a/sql/events.cc
+++ b/sql/events.cc
@@ -808,7 +808,16 @@ Events::init(bool opt_noacl_or_bootstrap)
*/
thd->thread_stack= (char*) &thd;
thd->store_globals();
-
+ /*
+ Set current time for the thread that handles events.
+ Current time is stored in data member start_time of THD class.
+ Subsequently, this value is used to check whether event was expired
+ when make loading events from storage. Check for event expiration time
+ is done at Event_queue_element::compute_next_execution_time() where
+ event's status set to Event_parse_data::DISABLED and dropped flag set
+ to true if event was expired.
+ */
+ thd->set_time();
/*
We will need Event_db_repository anyway, even if the scheduler is
disabled - to perform events DDL.
@@ -1098,8 +1107,7 @@ Events::load_events_from_db(THD *thd)
while (!(read_record_info.read_record(&read_record_info)))
{
Event_queue_element *et;
- bool created;
- bool drop_on_completion;
+ bool created, dropped;
if (!(et= new Event_queue_element))
goto end;
@@ -1114,10 +1122,13 @@ Events::load_events_from_db(THD *thd)
delete et;
goto end;
}
- drop_on_completion= (et->on_completion ==
- Event_parse_data::ON_COMPLETION_DROP);
-
+ /**
+ Since the Event_queue_element object could be deleted inside
+ Event_queue::create_event we should save the value of dropped flag
+ into the temporary variable.
+ */
+ dropped= et->dropped;
if (event_queue->create_event(thd, et, &created))
{
/* Out of memory */
@@ -1126,7 +1137,7 @@ Events::load_events_from_db(THD *thd)
}
if (created)
count++;
- else if (drop_on_completion)
+ else if (dropped)
{
/*
If not created, a stale event - drop if immediately if
diff --git a/sql/field.cc b/sql/field.cc
index 1cdf2ffd313..9b374c2770d 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -68,7 +68,7 @@ const char field_separator=',';
#define LONGLONG_TO_STRING_CONVERSION_BUFFER_SIZE 128
#define DECIMAL_TO_STRING_CONVERSION_BUFFER_SIZE 128
#define BLOB_PACK_LENGTH_TO_MAX_LENGH(arg) \
-((ulong) ((1LL << min(arg, 4) * 8) - 1))
+ ((ulong) ((1LL << MY_MIN(arg, 4) * 8) - 1))
#define ASSERT_COLUMN_MARKED_FOR_READ DBUG_ASSERT(!table || (!table->read_set || bitmap_is_set(table->read_set, field_index)))
#define ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED DBUG_ASSERT(is_stat_field || !table || (!table->write_set || bitmap_is_set(table->write_set, field_index) || bitmap_is_set(table->vcol_set, field_index)))
@@ -1073,13 +1073,13 @@ static void push_numerical_conversion_warning(THD* thd, const char* str,
const char* field_name="UNKNOWN",
ulong row_num=0)
{
- char buf[max(max(DOUBLE_TO_STRING_CONVERSION_BUFFER_SIZE,
+ char buf[MY_MAX(MY_MAX(DOUBLE_TO_STRING_CONVERSION_BUFFER_SIZE,
LONGLONG_TO_STRING_CONVERSION_BUFFER_SIZE),
DECIMAL_TO_STRING_CONVERSION_BUFFER_SIZE)];
String tmp(buf, sizeof(buf), cs);
tmp.copy(str, length, cs);
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
error, ER(error), typestr, tmp.c_ptr(),
field_name, row_num);
}
@@ -1158,7 +1158,7 @@ double Field::pos_in_interval_val_real(Field *min, Field *max)
d= max->val_real() - min->val_real();
if (d <= 0)
return 1.0;
- return min(n/d, 1.0);
+ return MY_MIN(n/d, 1.0);
}
@@ -1233,7 +1233,7 @@ double Field::pos_in_interval_val_str(Field *min, Field *max, uint data_offset)
d= maxp - minp;
if (d <= 0)
return 1.0;
- return min(n/d, 1.0);
+ return MY_MIN(n/d, 1.0);
}
@@ -1298,17 +1298,18 @@ int Field_num::check_int(CHARSET_INFO *cs, const char *str, int length,
if (str == int_end || error == MY_ERRNO_EDOM)
{
ErrConvString err(str, length, cs);
- push_warning_printf(get_thd(), MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(get_thd(), Sql_condition::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE_FOR_FIELD,
ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD),
"integer", err.ptr(), field_name,
- (ulong) get_thd()->warning_info->current_row_for_warning());
+ (ulong) table->in_use->get_stmt_da()->
+ current_row_for_warning());
return 1;
}
/* Test if we have garbage at the end of the given string. */
if (test_if_important_data(cs, int_end, str + length))
{
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
return 2;
}
return 0;
@@ -1377,7 +1378,7 @@ bool Field_num::get_int(CHARSET_INFO *cs, const char *from, uint len,
return 0;
out_of_range:
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
return 1;
}
@@ -1398,12 +1399,12 @@ int Field::warn_if_overflow(int op_result)
{
if (op_result == E_DEC_OVERFLOW)
{
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
return 1;
}
if (op_result == E_DEC_TRUNCATED)
{
- set_warning(MYSQL_ERROR::WARN_LEVEL_NOTE, WARN_DATA_TRUNCATED, 1);
+ set_warning(Sql_condition::WARN_LEVEL_NOTE, WARN_DATA_TRUNCATED, 1);
/* We return 0 here as this is not a critical issue */
}
return 0;
@@ -1729,7 +1730,7 @@ longlong Field::convert_decimal2longlong(const my_decimal *val,
{
if (val->sign())
{
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
i= 0;
*err= 1;
}
@@ -1869,7 +1870,7 @@ uint Field::fill_cache_field(CACHE_FIELD *copy)
if (flags & BLOB_FLAG)
{
copy->type= CACHE_BLOB;
- copy->length-= table->s->blob_ptr_size;
+ copy->length-= portable_sizeof_char_ptr;
return copy->length;
}
else if (!zero_pack() &&
@@ -2037,7 +2038,7 @@ void Field_decimal::overflow(bool negative)
uint len=field_length;
uchar *to=ptr, filler= '9';
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
if (negative)
{
if (!unsigned_flag)
@@ -2145,7 +2146,7 @@ int Field_decimal::store(const char *from_arg, uint len, CHARSET_INFO *cs)
from++;
if (from == end)
{
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
is_cuted_fields_incr=1;
}
else if (*from == '+' || *from == '-') // Found some sign ?
@@ -2221,7 +2222,7 @@ int Field_decimal::store(const char *from_arg, uint len, CHARSET_INFO *cs)
for (;from != end && my_isspace(&my_charset_bin, *from); from++) ;
if (from != end) // If still something left, warn
{
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
is_cuted_fields_incr=1;
}
}
@@ -2258,7 +2259,7 @@ int Field_decimal::store(const char *from_arg, uint len, CHARSET_INFO *cs)
tmp_uint=tmp_dec+(uint)(int_digits_end-int_digits_from);
else if (expo_sign_char == '-')
{
- tmp_uint=min(exponent,(uint)(int_digits_end-int_digits_from));
+ tmp_uint=MY_MIN(exponent,(uint)(int_digits_end-int_digits_from));
frac_digits_added_zeros=exponent-tmp_uint;
int_digits_end -= tmp_uint;
frac_digits_head_end=int_digits_end+tmp_uint;
@@ -2266,7 +2267,7 @@ int Field_decimal::store(const char *from_arg, uint len, CHARSET_INFO *cs)
}
else // (expo_sign_char=='+')
{
- tmp_uint=min(exponent,(uint)(frac_digits_end-frac_digits_from));
+ tmp_uint=MY_MIN(exponent,(uint)(frac_digits_end-frac_digits_from));
int_digits_added_zeros=exponent-tmp_uint;
int_digits_tail_from=frac_digits_from;
frac_digits_from=frac_digits_from+tmp_uint;
@@ -2399,7 +2400,7 @@ int Field_decimal::store(const char *from_arg, uint len, CHARSET_INFO *cs)
if (tmp_char != '0') // Losing a non zero digit ?
{
if (!is_cuted_fields_incr)
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
+ set_warning(Sql_condition::WARN_LEVEL_WARN,
WARN_DATA_TRUNCATED, 1);
return 0;
}
@@ -2422,7 +2423,7 @@ int Field_decimal::store(const char *from_arg, uint len, CHARSET_INFO *cs)
This is a note, not a warning, as we don't want to abort
when we cut decimals in strict mode
*/
- set_warning(MYSQL_ERROR::WARN_LEVEL_NOTE, WARN_DATA_TRUNCATED, 1);
+ set_warning(Sql_condition::WARN_LEVEL_NOTE, WARN_DATA_TRUNCATED, 1);
}
return 0;
}
@@ -2685,7 +2686,7 @@ Field *Field_new_decimal::create_from_item (Item *item)
{
signed int overflow;
- dec= min(dec, DECIMAL_MAX_SCALE);
+ dec= MY_MIN(dec, DECIMAL_MAX_SCALE);
/*
If the value still overflows the field with the corrected dec,
@@ -2701,7 +2702,7 @@ Field *Field_new_decimal::create_from_item (Item *item)
overflow= required_length - len;
if (overflow > 0)
- dec= max(0, dec - overflow); // too long, discard fract
+ dec= MY_MAX(0, dec - overflow); // too long, discard fract
else
/* Corrected value fits. */
len= required_length;
@@ -2772,7 +2773,7 @@ bool Field_new_decimal::store_value(const my_decimal *decimal_value)
if (unsigned_flag && decimal_value->sign())
{
DBUG_PRINT("info", ("unsigned overflow"));
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
decimal_value= &decimal_zero;
}
@@ -2816,32 +2817,32 @@ int Field_new_decimal::store(const char *from, uint length,
thd->abort_on_warning)
{
ErrConvString errmsg(from, length, &my_charset_bin);
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE_FOR_FIELD,
ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD),
"decimal", errmsg.ptr(), field_name,
- (ulong) thd->warning_info->current_row_for_warning());
-
+ static_cast<ulong>(thd->get_stmt_da()->
+ current_row_for_warning()));
DBUG_RETURN(err);
}
switch (err) {
case E_DEC_TRUNCATED:
- set_warning(MYSQL_ERROR::WARN_LEVEL_NOTE, WARN_DATA_TRUNCATED, 1);
+ set_warning(Sql_condition::WARN_LEVEL_NOTE, WARN_DATA_TRUNCATED, 1);
break;
case E_DEC_OVERFLOW:
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
set_value_on_overflow(&decimal_value, decimal_value.sign());
break;
case E_DEC_BAD_NUM:
{
ErrConvString errmsg(from, length, &my_charset_bin);
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE_FOR_FIELD,
ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD),
"decimal", errmsg.ptr(), field_name,
- (ulong) thd->warning_info->
- current_row_for_warning());
+ static_cast<ulong>(thd->get_stmt_da()->
+ current_row_for_warning()));
my_decimal_set_zero(&decimal_value);
break;
}
@@ -3148,13 +3149,13 @@ int Field_tiny::store(double nr)
if (nr < 0.0)
{
*ptr=0;
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
else if (nr > 255.0)
{
*ptr= (uchar) 255;
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
else
@@ -3165,13 +3166,13 @@ int Field_tiny::store(double nr)
if (nr < -128.0)
{
*ptr= (uchar) -128;
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
else if (nr > 127.0)
{
*ptr=127;
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
else
@@ -3191,13 +3192,13 @@ int Field_tiny::store(longlong nr, bool unsigned_val)
if (nr < 0 && !unsigned_val)
{
*ptr= 0;
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
else if ((ulonglong) nr > (ulonglong) 255)
{
*ptr= (char) 255;
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
else
@@ -3210,13 +3211,13 @@ int Field_tiny::store(longlong nr, bool unsigned_val)
if (nr < -128)
{
*ptr= (char) -128;
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
else if (nr > 127)
{
*ptr=127;
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
else
@@ -3250,7 +3251,7 @@ String *Field_tiny::val_str(String *val_buffer,
ASSERT_COLUMN_MARKED_FOR_READ;
CHARSET_INFO *cs= &my_charset_numeric;
uint length;
- uint mlength=max(field_length+1,5*cs->mbmaxlen);
+ uint mlength=MY_MAX(field_length+1,5*cs->mbmaxlen);
val_buffer->alloc(mlength);
char *to=(char*) val_buffer->ptr();
@@ -3327,13 +3328,13 @@ int Field_short::store(double nr)
if (nr < 0)
{
res=0;
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
else if (nr > (double) UINT_MAX16)
{
res=(int16) UINT_MAX16;
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
else
@@ -3344,13 +3345,13 @@ int Field_short::store(double nr)
if (nr < (double) INT_MIN16)
{
res=INT_MIN16;
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
else if (nr > (double) INT_MAX16)
{
res=INT_MAX16;
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
else
@@ -3372,13 +3373,13 @@ int Field_short::store(longlong nr, bool unsigned_val)
if (nr < 0L && !unsigned_val)
{
res=0;
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
else if ((ulonglong) nr > (ulonglong) UINT_MAX16)
{
res=(int16) UINT_MAX16;
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
else
@@ -3392,13 +3393,13 @@ int Field_short::store(longlong nr, bool unsigned_val)
if (nr < INT_MIN16)
{
res=INT_MIN16;
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
else if (nr > (longlong) INT_MAX16)
{
res=INT_MAX16;
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
else
@@ -3432,7 +3433,7 @@ String *Field_short::val_str(String *val_buffer,
ASSERT_COLUMN_MARKED_FOR_READ;
CHARSET_INFO *cs= &my_charset_numeric;
uint length;
- uint mlength=max(field_length+1,7*cs->mbmaxlen);
+ uint mlength=MY_MAX(field_length+1,7*cs->mbmaxlen);
val_buffer->alloc(mlength);
char *to=(char*) val_buffer->ptr();
short j;
@@ -3515,14 +3516,14 @@ int Field_medium::store(double nr)
if (nr < 0)
{
int3store(ptr,0);
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
else if (nr >= (double) (long) (1L << 24))
{
uint32 tmp=(uint32) (1L << 24)-1L;
int3store(ptr,tmp);
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
else
@@ -3534,14 +3535,14 @@ int Field_medium::store(double nr)
{
long tmp=(long) INT_MIN24;
int3store(ptr,tmp);
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
else if (nr > (double) INT_MAX24)
{
long tmp=(long) INT_MAX24;
int3store(ptr,tmp);
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
else
@@ -3561,14 +3562,14 @@ int Field_medium::store(longlong nr, bool unsigned_val)
if (nr < 0 && !unsigned_val)
{
int3store(ptr,0);
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
else if ((ulonglong) nr >= (ulonglong) (long) (1L << 24))
{
long tmp= (long) (1L << 24)-1L;
int3store(ptr,tmp);
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
else
@@ -3583,14 +3584,14 @@ int Field_medium::store(longlong nr, bool unsigned_val)
{
long tmp= (long) INT_MIN24;
int3store(ptr,tmp);
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
else if (nr > (longlong) INT_MAX24)
{
long tmp=(long) INT_MAX24;
int3store(ptr,tmp);
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
else
@@ -3622,7 +3623,7 @@ String *Field_medium::val_str(String *val_buffer,
ASSERT_COLUMN_MARKED_FOR_READ;
CHARSET_INFO *cs= &my_charset_numeric;
uint length;
- uint mlength=max(field_length+1,10*cs->mbmaxlen);
+ uint mlength=MY_MAX(field_length+1,10*cs->mbmaxlen);
val_buffer->alloc(mlength);
char *to=(char*) val_buffer->ptr();
long j= unsigned_flag ? (long) uint3korr(ptr) : sint3korr(ptr);
@@ -3712,7 +3713,7 @@ int Field_long::store(double nr)
else if (nr > (double) UINT_MAX32)
{
res= UINT_MAX32;
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
else
@@ -3734,7 +3735,7 @@ int Field_long::store(double nr)
res=(int32) (longlong) nr;
}
if (error)
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
int4store(ptr,res);
return error;
@@ -3780,7 +3781,7 @@ int Field_long::store(longlong nr, bool unsigned_val)
res=(int32) nr;
}
if (error)
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
int4store(ptr,res);
return error;
@@ -3811,7 +3812,7 @@ String *Field_long::val_str(String *val_buffer,
ASSERT_COLUMN_MARKED_FOR_READ;
CHARSET_INFO *cs= &my_charset_numeric;
uint length;
- uint mlength=max(field_length+1,12*cs->mbmaxlen);
+ uint mlength=MY_MAX(field_length+1,12*cs->mbmaxlen);
val_buffer->alloc(mlength);
char *to=(char*) val_buffer->ptr();
int32 j;
@@ -3879,7 +3880,7 @@ int Field_longlong::store(const char *from,uint len,CHARSET_INFO *cs)
tmp= cs->cset->strntoull10rnd(cs,from,len,unsigned_flag,&end,&error);
if (error == MY_ERRNO_ERANGE)
{
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
else if (get_thd()->count_cuted_fields &&
@@ -3901,7 +3902,7 @@ int Field_longlong::store(double nr)
res= double_to_longlong(nr, unsigned_flag, &error);
if (error)
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
int8store(ptr,res);
return error;
@@ -3922,7 +3923,7 @@ int Field_longlong::store(longlong nr, bool unsigned_val)
if (unsigned_flag != unsigned_val)
{
nr= unsigned_flag ? (ulonglong) 0 : (ulonglong) LONGLONG_MAX;
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
error= 1;
}
}
@@ -3961,7 +3962,7 @@ String *Field_longlong::val_str(String *val_buffer,
{
CHARSET_INFO *cs= &my_charset_numeric;
uint length;
- uint mlength=max(field_length+1,22*cs->mbmaxlen);
+ uint mlength=MY_MAX(field_length+1,22*cs->mbmaxlen);
val_buffer->alloc(mlength);
char *to=(char*) val_buffer->ptr();
longlong j;
@@ -4036,7 +4037,7 @@ int Field_float::store(const char *from,uint len,CHARSET_INFO *cs)
if (error || (!len || ((uint) (end-from) != len &&
get_thd()->count_cuted_fields)))
{
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
+ set_warning(Sql_condition::WARN_LEVEL_WARN,
(error ? ER_WARN_DATA_OUT_OF_RANGE : WARN_DATA_TRUNCATED), 1);
error= error ? 1 : 2;
}
@@ -4053,7 +4054,7 @@ int Field_float::store(double nr)
unsigned_flag, FLT_MAX);
if (error)
{
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
if (error < 0) // Wrong double value
{
error= 1;
@@ -4224,7 +4225,7 @@ int Field_double::store(const char *from,uint len,CHARSET_INFO *cs)
if (error || (!len || ((uint) (end-from) != len &&
get_thd()->count_cuted_fields)))
{
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
+ set_warning(Sql_condition::WARN_LEVEL_WARN,
(error ? ER_WARN_DATA_OUT_OF_RANGE : WARN_DATA_TRUNCATED), 1);
error= error ? 1 : 2;
}
@@ -4241,7 +4242,7 @@ int Field_double::store(double nr)
unsigned_flag, DBL_MAX);
if (error)
{
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
if (error < 0) // Wrong double value
{
error= 1;
@@ -4406,7 +4407,7 @@ longlong Field_double::val_int(void)
if (error)
{
ErrConvDouble err(j);
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE,
ER(ER_TRUNCATED_WRONG_VALUE), "INTEGER",
err.ptr());
@@ -4602,7 +4603,7 @@ int Field_timestamp::store_TIME_with_warning(THD *thd, MYSQL_TIME *l_time,
if (was_cut || !have_smth_to_conv)
{
error= 1;
- set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED,
+ set_datetime_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED,
str, MYSQL_TIMESTAMP_DATETIME, 1);
}
/* Only convert a correct date (not a zero date) */
@@ -4614,7 +4615,7 @@ int Field_timestamp::store_TIME_with_warning(THD *thd, MYSQL_TIME *l_time,
conversion_error= ER_WARN_DATA_OUT_OF_RANGE;
if (conversion_error)
{
- set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, conversion_error,
+ set_datetime_warning(Sql_condition::WARN_LEVEL_WARN, conversion_error,
str, MYSQL_TIMESTAMP_DATETIME, !error);
error= 1;
}
@@ -5084,7 +5085,7 @@ uint Field_temporal::is_equal(Create_field *new_field)
}
-void Field_temporal::set_warnings(MYSQL_ERROR::enum_warning_level trunc_level,
+void Field_temporal::set_warnings(Sql_condition::enum_warning_level trunc_level,
const ErrConv *str, int was_cut,
timestamp_type ts_type)
{
@@ -5102,7 +5103,7 @@ void Field_temporal::set_warnings(MYSQL_ERROR::enum_warning_level trunc_level,
set_datetime_warning(trunc_level, WARN_DATA_TRUNCATED,
str, mysql_type_to_time_type(type()), 1);
if (was_cut & MYSQL_TIME_WARN_OUT_OF_RANGE)
- set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE,
+ set_datetime_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE,
str, mysql_type_to_time_type(type()), 1);
}
@@ -5122,7 +5123,7 @@ int Field_temporal_with_date::store_TIME_with_warning(MYSQL_TIME *ltime,
int was_cut,
int have_smth_to_conv)
{
- MYSQL_ERROR::enum_warning_level trunc_level= MYSQL_ERROR::WARN_LEVEL_WARN;
+ Sql_condition::enum_warning_level trunc_level= Sql_condition::WARN_LEVEL_WARN;
int ret= 2;
ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
@@ -5141,7 +5142,7 @@ int Field_temporal_with_date::store_TIME_with_warning(MYSQL_TIME *ltime,
mysql_type_to_time_type(type()) == MYSQL_TIMESTAMP_DATE &&
(ltime->hour || ltime->minute || ltime->second || ltime->second_part))
{
- trunc_level= MYSQL_ERROR::WARN_LEVEL_NOTE;
+ trunc_level= Sql_condition::WARN_LEVEL_NOTE;
was_cut|= MYSQL_TIME_WARN_TRUNCATED;
ret= 3;
}
@@ -5237,7 +5238,7 @@ int Field_time::store_TIME_with_warning(MYSQL_TIME *ltime,
int was_cut,
int have_smth_to_conv)
{
- MYSQL_ERROR::enum_warning_level trunc_level= MYSQL_ERROR::WARN_LEVEL_WARN;
+ Sql_condition::enum_warning_level trunc_level= Sql_condition::WARN_LEVEL_WARN;
int ret= 2;
ASSERT_COLUMN_MARKED_FOR_WRITE_OR_COMPUTED;
@@ -5252,7 +5253,7 @@ int Field_time::store_TIME_with_warning(MYSQL_TIME *ltime,
(ltime->year || ltime->month))
{
ltime->year= ltime->month= ltime->day= 0;
- trunc_level= MYSQL_ERROR::WARN_LEVEL_NOTE;
+ trunc_level= Sql_condition::WARN_LEVEL_NOTE;
was_cut|= MYSQL_TIME_WARN_TRUNCATED;
ret= 3;
}
@@ -5373,10 +5374,10 @@ bool Field_time::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
(fuzzydate & TIME_NO_ZERO_IN_DATE))
{
THD *thd= get_thd();
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARN_DATA_OUT_OF_RANGE,
ER(ER_WARN_DATA_OUT_OF_RANGE), field_name,
- thd->warning_info->current_row_for_warning());
+ thd->get_stmt_da()->current_row_for_warning());
return 1;
}
long tmp=(long) sint3korr(ptr);
@@ -5563,7 +5564,7 @@ int Field_year::store(const char *from, uint len,CHARSET_INFO *cs)
error == MY_ERRNO_ERANGE)
{
*ptr=0;
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
return 1;
}
if (get_thd()->count_cuted_fields &&
@@ -5606,7 +5607,7 @@ int Field_year::store(longlong nr, bool unsigned_val)
if (nr < 0 || (nr >= 100 && nr <= 1900) || nr > 2155)
{
*ptr= 0;
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
return 1;
}
if (nr != 0 || field_length != 4) // 0000 -> 0; 00 -> 2000
@@ -5627,7 +5628,7 @@ int Field_year::store_time_dec(MYSQL_TIME *ltime, uint dec)
if (Field_year::store(ltime->year, 0))
return 1;
- set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED,
+ set_datetime_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED,
&str, ltime->time_type, 1);
return 0;
}
@@ -6209,11 +6210,11 @@ check_string_copy_error(Field_str *field,
convert_to_printable(tmp, sizeof(tmp), pos, (end - pos), cs, 6);
push_warning_printf(thd,
- MYSQL_ERROR::WARN_LEVEL_WARN,
+ Sql_condition::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE_FOR_FIELD,
ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD),
"string", tmp, field->field_name,
- thd->warning_info->current_row_for_warning());
+ thd->get_stmt_da()->current_row_for_warning());
return TRUE;
}
@@ -6248,14 +6249,14 @@ Field_longstr::report_if_important_data(const char *pstr, const char *end,
if (test_if_important_data(field_charset, pstr, end))
{
if (thd->abort_on_warning)
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_DATA_TOO_LONG, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_DATA_TOO_LONG, 1);
else
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
return 2;
}
else if (count_spaces)
{ /* If we lost only spaces then produce a NOTE, not a WARNING */
- set_warning(MYSQL_ERROR::WARN_LEVEL_NOTE, WARN_DATA_TRUNCATED, 1);
+ set_warning(Sql_condition::WARN_LEVEL_NOTE, WARN_DATA_TRUNCATED, 1);
return 2;
}
}
@@ -6321,9 +6322,9 @@ int Field_str::store(double nr)
if (error)
{
if (get_thd()->abort_on_warning)
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_DATA_TOO_LONG, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_DATA_TOO_LONG, 1);
else
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
}
return store(buff, length, &my_charset_numeric);
}
@@ -6385,7 +6386,7 @@ double Field_string::val_real(void)
(char*) ptr + field_length))))
{
ErrConvString err((char*) ptr, field_length, cs);
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE,
ER(ER_TRUNCATED_WRONG_VALUE), "DOUBLE",
err.ptr());
@@ -6409,7 +6410,7 @@ longlong Field_string::val_int(void)
(char*) ptr + field_length))))
{
ErrConvString err((char*) ptr, field_length, cs);
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE,
ER(ER_TRUNCATED_WRONG_VALUE),
"INTEGER", err.ptr());
@@ -6445,7 +6446,7 @@ my_decimal *Field_string::val_decimal(my_decimal *decimal_value)
if (!get_thd()->no_errors && err)
{
ErrConvString errmsg((char*) ptr, field_length, charset());
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE,
ER(ER_TRUNCATED_WRONG_VALUE),
"DECIMAL", errmsg.ptr());
@@ -6541,7 +6542,7 @@ void Field_string::sql_type(String &res) const
uchar *Field_string::pack(uchar *to, const uchar *from, uint max_length)
{
- uint length= min(field_length,max_length);
+ uint length= MY_MIN(field_length,max_length);
uint local_char_length= max_length/field_charset->mbmaxlen;
DBUG_PRINT("debug", ("Packing field '%s' - length: %u ", field_name, length));
@@ -7288,7 +7289,7 @@ int Field_blob::store(const char *from,uint length,CHARSET_INFO *cs)
from= tmpstr.ptr();
}
- new_length= min(max_data_length(), field_charset->mbmaxlen * length);
+ new_length= MY_MIN(max_data_length(), field_charset->mbmaxlen * length);
if (value.alloc(new_length))
goto oom_error;
@@ -7448,7 +7449,7 @@ int Field_blob::cmp_binary(const uchar *a_ptr, const uchar *b_ptr,
b_length=get_length(b_ptr);
if (b_length > max_length)
b_length=max_length;
- diff=memcmp(a,b,min(a_length,b_length));
+ diff=memcmp(a,b,MY_MIN(a_length,b_length));
return diff ? diff : (int) (a_length - b_length);
}
@@ -7626,7 +7627,7 @@ uchar *Field_blob::pack(uchar *to, const uchar *from, uint max_length)
length given is smaller than the actual length of the blob, we
just store the initial bytes of the blob.
*/
- store_length(to, packlength, min(length, max_length));
+ store_length(to, packlength, MY_MIN(length, max_length));
/*
Store the actual blob data, which will occupy 'length' bytes.
@@ -7788,7 +7789,7 @@ int Field_geom::store(const char *from, uint length, CHARSET_INFO *cs)
ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD), MYF(0),
Geometry::ci_collection[geom_type]->m_name.str,
Geometry::ci_collection[wkb_type]->m_name.str, field_name,
- (ulong) table->in_use->warning_info->current_row_for_warning());
+ (ulong) table->in_use->get_stmt_da()->current_row_for_warning());
goto err_exit;
}
@@ -7871,13 +7872,13 @@ int Field_enum::store(const char *from,uint length,CHARSET_INFO *cs)
if (err || end != from+length || tmp > typelib->count)
{
tmp=0;
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
}
if (!get_thd()->count_cuted_fields)
err= 0;
}
else
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
}
store_type((ulonglong) tmp);
return err;
@@ -7896,7 +7897,7 @@ int Field_enum::store(longlong nr, bool unsigned_val)
int error= 0;
if ((ulonglong) nr > typelib->count || nr == 0)
{
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
if (nr != 0 || get_thd()->count_cuted_fields)
{
nr= 0;
@@ -8050,11 +8051,11 @@ int Field_set::store(const char *from,uint length,CHARSET_INFO *cs)
tmp > (ulonglong) (((longlong) 1 << typelib->count) - (longlong) 1))
{
tmp=0;
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
}
}
else if (got_warning)
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
store_type(tmp);
return err;
}
@@ -8074,7 +8075,7 @@ int Field_set::store(longlong nr, bool unsigned_val)
if ((ulonglong) nr > max_nr)
{
nr&= max_nr;
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
error=1;
}
store_type((ulonglong) nr);
@@ -8428,9 +8429,9 @@ int Field_bit::store(const char *from, uint length, CHARSET_INFO *cs)
set_rec_bits((1 << bit_len) - 1, bit_ptr, bit_ofs, bit_len);
memset(ptr, 0xff, bytes_in_rec);
if (get_thd()->really_abort_on_warning())
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_DATA_TOO_LONG, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_DATA_TOO_LONG, 1);
else
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
return 1;
}
/* delta is >= -1 here */
@@ -8518,7 +8519,7 @@ String *Field_bit::val_str(String *val_buffer,
{
ASSERT_COLUMN_MARKED_FOR_READ;
char buff[sizeof(longlong)];
- uint length= min(pack_length(), sizeof(longlong));
+ uint length= MY_MIN(pack_length(), sizeof(longlong));
ulonglong bits= val_int();
mi_int8store(buff,bits);
@@ -8606,7 +8607,7 @@ uint Field_bit::get_key_image(uchar *buff, uint length, imagetype type_arg)
*buff++= bits;
length--;
}
- uint data_length = min(length, bytes_in_rec);
+ uint data_length = MY_MIN(length, bytes_in_rec);
memcpy(buff, ptr, data_length);
return data_length + 1;
}
@@ -8730,7 +8731,7 @@ Field_bit::pack(uchar *to, const uchar *from, uint max_length)
uchar bits= get_rec_bits(bit_ptr + (from - ptr), bit_ofs, bit_len);
*to++= bits;
}
- length= min(bytes_in_rec, max_length - (bit_len > 0));
+ length= MY_MIN(bytes_in_rec, max_length - (bit_len > 0));
memcpy(to, from, length);
return to + length;
}
@@ -8865,9 +8866,9 @@ int Field_bit_as_char::store(const char *from, uint length, CHARSET_INFO *cs)
if (bits)
*ptr&= ((1 << bits) - 1); /* set first uchar */
if (get_thd()->really_abort_on_warning())
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_DATA_TOO_LONG, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_DATA_TOO_LONG, 1);
else
- set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
+ set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1);
return 1;
}
bzero(ptr, delta);
@@ -9270,7 +9271,7 @@ bool Create_field::init(THD *thd, char *fld_name, enum_field_types fld_type,
/*
Otherwise a default of '' is just a warning.
*/
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_BLOB_CANT_HAVE_DEFAULT,
ER(ER_BLOB_CANT_HAVE_DEFAULT),
fld_name);
@@ -9762,11 +9763,6 @@ Create_field::Create_field(Field *old_field,Field *orig_field)
option_list= old_field->option_list;
option_struct= old_field->option_struct;
- /* Fix if the original table had 4 byte pointer blobs */
- if (flags & BLOB_FLAG)
- pack_length= (pack_length- old_field->table->s->blob_ptr_size +
- portable_sizeof_char_ptr);
-
switch (sql_type) {
case MYSQL_TYPE_BLOB:
switch (pack_length - portable_sizeof_char_ptr) {
@@ -9801,7 +9797,7 @@ Create_field::Create_field(Field *old_field,Field *orig_field)
{
char buff[sizeof("YEAR()") + MY_INT64_NUM_DECIMAL_DIGITS + 1];
my_snprintf(buff, sizeof(buff), "YEAR(%lu)", length);
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_NOTE,
ER_WARN_DEPRECATED_SYNTAX,
ER(ER_WARN_DEPRECATED_SYNTAX),
buff, "YEAR(4)");
@@ -9929,11 +9925,11 @@ uint32 Field_blob::max_display_length()
*****************************************************************************/
/**
- Produce warning or note about data saved into field.
+* Produce warning or note about data saved into field.
@param level - level of message (Note/Warning/Error)
@param code - error code of message to be produced
- @param cuted_increment - whenever we should increase cut fields count or not
+ @param cut_increment - whenever we should increase cut fields count
@note
This function won't produce warning and increase cut fields counter
@@ -9941,11 +9937,16 @@ uint32 Field_blob::max_display_length()
if count_cuted_fields == CHECK_FIELD_IGNORE then we ignore notes.
This allows us to avoid notes in optimisation, like convert_constant_item().
+
+ @retval
+ 1 if count_cuted_fields == CHECK_FIELD_IGNORE and error level is not NOTE
+ @retval
+ 0 otherwise
*/
-void
-Field::set_warning(MYSQL_ERROR::enum_warning_level level, uint code,
- int cuted_increment)
+bool
+Field::set_warning(Sql_condition::enum_warning_level level, uint code,
+ int cut_increment) const
{
/*
If this field was created only for type conversion purposes it
@@ -9954,10 +9955,12 @@ Field::set_warning(MYSQL_ERROR::enum_warning_level level, uint code,
THD *thd= table ? table->in_use : current_thd;
if (thd->count_cuted_fields)
{
- thd->cuted_fields+= cuted_increment;
+ thd->cuted_fields+= cut_increment;
push_warning_printf(thd, level, code, ER(code), field_name,
- thd->warning_info->current_row_for_warning());
+ thd->get_stmt_da()->current_row_for_warning());
+ return 0;
}
+ return level >= Sql_condition::WARN_LEVEL_WARN;
}
@@ -9979,12 +9982,12 @@ Field::set_warning(MYSQL_ERROR::enum_warning_level level, uint code,
*/
-void Field::set_datetime_warning(MYSQL_ERROR::enum_warning_level level,
+void Field::set_datetime_warning(Sql_condition::enum_warning_level level,
uint code, const ErrConv *str,
timestamp_type ts_type, int cuted_increment)
{
THD *thd= get_thd();
- if (thd->really_abort_on_warning() && level >= MYSQL_ERROR::WARN_LEVEL_WARN)
+ if (thd->really_abort_on_warning() && level >= Sql_condition::WARN_LEVEL_WARN)
make_truncated_value_warning(thd, level, str, ts_type, field_name);
else
set_warning(level, code, cuted_increment);
diff --git a/sql/field.h b/sql/field.h
index feef0cbef08..40be4f7776a 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -29,7 +29,7 @@
#include "table.h" /* TABLE */
#include "sql_string.h" /* String */
#include "my_decimal.h" /* my_decimal */
-#include "sql_error.h" /* MYSQL_ERROR */
+#include "sql_error.h" /* Sql_condition */
#include "compat56.h"
class Send_field;
@@ -569,32 +569,53 @@ public:
*/
virtual void sql_type(String &str) const =0;
virtual uint size_of() const =0; // For new field
- inline bool is_null(my_ptrdiff_t row_offset= 0)
- { return null_ptr ? (null_ptr[row_offset] & null_bit ? 1 : 0) : table->null_row; }
- inline bool is_real_null(my_ptrdiff_t row_offset= 0)
+ inline bool is_null(my_ptrdiff_t row_offset= 0) const
+ {
+ /*
+ The table may have been marked as containing only NULL values
+ for all fields if it is a NULL-complemented row of an OUTER JOIN
+ or if the query is an implicitly grouped query (has aggregate
+ functions but no GROUP BY clause) with no qualifying rows. If
+ this is the case (in which TABLE::null_row is true), the field
+ is considered to be NULL.
+ Note that if a table->null_row is set then also all null_bits are
+ set for the row.
+
+ Otherwise, if the field is NULLable, it has a valid null_ptr
+ pointer, and its NULLity is recorded in the "null_bit" bit of
+ null_ptr[row_offset].
+ */
+ return (table->null_row ? TRUE :
+ null_ptr ? test(null_ptr[row_offset] & null_bit) : 0);
+ }
+ inline bool is_real_null(my_ptrdiff_t row_offset= 0) const
{ return null_ptr ? (null_ptr[row_offset] & null_bit ? 1 : 0) : 0; }
- inline bool is_null_in_record(const uchar *record)
+ inline bool is_null_in_record(const uchar *record) const
{
if (!null_ptr)
return 0;
return test(record[(uint) (null_ptr -table->record[0])] &
null_bit);
}
- inline bool is_null_in_record_with_offset(my_ptrdiff_t col_offset)
- {
- if (!null_ptr)
- return 0;
- return test(null_ptr[col_offset] & null_bit);
- }
inline void set_null(my_ptrdiff_t row_offset= 0)
{ if (null_ptr) null_ptr[row_offset]|= null_bit; }
inline void set_notnull(my_ptrdiff_t row_offset= 0)
{ if (null_ptr) null_ptr[row_offset]&= (uchar) ~null_bit; }
- inline bool maybe_null(void) { return null_ptr != 0 || table->maybe_null; }
- /**
- Signals that this field is NULL-able.
- */
- inline bool real_maybe_null(void) { return null_ptr != 0; }
+ inline bool maybe_null(void) const
+ { return null_ptr != 0 || table->maybe_null; }
+
+ /* @return true if this field is NULL-able, false otherwise. */
+ inline bool real_maybe_null(void) const { return null_ptr != 0; }
+ uint null_offset(const uchar *record) const
+ { return (uint) (null_ptr - record); }
+
+ uint null_offset() const
+ { return null_offset(table->record[0]); }
+ void set_null_ptr(uchar *p_null_ptr, uint p_null_bit)
+ {
+ null_ptr= p_null_ptr;
+ null_bit= p_null_bit;
+ }
inline THD *get_thd() { return table ? table->in_use : current_thd; }
@@ -762,9 +783,9 @@ public:
virtual uint repertoire(void) const { return MY_REPERTOIRE_UNICODE30; }
virtual void set_derivation(enum Derivation derivation_arg) { }
virtual int set_time() { return 1; }
- void set_warning(MYSQL_ERROR::enum_warning_level, unsigned int code,
- int cuted_increment);
- void set_datetime_warning(MYSQL_ERROR::enum_warning_level, uint code,
+ bool set_warning(Sql_condition::enum_warning_level, unsigned int code,
+ int cuted_increment) const;
+ void set_datetime_warning(Sql_condition::enum_warning_level, uint code,
const ErrConv *str, timestamp_type ts_type,
int cuted_increment);
inline bool check_overflow(int op_result)
@@ -809,6 +830,30 @@ public:
return GEOM_GEOMETRY;
}
+ ha_storage_media field_storage_type() const
+ {
+ return (ha_storage_media)
+ ((flags >> FIELD_FLAGS_STORAGE_MEDIA) & 3);
+ }
+
+ void set_storage_type(ha_storage_media storage_type_arg)
+ {
+ DBUG_ASSERT(field_storage_type() == HA_SM_DEFAULT);
+ flags |= (storage_type_arg << FIELD_FLAGS_STORAGE_MEDIA);
+ }
+
+ column_format_type column_format() const
+ {
+ return (column_format_type)
+ ((flags >> FIELD_FLAGS_COLUMN_FORMAT) & 3);
+ }
+
+ void set_column_format(column_format_type column_format_arg)
+ {
+ DBUG_ASSERT(column_format() == COLUMN_FORMAT_TYPE_DEFAULT);
+ flags |= (column_format_arg << FIELD_FLAGS_COLUMN_FORMAT);
+ }
+
key_map get_possible_keys();
/* Hash value */
@@ -1458,7 +1503,7 @@ public:
return (Field::eq_def(field) && decimals() == field->decimals());
}
my_decimal *val_decimal(my_decimal*);
- void set_warnings(MYSQL_ERROR::enum_warning_level trunc_level,
+ void set_warnings(Sql_condition::enum_warning_level trunc_level,
const ErrConv *str, int was_cut, timestamp_type ts_type);
double pos_in_interval(Field *min, Field *max)
{
@@ -2274,6 +2319,7 @@ public:
Field_blob(uint32 packlength_arg)
:Field_longstr((uchar*) 0, 0, (uchar*) "", 0, NONE, "temp", system_charset_info),
packlength(packlength_arg) {}
+ /* Note that the default copy constructor is used, in clone() */
enum_field_types type() const { return MYSQL_TYPE_BLOB;}
enum ha_base_keytype key_type() const
{ return binary() ? HA_KEYTYPE_VARBINARY2 : HA_KEYTYPE_VARTEXT2; }
@@ -2298,7 +2344,7 @@ public:
uint32 key_length() const { return 0; }
void sort_string(uchar *buff,uint length);
uint32 pack_length() const
- { return (uint32) (packlength+table->s->blob_ptr_size); }
+ { return (uint32) (packlength + portable_sizeof_char_ptr); }
/**
Return the packed length without the pointer size added.
@@ -2764,12 +2810,23 @@ public:
{
return (flags & (BINCMP_FLAG | BINARY_FLAG)) != 0;
}
+
+ ha_storage_media field_storage_type() const
+ {
+ return (ha_storage_media)
+ ((flags >> FIELD_FLAGS_STORAGE_MEDIA) & 3);
+ }
+
+ column_format_type column_format() const
+ {
+ return (column_format_type)
+ ((flags >> FIELD_FLAGS_COLUMN_FORMAT) & 3);
+ }
+
uint virtual_col_expr_maxlen()
{
return 255 - FRM_VCOL_HEADER_SIZE(interval != NULL);
}
-private:
- const String empty_set_string;
};
diff --git a/sql/field_conv.cc b/sql/field_conv.cc
index 6c3fcc0d355..5e16166531d 100644
--- a/sql/field_conv.cc
+++ b/sql/field_conv.cc
@@ -133,7 +133,7 @@ set_field_to_null(Field *field)
field->reset();
switch (field->table->in_use->count_cuted_fields) {
case CHECK_FIELD_WARN:
- field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
+ field->set_warning(Sql_condition::WARN_LEVEL_WARN, WARN_DATA_TRUNCATED, 1);
/* fall through */
case CHECK_FIELD_IGNORE:
return 0;
@@ -202,7 +202,7 @@ set_field_to_null_with_conversions(Field *field, bool no_conversions)
}
switch (field->table->in_use->count_cuted_fields) {
case CHECK_FIELD_WARN:
- field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_BAD_NULL_ERROR, 1);
+ field->set_warning(Sql_condition::WARN_LEVEL_WARN, ER_BAD_NULL_ERROR, 1);
/* fall through */
case CHECK_FIELD_IGNORE:
return 0;
@@ -272,7 +272,7 @@ static void do_copy_nullable_row_to_notnull(Copy_field *copy)
if (*copy->null_row ||
(copy->from_null_ptr && (*copy->from_null_ptr & copy->from_bit)))
{
- copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
+ copy->to_field->set_warning(Sql_condition::WARN_LEVEL_WARN,
WARN_DATA_TRUNCATED, 1);
copy->to_field->reset();
}
@@ -288,7 +288,7 @@ static void do_copy_not_null(Copy_field *copy)
{
if (*copy->from_null_ptr & copy->from_bit)
{
- copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
+ copy->to_field->set_warning(Sql_condition::WARN_LEVEL_WARN,
WARN_DATA_TRUNCATED, 1);
copy->to_field->reset();
}
@@ -440,7 +440,7 @@ static void do_cut_string(Copy_field *copy)
(char*) copy->from_ptr + copy->from_length,
MY_SEQ_SPACES) < copy->from_length - copy->to_length)
{
- copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
+ copy->to_field->set_warning(Sql_condition::WARN_LEVEL_WARN,
WARN_DATA_TRUNCATED, 1);
}
}
@@ -471,7 +471,7 @@ static void do_cut_string_complex(Copy_field *copy)
(char*) from_end,
MY_SEQ_SPACES) < (copy->from_length - copy_length))
{
- copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
+ copy->to_field->set_warning(Sql_condition::WARN_LEVEL_WARN,
WARN_DATA_TRUNCATED, 1);
}
@@ -510,7 +510,7 @@ static void do_varstring1(Copy_field *copy)
length=copy->to_length - 1;
if (copy->from_field->table->in_use->count_cuted_fields &&
copy->to_field)
- copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
+ copy->to_field->set_warning(Sql_condition::WARN_LEVEL_WARN,
WARN_DATA_TRUNCATED, 1);
}
*(uchar*) copy->to_ptr= (uchar) length;
@@ -531,7 +531,7 @@ static void do_varstring1_mb(Copy_field *copy)
if (length < from_length)
{
if (current_thd->count_cuted_fields)
- copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
+ copy->to_field->set_warning(Sql_condition::WARN_LEVEL_WARN,
WARN_DATA_TRUNCATED, 1);
}
*copy->to_ptr= (uchar) length;
@@ -547,7 +547,7 @@ static void do_varstring2(Copy_field *copy)
length=copy->to_length-HA_KEY_BLOB_LENGTH;
if (copy->from_field->table->in_use->count_cuted_fields &&
copy->to_field)
- copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
+ copy->to_field->set_warning(Sql_condition::WARN_LEVEL_WARN,
WARN_DATA_TRUNCATED, 1);
}
int2store(copy->to_ptr,length);
@@ -569,7 +569,7 @@ static void do_varstring2_mb(Copy_field *copy)
if (length < from_length)
{
if (current_thd->count_cuted_fields)
- copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
+ copy->to_field->set_warning(Sql_condition::WARN_LEVEL_WARN,
WARN_DATA_TRUNCATED, 1);
}
int2store(copy->to_ptr, length);
@@ -712,8 +712,8 @@ Copy_field::get_copy_func(Field *to,Field *from)
if (from_length != to_length)
{
// Correct pointer to point at char pointer
- to_ptr+= to_length - to->table->s->blob_ptr_size;
- from_ptr+= from_length- from->table->s->blob_ptr_size;
+ to_ptr+= to_length - portable_sizeof_char_ptr;
+ from_ptr+= from_length - portable_sizeof_char_ptr;
return do_copy_blob;
}
}
@@ -829,7 +829,7 @@ Copy_field::get_copy_func(Field *to,Field *from)
int field_conv(Field *to,Field *from)
{
if (to->real_type() == from->real_type() &&
- !(to->type() == MYSQL_TYPE_BLOB && to->table->copy_blobs))
+ !(to->flags & BLOB_FLAG && to->table->copy_blobs))
{
if (to->pack_length() == from->pack_length() &&
!(to->flags & UNSIGNED_FLAG && !(from->flags & UNSIGNED_FLAG)) &&
@@ -858,7 +858,7 @@ int field_conv(Field *to,Field *from)
return 0;
}
}
- if (to->type() == MYSQL_TYPE_BLOB)
+ if (to->flags & BLOB_FLAG)
{ // Be sure the value is stored
Field_blob *blob=(Field_blob*) to;
from->val_str(&blob->value);
diff --git a/sql/filesort.cc b/sql/filesort.cc
index f5a85036faa..7cb2306eb7c 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -245,12 +245,12 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
{
DBUG_PRINT("info", ("filesort PQ is not applicable"));
- size_t min_sort_memory= max(MIN_SORT_MEMORY, param.sort_length*MERGEBUFF2);
+ size_t min_sort_memory= MY_MAX(MIN_SORT_MEMORY, param.sort_length*MERGEBUFF2);
set_if_bigger(min_sort_memory, sizeof(BUFFPEK*)*MERGEBUFF2);
while (memory_available >= min_sort_memory)
{
ulonglong keys= memory_available / (param.rec_length + sizeof(char*));
- param.max_keys_per_buffer= (uint) min(num_rows, keys);
+ param.max_keys_per_buffer= (uint) MY_MIN(num_rows, keys);
if (table_sort.get_sort_keys())
{
// If we have already allocated a buffer, it better have same size!
@@ -391,7 +391,8 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length,
MYF(0),
ER_THD(thd, ER_FILSORT_ABORT),
kill_errno ? ER(kill_errno) :
- thd->killed == ABORT_QUERY ? "" : thd->stmt_da->message());
+ thd->killed == ABORT_QUERY ? "" :
+ thd->get_stmt_da()->message());
if (global_system_variables.log_warnings > 1)
{
@@ -1371,7 +1372,7 @@ uint read_to_buffer(IO_CACHE *fromfile, BUFFPEK *buffpek,
register uint count;
uint length;
- if ((count=(uint) min((ha_rows) buffpek->max_keys,buffpek->count)))
+ if ((count=(uint) MY_MIN((ha_rows) buffpek->max_keys,buffpek->count)))
{
if (mysql_file_pread(fromfile->file, (uchar*) buffpek->base,
(length= rec_length*count),
@@ -1696,7 +1697,7 @@ int merge_buffers(Sort_param *param, IO_CACHE *from_file,
!= -1 && error != 0);
end:
- lastbuff->count= min(org_max_rows-max_rows, param->max_rows);
+ lastbuff->count= MY_MIN(org_max_rows-max_rows, param->max_rows);
lastbuff->file_pos= to_start_filepos;
err:
delete_queue(&queue);
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 6fc30fa4fa0..9524a0366d3 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -379,11 +379,11 @@ static int ndb_to_mysql_error(const NdbError *ndberr)
- Used by replication to see if the error was temporary
*/
if (ndberr->status == NdbError::TemporaryError)
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_GET_TEMPORARY_ERRMSG, ER(ER_GET_TEMPORARY_ERRMSG),
ndberr->code, ndberr->message, "NDB");
else
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
ndberr->code, ndberr->message, "NDB");
return error;
@@ -650,7 +650,7 @@ static void set_ndb_err(THD *thd, const NdbError &err)
{
char buf[FN_REFLEN];
ndb_error_string(thd_ndb->m_error_code, buf, sizeof(buf));
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
thd_ndb->m_error_code, buf, "NDB");
}
@@ -930,7 +930,7 @@ int ha_ndbcluster::set_ndb_value(NdbOperation *ndb_op, Field *field,
DBUG_PRINT("value", ("set blob ptr: 0x%lx len: %u",
(long) blob_ptr, blob_len));
- DBUG_DUMP("value", blob_ptr, min(blob_len, 26));
+ DBUG_DUMP("value", blob_ptr, MY_MIN(blob_len, 26));
if (set_blob_value)
*set_blob_value= TRUE;
@@ -1245,8 +1245,8 @@ static int fix_unique_index_attr_order(NDB_INDEX_DATA &data,
}
KEY_PART_INFO* key_part= key_info->key_part;
- KEY_PART_INFO* end= key_part+key_info->key_parts;
- DBUG_ASSERT(key_info->key_parts == sz);
+ KEY_PART_INFO* end= key_part+key_info->user_defined_key_parts;
+ DBUG_ASSERT(key_info->user_defined_key_parts == sz);
for (unsigned i= 0; key_part != end; key_part++, i++)
{
const char *field_name= key_part->field->field_name;
@@ -1576,7 +1576,7 @@ NDB_INDEX_TYPE ha_ndbcluster::get_index_type_from_key(uint inx,
bool ha_ndbcluster::check_index_fields_not_null(KEY* key_info)
{
KEY_PART_INFO* key_part= key_info->key_part;
- KEY_PART_INFO* end= key_part+key_info->key_parts;
+ KEY_PART_INFO* end= key_part+key_info->user_defined_key_parts;
DBUG_ENTER("ha_ndbcluster::check_index_fields_not_null");
for (; key_part != end; key_part++)
@@ -1733,7 +1733,7 @@ int ha_ndbcluster::set_primary_key(NdbOperation *op, const uchar *key)
{
KEY* key_info= table->key_info + table_share->primary_key;
KEY_PART_INFO* key_part= key_info->key_part;
- KEY_PART_INFO* end= key_part+key_info->key_parts;
+ KEY_PART_INFO* end= key_part+key_info->user_defined_key_parts;
DBUG_ENTER("set_primary_key");
for (; key_part != end; key_part++)
@@ -1755,7 +1755,7 @@ int ha_ndbcluster::set_primary_key_from_record(NdbOperation *op, const uchar *re
{
KEY* key_info= table->key_info + table_share->primary_key;
KEY_PART_INFO* key_part= key_info->key_part;
- KEY_PART_INFO* end= key_part+key_info->key_parts;
+ KEY_PART_INFO* end= key_part+key_info->user_defined_key_parts;
DBUG_ENTER("set_primary_key_from_record");
for (; key_part != end; key_part++)
@@ -1772,7 +1772,7 @@ bool ha_ndbcluster::check_index_fields_in_write_set(uint keyno)
{
KEY* key_info= table->key_info + keyno;
KEY_PART_INFO* key_part= key_info->key_part;
- KEY_PART_INFO* end= key_part+key_info->key_parts;
+ KEY_PART_INFO* end= key_part+key_info->user_defined_key_parts;
uint i;
DBUG_ENTER("check_index_fields_in_write_set");
@@ -1793,7 +1793,7 @@ int ha_ndbcluster::set_index_key_from_record(NdbOperation *op,
{
KEY* key_info= table->key_info + keyno;
KEY_PART_INFO* key_part= key_info->key_part;
- KEY_PART_INFO* end= key_part+key_info->key_parts;
+ KEY_PART_INFO* end= key_part+key_info->user_defined_key_parts;
uint i;
DBUG_ENTER("set_index_key_from_record");
@@ -1815,7 +1815,7 @@ ha_ndbcluster::set_index_key(NdbOperation *op,
DBUG_ENTER("ha_ndbcluster::set_index_key");
uint i;
KEY_PART_INFO* key_part= key_info->key_part;
- KEY_PART_INFO* end= key_part+key_info->key_parts;
+ KEY_PART_INFO* end= key_part+key_info->user_defined_key_parts;
for (i= 0; key_part != end; key_part++, i++)
{
@@ -2083,7 +2083,7 @@ check_null_in_record(const KEY* key_info, const uchar *record)
{
KEY_PART_INFO *curr_part, *end_part;
curr_part= key_info->key_part;
- end_part= curr_part + key_info->key_parts;
+ end_part= curr_part + key_info->user_defined_key_parts;
while (curr_part != end_part)
{
@@ -2177,7 +2177,7 @@ int ha_ndbcluster::peek_indexed_rows(const uchar *record,
NdbIndexOperation *iop;
const NDBINDEX *unique_index = m_index[i].unique_index;
key_part= key_info->key_part;
- end= key_part + key_info->key_parts;
+ end= key_part + key_info->user_defined_key_parts;
if (!(iop= trans->getNdbIndexOperation(unique_index, m_table)) ||
iop->readTuple(lm) != 0)
ERR_RETURN(trans->getNdbError());
@@ -2405,7 +2405,7 @@ int ha_ndbcluster::set_bounds(NdbIndexScanOperation *op,
uint range_no)
{
const KEY *const key_info= table->key_info + inx;
- const uint key_parts= key_info->key_parts;
+ const uint key_parts= key_info->user_defined_key_parts;
uint key_tot_len[2];
uint tot_len;
uint i, j;
@@ -3206,7 +3206,7 @@ int ha_ndbcluster::update_row(const uchar *old_data, uchar *new_data)
undo_res= write_row((uchar *)old_data);
if (undo_res)
push_warning(current_thd,
- MYSQL_ERROR::WARN_LEVEL_WARN,
+ Sql_condition::WARN_LEVEL_WARN,
undo_res,
"NDB failed undoing delete at primary key update");
m_primary_key_update= FALSE;
@@ -3708,7 +3708,7 @@ check_null_in_key(const KEY* key_info, const uchar *key, uint key_len)
KEY_PART_INFO *curr_part, *end_part;
const uchar* end_ptr= key + key_len;
curr_part= key_info->key_part;
- end_part= curr_part + key_info->key_parts;
+ end_part= curr_part + key_info->user_defined_key_parts;
for (; curr_part != end_part && key < end_ptr; curr_part++)
{
@@ -4079,7 +4079,7 @@ void ha_ndbcluster::position(const uchar *record)
key_length= ref_length;
key_info= table->key_info + table_share->primary_key;
key_part= key_info->key_part;
- end= key_part + key_info->key_parts;
+ end= key_part + key_info->user_defined_key_parts;
buff= ref;
for (; key_part != end; key_part++)
@@ -5416,7 +5416,7 @@ int ha_ndbcluster::create(const char *name,
{
if (create_info->storage_media == HA_SM_MEMORY)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
ER(ER_ILLEGAL_HA_CREATE_OPTION),
ndbcluster_hton_name,
@@ -5471,7 +5471,7 @@ int ha_ndbcluster::create(const char *name,
case ROW_TYPE_FIXED:
if (field_type_forces_var_part(field->type()))
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
ER(ER_ILLEGAL_HA_CREATE_OPTION),
ndbcluster_hton_name,
@@ -5500,7 +5500,7 @@ int ha_ndbcluster::create(const char *name,
for (i= 0, key_info= form->key_info; i < form->s->keys; i++, key_info++)
{
KEY_PART_INFO *key_part= key_info->key_part;
- KEY_PART_INFO *end= key_part + key_info->key_parts;
+ KEY_PART_INFO *end= key_part + key_info->user_defined_key_parts;
for (; key_part != end; key_part++)
tab.getColumn(key_part->fieldnr-1)->setStorageType(
NdbDictionary::Column::StorageTypeMemory);
@@ -5802,7 +5802,7 @@ int ha_ndbcluster::create_index(const char *name, KEY *key_info,
case UNIQUE_INDEX:
if (check_index_fields_not_null(key_info))
{
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_NULL_COLUMN_IN_INDEX,
"Ndb does not support unique index on NULL valued attributes, index access with NULL value will become full table scan");
}
@@ -5811,7 +5811,7 @@ int ha_ndbcluster::create_index(const char *name, KEY *key_info,
case ORDERED_INDEX:
if (key_info->algorithm == HA_KEY_ALG_HASH)
{
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
ER(ER_ILLEGAL_HA_CREATE_OPTION),
ndbcluster_hton_name,
@@ -5860,7 +5860,7 @@ int ha_ndbcluster::create_ndb_index(const char *name,
Ndb *ndb= get_ndb();
NdbDictionary::Dictionary *dict= ndb->getDictionary();
KEY_PART_INFO *key_part= key_info->key_part;
- KEY_PART_INFO *end= key_part + key_info->key_parts;
+ KEY_PART_INFO *end= key_part + key_info->user_defined_key_parts;
DBUG_ENTER("ha_ndbcluster::create_index");
DBUG_PRINT("enter", ("name: %s ", name));
@@ -7284,7 +7284,7 @@ int ndbcluster_find_files(handlerton *hton, THD *thd,
file_name->str));
if (ndb_create_table_from_engine(thd, db, file_name->str))
{
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_TABLE_EXISTS_ERROR,
"Discover of table %s.%s failed",
db, file_name->str);
@@ -7310,7 +7310,7 @@ int ndbcluster_find_files(handlerton *hton, THD *thd,
file_name->length);
DBUG_ASSERT(record);
my_hash_delete(&ndb_tables, record);
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_TABLE_EXISTS_ERROR,
"Local table %s.%s shadows ndb table",
db, file_name->str);
@@ -8114,23 +8114,33 @@ uint8 ha_ndbcluster::table_cache_type()
}
-uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname,
+/**
+ Retrieve the commit count for the table object.
+
+ @param thd Thread context.
+ @param norm_name Normalized path to the table.
+ @param[out] commit_count Commit count for the table.
+
+ @return 0 on success.
+ @return 1 if an error occured.
+*/
+
+uint ndb_get_commitcount(THD *thd, char *norm_name,
Uint64 *commit_count)
{
- char name[FN_REFLEN + 1];
+ char dbname[NAME_LEN + 1];
NDB_SHARE *share;
DBUG_ENTER("ndb_get_commitcount");
- build_table_filename(name, sizeof(name) - 1,
- dbname, tabname, "", 0);
- DBUG_PRINT("enter", ("name: %s", name));
- mysql_mutex_lock(&ndbcluster_mutex);
+ DBUG_PRINT("enter", ("name: %s", norm_name));
+ pthread_mutex_lock(&ndbcluster_mutex);
if (!(share=(NDB_SHARE*) my_hash_search(&ndbcluster_open_tables,
- (uchar*) name,
- strlen(name))))
+ (const uchar*) norm_name,
+ strlen(norm_name))))
{
- mysql_mutex_unlock(&ndbcluster_mutex);
- DBUG_PRINT("info", ("Table %s not found in ndbcluster_open_tables", name));
+ pthread_mutex_unlock(&ndbcluster_mutex);
+ DBUG_PRINT("info", ("Table %s not found in ndbcluster_open_tables",
+ norm_name));
DBUG_RETURN(1);
}
/* ndb_share reference temporary, free below */
@@ -8162,6 +8172,8 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname,
Ndb *ndb;
if (!(ndb= check_ndb_in_thd(thd)))
DBUG_RETURN(1);
+
+ ha_ndbcluster::set_dbname(norm_name, dbname);
if (ndb->setDatabaseName(dbname))
{
ERR_RETURN(ndb->getNdbError());
@@ -8171,7 +8183,9 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname,
struct Ndb_statistics stat;
{
- Ndb_table_guard ndbtab_g(ndb->getDictionary(), tabname);
+ char tblname[NAME_LEN + 1];
+ ha_ndbcluster::set_tabname(norm_name, tblname);
+ Ndb_table_guard ndbtab_g(ndb->getDictionary(), tblname);
if (ndbtab_g.get_table() == 0
|| ndb_get_table_statistics(NULL, FALSE, ndb, ndbtab_g.get_table(), &stat))
{
@@ -8221,10 +8235,9 @@ uint ndb_get_commitcount(THD *thd, char *dbname, char *tabname,
@param thd thread handle
- @param full_name concatenation of database name,
- the null character '\\0', and the table name
- @param full_name_len length of the full name,
- i.e. len(dbname) + len(tablename) + 1
+ @param full_name normalized path to the table in the canonical
+ format.
+ @param full_name_len length of the normalized path to the table.
@param engine_data parameter retrieved when query was first inserted into
the cache. If the value of engine_data is changed,
all queries for this table should be invalidated.
@@ -8243,11 +8256,15 @@ ndbcluster_cache_retrieval_allowed(THD *thd,
ulonglong *engine_data)
{
Uint64 commit_count;
- char *dbname= full_name;
- char *tabname= dbname+strlen(dbname)+1;
+ char dbname[NAME_LEN + 1];
+ char tabname[NAME_LEN + 1];
#ifndef DBUG_OFF
char buff[22], buff2[22];
#endif
+
+ ha_ndbcluster::set_dbname(full_name, dbname);
+ ha_ndbcluster::set_tabname(full_name, tabname);
+
DBUG_ENTER("ndbcluster_cache_retrieval_allowed");
DBUG_PRINT("enter", ("dbname: %s, tabname: %s", dbname, tabname));
@@ -8257,7 +8274,7 @@ ndbcluster_cache_retrieval_allowed(THD *thd,
DBUG_RETURN(FALSE);
}
- if (ndb_get_commitcount(thd, dbname, tabname, &commit_count))
+ if (ndb_get_commitcount(thd, full_name, &commit_count))
{
*engine_data= 0; /* invalidate */
DBUG_PRINT("exit", ("No, could not retrieve commit_count"));
@@ -8292,10 +8309,9 @@ ndbcluster_cache_retrieval_allowed(THD *thd,
the cached query is reused.
@param thd thread handle
- @param full_name concatenation of database name,
- the null character '\\0', and the table name
- @param full_name_len length of the full name,
- i.e. len(dbname) + len(tablename) + 1
+ @param full_name normalized path to the table in the
+ canonical format.
+ @param full_name_len length of the normalized path to the table.
@param engine_callback function to be called before using cache on
this table
@param[out] engine_data commit_count for this table
@@ -8325,7 +8341,7 @@ ha_ndbcluster::register_query_cache_table(THD *thd,
DBUG_RETURN(FALSE);
}
- if (ndb_get_commitcount(thd, m_dbname, m_tabname, &commit_count))
+ if (ndb_get_commitcount(thd, full_name, &commit_count))
{
*engine_data= 0;
DBUG_PRINT("exit", ("Error, could not get commitcount"));
@@ -9827,11 +9843,11 @@ char* ha_ndbcluster::get_tablespace_name(THD *thd, char* name, uint name_len)
}
err:
if (ndberr.status == NdbError::TemporaryError)
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_GET_TEMPORARY_ERRMSG, ER(ER_GET_TEMPORARY_ERRMSG),
ndberr.code, ndberr.message, "NDB");
else
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
ndberr.code, ndberr.message, "NDB");
return 0;
@@ -9957,7 +9973,7 @@ int ha_ndbcluster::get_default_no_partitions(HA_CREATE_INFO *create_info)
if (adjusted_frag_count(no_fragments, no_nodes, reported_frags))
{
push_warning(current_thd,
- MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
+ Sql_condition::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
"Ndb might have problems storing the max amount of rows specified");
}
return (int)reported_frags;
@@ -10146,7 +10162,7 @@ uint ha_ndbcluster::set_up_partition_info(partition_info *part_info,
{
if (!current_thd->variables.new_mode)
{
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
ER(ER_ILLEGAL_HA_CREATE_OPTION),
ndbcluster_hton_name,
diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc
index 1544678de38..61dac31e52a 100644
--- a/sql/ha_ndbcluster_binlog.cc
+++ b/sql/ha_ndbcluster_binlog.cc
@@ -290,13 +290,13 @@ static void run_query(THD *thd, char *buf, char *end,
Thd_ndb *thd_ndb= get_thd_ndb(thd);
for (i= 0; no_print_error[i]; i++)
if ((thd_ndb->m_error_code == no_print_error[i]) ||
- (thd->stmt_da->sql_errno() == (unsigned) no_print_error[i]))
+ (thd->get_stmt_da()->sql_errno() == (unsigned) no_print_error[i]))
break;
if (!no_print_error[i])
sql_print_error("NDB: %s: error %s %d(ndb: %d) %d %d",
buf,
- thd->stmt_da->message(),
- thd->stmt_da->sql_errno(),
+ thd->get_stmt_da()->message(),
+ thd->get_stmt_da()->sql_errno(),
thd_ndb->m_error_code,
(int) thd->is_error(), thd->is_slave_error);
}
@@ -310,7 +310,7 @@ static void run_query(THD *thd, char *buf, char *end,
is called from ndbcluster_reset_logs(), which is called from
mysql_flush().
*/
- thd->stmt_da->reset_diagnostics_area();
+ thd->get_stmt_da()->reset_diagnostics_area();
thd->variables.option_bits= save_thd_options;
thd->set_query(save_thd_query, save_thd_query_length);
@@ -984,8 +984,8 @@ static void print_could_not_discover_error(THD *thd,
"my_errno: %d",
schema->db, schema->name, schema->query,
schema->node_id, my_errno);
- List_iterator_fast<MYSQL_ERROR> it(thd->warning_info->warn_list());
- MYSQL_ERROR *err;
+ List_iterator_fast<Sql_condition> it(thd->warning_info->warn_list());
+ Sql_condition *err;
while ((err= it++))
sql_print_warning("NDB Binlog: (%d)%s", err->get_sql_errno(),
err->get_message_text());
@@ -1230,7 +1230,7 @@ ndbcluster_update_slock(THD *thd,
char buf[1024];
my_snprintf(buf, sizeof(buf), "Could not release lock on '%s.%s'",
db, table_name);
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
ndb_error->code, ndb_error->message, buf);
}
@@ -1559,7 +1559,7 @@ err:
}
end:
if (ndb_error)
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
ndb_error->code,
ndb_error->message,
@@ -2349,8 +2349,8 @@ static int open_ndb_binlog_index(THD *thd, TABLE **ndb_binlog_index)
sql_print_error("NDB Binlog: Opening ndb_binlog_index: killed");
else
sql_print_error("NDB Binlog: Opening ndb_binlog_index: %d, '%s'",
- thd->stmt_da->sql_errno(),
- thd->stmt_da->message());
+ thd->get_stmt_da()->sql_errno(),
+ thd->get_stmt_da()->message());
thd->proc_info= save_proc_info;
return -1;
}
@@ -2406,9 +2406,9 @@ int ndb_add_ndb_binlog_index(THD *thd, void *_row)
}
add_ndb_binlog_index_err:
- thd->stmt_da->can_overwrite_status= TRUE;
+ thd->get_stmt_da()->set_overwrite_status(true);
thd->is_error() ? trans_rollback_stmt(thd) : trans_commit_stmt(thd);
- thd->stmt_da->can_overwrite_status= FALSE;
+ thd->get_stmt_da()->set_overwrite_status(false);
close_thread_tables(thd);
thd->mdl_context.release_transactional_locks();
ndb_binlog_index= 0;
@@ -2730,7 +2730,7 @@ ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab,
"with BLOB attribute and no PK is not supported",
share->key);
if (push_warning)
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
ER(ER_ILLEGAL_HA_CREATE_OPTION),
ndbcluster_hton_name,
@@ -2774,7 +2774,7 @@ ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab,
failed, print a warning
*/
if (push_warning > 1)
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
dict->getNdbError().code,
dict->getNdbError().message, "NDB");
@@ -2802,7 +2802,7 @@ ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab,
dict->dropEvent(my_event.getName()))
{
if (push_warning > 1)
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
dict->getNdbError().code,
dict->getNdbError().message, "NDB");
@@ -2821,7 +2821,7 @@ ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab,
if (dict->createEvent(my_event))
{
if (push_warning > 1)
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
dict->getNdbError().code,
dict->getNdbError().message, "NDB");
@@ -2834,7 +2834,7 @@ ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab,
DBUG_RETURN(-1);
}
#ifdef NDB_BINLOG_EXTRA_WARNINGS
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
0, "NDB Binlog: Removed trailing event",
"NDB");
@@ -2945,7 +2945,7 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab,
{
sql_print_error("NDB Binlog: Creating NdbEventOperation failed for"
" %s",event_name);
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
ndb->getNdbError().code,
ndb->getNdbError().message,
@@ -2994,7 +2994,7 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab,
sql_print_error("NDB Binlog: Creating NdbEventOperation"
" blob field %u handles failed (code=%d) for %s",
j, op->getNdbError().code, event_name);
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
op->getNdbError().code,
op->getNdbError().message,
@@ -3033,7 +3033,7 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab,
retries= 0;
if (retries == 0)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
op->getNdbError().code, op->getNdbError().message,
"NDB");
@@ -3101,7 +3101,7 @@ ndbcluster_handle_drop_table(Ndb *ndb, const char *event_name,
if (dict->getNdbError().code != 4710)
{
/* drop event failed for some reason, issue a warning */
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_GET_ERRMSG, ER(ER_GET_ERRMSG),
dict->getNdbError().code,
dict->getNdbError().message, "NDB");
@@ -4277,9 +4277,9 @@ err:
sql_print_information("Stopping Cluster Binlog");
DBUG_PRINT("info",("Shutting down cluster binlog thread"));
thd->proc_info= "Shutting down";
- thd->stmt_da->can_overwrite_status= TRUE;
+ thd->get_stmt_da()->set_overwrite_status(true);
thd->is_error() ? trans_rollback_stmt(thd) : trans_commit_stmt(thd);
- thd->stmt_da->can_overwrite_status= FALSE;
+ thd->get_stmt_da()->set_overwrite_status(false);
close_thread_tables(thd);
thd->mdl_context.release_transactional_locks();
mysql_mutex_lock(&injector_mutex);
diff --git a/sql/ha_ndbcluster_cond.cc b/sql/ha_ndbcluster_cond.cc
index f8b2ed8429a..22a7dbe55f7 100644
--- a/sql/ha_ndbcluster_cond.cc
+++ b/sql/ha_ndbcluster_cond.cc
@@ -1375,7 +1375,7 @@ ha_ndbcluster_cond::generate_scan_filter(NdbScanOperation *op)
{
// err.message has static storage
DBUG_PRINT("info", ("%s", err.message));
- push_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning(current_thd, Sql_condition::WARN_LEVEL_WARN,
err.code, err.message);
ret=0;
}
@@ -1431,7 +1431,7 @@ int ha_ndbcluster_cond::generate_scan_filter_from_key(NdbScanOperation *op,
uchar *buf)
{
KEY_PART_INFO* key_part= key_info->key_part;
- KEY_PART_INFO* end= key_part+key_info->key_parts;
+ KEY_PART_INFO* end= key_part+key_info->user_defined_key_parts;
NdbScanFilter filter(op, true); // abort on too large
int res;
DBUG_ENTER("generate_scan_filter_from_key");
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 09ab6d48eba..a5acd5759aa 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -1,5 +1,6 @@
/*
Copyright (c) 2005, 2013, Oracle and/or its affiliates.
+ Copyright (c) 2009, 2013, Monty Program Ab & SkySQL Ab
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -36,10 +37,6 @@
in the execution of queries. This functionality will grow with later
versions of MySQL.
- You can enable it in your buld by doing the following during your build
- process:
- ./configure --with-partition
-
The partition is setup to use table locks. It implements an partition "SHARE"
that is inserted into a hash by table name. You can use this to store
information of state that any partition handler object will be able to see
@@ -49,10 +46,6 @@
if this file.
*/
-#ifdef __GNUC__
-#pragma implementation // gcc: Class implementation
-#endif
-
#include "sql_priv.h"
#include "sql_parse.h" // append_file_to_dir
#include "create_options.h"
@@ -62,12 +55,25 @@
#include "sql_table.h" // tablename_to_filename
#include "key.h"
#include "sql_plugin.h"
-#include "table.h" /* HA_DATA_PARTITION */
#include "sql_show.h" // append_identifier
#include "sql_admin.h" // SQL_ADMIN_MSG_TEXT_SIZE
#include "debug_sync.h"
+/* First 4 bytes in the .par file is the number of 32-bit words in the file */
+#define PAR_WORD_SIZE 4
+/* offset to the .par file checksum */
+#define PAR_CHECKSUM_OFFSET 4
+/* offset to the total number of partitions */
+#define PAR_NUM_PARTS_OFFSET 8
+/* offset to the engines array */
+#define PAR_ENGINES_OFFSET 12
+#define PARTITION_ENABLED_TABLE_FLAGS (HA_FILE_BASED | HA_REC_NOT_IN_SEQ)
+#define PARTITION_DISABLED_TABLE_FLAGS (HA_CAN_GEOMETRY | \
+ HA_CAN_FULLTEXT | \
+ HA_DUPLICATE_POS | \
+ HA_CAN_SQL_HANDLER | \
+ HA_CAN_INSERT_DELAYED)
static const char *ha_par_ext= ".par";
/****************************************************************************
@@ -92,6 +98,24 @@ static const char *ha_partition_ext[]=
};
+#ifdef HAVE_PSI_INTERFACE
+PSI_mutex_key key_partition_auto_inc_mutex;
+
+static PSI_mutex_info all_partition_mutexes[]=
+{
+ { &key_partition_auto_inc_mutex, "Partition_share::auto_inc_mutex", 0}
+};
+
+static void init_partition_psi_keys(void)
+{
+ const char* category= "partition";
+ int count;
+
+ count= array_elements(all_partition_mutexes);
+ mysql_mutex_register(category, all_partition_mutexes, count);
+}
+#endif /* HAVE_PSI_INTERFACE */
+
static int partition_initialize(void *p)
{
@@ -108,9 +132,44 @@ static int partition_initialize(void *p)
HTON_TEMPORARY_NOT_SUPPORTED;
partition_hton->tablefile_extensions= ha_partition_ext;
+#ifdef HAVE_PSI_INTERFACE
+ init_partition_psi_keys();
+#endif
return 0;
}
+
+/**
+ Initialize and allocate space for partitions shares.
+
+ @param num_parts Number of partitions to allocate storage for.
+
+ @return Operation status.
+ @retval true Failure (out of memory).
+ @retval false Success.
+*/
+
+bool Partition_share::init(uint num_parts)
+{
+ DBUG_ENTER("Partition_share::init");
+ mysql_mutex_init(key_partition_auto_inc_mutex,
+ &auto_inc_mutex,
+ MY_MUTEX_INIT_FAST);
+ auto_inc_initialized= false;
+ partition_name_hash_initialized= false;
+ next_auto_inc_val= 0;
+ partitions_share_refs= new Parts_share_refs;
+ if (!partitions_share_refs)
+ DBUG_RETURN(true);
+ if (partitions_share_refs->init(num_parts))
+ {
+ delete partitions_share_refs;
+ DBUG_RETURN(true);
+ }
+ DBUG_RETURN(false);
+}
+
+
/*
Create new partition handler
@@ -165,7 +224,7 @@ static uint alter_table_flags(uint flags __attribute__((unused)))
HA_FAST_CHANGE_PARTITION);
}
-const uint ha_partition::NO_CURRENT_PART_ID= 0xFFFFFFFF;
+const uint32 ha_partition::NO_CURRENT_PART_ID= NOT_A_PARTITION_ID;
/*
Constructor method
@@ -238,6 +297,8 @@ ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share,
m_is_sub_partitioned= m_part_info->is_sub_partitioned();
m_is_clone_of= clone_arg;
m_clone_mem_root= clone_mem_root_arg;
+ part_share= clone_arg->part_share;
+ m_tot_parts= clone_arg->m_tot_parts;
DBUG_VOID_RETURN;
}
@@ -268,7 +329,6 @@ void ha_partition::init_handler_variables()
m_added_file= NULL;
m_tot_parts= 0;
m_pkey_is_clustered= 0;
- m_lock_type= F_UNLCK;
m_part_spec.start_part= NO_CURRENT_PART_ID;
m_scan_value= 2;
m_ref_length= 0;
@@ -289,7 +349,6 @@ void ha_partition::init_handler_variables()
m_rec_length= 0;
m_last_part= 0;
m_rec0= 0;
- m_err_rec= NULL;
m_curr_key_info[0]= NULL;
m_curr_key_info[1]= NULL;
m_part_func_monotonicity_info= NON_MONOTONIC;
@@ -304,6 +363,8 @@ void ha_partition::init_handler_variables()
m_is_sub_partitioned= 0;
m_is_clone_of= NULL;
m_clone_mem_root= NULL;
+ part_share= NULL;
+ m_new_partitions_share_refs.empty();
m_part_ids_sorted_by_num_of_records= NULL;
#ifdef DONT_HAVE_TO_BE_INITALIZED
@@ -313,6 +374,13 @@ void ha_partition::init_handler_variables()
}
+const char *ha_partition::table_type() const
+{
+ // we can do this since we only support a single engine type
+ return m_file[0]->table_type();
+}
+
+
/*
Destructor method
@@ -326,6 +394,8 @@ void ha_partition::init_handler_variables()
ha_partition::~ha_partition()
{
DBUG_ENTER("ha_partition::~ha_partition()");
+ if (m_new_partitions_share_refs.elements)
+ m_new_partitions_share_refs.delete_elements();
if (m_file != NULL)
{
uint i;
@@ -476,7 +546,7 @@ int ha_partition::delete_table(const char *name)
{
DBUG_ENTER("ha_partition::delete_table");
- DBUG_RETURN(del_ren_cre_table(name, NULL, NULL, NULL));
+ DBUG_RETURN(del_ren_table(name, NULL));
}
@@ -506,7 +576,7 @@ int ha_partition::rename_table(const char *from, const char *to)
{
DBUG_ENTER("ha_partition::rename_table");
- DBUG_RETURN(del_ren_cre_table(from, to, NULL, NULL));
+ DBUG_RETURN(del_ren_table(from, to));
}
@@ -595,24 +665,86 @@ int ha_partition::create_partitioning_metadata(const char *path,
int ha_partition::create(const char *name, TABLE *table_arg,
HA_CREATE_INFO *create_info)
{
- char t_name[FN_REFLEN];
+ int error;
+ char name_buff[FN_REFLEN], name_lc_buff[FN_REFLEN];
+ char *name_buffer_ptr;
+ const char *path;
+ uint i;
+ List_iterator_fast <partition_element> part_it(m_part_info->partitions);
+ partition_element *part_elem;
+ handler **file, **abort_file;
DBUG_ENTER("ha_partition::create");
- if (create_info->used_fields & HA_CREATE_USED_CONNECTION)
+ DBUG_ASSERT(*fn_rext((char*)name) == '\0');
+
+ /* Not allowed to create temporary partitioned tables */
+ if (create_info && create_info->options & HA_LEX_CREATE_TMP_TABLE)
{
- my_error(ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0),
- "CONNECTION not valid for partition");
- DBUG_RETURN(1);
+ my_error(ER_PARTITION_NO_TEMPORARY, MYF(0));
+ DBUG_RETURN(TRUE);
}
- strmov(t_name, name);
- DBUG_ASSERT(*fn_rext((char*)name) == '\0');
- if (del_ren_cre_table(t_name, NULL, table_arg, create_info))
+ if (get_from_handler_file(name, ha_thd()->mem_root, false))
+ DBUG_RETURN(TRUE);
+ DBUG_ASSERT(m_file_buffer);
+ DBUG_PRINT("enter", ("name: (%s)", name));
+ name_buffer_ptr= m_name_buffer_ptr;
+ file= m_file;
+ /*
+ Since ha_partition has HA_FILE_BASED, it must alter underlying table names
+ if they do not have HA_FILE_BASED and lower_case_table_names == 2.
+ See Bug#37402, for Mac OS X.
+ The appended #P#<partname>[#SP#<subpartname>] will remain in current case.
+ Using the first partitions handler, since mixing handlers is not allowed.
+ */
+ path= get_canonical_filename(*file, name, name_lc_buff);
+ for (i= 0; i < m_part_info->num_parts; i++)
{
- handler::delete_table(t_name);
- DBUG_RETURN(1);
+ part_elem= part_it++;
+ if (m_is_sub_partitioned)
+ {
+ uint j;
+ List_iterator_fast <partition_element> sub_it(part_elem->subpartitions);
+ for (j= 0; j < m_part_info->num_subparts; j++)
+ {
+ part_elem= sub_it++;
+ create_partition_name(name_buff, path, name_buffer_ptr,
+ NORMAL_PART_NAME, FALSE);
+ if ((error= set_up_table_before_create(table_arg, name_buff,
+ create_info, part_elem)) ||
+ ((error= (*file)->ha_create(name_buff, table_arg, create_info))))
+ goto create_error;
+
+ name_buffer_ptr= strend(name_buffer_ptr) + 1;
+ file++;
+ }
+ }
+ else
+ {
+ create_partition_name(name_buff, path, name_buffer_ptr,
+ NORMAL_PART_NAME, FALSE);
+ if ((error= set_up_table_before_create(table_arg, name_buff,
+ create_info, part_elem)) ||
+ ((error= (*file)->ha_create(name_buff, table_arg, create_info))))
+ goto create_error;
+
+ name_buffer_ptr= strend(name_buffer_ptr) + 1;
+ file++;
+ }
}
DBUG_RETURN(0);
+
+create_error:
+ name_buffer_ptr= m_name_buffer_ptr;
+ for (abort_file= file, file= m_file; file < abort_file; file++)
+ {
+ create_partition_name(name_buff, path, name_buffer_ptr, NORMAL_PART_NAME,
+ FALSE);
+ (void) (*file)->ha_delete_table((const char*) name_buff);
+ name_buffer_ptr= strend(name_buffer_ptr) + 1;
+ }
+ handler::delete_table(name);
+ DBUG_RETURN(error);
}
@@ -993,7 +1125,8 @@ int ha_partition::repair(THD *thd, HA_CHECK_OPT *check_opt)
{
DBUG_ENTER("ha_partition::repair");
- DBUG_RETURN(handle_opt_partitions(thd, check_opt, REPAIR_PARTS));
+ int res= handle_opt_partitions(thd, check_opt, REPAIR_PARTS);
+ DBUG_RETURN(res);
}
/**
@@ -1049,11 +1182,10 @@ int ha_partition::preload_keys(THD *thd, HA_CHECK_OPT *check_opt)
0 Success
*/
-int ha_partition::handle_opt_part(THD *thd, HA_CHECK_OPT *check_opt,
- uint part_id, uint flag)
+static int handle_opt_part(THD *thd, HA_CHECK_OPT *check_opt,
+ handler *file, uint flag)
{
int error;
- handler *file= m_file[part_id];
DBUG_ENTER("handle_opt_part");
DBUG_PRINT("enter", ("flag = %u", flag));
@@ -1062,27 +1194,9 @@ int ha_partition::handle_opt_part(THD *thd, HA_CHECK_OPT *check_opt,
else if (flag == ANALYZE_PARTS)
error= file->ha_analyze(thd, check_opt);
else if (flag == CHECK_PARTS)
- {
error= file->ha_check(thd, check_opt);
- if (!error ||
- error == HA_ADMIN_ALREADY_DONE ||
- error == HA_ADMIN_NOT_IMPLEMENTED)
- {
- if (check_opt->flags & (T_MEDIUM | T_EXTEND))
- error= check_misplaced_rows(part_id, false);
- }
- }
else if (flag == REPAIR_PARTS)
- {
error= file->ha_repair(thd, check_opt);
- if (!error ||
- error == HA_ADMIN_ALREADY_DONE ||
- error == HA_ADMIN_NOT_IMPLEMENTED)
- {
- if (check_opt->flags & (T_MEDIUM | T_EXTEND))
- error= check_misplaced_rows(part_id, true);
- }
- }
else if (flag == ASSIGN_KEYCACHE_PARTS)
error= file->assign_to_keycache(thd, check_opt);
else if (flag == PRELOAD_KEYS_PARTS)
@@ -1187,7 +1301,7 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt,
when ALTER TABLE <CMD> PARTITION ...
it should only do named partitions, otherwise all partitions
*/
- if (!(thd->lex->alter_info.flags & ALTER_ADMIN_PARTITION) ||
+ if (!(thd->lex->alter_info.flags & Alter_info::ALTER_ADMIN_PARTITION) ||
part_elem->part_state == PART_ADMIN)
{
if (m_is_sub_partitioned)
@@ -1201,7 +1315,7 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt,
part= i * num_subparts + j;
DBUG_PRINT("info", ("Optimize subpartition %u (%s)",
part, sub_elem->partition_name));
- if ((error= handle_opt_part(thd, check_opt, part, flag)))
+ if ((error= handle_opt_part(thd, check_opt, m_file[part], flag)))
{
/* print a line which partition the error belongs to */
if (error != HA_ADMIN_NOT_IMPLEMENTED &&
@@ -1227,7 +1341,7 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt,
{
DBUG_PRINT("info", ("Optimize partition %u (%s)", i,
part_elem->partition_name));
- if ((error= handle_opt_part(thd, check_opt, i, flag)))
+ if ((error= handle_opt_part(thd, check_opt, m_file[i], flag)))
{
/* print a line which partition the error belongs to */
if (error != HA_ADMIN_NOT_IMPLEMENTED &&
@@ -1261,6 +1375,8 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt,
@retval TRUE Error/Not supported
@retval FALSE Success
+
+ @note Called if open_table_from_share fails and ::is_crashed().
*/
bool ha_partition::check_and_repair(THD *thd)
@@ -1340,9 +1456,25 @@ int ha_partition::prepare_new_partition(TABLE *tbl,
int error;
DBUG_ENTER("prepare_new_partition");
- if ((error= set_up_table_before_create(tbl, part_name, create_info,
- 0, p_elem)))
+ /*
+ This call to set_up_table_before_create() is done for an alter table.
+ So this may be the second time around for this partition_element,
+ depending on how many partitions and subpartitions there were before,
+ and how many there are now.
+ The first time, on the CREATE, data_file_name and index_file_name
+ came from the parser. They did not have the file name attached to
+ the end. But if this partition is less than the total number of
+ previous partitions, it's data_file_name has the filename attached.
+ So we need to take the partition filename off if it exists.
+ That file name may be different from part_name, which will be
+ attached in append_file_to_dir().
+ */
+ truncate_partition_filename(p_elem->data_file_name);
+ truncate_partition_filename(p_elem->index_file_name);
+
+ if ((error= set_up_table_before_create(tbl, part_name, create_info, p_elem)))
goto error_create;
+
tbl->s->connect_string = p_elem->connect_string;
if ((error= file->ha_create(part_name, tbl, create_info)))
{
@@ -1358,7 +1490,8 @@ int ha_partition::prepare_new_partition(TABLE *tbl,
goto error_create;
}
DBUG_PRINT("info", ("partition %s created", part_name));
- if ((error= file->ha_open(tbl, part_name, m_mode, m_open_test_lock)))
+ if ((error= file->ha_open(tbl, part_name, m_mode,
+ m_open_test_lock | HA_OPEN_NO_PSI_CALL)))
goto error_open;
DBUG_PRINT("info", ("partition %s opened", part_name));
/*
@@ -1519,7 +1652,7 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
(m_reorged_parts + 1))))
{
mem_alloc_error(sizeof(handler*)*(m_reorged_parts+1));
- DBUG_RETURN(ER_OUTOFMEMORY);
+ DBUG_RETURN(HA_ERR_OUT_OF_MEM);
}
/*
@@ -1551,7 +1684,7 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
(2*(num_remain_partitions + 1)))))
{
mem_alloc_error(sizeof(handler*)*2*(num_remain_partitions+1));
- DBUG_RETURN(ER_OUTOFMEMORY);
+ DBUG_RETURN(HA_ERR_OUT_OF_MEM);
}
m_added_file= &new_file_array[num_remain_partitions + 1];
@@ -1621,15 +1754,33 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
part_elem->part_state == PART_TO_BE_ADDED)
{
uint j= 0;
+ Parts_share_refs *p_share_refs;
+ /*
+ The Handler_shares for each partition's handler can be allocated
+ within this handler, since there will not be any more instances of the
+ new partitions, until the table is reopened after the ALTER succeeded.
+ */
+ p_share_refs= new Parts_share_refs;
+ if (!p_share_refs)
+ DBUG_RETURN(HA_ERR_OUT_OF_MEM);
+ if (p_share_refs->init(num_subparts))
+ DBUG_RETURN(HA_ERR_OUT_OF_MEM);
+ if (m_new_partitions_share_refs.push_back(p_share_refs))
+ DBUG_RETURN(HA_ERR_OUT_OF_MEM);
do
{
- if (!(new_file_array[part_count++]=
+ handler **new_file= &new_file_array[part_count++];
+ if (!(*new_file=
get_new_handler(table->s,
thd->mem_root,
part_elem->engine_type)))
{
mem_alloc_error(sizeof(handler));
- DBUG_RETURN(ER_OUTOFMEMORY);
+ DBUG_RETURN(HA_ERR_OUT_OF_MEM);
+ }
+ if ((*new_file)->set_ha_share_ref(&p_share_refs->ha_shares[j]))
+ {
+ DBUG_RETURN(HA_ERR_OUT_OF_MEM);
}
} while (++j < num_subparts);
if (part_elem->part_state == PART_CHANGED)
@@ -1785,7 +1936,7 @@ int ha_partition::copy_partitions(ulonglong * const copied,
late_extra_cache(reorg_part);
if ((result= file->ha_rnd_init_with_error(1)))
- goto error;
+ goto init_error;
while (TRUE)
{
if ((result= file->ha_rnd_next(m_rec0)))
@@ -1830,10 +1981,10 @@ int ha_partition::copy_partitions(ulonglong * const copied,
DBUG_RETURN(FALSE);
error:
m_reorged_file[reorg_part]->ha_rnd_end();
+init_error:
DBUG_RETURN(result);
}
-
/*
Update create info as part of ALTER TABLE
@@ -1845,11 +1996,16 @@ error:
NONE
DESCRIPTION
- Method empty so far
+ Forward this handler call to the storage engine foreach
+ partition handler. The data_file_name for each partition may
+ need to be reset if the tablespace was moved. Use a dummy
+ HA_CREATE_INFO structure and transfer necessary data.
*/
void ha_partition::update_create_info(HA_CREATE_INFO *create_info)
{
+ DBUG_ENTER("ha_partition::update_create_info");
+
/*
Fix for bug#38751, some engines needs info-calls in ALTER.
Archive need this since it flushes in ::info.
@@ -1863,13 +2019,130 @@ void ha_partition::update_create_info(HA_CREATE_INFO *create_info)
if (!(create_info->used_fields & HA_CREATE_USED_AUTO))
create_info->auto_increment_value= stats.auto_increment_value;
+ /*
+ DATA DIRECTORY and INDEX DIRECTORY are never applied to the whole
+ partitioned table, only its parts.
+ */
+ my_bool from_alter = (create_info->data_file_name == (const char*) -1);
create_info->data_file_name= create_info->index_file_name = NULL;
+
create_info->connect_string.str= NULL;
create_info->connect_string.length= 0;
- return;
+
+ /*
+ We do not need to update the individual partition DATA DIRECTORY settings
+ since they can be changed by ALTER TABLE ... REORGANIZE PARTITIONS.
+ */
+ if (from_alter)
+ DBUG_VOID_RETURN;
+
+ /*
+ send Handler::update_create_info() to the storage engine for each
+ partition that currently has a handler object. Using a dummy
+ HA_CREATE_INFO structure to collect DATA and INDEX DIRECTORYs.
+ */
+
+ List_iterator<partition_element> part_it(m_part_info->partitions);
+ partition_element *part_elem, *sub_elem;
+ uint num_subparts= m_part_info->num_subparts;
+ uint num_parts = num_subparts ? m_file_tot_parts / num_subparts
+ : m_file_tot_parts;
+ HA_CREATE_INFO dummy_info;
+ memset(&dummy_info, 0, sizeof(dummy_info));
+
+ /*
+ Since update_create_info() can be called from mysql_prepare_alter_table()
+ when not all handlers are set up, we look for that condition first.
+ If all handlers are not available, do not call update_create_info for any.
+ */
+ uint i, j, part;
+ for (i= 0; i < num_parts; i++)
+ {
+ part_elem= part_it++;
+ if (!part_elem)
+ DBUG_VOID_RETURN;
+ if (m_is_sub_partitioned)
+ {
+ List_iterator<partition_element> subpart_it(part_elem->subpartitions);
+ for (j= 0; j < num_subparts; j++)
+ {
+ sub_elem= subpart_it++;
+ if (!sub_elem)
+ DBUG_VOID_RETURN;
+ part= i * num_subparts + j;
+ if (part >= m_file_tot_parts || !m_file[part])
+ DBUG_VOID_RETURN;
+ }
+ }
+ else
+ {
+ if (!m_file[i])
+ DBUG_VOID_RETURN;
+ }
+ }
+ part_it.rewind();
+
+ for (i= 0; i < num_parts; i++)
+ {
+ part_elem= part_it++;
+ DBUG_ASSERT(part_elem);
+ if (m_is_sub_partitioned)
+ {
+ List_iterator<partition_element> subpart_it(part_elem->subpartitions);
+ for (j= 0; j < num_subparts; j++)
+ {
+ sub_elem= subpart_it++;
+ DBUG_ASSERT(sub_elem);
+ part= i * num_subparts + j;
+ DBUG_ASSERT(part < m_file_tot_parts && m_file[part]);
+ if (ha_legacy_type(m_file[part]->ht) == DB_TYPE_INNODB)
+ {
+ dummy_info.data_file_name= dummy_info.index_file_name = NULL;
+ m_file[part]->update_create_info(&dummy_info);
+
+ if (dummy_info.data_file_name || sub_elem->data_file_name)
+ {
+ sub_elem->data_file_name = (char*) dummy_info.data_file_name;
+ }
+ if (dummy_info.index_file_name || sub_elem->index_file_name)
+ {
+ sub_elem->index_file_name = (char*) dummy_info.index_file_name;
+ }
+ }
+ }
+ }
+ else
+ {
+ DBUG_ASSERT(m_file[i]);
+ if (ha_legacy_type(m_file[i]->ht) == DB_TYPE_INNODB)
+ {
+ dummy_info.data_file_name= dummy_info.index_file_name= NULL;
+ m_file[i]->update_create_info(&dummy_info);
+ if (dummy_info.data_file_name || part_elem->data_file_name)
+ {
+ part_elem->data_file_name = (char*) dummy_info.data_file_name;
+ }
+ if (dummy_info.index_file_name || part_elem->index_file_name)
+ {
+ part_elem->index_file_name = (char*) dummy_info.index_file_name;
+ }
+ }
+ }
+ }
+ DBUG_VOID_RETURN;
}
+/**
+ Change the internal TABLE_SHARE pointer
+
+ @param table_arg TABLE object
+ @param share New share to use
+
+ @note Is used in error handling in ha_delete_table.
+ All handlers should exist (lock_partitions should not be used)
+*/
+
void ha_partition::change_table_ptr(TABLE *table_arg, TABLE_SHARE *share)
{
handler **file_array;
@@ -1920,34 +2193,25 @@ char *ha_partition::update_table_comment(const char *comment)
}
+/**
+ Handle delete and rename table
-/*
- Handle delete, rename and create table
-
- SYNOPSIS
- del_ren_cre_table()
- from Full path of old table
- to Full path of new table
- table_arg Table object
- create_info Create info
+ @param from Full path of old table
+ @param to Full path of new table
- RETURN VALUE
- >0 Error
- 0 Success
+ @return Operation status
+ @retval >0 Error
+ @retval 0 Success
- DESCRIPTION
- Common routine to handle delete_table and rename_table.
- The routine uses the partition handler file to get the
- names of the partition instances. Both these routines
- are called after creating the handler without table
- object and thus the file is needed to discover the
- names of the partitions and the underlying storage engines.
+ @note Common routine to handle delete_table and rename_table.
+ The routine uses the partition handler file to get the
+ names of the partition instances. Both these routines
+ are called after creating the handler without table
+ object and thus the file is needed to discover the
+ names of the partitions and the underlying storage engines.
*/
-uint ha_partition::del_ren_cre_table(const char *from,
- const char *to,
- TABLE *table_arg,
- HA_CREATE_INFO *create_info)
+uint ha_partition::del_ren_table(const char *from, const char *to)
{
int save_error= 0;
int error;
@@ -1958,14 +2222,7 @@ uint ha_partition::del_ren_cre_table(const char *from,
const char *to_path= NULL;
uint i;
handler **file, **abort_file;
- DBUG_ENTER("del_ren_cre_table()");
-
- /* Not allowed to create temporary partitioned tables */
- if (create_info && create_info->options & HA_LEX_CREATE_TMP_TABLE)
- {
- my_error(ER_PARTITION_NO_TEMPORARY, MYF(0));
- DBUG_RETURN(TRUE);
- }
+ DBUG_ENTER("ha_partition::del_ren_table");
if (get_from_handler_file(from, ha_thd()->mem_root, false))
DBUG_RETURN(TRUE);
@@ -1973,7 +2230,7 @@ uint ha_partition::del_ren_cre_table(const char *from,
DBUG_PRINT("enter", ("from: (%s) to: (%s)", from, to ? to : "(nil)"));
name_buffer_ptr= m_name_buffer_ptr;
file= m_file;
- if (to == NULL && table_arg == NULL)
+ if (to == NULL)
{
/*
Delete table, start by delete the .par file. If error, break, otherwise
@@ -1999,23 +2256,16 @@ uint ha_partition::del_ren_cre_table(const char *from,
NORMAL_PART_NAME, FALSE);
if (to != NULL)
- { // Rename branch
+ { // Rename branch
create_partition_name(to_buff, to_path, name_buffer_ptr,
NORMAL_PART_NAME, FALSE);
error= (*file)->ha_rename_table(from_buff, to_buff);
if (error)
goto rename_error;
}
- else if (table_arg == NULL) // delete branch
- error= (*file)->ha_delete_table(from_buff);
- else
+ else // delete branch
{
- if ((error= set_up_table_before_create(table_arg, from_buff,
- create_info, i, NULL)) ||
- parse_engine_table_options(ha_thd(), (*file)->ht,
- (*file)->table_share) ||
- ((error= (*file)->ha_create(from_buff, table_arg, create_info))))
- goto create_error;
+ error= (*file)->ha_delete_table(from_buff);
}
name_buffer_ptr= strend(name_buffer_ptr) + 1;
if (error)
@@ -2032,16 +2282,6 @@ uint ha_partition::del_ren_cre_table(const char *from,
}
}
DBUG_RETURN(save_error);
-create_error:
- name_buffer_ptr= m_name_buffer_ptr;
- for (abort_file= file, file= m_file; file < abort_file; file++)
- {
- create_partition_name(from_buff, from_path, name_buffer_ptr, NORMAL_PART_NAME,
- FALSE);
- (void) (*file)->ha_delete_table((const char*) from_buff);
- name_buffer_ptr= strend(name_buffer_ptr) + 1;
- }
- DBUG_RETURN(error);
rename_error:
name_buffer_ptr= m_name_buffer_ptr;
for (abort_file= file, file= m_file; file < abort_file; file++)
@@ -2058,47 +2298,6 @@ rename_error:
DBUG_RETURN(error);
}
-/*
- Find partition based on partition id
-
- SYNOPSIS
- find_partition_element()
- part_id Partition id of partition looked for
-
- RETURN VALUE
- >0 Reference to partition_element
- 0 Partition not found
-*/
-
-partition_element *ha_partition::find_partition_element(uint part_id)
-{
- uint i;
- uint curr_part_id= 0;
- List_iterator_fast <partition_element> part_it(m_part_info->partitions);
-
- for (i= 0; i < m_part_info->num_parts; i++)
- {
- partition_element *part_elem;
- part_elem= part_it++;
- if (m_is_sub_partitioned)
- {
- uint j;
- List_iterator_fast <partition_element> sub_it(part_elem->subpartitions);
- for (j= 0; j < m_part_info->num_subparts; j++)
- {
- part_elem= sub_it++;
- if (part_id == curr_part_id++)
- return part_elem;
- }
- }
- else if (part_id == curr_part_id++)
- return part_elem;
- }
- DBUG_ASSERT(0);
- my_error(ER_OUT_OF_RESOURCES, MYF(ME_FATALERROR));
- return NULL;
-}
-
uint ha_partition::count_query_cache_dependant_tables(uint8 *tables_type)
{
DBUG_ENTER("ha_partition::count_query_cache_dependant_tables");
@@ -2115,26 +2314,27 @@ uint ha_partition::count_query_cache_dependant_tables(uint8 *tables_type)
DBUG_RETURN(type == HA_CACHE_TBL_ASKTRANSACT ? m_tot_parts : 0);
}
-my_bool ha_partition::reg_query_cache_dependant_table(THD *thd,
- char *key, uint key_len,
- uint8 type,
- Query_cache *cache,
- Query_cache_block_table **block_table,
- handler *file,
- uint *n)
+my_bool ha_partition::
+reg_query_cache_dependant_table(THD *thd,
+ char *engine_key, uint engine_key_len,
+ char *cache_key, uint cache_key_len,
+ uint8 type,
+ Query_cache *cache,
+ Query_cache_block_table **block_table,
+ handler *file,
+ uint *n)
{
DBUG_ENTER("ha_partition::reg_query_cache_dependant_table");
qc_engine_callback engine_callback;
ulonglong engine_data;
/* ask undelying engine */
- if (!file->register_query_cache_table(thd, key,
- key_len,
+ if (!file->register_query_cache_table(thd, engine_key,
+ engine_key_len,
&engine_callback,
&engine_data))
{
- DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s",
- key,
- key + table_share->db.length + 1));
+ DBUG_PRINT("qcache", ("Handler does not allow caching for %.*s",
+ engine_key_len, engine_key));
/*
As this can change from call to call, don't reset set
thd->lex->safe_to_cache_query
@@ -2143,9 +2343,11 @@ my_bool ha_partition::reg_query_cache_dependant_table(THD *thd,
DBUG_RETURN(TRUE);
}
(++(*block_table))->n= ++(*n);
- if (!cache->insert_table(key_len,
- key, (*block_table),
+ if (!cache->insert_table(cache_key_len,
+ cache_key, (*block_table),
table_share->db.length,
+ (uint8) (cache_key_len -
+ table_share->table_cache_key.length),
type,
engine_callback, engine_data,
FALSE))
@@ -2154,19 +2356,19 @@ my_bool ha_partition::reg_query_cache_dependant_table(THD *thd,
}
-my_bool ha_partition::register_query_cache_dependant_tables(THD *thd,
- Query_cache *cache,
- Query_cache_block_table **block_table,
- uint *n)
+my_bool ha_partition::
+register_query_cache_dependant_tables(THD *thd,
+ Query_cache *cache,
+ Query_cache_block_table **block_table,
+ uint *n)
{
- char *name;
- uint prefix_length= table_share->table_cache_key.length + 3;
+ char *engine_key_end, *query_cache_key_end;
+ uint i;
uint num_parts= m_part_info->num_parts;
uint num_subparts= m_part_info->num_subparts;
- uint i= 0;
+ int diff_length;
List_iterator<partition_element> part_it(m_part_info->partitions);
- char key[FN_REFLEN];
-
+ char engine_key[FN_REFLEN], query_cache_key[FN_REFLEN];
DBUG_ENTER("ha_partition::register_query_cache_dependant_tables");
/* see ha_partition::count_query_cache_dependant_tables */
@@ -2174,36 +2376,51 @@ my_bool ha_partition::register_query_cache_dependant_tables(THD *thd,
DBUG_RETURN(FALSE); // nothing to register
/* prepare static part of the key */
- memmove(key, table_share->table_cache_key.str,
- table_share->table_cache_key.length);
+ memcpy(engine_key, table_share->normalized_path.str,
+ table_share->normalized_path.length);
+ memcpy(query_cache_key, table_share->table_cache_key.str,
+ table_share->table_cache_key.length);
- name= key + table_share->table_cache_key.length - 1;
- name[0]= name[2]= '#';
- name[1]= 'P';
- name+= 3;
+ diff_length= ((int) table_share->table_cache_key.length -
+ (int) table_share->normalized_path.length -1);
+ engine_key_end= engine_key + table_share->normalized_path.length;
+ query_cache_key_end= query_cache_key + table_share->table_cache_key.length -1;
+
+ engine_key_end[0]= engine_key_end[2]= query_cache_key_end[0]=
+ query_cache_key_end[2]= '#';
+ query_cache_key_end[1]= engine_key_end[1]= 'P';
+ engine_key_end+= 3;
+ query_cache_key_end+= 3;
+
+ i= 0;
do
{
partition_element *part_elem= part_it++;
- uint part_len= strmov(name, part_elem->partition_name) - name;
+ char *engine_pos= strmov(engine_key_end, part_elem->partition_name);
if (m_is_sub_partitioned)
{
List_iterator<partition_element> subpart_it(part_elem->subpartitions);
partition_element *sub_elem;
- char *sname= name + part_len;
uint j= 0, part;
- sname[0]= sname[3]= '#';
- sname[1]= 'S';
- sname[2]= 'P';
- sname += 4;
+ engine_pos[0]= engine_pos[3]= '#';
+ engine_pos[1]= 'S';
+ engine_pos[2]= 'P';
+ engine_pos += 4;
do
{
+ char *end;
+ uint length;
sub_elem= subpart_it++;
part= i * num_subparts + j;
- uint spart_len= strmov(sname, sub_elem->partition_name) - name + 1;
- if (reg_query_cache_dependant_table(thd, key,
- prefix_length + part_len + 4 +
- spart_len,
+ /* we store the end \0 as part of the key */
+ end= strmov(engine_pos, sub_elem->partition_name);
+ length= end - engine_key;
+ /* Copy the suffix also to query cache key */
+ memcpy(query_cache_key_end, engine_key_end, (end - engine_key_end));
+ if (reg_query_cache_dependant_table(thd, engine_key, length,
+ query_cache_key,
+ length + diff_length,
m_file[part]->table_cache_type(),
cache,
block_table, m_file[part],
@@ -2213,8 +2430,13 @@ my_bool ha_partition::register_query_cache_dependant_tables(THD *thd,
}
else
{
- if (reg_query_cache_dependant_table(thd, key,
- prefix_length + part_len + 1,
+ char *end= engine_pos+1; // copy end \0
+ uint length= end - engine_key;
+ /* Copy the suffix also to query cache key */
+ memcpy(query_cache_key_end, engine_key_end, (end - engine_key_end));
+ if (reg_query_cache_dependant_table(thd, engine_key, length,
+ query_cache_key,
+ length + diff_length,
m_file[i]->table_cache_type(),
cache,
block_table, m_file[i],
@@ -2227,31 +2449,28 @@ my_bool ha_partition::register_query_cache_dependant_tables(THD *thd,
}
-/*
- Set up table share object before calling create on underlying handler
-
- SYNOPSIS
- set_up_table_before_create()
- table Table object
- info Create info
- part_id Partition id of partition to set-up
+/**
+ Set up table share object before calling create on underlying handler
- RETURN VALUE
- TRUE Error
- FALSE Success
+ @param table Table object
+ @param info Create info
+ @param part_elem[in,out] Pointer to used partition_element, searched if NULL
- DESCRIPTION
- Set up
- 1) Comment on partition
- 2) MAX_ROWS, MIN_ROWS on partition
- 3) Index file name on partition
- 4) Data file name on partition
+ @return status
+ @retval TRUE Error
+ @retval FALSE Success
+
+ @details
+ Set up
+ 1) Comment on partition
+ 2) MAX_ROWS, MIN_ROWS on partition
+ 3) Index file name on partition
+ 4) Data file name on partition
*/
int ha_partition::set_up_table_before_create(TABLE *tbl,
const char *partition_name_with_path,
HA_CREATE_INFO *info,
- uint part_id,
partition_element *part_elem)
{
int error= 0;
@@ -2259,12 +2478,10 @@ int ha_partition::set_up_table_before_create(TABLE *tbl,
THD *thd= ha_thd();
DBUG_ENTER("set_up_table_before_create");
+ DBUG_ASSERT(part_elem);
+
if (!part_elem)
- {
- part_elem= find_partition_element(part_id);
- if (!part_elem)
- DBUG_RETURN(1); // Fatal error
- }
+ DBUG_RETURN(1);
tbl->s->max_rows= part_elem->part_max_rows;
tbl->s->min_rows= part_elem->part_min_rows;
partition_name= strrchr(partition_name_with_path, FN_LIBCHAR);
@@ -2399,10 +2616,8 @@ bool ha_partition::create_handler_file(const char *name)
/* 4 static words (tot words, checksum, tot partitions, name length) */
tot_len_words= 4 + tot_partition_words + tot_name_words;
tot_len_byte= PAR_WORD_SIZE * tot_len_words;
- file_buffer= (uchar *) my_alloca(tot_len_byte);
- if (!file_buffer)
+ if (!(file_buffer= (uchar *) my_malloc(tot_len_byte, MYF(MY_ZEROFILL))))
DBUG_RETURN(TRUE);
- bzero(file_buffer, tot_len_byte);
engine_array= (file_buffer + PAR_ENGINES_OFFSET);
name_buffer_ptr= (char*) (engine_array + tot_partition_words * PAR_WORD_SIZE
+ PAR_WORD_SIZE);
@@ -2483,7 +2698,7 @@ bool ha_partition::create_handler_file(const char *name)
}
else
result= TRUE;
- my_afree((char*) file_buffer);
+ my_free(file_buffer);
DBUG_RETURN(result);
}
@@ -2527,8 +2742,7 @@ bool ha_partition::create_handlers(MEM_ROOT *mem_root)
for (i= 0; i < m_tot_parts; i++)
{
handlerton *hton= plugin_data(m_engine_array[i], handlerton*);
- if (!(m_file[i]= get_new_handler(table_share, mem_root,
- hton)))
+ if (!(m_file[i]= get_new_handler(table_share, mem_root, hton)))
DBUG_RETURN(TRUE);
DBUG_PRINT("info", ("engine_type: %u", hton->db_type));
}
@@ -2635,9 +2849,10 @@ error_end:
bool ha_partition::read_par_file(const char *name)
{
- char buff[FN_REFLEN], *tot_name_len_offset;
+ char buff[FN_REFLEN];
+ uchar *tot_name_len_offset;
File file;
- char *file_buffer;
+ uchar *file_buffer;
uint i, len_bytes, len_words, tot_partition_words, tot_name_words, chksum;
DBUG_ENTER("ha_partition::read_par_file");
DBUG_PRINT("enter", ("table name: '%s'", name));
@@ -2656,9 +2871,9 @@ bool ha_partition::read_par_file(const char *name)
len_bytes= PAR_WORD_SIZE * len_words;
if (mysql_file_seek(file, 0, MY_SEEK_SET, MYF(0)) == MY_FILEPOS_ERROR)
goto err1;
- if (!(file_buffer= (char*) alloc_root(&m_mem_root, len_bytes)))
+ if (!(file_buffer= (uchar*) alloc_root(&m_mem_root, len_bytes)))
goto err1;
- if (mysql_file_read(file, (uchar *) file_buffer, len_bytes, MYF(MY_NABP)))
+ if (mysql_file_read(file, file_buffer, len_bytes, MYF(MY_NABP)))
goto err2;
chksum= 0;
@@ -2681,7 +2896,7 @@ bool ha_partition::read_par_file(const char *name)
if (len_words != (tot_partition_words + tot_name_words + 4))
goto err2;
m_file_buffer= file_buffer; // Will be freed in clear_handler_file()
- m_name_buffer_ptr= tot_name_len_offset + PAR_WORD_SIZE;
+ m_name_buffer_ptr= (char*) (tot_name_len_offset + PAR_WORD_SIZE);
if (!(m_connect_string= (LEX_STRING*)
alloc_root(&m_mem_root, m_tot_parts * sizeof(LEX_STRING))))
@@ -2731,7 +2946,8 @@ bool ha_partition::setup_engine_array(MEM_ROOT *mem_root)
{
uint i;
uchar *buff;
- handlerton **engine_array;
+ handlerton **engine_array, *first_engine;
+ enum legacy_db_type db_type, first_db_type;
DBUG_ASSERT(!m_file);
DBUG_ENTER("ha_partition::setup_engine_array");
@@ -2740,22 +2956,36 @@ bool ha_partition::setup_engine_array(MEM_ROOT *mem_root)
DBUG_RETURN(true);
buff= (uchar *) (m_file_buffer + PAR_ENGINES_OFFSET);
- for (i= 0; i < m_tot_parts; i++)
- {
- engine_array[i]= ha_resolve_by_legacy_type(ha_thd(),
- (enum legacy_db_type)
- *(buff + i));
- if (!engine_array[i])
- goto err;
- }
+ first_db_type= (enum legacy_db_type) buff[0];
+ first_engine= ha_resolve_by_legacy_type(ha_thd(), first_db_type);
+ if (!first_engine)
+ goto err;
+
if (!(m_engine_array= (plugin_ref*)
alloc_root(&m_mem_root, m_tot_parts * sizeof(plugin_ref))))
goto err;
for (i= 0; i < m_tot_parts; i++)
- m_engine_array[i]= ha_lock_engine(NULL, engine_array[i]);
+ {
+ db_type= (enum legacy_db_type) buff[i];
+ if (db_type != first_db_type)
+ {
+ DBUG_PRINT("error", ("partition %u engine %d is not same as "
+ "first partition %d", i, db_type,
+ (int) first_db_type));
+ DBUG_ASSERT(0);
+ clear_handler_file();
+ goto err;
+ }
+ m_engine_array[i]= ha_lock_engine(NULL, first_engine);
+ if (!m_engine_array[i])
+ {
+ clear_handler_file();
+ goto err;
+ }
+ }
- my_afree(engine_array);
+ my_afree((void*) engine_array);
if (create_handlers(mem_root))
{
@@ -2766,7 +2996,7 @@ bool ha_partition::setup_engine_array(MEM_ROOT *mem_root)
DBUG_RETURN(false);
err:
- my_afree(engine_array);
+ my_afree((void*) engine_array);
DBUG_RETURN(true);
}
@@ -2809,19 +3039,298 @@ bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root,
MODULE open/close object
****************************************************************************/
+/**
+ Get the partition name.
+
+ @param part Struct containing name and length
+ @param[out] length Length of the name
+
+ @return Partition name
+*/
+
+static uchar *get_part_name(PART_NAME_DEF *part, size_t *length,
+ my_bool not_used __attribute__((unused)))
+{
+ *length= part->length;
+ return part->partition_name;
+}
+
+
+/**
+ Insert a partition name in the partition_name_hash.
+
+ @param name Name of partition
+ @param part_id Partition id (number)
+ @param is_subpart Set if the name belongs to a subpartition
+
+ @return Operation status
+ @retval true Failure
+ @retval false Success
+*/
+
+bool ha_partition::insert_partition_name_in_hash(const char *name, uint part_id,
+ bool is_subpart)
+{
+ PART_NAME_DEF *part_def;
+ uchar *part_name;
+ uint part_name_length;
+ DBUG_ENTER("ha_partition::insert_partition_name_in_hash");
+ /*
+ Calculate and store the length here, to avoid doing it when
+ searching the hash.
+ */
+ part_name_length= strlen(name);
+ /*
+ Must use memory that lives as long as table_share.
+ Freed in the Partition_share destructor.
+ Since we use my_multi_malloc, then my_free(part_def) will also free
+ part_name, as a part of my_hash_free.
+ */
+ if (!my_multi_malloc(MY_WME,
+ &part_def, sizeof(PART_NAME_DEF),
+ &part_name, part_name_length + 1,
+ NULL))
+ DBUG_RETURN(true);
+ memcpy(part_name, name, part_name_length + 1);
+ part_def->partition_name= part_name;
+ part_def->length= part_name_length;
+ part_def->part_id= part_id;
+ part_def->is_subpart= is_subpart;
+ if (my_hash_insert(&part_share->partition_name_hash, (uchar *) part_def))
+ {
+ my_free(part_def);
+ DBUG_RETURN(true);
+ }
+ DBUG_RETURN(false);
+}
+
+
+/**
+ Populate the partition_name_hash in part_share.
+*/
+
+bool ha_partition::populate_partition_name_hash()
+{
+ List_iterator<partition_element> part_it(m_part_info->partitions);
+ uint num_parts= m_part_info->num_parts;
+ uint num_subparts= m_is_sub_partitioned ? m_part_info->num_subparts : 1;
+ uint tot_names;
+ uint i= 0;
+ DBUG_ASSERT(part_share);
+
+ DBUG_ENTER("ha_partition::populate_partition_name_hash");
+
+ /*
+ partition_name_hash is only set once and never changed
+ -> OK to check without locking.
+ */
+
+ if (part_share->partition_name_hash_initialized)
+ DBUG_RETURN(false);
+ lock_shared_ha_data();
+ if (part_share->partition_name_hash_initialized)
+ {
+ unlock_shared_ha_data();
+ DBUG_RETURN(false);
+ }
+ tot_names= m_is_sub_partitioned ? m_tot_parts + num_parts : num_parts;
+ if (my_hash_init(&part_share->partition_name_hash,
+ system_charset_info, tot_names, 0, 0,
+ (my_hash_get_key) get_part_name,
+ my_free, HASH_UNIQUE))
+ {
+ unlock_shared_ha_data();
+ DBUG_RETURN(TRUE);
+ }
+
+ do
+ {
+ partition_element *part_elem= part_it++;
+ DBUG_ASSERT(part_elem->part_state == PART_NORMAL);
+ if (part_elem->part_state == PART_NORMAL)
+ {
+ if (insert_partition_name_in_hash(part_elem->partition_name,
+ i * num_subparts, false))
+ goto err;
+ if (m_is_sub_partitioned)
+ {
+ List_iterator<partition_element>
+ subpart_it(part_elem->subpartitions);
+ partition_element *sub_elem;
+ uint j= 0;
+ do
+ {
+ sub_elem= subpart_it++;
+ if (insert_partition_name_in_hash(sub_elem->partition_name,
+ i * num_subparts + j, true))
+ goto err;
+
+ } while (++j < num_subparts);
+ }
+ }
+ } while (++i < num_parts);
+
+ part_share->partition_name_hash_initialized= true;
+ unlock_shared_ha_data();
+
+ DBUG_RETURN(FALSE);
+err:
+ my_hash_free(&part_share->partition_name_hash);
+ unlock_shared_ha_data();
+
+ DBUG_RETURN(TRUE);
+}
+
+
+/**
+ Set Handler_share pointer and allocate Handler_share pointers
+ for each partition and set those.
+
+ @param ha_share_arg Where to store/retrieve the Partitioning_share pointer
+ to be shared by all instances of the same table.
+
+ @return Operation status
+ @retval true Failure
+ @retval false Success
+*/
+
+bool ha_partition::set_ha_share_ref(Handler_share **ha_share_arg)
+{
+ Handler_share **ha_shares;
+ uint i;
+ DBUG_ENTER("ha_partition::set_ha_share_ref");
+
+ DBUG_ASSERT(!part_share);
+ DBUG_ASSERT(table_share);
+ DBUG_ASSERT(!m_is_clone_of);
+ DBUG_ASSERT(m_tot_parts);
+ if (handler::set_ha_share_ref(ha_share_arg))
+ DBUG_RETURN(true);
+ if (!(part_share= get_share()))
+ DBUG_RETURN(true);
+ DBUG_ASSERT(part_share->partitions_share_refs);
+ DBUG_ASSERT(part_share->partitions_share_refs->num_parts >= m_tot_parts);
+ ha_shares= part_share->partitions_share_refs->ha_shares;
+ for (i= 0; i < m_tot_parts; i++)
+ {
+ if (m_file[i]->set_ha_share_ref(&ha_shares[i]))
+ DBUG_RETURN(true);
+ }
+ DBUG_RETURN(false);
+}
+
+
+/**
+ Get the PARTITION_SHARE for the table.
+
+ @return Partition_share pointer, or NULL on failure
+ @retval != NULL Success
+ @retval NULL Error
+
+ @note Gets or initializes the Partition_share object used by partitioning.
+ The Partition_share is used for handling the auto_increment etc.
+*/
+
+Partition_share *ha_partition::get_share()
+{
+ Partition_share *tmp_share;
+ DBUG_ENTER("ha_partition::get_share");
+ DBUG_ASSERT(table_share);
+
+ lock_shared_ha_data();
+ if (!(tmp_share= static_cast<Partition_share*>(get_ha_share_ptr())))
+ {
+ tmp_share= new Partition_share;
+ if (!tmp_share)
+ goto err;
+ if (tmp_share->init(m_tot_parts))
+ {
+ delete tmp_share;
+ tmp_share= NULL;
+ goto err;
+ }
+ set_ha_share_ptr(static_cast<Handler_share*>(tmp_share));
+ }
+err:
+ unlock_shared_ha_data();
+ DBUG_RETURN(tmp_share);
+}
+
+
+
+/**
+ Helper function for freeing all internal bitmaps.
+*/
+
+void ha_partition::free_partition_bitmaps()
+{
+ /* Initialize the bitmap we use to minimize ha_start_bulk_insert calls */
+ bitmap_free(&m_bulk_insert_started);
+ bitmap_free(&m_locked_partitions);
+ bitmap_free(&m_partitions_to_reset);
+ bitmap_free(&m_key_not_found_partitions);
+}
+
/**
- A destructor for partition-specific TABLE_SHARE data.
+ Helper function for initializing all internal bitmaps.
*/
-void ha_data_partition_destroy(HA_DATA_PARTITION* ha_part_data)
+bool ha_partition::init_partition_bitmaps()
{
- if (ha_part_data)
+ DBUG_ENTER("ha_partition::init_partition_bitmaps");
+ /* Initialize the bitmap we use to minimize ha_start_bulk_insert calls */
+ if (bitmap_init(&m_bulk_insert_started, NULL, m_tot_parts + 1, FALSE))
+ DBUG_RETURN(true);
+ bitmap_clear_all(&m_bulk_insert_started);
+
+ /* Initialize the bitmap we use to keep track of locked partitions */
+ if (bitmap_init(&m_locked_partitions, NULL, m_tot_parts, FALSE))
+ {
+ bitmap_free(&m_bulk_insert_started);
+ DBUG_RETURN(true);
+ }
+ bitmap_clear_all(&m_locked_partitions);
+
+ /*
+ Initialize the bitmap we use to keep track of partitions which may have
+ something to reset in ha_reset().
+ */
+ if (bitmap_init(&m_partitions_to_reset, NULL, m_tot_parts, FALSE))
+ {
+ bitmap_free(&m_bulk_insert_started);
+ bitmap_free(&m_locked_partitions);
+ DBUG_RETURN(true);
+ }
+ bitmap_clear_all(&m_partitions_to_reset);
+
+ /*
+ Initialize the bitmap we use to keep track of partitions which returned
+ HA_ERR_KEY_NOT_FOUND from index_read_map.
+ */
+ if (bitmap_init(&m_key_not_found_partitions, NULL, m_tot_parts, FALSE))
+ {
+ bitmap_free(&m_bulk_insert_started);
+ bitmap_free(&m_locked_partitions);
+ bitmap_free(&m_partitions_to_reset);
+ DBUG_RETURN(true);
+ }
+ bitmap_clear_all(&m_key_not_found_partitions);
+ m_key_not_found= false;
+ /* Initialize the bitmap for read/lock_partitions */
+ if (!m_is_clone_of)
{
- mysql_mutex_destroy(&ha_part_data->LOCK_auto_inc);
+ DBUG_ASSERT(!m_clone_mem_root);
+ if (m_part_info->set_partition_bitmaps(NULL))
+ {
+ free_partition_bitmaps();
+ DBUG_RETURN(true);
+ }
}
+ DBUG_RETURN(false);
}
+
/*
Open handler object
@@ -2851,7 +3360,6 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
int error= HA_ERR_INITIALIZATION;
handler **file;
char name_buff[FN_REFLEN];
- bool is_not_tmp_table= (table_share->tmp_table == NO_TMP_TABLE);
ulonglong check_table_flags;
DBUG_ENTER("ha_partition::open");
@@ -2863,6 +3371,10 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
if (get_from_handler_file(name, &table->mem_root, test(m_is_clone_of)))
DBUG_RETURN(error);
name_buffer_ptr= m_name_buffer_ptr;
+ if (populate_partition_name_hash())
+ {
+ DBUG_RETURN(HA_ERR_INITIALIZATION);
+ }
m_start_key.length= 0;
m_rec0= table->record[0];
m_rec_length= table_share->stored_rec_length;
@@ -2877,32 +3389,10 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
m_part_ids_sorted_by_num_of_records[i]= i;
}
- /* Initialize the bitmap we use to minimize ha_start_bulk_insert calls */
- if (bitmap_init(&m_bulk_insert_started, NULL, m_tot_parts + 1, FALSE))
+ if (init_partition_bitmaps())
DBUG_RETURN(error);
- bitmap_clear_all(&m_bulk_insert_started);
- /*
- Initialize the bitmap we use to keep track of partitions which returned
- HA_ERR_KEY_NOT_FOUND from index_read_map.
- */
- if (bitmap_init(&m_key_not_found_partitions, NULL, m_tot_parts, FALSE))
- {
- bitmap_free(&m_bulk_insert_started);
- DBUG_RETURN(error);
- }
- bitmap_clear_all(&m_key_not_found_partitions);
- m_key_not_found= false;
- /* Initialize the bitmap we use to determine what partitions are used */
- if (!m_is_clone_of)
- {
- DBUG_ASSERT(!m_clone_mem_root);
- if (bitmap_init(&(m_part_info->used_partitions), NULL, m_tot_parts, TRUE))
- {
- bitmap_free(&m_bulk_insert_started);
- DBUG_RETURN(error);
- }
- bitmap_set_all(&(m_part_info->used_partitions));
- }
+
+ DBUG_ASSERT(m_part_info);
if (m_is_clone_of)
{
@@ -2911,7 +3401,10 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
/* Allocate an array of handler pointers for the partitions handlers. */
alloc_len= (m_tot_parts + 1) * sizeof(handler*);
if (!(m_file= (handler **) alloc_root(m_clone_mem_root, alloc_len)))
+ {
+ error= HA_ERR_INITIALIZATION;
goto err_alloc;
+ }
memset(m_file, 0, alloc_len);
/*
Populate them by cloning the original partitions. This also opens them.
@@ -2922,6 +3415,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
{
create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME,
FALSE);
+ /* ::clone() will also set ha_share from the original. */
if (!(m_file[i]= file[i]->clone(name_buff, m_clone_mem_root)))
{
error= HA_ERR_INITIALIZATION;
@@ -2939,10 +3433,13 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME,
FALSE);
table->s->connect_string = m_connect_string[(uint)(file-m_file)];
- if ((error= (*file)->ha_open(table, name_buff, mode, test_if_locked)))
+ if ((error= (*file)->ha_open(table, name_buff, mode,
+ test_if_locked | HA_OPEN_NO_PSI_CALL)))
goto err_handler;
bzero(&table->s->connect_string, sizeof(LEX_STRING));
- m_num_locks+= (*file)->lock_count();
+ if (m_file == file)
+ m_num_locks= (*file)->lock_count();
+ DBUG_ASSERT(m_num_locks == (*file)->lock_count());
name_buffer_ptr+= strlen(name_buffer_ptr) + 1;
} while (*(++file));
}
@@ -2965,7 +3462,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
(PARTITION_ENABLED_TABLE_FLAGS)))
{
error= HA_ERR_INITIALIZATION;
- /* set file to last handler, so all of them is closed */
+ /* set file to last handler, so all of them are closed */
file = &m_file[m_tot_parts - 1];
goto err_handler;
}
@@ -2986,34 +3483,6 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
clear_handler_file();
/*
- Use table_share->ha_part_data to share auto_increment_value among
- all handlers for the same table.
- */
- if (is_not_tmp_table)
- mysql_mutex_lock(&table_share->LOCK_ha_data);
- if (!table_share->ha_part_data)
- {
- /* currently only needed for auto_increment */
- table_share->ha_part_data= (HA_DATA_PARTITION*)
- alloc_root(&table_share->mem_root,
- sizeof(HA_DATA_PARTITION));
- if (!table_share->ha_part_data)
- {
- if (is_not_tmp_table)
- mysql_mutex_unlock(&table_share->LOCK_ha_data);
- goto err_handler;
- }
- DBUG_PRINT("info", ("table_share->ha_part_data 0x%p",
- table_share->ha_part_data));
- bzero(table_share->ha_part_data, sizeof(HA_DATA_PARTITION));
- table_share->ha_part_data_destroy= ha_data_partition_destroy;
- mysql_mutex_init(key_PARTITION_LOCK_auto_inc,
- &table_share->ha_part_data->LOCK_auto_inc,
- MY_MUTEX_INIT_FAST);
- }
- if (is_not_tmp_table)
- mysql_mutex_unlock(&table_share->LOCK_ha_data);
- /*
Some handlers update statistics as part of the open call. This will in
some cases corrupt the statistics of the partition handler and thus
to ensure we have correct statistics we call info from open after
@@ -3033,15 +3502,19 @@ err_handler:
while (file-- != m_file)
(*file)->ha_close();
err_alloc:
- bitmap_free(&m_bulk_insert_started);
- bitmap_free(&m_key_not_found_partitions);
- if (!m_is_clone_of)
- bitmap_free(&(m_part_info->used_partitions));
+ free_partition_bitmaps();
DBUG_RETURN(error);
}
+/*
+ Disabled since it is not yet possible to prune.
+ Without pruning, it would need to rebind/unbind every partition in every
+ statement which uses a table from the table cache. It would also use
+ as many PSI_tables as there are partitions.
+*/
+#ifdef HAVE_M_PSI_PER_PARTITION
void ha_partition::unbind_psi()
{
uint i;
@@ -3069,6 +3542,7 @@ void ha_partition::rebind_psi()
}
DBUG_VOID_RETURN;
}
+#endif /* HAVE_M_PSI_PER_PARTITION */
/**
@@ -3094,22 +3568,35 @@ handler *ha_partition::clone(const char *name, MEM_ROOT *mem_root)
DBUG_ENTER("ha_partition::clone");
new_handler= new (mem_root) ha_partition(ht, table_share, m_part_info,
this, mem_root);
+ if (!new_handler)
+ DBUG_RETURN(NULL);
+
+ /*
+ We will not clone each partition's handler here, it will be done in
+ ha_partition::open() for clones. Also set_ha_share_ref is not needed
+ here, since 1) ha_share is copied in the constructor used above
+ 2) each partition's cloned handler will set it from its original.
+ */
+
/*
Allocate new_handler->ref here because otherwise ha_open will allocate it
on this->table->mem_root and we will not be able to reclaim that memory
when the clone handler object is destroyed.
*/
- if (new_handler &&
- !(new_handler->ref= (uchar*) alloc_root(mem_root,
+ if (!(new_handler->ref= (uchar*) alloc_root(mem_root,
ALIGN_SIZE(m_ref_length)*2)))
- new_handler= NULL;
+ goto err;
- if (new_handler &&
- new_handler->ha_open(table, name,
- table->db_stat, HA_OPEN_IGNORE_IF_LOCKED))
- new_handler= NULL;
+ if (new_handler->ha_open(table, name,
+ table->db_stat,
+ HA_OPEN_IGNORE_IF_LOCKED | HA_OPEN_NO_PSI_CALL))
+ goto err;
DBUG_RETURN((handler*) new_handler);
+
+err:
+ delete new_handler;
+ DBUG_RETURN(NULL);
}
@@ -3139,10 +3626,8 @@ int ha_partition::close(void)
DBUG_ASSERT(table->s == table_share);
destroy_record_priority_queue();
- bitmap_free(&m_bulk_insert_started);
- bitmap_free(&m_key_not_found_partitions);
- if (!m_is_clone_of)
- bitmap_free(&(m_part_info->used_partitions));
+ free_partition_bitmaps();
+ DBUG_ASSERT(m_part_info);
file= m_file;
repeat:
@@ -3204,41 +3689,64 @@ repeat:
int ha_partition::external_lock(THD *thd, int lock_type)
{
- bool first= TRUE;
uint error;
- handler **file;
+ uint i, first_used_partition;
+ MY_BITMAP *used_partitions;
DBUG_ENTER("ha_partition::external_lock");
DBUG_ASSERT(!auto_increment_lock && !auto_increment_safe_stmt_log_lock);
- file= m_file;
- m_lock_type= lock_type;
-repeat:
- do
+ if (lock_type == F_UNLCK)
+ used_partitions= &m_locked_partitions;
+ else
+ used_partitions= &(m_part_info->lock_partitions);
+
+ first_used_partition= bitmap_get_first_set(used_partitions);
+
+ for (i= first_used_partition;
+ i < m_tot_parts;
+ i= bitmap_get_next_set(used_partitions, i))
{
- DBUG_PRINT("info", ("external_lock(thd, %d) iteration %d",
- lock_type, (int) (file - m_file)));
- if ((error= (*file)->ha_external_lock(thd, lock_type)))
+ DBUG_PRINT("info", ("external_lock(thd, %d) part %d", lock_type, i));
+ if ((error= m_file[i]->ha_external_lock(thd, lock_type)))
{
- if (F_UNLCK != lock_type)
+ if (lock_type != F_UNLCK)
goto err_handler;
}
- } while (*(++file));
+ DBUG_PRINT("info", ("external_lock part %u lock %d", i, lock_type));
+ if (lock_type != F_UNLCK)
+ bitmap_set_bit(&m_locked_partitions, i);
+ }
+ if (lock_type == F_UNLCK)
+ {
+ bitmap_clear_all(used_partitions);
+ }
+ else
+ {
+ /* Add touched partitions to be included in reset(). */
+ bitmap_union(&m_partitions_to_reset, used_partitions);
+ }
- if (first && m_added_file && m_added_file[0])
+ if (m_added_file && m_added_file[0])
{
+ handler **file= m_added_file;
DBUG_ASSERT(lock_type == F_UNLCK);
- file= m_added_file;
- first= FALSE;
- goto repeat;
+ do
+ {
+ (void) (*file)->ha_external_lock(thd, lock_type);
+ } while (*(++file));
}
DBUG_RETURN(0);
err_handler:
- while (file-- != m_file)
+ uint j;
+ for (j= first_used_partition;
+ j < i;
+ j= bitmap_get_next_set(&m_locked_partitions, j))
{
- (*file)->ha_external_lock(thd, F_UNLCK);
+ (void) m_file[j]->ha_external_lock(thd, F_UNLCK);
}
+ bitmap_clear_all(&m_locked_partitions);
DBUG_RETURN(error);
}
@@ -3293,14 +3801,30 @@ THR_LOCK_DATA **ha_partition::store_lock(THD *thd,
THR_LOCK_DATA **to,
enum thr_lock_type lock_type)
{
- handler **file;
+ uint i;
DBUG_ENTER("ha_partition::store_lock");
- file= m_file;
- do
+ DBUG_ASSERT(thd == current_thd);
+
+ /*
+ This can be called from get_lock_data() in mysql_lock_abort_for_thread(),
+ even when thd != table->in_use. In that case don't use partition pruning,
+ but use all partitions instead to avoid using another threads structures.
+ */
+ if (thd != table->in_use)
{
- DBUG_PRINT("info", ("store lock %d iteration", (int) (file - m_file)));
- to= (*file)->store_lock(thd, to, lock_type);
- } while (*(++file));
+ for (i= 0; i < m_tot_parts; i++)
+ to= m_file[i]->store_lock(thd, to, lock_type);
+ }
+ else
+ {
+ for (i= bitmap_get_first_set(&(m_part_info->lock_partitions));
+ i < m_tot_parts;
+ i= bitmap_get_next_set(&m_part_info->lock_partitions, i))
+ {
+ DBUG_PRINT("info", ("store lock %d iteration", i));
+ to= m_file[i]->store_lock(thd, to, lock_type);
+ }
+ }
DBUG_RETURN(to);
}
@@ -3324,40 +3848,57 @@ THR_LOCK_DATA **ha_partition::store_lock(THD *thd,
int ha_partition::start_stmt(THD *thd, thr_lock_type lock_type)
{
int error= 0;
- handler **file;
+ uint i;
+ /* Assert that read_partitions is included in lock_partitions */
+ DBUG_ASSERT(bitmap_is_subset(&m_part_info->read_partitions,
+ &m_part_info->lock_partitions));
+ /*
+ m_locked_partitions is set in previous external_lock/LOCK TABLES.
+ Current statement's lock requests must not include any partitions
+ not previously locked.
+ */
+ DBUG_ASSERT(bitmap_is_subset(&m_part_info->lock_partitions,
+ &m_locked_partitions));
DBUG_ENTER("ha_partition::start_stmt");
- file= m_file;
- do
+ for (i= bitmap_get_first_set(&(m_part_info->lock_partitions));
+ i < m_tot_parts;
+ i= bitmap_get_next_set(&m_part_info->lock_partitions, i))
{
- if ((error= (*file)->start_stmt(thd, lock_type)))
+ if ((error= m_file[i]->start_stmt(thd, lock_type)))
break;
- } while (*(++file));
+ /* Add partition to be called in reset(). */
+ bitmap_set_bit(&m_partitions_to_reset, i);
+ }
DBUG_RETURN(error);
}
-/*
+/**
Get number of lock objects returned in store_lock
- SYNOPSIS
- lock_count()
+ @returns Number of locks returned in call to store_lock
- RETURN VALUE
- Number of locks returned in call to store_lock
-
- DESCRIPTION
+ @desc
Returns the number of store locks needed in call to store lock.
- We return number of partitions since we call store_lock on each
- underlying handler. Assists the above functions in allocating
+ We return number of partitions we will lock multiplied with number of
+ locks needed by each partition. Assists the above functions in allocating
sufficient space for lock structures.
*/
uint ha_partition::lock_count() const
{
DBUG_ENTER("ha_partition::lock_count");
- DBUG_PRINT("info", ("m_num_locks %d", m_num_locks));
- DBUG_RETURN(m_num_locks);
+ /*
+ The caller wants to know the upper bound, to allocate enough memory.
+ There is no performance loss if we simply return the maximum number of
+ locks needed, only some minor over-allocation of memory in get_lock_data().
+
+ Also notice that this may be called for another thread != table->in_use,
+ when mysql_lock_abort_for_thread() is called. So this is safer than
+ using the number of partitions after pruning.
+ */
+ DBUG_RETURN(m_tot_parts * m_num_locks);
}
@@ -3409,7 +3950,7 @@ bool ha_partition::was_semi_consistent_read()
{
DBUG_ENTER("ha_partition::was_semi_consistent_read");
DBUG_ASSERT(m_last_part < m_tot_parts &&
- bitmap_is_set(&(m_part_info->used_partitions), m_last_part));
+ bitmap_is_set(&(m_part_info->read_partitions), m_last_part));
DBUG_RETURN(m_file[m_last_part]->was_semi_consistent_read());
}
@@ -3434,13 +3975,16 @@ bool ha_partition::was_semi_consistent_read()
*/
void ha_partition::try_semi_consistent_read(bool yes)
{
- handler **file;
+ uint i;
DBUG_ENTER("ha_partition::try_semi_consistent_read");
- for (file= m_file; *file; file++)
+ i= bitmap_get_first_set(&(m_part_info->read_partitions));
+ DBUG_ASSERT(i != MY_BIT_NONE);
+ for (;
+ i < m_tot_parts;
+ i= bitmap_get_next_set(&m_part_info->read_partitions, i))
{
- if (bitmap_is_set(&(m_part_info->used_partitions), (file - m_file)))
- (*file)->try_semi_consistent_read(yes);
+ m_file[i]->try_semi_consistent_read(yes);
}
DBUG_VOID_RETURN;
}
@@ -3495,11 +4039,8 @@ int ha_partition::write_row(uchar * buf)
bool have_auto_increment= table->next_number_field && buf == table->record[0];
my_bitmap_map *old_map;
THD *thd= ha_thd();
- ulonglong saved_sql_mode= thd->variables.sql_mode;
+ sql_mode_t saved_sql_mode= thd->variables.sql_mode;
bool saved_auto_inc_field_not_null= table->auto_increment_field_not_null;
-#ifdef NOT_NEEDED
- uchar *rec0= m_rec0;
-#endif
DBUG_ENTER("ha_partition::write_row");
DBUG_ASSERT(buf == m_rec0);
@@ -3509,7 +4050,7 @@ int ha_partition::write_row(uchar * buf)
*/
if (have_auto_increment)
{
- if (!table_share->ha_part_data->auto_inc_initialized &&
+ if (!part_share->auto_inc_initialized &&
!table_share->next_number_keypart)
{
/*
@@ -3546,26 +4087,20 @@ int ha_partition::write_row(uchar * buf)
}
old_map= dbug_tmp_use_all_columns(table, table->read_set);
-#ifdef NOT_NEEDED
- if (likely(buf == rec0))
-#endif
- error= m_part_info->get_partition_id(m_part_info, &part_id,
- &func_value);
-#ifdef NOT_NEEDED
- else
- {
- set_field_ptr(m_part_field_array, buf, rec0);
- error= m_part_info->get_partition_id(m_part_info, &part_id,
- &func_value);
- set_field_ptr(m_part_field_array, rec0, buf);
- }
-#endif
+ error= m_part_info->get_partition_id(m_part_info, &part_id, &func_value);
dbug_tmp_restore_column_map(table->read_set, old_map);
if (unlikely(error))
{
m_part_info->err_value= func_value;
goto exit;
}
+ if (!bitmap_is_set(&(m_part_info->lock_partitions), part_id))
+ {
+ DBUG_PRINT("info", ("Write to non-locked partition %u (func_value: %ld)",
+ part_id, (long) func_value));
+ error= HA_ERR_NOT_IN_LOCK_PARTITIONS;
+ goto exit;
+ }
m_last_part= part_id;
DBUG_PRINT("info", ("Insert in partition %d", part_id));
start_part_bulk_insert(thd, part_id);
@@ -3603,7 +4138,7 @@ exit:
Called from sql_select.cc, sql_acl.cc, sql_update.cc, and sql_insert.cc.
new_data is always record[0]
- old_data is always record[1]
+ old_data is normally record[1] but may be anything
*/
int ha_partition::update_row(const uchar *old_data, uchar *new_data)
@@ -3613,8 +4148,10 @@ int ha_partition::update_row(const uchar *old_data, uchar *new_data)
int error= 0;
longlong func_value;
DBUG_ENTER("ha_partition::update_row");
- m_err_rec= NULL;
+ // Need to read partition-related columns, to locate the row's partition:
+ DBUG_ASSERT(bitmap_is_subset(&m_part_info->full_part_field_set,
+ table->read_set));
if ((error= get_parts_for_update(old_data, new_data, table->record[0],
m_part_info, &old_part_id, &new_part_id,
&func_value)))
@@ -3622,26 +4159,12 @@ int ha_partition::update_row(const uchar *old_data, uchar *new_data)
m_part_info->err_value= func_value;
goto exit;
}
- /*
- The protocol for updating a row is:
- 1) position the handler (cursor) on the row to be updated,
- either through the last read row (rnd or index) or by rnd_pos.
- 2) call update_row with both old and new full records as arguments.
-
- This means that m_last_part should already be set to actual partition
- where the row was read from. And if that is not the same as the
- calculated part_id we found a misplaced row, we return an error to
- notify the user that something is broken in the row distribution
- between partitions! Since we don't check all rows on read, we return an
- error instead of correcting m_last_part, to make the user aware of the
- problem!
- */
- if (old_part_id != m_last_part)
+ DBUG_ASSERT(bitmap_is_set(&(m_part_info->read_partitions), old_part_id));
+ if (!bitmap_is_set(&(m_part_info->lock_partitions), new_part_id))
{
- m_err_rec= old_data;
- DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);
+ error= HA_ERR_NOT_IN_LOCK_PARTITIONS;
+ goto exit;
}
-
m_last_part= new_part_id;
start_part_bulk_insert(thd, new_part_id);
if (new_part_id == old_part_id)
@@ -3690,7 +4213,7 @@ int ha_partition::update_row(const uchar *old_data, uchar *new_data)
exit:
/*
if updating an auto_increment column, update
- table_share->ha_part_data->next_auto_inc_val if needed.
+ part_share->next_auto_inc_val if needed.
(not to be used if auto_increment on secondary field in a multi-column
index)
mysql_update does not set table->next_number_field, so we use
@@ -3703,7 +4226,7 @@ exit:
bitmap_is_set(table->write_set,
table->found_next_number_field->field_index))
{
- if (!table_share->ha_part_data->auto_inc_initialized)
+ if (!part_share->auto_inc_initialized)
info(HA_STATUS_AUTO);
set_auto_increment_if_higher(table->found_next_number_field);
}
@@ -3745,34 +4268,19 @@ int ha_partition::delete_row(const uchar *buf)
int error;
THD *thd= ha_thd();
DBUG_ENTER("ha_partition::delete_row");
- m_err_rec= NULL;
+ DBUG_ASSERT(bitmap_is_subset(&m_part_info->full_part_field_set,
+ table->read_set));
if ((error= get_part_for_delete(buf, m_rec0, m_part_info, &part_id)))
{
DBUG_RETURN(error);
}
- /*
- The protocol for deleting a row is:
- 1) position the handler (cursor) on the row to be deleted,
- either through the last read row (rnd or index) or by rnd_pos.
- 2) call delete_row with the full record as argument.
-
- This means that m_last_part should already be set to actual partition
- where the row was read from. And if that is not the same as the
- calculated part_id we found a misplaced row, we return an error to
- notify the user that something is broken in the row distribution
- between partitions! Since we don't check all rows on read, we return an
- error instead of forwarding the delete to the correct (m_last_part)
- partition!
- TODO: change the assert in InnoDB into an error instead and make this one
- an assert instead and remove the get_part_for_delete()!
- */
- if (part_id != m_last_part)
- {
- m_err_rec= buf;
- DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND);
- }
-
+ m_last_part= part_id;
+ /* Should never call delete_row on a partition which is not read */
+ DBUG_ASSERT(bitmap_is_set(&(m_part_info->read_partitions), part_id));
+ DBUG_ASSERT(bitmap_is_set(&(m_part_info->lock_partitions), part_id));
+ if (!bitmap_is_set(&(m_part_info->lock_partitions), part_id))
+ DBUG_RETURN(HA_ERR_NOT_IN_LOCK_PARTITIONS);
tmp_disable_binlog(thd);
error= m_file[part_id]->ha_delete_row(buf);
reenable_binlog(thd);
@@ -3798,22 +4306,24 @@ int ha_partition::delete_row(const uchar *buf)
Called from item_sum.cc by Item_func_group_concat::clear(),
Item_sum_count_distinct::clear(), and Item_func_group_concat::clear().
Called from sql_delete.cc by mysql_delete().
- Called from sql_select.cc by JOIN::reinit().
+ Called from sql_select.cc by JOIN::reset().
Called from sql_union.cc by st_select_lex_unit::exec().
*/
int ha_partition::delete_all_rows()
{
int error;
- handler **file;
+ uint i;
DBUG_ENTER("ha_partition::delete_all_rows");
- file= m_file;
- do
+ for (i= bitmap_get_first_set(&m_part_info->read_partitions);
+ i < m_tot_parts;
+ i= bitmap_get_next_set(&m_part_info->read_partitions, i))
{
- if ((error= (*file)->ha_delete_all_rows()))
+ /* Can be pruned, like DELETE FROM t PARTITION (pX) */
+ if ((error= m_file[i]->ha_delete_all_rows()))
DBUG_RETURN(error);
- } while (*(++file));
+ }
DBUG_RETURN(0);
}
@@ -3836,8 +4346,8 @@ int ha_partition::truncate()
it so that it will be initialized again at the next use.
*/
lock_auto_increment();
- table_share->ha_part_data->next_auto_inc_val= 0;
- table_share->ha_part_data->auto_inc_initialized= FALSE;
+ part_share->next_auto_inc_val= 0;
+ part_share->auto_inc_initialized= false;
unlock_auto_increment();
file= m_file;
@@ -3878,8 +4388,8 @@ int ha_partition::truncate_partition(Alter_info *alter_info, bool *binlog_stmt)
it so that it will be initialized again at the next use.
*/
lock_auto_increment();
- table_share->ha_part_data->next_auto_inc_val= 0;
- table_share->ha_part_data->auto_inc_initialized= FALSE;
+ part_share->next_auto_inc_val= 0;
+ part_share->auto_inc_initialized= FALSE;
unlock_auto_increment();
*binlog_stmt= true;
@@ -3925,7 +4435,7 @@ int ha_partition::truncate_partition(Alter_info *alter_info, bool *binlog_stmt)
SYNOPSIS
start_bulk_insert()
rows Number of rows to insert
- flags Flags to control index creation
+ flags Flags to control index creation
RETURN VALUE
NONE
@@ -3955,6 +4465,7 @@ void ha_partition::start_part_bulk_insert(THD *thd, uint part_id)
if (!bitmap_is_set(&m_bulk_insert_started, part_id) &&
bitmap_is_set(&m_bulk_insert_started, m_tot_parts))
{
+ DBUG_ASSERT(bitmap_is_set(&(m_part_info->lock_partitions), part_id));
old_buffer_size= thd->variables.read_buff_size;
/* Update read_buffer_size for this partition */
thd->variables.read_buff_size= estimate_read_buffer_size(old_buffer_size);
@@ -4062,11 +4573,12 @@ int ha_partition::end_bulk_insert()
if (!bitmap_is_set(&m_bulk_insert_started, m_tot_parts))
DBUG_RETURN(error);
- for (i= 0; i < m_tot_parts; i++)
+ for (i= bitmap_get_first_set(&m_bulk_insert_started);
+ i < m_tot_parts;
+ i= bitmap_get_next_set(&m_bulk_insert_started, i))
{
int tmp;
- if (bitmap_is_set(&m_bulk_insert_started, i) &&
- (tmp= m_file[i]->ha_end_bulk_insert()))
+ if ((tmp= m_file[i]->ha_end_bulk_insert()))
error= tmp;
}
bitmap_clear_all(&m_bulk_insert_started);
@@ -4114,7 +4626,7 @@ int ha_partition::rnd_init(bool scan)
For operations that may need to change data, we may need to extend
read_set.
*/
- if (m_lock_type == F_WRLCK)
+ if (get_lock_type() == F_WRLCK)
{
/*
If write_set contains any of the fields used in partition and
@@ -4139,9 +4651,9 @@ int ha_partition::rnd_init(bool scan)
}
/* Now we see what the index of our first important partition is */
- DBUG_PRINT("info", ("m_part_info->used_partitions: 0x%lx",
- (long) m_part_info->used_partitions.bitmap));
- part_id= bitmap_get_first_set(&(m_part_info->used_partitions));
+ DBUG_PRINT("info", ("m_part_info->read_partitions: 0x%lx",
+ (long) m_part_info->read_partitions.bitmap));
+ part_id= bitmap_get_first_set(&(m_part_info->read_partitions));
DBUG_PRINT("info", ("m_part_spec.start_part %d", part_id));
if (MY_BIT_NONE == part_id)
@@ -4168,13 +4680,12 @@ int ha_partition::rnd_init(bool scan)
}
else
{
- for (i= part_id; i < m_tot_parts; i++)
+ for (i= part_id;
+ i < m_tot_parts;
+ i= bitmap_get_next_set(&m_part_info->read_partitions, i))
{
- if (bitmap_is_set(&(m_part_info->used_partitions), i))
- {
- if ((error= m_file[i]->ha_rnd_init(scan)))
- goto err;
- }
+ if ((error= m_file[i]->ha_rnd_init(scan)))
+ goto err;
}
}
m_scan_value= scan;
@@ -4184,10 +4695,12 @@ int ha_partition::rnd_init(bool scan)
DBUG_RETURN(0);
err:
- while ((int)--i >= (int)part_id)
+ /* Call rnd_end for all previously inited partitions. */
+ for (;
+ part_id < i;
+ part_id= bitmap_get_next_set(&m_part_info->read_partitions, part_id))
{
- if (bitmap_is_set(&(m_part_info->used_partitions), i))
- m_file[i]->ha_rnd_end();
+ m_file[part_id]->ha_rnd_end();
}
err1:
m_scan_value= 2;
@@ -4209,7 +4722,6 @@ err1:
int ha_partition::rnd_end()
{
- handler **file;
DBUG_ENTER("ha_partition::rnd_end");
switch (m_scan_value) {
case 2: // Error
@@ -4222,12 +4734,13 @@ int ha_partition::rnd_end()
}
break;
case 0:
- file= m_file;
- do
+ uint i;
+ for (i= bitmap_get_first_set(&m_part_info->read_partitions);
+ i < m_tot_parts;
+ i= bitmap_get_next_set(&m_part_info->read_partitions, i))
{
- if (bitmap_is_set(&(m_part_info->used_partitions), (file - m_file)))
- (*file)->ha_rnd_end();
- } while (*(++file));
+ m_file[i]->ha_rnd_end();
+ }
break;
}
m_scan_value= 2;
@@ -4290,7 +4803,7 @@ int ha_partition::rnd_next(uchar *buf)
}
/*
- if we get here, then the current partition rnd_next returned failure
+ if we get here, then the current partition ha_rnd_next returned failure
*/
if (result == HA_ERR_RECORD_DELETED)
continue; // Probably MyISAM
@@ -4305,9 +4818,7 @@ int ha_partition::rnd_next(uchar *buf)
break;
/* Shift to next partition */
- while (++part_id < m_tot_parts &&
- !bitmap_is_set(&(m_part_info->used_partitions), part_id))
- ;
+ part_id= bitmap_get_next_set(&m_part_info->read_partitions, part_id);
if (part_id >= m_tot_parts)
{
result= HA_ERR_END_OF_FILE;
@@ -4359,6 +4870,7 @@ void ha_partition::position(const uchar *record)
{
handler *file= m_file[m_last_part];
uint pad_length;
+ DBUG_ASSERT(bitmap_is_set(&(m_part_info->read_partitions), m_last_part));
DBUG_ENTER("ha_partition::position");
file->position(record);
@@ -4372,14 +4884,6 @@ void ha_partition::position(const uchar *record)
}
-void ha_partition::column_bitmaps_signal()
-{
- handler::column_bitmaps_signal();
- /* Must read all partition fields to make position() call possible */
- bitmap_union(table->read_set, &m_part_info->full_part_field_set);
-}
-
-
/*
Read row using position
@@ -4411,6 +4915,7 @@ int ha_partition::rnd_pos(uchar * buf, uchar *pos)
part_id= uint2korr((const uchar *) pos);
DBUG_ASSERT(part_id < m_tot_parts);
file= m_file[part_id];
+ DBUG_ASSERT(bitmap_is_set(&(m_part_info->read_partitions), part_id));
m_last_part= part_id;
DBUG_RETURN(file->rnd_pos(buf, (pos + PARTITION_BYTES_IN_POS)));
}
@@ -4480,7 +4985,7 @@ bool ha_partition::init_record_priority_queue()
if (!m_ordered_rec_buffer)
{
uint alloc_len;
- uint used_parts= bitmap_bits_set(&m_part_info->used_partitions);
+ uint used_parts= bitmap_bits_set(&m_part_info->read_partitions);
/* Allocate record buffer for each used partition. */
alloc_len= used_parts * (m_rec_length + PARTITION_BYTES_IN_POS);
/* Allocate a key for temporary use when setting up the scan. */
@@ -4497,16 +5002,15 @@ bool ha_partition::init_record_priority_queue()
setting up the scan.
*/
char *ptr= (char*) m_ordered_rec_buffer;
- uint16 i= 0;
- do
+ uint i;
+ for (i= bitmap_get_first_set(&m_part_info->read_partitions);
+ i < m_tot_parts;
+ i= bitmap_get_next_set(&m_part_info->read_partitions, i))
{
- if (bitmap_is_set(&m_part_info->used_partitions, i))
- {
- DBUG_PRINT("info", ("init rec-buf for part %u", i));
- int2store(ptr, i);
- ptr+= m_rec_length + PARTITION_BYTES_IN_POS;
- }
- } while (++i < m_tot_parts);
+ DBUG_PRINT("info", ("init rec-buf for part %u", i));
+ int2store(ptr, i);
+ ptr+= m_rec_length + PARTITION_BYTES_IN_POS;
+ }
m_start_key.key= (const uchar*)ptr;
/* Initialize priority queue, initialized to reading forward. */
if (init_queue(&m_queue, used_parts, (uint) PARTITION_BYTES_IN_POS,
@@ -4558,7 +5062,7 @@ void ha_partition::destroy_record_priority_queue()
int ha_partition::index_init(uint inx, bool sorted)
{
int error= 0;
- handler **file;
+ uint i;
DBUG_ENTER("ha_partition::index_init");
DBUG_PRINT("info", ("inx %u sorted %u", inx, sorted));
@@ -4591,7 +5095,7 @@ int ha_partition::index_init(uint inx, bool sorted)
calculate the partition id to place updated and deleted records.
But this is required for operations that may need to change data only.
*/
- if (m_lock_type == F_WRLCK)
+ if (get_lock_type() == F_WRLCK)
bitmap_union(table->read_set, &m_part_info->full_part_field_set);
if (sorted)
{
@@ -4607,25 +5111,39 @@ int ha_partition::index_init(uint inx, bool sorted)
TODO: handle COUNT(*) queries via unordered scan.
*/
- uint i;
KEY **key_info= m_curr_key_info;
do
{
- for (i= 0; i < (*key_info)->key_parts; i++)
+ for (i= 0; i < (*key_info)->user_defined_key_parts; i++)
bitmap_set_bit(table->read_set,
(*key_info)->key_part[i].field->field_index);
} while (*(++key_info));
}
- file= m_file;
- do
+ for (i= bitmap_get_first_set(&m_part_info->read_partitions);
+ i < m_tot_parts;
+ i= bitmap_get_next_set(&m_part_info->read_partitions, i))
{
- if (bitmap_is_set(&(m_part_info->used_partitions), (file - m_file)))
- if ((error= (*file)->ha_index_init(inx, sorted)))
- {
- DBUG_ASSERT(0); // Should never happen
- break;
- }
- } while (*(++file));
+ if ((error= m_file[i]->ha_index_init(inx, sorted)))
+ goto err;
+
+ DBUG_EXECUTE_IF("ha_partition_fail_index_init", {
+ i++;
+ error= HA_ERR_NO_PARTITION_FOUND;
+ goto err;
+ });
+ }
+err:
+ if (error)
+ {
+ /* End the previously initialized indexes. */
+ uint j;
+ for (j= bitmap_get_first_set(&m_part_info->read_partitions);
+ j < i;
+ j= bitmap_get_next_set(&m_part_info->read_partitions, j))
+ {
+ (void) m_file[j]->ha_index_end();
+ }
+ }
DBUG_RETURN(error);
}
@@ -4648,19 +5166,19 @@ int ha_partition::index_init(uint inx, bool sorted)
int ha_partition::index_end()
{
int error= 0;
- handler **file;
+ uint i;
DBUG_ENTER("ha_partition::index_end");
active_index= MAX_KEY;
m_part_spec.start_part= NO_CURRENT_PART_ID;
- file= m_file;
- do
+ for (i= bitmap_get_first_set(&m_part_info->read_partitions);
+ i < m_tot_parts;
+ i= bitmap_get_next_set(&m_part_info->read_partitions, i))
{
int tmp;
- if (bitmap_is_set(&(m_part_info->used_partitions), (file - m_file)))
- if ((tmp= (*file)->ha_index_end()))
- error= tmp;
- } while (*(++file));
+ if ((tmp= m_file[i]->ha_index_end()))
+ error= tmp;
+ }
destroy_record_priority_queue();
DBUG_RETURN(error);
}
@@ -4906,17 +5424,20 @@ int ha_partition::index_read_idx_map(uchar *buf, uint index,
or no matching partitions (start_part > end_part)
*/
DBUG_ASSERT(m_part_spec.start_part >= m_part_spec.end_part);
-
- for (part= m_part_spec.start_part; part <= m_part_spec.end_part; part++)
+ /* The start part is must be marked as used. */
+ DBUG_ASSERT(m_part_spec.start_part > m_part_spec.end_part ||
+ bitmap_is_set(&(m_part_info->read_partitions),
+ m_part_spec.start_part));
+
+ for (part= m_part_spec.start_part;
+ part <= m_part_spec.end_part;
+ part= bitmap_get_next_set(&m_part_info->read_partitions, part))
{
- if (bitmap_is_set(&(m_part_info->used_partitions), part))
- {
- error= m_file[part]->index_read_idx_map(buf, index, key,
- keypart_map, find_flag);
- if (error != HA_ERR_KEY_NOT_FOUND &&
- error != HA_ERR_END_OF_FILE)
- break;
- }
+ error= m_file[part]->ha_index_read_idx_map(buf, index, key,
+ keypart_map, find_flag);
+ if (error != HA_ERR_KEY_NOT_FOUND &&
+ error != HA_ERR_END_OF_FILE)
+ break;
}
if (part <= m_part_spec.end_part)
m_last_part= part;
@@ -5058,15 +5579,7 @@ int ha_partition::read_range_first(const key_range *start_key,
m_ordered= sorted;
eq_range= eq_range_arg;
- end_range= 0;
- if (end_key)
- {
- end_range= &save_end_range;
- save_end_range= *end_key;
- key_compare_result_on_equal=
- ((end_key->flag == HA_READ_BEFORE_KEY) ? 1 :
- (end_key->flag == HA_READ_AFTER_KEY) ? -1 : 0);
- }
+ set_end_range(end_key);
range_key_part= m_curr_key_info[0]->key_part;
if (start_key)
@@ -5168,7 +5681,7 @@ int ha_partition::partition_scan_set_up(uchar * buf, bool idx_read_flag)
Verify this, also bitmap must have at least one bit set otherwise
the result from this table is the empty set.
*/
- uint start_part= bitmap_get_first_set(&(m_part_info->used_partitions));
+ uint start_part= bitmap_get_first_set(&(m_part_info->read_partitions));
if (start_part == MY_BIT_NONE)
{
DBUG_PRINT("info", ("scan with no partition to scan"));
@@ -5285,18 +5798,21 @@ int ha_partition::handle_unordered_next(uchar *buf, bool is_next_same)
int ha_partition::handle_unordered_scan_next_partition(uchar * buf)
{
- uint i;
+ uint i= m_part_spec.start_part;
int saved_error= HA_ERR_END_OF_FILE;
DBUG_ENTER("ha_partition::handle_unordered_scan_next_partition");
- for (i= m_part_spec.start_part; i <= m_part_spec.end_part; i++)
+ if (i)
+ i= bitmap_get_next_set(&m_part_info->read_partitions, i - 1);
+ else
+ i= bitmap_get_first_set(&m_part_info->read_partitions);
+
+ for (;
+ i <= m_part_spec.end_part;
+ i= bitmap_get_next_set(&m_part_info->read_partitions, i))
{
int error;
- handler *file;
-
- if (!(bitmap_is_set(&(m_part_info->used_partitions), i)))
- continue;
- file= m_file[i];
+ handler *file= m_file[i];
m_part_spec.start_part= i;
switch (m_index_scan_type) {
case partition_read_range:
@@ -5395,6 +5911,8 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order)
}
m_top_entry= NO_CURRENT_PART_ID;
queue_remove_all(&m_queue);
+ DBUG_ASSERT(bitmap_is_set(&m_part_info->read_partitions,
+ m_part_spec.start_part));
/*
Position part_rec_buf_ptr to point to the first used partition >=
@@ -5402,18 +5920,18 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order)
but is before start_part. These partitions has allocated record buffers
but is dynamically pruned, so those buffers must be skipped.
*/
- uint first_used_part= bitmap_get_first_set(&m_part_info->used_partitions);
- for (; first_used_part < m_part_spec.start_part; first_used_part++)
+ for (i= bitmap_get_first_set(&m_part_info->read_partitions);
+ i < m_part_spec.start_part;
+ i= bitmap_get_next_set(&m_part_info->read_partitions, i))
{
- if (bitmap_is_set(&(m_part_info->used_partitions), first_used_part))
- part_rec_buf_ptr+= m_rec_length + PARTITION_BYTES_IN_POS;
+ part_rec_buf_ptr+= m_rec_length + PARTITION_BYTES_IN_POS;
}
DBUG_PRINT("info", ("m_part_spec.start_part %u first_used_part %u",
- m_part_spec.start_part, first_used_part));
- for (i= first_used_part; i <= m_part_spec.end_part; i++)
+ m_part_spec.start_part, i));
+ for (/* continue from above */ ;
+ i <= m_part_spec.end_part;
+ i= bitmap_get_next_set(&m_part_info->read_partitions, i))
{
- if (!(bitmap_is_set(&(m_part_info->used_partitions), i)))
- continue;
DBUG_PRINT("info", ("reading from part %u (scan_type: %u)",
i, m_index_scan_type));
DBUG_ASSERT(i == uint2korr(part_rec_buf_ptr));
@@ -5421,12 +5939,6 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order)
int error;
handler *file= m_file[i];
- /*
- Reset null bits (to avoid valgrind warnings) and to give a default
- value for not read null fields.
- */
- bfill(rec_buf_ptr, table->s->null_bytes, 255);
-
switch (m_index_scan_type) {
case partition_index_read:
error= file->ha_index_read_map(rec_buf_ptr,
@@ -5542,11 +6054,10 @@ int ha_partition::handle_ordered_index_scan_key_not_found()
Loop over all used partitions to get the correct offset
into m_ordered_rec_buffer.
*/
- for (i= 0; i < m_tot_parts; i++)
+ for (i= bitmap_get_first_set(&m_part_info->read_partitions);
+ i < m_tot_parts;
+ i= bitmap_get_next_set(&m_part_info->read_partitions, i))
{
- if (!bitmap_is_set(&m_part_info->used_partitions, i))
- continue;
-
if (bitmap_is_set(&m_key_not_found_partitions, i))
{
/*
@@ -5554,7 +6065,7 @@ int ha_partition::handle_ordered_index_scan_key_not_found()
in index_read_map.
*/
curr_rec_buf= part_buf + PARTITION_BYTES_IN_POS;
- error= m_file[i]->index_next(curr_rec_buf);
+ error= m_file[i]->ha_index_next(curr_rec_buf);
/* HA_ERR_KEY_NOT_FOUND is not allowed from index_next! */
DBUG_ASSERT(error != HA_ERR_KEY_NOT_FOUND);
if (!error)
@@ -5809,27 +6320,36 @@ int ha_partition::info(uint flag)
uint extra_var_flag= flag & HA_STATUS_VARIABLE_EXTRA;
DBUG_ENTER("ha_partition::info");
+#ifndef DBUG_OFF
+ if (bitmap_is_set_all(&(m_part_info->read_partitions)))
+ DBUG_PRINT("info", ("All partitions are used"));
+#endif /* DBUG_OFF */
if (flag & HA_STATUS_AUTO)
{
bool auto_inc_is_first_in_idx= (table_share->next_number_keypart == 0);
DBUG_PRINT("info", ("HA_STATUS_AUTO"));
if (!table->found_next_number_field)
stats.auto_increment_value= 0;
- else if (table_share->ha_part_data->auto_inc_initialized)
+ else if (part_share->auto_inc_initialized)
{
lock_auto_increment();
- stats.auto_increment_value= table_share->ha_part_data->next_auto_inc_val;
+ stats.auto_increment_value= part_share->next_auto_inc_val;
unlock_auto_increment();
}
else
{
lock_auto_increment();
/* to avoid two concurrent initializations, check again when locked */
- if (table_share->ha_part_data->auto_inc_initialized)
- stats.auto_increment_value=
- table_share->ha_part_data->next_auto_inc_val;
+ if (part_share->auto_inc_initialized)
+ stats.auto_increment_value= part_share->next_auto_inc_val;
else
{
+ /*
+ The auto-inc mutex in the table_share is locked, so we do not need
+ to have the handlers locked.
+ HA_STATUS_NO_LOCK is not checked, since we cannot skip locking
+ the mutex, because it is initialized.
+ */
handler *file, **file_array;
ulonglong auto_increment_value= 0;
file_array= m_file;
@@ -5847,11 +6367,11 @@ int ha_partition::info(uint flag)
stats.auto_increment_value= auto_increment_value;
if (auto_inc_is_first_in_idx)
{
- set_if_bigger(table_share->ha_part_data->next_auto_inc_val,
+ set_if_bigger(part_share->next_auto_inc_val,
auto_increment_value);
- table_share->ha_part_data->auto_inc_initialized= TRUE;
+ part_share->auto_inc_initialized= true;
DBUG_PRINT("info", ("initializing next_auto_inc_val to %lu",
- (ulong) table_share->ha_part_data->next_auto_inc_val));
+ (ulong) part_share->next_auto_inc_val));
}
}
unlock_auto_increment();
@@ -5859,6 +6379,7 @@ int ha_partition::info(uint flag)
}
if (flag & HA_STATUS_VARIABLE)
{
+ uint i;
DBUG_PRINT("info", ("HA_STATUS_VARIABLE"));
/*
Calculates statistical variables
@@ -5879,29 +6400,27 @@ int ha_partition::info(uint flag)
check_time: Time of last check (only applicable to MyISAM)
We report last time of all underlying handlers
*/
- handler *file, **file_array;
+ handler *file;
stats.records= 0;
stats.deleted= 0;
stats.data_file_length= 0;
stats.index_file_length= 0;
stats.check_time= 0;
stats.delete_length= 0;
- file_array= m_file;
- do
+ for (i= bitmap_get_first_set(&m_part_info->read_partitions);
+ i < m_tot_parts;
+ i= bitmap_get_next_set(&m_part_info->read_partitions, i))
{
- if (bitmap_is_set(&(m_part_info->used_partitions), (file_array - m_file)))
- {
- file= *file_array;
- file->info(HA_STATUS_VARIABLE | no_lock_flag | extra_var_flag);
- stats.records+= file->stats.records;
- stats.deleted+= file->stats.deleted;
- stats.data_file_length+= file->stats.data_file_length;
- stats.index_file_length+= file->stats.index_file_length;
- stats.delete_length+= file->stats.delete_length;
- if (file->stats.check_time > stats.check_time)
- stats.check_time= file->stats.check_time;
- }
- } while (*(++file_array));
+ file= m_file[i];
+ file->info(HA_STATUS_VARIABLE | no_lock_flag | extra_var_flag);
+ stats.records+= file->stats.records;
+ stats.deleted+= file->stats.deleted;
+ stats.data_file_length+= file->stats.data_file_length;
+ stats.index_file_length+= file->stats.index_file_length;
+ stats.delete_length+= file->stats.delete_length;
+ if (file->stats.check_time > stats.check_time)
+ stats.check_time= file->stats.check_time;
+ }
if (stats.records && stats.records < 2 &&
!(m_file[0]->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT))
stats.records= 2;
@@ -5972,7 +6491,7 @@ int ha_partition::info(uint flag)
file= *file_array;
/* Get variables if not already done */
if (!(flag & HA_STATUS_VARIABLE) ||
- !bitmap_is_set(&(m_part_info->used_partitions),
+ !bitmap_is_set(&(m_part_info->read_partitions),
(file_array - m_file)))
file->info(HA_STATUS_VARIABLE | no_lock_flag | extra_var_flag);
if (file->stats.records > max_records)
@@ -6039,6 +6558,7 @@ void ha_partition::get_dynamic_partition_info(PARTITION_STATS *stat_info,
uint part_id)
{
handler *file= m_file[part_id];
+ DBUG_ASSERT(bitmap_is_set(&(m_part_info->read_partitions), part_id));
file->info(HA_STATUS_CONST | HA_STATUS_TIME | HA_STATUS_VARIABLE |
HA_STATUS_VARIABLE_EXTRA | HA_STATUS_NO_LOCK);
@@ -6062,7 +6582,6 @@ void ha_partition::get_dynamic_partition_info(PARTITION_STATS *stat_info,
General function to prepare handler for certain behavior.
@param[in] operation operation to execute
- operation Operation type for extra call
@return status
@retval 0 success
@@ -6125,6 +6644,10 @@ void ha_partition::get_dynamic_partition_info(PARTITION_STATS *stat_info,
ensure disk based tables are flushed at end of query execution.
Currently is never used.
+ HA_EXTRA_FORCE_REOPEN:
+ Only used by MyISAM and Archive, called when altering table,
+ closing tables to enforce a reopen of the table files.
+
2) Operations used by some non-MyISAM handlers
----------------------------------------------
HA_EXTRA_KEYREAD_PRESERVE_FIELDS:
@@ -6249,6 +6772,9 @@ void ha_partition::get_dynamic_partition_info(PARTITION_STATS *stat_info,
HA_EXTRA_PREPARE_FOR_RENAME:
Informs the handler we are about to attempt a rename of the table.
+ For handlers that have share open files (MyISAM key-file and
+ Archive writer) they must close the files before rename is possible
+ on Windows.
HA_EXTRA_READCHECK:
HA_EXTRA_NO_READCHECK:
@@ -6269,10 +6795,6 @@ void ha_partition::get_dynamic_partition_info(PARTITION_STATS *stat_info,
HA_EXTRA_NO_READCHECK=5 No readcheck on update
HA_EXTRA_READCHECK=6 Use readcheck (def)
- HA_EXTRA_FORCE_REOPEN:
- Only used by MyISAM, called when altering table, closing tables to
- enforce a reopen of the table files.
-
4) Operations only used by temporary tables for query processing
----------------------------------------------------------------
HA_EXTRA_RESET_STATE:
@@ -6381,6 +6903,10 @@ int ha_partition::extra(enum ha_extra_function operation)
case HA_EXTRA_FLUSH:
case HA_EXTRA_PREPARE_FOR_FORCED_CLOSE:
DBUG_RETURN(loop_extra(operation));
+ case HA_EXTRA_PREPARE_FOR_RENAME:
+ case HA_EXTRA_FORCE_REOPEN:
+ DBUG_RETURN(loop_extra_alter(operation));
+ break;
/* Category 2), used by non-MyISAM handlers */
case HA_EXTRA_IGNORE_DUP_KEY:
@@ -6393,9 +6919,6 @@ int ha_partition::extra(enum ha_extra_function operation)
}
/* Category 3), used by MyISAM handlers */
- case HA_EXTRA_PREPARE_FOR_RENAME:
- DBUG_RETURN(prepare_for_rename());
- break;
case HA_EXTRA_PREPARE_FOR_UPDATE:
/*
Needs to be run on the first partition in the range now, and
@@ -6412,7 +6935,6 @@ int ha_partition::extra(enum ha_extra_function operation)
break;
case HA_EXTRA_NORMAL:
case HA_EXTRA_QUICK:
- case HA_EXTRA_FORCE_REOPEN:
case HA_EXTRA_PREPARE_FOR_DROP:
case HA_EXTRA_FLUSH_CACHE:
{
@@ -6517,33 +7039,34 @@ int ha_partition::extra(enum ha_extra_function operation)
}
-/*
+/**
Special extra call to reset extra parameters
- SYNOPSIS
- reset()
-
- RETURN VALUE
- >0 Error code
- 0 Success
+ @return Operation status.
+ @retval >0 Error code
+ @retval 0 Success
- DESCRIPTION
- Called at end of each statement to reset buffers
+ @note Called at end of each statement to reset buffers.
+ To avoid excessive calls, the m_partitions_to_reset bitmap keep records
+ of which partitions that have been used in extra(), external_lock() or
+ start_stmt() and is needed to be called.
*/
int ha_partition::reset(void)
{
- int result= 0, tmp;
- handler **file;
+ int result= 0;
+ int tmp;
+ uint i;
DBUG_ENTER("ha_partition::reset");
- if (m_part_info)
- bitmap_set_all(&m_part_info->used_partitions);
- file= m_file;
- do
+
+ for (i= bitmap_get_first_set(&m_partitions_to_reset);
+ i < m_tot_parts;
+ i= bitmap_get_next_set(&m_partitions_to_reset, i))
{
- if ((tmp= (*file)->ha_reset()))
+ if ((tmp= m_file[i]->ha_reset()))
result= tmp;
- } while (*(++file));
+ }
+ bitmap_clear_all(&m_partitions_to_reset);
DBUG_RETURN(result);
}
@@ -6590,41 +7113,48 @@ void ha_partition::prepare_extra_cache(uint cachesize)
m_extra_cache_size= cachesize;
if (m_part_spec.start_part != NO_CURRENT_PART_ID)
{
+ DBUG_ASSERT(bitmap_is_set(&m_partitions_to_reset,
+ m_part_spec.start_part));
+ bitmap_set_bit(&m_partitions_to_reset, m_part_spec.start_part);
late_extra_cache(m_part_spec.start_part);
}
DBUG_VOID_RETURN;
}
-/*
- Prepares our new and reorged handlers for rename or delete
+/**
+ Prepares our new and reorged handlers for rename or delete.
- SYNOPSIS
- prepare_for_delete()
+ @param operation Operation to forward
- RETURN VALUE
- >0 Error code
- 0 Success
+ @return Operation status
+ @retval 0 Success
+ @retval !0 Error
*/
-int ha_partition::prepare_for_rename()
+int ha_partition::loop_extra_alter(enum ha_extra_function operation)
{
int result= 0, tmp;
handler **file;
- DBUG_ENTER("ha_partition::prepare_for_rename()");
-
+ DBUG_ENTER("ha_partition::loop_extra_alter()");
+ DBUG_ASSERT(operation == HA_EXTRA_PREPARE_FOR_RENAME ||
+ operation == HA_EXTRA_FORCE_REOPEN);
+
if (m_new_file != NULL)
{
for (file= m_new_file; *file; file++)
- if ((tmp= (*file)->extra(HA_EXTRA_PREPARE_FOR_RENAME)))
- result= tmp;
+ if ((tmp= (*file)->extra(operation)))
+ result= tmp;
+ }
+ if (m_reorged_file != NULL)
+ {
for (file= m_reorged_file; *file; file++)
- if ((tmp= (*file)->extra(HA_EXTRA_PREPARE_FOR_RENAME)))
- result= tmp;
- DBUG_RETURN(result);
+ if ((tmp= (*file)->extra(operation)))
+ result= tmp;
}
-
- DBUG_RETURN(loop_extra(HA_EXTRA_PREPARE_FOR_RENAME));
+ if ((tmp= loop_extra(operation)))
+ result= tmp;
+ DBUG_RETURN(result);
}
/*
@@ -6642,20 +7172,18 @@ int ha_partition::prepare_for_rename()
int ha_partition::loop_extra(enum ha_extra_function operation)
{
int result= 0, tmp;
- handler **file;
- bool is_select;
+ uint i;
DBUG_ENTER("ha_partition::loop_extra()");
- is_select= (thd_sql_command(ha_thd()) == SQLCOM_SELECT);
- for (file= m_file; *file; file++)
+ for (i= bitmap_get_first_set(&m_part_info->lock_partitions);
+ i < m_tot_parts;
+ i= bitmap_get_next_set(&m_part_info->lock_partitions, i))
{
- if (!is_select ||
- bitmap_is_set(&(m_part_info->used_partitions), file - m_file))
- {
- if ((tmp= (*file)->extra(operation)))
- result= tmp;
- }
+ if ((tmp= m_file[i]->extra(operation)))
+ result= tmp;
}
+ /* Add all used partitions to be called in reset(). */
+ bitmap_union(&m_partitions_to_reset, &m_part_info->lock_partitions);
DBUG_RETURN(result);
}
@@ -6729,20 +7257,18 @@ void ha_partition::late_extra_no_cache(uint partition_id)
MODULE optimiser support
****************************************************************************/
-/*
- Get keys to use for scanning
+/**
+ Get keys to use for scanning.
- SYNOPSIS
- keys_to_use_for_scanning()
+ @return key_map of keys usable for scanning
- RETURN VALUE
- key_map of keys usable for scanning
+ @note No need to use read_partitions here, since it does not depend on
+ which partitions is used, only which storage engine used.
*/
const key_map *ha_partition::keys_to_use_for_scanning()
{
DBUG_ENTER("ha_partition::keys_to_use_for_scanning");
-
DBUG_RETURN(m_file[0]->keys_to_use_for_scanning());
}
@@ -6756,7 +7282,7 @@ ha_rows ha_partition::min_rows_for_estimate()
uint i, max_used_partitions, tot_used_partitions;
DBUG_ENTER("ha_partition::min_rows_for_estimate");
- tot_used_partitions= bitmap_bits_set(&m_part_info->used_partitions);
+ tot_used_partitions= bitmap_bits_set(&m_part_info->read_partitions);
/*
All partitions might have been left as unused during partition pruning
@@ -6819,7 +7345,7 @@ uint ha_partition::get_biggest_used_partition(uint *part_index)
while ((*part_index) < m_tot_parts)
{
part_id= m_part_ids_sorted_by_num_of_records[(*part_index)++];
- if (bitmap_is_set(&m_part_info->used_partitions, part_id))
+ if (bitmap_is_set(&m_part_info->read_partitions, part_id))
return part_id;
}
return NO_CURRENT_PART_ID;
@@ -6839,12 +7365,13 @@ uint ha_partition::get_biggest_used_partition(uint *part_index)
double ha_partition::scan_time()
{
double scan_time= 0;
- handler **file;
+ uint i;
DBUG_ENTER("ha_partition::scan_time");
- for (file= m_file; *file; file++)
- if (bitmap_is_set(&(m_part_info->used_partitions), (file - m_file)))
- scan_time+= (*file)->scan_time();
+ for (i= bitmap_get_first_set(&m_part_info->read_partitions);
+ i < m_tot_parts;
+ i= bitmap_get_next_set(&m_part_info->read_partitions, i))
+ scan_time+= m_file[i]->scan_time();
DBUG_RETURN(scan_time);
}
@@ -6930,7 +7457,7 @@ ha_rows ha_partition::estimate_rows_upper_bound()
do
{
- if (bitmap_is_set(&(m_part_info->used_partitions), (file - m_file)))
+ if (bitmap_is_set(&(m_part_info->read_partitions), (file - m_file)))
{
rows= (*file)->estimate_rows_upper_bound();
if (rows == HA_POS_ERROR)
@@ -6972,27 +7499,25 @@ double ha_partition::read_time(uint index, uint ranges, ha_rows rows)
/**
Number of rows in table. see handler.h
- SYNOPSIS
- records()
- RETURN VALUE
- Number of total rows in a partitioned table.
+ @return Number of records in the table (after pruning!)
*/
ha_rows ha_partition::records()
{
ha_rows rows, tot_rows= 0;
- handler **file;
+ uint i;
DBUG_ENTER("ha_partition::records");
- file= m_file;
- do
+ for (i= bitmap_get_first_set(&m_part_info->read_partitions);
+ i < m_tot_parts;
+ i= bitmap_get_next_set(&m_part_info->read_partitions, i))
{
- rows= (*file)->records();
+ rows= m_file[i]->records();
if (rows == HA_POS_ERROR)
DBUG_RETURN(HA_POS_ERROR);
tot_rows+= rows;
- } while (*(++file));
+ }
DBUG_RETURN(tot_rows);
}
@@ -7043,82 +7568,77 @@ uint8 ha_partition::table_cache_type()
}
+/**
+ Calculate hash value for KEY partitioning using an array of fields.
+
+ @param field_array An array of the fields in KEY partitioning
+
+ @return hash_value calculated
+
+ @note Uses the hash function on the character set of the field.
+ Integer and floating point fields use the binary character set by default.
+*/
+
+uint32 ha_partition::calculate_key_hash_value(Field **field_array)
+{
+ ulong nr1= 1;
+ ulong nr2= 4;
+
+ do
+ {
+ Field *field= *field_array;
+ field->hash(&nr1, &nr2);
+ } while (*(++field_array));
+ return (uint32) nr1;
+}
+
+
/****************************************************************************
MODULE print messages
****************************************************************************/
const char *ha_partition::index_type(uint inx)
{
+ uint first_used_partition;
DBUG_ENTER("ha_partition::index_type");
- DBUG_RETURN(m_file[0]->index_type(inx));
-}
-
-
-enum row_type ha_partition::get_row_type() const
-{
- handler **file;
- enum row_type type= (*m_file)->get_row_type();
+ first_used_partition= bitmap_get_first_set(&(m_part_info->read_partitions));
- for (file= m_file, file++; *file; file++)
+ if (first_used_partition == MY_BIT_NONE)
{
- enum row_type part_type= (*file)->get_row_type();
- if (part_type != type)
- return ROW_TYPE_NOT_USED;
+ DBUG_ASSERT(0); // How can this happen?
+ DBUG_RETURN(handler::index_type(inx));
}
- return type;
+ DBUG_RETURN(m_file[first_used_partition]->index_type(inx));
}
-void ha_partition::append_row_to_str(String &str)
+enum row_type ha_partition::get_row_type() const
{
- Field **field_ptr;
- const uchar *rec;
- bool is_rec0= !m_err_rec || m_err_rec == table->record[0];
- if (is_rec0)
- rec= table->record[0];
- else
- rec= m_err_rec;
- // If PK, use full PK instead of full part field array!
- if (table->s->primary_key != MAX_KEY)
- {
- KEY *key= table->key_info + table->s->primary_key;
- KEY_PART_INFO *key_part= key->key_part;
- KEY_PART_INFO *key_part_end= key_part + key->key_parts;
- if (!is_rec0)
- set_key_field_ptr(key, rec, table->record[0]);
- for (; key_part != key_part_end; key_part++)
- {
- Field *field= key_part->field;
- str.append(" ");
- str.append(field->field_name);
- str.append(":");
- field_unpack(&str, field, rec, 0, false);
- }
- if (!is_rec0)
- set_key_field_ptr(key, table->record[0], rec);
- }
- else
+ uint i;
+ enum row_type type;
+ DBUG_ENTER("ha_partition::get_row_type");
+
+ i= bitmap_get_first_set(&m_part_info->read_partitions);
+ DBUG_ASSERT(i < m_tot_parts);
+ if (i >= m_tot_parts)
+ DBUG_RETURN(ROW_TYPE_NOT_USED);
+
+ type= m_file[i]->get_row_type();
+ DBUG_PRINT("info", ("partition %u, row_type: %d", i, type));
+
+ for (i= bitmap_get_next_set(&m_part_info->lock_partitions, i);
+ i < m_tot_parts;
+ i= bitmap_get_next_set(&m_part_info->lock_partitions, i))
{
- if (!is_rec0)
- set_field_ptr(m_part_info->full_part_field_array, rec,
- table->record[0]);
- /* No primary key, use full partition field array. */
- for (field_ptr= m_part_info->full_part_field_array;
- *field_ptr;
- field_ptr++)
- {
- Field *field= *field_ptr;
- str.append(" ");
- str.append(field->field_name);
- str.append(":");
- field_unpack(&str, field, rec, 0, false);
- }
- if (!is_rec0)
- set_field_ptr(m_part_info->full_part_field_array, table->record[0],
- rec);
+ enum row_type part_type= m_file[i]->get_row_type();
+ DBUG_PRINT("info", ("partition %u, row_type: %d", i, type));
+ if (part_type != type)
+ DBUG_RETURN(ROW_TYPE_NOT_USED);
}
+
+ DBUG_RETURN(type);
}
@@ -7130,72 +7650,24 @@ void ha_partition::print_error(int error, myf errflag)
/* Should probably look for my own errors first */
DBUG_PRINT("enter", ("error: %d", error));
- if (error == HA_ERR_NO_PARTITION_FOUND)
+ if ((error == HA_ERR_NO_PARTITION_FOUND) &&
+ ! (thd->lex->alter_info.flags & Alter_info::ALTER_TRUNCATE_PARTITION))
+ m_part_info->print_no_partition_found(table);
+ else
{
- switch(thd_sql_command(thd))
+ /* In case m_file has not been initialized, like in bug#42438 */
+ if (m_file)
{
- case SQLCOM_DELETE:
- case SQLCOM_DELETE_MULTI:
- case SQLCOM_UPDATE:
- case SQLCOM_UPDATE_MULTI:
- if (m_err_rec)
+ if (m_last_part >= m_tot_parts)
{
- uint max_length;
- char buf[MAX_KEY_LENGTH];
- const char *msg= "Found a row in wrong partition (";
- String str(buf,sizeof(buf),system_charset_info);
- uint32 part_id;
- /* Should only happen on DELETE or UPDATE! */
- str.length(0);
- str.append_ulonglong(m_last_part);
- str.append(" != ");
- if (!get_part_for_delete(m_err_rec, m_rec0, m_part_info, &part_id))
- {
- str.append_ulonglong(part_id);
- }
- str.append(")");
- append_row_to_str(str);
- /* Log this error, so the DBA can notice it and fix it! */
- sql_print_error("Table '%-192s' corrupted: %s%s\n"
- "Please CHECK and REPAIR the table!",
- table->s->table_name.str, msg, str.c_ptr_safe());
-
- max_length= (MYSQL_ERRMSG_SIZE-
- (uint) strlen(msg));
- if (str.length() >= max_length)
- {
- str.length(max_length-4);
- str.append(STRING_WITH_LEN("..."));
- }
- my_printf_error(ER_NO_PARTITION_FOR_GIVEN_VALUE, "%s%s", MYF(0),
- msg, str.c_ptr_safe());
- m_err_rec= NULL;
- DBUG_VOID_RETURN;
- }
- default:
- {
- if (!(thd->lex->alter_info.flags & ALTER_TRUNCATE_PARTITION))
- {
- m_part_info->print_no_partition_found(table);
- DBUG_VOID_RETURN;
- }
+ DBUG_ASSERT(0);
+ m_last_part= 0;
}
- /* fall through to generic error handling. */
+ m_file[m_last_part]->print_error(error, errflag);
}
+ else
+ handler::print_error(error, errflag);
}
-
- /* In case m_file has not been initialized, like in bug#42438 */
- if (m_file)
- {
- if (m_last_part >= m_tot_parts)
- {
- DBUG_ASSERT(0);
- m_last_part= 0;
- }
- m_file[m_last_part]->print_error(error, errflag);
- }
- else
- handler::print_error(error, errflag);
DBUG_VOID_RETURN;
}
@@ -7215,49 +7687,48 @@ bool ha_partition::get_error_message(int error, String *buf)
/****************************************************************************
- MODULE handler characteristics
+ MODULE in-place ALTER
****************************************************************************/
/**
+ Get table flags.
+*/
+
+handler::Table_flags ha_partition::table_flags() const
+{
+ uint first_used_partition= 0;
+ DBUG_ENTER("ha_partition::table_flags");
+ if (m_handler_status < handler_initialized ||
+ m_handler_status >= handler_closed)
+ DBUG_RETURN(PARTITION_ENABLED_TABLE_FLAGS);
+
+ if (get_lock_type() != F_UNLCK)
+ {
+ /*
+ The flags are cached after external_lock, and may depend on isolation
+ level. So we should use a locked partition to get the correct flags.
+ */
+ first_used_partition= bitmap_get_first_set(&m_part_info->lock_partitions);
+ if (first_used_partition == MY_BIT_NONE)
+ first_used_partition= 0;
+ }
+ DBUG_RETURN((m_file[first_used_partition]->ha_table_flags() &
+ ~(PARTITION_DISABLED_TABLE_FLAGS)) |
+ (PARTITION_ENABLED_TABLE_FLAGS));
+}
+
+
+/**
alter_table_flags must be on handler/table level, not on hton level
due to the ha_partition hton does not know what the underlying hton is.
*/
uint ha_partition::alter_table_flags(uint flags)
{
- uint flags_to_return, flags_to_check;
+ uint flags_to_return;
DBUG_ENTER("ha_partition::alter_table_flags");
flags_to_return= ht->alter_table_flags(flags);
- flags_to_return|= m_file[0]->alter_table_flags(flags);
+ flags_to_return|= m_file[0]->alter_table_flags(flags);
- /*
- If one partition fails we must be able to revert the change for the other,
- already altered, partitions. So both ADD and DROP can only be supported in
- pairs.
- */
- flags_to_check= HA_INPLACE_ADD_INDEX_NO_READ_WRITE;
- flags_to_check|= HA_INPLACE_DROP_INDEX_NO_READ_WRITE;
- if ((flags_to_return & flags_to_check) != flags_to_check)
- flags_to_return&= ~flags_to_check;
- flags_to_check= HA_INPLACE_ADD_UNIQUE_INDEX_NO_READ_WRITE;
- flags_to_check|= HA_INPLACE_DROP_UNIQUE_INDEX_NO_READ_WRITE;
- if ((flags_to_return & flags_to_check) != flags_to_check)
- flags_to_return&= ~flags_to_check;
- flags_to_check= HA_INPLACE_ADD_PK_INDEX_NO_READ_WRITE;
- flags_to_check|= HA_INPLACE_DROP_PK_INDEX_NO_READ_WRITE;
- if ((flags_to_return & flags_to_check) != flags_to_check)
- flags_to_return&= ~flags_to_check;
- flags_to_check= HA_INPLACE_ADD_INDEX_NO_WRITE;
- flags_to_check|= HA_INPLACE_DROP_INDEX_NO_WRITE;
- if ((flags_to_return & flags_to_check) != flags_to_check)
- flags_to_return&= ~flags_to_check;
- flags_to_check= HA_INPLACE_ADD_UNIQUE_INDEX_NO_WRITE;
- flags_to_check|= HA_INPLACE_DROP_UNIQUE_INDEX_NO_WRITE;
- if ((flags_to_return & flags_to_check) != flags_to_check)
- flags_to_return&= ~flags_to_check;
- flags_to_check= HA_INPLACE_ADD_PK_INDEX_NO_WRITE;
- flags_to_check|= HA_INPLACE_DROP_PK_INDEX_NO_WRITE;
- if ((flags_to_return & flags_to_check) != flags_to_check)
- flags_to_return&= ~flags_to_check;
DBUG_RETURN(flags_to_return);
}
@@ -7286,228 +7757,297 @@ bool ha_partition::check_if_incompatible_data(HA_CREATE_INFO *create_info,
/**
- Helper class for [final_]add_index, see handler.h
+ Support of in-place alter table.
*/
-class ha_partition_add_index : public handler_add_index
+/**
+ Helper class for in-place alter, see handler.h
+*/
+
+class ha_partition_inplace_ctx : public inplace_alter_handler_ctx
{
public:
- handler_add_index **add_array;
- ha_partition_add_index(TABLE* table_arg, KEY* key_info_arg,
- uint num_of_keys_arg)
- : handler_add_index(table_arg, key_info_arg, num_of_keys_arg)
- {}
- ~ha_partition_add_index() {}
-};
-
+ inplace_alter_handler_ctx **handler_ctx_array;
+ bool rollback_done;
+private:
+ uint m_tot_parts;
-/**
- Support of in-place add/drop index
+public:
+ ha_partition_inplace_ctx(THD *thd, uint tot_parts)
+ : inplace_alter_handler_ctx(),
+ handler_ctx_array(NULL),
+ rollback_done(false),
+ m_tot_parts(tot_parts)
+ {}
- @param table_arg Table to add index to
- @param key_info Struct over the new keys to add
- @param num_of_keys Number of keys to add
- @param[out] add Data to be submitted with final_add_index
+ ~ha_partition_inplace_ctx()
+ {
+ if (handler_ctx_array)
+ {
+ for (uint index= 0; index < m_tot_parts; index++)
+ delete handler_ctx_array[index];
+ }
+ }
+};
- @return Operation status
- @retval 0 Success
- @retval != 0 Failure (error code returned, and all operations rollbacked)
-*/
-int ha_partition::add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys,
- handler_add_index **add)
+enum_alter_inplace_result
+ha_partition::check_if_supported_inplace_alter(TABLE *altered_table,
+ Alter_inplace_info *ha_alter_info)
{
- uint i;
- int ret= 0;
+#ifdef PARTITION_SUPPORTS_INPLACE_ALTER
+ uint index= 0;
+ enum_alter_inplace_result result= HA_ALTER_INPLACE_NO_LOCK;
+ ha_partition_inplace_ctx *part_inplace_ctx;
THD *thd= ha_thd();
- ha_partition_add_index *part_add_index;
+#else
+ enum_alter_inplace_result result= HA_ALTER_INPLACE_NOT_SUPPORTED;
+#endif
- DBUG_ENTER("ha_partition::add_index");
- /*
- There has already been a check in fix_partition_func in mysql_alter_table
- before this call, which checks for unique/primary key violations of the
- partitioning function. So no need for extra check here.
- */
-
+ DBUG_ENTER("ha_partition::check_if_supported_inplace_alter");
+
+#ifndef PARTITION_SUPPORTS_INPLACE_ALTER
/*
- This will be freed at the end of the statement.
- And destroyed at final_add_index. (Sql_alloc does not free in delete).
+ Due to bug#14760210 partitions can be out-of-sync in case
+ commit_inplace_alter_table fails after the first partition.
+
+ Until we can either commit all partitions at the same time or
+ have an atomic recover on failure/crash we don't support any
+ inplace alter.
+
+ TODO: investigate what happens when indexes are out-of-sync
+ between partitions. If safe and possible to recover from,
+ then we could allow ADD/DROP INDEX.
*/
- part_add_index= new (thd->mem_root)
- ha_partition_add_index(table_arg, key_info, num_of_keys);
- if (!part_add_index)
- DBUG_RETURN(HA_ERR_OUT_OF_MEM);
- part_add_index->add_array= (handler_add_index **)
- thd->alloc(sizeof(void *) * m_tot_parts);
- if (!part_add_index->add_array)
- {
- delete part_add_index;
- DBUG_RETURN(HA_ERR_OUT_OF_MEM);
+ DBUG_RETURN(result);
+#else
+ part_inplace_ctx=
+ new (thd->mem_root) ha_partition_inplace_ctx(thd, m_tot_parts);
+ if (!part_inplace_ctx)
+ DBUG_RETURN(HA_ALTER_ERROR);
+
+ part_inplace_ctx->handler_ctx_array= (inplace_alter_handler_ctx **)
+ thd->alloc(sizeof(inplace_alter_handler_ctx *) * m_tot_parts);
+ if (!part_inplace_ctx->handler_ctx_array)
+ DBUG_RETURN(HA_ALTER_ERROR);
+
+ for (index= 0; index < m_tot_parts; index++)
+ part_inplace_ctx->handler_ctx_array[index]= NULL;
+
+ for (index= 0; index < m_tot_parts; index++)
+ {
+ enum_alter_inplace_result p_result=
+ m_file[index]->check_if_supported_inplace_alter(altered_table,
+ ha_alter_info);
+ part_inplace_ctx->handler_ctx_array[index]= ha_alter_info->handler_ctx;
+
+ if (p_result < result)
+ result= p_result;
+ if (result == HA_ALTER_ERROR)
+ break;
}
+ ha_alter_info->handler_ctx= part_inplace_ctx;
- for (i= 0; i < m_tot_parts; i++)
- {
- if ((ret= m_file[i]->add_index(table_arg, key_info, num_of_keys,
- &part_add_index->add_array[i])))
- goto err;
- }
- *add= part_add_index;
- DBUG_RETURN(ret);
-err:
- /* Rollback all prepared partitions. i - 1 .. 0 */
- while (i)
- {
- i--;
- (void) m_file[i]->final_add_index(part_add_index->add_array[i], false);
- }
- delete part_add_index;
- DBUG_RETURN(ret);
+ DBUG_RETURN(result);
+#endif
}
-/**
- Second phase of in-place add index.
+bool ha_partition::prepare_inplace_alter_table(TABLE *altered_table,
+ Alter_inplace_info *ha_alter_info)
+{
+ uint index= 0;
+ bool error= false;
+ ha_partition_inplace_ctx *part_inplace_ctx;
- @param add Info from add_index
- @param commit Should we commit or rollback the add_index operation
+ DBUG_ENTER("ha_partition::prepare_inplace_alter_table");
- @return Operation status
- @retval 0 Success
- @retval != 0 Failure (error code returned)
+ part_inplace_ctx=
+ static_cast<class ha_partition_inplace_ctx*>(ha_alter_info->handler_ctx);
- @note If commit is false, index changes are rolled back by dropping the
- added indexes. If commit is true, nothing is done as the indexes
- were already made active in ::add_index()
-*/
+ for (index= 0; index < m_tot_parts && !error; index++)
+ {
+ ha_alter_info->handler_ctx= part_inplace_ctx->handler_ctx_array[index];
+ if (m_file[index]->ha_prepare_inplace_alter_table(altered_table,
+ ha_alter_info))
+ error= true;
+ part_inplace_ctx->handler_ctx_array[index]= ha_alter_info->handler_ctx;
+ }
+ ha_alter_info->handler_ctx= part_inplace_ctx;
-int ha_partition::final_add_index(handler_add_index *add, bool commit)
+ DBUG_RETURN(error);
+}
+
+
+bool ha_partition::inplace_alter_table(TABLE *altered_table,
+ Alter_inplace_info *ha_alter_info)
{
- ha_partition_add_index *part_add_index;
- uint i;
- int ret= 0;
+ uint index= 0;
+ bool error= false;
+ ha_partition_inplace_ctx *part_inplace_ctx;
- DBUG_ENTER("ha_partition::final_add_index");
-
- if (!add)
+ DBUG_ENTER("ha_partition::inplace_alter_table");
+
+ part_inplace_ctx=
+ static_cast<class ha_partition_inplace_ctx*>(ha_alter_info->handler_ctx);
+
+ for (index= 0; index < m_tot_parts && !error; index++)
{
- DBUG_ASSERT(!commit);
- DBUG_RETURN(0);
+ ha_alter_info->handler_ctx= part_inplace_ctx->handler_ctx_array[index];
+ if (m_file[index]->ha_inplace_alter_table(altered_table,
+ ha_alter_info))
+ error= true;
+ part_inplace_ctx->handler_ctx_array[index]= ha_alter_info->handler_ctx;
}
- part_add_index= static_cast<class ha_partition_add_index*>(add);
+ ha_alter_info->handler_ctx= part_inplace_ctx;
- for (i= 0; i < m_tot_parts; i++)
+ DBUG_RETURN(error);
+}
+
+
+/*
+ Note that this function will try rollback failed ADD INDEX by
+ executing DROP INDEX for the indexes that were committed (if any)
+ before the error occured. This means that the underlying storage
+ engine must be able to drop index in-place with X-lock held.
+ (As X-lock will be held here if new indexes are to be committed)
+*/
+bool ha_partition::commit_inplace_alter_table(TABLE *altered_table,
+ Alter_inplace_info *ha_alter_info,
+ bool commit)
+{
+ uint index= 0;
+ ha_partition_inplace_ctx *part_inplace_ctx;
+
+ DBUG_ENTER("ha_partition::commit_inplace_alter_table");
+
+ part_inplace_ctx=
+ static_cast<class ha_partition_inplace_ctx*>(ha_alter_info->handler_ctx);
+
+ if (!commit && part_inplace_ctx->rollback_done)
+ DBUG_RETURN(false); // We have already rolled back changes.
+
+ for (index= 0; index < m_tot_parts; index++)
{
- if ((ret= m_file[i]->final_add_index(part_add_index->add_array[i], commit)))
+ ha_alter_info->handler_ctx= part_inplace_ctx->handler_ctx_array[index];
+ if (m_file[index]->ha_commit_inplace_alter_table(altered_table,
+ ha_alter_info, commit))
+ {
+ part_inplace_ctx->handler_ctx_array[index]= ha_alter_info->handler_ctx;
goto err;
+ }
+ part_inplace_ctx->handler_ctx_array[index]= ha_alter_info->handler_ctx;
DBUG_EXECUTE_IF("ha_partition_fail_final_add_index", {
- /* Simulate a failure by rollback the second partition */
+ /* Simulate failure by rollback of the second partition */
if (m_tot_parts > 1)
{
- i++;
- m_file[i]->final_add_index(part_add_index->add_array[i], false);
- /* Set an error that is specific to ha_partition. */
- ret= HA_ERR_NO_PARTITION_FOUND;
+ index++;
+ ha_alter_info->handler_ctx= part_inplace_ctx->handler_ctx_array[index];
+ m_file[index]->ha_commit_inplace_alter_table(altered_table,
+ ha_alter_info, false);
+ part_inplace_ctx->handler_ctx_array[index]= ha_alter_info->handler_ctx;
goto err;
}
});
}
- delete part_add_index;
- DBUG_RETURN(ret);
-err:
- uint j;
- uint *key_numbers= NULL;
- KEY *old_key_info= NULL;
- uint num_of_keys= 0;
- int error;
-
- /* How could this happen? Needed to create a covering test case :) */
- DBUG_ASSERT(ret == HA_ERR_NO_PARTITION_FOUND);
+ ha_alter_info->handler_ctx= part_inplace_ctx;
- if (i > 0)
- {
- num_of_keys= part_add_index->num_of_keys;
- key_numbers= (uint*) ha_thd()->alloc(sizeof(uint) * num_of_keys);
- if (!key_numbers)
+ DBUG_RETURN(false);
+
+err:
+ ha_alter_info->handler_ctx= part_inplace_ctx;
+ /*
+ Reverting committed changes is (for now) only possible for ADD INDEX
+ For other changes we will just try to rollback changes.
+ */
+ if (index > 0 &&
+ ha_alter_info->handler_flags & (Alter_inplace_info::ADD_INDEX |
+ Alter_inplace_info::ADD_UNIQUE_INDEX |
+ Alter_inplace_info::ADD_PK_INDEX))
+ {
+ Alter_inplace_info drop_info(ha_alter_info->create_info,
+ ha_alter_info->alter_info,
+ NULL, 0,
+ ha_alter_info->modified_part_info,
+ ha_alter_info->ignore);
+
+ if (ha_alter_info->handler_flags & Alter_inplace_info::ADD_INDEX)
+ drop_info.handler_flags|= Alter_inplace_info::DROP_INDEX;
+ if (ha_alter_info->handler_flags & Alter_inplace_info::ADD_UNIQUE_INDEX)
+ drop_info.handler_flags|= Alter_inplace_info::DROP_UNIQUE_INDEX;
+ if (ha_alter_info->handler_flags & Alter_inplace_info::ADD_PK_INDEX)
+ drop_info.handler_flags|= Alter_inplace_info::DROP_PK_INDEX;
+ drop_info.index_drop_count= ha_alter_info->index_add_count;
+ drop_info.index_drop_buffer=
+ (KEY**) ha_thd()->alloc(sizeof(KEY*) * drop_info.index_drop_count);
+ if (!drop_info.index_drop_buffer)
{
sql_print_error("Failed with error handling of adding index:\n"
"committing index failed, and when trying to revert "
"already committed partitions we failed allocating\n"
"memory for the index for table '%s'",
table_share->table_name.str);
- DBUG_RETURN(HA_ERR_OUT_OF_MEM);
+ DBUG_RETURN(true);
}
- old_key_info= table->key_info;
- /*
- Use the newly added key_info as table->key_info to remove them.
- Note that this requires the subhandlers to use name lookup of the
- index. They must use given table->key_info[key_number], they cannot
- use their local view of the keys, since table->key_info only include
- the indexes to be removed here.
- */
- for (j= 0; j < num_of_keys; j++)
- key_numbers[j]= j;
- table->key_info= part_add_index->key_info;
- }
+ for (uint i= 0; i < drop_info.index_drop_count; i++)
+ drop_info.index_drop_buffer[i]=
+ &ha_alter_info->key_info_buffer[ha_alter_info->index_add_buffer[i]];
- for (j= 0; j < m_tot_parts; j++)
- {
- if (j < i)
+ // Drop index for each partition where we already committed new index.
+ for (uint i= 0; i < index; i++)
{
- /* Remove the newly added index */
- error= m_file[j]->prepare_drop_index(table, key_numbers, num_of_keys);
- if (error || m_file[j]->final_drop_index(table))
- {
+ bool error= m_file[i]->ha_prepare_inplace_alter_table(altered_table,
+ &drop_info);
+ error|= m_file[i]->ha_inplace_alter_table(altered_table, &drop_info);
+ error|= m_file[i]->ha_commit_inplace_alter_table(altered_table,
+ &drop_info, true);
+ if (error)
sql_print_error("Failed with error handling of adding index:\n"
"committing index failed, and when trying to revert "
"already committed partitions we failed removing\n"
"the index for table '%s' partition nr %d",
- table_share->table_name.str, j);
- }
+ table_share->table_name.str, i);
}
- else if (j > i)
+
+ // Rollback uncommitted changes.
+ for (uint i= index+1; i < m_tot_parts; i++)
{
- /* Rollback non finished partitions */
- if (m_file[j]->final_add_index(part_add_index->add_array[j], false))
+ ha_alter_info->handler_ctx= part_inplace_ctx->handler_ctx_array[i];
+ if (m_file[i]->ha_commit_inplace_alter_table(altered_table,
+ ha_alter_info, false))
{
/* How could this happen? */
sql_print_error("Failed with error handling of adding index:\n"
"Rollback of add_index failed for table\n"
"'%s' partition nr %d",
- table_share->table_name.str, j);
+ table_share->table_name.str, i);
}
+ part_inplace_ctx->handler_ctx_array[i]= ha_alter_info->handler_ctx;
}
+
+ // We have now reverted/rolled back changes. Set flag to prevent
+ // it from being done again.
+ part_inplace_ctx->rollback_done= true;
+
+ print_error(HA_ERR_NO_PARTITION_FOUND, MYF(0));
}
- if (i > 0)
- table->key_info= old_key_info;
- delete part_add_index;
- DBUG_RETURN(ret);
-}
-int ha_partition::prepare_drop_index(TABLE *table_arg, uint *key_num,
- uint num_of_keys)
-{
- handler **file;
- int ret= 0;
+ ha_alter_info->handler_ctx= part_inplace_ctx;
- /*
- DROP INDEX does not affect partitioning.
- */
- for (file= m_file; *file; file++)
- if ((ret= (*file)->prepare_drop_index(table_arg, key_num, num_of_keys)))
- break;
- return ret;
+ DBUG_RETURN(true);
}
-int ha_partition::final_drop_index(TABLE *table_arg)
+void ha_partition::notify_table_changed()
{
handler **file;
- int ret= HA_ERR_WRONG_COMMAND;
+
+ DBUG_ENTER("ha_partition::notify_table_changed");
for (file= m_file; *file; file++)
- if ((ret= (*file)->final_drop_index(table_arg)))
- break;
- return ret;
+ (*file)->ha_notify_table_changed();
+
+ DBUG_VOID_RETURN;
}
@@ -7647,8 +8187,8 @@ int ha_partition::reset_auto_increment(ulonglong value)
int res;
DBUG_ENTER("ha_partition::reset_auto_increment");
lock_auto_increment();
- table_share->ha_part_data->auto_inc_initialized= FALSE;
- table_share->ha_part_data->next_auto_inc_val= 0;
+ part_share->auto_inc_initialized= false;
+ part_share->next_auto_inc_val= 0;
do
{
if ((res= (*file)->ha_reset_auto_increment(value)) != 0)
@@ -7662,7 +8202,7 @@ int ha_partition::reset_auto_increment(ulonglong value)
/**
This method is called by update_auto_increment which in turn is called
by the individual handlers as part of write_row. We use the
- table_share->ha_part_data->next_auto_inc_val, or search all
+ part_share->next_auto_inc_val, or search all
partitions for the highest auto_increment_value if not initialized or
if auto_increment field is a secondary part of a key, we must search
every partition when holding a mutex to be sure of correctness.
@@ -7718,9 +8258,9 @@ void ha_partition::get_auto_increment(ulonglong offset, ulonglong increment,
/*
This is initialized in the beginning of the first write_row call.
*/
- DBUG_ASSERT(table_share->ha_part_data->auto_inc_initialized);
+ DBUG_ASSERT(part_share->auto_inc_initialized);
/*
- Get a lock for handling the auto_increment in table_share->ha_part_data
+ Get a lock for handling the auto_increment in part_share
for avoiding two concurrent statements getting the same number.
*/
@@ -7747,9 +8287,8 @@ void ha_partition::get_auto_increment(ulonglong offset, ulonglong increment,
}
/* this gets corrected (for offset/increment) in update_auto_increment */
- *first_value= table_share->ha_part_data->next_auto_inc_val;
- table_share->ha_part_data->next_auto_inc_val+=
- nb_desired_values * increment;
+ *first_value= part_share->next_auto_inc_val;
+ part_share->next_auto_inc_val+= nb_desired_values * increment;
unlock_auto_increment();
DBUG_PRINT("info", ("*first_value: %lu", (ulong) *first_value));
@@ -7764,14 +8303,19 @@ void ha_partition::release_auto_increment()
if (table->s->next_number_keypart)
{
- for (uint i= 0; i < m_tot_parts; i++)
+ uint i;
+ for (i= bitmap_get_first_set(&m_part_info->lock_partitions);
+ i < m_tot_parts;
+ i= bitmap_get_next_set(&m_part_info->lock_partitions, i))
+ {
m_file[i]->ha_release_auto_increment();
+ }
}
else if (next_insert_id)
{
ulonglong next_auto_inc_val;
lock_auto_increment();
- next_auto_inc_val= table_share->ha_part_data->next_auto_inc_val;
+ next_auto_inc_val= part_share->next_auto_inc_val;
/*
If the current auto_increment values is lower than the reserved
value, and the reserved value was reserved by this thread,
@@ -7786,10 +8330,10 @@ void ha_partition::release_auto_increment()
with SET INSERT_ID, i.e. forced/non generated values.
*/
if (thd->auto_inc_intervals_forced.maximum() < next_insert_id)
- table_share->ha_part_data->next_auto_inc_val= next_insert_id;
+ part_share->next_auto_inc_val= next_insert_id;
}
- DBUG_PRINT("info", ("table_share->ha_part_data->next_auto_inc_val: %lu",
- (ulong) table_share->ha_part_data->next_auto_inc_val));
+ DBUG_PRINT("info", ("part_share->next_auto_inc_val: %lu",
+ (ulong) part_share->next_auto_inc_val));
/* Unlock the multi row statement lock taken in get_auto_increment */
if (auto_increment_safe_stmt_log_lock)
@@ -7813,6 +8357,27 @@ void ha_partition::init_table_handle_for_HANDLER()
}
+/**
+ Return the checksum of the table (all partitions)
+*/
+
+uint ha_partition::checksum() const
+{
+ ha_checksum sum= 0;
+
+ DBUG_ENTER("ha_partition::checksum");
+ if ((table_flags() & (HA_HAS_OLD_CHECKSUM | HA_HAS_NEW_CHECKSUM)))
+ {
+ handler **file= m_file;
+ do
+ {
+ sum+= (*file)->checksum();
+ } while (*(++file));
+ }
+ DBUG_RETURN(sum);
+}
+
+
/****************************************************************************
MODULE enable/disable indexes
****************************************************************************/
@@ -7832,6 +8397,7 @@ int ha_partition::disable_indexes(uint mode)
handler **file;
int error= 0;
+ DBUG_ASSERT(bitmap_is_set_all(&(m_part_info->lock_partitions)));
for (file= m_file; *file; file++)
{
if ((error= (*file)->ha_disable_indexes(mode)))
@@ -7856,6 +8422,7 @@ int ha_partition::enable_indexes(uint mode)
handler **file;
int error= 0;
+ DBUG_ASSERT(bitmap_is_set_all(&(m_part_info->lock_partitions)));
for (file= m_file; *file; file++)
{
if ((error= (*file)->ha_enable_indexes(mode)))
@@ -7880,6 +8447,7 @@ int ha_partition::indexes_are_disabled(void)
handler **file;
int error= 0;
+ DBUG_ASSERT(bitmap_is_set_all(&(m_part_info->lock_partitions)));
for (file= m_file; *file; file++)
{
if ((error= (*file)->indexes_are_disabled()))
@@ -7889,288 +8457,6 @@ int ha_partition::indexes_are_disabled(void)
}
-/**
- Check/fix misplaced rows.
-
- @param read_part_id Partition to check/fix.
- @param repair If true, move misplaced rows to correct partition.
-
- @return Operation status.
- @retval 0 Success
- @retval != 0 Error
-*/
-
-int ha_partition::check_misplaced_rows(uint read_part_id, bool repair)
-{
- int result= 0;
- uint32 correct_part_id;
- longlong func_value;
- longlong num_misplaced_rows= 0;
-
- DBUG_ENTER("ha_partition::check_misplaced_rows");
-
- DBUG_ASSERT(m_file);
-
- if (repair)
- {
- /* We must read the full row, if we need to move it! */
- bitmap_set_all(table->read_set);
- bitmap_set_all(table->write_set);
- }
- else
- {
- /* Only need to read the partitioning fields. */
- bitmap_union(table->read_set, &m_part_info->full_part_field_set);
- }
-
- if ((result= m_file[read_part_id]->ha_rnd_init(1)))
- DBUG_RETURN(result);
-
- while (true)
- {
- if ((result= m_file[read_part_id]->rnd_next(m_rec0)))
- {
- if (result == HA_ERR_RECORD_DELETED)
- continue;
- if (result != HA_ERR_END_OF_FILE)
- break;
-
- if (num_misplaced_rows > 0)
- {
- print_admin_msg(ha_thd(), "warning", table_share->db.str, table->alias,
- opt_op_name[REPAIR_PARTS],
- "Moved %lld misplaced rows",
- num_misplaced_rows);
- }
- /* End-of-file reached, all rows are now OK, reset result and break. */
- result= 0;
- break;
- }
-
- result= m_part_info->get_partition_id(m_part_info, &correct_part_id,
- &func_value);
- if (result)
- break;
-
- if (correct_part_id != read_part_id)
- {
- num_misplaced_rows++;
- if (!repair)
- {
- /* Check. */
- print_admin_msg(ha_thd(), "error", table_share->db.str, table->alias,
- opt_op_name[CHECK_PARTS],
- "Found a misplaced row");
- /* Break on first misplaced row! */
- result= HA_ADMIN_NEEDS_UPGRADE;
- break;
- }
- else
- {
- DBUG_PRINT("info", ("Moving row from partition %d to %d",
- read_part_id, correct_part_id));
-
- /*
- Insert row into correct partition. Notice that there are no commit
- for every N row, so the repair will be one large transaction!
- */
- if ((result= m_file[correct_part_id]->ha_write_row(m_rec0)))
- {
- /*
- We have failed to insert a row, it might have been a duplicate!
- */
- char buf[MAX_KEY_LENGTH];
- String str(buf,sizeof(buf),system_charset_info);
- str.length(0);
- if (result == HA_ERR_FOUND_DUPP_KEY)
- {
- str.append("Duplicate key found, "
- "please update or delete the record:\n");
- result= HA_ADMIN_CORRUPT;
- }
- m_err_rec= NULL;
- append_row_to_str(str);
-
- /*
- If the engine supports transactions, the failure will be
- rollbacked.
- */
- if (!m_file[correct_part_id]->has_transactions())
- {
- /* Log this error, so the DBA can notice it and fix it! */
- sql_print_error("Table '%-192s' failed to move/insert a row"
- " from part %d into part %d:\n%s",
- table->s->table_name.str,
- read_part_id,
- correct_part_id,
- str.c_ptr_safe());
- }
- print_admin_msg(ha_thd(), "error", table_share->db.str, table->alias,
- opt_op_name[REPAIR_PARTS],
- "Failed to move/insert a row"
- " from part %d into part %d:\n%s",
- read_part_id,
- correct_part_id,
- str.c_ptr_safe());
- break;
- }
-
- /* Delete row from wrong partition. */
- if ((result= m_file[read_part_id]->ha_delete_row(m_rec0)))
- {
- if (m_file[correct_part_id]->has_transactions())
- break;
- /*
- We have introduced a duplicate, since we failed to remove it
- from the wrong partition.
- */
- char buf[MAX_KEY_LENGTH];
- String str(buf,sizeof(buf),system_charset_info);
- str.length(0);
- m_err_rec= NULL;
- append_row_to_str(str);
-
- /* Log this error, so the DBA can notice it and fix it! */
- sql_print_error("Table '%-192s': Delete from part %d failed with"
- " error %d. But it was already inserted into"
- " part %d, when moving the misplaced row!"
- "\nPlease manually fix the duplicate row:\n%s",
- table->s->table_name.str,
- read_part_id,
- result,
- correct_part_id,
- str.c_ptr_safe());
- break;
- }
- }
- }
- }
-
- int tmp_result= m_file[read_part_id]->ha_rnd_end();
- DBUG_RETURN(result ? result : tmp_result);
-}
-
-
-#define KEY_PARTITIONING_CHANGED_STR \
- "KEY () partitioning changed, please run:\nALTER TABLE %s.%s %s"
-
-int ha_partition::check_for_upgrade(HA_CHECK_OPT *check_opt)
-{
- int error= HA_ADMIN_NEEDS_CHECK;
- DBUG_ENTER("ha_partition::check_for_upgrade");
-
- /*
- This is called even without FOR UPGRADE,
- if the .frm version is lower than the current version.
- In that case return that it needs checking!
- */
- if (!(check_opt->sql_flags & TT_FOR_UPGRADE))
- DBUG_RETURN(error);
-
- /*
- Partitions will be checked for during their ha_check!
-
- Check if KEY (sub)partitioning was used and any field's hash calculation
- differs from 5.1, see bug#14521864.
- */
- if (table->s->mysql_version < 50503 && // 5.1 table (<5.5.3)
- ((m_part_info->part_type == HASH_PARTITION && // KEY partitioned
- m_part_info->list_of_part_fields) ||
- (m_is_sub_partitioned && // KEY subpartitioned
- m_part_info->list_of_subpart_fields)))
- {
- Field **field;
- if (m_is_sub_partitioned)
- {
- field= m_part_info->subpart_field_array;
- }
- else
- {
- field= m_part_info->part_field_array;
- }
- for (; *field; field++)
- {
- switch ((*field)->real_type()) {
- case MYSQL_TYPE_TINY:
- case MYSQL_TYPE_SHORT:
- case MYSQL_TYPE_LONG:
- case MYSQL_TYPE_FLOAT:
- case MYSQL_TYPE_DOUBLE:
- case MYSQL_TYPE_NEWDECIMAL:
- case MYSQL_TYPE_TIMESTAMP:
- case MYSQL_TYPE_LONGLONG:
- case MYSQL_TYPE_INT24:
- case MYSQL_TYPE_TIME:
- case MYSQL_TYPE_DATETIME:
- case MYSQL_TYPE_YEAR:
- case MYSQL_TYPE_NEWDATE:
- case MYSQL_TYPE_ENUM:
- case MYSQL_TYPE_SET:
- {
- THD *thd= ha_thd();
- char *part_buf;
- String db_name, table_name;
- uint part_buf_len;
- bool skip_generation= false;
- partition_info::enum_key_algorithm old_algorithm;
- old_algorithm= m_part_info->key_algorithm;
- error= HA_ADMIN_FAILED;
- append_identifier(ha_thd(), &db_name, table_share->db.str,
- table_share->db.length);
- append_identifier(ha_thd(), &table_name, table_share->table_name.str,
- table_share->table_name.length);
- if (m_part_info->key_algorithm != partition_info::KEY_ALGORITHM_NONE)
- {
- /*
- Only possible when someone tampered with .frm files,
- like during tests :)
- */
- skip_generation= true;
- }
- m_part_info->key_algorithm= partition_info::KEY_ALGORITHM_51;
- if (skip_generation ||
- !(part_buf= generate_partition_syntax(m_part_info,
- &part_buf_len,
- true,
- true,
- NULL,
- NULL,
- NULL)) ||
- /* Also check that the length is smaller than the output field! */
- (part_buf_len + db_name.length() + table_name.length()) >=
- (SQL_ADMIN_MSG_TEXT_SIZE -
- (strlen(KEY_PARTITIONING_CHANGED_STR) - 3)))
- {
- print_admin_msg(thd, "error", table_share->db.str, table->alias,
- opt_op_name[CHECK_PARTS],
- KEY_PARTITIONING_CHANGED_STR,
- db_name.c_ptr_safe(), table_name.c_ptr_safe(),
- "<old partition clause>, but add ALGORITHM = 1"
- " between 'KEY' and '(' to change the metadata"
- " without the need of a full table rebuild.");
- }
- else
- {
- print_admin_msg(thd, "error", table_share->db.str, table->alias,
- opt_op_name[CHECK_PARTS],
- KEY_PARTITIONING_CHANGED_STR,
- db_name.c_ptr_safe(), table_name.c_ptr_safe(),
- part_buf);
- }
- m_part_info->key_algorithm= old_algorithm;
- DBUG_RETURN(error);
- }
- default:
- /* Not affected! */
- ;
- }
- }
- }
-
- DBUG_RETURN(error);
-}
-
-
struct st_mysql_storage_engine partition_storage_engine=
{ MYSQL_HANDLERTON_INTERFACE_VERSION };
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index fd1056d7b3f..fc1f1a600d0 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -2,24 +2,21 @@
#define HA_PARTITION_INCLUDED
/*
- Copyright (c) 2005, 2013, Oracle and/or its affiliates.
+ Copyright (c) 2005, 2012, Oracle and/or its affiliates.
+ Copyright (c) 2009, 2013, Monty Program Ab & SkySQL Ab.
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; version 2 of the License.
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
-
-#ifdef __GNUC__
-#pragma interface /* gcc class implementation */
-#endif
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
#include "sql_partition.h" /* part_id_range, partition_element */
#include "queues.h" /* QUEUE */
@@ -27,27 +24,98 @@
enum partition_keywords
{
PKW_HASH= 0, PKW_RANGE, PKW_LIST, PKW_KEY, PKW_MAXVALUE, PKW_LINEAR,
- PKW_COLUMNS, PKW_ALGORITHM
+ PKW_COLUMNS
};
+
#define PARTITION_BYTES_IN_POS 2
-#define PARTITION_ENABLED_TABLE_FLAGS (HA_FILE_BASED | \
- HA_REC_NOT_IN_SEQ | \
- HA_CAN_REPAIR)
-#define PARTITION_DISABLED_TABLE_FLAGS (HA_CAN_GEOMETRY | \
- HA_CAN_FULLTEXT | \
- HA_DUPLICATE_POS | \
- HA_CAN_SQL_HANDLER | \
- HA_CAN_INSERT_DELAYED)
-
-/* First 4 bytes in the .par file is the number of 32-bit words in the file */
-#define PAR_WORD_SIZE 4
-/* offset to the .par file checksum */
-#define PAR_CHECKSUM_OFFSET 4
-/* offset to the total number of partitions */
-#define PAR_NUM_PARTS_OFFSET 8
-/* offset to the engines array */
-#define PAR_ENGINES_OFFSET 12
+
+
+/** Struct used for partition_name_hash */
+typedef struct st_part_name_def
+{
+ uchar *partition_name;
+ uint length;
+ uint32 part_id;
+ my_bool is_subpart;
+} PART_NAME_DEF;
+
+/** class where to save partitions Handler_share's */
+class Parts_share_refs
+{
+public:
+ uint num_parts; /**< Size of ha_share array */
+ Handler_share **ha_shares; /**< Storage for each part */
+ Parts_share_refs()
+ {
+ num_parts= 0;
+ ha_shares= NULL;
+ }
+ ~Parts_share_refs()
+ {
+ uint i;
+ for (i= 0; i < num_parts; i++)
+ if (ha_shares[i])
+ delete ha_shares[i];
+ if (ha_shares)
+ delete [] ha_shares;
+ }
+ bool init(uint arg_num_parts)
+ {
+ DBUG_ASSERT(!num_parts && !ha_shares);
+ num_parts= arg_num_parts;
+ /* Allocate an array of Handler_share pointers */
+ ha_shares= new Handler_share *[num_parts];
+ if (!ha_shares)
+ {
+ num_parts= 0;
+ return true;
+ }
+ memset(ha_shares, 0, sizeof(Handler_share*) * num_parts);
+ return false;
+ }
+};
+
+
+/**
+ Partition specific Handler_share.
+*/
+class Partition_share : public Handler_share
+{
+public:
+ bool auto_inc_initialized;
+ mysql_mutex_t auto_inc_mutex; /**< protecting auto_inc val */
+ ulonglong next_auto_inc_val; /**< first non reserved value */
+ /**
+ Hash of partition names. Initialized in the first ha_partition::open()
+ for the table_share. After that it is read-only, i.e. no locking required.
+ */
+ bool partition_name_hash_initialized;
+ HASH partition_name_hash;
+ /** Storage for each partitions Handler_share */
+ Parts_share_refs *partitions_share_refs;
+ Partition_share() {}
+ ~Partition_share()
+ {
+ DBUG_ENTER("Partition_share::~Partition_share");
+ mysql_mutex_destroy(&auto_inc_mutex);
+ if (partition_name_hash_initialized)
+ my_hash_free(&partition_name_hash);
+ if (partitions_share_refs)
+ delete partitions_share_refs;
+ DBUG_VOID_RETURN;
+ }
+ bool init(uint num_parts);
+ void lock_auto_inc()
+ {
+ mysql_mutex_lock(&auto_inc_mutex);
+ }
+ void unlock_auto_inc()
+ {
+ mysql_mutex_unlock(&auto_inc_mutex);
+ }
+};
+
class ha_partition :public handler
{
@@ -58,13 +126,14 @@ private:
partition_index_first= 1,
partition_index_first_unordered= 2,
partition_index_last= 3,
- partition_read_range = 4,
- partition_no_index_scan= 5
+ partition_index_read_last= 4,
+ partition_read_range = 5,
+ partition_no_index_scan= 6
};
/* Data for the partition handler */
int m_mode; // Open mode
uint m_open_test_lock; // Open test_if_locked
- char *m_file_buffer; // Content of the .par file
+ uchar *m_file_buffer; // Content of the .par file
char *m_name_buffer_ptr; // Pointer to first partition name
MEM_ROOT m_mem_root;
plugin_ref *m_engine_array; // Array of types of the handlers
@@ -86,7 +155,6 @@ private:
*/
KEY *m_curr_key_info[3]; // Current index
uchar *m_rec0; // table->record[0]
- const uchar *m_err_rec; // record which gave error
QUEUE m_queue; // Prio queue used by sorted read
/*
Since the partition handler is a handler on top of other handlers, it
@@ -108,8 +176,6 @@ private:
uint m_tot_parts; // Total number of partitions;
uint m_num_locks; // For engines like ha_blackhole, which needs no locks
uint m_last_part; // Last file that we update,write,read
- int m_lock_type; // Remembers type of last
- // external_lock
part_id_range m_part_spec; // Which parts to scan
uint m_scan_value; // Value passed in rnd_init
// call
@@ -179,16 +245,25 @@ private:
ha_rows m_bulk_inserted_rows;
/** used for prediction of start_bulk_insert rows */
enum_monotonicity_info m_part_func_monotonicity_info;
+ /** keep track of locked partitions */
+ MY_BITMAP m_locked_partitions;
+ /** Stores shared auto_increment etc. */
+ Partition_share *part_share;
+ /** Temporary storage for new partitions Handler_shares during ALTER */
+ List<Parts_share_refs> m_new_partitions_share_refs;
/** Sorted array of partition ids in descending order of number of rows. */
uint32 *m_part_ids_sorted_by_num_of_records;
/* Compare function for my_qsort2, for reversed order. */
static int compare_number_of_records(ha_partition *me,
const uint32 *a,
const uint32 *b);
+ /** keep track of partitions to call ha_reset */
+ MY_BITMAP m_partitions_to_reset;
/** partitions that returned HA_ERR_KEY_NOT_FOUND. */
MY_BITMAP m_key_not_found_partitions;
bool m_key_not_found;
public:
+ Partition_share *get_part_share() { return part_share; }
handler *clone(const char *name, MEM_ROOT *mem_root);
virtual void set_part_info(partition_info *part_info)
{
@@ -263,18 +338,16 @@ public:
virtual bool check_if_incompatible_data(HA_CREATE_INFO *create_info,
uint table_changes);
private:
- int prepare_for_rename();
int copy_partitions(ulonglong * const copied, ulonglong * const deleted);
void cleanup_new_partition(uint part_count);
int prepare_new_partition(TABLE *table, HA_CREATE_INFO *create_info,
handler *file, const char *part_name,
partition_element *p_elem);
/*
- delete_table, rename_table and create uses very similar logic which
+ delete_table and rename_table uses very similar logic which
is packed into this routine.
*/
- uint del_ren_cre_table(const char *from, const char *to,
- TABLE *table_arg, HA_CREATE_INFO *create_info);
+ uint del_ren_table(const char *from, const char *to);
/*
One method to create the table_name.par file containing the names of the
underlying partitions, their engine and the number of partitions.
@@ -291,9 +364,16 @@ private:
int set_up_table_before_create(TABLE *table_arg,
const char *partition_name_with_path,
HA_CREATE_INFO *info,
- uint part_id,
partition_element *p_elem);
partition_element *find_partition_element(uint part_id);
+ bool insert_partition_name_in_hash(const char *name, uint part_id,
+ bool is_subpart);
+ bool populate_partition_name_hash();
+ Partition_share *get_share();
+ bool set_ha_share_ref(Handler_share **ha_share);
+ void fix_data_dir(char* path);
+ bool init_partition_bitmaps();
+ void free_partition_bitmaps();
public:
@@ -311,8 +391,6 @@ public:
If the object was opened it will also be closed before being deleted.
*/
virtual int open(const char *name, int mode, uint test_if_locked);
- virtual void unbind_psi();
- virtual void rebind_psi();
virtual int close(void);
/*
@@ -355,6 +433,18 @@ public:
virtual void try_semi_consistent_read(bool);
/*
+ NOTE: due to performance and resource issues with many partitions,
+ we only use the m_psi on the ha_partition handler, excluding all
+ partitions m_psi.
+ */
+#ifdef HAVE_M_PSI_PER_PARTITION
+ /*
+ Bind the table/handler thread to track table i/o.
+ */
+ virtual void unbind_psi();
+ virtual void rebind_psi();
+#endif
+ /*
-------------------------------------------------------------------------
MODULE change record
-------------------------------------------------------------------------
@@ -399,10 +489,13 @@ public:
virtual bool is_fatal_error(int error, uint flags)
{
if (!handler::is_fatal_error(error, flags) ||
- error == HA_ERR_NO_PARTITION_FOUND)
+ error == HA_ERR_NO_PARTITION_FOUND ||
+ error == HA_ERR_NOT_IN_LOCK_PARTITIONS)
return FALSE;
return TRUE;
}
+
+
/*
-------------------------------------------------------------------------
MODULE full table scan
@@ -527,7 +620,6 @@ private:
int handle_ordered_next(uchar * buf, bool next_same);
int handle_ordered_prev(uchar * buf);
void return_top_record(uchar * buf);
- void column_bitmaps_signal();
public:
/*
-------------------------------------------------------------------------
@@ -553,13 +645,17 @@ public:
private:
my_bool reg_query_cache_dependant_table(THD *thd,
- char *key, uint key_len, uint8 type,
+ char *engine_key,
+ uint engine_key_len,
+ char *query_key, uint query_key_len,
+ uint8 type,
Query_cache *cache,
Query_cache_block_table
**block_table,
handler *file, uint *n);
static const uint NO_CURRENT_PART_ID;
int loop_extra(enum ha_extra_function operation);
+ int loop_extra_alter(enum ha_extra_function operations);
void late_extra_cache(uint partition_id);
void late_extra_no_cache(uint partition_id);
void prepare_extra_cache(uint cachesize);
@@ -628,6 +724,9 @@ public:
virtual uint8 table_cache_type();
virtual ha_rows records();
+ /* Calculate hash value for PARTITION BY KEY tables. */
+ static uint32 calculate_key_hash_value(Field **field_array);
+
/*
-------------------------------------------------------------------------
MODULE print messages
@@ -643,6 +742,9 @@ public:
*/
virtual const char *index_type(uint inx);
+ /* The name of the table type that will be used for display purposes */
+ virtual const char *table_type() const;
+
/* The name of the row type used for the underlying tables. */
virtual enum row_type get_row_type() const;
@@ -804,17 +906,7 @@ public:
HA_CAN_INSERT_DELAYED, HA_PRIMARY_KEY_REQUIRED_FOR_POSITION is disabled
until further investigated.
*/
- virtual Table_flags table_flags() const
- {
- DBUG_ENTER("ha_partition::table_flags");
- if (m_handler_status < handler_initialized ||
- m_handler_status >= handler_closed)
- DBUG_RETURN(PARTITION_ENABLED_TABLE_FLAGS);
-
- DBUG_RETURN((m_file[0]->ha_table_flags() &
- ~(PARTITION_DISABLED_TABLE_FLAGS)) |
- (PARTITION_ENABLED_TABLE_FLAGS));
- }
+ virtual Table_flags table_flags() const;
/*
This is a bitmap of flags that says how the storage engine
@@ -955,16 +1047,15 @@ private:
/* lock already taken */
if (auto_increment_safe_stmt_log_lock)
return;
- DBUG_ASSERT(table_share->ha_part_data && !auto_increment_lock);
+ DBUG_ASSERT(!auto_increment_lock);
if(table_share->tmp_table == NO_TMP_TABLE)
{
auto_increment_lock= TRUE;
- mysql_mutex_lock(&table_share->ha_part_data->LOCK_auto_inc);
+ part_share->lock_auto_inc();
}
}
virtual void unlock_auto_increment()
{
- DBUG_ASSERT(table_share->ha_part_data);
/*
If auto_increment_safe_stmt_log_lock is true, we have to keep the lock.
It will be set to false and thus unlocked at the end of the statement by
@@ -972,7 +1063,7 @@ private:
*/
if(auto_increment_lock && !auto_increment_safe_stmt_log_lock)
{
- mysql_mutex_unlock(&table_share->ha_part_data->LOCK_auto_inc);
+ part_share->unlock_auto_inc();
auto_increment_lock= FALSE;
}
}
@@ -981,10 +1072,10 @@ private:
ulonglong nr= (((Field_num*) field)->unsigned_flag ||
field->val_int() > 0) ? field->val_int() : 0;
lock_auto_increment();
- DBUG_ASSERT(table_share->ha_part_data->auto_inc_initialized == TRUE);
+ DBUG_ASSERT(part_share->auto_inc_initialized);
/* must check when the mutex is taken */
- if (nr >= table_share->ha_part_data->next_auto_inc_val)
- table_share->ha_part_data->next_auto_inc_val= nr + 1;
+ if (nr >= part_share->next_auto_inc_val)
+ part_share->next_auto_inc_val= nr + 1;
unlock_auto_increment();
}
@@ -1050,18 +1141,23 @@ public:
/*
-------------------------------------------------------------------------
- MODULE on-line ALTER TABLE
+ MODULE in-place ALTER TABLE
-------------------------------------------------------------------------
These methods are in the handler interface. (used by innodb-plugin)
- They are used for on-line/fast alter table add/drop index:
+ They are used for in-place alter table:
-------------------------------------------------------------------------
*/
- virtual int add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys,
- handler_add_index **add);
- virtual int final_add_index(handler_add_index *add, bool commit);
- virtual int prepare_drop_index(TABLE *table_arg, uint *key_num,
- uint num_of_keys);
- virtual int final_drop_index(TABLE *table_arg);
+ virtual enum_alter_inplace_result
+ check_if_supported_inplace_alter(TABLE *altered_table,
+ Alter_inplace_info *ha_alter_info);
+ virtual bool prepare_inplace_alter_table(TABLE *altered_table,
+ Alter_inplace_info *ha_alter_info);
+ virtual bool inplace_alter_table(TABLE *altered_table,
+ Alter_inplace_info *ha_alter_info);
+ virtual bool commit_inplace_alter_table(TABLE *altered_table,
+ Alter_inplace_info *ha_alter_info,
+ bool commit);
+ virtual void notify_table_changed();
/*
-------------------------------------------------------------------------
@@ -1092,18 +1188,9 @@ public:
virtual bool check_and_repair(THD *thd);
virtual bool auto_repair(int error) const;
virtual bool is_crashed() const;
- virtual int check_for_upgrade(HA_CHECK_OPT *check_opt);
private:
int handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt, uint flags);
- int handle_opt_part(THD *thd, HA_CHECK_OPT *check_opt, uint part_id,
- uint flag);
- /**
- Check if the rows are placed in the correct partition. If the given
- argument is true, then move the rows to the correct partition.
- */
- int check_misplaced_rows(uint read_part_id, bool repair);
- void append_row_to_str(String &str);
public:
/*
-------------------------------------------------------------------------
@@ -1115,8 +1202,8 @@ public:
virtual int restore(THD* thd, HA_CHECK_OPT *check_opt);
virtual int dump(THD* thd, int fd = -1);
virtual int net_read_dump(NET* net);
- virtual uint checksum() const;
*/
+ virtual uint checksum() const;
/* Enabled keycache for performance reasons, WL#4571 */
virtual int assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt);
virtual int preload_keys(THD* thd, HA_CHECK_OPT* check_opt);
diff --git a/sql/handler.cc b/sql/handler.cc
index 726b663341b..685bb6e6c30 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -62,7 +62,7 @@ static handlerton *installed_htons[128];
#define BITMAP_STACKBUF_SIZE (128/8)
KEY_CREATE_INFO default_key_create_info=
- { HA_KEY_ALG_UNDEF, 0, {NullS, 0}, {NullS, 0} };
+{ HA_KEY_ALG_UNDEF, 0, {NullS, 0}, {NullS, 0}, true };
/* number of entries in handlertons[] */
ulong total_ha= 0;
@@ -99,6 +99,7 @@ uint known_extensions_id= 0;
static int commit_one_phase_2(THD *thd, bool all, THD_TRANS *trans,
bool is_real_trans);
+
static plugin_ref ha_default_plugin(THD *thd)
{
if (thd->variables.table_plugin)
@@ -1142,10 +1143,11 @@ int ha_prepare(THD *thd)
}
else
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_GET_ERRNO, ER(ER_GET_ERRNO),
HA_ERR_WRONG_COMMAND,
ha_resolve_storage_engine_name(ht));
+
}
}
}
@@ -1251,7 +1253,7 @@ int ha_commit_trans(THD *thd, bool all)
/* Just a random warning to test warnings pushed during autocommit. */
DBUG_EXECUTE_IF("warn_during_ha_commit_trans",
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARNING_NOT_COMPLETE_ROLLBACK,
ER(ER_WARNING_NOT_COMPLETE_ROLLBACK)););
@@ -1571,7 +1573,7 @@ int ha_rollback_trans(THD *thd, bool all)
trans->no_2pc=0;
if (is_real_trans && thd->transaction_rollback_request &&
thd->transaction.xid_state.xa_state != XA_NOTR)
- thd->transaction.xid_state.rm_error= thd->stmt_da->sql_errno();
+ thd->transaction.xid_state.rm_error= thd->get_stmt_da()->sql_errno();
}
/* Always cleanup. Even if nht==0. There may be savepoints. */
if (is_real_trans)
@@ -1594,7 +1596,7 @@ int ha_rollback_trans(THD *thd, bool all)
*/
if (is_real_trans && thd->transaction.all.modified_non_trans_table &&
!thd->slave_thread && thd->killed < KILL_CONNECTION)
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARNING_NOT_COMPLETE_ROLLBACK,
ER(ER_WARNING_NOT_COMPLETE_ROLLBACK));
(void) RUN_HOOK(transaction, after_rollback, (thd, FALSE));
@@ -2087,7 +2089,7 @@ int ha_start_consistent_snapshot(THD *thd)
exist:
*/
if (warn)
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
"This MySQL server does not support any "
"consistent-read capable storage engine");
return 0;
@@ -2183,9 +2185,9 @@ public:
virtual bool handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
- MYSQL_ERROR::enum_warning_level level,
+ Sql_condition::enum_warning_level level,
const char* msg,
- MYSQL_ERROR ** cond_hdl);
+ Sql_condition ** cond_hdl);
char buff[MYSQL_ERRMSG_SIZE];
};
@@ -2195,9 +2197,9 @@ Ha_delete_table_error_handler::
handle_condition(THD *,
uint,
const char*,
- MYSQL_ERROR::enum_warning_level,
+ Sql_condition::enum_warning_level,
const char* msg,
- MYSQL_ERROR ** cond_hdl)
+ Sql_condition ** cond_hdl)
{
*cond_hdl= NULL;
/* Grab the error message */
@@ -2262,7 +2264,7 @@ int ha_delete_table(THD *thd, handlerton *table_type, const char *path,
XXX: should we convert *all* errors to warnings here?
What if the error is fatal?
*/
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, error,
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN, error,
ha_delete_table_error_handler.buff);
}
delete file;
@@ -2276,8 +2278,11 @@ int ha_delete_table(THD *thd, handlerton *table_type, const char *path,
handler *handler::clone(const char *name, MEM_ROOT *mem_root)
{
handler *new_handler= get_new_handler(table->s, mem_root, ht);
- if (! new_handler)
+
+ if (!new_handler)
return NULL;
+ if (new_handler->set_ha_share_ref(ha_share))
+ goto err;
/*
Allocate handler->ref here because otherwise ha_open will allocate it
@@ -2287,7 +2292,7 @@ handler *handler::clone(const char *name, MEM_ROOT *mem_root)
if (!(new_handler->ref= (uchar*) alloc_root(mem_root,
ALIGN_SIZE(ref_length)*2)))
- return NULL;
+ goto err;
/*
TODO: Implement a more efficient way to have more than one index open for
@@ -2298,9 +2303,13 @@ handler *handler::clone(const char *name, MEM_ROOT *mem_root)
*/
if (new_handler->ha_open(table, name, table->db_stat,
HA_OPEN_IGNORE_IF_LOCKED))
- return NULL;
+ goto err;
return new_handler;
+
+err:
+ delete new_handler;
+ return NULL;
}
@@ -2377,6 +2386,8 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode,
table= table_arg;
DBUG_ASSERT(table->s == table_share);
+ DBUG_ASSERT(m_lock_type == F_UNLCK);
+ DBUG_PRINT("info", ("old m_lock_type: %d F_UNLCK %d", m_lock_type, F_UNLCK));
DBUG_ASSERT(alloc_root_inited(&table->mem_root));
if ((error=open(name,mode,test_if_locked)))
@@ -2397,7 +2408,15 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode,
{
DBUG_ASSERT(m_psi == NULL);
DBUG_ASSERT(table_share != NULL);
- m_psi= PSI_CALL_open_table(ha_table_share_psi(), this);
+ /*
+ Do not call this for partitions handlers, since it may take too much
+ resources.
+ So only use the m_psi on table level, not for individual partitions.
+ */
+ if (!(test_if_locked & HA_OPEN_NO_PSI_CALL))
+ {
+ m_psi= PSI_CALL_open_table(ha_table_share_psi(), this);
+ }
if (table->s->db_options_in_use & HA_OPTION_READ_ONLY_DATA)
table->db_stat|=HA_READ_ONLY;
@@ -2431,12 +2450,18 @@ int handler::ha_close(void)
PSI_CALL_close_table(m_psi);
m_psi= NULL; /* instrumentation handle, invalid after close_table() */
+ DBUG_ASSERT(m_lock_type == F_UNLCK);
+ DBUG_ASSERT(inited == NONE);
DBUG_RETURN(close());
}
int handler::ha_rnd_next(uchar *buf)
{
int result;
+ DBUG_ENTER("handler::ha_rnd_next");
+ DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
+ m_lock_type != F_UNLCK);
+ DBUG_ASSERT(inited == RND);
MYSQL_TABLE_IO_WAIT(m_psi, PSI_TABLE_FETCH_ROW, MAX_KEY, 0,
{ result= rnd_next(buf); })
@@ -2451,12 +2476,17 @@ int handler::ha_rnd_next(uchar *buf)
increment_statistics(&SSV::ha_read_rnd_next_count);
table->status=result ? STATUS_NOT_FOUND: 0;
- return result;
+ DBUG_RETURN(result);
}
int handler::ha_rnd_pos(uchar *buf, uchar *pos)
{
int result;
+ DBUG_ENTER("handler::ha_rnd_pos");
+ DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
+ m_lock_type != F_UNLCK);
+ /* TODO: Find out how to solve ha_rnd_pos when finding duplicate update. */
+ /* DBUG_ASSERT(inited == RND); */
MYSQL_TABLE_IO_WAIT(m_psi, PSI_TABLE_FETCH_ROW, MAX_KEY, 0,
{ result= rnd_pos(buf, pos); })
@@ -2464,7 +2494,7 @@ int handler::ha_rnd_pos(uchar *buf, uchar *pos)
if (!result)
update_rows_read();
table->status=result ? STATUS_NOT_FOUND: 0;
- return result;
+ DBUG_RETURN(result);
}
int handler::ha_index_read_map(uchar *buf, const uchar *key,
@@ -2472,6 +2502,9 @@ int handler::ha_index_read_map(uchar *buf, const uchar *key,
enum ha_rkey_function find_flag)
{
int result;
+ DBUG_ENTER("handler::ha_index_read_map");
+ DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
+ m_lock_type != F_UNLCK);
DBUG_ASSERT(inited==INDEX);
MYSQL_TABLE_IO_WAIT(m_psi, PSI_TABLE_FETCH_ROW, active_index, 0,
@@ -2480,7 +2513,7 @@ int handler::ha_index_read_map(uchar *buf, const uchar *key,
if (!result)
update_index_statistics();
table->status=result ? STATUS_NOT_FOUND: 0;
- return result;
+ DBUG_RETURN(result);
}
/*
@@ -2495,6 +2528,8 @@ int handler::ha_index_read_idx_map(uchar *buf, uint index, const uchar *key,
{
int result;
DBUG_ASSERT(inited==NONE);
+ DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
+ m_lock_type != F_UNLCK);
DBUG_ASSERT(end_range == NULL);
MYSQL_TABLE_IO_WAIT(m_psi, PSI_TABLE_FETCH_ROW, index, 0,
{ result= index_read_idx_map(buf, index, key, keypart_map, find_flag); })
@@ -2511,6 +2546,9 @@ int handler::ha_index_read_idx_map(uchar *buf, uint index, const uchar *key,
int handler::ha_index_next(uchar * buf)
{
int result;
+ DBUG_ENTER("handler::ha_index_next");
+ DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
+ m_lock_type != F_UNLCK);
DBUG_ASSERT(inited==INDEX);
MYSQL_TABLE_IO_WAIT(m_psi, PSI_TABLE_FETCH_ROW, active_index, 0,
@@ -2519,12 +2557,15 @@ int handler::ha_index_next(uchar * buf)
if (!result)
update_index_statistics();
table->status=result ? STATUS_NOT_FOUND: 0;
- return result;
+ DBUG_RETURN(result);
}
int handler::ha_index_prev(uchar * buf)
{
int result;
+ DBUG_ENTER("handler::ha_index_prev");
+ DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
+ m_lock_type != F_UNLCK);
DBUG_ASSERT(inited==INDEX);
MYSQL_TABLE_IO_WAIT(m_psi, PSI_TABLE_FETCH_ROW, active_index, 0,
@@ -2533,12 +2574,14 @@ int handler::ha_index_prev(uchar * buf)
if (!result)
update_index_statistics();
table->status=result ? STATUS_NOT_FOUND: 0;
- return result;
+ DBUG_RETURN(result);
}
int handler::ha_index_first(uchar * buf)
{
int result;
+ DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
+ m_lock_type != F_UNLCK);
DBUG_ASSERT(inited==INDEX);
MYSQL_TABLE_IO_WAIT(m_psi, PSI_TABLE_FETCH_ROW, active_index, 0,
@@ -2553,6 +2596,8 @@ int handler::ha_index_first(uchar * buf)
int handler::ha_index_last(uchar * buf)
{
int result;
+ DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
+ m_lock_type != F_UNLCK);
DBUG_ASSERT(inited==INDEX);
MYSQL_TABLE_IO_WAIT(m_psi, PSI_TABLE_FETCH_ROW, active_index, 0,
@@ -2567,6 +2612,8 @@ int handler::ha_index_last(uchar * buf)
int handler::ha_index_next_same(uchar *buf, const uchar *key, uint keylen)
{
int result;
+ DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
+ m_lock_type != F_UNLCK);
DBUG_ASSERT(inited==INDEX);
MYSQL_TABLE_IO_WAIT(m_psi, PSI_TABLE_FETCH_ROW, active_index, 0,
@@ -3090,6 +3137,9 @@ void handler::get_auto_increment(ulonglong offset, ulonglong increment,
void handler::ha_release_auto_increment()
{
DBUG_ENTER("ha_release_auto_increment");
+ DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
+ m_lock_type != F_UNLCK ||
+ (!next_insert_id && !insert_id_for_cur_row));
release_auto_increment();
insert_id_for_cur_row= 0;
auto_inc_interval_for_cur_row.replace(0, 0, 0);
@@ -3107,13 +3157,25 @@ void handler::ha_release_auto_increment()
}
-void handler::print_keydup_error(uint key_nr, const char *msg, myf errflag)
+/**
+ Construct and emit duplicate key error message using information
+ from table's record buffer.
+
+ @param table TABLE object which record buffer should be used as
+ source for column values.
+ @param key Key description.
+ @param msg Error message template to which key value should be
+ added.
+ @param errflag Flags for my_error() call.
+*/
+
+void print_keydup_error(TABLE *table, KEY *key, const char *msg, myf errflag)
{
/* Write the duplicated key in the error message */
- char key[MAX_KEY_LENGTH];
- String str(key,sizeof(key),system_charset_info);
+ char key_buff[MAX_KEY_LENGTH];
+ String str(key_buff,sizeof(key_buff),system_charset_info);
- if (key_nr == MAX_KEY)
+ if (key == NULL)
{
/* Key is unknown */
str.copy("", 0, system_charset_info);
@@ -3122,18 +3184,29 @@ void handler::print_keydup_error(uint key_nr, const char *msg, myf errflag)
else
{
/* Table is opened and defined at this point */
- key_unpack(&str,table,(uint) key_nr);
+ key_unpack(&str,table, key);
uint max_length=MYSQL_ERRMSG_SIZE-(uint) strlen(msg);
if (str.length() >= max_length)
{
str.length(max_length-4);
str.append(STRING_WITH_LEN("..."));
}
- my_printf_error(ER_DUP_ENTRY, msg,
- errflag, str.c_ptr_safe(), table->key_info[key_nr].name);
+ my_printf_error(ER_DUP_ENTRY, msg, errflag, str.c_ptr_safe(), key->name);
}
}
+/**
+ Construct and emit duplicate key error message using information
+ from table's record buffer.
+
+ @sa print_keydup_error(table, key, msg, errflag).
+*/
+
+void print_keydup_error(TABLE *table, KEY *key, myf errflag)
+{
+ print_keydup_error(table, key, ER(ER_DUP_ENTRY_WITH_KEY_NAME), errflag);
+}
+
/**
Print error that we got from handler function.
@@ -3198,7 +3271,9 @@ void handler::print_error(int error, myf errflag)
uint key_nr=get_dup_key(error);
if ((int) key_nr >= 0)
{
- print_keydup_error(key_nr, ER(ER_DUP_ENTRY_WITH_KEY_NAME), errflag);
+ print_keydup_error(table,
+ key_nr == MAX_KEY ? NULL : &table->key_info[key_nr],
+ errflag);
DBUG_VOID_RETURN;
}
}
@@ -3210,9 +3285,12 @@ void handler::print_error(int error, myf errflag)
char rec_buf[MAX_KEY_LENGTH];
String rec(rec_buf, sizeof(rec_buf), system_charset_info);
/* Table is opened and defined at this point */
- key_unpack(&rec, table, 0 /* just print the subset of fields that are
- part of the first index, printing the whole
- row from there is not easy */);
+
+ /*
+ Just print the subset of fields that are part of the first index,
+ printing the whole row from there is not easy.
+ */
+ key_unpack(&rec, table, &table->key_info[0]);
char child_table_name[NAME_LEN + 1];
char child_key_name[NAME_LEN + 1];
@@ -3349,7 +3427,7 @@ void handler::print_error(int error, myf errflag)
case HA_ERR_AUTOINC_ERANGE:
textno= error;
my_error(textno, errflag, table->next_number_field->field_name,
- table->in_use->warning_info->current_row_for_warning());
+ table->in_use->get_stmt_da()->current_row_for_warning());
DBUG_VOID_RETURN;
break;
case HA_ERR_TOO_MANY_CONCURRENT_TRXS:
@@ -3358,6 +3436,9 @@ void handler::print_error(int error, myf errflag)
case HA_ERR_INDEX_COL_TOO_LONG:
textno= ER_INDEX_COLUMN_TOO_LONG;
break;
+ case HA_ERR_NOT_IN_LOCK_PARTITIONS:
+ textno=ER_ROW_DOES_NOT_MATCH_GIVEN_PARTITION_SET;
+ break;
case HA_ERR_INDEX_CORRUPT:
textno= ER_INDEX_CORRUPT;
break;
@@ -3455,7 +3536,7 @@ int handler::check_collation_compatibility()
for (; key < key_end; key++)
{
KEY_PART_INFO *key_part= key->key_part;
- KEY_PART_INFO *key_part_end= key_part + key->key_parts;
+ KEY_PART_INFO *key_part_end= key_part + key->user_defined_key_parts;
for (; key_part < key_part_end; key_part++)
{
if (!key_part->fieldnr)
@@ -3496,7 +3577,7 @@ int handler::ha_check_for_upgrade(HA_CHECK_OPT *check_opt)
for (; keyinfo < keyend; keyinfo++)
{
keypart= keyinfo->key_part;
- keypartend= keypart + keyinfo->key_parts;
+ keypartend= keypart + keyinfo->user_defined_key_parts;
for (; keypart < keypartend; keypart++)
{
if (!keypart->fieldnr)
@@ -3588,6 +3669,8 @@ err:
*/
uint handler::get_dup_key(int error)
{
+ DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
+ m_lock_type != F_UNLCK);
DBUG_ENTER("handler::get_dup_key");
table->file->errkey = (uint) -1;
if (error == HA_ERR_FOUND_DUPP_KEY || error == HA_ERR_FOREIGN_DUPLICATE_KEY ||
@@ -3698,6 +3781,8 @@ void handler::drop_table(const char *name)
int handler::ha_check(THD *thd, HA_CHECK_OPT *check_opt)
{
int error;
+ DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
+ m_lock_type != F_UNLCK);
if ((table->s->mysql_version >= MYSQL_VERSION_ID) &&
(check_opt->sql_flags & TT_FOR_UPGRADE))
@@ -3787,6 +3872,8 @@ int
handler::ha_bulk_update_row(const uchar *old_data, uchar *new_data,
uint *dup_key_found)
{
+ DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
+ m_lock_type == F_WRLCK);
mark_trx_read_write();
return bulk_update_row(old_data, new_data, dup_key_found);
@@ -3802,6 +3889,8 @@ handler::ha_bulk_update_row(const uchar *old_data, uchar *new_data,
int
handler::ha_delete_all_rows()
{
+ DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
+ m_lock_type == F_WRLCK);
mark_trx_read_write();
return delete_all_rows();
@@ -3817,6 +3906,8 @@ handler::ha_delete_all_rows()
int
handler::ha_truncate()
{
+ DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
+ m_lock_type == F_WRLCK);
mark_trx_read_write();
return truncate();
@@ -3832,6 +3923,8 @@ handler::ha_truncate()
int
handler::ha_reset_auto_increment(ulonglong value)
{
+ DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
+ m_lock_type == F_WRLCK);
mark_trx_read_write();
return reset_auto_increment(value);
@@ -3847,6 +3940,8 @@ handler::ha_reset_auto_increment(ulonglong value)
int
handler::ha_optimize(THD* thd, HA_CHECK_OPT* check_opt)
{
+ DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
+ m_lock_type == F_WRLCK);
mark_trx_read_write();
return optimize(thd, check_opt);
@@ -3862,6 +3957,8 @@ handler::ha_optimize(THD* thd, HA_CHECK_OPT* check_opt)
int
handler::ha_analyze(THD* thd, HA_CHECK_OPT* check_opt)
{
+ DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
+ m_lock_type != F_UNLCK);
mark_trx_read_write();
return analyze(thd, check_opt);
@@ -3877,6 +3974,8 @@ handler::ha_analyze(THD* thd, HA_CHECK_OPT* check_opt)
bool
handler::ha_check_and_repair(THD *thd)
{
+ DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
+ m_lock_type == F_UNLCK);
mark_trx_read_write();
return check_and_repair(thd);
@@ -3892,6 +3991,8 @@ handler::ha_check_and_repair(THD *thd)
int
handler::ha_disable_indexes(uint mode)
{
+ DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
+ m_lock_type != F_UNLCK);
mark_trx_read_write();
return disable_indexes(mode);
@@ -3907,6 +4008,8 @@ handler::ha_disable_indexes(uint mode)
int
handler::ha_enable_indexes(uint mode)
{
+ DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
+ m_lock_type != F_UNLCK);
mark_trx_read_write();
return enable_indexes(mode);
@@ -3922,26 +4025,116 @@ handler::ha_enable_indexes(uint mode)
int
handler::ha_discard_or_import_tablespace(my_bool discard)
{
+ DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
+ m_lock_type == F_WRLCK);
mark_trx_read_write();
return discard_or_import_tablespace(discard);
}
-/**
- Prepare for alter: public interface.
+bool handler::ha_prepare_inplace_alter_table(TABLE *altered_table,
+ Alter_inplace_info *ha_alter_info)
+{
+ DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
+ m_lock_type != F_UNLCK);
+ mark_trx_read_write();
+
+ return prepare_inplace_alter_table(altered_table, ha_alter_info);
+}
- Called to prepare an *online* ALTER.
- @sa handler::prepare_for_alter()
+bool handler::ha_commit_inplace_alter_table(TABLE *altered_table,
+ Alter_inplace_info *ha_alter_info,
+ bool commit)
+{
+ /*
+ At this point we should have an exclusive metadata lock on the table.
+ The exception is if we're about to roll back changes (commit= false).
+ In this case, we might be rolling back after a failed lock upgrade,
+ so we could be holding the same lock level as for inplace_alter_table().
+ */
+ DBUG_ASSERT(ha_thd()->mdl_context.is_lock_owner(MDL_key::TABLE,
+ table->s->db.str,
+ table->s->table_name.str,
+ MDL_EXCLUSIVE) ||
+ !commit);
+
+ return commit_inplace_alter_table(altered_table, ha_alter_info, commit);
+}
+
+
+/*
+ Default implementation to support in-place alter table
+ and old online add/drop index API
*/
-void
-handler::ha_prepare_for_alter()
+enum_alter_inplace_result
+handler::check_if_supported_inplace_alter(TABLE *altered_table,
+ Alter_inplace_info *ha_alter_info)
{
- mark_trx_read_write();
+ DBUG_ENTER("check_if_supported_alter");
+
+ HA_CREATE_INFO *create_info= ha_alter_info->create_info;
+
+ Alter_inplace_info::HA_ALTER_FLAGS inplace_offline_operations=
+ Alter_inplace_info::ALTER_COLUMN_EQUAL_PACK_LENGTH |
+ Alter_inplace_info::ALTER_COLUMN_NAME |
+ Alter_inplace_info::ALTER_COLUMN_DEFAULT |
+ Alter_inplace_info::CHANGE_CREATE_OPTION |
+ Alter_inplace_info::ALTER_RENAME;
+
+ /* Is there at least one operation that requires copy algorithm? */
+ if (ha_alter_info->handler_flags & ~inplace_offline_operations)
+ DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
+
+ /*
+ ALTER TABLE tbl_name CONVERT TO CHARACTER SET .. and
+ ALTER TABLE table_name DEFAULT CHARSET = .. most likely
+ change column charsets and so are not supported in-place through
+ old API.
+
+ Changing of PACK_KEYS, MAX_ROWS and ROW_FORMAT options were
+ not supported as in-place operations in old API either.
+ */
+ if (create_info->used_fields & (HA_CREATE_USED_CHARSET |
+ HA_CREATE_USED_DEFAULT_CHARSET |
+ HA_CREATE_USED_PACK_KEYS |
+ HA_CREATE_USED_MAX_ROWS) ||
+ (table->s->row_type != create_info->row_type))
+ DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
+
+ uint table_changes= (ha_alter_info->handler_flags &
+ Alter_inplace_info::ALTER_COLUMN_EQUAL_PACK_LENGTH) ?
+ IS_EQUAL_PACK_LENGTH : IS_EQUAL_YES;
+ if (table->file->check_if_incompatible_data(create_info, table_changes)
+ == COMPATIBLE_DATA_YES)
+ DBUG_RETURN(HA_ALTER_INPLACE_EXCLUSIVE_LOCK);
- prepare_for_alter();
+ DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
+}
+
+
+/*
+ Default implementation to support in-place alter table
+ and old online add/drop index API
+*/
+
+void handler::notify_table_changed()
+{
+ ha_create_partitioning_metadata(table->s->path.str, NULL, CHF_INDEX_FLAG);
+}
+
+
+void Alter_inplace_info::report_unsupported_error(const char *not_supported,
+ const char *try_instead)
+{
+ if (unsupported_reason == NULL)
+ my_error(ER_ALTER_OPERATION_NOT_SUPPORTED, MYF(0),
+ not_supported, try_instead);
+ else
+ my_error(ER_ALTER_OPERATION_NOT_SUPPORTED_REASON, MYF(0),
+ not_supported, unsupported_reason, try_instead);
}
@@ -3954,6 +4147,7 @@ handler::ha_prepare_for_alter()
int
handler::ha_rename_table(const char *from, const char *to)
{
+ DBUG_ASSERT(m_lock_type == F_UNLCK);
mark_trx_read_write();
return rename_table(from, to);
@@ -3986,6 +4180,7 @@ handler::ha_delete_table(const char *name)
void
handler::ha_drop_table(const char *name)
{
+ DBUG_ASSERT(m_lock_type == F_UNLCK);
mark_trx_read_write();
return drop_table(name);
@@ -4001,6 +4196,7 @@ handler::ha_drop_table(const char *name)
int
handler::ha_create(const char *name, TABLE *form, HA_CREATE_INFO *info)
{
+ DBUG_ASSERT(m_lock_type == F_UNLCK);
mark_trx_read_write();
int error= create(name, form, info);
if (!error &&
@@ -4020,6 +4216,13 @@ int
handler::ha_create_partitioning_metadata(const char *name, const char *old_name,
int action_flag)
{
+ /*
+ Normally this is done when unlocked, but in fast_alter_partition_table,
+ it is done on an already locked handler when preparing to alter/rename
+ partitions.
+ */
+ DBUG_ASSERT(m_lock_type == F_UNLCK ||
+ (!old_name && strcmp(name, table_share->path.str)));
mark_trx_read_write();
return create_partitioning_metadata(name, old_name, action_flag);
@@ -4039,7 +4242,13 @@ handler::ha_change_partitions(HA_CREATE_INFO *create_info,
ulonglong * const deleted,
const uchar *pack_frm_data,
size_t pack_frm_len)
-{
+{ /*
+ Must have at least RDLCK or be a TMP table. Read lock is needed to read
+ from current partitions and write lock will be taken on new partitions.
+ */
+ DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
+ m_lock_type != F_UNLCK);
+
mark_trx_read_write();
return change_partitions(create_info, path, copied, deleted,
@@ -4056,6 +4265,8 @@ handler::ha_change_partitions(HA_CREATE_INFO *create_info,
int
handler::ha_drop_partitions(const char *path)
{
+ DBUG_ASSERT(!table->db_stat);
+
mark_trx_read_write();
return drop_partitions(path);
@@ -4071,6 +4282,8 @@ handler::ha_drop_partitions(const char *path)
int
handler::ha_rename_partitions(const char *path)
{
+ DBUG_ASSERT(!table->db_stat);
+
mark_trx_read_write();
return rename_partitions(path);
@@ -4131,7 +4344,7 @@ int handler::index_next_same(uchar *buf, const uchar *key, uint keylen)
table->record[0]= buf;
key_info= table->key_info + active_index;
key_part= key_info->key_part;
- key_part_end= key_part + key_info->key_parts;
+ key_part_end= key_part + key_info->user_defined_key_parts;
for (; key_part < key_part_end; key_part++)
{
DBUG_ASSERT(key_part->field);
@@ -4587,9 +4800,9 @@ public:
bool handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
- MYSQL_ERROR::enum_warning_level level,
+ Sql_condition::enum_warning_level level,
const char* msg,
- MYSQL_ERROR ** cond_hdl)
+ Sql_condition ** cond_hdl)
{
*cond_hdl= NULL;
if (sql_errno == ER_NO_SUCH_TABLE ||
@@ -4600,7 +4813,7 @@ public:
return TRUE;
}
- if (level == MYSQL_ERROR::WARN_LEVEL_ERROR)
+ if (level == Sql_condition::WARN_LEVEL_ERROR)
m_unhandled_errors++;
return FALSE;
}
@@ -4762,10 +4975,10 @@ void Discovered_table_list::remove_duplicates()
{
LEX_STRING **src= tables->front();
LEX_STRING **dst= src;
- while (++dst < tables->back())
+ while (++dst <= tables->back())
{
LEX_STRING *s= *src, *d= *dst;
- DBUG_ASSERT(strncmp(s->str, d->str, min(s->length, d->length)) <= 0);
+ DBUG_ASSERT(strncmp(s->str, d->str, MY_MIN(s->length, d->length)) <= 0);
if ((s->length != d->length || strncmp(s->str, d->str, d->length)))
{
src++;
@@ -4773,7 +4986,7 @@ void Discovered_table_list::remove_duplicates()
*src= *dst;
}
}
- tables->set_elements(src - tables->front() + 1);
+ tables->elements(src - tables->front() + 1);
}
struct st_discover_names_args
@@ -5005,14 +5218,7 @@ int handler::read_range_first(const key_range *start_key,
DBUG_ENTER("handler::read_range_first");
eq_range= eq_range_arg;
- end_range= 0;
- if (end_key)
- {
- end_range= &save_end_range;
- save_end_range= *end_key;
- key_compare_result_on_equal= ((end_key->flag == HA_READ_BEFORE_KEY) ? 1 :
- (end_key->flag == HA_READ_AFTER_KEY) ? -1 : 0);
- }
+ set_end_range(end_key);
range_key_part= table->key_info[active_index].key_part;
if (!start_key) // Read first record
@@ -5088,12 +5294,26 @@ int handler::read_range_next()
}
+void handler::set_end_range(const key_range *end_key)
+{
+ end_range= 0;
+ if (end_key)
+ {
+ end_range= &save_end_range;
+ save_end_range= *end_key;
+ key_compare_result_on_equal=
+ ((end_key->flag == HA_READ_BEFORE_KEY) ? 1 :
+ (end_key->flag == HA_READ_AFTER_KEY) ? -1 : 0);
+ }
+}
+
+
/**
Compare if found key (in row) is over max-value.
@param range range to compare to row. May be 0 for no range
- @seealso
+ @see also
key.cc::key_cmp()
@return
@@ -5323,6 +5543,7 @@ static bool check_table_binlog_row_based(THD *thd, TABLE *table)
if (table->s->cached_row_logging_check == -1)
{
int const check(table->s->tmp_table == NO_TMP_TABLE &&
+ ! table->no_replicate &&
binlog_filter->db_ok(table->s->db.str));
table->s->cached_row_logging_check= check;
}
@@ -5430,8 +5651,6 @@ static int binlog_log_row(TABLE* table,
const uchar *after_record,
Log_func *log_func)
{
- if (table->no_replicate)
- return 0;
bool error= 0;
THD *const thd= table->in_use;
@@ -5486,6 +5705,12 @@ int handler::ha_external_lock(THD *thd, int lock_type)
taken a table lock), ha_release_auto_increment() was too.
*/
DBUG_ASSERT(next_insert_id == 0);
+ /* Consecutive calls for lock without unlocking in between is not allowed */
+ DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
+ ((lock_type != F_UNLCK && m_lock_type == F_UNLCK) ||
+ lock_type == F_UNLCK));
+ /* SQL HANDLER call locks/unlock while scanning (RND/INDEX). */
+ DBUG_ASSERT(inited == NONE || table->open_by_handler);
if (MYSQL_HANDLER_RDLOCK_START_ENABLED() ||
MYSQL_HANDLER_WRLOCK_START_ENABLED() ||
@@ -5519,6 +5744,7 @@ int handler::ha_external_lock(THD *thd, int lock_type)
if (error == 0)
{
+ m_lock_type= lock_type;
cached_table_flags= table_flags();
if (table_share->tmp_table == NO_TMP_TABLE)
mysql_audit_external_lock(thd, table_share, lock_type);
@@ -5575,6 +5801,8 @@ int handler::ha_write_row(uchar *buf)
{
int error;
Log_func *log_func= Write_rows_log_event::binlog_row_logging_function;
+ DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
+ m_lock_type == F_WRLCK);
DBUG_ENTER("handler::ha_write_row");
DEBUG_SYNC_C("ha_write_row_start");
DBUG_EXECUTE_IF("inject_error_ha_write_row",
@@ -5603,6 +5831,8 @@ int handler::ha_update_row(const uchar *old_data, uchar *new_data)
{
int error;
Log_func *log_func= Update_rows_log_event::binlog_row_logging_function;
+ DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
+ m_lock_type == F_WRLCK);
/*
Some storage engines require that the new record is in record[0]
@@ -5638,6 +5868,8 @@ int handler::ha_delete_row(const uchar *buf)
buf == table->record[1]);
DBUG_EXECUTE_IF("inject_error_ha_delete_row",
return HA_ERR_INTERNAL_ERROR; );
+ DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE ||
+ m_lock_type == F_WRLCK);
MYSQL_DELETE_ROW_START(table_share->db.str, table_share->table_name.str);
mark_trx_read_write();
@@ -5668,6 +5900,77 @@ void handler::use_hidden_primary_key()
}
+/**
+ Get an initialized ha_share.
+
+ @return Initialized ha_share
+ @retval NULL ha_share is not yet initialized.
+ @retval != NULL previous initialized ha_share.
+
+ @note
+ If not a temp table, then LOCK_ha_data must be held.
+*/
+
+Handler_share *handler::get_ha_share_ptr()
+{
+ DBUG_ENTER("handler::get_ha_share_ptr");
+ DBUG_ASSERT(ha_share && table_share);
+
+#ifndef DBUG_OFF
+ if (table_share->tmp_table == NO_TMP_TABLE)
+ mysql_mutex_assert_owner(&table_share->LOCK_ha_data);
+#endif
+
+ DBUG_RETURN(*ha_share);
+}
+
+
+/**
+ Set ha_share to be used by all instances of the same table/partition.
+
+ @param ha_share Handler_share to be shared.
+
+ @note
+ If not a temp table, then LOCK_ha_data must be held.
+*/
+
+void handler::set_ha_share_ptr(Handler_share *arg_ha_share)
+{
+ DBUG_ENTER("handler::set_ha_share_ptr");
+ DBUG_ASSERT(ha_share);
+#ifndef DBUG_OFF
+ if (table_share->tmp_table == NO_TMP_TABLE)
+ mysql_mutex_assert_owner(&table_share->LOCK_ha_data);
+#endif
+
+ *ha_share= arg_ha_share;
+ DBUG_VOID_RETURN;
+}
+
+
+/**
+ Take a lock for protecting shared handler data.
+*/
+
+void handler::lock_shared_ha_data()
+{
+ DBUG_ASSERT(table_share);
+ if (table_share->tmp_table == NO_TMP_TABLE)
+ mysql_mutex_lock(&table_share->LOCK_ha_data);
+}
+
+
+/**
+ Release lock for protecting ha_share.
+*/
+
+void handler::unlock_shared_ha_data()
+{
+ DBUG_ASSERT(table_share);
+ if (table_share->tmp_table == NO_TMP_TABLE)
+ mysql_mutex_unlock(&table_share->LOCK_ha_data);
+}
+
/** @brief
Dummy function which accept information about log files which is not need
by handlers
diff --git a/sql/handler.h b/sql/handler.h
index 9f8290ee176..478317e881d 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -42,6 +42,8 @@
#error MAX_KEY is too large. Values up to 128 are supported.
#endif
+class Alter_info;
+
// the following is for checking tables
#define HA_ADMIN_ALREADY_DONE 1
@@ -59,6 +61,22 @@
#define HA_ADMIN_NEEDS_ALTER -11
#define HA_ADMIN_NEEDS_CHECK -12
+/**
+ Return values for check_if_supported_inplace_alter().
+
+ @see check_if_supported_inplace_alter() for description of
+ the individual values.
+*/
+enum enum_alter_inplace_result {
+ HA_ALTER_ERROR,
+ HA_ALTER_INPLACE_NOT_SUPPORTED,
+ HA_ALTER_INPLACE_EXCLUSIVE_LOCK,
+ HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE,
+ HA_ALTER_INPLACE_SHARED_LOCK,
+ HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE,
+ HA_ALTER_INPLACE_NO_LOCK
+};
+
/* Bits in table_flags() to show what database can do */
#define HA_NO_TRANSACTIONS (1ULL << 0) /* Doesn't support transactions */
@@ -99,8 +117,8 @@
#define HA_CAN_INSERT_DELAYED (1ULL << 14)
/*
If we get the primary key columns for free when we do an index read
- It also implies that we have to retrive the primary key when using
- position() and rnd_pos().
+ (usually, it also implies that HA_PRIMARY_KEY_REQUIRED_FOR_POSITION
+ flag is set).
*/
#define HA_PRIMARY_KEY_IN_READ_INDEX (1ULL << 15)
/*
@@ -190,6 +208,64 @@
*/
#define HA_MUST_USE_TABLE_CONDITION_PUSHDOWN (1ULL << 42)
+/**
+ The handler supports read before write removal optimization
+
+ Read before write removal may be used for storage engines which support
+ write without previous read of the row to be updated. Handler returning
+ this flag must implement start_read_removal() and end_read_removal().
+ The handler may return "fake" rows constructed from the key of the row
+ asked for. This is used to optimize UPDATE and DELETE by reducing the
+ number of roundtrips between handler and storage engine.
+
+ Example:
+ UPDATE a=1 WHERE pk IN (<keys>)
+
+ mysql_update()
+ {
+ if (<conditions for starting read removal>)
+ start_read_removal()
+ -> handler returns true if read removal supported for this table/query
+
+ while(read_record("pk=<key>"))
+ -> handler returns fake row with column "pk" set to <key>
+
+ ha_update_row()
+ -> handler sends write "a=1" for row with "pk=<key>"
+
+ end_read_removal()
+ -> handler returns the number of rows actually written
+ }
+
+ @note This optimization in combination with batching may be used to
+ remove even more roundtrips.
+*/
+#define HA_READ_BEFORE_WRITE_REMOVAL (1LL << 43)
+
+/*
+ Engine supports extended fulltext API
+ */
+#define HA_CAN_FULLTEXT_EXT (1LL << 44)
+
+/*
+ Storage engine doesn't synchronize result set with expected table contents.
+ Used by replication slave to check if it is possible to retrieve rows from
+ the table when deciding whether to do a full table scan, index scan or hash
+ scan while applying a row event.
+ */
+#define HA_READ_OUT_OF_SYNC (1LL << 45)
+
+/*
+ Storage engine supports table export using the
+ FLUSH TABLE <table_list> FOR EXPORT statement.
+ */
+#define HA_CAN_EXPORT (1LL << 46)
+
+/*
+ The handler doesn't want accesses to this table to
+ be const-table optimized
+*/
+#define HA_BLOCK_CONST_TABLE (1LL << 47)
/*
Set of all binlog flags. Currently only contain the capabilities
flags.
@@ -384,9 +460,15 @@ enum legacy_db_type
enum row_type { ROW_TYPE_NOT_USED=-1, ROW_TYPE_DEFAULT, ROW_TYPE_FIXED,
ROW_TYPE_DYNAMIC, ROW_TYPE_COMPRESSED,
ROW_TYPE_REDUNDANT, ROW_TYPE_COMPACT,
- /** Unused. Reserved for future versions. */
ROW_TYPE_PAGE };
+/* Specifies data storage format for individual columns */
+enum column_format_type {
+ COLUMN_FORMAT_TYPE_DEFAULT= 0, /* Not specified (use engine default) */
+ COLUMN_FORMAT_TYPE_FIXED= 1, /* FIXED format */
+ COLUMN_FORMAT_TYPE_DYNAMIC= 2 /* DYNAMIC format */
+};
+
enum enum_binlog_func {
BFN_RESET_LOGS= 1,
BFN_RESET_SLAVE= 2,
@@ -431,6 +513,45 @@ enum enum_binlog_command {
/* The following two are used by Maria engine: */
#define HA_CREATE_USED_TRANSACTIONAL (1L << 20)
#define HA_CREATE_USED_PAGE_CHECKSUM (1L << 21)
+/** This is set whenever STATS_PERSISTENT=0|1|default has been
+specified in CREATE/ALTER TABLE. See also HA_OPTION_STATS_PERSISTENT in
+include/my_base.h. It is possible to distinguish whether
+STATS_PERSISTENT=default has been specified or no STATS_PERSISTENT= is
+given at all. */
+#define HA_CREATE_USED_STATS_PERSISTENT (1L << 22)
+/**
+ This is set whenever STATS_AUTO_RECALC=0|1|default has been
+ specified in CREATE/ALTER TABLE. See enum_stats_auto_recalc.
+ It is possible to distinguish whether STATS_AUTO_RECALC=default
+ has been specified or no STATS_AUTO_RECALC= is given at all.
+*/
+#define HA_CREATE_USED_STATS_AUTO_RECALC (1L << 23)
+/**
+ This is set whenever STATS_SAMPLE_PAGES=N|default has been
+ specified in CREATE/ALTER TABLE. It is possible to distinguish whether
+ STATS_SAMPLE_PAGES=default has been specified or no STATS_SAMPLE_PAGES= is
+ given at all.
+*/
+#define HA_CREATE_USED_STATS_SAMPLE_PAGES (1L << 24)
+
+
+/*
+ This is master database for most of system tables. However there
+ can be other databases which can hold system tables. Respective
+ storage engines define their own system database names.
+*/
+extern const char *mysqld_system_database;
+
+/*
+ Structure to hold list of system_database.system_table.
+ This is used at both mysqld and storage engine layer.
+*/
+struct st_system_tablename
+{
+ const char *db;
+ const char *tablename;
+};
+
typedef ulonglong my_xid; // this line is the same as in log_event.h
#define MYSQL_XID_PREFIX "MySQLXid"
@@ -1244,6 +1365,7 @@ static inline sys_var *find_hton_sysvar(handlerton *hton, st_mysql_sys_var *var)
#define HTON_ALTER_NOT_SUPPORTED (1 << 1) //Engine does not support alter
#define HTON_CAN_RECREATE (1 << 2) //Delete all is used for truncate
#define HTON_HIDDEN (1 << 3) //Engine does not appear in lists
+#define HTON_FLUSH_AFTER_RENAME (1 << 4)
#define HTON_NOT_USER_SELECTABLE (1 << 5)
#define HTON_TEMPORARY_NOT_SUPPORTED (1 << 6) //Having temporary tables not supported
#define HTON_SUPPORT_LOG_TABLES (1 << 7) //Engine supports log tables
@@ -1305,6 +1427,22 @@ struct THD_TRANS
void reset() { no_2pc= FALSE; modified_non_trans_table= FALSE; }
bool is_empty() const { return ha_list == NULL; }
THD_TRANS() {} /* Remove gcc warning */
+
+ unsigned int m_unsafe_rollback_flags;
+ /*
+ Define the type of statements which cannot be rolled back safely.
+ Each type occupies one bit in m_unsafe_rollback_flags.
+ */
+ static unsigned int const MODIFIED_NON_TRANS_TABLE= 0x01;
+ static unsigned int const CREATED_TEMP_TABLE= 0x02;
+ static unsigned int const DROPPED_TEMP_TABLE= 0x04;
+
+ void mark_created_temp_table()
+ {
+ DBUG_PRINT("debug", ("mark_created_temp_table"));
+ m_unsafe_rollback_flags|= CREATED_TEMP_TABLE;
+ }
+
};
@@ -1428,10 +1566,13 @@ struct st_table_log_memory_entry;
class partition_info;
struct st_partition_iter;
-#define NOT_A_PARTITION_ID ((uint32)-1)
enum ha_choice { HA_CHOICE_UNDEF, HA_CHOICE_NO, HA_CHOICE_YES };
+enum enum_stats_auto_recalc { HA_STATS_AUTO_RECALC_DEFAULT= 0,
+ HA_STATS_AUTO_RECALC_ON,
+ HA_STATS_AUTO_RECALC_OFF };
+
struct HA_CREATE_INFO
{
CHARSET_INFO *table_charset, *default_table_charset;
@@ -1447,6 +1588,9 @@ struct HA_CREATE_INFO
ulong avg_row_length;
ulong used_fields;
ulong key_block_size;
+ uint stats_sample_pages; /* number of pages to sample during
+ stats estimation, if used, otherwise 0. */
+ enum_stats_auto_recalc stats_auto_recalc;
SQL_I_List<TABLE_LIST> merge_list;
handlerton *db_type;
/**
@@ -1478,12 +1622,309 @@ struct HA_CREATE_INFO
};
+/**
+ In-place alter handler context.
+
+ This is a superclass intended to be subclassed by individual handlers
+ in order to store handler unique context between in-place alter API calls.
+
+ The handler is responsible for creating the object. This can be done
+ as early as during check_if_supported_inplace_alter().
+
+ The SQL layer is responsible for destroying the object.
+ The class extends Sql_alloc so the memory will be mem root allocated.
+
+ @see Alter_inplace_info
+*/
+
+class inplace_alter_handler_ctx : public Sql_alloc
+{
+public:
+ inplace_alter_handler_ctx() {}
+
+ virtual ~inplace_alter_handler_ctx() {}
+};
+
+
+/**
+ Class describing changes to be done by ALTER TABLE.
+ Instance of this class is passed to storage engine in order
+ to determine if this ALTER TABLE can be done using in-place
+ algorithm. It is also used for executing the ALTER TABLE
+ using in-place algorithm.
+*/
+
+class Alter_inplace_info
+{
+public:
+ /**
+ Bits to show in detail what operations the storage engine is
+ to execute.
+
+ All these operations are supported as in-place operations by the
+ SQL layer. This means that operations that by their nature must
+ be performed by copying the table to a temporary table, will not
+ have their own flags here (e.g. ALTER TABLE FORCE, ALTER TABLE
+ ENGINE).
+
+ We generally try to specify handler flags only if there are real
+ changes. But in cases when it is cumbersome to determine if some
+ attribute has really changed we might choose to set flag
+ pessimistically, for example, relying on parser output only.
+ */
+ typedef ulong HA_ALTER_FLAGS;
+
+ // Add non-unique, non-primary index
+ static const HA_ALTER_FLAGS ADD_INDEX = 1L << 0;
+
+ // Drop non-unique, non-primary index
+ static const HA_ALTER_FLAGS DROP_INDEX = 1L << 1;
+
+ // Add unique, non-primary index
+ static const HA_ALTER_FLAGS ADD_UNIQUE_INDEX = 1L << 2;
+
+ // Drop unique, non-primary index
+ static const HA_ALTER_FLAGS DROP_UNIQUE_INDEX = 1L << 3;
+
+ // Add primary index
+ static const HA_ALTER_FLAGS ADD_PK_INDEX = 1L << 4;
+
+ // Drop primary index
+ static const HA_ALTER_FLAGS DROP_PK_INDEX = 1L << 5;
+
+ // Add column
+ static const HA_ALTER_FLAGS ADD_COLUMN = 1L << 6;
+
+ // Drop column
+ static const HA_ALTER_FLAGS DROP_COLUMN = 1L << 7;
+
+ // Rename column
+ static const HA_ALTER_FLAGS ALTER_COLUMN_NAME = 1L << 8;
+
+ // Change column datatype
+ static const HA_ALTER_FLAGS ALTER_COLUMN_TYPE = 1L << 9;
+
+ /**
+ Change column datatype in such way that new type has compatible
+ packed representation with old type, so it is theoretically
+ possible to perform change by only updating data dictionary
+ without changing table rows.
+ */
+ static const HA_ALTER_FLAGS ALTER_COLUMN_EQUAL_PACK_LENGTH = 1L << 10;
+
+ // Reorder column
+ static const HA_ALTER_FLAGS ALTER_COLUMN_ORDER = 1L << 11;
+
+ // Change column from NOT NULL to NULL
+ static const HA_ALTER_FLAGS ALTER_COLUMN_NULLABLE = 1L << 12;
+
+ // Change column from NULL to NOT NULL
+ static const HA_ALTER_FLAGS ALTER_COLUMN_NOT_NULLABLE = 1L << 13;
+
+ // Set or remove default column value
+ static const HA_ALTER_FLAGS ALTER_COLUMN_DEFAULT = 1L << 14;
+
+ // Add foreign key
+ static const HA_ALTER_FLAGS ADD_FOREIGN_KEY = 1L << 15;
+
+ // Drop foreign key
+ static const HA_ALTER_FLAGS DROP_FOREIGN_KEY = 1L << 16;
+
+ // table_options changed, see HA_CREATE_INFO::used_fields for details.
+ static const HA_ALTER_FLAGS CHANGE_CREATE_OPTION = 1L << 17;
+
+ // Table is renamed
+ static const HA_ALTER_FLAGS ALTER_RENAME = 1L << 18;
+
+ // Change the storage type of column
+ static const HA_ALTER_FLAGS ALTER_COLUMN_STORAGE_TYPE = 1L << 19;
+
+ // Change the column format of column
+ static const HA_ALTER_FLAGS ALTER_COLUMN_COLUMN_FORMAT = 1L << 20;
+
+ // Add partition
+ static const HA_ALTER_FLAGS ADD_PARTITION = 1L << 21;
+
+ // Drop partition
+ static const HA_ALTER_FLAGS DROP_PARTITION = 1L << 22;
+
+ // Changing partition options
+ static const HA_ALTER_FLAGS ALTER_PARTITION = 1L << 23;
+
+ // Coalesce partition
+ static const HA_ALTER_FLAGS COALESCE_PARTITION = 1L << 24;
+
+ // Reorganize partition ... into
+ static const HA_ALTER_FLAGS REORGANIZE_PARTITION = 1L << 25;
+
+ // Reorganize partition
+ static const HA_ALTER_FLAGS ALTER_TABLE_REORG = 1L << 26;
+
+ // Remove partitioning
+ static const HA_ALTER_FLAGS ALTER_REMOVE_PARTITIONING = 1L << 27;
+
+ // Partition operation with ALL keyword
+ static const HA_ALTER_FLAGS ALTER_ALL_PARTITION = 1L << 28;
+
+ // Virtual (computed) column definition changed
+ static const HA_ALTER_FLAGS ALTER_COLUMN_VCOL = 1L << 29;
+
+ /**
+ Create options (like MAX_ROWS) for the new version of table.
+
+ @note The referenced instance of HA_CREATE_INFO object was already
+ used to create new .FRM file for table being altered. So it
+ has been processed by mysql_prepare_create_table() already.
+ For example, this means that it has HA_OPTION_PACK_RECORD
+ flag in HA_CREATE_INFO::table_options member correctly set.
+ */
+ HA_CREATE_INFO *create_info;
+
+ /**
+ Alter options, fields and keys for the new version of table.
+
+ @note The referenced instance of Alter_info object was already
+ used to create new .FRM file for table being altered. So it
+ has been processed by mysql_prepare_create_table() already.
+ In particular, this means that in Create_field objects for
+ fields which were present in some form in the old version
+ of table, Create_field::field member points to corresponding
+ Field instance for old version of table.
+ */
+ Alter_info *alter_info;
+
+ /**
+ Array of KEYs for new version of table - including KEYs to be added.
+
+ @note Currently this array is produced as result of
+ mysql_prepare_create_table() call.
+ This means that it follows different convention for
+ KEY_PART_INFO::fieldnr values than objects in TABLE::key_info
+ array.
+
+ @todo This is mainly due to the fact that we need to keep compatibility
+ with removed handler::add_index() call. We plan to switch to
+ TABLE::key_info numbering later.
+
+ KEYs are sorted - see sort_keys().
+ */
+ KEY *key_info_buffer;
+
+ /** Size of key_info_buffer array. */
+ uint key_count;
+
+ /** Size of index_drop_buffer array. */
+ uint index_drop_count;
+
+ /**
+ Array of pointers to KEYs to be dropped belonging to the TABLE instance
+ for the old version of the table.
+ */
+ KEY **index_drop_buffer;
+
+ /** Size of index_add_buffer array. */
+ uint index_add_count;
+
+ /**
+ Array of indexes into key_info_buffer for KEYs to be added,
+ sorted in increasing order.
+ */
+ uint *index_add_buffer;
+
+ /**
+ Context information to allow handlers to keep context between in-place
+ alter API calls.
+
+ @see inplace_alter_handler_ctx for information about object lifecycle.
+ */
+ inplace_alter_handler_ctx *handler_ctx;
+
+ /**
+ Flags describing in detail which operations the storage engine is to execute.
+ */
+ HA_ALTER_FLAGS handler_flags;
+
+ /**
+ Partition_info taking into account the partition changes to be performed.
+ Contains all partitions which are present in the old version of the table
+ with partitions to be dropped or changed marked as such + all partitions
+ to be added in the new version of table marked as such.
+ */
+ partition_info *modified_part_info;
+
+ /** true for ALTER IGNORE TABLE ... */
+ const bool ignore;
+
+ /** true for online operation (LOCK=NONE) */
+ bool online;
+
+ /**
+ Can be set by handler to describe why a given operation cannot be done
+ in-place (HA_ALTER_INPLACE_NOT_SUPPORTED) or why it cannot be done
+ online (HA_ALTER_INPLACE_NO_LOCK or
+ HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE)
+ If set, it will be used with ER_ALTER_OPERATION_NOT_SUPPORTED_REASON if
+ results from handler::check_if_supported_inplace_alter() doesn't match
+ requirements set by user. If not set, the more generic
+ ER_ALTER_OPERATION_NOT_SUPPORTED will be used.
+
+ Please set to a properly localized string, for example using
+ my_get_err_msg(), so that the error message as a whole is localized.
+ */
+ const char *unsupported_reason;
+
+ Alter_inplace_info(HA_CREATE_INFO *create_info_arg,
+ Alter_info *alter_info_arg,
+ KEY *key_info_arg, uint key_count_arg,
+ partition_info *modified_part_info_arg,
+ bool ignore_arg)
+ : create_info(create_info_arg),
+ alter_info(alter_info_arg),
+ key_info_buffer(key_info_arg),
+ key_count(key_count_arg),
+ index_drop_count(0),
+ index_drop_buffer(NULL),
+ index_add_count(0),
+ index_add_buffer(NULL),
+ handler_ctx(NULL),
+ handler_flags(0),
+ modified_part_info(modified_part_info_arg),
+ ignore(ignore_arg),
+ online(false),
+ unsupported_reason(NULL)
+ {}
+
+ ~Alter_inplace_info()
+ {
+ delete handler_ctx;
+ }
+
+ /**
+ Used after check_if_supported_inplace_alter() to report
+ error if the result does not match the LOCK/ALGORITHM
+ requirements set by the user.
+
+ @param not_supported Part of statement that was not supported.
+ @param try_instead Suggestion as to what the user should
+ replace not_supported with.
+ */
+ void report_unsupported_error(const char *not_supported,
+ const char *try_instead);
+};
+
+
typedef struct st_key_create_information
{
enum ha_key_alg algorithm;
ulong block_size;
LEX_STRING parser_name;
LEX_STRING comment;
+ /**
+ A flag to determine if we will check for duplicate indexes.
+ This typically means that the key information was specified
+ directly by the user (set by the parser).
+ */
+ bool check_for_duplicate_indexes;
} KEY_CREATE_INFO;
@@ -1885,34 +2326,61 @@ uint calculate_key_len(TABLE *, uint, const uchar *, key_part_map);
#define make_prev_keypart_map(N) (((key_part_map)1 << (N)) - 1)
-/**
- Index creation context.
- Created by handler::add_index() and destroyed by handler::final_add_index().
- And finally freed at the end of the statement.
- (Sql_alloc does not free in delete).
-*/
-
-class handler_add_index : public Sql_alloc
+/** Base class to be used by the handlers' per-table shared data */
+class Handler_share
{
public:
- /* Table where the indexes are added */
- TABLE* const table;
- /* Indexes being created */
- KEY* const key_info;
- /* Size of key_info[] */
- const uint num_of_keys;
- handler_add_index(TABLE *table_arg, KEY *key_info_arg, uint num_of_keys_arg)
- : table (table_arg), key_info (key_info_arg), num_of_keys (num_of_keys_arg)
- {}
- virtual ~handler_add_index() {}
+ Handler_share() {}
+ virtual ~Handler_share() {}
};
-class Query_cache;
-struct Query_cache_block_table;
+
/**
The handler class is the interface for dynamically loadable
storage engines. Do not add ifdefs and take care when adding or
changing virtual functions to avoid vtable confusion
+
+ Functions in this class accept and return table columns data. Two data
+ representation formats are used:
+ 1. TableRecordFormat - Used to pass [partial] table records to/from
+ storage engine
+
+ 2. KeyTupleFormat - used to pass index search tuples (aka "keys") to
+ storage engine. See opt_range.cc for description of this format.
+
+ TableRecordFormat
+ =================
+ [Warning: this description is work in progress and may be incomplete]
+ The table record is stored in a fixed-size buffer:
+
+ record: null_bytes, column1_data, column2_data, ...
+
+ The offsets of the parts of the buffer are also fixed: every column has
+ an offset to its column{i}_data, and if it is nullable it also has its own
+ bit in null_bytes.
+
+ The record buffer only includes data about columns that are marked in the
+ relevant column set (table->read_set and/or table->write_set, depending on
+ the situation).
+ <not-sure>It could be that it is required that null bits of non-present
+ columns are set to 1</not-sure>
+
+ VARIOUS EXCEPTIONS AND SPECIAL CASES
+
+ If the table has no nullable columns, then null_bytes is still
+ present, its length is one byte <not-sure> which must be set to 0xFF
+ at all times. </not-sure>
+
+ If the table has columns of type BIT, then certain bits from those columns
+ may be stored in null_bytes as well. Grep around for Field_bit for
+ details.
+
+ For blob columns (see Field_blob), the record buffer stores length of the
+ data, followed by a memory pointer to the blob data. The pointer is owned
+ by the storage engine and is valid until the next operation.
+
+ If a blob column has NULL value, then its length and blob data pointer
+ must be set to 0.
*/
class handler :public Sql_alloc
@@ -1965,7 +2433,6 @@ public:
uint ref_length;
FT_INFO *ft_handler;
enum {NONE=0, INDEX, RND} inited;
- bool locked;
bool implicit_emptied; /* Can be !=0 only if HEAP */
bool mark_trx_done;
const COND *pushed_cond;
@@ -2026,6 +2493,21 @@ public:
virtual void unbind_psi();
virtual void rebind_psi();
+private:
+ /**
+    The lock type set when calling ha_external_lock(). This is
+ propagated down to the storage engine. The reason for also storing
+ it here, is that when doing MRR we need to create/clone a second handler
+ object. This cloned handler object needs to know about the lock_type used.
+ */
+ int m_lock_type;
+ /**
+ Pointer where to store/retrieve the Handler_share pointer.
+ For non partitioned handlers this is &TABLE_SHARE::ha_share.
+ */
+ Handler_share **ha_share;
+
+public:
handler(handlerton *ht_arg, TABLE_SHARE *share_arg)
:table_share(share_arg), table(0),
estimation_rows_to_insert(0), ht(ht_arg),
@@ -2033,18 +2515,21 @@ public:
in_range_check_pushed_down(FALSE),
ref_length(sizeof(my_off_t)),
ft_handler(0), inited(NONE),
- locked(FALSE), implicit_emptied(0), mark_trx_done(FALSE),
+ implicit_emptied(0), mark_trx_done(FALSE),
pushed_cond(0), next_insert_id(0), insert_id_for_cur_row(0),
pushed_idx_cond(NULL),
pushed_idx_cond_keyno(MAX_KEY),
auto_inc_intervals_count(0),
- m_psi(NULL)
+ m_psi(NULL), m_lock_type(F_UNLCK), ha_share(NULL)
{
+ DBUG_PRINT("info",
+ ("handler created F_UNLCK %d F_RDLCK %d F_WRLCK %d",
+ F_UNLCK, F_RDLCK, F_WRLCK));
reset_statistics();
}
virtual ~handler(void)
{
- DBUG_ASSERT(locked == FALSE);
+ DBUG_ASSERT(m_lock_type == F_UNLCK);
DBUG_ASSERT(inited == NONE);
}
virtual handler *clone(const char *name, MEM_ROOT *mem_root);
@@ -2159,7 +2644,6 @@ public:
int ha_disable_indexes(uint mode);
int ha_enable_indexes(uint mode);
int ha_discard_or_import_tablespace(my_bool discard);
- void ha_prepare_for_alter();
int ha_rename_table(const char *from, const char *to);
int ha_delete_table(const char *name);
void ha_drop_table(const char *name);
@@ -2180,7 +2664,6 @@ public:
void adjust_next_insert_id_after_explicit_value(ulonglong nr);
int update_auto_increment();
- void print_keydup_error(uint key_nr, const char *msg, myf errflag);
virtual void print_error(int error, myf errflag);
virtual bool get_error_message(int error, String *buf);
uint get_dup_key(int error);
@@ -2443,6 +2926,7 @@ public:
const key_range *end_key,
bool eq_range, bool sorted);
virtual int read_range_next();
+ void set_end_range(const key_range *end_key);
int compare_key(key_range *range);
int compare_key2(key_range *range);
virtual int ft_init() { return HA_ERR_WRONG_COMMAND; }
@@ -2460,6 +2944,7 @@ private:
*/
virtual int rnd_pos_by_record(uchar *record)
{
+ DBUG_ASSERT(table_flags() & HA_PRIMARY_KEY_REQUIRED_FOR_POSITION);
position(record);
return rnd_pos(record, ref);
}
@@ -2575,10 +3060,15 @@ public:
{ return FALSE; }
virtual char* get_foreign_key_create_info()
{ return(NULL);} /* gets foreign key create string from InnoDB */
- virtual char* get_tablespace_name(THD *thd, char *name, uint name_len)
- { return(NULL);} /* gets tablespace name from handler */
- /** used in ALTER TABLE; 1 if changing storage engine is allowed */
- virtual bool can_switch_engines() { return 1; }
+ /**
+ Used in ALTER TABLE to check if changing storage engine is allowed.
+
+ @note Called without holding thr_lock.c lock.
+
+ @retval true Changing storage engine is allowed.
+ @retval false Changing storage engine not allowed.
+ */
+ virtual bool can_switch_engines() { return true; }
virtual int can_continue_handler_scan() { return 0; }
/**
Get the list of foreign keys in this table.
@@ -2629,52 +3119,16 @@ public:
virtual ulong index_flags(uint idx, uint part, bool all_parts) const =0;
-/**
- First phase of in-place add index.
- Handlers are supposed to create new indexes here but not make them
- visible.
-
- @param table_arg Table to add index to
- @param key_info Information about new indexes
- @param num_of_key Number of new indexes
- @param add[out] Context of handler specific information needed
- for final_add_index().
-
- @note This function can be called with less than exclusive metadata
- lock depending on which flags are listed in alter_table_flags.
-*/
- virtual int add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys,
- handler_add_index **add)
- { return (HA_ERR_WRONG_COMMAND); }
-
-/**
- Second and last phase of in-place add index.
- Commit or rollback pending new indexes.
-
- @param add Context of handler specific information from add_index().
- @param commit If true, commit. If false, rollback index changes.
-
- @note This function is called with exclusive metadata lock.
-*/
- virtual int final_add_index(handler_add_index *add, bool commit)
- { return (HA_ERR_WRONG_COMMAND); }
-
- virtual int prepare_drop_index(TABLE *table_arg, uint *key_num,
- uint num_of_keys)
- { return (HA_ERR_WRONG_COMMAND); }
- virtual int final_drop_index(TABLE *table_arg)
- { return (HA_ERR_WRONG_COMMAND); }
-
uint max_record_length() const
- { return min(HA_MAX_REC_LENGTH, max_supported_record_length()); }
+ { return MY_MIN(HA_MAX_REC_LENGTH, max_supported_record_length()); }
uint max_keys() const
- { return min(MAX_KEY, max_supported_keys()); }
+ { return MY_MIN(MAX_KEY, max_supported_keys()); }
uint max_key_parts() const
- { return min(MAX_REF_PARTS, max_supported_key_parts()); }
+ { return MY_MIN(MAX_REF_PARTS, max_supported_key_parts()); }
uint max_key_length() const
- { return min(MAX_KEY_LENGTH, max_supported_key_length()); }
+ { return MY_MIN(MAX_KEY_LENGTH, max_supported_key_length()); }
uint max_key_part_length() const
- { return min(MAX_KEY_LENGTH, max_supported_key_part_length()); }
+ { return MY_MIN(MAX_KEY_LENGTH, max_supported_key_part_length()); }
virtual uint max_supported_record_length() const { return HA_MAX_REC_LENGTH; }
virtual uint max_supported_keys() const { return 0; }
@@ -2898,10 +3352,266 @@ public:
pushed_idx_cond_keyno= MAX_KEY;
in_range_check_pushed_down= false;
}
+ /**
+ Part of old, deprecated in-place ALTER API.
+ */
virtual bool check_if_incompatible_data(HA_CREATE_INFO *create_info,
uint table_changes)
{ return COMPATIBLE_DATA_NO; }
+ /* On-line/in-place ALTER TABLE interface. */
+
+ /*
+ Here is an outline of on-line/in-place ALTER TABLE execution through
+ this interface.
+
+ Phase 1 : Initialization
+ ========================
+ During this phase we determine which algorithm should be used
+ for execution of ALTER TABLE and what level concurrency it will
+ require.
+
+ *) This phase starts by opening the table and preparing description
+ of the new version of the table.
+ *) Then we check if it is impossible even in theory to carry out
+ this ALTER TABLE using the in-place algorithm. For example, because
+ we need to change storage engine or the user has explicitly requested
+ usage of the "copy" algorithm.
+ *) If in-place ALTER TABLE is theoretically possible, we continue
+ by compiling differences between old and new versions of the table
+ in the form of HA_ALTER_FLAGS bitmap. We also build a few
+ auxiliary structures describing requested changes and store
+ all these data in the Alter_inplace_info object.
+ *) Then the handler::check_if_supported_inplace_alter() method is called
+ in order to find if the storage engine can carry out changes requested
+ by this ALTER TABLE using the in-place algorithm. To determine this,
+ the engine can rely on data in HA_ALTER_FLAGS/Alter_inplace_info
+ passed to it as well as on its own checks. If the in-place algorithm
+ can be used for this ALTER TABLE, the level of required concurrency for
+ its execution is also returned.
+ If any errors occur during the handler call, ALTER TABLE is aborted
+ and no further handler functions are called.
+ *) Locking requirements of the in-place algorithm are compared to any
+ concurrency requirements specified by user. If there is a conflict
+ between them, we either switch to the copy algorithm or emit an error.
+
+ Phase 2 : Execution
+ ===================
+
+ In this phase the operations are executed.
+
+ *) As the first step, we acquire a lock corresponding to the concurrency
+ level which was returned by handler::check_if_supported_inplace_alter()
+ and requested by the user. This lock is held for most of the
+ duration of in-place ALTER (if HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE
+ or HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE were returned we acquire an
+ exclusive lock for duration of the next step only).
+ *) After that we call handler::ha_prepare_inplace_alter_table() to give the
+ storage engine a chance to update its internal structures with a higher
+ lock level than the one that will be used for the main step of algorithm.
+ After that we downgrade the lock if it is necessary.
+ *) After that, the main step of this phase and algorithm is executed.
+ We call the handler::ha_inplace_alter_table() method, which carries out the
+    changes requested by ALTER TABLE but does not make them visible to other
+ connections yet.
+ *) We ensure that no other connection uses the table by upgrading our
+ lock on it to exclusive.
+ *) a) If the previous step succeeds, handler::ha_commit_inplace_alter_table() is
+ called to allow the storage engine to do any final updates to its structures,
+ to make all earlier changes durable and visible to other connections.
+       b) If we have failed to upgrade lock or any errors have occurred during the
+ handler functions calls (including commit), we call
+ handler::ha_commit_inplace_alter_table()
+ to rollback all changes which were done during previous steps.
+
+ Phase 3 : Final
+ ===============
+
+ In this phase we:
+
+ *) Update SQL-layer data-dictionary by installing .FRM file for the new version
+ of the table.
+ *) Inform the storage engine about this change by calling the
+ handler::ha_notify_table_changed() method.
+ *) Destroy the Alter_inplace_info and handler_ctx objects.
+
+ */
+
+ /**
+ Check if a storage engine supports a particular alter table in-place
+
+ @param altered_table TABLE object for new version of table.
+ @param ha_alter_info Structure describing changes to be done
+ by ALTER TABLE and holding data used
+ during in-place alter.
+
+ @retval HA_ALTER_ERROR Unexpected error.
+ @retval HA_ALTER_INPLACE_NOT_SUPPORTED Not supported, must use copy.
+ @retval HA_ALTER_INPLACE_EXCLUSIVE_LOCK Supported, but requires X lock.
+ @retval HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE
+ Supported, but requires SNW lock
+ during main phase. Prepare phase
+ requires X lock.
+ @retval HA_ALTER_INPLACE_SHARED_LOCK Supported, but requires SNW lock.
+ @retval HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE
+ Supported, concurrent reads/writes
+ allowed. However, prepare phase
+ requires X lock.
+ @retval HA_ALTER_INPLACE_NO_LOCK Supported, concurrent
+ reads/writes allowed.
+
+ @note The default implementation uses the old in-place ALTER API
+ to determine if the storage engine supports in-place ALTER or not.
+
+ @note Called without holding thr_lock.c lock.
+ */
+ virtual enum_alter_inplace_result
+ check_if_supported_inplace_alter(TABLE *altered_table,
+ Alter_inplace_info *ha_alter_info);
+
+
+ /**
+ Public functions wrapping the actual handler call.
+ @see prepare_inplace_alter_table()
+ */
+ bool ha_prepare_inplace_alter_table(TABLE *altered_table,
+ Alter_inplace_info *ha_alter_info);
+
+
+ /**
+ Public function wrapping the actual handler call.
+ @see inplace_alter_table()
+ */
+ bool ha_inplace_alter_table(TABLE *altered_table,
+ Alter_inplace_info *ha_alter_info)
+ {
+ return inplace_alter_table(altered_table, ha_alter_info);
+ }
+
+
+ /**
+ Public function wrapping the actual handler call.
+ Allows us to enforce asserts regardless of handler implementation.
+ @see commit_inplace_alter_table()
+ */
+ bool ha_commit_inplace_alter_table(TABLE *altered_table,
+ Alter_inplace_info *ha_alter_info,
+ bool commit);
+
+
+ /**
+ Public function wrapping the actual handler call.
+ @see notify_table_changed()
+ */
+ void ha_notify_table_changed()
+ {
+ notify_table_changed();
+ }
+
+
+protected:
+ /**
+ Allows the storage engine to update internal structures with concurrent
+ writes blocked. If check_if_supported_inplace_alter() returns
+ HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE or
+    HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE, this function is called with
+ exclusive lock otherwise the same level of locking as for
+ inplace_alter_table() will be used.
+
+ @note Storage engines are responsible for reporting any errors by
+ calling my_error()/print_error()
+
+ @note If this function reports error, commit_inplace_alter_table()
+ will be called with commit= false.
+
+ @note For partitioning, failing to prepare one partition, means that
+ commit_inplace_alter_table() will be called to roll back changes for
+ all partitions. This means that commit_inplace_alter_table() might be
+ called without prepare_inplace_alter_table() having been called first
+ for a given partition.
+
+ @param altered_table TABLE object for new version of table.
+ @param ha_alter_info Structure describing changes to be done
+ by ALTER TABLE and holding data used
+ during in-place alter.
+
+ @retval true Error
+ @retval false Success
+ */
+ virtual bool prepare_inplace_alter_table(TABLE *altered_table,
+ Alter_inplace_info *ha_alter_info)
+ { return false; }
+
+
+ /**
+ Alter the table structure in-place with operations specified using HA_ALTER_FLAGS
+ and Alter_inplace_info. The level of concurrency allowed during this
+ operation depends on the return value from check_if_supported_inplace_alter().
+
+ @note Storage engines are responsible for reporting any errors by
+ calling my_error()/print_error()
+
+ @note If this function reports error, commit_inplace_alter_table()
+ will be called with commit= false.
+
+ @param altered_table TABLE object for new version of table.
+ @param ha_alter_info Structure describing changes to be done
+ by ALTER TABLE and holding data used
+ during in-place alter.
+
+ @retval true Error
+ @retval false Success
+ */
+ virtual bool inplace_alter_table(TABLE *altered_table,
+ Alter_inplace_info *ha_alter_info)
+ { return false; }
+
+
+ /**
+ Commit or rollback the changes made during prepare_inplace_alter_table()
+ and inplace_alter_table() inside the storage engine.
+ Note that in case of rollback the allowed level of concurrency during
+ this operation will be the same as for inplace_alter_table() and thus
+ might be higher than during prepare_inplace_alter_table(). (For example,
+ concurrent writes were blocked during prepare, but might not be during
+ rollback).
+
+ @note Storage engines are responsible for reporting any errors by
+ calling my_error()/print_error()
+
+ @note If this function with commit= true reports error, it will be called
+ again with commit= false.
+
+ @note In case of partitioning, this function might be called for rollback
+ without prepare_inplace_alter_table() having been called first.
+ @see prepare_inplace_alter_table().
+
+ @param altered_table TABLE object for new version of table.
+ @param ha_alter_info Structure describing changes to be done
+ by ALTER TABLE and holding data used
+ during in-place alter.
+ @param commit True => Commit, False => Rollback.
+
+ @retval true Error
+ @retval false Success
+ */
+ virtual bool commit_inplace_alter_table(TABLE *altered_table,
+ Alter_inplace_info *ha_alter_info,
+ bool commit)
+ { return false; }
+
+
+ /**
+ Notify the storage engine that the table structure (.FRM) has been updated.
+
+ @note No errors are allowed during notify_table_changed().
+ */
+ virtual void notify_table_changed();
+
+public:
+ /* End of On-line/in-place ALTER TABLE interface. */
+
+
/**
use_hidden_primary_key() is called in case of an update/delete when
(table_flags() and HA_PRIMARY_KEY_REQUIRED_FOR_DELETE) is defined
@@ -2944,28 +3654,6 @@ protected:
*/
PSI_table_share *ha_table_share_psi() const;
- inline void psi_open()
- {
- DBUG_ASSERT(m_psi == NULL);
- DBUG_ASSERT(table_share != NULL);
- if (PSI_server)
- {
- PSI_table_share *share_psi= ha_table_share_psi();
- if (share_psi)
- m_psi= PSI_CALL_open_table(share_psi, this);
- }
- }
-
- inline void psi_close()
- {
- if (PSI_server && m_psi)
- {
- PSI_CALL_close_table(m_psi);
- m_psi= NULL; /* instrumentation handle, invalid after close_table() */
- }
- DBUG_ASSERT(m_psi == NULL);
- }
-
/**
Default rename_table() and delete_table() rename/delete files with a
given name and extensions from bas_ext().
@@ -3015,6 +3703,14 @@ private:
return HA_ERR_WRONG_COMMAND;
}
+ /**
+ Update a single row.
+
+ Note: If HA_ERR_FOUND_DUPP_KEY is returned, the handler must read
+ all columns of the row so MySQL can create an error message. If
+ the columns required for the error message are not read, the error
+ message will contain garbage.
+ */
virtual int update_row(const uchar *old_data __attribute__((unused)),
uchar *new_data __attribute__((unused)))
{
@@ -3078,9 +3774,12 @@ private:
}
virtual void start_bulk_insert(ha_rows rows, uint flags) {}
virtual int end_bulk_insert() { return 0; }
+protected:
virtual int index_read(uchar * buf, const uchar * key, uint key_len,
enum ha_rkey_function find_flag)
{ return HA_ERR_WRONG_COMMAND; }
+ friend class ha_partition;
+public:
/**
This method is similar to update_row, however the handler doesn't need
to execute the updates at this point in time. The handler can be certain
@@ -3165,7 +3864,16 @@ private:
{ return HA_ERR_WRONG_COMMAND; }
virtual int rename_partitions(const char *path)
{ return HA_ERR_WRONG_COMMAND; }
- friend class ha_partition;
+ virtual bool set_ha_share_ref(Handler_share **arg_ha_share)
+ {
+ DBUG_ASSERT(!ha_share);
+ DBUG_ASSERT(arg_ha_share);
+ if (ha_share || !arg_ha_share)
+ return true;
+ ha_share= arg_ha_share;
+ return false;
+ }
+ int get_lock_type() const { return m_lock_type; }
public:
/* XXX to be removed, see ha_partition::partition_ht() */
virtual handlerton *partition_ht() const
@@ -3174,6 +3882,11 @@ public:
inline int ha_update_tmp_row(const uchar * old_data, uchar * new_data);
friend enum icp_result handler_index_cond_check(void* h_arg);
+protected:
+ Handler_share *get_ha_share_ptr();
+ void set_ha_share_ptr(Handler_share *arg_ha_share);
+ void lock_shared_ha_data();
+ void unlock_shared_ha_data();
};
#include "multi_range_read.h"
@@ -3340,4 +4053,7 @@ inline const char *table_case_name(HA_CREATE_INFO *info, const char *name)
{
return ((lower_case_table_names == 2 && info->alias) ? info->alias : name);
}
+
+void print_keydup_error(TABLE *table, KEY *key, const char *msg, myf errflag);
+void print_keydup_error(TABLE *table, KEY *key, myf errflag);
#endif
diff --git a/sql/hash_filo.h b/sql/hash_filo.h
index b6068348d1d..abba4824c9e 100644
--- a/sql/hash_filo.h
+++ b/sql/hash_filo.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -47,8 +47,11 @@ private:
class hash_filo
{
- const uint size, key_offset, key_length;
+private:
+ const uint key_offset, key_length;
const my_hash_get_key get_key;
+ /** Size of this hash table. */
+ uint m_size;
my_hash_free_key free_element;
bool init;
CHARSET_INFO *hash_charset;
@@ -61,9 +64,12 @@ public:
hash_filo(uint size_arg, uint key_offset_arg , uint key_length_arg,
my_hash_get_key get_key_arg, my_hash_free_key free_element_arg,
CHARSET_INFO *hash_charset_arg)
- :size(size_arg), key_offset(key_offset_arg), key_length(key_length_arg),
- get_key(get_key_arg), free_element(free_element_arg),init(0),
- hash_charset(hash_charset_arg)
+ :key_offset(key_offset_arg), key_length(key_length_arg),
+ get_key(get_key_arg), m_size(size_arg),
+ free_element(free_element_arg),init(0),
+ hash_charset(hash_charset_arg),
+ first_link(NULL),
+ last_link(NULL)
{
bzero((char*) &cache,sizeof(cache));
}
@@ -86,32 +92,61 @@ public:
}
if (!locked)
mysql_mutex_lock(&lock);
+ first_link= NULL;
+ last_link= NULL;
(void) my_hash_free(&cache);
- (void) my_hash_init(&cache,hash_charset,size,key_offset,
+ (void) my_hash_init(&cache,hash_charset,m_size,key_offset,
key_length, get_key, free_element,0);
if (!locked)
mysql_mutex_unlock(&lock);
- first_link=last_link=0;
+ }
+
+ hash_filo_element *first()
+ {
+ mysql_mutex_assert_owner(&lock);
+ return first_link;
+ }
+
+ hash_filo_element *last()
+ {
+ mysql_mutex_assert_owner(&lock);
+ return last_link;
}
hash_filo_element *search(uchar* key, size_t length)
{
+ mysql_mutex_assert_owner(&lock);
+
hash_filo_element *entry=(hash_filo_element*)
my_hash_search(&cache,(uchar*) key,length);
if (entry)
{ // Found; link it first
+ DBUG_ASSERT(first_link != NULL);
+ DBUG_ASSERT(last_link != NULL);
if (entry != first_link)
{ // Relink used-chain
if (entry == last_link)
- last_link=entry->prev_used;
+ {
+ last_link= last_link->prev_used;
+ /*
+ The list must have at least 2 elements,
+ otherwise entry would be equal to first_link.
+ */
+ DBUG_ASSERT(last_link != NULL);
+ last_link->next_used= NULL;
+ }
else
{
+ DBUG_ASSERT(entry->next_used != NULL);
+ DBUG_ASSERT(entry->prev_used != NULL);
entry->next_used->prev_used = entry->prev_used;
entry->prev_used->next_used = entry->next_used;
}
- if ((entry->next_used= first_link))
- first_link->prev_used=entry;
- first_link=entry;
+ entry->prev_used= NULL;
+ entry->next_used= first_link;
+
+ first_link->prev_used= entry;
+ first_link=entry;
}
}
return entry;
@@ -119,10 +154,20 @@ public:
bool add(hash_filo_element *entry)
{
- if (cache.records == size)
+ if (!m_size) return 1;
+ if (cache.records == m_size)
{
hash_filo_element *tmp=last_link;
- last_link=last_link->prev_used;
+ last_link= last_link->prev_used;
+ if (last_link != NULL)
+ {
+ last_link->next_used= NULL;
+ }
+ else
+ {
+ /* Pathological case, m_size == 1 */
+ first_link= NULL;
+ }
my_hash_delete(&cache,(uchar*) tmp);
}
if (my_hash_insert(&cache,(uchar*) entry))
@@ -131,13 +176,27 @@ public:
(*free_element)(entry); // This should never happen
return 1;
}
- if ((entry->next_used=first_link))
- first_link->prev_used=entry;
+ entry->prev_used= NULL;
+ entry->next_used= first_link;
+ if (first_link != NULL)
+ first_link->prev_used= entry;
else
- last_link=entry;
- first_link=entry;
+ last_link= entry;
+ first_link= entry;
+
return 0;
}
+
+ uint size()
+ { return m_size; }
+
+ void resize(uint new_size)
+ {
+ mysql_mutex_lock(&lock);
+ m_size= new_size;
+ clear(true);
+ mysql_mutex_unlock(&lock);
+ }
};
#endif
diff --git a/sql/hostname.cc b/sql/hostname.cc
index 3540dd8c8ab..6c3c70aa7ea 100644
--- a/sql/hostname.cc
+++ b/sql/hostname.cc
@@ -24,7 +24,6 @@
Hostnames are checked with reverse name lookup and checked that they
doesn't resemble an IP address.
*/
-
#include "sql_priv.h"
#include "hostname.h"
#include "my_global.h"
@@ -50,54 +49,101 @@ extern "C" { // Because of SCO 3.2V4.2
}
#endif
-/*
- HOST_ENTRY_KEY_SIZE -- size of IP address string in the hash cache.
-*/
-
-#define HOST_ENTRY_KEY_SIZE INET6_ADDRSTRLEN
-
-/**
- An entry in the hostname hash table cache.
-
- Host name cache does two things:
- - caches host names to save DNS look ups;
- - counts connect errors from IP.
-
- Host name can be NULL (that means DNS look up failed), but connect errors
- still are counted.
-*/
-
-class Host_entry :public hash_filo_element
+Host_errors::Host_errors()
+: m_connect(0),
+ m_host_blocked(0),
+ m_nameinfo_transient(0),
+ m_nameinfo_permanent(0),
+ m_format(0),
+ m_addrinfo_transient(0),
+ m_addrinfo_permanent(0),
+ m_FCrDNS(0),
+ m_host_acl(0),
+ m_no_auth_plugin(0),
+ m_auth_plugin(0),
+ m_handshake(0),
+ m_proxy_user(0),
+ m_proxy_user_acl(0),
+ m_authentication(0),
+ m_ssl(0),
+ m_max_user_connection(0),
+ m_max_user_connection_per_hour(0),
+ m_default_database(0),
+ m_init_connect(0),
+ m_local(0)
+{}
+
+Host_errors::~Host_errors()
+{}
+
+void Host_errors::reset()
{
-public:
- /**
- Client IP address. This is the key used with the hash table.
-
- The client IP address is always expressed in IPv6, even when the
- network IPv6 stack is not present.
-
- This IP address is never used to connect to a socket.
- */
- char ip_key[HOST_ENTRY_KEY_SIZE];
-
- /**
- Number of errors during handshake phase from the IP address.
- */
- uint connect_errors;
+ m_connect= 0;
+ m_host_blocked= 0;
+ m_nameinfo_transient= 0;
+ m_nameinfo_permanent= 0;
+ m_format= 0;
+ m_addrinfo_transient= 0;
+ m_addrinfo_permanent= 0;
+ m_FCrDNS= 0;
+ m_host_acl= 0;
+ m_no_auth_plugin= 0;
+ m_auth_plugin= 0;
+ m_handshake= 0;
+ m_proxy_user= 0;
+ m_proxy_user_acl= 0;
+ m_authentication= 0;
+ m_ssl= 0;
+ m_max_user_connection= 0;
+ m_max_user_connection_per_hour= 0;
+ m_default_database= 0;
+ m_init_connect= 0;
+ m_local= 0;
+}
- /**
- One of the host names for the IP address. May be NULL.
- */
- const char *hostname;
-};
+void Host_errors::aggregate(const Host_errors *errors)
+{
+ m_connect+= errors->m_connect;
+ m_host_blocked+= errors->m_host_blocked;
+ m_nameinfo_transient+= errors->m_nameinfo_transient;
+ m_nameinfo_permanent+= errors->m_nameinfo_permanent;
+ m_format+= errors->m_format;
+ m_addrinfo_transient+= errors->m_addrinfo_transient;
+ m_addrinfo_permanent+= errors->m_addrinfo_permanent;
+ m_FCrDNS+= errors->m_FCrDNS;
+ m_host_acl+= errors->m_host_acl;
+ m_no_auth_plugin+= errors->m_no_auth_plugin;
+ m_auth_plugin+= errors->m_auth_plugin;
+ m_handshake+= errors->m_handshake;
+ m_proxy_user+= errors->m_proxy_user;
+ m_proxy_user_acl+= errors->m_proxy_user_acl;
+ m_authentication+= errors->m_authentication;
+ m_ssl+= errors->m_ssl;
+ m_max_user_connection+= errors->m_max_user_connection;
+ m_max_user_connection_per_hour+= errors->m_max_user_connection_per_hour;
+ m_default_database+= errors->m_default_database;
+ m_init_connect+= errors->m_init_connect;
+ m_local+= errors->m_local;
+}
static hash_filo *hostname_cache;
+ulong host_cache_size;
void hostname_cache_refresh()
{
hostname_cache->clear();
}
+uint hostname_cache_size()
+{
+ return hostname_cache->size();
+}
+
+void hostname_cache_resize(uint size)
+{
+ hostname_cache->resize(size);
+}
+
bool hostname_cache_init()
{
Host_entry tmp;
@@ -120,6 +166,16 @@ void hostname_cache_free()
hostname_cache= NULL;
}
+void hostname_cache_lock()
+{
+ mysql_mutex_lock(&hostname_cache->lock);
+}
+
+void hostname_cache_unlock()
+{
+ mysql_mutex_unlock(&hostname_cache->lock);
+}
+
static void prepare_hostname_cache_key(const char *ip_string,
char *ip_key)
{
@@ -130,69 +186,119 @@ static void prepare_hostname_cache_key(const char *ip_string,
memcpy(ip_key, ip_string, ip_string_length);
}
+Host_entry *hostname_cache_first()
+{ return (Host_entry *) hostname_cache->first(); }
+
static inline Host_entry *hostname_cache_search(const char *ip_key)
{
return (Host_entry *) hostname_cache->search((uchar *) ip_key, 0);
}
-static bool add_hostname_impl(const char *ip_key, const char *hostname)
+static void add_hostname_impl(const char *ip_key, const char *hostname,
+ bool validated, Host_errors *errors,
+ ulonglong now)
{
- if (hostname_cache_search(ip_key))
- return FALSE;
-
- size_t hostname_size= hostname ? strlen(hostname) + 1 : 0;
-
- Host_entry *entry= (Host_entry *) malloc(sizeof (Host_entry) + hostname_size);
-
- if (!entry)
- return TRUE;
-
- char *hostname_copy;
+ Host_entry *entry;
+ bool need_add= false;
- memcpy(&entry->ip_key, ip_key, HOST_ENTRY_KEY_SIZE);
+ entry= hostname_cache_search(ip_key);
- if (hostname_size)
+ if (likely(entry == NULL))
{
- hostname_copy= (char *) (entry + 1);
- memcpy(hostname_copy, hostname, hostname_size);
-
- DBUG_PRINT("info", ("Adding '%s' -> '%s' to the hostname cache...'",
- (const char *) ip_key,
- (const char *) hostname_copy));
+ entry= (Host_entry *) malloc(sizeof (Host_entry));
+ if (entry == NULL)
+ return;
+
+ need_add= true;
+ memcpy(&entry->ip_key, ip_key, HOST_ENTRY_KEY_SIZE);
+ entry->m_errors.reset();
+ entry->m_hostname_length= 0;
+ entry->m_host_validated= false;
+ entry->m_first_seen= now;
+ entry->m_last_seen= now;
+ entry->m_first_error_seen= 0;
+ entry->m_last_error_seen= 0;
}
else
{
- hostname_copy= NULL;
+ entry->m_last_seen= now;
+ }
- DBUG_PRINT("info", ("Adding '%s' -> NULL to the hostname cache...'",
- (const char *) ip_key));
+ if (validated)
+ {
+ if (hostname != NULL)
+ {
+ uint len= strlen(hostname);
+ if (len > sizeof(entry->m_hostname) - 1)
+ len= sizeof(entry->m_hostname) - 1;
+ memcpy(entry->m_hostname, hostname, len);
+ entry->m_hostname[len]= '\0';
+ entry->m_hostname_length= len;
+
+ DBUG_PRINT("info",
+ ("Adding/Updating '%s' -> '%s' (validated) to the hostname cache...'",
+ (const char *) ip_key,
+ (const char *) entry->m_hostname));
+ }
+ else
+ {
+ entry->m_hostname_length= 0;
+ DBUG_PRINT("info",
+ ("Adding/Updating '%s' -> NULL (validated) to the hostname cache...'",
+ (const char *) ip_key));
+ }
+ entry->m_host_validated= true;
+ /*
+ New errors that are considered 'blocking',
+ that will eventually cause the IP to be black listed and blocked.
+ */
+ errors->sum_connect_errors();
+ }
+ else
+ {
+ entry->m_hostname_length= 0;
+ entry->m_host_validated= false;
+ /* Do not count new blocking errors during DNS failures. */
+ errors->clear_connect_errors();
+ DBUG_PRINT("info",
+ ("Adding/Updating '%s' -> NULL (not validated) to the hostname cache...'",
+ (const char *) ip_key));
}
- entry->hostname= hostname_copy;
- entry->connect_errors= 0;
+ if (errors->has_error())
+ entry->set_error_timestamps(now);
+
+ entry->m_errors.aggregate(errors);
- return hostname_cache->add(entry);
+ if (need_add)
+ hostname_cache->add(entry);
+
+ return;
}
-static bool add_hostname(const char *ip_key, const char *hostname)
+static void add_hostname(const char *ip_key, const char *hostname,
+ bool validated, Host_errors *errors)
{
if (specialflag & SPECIAL_NO_HOST_CACHE)
- return FALSE;
+ return;
+
+ ulonglong now= my_hrtime().val;
mysql_mutex_lock(&hostname_cache->lock);
- bool err_status= add_hostname_impl(ip_key, hostname);
+ add_hostname_impl(ip_key, hostname, validated, errors, now);
mysql_mutex_unlock(&hostname_cache->lock);
- return err_status;
+ return;
}
-void inc_host_errors(const char *ip_string)
+void inc_host_errors(const char *ip_string, Host_errors *errors)
{
if (!ip_string)
return;
+ ulonglong now= my_hrtime().val;
char ip_key[HOST_ENTRY_KEY_SIZE];
prepare_hostname_cache_key(ip_string, ip_key);
@@ -201,13 +307,20 @@ void inc_host_errors(const char *ip_string)
Host_entry *entry= hostname_cache_search(ip_key);
if (entry)
- entry->connect_errors++;
+ {
+ if (entry->m_host_validated)
+ errors->sum_connect_errors();
+ else
+ errors->clear_connect_errors();
+
+ entry->m_errors.aggregate(errors);
+ entry->set_error_timestamps(now);
+ }
mysql_mutex_unlock(&hostname_cache->lock);
}
-
-void reset_host_errors(const char *ip_string)
+void reset_host_connect_errors(const char *ip_string)
{
if (!ip_string)
return;
@@ -220,12 +333,11 @@ void reset_host_errors(const char *ip_string)
Host_entry *entry= hostname_cache_search(ip_key);
if (entry)
- entry->connect_errors= 0;
+ entry->m_errors.clear_connect_errors();
mysql_mutex_unlock(&hostname_cache->lock);
}
-
static inline bool is_ip_loopback(const struct sockaddr *ip)
{
switch (ip->sa_family) {
@@ -277,6 +389,7 @@ static inline bool is_hostname_valid(const char *hostname)
- returns host name if IP-address is validated;
- set value to out-variable connect_errors -- this variable represents the
number of connection errors from the specified IP-address.
+ - update the host_cache statistics
NOTE: connect_errors are counted (are supported) only for the clients
where IP-address can be resolved and FCrDNS check is passed.
@@ -287,37 +400,43 @@ static inline bool is_hostname_valid(const char *hostname)
@param [out] connect_errors
@return Error status
- @retval FALSE Success
- @retval TRUE Error
+ @retval 0 Success
+ @retval RC_BLOCKED_HOST The host is blocked.
The function does not set/report MySQL server error in case of failure.
It's caller's responsibility to handle failures of this function
properly.
*/
-bool ip_to_hostname(struct sockaddr_storage *ip_storage,
- const char *ip_string,
- char **hostname, uint *connect_errors)
+int ip_to_hostname(struct sockaddr_storage *ip_storage,
+ const char *ip_string,
+ char **hostname,
+ uint *connect_errors)
{
const struct sockaddr *ip= (const sockaddr *) ip_storage;
int err_code;
bool err_status;
+ Host_errors errors;
DBUG_ENTER("ip_to_hostname");
DBUG_PRINT("info", ("IP address: '%s'; family: %d.",
(const char *) ip_string,
(int) ip->sa_family));
+ /* Default output values, for most cases. */
+ *hostname= NULL;
+ *connect_errors= 0;
+
/* Check if we have loopback address (127.0.0.1 or ::1). */
if (is_ip_loopback(ip))
{
DBUG_PRINT("info", ("Loopback address detected."));
- *connect_errors= 0; /* Do not count connect errors from localhost. */
+ /* Do not count connect errors from localhost. */
*hostname= (char *) my_localhost;
- DBUG_RETURN(FALSE);
+ DBUG_RETURN(0);
}
/* Prepare host name cache key. */
@@ -329,27 +448,45 @@ bool ip_to_hostname(struct sockaddr_storage *ip_storage,
if (!(specialflag & SPECIAL_NO_HOST_CACHE))
{
+ ulonglong now= my_hrtime().val;
+
mysql_mutex_lock(&hostname_cache->lock);
Host_entry *entry= hostname_cache_search(ip_key);
if (entry)
{
- *connect_errors= entry->connect_errors;
- *hostname= NULL;
+ entry->m_last_seen= now;
+
+ if (entry->m_errors.m_connect > max_connect_errors)
+ {
+ entry->m_errors.m_host_blocked++;
+ entry->set_error_timestamps(now);
+ *connect_errors= entry->m_errors.m_connect;
+ mysql_mutex_unlock(&hostname_cache->lock);
+ DBUG_RETURN(RC_BLOCKED_HOST);
+ }
- if (entry->hostname)
- *hostname= my_strdup(entry->hostname, MYF(0));
+ /*
+ If there is an IP -> HOSTNAME association in the cache,
+ but for a hostname that was not validated,
+ do not return that hostname: perform the network validation again.
+ */
+ if (entry->m_host_validated)
+ {
+ if (entry->m_hostname_length)
+ *hostname= my_strdup(entry->m_hostname, MYF(0));
- DBUG_PRINT("info",("IP (%s) has been found in the cache. "
- "Hostname: '%s'; connect_errors: %d",
- (const char *) ip_key,
- (const char *) (*hostname? *hostname : "null"),
- (int) *connect_errors));
+ DBUG_PRINT("info",("IP (%s) has been found in the cache. "
+ "Hostname: '%s'",
+ (const char *) ip_key,
+ (const char *) (*hostname? *hostname : "null")
+ ));
- mysql_mutex_unlock(&hostname_cache->lock);
+ mysql_mutex_unlock(&hostname_cache->lock);
- DBUG_RETURN(FALSE);
+ DBUG_RETURN(0);
+ }
}
mysql_mutex_unlock(&hostname_cache->lock);
@@ -367,13 +504,60 @@ bool ip_to_hostname(struct sockaddr_storage *ip_storage,
err_code= vio_getnameinfo(ip, hostname_buffer, NI_MAXHOST, NULL, 0,
NI_NAMEREQD);
- /* BEGIN : DEBUG */
- DBUG_EXECUTE_IF("addr_fake_ipv4",
+ /*
+ ===========================================================================
+ DEBUG code only (begin)
+ Simulate various output from vio_getnameinfo().
+ ===========================================================================
+ */
+
+ DBUG_EXECUTE_IF("getnameinfo_error_noname",
+ {
+ strcpy(hostname_buffer, "<garbage>");
+ err_code= EAI_NONAME;
+ }
+ );
+
+ DBUG_EXECUTE_IF("getnameinfo_error_again",
+ {
+ strcpy(hostname_buffer, "<garbage>");
+ err_code= EAI_AGAIN;
+ }
+ );
+
+ DBUG_EXECUTE_IF("getnameinfo_fake_ipv4",
{
strcpy(hostname_buffer, "santa.claus.ipv4.example.com");
err_code= 0;
- };);
- /* END : DEBUG */
+ }
+ );
+
+ DBUG_EXECUTE_IF("getnameinfo_fake_ipv6",
+ {
+ strcpy(hostname_buffer, "santa.claus.ipv6.example.com");
+ err_code= 0;
+ }
+ );
+
+ DBUG_EXECUTE_IF("getnameinfo_format_ipv4",
+ {
+ strcpy(hostname_buffer, "12.12.12.12");
+ err_code= 0;
+ }
+ );
+
+ DBUG_EXECUTE_IF("getnameinfo_format_ipv6",
+ {
+ strcpy(hostname_buffer, "12:DEAD:BEEF:0");
+ err_code= 0;
+ }
+ );
+
+ /*
+ ===========================================================================
+ DEBUG code only (end)
+ ===========================================================================
+ */
if (err_code)
{
@@ -387,23 +571,29 @@ bool ip_to_hostname(struct sockaddr_storage *ip_storage,
(const char *) ip_key,
(const char *) gai_strerror(err_code));
+ bool validated;
if (vio_is_no_name_error(err_code))
{
/*
The no-name error means that there is no reverse address mapping
for the IP address. A host name can not be resolved.
-
+ */
+ errors.m_nameinfo_permanent= 1;
+ validated= true;
+ }
+ else
+ {
+ /*
If it is not the no-name error, we should not cache the hostname
(or rather its absence), because the failure might be transient.
+ Only the ip error statistics are cached.
*/
-
- add_hostname(ip_key, NULL);
-
- *hostname= NULL;
- *connect_errors= 0; /* New IP added to the cache. */
+ errors.m_nameinfo_transient= 1;
+ validated= false;
}
+ add_hostname(ip_key, NULL, validated, &errors);
- DBUG_RETURN(FALSE);
+ DBUG_RETURN(0);
}
DBUG_PRINT("info", ("IP '%s' resolved to '%s'.",
@@ -439,24 +629,21 @@ bool ip_to_hostname(struct sockaddr_storage *ip_storage,
(const char *) ip_key,
(const char *) hostname_buffer);
- err_status= add_hostname(ip_key, NULL);
-
- *hostname= NULL;
- *connect_errors= 0; /* New IP added to the cache. */
+ errors.m_format= 1;
+ add_hostname(ip_key, hostname_buffer, false, &errors);
- DBUG_RETURN(err_status);
+ DBUG_RETURN(false);
}
- /*
- To avoid crashing the server in DBUG_EXECUTE_IF,
- Define a variable which depicts state of addr_info_list.
- */
- bool free_addr_info_list= false;
-
/* Get IP-addresses for the resolved host name (FCrDNS technique). */
struct addrinfo hints;
struct addrinfo *addr_info_list;
+ /*
+ Makes fault injection with DBUG_EXECUTE_IF easier.
+ Invoking free_addr_info(NULL) crashes on some platforms.
+ */
+ bool free_addr_info_list= false;
memset(&hints, 0, sizeof (struct addrinfo));
hints.ai_flags= AI_PASSIVE;
@@ -470,8 +657,72 @@ bool ip_to_hostname(struct sockaddr_storage *ip_storage,
if (err_code == 0)
free_addr_info_list= true;
- /* BEGIN : DEBUG */
- DBUG_EXECUTE_IF("addr_fake_ipv4",
+ /*
+ ===========================================================================
+ DEBUG code only (begin)
+ Simulate various output from getaddrinfo().
+ ===========================================================================
+ */
+ DBUG_EXECUTE_IF("getaddrinfo_error_noname",
+ {
+ if (free_addr_info_list)
+ freeaddrinfo(addr_info_list);
+
+ addr_info_list= NULL;
+ err_code= EAI_NONAME;
+ free_addr_info_list= false;
+ }
+ );
+
+ DBUG_EXECUTE_IF("getaddrinfo_error_again",
+ {
+ if (free_addr_info_list)
+ freeaddrinfo(addr_info_list);
+
+ addr_info_list= NULL;
+ err_code= EAI_AGAIN;
+ free_addr_info_list= false;
+ }
+ );
+
+ DBUG_EXECUTE_IF("getaddrinfo_fake_bad_ipv4",
+ {
+ if (free_addr_info_list)
+ freeaddrinfo(addr_info_list);
+
+ struct sockaddr_in *debug_addr;
+ /*
+ Not thread safe, which is ok.
+ Only one connection at a time is tested with
+ fault injection.
+ */
+ static struct sockaddr_in debug_sock_addr[2];
+ static struct addrinfo debug_addr_info[2];
+ /* Simulating ipv4 192.0.2.126 */
+ debug_addr= & debug_sock_addr[0];
+ debug_addr->sin_family= AF_INET;
+ debug_addr->sin_addr.s_addr= inet_addr("192.0.2.126");
+
+ /* Simulating ipv4 192.0.2.127 */
+ debug_addr= & debug_sock_addr[1];
+ debug_addr->sin_family= AF_INET;
+ debug_addr->sin_addr.s_addr= inet_addr("192.0.2.127");
+
+ debug_addr_info[0].ai_addr= (struct sockaddr*) & debug_sock_addr[0];
+ debug_addr_info[0].ai_addrlen= sizeof (struct sockaddr_in);
+ debug_addr_info[0].ai_next= & debug_addr_info[1];
+
+ debug_addr_info[1].ai_addr= (struct sockaddr*) & debug_sock_addr[1];
+ debug_addr_info[1].ai_addrlen= sizeof (struct sockaddr_in);
+ debug_addr_info[1].ai_next= NULL;
+
+ addr_info_list= & debug_addr_info[0];
+ err_code= 0;
+ free_addr_info_list= false;
+ }
+ );
+
+ DBUG_EXECUTE_IF("getaddrinfo_fake_good_ipv4",
{
if (free_addr_info_list)
freeaddrinfo(addr_info_list);
@@ -500,30 +751,186 @@ bool ip_to_hostname(struct sockaddr_storage *ip_storage,
addr_info_list= & debug_addr_info[0];
err_code= 0;
free_addr_info_list= false;
- };);
+ }
+ );
- /* END : DEBUG */
+#ifdef HAVE_IPV6
+ DBUG_EXECUTE_IF("getaddrinfo_fake_bad_ipv6",
+ {
+ if (free_addr_info_list)
+ freeaddrinfo(addr_info_list);
- if (err_code == EAI_NONAME)
- {
- /*
- Don't cache responses when the DNS server is down, as otherwise
- transient DNS failure may leave any number of clients (those
- that attempted to connect during the outage) unable to connect
- indefinitely.
- */
+ struct sockaddr_in6 *debug_addr;
+ struct in6_addr *ip6;
+ /*
+ Not thread safe, which is ok.
+ Only one connection at a time is tested with
+ fault injection.
+ */
+ static struct sockaddr_in6 debug_sock_addr[2];
+ static struct addrinfo debug_addr_info[2];
+ /* Simulating ipv6 2001:DB8::6:7E */
+ debug_addr= & debug_sock_addr[0];
+ debug_addr->sin6_family= AF_INET6;
+ ip6= & debug_addr->sin6_addr;
+ /* inet_pton not available on Windows XP. */
+ ip6->s6_addr[ 0] = 0x20;
+ ip6->s6_addr[ 1] = 0x01;
+ ip6->s6_addr[ 2] = 0x0d;
+ ip6->s6_addr[ 3] = 0xb8;
+ ip6->s6_addr[ 4] = 0x00;
+ ip6->s6_addr[ 5] = 0x00;
+ ip6->s6_addr[ 6] = 0x00;
+ ip6->s6_addr[ 7] = 0x00;
+ ip6->s6_addr[ 8] = 0x00;
+ ip6->s6_addr[ 9] = 0x00;
+ ip6->s6_addr[10] = 0x00;
+ ip6->s6_addr[11] = 0x00;
+ ip6->s6_addr[12] = 0x00;
+ ip6->s6_addr[13] = 0x06;
+ ip6->s6_addr[14] = 0x00;
+ ip6->s6_addr[15] = 0x7e;
+
+ /* Simulating ipv6 2001:DB8::6:7F */
+ debug_addr= & debug_sock_addr[1];
+ debug_addr->sin6_family= AF_INET6;
+ ip6= & debug_addr->sin6_addr;
+ ip6->s6_addr[ 0] = 0x20;
+ ip6->s6_addr[ 1] = 0x01;
+ ip6->s6_addr[ 2] = 0x0d;
+ ip6->s6_addr[ 3] = 0xb8;
+ ip6->s6_addr[ 4] = 0x00;
+ ip6->s6_addr[ 5] = 0x00;
+ ip6->s6_addr[ 6] = 0x00;
+ ip6->s6_addr[ 7] = 0x00;
+ ip6->s6_addr[ 8] = 0x00;
+ ip6->s6_addr[ 9] = 0x00;
+ ip6->s6_addr[10] = 0x00;
+ ip6->s6_addr[11] = 0x00;
+ ip6->s6_addr[12] = 0x00;
+ ip6->s6_addr[13] = 0x06;
+ ip6->s6_addr[14] = 0x00;
+ ip6->s6_addr[15] = 0x7f;
+
+ debug_addr_info[0].ai_addr= (struct sockaddr*) & debug_sock_addr[0];
+ debug_addr_info[0].ai_addrlen= sizeof (struct sockaddr_in6);
+ debug_addr_info[0].ai_next= & debug_addr_info[1];
- err_status= add_hostname(ip_key, NULL);
+ debug_addr_info[1].ai_addr= (struct sockaddr*) & debug_sock_addr[1];
+ debug_addr_info[1].ai_addrlen= sizeof (struct sockaddr_in6);
+ debug_addr_info[1].ai_next= NULL;
- *hostname= NULL;
- *connect_errors= 0; /* New IP added to the cache. */
+ addr_info_list= & debug_addr_info[0];
+ err_code= 0;
+ free_addr_info_list= false;
+ }
+ );
- DBUG_RETURN(err_status);
- }
- else if (err_code)
+ DBUG_EXECUTE_IF("getaddrinfo_fake_good_ipv6",
+ {
+ if (free_addr_info_list)
+ freeaddrinfo(addr_info_list);
+
+ struct sockaddr_in6 *debug_addr;
+ struct in6_addr *ip6;
+ /*
+ Not thread safe, which is ok.
+ Only one connection at a time is tested with
+ fault injection.
+ */
+ static struct sockaddr_in6 debug_sock_addr[2];
+ static struct addrinfo debug_addr_info[2];
+ /* Simulating ipv6 2001:DB8::6:7 */
+ debug_addr= & debug_sock_addr[0];
+ debug_addr->sin6_family= AF_INET6;
+ ip6= & debug_addr->sin6_addr;
+ ip6->s6_addr[ 0] = 0x20;
+ ip6->s6_addr[ 1] = 0x01;
+ ip6->s6_addr[ 2] = 0x0d;
+ ip6->s6_addr[ 3] = 0xb8;
+ ip6->s6_addr[ 4] = 0x00;
+ ip6->s6_addr[ 5] = 0x00;
+ ip6->s6_addr[ 6] = 0x00;
+ ip6->s6_addr[ 7] = 0x00;
+ ip6->s6_addr[ 8] = 0x00;
+ ip6->s6_addr[ 9] = 0x00;
+ ip6->s6_addr[10] = 0x00;
+ ip6->s6_addr[11] = 0x00;
+ ip6->s6_addr[12] = 0x00;
+ ip6->s6_addr[13] = 0x06;
+ ip6->s6_addr[14] = 0x00;
+ ip6->s6_addr[15] = 0x07;
+
+ /* Simulating ipv6 2001:DB8::6:6 */
+ debug_addr= & debug_sock_addr[1];
+ debug_addr->sin6_family= AF_INET6;
+ ip6= & debug_addr->sin6_addr;
+ ip6->s6_addr[ 0] = 0x20;
+ ip6->s6_addr[ 1] = 0x01;
+ ip6->s6_addr[ 2] = 0x0d;
+ ip6->s6_addr[ 3] = 0xb8;
+ ip6->s6_addr[ 4] = 0x00;
+ ip6->s6_addr[ 5] = 0x00;
+ ip6->s6_addr[ 6] = 0x00;
+ ip6->s6_addr[ 7] = 0x00;
+ ip6->s6_addr[ 8] = 0x00;
+ ip6->s6_addr[ 9] = 0x00;
+ ip6->s6_addr[10] = 0x00;
+ ip6->s6_addr[11] = 0x00;
+ ip6->s6_addr[12] = 0x00;
+ ip6->s6_addr[13] = 0x06;
+ ip6->s6_addr[14] = 0x00;
+ ip6->s6_addr[15] = 0x06;
+
+ debug_addr_info[0].ai_addr= (struct sockaddr*) & debug_sock_addr[0];
+ debug_addr_info[0].ai_addrlen= sizeof (struct sockaddr_in6);
+ debug_addr_info[0].ai_next= & debug_addr_info[1];
+
+ debug_addr_info[1].ai_addr= (struct sockaddr*) & debug_sock_addr[1];
+ debug_addr_info[1].ai_addrlen= sizeof (struct sockaddr_in6);
+ debug_addr_info[1].ai_next= NULL;
+
+ addr_info_list= & debug_addr_info[0];
+ err_code= 0;
+ free_addr_info_list= false;
+ }
+ );
+#endif /* HAVE_IPV6 */
+
+ /*
+ ===========================================================================
+ DEBUG code only (end)
+ ===========================================================================
+ */
+
+ if (err_code != 0)
{
- DBUG_PRINT("error", ("getaddrinfo() failed with error code %d.", err_code));
- DBUG_RETURN(TRUE);
+ sql_print_warning("Host name '%s' could not be resolved: %s",
+ (const char *) hostname_buffer,
+ (const char *) gai_strerror(err_code));
+
+ bool validated;
+
+ if (err_code == EAI_NONAME)
+ {
+ errors.m_addrinfo_permanent= 1;
+ validated= true;
+ }
+ else
+ {
+ /*
+ Don't cache responses when the DNS server is down, as otherwise
+ transient DNS failure may leave any number of clients (those
+ that attempted to connect during the outage) unable to connect
+ indefinitely.
+ Only cache error statistics.
+ */
+ errors.m_addrinfo_transient= 1;
+ validated= false;
+ }
+ add_hostname(ip_key, NULL, validated, &errors);
+
+ DBUG_RETURN(false);
}
/* Check that getaddrinfo() returned the used IP (FCrDNS technique). */
@@ -545,7 +952,7 @@ bool ip_to_hostname(struct sockaddr_storage *ip_storage,
DBUG_PRINT("info", (" - '%s'", (const char *) ip_buffer));
- if (strcmp(ip_key, ip_buffer) == 0)
+ if (strcasecmp(ip_key, ip_buffer) == 0)
{
/* Copy host name string to be stored in the cache. */
@@ -557,7 +964,7 @@ bool ip_to_hostname(struct sockaddr_storage *ip_storage,
if (free_addr_info_list)
freeaddrinfo(addr_info_list);
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(true);
}
break;
@@ -568,9 +975,11 @@ bool ip_to_hostname(struct sockaddr_storage *ip_storage,
if (!*hostname)
{
- sql_print_information("Hostname '%s' does not resolve to '%s'.",
- (const char *) hostname_buffer,
- (const char *) ip_key);
+ errors.m_FCrDNS= 1;
+
+ sql_print_warning("Hostname '%s' does not resolve to '%s'.",
+ (const char *) hostname_buffer,
+ (const char *) ip_key);
sql_print_information("Hostname '%s' has the following IP addresses:",
(const char *) hostname_buffer);
@@ -584,30 +993,16 @@ bool ip_to_hostname(struct sockaddr_storage *ip_storage,
ip_buffer, sizeof (ip_buffer));
DBUG_ASSERT(!err_status);
- sql_print_information(" - %s\n", (const char *) ip_buffer);
+ sql_print_information(" - %s", (const char *) ip_buffer);
}
}
- /* Free the result of getaddrinfo(). */
+ /* Add an entry for the IP to the cache. */
+ add_hostname(ip_key, *hostname, true, &errors);
+ /* Free the result of getaddrinfo(). */
if (free_addr_info_list)
freeaddrinfo(addr_info_list);
- /* Add an entry for the IP to the cache. */
-
- if (*hostname)
- {
- err_status= add_hostname(ip_key, *hostname);
- *connect_errors= 0;
- }
- else
- {
- DBUG_PRINT("error",("Couldn't verify hostname with getaddrinfo()."));
-
- err_status= add_hostname(ip_key, NULL);
- *hostname= NULL;
- *connect_errors= 0;
- }
-
- DBUG_RETURN(err_status);
+ DBUG_RETURN(false);
}
diff --git a/sql/hostname.h b/sql/hostname.h
index 6e9535c2947..81a1d0de88d 100644
--- a/sql/hostname.h
+++ b/sql/hostname.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -17,14 +17,168 @@
#define HOSTNAME_INCLUDED
#include "my_global.h" /* uint */
+#include "my_net.h"
+#include "hash_filo.h"
-bool ip_to_hostname(struct sockaddr_storage *ip_storage,
- const char *ip_string,
- char **hostname, uint *connect_errors);
-void inc_host_errors(const char *ip_string);
-void reset_host_errors(const char *ip_string);
+struct Host_errors
+{
+public:
+ Host_errors();
+ ~Host_errors();
+
+ void reset();
+ void aggregate(const Host_errors *errors);
+
+ /** Number of connect errors. */
+ ulong m_connect;
+
+ /** Number of host blocked errors. */
+ ulong m_host_blocked;
+ /** Number of transient errors from getnameinfo(). */
+ ulong m_nameinfo_transient;
+ /** Number of permanent errors from getnameinfo(). */
+ ulong m_nameinfo_permanent;
+ /** Number of errors from is_hostname_valid(). */
+ ulong m_format;
+ /** Number of transient errors from getaddrinfo(). */
+ ulong m_addrinfo_transient;
+ /** Number of permanent errors from getaddrinfo(). */
+ ulong m_addrinfo_permanent;
+ /** Number of errors from Forward-Confirmed reverse DNS checks. */
+ ulong m_FCrDNS;
+ /** Number of errors from host grants. */
+ ulong m_host_acl;
+ /** Number of errors from missing auth plugin. */
+ ulong m_no_auth_plugin;
+ /** Number of errors from auth plugin. */
+ ulong m_auth_plugin;
+ /** Number of errors from authentication plugins. */
+ ulong m_handshake;
+ /** Number of errors from proxy user. */
+ ulong m_proxy_user;
+ /** Number of errors from proxy user acl. */
+ ulong m_proxy_user_acl;
+ /** Number of errors from authentication. */
+ ulong m_authentication;
+ /** Number of errors from ssl. */
+ ulong m_ssl;
+ /** Number of errors from max user connection. */
+ ulong m_max_user_connection;
+ /** Number of errors from max user connection per hour. */
+ ulong m_max_user_connection_per_hour;
+ /** Number of errors from the default database. */
+ ulong m_default_database;
+ /** Number of errors from init_connect. */
+ ulong m_init_connect;
+ /** Number of errors from the server itself. */
+ ulong m_local;
+
+ bool has_error() const
+ {
+ return ((m_host_blocked != 0)
+ || (m_nameinfo_transient != 0)
+ || (m_nameinfo_permanent != 0)
+ || (m_format != 0)
+ || (m_addrinfo_transient != 0)
+ || (m_addrinfo_permanent != 0)
+ || (m_FCrDNS != 0)
+ || (m_host_acl != 0)
+ || (m_no_auth_plugin != 0)
+ || (m_auth_plugin != 0)
+ || (m_handshake != 0)
+ || (m_proxy_user != 0)
+ || (m_proxy_user_acl != 0)
+ || (m_authentication != 0)
+ || (m_ssl != 0)
+ || (m_max_user_connection != 0)
+ || (m_max_user_connection_per_hour != 0)
+ || (m_default_database != 0)
+ || (m_init_connect != 0)
+ || (m_local != 0));
+ }
+
+ void sum_connect_errors()
+ {
+ /* Current (historical) behavior: */
+ m_connect= m_handshake;
+ }
+
+ void clear_connect_errors()
+ {
+ m_connect= 0;
+ }
+};
+
+/** Size of IP address string in the hash cache. */
+#define HOST_ENTRY_KEY_SIZE INET6_ADDRSTRLEN
+
+/**
+ An entry in the hostname hash table cache.
+
+ Host name cache does two things:
+ - caches host names to save DNS look ups;
+ - counts errors from IP.
+
+ Host name can be empty (that means DNS look up failed),
+ but errors still are counted.
+*/
+class Host_entry : public hash_filo_element
+{
+public:
+ Host_entry *next()
+ { return (Host_entry*) hash_filo_element::next(); }
+
+ /**
+ Client IP address. This is the key used with the hash table.
+
+ The client IP address is always expressed in IPv6, even when the
+ network IPv6 stack is not present.
+
+ This IP address is never used to connect to a socket.
+ */
+ char ip_key[HOST_ENTRY_KEY_SIZE];
+
+ /**
+ One of the host names for the IP address. May be a zero length string.
+ */
+ char m_hostname[HOSTNAME_LENGTH + 1];
+ /** Length in bytes of @c m_hostname. */
+ uint m_hostname_length;
+ /** The hostname is validated and used for authorization. */
+ bool m_host_validated;
+ ulonglong m_first_seen;
+ ulonglong m_last_seen;
+ ulonglong m_first_error_seen;
+ ulonglong m_last_error_seen;
+ /** Error statistics. */
+ Host_errors m_errors;
+
+ void set_error_timestamps(ulonglong now)
+ {
+ if (m_first_error_seen == 0)
+ m_first_error_seen= now;
+ m_last_error_seen= now;
+ }
+};
+
+/** The size of the host_cache. */
+extern ulong host_cache_size;
+
+#define RC_OK 0
+#define RC_BLOCKED_HOST 1
+int ip_to_hostname(struct sockaddr_storage *ip_storage,
+ const char *ip_string,
+ char **hostname, uint *connect_errors);
+
+void inc_host_errors(const char *ip_string, Host_errors *errors);
+void reset_host_connect_errors(const char *ip_string);
bool hostname_cache_init();
void hostname_cache_free();
void hostname_cache_refresh(void);
+uint hostname_cache_size();
+void hostname_cache_resize(uint size);
+void hostname_cache_lock();
+void hostname_cache_unlock();
+Host_entry *hostname_cache_first();
#endif /* HOSTNAME_INCLUDED */
diff --git a/sql/innodb_priv.h b/sql/innodb_priv.h
index 33ba7b0f5b3..82d74236ff9 100644
--- a/sql/innodb_priv.h
+++ b/sql/innodb_priv.h
@@ -26,11 +26,9 @@ int get_quote_char_for_identifier(THD *thd, const char *name, uint length);
bool schema_table_store_record(THD *thd, TABLE *table);
void localtime_to_TIME(MYSQL_TIME *to, struct tm *from);
bool check_global_access(THD *thd, ulong want_access, bool no_errors=false);
-uint strconvert(CHARSET_INFO *from_cs, const char *from,
+uint strconvert(CHARSET_INFO *from_cs, const char *from, uint from_length,
CHARSET_INFO *to_cs, char *to, uint to_length,
uint *errors);
void sql_print_error(const char *format, ...);
-
-
#endif /* INNODB_PRIV_INCLUDED */
diff --git a/sql/item.cc b/sql/item.cc
index 6ce93f501fe..ac920004b80 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -105,7 +105,7 @@ void
Hybrid_type_traits_decimal::fix_length_and_dec(Item *item, Item *arg) const
{
item->decimals= arg->decimals;
- item->max_length= min(arg->max_length + DECIMAL_LONGLONG_DIGITS,
+ item->max_length= MY_MIN(arg->max_length + DECIMAL_LONGLONG_DIGITS,
DECIMAL_MAX_STR_LENGTH);
}
@@ -297,7 +297,7 @@ String *Item::val_string_from_decimal(String *str)
String *Item::val_string_from_date(String *str)
{
MYSQL_TIME ltime;
- if (get_date(&ltime, 0) ||
+ if (get_date(&ltime, sql_mode_for_dates()) ||
str->alloc(MAX_DATE_STRING_REP_LENGTH))
{
null_value= 1;
@@ -341,7 +341,7 @@ my_decimal *Item::val_decimal_from_string(my_decimal *decimal_value)
decimal_value) & E_DEC_BAD_NUM)
{
ErrConvString err(res);
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE,
ER(ER_TRUNCATED_WRONG_VALUE), "DECIMAL",
err.ptr());
@@ -354,7 +354,7 @@ my_decimal *Item::val_decimal_from_date(my_decimal *decimal_value)
{
DBUG_ASSERT(fixed == 1);
MYSQL_TIME ltime;
- if (get_date(&ltime, 0))
+ if (get_date(&ltime, sql_mode_for_dates()))
{
my_decimal_set_zero(decimal_value);
null_value= 1; // set NULL, stop processing
@@ -551,9 +551,9 @@ uint Item::decimal_precision() const
uint prec=
my_decimal_length_to_precision(max_char_length(), decimals,
unsigned_flag);
- return min(prec, DECIMAL_MAX_PRECISION);
+ return MY_MIN(prec, DECIMAL_MAX_PRECISION);
}
- return min(max_char_length(), DECIMAL_MAX_PRECISION);
+ return MY_MIN(max_char_length(), DECIMAL_MAX_PRECISION);
}
@@ -1005,14 +1005,14 @@ void Item::set_name(const char *str, uint length, CHARSET_INFO *cs)
{
char buff[SAFE_NAME_LEN];
strmake(buff, str_start,
- min(sizeof(buff)-1, length + (int) (str-str_start)));
+ MY_MIN(sizeof(buff)-1, length + (int) (str-str_start)));
if (length == 0)
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_NAME_BECOMES_EMPTY, ER(ER_NAME_BECOMES_EMPTY),
buff);
else
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_REMOVED_SPACES, ER(ER_REMOVED_SPACES),
buff);
}
@@ -1026,7 +1026,7 @@ void Item::set_name(const char *str, uint length, CHARSET_INFO *cs)
name_length= res_length;
}
else
- name= sql_strmake(str, (name_length= min(length,MAX_ALIAS_NAME)));
+ name= sql_strmake(str, (name_length= MY_MIN(length,MAX_ALIAS_NAME)));
}
@@ -1155,11 +1155,26 @@ Item *Item_static_float_func::safe_charset_converter(CHARSET_INFO *tocs)
Item *Item_string::safe_charset_converter(CHARSET_INFO *tocs)
{
+ return charset_converter(tocs, true);
+}
+
+
+/**
+ Convert a string item into the requested character set.
+
+ @param tocs Character set to to convert the string to.
+ @param lossless Whether data loss is acceptable.
+
+ @return A new item representing the converted string.
+*/
+Item *Item_string::charset_converter(CHARSET_INFO *tocs, bool lossless)
+{
Item_string *conv;
uint conv_errors;
char *ptr;
String tmp, cstr, *ostr= val_str(&tmp);
cstr.copy(ostr->ptr(), ostr->length(), ostr->charset(), tocs, &conv_errors);
+ conv_errors= lossless && conv_errors;
if (conv_errors || !(conv= new Item_string(cstr.ptr(), cstr.length(),
cstr.charset(),
collation.derivation)))
@@ -1180,7 +1195,6 @@ Item *Item_string::safe_charset_converter(CHARSET_INFO *tocs)
return conv;
}
-
Item *Item_param::safe_charset_converter(CHARSET_INFO *tocs)
{
if (const_item())
@@ -2998,7 +3012,7 @@ double_from_string_with_check(CHARSET_INFO *cs, const char *cptr,
We can use err.ptr() here as ErrConvString is guranteed to put an
end \0 here.
*/
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE,
ER(ER_TRUNCATED_WRONG_VALUE), "DOUBLE",
err.ptr());
@@ -3035,7 +3049,7 @@ longlong_from_string_with_check(CHARSET_INFO *cs, const char *cptr,
(end != end_of_num && !check_if_only_end_space(cs, end_of_num, end))))
{
ErrConvString err(cptr, end - cptr, cs);
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE,
ER(ER_TRUNCATED_WRONG_VALUE), "INTEGER",
err.ptr());
@@ -3244,7 +3258,7 @@ void Item_param::set_time(MYSQL_TIME *tm, timestamp_type time_type,
if (check_datetime_range(&value.time))
{
ErrConvTime str(&value.time);
- make_truncated_value_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ make_truncated_value_warning(current_thd, Sql_condition::WARN_LEVEL_WARN,
&str, time_type, 0);
set_zero_time(&value.time, MYSQL_TIMESTAMP_ERROR);
}
@@ -4323,7 +4337,7 @@ static bool mark_as_dependent(THD *thd, SELECT_LEX *last, SELECT_LEX *current,
return TRUE;
if (thd->lex->describe & DESCRIBE_EXTENDED)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_WARN_FIELD_RESOLVED, ER(ER_WARN_FIELD_RESOLVED),
db_name, (db_name[0] ? "." : ""),
table_name, (table_name [0] ? "." : ""),
@@ -4571,7 +4585,7 @@ resolve_ref_in_select_and_group(THD *thd, Item_ident *ref, SELECT_LEX *select)
!((*group_by_ref)->eq(*select_ref, 0)))
{
ambiguous_fields= TRUE;
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_NON_UNIQ_ERROR,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_NON_UNIQ_ERROR,
ER(ER_NON_UNIQ_ERROR), ref->full_name(),
current_thd->where);
@@ -5553,7 +5567,7 @@ String *Item::check_well_formed_result(String *str, bool send_error)
{
str->length(wlen);
}
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_INVALID_CHARACTER_STRING,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_INVALID_CHARACTER_STRING,
ER(ER_INVALID_CHARACTER_STRING), cs->csname, hexbuf);
}
return str;
@@ -5689,10 +5703,6 @@ Field *Item::tmp_table_field_from_field_type(TABLE *table, bool fixed_length)
field= new Field_double((uchar*) 0, max_length, null_ptr, 0, Field::NONE,
name, decimals, 0, unsigned_flag);
break;
- case MYSQL_TYPE_NULL:
- field= new Field_null((uchar*) 0, max_length, Field::NONE,
- name, &my_charset_bin);
- break;
case MYSQL_TYPE_INT24:
field= new Field_medium((uchar*) 0, max_length, null_ptr, 0, Field::NONE,
name, 0, unsigned_flag);
@@ -5723,6 +5733,7 @@ Field *Item::tmp_table_field_from_field_type(TABLE *table, bool fixed_length)
/* This case should never be chosen */
DBUG_ASSERT(0);
/* If something goes awfully wrong, it's better to get a string than die */
+ case MYSQL_TYPE_NULL:
case MYSQL_TYPE_STRING:
if (fixed_length && !too_big_for_varchar())
{
@@ -6176,7 +6187,7 @@ longlong Item_hex_hybrid::val_int()
// following assert is redundant, because fixed=1 assigned in constructor
DBUG_ASSERT(fixed == 1);
char *end=(char*) str_value.ptr()+str_value.length(),
- *ptr=end-min(str_value.length(),sizeof(longlong));
+ *ptr=end-MY_MIN(str_value.length(),sizeof(longlong));
ulonglong value=0;
for (; ptr != end ; ptr++)
@@ -6212,7 +6223,7 @@ int Item_hex_hybrid::save_in_field(Field *field, bool no_conversions)
warn:
if (!field->store((longlong) nr, TRUE))
- field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE,
+ field->set_warning(Sql_condition::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE,
1);
return 1;
}
@@ -6220,7 +6231,7 @@ warn:
void Item_hex_hybrid::print(String *str, enum_query_type query_type)
{
- uint32 len= min(str_value.length(), sizeof(longlong));
+ uint32 len= MY_MIN(str_value.length(), sizeof(longlong));
const char *ptr= str_value.ptr() + str_value.length() - len;
str->append("0x");
str->append_hex(ptr, len);
@@ -8225,7 +8236,7 @@ int Item_default_value::save_in_field(Field *field_arg, bool no_conversions)
{
TABLE_LIST *view= cached_table->top_table();
push_warning_printf(field_arg->table->in_use,
- MYSQL_ERROR::WARN_LEVEL_WARN,
+ Sql_condition::WARN_LEVEL_WARN,
ER_NO_DEFAULT_FOR_VIEW_FIELD,
ER(ER_NO_DEFAULT_FOR_VIEW_FIELD),
view->view_db.str,
@@ -8234,7 +8245,7 @@ int Item_default_value::save_in_field(Field *field_arg, bool no_conversions)
else
{
push_warning_printf(field_arg->table->in_use,
- MYSQL_ERROR::WARN_LEVEL_WARN,
+ Sql_condition::WARN_LEVEL_WARN,
ER_NO_DEFAULT_FOR_FIELD,
ER(ER_NO_DEFAULT_FOR_FIELD),
field_arg->field_name);
@@ -9386,14 +9397,14 @@ bool Item_type_holder::join_types(THD *thd, Item *item)
/* fix variable decimals which always is NOT_FIXED_DEC */
if (Field::result_merge_type(fld_type) == INT_RESULT)
item_decimals= 0;
- decimals= max(decimals, item_decimals);
+ decimals= MY_MAX(decimals, item_decimals);
}
if (Field::result_merge_type(fld_type) == DECIMAL_RESULT)
{
- decimals= min(max(decimals, item->decimals), DECIMAL_MAX_SCALE);
+ decimals= MY_MIN(MY_MAX(decimals, item->decimals), DECIMAL_MAX_SCALE);
int item_int_part= item->decimal_int_part();
- int item_prec = max(prev_decimal_int_part, item_int_part) + decimals;
- int precision= min(item_prec, DECIMAL_MAX_PRECISION);
+ int item_prec = MY_MAX(prev_decimal_int_part, item_int_part) + decimals;
+ int precision= MY_MIN(item_prec, DECIMAL_MAX_PRECISION);
unsigned_flag&= item->unsigned_flag;
max_length= my_decimal_precision_to_length_no_truncation(precision,
decimals,
@@ -9424,7 +9435,7 @@ bool Item_type_holder::join_types(THD *thd, Item *item)
*/
if (collation.collation != &my_charset_bin)
{
- max_length= max(old_max_chars * collation.collation->mbmaxlen,
+ max_length= MY_MAX(old_max_chars * collation.collation->mbmaxlen,
display_length(item) /
item->collation.collation->mbmaxlen *
collation.collation->mbmaxlen);
@@ -9446,7 +9457,7 @@ bool Item_type_holder::join_types(THD *thd, Item *item)
{
int delta1= max_length_orig - decimals_orig;
int delta2= item->max_length - item->decimals;
- max_length= max(delta1, delta2) + decimals;
+ max_length= MY_MAX(delta1, delta2) + decimals;
if (fld_type == MYSQL_TYPE_FLOAT && max_length > FLT_DIG + 2)
{
max_length= MAX_FLOAT_STR_LENGTH;
@@ -9464,7 +9475,7 @@ bool Item_type_holder::join_types(THD *thd, Item *item)
break;
}
default:
- max_length= max(max_length, display_length(item));
+ max_length= MY_MAX(max_length, display_length(item));
};
maybe_null|= item->maybe_null;
get_full_info(item);
diff --git a/sql/item.h b/sql/item.h
index 5514231e4fd..fb2948a9149 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -1060,7 +1060,7 @@ public:
Item **ref, bool skip_registered);
virtual bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate);
bool get_time(MYSQL_TIME *ltime)
- { return get_date(ltime, TIME_TIME_ONLY); }
+ { return get_date(ltime, TIME_TIME_ONLY | TIME_INVALID_DATES); }
bool get_seconds(ulonglong *sec, ulong *sec_part);
virtual bool get_date_result(MYSQL_TIME *ltime, ulonglong fuzzydate)
{ return get_date(ltime,fuzzydate); }
@@ -1396,7 +1396,7 @@ public:
virtual void bring_value() {}
Field *tmp_table_field_from_field_type(TABLE *table, bool fixed_length);
- virtual Item_field *filed_for_view_update() { return 0; }
+ virtual Item_field *field_for_view_update() { return 0; }
virtual Item *neg_transformer(THD *thd) { return NULL; }
virtual Item *update_value_transformer(uchar *select_arg) { return this; }
@@ -2125,7 +2125,7 @@ public:
bool set_no_const_sub(uchar *arg);
Item *replace_equal_field(uchar *arg);
inline uint32 max_disp_length() { return field->max_display_length(); }
- Item_field *filed_for_view_update() { return this; }
+ Item_field *field_for_view_update() { return this; }
Item *safe_charset_converter(CHARSET_INFO *tocs);
int fix_outer_field(THD *thd, Field **field, Item **reference);
virtual Item *update_value_transformer(uchar *select_arg);
@@ -2616,6 +2616,7 @@ public:
str_value.length(), collation.collation);
}
Item *safe_charset_converter(CHARSET_INFO *tocs);
+ Item *charset_converter(CHARSET_INFO *tocs, bool lossless);
inline void append(char *str, uint length)
{
str_value.append(str, length);
@@ -3140,8 +3141,8 @@ public:
}
virtual void print(String *str, enum_query_type query_type);
void cleanup();
- Item_field *filed_for_view_update()
- { return (*ref)->filed_for_view_update(); }
+ Item_field *field_for_view_update()
+ { return (*ref)->field_for_view_update(); }
virtual Ref_Type ref_type() { return REF; }
// Row emulation: forwarding of ROW-related calls to ref
@@ -3362,8 +3363,8 @@ public:
}
bool enumerate_field_refs_processor(uchar *arg)
{ return orig_item->enumerate_field_refs_processor(arg); }
- Item_field *filed_for_view_update()
- { return orig_item->filed_for_view_update(); }
+ Item_field *field_for_view_update()
+ { return orig_item->field_for_view_update(); }
/* Row emulation: forwarding of ROW-related calls to orig_item */
uint cols()
diff --git a/sql/item_buff.cc b/sql/item_buff.cc
index ce396736d6f..a08ae8d8403 100644
--- a/sql/item_buff.cc
+++ b/sql/item_buff.cc
@@ -71,7 +71,7 @@ Cached_item::~Cached_item() {}
Cached_item_str::Cached_item_str(THD *thd, Item *arg)
:item(arg),
- value_max_length(min(arg->max_length, thd->variables.max_sort_length)),
+ value_max_length(MY_MIN(arg->max_length, thd->variables.max_sort_length)),
value(value_max_length)
{}
@@ -81,7 +81,7 @@ bool Cached_item_str::cmp(void)
bool tmp;
if ((res=item->val_str(&tmp_value)))
- res->length(min(res->length(), value_max_length));
+ res->length(MY_MIN(res->length(), value_max_length));
if (null_value != item->null_value)
{
if ((null_value= item->null_value))
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index 5dbd7f8b152..33b94ece45d 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -679,7 +679,7 @@ int Arg_comparator::set_compare_func(Item_result_field *item, Item_result type)
{
if ((*a)->decimals < NOT_FIXED_DEC && (*b)->decimals < NOT_FIXED_DEC)
{
- precision= 5 / log_10[max((*a)->decimals, (*b)->decimals) + 1];
+ precision= 5 / log_10[MY_MAX((*a)->decimals, (*b)->decimals) + 1];
if (func == &Arg_comparator::compare_real)
func= &Arg_comparator::compare_real_fixed;
else if (func == &Arg_comparator::compare_e_real)
@@ -746,7 +746,7 @@ bool get_mysql_time_from_str(THD *thd, String *str, timestamp_type warn_type,
}
if (status.warnings > 0)
- make_truncated_value_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ make_truncated_value_warning(thd, Sql_condition::WARN_LEVEL_WARN,
&err, warn_type, warn_name);
return value;
@@ -1020,7 +1020,7 @@ int Arg_comparator::compare_binary_string()
owner->null_value= 0;
uint res1_length= res1->length();
uint res2_length= res2->length();
- int cmp= memcmp(res1->ptr(), res2->ptr(), min(res1_length,res2_length));
+ int cmp= memcmp(res1->ptr(), res2->ptr(), MY_MIN(res1_length,res2_length));
return cmp ? cmp : (int) (res1_length - res2_length);
}
}
@@ -2418,7 +2418,7 @@ Item_func_ifnull::fix_length_and_dec()
uint32 char_length;
agg_result_type(&hybrid_type, args, 2);
maybe_null=args[1]->maybe_null;
- decimals= max(args[0]->decimals, args[1]->decimals);
+ decimals= MY_MAX(args[0]->decimals, args[1]->decimals);
unsigned_flag= args[0]->unsigned_flag && args[1]->unsigned_flag;
if (hybrid_type == DECIMAL_RESULT || hybrid_type == INT_RESULT)
@@ -2429,10 +2429,10 @@ Item_func_ifnull::fix_length_and_dec()
int len1= args[1]->max_char_length() - args[1]->decimals
- (args[1]->unsigned_flag ? 0 : 1);
- char_length= max(len0, len1) + decimals + (unsigned_flag ? 0 : 1);
+ char_length= MY_MAX(len0, len1) + decimals + (unsigned_flag ? 0 : 1);
}
else
- char_length= max(args[0]->max_char_length(), args[1]->max_char_length());
+ char_length= MY_MAX(args[0]->max_char_length(), args[1]->max_char_length());
switch (hybrid_type) {
case STRING_RESULT:
@@ -2459,9 +2459,9 @@ uint Item_func_ifnull::decimal_precision() const
{
int arg0_int_part= args[0]->decimal_int_part();
int arg1_int_part= args[1]->decimal_int_part();
- int max_int_part= max(arg0_int_part, arg1_int_part);
+ int max_int_part= MY_MAX(arg0_int_part, arg1_int_part);
int precision= max_int_part + decimals;
- return min(precision, DECIMAL_MAX_PRECISION);
+ return MY_MIN(precision, DECIMAL_MAX_PRECISION);
}
@@ -2638,7 +2638,7 @@ Item_func_if::fix_length_and_dec()
agg_result_type(&cached_result_type, args + 1, 2);
maybe_null= args[1]->maybe_null || args[2]->maybe_null;
- decimals= max(args[1]->decimals, args[2]->decimals);
+ decimals= MY_MAX(args[1]->decimals, args[2]->decimals);
unsigned_flag=args[1]->unsigned_flag && args[2]->unsigned_flag;
if (cached_result_type == STRING_RESULT)
@@ -2662,10 +2662,10 @@ Item_func_if::fix_length_and_dec()
int len2= args[2]->max_length - args[2]->decimals
- (args[2]->unsigned_flag ? 0 : 1);
- char_length= max(len1, len2) + decimals + (unsigned_flag ? 0 : 1);
+ char_length= MY_MAX(len1, len2) + decimals + (unsigned_flag ? 0 : 1);
}
else
- char_length= max(args[1]->max_char_length(), args[2]->max_char_length());
+ char_length= MY_MAX(args[1]->max_char_length(), args[2]->max_char_length());
fix_char_length(char_length);
}
@@ -2674,8 +2674,8 @@ uint Item_func_if::decimal_precision() const
{
int arg1_prec= args[1]->decimal_int_part();
int arg2_prec= args[2]->decimal_int_part();
- int precision=max(arg1_prec,arg2_prec) + decimals;
- return min(precision, DECIMAL_MAX_PRECISION);
+ int precision=MY_MAX(arg1_prec,arg2_prec) + decimals;
+ return MY_MIN(precision, DECIMAL_MAX_PRECISION);
}
@@ -2976,7 +2976,7 @@ bool Item_func_case::fix_fields(THD *thd, Item **ref)
void Item_func_case::agg_str_lengths(Item* arg)
{
- fix_char_length(max(max_char_length(), arg->max_char_length()));
+ fix_char_length(MY_MAX(max_char_length(), arg->max_char_length()));
set_if_bigger(decimals, arg->decimals);
unsigned_flag= unsigned_flag && arg->unsigned_flag;
}
@@ -3176,7 +3176,7 @@ uint Item_func_case::decimal_precision() const
if (else_expr_num != -1)
set_if_bigger(max_int_part, args[else_expr_num]->decimal_int_part());
- return min(max_int_part + decimals, DECIMAL_MAX_PRECISION);
+ return MY_MIN(max_int_part + decimals, DECIMAL_MAX_PRECISION);
}
@@ -5169,7 +5169,7 @@ void Item_func_like::turboBM_compute_suffixes(int *suff)
else
{
if (i < g)
- g = i; // g = min(i, g)
+ g = i; // g = MY_MIN(i, g)
f = i;
while (g >= 0 && pattern[g] == pattern[g + plm1 - f])
g--;
@@ -5188,7 +5188,7 @@ void Item_func_like::turboBM_compute_suffixes(int *suff)
else
{
if (i < g)
- g = i; // g = min(i, g)
+ g = i; // g = MY_MIN(i, g)
f = i;
while (g >= 0 &&
likeconv(cs, pattern[g]) == likeconv(cs, pattern[g + plm1 - f]))
@@ -5309,14 +5309,14 @@ bool Item_func_like::turboBM_matches(const char* text, int text_len) const
register const int v = plm1 - i;
turboShift = u - v;
bcShift = bmBc[(uint) (uchar) text[i + j]] - plm1 + i;
- shift = max(turboShift, bcShift);
- shift = max(shift, bmGs[i]);
+ shift = MY_MAX(turboShift, bcShift);
+ shift = MY_MAX(shift, bmGs[i]);
if (shift == bmGs[i])
- u = min(pattern_len - shift, v);
+ u = MY_MIN(pattern_len - shift, v);
else
{
if (turboShift < bcShift)
- shift = max(shift, u + 1);
+ shift = MY_MAX(shift, u + 1);
u = 0;
}
j+= shift;
@@ -5340,14 +5340,14 @@ bool Item_func_like::turboBM_matches(const char* text, int text_len) const
register const int v = plm1 - i;
turboShift = u - v;
bcShift = bmBc[(uint) likeconv(cs, text[i + j])] - plm1 + i;
- shift = max(turboShift, bcShift);
- shift = max(shift, bmGs[i]);
+ shift = MY_MAX(turboShift, bcShift);
+ shift = MY_MAX(shift, bmGs[i]);
if (shift == bmGs[i])
- u = min(pattern_len - shift, v);
+ u = MY_MIN(pattern_len - shift, v);
else
{
if (turboShift < bcShift)
- shift = max(shift, u + 1);
+ shift = MY_MAX(shift, u + 1);
u = 0;
}
j+= shift;
diff --git a/sql/item_create.cc b/sql/item_create.cc
index ba1ce2b0d3b..962ea73f320 100644
--- a/sql/item_create.cc
+++ b/sql/item_create.cc
@@ -56,7 +56,7 @@ static void wrong_precision_error(uint errcode, Item *a,
char buff[1024];
String buf(buff, sizeof(buff), system_charset_info);
- my_error(errcode, MYF(0), (uint) min(number, UINT_MAX32),
+ my_error(errcode, MYF(0), (uint) MY_MIN(number, UINT_MAX32),
item_name(a, &buf), maximum);
}
@@ -2080,19 +2080,6 @@ protected:
};
-class Create_func_row_count : public Create_func_arg0
-{
-public:
- virtual Item *create_builder(THD *thd);
-
- static Create_func_row_count s_singleton;
-
-protected:
- Create_func_row_count() {}
- virtual ~Create_func_row_count() {}
-};
-
-
class Create_func_rpad : public Create_func_arg3
{
public:
@@ -4838,18 +4825,6 @@ Create_func_round::create_native(THD *thd, LEX_STRING name,
}
-Create_func_row_count Create_func_row_count::s_singleton;
-
-Item*
-Create_func_row_count::create_builder(THD *thd)
-{
- DBUG_ENTER("Create_func_row_count::create");
- thd->lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_FUNCTION);
- thd->lex->safe_to_cache_query= 0;
- DBUG_RETURN(new (thd->mem_root) Item_func_row_count());
-}
-
-
Create_func_rpad Create_func_rpad::s_singleton;
Item*
@@ -5520,7 +5495,6 @@ static Native_func_registry func_array[] =
{ { C_STRING_WITH_LEN("RELEASE_LOCK") }, BUILDER(Create_func_release_lock)},
{ { C_STRING_WITH_LEN("REVERSE") }, BUILDER(Create_func_reverse)},
{ { C_STRING_WITH_LEN("ROUND") }, BUILDER(Create_func_round)},
- { { C_STRING_WITH_LEN("ROW_COUNT") }, BUILDER(Create_func_row_count)},
{ { C_STRING_WITH_LEN("RPAD") }, BUILDER(Create_func_rpad)},
{ { C_STRING_WITH_LEN("RTRIM") }, BUILDER(Create_func_rtrim)},
{ { C_STRING_WITH_LEN("SEC_TO_TIME") }, BUILDER(Create_func_sec_to_time)},
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 7ac42ebdbad..69b53871f9f 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -634,7 +634,7 @@ void Item_func::count_decimal_length()
set_if_bigger(max_int_part, args[i]->decimal_int_part());
set_if_smaller(unsigned_flag, args[i]->unsigned_flag);
}
- int precision= min(max_int_part + decimals, DECIMAL_MAX_PRECISION);
+ int precision= MY_MIN(max_int_part + decimals, DECIMAL_MAX_PRECISION);
fix_char_length(my_decimal_precision_to_length_no_truncation(precision,
decimals,
unsigned_flag));
@@ -694,7 +694,7 @@ void Item_func::signal_divide_by_null()
{
THD *thd= current_thd;
if (thd->variables.sql_mode & MODE_ERROR_FOR_DIVISION_BY_ZERO)
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_DIVISION_BY_ZERO,
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_DIVISION_BY_ZERO,
ER(ER_DIVISION_BY_ZERO));
null_value= 1;
}
@@ -1028,7 +1028,7 @@ longlong Item_func_signed::val_int_from_str(int *error)
if (*error > 0 || end != start+ length)
{
ErrConvString err(res);
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE,
ER(ER_TRUNCATED_WRONG_VALUE), "INTEGER",
err.ptr());
@@ -1065,7 +1065,7 @@ longlong Item_func_signed::val_int()
return value;
err:
- push_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_UNKNOWN_ERROR,
+ push_warning(current_thd, Sql_condition::WARN_LEVEL_NOTE, ER_UNKNOWN_ERROR,
"Cast to signed converted positive out-of-range integer to "
"it's negative complement");
return value;
@@ -1121,7 +1121,7 @@ longlong Item_func_unsigned::val_int()
return value;
err:
- push_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_UNKNOWN_ERROR,
+ push_warning(current_thd, Sql_condition::WARN_LEVEL_NOTE, ER_UNKNOWN_ERROR,
"Cast to unsigned converted negative integer to it's "
"positive complement");
return value;
@@ -1189,7 +1189,7 @@ my_decimal *Item_decimal_typecast::val_decimal(my_decimal *dec)
return dec;
err:
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARN_DATA_OUT_OF_RANGE,
ER(ER_WARN_DATA_OUT_OF_RANGE),
name, 1L);
@@ -1231,7 +1231,7 @@ double Item_double_typecast::val_real()
if ((error= truncate_double(&tmp, max_length, decimals, 0, DBL_MAX)))
{
push_warning_printf(current_thd,
- MYSQL_ERROR::WARN_LEVEL_WARN,
+ Sql_condition::WARN_LEVEL_WARN,
ER_WARN_DATA_OUT_OF_RANGE,
ER(ER_WARN_DATA_OUT_OF_RANGE),
name, 1);
@@ -1369,10 +1369,10 @@ my_decimal *Item_func_plus::decimal_op(my_decimal *decimal_value)
*/
void Item_func_additive_op::result_precision()
{
- decimals= max(args[0]->decimals, args[1]->decimals);
+ decimals= MY_MAX(args[0]->decimals, args[1]->decimals);
int arg1_int= args[0]->decimal_precision() - args[0]->decimals;
int arg2_int= args[1]->decimal_precision() - args[1]->decimals;
- int precision= max(arg1_int, arg2_int) + 1 + decimals;
+ int precision= MY_MAX(arg1_int, arg2_int) + 1 + decimals;
/* Integer operations keep unsigned_flag if one of arguments is unsigned */
if (result_type() == INT_RESULT)
@@ -1610,9 +1610,9 @@ void Item_func_mul::result_precision()
unsigned_flag= args[0]->unsigned_flag | args[1]->unsigned_flag;
else
unsigned_flag= args[0]->unsigned_flag & args[1]->unsigned_flag;
- decimals= min(args[0]->decimals + args[1]->decimals, DECIMAL_MAX_SCALE);
+ decimals= MY_MIN(args[0]->decimals + args[1]->decimals, DECIMAL_MAX_SCALE);
uint est_prec = args[0]->decimal_precision() + args[1]->decimal_precision();
- uint precision= min(est_prec, DECIMAL_MAX_PRECISION);
+ uint precision= MY_MIN(est_prec, DECIMAL_MAX_PRECISION);
max_length= my_decimal_precision_to_length_no_truncation(precision, decimals,
unsigned_flag);
}
@@ -1664,7 +1664,7 @@ my_decimal *Item_func_div::decimal_op(my_decimal *decimal_value)
void Item_func_div::result_precision()
{
- uint precision=min(args[0]->decimal_precision() +
+ uint precision=MY_MIN(args[0]->decimal_precision() +
args[1]->decimals + prec_increment,
DECIMAL_MAX_PRECISION);
@@ -1673,7 +1673,7 @@ void Item_func_div::result_precision()
unsigned_flag= args[0]->unsigned_flag | args[1]->unsigned_flag;
else
unsigned_flag= args[0]->unsigned_flag & args[1]->unsigned_flag;
- decimals= min(args[0]->decimals + prec_increment, DECIMAL_MAX_SCALE);
+ decimals= MY_MIN(args[0]->decimals + prec_increment, DECIMAL_MAX_SCALE);
max_length= my_decimal_precision_to_length_no_truncation(precision, decimals,
unsigned_flag);
}
@@ -1687,7 +1687,7 @@ void Item_func_div::fix_length_and_dec()
switch (hybrid_type) {
case REAL_RESULT:
{
- decimals=max(args[0]->decimals,args[1]->decimals)+prec_increment;
+ decimals=MY_MAX(args[0]->decimals,args[1]->decimals)+prec_increment;
set_if_smaller(decimals, NOT_FIXED_DEC);
uint tmp=float_length(decimals);
if (decimals == NOT_FIXED_DEC)
@@ -1877,8 +1877,8 @@ my_decimal *Item_func_mod::decimal_op(my_decimal *decimal_value)
void Item_func_mod::result_precision()
{
- decimals= max(args[0]->decimals, args[1]->decimals);
- max_length= max(args[0]->max_length, args[1]->max_length);
+ decimals= MY_MAX(args[0]->decimals, args[1]->decimals);
+ max_length= MY_MAX(args[0]->max_length, args[1]->max_length);
}
@@ -2433,7 +2433,7 @@ void Item_func_round::fix_length_and_dec()
if (args[0]->decimals == NOT_FIXED_DEC)
{
- decimals= min(decimals_to_set, NOT_FIXED_DEC);
+ decimals= MY_MIN(decimals_to_set, NOT_FIXED_DEC);
max_length= float_length(decimals);
hybrid_type= REAL_RESULT;
return;
@@ -2443,7 +2443,7 @@ void Item_func_round::fix_length_and_dec()
case REAL_RESULT:
case STRING_RESULT:
hybrid_type= REAL_RESULT;
- decimals= min(decimals_to_set, NOT_FIXED_DEC);
+ decimals= MY_MIN(decimals_to_set, NOT_FIXED_DEC);
max_length= float_length(decimals);
break;
case INT_RESULT:
@@ -2460,13 +2460,13 @@ void Item_func_round::fix_length_and_dec()
case DECIMAL_RESULT:
{
hybrid_type= DECIMAL_RESULT;
- decimals_to_set= min(DECIMAL_MAX_SCALE, decimals_to_set);
+ decimals_to_set= MY_MIN(DECIMAL_MAX_SCALE, decimals_to_set);
int decimals_delta= args[0]->decimals - decimals_to_set;
int precision= args[0]->decimal_precision();
int length_increase= ((decimals_delta <= 0) || truncate) ? 0:1;
precision-= decimals_delta - length_increase;
- decimals= min(decimals_to_set, DECIMAL_MAX_SCALE);
+ decimals= MY_MIN(decimals_to_set, DECIMAL_MAX_SCALE);
max_length= my_decimal_precision_to_length_no_truncation(precision,
decimals,
unsigned_flag);
@@ -2577,7 +2577,7 @@ my_decimal *Item_func_round::decimal_op(my_decimal *decimal_value)
my_decimal val, *value= args[0]->val_decimal(&val);
longlong dec= args[1]->val_int();
if (dec >= 0 || args[1]->unsigned_flag)
- dec= min((ulonglong) dec, decimals);
+ dec= MY_MIN((ulonglong) dec, decimals);
else if (dec < INT_MIN)
dec= INT_MIN;
@@ -3443,7 +3443,7 @@ udf_handler::fix_fields(THD *thd, Item_result_field *func,
free_udf(u_d);
DBUG_RETURN(TRUE);
}
- func->max_length=min(initid.max_length,MAX_BLOB_WIDTH);
+ func->max_length=MY_MIN(initid.max_length,MAX_BLOB_WIDTH);
func->maybe_null=initid.maybe_null;
const_item_cache=initid.const_item;
/*
@@ -3452,7 +3452,7 @@ udf_handler::fix_fields(THD *thd, Item_result_field *func,
*/
if (!const_item_cache && !used_tables_cache)
used_tables_cache= RAND_TABLE_BIT;
- func->decimals=min(initid.decimals,NOT_FIXED_DEC);
+ func->decimals=MY_MIN(initid.decimals,NOT_FIXED_DEC);
}
initialized=1;
if (error)
@@ -3792,7 +3792,7 @@ longlong Item_master_pos_wait::val_int()
connection_name= thd->variables.default_master_connection;
if (!(mi= master_info_index->get_master_info(&connection_name,
- MYSQL_ERROR::WARN_LEVEL_WARN)))
+ Sql_condition::WARN_LEVEL_WARN)))
goto err;
if ((event_count = mi->rli.wait_for_pos(thd, log_name, pos, timeout)) == -2)
{
@@ -3991,18 +3991,18 @@ public:
bool handle_condition(THD * /* thd */, uint sql_errno,
const char * /* sqlstate */,
- MYSQL_ERROR::enum_warning_level /* level */,
+ Sql_condition::enum_warning_level /* level */,
const char *message,
- MYSQL_ERROR ** /* cond_hdl */);
+ Sql_condition ** /* cond_hdl */);
};
bool
Lock_wait_timeout_handler::
handle_condition(THD * /* thd */, uint sql_errno,
const char * /* sqlstate */,
- MYSQL_ERROR::enum_warning_level /* level */,
+ Sql_condition::enum_warning_level /* level */,
const char *message,
- MYSQL_ERROR ** /* cond_hdl */)
+ Sql_condition ** /* cond_hdl */)
{
if (sql_errno == ER_LOCK_WAIT_TIMEOUT)
{
@@ -4268,7 +4268,7 @@ longlong Item_func_benchmark::val_int()
{
char buff[22];
llstr(((longlong) loop_count), buff);
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_WRONG_VALUE_FOR_TYPE, ER(ER_WRONG_VALUE_FOR_TYPE),
"count", buff, "benchmark");
}
@@ -6118,7 +6118,7 @@ bool Item_func_match::fix_index()
for (keynr=0 ; keynr < fts ; keynr++)
{
KEY *ft_key=&table->key_info[ft_to_key[keynr]];
- uint key_parts=ft_key->key_parts;
+ uint key_parts=ft_key->user_defined_key_parts;
for (uint part=0 ; part < key_parts ; part++)
{
@@ -6150,7 +6150,7 @@ bool Item_func_match::fix_index()
{
// partial keys doesn't work
if (max_cnt < arg_count-1 ||
- max_cnt < table->key_info[ft_to_key[keynr]].key_parts)
+ max_cnt < table->key_info[ft_to_key[keynr]].user_defined_key_parts)
continue;
key=ft_to_key[keynr];
diff --git a/sql/item_func.h b/sql/item_func.h
index bbe70724f79..71225c71639 100644
--- a/sql/item_func.h
+++ b/sql/item_func.h
@@ -561,8 +561,8 @@ public:
longlong val_int_from_str(int *error);
void fix_length_and_dec()
{
- fix_char_length(min(args[0]->max_char_length(),
- MY_INT64_NUM_DECIMAL_DIGITS));
+ fix_char_length(MY_MIN(args[0]->max_char_length(),
+ MY_INT64_NUM_DECIMAL_DIGITS));
}
virtual void print(String *str, enum_query_type query_type);
uint decimal_precision() const { return args[0]->decimal_precision(); }
diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc
index 0a7f18e6546..b36375a6e40 100644
--- a/sql/item_geofunc.cc
+++ b/sql/item_geofunc.cc
@@ -500,7 +500,7 @@ String *Item_func_spatial_collection::val_str(String *str)
}
if (str->length() > current_thd->variables.max_allowed_packet)
{
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARN_ALLOWED_PACKET_OVERFLOWED,
ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED),
func_name(), current_thd->variables.max_allowed_packet);
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index 09518bb4bd5..854a99bea02 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -106,23 +106,6 @@ String *Item_str_func::val_str_from_val_str_ascii(String *str, String *str2)
}
-
-/*
- Convert an array of bytes to a hexadecimal representation.
-
- Used to generate a hexadecimal representation of a message digest.
-*/
-static void array_to_hex(char *to, const unsigned char *str, uint len)
-{
- const unsigned char *str_end= str + len;
- for (; str != str_end; ++str)
- {
- *to++= _dig_vec_lower[((uchar) *str) >> 4];
- *to++= _dig_vec_lower[((uchar) *str) & 0x0F];
- }
-}
-
-
bool Item_str_func::fix_fields(THD *thd, Item **ref)
{
bool res= Item_func::fix_fields(thd, ref);
@@ -221,17 +204,11 @@ String *Item_func_sha::val_str_ascii(String *str)
String * sptr= args[0]->val_str(str);
if (sptr) /* If we got value different from NULL */
{
- SHA1_CONTEXT context; /* Context used to generate SHA1 hash */
/* Temporary buffer to store 160bit digest */
uint8 digest[SHA1_HASH_SIZE];
- mysql_sha1_reset(&context); /* We do not have to check for error here */
- /* No need to check error as the only case would be too long message */
- mysql_sha1_input(&context,
- (const uchar *) sptr->ptr(), sptr->length());
-
+ compute_sha1_hash(digest, (const char *) sptr->ptr(), sptr->length());
/* Ensure that memory is free and we got result */
- if (!( str->alloc(SHA1_HASH_SIZE*2) ||
- (mysql_sha1_result(&context,digest))))
+ if (!str->alloc(SHA1_HASH_SIZE*2))
{
array_to_hex((char *) str->ptr(), digest, SHA1_HASH_SIZE);
str->set_charset(&my_charset_numeric);
@@ -309,9 +286,9 @@ String *Item_func_sha2::val_str_ascii(String *str)
default:
if (!args[1]->const_item())
push_warning_printf(current_thd,
- MYSQL_ERROR::WARN_LEVEL_WARN,
- ER_WRONG_PARAMETERS_TO_NATIVE_FCT,
- ER(ER_WRONG_PARAMETERS_TO_NATIVE_FCT), "sha2");
+ Sql_condition::WARN_LEVEL_WARN,
+ ER_WRONG_PARAMETERS_TO_NATIVE_FCT,
+ ER(ER_WRONG_PARAMETERS_TO_NATIVE_FCT), "sha2");
null_value= TRUE;
return NULL;
}
@@ -333,7 +310,7 @@ String *Item_func_sha2::val_str_ascii(String *str)
#else
push_warning_printf(current_thd,
- MYSQL_ERROR::WARN_LEVEL_WARN,
+ Sql_condition::WARN_LEVEL_WARN,
ER_FEATURE_DISABLED,
ER(ER_FEATURE_DISABLED),
"sha2", "--with-ssl");
@@ -371,7 +348,7 @@ void Item_func_sha2::fix_length_and_dec()
#endif
default:
push_warning_printf(current_thd,
- MYSQL_ERROR::WARN_LEVEL_WARN,
+ Sql_condition::WARN_LEVEL_WARN,
ER_WRONG_PARAMETERS_TO_NATIVE_FCT,
ER(ER_WRONG_PARAMETERS_TO_NATIVE_FCT), "sha2");
}
@@ -390,7 +367,7 @@ void Item_func_sha2::fix_length_and_dec()
DERIVATION_COERCIBLE);
#else
push_warning_printf(current_thd,
- MYSQL_ERROR::WARN_LEVEL_WARN,
+ Sql_condition::WARN_LEVEL_WARN,
ER_FEATURE_DISABLED,
ER(ER_FEATURE_DISABLED),
"sha2", "--with-ssl");
@@ -592,7 +569,7 @@ String *Item_func_concat::val_str(String *str)
if (res->length()+res2->length() >
current_thd->variables.max_allowed_packet)
{
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARN_ALLOWED_PACKET_OVERFLOWED,
ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), func_name(),
current_thd->variables.max_allowed_packet);
@@ -665,7 +642,7 @@ String *Item_func_concat::val_str(String *str)
}
else
{
- uint new_len = max(tmp_value.alloced_length() * 2, concat_len);
+ uint new_len = MY_MAX(tmp_value.alloced_length() * 2, concat_len);
if (tmp_value.realloc(new_len))
goto null;
@@ -798,11 +775,11 @@ String *Item_func_des_encrypt::val_str(String *str)
return &tmp_value;
error:
- push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd,Sql_condition::WARN_LEVEL_WARN,
code, ER(code),
"des_encrypt");
#else
- push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd,Sql_condition::WARN_LEVEL_WARN,
ER_FEATURE_DISABLED, ER(ER_FEATURE_DISABLED),
"des_encrypt", "--with-ssl");
#endif /* defined(HAVE_OPENSSL) && !defined(EMBEDDED_LIBRARY) */
@@ -876,12 +853,12 @@ String *Item_func_des_decrypt::val_str(String *str)
return &tmp_value;
error:
- push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd,Sql_condition::WARN_LEVEL_WARN,
code, ER(code),
"des_decrypt");
wrong_key:
#else
- push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd,Sql_condition::WARN_LEVEL_WARN,
ER_FEATURE_DISABLED, ER(ER_FEATURE_DISABLED),
"des_decrypt", "--with-ssl");
#endif /* defined(HAVE_OPENSSL) && !defined(EMBEDDED_LIBRARY) */
@@ -932,7 +909,7 @@ String *Item_func_concat_ws::val_str(String *str)
if (res->length() + sep_str->length() + res2->length() >
current_thd->variables.max_allowed_packet)
{
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARN_ALLOWED_PACKET_OVERFLOWED,
ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), func_name(),
current_thd->variables.max_allowed_packet);
@@ -1014,7 +991,7 @@ String *Item_func_concat_ws::val_str(String *str)
}
else
{
- uint new_len = max(tmp_value.alloced_length() * 2, concat_len);
+ uint new_len = MY_MAX(tmp_value.alloced_length() * 2, concat_len);
if (tmp_value.realloc(new_len))
goto null;
@@ -1191,7 +1168,7 @@ redo:
if (res->length()-from_length + to_length >
current_thd->variables.max_allowed_packet)
{
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARN_ALLOWED_PACKET_OVERFLOWED,
ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED),
func_name(),
@@ -1220,7 +1197,7 @@ skip:
if (res->length()-from_length + to_length >
current_thd->variables.max_allowed_packet)
{
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARN_ALLOWED_PACKET_OVERFLOWED,
ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED), func_name(),
current_thd->variables.max_allowed_packet);
@@ -1307,7 +1284,7 @@ String *Item_func_insert::val_str(String *str)
if ((ulonglong) (res->length() - length + res2->length()) >
(ulonglong) current_thd->variables.max_allowed_packet)
{
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARN_ALLOWED_PACKET_OVERFLOWED,
ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED),
func_name(), current_thd->variables.max_allowed_packet);
@@ -1506,7 +1483,7 @@ String *Item_func_substr::val_str(String *str)
length= res->charpos((int) length, (uint32) start);
tmp_length= res->length() - start;
- length= min(length, tmp_length);
+ length= MY_MIN(length, tmp_length);
if (!start && (longlong) res->length() == length)
return res;
@@ -1529,7 +1506,7 @@ void Item_func_substr::fix_length_and_dec()
else if (start < 0)
max_length= ((uint)(-start) > max_length) ? 0 : (uint)(-start);
else
- max_length-= min((uint)(start - 1), max_length);
+ max_length-= MY_MIN((uint)(start - 1), max_length);
}
if (arg_count == 3 && args[2]->const_item())
{
@@ -1911,28 +1888,133 @@ void Item_func_trim::print(String *str, enum_query_type query_type)
/* Item_func_password */
+/**
+ Helper function for calculating a new password. Used in
+ Item_func_password::fix_length_and_dec() for const parameters and in
+ Item_func_password::val_str_ascii() for non-const parameters.
+ @param str The plain text password which should be digested
+ @param buffer a pointer to the buffer where the digest will be stored.
+
+ @note The buffer must be of at least CRYPT_MAX_PASSWORD_SIZE size.
+
+ @return Size of the password.
+*/
+
+static int calculate_password(String *str, char *buffer)
+{
+ DBUG_ASSERT(str);
+ if (str->length() == 0) // PASSWORD('') returns ''
+ return 0;
+
+ int buffer_len= 0;
+ THD *thd= current_thd;
+ int old_passwords= 0;
+ if (thd)
+ old_passwords= thd->variables.old_passwords;
+
+#if defined(HAVE_OPENSSL)
+ if (old_passwords == 2)
+ {
+ my_make_scrambled_password(buffer, str->ptr(),
+ str->length());
+ buffer_len= (int) strlen(buffer) + 1;
+ }
+ else
+#endif
+ if (old_passwords == 0)
+ {
+ my_make_scrambled_password_sha1(buffer, str->ptr(),
+ str->length());
+ buffer_len= SCRAMBLED_PASSWORD_CHAR_LENGTH;
+ }
+ else
+ if (old_passwords == 1)
+ {
+ my_make_scrambled_password_323(buffer, str->ptr(),
+ str->length());
+ buffer_len= SCRAMBLED_PASSWORD_CHAR_LENGTH_323;
+ }
+ return buffer_len;
+}
+
+/* Item_func_password */
+void Item_func_password::fix_length_and_dec()
+{
+ maybe_null= false; // PASSWORD() never returns NULL
+
+ if (args[0]->const_item())
+ {
+ String str;
+ String *res= args[0]->val_str(&str);
+ if (!args[0]->null_value)
+ {
+ m_hashed_password_buffer_len=
+ calculate_password(res, m_hashed_password_buffer);
+ fix_length_and_charset(m_hashed_password_buffer_len, default_charset());
+ m_recalculate_password= false;
+ return;
+ }
+ }
+
+ m_recalculate_password= true;
+ fix_length_and_charset(CRYPT_MAX_PASSWORD_SIZE, default_charset());
+}
+
String *Item_func_password::val_str_ascii(String *str)
{
DBUG_ASSERT(fixed == 1);
- String *res= args[0]->val_str(str);
- if ((null_value=args[0]->null_value))
- return 0;
- if (res->length() == 0)
+
+ String *res= args[0]->val_str(str);
+
+ if (args[0]->null_value)
+ res= make_empty_result();
+
+ /* we treat NULLs as equal to empty string when calling the plugin */
+ check_password_policy(res);
+
+ null_value= 0;
+ if (args[0]->null_value) // PASSWORD(NULL) returns ''
+ return res;
+
+ if (m_recalculate_password)
+ m_hashed_password_buffer_len= calculate_password(res,
+ m_hashed_password_buffer);
+
+ if (m_hashed_password_buffer_len == 0)
return make_empty_result();
- my_make_scrambled_password(tmp_value, res->ptr(), res->length());
- str->set(tmp_value, SCRAMBLED_PASSWORD_CHAR_LENGTH, &my_charset_latin1);
+
+ str->set(m_hashed_password_buffer, m_hashed_password_buffer_len,
+ default_charset());
+
return str;
}
-char *Item_func_password::alloc(THD *thd, const char *password,
- size_t pass_len)
+char *Item_func_password::
+ create_password_hash_buffer(THD *thd, const char *password, size_t pass_len)
{
- char *buff= (char *) thd->alloc(SCRAMBLED_PASSWORD_CHAR_LENGTH+1);
- if (buff)
+ String *password_str= new (thd->mem_root)String(password, thd->variables.
+ character_set_client);
+ check_password_policy(password_str);
+
+ char *buff= NULL;
+ if (thd->variables.old_passwords == 0)
+ {
+ /* Allocate memory for the password scramble and one extra byte for \0 */
+ buff= (char *) thd->alloc(SCRAMBLED_PASSWORD_CHAR_LENGTH + 1);
+ my_make_scrambled_password_sha1(buff, password, pass_len);
+ }
+#if defined(HAVE_OPENSSL)
+ else
+ {
+ /* Allocate memory for the password scramble and one extra byte for \0 */
+ buff= (char *) thd->alloc(CRYPT_MAX_PASSWORD_SIZE + 1);
my_make_scrambled_password(buff, password, pass_len);
+ }
+#endif
return buff;
}
+
/* Item_func_old_password */
String *Item_func_old_password::val_str_ascii(String *str)
@@ -2223,7 +2305,7 @@ String *Item_func_soundex::val_str(String *str)
if ((null_value= args[0]->null_value))
return 0; /* purecov: inspected */
- if (tmp_value.alloc(max(res->length(), 4 * cs->mbminlen)))
+ if (tmp_value.alloc(MY_MAX(res->length(), 4 * cs->mbminlen)))
return str; /* purecov: inspected */
char *to= (char *) tmp_value.ptr();
char *to_end= to + tmp_value.alloced_length();
@@ -2333,7 +2415,7 @@ MY_LOCALE *Item_func_format::get_locale(Item *item)
if (!locale_name ||
!(lc= my_locale_by_name(locale_name->c_ptr_safe())))
{
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_UNKNOWN_LOCALE,
ER(ER_UNKNOWN_LOCALE),
locale_name ? locale_name->c_ptr_safe() : "NULL");
@@ -2723,7 +2805,7 @@ String *Item_func_repeat::val_str(String *str)
// Safe length check
if (length > current_thd->variables.max_allowed_packet / (uint) count)
{
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARN_ALLOWED_PACKET_OVERFLOWED,
ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED),
func_name(), current_thd->variables.max_allowed_packet);
@@ -2856,7 +2938,7 @@ String *Item_func_rpad::val_str(String *str)
byte_count= count * collation.collation->mbmaxlen;
if ((ulonglong) byte_count > current_thd->variables.max_allowed_packet)
{
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARN_ALLOWED_PACKET_OVERFLOWED,
ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED),
func_name(), current_thd->variables.max_allowed_packet);
@@ -2964,7 +3046,7 @@ String *Item_func_lpad::val_str(String *str)
if ((ulonglong) byte_count > current_thd->variables.max_allowed_packet)
{
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARN_ALLOWED_PACKET_OVERFLOWED,
ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED),
func_name(), current_thd->variables.max_allowed_packet);
@@ -3342,7 +3424,7 @@ String *Item_load_file::val_str(String *str)
}
if (stat_info.st_size > (long) current_thd->variables.max_allowed_packet)
{
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARN_ALLOWED_PACKET_OVERFLOWED,
ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED),
func_name(), current_thd->variables.max_allowed_packet);
@@ -3428,12 +3510,12 @@ String* Item_func_export_set::val_str(String* str)
const ulong max_allowed_packet= current_thd->variables.max_allowed_packet;
const uint num_separators= num_set_values > 0 ? num_set_values - 1 : 0;
const ulonglong max_total_length=
- num_set_values * max(yes->length(), no->length()) +
+ num_set_values * MY_MAX(yes->length(), no->length()) +
num_separators * sep->length();
if (unlikely(max_total_length > max_allowed_packet))
{
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARN_ALLOWED_PACKET_OVERFLOWED,
ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED),
func_name(), max_allowed_packet);
@@ -3457,11 +3539,11 @@ String* Item_func_export_set::val_str(String* str)
void Item_func_export_set::fix_length_and_dec()
{
- uint32 length= max(args[1]->max_char_length(), args[2]->max_char_length());
+ uint32 length= MY_MAX(args[1]->max_char_length(), args[2]->max_char_length());
uint32 sep_length= (arg_count > 3 ? args[3]->max_char_length() : 1);
if (agg_arg_charsets_for_string_result(collation,
- args + 1, min(4, arg_count) - 1))
+ args + 1, MY_MIN(4, arg_count) - 1))
return;
fix_char_length(length * 64 + sep_length * 63);
}
@@ -3681,7 +3763,7 @@ longlong Item_func_uncompressed_length::val_int()
*/
if (res->length() <= 4)
{
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_ZLIB_Z_DATA_ERROR,
ER(ER_ZLIB_Z_DATA_ERROR));
null_value= 1;
@@ -3758,7 +3840,7 @@ String *Item_func_compress::val_str(String *str)
res->length())) != Z_OK)
{
code= err==Z_MEM_ERROR ? ER_ZLIB_Z_MEM_ERROR : ER_ZLIB_Z_BUF_ERROR;
- push_warning(current_thd,MYSQL_ERROR::WARN_LEVEL_WARN,code,ER(code));
+ push_warning(current_thd,Sql_condition::WARN_LEVEL_WARN,code,ER(code));
null_value= 1;
return 0;
}
@@ -3796,7 +3878,7 @@ String *Item_func_uncompress::val_str(String *str)
/* If length is less than 4 bytes, data is corrupt */
if (res->length() <= 4)
{
- push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd,Sql_condition::WARN_LEVEL_WARN,
ER_ZLIB_Z_DATA_ERROR,
ER(ER_ZLIB_Z_DATA_ERROR));
goto err;
@@ -3806,7 +3888,7 @@ String *Item_func_uncompress::val_str(String *str)
new_size= uint4korr(res->ptr()) & 0x3FFFFFFF;
if (new_size > current_thd->variables.max_allowed_packet)
{
- push_warning_printf(current_thd,MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd,Sql_condition::WARN_LEVEL_WARN,
ER_TOO_BIG_FOR_UNCOMPRESS,
ER(ER_TOO_BIG_FOR_UNCOMPRESS),
static_cast<int>(current_thd->variables.
@@ -3825,7 +3907,7 @@ String *Item_func_uncompress::val_str(String *str)
code= ((err == Z_BUF_ERROR) ? ER_ZLIB_Z_BUF_ERROR :
((err == Z_MEM_ERROR) ? ER_ZLIB_Z_MEM_ERROR : ER_ZLIB_Z_DATA_ERROR));
- push_warning(current_thd,MYSQL_ERROR::WARN_LEVEL_WARN,code,ER(code));
+ push_warning(current_thd,Sql_condition::WARN_LEVEL_WARN,code,ER(code));
err:
null_value= 1;
@@ -4100,10 +4182,8 @@ bool Item_func_dyncol_create::prepare_arguments(bool force_names_arg)
}
break;
case DYN_COL_DATETIME:
- args[valpos]->get_date(&vals[i].x.time_value, 0);
- break;
case DYN_COL_DATE:
- args[valpos]->get_date(&vals[i].x.time_value, 0);
+ args[valpos]->get_date(&vals[i].x.time_value, sql_mode_for_dates());
break;
case DYN_COL_TIME:
args[valpos]->get_time(&vals[i].x.time_value);
@@ -4517,7 +4597,7 @@ longlong Item_dyncol_get::val_int()
{
char buff[30];
sprintf(buff, "%lg", val.x.double_value);
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_DATA_OVERFLOW,
ER(ER_DATA_OVERFLOW),
buff,
@@ -4535,9 +4615,9 @@ longlong Item_dyncol_get::val_int()
if (end != org_end || error > 0)
{
char buff[80];
- strmake(buff, val.x.string.value.str, min(sizeof(buff)-1,
+ strmake(buff, val.x.string.value.str, MY_MIN(sizeof(buff)-1,
val.x.string.value.length));
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_BAD_DATA,
ER(ER_BAD_DATA),
buff,
@@ -4599,9 +4679,9 @@ double Item_dyncol_get::val_real()
error)
{
char buff[80];
- strmake(buff, val.x.string.value.str, min(sizeof(buff)-1,
+ strmake(buff, val.x.string.value.str, MY_MIN(sizeof(buff)-1,
val.x.string.value.length));
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_BAD_DATA,
ER(ER_BAD_DATA),
buff, "DOUBLE");
@@ -4655,11 +4735,11 @@ my_decimal *Item_dyncol_get::val_decimal(my_decimal *decimal_value)
rc= str2my_decimal(0, val.x.string.value.str, val.x.string.value.length,
val.x.string.charset, decimal_value);
char buff[80];
- strmake(buff, val.x.string.value.str, min(sizeof(buff)-1,
+ strmake(buff, val.x.string.value.str, MY_MIN(sizeof(buff)-1,
val.x.string.value.length));
if (rc != E_DEC_OK)
{
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_BAD_DATA,
ER(ER_BAD_DATA),
buff, "DECIMAL");
diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h
index 84d91a879ff..9b380108542 100644
--- a/sql/item_strfunc.h
+++ b/sql/item_strfunc.h
@@ -21,6 +21,8 @@
/* This file defines all string functions */
+#include "crypt_genhash_impl.h"
+
#ifdef USE_PRAGMA_INTERFACE
#pragma interface /* gcc class implementation */
#endif
@@ -329,16 +331,21 @@ public:
class Item_func_password :public Item_str_ascii_func
{
- char tmp_value[SCRAMBLED_PASSWORD_CHAR_LENGTH+1];
+ char m_hashed_password_buffer[CRYPT_MAX_PASSWORD_SIZE + 1];
+ unsigned int m_hashed_password_buffer_len;
+ bool m_recalculate_password;
public:
- Item_func_password(Item *a) :Item_str_ascii_func(a) {}
- String *val_str_ascii(String *str);
- void fix_length_and_dec()
+ Item_func_password(Item *a) :Item_str_ascii_func(a)
{
- fix_length_and_charset(SCRAMBLED_PASSWORD_CHAR_LENGTH, default_charset());
+ m_hashed_password_buffer_len= 0;
+ m_recalculate_password= false;
}
+ String *val_str_ascii(String *str);
+ void fix_length_and_dec();
const char *func_name() const { return "password"; }
static char *alloc(THD *thd, const char *password, size_t pass_len);
+ static char *create_password_hash_buffer(THD *thd, const char *password,
+ size_t pass_len);
};
@@ -823,7 +830,7 @@ public:
collation.set(args[0]->collation);
ulonglong max_result_length= (ulonglong) args[0]->max_length * 2 +
2 * collation.collation->mbmaxlen;
- max_length= (uint32) min(max_result_length, MAX_BLOB_WIDTH);
+ max_length= (uint32) MY_MIN(max_result_length, MAX_BLOB_WIDTH);
}
};
@@ -917,10 +924,10 @@ public:
const char *func_name() const { return "collate"; }
enum Functype functype() const { return COLLATE_FUNC; }
virtual void print(String *str, enum_query_type query_type);
- Item_field *filed_for_view_update()
+ Item_field *field_for_view_update()
{
/* this function is transparent for view updating */
- return args[0]->filed_for_view_update();
+ return args[0]->field_for_view_update();
}
};
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index 2128de391e0..87fa8147411 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -1089,7 +1089,7 @@ Item_singlerow_subselect::select_transformer(JOIN *join)
{
char warn_buff[MYSQL_ERRMSG_SIZE];
sprintf(warn_buff, ER(ER_SELECT_REDUCED), select_lex->select_number);
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_SELECT_REDUCED, warn_buff);
}
substitution= select_lex->item_list.head();
@@ -1758,7 +1758,7 @@ Item_in_subselect::single_value_transformer(JOIN *join)
{
char warn_buff[MYSQL_ERRMSG_SIZE];
sprintf(warn_buff, ER(ER_SELECT_REDUCED), select_lex->select_number);
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_SELECT_REDUCED, warn_buff);
}
DBUG_RETURN(false);
@@ -4230,7 +4230,7 @@ void subselect_uniquesubquery_engine::print(String *str)
{
KEY *key_info= tab->table->key_info + tab->ref.key;
str->append(STRING_WITH_LEN("<primary_index_lookup>("));
- for (uint i= 0; i < key_info->key_parts; i++)
+ for (uint i= 0; i < key_info->user_defined_key_parts; i++)
tab->ref.items[i]->print(str);
str->append(STRING_WITH_LEN(" in "));
str->append(tab->table->s->table_name.str, tab->table->s->table_name.length);
@@ -4788,7 +4788,8 @@ bool subselect_hash_sj_engine::init(List<Item> *tmp_columns, uint subquery_id)
DBUG_ASSERT(
tmp_table->s->uniques ||
tmp_table->key_info->key_length >= tmp_table->file->max_key_length() ||
- tmp_table->key_info->key_parts > tmp_table->file->max_key_parts());
+ tmp_table->key_info->user_defined_key_parts >
+ tmp_table->file->max_key_parts());
free_tmp_table(thd, tmp_table);
tmp_table= NULL;
delete result;
@@ -4802,7 +4803,7 @@ bool subselect_hash_sj_engine::init(List<Item> *tmp_columns, uint subquery_id)
*/
DBUG_ASSERT(tmp_table->s->keys == 1 &&
((Item_in_subselect *) item)->left_expr->cols() ==
- tmp_table->key_info->key_parts);
+ tmp_table->key_info->user_defined_key_parts);
if (make_semi_join_conds() ||
/* A unique_engine is used both for complete and partial matching. */
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index bed9499834a..b3be7339849 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -36,7 +36,7 @@
ulonglong Item_sum::ram_limitation(THD *thd)
{
- return min(thd->variables.tmp_table_size,
+ return MY_MIN(thd->variables.tmp_table_size,
thd->variables.max_heap_table_size);
}
@@ -1629,18 +1629,18 @@ void Item_sum_avg::fix_length_and_dec()
if (hybrid_type == DECIMAL_RESULT)
{
int precision= args[0]->decimal_precision() + prec_increment;
- decimals= min(args[0]->decimals + prec_increment, DECIMAL_MAX_SCALE);
+ decimals= MY_MIN(args[0]->decimals + prec_increment, DECIMAL_MAX_SCALE);
max_length= my_decimal_precision_to_length_no_truncation(precision,
decimals,
unsigned_flag);
- f_precision= min(precision+DECIMAL_LONGLONG_DIGITS, DECIMAL_MAX_PRECISION);
+ f_precision= MY_MIN(precision+DECIMAL_LONGLONG_DIGITS, DECIMAL_MAX_PRECISION);
f_scale= args[0]->decimals;
dec_bin_size= my_decimal_get_binary_size(f_precision, f_scale);
}
else
{
- decimals= min(args[0]->decimals + prec_increment, NOT_FIXED_DEC);
- max_length= min(args[0]->max_length + prec_increment, float_length(decimals));
+ decimals= MY_MIN(args[0]->decimals + prec_increment, NOT_FIXED_DEC);
+ max_length= MY_MIN(args[0]->max_length + prec_increment, float_length(decimals));
}
}
@@ -1836,13 +1836,13 @@ void Item_sum_variance::fix_length_and_dec()
switch (args[0]->result_type()) {
case REAL_RESULT:
case STRING_RESULT:
- decimals= min(args[0]->decimals + 4, NOT_FIXED_DEC);
+ decimals= MY_MIN(args[0]->decimals + 4, NOT_FIXED_DEC);
break;
case INT_RESULT:
case DECIMAL_RESULT:
{
int precision= args[0]->decimal_precision()*2 + prec_increment;
- decimals= min(args[0]->decimals + prec_increment, DECIMAL_MAX_SCALE);
+ decimals= MY_MIN(args[0]->decimals + prec_increment, DECIMAL_MAX_SCALE);
max_length= my_decimal_precision_to_length_no_truncation(precision,
decimals,
unsigned_flag);
@@ -3127,7 +3127,7 @@ int dump_leaf_key(void* key_arg, element_count count __attribute__((unused)),
&well_formed_error);
result->length(old_length + add_length);
item->warning_for_row= TRUE;
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_CUT_VALUE_GROUP_CONCAT, ER(ER_CUT_VALUE_GROUP_CONCAT),
item->row_count);
@@ -3555,7 +3555,7 @@ bool Item_func_group_concat::setup(THD *thd)
syntax of this function). If there is no ORDER BY clause, we don't
create this tree.
*/
- init_tree(tree, (uint) min(thd->variables.max_heap_table_size,
+ init_tree(tree, (uint) MY_MIN(thd->variables.max_heap_table_size,
thd->variables.sortbuff_size/16), 0,
tree_key_length,
group_concat_key_cmp_with_order, NULL, (void*) this,
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index 788da1a5713..a4b5a18de35 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -146,14 +146,14 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
switch (*++ptr) {
/* Year */
case 'Y':
- tmp= (char*) val + min(4, val_len);
+ tmp= (char*) val + MY_MIN(4, val_len);
l_time->year= (int) my_strtoll10(val, &tmp, &error);
if ((int) (tmp-val) <= 2)
l_time->year= year_2000_handling(l_time->year);
val= tmp;
break;
case 'y':
- tmp= (char*) val + min(2, val_len);
+ tmp= (char*) val + MY_MIN(2, val_len);
l_time->year= (int) my_strtoll10(val, &tmp, &error);
val= tmp;
l_time->year= year_2000_handling(l_time->year);
@@ -162,7 +162,7 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
/* Month */
case 'm':
case 'c':
- tmp= (char*) val + min(2, val_len);
+ tmp= (char*) val + MY_MIN(2, val_len);
l_time->month= (int) my_strtoll10(val, &tmp, &error);
val= tmp;
break;
@@ -179,15 +179,15 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
/* Day */
case 'd':
case 'e':
- tmp= (char*) val + min(2, val_len);
+ tmp= (char*) val + MY_MIN(2, val_len);
l_time->day= (int) my_strtoll10(val, &tmp, &error);
val= tmp;
break;
case 'D':
- tmp= (char*) val + min(2, val_len);
+ tmp= (char*) val + MY_MIN(2, val_len);
l_time->day= (int) my_strtoll10(val, &tmp, &error);
/* Skip 'st, 'nd, 'th .. */
- val= tmp + min((int) (val_end-tmp), 2);
+ val= tmp + MY_MIN((int) (val_end-tmp), 2);
break;
/* Hour */
@@ -198,14 +198,14 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
/* fall through */
case 'k':
case 'H':
- tmp= (char*) val + min(2, val_len);
+ tmp= (char*) val + MY_MIN(2, val_len);
l_time->hour= (int) my_strtoll10(val, &tmp, &error);
val= tmp;
break;
/* Minute */
case 'i':
- tmp= (char*) val + min(2, val_len);
+ tmp= (char*) val + MY_MIN(2, val_len);
l_time->minute= (int) my_strtoll10(val, &tmp, &error);
val= tmp;
break;
@@ -213,7 +213,7 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
/* Second */
case 's':
case 'S':
- tmp= (char*) val + min(2, val_len);
+ tmp= (char*) val + MY_MIN(2, val_len);
l_time->second= (int) my_strtoll10(val, &tmp, &error);
val= tmp;
break;
@@ -265,7 +265,7 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
val= tmp;
break;
case 'j':
- tmp= (char*) val + min(val_len, 3);
+ tmp= (char*) val + MY_MIN(val_len, 3);
yearday= (int) my_strtoll10(val, &tmp, &error);
val= tmp;
break;
@@ -277,7 +277,7 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
case 'u':
sunday_first_n_first_week_non_iso= (*ptr=='U' || *ptr== 'V');
strict_week_number= (*ptr=='V' || *ptr=='v');
- tmp= (char*) val + min(val_len, 2);
+ tmp= (char*) val + MY_MIN(val_len, 2);
if ((week_number= (int) my_strtoll10(val, &tmp, &error)) < 0 ||
(strict_week_number && !week_number) ||
week_number > 53)
@@ -289,7 +289,7 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
case 'X':
case 'x':
strict_week_number_year_type= (*ptr=='X');
- tmp= (char*) val + min(4, val_len);
+ tmp= (char*) val + MY_MIN(4, val_len);
strict_week_number_year= (int) my_strtoll10(val, &tmp, &error);
val= tmp;
break;
@@ -425,7 +425,7 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
{
if (!my_isspace(&my_charset_latin1,*val))
{
- make_truncated_value_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ make_truncated_value_warning(current_thd, Sql_condition::WARN_LEVEL_WARN,
val_begin, length,
cached_timestamp_type, NullS);
break;
@@ -437,8 +437,8 @@ static bool extract_date_time(DATE_TIME_FORMAT *format,
err:
{
char buff[128];
- strmake(buff, val_begin, min(length, sizeof(buff)-1));
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ strmake(buff, val_begin, MY_MIN(length, sizeof(buff)-1));
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_WRONG_VALUE_FOR_TYPE, ER(ER_WRONG_VALUE_FOR_TYPE),
date_time_type, buff, "str_to_date");
}
@@ -1714,7 +1714,7 @@ overflow:
ltime->hour= TIME_MAX_HOUR+1;
check_time_range(ltime, decimals, &unused);
- make_truncated_value_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ make_truncated_value_warning(current_thd, Sql_condition::WARN_LEVEL_WARN,
err->ptr(), err->length(),
MYSQL_TIMESTAMP_TIME, NullS);
return 0;
@@ -1744,7 +1744,7 @@ void Item_func_date_format::fix_length_and_dec()
else
{
fixed_length=0;
- max_length=min(arg1->max_length, MAX_BLOB_WIDTH) * 10 *
+ max_length=MY_MIN(arg1->max_length, MAX_BLOB_WIDTH) * 10 *
collation.collation->mbmaxlen;
set_if_smaller(max_length,MAX_BLOB_WIDTH);
}
@@ -2268,7 +2268,7 @@ String *Item_char_typecast::val_str(String *str)
if (cast_length != ~0U &&
cast_length > current_thd->variables.max_allowed_packet)
{
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARN_ALLOWED_PACKET_OVERFLOWED,
ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED),
cast_cs == &my_charset_bin ?
@@ -2326,7 +2326,7 @@ String *Item_char_typecast::val_str(String *str)
res= &str_value;
}
ErrConvString err(res);
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE,
ER(ER_TRUNCATED_WRONG_VALUE), char_type,
err.ptr());
@@ -2348,7 +2348,7 @@ String *Item_char_typecast::val_str(String *str)
if (res->length() > current_thd->variables.max_allowed_packet)
{
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARN_ALLOWED_PACKET_OVERFLOWED,
ER(ER_WARN_ALLOWED_PACKET_OVERFLOWED),
cast_cs == &my_charset_bin ?
@@ -2429,6 +2429,7 @@ bool Item_time_typecast::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
bool Item_date_typecast::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
{
+ fuzzy_date |= sql_mode_for_dates();
if (get_arg0_date(ltime, fuzzy_date & ~TIME_TIME_ONLY))
return 1;
@@ -2441,6 +2442,7 @@ bool Item_date_typecast::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
bool Item_datetime_typecast::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
{
+ fuzzy_date |= sql_mode_for_dates();
if (get_arg0_date(ltime, fuzzy_date & ~TIME_TIME_ONLY))
return 1;
@@ -2456,7 +2458,7 @@ bool Item_datetime_typecast::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
if (ltime->neg)
{
ErrConvTime str(ltime);
- make_truncated_value_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ make_truncated_value_warning(current_thd, Sql_condition::WARN_LEVEL_WARN,
&str, MYSQL_TIMESTAMP_DATETIME, 0);
return (null_value= 1);
}
@@ -2513,7 +2515,7 @@ err:
void Item_func_add_time::fix_length_and_dec()
{
enum_field_types arg0_field_type;
- decimals= max(args[0]->decimals, args[1]->decimals);
+ decimals= MY_MAX(args[0]->decimals, args[1]->decimals);
/*
The field type for the result of an Item_func_add_time function is defined
@@ -2610,7 +2612,7 @@ bool Item_func_add_time::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
check_time_range(ltime, decimals, &was_cut);
if (was_cut)
- make_truncated_value_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ make_truncated_value_warning(current_thd, Sql_condition::WARN_LEVEL_WARN,
&str, MYSQL_TIMESTAMP_TIME, NullS);
return (null_value= 0);
@@ -2698,7 +2700,7 @@ bool Item_func_timediff::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
check_time_range(ltime, decimals, &was_cut);
if (was_cut)
- make_truncated_value_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ make_truncated_value_warning(current_thd, Sql_condition::WARN_LEVEL_WARN,
&str, MYSQL_TIMESTAMP_TIME, NullS);
return (null_value= 0);
}
@@ -2750,7 +2752,7 @@ bool Item_func_maketime::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
char buf[28];
char *ptr= longlong10_to_str(hour, buf, args[0]->unsigned_flag ? 10 : -10);
int len = (int)(ptr - buf) + sprintf(ptr, ":%02u:%02u", (uint)minute, (uint)second);
- make_truncated_value_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ make_truncated_value_warning(current_thd, Sql_condition::WARN_LEVEL_WARN,
buf, len, MYSQL_TIMESTAMP_TIME,
NullS);
}
@@ -3109,7 +3111,7 @@ bool Item_func_str_to_date::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
date_time_format.format.length= format->length();
if (extract_date_time(&date_time_format, val->ptr(), val->length(),
ltime, cached_timestamp_type, 0, "datetime",
- fuzzy_date))
+ fuzzy_date | sql_mode_for_dates()))
return (null_value=1);
if (cached_timestamp_type == MYSQL_TIMESTAMP_TIME && ltime->day)
{
diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h
index 8a8419c7bd8..11e84cfc1cd 100644
--- a/sql/item_timefunc.h
+++ b/sql/item_timefunc.h
@@ -115,7 +115,7 @@ public:
{
int *input_version= (int*)int_arg;
/* This function was introduced in 5.5 */
- int output_version= max(*input_version, 50500);
+ int output_version= MY_MAX(*input_version, 50500);
*input_version= output_version;
return 0;
}
@@ -933,7 +933,7 @@ public:
const char *func_name() const { return "timediff"; }
void fix_length_and_dec()
{
- decimals= max(args[0]->decimals, args[1]->decimals);
+ decimals= MY_MAX(args[0]->decimals, args[1]->decimals);
Item_timefunc::fix_length_and_dec();
}
bool get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date);
diff --git a/sql/item_xmlfunc.cc b/sql/item_xmlfunc.cc
index 723429f107a..1aab6b45c74 100644
--- a/sql/item_xmlfunc.cc
+++ b/sql/item_xmlfunc.cc
@@ -1037,7 +1037,7 @@ static char simpletok[128]=
/*
! " # $ % & ' ( ) * + , - . / 0 1 2 3 4 5 6 7 8 9 : ; < = > ?
@ A B C D E F G H I J K L M N O P Q R S T U V W X Y Z [ \ ] ^ _
- ` a b c d e f g h i j k l m n o p q r s t u v w x y z { | } ~ €
+ ` a b c d e f g h i j k l m n o p q r s t u v w x y z { | } ~ \200
*/
0,1,0,0,1,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,1,1,1,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,
@@ -2501,12 +2501,12 @@ my_xpath_parse_VariableReference(MY_XPATH *xpath)
xpath->item= new Item_func_get_user_var(name);
else
{
- sp_variable_t *spv;
+ sp_variable *spv;
sp_pcontext *spc;
LEX *lex;
if ((lex= current_thd->lex) &&
(spc= lex->spcont) &&
- (spv= spc->find_variable(&name)))
+ (spv= spc->find_variable(name, false)))
{
Item_splocal *splocal= new Item_splocal(name, spv->offset, spv->type, 0);
#ifndef DBUG_OFF
@@ -2815,7 +2815,7 @@ String *Item_xml_str_func::parse_xml(String *raw_xml, String *parsed_xml_buf)
my_xml_error_lineno(&p) + 1,
(ulong) my_xml_error_pos(&p) + 1,
my_xml_error_string(&p));
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_WRONG_VALUE,
ER(ER_WRONG_VALUE), "XML", buf);
}
diff --git a/sql/key.cc b/sql/key.cc
index 0d3db2d5bf5..97388f43ebc 100644
--- a/sql/key.cc
+++ b/sql/key.cc
@@ -78,7 +78,7 @@ int find_ref_key(KEY *key, uint key_count, uchar *record, Field *field,
KEY_PART_INFO *key_part;
*key_length=0;
for (j=0, key_part=key_info->key_part ;
- j < key_info->key_parts ;
+ j < key_info->user_defined_key_parts ;
j++, key_part++)
{
if (key_part->offset == fieldpos)
@@ -132,7 +132,7 @@ void key_copy(uchar *to_key, uchar *from_record, KEY *key_info,
Don't copy data for null values
The -1 below is to subtract the null byte which is already handled
*/
- length= min(key_length, (uint) key_part->store_length-1);
+ length= MY_MIN(key_length, (uint) key_part->store_length-1);
if (with_zerofill)
bzero((char*) to_key, length);
continue;
@@ -142,7 +142,7 @@ void key_copy(uchar *to_key, uchar *from_record, KEY *key_info,
key_part->key_part_flag & HA_VAR_LENGTH_PART)
{
key_length-= HA_KEY_BLOB_LENGTH;
- length= min(key_length, key_part->length);
+ length= MY_MIN(key_length, key_part->length);
uint bytes= key_part->field->get_key_image(to_key, length, Field::itRAW);
if (with_zerofill && bytes < length)
bzero((char*) to_key + bytes, length - bytes);
@@ -150,7 +150,7 @@ void key_copy(uchar *to_key, uchar *from_record, KEY *key_info,
}
else
{
- length= min(key_length, key_part->length);
+ length= MY_MIN(key_length, key_part->length);
Field *field= key_part->field;
CHARSET_INFO *cs= field->charset();
uint bytes= field->get_key_image(to_key, length, Field::itRAW);
@@ -202,7 +202,7 @@ void key_restore(uchar *to_record, uchar *from_key, KEY *key_info,
Don't copy data for null bytes
The -1 below is to subtract the null byte which is already handled
*/
- length= min(key_length, (uint) key_part->store_length-1);
+ length= MY_MIN(key_length, (uint) key_part->store_length-1);
continue;
}
}
@@ -244,7 +244,7 @@ void key_restore(uchar *to_record, uchar *from_key, KEY *key_info,
my_ptrdiff_t ptrdiff= to_record - field->table->record[0];
field->move_field_offset(ptrdiff);
key_length-= HA_KEY_BLOB_LENGTH;
- length= min(key_length, key_part->length);
+ length= MY_MIN(key_length, key_part->length);
old_map= dbug_tmp_use_all_columns(field->table, field->table->write_set);
field->set_key_image(from_key, length);
dbug_tmp_restore_column_map(field->table->write_set, old_map);
@@ -253,7 +253,7 @@ void key_restore(uchar *to_record, uchar *from_key, KEY *key_info,
}
else
{
- length= min(key_length, key_part->length);
+ length= MY_MIN(key_length, key_part->length);
/* skip the byte with 'uneven' bits, if used */
memcpy(to_record + key_part->offset, from_key + used_uneven_bits
, (size_t) length - used_uneven_bits);
@@ -311,7 +311,7 @@ bool key_cmp_if_same(TABLE *table,const uchar *key,uint idx,uint key_length)
return 1;
continue;
}
- length= min((uint) (key_end-key), store_length);
+ length= MY_MIN((uint) (key_end-key), store_length);
if (!(key_part->key_type & (FIELDFLAG_NUMBER+FIELDFLAG_BINARY+
FIELDFLAG_PACK)))
{
@@ -389,7 +389,7 @@ void field_unpack(String *to, Field *field, const uchar *rec, uint max_length,
tmp.length(charpos);
}
if (max_length < field->pack_length())
- tmp.length(min(tmp.length(),max_length));
+ tmp.length(MY_MIN(tmp.length(),max_length));
ErrConvString err(&tmp);
to->append(err.ptr());
}
@@ -413,15 +413,15 @@ void field_unpack(String *to, Field *field, const uchar *rec, uint max_length,
idx Key number
*/
-void key_unpack(String *to,TABLE *table,uint idx)
+void key_unpack(String *to,TABLE *table, KEY *key)
{
KEY_PART_INFO *key_part,*key_part_end;
my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
DBUG_ENTER("key_unpack");
to->length(0);
- for (key_part=table->key_info[idx].key_part,key_part_end=key_part+
- table->key_info[idx].key_parts ;
+ for (key_part=key->key_part,key_part_end=key_part+
+ key->user_defined_key_parts ;
key_part < key_part_end;
key_part++)
{
@@ -431,8 +431,8 @@ void key_unpack(String *to,TABLE *table,uint idx)
{
if (table->record[0][key_part->null_offset] & key_part->null_bit)
{
- to->append(STRING_WITH_LEN("NULL"));
- continue;
+ to->append(STRING_WITH_LEN("NULL"));
+ continue;
}
}
field_unpack(to, key_part->field, table->record[0], key_part->length,
@@ -574,7 +574,7 @@ int key_rec_cmp(void *key_p, uchar *first_rec, uchar *second_rec)
/* loop over all given keys */
do
{
- key_parts= key_info->key_parts;
+ key_parts= key_info->user_defined_key_parts;
key_part= key_info->key_part;
key_part_num= 0;
@@ -586,8 +586,8 @@ int key_rec_cmp(void *key_p, uchar *first_rec, uchar *second_rec)
if (key_part->null_bit)
{
/* The key_part can contain NULL values */
- bool first_is_null= field->is_null_in_record_with_offset(first_diff);
- bool sec_is_null= field->is_null_in_record_with_offset(sec_diff);
+ bool first_is_null= field->is_real_null(first_diff);
+ bool sec_is_null= field->is_real_null(sec_diff);
/*
NULL is smaller then everything so if first is NULL and the other
not then we know that we should return -1 and for the opposite
diff --git a/sql/key.h b/sql/key.h
index 0eeda58cd17..de2b00a4773 100644
--- a/sql/key.h
+++ b/sql/key.h
@@ -32,7 +32,7 @@ void key_copy(uchar *to_key, uchar *from_record, KEY *key_info, uint key_length,
void key_restore(uchar *to_record, uchar *from_key, KEY *key_info,
uint key_length);
bool key_cmp_if_same(TABLE *form,const uchar *key,uint index,uint key_length);
-void key_unpack(String *to,TABLE *form,uint index);
+void key_unpack(String *to, TABLE *form, KEY *key);
void field_unpack(String *to, Field *field, const uchar *rec, uint max_length,
bool prefix_key);
bool is_key_used(TABLE *table, uint idx, const MY_BITMAP *fields);
diff --git a/sql/lex.h b/sql/lex.h
index 7edb1456e09..c5229beb653 100644
--- a/sql/lex.h
+++ b/sql/lex.h
@@ -151,6 +151,7 @@ static SYMBOL symbols[] = {
{ "CREATE", SYM(CREATE)},
{ "CROSS", SYM(CROSS)},
{ "CUBE", SYM(CUBE_SYM)},
+ { "CURRENT", SYM(CURRENT_SYM)},
{ "CURRENT_DATE", SYM(CURDATE)},
{ "CURRENT_POS", SYM(CURRENT_POS_SYM)},
{ "CURRENT_TIME", SYM(CURTIME)},
@@ -182,6 +183,7 @@ static SYMBOL symbols[] = {
{ "DESCRIBE", SYM(DESCRIBE)},
{ "DES_KEY_FILE", SYM(DES_KEY_FILE)},
{ "DETERMINISTIC", SYM(DETERMINISTIC_SYM)},
+ { "DIAGNOSTICS", SYM(DIAGNOSTICS_SYM)},
{ "DIRECTORY", SYM(DIRECTORY_SYM)},
{ "DISABLE", SYM(DISABLE_SYM)},
{ "DISCARD", SYM(DISCARD)},
@@ -214,6 +216,7 @@ static SYMBOL symbols[] = {
{ "EVENTS", SYM(EVENTS_SYM)},
{ "EVERY", SYM(EVERY_SYM)},
{ "EXAMINED", SYM(EXAMINED_SYM)},
+ { "EXCHANGE", SYM(EXCHANGE_SYM)},
{ "EXECUTE", SYM(EXECUTE_SYM)},
{ "EXISTS", SYM(EXISTS)},
{ "EXIT", SYM(EXIT_SYM)},
@@ -246,6 +249,7 @@ static SYMBOL symbols[] = {
{ "GEOMETRY", SYM(GEOMETRY_SYM)},
{ "GEOMETRYCOLLECTION",SYM(GEOMETRYCOLLECTION)},
{ "GET_FORMAT", SYM(GET_FORMAT)},
+ { "GET", SYM(GET_SYM)},
{ "GLOBAL", SYM(GLOBAL_SYM)},
{ "GRANT", SYM(GRANT)},
{ "GRANTS", SYM(GRANTS)},
@@ -398,6 +402,7 @@ static SYMBOL symbols[] = {
{ "NOT", SYM(NOT_SYM)},
{ "NO_WRITE_TO_BINLOG", SYM(NO_WRITE_TO_BINLOG)},
{ "NULL", SYM(NULL_SYM)},
+ { "NUMBER", SYM(NUMBER_SYM)},
{ "NUMERIC", SYM(NUMERIC_SYM)},
{ "NVARCHAR", SYM(NVARCHAR_SYM)},
{ "OFFSET", SYM(OFFSET_SYM)},
@@ -484,6 +489,7 @@ static SYMBOL symbols[] = {
{ "RESTORE", SYM(RESTORE_SYM)},
{ "RESTRICT", SYM(RESTRICT)},
{ "RESUME", SYM(RESUME_SYM)},
+ { "RETURNED_SQLSTATE",SYM(RETURNED_SQLSTATE_SYM)},
{ "RETURN", SYM(RETURN_SYM)},
{ "RETURNS", SYM(RETURNS_SYM)},
{ "REVOKE", SYM(REVOKE)},
@@ -493,6 +499,7 @@ static SYMBOL symbols[] = {
{ "ROLLUP", SYM(ROLLUP_SYM)},
{ "ROUTINE", SYM(ROUTINE_SYM)},
{ "ROW", SYM(ROW_SYM)},
+ { "ROW_COUNT", SYM(ROW_COUNT_SYM)},
{ "ROWS", SYM(ROWS_SYM)},
{ "ROW_FORMAT", SYM(ROW_FORMAT_SYM)},
{ "RTREE", SYM(RTREE_SYM)},
@@ -555,6 +562,9 @@ static SYMBOL symbols[] = {
{ "START", SYM(START_SYM)},
{ "STARTING", SYM(STARTING)},
{ "STARTS", SYM(STARTS_SYM)},
+ { "STATS_AUTO_RECALC",SYM(STATS_AUTO_RECALC_SYM)},
+ { "STATS_PERSISTENT", SYM(STATS_PERSISTENT_SYM)},
+ { "STATS_SAMPLE_PAGES",SYM(STATS_SAMPLE_PAGES_SYM)},
{ "STATUS", SYM(STATUS_SYM)},
{ "STOP", SYM(STOP_SYM)},
{ "STORAGE", SYM(STORAGE_SYM)},
diff --git a/sql/lock.cc b/sql/lock.cc
index 67c8b240c6f..c3f6da02ca1 100644
--- a/sql/lock.cc
+++ b/sql/lock.cc
@@ -312,8 +312,8 @@ bool mysql_lock_tables(THD *thd, MYSQL_LOCK *sql_lock, uint flags)
thd_proc_info(thd, "Table lock");
/* Copy the lock data array. thr_multi_lock() reorders its contents. */
- memcpy(sql_lock->locks + sql_lock->lock_count, sql_lock->locks,
- sql_lock->lock_count * sizeof(*sql_lock->locks));
+ memmove(sql_lock->locks + sql_lock->lock_count, sql_lock->locks,
+ sql_lock->lock_count * sizeof(*sql_lock->locks));
/* Lock on the copied half of the lock data array. */
rc= thr_lock_errno_to_mysql[(int) thr_multi_lock(sql_lock->locks +
sql_lock->lock_count,
@@ -692,7 +692,7 @@ static int unlock_external(THD *thd, TABLE **table,uint count)
MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, uint flags)
{
- uint i,tables,lock_count;
+ uint i,lock_count,table_count;
MYSQL_LOCK *sql_lock;
THR_LOCK_DATA **locks, **locks_buf;
TABLE **to, **table_buf;
@@ -701,16 +701,15 @@ MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, uint flags)
DBUG_ASSERT((flags == GET_LOCK_UNLOCK) || (flags == GET_LOCK_STORE_LOCKS));
DBUG_PRINT("info", ("count %d", count));
- for (i=tables=lock_count=0 ; i < count ; i++)
+ for (i=lock_count=table_count=0 ; i < count ; i++)
{
TABLE *t= table_ptr[i];
-
if (t->s->tmp_table != NON_TRANSACTIONAL_TMP_TABLE &&
t->s->tmp_table != INTERNAL_TMP_TABLE)
{
- tables+= t->file->lock_count();
- lock_count++;
+ lock_count+= t->file->lock_count();
+ table_count++;
}
}
@@ -722,13 +721,13 @@ MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, uint flags)
*/
if (!(sql_lock= (MYSQL_LOCK*)
my_malloc(sizeof(*sql_lock) +
- sizeof(THR_LOCK_DATA*) * tables * 2 +
- sizeof(table_ptr) * lock_count,
+ sizeof(THR_LOCK_DATA*) * lock_count * 2 +
+ sizeof(table_ptr) * table_count,
MYF(0))))
DBUG_RETURN(0);
locks= locks_buf= sql_lock->locks= (THR_LOCK_DATA**) (sql_lock + 1);
- to= table_buf= sql_lock->table= (TABLE**) (locks + tables * 2);
- sql_lock->table_count=lock_count;
+ to= table_buf= sql_lock->table= (TABLE**) (locks + lock_count * 2);
+ sql_lock->table_count= table_count;
for (i=0 ; i < count ; i++)
{
@@ -764,7 +763,7 @@ MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, uint flags)
}
}
/*
- We do not use 'tables', because there are cases where store_lock()
+ We do not use 'lock_count', because there are cases where store_lock()
returns less locks than lock_count() claimed. This can happen when
a FLUSH TABLES tries to abort locks from a MERGE table of another
thread. When that thread has just opened the table, but not yet
@@ -778,6 +777,7 @@ MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, uint flags)
And in the FLUSH case, the memory is released quickly anyway.
*/
sql_lock->lock_count= locks - locks_buf;
+ DBUG_ASSERT(sql_lock->lock_count <= lock_count);
DBUG_PRINT("info", ("sql_lock->table_count %d sql_lock->lock_count %d",
sql_lock->table_count, sql_lock->lock_count));
DBUG_RETURN(sql_lock);
diff --git a/sql/log.cc b/sql/log.cc
index 1a3b651f76f..1295dc087fd 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -171,9 +171,9 @@ public:
virtual bool handle_condition(THD *thd,
uint sql_errno,
const char* sql_state,
- MYSQL_ERROR::enum_warning_level level,
+ Sql_condition::enum_warning_level level,
const char* msg,
- MYSQL_ERROR ** cond_hdl);
+ Sql_condition ** cond_hdl);
const char *message() const { return m_message; }
};
@@ -181,9 +181,9 @@ bool
Silence_log_table_errors::handle_condition(THD *,
uint,
const char*,
- MYSQL_ERROR::enum_warning_level,
+ Sql_condition::enum_warning_level,
const char* msg,
- MYSQL_ERROR ** cond_hdl)
+ Sql_condition ** cond_hdl)
{
*cond_hdl= NULL;
strmake_buf(m_message, msg);
@@ -783,8 +783,8 @@ bool Log_to_csv_event_handler::
Open_tables_backup open_tables_backup;
CHARSET_INFO *client_cs= thd->variables.character_set_client;
bool save_time_zone_used;
- long query_time= (long) min(query_utime/1000000, TIME_MAX_VALUE_SECONDS);
- long lock_time= (long) min(lock_utime/1000000, TIME_MAX_VALUE_SECONDS);
+ long query_time= (long) MY_MIN(query_utime/1000000, TIME_MAX_VALUE_SECONDS);
+ long lock_time= (long) MY_MIN(lock_utime/1000000, TIME_MAX_VALUE_SECONDS);
long query_time_micro= (long) (query_utime % 1000000);
long lock_time_micro= (long) (lock_utime % 1000000);
@@ -897,6 +897,9 @@ bool Log_to_csv_event_handler::
if (table->field[10]->store(sql_text, sql_text_len, client_cs) < 0)
goto err;
+ if (table->field[11]->store((longlong) thd->thread_id, TRUE))
+ goto err;
+
/* log table entries are not replicated */
if (table->file->ha_write_row(table->record[0]))
goto err;
@@ -2059,7 +2062,7 @@ bool MYSQL_BIN_LOG::check_write_error(THD *thd)
if (!thd->is_error())
DBUG_RETURN(checked);
- switch (thd->stmt_da->sql_errno())
+ switch (thd->get_stmt_da()->sql_errno())
{
case ER_TRANS_CACHE_FULL:
case ER_STMT_CACHE_FULL:
@@ -2927,7 +2930,7 @@ const char *MYSQL_LOG::generate_name(const char *log_name,
{
char *p= fn_ext(log_name);
uint length= (uint) (p - log_name);
- strmake(buff, log_name, min(length, FN_REFLEN-1));
+ strmake(buff, log_name, MY_MIN(length, FN_REFLEN-1));
return (const char*)buff;
}
return log_name;
@@ -3825,7 +3828,7 @@ bool MYSQL_BIN_LOG::reset_logs(THD* thd, bool create_new_log)
{
if (my_errno == ENOENT)
{
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_LOG_PURGE_NO_FILE, ER(ER_LOG_PURGE_NO_FILE),
linfo.log_file_name);
sql_print_information("Failed to delete file '%s'",
@@ -3835,7 +3838,7 @@ bool MYSQL_BIN_LOG::reset_logs(THD* thd, bool create_new_log)
}
else
{
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_BINLOG_PURGE_FATAL_ERR,
"a problem with deleting %s; "
"consider examining correspondence "
@@ -3861,7 +3864,7 @@ bool MYSQL_BIN_LOG::reset_logs(THD* thd, bool create_new_log)
{
if (my_errno == ENOENT)
{
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_LOG_PURGE_NO_FILE, ER(ER_LOG_PURGE_NO_FILE),
index_file_name);
sql_print_information("Failed to delete file '%s'",
@@ -3871,7 +3874,7 @@ bool MYSQL_BIN_LOG::reset_logs(THD* thd, bool create_new_log)
}
else
{
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_BINLOG_PURGE_FATAL_ERR,
"a problem with deleting %s; "
"consider examining correspondence "
@@ -4311,7 +4314,7 @@ int MYSQL_BIN_LOG::purge_index_entry(THD *thd, ulonglong *decrease_log_space,
*/
if (thd)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_LOG_PURGE_NO_FILE, ER(ER_LOG_PURGE_NO_FILE),
log_info.log_file_name);
}
@@ -4326,7 +4329,7 @@ int MYSQL_BIN_LOG::purge_index_entry(THD *thd, ulonglong *decrease_log_space,
*/
if (thd)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_BINLOG_PURGE_FATAL_ERR,
"a problem with getting info on being purged %s; "
"consider examining correspondence "
@@ -4354,7 +4357,7 @@ int MYSQL_BIN_LOG::purge_index_entry(THD *thd, ulonglong *decrease_log_space,
{
if (thd)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_BINLOG_PURGE_FATAL_ERR,
"a problem with deleting %s and "
"reading the binlog index file",
@@ -4390,7 +4393,7 @@ int MYSQL_BIN_LOG::purge_index_entry(THD *thd, ulonglong *decrease_log_space,
{
if (thd)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_LOG_PURGE_NO_FILE, ER(ER_LOG_PURGE_NO_FILE),
log_info.log_file_name);
}
@@ -4402,7 +4405,7 @@ int MYSQL_BIN_LOG::purge_index_entry(THD *thd, ulonglong *decrease_log_space,
{
if (thd)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_BINLOG_PURGE_FATAL_ERR,
"a problem with deleting %s; "
"consider examining correspondence "
@@ -4492,7 +4495,7 @@ int MYSQL_BIN_LOG::purge_logs_before_date(time_t purge_time)
*/
if (thd)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_BINLOG_PURGE_FATAL_ERR,
"a problem with getting info on being purged %s; "
"consider examining correspondence "
@@ -5390,7 +5393,7 @@ MYSQL_BIN_LOG::write_gtid_event(THD *thd, bool standalone,
mysql_mutex_lock(&LOCK_rpl_gtid_state);
err= rpl_global_gtid_binlog_state.update(&gtid, opt_gtid_strict_mode);
mysql_mutex_unlock(&LOCK_rpl_gtid_state);
- if (err && thd->stmt_da->sql_errno()==ER_GTID_STRICT_OUT_OF_ORDER)
+ if (err && thd->get_stmt_da()->sql_errno()==ER_GTID_STRICT_OUT_OF_ORDER)
errno= ER_GTID_STRICT_OUT_OF_ORDER;
}
else
@@ -6372,9 +6375,9 @@ int query_error_code(THD *thd, bool not_killed)
if (not_killed || (killed_mask_hard(thd->killed) == KILL_BAD_DATA))
{
- error= thd->is_error() ? thd->stmt_da->sql_errno() : 0;
+ error= thd->is_error() ? thd->get_stmt_da()->sql_errno() : 0;
- /* thd->stmt_da->sql_errno() might be ER_SERVER_SHUTDOWN or
+ /* thd->get_stmt_da()->sql_errno() might be ER_SERVER_SHUTDOWN or
ER_QUERY_INTERRUPTED, So here we need to make sure that error
is not set to these errors when specified not_killed by the
caller.
@@ -7307,7 +7310,7 @@ static void print_buffer_to_nt_eventlog(enum loglevel level, char *buff,
DBUG_ENTER("print_buffer_to_nt_eventlog");
/* Add ending CR/LF's to string, overwrite last chars if necessary */
- strmov(buffptr+min(length, buffLen-5), "\r\n\r\n");
+ strmov(buffptr+MY_MIN(length, buffLen-5), "\r\n\r\n");
setup_windows_event_source();
if ((event= RegisterEventSource(NULL,"MySQL")))
@@ -8546,7 +8549,8 @@ binlog_background_thread(void *arg __attribute__((unused)))
sql_print_warning("Failed to load slave replication state from table "
"%s.%s: %u: %s", "mysql",
rpl_gtid_slave_state_table_name.str,
- thd->stmt_da->sql_errno(), thd->stmt_da->message());
+ thd->get_stmt_da()->sql_errno(),
+ thd->get_stmt_da()->message());
#endif
mysql_mutex_lock(&mysql_bin_log.LOCK_binlog_background_thread);
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 4a92414c548..01a5dd7f4e3 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -16,19 +16,12 @@
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
-#ifdef MYSQL_CLIENT
-
#include "sql_priv.h"
-#else
-
-#ifdef USE_PRAGMA_IMPLEMENTATION
-#pragma implementation // gcc: Class implementation
-#endif
-
+#ifndef MYSQL_CLIENT
+#include "my_global.h" // REQUIRED by log_event.h > m_string.h > my_bitmap.h
#include "sql_priv.h"
#include "unireg.h"
-#include "my_global.h" // REQUIRED by log_event.h > m_string.h > my_bitmap.h
#include "log_event.h"
#include "sql_base.h" // close_thread_tables
#include "sql_cache.h" // QUERY_CACHE_FLAGS_SIZE
@@ -56,6 +49,7 @@
#include <my_bitmap.h>
#include "rpl_utility.h"
+#define my_b_write_string(A, B) my_b_write((A), (B), (uint) (sizeof(B) - 1))
/**
BINLOG_CHECKSUM variable.
@@ -217,8 +211,9 @@ static void inline slave_rows_error_report(enum loglevel level, int ha_error,
char buff[MAX_SLAVE_ERRMSG], *slider;
const char *buff_end= buff + sizeof(buff);
uint len;
- List_iterator_fast<MYSQL_ERROR> it(thd->warning_info->warn_list());
- MYSQL_ERROR *err;
+ Diagnostics_area::Sql_condition_iterator it=
+ thd->get_stmt_da()->sql_conditions();
+ const Sql_condition *err;
buff[0]= 0;
for (err= it++, slider= buff; err && slider < buff_end - 1;
@@ -230,7 +225,7 @@ static void inline slave_rows_error_report(enum loglevel level, int ha_error,
}
if (ha_error != 0)
- rli->report(level, thd->is_error() ? thd->stmt_da->sql_errno() : 0,
+ rli->report(level, thd->is_error() ? thd->get_stmt_da()->sql_errno() : 0,
"Could not execute %s event on table %s.%s;"
"%s handler error %s; "
"the event's master log %s, end_log_pos %lu",
@@ -238,7 +233,7 @@ static void inline slave_rows_error_report(enum loglevel level, int ha_error,
buff, handler_error == NULL ? "<unknown>" : handler_error,
log_name, pos);
else
- rli->report(level, thd->is_error() ? thd->stmt_da->sql_errno() : 0,
+ rli->report(level, thd->is_error() ? thd->get_stmt_da()->sql_errno() : 0,
"Could not execute %s event on table %s.%s;"
"%s the event's master log %s, end_log_pos %lu",
type, table->s->db.str, table->s->table_name.str,
@@ -336,24 +331,24 @@ uint debug_not_change_ts_if_art_event= 1; // bug#29309 simulation
static void pretty_print_str(IO_CACHE* cache, const char* str, int len)
{
const char* end = str + len;
- my_b_printf(cache, "\'");
+ my_b_write_byte(cache, '\'');
while (str < end)
{
char c;
switch ((c=*str++)) {
- case '\n': my_b_printf(cache, "\\n"); break;
- case '\r': my_b_printf(cache, "\\r"); break;
- case '\\': my_b_printf(cache, "\\\\"); break;
- case '\b': my_b_printf(cache, "\\b"); break;
- case '\t': my_b_printf(cache, "\\t"); break;
- case '\'': my_b_printf(cache, "\\'"); break;
- case 0 : my_b_printf(cache, "\\0"); break;
+ case '\n': my_b_write(cache, "\\n", 2); break;
+ case '\r': my_b_write(cache, "\\r", 2); break;
+ case '\\': my_b_write(cache, "\\\\", 2); break;
+ case '\b': my_b_write(cache, "\\b", 2); break;
+ case '\t': my_b_write(cache, "\\t", 2); break;
+ case '\'': my_b_write(cache, "\\'", 2); break;
+ case 0 : my_b_write(cache, "\\0", 2); break;
default:
- my_b_printf(cache, "%c", c);
+ my_b_write_byte(cache, c);
break;
}
}
- my_b_printf(cache, "\'");
+ my_b_write_byte(cache, '\'');
}
#endif /* MYSQL_CLIENT */
@@ -442,13 +437,13 @@ inline int ignored_error_code(int err_code)
*/
int convert_handler_error(int error, THD* thd, TABLE *table)
{
- uint actual_error= (thd->is_error() ? thd->stmt_da->sql_errno() :
+ uint actual_error= (thd->is_error() ? thd->get_stmt_da()->sql_errno() :
0);
if (actual_error == 0)
{
table->file->print_error(error, MYF(0));
- actual_error= (thd->is_error() ? thd->stmt_da->sql_errno() :
+ actual_error= (thd->is_error() ? thd->get_stmt_da()->sql_errno() :
ER_UNKNOWN_ERROR);
if (actual_error == ER_UNKNOWN_ERROR)
if (global_system_variables.log_warnings)
@@ -554,9 +549,8 @@ static char *load_data_tmp_prefix(char *name,
/* Add marker that this is a multi-master-file */
*name++='-';
/* Convert connection_name to a safe filename */
- buf_length= strconvert(system_charset_info, connection_name->str,
- &my_charset_filename, name, FN_REFLEN,
- &errors);
+ buf_length= strconvert(system_charset_info, connection_name->str, FN_REFLEN,
+ &my_charset_filename, name, FN_REFLEN, &errors);
name+= buf_length;
*name++= '-';
}
@@ -756,7 +750,7 @@ static void print_set_option(IO_CACHE* file, uint32 bits_changed,
if (bits_changed & option)
{
if (*need_comma)
- my_b_printf(file,", ");
+ my_b_write(file, ", ", 2);
my_b_printf(file,"%s=%d", name, test(flags & option));
*need_comma= 1;
}
@@ -1411,7 +1405,7 @@ Log_event* Log_event::read_log_event(IO_CACHE* file,
of 13 bytes, whereas LOG_EVENT_MINIMAL_HEADER_LEN is 19 bytes (it's
"minimal" over the set {MySQL >=4.0}).
*/
- uint header_size= min(description_event->common_header_len,
+ uint header_size= MY_MIN(description_event->common_header_len,
LOG_EVENT_MINIMAL_HEADER_LEN);
LOCK_MUTEX;
@@ -1768,7 +1762,7 @@ void Log_event::print_header(IO_CACHE* file,
my_off_t hexdump_from= print_event_info->hexdump_from;
DBUG_ENTER("Log_event::print_header");
- my_b_printf(file, "#");
+ my_b_write_byte(file, '#');
print_timestamp(file);
my_b_printf(file, " server id %lu end_log_pos %s ", (ulong) server_id,
llstr(log_pos,llbuff));
@@ -1788,7 +1782,7 @@ void Log_event::print_header(IO_CACHE* file,
/* mysqlbinlog --hexdump */
if (print_event_info->hexdump_from)
{
- my_b_printf(file, "\n");
+ my_b_write_byte(file, '\n');
uchar *ptr= (uchar*)temp_buf;
my_off_t size=
uint4korr(ptr + EVENT_LEN_OFFSET) - LOG_EVENT_MINIMAL_HEADER_LEN;
@@ -1889,11 +1883,11 @@ static void
my_b_write_quoted(IO_CACHE *file, const uchar *ptr, uint length)
{
const uchar *s;
- my_b_printf(file, "'");
+ my_b_write_byte(file, '\'');
for (s= ptr; length > 0 ; s++, length--)
{
if (*s > 0x1F)
- my_b_write(file, s, 1);
+ my_b_write_byte(file, *s);
else if (*s == '\'')
my_b_write(file, "\\'", 2);
else if (*s == '\\')
@@ -1905,7 +1899,7 @@ my_b_write_quoted(IO_CACHE *file, const uchar *ptr, uint length)
my_b_write(file, hex, len);
}
}
- my_b_printf(file, "'");
+ my_b_write_byte(file, '\'');
}
@@ -1920,13 +1914,13 @@ static void
my_b_write_bit(IO_CACHE *file, const uchar *ptr, uint nbits)
{
uint bitnum, nbits8= ((nbits + 7) / 8) * 8, skip_bits= nbits8 - nbits;
- my_b_printf(file, "b'");
+ my_b_write(file, "b'", 2);
for (bitnum= skip_bits ; bitnum < nbits8; bitnum++)
{
int is_set= (ptr[(bitnum) / 8] >> (7 - bitnum % 8)) & 0x01;
- my_b_write(file, (const uchar*) (is_set ? "1" : "0"), 1);
+ my_b_write_byte(file, (is_set ? '1' : '0'));
}
- my_b_printf(file, "'");
+ my_b_write_byte(file, '\'');
}
@@ -2021,7 +2015,7 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr,
int32 si= sint4korr(ptr);
uint32 ui= uint4korr(ptr);
my_b_write_sint32_and_uint32(file, si, ui);
- my_snprintf(typestr, typestr_length, "INT");
+ strmake(typestr, "INT", typestr_length);
return 4;
}
@@ -2029,7 +2023,7 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr,
{
my_b_write_sint32_and_uint32(file, (int) (signed char) *ptr,
(uint) (unsigned char) *ptr);
- my_snprintf(typestr, typestr_length, "TINYINT");
+ strmake(typestr, "TINYINT", typestr_length);
return 1;
}
@@ -2038,7 +2032,7 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr,
int32 si= (int32) sint2korr(ptr);
uint32 ui= (uint32) uint2korr(ptr);
my_b_write_sint32_and_uint32(file, si, ui);
- my_snprintf(typestr, typestr_length, "SHORTINT");
+ strmake(typestr, "SHORTINT", typestr_length);
return 2;
}
@@ -2047,23 +2041,24 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr,
int32 si= sint3korr(ptr);
uint32 ui= uint3korr(ptr);
my_b_write_sint32_and_uint32(file, si, ui);
- my_snprintf(typestr, typestr_length, "MEDIUMINT");
+ strmake(typestr, "MEDIUMINT", typestr_length);
return 3;
}
case MYSQL_TYPE_LONGLONG:
{
char tmp[64];
+ size_t length;
longlong si= sint8korr(ptr);
- longlong10_to_str(si, tmp, -10);
- my_b_printf(file, "%s", tmp);
+ length= (longlong10_to_str(si, tmp, -10) - tmp);
+ my_b_write(file, tmp, length);
if (si < 0)
{
ulonglong ui= uint8korr(ptr);
longlong10_to_str((longlong) ui, tmp, 10);
my_b_printf(file, " (%s)", tmp);
}
- my_snprintf(typestr, typestr_length, "LONGINT");
+ strmake(typestr, "LONGINT", typestr_length);
return 8;
}
@@ -2072,6 +2067,7 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr,
uint precision= meta >> 8;
uint decimals= meta & 0xFF;
uint bin_size= my_decimal_get_binary_size(precision, decimals);
+ uint length;
my_decimal dec;
binary2my_decimal(E_DEC_FATAL_ERROR, (uchar*) ptr, &dec,
precision, decimals);
@@ -2083,7 +2079,8 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr,
for (i=0; i < end; i++)
pos+= sprintf(pos, "%09d.", dec.buf[i]);
pos+= sprintf(pos, "%09d", dec.buf[i]);
- my_b_printf(file, "%s", buff);
+ length= (uint) (pos - buff);
+ my_b_write(file, buff, length);
my_snprintf(typestr, typestr_length, "DECIMAL(%d,%d)",
precision, decimals);
return bin_size;
@@ -2096,7 +2093,7 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr,
char tmp[320];
sprintf(tmp, "%-20g", (double) fl);
my_b_printf(file, "%s", tmp); /* my_snprintf doesn't support %-20g */
- my_snprintf(typestr, typestr_length, "FLOAT");
+ strmake(typestr, "FLOAT", typestr_length);
return 4;
}
@@ -2105,8 +2102,8 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr,
double dbl;
float8get(dbl, ptr);
char tmp[320];
- sprintf(tmp, "%-.20g", dbl); /* my_snprintf doesn't support %-20g */
- my_b_printf(file, "%s", tmp);
+ sprintf(tmp, "%-.20g", dbl); /* my_snprintf doesn't support %-20g */
+ my_b_printf(file, tmp, "%s");
strcpy(typestr, "DOUBLE");
return 8;
}
@@ -2125,7 +2122,7 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr,
{
uint32 i32= uint4korr(ptr);
my_b_printf(file, "%d", i32);
- my_snprintf(typestr, typestr_length, "TIMESTAMP");
+ strmake(typestr, "TIMESTAMP", typestr_length);
return 4;
}
@@ -2150,7 +2147,7 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr,
my_b_printf(file, "%04d-%02d-%02d %02d:%02d:%02d",
(int) (d / 10000), (int) (d % 10000) / 100, (int) (d % 100),
(int) (t / 10000), (int) (t % 10000) / 100, (int) t % 100);
- my_snprintf(typestr, typestr_length, "DATETIME");
+ strmake(typestr, "DATETIME", typestr_length);
return 8;
}
@@ -2173,7 +2170,7 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr,
const char *sign= tmp < 0 ? "-" : "";
my_b_printf(file, "'%s%02d:%02d:%02d'",
sign, i32 / 10000, (i32 % 10000) / 100, i32 % 100, i32);
- my_snprintf(typestr, typestr_length, "TIME");
+ strmake(typestr, "TIME", typestr_length);
return 3;
}
@@ -2212,7 +2209,7 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr,
*pos--= (char) ('0'+part%10); part/=10;
*pos= (char) ('0'+part);
my_b_printf(file , "'%s'", buf);
- my_snprintf(typestr, typestr_length, "DATE");
+ strmake(typestr, "DATE", typestr_length);
return 3;
}
@@ -2220,8 +2217,9 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr,
{
uint i32= uint3korr(ptr);
my_b_printf(file , "'%04d:%02d:%02d'",
- (i32 / (16L * 32L)), (i32 / 32L % 16L), (i32 % 32L));
- my_snprintf(typestr, typestr_length, "DATE");
+ (int)(i32 / (16L * 32L)), (int)(i32 / 32L % 16L),
+ (int)(i32 % 32L));
+ strmake(typestr, "DATE", typestr_length);
return 3;
}
@@ -2229,7 +2227,7 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr,
{
uint32 i32= *ptr;
my_b_printf(file, "%04d", i32+ 1900);
- my_snprintf(typestr, typestr_length, "YEAR");
+ strmake(typestr, "YEAR", typestr_length);
return 1;
}
@@ -2237,13 +2235,13 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr,
switch (meta & 0xFF) {
case 1:
my_b_printf(file, "%d", (int) *ptr);
- my_snprintf(typestr, typestr_length, "ENUM(1 byte)");
+ strmake(typestr, "ENUM(1 byte)", typestr_length);
return 1;
case 2:
{
int32 i32= uint2korr(ptr);
my_b_printf(file, "%d", i32);
- my_snprintf(typestr, typestr_length, "ENUM(2 bytes)");
+ strmake(typestr, "ENUM(2 bytes)", typestr_length);
return 2;
}
default:
@@ -2262,22 +2260,22 @@ log_event_print_value(IO_CACHE *file, const uchar *ptr,
case 1:
length= *ptr;
my_b_write_quoted(file, ptr + 1, length);
- my_snprintf(typestr, typestr_length, "TINYBLOB/TINYTEXT");
+ strmake(typestr, "TINYBLOB/TINYTEXT", typestr_length);
return length + 1;
case 2:
length= uint2korr(ptr);
my_b_write_quoted(file, ptr + 2, length);
- my_snprintf(typestr, typestr_length, "BLOB/TEXT");
+ strmake(typestr, "BLOB/TEXT", typestr_length);
return length + 2;
case 3:
length= uint3korr(ptr);
my_b_write_quoted(file, ptr + 3, length);
- my_snprintf(typestr, typestr_length, "MEDIUMBLOB/MEDIUMTEXT");
+ strmake(typestr, "MEDIUMBLOB/MEDIUMTEXT", typestr_length);
return length + 3;
case 4:
length= uint4korr(ptr);
my_b_write_quoted(file, ptr + 4, length);
- my_snprintf(typestr, typestr_length, "LONGBLOB/LONGTEXT");
+ strmake(typestr, "LONGBLOB/LONGTEXT", typestr_length);
return length + 4;
default:
my_b_printf(file, "!! Unknown BLOB packlen=%d", length);
@@ -2348,11 +2346,11 @@ Rows_log_event::print_verbose_one_row(IO_CACHE *file, table_def *td,
if (is_null)
{
- my_b_printf(file, "### @%d=NULL", i + 1);
+ my_b_printf(file, "### @%lu=NULL", (ulong)i + 1);
}
else
{
- my_b_printf(file, "### @%d=", i + 1);
+ my_b_printf(file, "### @%lu=", (ulong)i + 1);
size_t size= log_event_print_value(file, value,
td->type(i), td->field_metadata(i),
typestr, sizeof(typestr));
@@ -2364,7 +2362,7 @@ Rows_log_event::print_verbose_one_row(IO_CACHE *file, table_def *td,
if (print_event_info->verbose > 1)
{
- my_b_printf(file, " /* ");
+ my_b_write(file, " /* ", 4);
if (typestr[0])
my_b_printf(file, "%s ", typestr);
@@ -2374,10 +2372,10 @@ Rows_log_event::print_verbose_one_row(IO_CACHE *file, table_def *td,
my_b_printf(file, "meta=%d nullable=%d is_null=%d ",
td->field_metadata(i),
td->maybe_null(i), is_null);
- my_b_printf(file, "*/");
+ my_b_write(file, "*/", 2);
}
- my_b_printf(file, "\n");
+ my_b_write_byte(file, '\n');
null_bit_index++;
}
@@ -2423,10 +2421,19 @@ void Rows_log_event::print_verbose(IO_CACHE *file,
if (!(map= print_event_info->m_table_map.get_table(m_table_id)) ||
!(td= map->create_table_def()))
{
- my_b_printf(file, "### Row event for unknown table #%d", m_table_id);
+ my_b_printf(file, "### Row event for unknown table #%lu",
+ (ulong) m_table_id);
return;
}
+ /* If the write rows event contained no values for the AI */
+ if (((type_code == WRITE_ROWS_EVENT) && (m_rows_buf==m_rows_end)))
+ {
+ my_b_printf(file, "### INSERT INTO %`s.%`s VALUES ()\n",
+ map->get_db_name(), map->get_table_name());
+ goto end;
+ }
+
for (const uchar *value= m_rows_buf; value < m_rows_end; )
{
size_t length;
@@ -2484,7 +2491,7 @@ void Log_event::print_base64(IO_CACHE* file,
if (print_event_info->base64_output_mode != BASE64_OUTPUT_DECODE_ROWS)
{
if (my_b_tell(file) == 0)
- my_b_printf(file, "\nBINLOG '\n");
+ my_b_write_string(file, "\nBINLOG '\n");
my_b_printf(file, "%s\n", tmp_str);
@@ -3202,7 +3209,7 @@ Query_log_event::Query_log_event(const char* buf, uint event_len,
be even bigger, but this will suffice to catch most corruption
errors that can lead to a crash.
*/
- if (status_vars_len > min(data_len, MAX_SIZE_LOG_EVENT_STATUS))
+ if (status_vars_len > MY_MIN(data_len, MAX_SIZE_LOG_EVENT_STATUS))
{
DBUG_PRINT("info", ("status_vars_len (%u) > data_len (%lu); query= 0",
status_vars_len, data_len));
@@ -3670,7 +3677,7 @@ void Query_log_event::print_query_header(IO_CACHE* file,
if (unlikely(tmp)) /* some bits have changed */
{
bool need_comma= 0;
- my_b_printf(file, "SET ");
+ my_b_write_string(file, "SET ");
print_set_option(file, tmp, OPTION_NO_FOREIGN_KEY_CHECKS, ~flags2,
"@@session.foreign_key_checks", &need_comma);
print_set_option(file, tmp, OPTION_AUTO_IS_NULL, flags2,
@@ -4056,7 +4063,8 @@ int Query_log_event::do_apply_event(Relay_log_info const *rli,
"Error during COMMIT: failed to update GTID state in "
"%s.%s: %d: %s",
"mysql", rpl_gtid_slave_state_table_name.str,
- thd->stmt_da->sql_errno(), thd->stmt_da->message());
+ thd->get_stmt_da()->sql_errno(),
+ thd->get_stmt_da()->message());
trans_rollback(thd);
sub_id= 0;
thd->is_slave_error= 1;
@@ -4129,7 +4137,8 @@ START SLAVE; . Query: '%s'", expected_error, thd->query());
}
/* If the query was not ignored, it is printed to the general log */
- if (!thd->is_error() || thd->stmt_da->sql_errno() != ER_SLAVE_IGNORED_TABLE)
+ if (!thd->is_error() ||
+ thd->get_stmt_da()->sql_errno() != ER_SLAVE_IGNORED_TABLE)
general_log_write(thd, COM_QUERY, thd->query(), thd->query_length());
else
{
@@ -4154,14 +4163,14 @@ compare_errors:
not exist errors", we silently clear the error if TEMPORARY was used.
*/
if (thd->lex->sql_command == SQLCOM_DROP_TABLE && thd->lex->drop_temporary &&
- thd->is_error() && thd->stmt_da->sql_errno() == ER_BAD_TABLE_ERROR &&
+ thd->is_error() && thd->get_stmt_da()->sql_errno() == ER_BAD_TABLE_ERROR &&
!expected_error)
- thd->stmt_da->reset_diagnostics_area();
+ thd->get_stmt_da()->reset_diagnostics_area();
/*
If we expected a non-zero error code, and we don't get the same error
code, and it should be ignored or is related to a concurrency issue.
*/
- actual_error= thd->is_error() ? thd->stmt_da->sql_errno() : 0;
+ actual_error= thd->is_error() ? thd->get_stmt_da()->sql_errno() : 0;
DBUG_PRINT("info",("expected_error: %d sql_errno: %d",
expected_error, actual_error));
@@ -4179,7 +4188,7 @@ Error on slave: actual message='%s', error code=%d. \
Default database: '%s'. Query: '%s'",
ER_SAFE(expected_error),
expected_error,
- actual_error ? thd->stmt_da->message() : "no error",
+ actual_error ? thd->get_stmt_da()->message() : "no error",
actual_error,
print_slave_db_safe(db), query_arg);
thd->is_slave_error= 1;
@@ -4203,7 +4212,7 @@ Default database: '%s'. Query: '%s'",
{
rli->report(ERROR_LEVEL, actual_error,
"Error '%s' on query. Default database: '%s'. Query: '%s'",
- (actual_error ? thd->stmt_da->message() :
+ (actual_error ? thd->get_stmt_da()->message() :
"unexpected success or fatal error"),
print_slave_db_safe(thd->db), query_arg);
thd->is_slave_error= 1;
@@ -5436,33 +5445,33 @@ void Load_log_event::print(FILE* file_arg, PRINT_EVENT_INFO* print_event_info,
my_b_printf(&cache, "%sLOAD DATA ",
commented ? "# " : "");
if (check_fname_outside_temp_buf())
- my_b_printf(&cache, "LOCAL ");
+ my_b_write_string(&cache, "LOCAL ");
my_b_printf(&cache, "INFILE '%-*s' ", fname_len, fname);
if (sql_ex.opt_flags & REPLACE_FLAG)
- my_b_printf(&cache,"REPLACE ");
+ my_b_write_string(&cache, "REPLACE ");
else if (sql_ex.opt_flags & IGNORE_FLAG)
- my_b_printf(&cache,"IGNORE ");
+ my_b_write_string(&cache, "IGNORE ");
my_b_printf(&cache, "INTO TABLE `%s`", table_name);
- my_b_printf(&cache, " FIELDS TERMINATED BY ");
+ my_b_write_string(&cache, " FIELDS TERMINATED BY ");
pretty_print_str(&cache, sql_ex.field_term, sql_ex.field_term_len);
if (sql_ex.opt_flags & OPT_ENCLOSED_FLAG)
- my_b_printf(&cache," OPTIONALLY ");
- my_b_printf(&cache, " ENCLOSED BY ");
+ my_b_write_string(&cache, " OPTIONALLY ");
+ my_b_write_string(&cache, " ENCLOSED BY ");
pretty_print_str(&cache, sql_ex.enclosed, sql_ex.enclosed_len);
- my_b_printf(&cache, " ESCAPED BY ");
+ my_b_write_string(&cache, " ESCAPED BY ");
pretty_print_str(&cache, sql_ex.escaped, sql_ex.escaped_len);
- my_b_printf(&cache," LINES TERMINATED BY ");
+ my_b_write_string(&cache, " LINES TERMINATED BY ");
pretty_print_str(&cache, sql_ex.line_term, sql_ex.line_term_len);
if (sql_ex.line_start)
{
- my_b_printf(&cache," STARTING BY ");
+ my_b_write_string(&cache," STARTING BY ");
pretty_print_str(&cache, sql_ex.line_start, sql_ex.line_start_len);
}
if ((long) skip_lines > 0)
@@ -5472,16 +5481,16 @@ void Load_log_event::print(FILE* file_arg, PRINT_EVENT_INFO* print_event_info,
{
uint i;
const char* field = fields;
- my_b_printf(&cache, " (");
+ my_b_write_string(&cache, " (");
for (i = 0; i < num_fields; i++)
{
if (i)
- my_b_printf(&cache, ",");
+ my_b_write_byte(&cache, ',');
my_b_printf(&cache, "%`s", field);
field += field_lens[i] + 1;
}
- my_b_printf(&cache, ")");
+ my_b_write_byte(&cache, ')');
}
my_b_printf(&cache, "%s\n", print_event_info->delimiter);
@@ -5609,7 +5618,7 @@ int Load_log_event::do_apply_event(NET* net, Relay_log_info const *rli,
{
thd->set_time(when, when_sec_part);
thd->set_query_id(next_query_id());
- thd->warning_info->opt_clear_warning_info(thd->query_id);
+ thd->get_stmt_da()->opt_clear_warning_info(thd->query_id);
TABLE_LIST tables;
tables.init_one_table(thd->strmake(thd->db, thd->db_length),
@@ -5720,7 +5729,8 @@ int Load_log_event::do_apply_event(NET* net, Relay_log_info const *rli,
update it inside mysql_load().
*/
List<Item> tmp_list;
- if (mysql_load(thd, &ex, &tables, field_list, tmp_list, tmp_list,
+ if (open_temporary_tables(thd, &tables) ||
+ mysql_load(thd, &ex, &tables, field_list, tmp_list, tmp_list,
handle_dup, ignore, net != 0))
thd->is_slave_error= 1;
if (thd->cuted_fields)
@@ -5755,9 +5765,9 @@ error:
thd->catalog= 0;
thd->set_db(NULL, 0); /* will free the current database */
thd->reset_query();
- thd->stmt_da->can_overwrite_status= TRUE;
+ thd->get_stmt_da()->set_overwrite_status(true);
thd->is_error() ? trans_rollback_stmt(thd) : trans_commit_stmt(thd);
- thd->stmt_da->can_overwrite_status= FALSE;
+ thd->get_stmt_da()->set_overwrite_status(false);
close_thread_tables(thd);
/*
- If inside a multi-statement transaction,
@@ -5784,8 +5794,8 @@ error:
int sql_errno;
if (thd->is_error())
{
- err= thd->stmt_da->message();
- sql_errno= thd->stmt_da->sql_errno();
+ err= thd->get_stmt_da()->message();
+ sql_errno= thd->get_stmt_da()->sql_errno();
}
else
{
@@ -5855,7 +5865,7 @@ void Rotate_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
if (print_event_info->short_form)
return;
print_header(&cache, print_event_info, FALSE);
- my_b_printf(&cache, "\tRotate to ");
+ my_b_write_string(&cache, "\tRotate to ");
if (new_log_ident)
my_b_write(&cache, (uchar*) new_log_ident, (uint)ident_len);
my_b_printf(&cache, " pos: %s\n", llstr(pos, buf));
@@ -6061,9 +6071,9 @@ void Binlog_checkpoint_log_event::print(FILE *file,
if (print_event_info->short_form)
return;
print_header(&cache, print_event_info, FALSE);
- my_b_printf(&cache, "\tBinlog checkpoint ");
+ my_b_write_string(&cache, "\tBinlog checkpoint ");
my_b_write(&cache, (uchar*)binlog_file_name, binlog_file_len);
- my_b_printf(&cache, "\n");
+ my_b_write_byte(&cache, '\n');
}
#endif /* MYSQL_CLIENT */
@@ -6264,7 +6274,7 @@ Gtid_log_event::do_apply_event(Relay_log_info const *rli)
{
/* Need to reset prior "ok" status to give an error. */
thd->clear_error();
- thd->stmt_da->reset_diagnostics_area();
+ thd->get_stmt_da()->reset_diagnostics_area();
if (mysql_bin_log.check_strict_gtid_sequence(this->domain_id,
this->server_id, this->seq_no))
return 1;
@@ -6669,7 +6679,7 @@ void Intvar_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
if (!print_event_info->short_form)
{
print_header(&cache, print_event_info, FALSE);
- my_b_printf(&cache, "\tIntvar\n");
+ my_b_write_string(&cache, "\tIntvar\n");
}
my_b_printf(&cache, "SET ");
@@ -6796,7 +6806,7 @@ void Rand_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
if (!print_event_info->short_form)
{
print_header(&cache, print_event_info, FALSE);
- my_b_printf(&cache, "\tRand\n");
+ my_b_write_string(&cache, "\tRand\n");
}
my_b_printf(&cache, "SET @@RAND_SEED1=%s, @@RAND_SEED2=%s%s\n",
llstr(seed1, llbuff),llstr(seed2, llbuff2),
@@ -6960,7 +6970,8 @@ int Xid_log_event::do_apply_event(Relay_log_info const *rli)
"Error during XID COMMIT: failed to update GTID state in "
"%s.%s: %d: %s",
"mysql", rpl_gtid_slave_state_table_name.str,
- thd->stmt_da->sql_errno(), thd->stmt_da->message());
+ thd->get_stmt_da()->sql_errno(),
+ thd->get_stmt_da()->message());
trans_rollback(thd);
thd->is_slave_error= 1;
return err;
@@ -7232,7 +7243,7 @@ bool User_var_log_event::write(IO_CACHE* file)
char buf[UV_NAME_LEN_SIZE];
char buf1[UV_VAL_IS_NULL + UV_VAL_TYPE_SIZE +
UV_CHARSET_NUMBER_SIZE + UV_VAL_LEN_SIZE];
- uchar buf2[max(8, DECIMAL_MAX_FIELD_SIZE + 2)], *pos= buf2;
+ uchar buf2[MY_MAX(8, DECIMAL_MAX_FIELD_SIZE + 2)], *pos= buf2;
uint unsigned_len= 0;
uint buf1_length;
ulong event_length;
@@ -7306,10 +7317,10 @@ void User_var_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
if (!print_event_info->short_form)
{
print_header(&cache, print_event_info, FALSE);
- my_b_printf(&cache, "\tUser_var\n");
+ my_b_write_string(&cache, "\tUser_var\n");
}
- my_b_printf(&cache, "SET @");
+ my_b_write_string(&cache, "SET @");
my_b_write_backtick_quote(&cache, name, name_len);
if (is_null)
@@ -7706,7 +7717,7 @@ void Stop_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info)
return;
print_header(&cache, print_event_info, FALSE);
- my_b_printf(&cache, "\tStop\n");
+ my_b_write_string(&cache, "\tStop\n");
}
#endif /* MYSQL_CLIENT */
@@ -7907,7 +7918,7 @@ void Create_file_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info
That one is for "file_id: etc" below: in mysqlbinlog we want the #, in
SHOW BINLOG EVENTS we don't.
*/
- my_b_printf(&cache, "#");
+ my_b_write_byte(&cache, '#');
}
my_b_printf(&cache, " file_id: %d block_len: %d\n", file_id, block_len);
@@ -8608,12 +8619,12 @@ void Execute_load_query_log_event::print(FILE* file,
if (local_fname)
{
my_b_write(&cache, (uchar*) query, fn_pos_start);
- my_b_printf(&cache, " LOCAL INFILE \'");
+ my_b_write_string(&cache, " LOCAL INFILE \'");
my_b_printf(&cache, "%s", local_fname);
- my_b_printf(&cache, "\'");
+ my_b_write_string(&cache, "\'");
if (dup_handling == LOAD_DUP_REPLACE)
- my_b_printf(&cache, " REPLACE");
- my_b_printf(&cache, " INTO");
+ my_b_write_string(&cache, " REPLACE");
+ my_b_write_string(&cache, " INTO");
my_b_write(&cache, (uchar*) query + fn_pos_end, q_len-fn_pos_end);
my_b_printf(&cache, "\n%s\n", print_event_info->delimiter);
}
@@ -9016,7 +9027,7 @@ int Rows_log_event::do_add_row_data(uchar *row_data, size_t length)
trigger false warnings.
*/
#ifndef HAVE_valgrind
- DBUG_DUMP("row_data", row_data, min(length, 32));
+ DBUG_DUMP("row_data", row_data, MY_MIN(length, 32));
#endif
DBUG_ASSERT(m_rows_buf <= m_rows_cur);
@@ -9141,7 +9152,7 @@ int Rows_log_event::do_apply_event(Relay_log_info const *rli)
if (open_and_lock_tables(thd, rli->tables_to_lock, FALSE, 0))
{
- uint actual_error= thd->stmt_da->sql_errno();
+ uint actual_error= thd->get_stmt_da()->sql_errno();
if (thd->is_slave_error || thd->is_fatal_error)
{
/*
@@ -9152,7 +9163,7 @@ int Rows_log_event::do_apply_event(Relay_log_info const *rli)
*/
rli->report(ERROR_LEVEL, actual_error,
"Error executing row event: '%s'",
- (actual_error ? thd->stmt_da->message() :
+ (actual_error ? thd->get_stmt_da()->message() :
"unexpected success or fatal error"));
thd->is_slave_error= 1;
}
@@ -10098,7 +10109,7 @@ int Table_map_log_event::rewrite_db(const char* new_db, size_t new_len,
DBUG_ENTER("Table_map_log_event::rewrite_db");
DBUG_ASSERT(temp_buf);
- uint header_len= min(desc->common_header_len,
+ uint header_len= MY_MIN(desc->common_header_len,
LOG_EVENT_MINIMAL_HEADER_LEN) + TABLE_MAP_HEADER_LEN;
int len_diff;
@@ -10485,7 +10496,7 @@ void Table_map_log_event::print(FILE *, PRINT_EVENT_INFO *print_event_info)
print_header(&print_event_info->head_cache, print_event_info, TRUE);
my_b_printf(&print_event_info->head_cache,
"\tTable_map: %`s.%`s mapped to number %lu\n",
- m_dbnam, m_tblnam, m_table_id);
+ m_dbnam, m_tblnam, (ulong) m_table_id);
print_base64(&print_event_info->body_cache, print_event_info, TRUE);
}
}
@@ -11091,7 +11102,7 @@ int Rows_log_event::find_key()
We can only use a non-unique key if it allows range scans (ie. skip
FULLTEXT indexes and such).
*/
- last_part= key->key_parts - 1;
+ last_part= key->user_defined_key_parts - 1;
DBUG_PRINT("info", ("Index %s rec_per_key[%u]= %lu",
key->name, last_part, key->rec_per_key[last_part]));
if (!(m_table->file->index_flags(i, last_part, 1) & HA_READ_NEXT))
@@ -11369,7 +11380,7 @@ int Rows_log_event::find_row(const Relay_log_info *rli)
field in the BI image that is null and part of UNNI.
*/
bool null_found= FALSE;
- for (uint i=0; i < keyinfo->key_parts && !null_found; i++)
+ for (uint i=0; i < keyinfo->user_defined_key_parts && !null_found; i++)
{
uint fieldnr= keyinfo->key_part[i].fieldnr - 1;
Field **f= table->field+fieldnr;
diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc
index 698118e3bda..6623d7655d7 100644
--- a/sql/log_event_old.cc
+++ b/sql/log_event_old.cc
@@ -99,7 +99,7 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, const Relay_log_info
if (open_and_lock_tables(ev_thd, rli->tables_to_lock, FALSE, 0))
{
- uint actual_error= ev_thd->stmt_da->sql_errno();
+ uint actual_error= ev_thd->get_stmt_da()->sql_errno();
if (ev_thd->is_slave_error || ev_thd->is_fatal_error)
{
/*
@@ -108,7 +108,7 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, const Relay_log_info
*/
rli->report(ERROR_LEVEL, actual_error,
"Error '%s' on opening tables",
- (actual_error ? ev_thd->stmt_da->message() :
+ (actual_error ? ev_thd->get_stmt_da()->message() :
"unexpected success or fatal error"));
ev_thd->is_slave_error= 1;
}
@@ -243,10 +243,10 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, const Relay_log_info
break;
default:
- rli->report(ERROR_LEVEL, ev_thd->stmt_da->sql_errno(),
+ rli->report(ERROR_LEVEL, ev_thd->get_stmt_da()->sql_errno(),
"Error in %s event: row application failed. %s",
ev->get_type_str(),
- ev_thd->is_error() ? ev_thd->stmt_da->message() : "");
+ ev_thd->is_error() ? ev_thd->get_stmt_da()->message() : "");
thd->is_slave_error= 1;
break;
}
@@ -260,12 +260,12 @@ Old_rows_log_event::do_apply_event(Old_rows_log_event *ev, const Relay_log_info
if (error)
{ /* error has occured during the transaction */
- rli->report(ERROR_LEVEL, ev_thd->stmt_da->sql_errno(),
+ rli->report(ERROR_LEVEL, ev_thd->get_stmt_da()->sql_errno(),
"Error in %s event: error during transaction execution "
"on table %s.%s. %s",
ev->get_type_str(), table->s->db.str,
table->s->table_name.str,
- ev_thd->is_error() ? ev_thd->stmt_da->message() : "");
+ ev_thd->is_error() ? ev_thd->get_stmt_da()->message() : "");
/*
If one day we honour --skip-slave-errors in row-based replication, and
@@ -1406,7 +1406,7 @@ int Old_rows_log_event::do_add_row_data(uchar *row_data, size_t length)
trigger false warnings.
*/
#ifndef HAVE_valgrind
- DBUG_DUMP("row_data", row_data, min(length, 32));
+ DBUG_DUMP("row_data", row_data, MY_MIN(length, 32));
#endif
DBUG_ASSERT(m_rows_buf <= m_rows_cur);
@@ -2366,7 +2366,7 @@ int Old_rows_log_event::find_row(const Relay_log_info *rli)
field in the BI image that is null and part of UNNI.
*/
bool null_found= FALSE;
- for (uint i=0; i < keyinfo->key_parts && !null_found; i++)
+ for (uint i=0; i < keyinfo->user_defined_key_parts && !null_found; i++)
{
uint fieldnr= keyinfo->key_part[i].fieldnr - 1;
Field **f= table->field+fieldnr;
diff --git a/sql/mdl.cc b/sql/mdl.cc
index 03593f150bd..c3a78f4c40b 100644
--- a/sql/mdl.cc
+++ b/sql/mdl.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2007, 2011, Oracle and/or its affiliates.
+/* Copyright (c) 2007, 2012, Oracle and/or its affiliates.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -14,9 +14,9 @@
51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */
-#include "mdl.h"
#include "sql_class.h"
#include "debug_sync.h"
+#include "sql_array.h"
#include <hash.h>
#include <mysqld_error.h>
#include <mysql/plugin.h>
@@ -29,7 +29,7 @@ static PSI_mutex_key key_MDL_wait_LOCK_wait_status;
static PSI_mutex_info all_mdl_mutexes[]=
{
- { &key_MDL_map_mutex, "MDL_map::mutex", PSI_FLAG_GLOBAL},
+ { &key_MDL_map_mutex, "MDL_map::mutex", 0},
{ &key_MDL_wait_LOCK_wait_status, "MDL_wait::LOCK_wait_status", 0}
};
@@ -114,23 +114,28 @@ class MDL_object_lock_cache_adapter;
/**
- A collection of all MDL locks. A singleton,
- there is only one instance of the map in the server.
+ A partition in a collection of all MDL locks.
+ MDL_map is partitioned for scalability reasons.
Maps MDL_key to MDL_lock instances.
*/
-class MDL_map
+class MDL_map_partition
{
public:
- void init();
- void destroy();
- MDL_lock *find_or_insert(const MDL_key *key);
- unsigned long get_lock_owner(const MDL_key *key);
- void remove(MDL_lock *lock);
+ MDL_map_partition();
+ ~MDL_map_partition();
+ inline MDL_lock *find_or_insert(const MDL_key *mdl_key,
+ my_hash_value_type hash_value);
+ unsigned long get_lock_owner(const MDL_key *key,
+ my_hash_value_type hash_value);
+ inline void remove(MDL_lock *lock);
+ my_hash_value_type get_key_hash(const MDL_key *mdl_key) const
+ {
+ return my_calc_hash(&m_locks, mdl_key->ptr(), mdl_key->length());
+ }
private:
bool move_from_hash_to_lock_mutex(MDL_lock *lock);
-private:
- /** All acquired locks in the server. */
+ /** A partition of all acquired locks in the server. */
HASH m_locks;
/* Protects access to m_locks hash. */
mysql_mutex_t m_mutex;
@@ -153,6 +158,31 @@ private:
I_P_List_counter>
Lock_cache;
Lock_cache m_unused_locks_cache;
+};
+
+
+/**
+ Start-up parameter for the number of partitions of the MDL_lock hash.
+*/
+ulong mdl_locks_hash_partitions;
+
+/**
+ A collection of all MDL locks. A singleton,
+ there is only one instance of the map in the server.
+ Contains instances of MDL_map_partition
+*/
+
+class MDL_map
+{
+public:
+ void init();
+ void destroy();
+ MDL_lock *find_or_insert(const MDL_key *key);
+ unsigned long get_lock_owner(const MDL_key *key);
+ void remove(MDL_lock *lock);
+private:
+ /** Array of partitions where the locks are actually stored. */
+ Dynamic_array<MDL_map_partition *> m_partitions;
/** Pre-allocated MDL_lock object for GLOBAL namespace. */
MDL_lock *m_global_lock;
/** Pre-allocated MDL_lock object for COMMIT namespace. */
@@ -319,7 +349,7 @@ Deadlock_detection_visitor::opt_change_victim_to(MDL_context *new_victim)
class MDL_lock
{
public:
- typedef uchar bitmap_t;
+ typedef unsigned short bitmap_t;
class Ticket_list
{
@@ -400,7 +430,9 @@ public:
bool can_grant_lock(enum_mdl_type type, MDL_context *requstor_ctx,
bool ignore_lock_priority) const;
- inline static MDL_lock *create(const MDL_key *key);
+ inline static MDL_lock *create(const MDL_key *key,
+ MDL_map_partition *map_part);
+
inline unsigned long get_lock_owner() const;
void reschedule_waiters();
@@ -428,13 +460,14 @@ public:
public:
- MDL_lock(const MDL_key *key_arg)
+ MDL_lock(const MDL_key *key_arg, MDL_map_partition *map_part)
: key(key_arg),
m_hog_lock_count(0),
m_ref_usage(0),
m_ref_release(0),
m_is_destroyed(FALSE),
- m_version(0)
+ m_version(0),
+ m_map_part(map_part)
{
mysql_prlock_init(key_MDL_lock_rwlock, &m_rwlock);
}
@@ -447,18 +480,18 @@ public:
public:
/**
These three members are used to make it possible to separate
- the mdl_locks.m_mutex mutex and MDL_lock::m_rwlock in
+ the MDL_map_partition::m_mutex mutex and MDL_lock::m_rwlock in
MDL_map::find_or_insert() for increased scalability.
The 'm_is_destroyed' member is only set by destroyers that
- have both the mdl_locks.m_mutex and MDL_lock::m_rwlock, thus
+ have both the MDL_map_partition::m_mutex and MDL_lock::m_rwlock, thus
holding any of the mutexes is sufficient to read it.
The 'm_ref_usage; is incremented under protection by
- mdl_locks.m_mutex, but when 'm_is_destroyed' is set to TRUE, this
+ MDL_map_partition::m_mutex, but when 'm_is_destroyed' is set to TRUE, this
member is moved to be protected by the MDL_lock::m_rwlock.
This means that the MDL_map::find_or_insert() which only
holds the MDL_lock::m_rwlock can compare it to 'm_ref_release'
- without acquiring mdl_locks.m_mutex again and if equal it can also
- destroy the lock object safely.
+ without acquiring MDL_map_partition::m_mutex again and if equal
+ it can also destroy the lock object safely.
The 'm_ref_release' is incremented under protection by
MDL_lock::m_rwlock.
Note since we are only interested in equality of these two
@@ -472,19 +505,23 @@ public:
/**
We use the same idea and an additional version counter to support
caching of unused MDL_lock object for further re-use.
- This counter is incremented while holding both MDL_map::m_mutex and
- MDL_lock::m_rwlock locks each time when a MDL_lock is moved from
- the hash to the unused objects list (or destroyed).
+ This counter is incremented while holding both MDL_map_partition::m_mutex
+ and MDL_lock::m_rwlock locks each time when a MDL_lock is moved from
+ the partitioned hash to the paritioned unused objects list (or destroyed).
A thread, which has found a MDL_lock object for the key in the hash
- and then released the MDL_map::m_mutex before acquiring the
+ and then released the MDL_map_partition::m_mutex before acquiring the
MDL_lock::m_rwlock, can determine that this object was moved to the
unused objects list (or destroyed) while it held no locks by comparing
- the version value which it read while holding the MDL_map::m_mutex
+ the version value which it read while holding the MDL_map_partition::m_mutex
with the value read after acquiring the MDL_lock::m_rwlock.
Note that since it takes several years to overflow this counter such
theoretically possible overflows should not have any practical effects.
*/
ulonglong m_version;
+ /**
+ Partition of MDL_map where the lock is stored.
+ */
+ MDL_map_partition *m_map_part;
};
@@ -497,8 +534,8 @@ public:
class MDL_scoped_lock : public MDL_lock
{
public:
- MDL_scoped_lock(const MDL_key *key_arg)
- : MDL_lock(key_arg)
+ MDL_scoped_lock(const MDL_key *key_arg, MDL_map_partition *map_part)
+ : MDL_lock(key_arg, map_part)
{ }
virtual const bitmap_t *incompatible_granted_types_bitmap() const
@@ -538,8 +575,8 @@ private:
class MDL_object_lock : public MDL_lock
{
public:
- MDL_object_lock(const MDL_key *key_arg)
- : MDL_lock(key_arg)
+ MDL_object_lock(const MDL_key *key_arg, MDL_map_partition *map_part)
+ : MDL_lock(key_arg, map_part)
{ }
/**
@@ -572,7 +609,7 @@ public:
}
virtual bool needs_notification(const MDL_ticket *ticket) const
{
- return ticket->is_upgradable_or_exclusive();
+ return (ticket->get_type() >= MDL_SHARED_NO_WRITE);
}
virtual void notify_conflicting_locks(MDL_context *ctx);
@@ -669,33 +706,62 @@ void mdl_destroy()
}
-/** Initialize the global hash containing all MDL locks. */
+/** Initialize the container for all MDL locks. */
void MDL_map::init()
{
MDL_key global_lock_key(MDL_key::GLOBAL, "", "");
MDL_key commit_lock_key(MDL_key::COMMIT, "", "");
+ m_global_lock= MDL_lock::create(&global_lock_key, NULL);
+ m_commit_lock= MDL_lock::create(&commit_lock_key, NULL);
+
+ for (uint i= 0; i < mdl_locks_hash_partitions; i++)
+ {
+ MDL_map_partition *part= new (std::nothrow) MDL_map_partition();
+ m_partitions.append(part);
+ }
+}
+
+
+/** Initialize the partition in the container with all MDL locks. */
+
+MDL_map_partition::MDL_map_partition()
+{
mysql_mutex_init(key_MDL_map_mutex, &m_mutex, NULL);
my_hash_init(&m_locks, &my_charset_bin, 16 /* FIXME */, 0, 0,
mdl_locks_key, 0, 0);
- m_global_lock= MDL_lock::create(&global_lock_key);
- m_commit_lock= MDL_lock::create(&commit_lock_key);
-}
+};
/**
- Destroy the global hash containing all MDL locks.
+ Destroy the container for all MDL locks.
@pre It must be empty.
*/
void MDL_map::destroy()
{
+ MDL_lock::destroy(m_global_lock);
+ MDL_lock::destroy(m_commit_lock);
+
+ while (m_partitions.elements() > 0)
+ {
+ MDL_map_partition *part= m_partitions.pop();
+ delete part;
+ }
+}
+
+
+/**
+ Destroy the partition in container for all MDL locks.
+ @pre It must be empty.
+*/
+
+MDL_map_partition::~MDL_map_partition()
+{
DBUG_ASSERT(!m_locks.records);
mysql_mutex_destroy(&m_mutex);
my_hash_free(&m_locks);
- MDL_lock::destroy(m_global_lock);
- MDL_lock::destroy(m_commit_lock);
MDL_object_lock *lock;
while ((lock= m_unused_locks_cache.pop_front()))
@@ -715,13 +781,12 @@ void MDL_map::destroy()
MDL_lock* MDL_map::find_or_insert(const MDL_key *mdl_key)
{
MDL_lock *lock;
- my_hash_value_type hash_value;
if (mdl_key->mdl_namespace() == MDL_key::GLOBAL ||
mdl_key->mdl_namespace() == MDL_key::COMMIT)
{
/*
- Avoid locking m_mutex when lock for GLOBAL or COMMIT namespace is
+ Avoid locking any m_mutex when lock for GLOBAL or COMMIT namespace is
requested. Return pointer to pre-allocated MDL_lock instance instead.
Such an optimization allows to save one mutex lock/unlock for any
statement changing data.
@@ -739,8 +804,27 @@ MDL_lock* MDL_map::find_or_insert(const MDL_key *mdl_key)
return lock;
}
+ my_hash_value_type hash_value= m_partitions.at(0)->get_key_hash(mdl_key);
+ uint part_id= hash_value % mdl_locks_hash_partitions;
+ MDL_map_partition *part= m_partitions.at(part_id);
+
+ return part->find_or_insert(mdl_key, hash_value);
+}
+
+
+/**
+ Find MDL_lock object corresponding to the key and hash value in
+ MDL_map partition, create it if it does not exist.
+
+ @retval non-NULL - Success. MDL_lock instance for the key with
+ locked MDL_lock::m_rwlock.
+ @retval NULL - Failure (OOM).
+*/
- hash_value= my_calc_hash(&m_locks, mdl_key->ptr(), mdl_key->length());
+MDL_lock* MDL_map_partition::find_or_insert(const MDL_key *mdl_key,
+ my_hash_value_type hash_value)
+{
+ MDL_lock *lock;
retry:
mysql_mutex_lock(&m_mutex);
@@ -773,7 +857,7 @@ retry:
}
else
{
- lock= MDL_lock::create(mdl_key);
+ lock= MDL_lock::create(mdl_key, this);
}
if (!lock || my_hash_insert(&m_locks, (uchar*)lock))
@@ -804,7 +888,7 @@ retry:
/**
- Release mdl_locks.m_mutex mutex and lock MDL_lock::m_rwlock for lock
+ Release MDL_map_partition::m_mutex mutex and lock MDL_lock::m_rwlock for lock
object from the hash. Handle situation when object was released
while we held no locks.
@@ -813,7 +897,7 @@ retry:
should re-try looking up MDL_lock object in the hash.
*/
-bool MDL_map::move_from_hash_to_lock_mutex(MDL_lock *lock)
+bool MDL_map_partition::move_from_hash_to_lock_mutex(MDL_lock *lock)
{
ulonglong version;
@@ -822,8 +906,8 @@ bool MDL_map::move_from_hash_to_lock_mutex(MDL_lock *lock)
/*
We increment m_ref_usage which is a reference counter protected by
- mdl_locks.m_mutex under the condition it is present in the hash and
- m_is_destroyed is FALSE.
+ MDL_map_partition::m_mutex under the condition it is present in the hash
+ and m_is_destroyed is FALSE.
*/
lock->m_ref_usage++;
/* Read value of the version counter under protection of m_mutex lock. */
@@ -897,22 +981,36 @@ MDL_map::get_lock_owner(const MDL_key *mdl_key)
}
else
{
- my_hash_value_type hash_value= my_calc_hash(&m_locks,
- mdl_key->ptr(),
- mdl_key->length());
- mysql_mutex_lock(&m_mutex);
- lock= (MDL_lock*) my_hash_search_using_hash_value(&m_locks,
- hash_value,
- mdl_key->ptr(),
- mdl_key->length());
- if (lock)
- res= lock->get_lock_owner();
- mysql_mutex_unlock(&m_mutex);
+ my_hash_value_type hash_value= m_partitions.at(0)->get_key_hash(mdl_key);
+ uint part_id= hash_value % mdl_locks_hash_partitions;
+ MDL_map_partition *part= m_partitions.at(part_id);
+ res= part->get_lock_owner(mdl_key, hash_value);
}
return res;
}
+
+unsigned long
+MDL_map_partition::get_lock_owner(const MDL_key *mdl_key,
+ my_hash_value_type hash_value)
+{
+ MDL_lock *lock;
+ unsigned long res= 0;
+
+ mysql_mutex_lock(&m_mutex);
+ lock= (MDL_lock*) my_hash_search_using_hash_value(&m_locks,
+ hash_value,
+ mdl_key->ptr(),
+ mdl_key->length());
+ if (lock)
+ res= lock->get_lock_owner();
+ mysql_mutex_unlock(&m_mutex);
+
+ return res;
+}
+
+
/**
Destroy MDL_lock object or delegate this responsibility to
whatever thread that holds the last outstanding reference to
@@ -932,28 +1030,41 @@ void MDL_map::remove(MDL_lock *lock)
return;
}
+ lock->m_map_part->remove(lock);
+}
+
+
+/**
+ Destroy MDL_lock object belonging to specific MDL_map
+ partition or delegate this responsibility to whatever
+ thread that holds the last outstanding reference to it.
+*/
+
+void MDL_map_partition::remove(MDL_lock *lock)
+{
mysql_mutex_lock(&m_mutex);
my_hash_delete(&m_locks, (uchar*) lock);
/*
To let threads holding references to the MDL_lock object know that it was
moved to the list of unused objects or destroyed, we increment the version
- counter under protection of both MDL_map::m_mutex and MDL_lock::m_rwlock
- locks. This allows us to read the version value while having either one
- of those locks.
+ counter under protection of both MDL_map_partition::m_mutex and
+ MDL_lock::m_rwlock locks. This allows us to read the version value while
+ having either one of those locks.
*/
lock->m_version++;
if ((lock->key.mdl_namespace() != MDL_key::SCHEMA) &&
- (m_unused_locks_cache.elements() < mdl_locks_cache_size))
+ (m_unused_locks_cache.elements() <
+ mdl_locks_cache_size/mdl_locks_hash_partitions))
{
/*
This is an object of MDL_object_lock type and the cache of unused
objects has not reached its maximum size yet. So instead of destroying
object we move it to the list of unused objects to allow its later
re-use with possibly different key. Any threads holding references to
- this object (owning MDL_map::m_mutex or MDL_lock::m_rwlock) will notice
- this thanks to the fact that we have changed the MDL_lock::m_version
- counter.
+ this object (owning MDL_map_partition::m_mutex or MDL_lock::m_rwlock)
+ will notice this thanks to the fact that we have changed the
+ MDL_lock::m_version counter.
*/
DBUG_ASSERT(lock->key.mdl_namespace() != MDL_key::GLOBAL &&
lock->key.mdl_namespace() != MDL_key::COMMIT);
@@ -970,8 +1081,8 @@ void MDL_map::remove(MDL_lock *lock)
has the responsibility to release it.
Setting of m_is_destroyed to TRUE while holding _both_
- mdl_locks.m_mutex and MDL_lock::m_rwlock mutexes transfers the
- protection of m_ref_usage from mdl_locks.m_mutex to
+ MDL_map_partition::m_mutex and MDL_lock::m_rwlock mutexes transfers
+ the protection of m_ref_usage from MDL_map_partition::m_mutex to
MDL_lock::m_rwlock while removal of the object from the hash
(and cache of unused objects) makes it read-only. Therefore
whoever acquires MDL_lock::m_rwlock next will see the most up
@@ -1001,7 +1112,8 @@ void MDL_map::remove(MDL_lock *lock)
*/
MDL_context::MDL_context()
- : m_thd(NULL),
+ :
+ m_owner(NULL),
m_needs_thr_lock_abort(FALSE),
m_waiting_for(NULL)
{
@@ -1023,9 +1135,9 @@ MDL_context::MDL_context()
void MDL_context::destroy()
{
- DBUG_ASSERT(m_tickets[MDL_STATEMENT].is_empty() &&
- m_tickets[MDL_TRANSACTION].is_empty() &&
- m_tickets[MDL_EXPLICIT].is_empty());
+ DBUG_ASSERT(m_tickets[MDL_STATEMENT].is_empty());
+ DBUG_ASSERT(m_tickets[MDL_TRANSACTION].is_empty());
+ DBUG_ASSERT(m_tickets[MDL_EXPLICIT].is_empty());
mysql_prlock_destroy(&m_LOCK_waiting_for);
}
@@ -1090,16 +1202,17 @@ void MDL_request::init(const MDL_key *key_arg,
@note Also chooses an MDL_lock descendant appropriate for object namespace.
*/
-inline MDL_lock *MDL_lock::create(const MDL_key *mdl_key)
+inline MDL_lock *MDL_lock::create(const MDL_key *mdl_key,
+ MDL_map_partition *map_part)
{
switch (mdl_key->mdl_namespace())
{
case MDL_key::GLOBAL:
case MDL_key::SCHEMA:
case MDL_key::COMMIT:
- return new MDL_scoped_lock(mdl_key);
+ return new (std::nothrow) MDL_scoped_lock(mdl_key, map_part);
default:
- return new MDL_object_lock(mdl_key);
+ return new (std::nothrow) MDL_object_lock(mdl_key, map_part);
}
}
@@ -1124,7 +1237,8 @@ MDL_ticket *MDL_ticket::create(MDL_context *ctx_arg, enum_mdl_type type_arg
#endif
)
{
- return new MDL_ticket(ctx_arg, type_arg
+ return new (std::nothrow)
+ MDL_ticket(ctx_arg, type_arg
#ifndef DBUG_OFF
, duration_arg
#endif
@@ -1148,7 +1262,7 @@ void MDL_ticket::destroy(MDL_ticket *ticket)
uint MDL_ticket::get_deadlock_weight() const
{
return (m_lock->key.mdl_namespace() == MDL_key::GLOBAL ||
- m_type >= MDL_SHARED_NO_WRITE ?
+ m_type >= MDL_SHARED_UPGRADABLE ?
DEADLOCK_WEIGHT_DDL : DEADLOCK_WEIGHT_DML);
}
@@ -1217,6 +1331,7 @@ void MDL_wait::reset_status()
/**
Wait for the status to be assigned to this wait slot.
+ @param owner MDL context owner.
@param abs_timeout Absolute time after which waiting should stop.
@param set_status_on_timeout TRUE - If in case of timeout waiting
context should close the wait slot by
@@ -1228,7 +1343,7 @@ void MDL_wait::reset_status()
*/
MDL_wait::enum_wait_status
-MDL_wait::timed_wait(THD *thd, struct timespec *abs_timeout,
+MDL_wait::timed_wait(MDL_context_owner *owner, struct timespec *abs_timeout,
bool set_status_on_timeout,
const PSI_stage_info *wait_state_name)
{
@@ -1239,16 +1354,16 @@ MDL_wait::timed_wait(THD *thd, struct timespec *abs_timeout,
mysql_mutex_lock(&m_LOCK_wait_status);
- THD_ENTER_COND(thd, &m_COND_wait_status, &m_LOCK_wait_status,
- wait_state_name, & old_stage);
- thd_wait_begin(thd, THD_WAIT_META_DATA_LOCK);
- while (!m_wait_status && !thd->killed &&
+ owner->ENTER_COND(&m_COND_wait_status, &m_LOCK_wait_status,
+ wait_state_name, & old_stage);
+ thd_wait_begin(NULL, THD_WAIT_META_DATA_LOCK);
+ while (!m_wait_status && !owner->is_killed() &&
wait_result != ETIMEDOUT && wait_result != ETIME)
{
wait_result= mysql_cond_timedwait(&m_COND_wait_status, &m_LOCK_wait_status,
abs_timeout);
}
- thd_wait_end(thd);
+ thd_wait_end(NULL);
if (m_wait_status == EMPTY)
{
@@ -1264,14 +1379,14 @@ MDL_wait::timed_wait(THD *thd, struct timespec *abs_timeout,
false, which means that the caller intends to restart the
wait.
*/
- if (thd->killed)
+ if (owner->is_killed())
m_wait_status= KILLED;
else if (set_status_on_timeout)
m_wait_status= TIMEOUT;
}
result= m_wait_status;
- thd->EXIT_COND(& old_stage);
+ owner->EXIT_COND(& old_stage);
DBUG_RETURN(result);
}
@@ -1480,22 +1595,12 @@ void MDL_lock::reschedule_waiters()
lock. Arrays of bitmaps which elements specify which granted/waiting locks
are incompatible with type of lock being requested.
- Here is how types of individual locks are translated to type of scoped lock:
-
- ----------------+-------------+
- Type of request | Correspond. |
- for indiv. lock | scoped lock |
- ----------------+-------------+
- S, SH, SR, SW | IS |
- SNW, SNRW, X | IX |
- SNW, SNRW -> X | IX (*) |
-
The first array specifies if particular type of request can be satisfied
if there is granted scoped lock of certain type.
| Type of active |
Request | scoped lock |
- type | IS(**) IX S X |
+ type | IS(*) IX S X |
---------+------------------+
IS | + + + + |
IX | + + - - |
@@ -1508,7 +1613,7 @@ void MDL_lock::reschedule_waiters()
| Pending |
Request | scoped lock |
- type | IS(**) IX S X |
+ type | IS(*) IX S X |
---------+-----------------+
IS | + + + + |
IX | + + - - |
@@ -1518,24 +1623,33 @@ void MDL_lock::reschedule_waiters()
Here: "+" -- means that request can be satisfied
"-" -- means that request can't be satisfied and should wait
- (*) Since for upgradable locks we always take intention exclusive scoped
- lock at the same time when obtaining the shared lock, there is no
- need to obtain such lock during the upgrade itself.
- (**) Since intention shared scoped locks are compatible with all other
- type of locks we don't even have any accounting for them.
+ (*) Since intention shared scoped locks are compatible with all other
+ type of locks we don't even have any accounting for them.
+
+ Note that relation between scoped locks and objects locks requested
+ by statement is not straightforward and is therefore fully defined
+ by SQL-layer.
+ For example, in order to support global read lock implementation
+ SQL-layer acquires IX lock in GLOBAL namespace for each statement
+ that can modify metadata or data (i.e. for each statement that
+ needs SW, SU, SNW, SNRW or X object locks). OTOH, to ensure that
+ DROP DATABASE works correctly with concurrent DDL, IX metadata locks
+ in SCHEMA namespace are acquired for DDL statements which can update
+ metadata in the schema (i.e. which acquire SU, SNW, SNRW and X locks
+ on schema objects) and aren't acquired for DML.
*/
const MDL_lock::bitmap_t MDL_scoped_lock::m_granted_incompatible[MDL_TYPE_END] =
{
MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED),
- MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_INTENTION_EXCLUSIVE), 0, 0, 0, 0, 0,
+ MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_INTENTION_EXCLUSIVE), 0, 0, 0, 0, 0, 0,
MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED) | MDL_BIT(MDL_INTENTION_EXCLUSIVE)
};
const MDL_lock::bitmap_t MDL_scoped_lock::m_waiting_incompatible[MDL_TYPE_END] =
{
MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED),
- MDL_BIT(MDL_EXCLUSIVE), 0, 0, 0, 0, 0, 0
+ MDL_BIT(MDL_EXCLUSIVE), 0, 0, 0, 0, 0, 0, 0
};
@@ -1547,35 +1661,39 @@ const MDL_lock::bitmap_t MDL_scoped_lock::m_waiting_incompatible[MDL_TYPE_END] =
The first array specifies if particular type of request can be satisfied
if there is granted lock of certain type.
- Request | Granted requests for lock |
- type | S SH SR SW SNW SNRW X |
- ----------+------------------------------+
- S | + + + + + + - |
- SH | + + + + + + - |
- SR | + + + + + - - |
- SW | + + + + - - - |
- SNW | + + + - - - - |
- SNRW | + + - - - - - |
- X | - - - - - - - |
- SNW -> X | - - - 0 0 0 0 |
- SNRW -> X | - - 0 0 0 0 0 |
+ Request | Granted requests for lock |
+ type | S SH SR SW SU SNW SNRW X |
+ ----------+----------------------------------+
+ S | + + + + + + + - |
+ SH | + + + + + + + - |
+ SR | + + + + + + - - |
+ SW | + + + + + - - - |
+ SU | + + + + - - - - |
+ SNW | + + + - - - - - |
+ SNRW | + + - - - - - - |
+ X | - - - - - - - - |
+ SU -> X | - - - - 0 0 0 0 |
+ SNW -> X | - - - 0 0 0 0 0 |
+ SNRW -> X | - - 0 0 0 0 0 0 |
The second array specifies if particular type of request can be satisfied
if there is waiting request for the same lock of certain type. In other
words it specifies what is the priority of different lock types.
- Request | Pending requests for lock |
- type | S SH SR SW SNW SNRW X |
- ----------+-----------------------------+
- S | + + + + + + - |
- SH | + + + + + + + |
- SR | + + + + + - - |
- SW | + + + + - - - |
- SNW | + + + + + + - |
- SNRW | + + + + + + - |
- X | + + + + + + + |
- SNW -> X | + + + + + + + |
- SNRW -> X | + + + + + + + |
+ Request | Pending requests for lock |
+ type | S SH SR SW SU SNW SNRW X |
+ ----------+---------------------------------+
+ S | + + + + + + + - |
+ SH | + + + + + + + + |
+ SR | + + + + + + - - |
+ SW | + + + + + - - - |
+ SU | + + + + + + + - |
+ SNW | + + + + + + + - |
+ SNRW | + + + + + + + - |
+ X | + + + + + + + + |
+ SU -> X | + + + + + + + + |
+ SNW -> X | + + + + + + + + |
+ SNRW -> X | + + + + + + + + |
Here: "+" -- means that request can be satisfied
"-" -- means that request can't be satisfied and should wait
@@ -1584,6 +1702,9 @@ const MDL_lock::bitmap_t MDL_scoped_lock::m_waiting_incompatible[MDL_TYPE_END] =
@note In cases then current context already has "stronger" type
of lock on the object it will be automatically granted
thanks to usage of the MDL_context::find_ticket() method.
+
+ @note IX locks are excluded since they are not used for per-object
+ metadata locks.
*/
const MDL_lock::bitmap_t
@@ -1596,14 +1717,17 @@ MDL_object_lock::m_granted_incompatible[MDL_TYPE_END] =
MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) |
MDL_BIT(MDL_SHARED_NO_WRITE),
MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) |
- MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_WRITE),
+ MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_UPGRADABLE),
+ MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) |
+ MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_UPGRADABLE) |
+ MDL_BIT(MDL_SHARED_WRITE),
MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) |
- MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_WRITE) |
- MDL_BIT(MDL_SHARED_READ),
+ MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_UPGRADABLE) |
+ MDL_BIT(MDL_SHARED_WRITE) | MDL_BIT(MDL_SHARED_READ),
MDL_BIT(MDL_EXCLUSIVE) | MDL_BIT(MDL_SHARED_NO_READ_WRITE) |
- MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_WRITE) |
- MDL_BIT(MDL_SHARED_READ) | MDL_BIT(MDL_SHARED_HIGH_PRIO) |
- MDL_BIT(MDL_SHARED)
+ MDL_BIT(MDL_SHARED_NO_WRITE) | MDL_BIT(MDL_SHARED_UPGRADABLE) |
+ MDL_BIT(MDL_SHARED_WRITE) | MDL_BIT(MDL_SHARED_READ) |
+ MDL_BIT(MDL_SHARED_HIGH_PRIO) | MDL_BIT(MDL_SHARED)
};
@@ -1618,6 +1742,7 @@ MDL_object_lock::m_waiting_incompatible[MDL_TYPE_END] =
MDL_BIT(MDL_SHARED_NO_WRITE),
MDL_BIT(MDL_EXCLUSIVE),
MDL_BIT(MDL_EXCLUSIVE),
+ MDL_BIT(MDL_EXCLUSIVE),
0
};
@@ -1690,7 +1815,7 @@ MDL_lock::get_lock_owner() const
MDL_ticket *ticket;
if ((ticket= it++))
- return thd_get_thread_id(ticket->get_ctx()->get_thd());
+ return ticket->get_ctx()->get_thread_id();
return 0;
}
@@ -1821,6 +1946,8 @@ MDL_context::find_ticket(MDL_request *mdl_request,
if (mdl_request->key.is_equal(&ticket->m_lock->key) &&
ticket->has_stronger_or_equal_type(mdl_request->type))
{
+ DBUG_PRINT("info", ("Adding mdl lock %d to %d",
+ mdl_request->type, ticket->m_type));
*result_duration= duration;
return ticket;
}
@@ -2047,7 +2174,7 @@ void MDL_object_lock::notify_conflicting_locks(MDL_context *ctx)
{
/* Only try to abort locks on which we back off. */
if (conflicting_ticket->get_ctx() != ctx &&
- conflicting_ticket->get_type() < MDL_SHARED_NO_WRITE)
+ conflicting_ticket->get_type() < MDL_SHARED_UPGRADABLE)
{
MDL_context *conflicting_ctx= conflicting_ticket->get_ctx();
@@ -2057,9 +2184,9 @@ void MDL_object_lock::notify_conflicting_locks(MDL_context *ctx)
lock or some other non-MDL resource we might need to wake it up
by calling code outside of MDL.
*/
- mysql_notify_thread_having_shared_lock(ctx->get_thd(),
- conflicting_ctx->get_thd(),
- conflicting_ctx->get_needs_thr_lock_abort());
+ ctx->get_owner()->
+ notify_shared_lock(conflicting_ctx->get_owner(),
+ conflicting_ctx->get_needs_thr_lock_abort());
}
}
}
@@ -2089,9 +2216,9 @@ void MDL_scoped_lock::notify_conflicting_locks(MDL_context *ctx)
insert delayed. We need to kill such threads in order to get
global shared lock. We do this my calling code outside of MDL.
*/
- mysql_notify_thread_having_shared_lock(ctx->get_thd(),
- conflicting_ctx->get_thd(),
- conflicting_ctx->get_needs_thr_lock_abort());
+ ctx->get_owner()->
+ notify_shared_lock(conflicting_ctx->get_owner(),
+ conflicting_ctx->get_needs_thr_lock_abort());
}
}
}
@@ -2117,6 +2244,7 @@ MDL_context::acquire_lock(MDL_request *mdl_request, ulong lock_wait_timeout)
struct timespec abs_timeout;
MDL_wait::enum_wait_status wait_status;
DBUG_ENTER("MDL_context::acquire_lock");
+ DBUG_PRINT("enter", ("lock_type: %d", mdl_request->type));
/* Do some work outside the critical section. */
set_timespec(abs_timeout, lock_wait_timeout);
@@ -2131,6 +2259,7 @@ MDL_context::acquire_lock(MDL_request *mdl_request, ulong lock_wait_timeout)
MDL_lock, MDL_context and MDL_request were updated
accordingly, so we can simply return success.
*/
+ DBUG_PRINT("info", ("Got lock without waiting"));
DBUG_RETURN(FALSE);
}
@@ -2164,7 +2293,7 @@ MDL_context::acquire_lock(MDL_request *mdl_request, ulong lock_wait_timeout)
will_wait_for(ticket);
/* There is a shared or exclusive lock on the object. */
- DEBUG_SYNC(m_thd, "mdl_acquire_lock_wait");
+ DEBUG_SYNC(get_thd(), "mdl_acquire_lock_wait");
find_deadlock();
@@ -2175,13 +2304,13 @@ MDL_context::acquire_lock(MDL_request *mdl_request, ulong lock_wait_timeout)
while (cmp_timespec(abs_shortwait, abs_timeout) <= 0)
{
/* abs_timeout is far away. Wait a short while and notify locks. */
- wait_status= m_wait.timed_wait(m_thd, &abs_shortwait, FALSE,
+ wait_status= m_wait.timed_wait(m_owner, &abs_shortwait, FALSE,
mdl_request->key.get_wait_state_name());
if (wait_status != MDL_wait::EMPTY)
break;
/* Check if the client is gone while we were waiting. */
- if (! thd_is_connected(m_thd))
+ if (! thd_is_connected(m_owner->get_thd()))
{
/*
* The client is disconnected. Don't wait forever:
@@ -2199,7 +2328,7 @@ MDL_context::acquire_lock(MDL_request *mdl_request, ulong lock_wait_timeout)
set_timespec(abs_shortwait, 1);
}
if (wait_status == MDL_wait::EMPTY)
- wait_status= m_wait.timed_wait(m_thd, &abs_timeout, TRUE,
+ wait_status= m_wait.timed_wait(m_owner, &abs_timeout, TRUE,
mdl_request->key.get_wait_state_name());
done_waiting_for();
@@ -2284,8 +2413,7 @@ bool MDL_context::acquire_locks(MDL_request_list *mdl_requests,
/* Sort requests according to MDL_key. */
if (! (sort_buf= (MDL_request **)my_malloc(req_count *
sizeof(MDL_request*),
- MYF(MY_WME |
- MY_THREAD_SPECIFIC))))
+ MYF(MY_WME))))
DBUG_RETURN(TRUE);
for (p_req= sort_buf; p_req < sort_buf + req_count; p_req++)
@@ -2321,11 +2449,12 @@ err:
/**
- Upgrade a shared metadata lock to exclusive.
+ Upgrade a shared metadata lock.
- Used in ALTER TABLE, when a copy of the table with the
- new definition has been constructed.
+ Used in ALTER TABLE.
+ @param mdl_ticket Lock to upgrade.
+ @param new_type Lock type to upgrade to.
@param lock_wait_timeout Seconds to wait before timeout.
@note In case of failure to upgrade lock (e.g. because upgrader
@@ -2333,7 +2462,7 @@ err:
shared mode).
@note There can be only one upgrader for a lock or we will have deadlock.
- This invariant is ensured by the fact that upgradeable locks SNW
+ This invariant is ensured by the fact that upgradeable locks SU, SNW
and SNRW are not compatible with each other and themselves.
@retval FALSE Success
@@ -2341,28 +2470,31 @@ err:
*/
bool
-MDL_context::upgrade_shared_lock_to_exclusive(MDL_ticket *mdl_ticket,
- ulong lock_wait_timeout)
+MDL_context::upgrade_shared_lock(MDL_ticket *mdl_ticket,
+ enum_mdl_type new_type,
+ ulong lock_wait_timeout)
{
MDL_request mdl_xlock_request;
MDL_savepoint mdl_svp= mdl_savepoint();
bool is_new_ticket;
-
- DBUG_ENTER("MDL_ticket::upgrade_shared_lock_to_exclusive");
- DEBUG_SYNC(get_thd(), "mdl_upgrade_shared_lock_to_exclusive");
+ DBUG_ENTER("MDL_context::upgrade_shared_lock");
+ DBUG_PRINT("enter",("new_type: %d lock_wait_timeout: %lu", new_type,
+ lock_wait_timeout));
+ DEBUG_SYNC(get_thd(), "mdl_upgrade_lock");
/*
Do nothing if already upgraded. Used when we FLUSH TABLE under
LOCK TABLES and a table is listed twice in LOCK TABLES list.
*/
- if (mdl_ticket->m_type == MDL_EXCLUSIVE)
+ if (mdl_ticket->has_stronger_or_equal_type(new_type))
DBUG_RETURN(FALSE);
- /* Only allow upgrades from MDL_SHARED_NO_WRITE/NO_READ_WRITE */
- DBUG_ASSERT(mdl_ticket->m_type == MDL_SHARED_NO_WRITE ||
+ /* Only allow upgrades from SHARED_UPGRADABLE/NO_WRITE/NO_READ_WRITE */
+ DBUG_ASSERT(mdl_ticket->m_type == MDL_SHARED_UPGRADABLE ||
+ mdl_ticket->m_type == MDL_SHARED_NO_WRITE ||
mdl_ticket->m_type == MDL_SHARED_NO_READ_WRITE);
- mdl_xlock_request.init(&mdl_ticket->m_lock->key, MDL_EXCLUSIVE,
+ mdl_xlock_request.init(&mdl_ticket->m_lock->key, new_type,
MDL_TRANSACTION);
if (acquire_lock(&mdl_xlock_request, lock_wait_timeout))
@@ -2380,7 +2512,7 @@ MDL_context::upgrade_shared_lock_to_exclusive(MDL_ticket *mdl_ticket,
ticket from the granted queue and then include it back.
*/
mdl_ticket->m_lock->m_granted.remove_ticket(mdl_ticket);
- mdl_ticket->m_type= MDL_EXCLUSIVE;
+ mdl_ticket->m_type= new_type;
mdl_ticket->m_lock->m_granted.add_ticket(mdl_ticket);
mysql_prlock_unlock(&mdl_ticket->m_lock->m_rwlock);
@@ -2654,8 +2786,8 @@ void MDL_context::release_lock(enum_mdl_duration duration, MDL_ticket *ticket)
{
MDL_lock *lock= ticket->m_lock;
DBUG_ENTER("MDL_context::release_lock");
- DBUG_PRINT("enter", ("db=%s name=%s", lock->key.db_name(),
- lock->key.name()));
+ DBUG_PRINT("enter", ("db: '%s' name: '%s'",
+ lock->key.db_name(), lock->key.name()));
DBUG_ASSERT(this == ticket->get_ctx());
mysql_mutex_assert_not_owner(&LOCK_open);
@@ -2744,22 +2876,29 @@ void MDL_context::release_all_locks_for_name(MDL_ticket *name)
/**
- Downgrade an exclusive lock to shared metadata lock.
+ Downgrade an EXCLUSIVE or SHARED_NO_WRITE lock to shared metadata lock.
@param type Type of lock to which exclusive lock should be downgraded.
*/
-void MDL_ticket::downgrade_exclusive_lock(enum_mdl_type type)
+void MDL_ticket::downgrade_lock(enum_mdl_type type)
{
mysql_mutex_assert_not_owner(&LOCK_open);
/*
Do nothing if already downgraded. Used when we FLUSH TABLE under
LOCK TABLES and a table is listed twice in LOCK TABLES list.
+ Note that this code might even try to "downgrade" a weak lock
+ (e.g. SW) to a stronger one (e.g SNRW). So we can't even assert
+ here that target lock is weaker than existing lock.
*/
- if (m_type != MDL_EXCLUSIVE)
+ if (m_type == type || !has_stronger_or_equal_type(type))
return;
+ /* Only allow downgrade from EXCLUSIVE and SHARED_NO_WRITE. */
+ DBUG_ASSERT(m_type == MDL_EXCLUSIVE ||
+ m_type == MDL_SHARED_NO_WRITE);
+
mysql_prlock_wrlock(&m_lock->m_rwlock);
/*
To update state of MDL_lock object correctly we need to temporarily
diff --git a/sql/mdl.h b/sql/mdl.h
index 944c6bb6349..e79df9b6cd7 100644
--- a/sql/mdl.h
+++ b/sql/mdl.h
@@ -1,6 +1,6 @@
#ifndef MDL_H
#define MDL_H
-/* Copyright (c) 2009, 2011, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -12,8 +12,8 @@
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+ along with this program; if not, write to the Free Software Foundation,
+ 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */
#if defined(__IBMC__) || defined(__IBMCPP__)
/* Further down, "next_in_lock" and "next_in_context" have the same type,
@@ -26,10 +26,11 @@
#include "sql_plist.h"
#include <my_sys.h>
-#include <my_pthread.h>
#include <m_string.h>
#include <mysql_com.h>
+#include <algorithm>
+
class THD;
class MDL_context;
@@ -55,6 +56,67 @@ class MDL_ticket;
#define EXIT_COND(S) exit_cond(S, __func__, __FILE__, __LINE__)
/**
+ An interface to separate the MDL module from the THD, and the rest of the
+ server code.
+ */
+
+class MDL_context_owner
+{
+public:
+ virtual ~MDL_context_owner() {}
+
+ /**
+ Enter a condition wait.
+ For @c enter_cond() / @c exit_cond() to work the mutex must be held before
+ @c enter_cond(); this mutex is then released by @c exit_cond().
+ Usage must be: lock mutex; enter_cond(); your code; exit_cond().
+ @param cond the condition to wait on
+ @param mutex the associated mutex
+ @param [in] stage the stage to enter, or NULL
+ @param [out] old_stage the previous stage, or NULL
+ @param src_function function name of the caller
+ @param src_file file name of the caller
+ @param src_line line number of the caller
+ @sa ENTER_COND(), THD::enter_cond()
+ @sa EXIT_COND(), THD::exit_cond()
+ */
+ virtual void enter_cond(mysql_cond_t *cond, mysql_mutex_t *mutex,
+ const PSI_stage_info *stage, PSI_stage_info *old_stage,
+ const char *src_function, const char *src_file,
+ int src_line) = 0;
+
+ /**
+ @def EXIT_COND(S)
+ End a wait on a condition
+ @param [in] stage the new stage to enter
+ @param src_function function name of the caller
+ @param src_file file name of the caller
+ @param src_line line number of the caller
+ @sa ENTER_COND(), THD::enter_cond()
+ @sa EXIT_COND(), THD::exit_cond()
+ */
+ virtual void exit_cond(const PSI_stage_info *stage,
+ const char *src_function, const char *src_file,
+ int src_line) = 0;
+ /**
+ Has the owner thread been killed?
+ */
+ virtual int is_killed() = 0;
+
+ /**
+ This one is only used for DEBUG_SYNC.
+ (Do not use it to peek/poke into other parts of THD.)
+ */
+ virtual THD* get_thd() = 0;
+
+ /**
+ @see THD::notify_shared_lock()
+ */
+ virtual bool notify_shared_lock(MDL_context_owner *in_use,
+ bool needs_thr_lock_abort) = 0;
+};
+
+/**
Type of metadata lock request.
@sa Comments for MDL_object_lock::can_grant_lock() and
@@ -132,6 +194,15 @@ enum enum_mdl_type {
*/
MDL_SHARED_WRITE,
/*
+ An upgradable shared metadata lock for cases when there is an intention
+ to modify (and not just read) data in the table.
+ Can be upgraded to MDL_SHARED_NO_WRITE and MDL_EXCLUSIVE.
+ A connection holding SU lock can read table metadata and modify or read
+ table data (after acquiring appropriate table and row-level locks).
+ To be used for the first phase of ALTER TABLE.
+ */
+ MDL_SHARED_UPGRADABLE,
+ /*
An upgradable shared metadata lock which blocks all attempts to update
table data, allowing reads.
A connection holding this kind of lock can read table metadata and read
@@ -270,9 +341,12 @@ public:
are not longer than NAME_LEN. Still we play safe and try to avoid
buffer overruns.
*/
- m_db_name_length= (uint16) (strmake(m_ptr + 1, db, NAME_LEN) - m_ptr - 1);
- m_length= (uint16) (strmake(m_ptr + m_db_name_length + 2, name, NAME_LEN) -
- m_ptr + 1);
+ DBUG_ASSERT(strlen(db) <= NAME_LEN);
+ DBUG_ASSERT(strlen(name) <= NAME_LEN);
+ m_db_name_length= static_cast<uint16>(strmake(m_ptr + 1, db, NAME_LEN) -
+ m_ptr - 1);
+ m_length= static_cast<uint16>(strmake(m_ptr + m_db_name_length + 2, name,
+ NAME_LEN) - m_ptr + 1);
}
void mdl_key_init(const MDL_key *rhs)
{
@@ -295,6 +369,7 @@ public:
character set is utf-8, we can safely assume that no
character starts with a zero byte.
*/
+ using std::min;
return memcmp(m_ptr, rhs->m_ptr, min(m_length, rhs->m_length));
}
@@ -509,14 +584,15 @@ public:
MDL_context *get_ctx() const { return m_ctx; }
bool is_upgradable_or_exclusive() const
{
- return m_type == MDL_SHARED_NO_WRITE ||
+ return m_type == MDL_SHARED_UPGRADABLE ||
+ m_type == MDL_SHARED_NO_WRITE ||
m_type == MDL_SHARED_NO_READ_WRITE ||
m_type == MDL_EXCLUSIVE;
}
enum_mdl_type get_type() const { return m_type; }
MDL_lock *get_lock() const { return m_lock; }
MDL_key *get_key() const;
- void downgrade_exclusive_lock(enum_mdl_type type);
+ void downgrade_lock(enum_mdl_type type);
bool has_stronger_or_equal_type(enum_mdl_type type) const;
@@ -622,7 +698,7 @@ public:
bool set_status(enum_wait_status result_arg);
enum_wait_status get_status();
void reset_status();
- enum_wait_status timed_wait(THD *thd,
+ enum_wait_status timed_wait(MDL_context_owner *owner,
struct timespec *abs_timeout,
bool signal_timeout,
const PSI_stage_info *wait_state_name);
@@ -668,8 +744,9 @@ public:
bool try_acquire_lock(MDL_request *mdl_request);
bool acquire_lock(MDL_request *mdl_request, ulong lock_wait_timeout);
bool acquire_locks(MDL_request_list *requests, ulong lock_wait_timeout);
- bool upgrade_shared_lock_to_exclusive(MDL_ticket *mdl_ticket,
- ulong lock_wait_timeout);
+ bool upgrade_shared_lock(MDL_ticket *mdl_ticket,
+ enum_mdl_type new_type,
+ ulong lock_wait_timeout);
bool clone_ticket(MDL_request *mdl_request);
@@ -704,7 +781,7 @@ public:
void release_transactional_locks();
void rollback_to_savepoint(const MDL_savepoint &mdl_savepoint);
- inline THD *get_thd() const { return m_thd; }
+ MDL_context_owner *get_owner() { return m_owner; }
/** @pre Only valid if we started waiting for lock. */
inline uint get_deadlock_weight() const
@@ -717,7 +794,7 @@ public:
already has received some signal or closed
signal slot.
*/
- void init(THD *thd_arg) { m_thd= thd_arg; }
+ void init(MDL_context_owner *arg) { m_owner= arg; }
void set_needs_thr_lock_abort(bool needs_thr_lock_abort)
{
@@ -797,7 +874,7 @@ private:
involved schemas and global intention exclusive lock.
*/
Ticket_list m_tickets[MDL_DURATION_END];
- THD *m_thd;
+ MDL_context_owner *m_owner;
/**
TRUE - if for this context we will break protocol and try to
acquire table-level locks while having only S lock on
@@ -826,6 +903,7 @@ private:
*/
MDL_wait_for_subgraph *m_waiting_for;
private:
+ THD *get_thd() const { return m_owner->get_thd(); }
MDL_ticket *find_ticket(MDL_request *mdl_req,
enum_mdl_duration *duration);
void release_locks_stored_before(enum_mdl_duration duration, MDL_ticket *sentinel);
@@ -836,6 +914,8 @@ private:
public:
void find_deadlock();
+ ulong get_thread_id() const { return thd_get_thread_id(get_thd()); }
+
bool visit_subgraph(MDL_wait_for_graph_visitor *dvisitor);
/** Inform the deadlock detector there is an edge in the wait-for graph. */
@@ -870,8 +950,6 @@ private:
void mdl_init();
void mdl_destroy();
-extern bool mysql_notify_thread_having_shared_lock(THD *thd, THD *in_use,
- bool needs_thr_lock_abort);
extern "C" unsigned long thd_get_thread_id(const MYSQL_THD thd);
/**
@@ -897,6 +975,14 @@ extern ulong mdl_locks_cache_size;
static const ulong MDL_LOCKS_CACHE_SIZE_DEFAULT = 1024;
/*
+ Start-up parameter for the number of partitions of the hash
+ containing all the MDL_lock objects and a constant for
+ its default value.
+*/
+extern ulong mdl_locks_hash_partitions;
+static const ulong MDL_LOCKS_HASH_PARTITIONS_DEFAULT = 8;
+
+/*
Metadata locking subsystem tries not to grant more than
max_write_lock_count high-prio, strong locks successively,
to avoid starving out weak, low-prio locks.
diff --git a/sql/multi_range_read.cc b/sql/multi_range_read.cc
index 3aee7936b79..e42ea9ec452 100644
--- a/sql/multi_range_read.cc
+++ b/sql/multi_range_read.cc
@@ -550,7 +550,7 @@ int Mrr_ordered_index_reader::init(handler *h_arg, RANGE_SEQ_IF *seq_funcs,
KEY *key_info= &file->get_table()->key_info[file->active_index];
keypar.index_ranges_unique= test(key_info->flags & HA_NOSAME &&
- key_info->key_parts ==
+ key_info->user_defined_key_parts ==
my_count_bits(keypar.key_tuple_map));
mrr_iter= seq_funcs->init(seq_init_param, n_ranges, mode);
@@ -1497,7 +1497,7 @@ ha_rows DsMrr_impl::dsmrr_info_const(uint keyno, RANGE_SEQ_IF *seq,
bool key_uses_partial_cols(TABLE *table, uint keyno)
{
KEY_PART_INFO *kp= table->key_info[keyno].key_part;
- KEY_PART_INFO *kp_end= kp + table->key_info[keyno].key_parts;
+ KEY_PART_INFO *kp_end= kp + table->key_info[keyno].user_defined_key_parts;
for (; kp != kp_end; kp++)
{
if (!kp->field->part_of_key.is_set(keyno))
@@ -1648,7 +1648,7 @@ int DsMrr_impl::dsmrr_explain_info(uint mrr_mode, char *str, size_t size)
used_str= rowid_ordered;
uint used_str_len= strlen(used_str);
- uint copy_len= min(used_str_len, size);
+ uint copy_len= MY_MIN(used_str_len, size);
memcpy(str, used_str, copy_len);
return copy_len;
}
@@ -1709,7 +1709,7 @@ bool DsMrr_impl::get_disk_sweep_mrr_cost(uint keynr, ha_rows rows, uint flags,
else
{
cost->reset();
- *buffer_size= max(*buffer_size,
+ *buffer_size= MY_MAX(*buffer_size,
(size_t)(1.2*rows_in_last_step) * elem_size +
primary_file->ref_length + table->key_info[keynr].key_length);
}
diff --git a/sql/my_decimal.cc b/sql/my_decimal.cc
index 21611afd87b..c11bf671cb1 100644
--- a/sql/my_decimal.cc
+++ b/sql/my_decimal.cc
@@ -45,21 +45,21 @@ int decimal_operation_results(int result, const char *value, const char *type)
case E_DEC_OK:
break;
case E_DEC_TRUNCATED:
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_DATA_TRUNCATED, ER(ER_DATA_TRUNCATED),
value, type);
break;
case E_DEC_OVERFLOW:
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_DATA_OVERFLOW, ER(ER_DATA_OVERFLOW),
value, type);
break;
case E_DEC_DIV_ZERO:
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_DIVISION_BY_ZERO, ER(ER_DIVISION_BY_ZERO));
break;
case E_DEC_BAD_NUM:
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_BAD_DATA, ER(ER_BAD_DATA),
value, type);
break;
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 2cf0dddd1aa..2575ebed209 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -614,6 +614,19 @@ const char *in_left_expr_name= "<left expr>";
const char *in_additional_cond= "<IN COND>";
const char *in_having_cond= "<IN HAVING>";
+/** Number of connection errors when selecting on the listening port */
+ulong connection_errors_select= 0;
+/** Number of connection errors when accepting sockets in the listening port. */
+ulong connection_errors_accept= 0;
+/** Number of connection errors from TCP wrappers. */
+ulong connection_errors_tcpwrap= 0;
+/** Number of connection errors from internal server errors. */
+ulong connection_errors_internal= 0;
+/** Number of connection errors from the server max_connection limit. */
+ulong connection_errors_max_connection= 0;
+/** Number of errors when reading the peer address. */
+ulong connection_errors_peer_addr= 0;
+
/* classes for comparation parsing/processing */
Eq_creator eq_creator;
Ne_creator ne_creator;
@@ -811,8 +824,6 @@ static struct my_option pfs_early_options[] __attribute__((unused)) =
GET_BOOL, OPT_ARG, TRUE, 0, 0, 0, 0, 0}
};
-
-
#ifdef HAVE_PSI_INTERFACE
#ifdef HAVE_MMAP
PSI_mutex_key key_PAGE_lock, key_LOCK_sync, key_LOCK_active, key_LOCK_pool,
@@ -854,6 +865,7 @@ PSI_mutex_key key_LOCK_stats,
PSI_mutex_key key_LOCK_rpl_gtid_state;
PSI_mutex_key key_LOCK_prepare_ordered, key_LOCK_commit_ordered;
+PSI_mutex_key key_TABLE_SHARE_LOCK_share;
static PSI_mutex_info all_server_mutexes[]=
{
@@ -911,6 +923,7 @@ static PSI_mutex_info all_server_mutexes[]=
{ &key_relay_log_info_sleep_lock, "Relay_log_info::sleep_lock", 0},
{ &key_structure_guard_mutex, "Query_cache::structure_guard_mutex", 0},
{ &key_TABLE_SHARE_LOCK_ha_data, "TABLE_SHARE::LOCK_ha_data", 0},
+ { &key_TABLE_SHARE_LOCK_share, "TABLE_SHARE::LOCK_share", 0},
{ &key_LOCK_error_messages, "LOCK_error_messages", PSI_FLAG_GLOBAL},
{ &key_LOCK_prepare_ordered, "LOCK_prepare_ordered", PSI_FLAG_GLOBAL},
{ &key_LOCK_commit_ordered, "LOCK_commit_ordered", PSI_FLAG_GLOBAL},
@@ -1102,7 +1115,8 @@ void net_after_header_psi(struct st_net *net, void *user_data, size_t /* unused:
{
thd->m_statement_psi= MYSQL_START_STATEMENT(&thd->m_statement_state,
stmt_info_new_packet.m_key,
- thd->db, thd->db_length);
+ thd->db, thd->db_length,
+ thd->charset());
THD_STAGE_INFO(thd, stage_init);
}
@@ -1134,12 +1148,6 @@ void init_net_server_extension(THD *thd)
}
#endif /* EMBEDDED_LIBRARY */
-/*
- Since buffered_option_error_reporter is only used currently
- for parsing performance schema options, this code is not needed
- when the performance schema is not compiled in.
-*/
-#ifdef WITH_PERFSCHEMA_STORAGE_ENGINE
/**
A log message for the error log, buffered in memory.
Log messages are temporarily buffered when generated before the error log
@@ -1275,13 +1283,16 @@ void Buffered_logs::print()
/** Logs reported before a logger is available. */
static Buffered_logs buffered_logs;
+static MYSQL_SOCKET unix_sock, base_ip_sock, extra_ip_sock;
+struct my_rnd_struct sql_rand; ///< used by sql_class.cc:THD::THD()
+
#ifndef EMBEDDED_LIBRARY
/**
Error reporter that buffer log messages.
@param level log message level
@param format log message format string
*/
-C_MODE_START
+
static void buffered_option_error_reporter(enum loglevel level,
const char *format, ...)
{
@@ -1293,14 +1304,7 @@ static void buffered_option_error_reporter(enum loglevel level,
va_end(args);
buffered_logs.buffer(level, buffer);
}
-C_MODE_END
-#endif /* !EMBEDDED_LIBRARY */
-#endif /* WITH_PERFSCHEMA_STORAGE_ENGINE */
-static MYSQL_SOCKET unix_sock, base_ip_sock, extra_ip_sock;
-struct my_rnd_struct sql_rand; ///< used by sql_class.cc:THD::THD()
-
-#ifndef EMBEDDED_LIBRARY
struct passwd *user_info;
static pthread_t select_thread;
#endif
@@ -2710,7 +2714,7 @@ static bool cache_thread()
Delete the instrumentation for the job that just completed,
before parking this pthread in the cache (blocked on COND_thread_cache).
*/
- PSI_CALL(delete_current_thread)();
+ PSI_THREAD_CALL(delete_current_thread)();
#endif
while (!abort_loop && ! wake_thread && ! kill_cached_threads)
@@ -2733,9 +2737,9 @@ static bool cache_thread()
Create new instrumentation for the new THD job,
and attach it to this running pthread.
*/
- PSI_thread *psi= PSI_CALL(new_thread)(key_thread_one_connection,
- thd, thd->thread_id);
- PSI_CALL(set_thread)(psi);
+ PSI_thread *psi= PSI_THREAD_CALL(new_thread)(key_thread_one_connection,
+ thd, thd->thread_id);
+ PSI_THREAD_CALL(set_thread)(psi);
#endif
/*
@@ -3264,7 +3268,7 @@ pthread_handler_t signal_hand(void *arg __attribute__((unused)))
abort_loop=1; // mark abort for threads
#ifdef HAVE_PSI_THREAD_INTERFACE
/* Delete the instrumentation for the signal thread */
- PSI_CALL(delete_current_thread)();
+ PSI_THREAD_CALL(delete_current_thread)();
#endif
#ifdef USE_ONE_SIGNAL_HAND
pthread_t tmp;
@@ -3334,7 +3338,7 @@ extern "C" void my_message_sql(uint error, const char *str, myf MyFlags);
void my_message_sql(uint error, const char *str, myf MyFlags)
{
THD *thd= current_thd;
- MYSQL_ERROR::enum_warning_level level;
+ Sql_condition::enum_warning_level level;
sql_print_message_func func;
DBUG_ENTER("my_message_sql");
DBUG_PRINT("error", ("error: %u message: '%s' Flag: %lu", error, str,
@@ -3346,17 +3350,17 @@ void my_message_sql(uint error, const char *str, myf MyFlags)
mysql_audit_general(thd, MYSQL_AUDIT_GENERAL_ERROR, error, str);
if (MyFlags & ME_JUST_INFO)
{
- level= MYSQL_ERROR::WARN_LEVEL_NOTE;
+ level= Sql_condition::WARN_LEVEL_NOTE;
func= sql_print_information;
}
else if (MyFlags & ME_JUST_WARNING)
{
- level= MYSQL_ERROR::WARN_LEVEL_WARN;
+ level= Sql_condition::WARN_LEVEL_WARN;
func= sql_print_warning;
}
else
{
- level= MYSQL_ERROR::WARN_LEVEL_ERROR;
+ level= Sql_condition::WARN_LEVEL_ERROR;
func= sql_print_error;
}
@@ -3520,6 +3524,7 @@ SHOW_VAR com_status_vars[]= {
{"empty_query", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_EMPTY_QUERY]), SHOW_LONG_STATUS},
{"execute_sql", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_EXECUTE]), SHOW_LONG_STATUS},
{"flush", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_FLUSH]), SHOW_LONG_STATUS},
+ {"get_diagnostics", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_GET_DIAGNOSTICS]), SHOW_LONG_STATUS},
{"grant", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_GRANT]), SHOW_LONG_STATUS},
{"ha_close", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_HA_CLOSE]), SHOW_LONG_STATUS},
{"ha_open", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_HA_OPEN]), SHOW_LONG_STATUS},
@@ -3725,13 +3730,17 @@ static void my_malloc_size_cb_func(long long size, my_bool is_thread_specific)
However, this should never happen, so better to assert and
fix this.
*/
+#ifdef ENABLE_BEFORE_END_OF_MERGE_QQ
DBUG_ASSERT(thd);
+#endif
if (thd)
{
DBUG_PRINT("info", ("memory_used: %lld size: %lld",
(longlong) thd->status_var.memory_used, size));
thd->status_var.memory_used+= size;
+#ifdef ENABLE_BEFORE_END_OF_MERGE_QQ
DBUG_ASSERT((longlong) thd->status_var.memory_used >= 0);
+#endif
}
}
}
@@ -3745,6 +3754,12 @@ static void my_malloc_size_cb_func(long long size, my_bool is_thread_specific)
static int init_common_variables()
{
umask(((~my_umask) & 0666));
+ connection_errors_select= 0;
+ connection_errors_accept= 0;
+ connection_errors_tcpwrap= 0;
+ connection_errors_internal= 0;
+ connection_errors_max_connection= 0;
+ connection_errors_peer_addr= 0;
my_decimal_set_zero(&decimal_zero); // set decimal_zero constant;
if (pthread_key_create(&THR_THD,NULL) ||
@@ -4008,7 +4023,7 @@ static int init_common_variables()
can't get max_connections*5 but still got no less than was
requested (value of wanted_files).
*/
- max_open_files= max(max(wanted_files,
+ max_open_files= MY_MAX(MY_MAX(wanted_files,
(max_connections + extra_max_connections)*5),
open_files_limit);
files= my_set_max_open_files(max_open_files);
@@ -4021,15 +4036,15 @@ static int init_common_variables()
If we have requested too much file handles than we bring
max_connections in supported bounds.
*/
- max_connections= (ulong) min(files-10-TABLE_OPEN_CACHE_MIN*2,
+ max_connections= (ulong) MY_MIN(files-10-TABLE_OPEN_CACHE_MIN*2,
max_connections);
/*
Decrease table_cache_size according to max_connections, but
- not below TABLE_OPEN_CACHE_MIN. Outer min() ensures that we
+ not below TABLE_OPEN_CACHE_MIN. Outer MY_MIN() ensures that we
never increase table_cache_size automatically (that could
happen if max_connections is decreased above).
*/
- table_cache_size= (ulong) min(max((files-10-max_connections)/2,
+ table_cache_size= (ulong) MY_MIN(MY_MAX((files-10-max_connections)/2,
TABLE_OPEN_CACHE_MIN),
table_cache_size);
DBUG_PRINT("warning",
@@ -5023,7 +5038,6 @@ int mysqld_main(int argc, char **argv)
sys_var_init();
-#ifdef WITH_PERFSCHEMA_STORAGE_ENGINE
/*
The performance schema needs to be initialized as early as possible,
before to-be-instrumented objects of the server are initialized.
@@ -5051,22 +5065,30 @@ int mysqld_main(int argc, char **argv)
my_charset_error_reporter= buffered_option_error_reporter;
pfs_param.m_pfs_instrument= const_cast<char*>("");
+#ifdef WITH_PERFSCHEMA_STORAGE_ENGINE
/*
Initialize the array of performance schema instrument configurations.
*/
init_pfs_instrument_array();
+#endif /* WITH_PERFSCHEMA_STORAGE_ENGINE */
ho_error= handle_options(&remaining_argc, &remaining_argv,
(my_option*)(all_early_options.buffer),
mysqld_get_one_option);
delete_dynamic(&all_early_options);
+#ifdef WITH_PERFSCHEMA_STORAGE_ENGINE
if (ho_error == 0)
{
/* Add back the program name handle_options removes */
remaining_argc++;
remaining_argv--;
- if (pfs_param.m_enabled)
+ if (pfs_param.m_enabled && !opt_help && !opt_bootstrap)
{
+ /* Add sizing hints from the server sizing parameters. */
+ pfs_param.m_hints.m_table_definition_cache= table_def_size;
+ pfs_param.m_hints.m_table_open_cache= table_cache_size;
+ pfs_param.m_hints.m_max_connections= max_connections;
+ pfs_param.m_hints.m_open_files_limit= open_files_limit;
PSI_hook= initialize_performance_schema(&pfs_param);
if (PSI_hook == NULL)
{
@@ -5107,8 +5129,8 @@ int mysqld_main(int argc, char **argv)
*/
init_server_psi_keys();
/* Instrument the main thread */
- PSI_thread *psi= PSI_CALL(new_thread)(key_thread_main, NULL, 0);
- PSI_CALL(set_thread)(psi);
+ PSI_thread *psi= PSI_THREAD_CALL(new_thread)(key_thread_main, NULL, 0);
+ PSI_THREAD_CALL(set_thread)(psi);
/*
Now that some instrumentation is in place,
@@ -5387,7 +5409,7 @@ int mysqld_main(int argc, char **argv)
Disable the main thread instrumentation,
to avoid recording events during the shutdown.
*/
- PSI_CALL(delete_current_thread)();
+ PSI_THREAD_CALL(delete_current_thread)();
#endif
/* Wait until cleanup is done */
@@ -5769,6 +5791,7 @@ void create_thread_to_handle_connection(THD *thd)
mysql_mutex_unlock(&LOCK_connection_count);
statistic_increment(aborted_connects,&LOCK_status);
+ statistic_increment(connection_errors_internal, &LOCK_status);
/* Can't use my_error() since store_globals has not been called. */
my_snprintf(error_message_buff, sizeof(error_message_buff),
ER_THD(thd, ER_CANT_CREATE_THREAD), error);
@@ -5822,6 +5845,7 @@ static void create_new_thread(THD *thd)
close_connection(thd, ER_CON_COUNT_ERROR);
statistic_increment(denied_connections, &LOCK_status);
delete thd;
+ statistic_increment(connection_errors_max_connection, &LOCK_status);
DBUG_VOID_RETURN;
}
@@ -5934,6 +5958,12 @@ void handle_connections_sockets()
{
if (socket_errno != SOCKET_EINTR)
{
+ /*
+ select(2)/poll(2) failed on the listening port.
+ There are not many details to report about the client;
+ increment the server global status variable.
+ */
+ statistic_increment(connection_errors_accept, &LOCK_status);
if (!select_errors++ && !abort_loop) /* purecov: inspected */
sql_print_error("mysqld: Got error %d from select",socket_errno); /* purecov: inspected */
}
@@ -6014,6 +6044,12 @@ void handle_connections_sockets()
#endif
if (mysql_socket_getfd(new_sock) == INVALID_SOCKET)
{
+ /*
+ accept(2) failed on the listening port, after many retries.
+ There are not many details to report about the client;
+ increment the server global status variable.
+ */
+ statistic_increment(connection_errors_accept, &LOCK_status);
if ((error_count++ & 255) == 0) // This can happen often
sql_perror("Error in accept");
MAYBE_BROKEN_SYSCALL;
@@ -6053,6 +6089,11 @@ void handle_connections_sockets()
(void) mysql_socket_shutdown(new_sock, SHUT_RDWR);
(void) mysql_socket_close(new_sock);
+ /*
+ The connection was refused by TCP wrappers.
+ There are no details (by client IP) available to update the host_cache.
+ */
+ statistic_increment(connection_errors_tcpwrap, &LOCK_status);
continue;
}
}
@@ -6068,6 +6109,7 @@ void handle_connections_sockets()
{
(void) mysql_socket_shutdown(new_sock, SHUT_RDWR);
(void) mysql_socket_close(new_sock);
+ statistic_increment(connection_errors_internal, &LOCK_status);
continue;
}
/* Set to get io buffers to be part of THD */
@@ -6096,6 +6138,7 @@ void handle_connections_sockets()
}
delete thd;
set_current_thd(0);
+ statistic_increment(connection_errors_internal, &LOCK_status);
continue;
}
@@ -6960,7 +7003,7 @@ static int show_slave_running(THD *thd, SHOW_VAR *var, char *buff)
mysql_mutex_lock(&LOCK_active_mi);
mi= master_info_index->
get_master_info(&thd->variables.default_master_connection,
- MYSQL_ERROR::WARN_LEVEL_NOTE);
+ Sql_condition::WARN_LEVEL_NOTE);
if (mi)
tmp= (my_bool) (mi->slave_running == MYSQL_SLAVE_RUN_CONNECT &&
mi->rli.slave_running);
@@ -6986,7 +7029,7 @@ static int show_slave_received_heartbeats(THD *thd, SHOW_VAR *var, char *buff)
mysql_mutex_lock(&LOCK_active_mi);
mi= master_info_index->
get_master_info(&thd->variables.default_master_connection,
- MYSQL_ERROR::WARN_LEVEL_NOTE);
+ Sql_condition::WARN_LEVEL_NOTE);
if (mi)
tmp= mi->received_heartbeats;
mysql_mutex_unlock(&LOCK_active_mi);
@@ -7011,7 +7054,7 @@ static int show_heartbeat_period(THD *thd, SHOW_VAR *var, char *buff)
mysql_mutex_lock(&LOCK_active_mi);
mi= master_info_index->
get_master_info(&thd->variables.default_master_connection,
- MYSQL_ERROR::WARN_LEVEL_NOTE);
+ Sql_condition::WARN_LEVEL_NOTE);
if (mi)
tmp= mi->heartbeat_period;
mysql_mutex_unlock(&LOCK_active_mi);
@@ -7483,6 +7526,12 @@ SHOW_VAR status_vars[]= {
{"Com", (char*) com_status_vars, SHOW_ARRAY},
{"Compression", (char*) &show_net_compression, SHOW_SIMPLE_FUNC},
{"Connections", (char*) &thread_id, SHOW_LONG_NOFLUSH},
+ {"Connection_errors_accept", (char*) &connection_errors_accept, SHOW_LONG},
+ {"Connection_errors_internal", (char*) &connection_errors_internal, SHOW_LONG},
+ {"Connection_errors_max_connections", (char*) &connection_errors_max_connection, SHOW_LONG},
+ {"Connection_errors_peer_address", (char*) &connection_errors_peer_addr, SHOW_LONG},
+ {"Connection_errors_select", (char*) &connection_errors_select, SHOW_LONG},
+ {"Connection_errors_tcpwrap", (char*) &connection_errors_tcpwrap, SHOW_LONG},
{"Cpu_time", (char*) offsetof(STATUS_VAR, cpu_time), SHOW_DOUBLE_STATUS},
{"Created_tmp_disk_tables", (char*) offsetof(STATUS_VAR, created_tmp_disk_tables_), SHOW_LONG_STATUS},
{"Created_tmp_files", (char*) &my_tmp_file_created, SHOW_LONG},
@@ -9030,6 +9079,9 @@ static PSI_file_info all_server_files[]=
PSI_stage_info stage_after_create= { 0, "After create", 0};
PSI_stage_info stage_allocating_local_table= { 0, "allocating local table", 0};
+PSI_stage_info stage_alter_inplace_prepare= { 0, "preparing for alter table", 0};
+PSI_stage_info stage_alter_inplace= { 0, "altering table", 0};
+PSI_stage_info stage_alter_inplace_commit= { 0, "committing alter table to storage engine", 0};
PSI_stage_info stage_changing_master= { 0, "Changing master", 0};
PSI_stage_info stage_checking_master_version= { 0, "Checking master version", 0};
PSI_stage_info stage_checking_permissions= { 0, "checking permissions", 0};
diff --git a/sql/mysqld.h b/sql/mysqld.h
index d82bd79d935..2cf63d093ad 100644
--- a/sql/mysqld.h
+++ b/sql/mysqld.h
@@ -24,6 +24,7 @@
#include "mysql/psi/mysql_file.h" /* MYSQL_FILE */
#include "sql_list.h" /* I_List */
#include "sql_cmd.h"
+#include <my_rnd.h>
class THD;
struct handlerton;
@@ -213,6 +214,12 @@ extern int bootstrap_error;
extern I_List<THD> threads;
extern char err_shared_dir[];
extern TYPELIB thread_handling_typelib;
+extern ulong connection_errors_select;
+extern ulong connection_errors_accept;
+extern ulong connection_errors_tcpwrap;
+extern ulong connection_errors_internal;
+extern ulong connection_errors_max_connection;
+extern ulong connection_errors_peer_addr;
extern ulong log_warnings;
/*
@@ -252,7 +259,7 @@ extern PSI_mutex_key key_BINLOG_LOCK_index, key_BINLOG_LOCK_xid_list,
extern PSI_mutex_key key_RELAYLOG_LOCK_index;
extern PSI_mutex_key key_LOCK_slave_state, key_LOCK_binlog_state;
-extern PSI_mutex_key key_LOCK_stats,
+extern PSI_mutex_key key_TABLE_SHARE_LOCK_share, key_LOCK_stats,
key_LOCK_global_user_client_stats, key_LOCK_global_table_stats,
key_LOCK_global_index_stats, key_LOCK_wakeup_ready;
@@ -310,6 +317,9 @@ void init_server_psi_keys();
*/
extern PSI_stage_info stage_after_create;
extern PSI_stage_info stage_allocating_local_table;
+extern PSI_stage_info stage_alter_inplace_prepare;
+extern PSI_stage_info stage_alter_inplace;
+extern PSI_stage_info stage_alter_inplace_commit;
extern PSI_stage_info stage_changing_master;
extern PSI_stage_info stage_checking_master_version;
extern PSI_stage_info stage_checking_permissions;
diff --git a/sql/net_serv.cc b/sql/net_serv.cc
index 93ca14337f5..fcb08bfbfc9 100644
--- a/sql/net_serv.cc
+++ b/sql/net_serv.cc
@@ -792,7 +792,7 @@ static my_bool my_net_skip_rest(NET *net, uint32 remain, thr_alarm_t *alarmed,
{
while (remain > 0)
{
- size_t length= min(remain, net->max_packet);
+ size_t length= MY_MIN(remain, net->max_packet);
if (net_safe_read(net, net->buff, length, alarmed))
DBUG_RETURN(1);
update_statistics(thd_increment_bytes_received(length));
@@ -989,7 +989,7 @@ my_real_read(NET *net, size_t *complen)
len=uint3korr(net->buff+net->where_b);
if (!len) /* End of big multi-packet */
goto end;
- helping = max(len,*complen) + net->where_b;
+ helping = MY_MAX(len,*complen) + net->where_b;
/* The necessary size of net->buff */
if (helping >= net->max_packet)
{
diff --git a/sql/opt_index_cond_pushdown.cc b/sql/opt_index_cond_pushdown.cc
index df9dae8e442..fb55aea1968 100644
--- a/sql/opt_index_cond_pushdown.cc
+++ b/sql/opt_index_cond_pushdown.cc
@@ -117,7 +117,7 @@ bool uses_index_fields_only(Item *item, TABLE *tbl, uint keyno,
return FALSE;
KEY *key_info= tbl->key_info + keyno;
KEY_PART_INFO *key_part= key_info->key_part;
- KEY_PART_INFO *key_part_end= key_part + key_info->key_parts;
+ KEY_PART_INFO *key_part_end= key_part + key_info->user_defined_key_parts;
for ( ; key_part < key_part_end; key_part++)
{
if (field->eq(key_part->field))
@@ -129,7 +129,7 @@ bool uses_index_fields_only(Item *item, TABLE *tbl, uint keyno,
{
key_info= tbl->key_info + tbl->s->primary_key;
key_part= key_info->key_part;
- key_part_end= key_part + key_info->key_parts;
+ key_part_end= key_part + key_info->user_defined_key_parts;
for ( ; key_part < key_part_end; key_part++)
{
/*
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 75142e87f98..d9838543b58 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -112,7 +112,7 @@
#include "key.h" // is_key_used, key_copy, key_cmp, key_restore
#include "sql_parse.h" // check_stack_overrun
#include "sql_partition.h" // get_part_id_func, PARTITION_ITERATOR,
- // struct partition_info
+ // struct partition_info, NOT_A_PARTITION_ID
#include "sql_base.h" // free_io_cache
#include "records.h" // init_read_record, end_read_record
#include <m_ctype.h>
@@ -2851,7 +2851,7 @@ static int fill_used_fields_bitmap(PARAM *param)
/* The table uses clustered PK and it is not internally generated */
KEY_PART_INFO *key_part= param->table->key_info[pk].key_part;
KEY_PART_INFO *key_part_end= key_part +
- param->table->key_info[pk].key_parts;
+ param->table->key_info[pk].user_defined_key_parts;
for (;key_part != key_part_end; ++key_part)
bitmap_clear_bit(&param->needed_fields, key_part->fieldnr-1);
}
@@ -3081,7 +3081,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
group_trp= get_best_group_min_max(&param, tree, best_read_time);
if (group_trp)
{
- param.table->quick_condition_rows= min(group_trp->records,
+ param.table->quick_condition_rows= MY_MIN(group_trp->records,
head->stat_records());
if (group_trp->read_cost < best_read_time)
{
@@ -3529,7 +3529,7 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item *cond)
/* Calculate selectivity of probably highly selective predicates */
ulong check_rows=
- min(thd->variables.optimizer_selectivity_sampling_limit,
+ MY_MIN(thd->variables.optimizer_selectivity_sampling_limit,
(ulong) (table_records * SELECTIVITY_SAMPLING_SHARE));
if (cond && check_rows > SELECTIVITY_SAMPLING_THRESHOLD &&
thd->variables.optimizer_use_condition_selectivity > 4)
@@ -3814,29 +3814,26 @@ static void dbug_print_singlepoint_range(SEL_ARG **start, uint num);
#endif
-/*
+/**
Perform partition pruning for a given table and condition.
- SYNOPSIS
- prune_partitions()
- thd Thread handle
- table Table to perform partition pruning for
- pprune_cond Condition to use for partition pruning
+ @param thd Thread handle
+ @param table Table to perform partition pruning for
+ @param pprune_cond Condition to use for partition pruning
- DESCRIPTION
- This function assumes that all partitions are marked as unused when it
- is invoked. The function analyzes the condition, finds partitions that
- need to be used to retrieve the records that match the condition, and
- marks them as used by setting appropriate bit in part_info->used_partitions
- In the worst case all partitions are marked as used.
-
- NOTE
- This function returns promptly if called for non-partitioned table.
-
- RETURN
- TRUE We've inferred that no partitions need to be used (i.e. no table
- records will satisfy pprune_cond)
- FALSE Otherwise
+ @note This function assumes that lock_partitions are set up when it
+ is invoked. The function analyzes the condition, finds partitions that
+ need to be used to retrieve the records that match the condition, and
+ marks them as used by setting appropriate bit in part_info->read_partitions
+ In the worst case all partitions are marked as used. If the table is not
+ yet locked, it will also unset bits in part_info->lock_partitions that is
+ not set in read_partitions.
+
+ This function returns promptly if called for non-partitioned table.
+
+ @return Operation status
+ @retval true Failure
+ @retval false Success
*/
bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond)
@@ -3889,7 +3886,7 @@ bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond)
thd->no_errors=1; // Don't warn about NULL
thd->mem_root=&alloc;
- bitmap_clear_all(&part_info->used_partitions);
+ bitmap_clear_all(&part_info->read_partitions);
prune_param.key= prune_param.range_param.key_parts;
SEL_TREE *tree;
@@ -3973,6 +3970,30 @@ end:
thd->mem_root= range_par->old_root;
free_root(&alloc,MYF(0)); // Return memory & allocator
DBUG_RETURN(retval);
+ /*
+ Must be a subset of the locked partitions.
+ lock_partitions contains the partitions marked by explicit partition
+ selection (... t PARTITION (pX) ...) and we must only use partitions
+ within that set.
+ */
+ bitmap_intersect(&prune_param.part_info->read_partitions,
+ &prune_param.part_info->lock_partitions);
+ /*
+ If not yet locked, also prune partitions to lock if not UPDATEing
+ partition key fields. This will also prune lock_partitions if we are under
+ LOCK TABLES, so prune away calls to start_stmt().
+ TODO: enhance this prune locking to also allow pruning of
+ 'UPDATE t SET part_key = const WHERE cond_is_prunable' so it adds
+ a lock for part_key partition.
+ */
+ if (!thd->lex->is_query_tables_locked() &&
+ !partition_key_modified(table, table->write_set))
+ {
+ bitmap_copy(&prune_param.part_info->lock_partitions,
+ &prune_param.part_info->read_partitions);
+ }
+ if (bitmap_is_clear_all(&(prune_param.part_info->read_partitions)))
+ table->all_partitions_pruned_away= true;
}
@@ -4009,7 +4030,7 @@ static void mark_full_partition_used_no_parts(partition_info* part_info,
{
DBUG_ENTER("mark_full_partition_used_no_parts");
DBUG_PRINT("enter", ("Mark partition %u as used", part_id));
- bitmap_set_bit(&part_info->used_partitions, part_id);
+ bitmap_set_bit(&part_info->read_partitions, part_id);
DBUG_VOID_RETURN;
}
@@ -4025,7 +4046,7 @@ static void mark_full_partition_used_with_parts(partition_info *part_info,
for (; start != end; start++)
{
DBUG_PRINT("info", ("1:Mark subpartition %u as used", start));
- bitmap_set_bit(&part_info->used_partitions, start);
+ bitmap_set_bit(&part_info->read_partitions, start);
}
DBUG_VOID_RETURN;
}
@@ -4053,7 +4074,7 @@ static int find_used_partitions_imerge_list(PART_PRUNE_PARAM *ppar,
MY_BITMAP all_merges;
uint bitmap_bytes;
my_bitmap_map *bitmap_buf;
- uint n_bits= ppar->part_info->used_partitions.n_bits;
+ uint n_bits= ppar->part_info->read_partitions.n_bits;
bitmap_bytes= bitmap_buffer_size(n_bits);
if (!(bitmap_buf= (my_bitmap_map*) alloc_root(ppar->range_param.mem_root,
bitmap_bytes)))
@@ -4079,14 +4100,15 @@ static int find_used_partitions_imerge_list(PART_PRUNE_PARAM *ppar,
}
if (res != -1)
- bitmap_intersect(&all_merges, &ppar->part_info->used_partitions);
+ bitmap_intersect(&all_merges, &ppar->part_info->read_partitions);
+
if (bitmap_is_clear_all(&all_merges))
return 0;
- bitmap_clear_all(&ppar->part_info->used_partitions);
+ bitmap_clear_all(&ppar->part_info->read_partitions);
}
- memcpy(ppar->part_info->used_partitions.bitmap, all_merges.bitmap,
+ memcpy(ppar->part_info->read_partitions.bitmap, all_merges.bitmap,
bitmap_bytes);
return 1;
}
@@ -4446,7 +4468,7 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree)
{
for (uint i= 0; i < ppar->part_info->num_subparts; i++)
if (bitmap_is_set(&ppar->subparts_bitmap, i))
- bitmap_set_bit(&ppar->part_info->used_partitions,
+ bitmap_set_bit(&ppar->part_info->read_partitions,
part_id * ppar->part_info->num_subparts + i);
}
goto pop_and_go_right;
@@ -4508,7 +4530,7 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree)
while ((part_id= ppar->part_iter.get_next(&ppar->part_iter)) !=
NOT_A_PARTITION_ID)
{
- bitmap_set_bit(&part_info->used_partitions,
+ bitmap_set_bit(&part_info->read_partitions,
part_id * part_info->num_subparts + subpart_id);
}
res= 1; /* Some partitions were marked as used */
@@ -4594,7 +4616,8 @@ pop_and_go_right:
static void mark_all_partitions_as_used(partition_info *part_info)
{
- bitmap_set_all(&part_info->used_partitions);
+ bitmap_copy(&(part_info->read_partitions),
+ &(part_info->lock_partitions));
}
@@ -5147,7 +5170,7 @@ TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge,
{
imerge_trp->read_cost= imerge_cost;
imerge_trp->records= non_cpk_scan_records + cpk_scan_records;
- imerge_trp->records= min(imerge_trp->records,
+ imerge_trp->records= MY_MIN(imerge_trp->records,
param->table->stat_records());
imerge_trp->range_scans= range_scans;
imerge_trp->range_scans_end= range_scans + n_child_scans;
@@ -5737,7 +5760,7 @@ bool prepare_search_best_index_intersect(PARAM *param,
this number by #r.
If we do not make any assumptions then we can only state that
- #r<=min(#r1,#r2).
+ #r<=MY_MIN(#r1,#r2).
With this estimate we can't say that the index intersection scan will be
cheaper than the cheapest index scan.
@@ -5770,7 +5793,7 @@ bool prepare_search_best_index_intersect(PARAM *param,
#rt2_0 of the same range for sub-index idx2_0(dept) of the index idx2.
The current code does not make an estimate either for #rt1_0, or for #rt2_0,
but it can be adjusted to provide those numbers.
- Alternatively, min(rec_per_key) for (dept) could be used to get an upper
+ Alternatively, MY_MIN(rec_per_key) for (dept) could be used to get an upper
bound for the value of sel(Rt1&Rt2). Yet this statistics is not provided
now.
@@ -5781,7 +5804,7 @@ bool prepare_search_best_index_intersect(PARAM *param,
sel(Rt1&Rt2)=sel(dept=5)*sel(last_name='Sm5')*sel(first_name='Robert')
=sel(Rt2)*sel(dept=5)
- Here max(rec_per_key) for (dept) could be used to get an upper bound for
+ Here MY_MAX(rec_per_key) for (dept) could be used to get an upper bound for
the value of sel(Rt1&Rt2).
When the intersected indexes have different major columns, but some
@@ -5834,9 +5857,9 @@ bool prepare_search_best_index_intersect(PARAM *param,
f_1 = rec_per_key[first_name]/rec_per_key[last_name].
The the number of records in the range tree:
Rt_0: (first_name='Robert' OR first_name='Bob')
- for the sub-index (first_name) is not greater than max(#r*f_1, #t).
+ for the sub-index (first_name) is not greater than MY_MAX(#r*f_1, #t).
Strictly speaking, we can state only that it's not greater than
- max(#r*max_f_1, #t), where
+ MY_MAX(#r*max_f_1, #t), where
max_f_1= max_rec_per_key[first_name]/min_rec_per_key[last_name].
Yet, if #r/#t is big enough (and this is the case of an index intersection,
because using this index range with a single index scan is cheaper than
@@ -6292,7 +6315,7 @@ ROR_SCAN_INFO *make_ror_scan(const PARAM *param, int idx, SEL_ARG *sel_arg)
KEY_PART_INFO *key_part= param->table->key_info[keynr].key_part;
KEY_PART_INFO *key_part_end= key_part +
- param->table->key_info[keynr].key_parts;
+ param->table->key_info[keynr].user_defined_key_parts;
for (;key_part != key_part_end; ++key_part)
{
if (bitmap_is_set(&param->needed_fields, key_part->fieldnr-1))
@@ -6965,7 +6988,7 @@ TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param,
for (ROR_SCAN_INFO **scan= tree->ror_scans; scan != ror_scans_end; ++scan)
(*scan)->key_components=
- param->table->key_info[(*scan)->keynr].key_parts;
+ param->table->key_info[(*scan)->keynr].user_defined_key_parts;
/*
Run covering-ROR-search algorithm.
@@ -9073,7 +9096,7 @@ and_all_keys(RANGE_OPT_PARAM *param, SEL_ARG *key1, SEL_ARG *key2,
if (!key1)
return &null_element; // Impossible ranges
key1->use_count++;
- key1->max_part_no= max(key2->max_part_no, key2->part+1);
+ key1->max_part_no= MY_MAX(key2->max_part_no, key2->part+1);
return key1;
}
@@ -9166,7 +9189,7 @@ key_and(RANGE_OPT_PARAM *param, SEL_ARG *key1, SEL_ARG *key2, uint clone_flag)
key1->use_count--;
key2->use_count--;
SEL_ARG *e1=key1->first(), *e2=key2->first(), *new_tree=0;
- uint max_part_no= max(key1->max_part_no, key2->max_part_no);
+ uint max_part_no= MY_MAX(key1->max_part_no, key2->max_part_no);
while (e1 && e2)
{
@@ -9364,7 +9387,7 @@ key_or(RANGE_OPT_PARAM *param, SEL_ARG *key1,SEL_ARG *key2)
b: [----
*/
- uint max_part_no= max(key1->max_part_no, key2->max_part_no);
+ uint max_part_no= MY_MAX(key1->max_part_no, key2->max_part_no);
for (key2=key2->first(); key2; )
{
@@ -9574,11 +9597,11 @@ key_or(RANGE_OPT_PARAM *param, SEL_ARG *key1,SEL_ARG *key2)
are merged into one range by deleting first...last-1 from
the key1 tree. In the figure, this applies to first and the
two consecutive ranges. The range of last is then extended:
- * last.min: Set to min(key2.min, first.min)
+ * last.min: Set to MY_MIN(key2.min, first.min)
* last.max: If there is a last->next that overlaps key2 (i.e.,
last->next has a different next_key_part):
Set adjacent to last->next.min
- Otherwise: Set to max(key2.max, last.max)
+ Otherwise: Set to MY_MAX(key2.max, last.max)
Result:
key2: [****----------------------*******]
@@ -9632,7 +9655,7 @@ key_or(RANGE_OPT_PARAM *param, SEL_ARG *key1,SEL_ARG *key2)
^ ^
last different next_key_part
- Extend range of last up to max(last.max, key2.max):
+ Extend range of last up to MY_MAX(last.max, key2.max):
key2: [--------*****]
key1: [***----------**] [xxxx]
*/
@@ -10473,7 +10496,7 @@ ha_rows check_quick_select(PARAM *param, uint idx, bool index_only,
param->table->quick_key_parts[keynr]= param->max_key_part+1;
param->table->quick_n_ranges[keynr]= param->range_count;
param->table->quick_condition_rows=
- min(param->table->quick_condition_rows, rows);
+ MY_MIN(param->table->quick_condition_rows, rows);
param->table->quick_rows[keynr]= rows;
}
}
@@ -10551,7 +10574,7 @@ static bool is_key_scan_ror(PARAM *param, uint keynr, uint8 nparts)
KEY *table_key= param->table->key_info + keynr;
KEY_PART_INFO *key_part= table_key->key_part + nparts;
KEY_PART_INFO *key_part_end= (table_key->key_part +
- table_key->key_parts);
+ table_key->user_defined_key_parts);
uint pk_number;
for (KEY_PART_INFO *kp= table_key->key_part; kp < key_part; kp++)
@@ -10572,7 +10595,7 @@ static bool is_key_scan_ror(PARAM *param, uint keynr, uint8 nparts)
KEY_PART_INFO *pk_part= param->table->key_info[pk_number].key_part;
KEY_PART_INFO *pk_part_end= pk_part +
- param->table->key_info[pk_number].key_parts;
+ param->table->key_info[pk_number].user_defined_key_parts;
for (;(key_part!=key_part_end) && (pk_part != pk_part_end);
++key_part, ++pk_part)
{
@@ -10733,7 +10756,7 @@ get_quick_keys(PARAM *param,QUICK_RANGE_SELECT *quick,KEY_PART *key,
{
KEY *table_key=quick->head->key_info+quick->index;
flag=EQ_RANGE;
- if ((table_key->flags & HA_NOSAME) && key->part == table_key->key_parts-1)
+ if ((table_key->flags & HA_NOSAME) && key->part == table_key->user_defined_key_parts-1)
{
if (!(table_key->flags & HA_NULL_PART_KEY) ||
!null_part_in_key(key,
@@ -11769,7 +11792,7 @@ int QUICK_SELECT_DESC::get_next()
if (last_range)
{ // Already read through key
result = ((last_range->flag & EQ_RANGE &&
- used_key_parts <= head->key_info[index].key_parts) ?
+ used_key_parts <= head->key_info[index].user_defined_key_parts) ?
file->ha_index_next_same(record, last_range->min_key,
last_range->min_length) :
file->ha_index_prev(record));
@@ -11797,7 +11820,7 @@ int QUICK_SELECT_DESC::get_next()
}
if (last_range->flag & EQ_RANGE &&
- used_key_parts <= head->key_info[index].key_parts)
+ used_key_parts <= head->key_info[index].user_defined_key_parts)
{
result= file->ha_index_read_map(record, last_range->max_key,
@@ -11808,7 +11831,7 @@ int QUICK_SELECT_DESC::get_next()
{
DBUG_ASSERT(last_range->flag & NEAR_MAX ||
(last_range->flag & EQ_RANGE &&
- used_key_parts > head->key_info[index].key_parts) ||
+ used_key_parts > head->key_info[index].user_defined_key_parts) ||
range_reads_after_key(last_range));
result= file->ha_index_read_map(record, last_range->max_key,
last_range->max_keypart_map,
@@ -12258,7 +12281,7 @@ cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
TODO
- What happens if the query groups by the MIN/MAX field, and there is no
- other field as in: "select min(a) from t1 group by a" ?
+ other field as in: "select MY_MIN(a) from t1 group by a" ?
- We assume that the general correctness of the GROUP-BY query was checked
before this point. Is this correct, or do we have to check it completely?
- Lift the limitation in condition (B3), that is, make this access method
@@ -12425,7 +12448,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
does not qualify as covering in our case. If this is the case, below
we check that all query fields are indeed covered by 'cur_index'.
*/
- if (cur_index_info->key_parts == table->actual_n_key_parts(cur_index_info)
+ if (cur_index_info->user_defined_key_parts == table->actual_n_key_parts(cur_index_info)
&& pk < MAX_KEY && cur_index != pk &&
(table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX))
{
@@ -12526,7 +12549,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
cur_group_prefix_len+= cur_part->store_length;
used_key_parts_map.set_bit(key_part_nr);
++cur_group_key_parts;
- max_key_part= max(max_key_part,key_part_nr);
+ max_key_part= MY_MAX(max_key_part,key_part_nr);
}
/*
Check that used key parts forms a prefix of the index.
@@ -13312,9 +13335,9 @@ void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
{
double blocks_per_group= (double) num_blocks / (double) num_groups;
p_overlap= (blocks_per_group * (keys_per_subgroup - 1)) / keys_per_group;
- p_overlap= min(p_overlap, 1.0);
+ p_overlap= MY_MIN(p_overlap, 1.0);
}
- io_cost= (double) min(num_groups * (1 + p_overlap), num_blocks);
+ io_cost= (double) MY_MIN(num_groups * (1 + p_overlap), num_blocks);
}
else
io_cost= (keys_per_group > keys_per_block) ?
diff --git a/sql/opt_range.h b/sql/opt_range.h
index ccddd40686c..3dbdce00e9d 100644
--- a/sql/opt_range.h
+++ b/sql/opt_range.h
@@ -104,7 +104,7 @@ class QUICK_RANGE :public Sql_alloc {
void make_min_endpoint(key_range *kr, uint prefix_length,
key_part_map keypart_map) {
make_min_endpoint(kr);
- kr->length= min(kr->length, prefix_length);
+ kr->length= MY_MIN(kr->length, prefix_length);
kr->keypart_map&= keypart_map;
}
@@ -142,7 +142,7 @@ class QUICK_RANGE :public Sql_alloc {
void make_max_endpoint(key_range *kr, uint prefix_length,
key_part_map keypart_map) {
make_max_endpoint(kr);
- kr->length= min(kr->length, prefix_length);
+ kr->length= MY_MIN(kr->length, prefix_length);
kr->keypart_map&= keypart_map;
}
diff --git a/sql/opt_range_mrr.cc b/sql/opt_range_mrr.cc
index 8029dbf000f..bff96c7d4cb 100644
--- a/sql/opt_range_mrr.cc
+++ b/sql/opt_range_mrr.cc
@@ -270,7 +270,7 @@ walk_up_n_right:
if (!(cur->min_key_flag & ~NULL_RANGE) && !cur->max_key_flag &&
(seq->real_keyno == MAX_KEY ||
((uint)key_tree->part+1 ==
- seq->param->table->key_info[seq->real_keyno].key_parts &&
+ seq->param->table->key_info[seq->real_keyno].user_defined_key_parts &&
(seq->param->table->key_info[seq->real_keyno].flags & HA_NOSAME))) &&
range->start_key.length == range->end_key.length &&
!memcmp(seq->param->min_key,seq->param->max_key,range->start_key.length))
@@ -295,7 +295,7 @@ walk_up_n_right:
}
}
seq->param->range_count++;
- seq->param->max_key_part=max(seq->param->max_key_part,key_tree->part);
+ seq->param->max_key_part=MY_MAX(seq->param->max_key_part,key_tree->part);
return 0;
}
diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc
index 28d802375e2..7d6d58a3414 100644
--- a/sql/opt_subselect.cc
+++ b/sql/opt_subselect.cc
@@ -2185,7 +2185,7 @@ bool optimize_semijoin_nests(JOIN *join, table_map all_table_map)
double rows= 1.0;
while ((tableno = tm_it.next_bit()) != Table_map_iterator::BITMAP_END)
rows *= join->map2table[tableno]->table->quick_condition_rows;
- sjm->rows= min(sjm->rows, rows);
+ sjm->rows= MY_MIN(sjm->rows, rows);
}
memcpy(sjm->positions, join->best_positions + join->const_tables,
sizeof(POSITION) * n_tables);
@@ -2380,7 +2380,7 @@ bool find_eq_ref_candidate(TABLE *table, table_map sj_inner_tables)
keyuse++;
} while (keyuse->key == key && keyuse->table == table);
- if (bound_parts == PREV_BITS(uint, keyinfo->key_parts))
+ if (bound_parts == PREV_BITS(uint, keyinfo->user_defined_key_parts))
return TRUE;
}
else
@@ -3544,7 +3544,7 @@ bool setup_sj_materialization_part2(JOIN_TAB *sjm_tab)
KEY *tmp_key; /* The only index on the temporary table. */
uint tmp_key_parts; /* Number of keyparts in tmp_key. */
tmp_key= sjm->table->key_info;
- tmp_key_parts= tmp_key->key_parts;
+ tmp_key_parts= tmp_key->user_defined_key_parts;
/*
Create/initialize everything we will need to index lookups into the
@@ -3942,7 +3942,6 @@ SJ_TMP_TABLE::create_sj_weedout_tmp_table(THD *thd)
table->s= share;
init_tmp_table_share(thd, share, "", 0, tmpname, tmpname);
share->blob_field= blob_field;
- share->blob_ptr_size= portable_sizeof_char_ptr;
share->table_charset= NULL;
share->primary_key= MAX_KEY; // Indicate no primary key
share->keys_for_keyread.init();
@@ -3995,6 +3994,12 @@ SJ_TMP_TABLE::create_sj_weedout_tmp_table(THD *thd)
if (!table->file)
goto err;
+ if (table->file->set_ha_share_ref(&share->ha_share))
+ {
+ delete table->file;
+ goto err;
+ }
+
null_count=1;
null_pack_length= 1;
@@ -4064,7 +4069,7 @@ SJ_TMP_TABLE::create_sj_weedout_tmp_table(THD *thd)
share->max_rows= ~(ha_rows) 0;
else
share->max_rows= (ha_rows) (((share->db_type() == heap_hton) ?
- min(thd->variables.tmp_table_size,
+ MY_MIN(thd->variables.tmp_table_size,
thd->variables.max_heap_table_size) :
thd->variables.tmp_table_size) /
share->reclength);
@@ -4080,7 +4085,7 @@ SJ_TMP_TABLE::create_sj_weedout_tmp_table(THD *thd)
table->key_info=keyinfo;
keyinfo->key_part=key_part_info;
keyinfo->flags=HA_NOSAME;
- keyinfo->usable_key_parts= keyinfo->key_parts= 1;
+ keyinfo->usable_key_parts= keyinfo->user_defined_key_parts= 1;
keyinfo->key_length=0;
keyinfo->rec_per_key=0;
keyinfo->algorithm= HA_KEY_ALG_UNDEF;
@@ -5180,7 +5185,7 @@ bool setup_jtbm_semi_joins(JOIN *join, List<TABLE_LIST> *join_list,
0 or 1 record. Examples of both cases:
select * from ot where col in (select ... from it where 2>3)
- select * from ot where col in (select min(it.key) from it)
+ select * from ot where col in (select MY_MIN(it.key) from it)
in this case, the subquery predicate has not been setup for
materialization. In particular, there is no materialized temp.table.
diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc
index 069fe6452e8..b8d39057ba8 100644
--- a/sql/opt_sum.cc
+++ b/sql/opt_sum.cc
@@ -464,7 +464,7 @@ int opt_sum_query(THD *thd,
}
if (thd->is_error())
- DBUG_RETURN(thd->stmt_da->sql_errno());
+ DBUG_RETURN(thd->get_stmt_da()->sql_errno());
/*
If we have a where clause, we can only ignore searching in the
diff --git a/sql/opt_table_elimination.cc b/sql/opt_table_elimination.cc
index 1542efa7415..7454e756416 100644
--- a/sql/opt_table_elimination.cc
+++ b/sql/opt_table_elimination.cc
@@ -328,7 +328,7 @@ const size_t Dep_value_table::iterator_size=
ALIGN_SIZE(sizeof(Dep_value_table::Module_iter));
const size_t Dep_value::iterator_size=
- max(Dep_value_table::iterator_size, Dep_value_field::iterator_size);
+ MY_MAX(Dep_value_table::iterator_size, Dep_value_field::iterator_size);
/*
@@ -441,7 +441,7 @@ const size_t Dep_module_key::iterator_size=
ALIGN_SIZE(sizeof(Dep_module_key::Value_iter));
const size_t Dep_module::iterator_size=
- max(Dep_module_expr::iterator_size, Dep_module_key::iterator_size);
+ MY_MAX(Dep_module_expr::iterator_size, Dep_module_key::iterator_size);
/*
@@ -1563,7 +1563,7 @@ Dep_value_table *Dep_analysis_context::create_table_value(TABLE *table)
if (key->flags & HA_NOSAME)
{
Dep_module_key *key_dep;
- if (!(key_dep= new Dep_module_key(tbl_dep, i, key->key_parts)))
+ if (!(key_dep= new Dep_module_key(tbl_dep, i, key->user_defined_key_parts)))
return NULL;
*key_list= key_dep;
key_list= &(key_dep->next_table_key);
diff --git a/sql/partition_info.cc b/sql/partition_info.cc
index 34e47331664..6556d50b218 100644
--- a/sql/partition_info.cc
+++ b/sql/partition_info.cc
@@ -22,10 +22,12 @@
#include "sql_priv.h"
// Required to get server definitions for mysql/plugin.h right
#include "sql_plugin.h"
-#include "sql_partition.h" /* partition_info.h: LIST_PART_ENTRY */
+#include "sql_partition.h" // partition_info.h: LIST_PART_ENTRY
+ // NOT_A_PARTITION_ID
#include "partition_info.h"
#include "sql_parse.h" // test_if_data_home_dir
#include "sql_acl.h" // *_ACL
+#include "sql_base.h" // fill_record
#ifdef WITH_PARTITION_STORAGE_ENGINE
#include "ha_partition.h"
@@ -33,17 +35,21 @@
partition_info *partition_info::get_clone()
{
+ DBUG_ENTER("partition_info::get_clone");
if (!this)
- return 0;
+ DBUG_RETURN(NULL);
List_iterator<partition_element> part_it(partitions);
partition_element *part;
partition_info *clone= new partition_info();
if (!clone)
{
mem_alloc_error(sizeof(partition_info));
- return NULL;
+ DBUG_RETURN(NULL);
}
memcpy(clone, this, sizeof(partition_info));
+ memset(&(clone->read_partitions), 0, sizeof(clone->read_partitions));
+ memset(&(clone->lock_partitions), 0, sizeof(clone->lock_partitions));
+ clone->bitmaps_are_initialized= FALSE;
clone->partitions.empty();
while ((part= (part_it++)))
@@ -54,7 +60,7 @@ partition_info *partition_info::get_clone()
if (!part_clone)
{
mem_alloc_error(sizeof(partition_element));
- return NULL;
+ DBUG_RETURN(NULL);
}
memcpy(part_clone, part, sizeof(partition_element));
part_clone->subpartitions.empty();
@@ -64,16 +70,427 @@ partition_info *partition_info::get_clone()
if (!subpart_clone)
{
mem_alloc_error(sizeof(partition_element));
- return NULL;
+ DBUG_RETURN(NULL);
}
memcpy(subpart_clone, subpart, sizeof(partition_element));
part_clone->subpartitions.push_back(subpart_clone);
}
clone->partitions.push_back(part_clone);
}
- return clone;
+ DBUG_RETURN(clone);
+}
+
+/**
+ Mark named [sub]partition to be used/locked.
+
+ @param part_name Partition name to match.
+ @param length Partition name length.
+
+ @return Success if partition found
+ @retval true Partition found
+ @retval false Partition not found
+*/
+
+bool partition_info::add_named_partition(const char *part_name,
+ uint length)
+{
+ HASH *part_name_hash;
+ PART_NAME_DEF *part_def;
+ Partition_share *part_share;
+ DBUG_ENTER("partition_info::add_named_partition");
+ DBUG_ASSERT(table && table->s && table->s->ha_share);
+ part_share= static_cast<Partition_share*>((table->s->ha_share));
+ DBUG_ASSERT(part_share->partition_name_hash_initialized);
+ part_name_hash= &part_share->partition_name_hash;
+ DBUG_ASSERT(part_name_hash->records);
+
+ part_def= (PART_NAME_DEF*) my_hash_search(part_name_hash,
+ (const uchar*) part_name,
+ length);
+ if (!part_def)
+ {
+ my_error(ER_UNKNOWN_PARTITION, MYF(0), part_name, table->alias.c_ptr());
+ DBUG_RETURN(true);
+ }
+
+ if (part_def->is_subpart)
+ {
+ bitmap_set_bit(&read_partitions, part_def->part_id);
+ }
+ else
+ {
+ if (is_sub_partitioned())
+ {
+ /* Mark all subpartitions in the partition */
+ uint j, start= part_def->part_id;
+ uint end= start + num_subparts;
+ for (j= start; j < end; j++)
+ bitmap_set_bit(&read_partitions, j);
+ }
+ else
+ bitmap_set_bit(&read_partitions, part_def->part_id);
+ }
+ DBUG_PRINT("info", ("Found partition %u is_subpart %d for name %s",
+ part_def->part_id, part_def->is_subpart,
+ part_name));
+ DBUG_RETURN(false);
}
+
+/**
+ Mark named [sub]partition to be used/locked.
+
+ @param part_elem Partition element that matched.
+*/
+
+bool partition_info::set_named_partition_bitmap(const char *part_name,
+ uint length)
+{
+ DBUG_ENTER("partition_info::set_named_partition_bitmap");
+ bitmap_clear_all(&read_partitions);
+ if (add_named_partition(part_name, length))
+ DBUG_RETURN(true);
+ bitmap_copy(&lock_partitions, &read_partitions);
+ DBUG_RETURN(false);
+}
+
+
+
+/**
+ Prune away partitions not mentioned in the PARTITION () clause,
+ if used.
+
+ @param table_list Table list pointing to table to prune.
+
+ @return Operation status
+ @retval true Failure
+ @retval false Success
+*/
+bool partition_info::prune_partition_bitmaps(TABLE_LIST *table_list)
+{
+ List_iterator<String> partition_names_it(*(table_list->partition_names));
+ uint num_names= table_list->partition_names->elements;
+ uint i= 0;
+ DBUG_ENTER("partition_info::prune_partition_bitmaps");
+
+ if (num_names < 1)
+ DBUG_RETURN(true);
+
+ /*
+ TODO: When adding support for FK in partitioned tables, the referenced
+ table must probably lock all partitions for read, and also write depending
+ of ON DELETE/UPDATE.
+ */
+ bitmap_clear_all(&read_partitions);
+
+ /* No check for duplicate names or overlapping partitions/subpartitions. */
+
+ DBUG_PRINT("info", ("Searching through partition_name_hash"));
+ do
+ {
+ String *part_name_str= partition_names_it++;
+ if (add_named_partition(part_name_str->c_ptr(), part_name_str->length()))
+ DBUG_RETURN(true);
+ } while (++i < num_names);
+ DBUG_RETURN(false);
+}
+
+
+/**
+ Set read/lock_partitions bitmap over non pruned partitions
+
+ @param table_list Possible TABLE_LIST which can contain
+ list of partition names to query
+
+ @return Operation status
+ @retval FALSE OK
+ @retval TRUE Failed to allocate memory for bitmap or list of partitions
+ did not match
+
+ @note OK to call multiple times without the need for free_bitmaps.
+*/
+
+bool partition_info::set_partition_bitmaps(TABLE_LIST *table_list)
+{
+ DBUG_ENTER("partition_info::set_partition_bitmaps");
+
+ DBUG_ASSERT(bitmaps_are_initialized);
+ DBUG_ASSERT(table);
+ is_pruning_completed= false;
+ if (!bitmaps_are_initialized)
+ DBUG_RETURN(TRUE);
+
+ if (table_list &&
+ table_list->partition_names &&
+ table_list->partition_names->elements)
+ {
+ if (table->s->db_type()->partition_flags() & HA_USE_AUTO_PARTITION)
+ {
+ /*
+ Don't allow PARTITION () clause on a NDB tables yet.
+ TODO: Add partition name handling to NDB/partition_info.
+ which is currently ha_partition specific.
+ */
+ my_error(ER_PARTITION_CLAUSE_ON_NONPARTITIONED, MYF(0));
+ DBUG_RETURN(true);
+ }
+ if (prune_partition_bitmaps(table_list))
+ DBUG_RETURN(TRUE);
+ }
+ else
+ {
+ bitmap_set_all(&read_partitions);
+ DBUG_PRINT("info", ("Set all partitions"));
+ }
+ bitmap_copy(&lock_partitions, &read_partitions);
+ DBUG_ASSERT(bitmap_get_first_set(&lock_partitions) != MY_BIT_NONE);
+ DBUG_RETURN(FALSE);
+}
+
+
+/**
+ Checks if possible to do prune partitions on insert.
+
+ @param thd Thread context
+ @param duplic How to handle duplicates
+ @param update In case of ON DUPLICATE UPDATE, default function fields
+ @param update_fields In case of ON DUPLICATE UPDATE, which fields to update
+ @param fields Listed fields
+ @param empty_values True if values is empty (only defaults)
+ @param[out] prune_needs_default_values Set on return if copying of default
+ values is needed
+ @param[out] can_prune_partitions Enum showing if possible to prune
+ @param[inout] used_partitions If possible to prune the bitmap
+ is initialized and cleared
+
+ @return Operation status
+ @retval false Success
+ @retval true Failure
+*/
+
+bool partition_info::can_prune_insert(THD* thd,
+ enum_duplicates duplic,
+ COPY_INFO &update,
+ List<Item> &update_fields,
+ List<Item> &fields,
+ bool empty_values,
+ enum_can_prune *can_prune_partitions,
+ bool *prune_needs_default_values,
+ MY_BITMAP *used_partitions)
+{
+ uint32 *bitmap_buf;
+ uint bitmap_bytes;
+ uint num_partitions= 0;
+ *can_prune_partitions= PRUNE_NO;
+ DBUG_ASSERT(bitmaps_are_initialized);
+ DBUG_ENTER("partition_info::can_prune_insert");
+
+ if (table->s->db_type()->partition_flags() & HA_USE_AUTO_PARTITION)
+ DBUG_RETURN(false); /* Should not insert prune NDB tables */
+
+ /*
+ If under LOCK TABLES pruning will skip start_stmt instead of external_lock
+ for unused partitions.
+
+ Cannot prune if there are BEFORE INSERT triggers that changes any
+ partitioning column, since they may change the row to be in another
+ partition.
+ */
+ if (table->triggers &&
+ table->triggers->has_triggers(TRG_EVENT_INSERT, TRG_ACTION_BEFORE) &&
+ table->triggers->is_fields_updated_in_trigger(&full_part_field_set,
+ TRG_EVENT_INSERT,
+ TRG_ACTION_BEFORE))
+ DBUG_RETURN(false);
+
+ if (table->found_next_number_field)
+ {
+ /*
+ If the field is used in the partitioning expression, we cannot prune.
+ TODO: If all rows have not null values and
+ is not 0 (with NO_AUTO_VALUE_ON_ZERO sql_mode), then pruning is possible!
+ */
+ if (bitmap_is_set(&full_part_field_set,
+ table->found_next_number_field->field_index))
+ DBUG_RETURN(false);
+ }
+
+ /*
+ If updating a field in the partitioning expression, we cannot prune.
+
+ Note: TIMESTAMP_AUTO_SET_ON_INSERT is handled by converting Item_null
+ to the start time of the statement. Which will be the same as in
+ write_row(). So pruning of TIMESTAMP DEFAULT CURRENT_TIME will work.
+ But TIMESTAMP_AUTO_SET_ON_UPDATE cannot be pruned if the timestamp
+ column is a part of any part/subpart expression.
+ */
+ if (duplic == DUP_UPDATE)
+ {
+ /*
+ TODO: add check for static update values, which can be pruned.
+ */
+ if (is_field_in_part_expr(update_fields))
+ DBUG_RETURN(false);
+
+ /*
+ Cannot prune if there are BEFORE UPDATE triggers that changes any
+ partitioning column, since they may change the row to be in another
+ partition.
+ */
+ if (table->triggers &&
+ table->triggers->has_triggers(TRG_EVENT_UPDATE,
+ TRG_ACTION_BEFORE) &&
+ table->triggers->is_fields_updated_in_trigger(&full_part_field_set,
+ TRG_EVENT_UPDATE,
+ TRG_ACTION_BEFORE))
+ {
+ DBUG_RETURN(false);
+ }
+ }
+
+ /*
+ If not all partitioning fields are given,
+ we also must set all non given partitioning fields
+ to get correct defaults.
+ TODO: If any gain, we could enhance this by only copy the needed default
+ fields by
+ 1) check which fields needs to be set.
+ 2) only copy those fields from the default record.
+ */
+ *prune_needs_default_values= false;
+ if (fields.elements)
+ {
+ if (!is_full_part_expr_in_fields(fields))
+ *prune_needs_default_values= true;
+ }
+ else if (empty_values)
+ {
+ *prune_needs_default_values= true; // like 'INSERT INTO t () VALUES ()'
+ }
+ else
+ {
+ /*
+ In case of INSERT INTO t VALUES (...) we must get values for
+ all fields in table from VALUES (...) part, so no defaults
+ are needed.
+ */
+ }
+
+ /* Pruning possible, have to initialize the used_partitions bitmap. */
+ num_partitions= lock_partitions.n_bits;
+ bitmap_bytes= bitmap_buffer_size(num_partitions);
+ if (!(bitmap_buf= (uint32*) thd->alloc(bitmap_bytes)))
+ {
+ mem_alloc_error(bitmap_bytes);
+ DBUG_RETURN(true);
+ }
+ /* Also clears all bits. */
+ if (bitmap_init(used_partitions, bitmap_buf, num_partitions, false))
+ {
+ /* purecov: begin deadcode */
+ /* Cannot happen, due to pre-alloc. */
+ mem_alloc_error(bitmap_bytes);
+ DBUG_RETURN(true);
+ /* purecov: end */
+ }
+ /*
+ If no partitioning field in set (e.g. defaults) check pruning only once.
+ */
+ if (fields.elements &&
+ !is_field_in_part_expr(fields))
+ *can_prune_partitions= PRUNE_DEFAULTS;
+ else
+ *can_prune_partitions= PRUNE_YES;
+
+ DBUG_RETURN(false);
+}
+
+
+/**
+ Mark the partition, the record belongs to, as used.
+
+ @param fields Fields to set
+ @param values Values to use
+ @param info COPY_INFO used for default values handling
+ @param copy_default_values True if we should copy default values
+ @param used_partitions Bitmap to set
+
+ @returns Operational status
+ @retval false Success
+ @retval true Failure
+*/
+
+bool partition_info::set_used_partition(List<Item> &fields,
+ List<Item> &values,
+ COPY_INFO &info,
+ bool copy_default_values,
+ MY_BITMAP *used_partitions)
+{
+ THD *thd= table->in_use;
+ uint32 part_id;
+ longlong func_value;
+ Dummy_error_handler error_handler;
+ bool ret= true;
+ DBUG_ENTER("set_partition");
+ DBUG_ASSERT(thd);
+
+ /* Only allow checking of constant values */
+ List_iterator_fast<Item> v(values);
+ Item *item;
+ thd->push_internal_handler(&error_handler);
+ while ((item= v++))
+ {
+ if (!item->const_item())
+ goto err;
+ }
+
+ if (copy_default_values)
+ restore_record(table,s->default_values);
+
+ if (fields.elements || !values.elements)
+ {
+ if (fill_record(thd, table, fields, values, false))
+ goto err;
+ }
+ else
+ {
+ if (fill_record(thd, table, table->field, values, false, false))
+ goto err;
+ }
+ DBUG_ASSERT(!table->auto_increment_field_not_null);
+
+ /*
+ Evaluate DEFAULT functions like CURRENT_TIMESTAMP.
+ TODO: avoid setting non partitioning fields default value, to avoid
+ overhead. Not yet done, since mostly only one DEFAULT function per
+ table, or at least very few such columns.
+ */
+// if (info.function_defaults_apply_on_columns(&full_part_field_set))
+// info.set_function_defaults(table);
+
+ {
+ /*
+ This function is used in INSERT; 'values' are supplied by user,
+ or are default values, not values read from a table, so read_set is
+ irrelevant.
+ */
+ my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set);
+ const int rc= get_partition_id(this, &part_id, &func_value);
+ dbug_tmp_restore_column_map(table->read_set, old_map);
+ if (rc)
+ goto err;
+ }
+
+ DBUG_PRINT("info", ("Insert into partition %u", part_id));
+ bitmap_set_bit(used_partitions, part_id);
+ ret= false;
+
+err:
+ thd->pop_internal_handler();
+ DBUG_RETURN(ret);
+}
+
+
/*
Create a memory area where default partition names are stored and fill it
up with the names.
@@ -159,8 +576,9 @@ void partition_info::set_show_version_string(String *packet)
/*
Create a unique name for the subpartition as part_name'sp''subpart_no'
+
SYNOPSIS
- create_subpartition_name()
+ create_default_subpartition_name()
subpart_no Number of subpartition
part_name Name of partition
RETURN VALUES
@@ -168,12 +586,12 @@ void partition_info::set_show_version_string(String *packet)
0 Memory allocation error
*/
-char *partition_info::create_subpartition_name(uint subpart_no,
+char *partition_info::create_default_subpartition_name(uint subpart_no,
const char *part_name)
{
uint size_alloc= strlen(part_name) + MAX_PART_NAME_SIZE;
char *ptr= (char*) sql_calloc(size_alloc);
- DBUG_ENTER("create_subpartition_name");
+ DBUG_ENTER("create_default_subpartition_name");
if (likely(ptr != NULL))
{
@@ -319,7 +737,8 @@ bool partition_info::set_up_default_subpartitions(handler *file,
if (likely(subpart_elem != 0 &&
(!part_elem->subpartitions.push_back(subpart_elem))))
{
- char *ptr= create_subpartition_name(j, part_elem->partition_name);
+ char *ptr= create_default_subpartition_name(j,
+ part_elem->partition_name);
if (!ptr)
goto end;
subpart_elem->engine_type= default_engine_type;
@@ -379,7 +798,7 @@ bool partition_info::set_up_defaults_for_partitioning(handler *file,
Support routine for check_partition_info
SYNOPSIS
- has_unique_fields
+ find_duplicate_field
no parameters
RETURN VALUE
@@ -390,13 +809,13 @@ bool partition_info::set_up_defaults_for_partitioning(handler *file,
Check that the user haven't defined the same field twice in
key or column list partitioning.
*/
-char* partition_info::has_unique_fields()
+char* partition_info::find_duplicate_field()
{
char *field_name_outer, *field_name_inner;
List_iterator<char> it_outer(part_field_list);
uint num_fields= part_field_list.elements;
uint i,j;
- DBUG_ENTER("partition_info::has_unique_fields");
+ DBUG_ENTER("partition_info::find_duplicate_field");
for (i= 0; i < num_fields; i++)
{
@@ -418,6 +837,152 @@ char* partition_info::has_unique_fields()
DBUG_RETURN(NULL);
}
+
+/**
+ @brief Get part_elem and part_id from partition name
+
+ @param partition_name Name of partition to search for.
+ @param file_name[out] Partition file name (part after table name,
+ #P#<part>[#SP#<subpart>]), skipped if NULL.
+ @param part_id[out] Id of found partition or NOT_A_PARTITION_ID.
+
+ @retval Pointer to part_elem of [sub]partition, if not found NULL
+
+ @note Since names of partitions AND subpartitions must be unique,
+ this function searches both partitions and subpartitions and if name of
+ a partition is given for a subpartitioned table, part_elem will be
+ the partition, but part_id will be NOT_A_PARTITION_ID and file_name not set.
+*/
+partition_element *partition_info::get_part_elem(const char *partition_name,
+ char *file_name,
+ uint32 *part_id)
+{
+ List_iterator<partition_element> part_it(partitions);
+ uint i= 0;
+ DBUG_ENTER("partition_info::get_part_elem");
+ DBUG_ASSERT(part_id);
+ *part_id= NOT_A_PARTITION_ID;
+ do
+ {
+ partition_element *part_elem= part_it++;
+ if (is_sub_partitioned())
+ {
+ List_iterator<partition_element> sub_part_it(part_elem->subpartitions);
+ uint j= 0;
+ do
+ {
+ partition_element *sub_part_elem= sub_part_it++;
+ if (!my_strcasecmp(system_charset_info,
+ sub_part_elem->partition_name, partition_name))
+ {
+ if (file_name)
+ create_subpartition_name(file_name, "",
+ part_elem->partition_name,
+ partition_name,
+ NORMAL_PART_NAME);
+ *part_id= j + (i * num_subparts);
+ DBUG_RETURN(sub_part_elem);
+ }
+ } while (++j < num_subparts);
+
+ /* Naming a partition (first level) on a subpartitioned table. */
+ if (!my_strcasecmp(system_charset_info,
+ part_elem->partition_name, partition_name))
+ DBUG_RETURN(part_elem);
+ }
+ else if (!my_strcasecmp(system_charset_info,
+ part_elem->partition_name, partition_name))
+ {
+ if (file_name)
+ create_partition_name(file_name, "", partition_name,
+ NORMAL_PART_NAME, TRUE);
+ *part_id= i;
+ DBUG_RETURN(part_elem);
+ }
+ } while (++i < num_parts);
+ DBUG_RETURN(NULL);
+}
+
+
+/**
+ Helper function to find_duplicate_name.
+*/
+
+static const char *get_part_name_from_elem(const char *name, size_t *length,
+ my_bool not_used __attribute__((unused)))
+{
+ *length= strlen(name);
+ return name;
+}
+
+/*
+ A support function to check partition names for duplication in a
+ partitioned table
+
+ SYNOPSIS
+ find_duplicate_name()
+
+ RETURN VALUES
+ NULL Has unique part and subpart names
+ !NULL Pointer to duplicated name
+
+ DESCRIPTION
+ Checks that the list of names in the partitions doesn't contain any
+ duplicated names.
+*/
+
+char *partition_info::find_duplicate_name()
+{
+ HASH partition_names;
+ uint max_names;
+ const uchar *curr_name= NULL;
+ List_iterator<partition_element> parts_it(partitions);
+ partition_element *p_elem;
+
+ DBUG_ENTER("partition_info::find_duplicate_name");
+
+ /*
+ TODO: If table->s->ha_part_data->partition_name_hash.elements is > 0,
+ then we could just return NULL, but that has not been verified.
+ And this only happens when in ALTER TABLE with full table copy.
+ */
+
+ max_names= num_parts;
+ if (is_sub_partitioned())
+ max_names+= num_parts * num_subparts;
+ if (my_hash_init(&partition_names, system_charset_info, max_names, 0, 0,
+ (my_hash_get_key) get_part_name_from_elem, 0, HASH_UNIQUE))
+ {
+ DBUG_ASSERT(0);
+ curr_name= (const uchar*) "Internal failure";
+ goto error;
+ }
+ while ((p_elem= (parts_it++)))
+ {
+ curr_name= (const uchar*) p_elem->partition_name;
+ if (my_hash_insert(&partition_names, curr_name))
+ goto error;
+
+ if (!p_elem->subpartitions.is_empty())
+ {
+ List_iterator<partition_element> subparts_it(p_elem->subpartitions);
+ partition_element *subp_elem;
+ while ((subp_elem= (subparts_it++)))
+ {
+ curr_name= (const uchar*) subp_elem->partition_name;
+ if (my_hash_insert(&partition_names, curr_name))
+ goto error;
+ }
+ }
+ }
+ my_hash_free(&partition_names);
+ DBUG_RETURN(NULL);
+error:
+ my_hash_free(&partition_names);
+ DBUG_RETURN((char*) curr_name);
+}
+
+
/*
A support function to check if a partition element's name is unique
@@ -461,49 +1026,6 @@ bool partition_info::has_unique_name(partition_element *element)
/*
- A support function to check partition names for duplication in a
- partitioned table
-
- SYNOPSIS
- has_unique_names()
-
- RETURN VALUES
- TRUE Has unique part and subpart names
- FALSE Doesn't
-
- DESCRIPTION
- Checks that the list of names in the partitions doesn't contain any
- duplicated names.
-*/
-
-char *partition_info::has_unique_names()
-{
- DBUG_ENTER("partition_info::has_unique_names");
-
- List_iterator<partition_element> parts_it(partitions);
-
- partition_element *el;
- while ((el= (parts_it++)))
- {
- if (! has_unique_name(el))
- DBUG_RETURN(el->partition_name);
-
- if (!el->subpartitions.is_empty())
- {
- List_iterator<partition_element> subparts_it(el->subpartitions);
- partition_element *subel;
- while ((subel= (subparts_it++)))
- {
- if (! has_unique_name(subel))
- DBUG_RETURN(subel->partition_name);
- }
- }
- }
- DBUG_RETURN(NULL);
-}
-
-
-/*
Check that the partition/subpartition is setup to use the correct
storage engine
SYNOPSIS
@@ -1057,11 +1579,11 @@ static void warn_if_dir_in_part_elem(THD *thd, partition_element *part_elem)
#endif
{
if (part_elem->data_file_name)
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
WARN_OPTION_IGNORED, ER(WARN_OPTION_IGNORED),
"DATA DIRECTORY");
if (part_elem->index_file_name)
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
WARN_OPTION_IGNORED, ER(WARN_OPTION_IGNORED),
"INDEX DIRECTORY");
part_elem->data_file_name= part_elem->index_file_name= NULL;
@@ -1187,12 +1709,12 @@ bool partition_info::check_partition_info(THD *thd, handlerton **eng_type,
}
if (part_field_list.elements > 0 &&
- (same_name= has_unique_fields()))
+ (same_name= find_duplicate_field()))
{
my_error(ER_SAME_NAME_PARTITION_FIELD, MYF(0), same_name);
goto end;
}
- if ((same_name= has_unique_names()))
+ if ((same_name= find_duplicate_name()))
{
my_error(ER_SAME_NAME_PARTITION, MYF(0), same_name);
goto end;
@@ -1644,6 +2166,71 @@ void partition_info::report_part_expr_error(bool use_subpart_expr)
}
+/**
+ Check if fields are in the partitioning expression.
+
+ @param fields List of Items (fields)
+
+ @return True if any field in the fields list is used by a partitioning expr.
+ @retval true At least one field in the field list is found.
+ @retval false No field is within any partitioning expression.
+*/
+
+bool partition_info::is_field_in_part_expr(List<Item> &fields)
+{
+ List_iterator<Item> it(fields);
+ Item *item;
+ Item_field *field;
+ DBUG_ENTER("is_fields_in_part_expr");
+ while ((item= it++))
+ {
+ field= item->field_for_view_update();
+ DBUG_ASSERT(field->field->table == table);
+ if (bitmap_is_set(&full_part_field_set, field->field->field_index))
+ DBUG_RETURN(true);
+ }
+ DBUG_RETURN(false);
+}
+
+
+/**
+ Check if all partitioning fields are included.
+*/
+
+bool partition_info::is_full_part_expr_in_fields(List<Item> &fields)
+{
+ Field **part_field= full_part_field_array;
+ DBUG_ASSERT(*part_field);
+ DBUG_ENTER("is_full_part_expr_in_fields");
+ /*
+ It is very seldom many fields in full_part_field_array, so it is OK
+ to loop over all of them instead of creating a bitmap fields argument
+ to compare with.
+ */
+ do
+ {
+ List_iterator<Item> it(fields);
+ Item *item;
+ Item_field *field;
+ bool found= false;
+
+ while ((item= it++))
+ {
+ field= item->field_for_view_update();
+ DBUG_ASSERT(field->field->table == table);
+ if (*part_field == field->field)
+ {
+ found= true;
+ break;
+ }
+ }
+ if (!found)
+ DBUG_RETURN(false);
+ } while (*(++part_field));
+ DBUG_RETURN(true);
+}
+
+
/*
Create a new column value in current list with maxvalue
Called from parser
@@ -2251,262 +2838,6 @@ int partition_info::fix_parser_data(THD *thd)
}
-/**
- helper function to compare strings that can also be
- a NULL pointer.
-
- @param a char pointer (can be NULL).
- @param b char pointer (can be NULL).
-
- @return false if equal
- @retval true strings differs
- @retval false strings is equal
-*/
-
-static bool strcmp_null(const char *a, const char *b)
-{
- if (!a && !b)
- return false;
- if (a && b && !strcmp(a, b))
- return false;
- return true;
-}
-
-
-/**
- Check if the new part_info has the same partitioning.
-
- @param new_part_info New partition definition to compare with.
-
- @return True if not considered to have changed the partitioning.
- @retval true Allowed change (only .frm change, compatible distribution).
- @retval false Different partitioning, will need redistribution of rows.
-
- @note Currently only used to allow changing from non-set key_algorithm
- to a specified key_algorithm, to avoid rebuild when upgrading from 5.1 of
- such partitioned tables using numeric colums in the partitioning expression.
- For more info see bug#14521864.
- Does not check if columns etc has changed, i.e. only for
- alter_info->flags == ALTER_PARTITION.
-*/
-
-bool partition_info::has_same_partitioning(partition_info *new_part_info)
-{
- DBUG_ENTER("partition_info::has_same_partitioning");
-
- DBUG_ASSERT(part_field_array && part_field_array[0]);
-
- /*
- Only consider pre 5.5.3 .frm's to have same partitioning as
- a new one with KEY ALGORITHM = 1 ().
- */
-
- if (part_field_array[0]->table->s->mysql_version >= 50503)
- DBUG_RETURN(false);
-
- if (!new_part_info ||
- part_type != new_part_info->part_type ||
- num_parts != new_part_info->num_parts ||
- use_default_partitions != new_part_info->use_default_partitions ||
- new_part_info->is_sub_partitioned() != is_sub_partitioned())
- DBUG_RETURN(false);
-
- if (part_type != HASH_PARTITION)
- {
- /*
- RANGE or LIST partitioning, check if KEY subpartitioned.
- Also COLUMNS partitioning was added in 5.5, so treat that as different.
- */
- if (!is_sub_partitioned() ||
- !new_part_info->is_sub_partitioned() ||
- column_list ||
- new_part_info->column_list ||
- !list_of_subpart_fields ||
- !new_part_info->list_of_subpart_fields ||
- new_part_info->num_subparts != num_subparts ||
- new_part_info->subpart_field_list.elements !=
- subpart_field_list.elements ||
- new_part_info->use_default_subpartitions !=
- use_default_subpartitions)
- DBUG_RETURN(false);
- }
- else
- {
- /* Check if KEY partitioned. */
- if (!new_part_info->list_of_part_fields ||
- !list_of_part_fields ||
- new_part_info->part_field_list.elements != part_field_list.elements)
- DBUG_RETURN(false);
- }
-
- /* Check that it will use the same fields in KEY (fields) list. */
- List_iterator<char> old_field_name_it(part_field_list);
- List_iterator<char> new_field_name_it(new_part_info->part_field_list);
- char *old_name, *new_name;
- while ((old_name= old_field_name_it++))
- {
- new_name= new_field_name_it++;
- if (!new_name || my_strcasecmp(system_charset_info,
- new_name,
- old_name))
- DBUG_RETURN(false);
- }
-
- if (is_sub_partitioned())
- {
- /* Check that it will use the same fields in KEY subpart fields list. */
- List_iterator<char> old_field_name_it(subpart_field_list);
- List_iterator<char> new_field_name_it(new_part_info->subpart_field_list);
- char *old_name, *new_name;
- while ((old_name= old_field_name_it++))
- {
- new_name= new_field_name_it++;
- if (!new_name || my_strcasecmp(system_charset_info,
- new_name,
- old_name))
- DBUG_RETURN(false);
- }
- }
-
- if (!use_default_partitions)
- {
- /*
- Loop over partitions/subpartition to verify that they are
- the same, including state and name.
- */
- List_iterator<partition_element> part_it(partitions);
- List_iterator<partition_element> new_part_it(new_part_info->partitions);
- uint i= 0;
- do
- {
- partition_element *part_elem= part_it++;
- partition_element *new_part_elem= new_part_it++;
- /*
- The following must match:
- partition_name, tablespace_name, data_file_name, index_file_name,
- engine_type, part_max_rows, part_min_rows, nodegroup_id.
- (max_value, signed_flag, has_null_value only on partition level,
- RANGE/LIST)
- The following can differ:
- - part_comment
- part_state must be PART_NORMAL!
- */
- if (!part_elem || !new_part_elem ||
- strcmp(part_elem->partition_name,
- new_part_elem->partition_name) ||
- part_elem->part_state != PART_NORMAL ||
- new_part_elem->part_state != PART_NORMAL ||
- part_elem->max_value != new_part_elem->max_value ||
- part_elem->signed_flag != new_part_elem->signed_flag ||
- part_elem->has_null_value != new_part_elem->has_null_value)
- DBUG_RETURN(false);
-
- /* new_part_elem may not have engine_type set! */
- if (new_part_elem->engine_type &&
- part_elem->engine_type != new_part_elem->engine_type)
- DBUG_RETURN(false);
-
- if (is_sub_partitioned())
- {
- /*
- Check that both old and new partition has the same definition
- (VALUES IN/VALUES LESS THAN) (No COLUMNS partitioning, see above)
- */
- if (part_type == LIST_PARTITION)
- {
- List_iterator<part_elem_value> list_vals(part_elem->list_val_list);
- List_iterator<part_elem_value>
- new_list_vals(new_part_elem->list_val_list);
- part_elem_value *val;
- part_elem_value *new_val;
- while ((val= list_vals++))
- {
- new_val= new_list_vals++;
- if (!new_val)
- DBUG_RETURN(false);
- if ((!val->null_value && !new_val->null_value) &&
- val->value != new_val->value)
- DBUG_RETURN(false);
- }
- if (new_list_vals++)
- DBUG_RETURN(false);
- }
- else
- {
- DBUG_ASSERT(part_type == RANGE_PARTITION);
- if (new_part_elem->range_value != part_elem->range_value)
- DBUG_RETURN(false);
- }
-
- if (!use_default_subpartitions)
- {
- List_iterator<partition_element>
- sub_part_it(part_elem->subpartitions);
- List_iterator<partition_element>
- new_sub_part_it(new_part_elem->subpartitions);
- uint j= 0;
- do
- {
- partition_element *sub_part_elem= sub_part_it++;
- partition_element *new_sub_part_elem= new_sub_part_it++;
- /* new_part_elem may not have engine_type set! */
- if (new_sub_part_elem->engine_type &&
- sub_part_elem->engine_type != new_part_elem->engine_type)
- DBUG_RETURN(false);
-
- if (strcmp(sub_part_elem->partition_name,
- new_sub_part_elem->partition_name) ||
- sub_part_elem->part_state != PART_NORMAL ||
- new_sub_part_elem->part_state != PART_NORMAL ||
- sub_part_elem->part_min_rows !=
- new_sub_part_elem->part_min_rows ||
- sub_part_elem->part_max_rows !=
- new_sub_part_elem->part_max_rows ||
- sub_part_elem->nodegroup_id !=
- new_sub_part_elem->nodegroup_id)
- DBUG_RETURN(false);
-
- if (strcmp_null(sub_part_elem->data_file_name,
- new_sub_part_elem->data_file_name) ||
- strcmp_null(sub_part_elem->index_file_name,
- new_sub_part_elem->index_file_name) ||
- strcmp_null(sub_part_elem->tablespace_name,
- new_sub_part_elem->tablespace_name))
- DBUG_RETURN(false);
-
- } while (++j < num_subparts);
- }
- }
- else
- {
- if (part_elem->part_min_rows != new_part_elem->part_min_rows ||
- part_elem->part_max_rows != new_part_elem->part_max_rows ||
- part_elem->nodegroup_id != new_part_elem->nodegroup_id)
- DBUG_RETURN(false);
-
- if (strcmp_null(part_elem->data_file_name,
- new_part_elem->data_file_name) ||
- strcmp_null(part_elem->index_file_name,
- new_part_elem->index_file_name) ||
- strcmp_null(part_elem->tablespace_name,
- new_part_elem->tablespace_name))
- DBUG_RETURN(false);
- }
- } while (++i < num_parts);
- }
-
- /*
- Only if key_algorithm was not specified before and it is now set,
- consider this as nothing was changed, and allow change without rebuild!
- */
- if (key_algorithm != partition_info::KEY_ALGORITHM_NONE ||
- new_part_info->key_algorithm == partition_info::KEY_ALGORITHM_NONE)
- DBUG_RETURN(false);
-
- DBUG_RETURN(true);
-}
-
-
void partition_info::print_debug(const char *str, uint *value)
{
DBUG_ENTER("print_debug");
diff --git a/sql/partition_info.h b/sql/partition_info.h
index 17c9cb383ee..01f6b53a148 100644
--- a/sql/partition_info.h
+++ b/sql/partition_info.h
@@ -20,10 +20,11 @@
#pragma interface /* gcc class implementation */
#endif
+#include "sql_class.h"
#include "partition_element.h"
class partition_info;
-
+struct TABLE_LIST;
/* Some function typedefs */
typedef int (*get_part_id_func)(partition_info *part_info,
uint32 *part_id,
@@ -111,14 +112,30 @@ public:
struct st_ddl_log_memory_entry *frm_log_entry;
/*
- A bitmap of partitions used by the current query.
+ Bitmaps of partitions used by the current query.
+ * read_partitions - partitions to be used for reading.
+ * lock_partitions - partitions that must be locked (read or write).
+ Usually read_partitions is the same set as lock_partitions, but
+ in case of UPDATE the WHERE clause can limit the read_partitions set,
+ but not neccesarily the lock_partitions set.
Usage pattern:
- * The handler->extra(HA_EXTRA_RESET) call at query start/end sets all
- partitions to be unused.
- * Before index/rnd_init(), partition pruning code sets the bits for used
- partitions.
+ * Initialized in ha_partition::open().
+ * read+lock_partitions is set according to explicit PARTITION,
+ WL#5217, in open_and_lock_tables().
+ * Bits in read_partitions can be cleared in prune_partitions()
+ in the optimizing step.
+ (WL#4443 is about allowing prune_partitions() to affect lock_partitions
+ and be done before locking too).
+ * When the partition enabled handler get an external_lock call it locks
+ all partitions in lock_partitions (and remembers which partitions it
+ locked, so that it can unlock them later). In case of LOCK TABLES it will
+ lock all partitions, and keep them locked while lock_partitions can
+ change for each statement under LOCK TABLES.
+ * Freed at the same time item_free_list is freed.
*/
- MY_BITMAP used_partitions;
+ MY_BITMAP read_partitions;
+ MY_BITMAP lock_partitions;
+ bool bitmaps_are_initialized;
union {
longlong *range_int_array;
@@ -157,6 +174,7 @@ public:
uint curr_list_object;
uint num_columns;
+ TABLE *table;
/*
These key_map's are used for Partitioning to enable quick decisions
on whether we can derive more information about which partition to
@@ -220,6 +238,15 @@ public:
bool from_openfrm;
bool has_null_value;
bool column_list;
+ /**
+ True if pruning has been completed and can not be pruned any further,
+ even if there are subqueries or stored programs in the condition.
+
+ Some times it is needed to run prune_partitions() a second time to prune
+ read partitions after tables are locked, when subquery and
+ stored functions might have been evaluated.
+ */
+ bool is_pruning_completed;
partition_info()
: get_partition_id(NULL), get_part_partition_id(NULL),
@@ -232,6 +259,7 @@ public:
restore_part_field_ptrs(NULL), restore_subpart_field_ptrs(NULL),
part_expr(NULL), subpart_expr(NULL), item_free_list(NULL),
first_log_entry(NULL), exec_log_entry(NULL), frm_log_entry(NULL),
+ bitmaps_are_initialized(FALSE),
list_array(NULL), err_value(0),
part_info_string(NULL),
part_func_string(NULL), subpart_func_string(NULL),
@@ -252,7 +280,7 @@ public:
list_of_part_fields(FALSE), list_of_subpart_fields(FALSE),
linear_hash_ind(FALSE), fixed(FALSE),
is_auto_partitioned(FALSE), from_openfrm(FALSE),
- has_null_value(FALSE), column_list(FALSE)
+ has_null_value(FALSE), column_list(FALSE), is_pruning_completed(false)
{
all_fields_in_PF.clear_all();
all_fields_in_PPF.clear_all();
@@ -266,6 +294,8 @@ public:
~partition_info() {}
partition_info *get_clone();
+ bool set_named_partition_bitmap(const char *part_name, uint length);
+ bool set_partition_bitmaps(TABLE_LIST *table_list);
/* Answers the question if subpartitioning is used for a certain table */
bool is_sub_partitioned()
{
@@ -280,8 +310,8 @@ public:
bool set_up_defaults_for_partitioning(handler *file, HA_CREATE_INFO *info,
uint start_no);
- char *has_unique_fields();
- char *has_unique_names();
+ char *find_duplicate_field();
+ char *find_duplicate_name();
bool check_engine_mix(handlerton *engine_type, bool default_engine);
bool check_range_constants(THD *thd);
bool check_list_constants(THD *thd);
@@ -311,8 +341,34 @@ public:
bool init_column_part();
bool add_column_list_value(THD *thd, Item *item);
void set_show_version_string(String *packet);
+ partition_element *get_part_elem(const char *partition_name,
+ char *file_name,
+ uint32 *part_id);
void report_part_expr_error(bool use_subpart_expr);
- bool has_same_partitioning(partition_info *new_part_info);
+ bool set_used_partition(List<Item> &fields,
+ List<Item> &values,
+ COPY_INFO &info,
+ bool copy_default_values,
+ MY_BITMAP *used_partitions);
+ /**
+ PRUNE_NO - Unable to prune.
+ PRUNE_DEFAULTS - Partitioning field is only set to
+ DEFAULT values, only need to check
+ pruning for one row where the DEFAULTS
+ values are set.
+ PRUNE_YES - Pruning is possible, calculate the used partition set
+ by evaluate the partition_id on row by row basis.
+ */
+ enum enum_can_prune {PRUNE_NO=0, PRUNE_DEFAULTS, PRUNE_YES};
+ bool can_prune_insert(THD *thd,
+ enum_duplicates duplic,
+ COPY_INFO &update,
+ List<Item> &update_fields,
+ List<Item> &fields,
+ bool empty_values,
+ enum_can_prune *can_prune_partitions,
+ bool *prune_needs_default_values,
+ MY_BITMAP *used_partitions);
private:
static int list_part_cmp(const void* a, const void* b);
bool set_up_default_partitions(handler *file, HA_CREATE_INFO *info,
@@ -320,7 +376,12 @@ private:
bool set_up_default_subpartitions(handler *file, HA_CREATE_INFO *info);
char *create_default_partition_names(uint part_no, uint num_parts,
uint start_no);
- char *create_subpartition_name(uint subpart_no, const char *part_name);
+ char *create_default_subpartition_name(uint subpart_no,
+ const char *part_name);
+ bool prune_partition_bitmaps(TABLE_LIST *table_list);
+ bool add_named_partition(const char *part_name, uint length);
+ bool is_field_in_part_expr(List<Item> &fields);
+ bool is_full_part_expr_in_fields(List<Item> &fields);
public:
bool has_unique_name(partition_element *element);
};
diff --git a/sql/password.c b/sql/password.c
index 947620ddf7a..954daf2d8d1 100644
--- a/sql/password.c
+++ b/sql/password.c
@@ -60,12 +60,14 @@
*****************************************************************************/
-#include <password.h>
#include <my_global.h>
#include <my_sys.h>
#include <m_string.h>
+#include <password.h>
+#include <mysql.h>
+#include <my_rnd.h>
#include <sha1.h>
-#include "mysql.h"
+#include <crypt_genhash_impl.h>
/************ MySQL 3.23-4.0 authentication routines: untouched ***********/
@@ -372,6 +374,47 @@ my_crypt(char *to, const uchar *s1, const uchar *s2, uint len)
}
+#if defined(HAVE_OPENSSL)
+void my_make_scrambled_password(char *to, const char *password,
+ size_t pass_len)
+{
+
+ char salt[CRYPT_SALT_LENGTH + 1];
+
+ generate_user_salt(salt, CRYPT_SALT_LENGTH + 1);
+ my_crypt_genhash(to,
+ CRYPT_MAX_PASSWORD_SIZE,
+ password,
+ pass_len,
+ salt,
+ 0);
+
+}
+#endif
+/**
+ Compute two stage SHA1 hash of the password :
+
+ hash_stage1=sha1("password")
+ hash_stage2=sha1(hash_stage1)
+
+ @param password [IN] Password string.
+ @param pass_len [IN] Length of the password.
+ @param hash_stage1 [OUT] sha1(password)
+ @param hash_stage2 [OUT] sha1(hash_stage1)
+*/
+
+inline static
+void compute_two_stage_sha1_hash(const char *password, size_t pass_len,
+ uint8 *hash_stage1, uint8 *hash_stage2)
+{
+ /* Stage 1: hash password */
+ compute_sha1_hash(hash_stage1, password, pass_len);
+
+ /* Stage 2 : hash first stage's output. */
+ compute_sha1_hash(hash_stage2, (const char *) hash_stage1, SHA1_HASH_SIZE);
+}
+
+
/*
MySQL 4.1.1 password hashing: SHA conversion (see RFC 2289, 3174) twice
applied to the password string, and then produced octet sequence is
@@ -379,27 +422,20 @@ my_crypt(char *to, const uchar *s1, const uchar *s2, uint len)
The result of this function is used as return value from PASSWORD() and
is stored in the database.
SYNOPSIS
- my_make_scrambled_password()
+ my_make_scrambled_password_sha1()
buf OUT buffer of size 2*SHA1_HASH_SIZE + 2 to store hex string
password IN password string
pass_len IN length of password string
*/
-void my_make_scrambled_password(char *to, const char *password,
- size_t pass_len)
+void my_make_scrambled_password_sha1(char *to, const char *password,
+ size_t pass_len)
{
- SHA1_CONTEXT sha1_context;
uint8 hash_stage2[SHA1_HASH_SIZE];
- mysql_sha1_reset(&sha1_context);
- /* stage 1: hash password */
- mysql_sha1_input(&sha1_context, (uint8 *) password, (uint) pass_len);
- mysql_sha1_result(&sha1_context, (uint8 *) to);
- /* stage 2: hash stage1 output */
- mysql_sha1_reset(&sha1_context);
- mysql_sha1_input(&sha1_context, (uint8 *) to, SHA1_HASH_SIZE);
- /* separate buffer is used to pass 'to' in octet2hex */
- mysql_sha1_result(&sha1_context, hash_stage2);
+ /* Two stage SHA1 hash of the password. */
+ compute_two_stage_sha1_hash(password, pass_len, (uint8 *) to, hash_stage2);
+
/* convert hash_stage2 to hex string */
*to++= PVERSION41_CHAR;
octet2hex(to, (const char*) hash_stage2, SHA1_HASH_SIZE);
@@ -419,7 +455,7 @@ void my_make_scrambled_password(char *to, const char *password,
void make_scrambled_password(char *to, const char *password)
{
- my_make_scrambled_password(to, password, strlen(password));
+ my_make_scrambled_password_sha1(to, password, strlen(password));
}
@@ -443,24 +479,16 @@ void make_scrambled_password(char *to, const char *password)
void
scramble(char *to, const char *message, const char *password)
{
- SHA1_CONTEXT sha1_context;
uint8 hash_stage1[SHA1_HASH_SIZE];
uint8 hash_stage2[SHA1_HASH_SIZE];
- mysql_sha1_reset(&sha1_context);
- /* stage 1: hash password */
- mysql_sha1_input(&sha1_context, (uint8 *) password, (uint) strlen(password));
- mysql_sha1_result(&sha1_context, hash_stage1);
- /* stage 2: hash stage 1; note that hash_stage2 is stored in the database */
- mysql_sha1_reset(&sha1_context);
- mysql_sha1_input(&sha1_context, hash_stage1, SHA1_HASH_SIZE);
- mysql_sha1_result(&sha1_context, hash_stage2);
+ /* Two stage SHA1 hash of the password. */
+ compute_two_stage_sha1_hash(password, strlen(password), hash_stage1,
+ hash_stage2);
+
/* create crypt string as sha1(message, hash_stage2) */;
- mysql_sha1_reset(&sha1_context);
- mysql_sha1_input(&sha1_context, (const uint8 *) message, SCRAMBLE_LENGTH);
- mysql_sha1_input(&sha1_context, hash_stage2, SHA1_HASH_SIZE);
- /* xor allows 'from' and 'to' overlap: lets take advantage of it */
- mysql_sha1_result(&sha1_context, (uint8 *) to);
+ compute_sha1_hash_multi((uint8 *) to, message, SCRAMBLE_LENGTH,
+ (const char *) hash_stage2, SHA1_HASH_SIZE);
my_crypt(to, (const uchar *) to, hash_stage1, SCRAMBLE_LENGTH);
}
@@ -472,7 +500,7 @@ scramble(char *to, const char *message, const char *password)
null-terminated, reply and hash_stage2 must be at least SHA1_HASH_SIZE
long (if not, something fishy is going on).
SYNOPSIS
- check_scramble()
+ check_scramble_sha1()
scramble clients' reply, presumably produced by scramble()
message original random string, previously sent to client
(presumably second argument of scramble()), must be
@@ -486,27 +514,30 @@ scramble(char *to, const char *message, const char *password)
*/
my_bool
-check_scramble(const uchar *scramble_arg, const char *message,
- const uint8 *hash_stage2)
+check_scramble_sha1(const uchar *scramble_arg, const char *message,
+ const uint8 *hash_stage2)
{
- SHA1_CONTEXT sha1_context;
uint8 buf[SHA1_HASH_SIZE];
uint8 hash_stage2_reassured[SHA1_HASH_SIZE];
- mysql_sha1_reset(&sha1_context);
/* create key to encrypt scramble */
- mysql_sha1_input(&sha1_context, (const uint8 *) message, SCRAMBLE_LENGTH);
- mysql_sha1_input(&sha1_context, hash_stage2, SHA1_HASH_SIZE);
- mysql_sha1_result(&sha1_context, buf);
+ compute_sha1_hash_multi(buf, message, SCRAMBLE_LENGTH,
+ (const char *) hash_stage2, SHA1_HASH_SIZE);
/* encrypt scramble */
- my_crypt((char *) buf, buf, scramble_arg, SCRAMBLE_LENGTH);
+ my_crypt((char *) buf, buf, scramble_arg, SCRAMBLE_LENGTH);
+
/* now buf supposedly contains hash_stage1: so we can get hash_stage2 */
- mysql_sha1_reset(&sha1_context);
- mysql_sha1_input(&sha1_context, buf, SHA1_HASH_SIZE);
- mysql_sha1_result(&sha1_context, hash_stage2_reassured);
+ compute_sha1_hash(hash_stage2_reassured, (const char *) buf, SHA1_HASH_SIZE);
+
return test(memcmp(hash_stage2, hash_stage2_reassured, SHA1_HASH_SIZE));
}
+my_bool
+check_scramble(const uchar *scramble_arg, const char *message,
+ const uint8 *hash_stage2)
+{
+ return check_scramble_sha1(scramble_arg, message, hash_stage2);
+}
/*
Convert scrambled password from asciiz hex string to binary form.
diff --git a/sql/protocol.cc b/sql/protocol.cc
index be16d8c3ed8..effeee9b4aa 100644
--- a/sql/protocol.cc
+++ b/sql/protocol.cc
@@ -161,14 +161,14 @@ bool net_send_error(THD *thd, uint sql_errno, const char *err,
It's one case when we can push an error even though there
is an OK or EOF already.
*/
- thd->stmt_da->can_overwrite_status= TRUE;
+ thd->get_stmt_da()->set_overwrite_status(true);
/* Abort multi-result sets */
thd->server_status&= ~SERVER_MORE_RESULTS_EXISTS;
error= net_send_error_packet(thd, sql_errno, err, sqlstate);
- thd->stmt_da->can_overwrite_status= FALSE;
+ thd->get_stmt_da()->set_overwrite_status(false);
DBUG_RETURN(error);
}
@@ -233,7 +233,7 @@ net_send_ok(THD *thd,
pos+=2;
/* We can only return up to 65535 warnings in two bytes */
- uint tmp= min(statement_warn_count, 65535);
+ uint tmp= MY_MIN(statement_warn_count, 65535);
int2store(pos, tmp);
pos+= 2;
}
@@ -242,7 +242,7 @@ net_send_ok(THD *thd,
int2store(pos, server_status);
pos+=2;
}
- thd->stmt_da->can_overwrite_status= TRUE;
+ thd->get_stmt_da()->set_overwrite_status(true);
if (message && message[0])
pos= net_store_data(pos, (uchar*) message, strlen(message));
@@ -251,7 +251,7 @@ net_send_ok(THD *thd,
error= net_flush(net);
- thd->stmt_da->can_overwrite_status= FALSE;
+ thd->get_stmt_da()->set_overwrite_status(false);
DBUG_PRINT("info", ("OK sent, so no more error sending allowed"));
DBUG_RETURN(error);
@@ -291,11 +291,11 @@ net_send_eof(THD *thd, uint server_status, uint statement_warn_count)
/* Set to TRUE if no active vio, to work well in case of --init-file */
if (net->vio != 0)
{
- thd->stmt_da->can_overwrite_status= TRUE;
+ thd->get_stmt_da()->set_overwrite_status(true);
error= write_eof_packet(thd, net, server_status, statement_warn_count);
if (!error)
error= net_flush(net);
- thd->stmt_da->can_overwrite_status= FALSE;
+ thd->get_stmt_da()->set_overwrite_status(false);
DBUG_PRINT("info", ("EOF sent, so no more error sending allowed"));
}
DBUG_RETURN(error);
@@ -329,7 +329,7 @@ static bool write_eof_packet(THD *thd, NET *net,
Don't send warn count during SP execution, as the warn_list
is cleared between substatements, and mysqltest gets confused
*/
- uint tmp= min(statement_warn_count, 65535);
+ uint tmp= MY_MIN(statement_warn_count, 65535);
buff[0]= 254;
int2store(buff+1, tmp);
/*
@@ -486,30 +486,30 @@ static uchar *net_store_length_fast(uchar *packet, uint length)
void Protocol::end_statement()
{
DBUG_ENTER("Protocol::end_statement");
- DBUG_ASSERT(! thd->stmt_da->is_sent);
+ DBUG_ASSERT(! thd->get_stmt_da()->is_sent());
bool error= FALSE;
/* Can not be true, but do not take chances in production. */
- if (thd->stmt_da->is_sent)
+ if (thd->get_stmt_da()->is_sent())
DBUG_VOID_RETURN;
- switch (thd->stmt_da->status()) {
+ switch (thd->get_stmt_da()->status()) {
case Diagnostics_area::DA_ERROR:
/* The query failed, send error to log and abort bootstrap. */
- error= send_error(thd->stmt_da->sql_errno(),
- thd->stmt_da->message(),
- thd->stmt_da->get_sqlstate());
+ error= send_error(thd->get_stmt_da()->sql_errno(),
+ thd->get_stmt_da()->message(),
+ thd->get_stmt_da()->get_sqlstate());
break;
case Diagnostics_area::DA_EOF:
error= send_eof(thd->server_status,
- thd->stmt_da->statement_warn_count());
+ thd->get_stmt_da()->statement_warn_count());
break;
case Diagnostics_area::DA_OK:
error= send_ok(thd->server_status,
- thd->stmt_da->statement_warn_count(),
- thd->stmt_da->affected_rows(),
- thd->stmt_da->last_insert_id(),
- thd->stmt_da->message());
+ thd->get_stmt_da()->statement_warn_count(),
+ thd->get_stmt_da()->affected_rows(),
+ thd->get_stmt_da()->last_insert_id(),
+ thd->get_stmt_da()->message());
break;
case Diagnostics_area::DA_DISABLED:
break;
@@ -520,7 +520,7 @@ void Protocol::end_statement()
break;
}
if (!error)
- thd->stmt_da->is_sent= TRUE;
+ thd->get_stmt_da()->set_is_sent(true);
DBUG_VOID_RETURN;
}
@@ -606,17 +606,17 @@ void net_send_progress_packet(THD *thd)
*pos++= (uchar) 1; // Number of strings
*pos++= (uchar) thd->progress.stage + 1;
/*
- We have the max() here to avoid problems if max_stage is not set,
+ We have the MY_MAX() here to avoid problems if max_stage is not set,
which may happen during automatic repair of table
*/
- *pos++= (uchar) max(thd->progress.max_stage, thd->progress.stage + 1);
+ *pos++= (uchar) MY_MAX(thd->progress.max_stage, thd->progress.stage + 1);
progress= 0;
if (thd->progress.max_counter)
progress= 100000ULL * thd->progress.counter / thd->progress.max_counter;
int3store(pos, progress); // Between 0 & 100000
pos+= 3;
pos= net_store_data(pos, (const uchar*) proc_info,
- min(length, sizeof(buff)-7));
+ MY_MIN(length, sizeof(buff)-7));
net_write_command(&thd->net, (uchar) 255, progress_header,
sizeof(progress_header), (uchar*) buff,
(uint) (pos - buff));
@@ -688,9 +688,9 @@ bool Protocol::flush()
{
#ifndef EMBEDDED_LIBRARY
bool error;
- thd->stmt_da->can_overwrite_status= TRUE;
+ thd->get_stmt_da()->set_overwrite_status(true);
error= net_flush(&thd->net);
- thd->stmt_da->can_overwrite_status= FALSE;
+ thd->get_stmt_da()->set_overwrite_status(false);
return error;
#else
return 0;
@@ -856,7 +856,7 @@ bool Protocol::send_result_set_metadata(List<Item> *list, uint flags)
Send no warning information, as it will be sent at statement end.
*/
if (write_eof_packet(thd, &thd->net, thd->server_status,
- thd->warning_info->statement_warn_count()))
+ thd->get_stmt_da()->current_statement_warn_count()))
DBUG_RETURN(1);
}
DBUG_RETURN(prepare_for_send(list->elements));
diff --git a/sql/rpl_mi.cc b/sql/rpl_mi.cc
index 12bdf722bec..fced238e334 100644
--- a/sql/rpl_mi.cc
+++ b/sql/rpl_mi.cc
@@ -186,7 +186,7 @@ void init_master_log_pos(Master_info* mi)
if CHANGE MASTER did not specify it. (no data loss in conversion
as hb period has a max)
*/
- mi->heartbeat_period= (float) min(SLAVE_MAX_HEARTBEAT_PERIOD,
+ mi->heartbeat_period= (float) MY_MIN(SLAVE_MAX_HEARTBEAT_PERIOD,
(slave_net_timeout/2.0));
DBUG_ASSERT(mi->heartbeat_period > (float) 0.001
|| mi->heartbeat_period == 0);
@@ -766,20 +766,20 @@ void create_logfile_name_with_suffix(char *res_file_name, size_t length,
{
const char *info_file_end= info_file + (p - res_file_name);
const char *ext= append ? info_file_end : fn_ext2(info_file);
- size_t res_length, ext_pos;
+ size_t res_length, ext_pos, from_length;
uint errors;
/* Create null terminated string */
- strmake(buff, suffix->str, suffix->length);
+ from_length= strmake(buff, suffix->str, suffix->length) - buff;
/* Convert to characters usable in a file name */
- res_length= strconvert(system_charset_info, buff,
+ res_length= strconvert(system_charset_info, buff, from_length,
&my_charset_filename, res, sizeof(res), &errors);
ext_pos= (size_t) (ext - info_file);
length-= (suffix->length - ext_pos); /* Leave place for extension */
p= res_file_name + ext_pos;
*p++= '-'; /* Add separator */
- p= strmake(p, res, min((size_t) (length - (p - res_file_name)),
+ p= strmake(p, res, MY_MIN((size_t) (length - (p - res_file_name)),
res_length));
/* Add back extension. We have checked above that there is space for it */
strmov(p, ext);
@@ -957,7 +957,7 @@ bool Master_info_index::init_all_master_info()
sql_print_error("Initialized Master_info from '%s' failed",
buf_master_info_file);
if (!master_info_index->get_master_info(&connection_name,
- MYSQL_ERROR::WARN_LEVEL_NOTE))
+ Sql_condition::WARN_LEVEL_NOTE))
{
/* Master_info is not in HASH; Add it */
if (master_info_index->add_master_info(mi, FALSE))
@@ -982,7 +982,7 @@ bool Master_info_index::init_all_master_info()
sql_print_information("Initialized Master_info from '%s'",
buf_master_info_file);
if (master_info_index->get_master_info(&connection_name,
- MYSQL_ERROR::WARN_LEVEL_NOTE))
+ Sql_condition::WARN_LEVEL_NOTE))
{
/* Master_info was already registered */
sql_print_error(ER(ER_CONNECTION_ALREADY_EXISTS),
@@ -1079,7 +1079,7 @@ bool Master_info_index::write_master_name_to_index_file(LEX_STRING *name,
Master_info *
Master_info_index::get_master_info(LEX_STRING *connection_name,
- MYSQL_ERROR::enum_warning_level warning)
+ Sql_condition::enum_warning_level warning)
{
Master_info *mi;
char buff[MAX_CONNECTION_NAME+1], *res;
@@ -1096,10 +1096,10 @@ Master_info_index::get_master_info(LEX_STRING *connection_name,
mi= (Master_info*) my_hash_search(&master_info_hash,
(uchar*) buff, buff_length);
- if (!mi && warning != MYSQL_ERROR::WARN_LEVEL_NOTE)
+ if (!mi && warning != Sql_condition::WARN_LEVEL_NOTE)
{
my_error(WARN_NO_MASTER_INFO,
- MYF(warning == MYSQL_ERROR::WARN_LEVEL_WARN ? ME_JUST_WARNING :
+ MYF(warning == Sql_condition::WARN_LEVEL_WARN ? ME_JUST_WARNING :
0),
(int) connection_name->length,
connection_name->str);
@@ -1118,7 +1118,7 @@ bool Master_info_index::check_duplicate_master_info(LEX_STRING *name_arg,
/* Get full host and port name */
if ((mi= master_info_index->get_master_info(name_arg,
- MYSQL_ERROR::WARN_LEVEL_NOTE)))
+ Sql_condition::WARN_LEVEL_NOTE)))
{
if (!host)
host= mi->host;
@@ -1182,7 +1182,7 @@ bool Master_info_index::remove_master_info(LEX_STRING *name)
Master_info* mi;
DBUG_ENTER("remove_master_info");
- if ((mi= get_master_info(name, MYSQL_ERROR::WARN_LEVEL_WARN)))
+ if ((mi= get_master_info(name, Sql_condition::WARN_LEVEL_WARN)))
{
// Delete Master_info and rewrite others to file
if (!my_hash_delete(&master_info_hash, (uchar*) mi))
@@ -1294,7 +1294,7 @@ bool Master_info_index::start_all_slaves(THD *thd)
break;
}
else
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_SLAVE_STARTED, ER(ER_SLAVE_STARTED),
(int) mi->connection_name.length,
mi->connection_name.str);
@@ -1339,7 +1339,7 @@ bool Master_info_index::stop_all_slaves(THD *thd)
break;
}
else
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_SLAVE_STOPPED, ER(ER_SLAVE_STOPPED),
(int) mi->connection_name.length,
mi->connection_name.str);
diff --git a/sql/rpl_mi.h b/sql/rpl_mi.h
index ad6c57e21c4..991f6673c3a 100644
--- a/sql/rpl_mi.h
+++ b/sql/rpl_mi.h
@@ -208,7 +208,7 @@ public:
bool add_master_info(Master_info *mi, bool write_to_file);
bool remove_master_info(LEX_STRING *connection_name);
Master_info *get_master_info(LEX_STRING *connection_name,
- MYSQL_ERROR::enum_warning_level warning);
+ Sql_condition::enum_warning_level warning);
bool give_error_if_slave_running();
bool start_all_slaves(THD *thd);
bool stop_all_slaves(THD *thd);
diff --git a/sql/rpl_record.cc b/sql/rpl_record.cc
index 99bf8a82004..aa8c118cfe6 100644
--- a/sql/rpl_record.cc
+++ b/sql/rpl_record.cc
@@ -287,7 +287,7 @@ unpack_row(Relay_log_info const *rli,
else
{
f->set_default();
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_BAD_NULL_ERROR, ER(ER_BAD_NULL_ERROR),
f->field_name);
}
@@ -362,7 +362,7 @@ unpack_row(Relay_log_info const *rli,
/*
throw away master's extra fields
*/
- uint max_cols= min(tabledef->size(), cols->n_bits);
+ uint max_cols= MY_MIN(tabledef->size(), cols->n_bits);
for (; i < max_cols; i++)
{
if (bitmap_is_set(cols, i))
@@ -447,7 +447,7 @@ int prepare_record(TABLE *const table, const uint skip, const bool check)
{
f->set_default();
push_warning_printf(current_thd,
- MYSQL_ERROR::WARN_LEVEL_WARN,
+ Sql_condition::WARN_LEVEL_WARN,
ER_NO_DEFAULT_FOR_FIELD,
ER(ER_NO_DEFAULT_FOR_FIELD),
f->field_name);
diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc
index a455779bb6e..3a6bb4c33dc 100644
--- a/sql/rpl_rli.cc
+++ b/sql/rpl_rli.cc
@@ -256,7 +256,7 @@ a file name for --relay-log-index option", opt_relaylog_index_name);
{
sql_print_error("Failed to create a new relay log info file (\
file '%s', errno %d)", fname, my_errno);
- msg= current_thd->stmt_da->message();
+ msg= current_thd->get_stmt_da()->message();
goto err;
}
if (init_io_cache(&rli->info_file, info_fd, IO_SIZE*2, READ_CACHE, 0L,0,
@@ -264,7 +264,7 @@ file '%s', errno %d)", fname, my_errno);
{
sql_print_error("Failed to create a cache on relay log info file '%s'",
fname);
- msg= current_thd->stmt_da->message();
+ msg= current_thd->get_stmt_da()->message();
goto err;
}
@@ -741,7 +741,7 @@ int Relay_log_info::wait_for_pos(THD* thd, String* log_name,
ulong log_name_extension;
char log_name_tmp[FN_REFLEN]; //make a char[] from String
- strmake(log_name_tmp, log_name->ptr(), min(log_name->length(), FN_REFLEN-1));
+ strmake(log_name_tmp, log_name->ptr(), MY_MIN(log_name->length(), FN_REFLEN-1));
char *p= fn_ext(log_name_tmp);
char *p_end;
@@ -751,7 +751,7 @@ int Relay_log_info::wait_for_pos(THD* thd, String* log_name,
goto err;
}
// Convert 0-3 to 4
- log_pos= max(log_pos, BIN_LOG_HEADER_SIZE);
+ log_pos= MY_MAX(log_pos, BIN_LOG_HEADER_SIZE);
/* p points to '.' */
log_name_extension= strtoul(++p, &p_end, 10);
/*
@@ -1236,7 +1236,7 @@ void Relay_log_info::stmt_done(my_off_t event_master_log_pos,
"Failed to update GTID state in %s.%s, slave state may become "
"inconsistent: %d: %s",
"mysql", rpl_gtid_slave_state_table_name.str,
- thd->stmt_da->sql_errno(), thd->stmt_da->message());
+ thd->get_stmt_da()->sql_errno(), thd->get_stmt_da()->message());
/*
At this point we are not in a transaction (for example after DDL),
so we can not roll back. Anyway, normally updates to the slave
@@ -1355,9 +1355,9 @@ void Relay_log_info::clear_tables_to_lock()
void Relay_log_info::slave_close_thread_tables(THD *thd)
{
DBUG_ENTER("Relay_log_info::slave_close_thread_tables(THD *thd)");
- thd->stmt_da->can_overwrite_status= TRUE;
+ thd->get_stmt_da()->set_overwrite_status(true);
thd->is_error() ? trans_rollback_stmt(thd) : trans_commit_stmt(thd);
- thd->stmt_da->can_overwrite_status= FALSE;
+ thd->get_stmt_da()->set_overwrite_status(false);
close_thread_tables(thd);
/*
diff --git a/sql/rpl_utility.cc b/sql/rpl_utility.cc
index ac8a8fe356b..db47c3c164a 100644
--- a/sql/rpl_utility.cc
+++ b/sql/rpl_utility.cc
@@ -815,7 +815,7 @@ table_def::compatible_with(THD *thd, Relay_log_info *rli,
/*
We only check the initial columns for the tables.
*/
- uint const cols_to_check= min(table->s->fields, size());
+ uint const cols_to_check= MY_MIN(table->s->fields, size());
TABLE *tmp_table= NULL;
for (uint col= 0 ; col < cols_to_check ; ++col)
@@ -916,10 +916,10 @@ TABLE *table_def::create_conversion_table(THD *thd, Relay_log_info *rli, TABLE *
List<Create_field> field_list;
/*
At slave, columns may differ. So we should create
- min(columns@master, columns@slave) columns in the
+ MY_MIN(columns@master, columns@slave) columns in the
conversion table.
*/
- uint const cols_to_create= min(target_table->s->fields, size());
+ uint const cols_to_create= MY_MIN(target_table->s->fields, size());
for (uint col= 0 ; col < cols_to_create; ++col)
{
Create_field *field_def=
diff --git a/sql/rpl_utility.h b/sql/rpl_utility.h
index b08721aa8c2..9ac17f68a1f 100644
--- a/sql/rpl_utility.h
+++ b/sql/rpl_utility.h
@@ -295,7 +295,7 @@ public:
do { \
char buf[256]; \
uint i; \
- for (i = 0 ; i < min(sizeof(buf) - 1, (BS)->n_bits) ; i++) \
+ for (i = 0 ; i < MY_MIN(sizeof(buf) - 1, (BS)->n_bits) ; i++) \
buf[i] = bitmap_is_set((BS), i) ? '1' : '0'; \
buf[i] = '\0'; \
DBUG_PRINT((N), ((FRM), buf)); \
diff --git a/sql/set_var.cc b/sql/set_var.cc
index d9741ca3481..db74d8f0d9d 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -384,7 +384,7 @@ void sys_var::do_deprecated_warning(THD *thd)
? ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT
: ER_WARN_DEPRECATED_SYNTAX;
if (thd)
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARN_DEPRECATED_SYNTAX, ER(errmsg),
buf1, deprecation_substitute);
else
@@ -421,7 +421,7 @@ bool throw_bounds_warning(THD *thd, const char *name,
my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, buf);
return true;
}
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE,
ER(ER_TRUNCATED_WRONG_VALUE), name, buf);
}
@@ -441,7 +441,7 @@ bool throw_bounds_warning(THD *thd, const char *name, bool fixed, double v)
my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, buf);
return true;
}
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE,
ER(ER_TRUNCATED_WRONG_VALUE), name, buf);
}
diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt
index f62f5d917f7..35f2cfb330c 100644
--- a/sql/share/errmsg-utf8.txt
+++ b/sql/share/errmsg-utf8.txt
@@ -1,4 +1,4 @@
-languages czech=cze latin2, danish=dan latin1, dutch=nla latin1, english=eng latin1, estonian=est latin7, french=fre latin1, german=ger latin1, greek=greek greek, hungarian=hun latin2, italian=ita latin1, japanese=jpn ujis, japanese-sjis=jps sjis, korean=kor euckr, norwegian-ny=norwegian-ny latin1, norwegian=nor latin1, polish=pol latin2, portuguese=por latin1, romanian=rum latin2, russian=rus koi8r, serbian=serbian cp1250, slovak=slo latin2, spanish=spa latin1, swedish=swe latin1, ukrainian=ukr koi8u;
+languages czech=cze latin2, danish=dan latin1, dutch=nla latin1, english=eng latin1, estonian=est latin7, french=fre latin1, german=ger latin1, greek=greek greek, hungarian=hun latin2, italian=ita latin1, japanese=jpn ujis, korean=kor euckr, norwegian-ny=norwegian-ny latin1, norwegian=nor latin1, polish=pol latin2, portuguese=por latin1, romanian=rum latin2, russian=rus koi8r, serbian=serbian cp1250, slovak=slo latin2, spanish=spa latin1, swedish=swe latin1, ukrainian=ukr koi8u, bulgarian=bgn cp1251;
default-language eng
@@ -51,7 +51,7 @@ ER_YES
spa "SI"
ukr "ТÐК"
ER_CANT_CREATE_FILE
- cze "Nemohu vytvo-Břit soubor '%-.200s' (chybový kód: %M)"
+ cze "Nemohu vytvořit soubor '%-.200s' (chybový kód: %M)"
dan "Kan ikke oprette filen '%-.200s' (Fejlkode: %M)"
nla "Kan file '%-.200s' niet aanmaken (Errcode: %M)"
eng "Can't create file '%-.200s' (errno: %M)"
@@ -61,7 +61,7 @@ ER_CANT_CREATE_FILE
greek "ΑδÏνατη η δημιουÏγία του αÏχείου '%-.200s' (κωδικός λάθους: %M)"
hun "A '%-.200s' file nem hozhato letre (hibakod: %M)"
ita "Impossibile creare il file '%-.200s' (errno: %M)"
- jpn "'%-.200s' ファイルãŒä½œã‚Œã¾ã›ã‚“ (errno: %M)"
+ jpn "ファイル '%-.200s' を作æˆã§ãã¾ã›ã‚“。(エラー番å·: %M)"
kor "í™”ì¼ '%-.200s'를 만들지 못했습니다. (ì—러번호: %M)"
nor "Kan ikke opprette fila '%-.200s' (Feilkode: %M)"
norwegian-ny "Kan ikkje opprette fila '%-.200s' (Feilkode: %M)"
@@ -75,7 +75,7 @@ ER_CANT_CREATE_FILE
swe "Kan inte skapa filen '%-.200s' (Felkod: %M)"
ukr "Ðе можу Ñтворити файл '%-.200s' (помилка: %M)"
ER_CANT_CREATE_TABLE
- cze "Nemohu vytvo-Břit tabulku %`s.%`s (chybový kód: %M)"
+ cze "Nemohu vytvořit tabulku %`s.%`s (chybový kód: %M)"
dan "Kan ikke oprette tabellen %`s.%`s (Fejlkode: %M)"
nla "Kan tabel %`s.%`s niet aanmaken (Errcode: %M)"
eng "Can't create table %`s.%`s (errno: %M)"
@@ -100,18 +100,17 @@ ER_CANT_CREATE_TABLE
swe "Kan inte skapa tabellen %`s.%`s (Felkod: %M)"
ukr "Ðе можу Ñтворити таблицю %`s.%`s (помилка: %M)"
ER_CANT_CREATE_DB
- cze "Nemohu vytvo-Břit databázi '%-.192s' (chybový kód: %M)"
+ cze "Nemohu vytvořit databázi '%-.192s' (chybový kód: %M)"
dan "Kan ikke oprette databasen '%-.192s' (Fejlkode: %M)"
nla "Kan database '%-.192s' niet aanmaken (Errcode: %M)"
eng "Can't create database '%-.192s' (errno: %M)"
- jps "'%-.192s' データベースãŒä½œã‚Œã¾ã›ã‚“ (errno: %M)",
est "Ei suuda luua andmebaasi '%-.192s' (veakood: %M)"
fre "Ne peut créer la base '%-.192s' (Erreur %M)"
ger "Kann Datenbank '%-.192s' nicht erzeugen (Fehler: %M)"
greek "ΑδÏνατη η δημιουÏγία της βάσης δεδομένων '%-.192s' (κωδικός λάθους: %M)"
hun "Az '%-.192s' adatbazis nem hozhato letre (hibakod: %M)"
ita "Impossibile creare il database '%-.192s' (errno: %M)"
- jpn "'%-.192s' データベースãŒä½œã‚Œã¾ã›ã‚“ (errno: %M)"
+ jpn "データベース '%-.192s' を作æˆã§ãã¾ã›ã‚“。(エラー番å·: %M)"
kor "ë°ì´íƒ€ë² ì´ìŠ¤ '%-.192s'를 만들지 못했습니다.. (ì—러번호: %M)"
nor "Kan ikke opprette databasen '%-.192s' (Feilkode: %M)"
norwegian-ny "Kan ikkje opprette databasen '%-.192s' (Feilkode: %M)"
@@ -125,18 +124,17 @@ ER_CANT_CREATE_DB
swe "Kan inte skapa databasen '%-.192s' (Felkod: %M)"
ukr "Ðе можу Ñтворити базу данних '%-.192s' (помилка: %M)"
ER_DB_CREATE_EXISTS
- cze "Nemohu vytvo-Břit databázi '%-.192s'; databáze již existuje"
+ cze "Nemohu vytvořit databázi '%-.192s'; databáze již existuje"
dan "Kan ikke oprette databasen '%-.192s'; databasen eksisterer"
nla "Kan database '%-.192s' niet aanmaken; database bestaat reeds"
eng "Can't create database '%-.192s'; database exists"
- jps "'%-.192s' データベースãŒä½œã‚Œã¾ã›ã‚“.æ—¢ã«ãã®ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ãŒå­˜åœ¨ã—ã¾ã™",
est "Ei suuda luua andmebaasi '%-.192s': andmebaas juba eksisteerib"
fre "Ne peut créer la base '%-.192s'; elle existe déjà"
ger "Kann Datenbank '%-.192s' nicht erzeugen. Datenbank existiert bereits"
greek "ΑδÏνατη η δημιουÏγία της βάσης δεδομένων '%-.192s'; Η βάση δεδομένων υπάÏχει ήδη"
hun "Az '%-.192s' adatbazis nem hozhato letre Az adatbazis mar letezik"
ita "Impossibile creare il database '%-.192s'; il database esiste"
- jpn "'%-.192s' データベースãŒä½œã‚Œã¾ã›ã‚“.æ—¢ã«ãã®ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ãŒå­˜åœ¨ã—ã¾ã™"
+ jpn "データベース '%-.192s' を作æˆã§ãã¾ã›ã‚“。データベースã¯ã™ã§ã«å­˜åœ¨ã—ã¾ã™ã€‚"
kor "ë°ì´íƒ€ë² ì´ìŠ¤ '%-.192s'를 만들지 못했습니다.. ë°ì´íƒ€ë² ì´ìŠ¤ê°€ 존재함"
nor "Kan ikke opprette databasen '%-.192s'; databasen eksisterer"
norwegian-ny "Kan ikkje opprette databasen '%-.192s'; databasen eksisterer"
@@ -150,18 +148,17 @@ ER_DB_CREATE_EXISTS
swe "Databasen '%-.192s' existerar redan"
ukr "Ðе можу Ñтворити базу данних '%-.192s'. База данних Ñ–Ñнує"
ER_DB_DROP_EXISTS
- cze "Nemohu zru-Bšit databázi '%-.192s', databáze neexistuje"
+ cze "Nemohu zrušit databázi '%-.192s', databáze neexistuje"
dan "Kan ikke slette (droppe) '%-.192s'; databasen eksisterer ikke"
nla "Kan database '%-.192s' niet verwijderen; database bestaat niet"
eng "Can't drop database '%-.192s'; database doesn't exist"
- jps "'%-.192s' データベースを破棄ã§ãã¾ã›ã‚“. ãã®ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ãŒãªã„ã®ã§ã™.",
est "Ei suuda kustutada andmebaasi '%-.192s': andmebaasi ei eksisteeri"
fre "Ne peut effacer la base '%-.192s'; elle n'existe pas"
ger "Kann Datenbank '%-.192s' nicht löschen; Datenbank nicht vorhanden"
greek "ΑδÏνατη η διαγÏαφή της βάσης δεδομένων '%-.192s'. Η βάση δεδομένων δεν υπάÏχει"
hun "A(z) '%-.192s' adatbazis nem szuntetheto meg. Az adatbazis nem letezik"
ita "Impossibile cancellare '%-.192s'; il database non esiste"
- jpn "'%-.192s' データベースを破棄ã§ãã¾ã›ã‚“. ãã®ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ãŒãªã„ã®ã§ã™."
+ jpn "データベース '%-.192s' を削除ã§ãã¾ã›ã‚“。データベースã¯å­˜åœ¨ã—ã¾ã›ã‚“。"
kor "ë°ì´íƒ€ë² ì´ìŠ¤ '%-.192s'를 제거하지 못했습니다. ë°ì´íƒ€ë² ì´ìŠ¤ê°€ 존재하지 ì•ŠìŒ "
nor "Kan ikke fjerne (drop) '%-.192s'; databasen eksisterer ikke"
norwegian-ny "Kan ikkje fjerne (drop) '%-.192s'; databasen eksisterer ikkje"
@@ -175,18 +172,17 @@ ER_DB_DROP_EXISTS
swe "Kan inte radera databasen '%-.192s'; databasen finns inte"
ukr "Ðе можу видалити базу данних '%-.192s'. База данних не Ñ–Ñнує"
ER_DB_DROP_DELETE
- cze "Chyba p-Bři rušení databáze (nemohu vymazat '%-.192s', chyba %M)"
+ cze "Chyba při rušení databáze (nemohu vymazat '%-.192s', chyba %M)"
dan "Fejl ved sletning (drop) af databasen (kan ikke slette '%-.192s', Fejlkode %M)"
nla "Fout bij verwijderen database (kan '%-.192s' niet verwijderen, Errcode: %M)"
eng "Error dropping database (can't delete '%-.192s', errno: %M)"
- jps "データベース破棄エラー ('%-.192s' を削除ã§ãã¾ã›ã‚“, errno: %M)",
est "Viga andmebaasi kustutamisel (ei suuda kustutada faili '%-.192s', veakood: %M)"
fre "Ne peut effacer la base '%-.192s' (erreur %M)"
ger "Fehler beim Löschen der Datenbank ('%-.192s' kann nicht gelöscht werden, Fehler: %M)"
greek "ΠαÏουσιάστηκε Ï€Ïόβλημα κατά τη διαγÏαφή της βάσης δεδομένων (αδÏνατη η διαγÏαφή '%-.192s', κωδικός λάθους: %M)"
hun "Adatbazis megszuntetesi hiba ('%-.192s' nem torolheto, hibakod: %M)"
ita "Errore durante la cancellazione del database (impossibile cancellare '%-.192s', errno: %M)"
- jpn "データベース破棄エラー ('%-.192s' を削除ã§ãã¾ã›ã‚“, errno: %M)"
+ jpn "データベース削除エラー ('%-.192s' を削除ã§ãã¾ã›ã‚“。エラー番å·: %M)"
kor "ë°ì´íƒ€ë² ì´ìŠ¤ 제거 ì—러('%-.192s'를 삭제할 수 ì—†ì니다, ì—러번호: %M)"
nor "Feil ved fjerning (drop) av databasen (kan ikke slette '%-.192s', feil %M)"
norwegian-ny "Feil ved fjerning (drop) av databasen (kan ikkje slette '%-.192s', feil %M)"
@@ -200,18 +196,17 @@ ER_DB_DROP_DELETE
swe "Fel vid radering av databasen (Kan inte radera '%-.192s'. Felkod: %M)"
ukr "Ðе можу видалити базу данних (Ðе можу видалити '%-.192s', помилка: %M)"
ER_DB_DROP_RMDIR
- cze "Chyba p-Bři rušení databáze (nemohu vymazat adresář '%-.192s', chyba %M)"
+ cze "Chyba při rušení databáze (nemohu vymazat adresář '%-.192s', chyba %M)"
dan "Fejl ved sletting af database (kan ikke slette folderen '%-.192s', Fejlkode %M)"
nla "Fout bij verwijderen database (kan rmdir '%-.192s' niet uitvoeren, Errcode: %M)"
eng "Error dropping database (can't rmdir '%-.192s', errno: %M)"
- jps "データベース破棄エラー ('%-.192s' ã‚’ rmdir ã§ãã¾ã›ã‚“, errno: %M)",
est "Viga andmebaasi kustutamisel (ei suuda kustutada kataloogi '%-.192s', veakood: %M)"
fre "Erreur en effaçant la base (rmdir '%-.192s', erreur %M)"
ger "Fehler beim Löschen der Datenbank (Verzeichnis '%-.192s' kann nicht gelöscht werden, Fehler: %M)"
greek "ΠαÏουσιάστηκε Ï€Ïόβλημα κατά τη διαγÏαφή της βάσης δεδομένων (αδÏνατη η διαγÏαφή του φακέλλου '%-.192s', κωδικός λάθους: %M)"
hun "Adatbazis megszuntetesi hiba ('%-.192s' nem szuntetheto meg, hibakod: %M)"
ita "Errore durante la cancellazione del database (impossibile rmdir '%-.192s', errno: %M)"
- jpn "データベース破棄エラー ('%-.192s' ã‚’ rmdir ã§ãã¾ã›ã‚“, errno: %M)"
+ jpn "データベース削除エラー (ディレクトリ '%-.192s' を削除ã§ãã¾ã›ã‚“。エラー番å·: %M)"
kor "ë°ì´íƒ€ë² ì´ìŠ¤ 제거 ì—러(rmdir '%-.192s'를 í•  수 ì—†ì니다, ì—러번호: %M)"
nor "Feil ved sletting av database (kan ikke slette katalogen '%-.192s', feil %M)"
norwegian-ny "Feil ved sletting av database (kan ikkje slette katalogen '%-.192s', feil %M)"
@@ -225,18 +220,17 @@ ER_DB_DROP_RMDIR
swe "Fel vid radering av databasen (Kan inte radera biblioteket '%-.192s'. Felkod: %M)"
ukr "Ðе можу видалити базу данних (Ðе можу видалити теку '%-.192s', помилка: %M)"
ER_CANT_DELETE_FILE
- cze "Chyba p-Bři výmazu '%-.192s' (chybový kód: %M)"
+ cze "Chyba při výmazu '%-.192s' (chybový kód: %M)"
dan "Fejl ved sletning af '%-.192s' (Fejlkode: %M)"
nla "Fout bij het verwijderen van '%-.192s' (Errcode: %M)"
eng "Error on delete of '%-.192s' (errno: %M)"
- jps "'%-.192s' ã®å‰Šé™¤ãŒã‚¨ãƒ©ãƒ¼ (errno: %M)",
est "Viga '%-.192s' kustutamisel (veakood: %M)"
fre "Erreur en effaçant '%-.192s' (Errcode: %M)"
ger "Fehler beim Löschen von '%-.192s' (Fehler: %M)"
greek "ΠαÏουσιάστηκε Ï€Ïόβλημα κατά τη διαγÏαφή '%-.192s' (κωδικός λάθους: %M)"
hun "Torlesi hiba: '%-.192s' (hibakod: %M)"
ita "Errore durante la cancellazione di '%-.192s' (errno: %M)"
- jpn "'%-.192s' ã®å‰Šé™¤ãŒã‚¨ãƒ©ãƒ¼ (errno: %M)"
+ jpn "ファイル '%-.192s' の削除エラー (エラー番号: %M)"
kor "'%-.192s' ì‚­ì œ 중 ì—러 (ì—러번호: %M)"
nor "Feil ved sletting av '%-.192s' (Feilkode: %M)"
norwegian-ny "Feil ved sletting av '%-.192s' (Feilkode: %M)"
@@ -250,18 +244,17 @@ ER_CANT_DELETE_FILE
swe "Kan inte radera filen '%-.192s' (Felkod: %M)"
ukr "Ðе можу видалити '%-.192s' (помилка: %M)"
ER_CANT_FIND_SYSTEM_REC
- cze "Nemohu -BÄíst záznam v systémové tabulce"
+ cze "Nemohu Äíst záznam v systémové tabulce"
dan "Kan ikke læse posten i systemfolderen"
nla "Kan record niet lezen in de systeem tabel"
eng "Can't read record in system table"
- jps "system table ã®ãƒ¬ã‚³ãƒ¼ãƒ‰ã‚’読む事ãŒã§ãã¾ã›ã‚“ã§ã—ãŸ",
est "Ei suuda lugeda kirjet süsteemsest tabelist"
fre "Ne peut lire un enregistrement de la table 'system'"
ger "Datensatz in der Systemtabelle nicht lesbar"
greek "ΑδÏνατη η ανάγνωση εγγÏαφής από πίνακα του συστήματος"
hun "Nem olvashato rekord a rendszertablaban"
ita "Impossibile leggere il record dalla tabella di sistema"
- jpn "system table ã®ãƒ¬ã‚³ãƒ¼ãƒ‰ã‚’読む事ãŒã§ãã¾ã›ã‚“ã§ã—ãŸ"
+ jpn "システム表ã®ãƒ¬ã‚³ãƒ¼ãƒ‰ã‚’読ã¿è¾¼ã‚ã¾ã›ã‚“。"
kor "system í…Œì´ë¸”ì—ì„œ 레코드를 ì½ì„ 수 없습니다."
nor "Kan ikke lese posten i systemkatalogen"
norwegian-ny "Kan ikkje lese posten i systemkatalogen"
@@ -275,18 +268,17 @@ ER_CANT_FIND_SYSTEM_REC
swe "Hittar inte posten i systemregistret"
ukr "Ðе можу зчитати Ð·Ð°Ð¿Ð¸Ñ Ð· ÑиÑтемної таблиці"
ER_CANT_GET_STAT
- cze "Nemohu z-Bískat stav '%-.200s' (chybový kód: %M)"
+ cze "Nemohu získat stav '%-.200s' (chybový kód: %M)"
dan "Kan ikke læse status af '%-.200s' (Fejlkode: %M)"
nla "Kan de status niet krijgen van '%-.200s' (Errcode: %M)"
eng "Can't get status of '%-.200s' (errno: %M)"
- jps "'%-.200s' ã®ã‚¹ãƒ†ã‚¤ã‚¿ã‚¹ãŒå¾—られã¾ã›ã‚“. (errno: %M)",
est "Ei suuda lugeda '%-.200s' olekut (veakood: %M)"
fre "Ne peut obtenir le status de '%-.200s' (Errcode: %M)"
ger "Kann Status von '%-.200s' nicht ermitteln (Fehler: %M)"
greek "ΑδÏνατη η λήψη πληÏοφοÏιών για την κατάσταση του '%-.200s' (κωδικός λάθους: %M)"
hun "A(z) '%-.200s' statusza nem allapithato meg (hibakod: %M)"
ita "Impossibile leggere lo stato di '%-.200s' (errno: %M)"
- jpn "'%-.200s' ã®ã‚¹ãƒ†ã‚¤ã‚¿ã‚¹ãŒå¾—られã¾ã›ã‚“. (errno: %M)"
+ jpn "'%-.200s' ã®çŠ¶æ…‹ã‚’å–å¾—ã§ãã¾ã›ã‚“。(エラー番å·: %M)"
kor "'%-.200s'ì˜ ìƒíƒœë¥¼ 얻지 못했습니다. (ì—러번호: %M)"
nor "Kan ikke lese statusen til '%-.200s' (Feilkode: %M)"
norwegian-ny "Kan ikkje lese statusen til '%-.200s' (Feilkode: %M)"
@@ -300,18 +292,17 @@ ER_CANT_GET_STAT
swe "Kan inte läsa filinformationen (stat) från '%-.200s' (Felkod: %M)"
ukr "Ðе можу отримати ÑÑ‚Ð°Ñ‚ÑƒÑ '%-.200s' (помилка: %M)"
ER_CANT_GET_WD
- cze "Chyba p-Bři zjišťování pracovní adresář (chybový kód: %M)"
+ cze "Chyba při zjišťování pracovní adresář (chybový kód: %M)"
dan "Kan ikke læse aktive folder (Fejlkode: %M)"
nla "Kan de werkdirectory niet krijgen (Errcode: %M)"
eng "Can't get working directory (errno: %M)"
- jps "working directory を得る事ãŒã§ãã¾ã›ã‚“ã§ã—㟠(errno: %M)",
est "Ei suuda identifitseerida jooksvat kataloogi (veakood: %M)"
fre "Ne peut obtenir le répertoire de travail (Errcode: %M)"
ger "Kann Arbeitsverzeichnis nicht ermitteln (Fehler: %M)"
greek "Ο φάκελλος εÏγασίας δεν βÏέθηκε (κωδικός λάθους: %M)"
hun "A munkakonyvtar nem allapithato meg (hibakod: %M)"
ita "Impossibile leggere la directory di lavoro (errno: %M)"
- jpn "working directory を得る事ãŒã§ãã¾ã›ã‚“ã§ã—㟠(errno: %M)"
+ jpn "作業ディレクトリをå–å¾—ã§ãã¾ã›ã‚“。(エラー番å·: %M)"
kor "수행 디렉토리를 찾지 못했습니다. (ì—러번호: %M)"
nor "Kan ikke lese aktiv katalog(Feilkode: %M)"
norwegian-ny "Kan ikkje lese aktiv katalog(Feilkode: %M)"
@@ -325,18 +316,17 @@ ER_CANT_GET_WD
swe "Kan inte inte läsa aktivt bibliotek. (Felkod: %M)"
ukr "Ðе можу визначити робочу теку (помилка: %M)"
ER_CANT_LOCK
- cze "Nemohu uzamknout soubor (chybov-Bý kód: %M)"
+ cze "Nemohu uzamknout soubor (chybový kód: %M)"
dan "Kan ikke låse fil (Fejlkode: %M)"
nla "Kan de file niet blokeren (Errcode: %M)"
eng "Can't lock file (errno: %M)"
- jps "ファイルをロックã§ãã¾ã›ã‚“ (errno: %M)",
est "Ei suuda lukustada faili (veakood: %M)"
fre "Ne peut verrouiller le fichier (Errcode: %M)"
ger "Datei kann nicht gesperrt werden (Fehler: %M)"
greek "Το αÏχείο δεν μποÏεί να κλειδωθεί (κωδικός λάθους: %M)"
hun "A file nem zarolhato. (hibakod: %M)"
ita "Impossibile il locking il file (errno: %M)"
- jpn "ファイルをロックã§ãã¾ã›ã‚“ (errno: %M)"
+ jpn "ファイルをロックã§ãã¾ã›ã‚“。(エラー番å·: %M)"
kor "í™”ì¼ì„ 잠그지(lock) 못했습니다. (ì—러번호: %M)"
nor "Kan ikke låse fila (Feilkode: %M)"
norwegian-ny "Kan ikkje låse fila (Feilkode: %M)"
@@ -350,18 +340,17 @@ ER_CANT_LOCK
swe "Kan inte låsa filen. (Felkod: %M)"
ukr "Ðе можу заблокувати файл (помилка: %M)"
ER_CANT_OPEN_FILE
- cze "Nemohu otev-Břít soubor '%-.200s' (chybový kód: %M)"
+ cze "Nemohu otevřít soubor '%-.200s' (chybový kód: %M)"
dan "Kan ikke åbne fil: '%-.200s' (Fejlkode: %M)"
nla "Kan de file '%-.200s' niet openen (Errcode: %M)"
eng "Can't open file: '%-.200s' (errno: %M)"
- jps "'%-.200s' ファイルを開ã事ãŒã§ãã¾ã›ã‚“ (errno: %M)",
est "Ei suuda avada faili '%-.200s' (veakood: %M)"
fre "Ne peut ouvrir le fichier: '%-.200s' (Errcode: %M)"
ger "Kann Datei '%-.200s' nicht öffnen (Fehler: %M)"
greek "Δεν είναι δυνατό να ανοιχτεί το αÏχείο: '%-.200s' (κωδικός λάθους: %M)"
hun "A '%-.200s' file nem nyithato meg (hibakod: %M)"
ita "Impossibile aprire il file: '%-.200s' (errno: %M)"
- jpn "'%-.200s' ファイルを開ã事ãŒã§ãã¾ã›ã‚“ (errno: %M)"
+ jpn "ファイル '%-.200s' をオープンã§ãã¾ã›ã‚“。(エラー番å·: %M)"
kor "í™”ì¼ì„ 열지 못했습니다.: '%-.200s' (ì—러번호: %M)"
nor "Kan ikke åpne fila: '%-.200s' (Feilkode: %M)"
norwegian-ny "Kan ikkje åpne fila: '%-.200s' (Feilkode: %M)"
@@ -375,18 +364,17 @@ ER_CANT_OPEN_FILE
swe "Kan inte använda '%-.200s' (Felkod: %M)"
ukr "Ðе можу відкрити файл: '%-.200s' (помилка: %M)"
ER_FILE_NOT_FOUND
- cze "Nemohu naj-Bít soubor '%-.200s' (chybový kód: %M)"
+ cze "Nemohu najít soubor '%-.200s' (chybový kód: %M)"
dan "Kan ikke finde fila: '%-.200s' (Fejlkode: %M)"
nla "Kan de file: '%-.200s' niet vinden (Errcode: %M)"
eng "Can't find file: '%-.200s' (errno: %M)"
- jps "'%-.200s' ファイルを見付ã‘る事ãŒã§ãã¾ã›ã‚“.(errno: %M)",
est "Ei suuda leida faili '%-.200s' (veakood: %M)"
fre "Ne peut trouver le fichier: '%-.200s' (Errcode: %M)"
ger "Kann Datei '%-.200s' nicht finden (Fehler: %M)"
greek "Δεν βÏέθηκε το αÏχείο: '%-.200s' (κωδικός λάθους: %M)"
hun "A(z) '%-.200s' file nem talalhato (hibakod: %M)"
ita "Impossibile trovare il file: '%-.200s' (errno: %M)"
- jpn "'%-.200s' ファイルを見付ã‘る事ãŒã§ãã¾ã›ã‚“.(errno: %M)"
+ jpn "ファイル '%-.200s' ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。(エラー番å·: %M)"
kor "í™”ì¼ì„ 찾지 못했습니다.: '%-.200s' (ì—러번호: %M)"
nor "Kan ikke finne fila: '%-.200s' (Feilkode: %M)"
norwegian-ny "Kan ikkje finne fila: '%-.200s' (Feilkode: %M)"
@@ -400,18 +388,17 @@ ER_FILE_NOT_FOUND
swe "Hittar inte filen '%-.200s' (Felkod: %M)"
ukr "Ðе можу знайти файл: '%-.200s' (помилка: %M)"
ER_CANT_READ_DIR
- cze "Nemohu -BÄíst adresář '%-.192s' (chybový kód: %M)"
+ cze "Nemohu Äíst adresář '%-.192s' (chybový kód: %M)"
dan "Kan ikke læse folder '%-.192s' (Fejlkode: %M)"
nla "Kan de directory niet lezen van '%-.192s' (Errcode: %M)"
eng "Can't read dir of '%-.192s' (errno: %M)"
- jps "'%-.192s' ディレクトリãŒèª­ã‚ã¾ã›ã‚“.(errno: %M)",
est "Ei suuda lugeda kataloogi '%-.192s' (veakood: %M)"
fre "Ne peut lire le répertoire de '%-.192s' (Errcode: %M)"
ger "Verzeichnis von '%-.192s' nicht lesbar (Fehler: %M)"
greek "Δεν είναι δυνατό να διαβαστεί ο φάκελλος του '%-.192s' (κωδικός λάθους: %M)"
hun "A(z) '%-.192s' konyvtar nem olvashato. (hibakod: %M)"
ita "Impossibile leggere la directory di '%-.192s' (errno: %M)"
- jpn "'%-.192s' ディレクトリãŒèª­ã‚ã¾ã›ã‚“.(errno: %M)"
+ jpn "ディレクトリ '%-.192s' を読ã¿è¾¼ã‚ã¾ã›ã‚“。(エラー番å·: %M)"
kor "'%-.192s'디렉토리를 ì½ì§€ 못했습니다. (ì—러번호: %M)"
nor "Kan ikke lese katalogen '%-.192s' (Feilkode: %M)"
norwegian-ny "Kan ikkje lese katalogen '%-.192s' (Feilkode: %M)"
@@ -425,18 +412,17 @@ ER_CANT_READ_DIR
swe "Kan inte läsa från bibliotek '%-.192s' (Felkod: %M)"
ukr "Ðе можу прочитати теку '%-.192s' (помилка: %M)"
ER_CANT_SET_WD
- cze "Nemohu zm-Běnit adresář na '%-.192s' (chybový kód: %M)"
+ cze "Nemohu změnit adresář na '%-.192s' (chybový kód: %M)"
dan "Kan ikke skifte folder til '%-.192s' (Fejlkode: %M)"
nla "Kan de directory niet veranderen naar '%-.192s' (Errcode: %M)"
eng "Can't change dir to '%-.192s' (errno: %M)"
- jps "'%-.192s' ディレクトリ㫠chdir ã§ãã¾ã›ã‚“.(errno: %M)",
est "Ei suuda siseneda kataloogi '%-.192s' (veakood: %M)"
fre "Ne peut changer le répertoire pour '%-.192s' (Errcode: %M)"
ger "Kann nicht in das Verzeichnis '%-.192s' wechseln (Fehler: %M)"
greek "ΑδÏνατη η αλλαγή του Ï„Ïέχοντος καταλόγου σε '%-.192s' (κωδικός λάθους: %M)"
hun "Konyvtarvaltas nem lehetseges a(z) '%-.192s'-ba. (hibakod: %M)"
ita "Impossibile cambiare la directory in '%-.192s' (errno: %M)"
- jpn "'%-.192s' ディレクトリ㫠chdir ã§ãã¾ã›ã‚“.(errno: %M)"
+ jpn "ディレクトリ '%-.192s' ã«ç§»å‹•ã§ãã¾ã›ã‚“。(エラー番å·: %M)"
kor "'%-.192s'디렉토리로 ì´ë™í•  수 없었습니다. (ì—러번호: %M)"
nor "Kan ikke skifte katalog til '%-.192s' (Feilkode: %M)"
norwegian-ny "Kan ikkje skifte katalog til '%-.192s' (Feilkode: %M)"
@@ -450,7 +436,7 @@ ER_CANT_SET_WD
swe "Kan inte byta till '%-.192s' (Felkod: %M)"
ukr "Ðе можу перейти у теку '%-.192s' (помилка: %M)"
ER_CHECKREAD
- cze "Z-Báznam byl zmÄ›nÄ›n od posledního Ätení v tabulce '%-.192s'"
+ cze "Záznam byl zmÄ›nÄ›n od posledního Ätení v tabulce '%-.192s'"
dan "Posten er ændret siden sidste læsning '%-.192s'"
nla "Record is veranderd sinds de laatste lees activiteit in de tabel '%-.192s'"
eng "Record has changed since last read in table '%-.192s'"
@@ -460,6 +446,7 @@ ER_CHECKREAD
greek "Η εγγÏαφή έχει αλλάξει από την τελευταία φοÏά που ανασÏÏθηκε από τον πίνακα '%-.192s'"
hun "A(z) '%-.192s' tablaban talalhato rekord megvaltozott az utolso olvasas ota"
ita "Il record e` cambiato dall'ultima lettura della tabella '%-.192s'"
+ jpn "表 '%-.192s' の最後の読み込み時点から、レコードが変化しました。"
kor "í…Œì´ë¸” '%-.192s'ì—ì„œ 마지막으로 ì½ì€ 후 Recordê°€ 변경ë˜ì—ˆìŠµë‹ˆë‹¤."
nor "Posten har blitt endret siden den ble lest '%-.192s'"
norwegian-ny "Posten har vorte endra sidan den sist vart lesen '%-.192s'"
@@ -472,44 +459,42 @@ ER_CHECKREAD
spa "El registro ha cambiado desde la ultima lectura de la tabla '%-.192s'"
swe "Posten har förändrats sedan den lästes i register '%-.192s'"
ukr "Ð—Ð°Ð¿Ð¸Ñ Ð±ÑƒÐ»Ð¾ змінено з чаÑу оÑтаннього Ñ‡Ð¸Ñ‚Ð°Ð½Ð½Ñ Ð· таблиці '%-.192s'"
-ER_DISK_FULL
- cze "Disk je pln-Bý (%s), Äekám na uvolnÄ›ní nÄ›jakého místa ..."
- dan "Ikke mere diskplads (%s). Venter på at få frigjort plads..."
- nla "Schijf vol (%s). Aan het wachten totdat er ruimte vrij wordt gemaakt..."
- eng "Disk full (%s); waiting for someone to free some space..."
- jps "Disk full (%s). 誰ã‹ãŒä½•ã‹ã‚’減らã™ã¾ã§ã¾ã£ã¦ãã ã•ã„...",
- est "Ketas täis (%s). Ootame kuni tekib vaba ruumi..."
- fre "Disque plein (%s). J'attend que quelqu'un libère de l'espace..."
- ger "Festplatte voll (%s). Warte, bis jemand Platz schafft ..."
- greek "Δεν υπάÏχει χώÏος στο δίσκο (%s). ΠαÏακαλώ, πεÏιμένετε να ελευθεÏωθεί χώÏος..."
- hun "A lemez megtelt (%s)."
- ita "Disco pieno (%s). In attesa che qualcuno liberi un po' di spazio..."
- jpn "Disk full (%s). 誰ã‹ãŒä½•ã‹ã‚’減らã™ã¾ã§ã¾ã£ã¦ãã ã•ã„..."
- kor "Disk full (%s). 다른 ì‚¬ëžŒì´ ì§€ìš¸ë•Œê¹Œì§€ 기다립니다..."
- nor "Ikke mer diskplass (%s). Venter på å få frigjort plass..."
- norwegian-ny "Ikkje meir diskplass (%s). Ventar på å få frigjort plass..."
- pol "Dysk pełny (%s). Oczekiwanie na zwolnienie miejsca..."
- por "Disco cheio (%s). Aguardando alguém liberar algum espaço..."
- rum "Hard-disk-ul este plin (%s). Astept sa se elibereze ceva spatiu..."
- rus "ДиÑк заполнен. (%s). Ожидаем, пока кто-то не уберет поÑле ÑÐµÐ±Ñ Ð¼ÑƒÑор..."
- serbian "Disk je pun (%s). Čekam nekoga da dođe i oslobodi nešto mesta..."
- slo "Disk je plný (%s), Äakám na uvoľnenie miesta..."
- spa "Disco lleno (%s). Esperando para que se libere algo de espacio..."
- swe "Disken är full (%s). Väntar tills det finns ledigt utrymme..."
- ukr "ДиÑк заповнений (%s). Вичикую, доки звільнитьÑÑ Ñ‚Ñ€Ð¾Ñ…Ð¸ міÑцÑ..."
+ER_DISK_FULL
+ cze "Disk je plný (%s), Äekám na uvolnÄ›ní nÄ›jakého místa ... (chybový kód: %M)"
+ dan "Ikke mere diskplads (%s). Venter på at få frigjort plads... (Fejlkode: %M)"
+ nla "Schijf vol (%s). Aan het wachten totdat er ruimte vrij wordt gemaakt... (Errcode: %M)"
+ eng "Disk full (%s); waiting for someone to free some space... (errno: %M)"
+ est "Ketas täis (%s). Ootame kuni tekib vaba ruumi... (veakood: %M)"
+ fre "Disque plein (%s). J'attend que quelqu'un libère de l'espace... (Errcode: %M)"
+ ger "Festplatte voll (%s). Warte, bis jemand Platz schafft ... (Fehler: %M)"
+ greek "Δεν υπάÏχει χώÏος στο δίσκο (%s). ΠαÏακαλώ, πεÏιμένετε να ελευθεÏωθεί χώÏος... (κωδικός λάθους: %M)"
+ hun "A lemez megtelt (%s). (hibakod: %M)"
+ ita "Disco pieno (%s). In attesa che qualcuno liberi un po' di spazio... (errno: %M)"
+ jpn "ディスク領域ä¸è¶³ã§ã™(%s)。(エラー番å·: %M)"
+ kor "Disk full (%s). 다른 ì‚¬ëžŒì´ ì§€ìš¸ë•Œê¹Œì§€ 기다립니다... (ì—러번호: %M)"
+ nor "Ikke mer diskplass (%s). Venter på å få frigjort plass... (Feilkode: %M)"
+ norwegian-ny "Ikkje meir diskplass (%s). Ventar på å få frigjort plass... (Feilkode: %M)"
+ pol "Dysk pełny (%s). Oczekiwanie na zwolnienie miejsca... (Kod błędu: %M)"
+ por "Disco cheio (%s). Aguardando alguém liberar algum espaço... (erro no. %M)"
+ rum "Hard-disk-ul este plin (%s). Astept sa se elibereze ceva spatiu... (Eroare: %M)"
+ rus "ДиÑк заполнен. (%s). Ожидаем, пока кто-то не уберет поÑле ÑÐµÐ±Ñ Ð¼ÑƒÑор... (ошибка: %M)"
+ serbian "Disk je pun (%s). Čekam nekoga da dođe i oslobodi nešto mesta... (errno: %M)"
+ slo "Disk je plný (%s), Äakám na uvoľnenie miesta... (chybový kód: %M)"
+ spa "Disco lleno (%s). Esperando para que se libere algo de espacio... (Error: %M)"
+ swe "Disken är full (%s). Väntar tills det finns ledigt utrymme... (Felkod: %M)"
+ ukr "ДиÑк заповнений (%s). Вичикую, доки звільнитьÑÑ Ñ‚Ñ€Ð¾Ñ…Ð¸ міÑцÑ... (помилка: %M)"
ER_DUP_KEY 23000
- cze "Nemohu zapsat, zdvojen-Bý klÃ­Ä v tabulce '%-.192s'"
+ cze "Nemohu zapsat, zdvojený klÃ­Ä v tabulce '%-.192s'"
dan "Kan ikke skrive, flere ens nøgler i tabellen '%-.192s'"
nla "Kan niet schrijven, dubbele zoeksleutel in tabel '%-.192s'"
eng "Can't write; duplicate key in table '%-.192s'"
- jps "table '%-.192s' ã« key ãŒé‡è¤‡ã—ã¦ã„ã¦æ›¸ãã“ã‚ã¾ã›ã‚“",
est "Ei saa kirjutada, korduv võti tabelis '%-.192s'"
fre "Ecriture impossible, doublon dans une clé de la table '%-.192s'"
ger "Kann nicht speichern, Grund: doppelter Schlüssel in Tabelle '%-.192s'"
greek "Δεν είναι δυνατή η καταχώÏηση, η τιμή υπάÏχει ήδη στον πίνακα '%-.192s'"
hun "Irasi hiba, duplikalt kulcs a '%-.192s' tablaban."
ita "Scrittura impossibile: chiave duplicata nella tabella '%-.192s'"
- jpn "table '%-.192s' ã« key ãŒé‡è¤‡ã—ã¦ã„ã¦æ›¸ãã“ã‚ã¾ã›ã‚“"
+ jpn "書ãè¾¼ã‚ã¾ã›ã‚“。表 '%-.192s' ã«é‡è¤‡ã™ã‚‹ã‚­ãƒ¼ãŒã‚ã‚Šã¾ã™ã€‚"
kor "기ë¡í•  수 ì—†ì니다., í…Œì´ë¸” '%-.192s'ì—ì„œ 중복 키"
nor "Kan ikke skrive, flere like nøkler i tabellen '%-.192s'"
norwegian-ny "Kan ikkje skrive, flere like nyklar i tabellen '%-.192s'"
@@ -523,7 +508,7 @@ ER_DUP_KEY 23000
swe "Kan inte skriva, dubbel söknyckel i register '%-.192s'"
ukr "Ðе можу запиÑати, дублюючийÑÑ ÐºÐ»ÑŽÑ‡ в таблиці '%-.192s'"
ER_ERROR_ON_CLOSE
- cze "Chyba p-Bři zavírání '%-.192s' (chybový kód: %M)"
+ cze "Chyba při zavírání '%-.192s' (chybový kód: %M)"
dan "Fejl ved lukning af '%-.192s' (Fejlkode: %M)"
nla "Fout bij het sluiten van '%-.192s' (Errcode: %M)"
eng "Error on close of '%-.192s' (errno: %M)"
@@ -533,6 +518,7 @@ ER_ERROR_ON_CLOSE
greek "ΠαÏουσιάστηκε Ï€Ïόβλημα κλείνοντας το '%-.192s' (κωδικός λάθους: %M)"
hun "Hiba a(z) '%-.192s' zarasakor. (hibakod: %M)"
ita "Errore durante la chiusura di '%-.192s' (errno: %M)"
+ jpn "'%-.192s' のクローズ時エラー (エラー番号: %M)"
kor "'%-.192s'닫는 중 ì—러 (ì—러번호: %M)"
nor "Feil ved lukking av '%-.192s' (Feilkode: %M)"
norwegian-ny "Feil ved lukking av '%-.192s' (Feilkode: %M)"
@@ -546,18 +532,17 @@ ER_ERROR_ON_CLOSE
swe "Fick fel vid stängning av '%-.192s' (Felkod: %M)"
ukr "Ðе можу закрити '%-.192s' (помилка: %M)"
ER_ERROR_ON_READ
- cze "Chyba p-BÅ™i Ätení souboru '%-.200s' (chybový kód: %M)"
+ cze "Chyba pÅ™i Ätení souboru '%-.200s' (chybový kód: %M)"
dan "Fejl ved læsning af '%-.200s' (Fejlkode: %M)"
nla "Fout bij het lezen van file '%-.200s' (Errcode: %M)"
eng "Error reading file '%-.200s' (errno: %M)"
- jps "'%-.200s' ファイルã®èª­ã¿è¾¼ã¿ã‚¨ãƒ©ãƒ¼ (errno: %M)",
est "Viga faili '%-.200s' lugemisel (veakood: %M)"
fre "Erreur en lecture du fichier '%-.200s' (Errcode: %M)"
ger "Fehler beim Lesen der Datei '%-.200s' (Fehler: %M)"
greek "ΠÏόβλημα κατά την ανάγνωση του αÏχείου '%-.200s' (κωδικός λάθους: %M)"
hun "Hiba a '%-.200s'file olvasasakor. (hibakod: %M)"
ita "Errore durante la lettura del file '%-.200s' (errno: %M)"
- jpn "'%-.200s' ファイルã®èª­ã¿è¾¼ã¿ã‚¨ãƒ©ãƒ¼ (errno: %M)"
+ jpn "ファイル '%-.200s' ã®èª­ã¿è¾¼ã¿ã‚¨ãƒ©ãƒ¼ (エラー番å·: %M)"
kor "'%-.200s'í™”ì¼ ì½ê¸° ì—러 (ì—러번호: %M)"
nor "Feil ved lesing av '%-.200s' (Feilkode: %M)"
norwegian-ny "Feil ved lesing av '%-.200s' (Feilkode: %M)"
@@ -571,18 +556,17 @@ ER_ERROR_ON_READ
swe "Fick fel vid läsning av '%-.200s' (Felkod %M)"
ukr "Ðе можу прочитати файл '%-.200s' (помилка: %M)"
ER_ERROR_ON_RENAME
- cze "Chyba p-Bři přejmenování '%-.210s' na '%-.210s' (chybový kód: %M)"
+ cze "Chyba při přejmenování '%-.210s' na '%-.210s' (chybový kód: %M)"
dan "Fejl ved omdøbning af '%-.210s' til '%-.210s' (Fejlkode: %M)"
nla "Fout bij het hernoemen van '%-.210s' naar '%-.210s' (Errcode: %M)"
eng "Error on rename of '%-.210s' to '%-.210s' (errno: %M)"
- jps "'%-.210s' ã‚’ '%-.210s' ã« rename ã§ãã¾ã›ã‚“ (errno: %M)",
est "Viga faili '%-.210s' ümbernimetamisel '%-.210s'-ks (veakood: %M)"
fre "Erreur en renommant '%-.210s' en '%-.210s' (Errcode: %M)"
ger "Fehler beim Umbenennen von '%-.210s' in '%-.210s' (Fehler: %M)"
greek "ΠÏόβλημα κατά την μετονομασία του αÏχείου '%-.210s' to '%-.210s' (κωδικός λάθους: %M)"
hun "Hiba a '%-.210s' file atnevezesekor '%-.210s'. (hibakod: %M)"
ita "Errore durante la rinominazione da '%-.210s' a '%-.210s' (errno: %M)"
- jpn "'%-.210s' ã‚’ '%-.210s' ã« rename ã§ãã¾ã›ã‚“ (errno: %M)"
+ jpn "'%-.210s' ã®åå‰ã‚’ '%-.210s' ã«å¤‰æ›´ã§ãã¾ã›ã‚“ (エラー番å·: %M)"
kor "'%-.210s'를 '%-.210s'ë¡œ ì´ë¦„ 변경중 ì—러 (ì—러번호: %M)"
nor "Feil ved omdøping av '%-.210s' til '%-.210s' (Feilkode: %M)"
norwegian-ny "Feil ved omdøyping av '%-.210s' til '%-.210s' (Feilkode: %M)"
@@ -596,18 +580,17 @@ ER_ERROR_ON_RENAME
swe "Kan inte byta namn från '%-.210s' till '%-.210s' (Felkod: %M)"
ukr "Ðе можу перейменувати '%-.210s' у '%-.210s' (помилка: %M)"
ER_ERROR_ON_WRITE
- cze "Chyba p-Bři zápisu do souboru '%-.200s' (chybový kód: %M)"
+ cze "Chyba při zápisu do souboru '%-.200s' (chybový kód: %M)"
dan "Fejl ved skriving av filen '%-.200s' (Fejlkode: %M)"
nla "Fout bij het wegschrijven van file '%-.200s' (Errcode: %M)"
eng "Error writing file '%-.200s' (errno: %M)"
- jps "'%-.200s' ファイルを書ã事ãŒã§ãã¾ã›ã‚“ (errno: %M)",
est "Viga faili '%-.200s' kirjutamisel (veakood: %M)"
fre "Erreur d'écriture du fichier '%-.200s' (Errcode: %M)"
ger "Fehler beim Speichern der Datei '%-.200s' (Fehler: %M)"
greek "ΠÏόβλημα κατά την αποθήκευση του αÏχείου '%-.200s' (κωδικός λάθους: %M)"
hun "Hiba a '%-.200s' file irasakor. (hibakod: %M)"
ita "Errore durante la scrittura del file '%-.200s' (errno: %M)"
- jpn "'%-.200s' ファイルを書ã事ãŒã§ãã¾ã›ã‚“ (errno: %M)"
+ jpn "ファイル '%-.200s' ã®æ›¸ãè¾¼ã¿ã‚¨ãƒ©ãƒ¼ (エラー番å·: %M)"
kor "'%-.200s'í™”ì¼ ê¸°ë¡ ì¤‘ ì—러 (ì—러번호: %M)"
nor "Feil ved skriving av fila '%-.200s' (Feilkode: %M)"
norwegian-ny "Feil ved skriving av fila '%-.200s' (Feilkode: %M)"
@@ -621,18 +604,17 @@ ER_ERROR_ON_WRITE
swe "Fick fel vid skrivning till '%-.200s' (Felkod %M)"
ukr "Ðе можу запиÑати файл '%-.200s' (помилка: %M)"
ER_FILE_USED
- cze "'%-.192s' je zam-BÄen proti zmÄ›nám"
+ cze "'%-.192s' je zamÄen proti zmÄ›nám"
dan "'%-.192s' er låst mod opdateringer"
nla "'%-.192s' is geblokeerd tegen veranderingen"
eng "'%-.192s' is locked against change"
- jps "'%-.192s' ã¯ãƒ­ãƒƒã‚¯ã•ã‚Œã¦ã„ã¾ã™",
est "'%-.192s' on lukustatud muudatuste vastu"
fre "'%-.192s' est verrouillé contre les modifications"
ger "'%-.192s' ist für Änderungen gesperrt"
greek "'%-.192s' δεν επιτÏέπονται αλλαγές"
hun "'%-.192s' a valtoztatas ellen zarolva"
ita "'%-.192s' e` soggetto a lock contro i cambiamenti"
- jpn "'%-.192s' ã¯ãƒ­ãƒƒã‚¯ã•ã‚Œã¦ã„ã¾ã™"
+ jpn "'%-.192s' ã¯ãƒ­ãƒƒã‚¯ã•ã‚Œã¦ã„ã¾ã™ã€‚"
kor "'%-.192s'ê°€ 변경할 수 ì—†ë„ë¡ ìž ê²¨ìžˆì니다."
nor "'%-.192s' er låst mot oppdateringer"
norwegian-ny "'%-.192s' er låst mot oppdateringar"
@@ -646,18 +628,17 @@ ER_FILE_USED
swe "'%-.192s' är låst mot användning"
ukr "'%-.192s' заблокований на внеÑÐµÐ½Ð½Ñ Ð·Ð¼Ñ–Ð½"
ER_FILSORT_ABORT
- cze "T-Břídění přerušeno"
+ cze "Třídění přerušeno"
dan "Sortering afbrudt"
nla "Sorteren afgebroken"
eng "Sort aborted"
- jps "Sort 中断",
est "Sorteerimine katkestatud"
fre "Tri alphabétique abandonné"
ger "Sortiervorgang abgebrochen"
greek "Η διαδικασία ταξινόμισης ακυÏώθηκε"
hun "Sikertelen rendezes"
ita "Operazione di ordinamento abbandonata"
- jpn "Sort 中断"
+ jpn "ソート処ç†ã‚’中断ã—ã¾ã—ãŸã€‚"
kor "소트가 중단ë˜ì—ˆìŠµë‹ˆë‹¤."
nor "Sortering avbrutt"
norwegian-ny "Sortering avbrote"
@@ -675,14 +656,13 @@ ER_FORM_NOT_FOUND
dan "View '%-.192s' eksisterer ikke for '%-.192s'"
nla "View '%-.192s' bestaat niet voor '%-.192s'"
eng "View '%-.192s' doesn't exist for '%-.192s'"
- jps "View '%-.192s' ㌠'%-.192s' ã«å®šç¾©ã•ã‚Œã¦ã„ã¾ã›ã‚“",
est "Vaade '%-.192s' ei eksisteeri '%-.192s' jaoks"
fre "La vue (View) '%-.192s' n'existe pas pour '%-.192s'"
ger "View '%-.192s' existiert für '%-.192s' nicht"
greek "Το View '%-.192s' δεν υπάÏχει για '%-.192s'"
hun "A(z) '%-.192s' nezet nem letezik a(z) '%-.192s'-hoz"
ita "La view '%-.192s' non esiste per '%-.192s'"
- jpn "View '%-.192s' ㌠'%-.192s' ã«å®šç¾©ã•ã‚Œã¦ã„ã¾ã›ã‚“"
+ jpn "ビュー '%-.192s' 㯠'%-.192s' ã«å­˜åœ¨ã—ã¾ã›ã‚“。"
kor "ë·° '%-.192s'ê°€ '%-.192s'ì—서는 존재하지 ì•Šì니다."
nor "View '%-.192s' eksisterer ikke for '%-.192s'"
norwegian-ny "View '%-.192s' eksisterar ikkje for '%-.192s'"
@@ -717,18 +697,17 @@ ER_ILLEGAL_HA
rus "Обработчик %s таблицы %`s.%`s не поддерживает Ñту возможноÑÑ‚ÑŒ"
ukr "ДеÑкриптор %s таблиці %`s.%`s не має цієї влаÑтивоÑÑ‚Ñ–"
ER_KEY_NOT_FOUND
- cze "Nemohu naj-Bít záznam v '%-.192s'"
+ cze "Nemohu najít záznam v '%-.192s'"
dan "Kan ikke finde posten i '%-.192s'"
nla "Kan record niet vinden in '%-.192s'"
eng "Can't find record in '%-.192s'"
- jps "'%-.192s'ã®ãªã‹ã«ãƒ¬ã‚³ãƒ¼ãƒ‰ãŒè¦‹ä»˜ã‹ã‚Šã¾ã›ã‚“",
est "Ei suuda leida kirjet '%-.192s'-s"
fre "Ne peut trouver l'enregistrement dans '%-.192s'"
ger "Kann Datensatz in '%-.192s' nicht finden"
greek "ΑδÏνατη η ανεÏÏεση εγγÏαφής στο '%-.192s'"
hun "Nem talalhato a rekord '%-.192s'-ben"
ita "Impossibile trovare il record in '%-.192s'"
- jpn "'%-.192s'ã®ãªã‹ã«ãƒ¬ã‚³ãƒ¼ãƒ‰ãŒè¦‹ä»˜ã‹ã‚Šã¾ã›ã‚“"
+ jpn "'%-.192s' ã«ãƒ¬ã‚³ãƒ¼ãƒ‰ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。"
kor "'%-.192s'ì—ì„œ 레코드를 ì°¾ì„ ìˆ˜ ì—†ì니다."
nor "Kan ikke finne posten i '%-.192s'"
norwegian-ny "Kan ikkje finne posten i '%-.192s'"
@@ -742,18 +721,17 @@ ER_KEY_NOT_FOUND
swe "Hittar inte posten '%-.192s'"
ukr "Ðе можу запиÑати у '%-.192s'"
ER_NOT_FORM_FILE
- cze "Nespr-Bávná informace v souboru '%-.200s'"
+ cze "Nesprávná informace v souboru '%-.200s'"
dan "Forkert indhold i: '%-.200s'"
nla "Verkeerde info in file: '%-.200s'"
eng "Incorrect information in file: '%-.200s'"
- jps "ファイル '%-.200s' ã® info ãŒé–“é•ã£ã¦ã„るよã†ã§ã™",
est "Vigane informatsioon failis '%-.200s'"
fre "Information erronnée dans le fichier: '%-.200s'"
ger "Falsche Information in Datei '%-.200s'"
greek "Λάθος πληÏοφοÏίες στο αÏχείο: '%-.200s'"
hun "Ervenytelen info a file-ban: '%-.200s'"
ita "Informazione errata nel file: '%-.200s'"
- jpn "ファイル '%-.200s' ã® info ãŒé–“é•ã£ã¦ã„るよã†ã§ã™"
+ jpn "ファイル '%-.200s' 内ã®æƒ…å ±ãŒä¸æ­£ã§ã™ã€‚"
kor "í™”ì¼ì˜ 부정확한 ì •ë³´: '%-.200s'"
nor "Feil informasjon i filen: '%-.200s'"
norwegian-ny "Feil informasjon i fila: '%-.200s'"
@@ -767,18 +745,17 @@ ER_NOT_FORM_FILE
swe "Felaktig fil: '%-.200s'"
ukr "Хибна Ñ–Ð½Ñ„Ð¾Ñ€Ð¼Ð°Ñ†Ñ–Ñ Ñƒ файлі: '%-.200s'"
ER_NOT_KEYFILE
- cze "Nespr-Bávný klÃ­Ä pro tabulku '%-.200s'; pokuste se ho opravit"
+ cze "Nesprávný klÃ­Ä pro tabulku '%-.200s'; pokuste se ho opravit"
dan "Fejl i indeksfilen til tabellen '%-.200s'; prøv at reparere den"
nla "Verkeerde zoeksleutel file voor tabel: '%-.200s'; probeer het te repareren"
eng "Incorrect key file for table '%-.200s'; try to repair it"
- jps "'%-.200s' テーブル㮠key file ãŒé–“é•ã£ã¦ã„るよã†ã§ã™. 修復をã—ã¦ãã ã•ã„",
est "Tabeli '%-.200s' võtmefail on vigane; proovi seda parandada"
fre "Index corrompu dans la table: '%-.200s'; essayez de le réparer"
ger "Fehlerhafte Index-Datei für Tabelle '%-.200s'; versuche zu reparieren"
greek "Λάθος αÏχείο ταξινόμισης (key file) για τον πίνακα: '%-.200s'; ΠαÏακαλώ, διοÏθώστε το!"
hun "Ervenytelen kulcsfile a tablahoz: '%-.200s'; probalja kijavitani!"
ita "File chiave errato per la tabella : '%-.200s'; prova a riparalo"
- jpn "'%-.200s' テーブル㮠key file ãŒé–“é•ã£ã¦ã„るよã†ã§ã™. 修復をã—ã¦ãã ã•ã„"
+ jpn "表 '%-.200s' ã®ç´¢å¼•ãƒ•ã‚¡ã‚¤ãƒ«(key file)ã®å†…容ãŒä¸æ­£ã§ã™ã€‚修復を試行ã—ã¦ãã ã•ã„。"
kor "'%-.200s' í…Œì´ë¸”ì˜ ë¶€ì •í™•í•œ 키 존재. 수정하시오!"
nor "Tabellen '%-.200s' har feil i nøkkelfilen; forsøk å reparer den"
norwegian-ny "Tabellen '%-.200s' har feil i nykkelfila; prøv å reparere den"
@@ -792,18 +769,17 @@ ER_NOT_KEYFILE
swe "Fatalt fel vid hantering av register '%-.200s'; kör en reparation"
ukr "Хибний файл ключей Ð´Ð»Ñ Ñ‚Ð°Ð±Ð»Ð¸Ñ†Ñ–: '%-.200s'; Спробуйте його відновити"
ER_OLD_KEYFILE
- cze "Star-Bý klíÄový soubor pro '%-.192s'; opravte ho."
+ cze "Starý klíÄový soubor pro '%-.192s'; opravte ho."
dan "Gammel indeksfil for tabellen '%-.192s'; reparer den"
nla "Oude zoeksleutel file voor tabel '%-.192s'; repareer het!"
eng "Old key file for table '%-.192s'; repair it!"
- jps "'%-.192s' テーブルã¯å¤ã„å½¢å¼ã® key file ã®ã‚ˆã†ã§ã™; 修復をã—ã¦ãã ã•ã„",
est "Tabeli '%-.192s' võtmefail on aegunud; paranda see!"
fre "Vieux fichier d'index pour la table '%-.192s'; réparez le!"
ger "Alte Index-Datei für Tabelle '%-.192s'. Bitte reparieren"
greek "Παλαιό αÏχείο ταξινόμισης (key file) για τον πίνακα '%-.192s'; ΠαÏακαλώ, διοÏθώστε το!"
hun "Regi kulcsfile a '%-.192s'tablahoz; probalja kijavitani!"
ita "File chiave vecchio per la tabella '%-.192s'; riparalo!"
- jpn "'%-.192s' テーブルã¯å¤ã„å½¢å¼ã® key file ã®ã‚ˆã†ã§ã™; 修復をã—ã¦ãã ã•ã„"
+ jpn "表 '%-.192s' ã®ç´¢å¼•ãƒ•ã‚¡ã‚¤ãƒ«(key file)ã¯å¤ã„å½¢å¼ã§ã™ã€‚修復ã—ã¦ãã ã•ã„。"
kor "'%-.192s' í…Œì´ë¸”ì˜ ì´ì „ë²„ì ¼ì˜ í‚¤ 존재. 수정하시오!"
nor "Gammel nøkkelfil for tabellen '%-.192s'; reparer den!"
norwegian-ny "Gammel nykkelfil for tabellen '%-.192s'; reparer den!"
@@ -817,18 +793,17 @@ ER_OLD_KEYFILE
swe "Gammal nyckelfil '%-.192s'; reparera registret"
ukr "Старий файл ключей Ð´Ð»Ñ Ñ‚Ð°Ð±Ð»Ð¸Ñ†Ñ– '%-.192s'; Відновіть його!"
ER_OPEN_AS_READONLY
- cze "'%-.192s' je jen pro -BÄtení"
+ cze "'%-.192s' je jen pro Ätení"
dan "'%-.192s' er skrivebeskyttet"
nla "'%-.192s' is alleen leesbaar"
eng "Table '%-.192s' is read only"
- jps "'%-.192s' ã¯èª­ã¿è¾¼ã¿å°‚用ã§ã™",
est "Tabel '%-.192s' on ainult lugemiseks"
fre "'%-.192s' est en lecture seulement"
ger "Tabelle '%-.192s' ist nur lesbar"
greek "'%-.192s' επιτÏέπεται μόνο η ανάγνωση"
hun "'%-.192s' irasvedett"
ita "'%-.192s' e` di sola lettura"
- jpn "'%-.192s' ã¯èª­ã¿è¾¼ã¿å°‚用ã§ã™"
+ jpn "表 '%-.192s' ã¯èª­ã¿è¾¼ã¿å°‚用ã§ã™ã€‚"
kor "í…Œì´ë¸” '%-.192s'는 ì½ê¸°ì „ìš© 입니다."
nor "'%-.192s' er skrivebeskyttet"
norwegian-ny "'%-.192s' er skrivetryggja"
@@ -842,18 +817,17 @@ ER_OPEN_AS_READONLY
swe "'%-.192s' är skyddad mot förändring"
ukr "Ð¢Ð°Ð±Ð»Ð¸Ñ†Ñ '%-.192s' тільки Ð´Ð»Ñ Ñ‡Ð¸Ñ‚Ð°Ð½Ð½Ñ"
ER_OUTOFMEMORY HY001 S1001
- cze "M-Bálo paměti. Přestartujte daemona a zkuste znovu (je potřeba %d bytů)"
+ cze "Málo paměti. Přestartujte daemona a zkuste znovu (je potřeba %d bytů)"
dan "Ikke mere hukommelse. Genstart serveren og prøv igen (mangler %d bytes)"
nla "Geen geheugen meer. Herstart server en probeer opnieuw (%d bytes nodig)"
eng "Out of memory; restart server and try again (needed %d bytes)"
- jps "Out of memory. デーモンをリスタートã—ã¦ã¿ã¦ãã ã•ã„ (%d bytes å¿…è¦)",
est "Mälu sai otsa. Proovi MariaDB uuesti käivitada (puudu jäi %d baiti)"
fre "Manque de mémoire. Redémarrez le démon et ré-essayez (%d octets nécessaires)"
ger "Kein Speicher vorhanden (%d Bytes benötigt). Bitte Server neu starten"
greek "Δεν υπάÏχει διαθέσιμη μνήμη. ΠÏοσπαθήστε πάλι, επανεκινώντας τη διαδικασία (demon) (χÏειάζονται %d bytes)"
hun "Nincs eleg memoria. Inditsa ujra a demont, es probalja ismet. (%d byte szukseges.)"
ita "Memoria esaurita. Fai ripartire il demone e riprova (richiesti %d bytes)"
- jpn "Out of memory. デーモンをリスタートã—ã¦ã¿ã¦ãã ã•ã„ (%d bytes å¿…è¦)"
+ jpn "メモリãŒä¸è¶³ã—ã¦ã„ã¾ã™ã€‚サーãƒãƒ¼ã‚’å†èµ·å‹•ã—ã¦ã¿ã¦ãã ã•ã„。(%d ãƒã‚¤ãƒˆã®å‰²ã‚Šå½“ã¦ã«å¤±æ•—)"
kor "Out of memory. ë°ëª¬ì„ 재 실행 후 다시 시작하시오 (needed %d bytes)"
nor "Ikke mer minne. Star på nytt tjenesten og prøv igjen (trengte %d byter)"
norwegian-ny "Ikkje meir minne. Start på nytt tenesten og prøv igjen (trengte %d bytar)"
@@ -867,18 +841,17 @@ ER_OUTOFMEMORY HY001 S1001
swe "Oväntat slut på minnet, starta om programmet och försök på nytt (Behövde %d bytes)"
ukr "Брак пам'ÑÑ‚Ñ–. РеÑтартуйте Ñервер та Ñпробуйте знову (потрібно %d байтів)"
ER_OUT_OF_SORTMEMORY HY001 S1001
- cze "M-Bálo paměti pro třídění. Zvyšte velikost třídícího bufferu"
+ cze "Málo paměti pro třídění. Zvyšte velikost třídícího bufferu"
dan "Ikke mere sorteringshukommelse. Øg sorteringshukommelse (sort buffer size) for serveren"
nla "Geen geheugen om te sorteren. Verhoog de server sort buffer size"
eng "Out of sort memory, consider increasing server sort buffer size"
- jps "Out of sort memory. sort buffer size ãŒè¶³ã‚Šãªã„よã†ã§ã™.",
est "Mälu sai sorteerimisel otsa. Suurenda MariaDB-i sorteerimispuhvrit"
fre "Manque de mémoire pour le tri. Augmentez-la."
ger "Kein Speicher zum Sortieren vorhanden. sort_buffer_size sollte im Server erhöht werden"
greek "Δεν υπάÏχει διαθέσιμη μνήμη για ταξινόμιση. Αυξήστε το sort buffer size για τη διαδικασία (demon)"
hun "Nincs eleg memoria a rendezeshez. Novelje a rendezo demon puffermeretet"
ita "Memoria per gli ordinamenti esaurita. Incrementare il 'sort_buffer' al demone"
- jpn "Out of sort memory. sort buffer size ãŒè¶³ã‚Šãªã„よã†ã§ã™."
+ jpn "ソートメモリãŒä¸è¶³ã—ã¦ã„ã¾ã™ã€‚ソートãƒãƒƒãƒ•ã‚¡ã‚µã‚¤ã‚º(sort buffer size)ã®å¢—加を検討ã—ã¦ãã ã•ã„。"
kor "Out of sort memory. daemon sort bufferì˜ í¬ê¸°ë¥¼ ì¦ê°€ì‹œí‚¤ì„¸ìš”"
nor "Ikke mer sorteringsminne. Vurder å øke sorteringsminnet (sort buffer size) for tjenesten"
norwegian-ny "Ikkje meir sorteringsminne. Vurder å auke sorteringsminnet (sorteringsbuffer storleik) for tenesten"
@@ -892,18 +865,17 @@ ER_OUT_OF_SORTMEMORY HY001 S1001
swe "Sorteringsbufferten räcker inte till. Kontrollera startparametrarna"
ukr "Брак пам'ÑÑ‚Ñ– Ð´Ð»Ñ ÑортуваннÑ. Треба збільшити розмір буфера ÑÐ¾Ñ€Ñ‚ÑƒÐ²Ð°Ð½Ð½Ñ Ñƒ Ñервера"
ER_UNEXPECTED_EOF
- cze "Neo-BÄekávaný konec souboru pÅ™i Ätení '%-.192s' (chybový kód: %M)"
+ cze "NeoÄekávaný konec souboru pÅ™i Ätení '%-.192s' (chybový kód: %M)"
dan "Uventet afslutning på fil (eof) ved læsning af filen '%-.192s' (Fejlkode: %M)"
nla "Onverwachte eof gevonden tijdens het lezen van file '%-.192s' (Errcode: %M)"
eng "Unexpected EOF found when reading file '%-.192s' (errno: %M)"
- jps "'%-.192s' ファイルを読ã¿è¾¼ã¿ä¸­ã« EOF ãŒäºˆæœŸã›ã¬æ‰€ã§ç¾ã‚Œã¾ã—ãŸ. (errno: %M)",
est "Ootamatu faililõpumärgend faili '%-.192s' lugemisel (veakood: %M)"
fre "Fin de fichier inattendue en lisant '%-.192s' (Errcode: %M)"
ger "Unerwartetes Ende beim Lesen der Datei '%-.192s' (Fehler: %M)"
greek "Κατά τη διάÏκεια της ανάγνωσης, βÏέθηκε απÏοσδόκητα το τέλος του αÏχείου '%-.192s' (κωδικός λάθους: %M)"
hun "Varatlan filevege-jel a '%-.192s'olvasasakor. (hibakod: %M)"
ita "Fine del file inaspettata durante la lettura del file '%-.192s' (errno: %M)"
- jpn "'%-.192s' ファイルを読ã¿è¾¼ã¿ä¸­ã« EOF ãŒäºˆæœŸã›ã¬æ‰€ã§ç¾ã‚Œã¾ã—ãŸ. (errno: %M)"
+ jpn "ファイル '%-.192s' を読ã¿è¾¼ã¿ä¸­ã«äºˆæœŸã›ãšãƒ•ã‚¡ã‚¤ãƒ«ã®çµ‚端ã«é”ã—ã¾ã—ãŸã€‚(エラー番å·: %M)"
kor "'%-.192s' í™”ì¼ì„ ì½ëŠ” ë„중 ìž˜ëª»ëœ eofì„ ë°œê²¬ (ì—러번호: %M)"
nor "Uventet slutt på fil (eof) ved lesing av filen '%-.192s' (Feilkode: %M)"
norwegian-ny "Uventa slutt på fil (eof) ved lesing av fila '%-.192s' (Feilkode: %M)"
@@ -917,18 +889,17 @@ ER_UNEXPECTED_EOF
swe "Oväntat filslut vid läsning från '%-.192s' (Felkod: %M)"
ukr "Хибний кінець файлу '%-.192s' (помилка: %M)"
ER_CON_COUNT_ERROR 08004
- cze "P-Bříliš mnoho spojení"
+ cze "Příliš mnoho spojení"
dan "For mange forbindelser (connections)"
nla "Te veel verbindingen"
eng "Too many connections"
- jps "接続ãŒå¤šã™ãŽã¾ã™",
est "Liiga palju samaaegseid ühendusi"
fre "Trop de connexions"
ger "Zu viele Verbindungen"
greek "ΥπάÏχουν πολλές συνδέσεις..."
hun "Tul sok kapcsolat"
ita "Troppe connessioni"
- jpn "接続ãŒå¤šã™ãŽã¾ã™"
+ jpn "接続ãŒå¤šã™ãŽã¾ã™ã€‚"
kor "너무 ë§Žì€ ì—°ê²°... max_connectionì„ ì¦ê°€ 시키시오..."
nor "For mange tilkoblinger (connections)"
norwegian-ny "For mange tilkoplingar (connections)"
@@ -942,18 +913,17 @@ ER_CON_COUNT_ERROR 08004
swe "För många anslutningar"
ukr "Забагато з'єднань"
ER_OUT_OF_RESOURCES
- cze "M-Bálo prostoru/paměti pro thread"
+ cze "Málo prostoru/paměti pro thread"
dan "Udgået for tråde/hukommelse"
nla "Geen thread geheugen meer; controleer of mysqld of andere processen al het beschikbare geheugen gebruikt. Zo niet, dan moet u wellicht 'ulimit' gebruiken om mysqld toe te laten meer geheugen te benutten, of u kunt extra swap ruimte toevoegen"
eng "Out of memory; check if mysqld or some other process uses all available memory; if not, you may have to use 'ulimit' to allow mysqld to use more memory or you can add more swap space"
- jps "Out of memory; mysqld ã‹ãã®ä»–ã®ãƒ—ロセスãŒãƒ¡ãƒ¢ãƒªãƒ¼ã‚’å…¨ã¦ä½¿ã£ã¦ã„ã‚‹ã‹ç¢ºèªã—ã¦ãã ã•ã„. メモリーを使ã„切ã£ã¦ã„ãªã„å ´åˆã€'ulimit' を設定ã—㦠mysqld ã®ãƒ¡ãƒ¢ãƒªãƒ¼ä½¿ç”¨é™ç•Œé‡ã‚’多ãã™ã‚‹ã‹ã€swap space を増やã—ã¦ã¿ã¦ãã ã•ã„",
est "Mälu sai otsa. Võimalik, et aitab swap-i lisamine või käsu 'ulimit' abil MariaDB-le rohkema mälu kasutamise lubamine"
fre "Manque de 'threads'/mémoire"
ger "Kein Speicher mehr vorhanden. Prüfen Sie, ob mysqld oder ein anderer Prozess den gesamten Speicher verbraucht. Wenn nicht, sollten Sie mit 'ulimit' dafür sorgen, dass mysqld mehr Speicher benutzen darf, oder mehr Swap-Speicher einrichten"
greek "ΠÏόβλημα με τη διαθέσιμη μνήμη (Out of thread space/memory)"
hun "Elfogyott a thread-memoria"
ita "Fine dello spazio/memoria per i thread"
- jpn "Out of memory; mysqld ã‹ãã®ä»–ã®ãƒ—ロセスãŒãƒ¡ãƒ¢ãƒªãƒ¼ã‚’å…¨ã¦ä½¿ã£ã¦ã„ã‚‹ã‹ç¢ºèªã—ã¦ãã ã•ã„. メモリーを使ã„切ã£ã¦ã„ãªã„å ´åˆã€'ulimit' を設定ã—㦠mysqld ã®ãƒ¡ãƒ¢ãƒªãƒ¼ä½¿ç”¨é™ç•Œé‡ã‚’多ãã™ã‚‹ã‹ã€swap space を増やã—ã¦ã¿ã¦ãã ã•ã„"
+ jpn "メモリãŒä¸è¶³ã—ã¦ã„ã¾ã™ã€‚mysqld ã‚„ãã®ä»–ã®ãƒ—ロセスãŒãƒ¡ãƒ¢ãƒªãƒ¼ã‚’使ã„切ã£ã¦ã„ãªã„ã‹ç¢ºèªã—ã¦ä¸‹ã•ã„。メモリーを使ã„切ã£ã¦ã„ãªã„å ´åˆã€'ulimit'ã®è¨­å®šç­‰ã§ mysqld ã®ãƒ¡ãƒ¢ãƒªãƒ¼ä½¿ç”¨æœ€å¤§é‡ã‚’多ãã™ã‚‹ã‹ã€ã‚¹ãƒ¯ãƒƒãƒ—領域を増やã™å¿…è¦ãŒã‚ã‚‹ã‹ã‚‚ã—ã‚Œã¾ã›ã‚“。"
# This message failed to convert from euc-kr, skipped
nor "Tomt for tråd plass/minne"
norwegian-ny "Tomt for tråd plass/minne"
@@ -967,18 +937,17 @@ ER_OUT_OF_RESOURCES
swe "Fick slut på minnet. Kontrollera om mysqld eller någon annan process använder allt tillgängligt minne. Om inte, försök använda 'ulimit' eller allokera mera swap"
ukr "Брак пам'ÑÑ‚Ñ–; Перевірте чи mysqld або ÑкіÑÑŒ інші процеÑи викориÑтовують уÑÑŽ доÑтупну пам'ÑÑ‚ÑŒ. Як ні, то ви можете ÑкориÑтатиÑÑ 'ulimit', аби дозволити mysqld викориÑтовувати більше пам'ÑÑ‚Ñ– або ви можете додати більше міÑÑ†Ñ Ð¿Ñ–Ð´ Ñвап"
ER_BAD_HOST_ERROR 08S01
- cze "Nemohu zjistit jm-Béno stroje pro Vaši adresu"
+ cze "Nemohu zjistit jméno stroje pro Vaši adresu"
dan "Kan ikke få værtsnavn for din adresse"
nla "Kan de hostname niet krijgen van uw adres"
eng "Can't get hostname for your address"
- jps "ãã® address ã® hostname ãŒå¼•ã‘ã¾ã›ã‚“.",
est "Ei suuda lahendada IP aadressi masina nimeks"
fre "Ne peut obtenir de hostname pour votre adresse"
ger "Kann Hostnamen für diese Adresse nicht erhalten"
greek "Δεν έγινε γνωστό το hostname για την address σας"
hun "A gepnev nem allapithato meg a cimbol"
ita "Impossibile risalire al nome dell'host dall'indirizzo (risoluzione inversa)"
- jpn "ãã® address ã® hostname ãŒå¼•ã‘ã¾ã›ã‚“."
+ jpn "IPアドレスã‹ã‚‰ãƒ›ã‚¹ãƒˆåを解決ã§ãã¾ã›ã‚“。"
kor "ë‹¹ì‹ ì˜ ì»´í“¨í„°ì˜ í˜¸ìŠ¤íŠ¸ì´ë¦„ì„ ì–»ì„ ìˆ˜ ì—†ì니다."
nor "Kan ikke få tak i vertsnavn for din adresse"
norwegian-ny "Kan ikkje få tak i vertsnavn for di adresse"
@@ -992,7 +961,7 @@ ER_BAD_HOST_ERROR 08S01
swe "Kan inte hitta 'hostname' för din adress"
ukr "Ðе можу визначити ім'Ñ Ñ…Ð¾Ñту Ð´Ð»Ñ Ð²Ð°ÑˆÐ¾Ñ— адреÑи"
ER_HANDSHAKE_ERROR 08S01
- cze "Chyba p-Bři ustavování spojení"
+ cze "Chyba při ustavování spojení"
dan "Forkert håndtryk (handshake)"
nla "Verkeerde handshake"
eng "Bad handshake"
@@ -1002,6 +971,7 @@ ER_HANDSHAKE_ERROR 08S01
greek "Η αναγνώÏιση (handshake) δεν έγινε σωστά"
hun "A kapcsolatfelvetel nem sikerult (Bad handshake)"
ita "Negoziazione impossibile"
+ jpn "ãƒãƒ³ãƒ‰ã‚·ã‚§ã‚¤ã‚¯ã‚¨ãƒ©ãƒ¼"
nor "Feil håndtrykk (handshake)"
norwegian-ny "Feil handtrykk (handshake)"
pol "ZÅ‚y uchwyt(handshake)"
@@ -1014,7 +984,7 @@ ER_HANDSHAKE_ERROR 08S01
swe "Fel vid initiering av kommunikationen med klienten"
ukr "Ðевірна уÑтановка зв'Ñзку"
ER_DBACCESS_DENIED_ERROR 42000
- cze "P-Břístup pro uživatele '%s'@'%s' k databázi '%-.192s' není povolen"
+ cze "Přístup pro uživatele '%s'@'%s' k databázi '%-.192s' není povolen"
dan "Adgang nægtet bruger: '%s'@'%s' til databasen '%-.192s'"
nla "Toegang geweigerd voor gebruiker: '%s'@'%s' naar database '%-.192s'"
eng "Access denied for user '%s'@'%s' to database '%-.192s'"
@@ -1038,7 +1008,7 @@ ER_DBACCESS_DENIED_ERROR 42000
swe "Användare '%s'@'%s' är ej berättigad att använda databasen %-.192s"
ukr "ДоÑтуп заборонено Ð´Ð»Ñ ÐºÐ¾Ñ€Ð¸Ñтувача: '%s'@'%s' до бази данних '%-.192s'"
ER_ACCESS_DENIED_ERROR 28000
- cze "P-Břístup pro uživatele '%s'@'%s' (s heslem %s)"
+ cze "Přístup pro uživatele '%s'@'%s' (s heslem %s)"
dan "Adgang nægtet bruger: '%s'@'%s' (Bruger adgangskode: %s)"
nla "Toegang geweigerd voor gebruiker: '%s'@'%s' (Wachtwoord gebruikt: %s)"
eng "Access denied for user '%s'@'%s' (using password: %s)"
@@ -1062,18 +1032,17 @@ ER_ACCESS_DENIED_ERROR 28000
swe "Användare '%s'@'%s' är ej berättigad att logga in (Använder lösen: %s)"
ukr "ДоÑтуп заборонено Ð´Ð»Ñ ÐºÐ¾Ñ€Ð¸Ñтувача: '%s'@'%s' (ВикориÑтано пароль: %s)"
ER_NO_DB_ERROR 3D000
- cze "Nebyla vybr-Bána žádná databáze"
+ cze "Nebyla vybrána žádná databáze"
dan "Ingen database valgt"
nla "Geen database geselecteerd"
eng "No database selected"
- jps "データベースãŒé¸æŠžã•ã‚Œã¦ã„ã¾ã›ã‚“.",
est "Andmebaasi ei ole valitud"
fre "Aucune base n'a été sélectionnée"
ger "Keine Datenbank ausgewählt"
greek "Δεν επιλέχθηκε βάση δεδομένων"
hun "Nincs kivalasztott adatbazis"
ita "Nessun database selezionato"
- jpn "データベースãŒé¸æŠžã•ã‚Œã¦ã„ã¾ã›ã‚“."
+ jpn "データベースãŒé¸æŠžã•ã‚Œã¦ã„ã¾ã›ã‚“。"
kor "ì„ íƒëœ ë°ì´íƒ€ë² ì´ìŠ¤ê°€ 없습니다."
nor "Ingen database valgt"
norwegian-ny "Ingen database vald"
@@ -1087,18 +1056,17 @@ ER_NO_DB_ERROR 3D000
swe "Ingen databas i användning"
ukr "Базу данних не вибрано"
ER_UNKNOWN_COM_ERROR 08S01
- cze "Nezn-Bámý příkaz"
+ cze "Neznámý příkaz"
dan "Ukendt kommando"
nla "Onbekend commando"
eng "Unknown command"
- jps "ãã®ã‚³ãƒžãƒ³ãƒ‰ã¯ä½•ï¼Ÿ",
est "Tundmatu käsk"
fre "Commande inconnue"
ger "Unbekannter Befehl"
greek "Αγνωστη εντολή"
hun "Ervenytelen parancs"
ita "Comando sconosciuto"
- jpn "ãã®ã‚³ãƒžãƒ³ãƒ‰ã¯ä½•ï¼Ÿ"
+ jpn "ä¸æ˜Žãªã‚³ãƒžãƒ³ãƒ‰ã§ã™ã€‚"
kor "명령어가 뭔지 모르겠어요..."
nor "Ukjent kommando"
norwegian-ny "Ukjent kommando"
@@ -1109,21 +1077,20 @@ ER_UNKNOWN_COM_ERROR 08S01
serbian "Nepoznata komanda"
slo "Neznámy príkaz"
spa "Comando desconocido"
- swe "Okänt commando"
+ swe "Okänt kommando"
ukr "Ðевідома команда"
ER_BAD_NULL_ERROR 23000
- cze "Sloupec '%-.192s' nem-Bůže být null"
+ cze "Sloupec '%-.192s' nemůže být null"
dan "Kolonne '%-.192s' kan ikke være NULL"
nla "Kolom '%-.192s' kan niet null zijn"
eng "Column '%-.192s' cannot be null"
- jps "Column '%-.192s' 㯠null ã«ã¯ã§ããªã„ã®ã§ã™",
est "Tulp '%-.192s' ei saa omada nullväärtust"
fre "Le champ '%-.192s' ne peut être vide (null)"
ger "Feld '%-.192s' darf nicht NULL sein"
greek "Το πεδίο '%-.192s' δεν μποÏεί να είναι κενό (null)"
hun "A(z) '%-.192s' oszlop erteke nem lehet nulla"
ita "La colonna '%-.192s' non puo` essere nulla"
- jpn "Column '%-.192s' 㯠null ã«ã¯ã§ããªã„ã®ã§ã™"
+ jpn "列 '%-.192s' 㯠null ã«ã§ãã¾ã›ã‚“。"
kor "칼럼 '%-.192s'는 ë„(Null)ì´ ë˜ë©´ 안ë©ë‹ˆë‹¤. "
nor "Kolonne '%-.192s' kan ikke vere null"
norwegian-ny "Kolonne '%-.192s' kan ikkje vere null"
@@ -1137,18 +1104,17 @@ ER_BAD_NULL_ERROR 23000
swe "Kolumn '%-.192s' får inte vara NULL"
ukr "Стовбець '%-.192s' не може бути нульовим"
ER_BAD_DB_ERROR 42000
- cze "Nezn-Bámá databáze '%-.192s'"
+ cze "Neznámá databáze '%-.192s'"
dan "Ukendt database '%-.192s'"
nla "Onbekende database '%-.192s'"
eng "Unknown database '%-.192s'"
- jps "'%-.192s' ãªã‚“ã¦ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ã¯çŸ¥ã‚Šã¾ã›ã‚“.",
est "Tundmatu andmebaas '%-.192s'"
fre "Base '%-.192s' inconnue"
ger "Unbekannte Datenbank '%-.192s'"
greek "Αγνωστη βάση δεδομένων '%-.192s'"
hun "Ervenytelen adatbazis: '%-.192s'"
ita "Database '%-.192s' sconosciuto"
- jpn "'%-.192s' ãªã‚“ã¦ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ã¯çŸ¥ã‚Šã¾ã›ã‚“."
+ jpn "'%-.192s' ã¯ä¸æ˜Žãªãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ã§ã™ã€‚"
kor "ë°ì´íƒ€ë² ì´ìŠ¤ '%-.192s'는 알수 ì—†ìŒ"
nor "Ukjent database '%-.192s'"
norwegian-ny "Ukjent database '%-.192s'"
@@ -1162,18 +1128,17 @@ ER_BAD_DB_ERROR 42000
swe "Okänd databas: '%-.192s'"
ukr "Ðевідома база данних '%-.192s'"
ER_TABLE_EXISTS_ERROR 42S01
- cze "Tabulka '%-.192s' ji-Bž existuje"
+ cze "Tabulka '%-.192s' již existuje"
dan "Tabellen '%-.192s' findes allerede"
nla "Tabel '%-.192s' bestaat al"
eng "Table '%-.192s' already exists"
- jps "Table '%-.192s' ã¯æ—¢ã«ã‚ã‚Šã¾ã™",
est "Tabel '%-.192s' juba eksisteerib"
fre "La table '%-.192s' existe déjà"
ger "Tabelle '%-.192s' bereits vorhanden"
greek "Ο πίνακας '%-.192s' υπάÏχει ήδη"
hun "A(z) '%-.192s' tabla mar letezik"
ita "La tabella '%-.192s' esiste gia`"
- jpn "Table '%-.192s' ã¯æ—¢ã«ã‚ã‚Šã¾ã™"
+ jpn "表 '%-.192s' ã¯ã™ã§ã«å­˜åœ¨ã—ã¾ã™ã€‚"
kor "í…Œì´ë¸” '%-.192s'는 ì´ë¯¸ 존재함"
nor "Tabellen '%-.192s' eksisterer allerede"
norwegian-ny "Tabellen '%-.192s' eksisterar allereide"
@@ -1187,18 +1152,17 @@ ER_TABLE_EXISTS_ERROR 42S01
swe "Tabellen '%-.192s' finns redan"
ukr "Ð¢Ð°Ð±Ð»Ð¸Ñ†Ñ '%-.192s' вже Ñ–Ñнує"
ER_BAD_TABLE_ERROR 42S02
- cze "Nezn-Bámá tabulka '%-.100s'"
+ cze "Neznámá tabulka '%-.100s'"
dan "Ukendt tabel '%-.100s'"
nla "Onbekende tabel '%-.100s'"
eng "Unknown table '%-.100s'"
- jps "table '%-.100s' ã¯ã‚ã‚Šã¾ã›ã‚“.",
est "Tundmatu tabel '%-.100s'"
fre "Table '%-.100s' inconnue"
ger "Unbekannte Tabelle '%-.100s'"
greek "Αγνωστος πίνακας '%-.100s'"
hun "Ervenytelen tabla: '%-.100s'"
ita "Tabella '%-.100s' sconosciuta"
- jpn "table '%-.100s' ã¯ã‚ã‚Šã¾ã›ã‚“."
+ jpn "'%-.100s' ã¯ä¸æ˜Žãªè¡¨ã§ã™ã€‚"
kor "í…Œì´ë¸” '%-.100s'는 알수 ì—†ìŒ"
nor "Ukjent tabell '%-.100s'"
norwegian-ny "Ukjent tabell '%-.100s'"
@@ -1212,7 +1176,7 @@ ER_BAD_TABLE_ERROR 42S02
swe "Okänd tabell '%-.100s'"
ukr "Ðевідома Ñ‚Ð°Ð±Ð»Ð¸Ñ†Ñ '%-.100s'"
ER_NON_UNIQ_ERROR 23000
- cze "Sloupec '%-.192s' v %-.192s nen-Bí zcela jasný"
+ cze "Sloupec '%-.192s' v %-.192s není zcela jasný"
dan "Felt: '%-.192s' i tabel %-.192s er ikke entydigt"
nla "Kolom: '%-.192s' in %-.192s is niet eenduidig"
eng "Column '%-.192s' in %-.192s is ambiguous"
@@ -1222,7 +1186,7 @@ ER_NON_UNIQ_ERROR 23000
greek "Το πεδίο: '%-.192s' σε %-.192s δεν έχει καθοÏιστεί"
hun "A(z) '%-.192s' oszlop %-.192s-ben ketertelmu"
ita "Colonna: '%-.192s' di %-.192s e` ambigua"
- jpn "Column: '%-.192s' in %-.192s is ambiguous"
+ jpn "列 '%-.192s' 㯠%-.192s 内ã§æ›–昧ã§ã™ã€‚"
kor "칼럼: '%-.192s' in '%-.192s' ì´ ëª¨í˜¸í•¨"
nor "Felt: '%-.192s' i tabell %-.192s er ikke entydig"
norwegian-ny "Kolonne: '%-.192s' i tabell %-.192s er ikkje eintydig"
@@ -1236,18 +1200,17 @@ ER_NON_UNIQ_ERROR 23000
swe "Kolumn '%-.192s' i %-.192s är inte unik"
ukr "Стовбець '%-.192s' у %-.192s визначений неоднозначно"
ER_SERVER_SHUTDOWN 08S01
- cze "Prob-Bíhá ukonÄování práce serveru"
+ cze "Probíhá ukonÄování práce serveru"
dan "Database nedlukning er i gang"
nla "Bezig met het stoppen van de server"
eng "Server shutdown in progress"
- jps "Server を shutdown 中...",
est "Serveri seiskamine käib"
fre "Arrêt du serveur en cours"
ger "Der Server wird heruntergefahren"
greek "ΕναÏξη διαδικασίας αποσÏνδεσης του εξυπηÏετητή (server shutdown)"
hun "A szerver leallitasa folyamatban"
ita "Shutdown del server in corso"
- jpn "Server を shutdown 中..."
+ jpn "サーãƒãƒ¼ã‚’シャットダウン中ã§ã™ã€‚"
kor "Server가 셧다운 중입니다."
nor "Database nedkobling er i gang"
norwegian-ny "Tenar nedkopling er i gang"
@@ -1261,18 +1224,17 @@ ER_SERVER_SHUTDOWN 08S01
swe "Servern går nu ned"
ukr "ЗавершуєтьÑÑ Ñ€Ð°Ð±Ð¾Ñ‚Ð° Ñервера"
ER_BAD_FIELD_ERROR 42S22 S0022
- cze "Nezn-Bámý sloupec '%-.192s' v %-.192s"
+ cze "Neznámý sloupec '%-.192s' v %-.192s"
dan "Ukendt kolonne '%-.192s' i tabel %-.192s"
nla "Onbekende kolom '%-.192s' in %-.192s"
eng "Unknown column '%-.192s' in '%-.192s'"
- jps "'%-.192s' column 㯠'%-.192s' ã«ã¯ã‚ã‚Šã¾ã›ã‚“.",
est "Tundmatu tulp '%-.192s' '%-.192s'-s"
fre "Champ '%-.192s' inconnu dans %-.192s"
ger "Unbekanntes Tabellenfeld '%-.192s' in %-.192s"
greek "Αγνωστο πεδίο '%-.192s' σε '%-.192s'"
hun "A(z) '%-.192s' oszlop ervenytelen '%-.192s'-ben"
ita "Colonna sconosciuta '%-.192s' in '%-.192s'"
- jpn "'%-.192s' column 㯠'%-.192s' ã«ã¯ã‚ã‚Šã¾ã›ã‚“."
+ jpn "列 '%-.192s' 㯠'%-.192s' ã«ã¯ã‚ã‚Šã¾ã›ã‚“。"
kor "Unknown 칼럼 '%-.192s' in '%-.192s'"
nor "Ukjent kolonne '%-.192s' i tabell %-.192s"
norwegian-ny "Ukjent felt '%-.192s' i tabell %-.192s"
@@ -1286,17 +1248,17 @@ ER_BAD_FIELD_ERROR 42S22 S0022
swe "Okänd kolumn '%-.192s' i %-.192s"
ukr "Ðевідомий Ñтовбець '%-.192s' у '%-.192s'"
ER_WRONG_FIELD_WITH_GROUP 42000 S1009
- cze "Pou-Bžité '%-.192s' nebylo v group by"
+ cze "Použité '%-.192s' nebylo v group by"
dan "Brugte '%-.192s' som ikke var i group by"
nla "Opdracht gebruikt '%-.192s' dat niet in de GROUP BY voorkomt"
eng "'%-.192s' isn't in GROUP BY"
- jps "'%-.192s' isn't in GROUP BY",
est "'%-.192s' puudub GROUP BY klauslis"
fre "'%-.192s' n'est pas dans 'group by'"
ger "'%-.192s' ist nicht in GROUP BY vorhanden"
greek "ΧÏησιμοποιήθηκε '%-.192s' που δεν υπήÏχε στο group by"
hun "Used '%-.192s' with wasn't in group by"
ita "Usato '%-.192s' che non e` nel GROUP BY"
+ jpn "'%-.192s' ã¯GROUP BYå¥ã§æŒ‡å®šã•ã‚Œã¦ã„ã¾ã›ã‚“。"
kor "'%-.192s'ì€ GROUP BYì†ì— ì—†ìŒ"
nor "Brukte '%-.192s' som ikke var i group by"
norwegian-ny "Brukte '%-.192s' som ikkje var i group by"
@@ -1310,7 +1272,7 @@ ER_WRONG_FIELD_WITH_GROUP 42000 S1009
swe "'%-.192s' finns inte i GROUP BY"
ukr "'%-.192s' не є у GROUP BY"
ER_WRONG_GROUP_FIELD 42000 S1009
- cze "Nemohu pou-Bžít group na '%-.192s'"
+ cze "Nemohu použít group na '%-.192s'"
dan "Kan ikke gruppere på '%-.192s'"
nla "Kan '%-.192s' niet groeperen"
eng "Can't group on '%-.192s'"
@@ -1320,6 +1282,7 @@ ER_WRONG_GROUP_FIELD 42000 S1009
greek "ΑδÏνατη η ομαδοποίηση (group on) '%-.192s'"
hun "A group nem hasznalhato: '%-.192s'"
ita "Impossibile raggruppare per '%-.192s'"
+ jpn "'%-.192s' ã§ã®ã‚°ãƒ«ãƒ¼ãƒ—化ã¯ã§ãã¾ã›ã‚“。"
kor "'%-.192s'를 그룹할 수 ì—†ìŒ"
nor "Kan ikke gruppere på '%-.192s'"
norwegian-ny "Kan ikkje gruppere på '%-.192s'"
@@ -1333,7 +1296,7 @@ ER_WRONG_GROUP_FIELD 42000 S1009
swe "Kan inte använda GROUP BY med '%-.192s'"
ukr "Ðе можу групувати по '%-.192s'"
ER_WRONG_SUM_SELECT 42000 S1009
- cze "P-Bříkaz obsahuje zároveň funkci sum a sloupce"
+ cze "Příkaz obsahuje zároveň funkci sum a sloupce"
dan "Udtrykket har summer (sum) funktioner og kolonner i samme udtryk"
nla "Opdracht heeft totaliseer functies en kolommen in dezelfde opdracht"
eng "Statement has sum functions and columns in same statement"
@@ -1342,6 +1305,7 @@ ER_WRONG_SUM_SELECT 42000 S1009
ger "Die Verwendung von Summierungsfunktionen und Spalten im selben Befehl ist nicht erlaubt"
greek "Η διατÏπωση πεÏιέχει sum functions και columns στην ίδια διατÏπωση"
ita "Il comando ha una funzione SUM e una colonna non specificata nella GROUP BY"
+ jpn "集計関数ã¨é€šå¸¸ã®åˆ—ãŒåŒæ™‚ã«æŒ‡å®šã•ã‚Œã¦ã„ã¾ã™ã€‚"
kor "Statement ê°€ sumê¸°ëŠ¥ì„ ë™ìž‘중ì´ê³  ì¹¼ëŸ¼ë„ ë™ì¼í•œ statement입니다."
nor "Uttrykket har summer (sum) funksjoner og kolonner i samme uttrykk"
norwegian-ny "Uttrykket har summer (sum) funksjoner og kolonner i same uttrykk"
@@ -1355,7 +1319,7 @@ ER_WRONG_SUM_SELECT 42000 S1009
swe "Kommandot har både sum functions och enkla funktioner"
ukr "У виразі викориÑтано підÑумовуючі функції порÑд з іменами Ñтовбців"
ER_WRONG_VALUE_COUNT 21S01
- cze "Po-BÄet sloupců neodpovídá zadané hodnotÄ›"
+ cze "PoÄet sloupců neodpovídá zadané hodnotÄ›"
dan "Kolonne tæller stemmer ikke med antallet af værdier"
nla "Het aantal kolommen komt niet overeen met het aantal opgegeven waardes"
eng "Column count doesn't match value count"
@@ -1364,6 +1328,7 @@ ER_WRONG_VALUE_COUNT 21S01
greek "Το Column count δεν ταιÏιάζει με το value count"
hun "Az oszlopban levo ertek nem egyezik meg a szamitott ertekkel"
ita "Il numero delle colonne non e` uguale al numero dei valori"
+ jpn "列数ãŒå€¤ã®å€‹æ•°ã¨ä¸€è‡´ã—ã¾ã›ã‚“。"
kor "ì¹¼ëŸ¼ì˜ ì¹´ìš´íŠ¸ê°€ ê°’ì˜ ì¹´ìš´íŠ¸ì™€ ì¼ì¹˜í•˜ì§€ 않습니다."
nor "Felt telling stemmer verdi telling"
norwegian-ny "Kolonne telling stemmer verdi telling"
@@ -1377,18 +1342,17 @@ ER_WRONG_VALUE_COUNT 21S01
swe "Antalet kolumner motsvarar inte antalet värden"
ukr "КількіÑÑ‚ÑŒ Ñтовбців не Ñпівпадає з кількіÑÑ‚ÑŽ значень"
ER_TOO_LONG_IDENT 42000 S1009
- cze "Jm-Béno identifikátoru '%-.100s' je příliš dlouhé"
+ cze "Jméno identifikátoru '%-.100s' je příliš dlouhé"
dan "Navnet '%-.100s' er for langt"
nla "Naam voor herkenning '%-.100s' is te lang"
eng "Identifier name '%-.100s' is too long"
- jps "Identifier name '%-.100s' ã¯é•·ã™ãŽã¾ã™",
est "Identifikaatori '%-.100s' nimi on liiga pikk"
fre "Le nom de l'identificateur '%-.100s' est trop long"
ger "Name des Bezeichners '%-.100s' ist zu lang"
greek "Το identifier name '%-.100s' είναι Ï€Î¿Î»Ï Î¼ÎµÎ³Î¬Î»Î¿"
hun "A(z) '%-.100s' azonositonev tul hosszu."
ita "Il nome dell'identificatore '%-.100s' e` troppo lungo"
- jpn "Identifier name '%-.100s' ã¯é•·ã™ãŽã¾ã™"
+ jpn "識別å­å '%-.100s' ã¯é•·ã™ãŽã¾ã™ã€‚"
kor "Identifier '%-.100s'는 너무 길군요."
nor "Identifikator '%-.100s' er for lang"
norwegian-ny "Identifikator '%-.100s' er for lang"
@@ -1402,18 +1366,17 @@ ER_TOO_LONG_IDENT 42000 S1009
swe "Kolumnnamn '%-.100s' är för långt"
ukr "Ім'Ñ Ñ–Ð´ÐµÐ½Ñ‚Ð¸Ñ„Ñ–ÐºÐ°Ñ‚Ð¾Ñ€Ð° '%-.100s' задовге"
ER_DUP_FIELDNAME 42S21 S1009
- cze "Zdvojen-Bé jméno sloupce '%-.192s'"
+ cze "Zdvojené jméno sloupce '%-.192s'"
dan "Feltnavnet '%-.192s' findes allerede"
nla "Dubbele kolom naam '%-.192s'"
eng "Duplicate column name '%-.192s'"
- jps "'%-.192s' ã¨ã„ㆠcolumn åã¯é‡è¤‡ã—ã¦ã¾ã™",
est "Kattuv tulba nimi '%-.192s'"
fre "Nom du champ '%-.192s' déjà utilisé"
ger "Doppelter Spaltenname: '%-.192s'"
greek "Επανάληψη column name '%-.192s'"
hun "Duplikalt oszlopazonosito: '%-.192s'"
ita "Nome colonna duplicato '%-.192s'"
- jpn "'%-.192s' ã¨ã„ㆠcolumn åã¯é‡è¤‡ã—ã¦ã¾ã™"
+ jpn "列å '%-.192s' ã¯é‡è¤‡ã—ã¦ã¾ã™ã€‚"
kor "ì¤‘ë³µëœ ì¹¼ëŸ¼ ì´ë¦„: '%-.192s'"
nor "Feltnavnet '%-.192s' eksisterte fra før"
norwegian-ny "Feltnamnet '%-.192s' eksisterte frå før"
@@ -1427,18 +1390,17 @@ ER_DUP_FIELDNAME 42S21 S1009
swe "Kolumnnamn '%-.192s finns flera gånger"
ukr "Дублююче ім'Ñ ÑÑ‚Ð¾Ð²Ð±Ñ†Ñ '%-.192s'"
ER_DUP_KEYNAME 42000 S1009
- cze "Zdvojen-Bé jméno klíÄe '%-.192s'"
+ cze "Zdvojené jméno klíÄe '%-.192s'"
dan "Indeksnavnet '%-.192s' findes allerede"
nla "Dubbele zoeksleutel naam '%-.192s'"
eng "Duplicate key name '%-.192s'"
- jps "'%-.192s' ã¨ã„ㆠkey ã®åå‰ã¯é‡è¤‡ã—ã¦ã„ã¾ã™",
est "Kattuv võtme nimi '%-.192s'"
fre "Nom de clef '%-.192s' déjà utilisé"
ger "Doppelter Name für Schlüssel vorhanden: '%-.192s'"
greek "Επανάληψη key name '%-.192s'"
hun "Duplikalt kulcsazonosito: '%-.192s'"
ita "Nome chiave duplicato '%-.192s'"
- jpn "'%-.192s' ã¨ã„ㆠkey ã®åå‰ã¯é‡è¤‡ã—ã¦ã„ã¾ã™"
+ jpn "索引å '%-.192s' ã¯é‡è¤‡ã—ã¦ã„ã¾ã™ã€‚"
kor "ì¤‘ë³µëœ í‚¤ ì´ë¦„ : '%-.192s'"
nor "Nøkkelnavnet '%-.192s' eksisterte fra før"
norwegian-ny "Nøkkelnamnet '%-.192s' eksisterte frå før"
@@ -1454,32 +1416,31 @@ ER_DUP_KEYNAME 42000 S1009
# When using this error code, please use ER(ER_DUP_ENTRY_WITH_KEY_NAME)
# for the message string. See, for example, code in handler.cc.
ER_DUP_ENTRY 23000 S1009
- cze "Zdvojen-Bý klÃ­Ä '%-.192s' (Äíslo klíÄe %d)"
+ cze "Zdvojený klÃ­Ä '%-.192s' (Äíslo klíÄe %d)"
dan "Ens værdier '%-.192s' for indeks %d"
nla "Dubbele ingang '%-.192s' voor zoeksleutel %d"
eng "Duplicate entry '%-.192s' for key %d"
- jps "'%-.192s' 㯠key %d ã«ãŠã„ã¦é‡è¤‡ã—ã¦ã„ã¾ã™",
est "Kattuv väärtus '%-.192s' võtmele %d"
fre "Duplicata du champ '%-.192s' pour la clef %d"
ger "Doppelter Eintrag '%-.192s' für Schlüssel %d"
greek "Διπλή εγγÏαφή '%-.192s' για το κλειδί %d"
hun "Duplikalt bejegyzes '%-.192s' a %d kulcs szerint."
ita "Valore duplicato '%-.192s' per la chiave %d"
- jpn "'%-.192s' 㯠key %d ã«ãŠã„ã¦é‡è¤‡ã—ã¦ã„ã¾ã™"
+ jpn "'%-.192s' ã¯ç´¢å¼• %d ã§é‡è¤‡ã—ã¦ã„ã¾ã™ã€‚"
kor "ì¤‘ë³µëœ ìž…ë ¥ ê°’ '%-.192s': key %d"
nor "Like verdier '%-.192s' for nøkkel %d"
norwegian-ny "Like verdiar '%-.192s' for nykkel %d"
- pol "Powtórzone wyst?pienie '%-.192s' dla klucza %d"
+ pol "Powtórzone wystąpienie '%-.192s' dla klucza %d"
por "Entrada '%-.192s' duplicada para a chave %d"
rum "Cimpul '%-.192s' e duplicat pentru cheia %d"
rus "ДублирующаÑÑÑ Ð·Ð°Ð¿Ð¸ÑÑŒ '%-.192s' по ключу %d"
serbian "Dupliran unos '%-.192s' za kljuÄ '%d'"
slo "Opakovaný kÄ¾ÃºÄ '%-.192s' (Äíslo kľúÄa %d)"
spa "Entrada duplicada '%-.192s' para la clave %d"
- swe "Dubbel nyckel '%-.192s' för nyckel %d"
+ swe "Dublett '%-.192s' för nyckel %d"
ukr "Дублюючий Ð·Ð°Ð¿Ð¸Ñ '%-.192s' Ð´Ð»Ñ ÐºÐ»ÑŽÑ‡Ð° %d"
ER_WRONG_FIELD_SPEC 42000 S1009
- cze "Chybn-Bá specifikace sloupce '%-.192s'"
+ cze "Chybná specifikace sloupce '%-.192s'"
dan "Forkert kolonnespecifikaton for felt '%-.192s'"
nla "Verkeerde kolom specificatie voor kolom '%-.192s'"
eng "Incorrect column specifier for column '%-.192s'"
@@ -1489,6 +1450,7 @@ ER_WRONG_FIELD_SPEC 42000 S1009
greek "Εσφαλμένο column specifier για το πεδίο '%-.192s'"
hun "Rossz oszlopazonosito: '%-.192s'"
ita "Specifica errata per la colonna '%-.192s'"
+ jpn "列 '%-.192s' ã®å®šç¾©ãŒä¸æ­£ã§ã™ã€‚"
kor "칼럼 '%-.192s'ì˜ ë¶€ì •í™•í•œ 칼럼 ì •ì˜ìž"
nor "Feil kolonne spesifikator for felt '%-.192s'"
norwegian-ny "Feil kolonne spesifikator for kolonne '%-.192s'"
@@ -1502,18 +1464,17 @@ ER_WRONG_FIELD_SPEC 42000 S1009
swe "Felaktigt kolumntyp för kolumn '%-.192s'"
ukr "Ðевірний Ñпецифікатор ÑÑ‚Ð¾Ð²Ð±Ñ†Ñ '%-.192s'"
ER_PARSE_ERROR 42000 s1009
- cze "%s bl-Bízko '%-.80s' na řádku %d"
+ cze "%s blízko '%-.80s' na řádku %d"
dan "%s nær '%-.80s' på linje %d"
nla "%s bij '%-.80s' in regel %d"
eng "%s near '%-.80s' at line %d"
- jps "%s : '%-.80s' 付近 : %d 行目",
est "%s '%-.80s' ligidal real %d"
fre "%s près de '%-.80s' à la ligne %d"
ger "%s bei '%-.80s' in Zeile %d"
greek "%s πλησίον '%-.80s' στη γÏαμμή %d"
hun "A %s a '%-.80s'-hez kozeli a %d sorban"
ita "%s vicino a '%-.80s' linea %d"
- jpn "%s : '%-.80s' 付近 : %d 行目"
+ jpn "%s : '%-.80s' 付近 %d 行目"
kor "'%s' ì—러 ê°™ì니다. ('%-.80s' 명령어 ë¼ì¸ %d)"
nor "%s nær '%-.80s' på linje %d"
norwegian-ny "%s attmed '%-.80s' på line %d"
@@ -1527,18 +1488,17 @@ ER_PARSE_ERROR 42000 s1009
swe "%s nära '%-.80s' på rad %d"
ukr "%s Ð±Ñ–Ð»Ñ '%-.80s' в Ñтроці %d"
ER_EMPTY_QUERY 42000
- cze "V-Býsledek dotazu je prázdný"
+ cze "Výsledek dotazu je prázdný"
dan "Forespørgsel var tom"
nla "Query was leeg"
eng "Query was empty"
- jps "Query ãŒç©ºã§ã™.",
est "Tühi päring"
fre "Query est vide"
ger "Leere Abfrage"
greek "Το εÏώτημα (query) που θέσατε ήταν κενό"
hun "Ures lekerdezes."
ita "La query e` vuota"
- jpn "Query ãŒç©ºã§ã™."
+ jpn "クエリãŒç©ºã§ã™ã€‚"
kor "쿼리결과가 없습니다."
nor "Forespørsel var tom"
norwegian-ny "Førespurnad var tom"
@@ -1552,18 +1512,17 @@ ER_EMPTY_QUERY 42000
swe "Frågan var tom"
ukr "ПуÑтий запит"
ER_NONUNIQ_TABLE 42000 S1009
- cze "Nejednozna-BÄná tabulka/alias: '%-.192s'"
+ cze "NejednoznaÄná tabulka/alias: '%-.192s'"
dan "Tabellen/aliaset: '%-.192s' er ikke unikt"
nla "Niet unieke waarde tabel/alias: '%-.192s'"
eng "Not unique table/alias: '%-.192s'"
- jps "'%-.192s' ã¯ä¸€æ„ã® table/alias åã§ã¯ã‚ã‚Šã¾ã›ã‚“",
est "Ei ole unikaalne tabel/alias '%-.192s'"
fre "Table/alias: '%-.192s' non unique"
ger "Tabellenname/Alias '%-.192s' nicht eindeutig"
greek "ΑδÏνατη η ανεÏÏεση unique table/alias: '%-.192s'"
hun "Nem egyedi tabla/alias: '%-.192s'"
ita "Tabella/alias non unico: '%-.192s'"
- jpn "'%-.192s' ã¯ä¸€æ„ã® table/alias åã§ã¯ã‚ã‚Šã¾ã›ã‚“"
+ jpn "表åï¼åˆ¥å '%-.192s' ã¯ä¸€æ„ã§ã¯ã‚ã‚Šã¾ã›ã‚“。"
kor "Unique 하지 ì•Šì€ í…Œì´ë¸”/alias: '%-.192s'"
nor "Ikke unikt tabell/alias: '%-.192s'"
norwegian-ny "Ikkje unikt tabell/alias: '%-.192s'"
@@ -1577,7 +1536,7 @@ ER_NONUNIQ_TABLE 42000 S1009
swe "Icke unikt tabell/alias: '%-.192s'"
ukr "Ðеунікальна таблицÑ/пÑевдонім: '%-.192s'"
ER_INVALID_DEFAULT 42000 S1009
- cze "Chybn-Bá defaultní hodnota pro '%-.192s'"
+ cze "Chybná defaultní hodnota pro '%-.192s'"
dan "Ugyldig standardværdi for '%-.192s'"
nla "Foutieve standaard waarde voor '%-.192s'"
eng "Invalid default value for '%-.192s'"
@@ -1587,6 +1546,7 @@ ER_INVALID_DEFAULT 42000 S1009
greek "Εσφαλμένη Ï€ÏοκαθοÏισμένη τιμή (default value) για '%-.192s'"
hun "Ervenytelen ertek: '%-.192s'"
ita "Valore di default non valido per '%-.192s'"
+ jpn "'%-.192s' ã¸ã®ãƒ‡ãƒ•ã‚©ãƒ«ãƒˆå€¤ãŒç„¡åŠ¹ã§ã™ã€‚"
kor "'%-.192s'ì˜ ìœ íš¨í•˜ì§€ 못한 ë””í´íŠ¸ ê°’ì„ ì‚¬ìš©í•˜ì…¨ìŠµë‹ˆë‹¤."
nor "Ugyldig standardverdi for '%-.192s'"
norwegian-ny "Ugyldig standardverdi for '%-.192s'"
@@ -1600,18 +1560,17 @@ ER_INVALID_DEFAULT 42000 S1009
swe "Ogiltigt DEFAULT värde för '%-.192s'"
ukr "Ðевірне Ð·Ð½Ð°Ñ‡ÐµÐ½Ð½Ñ Ð¿Ð¾ замовчуванню Ð´Ð»Ñ '%-.192s'"
ER_MULTIPLE_PRI_KEY 42000 S1009
- cze "Definov-Báno více primárních klíÄů"
+ cze "Definováno více primárních klíÄů"
dan "Flere primærnøgler specificeret"
nla "Meerdere primaire zoeksleutels gedefinieerd"
eng "Multiple primary key defined"
- jps "複数㮠primary key ãŒå®šç¾©ã•ã‚Œã¾ã—ãŸ",
est "Mitut primaarset võtit ei saa olla"
fre "Plusieurs clefs primaires définies"
ger "Mehrere Primärschlüssel (PRIMARY KEY) definiert"
greek "ΠεÏισσότεÏα από ένα primary key οÏίστηκαν"
hun "Tobbszoros elsodleges kulcs definialas."
ita "Definite piu` chiave primarie"
- jpn "複数㮠primary key ãŒå®šç¾©ã•ã‚Œã¾ã—ãŸ"
+ jpn "PRIMARY KEY ãŒè¤‡æ•°å®šç¾©ã•ã‚Œã¦ã„ã¾ã™ã€‚"
kor "Multiple primary keyê°€ ì •ì˜ë˜ì–´ 있슴"
nor "Fleire primærnøkle spesifisert"
norwegian-ny "Fleire primærnyklar spesifisert"
@@ -1625,18 +1584,17 @@ ER_MULTIPLE_PRI_KEY 42000 S1009
swe "Flera PRIMARY KEY använda"
ukr "Первинного ключа визначено неодноразово"
ER_TOO_MANY_KEYS 42000 S1009
- cze "Zad-Báno příliÅ¡ mnoho klíÄů, je povoleno nejvíce %d klíÄů"
+ cze "Zadáno příliÅ¡ mnoho klíÄů, je povoleno nejvíce %d klíÄů"
dan "For mange nøgler specificeret. Kun %d nøgler må bruges"
nla "Teveel zoeksleutels gedefinieerd. Maximaal zijn %d zoeksleutels toegestaan"
eng "Too many keys specified; max %d keys allowed"
- jps "key ã®æŒ‡å®šãŒå¤šã™ãŽã¾ã™. key ã¯æœ€å¤§ %d ã¾ã§ã§ã™",
est "Liiga palju võtmeid. Maksimaalselt võib olla %d võtit"
fre "Trop de clefs sont définies. Maximum de %d clefs alloué"
ger "Zu viele Schlüssel definiert. Maximal %d Schlüssel erlaubt"
greek "ΠάÏα πολλά key οÏίσθηκαν. Το Ï€Î¿Î»Ï %d επιτÏέπονται"
hun "Tul sok kulcs. Maximum %d kulcs engedelyezett."
ita "Troppe chiavi. Sono ammesse max %d chiavi"
- jpn "key ã®æŒ‡å®šãŒå¤šã™ãŽã¾ã™. key ã¯æœ€å¤§ %d ã¾ã§ã§ã™"
+ jpn "索引ã®æ•°ãŒå¤šã™ãŽã¾ã™ã€‚最大 %d 個ã¾ã§ã§ã™ã€‚"
kor "너무 ë§Žì€ í‚¤ê°€ ì •ì˜ë˜ì–´ 있ì니다.. 최대 %dì˜ í‚¤ê°€ 가능함"
nor "For mange nøkler spesifisert. Maks %d nøkler tillatt"
norwegian-ny "For mange nykler spesifisert. Maks %d nyklar tillatt"
@@ -1650,7 +1608,7 @@ ER_TOO_MANY_KEYS 42000 S1009
swe "För många nycklar använda. Man får ha högst %d nycklar"
ukr "Забагато ключів зазначено. Дозволено не більше %d ключів"
ER_TOO_MANY_KEY_PARTS 42000 S1009
- cze "Zad-Báno příliÅ¡ mnoho Äást klíÄů, je povoleno nejvíce %d Äástí"
+ cze "Zadáno příliÅ¡ mnoho Äást klíÄů, je povoleno nejvíce %d Äástí"
dan "For mange nøgledele specificeret. Kun %d dele må bruges"
nla "Teveel zoeksleutel onderdelen gespecificeerd. Maximaal %d onderdelen toegestaan"
eng "Too many key parts specified; max %d parts allowed"
@@ -1660,6 +1618,7 @@ ER_TOO_MANY_KEY_PARTS 42000 S1009
greek "ΠάÏα πολλά key parts οÏίσθηκαν. Το Ï€Î¿Î»Ï %d επιτÏέπονται"
hun "Tul sok kulcsdarabot definialt. Maximum %d resz engedelyezett"
ita "Troppe parti di chiave specificate. Sono ammesse max %d parti"
+ jpn "索引ã®ã‚­ãƒ¼åˆ—指定ãŒå¤šã™ãŽã¾ã™ã€‚最大 %d 個ã¾ã§ã§ã™ã€‚"
kor "너무 ë§Žì€ í‚¤ 부분(parts)ë“¤ì´ ì •ì˜ë˜ì–´ 있ì니다.. 최대 %d ë¶€ë¶„ì´ ê°€ëŠ¥í•¨"
nor "For mange nøkkeldeler spesifisert. Maks %d deler tillatt"
norwegian-ny "For mange nykkeldelar spesifisert. Maks %d delar tillatt"
@@ -1673,18 +1632,17 @@ ER_TOO_MANY_KEY_PARTS 42000 S1009
swe "För många nyckeldelar använda. Man får ha högst %d nyckeldelar"
ukr "Забагато чаÑтин ключа зазначено. Дозволено не більше %d чаÑтин"
ER_TOO_LONG_KEY 42000 S1009
- cze "Zadan-Bý klÃ­Ä byl příliÅ¡ dlouhý, nejvÄ›tší délka klíÄe je %d"
+ cze "Zadaný klÃ­Ä byl příliÅ¡ dlouhý, nejvÄ›tší délka klíÄe je %d"
dan "Specificeret nøgle var for lang. Maksimal nøglelængde er %d"
nla "Gespecificeerde zoeksleutel was te lang. De maximale lengte is %d"
eng "Specified key was too long; max key length is %d bytes"
- jps "key ãŒé•·ã™ãŽã¾ã™. key ã®é•·ã•ã¯æœ€å¤§ %d ã§ã™",
est "Võti on liiga pikk. Maksimaalne võtmepikkus on %d"
fre "La clé est trop longue. Longueur maximale: %d"
ger "Schlüssel ist zu lang. Die maximale Schlüssellänge beträgt %d"
greek "Το κλειδί που οÏίσθηκε είναι Ï€Î¿Î»Ï Î¼ÎµÎ³Î¬Î»Î¿. Το μέγιστο μήκος είναι %d"
hun "A megadott kulcs tul hosszu. Maximalis kulcshosszusag: %d"
ita "La chiave specificata e` troppo lunga. La max lunghezza della chiave e` %d"
- jpn "key ãŒé•·ã™ãŽã¾ã™. key ã®é•·ã•ã¯æœ€å¤§ %d ã§ã™"
+ jpn "索引ã®ã‚­ãƒ¼ãŒé•·ã™ãŽã¾ã™ã€‚最大 %d ãƒã‚¤ãƒˆã¾ã§ã§ã™ã€‚"
kor "ì •ì˜ëœ 키가 너무 ê¹ë‹ˆë‹¤. 최대 í‚¤ì˜ ê¸¸ì´ëŠ” %d입니다."
nor "Spesifisert nøkkel var for lang. Maks nøkkellengde er is %d"
norwegian-ny "Spesifisert nykkel var for lang. Maks nykkellengde er %d"
@@ -1698,18 +1656,17 @@ ER_TOO_LONG_KEY 42000 S1009
swe "För lång nyckel. Högsta tillåtna nyckellängd är %d"
ukr "Зазначений ключ задовгий. Ðайбільша довжина ключа %d байтів"
ER_KEY_COLUMN_DOES_NOT_EXITS 42000 S1009
- cze "Kl-BíÄový sloupec '%-.192s' v tabulce neexistuje"
+ cze "KlíÄový sloupec '%-.192s' v tabulce neexistuje"
dan "Nøglefeltet '%-.192s' eksisterer ikke i tabellen"
nla "Zoeksleutel kolom '%-.192s' bestaat niet in tabel"
eng "Key column '%-.192s' doesn't exist in table"
- jps "Key column '%-.192s' ãŒãƒ†ãƒ¼ãƒ–ルã«ã‚ã‚Šã¾ã›ã‚“.",
est "Võtme tulp '%-.192s' puudub tabelis"
fre "La clé '%-.192s' n'existe pas dans la table"
ger "In der Tabelle gibt es kein Schlüsselfeld '%-.192s'"
greek "Το πεδίο κλειδί '%-.192s' δεν υπάÏχει στον πίνακα"
hun "A(z) '%-.192s'kulcsoszlop nem letezik a tablaban"
ita "La colonna chiave '%-.192s' non esiste nella tabella"
- jpn "Key column '%-.192s' ãŒãƒ†ãƒ¼ãƒ–ルã«ã‚ã‚Šã¾ã›ã‚“."
+ jpn "キー列 '%-.192s' ã¯è¡¨ã«ã‚ã‚Šã¾ã›ã‚“。"
kor "Key 칼럼 '%-.192s'는 í…Œì´ë¸”ì— ì¡´ìž¬í•˜ì§€ 않습니다."
nor "Nøkkel felt '%-.192s' eksiterer ikke i tabellen"
norwegian-ny "Nykkel kolonne '%-.192s' eksiterar ikkje i tabellen"
@@ -1728,18 +1685,17 @@ ER_BLOB_USED_AS_KEY 42000 S1009
rus "Столбец типа BLOB %`s не может быть иÑпользован как значение ключа в %s таблице"
ukr "BLOB Ñтовбець %`s не може бути викориÑтаний у визначенні ключа в %s таблиці"
ER_TOO_BIG_FIELDLENGTH 42000 S1009
- cze "P-Bříliš velká délka sloupce '%-.192s' (nejvíce %lu). Použijte BLOB"
+ cze "Příliš velká délka sloupce '%-.192s' (nejvíce %lu). Použijte BLOB"
dan "For stor feltlængde for kolonne '%-.192s' (maks = %lu). Brug BLOB i stedet"
nla "Te grote kolomlengte voor '%-.192s' (max = %lu). Maak hiervoor gebruik van het type BLOB"
eng "Column length too big for column '%-.192s' (max = %lu); use BLOB or TEXT instead"
- jps "column '%-.192s' ã¯,確ä¿ã™ã‚‹ column ã®å¤§ãã•ãŒå¤šã™ãŽã¾ã™. (最大 %lu ã¾ã§). BLOB ã‚’ã‹ã‚ã‚Šã«ä½¿ç”¨ã—ã¦ãã ã•ã„."
est "Tulba '%-.192s' pikkus on liiga pikk (maksimaalne pikkus: %lu). Kasuta BLOB väljatüüpi"
fre "Champ '%-.192s' trop long (max = %lu). Utilisez un BLOB"
ger "Feldlänge für Feld '%-.192s' zu groß (maximal %lu). BLOB- oder TEXT-Spaltentyp verwenden!"
greek "Î Î¿Î»Ï Î¼ÎµÎ³Î¬Î»Î¿ μήκος για το πεδίο '%-.192s' (max = %lu). ΠαÏακαλώ χÏησιμοποιείστε τον Ï„Ïπο BLOB"
hun "A(z) '%-.192s' oszlop tul hosszu. (maximum = %lu). Hasznaljon BLOB tipust inkabb."
ita "La colonna '%-.192s' e` troppo grande (max=%lu). Utilizza un BLOB."
- jpn "column '%-.192s' ã¯,確ä¿ã™ã‚‹ column ã®å¤§ãã•ãŒå¤šã™ãŽã¾ã™. (最大 %lu ã¾ã§). BLOB ã‚’ã‹ã‚ã‚Šã«ä½¿ç”¨ã—ã¦ãã ã•ã„."
+ jpn "列 '%-.192s' ã®ã‚µã‚¤ã‚ºå®šç¾©ãŒå¤§ãã™ãŽã¾ã™ (最大 %lu ã¾ã§)。代ã‚ã‚Šã« BLOB ã¾ãŸã¯ TEXT を使用ã—ã¦ãã ã•ã„。"
kor "칼럼 '%-.192s'ì˜ ì¹¼ëŸ¼ 길ì´ê°€ 너무 ê¹ë‹ˆë‹¤ (최대 = %lu). ëŒ€ì‹ ì— BLOB를 사용하세요."
nor "For stor nøkkellengde for kolonne '%-.192s' (maks = %lu). Bruk BLOB istedenfor"
norwegian-ny "For stor nykkellengde for felt '%-.192s' (maks = %lu). Bruk BLOB istadenfor"
@@ -1753,18 +1709,17 @@ ER_TOO_BIG_FIELDLENGTH 42000 S1009
swe "För stor kolumnlängd angiven för '%-.192s' (max= %lu). Använd en BLOB instället"
ukr "Задовга довжина ÑÑ‚Ð¾Ð²Ð±Ñ†Ñ '%-.192s' (max = %lu). ВикориÑтайте тип BLOB"
ER_WRONG_AUTO_KEY 42000 S1009
- cze "M-Bůžete mít pouze jedno AUTO pole a to musí být definováno jako klíÄ"
+ cze "Můžete mít pouze jedno AUTO pole a to musí být definováno jako klíÄ"
dan "Der kan kun specificeres eet AUTO_INCREMENT-felt, og det skal være indekseret"
nla "Er kan slechts 1 autofield zijn en deze moet als zoeksleutel worden gedefinieerd."
eng "Incorrect table definition; there can be only one auto column and it must be defined as a key"
- jps "テーブルã®å®šç¾©ãŒé•ã„ã¾ã™; there can be only one auto column and it must be defined as a key",
est "Vigane tabelikirjeldus; Tabelis tohib olla üks auto_increment tüüpi tulp ning see peab olema defineeritud võtmena"
fre "Un seul champ automatique est permis et il doit être indexé"
ger "Falsche Tabellendefinition. Es darf nur eine AUTO_INCREMENT-Spalte geben, und diese muss als Schlüssel definiert werden"
greek "ΜποÏεί να υπάÏχει μόνο ένα auto field και Ï€Ïέπει να έχει οÏισθεί σαν key"
hun "Csak egy auto mezo lehetseges, es azt kulcskent kell definialni."
ita "Puo` esserci solo un campo AUTO e deve essere definito come chiave"
- jpn "テーブルã®å®šç¾©ãŒé•ã„ã¾ã™; there can be only one auto column and it must be defined as a key"
+ jpn "ä¸æ­£ãªè¡¨å®šç¾©ã§ã™ã€‚AUTO_INCREMENT列ã¯ï¼‘個ã¾ã§ã§ã€ç´¢å¼•ã‚’定義ã™ã‚‹å¿…è¦ãŒã‚ã‚Šã¾ã™ã€‚"
kor "부정확한 í…Œì´ë¸” ì •ì˜; í…Œì´ë¸”ì€ í•˜ë‚˜ì˜ auto ì¹¼ëŸ¼ì´ ì¡´ìž¬í•˜ê³  키로 ì •ì˜ë˜ì–´ì ¸ì•¼ 합니다."
nor "Bare ett auto felt kan være definert som nøkkel."
norwegian-ny "Bare eitt auto felt kan være definert som nøkkel."
@@ -1778,18 +1733,17 @@ ER_WRONG_AUTO_KEY 42000 S1009
swe "Det får finnas endast ett AUTO_INCREMENT-fält och detta måste vara en nyckel"
ukr "Ðевірне Ð²Ð¸Ð·Ð½Ð°Ñ‡ÐµÐ½Ð½Ñ Ñ‚Ð°Ð±Ð»Ð¸Ñ†Ñ–; Може бути лише один автоматичний Ñтовбець, що повинен бути визначений Ñк ключ"
ER_READY
- cze "%s: p-Břipraven na spojení\nVersion: '%s' socket: '%s' port: %d""
+ cze "%s: připraven na spojení\nVersion: '%s' socket: '%s' port: %d""
dan "%s: klar til tilslutninger\nVersion: '%s' socket: '%s' port: %d""
nla "%s: klaar voor verbindingen\nVersion: '%s' socket: '%s' port: %d""
eng "%s: ready for connections.\nVersion: '%s' socket: '%s' port: %d"
- jps "%s: 準備完了¥nVersion: '%s' socket: '%s' port: %d"",
est "%s: ootab ühendusi\nVersion: '%s' socket: '%s' port: %d""
fre "%s: Prêt pour des connexions\nVersion: '%s' socket: '%s' port: %d""
ger "%s: Bereit für Verbindungen.\nVersion: '%s' Socket: '%s' Port: %d"
greek "%s: σε αναμονή συνδέσεων\nVersion: '%s' socket: '%s' port: %d""
hun "%s: kapcsolatra kesz\nVersion: '%s' socket: '%s' port: %d""
ita "%s: Pronto per le connessioni\nVersion: '%s' socket: '%s' port: %d""
- jpn "%s: 準備完了\nVersion: '%s' socket: '%s' port: %d""
+ jpn "%s: 接続準備完了。\nãƒãƒ¼ã‚¸ãƒ§ãƒ³: '%s' socket: '%s' port: %d""
kor "%s: 연결 준비중입니다\nVersion: '%s' socket: '%s' port: %d""
nor "%s: klar for tilkoblinger\nVersion: '%s' socket: '%s' port: %d""
norwegian-ny "%s: klar for tilkoblingar\nVersion: '%s' socket: '%s' port: %d""
@@ -1803,7 +1757,7 @@ ER_READY
swe "%s: klar att ta emot klienter\nVersion: '%s' socket: '%s' port: %d""
ukr "%s: Готовий Ð´Ð»Ñ Ð·'єднань!\nVersion: '%s' socket: '%s' port: %d""
ER_NORMAL_SHUTDOWN
- cze "%s: norm-Bální ukonÄení\n"
+ cze "%s: normální ukonÄení\n"
dan "%s: Normal nedlukning\n"
nla "%s: Normaal afgesloten \n"
eng "%s: Normal shutdown\n"
@@ -1813,6 +1767,7 @@ ER_NORMAL_SHUTDOWN
greek "%s: Φυσιολογική διαδικασία shutdown\n"
hun "%s: Normal leallitas\n"
ita "%s: Shutdown normale\n"
+ jpn "%s: 通常シャットダウン\n"
kor "%s: ì •ìƒì ì¸ shutdown\n"
nor "%s: Normal avslutning\n"
norwegian-ny "%s: Normal nedkopling\n"
@@ -1826,18 +1781,17 @@ ER_NORMAL_SHUTDOWN
swe "%s: Normal avslutning\n"
ukr "%s: Ðормальне завершеннÑ\n"
ER_GOT_SIGNAL
- cze "%s: p-BÅ™ijat signal %d, konÄím\n"
+ cze "%s: pÅ™ijat signal %d, konÄím\n"
dan "%s: Fangede signal %d. Afslutter!!\n"
nla "%s: Signaal %d. Systeem breekt af!\n"
eng "%s: Got signal %d. Aborting!\n"
- jps "%s: Got signal %d. 中断!¥n",
est "%s: sain signaali %d. Lõpetan!\n"
fre "%s: Reçu le signal %d. Abandonne!\n"
ger "%s: Signal %d erhalten. Abbruch!\n"
greek "%s: Ελήφθη το μήνυμα %d. Η διαδικασία εγκαταλείπεται!\n"
hun "%s: %d jelzes. Megszakitva!\n"
ita "%s: Ricevuto segnale %d. Interruzione!\n"
- jpn "%s: Got signal %d. 中断!\n"
+ jpn "%s: シグナル %d ã‚’å—ä¿¡ã—ã¾ã—ãŸã€‚強制終了ã—ã¾ã™ï¼\n"
kor "%s: %d 신호가 들어왔ìŒ. 중지!\n"
nor "%s: Oppdaget signal %d. Avslutter!\n"
norwegian-ny "%s: Oppdaga signal %d. Avsluttar!\n"
@@ -1851,18 +1805,17 @@ ER_GOT_SIGNAL
swe "%s: Fick signal %d. Avslutar!\n"
ukr "%s: Отримано Ñигнал %d. ПерериваюÑÑŒ!\n"
ER_SHUTDOWN_COMPLETE
- cze "%s: ukon-BÄení práce hotovo\n"
+ cze "%s: ukonÄení práce hotovo\n"
dan "%s: Server lukket\n"
nla "%s: Afsluiten afgerond\n"
eng "%s: Shutdown complete\n"
- jps "%s: Shutdown 完了¥n",
est "%s: Lõpp\n"
fre "%s: Arrêt du serveur terminé\n"
ger "%s: Herunterfahren beendet\n"
greek "%s: Η διαδικασία Shutdown ολοκληÏώθηκε\n"
hun "%s: A leallitas kesz\n"
ita "%s: Shutdown completato\n"
- jpn "%s: Shutdown 完了\n"
+ jpn "%s: シャットダウン完了\n"
kor "%s: Shutdown ì´ ì™„ë£Œë¨!\n"
nor "%s: Avslutning komplett\n"
norwegian-ny "%s: Nedkopling komplett\n"
@@ -1876,18 +1829,17 @@ ER_SHUTDOWN_COMPLETE
swe "%s: Avslutning klar\n"
ukr "%s: Роботу завершено\n"
ER_FORCING_CLOSE 08S01
- cze "%s: n-Básilné uzavření threadu %ld uživatele '%-.48s'\n"
+ cze "%s: násilné uzavření threadu %ld uživatele '%-.48s'\n"
dan "%s: Forceret nedlukning af tråd: %ld bruger: '%-.48s'\n"
nla "%s: Afsluiten afgedwongen van thread %ld gebruiker: '%-.48s'\n"
eng "%s: Forcing close of thread %ld user: '%-.48s'\n"
- jps "%s: スレッド %ld 強制終了 user: '%-.48s'¥n",
est "%s: Sulgen jõuga lõime %ld kasutaja: '%-.48s'\n"
fre "%s: Arrêt forcé de la tâche (thread) %ld utilisateur: '%-.48s'\n"
ger "%s: Thread %ld zwangsweise beendet. Benutzer: '%-.48s'\n"
greek "%s: Το thread θα κλείσει %ld user: '%-.48s'\n"
hun "%s: A(z) %ld thread kenyszeritett zarasa. Felhasznalo: '%-.48s'\n"
ita "%s: Forzata la chiusura del thread %ld utente: '%-.48s'\n"
- jpn "%s: スレッド %ld 強制終了 user: '%-.48s'\n"
+ jpn "%s: スレッド %ld を強制終了ã—ã¾ã™ (ユーザー: '%-.48s')\n"
kor "%s: thread %ldì˜ ê°•ì œ 종료 user: '%-.48s'\n"
nor "%s: Påtvinget avslutning av tråd %ld bruker: '%-.48s'\n"
norwegian-ny "%s: Påtvinga avslutning av tråd %ld brukar: '%-.48s'\n"
@@ -1901,18 +1853,17 @@ ER_FORCING_CLOSE 08S01
swe "%s: Stänger av tråd %ld; användare: '%-.48s'\n"
ukr "%s: ПриÑкорюю Ð·Ð°ÐºÑ€Ð¸Ñ‚Ñ‚Ñ Ð³Ñ–Ð»ÐºÐ¸ %ld кориÑтувача: '%-.48s'\n"
ER_IPSOCK_ERROR 08S01
- cze "Nemohu vytvo-Břit IP socket"
+ cze "Nemohu vytvořit IP socket"
dan "Kan ikke oprette IP socket"
nla "Kan IP-socket niet openen"
eng "Can't create IP socket"
- jps "IP socket ãŒä½œã‚Œã¾ã›ã‚“",
est "Ei suuda luua IP socketit"
fre "Ne peut créer la connexion IP (socket)"
ger "Kann IP-Socket nicht erzeugen"
greek "Δεν είναι δυνατή η δημιουÏγία IP socket"
hun "Az IP socket nem hozhato letre"
ita "Impossibile creare il socket IP"
- jpn "IP socket ãŒä½œã‚Œã¾ã›ã‚“"
+ jpn "IPソケットを作æˆã§ãã¾ã›ã‚“。"
kor "IP ì†Œì¼“ì„ ë§Œë“¤ì§€ 못했습니다."
nor "Kan ikke opprette IP socket"
norwegian-ny "Kan ikkje opprette IP socket"
@@ -1926,18 +1877,17 @@ ER_IPSOCK_ERROR 08S01
swe "Kan inte skapa IP-socket"
ukr "Ðе можу Ñтворити IP роз'єм"
ER_NO_SUCH_INDEX 42S12 S1009
- cze "Tabulka '%-.192s' nem-Bá index odpovídající CREATE INDEX. Vytvořte tabulku znovu"
+ cze "Tabulka '%-.192s' nemá index odpovídající CREATE INDEX. Vytvořte tabulku znovu"
dan "Tabellen '%-.192s' har ikke den nøgle, som blev brugt i CREATE INDEX. Genopret tabellen"
nla "Tabel '%-.192s' heeft geen INDEX zoals deze gemaakt worden met CREATE INDEX. Maak de tabel opnieuw"
eng "Table '%-.192s' has no index like the one used in CREATE INDEX; recreate the table"
- jps "Table '%-.192s' ã¯ãã®ã‚ˆã†ãª index ã‚’æŒã£ã¦ã„ã¾ã›ã‚“(CREATE INDEX 実行時ã«æŒ‡å®šã•ã‚Œã¦ã„ã¾ã›ã‚“). テーブルを作り直ã—ã¦ãã ã•ã„",
est "Tabelil '%-.192s' puuduvad võtmed. Loo tabel uuesti"
fre "La table '%-.192s' n'a pas d'index comme celle utilisée dans CREATE INDEX. Recréez la table"
ger "Tabelle '%-.192s' besitzt keinen wie den in CREATE INDEX verwendeten Index. Tabelle neu anlegen"
greek "Ο πίνακας '%-.192s' δεν έχει ευÏετήÏιο (index) σαν αυτό που χÏησιμοποιείτε στην CREATE INDEX. ΠαÏακαλώ, ξαναδημιουÏγήστε τον πίνακα"
hun "A(z) '%-.192s' tablahoz nincs meg a CREATE INDEX altal hasznalt index. Alakitsa at a tablat"
ita "La tabella '%-.192s' non ha nessun indice come quello specificatato dalla CREATE INDEX. Ricrea la tabella"
- jpn "Table '%-.192s' ã¯ãã®ã‚ˆã†ãª index ã‚’æŒã£ã¦ã„ã¾ã›ã‚“(CREATE INDEX 実行時ã«æŒ‡å®šã•ã‚Œã¦ã„ã¾ã›ã‚“). テーブルを作り直ã—ã¦ãã ã•ã„"
+ jpn "表 '%-.192s' ã«ä»¥å‰CREATE INDEXã§ä½œæˆã•ã‚ŒãŸç´¢å¼•ãŒã‚ã‚Šã¾ã›ã‚“。表を作り直ã—ã¦ãã ã•ã„。"
kor "í…Œì´ë¸” '%-.192s'는 ì¸ë±ìŠ¤ë¥¼ 만들지 않았습니다. alter í…Œì´ë¸”ëª…ë ¹ì„ ì´ìš©í•˜ì—¬ í…Œì´ë¸”ì„ ìˆ˜ì •í•˜ì„¸ìš”..."
nor "Tabellen '%-.192s' har ingen index som den som er brukt i CREATE INDEX. Gjenopprett tabellen"
norwegian-ny "Tabellen '%-.192s' har ingen index som den som er brukt i CREATE INDEX. Oprett tabellen på nytt"
@@ -1951,7 +1901,7 @@ ER_NO_SUCH_INDEX 42S12 S1009
swe "Tabellen '%-.192s' har inget index som motsvarar det angivna i CREATE INDEX. Skapa om tabellen"
ukr "Ð¢Ð°Ð±Ð»Ð¸Ñ†Ñ '%-.192s' має індекÑ, що не Ñпівпадає з вказанним у CREATE INDEX. Створіть таблицю знову"
ER_WRONG_FIELD_TERMINATORS 42000 S1009
- cze "Argument separ-Bátoru položek nebyl oÄekáván. PÅ™eÄtÄ›te si manuál"
+ cze "Argument separátoru položek nebyl oÄekáván. PÅ™eÄtÄ›te si manuál"
dan "Felt adskiller er ikke som forventet, se dokumentationen"
nla "De argumenten om velden te scheiden zijn anders dan verwacht. Raadpleeg de handleiding"
eng "Field separator argument is not what is expected; check the manual"
@@ -1961,6 +1911,7 @@ ER_WRONG_FIELD_TERMINATORS 42000 S1009
greek "Ο διαχωÏιστής πεδίων δεν είναι αυτός που αναμενόταν. ΠαÏακαλώ ανατÏέξτε στο manual"
hun "A mezoelvalaszto argumentumok nem egyeznek meg a varttal. Nezze meg a kezikonyvben!"
ita "L'argomento 'Field separator' non e` quello atteso. Controlla il manuale"
+ jpn "フィールド区切り文字ãŒäºˆæœŸã›ã¬ä½¿ã‚れ方をã—ã¦ã„ã¾ã™ã€‚マニュアルを確èªã—ã¦ä¸‹ã•ã„。"
kor "í•„ë“œ êµ¬ë¶„ìž ì¸ìˆ˜ë“¤ì´ 완전하지 않습니다. ë©”ë‰´ì–¼ì„ ì°¾ì•„ 보세요."
nor "Felt skiller argumentene er ikke som forventet, se dokumentasjonen"
norwegian-ny "Felt skiljer argumenta er ikkje som venta, sjå dokumentasjonen"
@@ -1974,7 +1925,7 @@ ER_WRONG_FIELD_TERMINATORS 42000 S1009
swe "Fältseparatorerna är vad som förväntades. Kontrollera mot manualen"
ukr "Хибний розділювач полів. Почитайте документацію"
ER_BLOBS_AND_NO_TERMINATED 42000 S1009
- cze "Nen-Bí možné použít pevný rowlength s BLOBem. Použijte 'fields terminated by'."
+ cze "Není možné použít pevný rowlength s BLOBem. Použijte 'fields terminated by'."
dan "Man kan ikke bruge faste feltlængder med BLOB. Brug i stedet 'fields terminated by'."
nla "Bij het gebruik van BLOBs is het niet mogelijk om vaste rijlengte te gebruiken. Maak s.v.p. gebruik van 'fields terminated by'."
eng "You can't use fixed rowlength with BLOBs; please use 'fields terminated by'"
@@ -1984,7 +1935,7 @@ ER_BLOBS_AND_NO_TERMINATED 42000 S1009
greek "Δεν μποÏείτε να χÏησιμοποιήσετε fixed rowlength σε BLOBs. ΠαÏακαλώ χÏησιμοποιείστε 'fields terminated by'."
hun "Fix hosszusagu BLOB-ok nem hasznalhatok. Hasznalja a 'mezoelvalaszto jelet' ."
ita "Non possono essere usate righe a lunghezza fissa con i BLOB. Usa 'FIELDS TERMINATED BY'."
- jpn "You can't use fixed rowlength with BLOBs; please use 'fields terminated by'."
+ jpn "BLOBã«ã¯å›ºå®šé•·ãƒ¬ã‚³ãƒ¼ãƒ‰ãŒä½¿ç”¨ã§ãã¾ã›ã‚“。'FIELDS TERMINATED BY'å¥ã‚’使用ã—ã¦ä¸‹ã•ã„。"
kor "BLOB로는 고정길ì´ì˜ lowlength를 사용할 수 없습니다. 'fields terminated by'를 사용하세요."
nor "En kan ikke bruke faste feltlengder med BLOB. Vennlisgt bruk 'fields terminated by'."
norwegian-ny "Ein kan ikkje bruke faste feltlengder med BLOB. Vennlisgt bruk 'fields terminated by'."
@@ -1998,18 +1949,17 @@ ER_BLOBS_AND_NO_TERMINATED 42000 S1009
swe "Man kan inte använda fast radlängd med blobs. Använd 'fields terminated by'"
ukr "Ðе можна викориÑтовувати Ñталу довжину Ñтроки з BLOB. ЗкориÑтайтеÑÑ 'fields terminated by'"
ER_TEXTFILE_NOT_READABLE
- cze "Soubor '%-.128s' mus-Bí být v adresáři databáze nebo Äitelný pro vÅ¡echny"
+ cze "Soubor '%-.128s' musí být v adresáři databáze nebo Äitelný pro vÅ¡echny"
dan "Filen '%-.128s' skal være i database-folderen, eller kunne læses af alle"
nla "Het bestand '%-.128s' dient in de database directory voor the komen of leesbaar voor iedereen te zijn."
eng "The file '%-.128s' must be in the database directory or be readable by all"
- jps "ファイル '%-.128s' 㯠databse ã® directory ã«ã‚ã‚‹ã‹å…¨ã¦ã®ãƒ¦ãƒ¼ã‚¶ãƒ¼ãŒèª­ã‚るよã†ã«è¨±å¯ã•ã‚Œã¦ã„ãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“.",
est "Fail '%-.128s' peab asuma andmebaasi kataloogis või olema kõigile loetav"
fre "Le fichier '%-.128s' doit être dans le répertoire de la base et lisible par tous"
ger "Datei '%-.128s' muss im Datenbank-Verzeichnis vorhanden oder lesbar für alle sein"
greek "Το αÏχείο '%-.128s' Ï€Ïέπει να υπάÏχει στο database directory ή να μποÏεί να διαβαστεί από όλους"
hun "A(z) '%-.128s'-nak az adatbazis konyvtarban kell lennie, vagy mindenki szamara olvashatonak"
ita "Il file '%-.128s' deve essere nella directory del database e deve essere leggibile da tutti"
- jpn "ファイル '%-.128s' 㯠databse ã® directory ã«ã‚ã‚‹ã‹å…¨ã¦ã®ãƒ¦ãƒ¼ã‚¶ãƒ¼ãŒèª­ã‚るよã†ã«è¨±å¯ã•ã‚Œã¦ã„ãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“."
+ jpn "ファイル '%-.128s' ã¯ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹ãƒ‡ã‚£ãƒ¬ã‚¯ãƒˆãƒªã«ã‚ã‚‹ã‹ã€å…¨ã¦ã®ãƒ¦ãƒ¼ã‚¶ãƒ¼ã‹ã‚‰èª­ã‚ã‚‹å¿…è¦ãŒã‚ã‚Šã¾ã™ã€‚"
kor "'%-.128s' í™”ì¼ëŠ” ë°ì´íƒ€ë² ì´ìŠ¤ ë””ë ‰í† ë¦¬ì— ì¡´ìž¬í•˜ê±°ë‚˜ 모ë‘ì—게 ì½ê¸° 가능하여야 합니다."
nor "Filen '%-.128s' må være i database-katalogen for å være lesbar for alle"
norwegian-ny "Filen '%-.128s' må være i database-katalogen for å være lesbar for alle"
@@ -2023,18 +1973,17 @@ ER_TEXTFILE_NOT_READABLE
swe "Textfilen '%-.128s' måste finnas i databasbiblioteket eller vara läsbar för alla"
ukr "Файл '%-.128s' повинен бути у теці бази данних або мати вÑтановлене право на Ñ‡Ð¸Ñ‚Ð°Ð½Ð½Ñ Ð´Ð»Ñ ÑƒÑÑ–Ñ…"
ER_FILE_EXISTS_ERROR
- cze "Soubor '%-.200s' ji-Bž existuje"
+ cze "Soubor '%-.200s' již existuje"
dan "Filen '%-.200s' eksisterer allerede"
nla "Het bestand '%-.200s' bestaat reeds"
eng "File '%-.200s' already exists"
- jps "File '%-.200s' ã¯æ—¢ã«å­˜åœ¨ã—ã¾ã™",
est "Fail '%-.200s' juba eksisteerib"
fre "Le fichier '%-.200s' existe déjà"
ger "Datei '%-.200s' bereits vorhanden"
greek "Το αÏχείο '%-.200s' υπάÏχει ήδη"
hun "A '%-.200s' file mar letezik."
ita "Il file '%-.200s' esiste gia`"
- jpn "File '%-.200s' ã¯æ—¢ã«å­˜åœ¨ã—ã¾ã™"
+ jpn "ファイル '%-.200s' ã¯ã™ã§ã«å­˜åœ¨ã—ã¾ã™ã€‚"
kor "'%-.200s' í™”ì¼ì€ ì´ë¯¸ 존재합니다."
nor "Filen '%-.200s' eksisterte allerede"
norwegian-ny "Filen '%-.200s' eksisterte allereide"
@@ -2048,18 +1997,17 @@ ER_FILE_EXISTS_ERROR
swe "Filen '%-.200s' existerar redan"
ukr "Файл '%-.200s' вже Ñ–Ñнує"
ER_LOAD_INFO
- cze "Z-Báznamů: %ld Vymazáno: %ld PÅ™eskoÄeno: %ld Varování: %ld"
+ cze "Záznamů: %ld Vymazáno: %ld PÅ™eskoÄeno: %ld Varování: %ld"
dan "Poster: %ld Fjernet: %ld Sprunget over: %ld Advarsler: %ld"
nla "Records: %ld Verwijderd: %ld Overgeslagen: %ld Waarschuwingen: %ld"
eng "Records: %ld Deleted: %ld Skipped: %ld Warnings: %ld"
- jps "レコード数: %ld 削除: %ld Skipped: %ld Warnings: %ld",
est "Kirjeid: %ld Kustutatud: %ld Vahele jäetud: %ld Hoiatusi: %ld"
fre "Enregistrements: %ld Effacés: %ld Non traités: %ld Avertissements: %ld"
ger "Datensätze: %ld Gelöscht: %ld Ausgelassen: %ld Warnungen: %ld"
greek "ΕγγÏαφές: %ld ΔιαγÏαφές: %ld ΠαÏεκάμφθησαν: %ld ΠÏοειδοποιήσεις: %ld"
hun "Rekordok: %ld Torolve: %ld Skipped: %ld Warnings: %ld"
ita "Records: %ld Cancellati: %ld Saltati: %ld Avvertimenti: %ld"
- jpn "レコード数: %ld 削除: %ld Skipped: %ld Warnings: %ld"
+ jpn "レコード数: %ld 削除: %ld スキップ: %ld 警告: %ld"
kor "레코드: %ld개 삭제: %ld개 스킵: %ld개 경고: %ld개"
nor "Poster: %ld Fjernet: %ld Hoppet over: %ld Advarsler: %ld"
norwegian-ny "Poster: %ld Fjerna: %ld Hoppa over: %ld Ã…tvaringar: %ld"
@@ -2073,11 +2021,10 @@ ER_LOAD_INFO
swe "Rader: %ld Bortagna: %ld Dubletter: %ld Varningar: %ld"
ukr "ЗапиÑів: %ld Видалено: %ld Пропущено: %ld ЗаÑтережень: %ld"
ER_ALTER_INFO
- cze "Z-Báznamů: %ld Zdvojených: %ld"
+ cze "Záznamů: %ld Zdvojených: %ld"
dan "Poster: %ld Ens: %ld"
nla "Records: %ld Dubbel: %ld"
eng "Records: %ld Duplicates: %ld"
- jps "レコード数: %ld é‡è¤‡: %ld",
est "Kirjeid: %ld Kattuvaid: %ld"
fre "Enregistrements: %ld Doublons: %ld"
ger "Datensätze: %ld Duplikate: %ld"
@@ -2098,7 +2045,7 @@ ER_ALTER_INFO
swe "Rader: %ld Dubletter: %ld"
ukr "ЗапиÑів: %ld Дублікатів: %ld"
ER_WRONG_SUB_KEY
- cze "Chybn-Bá podÄást klíÄe -- není to Å™etÄ›zec nebo je delší než délka Äásti klíÄe"
+ cze "Chybná podÄást klíÄe -- není to Å™etÄ›zec nebo je delší než délka Äásti klíÄe"
dan "Forkert indeksdel. Den anvendte nøgledel er ikke en streng eller længden er større end nøglelængden"
nla "Foutief sub-gedeelte van de zoeksleutel. De gebruikte zoeksleutel is geen onderdeel van een string of of de gebruikte lengte is langer dan de zoeksleutel"
eng "Incorrect prefix key; the used key part isn't a string, the used length is longer than the key part, or the storage engine doesn't support unique prefix keys"
@@ -2108,7 +2055,7 @@ ER_WRONG_SUB_KEY
greek "Εσφαλμένο sub part key. Το χÏησιμοποιοÏμενο key part δεν είναι string ή το μήκος του είναι μεγαλÏτεÏο"
hun "Rossz alkulcs. A hasznalt kulcsresz nem karaktersorozat vagy hosszabb, mint a kulcsresz"
ita "Sotto-parte della chiave errata. La parte di chiave utilizzata non e` una stringa o la lunghezza e` maggiore della parte di chiave."
- jpn "Incorrect prefix key; the used key part isn't a string or the used length is longer than the key part"
+ jpn "キーã®ãƒ—レフィックスãŒä¸æ­£ã§ã™ã€‚キーãŒæ–‡å­—列ã§ã¯ãªã„ã‹ã€ãƒ—レフィックス長ãŒã‚­ãƒ¼ã‚ˆã‚Šã‚‚é•·ã„ã‹ã€ã‚¹ãƒˆãƒ¬ãƒ¼ã‚¸ã‚¨ãƒ³ã‚¸ãƒ³ãŒä¸€æ„索引ã®ãƒ—レフィックス指定をサãƒãƒ¼ãƒˆã—ã¦ã„ã¾ã›ã‚“。"
kor "부정확한 서버 파트 키. ì‚¬ìš©ëœ í‚¤ 파트가 스트ë§ì´ 아니거나 키 íŒŒíŠ¸ì˜ ê¸¸ì´ê°€ 너무 ê¹ë‹ˆë‹¤."
nor "Feil delnøkkel. Den brukte delnøkkelen er ikke en streng eller den oppgitte lengde er lengre enn nøkkel lengden"
norwegian-ny "Feil delnykkel. Den brukte delnykkelen er ikkje ein streng eller den oppgitte lengda er lengre enn nykkellengden"
@@ -2122,18 +2069,17 @@ ER_WRONG_SUB_KEY
swe "Felaktig delnyckel. Nyckeldelen är inte en sträng eller den angivna längden är längre än kolumnlängden"
ukr "Ðевірна чаÑтина ключа. ВикориÑтана чаÑтина ключа не Ñ” Ñтрокою, задовга або вказівник таблиці не підтримує унікальних чаÑтин ключей"
ER_CANT_REMOVE_ALL_FIELDS 42000
- cze "Nen-Bí možné vymazat všechny položky s ALTER TABLE. Použijte DROP TABLE"
+ cze "Není možné vymazat všechny položky s ALTER TABLE. Použijte DROP TABLE"
dan "Man kan ikke slette alle felter med ALTER TABLE. Brug DROP TABLE i stedet."
nla "Het is niet mogelijk alle velden te verwijderen met ALTER TABLE. Gebruik a.u.b. DROP TABLE hiervoor!"
eng "You can't delete all columns with ALTER TABLE; use DROP TABLE instead"
- jps "ALTER TABLE ã§å…¨ã¦ã® column ã¯å‰Šé™¤ã§ãã¾ã›ã‚“. DROP TABLE を使用ã—ã¦ãã ã•ã„",
est "ALTER TABLE kasutades ei saa kustutada kõiki tulpasid. Kustuta tabel DROP TABLE abil"
fre "Vous ne pouvez effacer tous les champs avec ALTER TABLE. Utilisez DROP TABLE"
ger "Mit ALTER TABLE können nicht alle Felder auf einmal gelöscht werden. Dafür DROP TABLE verwenden"
greek "Δεν είναι δυνατή η διαγÏαφή όλων των πεδίων με ALTER TABLE. ΠαÏακαλώ χÏησιμοποιείστε DROP TABLE"
hun "Az osszes mezo nem torolheto az ALTER TABLE-lel. Hasznalja a DROP TABLE-t helyette"
ita "Non si possono cancellare tutti i campi con una ALTER TABLE. Utilizzare DROP TABLE"
- jpn "ALTER TABLE ã§å…¨ã¦ã® column ã¯å‰Šé™¤ã§ãã¾ã›ã‚“. DROP TABLE を使用ã—ã¦ãã ã•ã„"
+ jpn "ALTER TABLE ã§ã¯å…¨ã¦ã®åˆ—ã®å‰Šé™¤ã¯ã§ãã¾ã›ã‚“。DROP TABLE を使用ã—ã¦ãã ã•ã„。"
kor "ALTER TABLE 명령으로는 모든 ì¹¼ëŸ¼ì„ ì§€ìš¸ 수 없습니다. DROP TABLE ëª…ë ¹ì„ ì´ìš©í•˜ì„¸ìš”."
nor "En kan ikke slette alle felt med ALTER TABLE. Bruk DROP TABLE isteden."
norwegian-ny "Ein kan ikkje slette alle felt med ALTER TABLE. Bruk DROP TABLE istadenfor."
@@ -2147,18 +2093,17 @@ ER_CANT_REMOVE_ALL_FIELDS 42000
swe "Man kan inte radera alla fält med ALTER TABLE. Använd DROP TABLE istället"
ukr "Ðе можливо видалити вÑÑ– Ñтовбці за допомогою ALTER TABLE. Ð”Ð»Ñ Ñ†ÑŒÐ¾Ð³Ð¾ ÑкориÑтайтеÑÑ DROP TABLE"
ER_CANT_DROP_FIELD_OR_KEY 42000
- cze "Nemohu zru-BÅ¡it '%-.192s' (provést DROP). Zkontrolujte, zda neexistují záznamy/klíÄe"
+ cze "Nemohu zruÅ¡it '%-.192s' (provést DROP). Zkontrolujte, zda neexistují záznamy/klíÄe"
dan "Kan ikke udføre DROP '%-.192s'. Undersøg om feltet/nøglen eksisterer."
nla "Kan '%-.192s' niet weggooien. Controleer of het veld of de zoeksleutel daadwerkelijk bestaat."
eng "Can't DROP '%-.192s'; check that column/key exists"
- jps "'%-.192s' を破棄ã§ãã¾ã›ã‚“ã§ã—ãŸ; check that column/key exists",
est "Ei suuda kustutada '%-.192s'. Kontrolli kas tulp/võti eksisteerib"
fre "Ne peut effacer (DROP) '%-.192s'. Vérifiez s'il existe"
ger "Kann '%-.192s' nicht löschen. Existiert die Spalte oder der Schlüssel?"
greek "ΑδÏνατη η διαγÏαφή (DROP) '%-.192s'. ΠαÏακαλώ ελέγξτε αν το πεδίο/κλειδί υπάÏχει"
hun "A DROP '%-.192s' nem lehetseges. Ellenorizze, hogy a mezo/kulcs letezik-e"
ita "Impossibile cancellare '%-.192s'. Controllare che il campo chiave esista"
- jpn "'%-.192s' を破棄ã§ãã¾ã›ã‚“ã§ã—ãŸ; check that column/key exists"
+ jpn "'%-.192s' を削除ã§ãã¾ã›ã‚“。列ï¼ç´¢å¼•ã®å­˜åœ¨ã‚’確èªã—ã¦ä¸‹ã•ã„。"
kor "'%-.192s'를 DROPí•  수 없습니다. 칼럼ì´ë‚˜ 키가 존재하는지 채í¬í•˜ì„¸ìš”."
nor "Kan ikke DROP '%-.192s'. Undersøk om felt/nøkkel eksisterer."
norwegian-ny "Kan ikkje DROP '%-.192s'. Undersøk om felt/nøkkel eksisterar."
@@ -2172,18 +2117,17 @@ ER_CANT_DROP_FIELD_OR_KEY 42000
swe "Kan inte ta bort '%-.192s'. Kontrollera att fältet/nyckel finns"
ukr "Ðе можу DROP '%-.192s'. Перевірте, чи цей Ñтовбець/ключ Ñ–Ñнує"
ER_INSERT_INFO
- cze "Z-Báznamů: %ld Zdvojených: %ld Varování: %ld"
+ cze "Záznamů: %ld Zdvojených: %ld Varování: %ld"
dan "Poster: %ld Ens: %ld Advarsler: %ld"
nla "Records: %ld Dubbel: %ld Waarschuwing: %ld"
eng "Records: %ld Duplicates: %ld Warnings: %ld"
- jps "レコード数: %ld é‡è¤‡æ•°: %ld Warnings: %ld",
est "Kirjeid: %ld Kattuvaid: %ld Hoiatusi: %ld"
fre "Enregistrements: %ld Doublons: %ld Avertissements: %ld"
ger "Datensätze: %ld Duplikate: %ld Warnungen: %ld"
greek "ΕγγÏαφές: %ld Επαναλήψεις: %ld ΠÏοειδοποιήσεις: %ld"
hun "Rekordok: %ld Duplikalva: %ld Warnings: %ld"
ita "Records: %ld Duplicati: %ld Avvertimenti: %ld"
- jpn "レコード数: %ld é‡è¤‡æ•°: %ld Warnings: %ld"
+ jpn "レコード数: %ld é‡è¤‡æ•°: %ld 警告: %ld"
kor "레코드: %ld개 중복: %ld개 경고: %ld개"
nor "Poster: %ld Like: %ld Advarsler: %ld"
norwegian-ny "Postar: %ld Like: %ld Ã…tvaringar: %ld"
@@ -2199,22 +2143,22 @@ ER_INSERT_INFO
ER_UPDATE_TABLE_USED
eng "You can't specify target table '%-.192s' for update in FROM clause"
ger "Die Verwendung der zu aktualisierenden Zieltabelle '%-.192s' ist in der FROM-Klausel nicht zulässig."
+ jpn "FROMå¥ã«ã‚る表 '%-.192s' ã¯UPDATEã®å¯¾è±¡ã«ã§ãã¾ã›ã‚“。"
rus "Ðе допуÑкаетÑÑ ÑƒÐºÐ°Ð·Ð°Ð½Ð¸Ðµ таблицы '%-.192s' в ÑпиÑке таблиц FROM Ð´Ð»Ñ Ð²Ð½ÐµÑÐµÐ½Ð¸Ñ Ð² нее изменений"
swe "INSERT-table '%-.192s' får inte finnas i FROM tabell-listan"
ukr "Ð¢Ð°Ð±Ð»Ð¸Ñ†Ñ '%-.192s' що змінюєтьÑÑ Ð½Ðµ дозволена у переліку таблиць FROM"
ER_NO_SUCH_THREAD
- cze "Nezn-Bámá identifikace threadu: %lu"
+ cze "Neznámá identifikace threadu: %lu"
dan "Ukendt tråd id: %lu"
nla "Onbekend thread id: %lu"
eng "Unknown thread id: %lu"
- jps "thread id: %lu ã¯ã‚ã‚Šã¾ã›ã‚“",
est "Tundmatu lõim: %lu"
fre "Numéro de tâche inconnu: %lu"
ger "Unbekannte Thread-ID: %lu"
greek "Αγνωστο thread id: %lu"
hun "Ervenytelen szal (thread) id: %lu"
ita "Thread id: %lu sconosciuto"
- jpn "thread id: %lu ã¯ã‚ã‚Šã¾ã›ã‚“"
+ jpn "ä¸æ˜Žãªã‚¹ãƒ¬ãƒƒãƒ‰IDã§ã™: %lu"
kor "알수 없는 쓰레드 id: %lu"
nor "Ukjent tråd id: %lu"
norwegian-ny "Ukjent tråd id: %lu"
@@ -2228,18 +2172,17 @@ ER_NO_SUCH_THREAD
swe "Finns ingen tråd med id %lu"
ukr "Ðевідомий ідентифікатор гілки: %lu"
ER_KILL_DENIED_ERROR
- cze "Nejste vlastn-Bíkem threadu %lu"
+ cze "Nejste vlastníkem threadu %lu"
dan "Du er ikke ejer af tråden %lu"
nla "U bent geen bezitter van thread %lu"
eng "You are not owner of thread %lu"
- jps "thread %lu ã®ã‚ªãƒ¼ãƒŠãƒ¼ã§ã¯ã‚ã‚Šã¾ã›ã‚“",
est "Ei ole lõime %lu omanik"
fre "Vous n'êtes pas propriétaire de la tâche no: %lu"
ger "Sie sind nicht Eigentümer von Thread %lu"
greek "Δεν είσθε owner του thread %lu"
hun "A %lu thread-nek mas a tulajdonosa"
ita "Utente non proprietario del thread %lu"
- jpn "thread %lu ã®ã‚ªãƒ¼ãƒŠãƒ¼ã§ã¯ã‚ã‚Šã¾ã›ã‚“"
+ jpn "スレッド %lu ã®ã‚ªãƒ¼ãƒŠãƒ¼ã§ã¯ã‚ã‚Šã¾ã›ã‚“。"
kor "쓰레드(Thread) %luì˜ ì†Œìœ ìžê°€ 아닙니다."
nor "Du er ikke eier av tråden %lu"
norwegian-ny "Du er ikkje eigar av tråd %lu"
@@ -2253,7 +2196,7 @@ ER_KILL_DENIED_ERROR
swe "Du är inte ägare till tråd %lu"
ukr "Ви не володар гілки %lu"
ER_NO_TABLES_USED
- cze "Nejsou pou-Bžity žádné tabulky"
+ cze "Nejsou použity žádné tabulky"
dan "Ingen tabeller i brug"
nla "Geen tabellen gebruikt."
eng "No tables used"
@@ -2263,6 +2206,7 @@ ER_NO_TABLES_USED
greek "Δεν χÏησιμοποιήθηκαν πίνακες"
hun "Nincs hasznalt tabla"
ita "Nessuna tabella usata"
+ jpn "表ãŒæŒ‡å®šã•ã‚Œã¦ã„ã¾ã›ã‚“。"
kor "ì–´ë–¤ í…Œì´ë¸”ë„ ì‚¬ìš©ë˜ì§€ 않았습니다."
nor "Ingen tabeller i bruk"
norwegian-ny "Ingen tabellar i bruk"
@@ -2276,7 +2220,7 @@ ER_NO_TABLES_USED
swe "Inga tabeller angivna"
ukr "Ðе викориÑтано таблиць"
ER_TOO_BIG_SET
- cze "P-Bříliš mnoho řetězců pro sloupec %-.192s a SET"
+ cze "Příliš mnoho řetězců pro sloupec %-.192s a SET"
dan "For mange tekststrenge til specifikationen af SET i kolonne %-.192s"
nla "Teveel strings voor kolom %-.192s en SET"
eng "Too many strings for column %-.192s and SET"
@@ -2286,6 +2230,7 @@ ER_TOO_BIG_SET
greek "ΠάÏα πολλά strings για το πεδίο %-.192s και SET"
hun "Tul sok karakter: %-.192s es SET"
ita "Troppe stringhe per la colonna %-.192s e la SET"
+ jpn "SETåž‹ã®åˆ— '%-.192s' ã®ãƒ¡ãƒ³ãƒãƒ¼ã®æ•°ãŒå¤šã™ãŽã¾ã™ã€‚"
kor "칼럼 %-.192s와 SETì—ì„œ 스트ë§ì´ 너무 많습니다."
nor "For mange tekststrenger kolonne %-.192s og SET"
norwegian-ny "For mange tekststrengar felt %-.192s og SET"
@@ -2299,7 +2244,7 @@ ER_TOO_BIG_SET
swe "För många alternativ till kolumn %-.192s för SET"
ukr "Забагато Ñтрок Ð´Ð»Ñ ÑÑ‚Ð¾Ð²Ð±Ñ†Ñ %-.192s та SET"
ER_NO_UNIQUE_LOGFILE
- cze "Nemohu vytvo-BÅ™it jednoznaÄné jméno logovacího souboru %-.200s.(1-999)\n"
+ cze "Nemohu vytvoÅ™it jednoznaÄné jméno logovacího souboru %-.200s.(1-999)\n"
dan "Kan ikke lave unikt log-filnavn %-.200s.(1-999)\n"
nla "Het is niet mogelijk een unieke naam te maken voor de logfile %-.200s.(1-999)\n"
eng "Can't generate a unique log-filename %-.200s.(1-999)\n"
@@ -2309,6 +2254,7 @@ ER_NO_UNIQUE_LOGFILE
greek "ΑδÏνατη η δημιουÏγία unique log-filename %-.200s.(1-999)\n"
hun "Egyedi log-filenev nem generalhato: %-.200s.(1-999)\n"
ita "Impossibile generare un nome del file log unico %-.200s.(1-999)\n"
+ jpn "一æ„ãªãƒ­ã‚°ãƒ•ã‚¡ã‚¤ãƒ«å %-.200s.(1-999) を生æˆã§ãã¾ã›ã‚“。\n"
kor "Unique ë¡œê·¸í™”ì¼ '%-.200s'를 만들수 없습니다.(1-999)\n"
nor "Kan ikke lage unikt loggfilnavn %-.200s.(1-999)\n"
norwegian-ny "Kan ikkje lage unikt loggfilnavn %-.200s.(1-999)\n"
@@ -2322,18 +2268,17 @@ ER_NO_UNIQUE_LOGFILE
swe "Kan inte generera ett unikt filnamn %-.200s.(1-999)\n"
ukr "Ðе можу згенерувати унікальне ім'Ñ log-файлу %-.200s.(1-999)\n"
ER_TABLE_NOT_LOCKED_FOR_WRITE
- cze "Tabulka '%-.192s' byla zam-BÄena s READ a nemůže být zmÄ›nÄ›na"
+ cze "Tabulka '%-.192s' byla zamÄena s READ a nemůže být zmÄ›nÄ›na"
dan "Tabellen '%-.192s' var låst med READ lås og kan ikke opdateres"
nla "Tabel '%-.192s' was gelocked met een lock om te lezen. Derhalve kunnen geen wijzigingen worden opgeslagen."
eng "Table '%-.192s' was locked with a READ lock and can't be updated"
- jps "Table '%-.192s' 㯠READ lock ã«ãªã£ã¦ã„ã¦ã€æ›´æ–°ã¯ã§ãã¾ã›ã‚“",
est "Tabel '%-.192s' on lukustatud READ lukuga ning ei ole muudetav"
fre "Table '%-.192s' verrouillée lecture (READ): modification impossible"
ger "Tabelle '%-.192s' ist mit Lesesperre versehen und kann nicht aktualisiert werden"
greek "Ο πίνακας '%-.192s' έχει κλειδωθεί με READ lock και δεν επιτÏέπονται αλλαγές"
hun "A(z) '%-.192s' tabla zarolva lett (READ lock) es nem lehet frissiteni"
ita "La tabella '%-.192s' e` soggetta a lock in lettura e non puo` essere aggiornata"
- jpn "Table '%-.192s' 㯠READ lock ã«ãªã£ã¦ã„ã¦ã€æ›´æ–°ã¯ã§ãã¾ã›ã‚“"
+ jpn "表 '%-.192s' ã¯READロックã•ã‚Œã¦ã„ã¦ã€æ›´æ–°ã§ãã¾ã›ã‚“。"
kor "í…Œì´ë¸” '%-.192s'는 READ ë½ì´ 잠겨있어서 갱신할 수 없습니다."
nor "Tabellen '%-.192s' var låst med READ lås og kan ikke oppdateres"
norwegian-ny "Tabellen '%-.192s' var låst med READ lås og kan ikkje oppdaterast"
@@ -2347,18 +2292,17 @@ ER_TABLE_NOT_LOCKED_FOR_WRITE
swe "Tabell '%-.192s' kan inte uppdateras emedan den är låst för läsning"
ukr "Таблицю '%-.192s' заблоковано тільки Ð´Ð»Ñ Ñ‡Ð¸Ñ‚Ð°Ð½Ð½Ñ, тому Ñ—Ñ— не можна оновити"
ER_TABLE_NOT_LOCKED
- cze "Tabulka '%-.192s' nebyla zam-BÄena s LOCK TABLES"
+ cze "Tabulka '%-.192s' nebyla zamÄena s LOCK TABLES"
dan "Tabellen '%-.192s' var ikke låst med LOCK TABLES"
nla "Tabel '%-.192s' was niet gelocked met LOCK TABLES"
eng "Table '%-.192s' was not locked with LOCK TABLES"
- jps "Table '%-.192s' 㯠LOCK TABLES ã«ã‚ˆã£ã¦ãƒ­ãƒƒã‚¯ã•ã‚Œã¦ã„ã¾ã›ã‚“",
est "Tabel '%-.192s' ei ole lukustatud käsuga LOCK TABLES"
fre "Table '%-.192s' non verrouillée: utilisez LOCK TABLES"
ger "Tabelle '%-.192s' wurde nicht mit LOCK TABLES gesperrt"
greek "Ο πίνακας '%-.192s' δεν έχει κλειδωθεί με LOCK TABLES"
hun "A(z) '%-.192s' tabla nincs zarolva a LOCK TABLES-szel"
ita "Non e` stato impostato il lock per la tabella '%-.192s' con LOCK TABLES"
- jpn "Table '%-.192s' 㯠LOCK TABLES ã«ã‚ˆã£ã¦ãƒ­ãƒƒã‚¯ã•ã‚Œã¦ã„ã¾ã›ã‚“"
+ jpn "表 '%-.192s' 㯠LOCK TABLES ã§ãƒ­ãƒƒã‚¯ã•ã‚Œã¦ã„ã¾ã›ã‚“。"
kor "í…Œì´ë¸” '%-.192s'는 LOCK TABLES 명령으로 잠기지 않았습니다."
nor "Tabellen '%-.192s' var ikke låst med LOCK TABLES"
norwegian-ny "Tabellen '%-.192s' var ikkje låst med LOCK TABLES"
@@ -2372,7 +2316,7 @@ ER_TABLE_NOT_LOCKED
swe "Tabell '%-.192s' är inte låst med LOCK TABLES"
ukr "Таблицю '%-.192s' не було блоковано з LOCK TABLES"
ER_BLOB_CANT_HAVE_DEFAULT 42000
- cze "Blob polo-Bžka '%-.192s' nemůže mít defaultní hodnotu"
+ cze "Blob položka '%-.192s' nemůže mít defaultní hodnotu"
dan "BLOB feltet '%-.192s' kan ikke have en standard værdi"
nla "Blob veld '%-.192s' can geen standaardwaarde bevatten"
eng "BLOB/TEXT column '%-.192s' can't have a default value"
@@ -2382,7 +2326,7 @@ ER_BLOB_CANT_HAVE_DEFAULT 42000
greek "Τα Blob πεδία '%-.192s' δεν μποÏοÏν να έχουν Ï€ÏοκαθοÏισμένες τιμές (default value)"
hun "A(z) '%-.192s' blob objektumnak nem lehet alapertelmezett erteke"
ita "Il campo BLOB '%-.192s' non puo` avere un valore di default"
- jpn "BLOB column '%-.192s' can't have a default value"
+ jpn "BLOB/TEXT 列 '%-.192s' ã«ã¯ãƒ‡ãƒ•ã‚©ãƒ«ãƒˆå€¤ã‚’指定ã§ãã¾ã›ã‚“。"
kor "BLOB 칼럼 '%-.192s' 는 ë””í´íŠ¸ ê°’ì„ ê°€ì§ˆ 수 없습니다."
nor "Blob feltet '%-.192s' kan ikke ha en standard verdi"
norwegian-ny "Blob feltet '%-.192s' kan ikkje ha ein standard verdi"
@@ -2396,18 +2340,17 @@ ER_BLOB_CANT_HAVE_DEFAULT 42000
swe "BLOB fält '%-.192s' kan inte ha ett DEFAULT-värde"
ukr "Стовбець BLOB '%-.192s' не може мати Ð·Ð½Ð°Ñ‡ÐµÐ½Ð½Ñ Ð¿Ð¾ замовчуванню"
ER_WRONG_DB_NAME 42000
- cze "Nep-Břípustné jméno databáze '%-.100s'"
+ cze "Nepřípustné jméno databáze '%-.100s'"
dan "Ugyldigt database navn '%-.100s'"
nla "Databasenaam '%-.100s' is niet getoegestaan"
eng "Incorrect database name '%-.100s'"
- jps "指定ã—㟠database å '%-.100s' ãŒé–“é•ã£ã¦ã„ã¾ã™",
est "Vigane andmebaasi nimi '%-.100s'"
fre "Nom de base de donnée illégal: '%-.100s'"
ger "Unerlaubter Datenbankname '%-.100s'"
greek "Λάθος όνομα βάσης δεδομένων '%-.100s'"
hun "Hibas adatbazisnev: '%-.100s'"
ita "Nome database errato '%-.100s'"
- jpn "指定ã—㟠database å '%-.100s' ãŒé–“é•ã£ã¦ã„ã¾ã™"
+ jpn "データベースå '%-.100s' ã¯ä¸æ­£ã§ã™ã€‚"
kor "'%-.100s' ë°ì´íƒ€ë² ì´ìŠ¤ì˜ ì´ë¦„ì´ ë¶€ì •í™•í•©ë‹ˆë‹¤."
nor "Ugyldig database navn '%-.100s'"
norwegian-ny "Ugyldig database namn '%-.100s'"
@@ -2421,18 +2364,17 @@ ER_WRONG_DB_NAME 42000
swe "Felaktigt databasnamn '%-.100s'"
ukr "Ðевірне ім'Ñ Ð±Ð°Ð·Ð¸ данних '%-.100s'"
ER_WRONG_TABLE_NAME 42000
- cze "Nep-Břípustné jméno tabulky '%-.100s'"
+ cze "Nepřípustné jméno tabulky '%-.100s'"
dan "Ugyldigt tabel navn '%-.100s'"
nla "Niet toegestane tabelnaam '%-.100s'"
eng "Incorrect table name '%-.100s'"
- jps "指定ã—㟠table å '%-.100s' ã¯ã¾ã¡ãŒã£ã¦ã„ã¾ã™",
est "Vigane tabeli nimi '%-.100s'"
fre "Nom de table illégal: '%-.100s'"
ger "Unerlaubter Tabellenname '%-.100s'"
greek "Λάθος όνομα πίνακα '%-.100s'"
hun "Hibas tablanev: '%-.100s'"
ita "Nome tabella errato '%-.100s'"
- jpn "指定ã—㟠table å '%-.100s' ã¯ã¾ã¡ãŒã£ã¦ã„ã¾ã™"
+ jpn "表å '%-.100s' ã¯ä¸æ­£ã§ã™ã€‚"
kor "'%-.100s' í…Œì´ë¸” ì´ë¦„ì´ ë¶€ì •í™•í•©ë‹ˆë‹¤."
nor "Ugyldig tabell navn '%-.100s'"
norwegian-ny "Ugyldig tabell namn '%-.100s'"
@@ -2446,7 +2388,7 @@ ER_WRONG_TABLE_NAME 42000
swe "Felaktigt tabellnamn '%-.100s'"
ukr "Ðевірне ім'Ñ Ñ‚Ð°Ð±Ð»Ð¸Ñ†Ñ– '%-.100s'"
ER_TOO_BIG_SELECT 42000
- cze "Zadan-Bý SELECT by procházel příliš mnoho záznamů a trval velmi dlouho. Zkontrolujte tvar WHERE a je-li SELECT v pořádku, použijte SET SQL_BIG_SELECTS=1"
+ cze "Zadaný SELECT by procházel příliš mnoho záznamů a trval velmi dlouho. Zkontrolujte tvar WHERE a je-li SELECT v pořádku, použijte SET SQL_BIG_SELECTS=1"
dan "SELECT ville undersøge for mange poster og ville sandsynligvis tage meget lang tid. Undersøg WHERE delen og brug SET SQL_BIG_SELECTS=1 hvis udtrykket er korrekt"
nla "Het SELECT-statement zou te veel records analyseren en dus veel tijd in beslagnemen. Kijk het WHERE-gedeelte van de query na en kies SET SQL_BIG_SELECTS=1 als het stament in orde is."
eng "The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET MAX_JOIN_SIZE=# if the SELECT is okay"
@@ -2456,6 +2398,7 @@ ER_TOO_BIG_SELECT 42000
greek "Το SELECT θα εξετάσει μεγάλο αÏιθμό εγγÏαφών και πιθανώς θα καθυστεÏήσει. ΠαÏακαλώ εξετάστε τις παÏαμέτÏους του WHERE και χÏησιμοποιείστε SET SQL_BIG_SELECTS=1 αν το SELECT είναι σωστό"
hun "A SELECT tul sok rekordot fog megvizsgalni es nagyon sokaig fog tartani. Ellenorizze a WHERE-t es hasznalja a SET SQL_BIG_SELECTS=1 beallitast, ha a SELECT okay"
ita "La SELECT dovrebbe esaminare troppi record e usare troppo tempo. Controllare la WHERE e usa SET SQL_BIG_SELECTS=1 se e` tutto a posto."
+ jpn "SELECTãŒMAX_JOIN_SIZEを超ãˆã‚‹è¡Œæ•°ã‚’処ç†ã—ã¾ã—ãŸã€‚WHEREå¥ã‚’確èªã—ã€SELECTæ–‡ã«å•é¡ŒãŒãªã‘ã‚Œã°ã€ SET SQL_BIG_SELECTS=1 ã¾ãŸã¯ SET MAX_JOIN_SIZE=# を使用ã—ã¦ä¸‹ã•ã„。"
kor "SELECT 명령ì—ì„œ 너무 ë§Žì€ ë ˆì½”ë“œë¥¼ 찾기 ë•Œë¬¸ì— ë§Žì€ ì‹œê°„ì´ ì†Œìš”ë©ë‹ˆë‹¤. ë”°ë¼ì„œ WHERE ë¬¸ì„ ì ê²€í•˜ê±°ë‚˜, 만약 SELECTê°€ okë˜ë©´ SET SQL_BIG_SELECTS=1 ì˜µì…˜ì„ ì‚¬ìš©í•˜ì„¸ìš”."
nor "SELECT ville undersøke for mange poster og ville sannsynligvis ta veldig lang tid. Undersøk WHERE klausulen og bruk SET SQL_BIG_SELECTS=1 om SELECTen er korrekt"
norwegian-ny "SELECT ville undersøkje for mange postar og ville sannsynligvis ta veldig lang tid. Undersøk WHERE klausulen og bruk SET SQL_BIG_SELECTS=1 om SELECTen er korrekt"
@@ -2469,7 +2412,7 @@ ER_TOO_BIG_SELECT 42000
swe "Den angivna frågan skulle läsa mer än MAX_JOIN_SIZE rader. Kontrollera din WHERE och använd SET SQL_BIG_SELECTS=1 eller SET MAX_JOIN_SIZE=# ifall du vill hantera stora joins"
ukr "Запиту SELECT потрібно обробити багато запиÑів, що, певне, займе дуже багато чаÑу. Перевірте ваше WHERE та викориÑтовуйте SET SQL_BIG_SELECTS=1, Ñкщо цей запит SELECT Ñ” вірним"
ER_UNKNOWN_ERROR
- cze "Nezn-Bámá chyba"
+ cze "Neznámá chyba"
dan "Ukendt fejl"
nla "Onbekende Fout"
eng "Unknown error"
@@ -2479,6 +2422,7 @@ ER_UNKNOWN_ERROR
greek "ΠÏοέκυψε άγνωστο λάθος"
hun "Ismeretlen hiba"
ita "Errore sconosciuto"
+ jpn "ä¸æ˜Žãªã‚¨ãƒ©ãƒ¼"
kor "알수 없는 ì—러입니다."
nor "Ukjent feil"
norwegian-ny "Ukjend feil"
@@ -2488,10 +2432,10 @@ ER_UNKNOWN_ERROR
serbian "Nepoznata greška"
slo "Neznámá chyba"
spa "Error desconocido"
- swe "Oidentifierat fel"
+ swe "Okänt fel"
ukr "Ðевідома помилка"
ER_UNKNOWN_PROCEDURE 42000
- cze "Nezn-Bámá procedura %-.192s"
+ cze "Neznámá procedura %-.192s"
dan "Ukendt procedure %-.192s"
nla "Onbekende procedure %-.192s"
eng "Unknown procedure '%-.192s'"
@@ -2501,6 +2445,7 @@ ER_UNKNOWN_PROCEDURE 42000
greek "Αγνωστη διαδικασία '%-.192s'"
hun "Ismeretlen eljaras: '%-.192s'"
ita "Procedura '%-.192s' sconosciuta"
+ jpn "'%-.192s' ã¯ä¸æ˜Žãªãƒ—ロシージャã§ã™ã€‚"
kor "알수 없는 수행문 : '%-.192s'"
nor "Ukjent prosedyre %-.192s"
norwegian-ny "Ukjend prosedyre %-.192s"
@@ -2514,7 +2459,7 @@ ER_UNKNOWN_PROCEDURE 42000
swe "Okänd procedur: %-.192s"
ukr "Ðевідома процедура '%-.192s'"
ER_WRONG_PARAMCOUNT_TO_PROCEDURE 42000
- cze "Chybn-Bý poÄet parametrů procedury %-.192s"
+ cze "Chybný poÄet parametrů procedury %-.192s"
dan "Forkert antal parametre til proceduren %-.192s"
nla "Foutief aantal parameters doorgegeven aan procedure %-.192s"
eng "Incorrect parameter count to procedure '%-.192s'"
@@ -2524,6 +2469,7 @@ ER_WRONG_PARAMCOUNT_TO_PROCEDURE 42000
greek "Λάθος αÏιθμός παÏαμέτÏων στη διαδικασία '%-.192s'"
hun "Rossz parameter a(z) '%-.192s'eljaras szamitasanal"
ita "Numero di parametri errato per la procedura '%-.192s'"
+ jpn "プロシージャ '%-.192s' ã¸ã®ãƒ‘ラメータ数ãŒä¸æ­£ã§ã™ã€‚"
kor "'%-.192s' ìˆ˜í–‰ë¬¸ì— ëŒ€í•œ 부정확한 파ë¼ë©”í„°"
nor "Feil parameter antall til prosedyren %-.192s"
norwegian-ny "Feil parameter tal til prosedyra %-.192s"
@@ -2537,7 +2483,7 @@ ER_WRONG_PARAMCOUNT_TO_PROCEDURE 42000
swe "Felaktigt antal parametrar till procedur %-.192s"
ukr "Хибна кількіÑÑ‚ÑŒ параметрів процедури '%-.192s'"
ER_WRONG_PARAMETERS_TO_PROCEDURE
- cze "Chybn-Bé parametry procedury %-.192s"
+ cze "Chybné parametry procedury %-.192s"
dan "Forkert(e) parametre til proceduren %-.192s"
nla "Foutieve parameters voor procedure %-.192s"
eng "Incorrect parameters to procedure '%-.192s'"
@@ -2547,6 +2493,7 @@ ER_WRONG_PARAMETERS_TO_PROCEDURE
greek "Λάθος παÏάμετÏοι στην διαδικασία '%-.192s'"
hun "Rossz parameter a(z) '%-.192s' eljarasban"
ita "Parametri errati per la procedura '%-.192s'"
+ jpn "プロシージャ '%-.192s' ã¸ã®ãƒ‘ラメータãŒä¸æ­£ã§ã™ã€‚"
kor "'%-.192s' ìˆ˜í–‰ë¬¸ì— ëŒ€í•œ 부정확한 파ë¼ë©”í„°"
nor "Feil parametre til prosedyren %-.192s"
norwegian-ny "Feil parameter til prosedyra %-.192s"
@@ -2560,7 +2507,7 @@ ER_WRONG_PARAMETERS_TO_PROCEDURE
swe "Felaktiga parametrar till procedur %-.192s"
ukr "Хибний параметер процедури '%-.192s'"
ER_UNKNOWN_TABLE 42S02
- cze "Nezn-Bámá tabulka '%-.192s' v %-.32s"
+ cze "Neznámá tabulka '%-.192s' v %-.32s"
dan "Ukendt tabel '%-.192s' i %-.32s"
nla "Onbekende tabel '%-.192s' in %-.32s"
eng "Unknown table '%-.192s' in %-.32s"
@@ -2570,7 +2517,7 @@ ER_UNKNOWN_TABLE 42S02
greek "Αγνωστος πίνακας '%-.192s' σε %-.32s"
hun "Ismeretlen tabla: '%-.192s' %-.32s-ban"
ita "Tabella '%-.192s' sconosciuta in %-.32s"
- jpn "Unknown table '%-.192s' in %-.32s"
+ jpn "'%-.192s' 㯠%-.32s ã§ã¯ä¸æ˜Žãªè¡¨ã§ã™ã€‚"
kor "알수 없는 í…Œì´ë¸” '%-.192s' (ë°ì´íƒ€ë² ì´ìŠ¤ %-.32s)"
nor "Ukjent tabell '%-.192s' i %-.32s"
norwegian-ny "Ukjend tabell '%-.192s' i %-.32s"
@@ -2584,7 +2531,7 @@ ER_UNKNOWN_TABLE 42S02
swe "Okänd tabell '%-.192s' i '%-.32s'"
ukr "Ðевідома Ñ‚Ð°Ð±Ð»Ð¸Ñ†Ñ '%-.192s' у %-.32s"
ER_FIELD_SPECIFIED_TWICE 42000
- cze "Polo-Bžka '%-.192s' je zadána dvakrát"
+ cze "Položka '%-.192s' je zadána dvakrát"
dan "Feltet '%-.192s' er anvendt to gange"
nla "Veld '%-.192s' is dubbel gespecificeerd"
eng "Column '%-.192s' specified twice"
@@ -2594,6 +2541,7 @@ ER_FIELD_SPECIFIED_TWICE 42000
greek "Το πεδίο '%-.192s' έχει οÏισθεί δÏο φοÏές"
hun "A(z) '%-.192s' mezot ketszer definialta"
ita "Campo '%-.192s' specificato 2 volte"
+ jpn "列 '%-.192s' ã¯2回指定ã•ã‚Œã¦ã„ã¾ã™ã€‚"
kor "칼럼 '%-.192s'는 ë‘번 ì •ì˜ë˜ì–´ 있ì니다."
nor "Feltet '%-.192s' er spesifisert to ganger"
norwegian-ny "Feltet '%-.192s' er spesifisert to gangar"
@@ -2607,7 +2555,7 @@ ER_FIELD_SPECIFIED_TWICE 42000
swe "Fält '%-.192s' är redan använt"
ukr "Стовбець '%-.192s' зазначено двічі"
ER_INVALID_GROUP_FUNC_USE
- cze "Nespr-Bávné použití funkce group"
+ cze "Nesprávné použití funkce group"
dan "Forkert brug af grupperings-funktion"
nla "Ongeldig gebruik van GROUP-functie"
eng "Invalid use of group function"
@@ -2617,6 +2565,7 @@ ER_INVALID_GROUP_FUNC_USE
greek "Εσφαλμένη χÏήση της group function"
hun "A group funkcio ervenytelen hasznalata"
ita "Uso non valido di una funzione di raggruppamento"
+ jpn "集計関数ã®ä½¿ç”¨æ–¹æ³•ãŒä¸æ­£ã§ã™ã€‚"
kor "ìž˜ëª»ëœ ê·¸ë£¹ 함수를 사용하였습니다."
por "Uso inválido de função de agrupamento (GROUP)"
rum "Folosire incorecta a functiei group"
@@ -2627,7 +2576,7 @@ ER_INVALID_GROUP_FUNC_USE
swe "Felaktig användning av SQL grupp function"
ukr "Хибне викориÑÑ‚Ð°Ð½Ð½Ñ Ñ„ÑƒÐ½ÐºÑ†Ñ–Ñ— групуваннÑ"
ER_UNSUPPORTED_EXTENSION 42000
- cze "Tabulka '%-.192s' pou-Bžívá rozšíření, které v této verzi MariaDB není"
+ cze "Tabulka '%-.192s' používá rozšíření, které v této verzi MariaDB není"
dan "Tabellen '%-.192s' bruger et filtypenavn som ikke findes i denne MariaDB version"
nla "Tabel '%-.192s' gebruikt een extensie, die niet in deze MariaDB-versie voorkomt."
eng "Table '%-.192s' uses an extension that doesn't exist in this MariaDB version"
@@ -2637,6 +2586,7 @@ ER_UNSUPPORTED_EXTENSION 42000
greek "Ο πίνακς '%-.192s' χÏησιμοποιεί κάποιο extension που δεν υπάÏχει στην έκδοση αυτή της MariaDB"
hun "A(z) '%-.192s' tabla olyan bovitest hasznal, amely nem letezik ebben a MariaDB versioban."
ita "La tabella '%-.192s' usa un'estensione che non esiste in questa versione di MariaDB"
+ jpn "表 '%-.192s' ã¯ã€ã“ã®MariaDBãƒãƒ¼ã‚¸ãƒ§ãƒ³ã«ã¯ç„¡ã„機能を使用ã—ã¦ã„ã¾ã™ã€‚"
kor "í…Œì´ë¸” '%-.192s'는 í™•ìž¥ëª…ë ¹ì„ ì´ìš©í•˜ì§€ë§Œ í˜„ìž¬ì˜ MariaDB 버젼ì—서는 존재하지 않습니다."
nor "Table '%-.192s' uses a extension that doesn't exist in this MariaDB version"
norwegian-ny "Table '%-.192s' uses a extension that doesn't exist in this MariaDB version"
@@ -2650,18 +2600,17 @@ ER_UNSUPPORTED_EXTENSION 42000
swe "Tabell '%-.192s' har en extension som inte finns i denna version av MariaDB"
ukr "Ð¢Ð°Ð±Ð»Ð¸Ñ†Ñ '%-.192s' викориÑтовує розширеннÑ, що не Ñ–Ñнує у цій верÑÑ–Ñ— MariaDB"
ER_TABLE_MUST_HAVE_COLUMNS 42000
- cze "Tabulka mus-Bí mít alespoň jeden sloupec"
+ cze "Tabulka musí mít alespoň jeden sloupec"
dan "En tabel skal have mindst een kolonne"
nla "Een tabel moet minstens 1 kolom bevatten"
eng "A table must have at least 1 column"
- jps "テーブルã¯æœ€ä½Ž 1 個㮠column ãŒå¿…è¦ã§ã™",
est "Tabelis peab olema vähemalt üks tulp"
fre "Une table doit comporter au moins une colonne"
ger "Eine Tabelle muss mindestens eine Spalte besitzen"
greek "Ενας πίνακας Ï€Ïέπει να έχει τουλάχιστον ένα πεδίο"
hun "A tablanak legalabb egy oszlopot tartalmazni kell"
ita "Una tabella deve avere almeno 1 colonna"
- jpn "テーブルã¯æœ€ä½Ž 1 個㮠column ãŒå¿…è¦ã§ã™"
+ jpn "表ã«ã¯æœ€ä½Žã§ã‚‚1個ã®åˆ—ãŒå¿…è¦ã§ã™ã€‚"
kor "í•˜ë‚˜ì˜ í…Œì´ë¸”ì—서는 ì ì–´ë„ í•˜ë‚˜ì˜ ì¹¼ëŸ¼ì´ ì¡´ìž¬í•˜ì—¬ì•¼ 합니다."
por "Uma tabela tem que ter pelo menos uma (1) coluna"
rum "O tabela trebuie sa aiba cel putin o coloana"
@@ -2672,18 +2621,17 @@ ER_TABLE_MUST_HAVE_COLUMNS 42000
swe "Tabeller måste ha minst 1 kolumn"
ukr "Ð¢Ð°Ð±Ð»Ð¸Ñ†Ñ Ð¿Ð¾Ð²Ð¸Ð½Ð½Ð° мати хочаб один Ñтовбець"
ER_RECORD_FILE_FULL
- cze "Tabulka '%-.192s' je pln-Bá"
+ cze "Tabulka '%-.192s' je plná"
dan "Tabellen '%-.192s' er fuld"
nla "De tabel '%-.192s' is vol"
eng "The table '%-.192s' is full"
- jps "table '%-.192s' ã¯ã„ã£ã±ã„ã§ã™",
est "Tabel '%-.192s' on täis"
fre "La table '%-.192s' est pleine"
ger "Tabelle '%-.192s' ist voll"
greek "Ο πίνακας '%-.192s' είναι γεμάτος"
hun "A '%-.192s' tabla megtelt"
ita "La tabella '%-.192s' e` piena"
- jpn "table '%-.192s' ã¯ã„ã£ã±ã„ã§ã™"
+ jpn "表 '%-.192s' ã¯æº€æ¯ã§ã™ã€‚"
kor "í…Œì´ë¸” '%-.192s'ê°€ full났습니다. "
por "Tabela '%-.192s' está cheia"
rum "Tabela '%-.192s' e plina"
@@ -2694,18 +2642,17 @@ ER_RECORD_FILE_FULL
swe "Tabellen '%-.192s' är full"
ukr "Ð¢Ð°Ð±Ð»Ð¸Ñ†Ñ '%-.192s' заповнена"
ER_UNKNOWN_CHARACTER_SET 42000
- cze "Nezn-Bámá znaková sada: '%-.64s'"
+ cze "Neznámá znaková sada: '%-.64s'"
dan "Ukendt tegnsæt: '%-.64s'"
nla "Onbekende character set: '%-.64s'"
eng "Unknown character set: '%-.64s'"
- jps "character set '%-.64s' ã¯ã‚µãƒãƒ¼ãƒˆã—ã¦ã„ã¾ã›ã‚“",
est "Vigane kooditabel '%-.64s'"
fre "Jeu de caractères inconnu: '%-.64s'"
ger "Unbekannter Zeichensatz: '%-.64s'"
greek "Αγνωστο character set: '%-.64s'"
hun "Ervenytelen karakterkeszlet: '%-.64s'"
ita "Set di caratteri '%-.64s' sconosciuto"
- jpn "character set '%-.64s' ã¯ã‚µãƒãƒ¼ãƒˆã—ã¦ã„ã¾ã›ã‚“"
+ jpn "ä¸æ˜Žãªæ–‡å­—コードセット: '%-.64s'"
kor "알수없는 언어 Set: '%-.64s'"
por "Conjunto de caracteres '%-.64s' desconhecido"
rum "Set de caractere invalid: '%-.64s'"
@@ -2716,18 +2663,17 @@ ER_UNKNOWN_CHARACTER_SET 42000
swe "Okänd teckenuppsättning: '%-.64s'"
ukr "Ðевідома кодова таблицÑ: '%-.64s'"
ER_TOO_MANY_TABLES
- cze "P-Bříliš mnoho tabulek, MariaDB jich může mít v joinu jen %d"
+ cze "Příliš mnoho tabulek, MariaDB jich může mít v joinu jen %d"
dan "For mange tabeller. MariaDB kan kun bruge %d tabeller i et join"
nla "Teveel tabellen. MariaDB kan slechts %d tabellen in een join bevatten"
eng "Too many tables; MariaDB can only use %d tables in a join"
- jps "テーブルãŒå¤šã™ãŽã¾ã™; MariaDB can only use %d tables in a join",
est "Liiga palju tabeleid. MariaDB suudab JOINiga ühendada kuni %d tabelit"
fre "Trop de tables. MariaDB ne peut utiliser que %d tables dans un JOIN"
ger "Zu viele Tabellen. MariaDB kann in einem Join maximal %d Tabellen verwenden"
greek "Î Î¿Î»Ï Î¼ÎµÎ³Î¬Î»Î¿Ï‚ αÏιθμός πινάκων. Η MariaDB μποÏεί να χÏησιμοποιήσει %d πίνακες σε διαδικασία join"
hun "Tul sok tabla. A MariaDB csak %d tablat tud kezelni osszefuzeskor"
ita "Troppe tabelle. MariaDB puo` usare solo %d tabelle in una join"
- jpn "テーブルãŒå¤šã™ãŽã¾ã™; MariaDB can only use %d tables in a join"
+ jpn "表ãŒå¤šã™ãŽã¾ã™ã€‚MariaDBãŒJOINã§ãる表㯠%d 個ã¾ã§ã§ã™ã€‚"
kor "너무 ë§Žì€ í…Œì´ë¸”ì´ Joinë˜ì—ˆìŠµë‹ˆë‹¤. MariaDBì—서는 JOINì‹œ %dê°œì˜ í…Œì´ë¸”만 사용할 수 있습니다."
por "Tabelas demais. O MariaDB pode usar somente %d tabelas em uma junção (JOIN)"
rum "Prea multe tabele. MariaDB nu poate folosi mai mult de %d tabele intr-un join"
@@ -2738,18 +2684,17 @@ ER_TOO_MANY_TABLES
swe "För många tabeller. MariaDB can ha högst %d tabeller i en och samma join"
ukr "Забагато таблиць. MariaDB може викориÑтовувати лише %d таблиць у об'єднанні"
ER_TOO_MANY_FIELDS
- cze "P-Bříliš mnoho položek"
+ cze "Příliš mnoho položek"
dan "For mange felter"
nla "Te veel velden"
eng "Too many columns"
- jps "column ãŒå¤šã™ãŽã¾ã™",
est "Liiga palju tulpasid"
fre "Trop de champs"
ger "Zu viele Felder"
greek "Î Î¿Î»Ï Î¼ÎµÎ³Î¬Î»Î¿Ï‚ αÏιθμός πεδίων"
hun "Tul sok mezo"
ita "Troppi campi"
- jpn "column ãŒå¤šã™ãŽã¾ã™"
+ jpn "列ãŒå¤šã™ãŽã¾ã™ã€‚"
kor "ì¹¼ëŸ¼ì´ ë„ˆë¬´ 많습니다."
por "Colunas demais"
rum "Prea multe coloane"
@@ -2760,18 +2705,17 @@ ER_TOO_MANY_FIELDS
swe "För många fält"
ukr "Забагато Ñтовбців"
ER_TOO_BIG_ROWSIZE 42000
- cze "-BŘádek je příliÅ¡ velký. Maximální velikost řádku, nepoÄítaje položky blob, je %ld. Musíte zmÄ›nit nÄ›které položky na blob"
+ cze "Řádek je příliÅ¡ velký. Maximální velikost řádku, nepoÄítaje položky blob, je %ld. Musíte zmÄ›nit nÄ›které položky na blob"
dan "For store poster. Max post størrelse, uden BLOB's, er %ld. Du må lave nogle felter til BLOB's"
nla "Rij-grootte is groter dan toegestaan. Maximale rij grootte, blobs niet meegeteld, is %ld. U dient sommige velden in blobs te veranderen."
eng "Row size too large. The maximum row size for the used table type, not counting BLOBs, is %ld. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs"
- jps "row size ãŒå¤§ãã™ãŽã¾ã™. BLOB ã‚’å«ã¾ãªã„å ´åˆã® row size ã®æœ€å¤§ã¯ %ld ã§ã™. ã„ãã¤ã‹ã® field ã‚’ BLOB ã«å¤‰ãˆã¦ãã ã•ã„.",
est "Liiga pikk kirje. Kirje maksimumpikkus arvestamata BLOB-tüüpi välju on %ld. Muuda mõned väljad BLOB-tüüpi väljadeks"
fre "Ligne trop grande. Le taille maximale d'une ligne, sauf les BLOBs, est %ld. Changez le type de quelques colonnes en BLOB"
ger "Zeilenlänge zu groß. Die maximale Zeilenlänge für den verwendeten Tabellentyp (ohne BLOB-Felder) beträgt %ld. Einige Felder müssen in BLOB oder TEXT umgewandelt werden"
greek "Î Î¿Î»Ï Î¼ÎµÎ³Î¬Î»Î¿ μέγεθος εγγÏαφής. Το μέγιστο μέγεθος εγγÏαφής, χωÏίς να υπολογίζονται τα blobs, είναι %ld. ΠÏέπει να οÏίσετε κάποια πεδία σαν blobs"
hun "Tul nagy sormeret. A maximalis sormeret (nem szamolva a blob objektumokat) %ld. Nehany mezot meg kell valtoztatnia"
ita "Riga troppo grande. La massima grandezza di una riga, non contando i BLOB, e` %ld. Devi cambiare alcuni campi in BLOB"
- jpn "row size ãŒå¤§ãã™ãŽã¾ã™. BLOB ã‚’å«ã¾ãªã„å ´åˆã® row size ã®æœ€å¤§ã¯ %ld ã§ã™. ã„ãã¤ã‹ã® field ã‚’ BLOB ã«å¤‰ãˆã¦ãã ã•ã„."
+ jpn "行サイズãŒå¤§ãã™ãŽã¾ã™ã€‚ã“ã®è¡¨ã®æœ€å¤§è¡Œã‚µã‚¤ã‚ºã¯ BLOB ã‚’å«ã¾ãšã« %ld ã§ã™ã€‚æ ¼ç´æ™‚ã®ã‚ªãƒ¼ãƒãƒ¼ãƒ˜ãƒƒãƒ‰ã‚‚å«ã¾ã‚Œã¾ã™(マニュアルを確èªã—ã¦ãã ã•ã„)。列をTEXTã¾ãŸã¯BLOBã«å¤‰æ›´ã™ã‚‹å¿…è¦ãŒã‚ã‚Šã¾ã™ã€‚"
kor "너무 í° row 사ì´ì¦ˆìž…니다. BLOB를 계산하지 ì•Šê³  최대 row 사ì´ì¦ˆëŠ” %ld입니다. ì–¼ë§ˆê°„ì˜ í•„ë“œë“¤ì„ BLOBë¡œ 바꾸셔야 ê² êµ°ìš”.."
por "Tamanho de linha grande demais. O máximo tamanho de linha, não contando BLOBs, é %ld. Você tem que mudar alguns campos para BLOBs"
rum "Marimea liniei (row) prea mare. Marimea maxima a liniei, excluzind BLOB-urile este de %ld. Trebuie sa schimbati unele cimpuri in BLOB-uri"
@@ -2782,17 +2726,16 @@ ER_TOO_BIG_ROWSIZE 42000
swe "För stor total radlängd. Den högst tillåtna radlängden, förutom BLOBs, är %ld. Ändra några av dina fält till BLOB"
ukr "Задовга Ñтрока. Ðайбільшою довжиною Ñтроки, не рахуючи BLOB, Ñ” %ld. Вам потрібно привеÑти деÑкі Ñтовбці до типу BLOB"
ER_STACK_OVERRUN
- cze "P-BÅ™eteÄení zásobníku threadu: použito %ld z %ld. Použijte 'mysqld --thread_stack=#' k zadání vÄ›tšího zásobníku"
+ cze "PÅ™eteÄení zásobníku threadu: použito %ld z %ld. Použijte 'mysqld --thread_stack=#' k zadání vÄ›tšího zásobníku"
dan "Thread stack brugt: Brugt: %ld af en %ld stak. Brug 'mysqld --thread_stack=#' for at allokere en større stak om nødvendigt"
nla "Thread stapel overrun: Gebruikte: %ld van een %ld stack. Gebruik 'mysqld --thread_stack=#' om een grotere stapel te definieren (indien noodzakelijk)."
eng "Thread stack overrun: Used: %ld of a %ld stack. Use 'mysqld --thread_stack=#' to specify a bigger stack if needed"
- jps "Thread stack overrun: Used: %ld of a %ld stack. スタック領域を多ãã¨ã‚ŠãŸã„å ´åˆã€'mysqld --thread_stack=#' ã¨æŒ‡å®šã—ã¦ãã ã•ã„",
fre "Débordement de la pile des tâches (Thread stack). Utilisées: %ld pour une pile de %ld. Essayez 'mysqld --thread_stack=#' pour indiquer une plus grande valeur"
ger "Thread-Stack-Überlauf. Benutzt: %ld von %ld Stack. 'mysqld --thread_stack=#' verwenden, um bei Bedarf einen größeren Stack anzulegen"
greek "Stack overrun στο thread: Used: %ld of a %ld stack. ΠαÏακαλώ χÏησιμοποιείστε 'mysqld --thread_stack=#' για να οÏίσετε ένα μεγαλÏτεÏο stack αν χÏειάζεται"
hun "Thread verem tullepes: Used: %ld of a %ld stack. Hasznalja a 'mysqld --thread_stack=#' nagyobb verem definialasahoz"
ita "Thread stack overrun: Usati: %ld di uno stack di %ld. Usa 'mysqld --thread_stack=#' per specificare uno stack piu` grande."
- jpn "Thread stack overrun: Used: %ld of a %ld stack. スタック領域を多ãã¨ã‚ŠãŸã„å ´åˆã€'mysqld --thread_stack=#' ã¨æŒ‡å®šã—ã¦ãã ã•ã„"
+ jpn "スレッドスタックä¸è¶³ã§ã™(使用: %ld ; サイズ: %ld)。必è¦ã«å¿œã˜ã¦ã€ã‚ˆã‚Šå¤§ãã„値㧠'mysqld --thread_stack=#' ã®æŒ‡å®šã‚’ã—ã¦ãã ã•ã„。"
kor "쓰레드 스íƒì´ 넘쳤습니다. 사용: %ldê°œ 스íƒ: %ldê°œ. 만약 필요시 ë”í° ìŠ¤íƒì„ ì›í• ë•Œì—는 'mysqld --thread_stack=#' 를 ì •ì˜í•˜ì„¸ìš”"
por "Estouro da pilha do 'thread'. Usados %ld de uma pilha de %ld. Use 'mysqld --thread_stack=#' para especificar uma pilha maior, se necessário"
rum "Stack-ul thread-ului a fost depasit (prea mic): Folositi: %ld intr-un stack de %ld. Folositi 'mysqld --thread_stack=#' ca sa specifici un stack mai mare"
@@ -2803,7 +2746,7 @@ ER_STACK_OVERRUN
swe "Trådstacken tog slut: Har använt %ld av %ld bytes. Använd 'mysqld --thread_stack=#' ifall du behöver en större stack"
ukr "Стек гілок переповнено: ВикориÑтано: %ld з %ld. ВикориÑтовуйте 'mysqld --thread_stack=#' аби зазначити більший Ñтек, Ñкщо необхідно"
ER_WRONG_OUTER_JOIN 42000
- cze "V OUTER JOIN byl nalezen k-Břížový odkaz. Prověřte ON podmínky"
+ cze "V OUTER JOIN byl nalezen křížový odkaz. Prověřte ON podmínky"
dan "Krydsreferencer fundet i OUTER JOIN; check dine ON conditions"
nla "Gekruiste afhankelijkheid gevonden in OUTER JOIN. Controleer uw ON-conditions"
eng "Cross dependency found in OUTER JOIN; examine your ON conditions"
@@ -2813,6 +2756,7 @@ ER_WRONG_OUTER_JOIN 42000
greek "Cross dependency βÏέθηκε σε OUTER JOIN. ΠαÏακαλώ εξετάστε τις συνθήκες που θέσατε στο ON"
hun "Keresztfuggoseg van az OUTER JOIN-ban. Ellenorizze az ON felteteleket"
ita "Trovata una dipendenza incrociata nella OUTER JOIN. Controlla le condizioni ON"
+ jpn "OUTER JOINã«ç›¸äº’ä¾å­˜ãŒè¦‹ã¤ã‹ã‚Šã¾ã—ãŸã€‚ONå¥ã®æ¡ä»¶ã‚’確èªã—ã¦ä¸‹ã•ã„。"
por "Dependência cruzada encontrada em junção externa (OUTER JOIN); examine as condições utilizadas nas cláusulas 'ON'"
rum "Dependinta incrucisata (cross dependency) gasita in OUTER JOIN. Examinati conditiile ON"
rus "Ð’ OUTER JOIN обнаружена перекреÑÑ‚Ð½Ð°Ñ Ð·Ð°Ð²Ð¸ÑимоÑÑ‚ÑŒ. Внимательно проанализируйте Ñвои уÑÐ»Ð¾Ð²Ð¸Ñ ON"
@@ -2825,18 +2769,17 @@ ER_NULL_COLUMN_IN_INDEX 42000
eng "Table handler doesn't support NULL in given index. Please change column '%-.192s' to be NOT NULL or use another handler"
swe "Tabell hanteraren kan inte indexera NULL kolumner för den givna index typen. Ändra '%-.192s' till NOT NULL eller använd en annan hanterare"
ER_CANT_FIND_UDF
- cze "Nemohu na-BÄíst funkci '%-.192s'"
+ cze "Nemohu naÄíst funkci '%-.192s'"
dan "Kan ikke læse funktionen '%-.192s'"
nla "Kan functie '%-.192s' niet laden"
eng "Can't load function '%-.192s'"
- jps "function '%-.192s' ã‚’ ロードã§ãã¾ã›ã‚“",
est "Ei suuda avada funktsiooni '%-.192s'"
fre "Imposible de charger la fonction '%-.192s'"
ger "Kann Funktion '%-.192s' nicht laden"
greek "Δεν είναι δυνατή η διαδικασία load για τη συνάÏτηση '%-.192s'"
hun "A(z) '%-.192s' fuggveny nem toltheto be"
ita "Impossibile caricare la funzione '%-.192s'"
- jpn "function '%-.192s' ã‚’ ロードã§ãã¾ã›ã‚“"
+ jpn "関数 '%-.192s' をロードã§ãã¾ã›ã‚“。"
kor "'%-.192s' 함수를 로드하지 못했습니다."
por "Não pode carregar a função '%-.192s'"
rum "Nu pot incarca functia '%-.192s'"
@@ -2851,14 +2794,13 @@ ER_CANT_INITIALIZE_UDF
dan "Kan ikke starte funktionen '%-.192s'; %-.80s"
nla "Kan functie '%-.192s' niet initialiseren; %-.80s"
eng "Can't initialize function '%-.192s'; %-.80s"
- jps "function '%-.192s' ã‚’åˆæœŸåŒ–ã§ãã¾ã›ã‚“; %-.80s",
est "Ei suuda algväärtustada funktsiooni '%-.192s'; %-.80s"
fre "Impossible d'initialiser la fonction '%-.192s'; %-.80s"
ger "Kann Funktion '%-.192s' nicht initialisieren: %-.80s"
greek "Δεν είναι δυνατή η έναÏξη της συνάÏτησης '%-.192s'; %-.80s"
hun "A(z) '%-.192s' fuggveny nem inicializalhato; %-.80s"
ita "Impossibile inizializzare la funzione '%-.192s'; %-.80s"
- jpn "function '%-.192s' ã‚’åˆæœŸåŒ–ã§ãã¾ã›ã‚“; %-.80s"
+ jpn "関数 '%-.192s' ã‚’åˆæœŸåŒ–ã§ãã¾ã›ã‚“。; %-.80s"
kor "'%-.192s' 함수를 초기화 하지 못했습니다.; %-.80s"
por "Não pode inicializar a função '%-.192s' - '%-.80s'"
rum "Nu pot initializa functia '%-.192s'; %-.80s"
@@ -2869,18 +2811,17 @@ ER_CANT_INITIALIZE_UDF
swe "Kan inte initialisera funktionen '%-.192s'; '%-.80s'"
ukr "Ðе можу ініціалізувати функцію '%-.192s'; %-.80s"
ER_UDF_NO_PATHS
- cze "Pro sd-Bílenou knihovnu nejsou povoleny cesty"
+ cze "Pro sdílenou knihovnu nejsou povoleny cesty"
dan "Angivelse af sti ikke tilladt for delt bibliotek"
nla "Geen pad toegestaan voor shared library"
eng "No paths allowed for shared library"
- jps "shared library ã¸ã®ãƒ‘スãŒé€šã£ã¦ã„ã¾ã›ã‚“",
est "Teegi nimes ei tohi olla kataloogi"
fre "Chemin interdit pour les bibliothèques partagées"
ger "Keine Pfade gestattet für Shared Library"
greek "Δεν βÏέθηκαν paths για την shared library"
hun "Nincs ut a megosztott konyvtarakhoz (shared library)"
ita "Non sono ammessi path per le librerie condivisa"
- jpn "shared library ã¸ã®ãƒ‘スãŒé€šã£ã¦ã„ã¾ã›ã‚“"
+ jpn "共有ライブラリã«ã¯ãƒ‘スを指定ã§ãã¾ã›ã‚“。"
kor "공유 ë¼ì´ë²„러리를 위한 패스가 ì •ì˜ë˜ì–´ 있지 않습니다."
por "Não há caminhos (paths) permitidos para biblioteca compartilhada"
rum "Nici un paths nu e permis pentru o librarie shared"
@@ -2891,18 +2832,17 @@ ER_UDF_NO_PATHS
swe "Man får inte ange sökväg för dynamiska bibliotek"
ukr "Ðе дозволено викориÑтовувати путі Ð´Ð»Ñ Ñ€Ð¾Ð·Ð´Ñ–Ð»ÑŽÐ²Ð°Ð½Ð¸Ñ… бібліотек"
ER_UDF_EXISTS
- cze "Funkce '%-.192s' ji-Bž existuje"
+ cze "Funkce '%-.192s' již existuje"
dan "Funktionen '%-.192s' findes allerede"
nla "Functie '%-.192s' bestaat reeds"
eng "Function '%-.192s' already exists"
- jps "Function '%-.192s' ã¯æ—¢ã«å®šç¾©ã•ã‚Œã¦ã„ã¾ã™",
est "Funktsioon '%-.192s' juba eksisteerib"
fre "La fonction '%-.192s' existe déjà"
ger "Funktion '%-.192s' existiert schon"
greek "Η συνάÏτηση '%-.192s' υπάÏχει ήδη"
hun "A '%-.192s' fuggveny mar letezik"
ita "La funzione '%-.192s' esiste gia`"
- jpn "Function '%-.192s' ã¯æ—¢ã«å®šç¾©ã•ã‚Œã¦ã„ã¾ã™"
+ jpn "関数 '%-.192s' ã¯ã™ã§ã«å®šç¾©ã•ã‚Œã¦ã„ã¾ã™ã€‚"
kor "'%-.192s' 함수는 ì´ë¯¸ 존재합니다."
por "Função '%-.192s' já existe"
rum "Functia '%-.192s' exista deja"
@@ -2913,18 +2853,17 @@ ER_UDF_EXISTS
swe "Funktionen '%-.192s' finns redan"
ukr "Ð¤ÑƒÐ½ÐºÑ†Ñ–Ñ '%-.192s' вже Ñ–Ñнує"
ER_CANT_OPEN_LIBRARY
- cze "Nemohu otev-Břít sdílenou knihovnu '%-.192s' (errno: %d %-.128s)"
+ cze "Nemohu otevřít sdílenou knihovnu '%-.192s' (errno: %d %-.128s)"
dan "Kan ikke åbne delt bibliotek '%-.192s' (errno: %d %-.128s)"
nla "Kan shared library '%-.192s' niet openen (Errcode: %d %-.128s)"
eng "Can't open shared library '%-.192s' (errno: %d %-.128s)"
- jps "shared library '%-.192s' ã‚’é–‹ã事ãŒã§ãã¾ã›ã‚“ (errno: %d %-.128s)",
est "Ei suuda avada jagatud teeki '%-.192s' (veakood: %d %-.128s)"
fre "Impossible d'ouvrir la bibliothèque partagée '%-.192s' (errno: %d %-.128s)"
ger "Kann Shared Library '%-.192s' nicht öffnen (Fehler: %d %-.128s)"
greek "Δεν είναι δυνατή η ανάγνωση της shared library '%-.192s' (κωδικός λάθους: %d %-.128s)"
hun "A(z) '%-.192s' megosztott konyvtar nem hasznalhato (hibakod: %d %-.128s)"
ita "Impossibile aprire la libreria condivisa '%-.192s' (errno: %d %-.128s)"
- jpn "shared library '%-.192s' ã‚’é–‹ã事ãŒã§ãã¾ã›ã‚“ (errno: %d %-.128s)"
+ jpn "共有ライブラリ '%-.192s' ã‚’é–‹ã事ãŒã§ãã¾ã›ã‚“。(エラー番å·: %d %-.128s)"
kor "'%-.192s' 공유 ë¼ì´ë²„러리를 열수 없습니다.(ì—러번호: %d %-.128s)"
nor "Can't open shared library '%-.192s' (errno: %d %-.128s)"
norwegian-ny "Can't open shared library '%-.192s' (errno: %d %-.128s)"
@@ -2938,18 +2877,17 @@ ER_CANT_OPEN_LIBRARY
swe "Kan inte öppna det dynamiska biblioteket '%-.192s' (Felkod: %d %-.128s)"
ukr "Ðе можу відкрити розділювану бібліотеку '%-.192s' (помилка: %d %-.128s)"
ER_CANT_FIND_DL_ENTRY
- cze "Nemohu naj-Bít funkci '%-.128s' v knihovně"
+ cze "Nemohu najít funkci '%-.128s' v knihovně"
dan "Kan ikke finde funktionen '%-.128s' i bibliotek"
nla "Kan functie '%-.128s' niet in library vinden"
eng "Can't find symbol '%-.128s' in library"
- jps "function '%-.128s' をライブラリー中ã«è¦‹ä»˜ã‘る事ãŒã§ãã¾ã›ã‚“",
est "Ei leia funktsiooni '%-.128s' antud teegis"
fre "Impossible de trouver la fonction '%-.128s' dans la bibliothèque"
ger "Kann Funktion '%-.128s' in der Library nicht finden"
greek "Δεν είναι δυνατή η ανεÏÏεση της συνάÏτησης '%-.128s' στην βιβλιοθήκη"
hun "A(z) '%-.128s' fuggveny nem talalhato a konyvtarban"
ita "Impossibile trovare la funzione '%-.128s' nella libreria"
- jpn "function '%-.128s' をライブラリー中ã«è¦‹ä»˜ã‘る事ãŒã§ãã¾ã›ã‚“"
+ jpn "関数 '%-.128s' ã¯å…±æœ‰ãƒ©ã‚¤ãƒ–ラリー中ã«ã‚ã‚Šã¾ã›ã‚“。"
kor "ë¼ì´ë²„러리ì—ì„œ '%-.128s' 함수를 ì°¾ì„ ìˆ˜ 없습니다."
por "Não pode encontrar a função '%-.128s' na biblioteca"
rum "Nu pot gasi functia '%-.128s' in libraria"
@@ -2960,18 +2898,17 @@ ER_CANT_FIND_DL_ENTRY
swe "Hittar inte funktionen '%-.128s' in det dynamiska biblioteket"
ukr "Ðе можу знайти функцію '%-.128s' у бібліотеці"
ER_FUNCTION_NOT_DEFINED
- cze "Funkce '%-.192s' nen-Bí definována"
+ cze "Funkce '%-.192s' není definována"
dan "Funktionen '%-.192s' er ikke defineret"
nla "Functie '%-.192s' is niet gedefinieerd"
eng "Function '%-.192s' is not defined"
- jps "Function '%-.192s' ã¯å®šç¾©ã•ã‚Œã¦ã„ã¾ã›ã‚“",
est "Funktsioon '%-.192s' ei ole defineeritud"
fre "La fonction '%-.192s' n'est pas définie"
ger "Funktion '%-.192s' ist nicht definiert"
greek "Η συνάÏτηση '%-.192s' δεν έχει οÏισθεί"
hun "A '%-.192s' fuggveny nem definialt"
ita "La funzione '%-.192s' non e` definita"
- jpn "Function '%-.192s' ã¯å®šç¾©ã•ã‚Œã¦ã„ã¾ã›ã‚“"
+ jpn "関数 '%-.192s' ã¯å®šç¾©ã•ã‚Œã¦ã„ã¾ã›ã‚“。"
kor "'%-.192s' 함수가 ì •ì˜ë˜ì–´ 있지 않습니다."
por "Função '%-.192s' não está definida"
rum "Functia '%-.192s' nu e definita"
@@ -2982,18 +2919,17 @@ ER_FUNCTION_NOT_DEFINED
swe "Funktionen '%-.192s' är inte definierad"
ukr "Функцію '%-.192s' не визначено"
ER_HOST_IS_BLOCKED
- cze "Stroj '%-.64s' je zablokov-Bán kvůli mnoha chybám při připojování. Odblokujete použitím 'mysqladmin flush-hosts'"
+ cze "Stroj '%-.64s' je zablokován kvůli mnoha chybám při připojování. Odblokujete použitím 'mysqladmin flush-hosts'"
dan "Værten '%-.64s' er blokeret på grund af mange fejlforespørgsler. Lås op med 'mysqladmin flush-hosts'"
nla "Host '%-.64s' is geblokkeeerd vanwege te veel verbindings fouten. Deblokkeer met 'mysqladmin flush-hosts'"
eng "Host '%-.64s' is blocked because of many connection errors; unblock with 'mysqladmin flush-hosts'"
- jps "Host '%-.64s' 㯠many connection error ã®ãŸã‚ã€æ‹’å¦ã•ã‚Œã¾ã—ãŸ. 'mysqladmin flush-hosts' ã§è§£é™¤ã—ã¦ãã ã•ã„",
est "Masin '%-.64s' on blokeeritud hulgaliste ühendusvigade tõttu. Blokeeringu saab tühistada 'mysqladmin flush-hosts' käsuga"
fre "L'hôte '%-.64s' est bloqué à cause d'un trop grand nombre d'erreur de connexion. Débloquer le par 'mysqladmin flush-hosts'"
ger "Host '%-.64s' blockiert wegen zu vieler Verbindungsfehler. Aufheben der Blockierung mit 'mysqladmin flush-hosts'"
greek "Ο υπολογιστής '%-.64s' έχει αποκλεισθεί λόγω πολλαπλών λαθών σÏνδεσης. ΠÏοσπαθήστε να διοÏώσετε με 'mysqladmin flush-hosts'"
hun "A '%-.64s' host blokkolodott, tul sok kapcsolodasi hiba miatt. Hasznalja a 'mysqladmin flush-hosts' parancsot"
ita "Sistema '%-.64s' bloccato a causa di troppi errori di connessione. Per sbloccarlo: 'mysqladmin flush-hosts'"
- jpn "Host '%-.64s' 㯠many connection error ã®ãŸã‚ã€æ‹’å¦ã•ã‚Œã¾ã—ãŸ. 'mysqladmin flush-hosts' ã§è§£é™¤ã—ã¦ãã ã•ã„"
+ jpn "接続エラーãŒå¤šã„ãŸã‚ã€ãƒ›ã‚¹ãƒˆ '%-.64s' ã¯æ‹’å¦ã•ã‚Œã¾ã—ãŸã€‚'mysqladmin flush-hosts' ã§è§£é™¤ã§ãã¾ã™ã€‚"
kor "너무 ë§Žì€ ì—°ê²°ì˜¤ë¥˜ë¡œ ì¸í•˜ì—¬ 호스트 '%-.64s'는 블ë½ë˜ì—ˆìŠµë‹ˆë‹¤. 'mysqladmin flush-hosts'를 ì´ìš©í•˜ì—¬ 블ë½ì„ 해제하세요"
por "'Host' '%-.64s' está bloqueado devido a muitos erros de conexão. Desbloqueie com 'mysqladmin flush-hosts'"
rum "Host-ul '%-.64s' e blocat din cauza multelor erori de conectie. Poti deploca folosind 'mysqladmin flush-hosts'"
@@ -3003,18 +2939,17 @@ ER_HOST_IS_BLOCKED
swe "Denna dator, '%-.64s', är blockerad pga många felaktig paket. Gör 'mysqladmin flush-hosts' för att ta bort alla blockeringarna"
ukr "ХоÑÑ‚ '%-.64s' заблоковано з причини великої кількоÑÑ‚Ñ– помилок з'єднаннÑ. Ð”Ð»Ñ Ñ€Ð¾Ð·Ð±Ð»Ð¾ÐºÑƒÐ²Ð°Ð½Ð½Ñ Ð²Ð¸ÐºÐ¾Ñ€Ð¸Ñтовуйте 'mysqladmin flush-hosts'"
ER_HOST_NOT_PRIVILEGED
- cze "Stroj '%-.64s' nem-Bá povoleno se k tomuto MariaDB serveru připojit"
+ cze "Stroj '%-.64s' nemá povoleno se k tomuto MariaDB serveru připojit"
dan "Værten '%-.64s' kan ikke tilkoble denne MariaDB-server"
nla "Het is host '%-.64s' is niet toegestaan verbinding te maken met deze MariaDB server"
eng "Host '%-.64s' is not allowed to connect to this MariaDB server"
- jps "Host '%-.64s' 㯠MariaDB server ã«æŽ¥ç¶šã‚’許å¯ã•ã‚Œã¦ã„ã¾ã›ã‚“",
est "Masinal '%-.64s' puudub ligipääs sellele MariaDB serverile"
fre "Le hôte '%-.64s' n'est pas authorisé à se connecter à ce serveur MariaDB"
ger "Host '%-.64s' hat keine Berechtigung, sich mit diesem MariaDB-Server zu verbinden"
greek "Ο υπολογιστής '%-.64s' δεν έχει δικαίωμα σÏνδεσης με τον MariaDB server"
hun "A '%-.64s' host szamara nem engedelyezett a kapcsolodas ehhez a MariaDB szerverhez"
ita "Al sistema '%-.64s' non e` consentita la connessione a questo server MariaDB"
- jpn "Host '%-.64s' 㯠MariaDB server ã«æŽ¥ç¶šã‚’許å¯ã•ã‚Œã¦ã„ã¾ã›ã‚“"
+ jpn "ホスト '%-.64s' ã‹ã‚‰ã®ã“ã® MariaDB server ã¸ã®æŽ¥ç¶šã¯è¨±å¯ã•ã‚Œã¦ã„ã¾ã›ã‚“。"
kor "'%-.64s' 호스트는 ì´ MariaDBì„œë²„ì— ì ‘ì†í•  허가를 받지 못했습니다."
por "'Host' '%-.64s' não tem permissão para se conectar com este servidor MariaDB"
rum "Host-ul '%-.64s' nu este permis a se conecta la aceste server MariaDB"
@@ -3024,18 +2959,17 @@ ER_HOST_NOT_PRIVILEGED
swe "Denna dator, '%-.64s', har inte privileger att använda denna MariaDB server"
ukr "ХоÑту '%-.64s' не доволено зв'ÑзуватиÑÑŒ з цим Ñервером MariaDB"
ER_PASSWORD_ANONYMOUS_USER 42000
- cze "Pou-Bžíváte MariaDB jako anonymní uživatel a anonymní uživatelé nemají povoleno měnit hesla"
+ cze "Používáte MariaDB jako anonymní uživatel a anonymní uživatelé nemají povoleno měnit hesla"
dan "Du bruger MariaDB som anonym bruger. Anonyme brugere må ikke ændre adgangskoder"
nla "U gebruikt MariaDB als anonieme gebruiker en deze mogen geen wachtwoorden wijzigen"
eng "You are using MariaDB as an anonymous user and anonymous users are not allowed to change passwords"
- jps "MariaDB ã‚’ anonymous users ã§ä½¿ç”¨ã—ã¦ã„る状態ã§ã¯ã€ãƒ‘スワードã®å¤‰æ›´ã¯ã§ãã¾ã›ã‚“",
est "Te kasutate MariaDB-i anonüümse kasutajana, kelledel pole parooli muutmise õigust"
fre "Vous utilisez un utilisateur anonyme et les utilisateurs anonymes ne sont pas autorisés à changer les mots de passe"
ger "Sie benutzen MariaDB als anonymer Benutzer und dürfen daher keine Passwörter ändern"
greek "ΧÏησιμοποιείτε την MariaDB σαν anonymous user και έτσι δεν μποÏείτε να αλλάξετε τα passwords άλλων χÏηστών"
hun "Nevtelen (anonymous) felhasznalokent nem negedelyezett a jelszovaltoztatas"
ita "Impossibile cambiare la password usando MariaDB come utente anonimo"
- jpn "MariaDB ã‚’ anonymous users ã§ä½¿ç”¨ã—ã¦ã„る状態ã§ã¯ã€ãƒ‘スワードã®å¤‰æ›´ã¯ã§ãã¾ã›ã‚“"
+ jpn "MariaDB を匿åユーザーã§ä½¿ç”¨ã—ã¦ã„ã‚‹ã®ã§ã€ãƒ‘スワードã®å¤‰æ›´ã¯ã§ãã¾ã›ã‚“。"
kor "ë‹¹ì‹ ì€ MariaDBì„œë²„ì— ìµëª…ì˜ ì‚¬ìš©ìžë¡œ ì ‘ì†ì„ 하셨습니다.ìµëª…ì˜ ì‚¬ìš©ìžëŠ” 암호를 변경할 수 없습니다."
por "Você está usando o MariaDB como usuário anônimo e usuários anônimos não têm permissão para mudar senhas"
rum "Dumneavoastra folositi MariaDB ca un utilizator anonim si utilizatorii anonimi nu au voie sa schime parolele"
@@ -3045,18 +2979,17 @@ ER_PASSWORD_ANONYMOUS_USER 42000
swe "Du använder MariaDB som en anonym användare och som sådan får du inte ändra ditt lösenord"
ukr "Ви викориÑтовуєте MariaDB Ñк анонімний кориÑтувач, тому вам не дозволено змінювати паролі"
ER_PASSWORD_NOT_ALLOWED 42000
- cze "Na zm-Běnu hesel ostatním musíte mít právo provést update tabulek v databázi mysql"
+ cze "Na změnu hesel ostatním musíte mít právo provést update tabulek v databázi mysql"
dan "Du skal have tilladelse til at opdatere tabeller i MariaDB databasen for at ændre andres adgangskoder"
nla "U moet tabel update priveleges hebben in de mysql database om wachtwoorden voor anderen te mogen wijzigen"
eng "You must have privileges to update tables in the mysql database to be able to change passwords for others"
- jps "ä»–ã®ãƒ¦ãƒ¼ã‚¶ãƒ¼ã®ãƒ‘スワードを変更ã™ã‚‹ãŸã‚ã«ã¯, mysql データベースã«å¯¾ã—㦠update ã®è¨±å¯ãŒãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“.",
est "Teiste paroolide muutmiseks on nõutav tabelite muutmisõigus 'mysql' andmebaasis"
fre "Vous devez avoir le privilège update sur les tables de la base de donnée mysql pour pouvoir changer les mots de passe des autres"
ger "Sie benötigen die Berechtigung zum Aktualisieren von Tabellen in der Datenbank 'mysql', um die Passwörter anderer Benutzer ändern zu können"
greek "ΠÏέπει να έχετε δικαίωμα διόÏθωσης πινάκων (update) στη βάση δεδομένων mysql για να μποÏείτε να αλλάξετε τα passwords άλλων χÏηστών"
hun "Onnek tabla-update joggal kell rendelkeznie a mysql adatbazisban masok jelszavanak megvaltoztatasahoz"
ita "E` necessario il privilegio di update sulle tabelle del database mysql per cambiare le password per gli altri utenti"
- jpn "ä»–ã®ãƒ¦ãƒ¼ã‚¶ãƒ¼ã®ãƒ‘スワードを変更ã™ã‚‹ãŸã‚ã«ã¯, mysql データベースã«å¯¾ã—㦠update ã®è¨±å¯ãŒãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“."
+ jpn "ä»–ã®ãƒ¦ãƒ¼ã‚¶ãƒ¼ã®ãƒ‘スワードを変更ã™ã‚‹ãŸã‚ã«ã¯ã€mysqlデータベースã®è¡¨ã‚’æ›´æ–°ã™ã‚‹æ¨©é™ãŒå¿…è¦ã§ã™ã€‚"
kor "ë‹¹ì‹ ì€ ë‹¤ë¥¸ì‚¬ìš©ìžë“¤ì˜ 암호를 변경할 수 있ë„ë¡ ë°ì´íƒ€ë² ì´ìŠ¤ ë³€ê²½ê¶Œí•œì„ ê°€ì ¸ì•¼ 합니다."
por "Você deve ter privilégios para atualizar tabelas no banco de dados mysql para ser capaz de mudar a senha de outros"
rum "Trebuie sa aveti privilegii sa actualizati tabelele in bazele de date mysql ca sa puteti sa schimati parolele altora"
@@ -3066,7 +2999,7 @@ ER_PASSWORD_NOT_ALLOWED 42000
swe "För att ändra lösenord för andra måste du ha rättigheter att uppdatera mysql-databasen"
ukr "Ви повині мати право на Ð¾Ð½Ð¾Ð²Ð»ÐµÐ½Ð½Ñ Ñ‚Ð°Ð±Ð»Ð¸Ñ†ÑŒ у базі данних mysql, аби мати можливіÑÑ‚ÑŒ змінювати пароль іншим"
ER_PASSWORD_NO_MATCH 42000
- cze "V tabulce user nen-Bí žádný odpovídající řádek"
+ cze "V tabulce user není žádný odpovídající řádek"
dan "Kan ikke finde nogen tilsvarende poster i bruger tabellen"
nla "Kan geen enkele passende rij vinden in de gebruikers tabel"
eng "Can't find any matching row in the user table"
@@ -3076,6 +3009,7 @@ ER_PASSWORD_NO_MATCH 42000
greek "Δεν είναι δυνατή η ανεÏÏεση της αντίστοιχης εγγÏαφής στον πίνακα των χÏηστών"
hun "Nincs megegyezo sor a user tablaban"
ita "Impossibile trovare la riga corrispondente nella tabella user"
+ jpn "ユーザーテーブルã«è©²å½“ã™ã‚‹ãƒ¬ã‚³ãƒ¼ãƒ‰ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。"
kor "ì‚¬ìš©ìž í…Œì´ë¸”ì—ì„œ ì¼ì¹˜í•˜ëŠ” ê²ƒì„ ì°¾ì„ ìˆ˜ ì—†ì니다."
por "Não pode encontrar nenhuma linha que combine na tabela usuário (user table)"
rum "Nu pot gasi nici o linie corespunzatoare in tabela utilizatorului"
@@ -3085,17 +3019,16 @@ ER_PASSWORD_NO_MATCH 42000
swe "Hittade inte användaren i 'user'-tabellen"
ukr "Ðе можу знайти відповідних запиÑів у таблиці кориÑтувача"
ER_UPDATE_INFO
- cze "Nalezen-Bých řádků: %ld Změněno: %ld Varování: %ld"
+ cze "Nalezených řádků: %ld Změněno: %ld Varování: %ld"
dan "Poster fundet: %ld Ændret: %ld Advarsler: %ld"
nla "Passende rijen: %ld Gewijzigd: %ld Waarschuwingen: %ld"
eng "Rows matched: %ld Changed: %ld Warnings: %ld"
- jps "一致数(Rows matched): %ld 変更: %ld Warnings: %ld",
est "Sobinud kirjeid: %ld Muudetud: %ld Hoiatusi: %ld"
fre "Enregistrements correspondants: %ld Modifiés: %ld Warnings: %ld"
ger "Datensätze gefunden: %ld Geändert: %ld Warnungen: %ld"
hun "Megegyezo sorok szama: %ld Valtozott: %ld Warnings: %ld"
ita "Rows riconosciute: %ld Cambiate: %ld Warnings: %ld"
- jpn "一致数(Rows matched): %ld 変更: %ld Warnings: %ld"
+ jpn "該当ã—ãŸè¡Œ: %ld 変更: %ld 警告: %ld"
kor "ì¼ì¹˜í•˜ëŠ” Rows : %ldê°œ 변경ë¨: %ldê°œ 경고: %ldê°œ"
por "Linhas que combinaram: %ld - Alteradas: %ld - Avisos: %ld"
rum "Linii identificate (matched): %ld Schimbate: %ld Atentionari (warnings): %ld"
@@ -3105,17 +3038,16 @@ ER_UPDATE_INFO
swe "Rader: %ld Uppdaterade: %ld Varningar: %ld"
ukr "ЗапиÑів відповідає: %ld Змінено: %ld ЗаÑтережень: %ld"
ER_CANT_CREATE_THREAD
- cze "Nemohu vytvo-BÅ™it nový thread (errno %M). Pokud je jeÅ¡tÄ› nÄ›jaká volná paměť, podívejte se do manuálu na Äást o chybách specifických pro jednotlivé operaÄní systémy"
+ cze "Nemohu vytvoÅ™it nový thread (errno %M). Pokud je jeÅ¡tÄ› nÄ›jaká volná paměť, podívejte se do manuálu na Äást o chybách specifických pro jednotlivé operaÄní systémy"
dan "Kan ikke danne en ny tråd (fejl nr. %M). Hvis computeren ikke er løbet tør for hukommelse, kan du se i brugervejledningen for en mulig operativ-system - afhængig fejl"
nla "Kan geen nieuwe thread aanmaken (Errcode: %M). Indien er geen tekort aan geheugen is kunt u de handleiding consulteren over een mogelijke OS afhankelijke fout"
eng "Can't create a new thread (errno %M); if you are not out of available memory, you can consult the manual for a possible OS-dependent bug"
- jps "æ–°è¦ã«ã‚¹ãƒ¬ãƒƒãƒ‰ãŒä½œã‚Œã¾ã›ã‚“ã§ã—㟠(errno %M). ã‚‚ã—最大使用許å¯ãƒ¡ãƒ¢ãƒªãƒ¼æ•°ã‚’越ãˆã¦ã„ãªã„ã®ã«ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¦ã„ã‚‹ãªã‚‰, マニュアルã®ä¸­ã‹ã‚‰ 'possible OS-dependent bug' ã¨ã„ã†æ–‡å­—を探ã—ã¦ãã¿ã¦ã ã•ã„.",
est "Ei suuda luua uut lõime (veakood %M). Kui mälu ei ole otsas, on tõenäoliselt tegemist operatsioonisüsteemispetsiifilise veaga"
fre "Impossible de créer une nouvelle tâche (errno %M). S'il reste de la mémoire libre, consultez le manual pour trouver un éventuel bug dépendant de l'OS"
ger "Kann keinen neuen Thread erzeugen (Fehler: %M). Sollte noch Speicher verfügbar sein, bitte im Handbuch wegen möglicher Fehler im Betriebssystem nachschlagen"
hun "Uj thread letrehozasa nem lehetseges (Hibakod: %M). Amenyiben van meg szabad memoria, olvassa el a kezikonyv operacios rendszerfuggo hibalehetosegekrol szolo reszet"
ita "Impossibile creare un nuovo thread (errno %M). Se non ci sono problemi di memoria disponibile puoi consultare il manuale per controllare possibili problemi dipendenti dal SO"
- jpn "æ–°è¦ã«ã‚¹ãƒ¬ãƒƒãƒ‰ãŒä½œã‚Œã¾ã›ã‚“ã§ã—㟠(errno %M). ã‚‚ã—最大使用許å¯ãƒ¡ãƒ¢ãƒªãƒ¼æ•°ã‚’越ãˆã¦ã„ãªã„ã®ã«ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¦ã„ã‚‹ãªã‚‰, マニュアルã®ä¸­ã‹ã‚‰ 'possible OS-dependent bug' ã¨ã„ã†æ–‡å­—を探ã—ã¦ãã¿ã¦ã ã•ã„."
+ jpn "æ–°è¦ã«ã‚¹ãƒ¬ãƒƒãƒ‰ã‚’作æˆã§ãã¾ã›ã‚“。(ã‚¨ãƒ©ãƒ¼ç•ªå· %M) ã‚‚ã—も使用å¯èƒ½ãƒ¡ãƒ¢ãƒªãƒ¼ã®ä¸è¶³ã§ãªã‘ã‚Œã°ã€OSä¾å­˜ã®ãƒã‚°ã§ã‚ã‚‹å¯èƒ½æ€§ãŒã‚ã‚Šã¾ã™ã€‚"
kor "새로운 쓰레드를 만들 수 없습니다.(ì—러번호 %M). 만약 여유메모리가 있다면 OS-dependent버그 ì˜ ë©”ë‰´ì–¼ ë¶€ë¶„ì„ ì°¾ì•„ë³´ì‹œì˜¤."
nor "Can't create a new thread (errno %M); if you are not out of available memory you can consult the manual for any possible OS dependent bug"
norwegian-ny "Can't create a new thread (errno %M); if you are not out of available memory you can consult the manual for any possible OS dependent bug"
@@ -3128,7 +3060,7 @@ ER_CANT_CREATE_THREAD
swe "Kan inte skapa en ny tråd (errno %M)"
ukr "Ðе можу Ñтворити нову гілку (помилка %M). Якщо ви не викориÑтали уÑÑŽ пам'ÑÑ‚ÑŒ, то прочитайте документацію до вашої ОС - можливо це помилка ОС"
ER_WRONG_VALUE_COUNT_ON_ROW 21S01
- cze "Po-BÄet sloupců neodpovídá poÄtu hodnot na řádku %lu"
+ cze "PoÄet sloupců neodpovídá poÄtu hodnot na řádku %lu"
dan "Kolonne antallet stemmer ikke overens med antallet af værdier i post %lu"
nla "Kolom aantal komt niet overeen met waarde aantal in rij %lu"
eng "Column count doesn't match value count at row %lu"
@@ -3136,6 +3068,7 @@ ER_WRONG_VALUE_COUNT_ON_ROW 21S01
ger "Anzahl der Felder stimmt nicht mit der Anzahl der Werte in Zeile %lu überein"
hun "Az oszlopban talalhato ertek nem egyezik meg a %lu sorban szamitott ertekkel"
ita "Il numero delle colonne non corrisponde al conteggio alla riga %lu"
+ jpn "%lu 行目ã§ã€åˆ—ã®æ•°ãŒå€¤ã®æ•°ã¨ä¸€è‡´ã—ã¾ã›ã‚“。"
kor "Row %luì—ì„œ 칼럼 카운트와 value 카운터와 ì¼ì¹˜í•˜ì§€ 않습니다."
por "Contagem de colunas não confere com a contagem de valores na linha %lu"
rum "Numarul de coloane nu corespunde cu numarul de valori la linia %lu"
@@ -3145,7 +3078,7 @@ ER_WRONG_VALUE_COUNT_ON_ROW 21S01
swe "Antalet kolumner motsvarar inte antalet värden på rad: %lu"
ukr "КількіÑÑ‚ÑŒ Ñтовбців не Ñпівпадає з кількіÑÑ‚ÑŽ значень у Ñтроці %lu"
ER_CANT_REOPEN_TABLE
- cze "Nemohu znovuotev-Břít tabulku: '%-.192s"
+ cze "Nemohu znovuotevřít tabulku: '%-.192s"
dan "Kan ikke genåbne tabel '%-.192s"
nla "Kan tabel niet opnieuw openen: '%-.192s"
eng "Can't reopen table: '%-.192s'"
@@ -3154,6 +3087,7 @@ ER_CANT_REOPEN_TABLE
ger "Kann Tabelle'%-.192s' nicht erneut öffnen"
hun "Nem lehet ujra-megnyitni a tablat: '%-.192s"
ita "Impossibile riaprire la tabella: '%-.192s'"
+ jpn "表をå†ã‚ªãƒ¼ãƒ—ンã§ãã¾ã›ã‚“。: '%-.192s'"
kor "í…Œì´ë¸”ì„ ë‹¤ì‹œ 열수 없군요: '%-.192s"
nor "Can't reopen table: '%-.192s"
norwegian-ny "Can't reopen table: '%-.192s"
@@ -3167,17 +3101,16 @@ ER_CANT_REOPEN_TABLE
swe "Kunde inte stänga och öppna tabell '%-.192s"
ukr "Ðе можу перевідкрити таблицю: '%-.192s'"
ER_INVALID_USE_OF_NULL 22004
- cze "Neplatn-Bé užití hodnoty NULL"
+ cze "Neplatné užití hodnoty NULL"
dan "Forkert brug af nulværdi (NULL)"
nla "Foutief gebruik van de NULL waarde"
eng "Invalid use of NULL value"
- jps "NULL 値ã®ä½¿ç”¨æ–¹æ³•ãŒä¸é©åˆ‡ã§ã™",
est "NULL väärtuse väärkasutus"
fre "Utilisation incorrecte de la valeur NULL"
ger "Unerlaubte Verwendung eines NULL-Werts"
hun "A NULL ervenytelen hasznalata"
ita "Uso scorretto del valore NULL"
- jpn "NULL 値ã®ä½¿ç”¨æ–¹æ³•ãŒä¸é©åˆ‡ã§ã™"
+ jpn "NULL 値ã®ä½¿ç”¨æ–¹æ³•ãŒä¸é©åˆ‡ã§ã™ã€‚"
kor "NULL ê°’ì„ ìž˜ëª» 사용하셨군요..."
por "Uso inválido do valor NULL"
rum "Folosirea unei value NULL e invalida"
@@ -3187,7 +3120,7 @@ ER_INVALID_USE_OF_NULL 22004
swe "Felaktig använding av NULL"
ukr "Хибне викориÑÑ‚Ð°Ð½Ð½Ñ Ð·Ð½Ð°Ñ‡ÐµÐ½Ð½Ñ NULL"
ER_REGEXP_ERROR 42000
- cze "Regul-Bární výraz vrátil chybu '%-.64s'"
+ cze "Regulární výraz vrátil chybu '%-.64s'"
dan "Fik fejl '%-.64s' fra regexp"
nla "Fout '%-.64s' ontvangen van regexp"
eng "Got error '%-.64s' from regexp"
@@ -3196,6 +3129,7 @@ ER_REGEXP_ERROR 42000
ger "regexp lieferte Fehler '%-.64s'"
hun "'%-.64s' hiba a regularis kifejezes hasznalata soran (regexp)"
ita "Errore '%-.64s' da regexp"
+ jpn "regexp ãŒã‚¨ãƒ©ãƒ¼ '%-.64s' ã‚’è¿”ã—ã¾ã—ãŸã€‚"
kor "regexpì—ì„œ '%-.64s'ê°€ 났습니다."
por "Obteve erro '%-.64s' em regexp"
rum "Eroarea '%-.64s' obtinuta din expresia regulara (regexp)"
@@ -3205,7 +3139,7 @@ ER_REGEXP_ERROR 42000
swe "Fick fel '%-.64s' från REGEXP"
ukr "Отримано помилку '%-.64s' від регулÑрного виразу"
ER_MIX_OF_GROUP_FUNC_AND_FIELDS 42000
- cze "Pokud nen-Bí žádná GROUP BY klauzule, není dovoleno souÄasné použití GROUP položek (MIN(),MAX(),COUNT()...) s ne GROUP položkami"
+ cze "Pokud není žádná GROUP BY klauzule, není dovoleno souÄasné použití GROUP položek (MIN(),MAX(),COUNT()...) s ne GROUP položkami"
dan "Sammenblanding af GROUP kolonner (MIN(),MAX(),COUNT()...) uden GROUP kolonner er ikke tilladt, hvis der ikke er noget GROUP BY prædikat"
nla "Het mixen van GROUP kolommen (MIN(),MAX(),COUNT()...) met no-GROUP kolommen is foutief indien er geen GROUP BY clausule is"
eng "Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) with no GROUP columns is illegal if there is no GROUP BY clause"
@@ -3214,6 +3148,7 @@ ER_MIX_OF_GROUP_FUNC_AND_FIELDS 42000
ger "Das Vermischen von GROUP-Feldern (MIN(),MAX(),COUNT()...) mit Nicht-GROUP-Feldern ist nicht zulässig, wenn keine GROUP-BY-Klausel vorhanden ist"
hun "A GROUP mezok (MIN(),MAX(),COUNT()...) kevert hasznalata nem lehetseges GROUP BY hivatkozas nelkul"
ita "Il mescolare funzioni di aggregazione (MIN(),MAX(),COUNT()...) e non e` illegale se non c'e` una clausula GROUP BY"
+ jpn "GROUP BYå¥ãŒç„¡ã„å ´åˆã€é›†è¨ˆé–¢æ•°(MIN(),MAX(),COUNT(),...)ã¨é€šå¸¸ã®åˆ—ã‚’åŒæ™‚ã«ä½¿ç”¨ã§ãã¾ã›ã‚“。"
kor "Mixing of GROUP 칼럼s (MIN(),MAX(),COUNT(),...) with no GROUP 칼럼s is illegal if there is no GROUP BY clause"
por "Mistura de colunas agrupadas (com MIN(), MAX(), COUNT(), ...) com colunas não agrupadas é ilegal, se não existir uma cláusula de agrupamento (cláusula GROUP BY)"
rum "Amestecarea de coloane GROUP (MIN(),MAX(),COUNT()...) fara coloane GROUP este ilegala daca nu exista o clauza GROUP BY"
@@ -3223,17 +3158,16 @@ ER_MIX_OF_GROUP_FUNC_AND_FIELDS 42000
swe "Man får ha både GROUP-kolumner (MIN(),MAX(),COUNT()...) och fält i en fråga om man inte har en GROUP BY-del"
ukr "Ð—Ð¼Ñ–ÑˆÑƒÐ²Ð°Ð½Ð½Ñ GROUP Ñтовбців (MIN(),MAX(),COUNT()...) з не GROUP ÑтовбцÑми Ñ” забороненим, Ñкщо не має GROUP BY"
ER_NONEXISTING_GRANT 42000
- cze "Neexistuje odpov-Bídající grant pro uživatele '%-.48s' na stroji '%-.64s'"
+ cze "Neexistuje odpovídající grant pro uživatele '%-.48s' na stroji '%-.64s'"
dan "Denne tilladelse findes ikke for brugeren '%-.48s' på vært '%-.64s'"
nla "Deze toegang (GRANT) is niet toegekend voor gebruiker '%-.48s' op host '%-.64s'"
eng "There is no such grant defined for user '%-.48s' on host '%-.64s'"
- jps "ユーザー '%-.48s' (ホスト '%-.64s' ã®ãƒ¦ãƒ¼ã‚¶ãƒ¼) ã¯è¨±å¯ã•ã‚Œã¦ã„ã¾ã›ã‚“",
est "Sellist õigust ei ole defineeritud kasutajale '%-.48s' masinast '%-.64s'"
fre "Un tel droit n'est pas défini pour l'utilisateur '%-.48s' sur l'hôte '%-.64s'"
ger "Für Benutzer '%-.48s' auf Host '%-.64s' gibt es keine solche Berechtigung"
hun "A '%-.48s' felhasznalonak nincs ilyen joga a '%-.64s' host-on"
ita "GRANT non definita per l'utente '%-.48s' dalla macchina '%-.64s'"
- jpn "ユーザー '%-.48s' (ホスト '%-.64s' ã®ãƒ¦ãƒ¼ã‚¶ãƒ¼) ã¯è¨±å¯ã•ã‚Œã¦ã„ã¾ã›ã‚“"
+ jpn "ユーザー '%-.48s' (ホスト '%-.64s' 上) ã¯è¨±å¯ã•ã‚Œã¦ã„ã¾ã›ã‚“。"
kor "ì‚¬ìš©ìž '%-.48s' (호스트 '%-.64s')를 위하여 ì •ì˜ëœ 그런 승ì¸ì€ 없습니다."
por "Não existe tal permissão (grant) definida para o usuário '%-.48s' no 'host' '%-.64s'"
rum "Nu exista un astfel de grant definit pentru utilzatorul '%-.48s' de pe host-ul '%-.64s'"
@@ -3243,7 +3177,7 @@ ER_NONEXISTING_GRANT 42000
swe "Det finns inget privilegium definierat för användare '%-.48s' på '%-.64s'"
ukr "Повноважень не визначено Ð´Ð»Ñ ÐºÐ¾Ñ€Ð¸Ñтувача '%-.48s' з хоÑту '%-.64s'"
ER_TABLEACCESS_DENIED_ERROR 42000
- cze "%-.32s p-Bříkaz nepřístupný pro uživatele: '%s'@'%s' pro tabulku '%-.192s'"
+ cze "%-.32s příkaz nepřístupný pro uživatele: '%s'@'%s' pro tabulku '%-.192s'"
dan "%-.32s-kommandoen er ikke tilladt for brugeren '%s'@'%s' for tabellen '%-.192s'"
nla "%-.32s commando geweigerd voor gebruiker: '%s'@'%s' voor tabel '%-.192s'"
eng "%-.32s command denied to user '%s'@'%s' for table '%-.192s'"
@@ -3263,7 +3197,7 @@ ER_TABLEACCESS_DENIED_ERROR 42000
swe "%-.32s ej tillåtet för '%s'@'%s' för tabell '%-.192s'"
ukr "%-.32s команда заборонена кориÑтувачу: '%s'@'%s' у таблиці '%-.192s'"
ER_COLUMNACCESS_DENIED_ERROR 42000
- cze "%-.32s p-Bříkaz nepřístupný pro uživatele: '%s'@'%s' pro sloupec '%-.192s' v tabulce '%-.192s'"
+ cze "%-.32s příkaz nepřístupný pro uživatele: '%s'@'%s' pro sloupec '%-.192s' v tabulce '%-.192s'"
dan "%-.32s-kommandoen er ikke tilladt for brugeren '%s'@'%s' for kolonne '%-.192s' in tabellen '%-.192s'"
nla "%-.32s commando geweigerd voor gebruiker: '%s'@'%s' voor kolom '%-.192s' in tabel '%-.192s'"
eng "%-.32s command denied to user '%s'@'%s' for column '%-.192s' in table '%-.192s'"
@@ -3283,7 +3217,7 @@ ER_COLUMNACCESS_DENIED_ERROR 42000
swe "%-.32s ej tillåtet för '%s'@'%s' för kolumn '%-.192s' i tabell '%-.192s'"
ukr "%-.32s команда заборонена кориÑтувачу: '%s'@'%s' Ð´Ð»Ñ ÑÑ‚Ð¾Ð²Ð±Ñ†Ñ '%-.192s' у таблиці '%-.192s'"
ER_ILLEGAL_GRANT_FOR_TABLE 42000
- cze "Neplatn-Bý příkaz GRANT/REVOKE. Prosím, pÅ™eÄtÄ›te si v manuálu, jaká privilegia je možné použít."
+ cze "Neplatný příkaz GRANT/REVOKE. Prosím, pÅ™eÄtÄ›te si v manuálu, jaká privilegia je možné použít."
dan "Forkert GRANT/REVOKE kommando. Se i brugervejledningen hvilke privilegier der kan specificeres."
nla "Foutief GRANT/REVOKE commando. Raadpleeg de handleiding welke priveleges gebruikt kunnen worden."
eng "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used"
@@ -3293,7 +3227,7 @@ ER_ILLEGAL_GRANT_FOR_TABLE 42000
greek "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used."
hun "Ervenytelen GRANT/REVOKE parancs. Kerem, nezze meg a kezikonyvben, milyen jogok lehetsegesek"
ita "Comando GRANT/REVOKE illegale. Prego consultare il manuale per sapere quali privilegi possono essere usati."
- jpn "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used."
+ jpn "ä¸æ­£ãª GRANT/REVOKE コマンドã§ã™ã€‚ã©ã®æ¨©é™ã§åˆ©ç”¨å¯èƒ½ã‹ã¯ãƒžãƒ‹ãƒ¥ã‚¢ãƒ«ã‚’å‚ç…§ã—ã¦ä¸‹ã•ã„。"
kor "ìž˜ëª»ëœ GRANT/REVOKE 명령. ì–´ë–¤ 권리와 승ì¸ì´ 사용ë˜ì–´ 질 수 있는지 ë©”ë‰´ì–¼ì„ ë³´ì‹œì˜¤."
nor "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used."
norwegian-ny "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used."
@@ -3307,7 +3241,7 @@ ER_ILLEGAL_GRANT_FOR_TABLE 42000
swe "Felaktigt GRANT-privilegium använt"
ukr "Хибна GRANT/REVOKE команда; прочитайте документацію ÑтоÑовно того, Ñкі права можна викориÑтовувати"
ER_GRANT_WRONG_HOST_OR_USER 42000
- cze "Argument p-Bříkazu GRANT uživatel nebo stroj je příliš dlouhý"
+ cze "Argument příkazu GRANT uživatel nebo stroj je příliš dlouhý"
dan "Værts- eller brugernavn for langt til GRANT"
nla "De host of gebruiker parameter voor GRANT is te lang"
eng "The host or user argument to GRANT is too long"
@@ -3316,6 +3250,7 @@ ER_GRANT_WRONG_HOST_OR_USER 42000
ger "Das Host- oder User-Argument für GRANT ist zu lang"
hun "A host vagy felhasznalo argumentuma tul hosszu a GRANT parancsban"
ita "L'argomento host o utente per la GRANT e` troppo lungo"
+ jpn "GRANTコマンドã¸ã®ã€ãƒ›ã‚¹ãƒˆåやユーザーåãŒé•·ã™ãŽã¾ã™ã€‚"
kor "승ì¸(GRANT)ì„ ìœ„í•˜ì—¬ 사용한 사용ìžë‚˜ í˜¸ìŠ¤íŠ¸ì˜ ê°’ë“¤ì´ ë„ˆë¬´ ê¹ë‹ˆë‹¤."
por "Argumento de 'host' ou de usuário para o GRANT é longo demais"
rum "Argumentul host-ului sau utilizatorului pentru GRANT e prea lung"
@@ -3334,7 +3269,7 @@ ER_NO_SUCH_TABLE 42S02
ger "Tabelle '%-.192s.%-.192s' existiert nicht"
hun "A '%-.192s.%-.192s' tabla nem letezik"
ita "La tabella '%-.192s.%-.192s' non esiste"
- jpn "Table '%-.192s.%-.192s' doesn't exist"
+ jpn "表 '%-.192s.%-.192s' ã¯å­˜åœ¨ã—ã¾ã›ã‚“。"
kor "í…Œì´ë¸” '%-.192s.%-.192s' 는 존재하지 않습니다."
nor "Table '%-.192s.%-.192s' doesn't exist"
norwegian-ny "Table '%-.192s.%-.192s' doesn't exist"
@@ -3348,7 +3283,7 @@ ER_NO_SUCH_TABLE 42S02
swe "Det finns ingen tabell som heter '%-.192s.%-.192s'"
ukr "Ð¢Ð°Ð±Ð»Ð¸Ñ†Ñ '%-.192s.%-.192s' не Ñ–Ñнує"
ER_NONEXISTING_TABLE_GRANT 42000
- cze "Neexistuje odpov-Bídající grant pro uživatele '%-.48s' na stroji '%-.64s' pro tabulku '%-.192s'"
+ cze "Neexistuje odpovídající grant pro uživatele '%-.48s' na stroji '%-.64s' pro tabulku '%-.192s'"
dan "Denne tilladelse eksisterer ikke for brugeren '%-.48s' på vært '%-.64s' for tabellen '%-.192s'"
nla "Deze toegang (GRANT) is niet toegekend voor gebruiker '%-.48s' op host '%-.64s' op tabel '%-.192s'"
eng "There is no such grant defined for user '%-.48s' on host '%-.64s' on table '%-.192s'"
@@ -3357,6 +3292,7 @@ ER_NONEXISTING_TABLE_GRANT 42000
ger "Eine solche Berechtigung ist für User '%-.48s' auf Host '%-.64s' an Tabelle '%-.192s' nicht definiert"
hun "A '%-.48s' felhasznalo szamara a '%-.64s' host '%-.192s' tablajaban ez a parancs nem engedelyezett"
ita "GRANT non definita per l'utente '%-.48s' dalla macchina '%-.64s' sulla tabella '%-.192s'"
+ jpn "ユーザー '%-.48s' (ホスト '%-.64s' 上) ã®è¡¨ '%-.192s' ã¸ã®æ¨©é™ã¯å®šç¾©ã•ã‚Œã¦ã„ã¾ã›ã‚“。"
kor "ì‚¬ìš©ìž '%-.48s'(호스트 '%-.64s')는 í…Œì´ë¸” '%-.192s'를 사용하기 위하여 ì •ì˜ëœ 승ì¸ì€ 없습니다. "
por "Não existe tal permissão (grant) definido para o usuário '%-.48s' no 'host' '%-.64s', na tabela '%-.192s'"
rum "Nu exista un astfel de privilegiu (grant) definit pentru utilizatorul '%-.48s' de pe host-ul '%-.64s' pentru tabela '%-.192s'"
@@ -3366,7 +3302,7 @@ ER_NONEXISTING_TABLE_GRANT 42000
swe "Det finns inget privilegium definierat för användare '%-.48s' på '%-.64s' för tabell '%-.192s'"
ukr "Повноважень не визначено Ð´Ð»Ñ ÐºÐ¾Ñ€Ð¸Ñтувача '%-.48s' з хоÑту '%-.64s' Ð´Ð»Ñ Ñ‚Ð°Ð±Ð»Ð¸Ñ†Ñ– '%-.192s'"
ER_NOT_ALLOWED_COMMAND 42000
- cze "Pou-Bžitý příkaz není v této verzi MariaDB povolen"
+ cze "Použitý příkaz není v této verzi MariaDB povolen"
dan "Den brugte kommando er ikke tilladt med denne udgave af MariaDB"
nla "Het used commando is niet toegestaan in deze MariaDB versie"
eng "The used command is not allowed with this MariaDB version"
@@ -3375,6 +3311,7 @@ ER_NOT_ALLOWED_COMMAND 42000
ger "Der verwendete Befehl ist in dieser MariaDB-Version nicht zulässig"
hun "A hasznalt parancs nem engedelyezett ebben a MariaDB verzioban"
ita "Il comando utilizzato non e` supportato in questa versione di MariaDB"
+ jpn "ã“ã®MariaDBãƒãƒ¼ã‚¸ãƒ§ãƒ³ã§ã¯åˆ©ç”¨ã§ããªã„コマンドã§ã™ã€‚"
kor "ì‚¬ìš©ëœ ëª…ë ¹ì€ í˜„ìž¬ì˜ MariaDB 버젼ì—서는 ì´ìš©ë˜ì§€ 않습니다."
por "Comando usado não é permitido para esta versão do MariaDB"
rum "Comanda folosita nu este permisa pentru aceasta versiune de MariaDB"
@@ -3384,7 +3321,7 @@ ER_NOT_ALLOWED_COMMAND 42000
swe "Du kan inte använda detta kommando med denna MariaDB version"
ukr "ВикориÑтовувана команда не дозволена у цій верÑÑ–Ñ— MariaDB"
ER_SYNTAX_ERROR 42000
- cze "Va-Bše syntaxe je nějaká divná"
+ cze "Vaše syntaxe je nějaká divná"
dan "Der er en fejl i SQL syntaksen"
nla "Er is iets fout in de gebruikte syntax"
eng "You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use"
@@ -3394,7 +3331,7 @@ ER_SYNTAX_ERROR 42000
greek "You have an error in your SQL syntax"
hun "Szintaktikai hiba"
ita "Errore di sintassi nella query SQL"
- jpn "Something is wrong in your syntax"
+ jpn "SQL構文エラーã§ã™ã€‚ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã«å¯¾å¿œã™ã‚‹ãƒžãƒ‹ãƒ¥ã‚¢ãƒ«ã‚’å‚ç…§ã—ã¦æ­£ã—ã„構文を確èªã—ã¦ãã ã•ã„。"
kor "SQL êµ¬ë¬¸ì— ì˜¤ë¥˜ê°€ 있습니다."
nor "Something is wrong in your syntax"
norwegian-ny "Something is wrong in your syntax"
@@ -3408,7 +3345,7 @@ ER_SYNTAX_ERROR 42000
swe "Du har något fel i din syntax"
ukr "У Ð²Ð°Ñ Ð¿Ð¾Ð¼Ð¸Ð»ÐºÐ° у ÑинтакÑиÑÑ– SQL"
ER_DELAYED_CANT_CHANGE_LOCK
- cze "Zpo-Bžděný insert threadu nebyl schopen získat požadovaný zámek pro tabulku %-.192s"
+ cze "Zpožděný insert threadu nebyl schopen získat požadovaný zámek pro tabulku %-.192s"
dan "Forsinket indsættelse tråden (delayed insert thread) kunne ikke opnå lås på tabellen %-.192s"
nla "'Delayed insert' thread kon de aangevraagde 'lock' niet krijgen voor tabel %-.192s"
eng "Delayed insert thread couldn't get requested lock for table %-.192s"
@@ -3417,6 +3354,7 @@ ER_DELAYED_CANT_CHANGE_LOCK
ger "Verzögerter (DELAYED) Einfüge-Thread konnte die angeforderte Sperre für Tabelle '%-.192s' nicht erhalten"
hun "A kesleltetett beillesztes (delayed insert) thread nem kapott zatolast a %-.192s tablahoz"
ita "Il thread di inserimento ritardato non riesce ad ottenere il lock per la tabella %-.192s"
+ jpn "'Delayed insert'スレッドãŒè¡¨ '%-.192s' ã®ãƒ­ãƒƒã‚¯ã‚’å–å¾—ã§ãã¾ã›ã‚“ã§ã—ãŸã€‚"
kor "ì§€ì—°ëœ insert 쓰레드가 í…Œì´ë¸” %-.192sì˜ ìš”êµ¬ëœ ë½í‚¹ì„ 처리할 수 없었습니다."
por "'Thread' de inserção retardada (atrasada) pois não conseguiu obter a trava solicitada para tabela '%-.192s'"
rum "Thread-ul pentru inserarea aminata nu a putut obtine lacatul (lock) pentru tabela %-.192s"
@@ -3426,7 +3364,7 @@ ER_DELAYED_CANT_CHANGE_LOCK
swe "DELAYED INSERT-tråden kunde inte låsa tabell '%-.192s'"
ukr "Гілка Ð´Ð»Ñ INSERT DELAYED не може отримати Ð±Ð»Ð¾ÐºÑƒÐ²Ð°Ð½Ð½Ñ Ð´Ð»Ñ Ñ‚Ð°Ð±Ð»Ð¸Ñ†Ñ– %-.192s"
ER_TOO_MANY_DELAYED_THREADS
- cze "P-Bříliš mnoho zpožděných threadů"
+ cze "Příliš mnoho zpožděných threadů"
dan "For mange slettede tråde (threads) i brug"
nla "Te veel 'delayed' threads in gebruik"
eng "Too many delayed threads in use"
@@ -3435,6 +3373,7 @@ ER_TOO_MANY_DELAYED_THREADS
ger "Zu viele verzögerte (DELAYED) Threads in Verwendung"
hun "Tul sok kesletetett thread (delayed)"
ita "Troppi threads ritardati in uso"
+ jpn "'Delayed insert'スレッドãŒå¤šã™ãŽã¾ã™ã€‚"
kor "너무 ë§Žì€ ì§€ì—° 쓰레드를 사용하고 있습니다."
por "Excesso de 'threads' retardadas (atrasadas) em uso"
rum "Prea multe threaduri aminate care sint in uz"
@@ -3444,7 +3383,7 @@ ER_TOO_MANY_DELAYED_THREADS
swe "Det finns redan 'max_delayed_threads' trådar i använding"
ukr "Забагато затриманих гілок викориÑтовуєтьÑÑ"
ER_ABORTING_CONNECTION 08S01
- cze "Zru-Bšeno spojení %ld do databáze: '%-.192s' uživatel: '%-.48s' (%-.64s)"
+ cze "Zrušeno spojení %ld do databáze: '%-.192s' uživatel: '%-.48s' (%-.64s)"
dan "Afbrudt forbindelse %ld til database: '%-.192s' bruger: '%-.48s' (%-.64s)"
nla "Afgebroken verbinding %ld naar db: '%-.192s' gebruiker: '%-.48s' (%-.64s)"
eng "Aborted connection %ld to db: '%-.192s' user: '%-.48s' (%-.64s)"
@@ -3453,7 +3392,7 @@ ER_ABORTING_CONNECTION 08S01
ger "Abbruch der Verbindung %ld zur Datenbank '%-.192s'. Benutzer: '%-.48s' (%-.64s)"
hun "Megszakitott kapcsolat %ld db: '%-.192s' adatbazishoz, felhasznalo: '%-.48s' (%-.64s)"
ita "Interrotta la connessione %ld al db: '%-.192s' utente: '%-.48s' (%-.64s)"
- jpn "Aborted connection %ld to db: '%-.192s' user: '%-.48s' (%-.64s)"
+ jpn "接続 %ld ãŒä¸­æ–­ã•ã‚Œã¾ã—ãŸã€‚データベース: '%-.192s' ユーザー: '%-.48s' (%-.64s)"
kor "ë°ì´íƒ€ë² ì´ìŠ¤ ì ‘ì†ì„ 위한 ì—°ê²° %ldê°€ ì¤‘ë‹¨ë¨ : '%-.192s' 사용ìž: '%-.48s' (%-.64s)"
nor "Aborted connection %ld to db: '%-.192s' user: '%-.48s' (%-.64s)"
norwegian-ny "Aborted connection %ld to db: '%-.192s' user: '%-.48s' (%-.64s)"
@@ -3467,7 +3406,7 @@ ER_ABORTING_CONNECTION 08S01
swe "Avbröt länken för tråd %ld till db '%-.192s', användare '%-.48s' (%-.64s)"
ukr "Перервано з'Ñ”Ð´Ð½Ð°Ð½Ð½Ñ %ld до бази данних: '%-.192s' кориÑтувача: '%-.48s' (%-.64s)"
ER_NET_PACKET_TOO_LARGE 08S01
- cze "Zji-Bštěn příchozí packet delší než 'max_allowed_packet'"
+ cze "Zjištěn příchozí packet delší než 'max_allowed_packet'"
dan "Modtog en datapakke som var større end 'max_allowed_packet'"
nla "Groter pakket ontvangen dan 'max_allowed_packet'"
eng "Got a packet bigger than 'max_allowed_packet' bytes"
@@ -3476,6 +3415,7 @@ ER_NET_PACKET_TOO_LARGE 08S01
ger "Empfangenes Paket ist größer als 'max_allowed_packet' Bytes"
hun "A kapott csomag nagyobb, mint a maximalisan engedelyezett: 'max_allowed_packet'"
ita "Ricevuto un pacchetto piu` grande di 'max_allowed_packet'"
+ jpn "'max_allowed_packet'よりも大ããªãƒ‘ケットをå—ä¿¡ã—ã¾ã—ãŸã€‚"
kor "'max_allowed_packet'보다 ë”í° íŒ¨í‚·ì„ ë°›ì•˜ìŠµë‹ˆë‹¤."
por "Obteve um pacote maior do que a taxa máxima de pacotes definida (max_allowed_packet)"
rum "Un packet mai mare decit 'max_allowed_packet' a fost primit"
@@ -3485,7 +3425,7 @@ ER_NET_PACKET_TOO_LARGE 08S01
swe "Kommunkationspaketet är större än 'max_allowed_packet'"
ukr "Отримано пакет більший ніж max_allowed_packet"
ER_NET_READ_ERROR_FROM_PIPE 08S01
- cze "Zji-BÅ¡tÄ›na chyba pÅ™i Ätení z roury spojení"
+ cze "ZjiÅ¡tÄ›na chyba pÅ™i Ätení z roury spojení"
dan "Fik læsefejl fra forbindelse (connection pipe)"
nla "Kreeg leesfout van de verbindings pipe"
eng "Got a read error from the connection pipe"
@@ -3494,6 +3434,7 @@ ER_NET_READ_ERROR_FROM_PIPE 08S01
ger "Lese-Fehler bei einer Verbindungs-Pipe"
hun "Olvasasi hiba a kapcsolat soran"
ita "Rilevato un errore di lettura dalla pipe di connessione"
+ jpn "接続パイプã®èª­ã¿è¾¼ã¿ã‚¨ãƒ©ãƒ¼ã§ã™ã€‚"
kor "ì—°ê²° 파ì´í”„로부터 ì—러가 ë°œìƒí•˜ì˜€ìŠµë‹ˆë‹¤."
por "Obteve um erro de leitura no 'pipe' da conexão"
rum "Eroare la citire din cauza lui 'connection pipe'"
@@ -3503,7 +3444,7 @@ ER_NET_READ_ERROR_FROM_PIPE 08S01
swe "Fick läsfel från klienten vid läsning från 'PIPE'"
ukr "Отримано помилку Ñ‡Ð¸Ñ‚Ð°Ð½Ð½Ñ Ð· комунікаційного каналу"
ER_NET_FCNTL_ERROR 08S01
- cze "Zji-Bštěna chyba fcntl()"
+ cze "Zjištěna chyba fcntl()"
dan "Fik fejlmeddelelse fra fcntl()"
nla "Kreeg fout van fcntl()"
eng "Got an error from fcntl()"
@@ -3512,6 +3453,7 @@ ER_NET_FCNTL_ERROR 08S01
ger "fcntl() lieferte einen Fehler"
hun "Hiba a fcntl() fuggvenyben"
ita "Rilevato un errore da fcntl()"
+ jpn "fcntl()ãŒã‚¨ãƒ©ãƒ¼ã‚’è¿”ã—ã¾ã—ãŸã€‚"
kor "fcntl() 함수로부터 ì—러가 ë°œìƒí•˜ì˜€ìŠµë‹ˆë‹¤."
por "Obteve um erro em fcntl()"
rum "Eroare obtinuta de la fcntl()"
@@ -3521,7 +3463,7 @@ ER_NET_FCNTL_ERROR 08S01
swe "Fick fatalt fel från 'fcntl()'"
ukr "Отримано помилкку від fcntl()"
ER_NET_PACKETS_OUT_OF_ORDER 08S01
- cze "P-Bříchozí packety v chybném pořadí"
+ cze "Příchozí packety v chybném pořadí"
dan "Modtog ikke datapakker i korrekt rækkefølge"
nla "Pakketten in verkeerde volgorde ontvangen"
eng "Got packets out of order"
@@ -3530,6 +3472,7 @@ ER_NET_PACKETS_OUT_OF_ORDER 08S01
ger "Pakete nicht in der richtigen Reihenfolge empfangen"
hun "Helytelen sorrendben erkezett adatcsomagok"
ita "Ricevuti pacchetti non in ordine"
+ jpn "ä¸æ­£ãªé †åºã®ãƒ‘ケットをå—ä¿¡ã—ã¾ã—ãŸã€‚"
kor "순서가 맞지않는 íŒ¨í‚·ì„ ë°›ì•˜ìŠµë‹ˆë‹¤."
por "Obteve pacotes fora de ordem"
rum "Packets care nu sint ordonati au fost gasiti"
@@ -3539,7 +3482,7 @@ ER_NET_PACKETS_OUT_OF_ORDER 08S01
swe "Kommunikationspaketen kom i fel ordning"
ukr "Отримано пакети у неналежному порÑдку"
ER_NET_UNCOMPRESS_ERROR 08S01
- cze "Nemohu rozkomprimovat komunika-BÄní packet"
+ cze "Nemohu rozkomprimovat komunikaÄní packet"
dan "Kunne ikke dekomprimere kommunikations-pakke (communication packet)"
nla "Communicatiepakket kon niet worden gedecomprimeerd"
eng "Couldn't uncompress communication packet"
@@ -3548,6 +3491,7 @@ ER_NET_UNCOMPRESS_ERROR 08S01
ger "Kommunikationspaket lässt sich nicht entpacken"
hun "A kommunikacios adatcsomagok nem tomorithetok ki"
ita "Impossibile scompattare i pacchetti di comunicazione"
+ jpn "圧縮パケットã®å±•é–‹ãŒã§ãã¾ã›ã‚“ã§ã—ãŸã€‚"
kor "통신 íŒ¨í‚·ì˜ ì••ì¶•í•´ì œë¥¼ í•  수 없었습니다."
por "Não conseguiu descomprimir pacote de comunicação"
rum "Nu s-a putut decompresa pachetul de comunicatie (communication packet)"
@@ -3557,7 +3501,7 @@ ER_NET_UNCOMPRESS_ERROR 08S01
swe "Kunde inte packa up kommunikationspaketet"
ukr "Ðе можу декомпреÑувати комунікаційний пакет"
ER_NET_READ_ERROR 08S01
- cze "Zji-BÅ¡tÄ›na chyba pÅ™i Ätení komunikaÄního packetu"
+ cze "ZjiÅ¡tÄ›na chyba pÅ™i Ätení komunikaÄního packetu"
dan "Fik fejlmeddelelse ved læsning af kommunikations-pakker (communication packets)"
nla "Fout bij het lezen van communicatiepakketten"
eng "Got an error reading communication packets"
@@ -3566,6 +3510,7 @@ ER_NET_READ_ERROR 08S01
ger "Fehler beim Lesen eines Kommunikationspakets"
hun "HIba a kommunikacios adatcsomagok olvasasa soran"
ita "Rilevato un errore ricevendo i pacchetti di comunicazione"
+ jpn "パケットã®å—ä¿¡ã§ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¾ã—ãŸã€‚"
kor "통신 íŒ¨í‚·ì„ ì½ëŠ” 중 오류가 ë°œìƒí•˜ì˜€ìŠµë‹ˆë‹¤."
por "Obteve um erro na leitura de pacotes de comunicação"
rum "Eroare obtinuta citind pachetele de comunicatie (communication packets)"
@@ -3575,7 +3520,7 @@ ER_NET_READ_ERROR 08S01
swe "Fick ett fel vid läsning från klienten"
ukr "Отримано помилку Ñ‡Ð¸Ñ‚Ð°Ð½Ð½Ñ ÐºÐ¾Ð¼ÑƒÐ½Ñ–ÐºÐ°Ñ†Ñ–Ð¹Ð½Ð¸Ñ… пакетів"
ER_NET_READ_INTERRUPTED 08S01
- cze "Zji-BÅ¡tÄ›n timeout pÅ™i Ätení komunikaÄního packetu"
+ cze "ZjiÅ¡tÄ›n timeout pÅ™i Ätení komunikaÄního packetu"
dan "Timeout-fejl ved læsning af kommunukations-pakker (communication packets)"
nla "Timeout bij het lezen van communicatiepakketten"
eng "Got timeout reading communication packets"
@@ -3584,6 +3529,7 @@ ER_NET_READ_INTERRUPTED 08S01
ger "Zeitüberschreitung beim Lesen eines Kommunikationspakets"
hun "Idotullepes a kommunikacios adatcsomagok olvasasa soran"
ita "Rilevato un timeout ricevendo i pacchetti di comunicazione"
+ jpn "パケットã®å—ä¿¡ã§ã‚¿ã‚¤ãƒ ã‚¢ã‚¦ãƒˆãŒç™ºç”Ÿã—ã¾ã—ãŸã€‚"
kor "통신 íŒ¨í‚·ì„ ì½ëŠ” 중 timeoutì´ ë°œìƒí•˜ì˜€ìŠµë‹ˆë‹¤."
por "Obteve expiração de tempo (timeout) na leitura de pacotes de comunicação"
rum "Timeout obtinut citind pachetele de comunicatie (communication packets)"
@@ -3593,7 +3539,7 @@ ER_NET_READ_INTERRUPTED 08S01
swe "Fick 'timeout' vid läsning från klienten"
ukr "Отримано затримку Ñ‡Ð¸Ñ‚Ð°Ð½Ð½Ñ ÐºÐ¾Ð¼ÑƒÐ½Ñ–ÐºÐ°Ñ†Ñ–Ð¹Ð½Ð¸Ñ… пакетів"
ER_NET_ERROR_ON_WRITE 08S01
- cze "Zji-BÅ¡tÄ›na chyba pÅ™i zápisu komunikaÄního packetu"
+ cze "ZjiÅ¡tÄ›na chyba pÅ™i zápisu komunikaÄního packetu"
dan "Fik fejlmeddelelse ved skrivning af kommunukations-pakker (communication packets)"
nla "Fout bij het schrijven van communicatiepakketten"
eng "Got an error writing communication packets"
@@ -3602,6 +3548,7 @@ ER_NET_ERROR_ON_WRITE 08S01
ger "Fehler beim Schreiben eines Kommunikationspakets"
hun "Hiba a kommunikacios csomagok irasa soran"
ita "Rilevato un errore inviando i pacchetti di comunicazione"
+ jpn "パケットã®é€ä¿¡ã§ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¾ã—ãŸã€‚"
kor "통신 íŒ¨í‚·ì„ ê¸°ë¡í•˜ëŠ” 중 오류가 ë°œìƒí•˜ì˜€ìŠµë‹ˆë‹¤."
por "Obteve um erro na escrita de pacotes de comunicação"
rum "Eroare in scrierea pachetelor de comunicatie (communication packets)"
@@ -3611,7 +3558,7 @@ ER_NET_ERROR_ON_WRITE 08S01
swe "Fick ett fel vid skrivning till klienten"
ukr "Отримано помилку запиÑу комунікаційних пакетів"
ER_NET_WRITE_INTERRUPTED 08S01
- cze "Zji-BÅ¡tÄ›n timeout pÅ™i zápisu komunikaÄního packetu"
+ cze "ZjiÅ¡tÄ›n timeout pÅ™i zápisu komunikaÄního packetu"
dan "Timeout-fejl ved skrivning af kommunukations-pakker (communication packets)"
nla "Timeout bij het schrijven van communicatiepakketten"
eng "Got timeout writing communication packets"
@@ -3620,6 +3567,7 @@ ER_NET_WRITE_INTERRUPTED 08S01
ger "Zeitüberschreitung beim Schreiben eines Kommunikationspakets"
hun "Idotullepes a kommunikacios csomagok irasa soran"
ita "Rilevato un timeout inviando i pacchetti di comunicazione"
+ jpn "パケットã®é€ä¿¡ã§ã‚¿ã‚¤ãƒ ã‚¢ã‚¦ãƒˆãŒç™ºç”Ÿã—ã¾ã—ãŸã€‚"
kor "통신 íŒ¨í‚·ì„ ê¸°ë¡í•˜ëŠ” 중 timeoutì´ ë°œìƒí•˜ì˜€ìŠµë‹ˆë‹¤."
por "Obteve expiração de tempo ('timeout') na escrita de pacotes de comunicação"
rum "Timeout obtinut scriind pachetele de comunicatie (communication packets)"
@@ -3629,7 +3577,7 @@ ER_NET_WRITE_INTERRUPTED 08S01
swe "Fick 'timeout' vid skrivning till klienten"
ukr "Отримано затримку запиÑу комунікаційних пакетів"
ER_TOO_LONG_STRING 42000
- cze "V-Býsledný řetězec je delší než 'max_allowed_packet'"
+ cze "Výsledný řetězec je delší než 'max_allowed_packet'"
dan "Strengen med resultater er større end 'max_allowed_packet'"
nla "Resultaat string is langer dan 'max_allowed_packet'"
eng "Result string is longer than 'max_allowed_packet' bytes"
@@ -3638,6 +3586,7 @@ ER_TOO_LONG_STRING 42000
ger "Ergebnis-String ist länger als 'max_allowed_packet' Bytes"
hun "Ez eredmeny sztring nagyobb, mint a lehetseges maximum: 'max_allowed_packet'"
ita "La stringa di risposta e` piu` lunga di 'max_allowed_packet'"
+ jpn "çµæžœã®æ–‡å­—列㌠'max_allowed_packet' よりも大ãã„ã§ã™ã€‚"
por "'String' resultante é mais longa do que 'max_allowed_packet'"
rum "Sirul rezultat este mai lung decit 'max_allowed_packet'"
rus "Ð ÐµÐ·ÑƒÐ»ÑŒÑ‚Ð¸Ñ€ÑƒÑŽÑ‰Ð°Ñ Ñтрока больше, чем 'max_allowed_packet'"
@@ -3646,7 +3595,7 @@ ER_TOO_LONG_STRING 42000
swe "Resultatsträngen är längre än max_allowed_packet"
ukr "Строка результату довша ніж max_allowed_packet"
ER_TABLE_CANT_HANDLE_BLOB 42000
- cze "Typ pou-Bžité tabulky (%s) nepodporuje BLOB/TEXT sloupce"
+ cze "Typ použité tabulky (%s) nepodporuje BLOB/TEXT sloupce"
dan "Denne tabeltype (%s) understøtter ikke brug af BLOB og TEXT kolonner"
nla "Het gebruikte tabel type (%s) ondersteunt geen BLOB/TEXT kolommen"
eng "Storage engine %s doesn't support BLOB/TEXT columns"
@@ -3663,7 +3612,7 @@ ER_TABLE_CANT_HANDLE_BLOB 42000
swe "Den använda tabelltypen (%s) kan inte hantera BLOB/TEXT-kolumner"
ukr "%s таблиці не підтримують BLOB/TEXT Ñтовбці"
ER_TABLE_CANT_HANDLE_AUTO_INCREMENT 42000
- cze "Typ pou-Bžité tabulky (%s) nepodporuje AUTO_INCREMENT sloupce"
+ cze "Typ použité tabulky (%s) nepodporuje AUTO_INCREMENT sloupce"
dan "Denne tabeltype understøtter (%s) ikke brug af AUTO_INCREMENT kolonner"
nla "Het gebruikte tabel type (%s) ondersteunt geen AUTO_INCREMENT kolommen"
eng "Storage engine %s doesn't support AUTO_INCREMENT columns"
@@ -3680,7 +3629,7 @@ ER_TABLE_CANT_HANDLE_AUTO_INCREMENT 42000
swe "Den använda tabelltypen (%s) kan inte hantera AUTO_INCREMENT-kolumner"
ukr "%s таблиці не підтримують AUTO_INCREMENT Ñтовбці"
ER_DELAYED_INSERT_TABLE_LOCKED
- cze "INSERT DELAYED nen-Bí možno s tabulkou '%-.192s' použít, protože je zamÄená pomocí LOCK TABLES"
+ cze "INSERT DELAYED není možno s tabulkou '%-.192s' použít, protože je zamÄená pomocí LOCK TABLES"
dan "INSERT DELAYED kan ikke bruges med tabellen '%-.192s', fordi tabellen er låst med LOCK TABLES"
nla "INSERT DELAYED kan niet worden gebruikt bij table '%-.192s', vanwege een 'lock met LOCK TABLES"
eng "INSERT DELAYED can't be used with table '%-.192s' because it is locked with LOCK TABLES"
@@ -3690,7 +3639,7 @@ ER_DELAYED_INSERT_TABLE_LOCKED
greek "INSERT DELAYED can't be used with table '%-.192s', because it is locked with LOCK TABLES"
hun "Az INSERT DELAYED nem hasznalhato a '%-.192s' tablahoz, mert a tabla zarolt (LOCK TABLES)"
ita "L'inserimento ritardato (INSERT DELAYED) non puo` essere usato con la tabella '%-.192s', perche` soggetta a lock da 'LOCK TABLES'"
- jpn "INSERT DELAYED can't be used with table '%-.192s', because it is locked with LOCK TABLES"
+ jpn "表 '%-.192s' ã¯LOCK TABLESã§ãƒ­ãƒƒã‚¯ã•ã‚Œã¦ã„ã‚‹ãŸã‚ã€INSERT DELAYEDを使用ã§ãã¾ã›ã‚“。"
kor "INSERT DELAYED can't be used with table '%-.192s', because it is locked with LOCK TABLES"
nor "INSERT DELAYED can't be used with table '%-.192s', because it is locked with LOCK TABLES"
norwegian-ny "INSERT DELAYED can't be used with table '%-.192s', because it is locked with LOCK TABLES"
@@ -3704,7 +3653,7 @@ ER_DELAYED_INSERT_TABLE_LOCKED
swe "INSERT DELAYED kan inte användas med tabell '%-.192s', emedan den är låst med LOCK TABLES"
ukr "INSERT DELAYED не може бути викориÑтано з таблицею '%-.192s', тому що Ñ—Ñ— заблоковано з LOCK TABLES"
ER_WRONG_COLUMN_NAME 42000
- cze "Nespr-Bávné jméno sloupce '%-.100s'"
+ cze "Nesprávné jméno sloupce '%-.100s'"
dan "Forkert kolonnenavn '%-.100s'"
nla "Incorrecte kolom naam '%-.100s'"
eng "Incorrect column name '%-.100s'"
@@ -3713,6 +3662,7 @@ ER_WRONG_COLUMN_NAME 42000
ger "Falscher Spaltenname '%-.100s'"
hun "Ervenytelen mezonev: '%-.100s'"
ita "Nome colonna '%-.100s' non corretto"
+ jpn "列å '%-.100s' ã¯ä¸æ­£ã§ã™ã€‚"
por "Nome de coluna '%-.100s' incorreto"
rum "Nume increct de coloana '%-.100s'"
rus "Ðеверное Ð¸Ð¼Ñ Ñтолбца '%-.100s'"
@@ -3726,7 +3676,7 @@ ER_WRONG_KEY_COLUMN 42000
rus "Обработчик таблиц %s не может проиндекÑировать Ñтолбец %`s"
ukr "Вказівник таблиц %s не може індекÑувати Ñтовбець %`s"
ER_WRONG_MRG_TABLE
- cze "V-Bšechny tabulky v MERGE tabulce nejsou definovány stejně"
+ cze "Všechny tabulky v MERGE tabulce nejsou definovány stejně"
dan "Tabellerne i MERGE er ikke defineret ens"
nla "Niet alle tabellen in de MERGE tabel hebben identieke gedefinities"
eng "Unable to open underlying table which is differently defined or of non-MyISAM type or doesn't exist"
@@ -3735,7 +3685,7 @@ ER_WRONG_MRG_TABLE
ger "Nicht alle Tabellen in der MERGE-Tabelle sind gleich definiert"
hun "A MERGE tablaban talalhato tablak definicioja nem azonos"
ita "Non tutte le tabelle nella tabella di MERGE sono definite in maniera identica"
- jpn "All tables in the MERGE table are not defined identically"
+ jpn "MERGE表ã®æ§‹æˆè¡¨ãŒã‚ªãƒ¼ãƒ—ンã§ãã¾ã›ã‚“。列定義ãŒç•°ãªã‚‹ã‹ã€MyISAM表ã§ã¯ãªã„ã‹ã€å­˜åœ¨ã—ã¾ã›ã‚“。"
kor "All tables in the MERGE table are not defined identically"
nor "All tables in the MERGE table are not defined identically"
norwegian-ny "All tables in the MERGE table are not defined identically"
@@ -3749,7 +3699,7 @@ ER_WRONG_MRG_TABLE
swe "Tabellerna i MERGE-tabellen är inte identiskt definierade"
ukr "Таблиці у MERGE TABLE мають різну Ñтруктуру"
ER_DUP_UNIQUE 23000
- cze "Kv-Bůli unique constraintu nemozu zapsat do tabulky '%-.192s'"
+ cze "Kvůli unique constraintu nemozu zapsat do tabulky '%-.192s'"
dan "Kan ikke skrive til tabellen '%-.192s' fordi det vil bryde CONSTRAINT regler"
nla "Kan niet opslaan naar table '%-.192s' vanwege 'unique' beperking"
eng "Can't write, because of unique constraint, to table '%-.192s'"
@@ -3757,6 +3707,7 @@ ER_DUP_UNIQUE 23000
fre "Écriture impossible à cause d'un index UNIQUE sur la table '%-.192s'"
ger "Schreiben in Tabelle '%-.192s' nicht möglich wegen einer Eindeutigkeitsbeschränkung (unique constraint)"
hun "A '%-.192s' nem irhato, az egyedi mezok miatt"
ita "Impossibile scrivere nella tabella '%-.192s' per limitazione di unicita`"
+ jpn "一æ„性制約é•åã®ãŸã‚ã€è¡¨ '%-.192s' ã«æ›¸ãè¾¼ã‚ã¾ã›ã‚“。"
por "Não pode gravar, devido à restrição UNIQUE, na tabela '%-.192s'"
rum "Nu pot scrie pe hard-drive, din cauza constraintului unic (unique constraint) pentru tabela '%-.192s'"
@@ -3766,7 +3717,7 @@ ER_DUP_UNIQUE 23000
swe "Kan inte skriva till tabell '%-.192s'; UNIQUE-test"
ukr "Ðе можу запиÑати до таблиці '%-.192s', з причини вимог унікальноÑÑ‚Ñ–"
ER_BLOB_KEY_WITHOUT_LENGTH 42000
- cze "BLOB sloupec '%-.192s' je pou-Bžit ve specifikaci klíÄe bez délky"
+ cze "BLOB sloupec '%-.192s' je použit ve specifikaci klíÄe bez délky"
dan "BLOB kolonnen '%-.192s' brugt i nøglespecifikation uden nøglelængde"
nla "BLOB kolom '%-.192s' gebruikt in zoeksleutel specificatie zonder zoeksleutel lengte"
eng "BLOB/TEXT column '%-.192s' used in key specification without a key length"
@@ -3776,7 +3727,7 @@ ER_BLOB_KEY_WITHOUT_LENGTH 42000
greek "BLOB column '%-.192s' used in key specification without a key length"
hun "BLOB mezo '%-.192s' hasznalt a mezo specifikacioban, a mezohossz megadasa nelkul"
ita "La colonna '%-.192s' di tipo BLOB e` usata in una chiave senza specificarne la lunghezza"
- jpn "BLOB column '%-.192s' used in key specification without a key length"
+ jpn "BLOB列 '%-.192s' をキーã«ä½¿ç”¨ã™ã‚‹ã«ã¯é•·ã•æŒ‡å®šãŒå¿…è¦ã§ã™ã€‚"
kor "BLOB column '%-.192s' used in key specification without a key length"
nor "BLOB column '%-.192s' used in key specification without a key length"
norwegian-ny "BLOB column '%-.192s' used in key specification without a key length"
@@ -3790,7 +3741,7 @@ ER_BLOB_KEY_WITHOUT_LENGTH 42000
swe "Du har inte angett någon nyckellängd för BLOB '%-.192s'"
ukr "Стовбець BLOB '%-.192s' викориÑтано у визначенні ключа без Ð²ÐºÐ°Ð·Ð°Ð½Ð½Ñ Ð´Ð¾Ð²Ð¶Ð¸Ð½Ð¸ ключа"
ER_PRIMARY_CANT_HAVE_NULL 42000
- cze "V-BÅ¡echny Äásti primárního klíÄe musejí být NOT NULL; pokud potÅ™ebujete NULL, použijte UNIQUE"
+ cze "VÅ¡echny Äásti primárního klíÄe musejí být NOT NULL; pokud potÅ™ebujete NULL, použijte UNIQUE"
dan "Alle dele af en PRIMARY KEY skal være NOT NULL; Hvis du skal bruge NULL i nøglen, brug UNIQUE istedet"
nla "Alle delen van een PRIMARY KEY moeten NOT NULL zijn; Indien u NULL in een zoeksleutel nodig heeft kunt u UNIQUE gebruiken"
eng "All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead"
@@ -3799,6 +3750,7 @@ ER_PRIMARY_CANT_HAVE_NULL 42000
ger "Alle Teile eines PRIMARY KEY müssen als NOT NULL definiert sein. Wenn NULL in einem Schlüssel benötigt wird, muss ein UNIQUE-Schlüssel verwendet werden"
hun "Az elsodleges kulcs teljes egeszeben csak NOT NULL tipusu lehet; Ha NULL mezot szeretne a kulcskent, hasznalja inkabb a UNIQUE-ot"
ita "Tutte le parti di una chiave primaria devono essere dichiarate NOT NULL; se necessitano valori NULL nelle chiavi utilizzare UNIQUE"
+ jpn "PRIMARY KEYã®åˆ—ã¯å…¨ã¦NOT NULLã§ãªã‘ã‚Œã°ã„ã‘ã¾ã›ã‚“。UNIQUE索引ã§ã‚ã‚Œã°NULLã‚’å«ã‚€ã“ã¨ãŒå¯èƒ½ã§ã™ã€‚"
por "Todas as partes de uma chave primária devem ser não-nulas. Se você precisou usar um valor nulo (NULL) em uma chave, use a cláusula UNIQUE em seu lugar"
rum "Toate partile unei chei primare (PRIMARY KEY) trebuie sa fie NOT NULL; Daca aveti nevoie de NULL in vreo cheie, folositi UNIQUE in schimb"
rus "Ð’Ñе чаÑти первичного ключа (PRIMARY KEY) должны быть определены как NOT NULL; ЕÑли вам нужна поддержка величин NULL в ключе, воÑпользуйтеÑÑŒ индекÑом UNIQUE"
@@ -3807,7 +3759,7 @@ ER_PRIMARY_CANT_HAVE_NULL 42000
swe "Alla delar av en PRIMARY KEY måste vara NOT NULL; Om du vill ha en nyckel med NULL, använd UNIQUE istället"
ukr "УÑÑ– чаÑтини PRIMARY KEY повинні бути NOT NULL; Якщо ви потребуєте NULL у ключі, ÑкориÑтайтеÑÑ UNIQUE"
ER_TOO_MANY_ROWS 42000
- cze "V-Býsledek obsahuje více než jeden řádek"
+ cze "Výsledek obsahuje více než jeden řádek"
dan "Resultatet bestod af mere end een række"
nla "Resultaat bevatte meer dan een rij"
eng "Result consisted of more than one row"
@@ -3816,6 +3768,7 @@ ER_TOO_MANY_ROWS 42000
ger "Ergebnis besteht aus mehr als einer Zeile"
hun "Az eredmeny tobb, mint egy sort tartalmaz"
ita "Il risultato consiste di piu` di una riga"
+ jpn "çµæžœãŒ2行以上ã§ã™ã€‚"
por "O resultado consistiu em mais do que uma linha"
rum "Resultatul constista din mai multe linii"
rus "Ð’ результате возвращена более чем одна Ñтрока"
@@ -3824,7 +3777,7 @@ ER_TOO_MANY_ROWS 42000
swe "Resultet bestod av mera än en rad"
ukr "Результат знаходитьÑÑ Ñƒ більше ніж одній Ñтроці"
ER_REQUIRES_PRIMARY_KEY 42000
- cze "Tento typ tabulky vy-Bžaduje primární klíÄ"
+ cze "Tento typ tabulky vyžaduje primární klíÄ"
dan "Denne tabeltype kræver en primærnøgle"
nla "Dit tabel type heeft een primaire zoeksleutel nodig"
eng "This table type requires a primary key"
@@ -3833,6 +3786,7 @@ ER_REQUIRES_PRIMARY_KEY 42000
ger "Dieser Tabellentyp benötigt einen Primärschlüssel (PRIMARY KEY)"
hun "Az adott tablatipushoz elsodleges kulcs hasznalata kotelezo"
ita "Questo tipo di tabella richiede una chiave primaria"
+ jpn "使用ã®ã‚¹ãƒˆãƒ¬ãƒ¼ã‚¸ã‚¨ãƒ³ã‚¸ãƒ³ã§ã¯ã€PRIMARY KEYãŒå¿…è¦ã§ã™ã€‚"
por "Este tipo de tabela requer uma chave primária"
rum "Aceast tip de tabela are nevoie de o cheie primara"
rus "Этот тип таблицы требует Ð¾Ð¿Ñ€ÐµÐ´ÐµÐ»ÐµÐ½Ð¸Ñ Ð¿ÐµÑ€Ð²Ð¸Ñ‡Ð½Ð¾Ð³Ð¾ ключа"
@@ -3841,7 +3795,7 @@ ER_REQUIRES_PRIMARY_KEY 42000
swe "Denna tabelltyp kräver en PRIMARY KEY"
ukr "Цей тип таблиці потребує первинного ключа"
ER_NO_RAID_COMPILED
- cze "Tato verze MariaDB nen-Bí zkompilována s podporou RAID"
+ cze "Tato verze MariaDB není zkompilována s podporou RAID"
dan "Denne udgave af MariaDB er ikke oversat med understøttelse af RAID"
nla "Deze versie van MariaDB is niet gecompileerd met RAID ondersteuning"
eng "This version of MariaDB is not compiled with RAID support"
@@ -3850,6 +3804,7 @@ ER_NO_RAID_COMPILED
ger "Diese MariaDB-Version ist nicht mit RAID-Unterstützung kompiliert"
hun "Ezen leforditott MariaDB verzio nem tartalmaz RAID support-ot"
ita "Questa versione di MYSQL non e` compilata con il supporto RAID"
+ jpn "ã“ã®ãƒãƒ¼ã‚¸ãƒ§ãƒ³ã®MySQLã¯RAIDサãƒãƒ¼ãƒˆã‚’å«ã‚ã¦ã‚³ãƒ³ãƒ‘イルã•ã‚Œã¦ã„ã¾ã›ã‚“。"
por "Esta versão do MariaDB não foi compilada com suporte a RAID"
rum "Aceasta versiune de MariaDB, nu a fost compilata cu suport pentru RAID"
rus "Эта верÑÐ¸Ñ MariaDB Ñкомпилирована без поддержки RAID"
@@ -3858,7 +3813,7 @@ ER_NO_RAID_COMPILED
swe "Denna version av MariaDB är inte kompilerad med RAID"
ukr "Ð¦Ñ Ð²ÐµÑ€ÑÑ–Ñ MariaDB не зкомпільована з підтримкою RAID"
ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE
- cze "Update tabulky bez WHERE s kl-BíÄem není v módu bezpeÄných update dovoleno"
+ cze "Update tabulky bez WHERE s klíÄem není v módu bezpeÄných update dovoleno"
dan "Du bruger sikker opdaterings modus ('safe update mode') og du forsøgte at opdatere en tabel uden en WHERE klausul, der gør brug af et KEY felt"
nla "U gebruikt 'safe update mode' en u probeerde een tabel te updaten zonder een WHERE met een KEY kolom"
eng "You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column"
@@ -3867,6 +3822,7 @@ ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE
ger "MariaDB läuft im sicheren Aktualisierungsmodus (safe update mode). Sie haben versucht, eine Tabelle zu aktualisieren, ohne in der WHERE-Klausel ein KEY-Feld anzugeben"
hun "On a biztonsagos update modot hasznalja, es WHERE that uses a KEY column"
ita "In modalita` 'safe update' si e` cercato di aggiornare una tabella senza clausola WHERE su una chiave"
+ jpn "'safe update mode'ã§ã€ç´¢å¼•ã‚’利用ã™ã‚‹WHEREå¥ã®ç„¡ã„更新処ç†ã‚’実行ã—よã†ã¨ã—ã¾ã—ãŸã€‚"
por "Você está usando modo de atualização seguro e tentou atualizar uma tabela sem uma cláusula WHERE que use uma coluna chave"
rus "Ð’Ñ‹ работаете в режиме безопаÑных обновлений (safe update mode) и попробовали изменить таблицу без иÑÐ¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ð½Ð¸Ñ ÐºÐ»ÑŽÑ‡ÐµÐ²Ð¾Ð³Ð¾ Ñтолбца в чаÑти WHERE"
serbian "Vi koristite safe update mod servera, a probali ste da promenite podatke bez 'WHERE' komande koja koristi kolonu kljuÄa"
@@ -3874,7 +3830,7 @@ ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE
swe "Du använder 'säker uppdateringsmod' och försökte uppdatera en tabell utan en WHERE-sats som använder sig av en nyckel"
ukr "Ви у режимі безпечного Ð¾Ð½Ð¾Ð²Ð»ÐµÐ½Ð½Ñ Ñ‚Ð° намагаєтеÑÑŒ оновити таблицю без оператора WHERE, що викориÑтовує KEY Ñтовбець"
ER_KEY_DOES_NOT_EXITS 42000 S1009
- cze "Kl-BÃ­Ä '%-.192s' v tabulce '%-.192s' neexistuje"
+ cze "KlÃ­Ä '%-.192s' v tabulce '%-.192s' neexistuje"
dan "Nøglen '%-.192s' eksisterer ikke i tabellen '%-.192s'"
nla "Zoeksleutel '%-.192s' bestaat niet in tabel '%-.192s'"
eng "Key '%-.192s' doesn't exist in table '%-.192s'"
@@ -3883,6 +3839,7 @@ ER_KEY_DOES_NOT_EXITS 42000 S1009
ger "Schlüssel '%-.192s' existiert in der Tabelle '%-.192s' nicht"
hun "A '%-.192s' kulcs nem letezik a '%-.192s' tablaban"
ita "La chiave '%-.192s' non esiste nella tabella '%-.192s'"
+ jpn "索引 '%-.192s' ã¯è¡¨ '%-.192s' ã«ã¯å­˜åœ¨ã—ã¾ã›ã‚“。"
por "Chave '%-.192s' não existe na tabela '%-.192s'"
rus "Ключ '%-.192s' не ÑущеÑтвует в таблице '%-.192s'"
serbian "KljuÄ '%-.192s' ne postoji u tabeli '%-.192s'"
@@ -3890,7 +3847,7 @@ ER_KEY_DOES_NOT_EXITS 42000 S1009
swe "Nyckel '%-.192s' finns inte in tabell '%-.192s'"
ukr "Ключ '%-.192s' не Ñ–Ñнує в таблиці '%-.192s'"
ER_CHECK_NO_SUCH_TABLE 42000
- cze "Nemohu otev-Břít tabulku"
+ cze "Nemohu otevřít tabulku"
dan "Kan ikke åbne tabellen"
nla "Kan tabel niet openen"
eng "Can't open table"
@@ -3899,6 +3856,7 @@ ER_CHECK_NO_SUCH_TABLE 42000
ger "Kann Tabelle nicht öffnen"
hun "Nem tudom megnyitni a tablat"
ita "Impossibile aprire la tabella"
+ jpn "表をオープンã§ãã¾ã›ã‚“。"
por "Não pode abrir a tabela"
rus "Ðевозможно открыть таблицу"
serbian "Ne mogu da otvorim tabelu"
@@ -3916,7 +3874,7 @@ ER_CHECK_NOT_IMPLEMENTED 42000
greek "The handler for the table doesn't support %s"
hun "A tabla kezeloje (handler) nem tamogatja az %s"
ita "Il gestore per la tabella non supporta il %s"
- jpn "The handler for the table doesn't support %s"
+ jpn "ã“ã®è¡¨ã®ã‚¹ãƒˆãƒ¬ãƒ¼ã‚¸ã‚¨ãƒ³ã‚¸ãƒ³ã¯ '%s' を利用ã§ãã¾ã›ã‚“。"
kor "The handler for the table doesn't support %s"
nor "The handler for the table doesn't support %s"
norwegian-ny "The handler for the table doesn't support %s"
@@ -3930,7 +3888,7 @@ ER_CHECK_NOT_IMPLEMENTED 42000
swe "Tabellhanteraren för denna tabell kan inte göra %s"
ukr "Вказівник таблиці не підтримуе %s"
ER_CANT_DO_THIS_DURING_AN_TRANSACTION 25000
- cze "Proveden-Bí tohoto příkazu není v transakci dovoleno"
+ cze "Provedení tohoto příkazu není v transakci dovoleno"
dan "Du må ikke bruge denne kommando i en transaktion"
nla "Het is u niet toegestaan dit commando uit te voeren binnen een transactie"
eng "You are not allowed to execute this command in a transaction"
@@ -3939,6 +3897,7 @@ ER_CANT_DO_THIS_DURING_AN_TRANSACTION 25000
ger "Sie dürfen diesen Befehl nicht in einer Transaktion ausführen"
hun "Az On szamara nem engedelyezett a parancs vegrehajtasa a tranzakcioban"
ita "Non puoi eseguire questo comando in una transazione"
+ jpn "ã“ã®ã‚³ãƒžãƒ³ãƒ‰ã¯ãƒˆãƒ©ãƒ³ã‚¶ã‚¯ã‚·ãƒ§ãƒ³å†…ã§å®Ÿè¡Œã§ãã¾ã›ã‚“。"
por "Não lhe é permitido executar este comando em uma transação"
rus "Вам не разрешено выполнÑÑ‚ÑŒ Ñту команду в транзакции"
serbian "Nije Vam dozvoljeno da izvršite ovu komandu u transakciji"
@@ -3946,7 +3905,7 @@ ER_CANT_DO_THIS_DURING_AN_TRANSACTION 25000
swe "Du får inte utföra detta kommando i en transaktion"
ukr "Вам не дозволено виконувати цю команду в транзакції"
ER_ERROR_DURING_COMMIT
- cze "Chyba %M p-Bři COMMIT"
+ cze "Chyba %M při COMMIT"
dan "Modtog fejl %M mens kommandoen COMMIT blev udført"
nla "Kreeg fout %M tijdens COMMIT"
eng "Got error %M during COMMIT"
@@ -3955,6 +3914,7 @@ ER_ERROR_DURING_COMMIT
ger "Fehler %M beim COMMIT"
hun "%M hiba a COMMIT vegrehajtasa soran"
ita "Rilevato l'errore %M durante il COMMIT"
+ jpn "COMMIT中ã«ã‚¨ãƒ©ãƒ¼ %M ãŒç™ºç”Ÿã—ã¾ã—ãŸã€‚"
por "Obteve erro %M durante COMMIT"
rus "Получена ошибка %M в процеÑÑе COMMIT"
serbian "Greška %M za vreme izvršavanja komande 'COMMIT'"
@@ -3962,7 +3922,7 @@ ER_ERROR_DURING_COMMIT
swe "Fick fel %M vid COMMIT"
ukr "Отримано помилку %M під Ñ‡Ð°Ñ COMMIT"
ER_ERROR_DURING_ROLLBACK
- cze "Chyba %M p-Bři ROLLBACK"
+ cze "Chyba %M při ROLLBACK"
dan "Modtog fejl %M mens kommandoen ROLLBACK blev udført"
nla "Kreeg fout %M tijdens ROLLBACK"
eng "Got error %M during ROLLBACK"
@@ -3971,6 +3931,7 @@ ER_ERROR_DURING_ROLLBACK
ger "Fehler %M beim ROLLBACK"
hun "%M hiba a ROLLBACK vegrehajtasa soran"
ita "Rilevato l'errore %M durante il ROLLBACK"
+ jpn "ROLLBACK中ã«ã‚¨ãƒ©ãƒ¼ %M ãŒç™ºç”Ÿã—ã¾ã—ãŸã€‚"
por "Obteve erro %M durante ROLLBACK"
rus "Получена ошибка %M в процеÑÑе ROLLBACK"
serbian "Greška %M za vreme izvršavanja komande 'ROLLBACK'"
@@ -3978,7 +3939,7 @@ ER_ERROR_DURING_ROLLBACK
swe "Fick fel %M vid ROLLBACK"
ukr "Отримано помилку %M під Ñ‡Ð°Ñ ROLLBACK"
ER_ERROR_DURING_FLUSH_LOGS
- cze "Chyba %M p-Bři FLUSH_LOGS"
+ cze "Chyba %M při FLUSH_LOGS"
dan "Modtog fejl %M mens kommandoen FLUSH_LOGS blev udført"
nla "Kreeg fout %M tijdens FLUSH_LOGS"
eng "Got error %M during FLUSH_LOGS"
@@ -3987,6 +3948,7 @@ ER_ERROR_DURING_FLUSH_LOGS
ger "Fehler %M bei FLUSH_LOGS"
hun "%M hiba a FLUSH_LOGS vegrehajtasa soran"
ita "Rilevato l'errore %M durante il FLUSH_LOGS"
+ jpn "FLUSH_LOGS中ã«ã‚¨ãƒ©ãƒ¼ %M ãŒç™ºç”Ÿã—ã¾ã—ãŸã€‚"
por "Obteve erro %M durante FLUSH_LOGS"
rus "Получена ошибка %M в процеÑÑе FLUSH_LOGS"
serbian "Greška %M za vreme izvršavanja komande 'FLUSH_LOGS'"
@@ -3994,7 +3956,7 @@ ER_ERROR_DURING_FLUSH_LOGS
swe "Fick fel %M vid FLUSH_LOGS"
ukr "Отримано помилку %M під Ñ‡Ð°Ñ FLUSH_LOGS"
ER_ERROR_DURING_CHECKPOINT
- cze "Chyba %M p-Bři CHECKPOINT"
+ cze "Chyba %M při CHECKPOINT"
dan "Modtog fejl %M mens kommandoen CHECKPOINT blev udført"
nla "Kreeg fout %M tijdens CHECKPOINT"
eng "Got error %M during CHECKPOINT"
@@ -4003,6 +3965,7 @@ ER_ERROR_DURING_CHECKPOINT
ger "Fehler %M bei CHECKPOINT"
hun "%M hiba a CHECKPOINT vegrehajtasa soran"
ita "Rilevato l'errore %M durante il CHECKPOINT"
+ jpn "CHECKPOINT中ã«ã‚¨ãƒ©ãƒ¼ %M ãŒç™ºç”Ÿã—ã¾ã—ãŸã€‚"
por "Obteve erro %M durante CHECKPOINT"
rus "Получена ошибка %M в процеÑÑе CHECKPOINT"
serbian "Greška %M za vreme izvršavanja komande 'CHECKPOINT'"
@@ -4010,7 +3973,7 @@ ER_ERROR_DURING_CHECKPOINT
swe "Fick fel %M vid CHECKPOINT"
ukr "Отримано помилку %M під Ñ‡Ð°Ñ CHECKPOINT"
ER_NEW_ABORTING_CONNECTION 08S01
- cze "Spojen-Bí %ld do databáze: '%-.192s' uživatel: '%-.48s' stroj: '%-.64s' (%-.64s) bylo přerušeno"
+ cze "Spojení %ld do databáze: '%-.192s' uživatel: '%-.48s' stroj: '%-.64s' (%-.64s) bylo přerušeno"
dan "Afbrød forbindelsen %ld til databasen '%-.192s' bruger: '%-.48s' vært: '%-.64s' (%-.64s)"
nla "Afgebroken verbinding %ld naar db: '%-.192s' gebruiker: '%-.48s' host: '%-.64s' (%-.64s)"
eng "Aborted connection %ld to db: '%-.192s' user: '%-.48s' host: '%-.64s' (%-.64s)"
@@ -4018,6 +3981,7 @@ ER_NEW_ABORTING_CONNECTION 08S01
fre "Connection %ld avortée vers la bd: '%-.192s' utilisateur: '%-.48s' hôte: '%-.64s' (%-.64s)"
ger "Abbruch der Verbindung %ld zur Datenbank '%-.192s'. Benutzer: '%-.48s', Host: '%-.64s' (%-.64s)"
ita "Interrotta la connessione %ld al db: ''%-.192s' utente: '%-.48s' host: '%-.64s' (%-.64s)"
+ jpn "接続 %ld ãŒä¸­æ–­ã•ã‚Œã¾ã—ãŸã€‚データベース: '%-.192s' ユーザー: '%-.48s' ホスト: '%-.64s' (%-.64s)"
por "Conexão %ld abortada para banco de dados '%-.192s' - usuário '%-.48s' - 'host' '%-.64s' ('%-.64s')"
rus "Прервано Ñоединение %ld к базе данных '%-.192s' Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ '%-.48s' Ñ Ñ…Ð¾Ñта '%-.64s' (%-.64s)"
serbian "Prekinuta konekcija broj %ld ka bazi: '%-.192s' korisnik je bio: '%-.48s' a host: '%-.64s' (%-.64s)"
@@ -4029,12 +3993,13 @@ ER_unused_2
ER_FLUSH_MASTER_BINLOG_CLOSED
eng "Binlog closed, cannot RESET MASTER"
ger "Binlog geschlossen. Kann RESET MASTER nicht ausführen"
+ jpn "ãƒã‚¤ãƒŠãƒªãƒ­ã‚°ãŒã‚¯ãƒ­ãƒ¼ã‚ºã•ã‚Œã¦ã„ã¾ã™ã€‚RESET MASTER を実行ã§ãã¾ã›ã‚“。"
por "Binlog fechado. Não pode fazer RESET MASTER"
rus "Двоичный журнал Ð¾Ð±Ð½Ð¾Ð²Ð»ÐµÐ½Ð¸Ñ Ð·Ð°ÐºÑ€Ñ‹Ñ‚, невозможно выполнить RESET MASTER"
serbian "Binarni log file zatvoren, ne mogu da izvršim komandu 'RESET MASTER'"
ukr "Реплікаційний лог закрито, не можу виконати RESET MASTER"
ER_INDEX_REBUILD
- cze "P-Břebudování indexu dumpnuté tabulky '%-.192s' nebylo úspěšné"
+ cze "Přebudování indexu dumpnuté tabulky '%-.192s' nebylo úspěšné"
dan "Kunne ikke genopbygge indekset for den dumpede tabel '%-.192s'"
nla "Gefaald tijdens heropbouw index van gedumpte tabel '%-.192s'"
eng "Failed rebuilding the index of dumped table '%-.192s'"
@@ -4043,6 +4008,7 @@ ER_INDEX_REBUILD
greek "Failed rebuilding the index of dumped table '%-.192s'"
hun "Failed rebuilding the index of dumped table '%-.192s'"
ita "Fallita la ricostruzione dell'indice della tabella copiata '%-.192s'"
+ jpn "ダンプ表 '%-.192s' ã®ç´¢å¼•å†æ§‹ç¯‰ã«å¤±æ•—ã—ã¾ã—ãŸã€‚"
por "Falhou na reconstrução do índice da tabela 'dumped' '%-.192s'"
rus "Ошибка переÑтройки индекÑа Ñохраненной таблицы '%-.192s'"
serbian "Izgradnja indeksa dump-ovane tabele '%-.192s' nije uspela"
@@ -4056,20 +4022,22 @@ ER_MASTER
fre "Erreur reçue du maître: '%-.64s'"
ger "Fehler vom Master: '%-.64s'"
ita "Errore dal master: '%-.64s"
+ jpn "マスターã§ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿ: '%-.64s'"
por "Erro no 'master' '%-.64s'"
rus "Ошибка от головного Ñервера: '%-.64s'"
serbian "Greška iz glavnog servera '%-.64s' u klasteru"
spa "Error del master: '%-.64s'"
- swe "Fick en master: '%-.64s'"
+ swe "Fel från master: '%-.64s'"
ukr "Помилка від головного: '%-.64s'"
ER_MASTER_NET_READ 08S01
- cze "S-Bíťová chyba pÅ™i Ätení z masteru"
+ cze "Síťová chyba pÅ™i Ätení z masteru"
dan "Netværksfejl ved læsning fra master"
nla "Net fout tijdens lezen van master"
eng "Net error reading from master"
fre "Erreur de lecture réseau reçue du maître"
ger "Netzfehler beim Lesen vom Master"
ita "Errore di rete durante la ricezione dal master"
+ jpn "マスターã‹ã‚‰ã®ãƒ‡ãƒ¼ã‚¿å—信中ã®ãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯ã‚¨ãƒ©ãƒ¼"
por "Erro de rede lendo do 'master'"
rus "Возникла ошибка Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ð² процеÑÑе коммуникации Ñ Ð³Ð¾Ð»Ð¾Ð²Ð½Ñ‹Ð¼ Ñервером"
serbian "Greška u primanju mrežnih paketa sa glavnog servera u klasteru"
@@ -4077,13 +4045,14 @@ ER_MASTER_NET_READ 08S01
swe "Fick nätverksfel vid läsning från master"
ukr "Мережева помилка Ñ‡Ð¸Ñ‚Ð°Ð½Ð½Ñ Ð²Ñ–Ð´ головного"
ER_MASTER_NET_WRITE 08S01
- cze "S-Bíťová chyba při zápisu na master"
+ cze "Síťová chyba při zápisu na master"
dan "Netværksfejl ved skrivning til master"
nla "Net fout tijdens schrijven naar master"
eng "Net error writing to master"
fre "Erreur d'écriture réseau reçue du maître"
ger "Netzfehler beim Schreiben zum Master"
ita "Errore di rete durante l'invio al master"
+ jpn "マスターã¸ã®ãƒ‡ãƒ¼ã‚¿é€ä¿¡ä¸­ã®ãƒãƒƒãƒˆãƒ¯ãƒ¼ã‚¯ã‚¨ãƒ©ãƒ¼"
por "Erro de rede gravando no 'master'"
rus "Возникла ошибка запиÑи в процеÑÑе коммуникации Ñ Ð³Ð¾Ð»Ð¾Ð²Ð½Ñ‹Ð¼ Ñервером"
serbian "Greška u slanju mrežnih paketa na glavni server u klasteru"
@@ -4091,7 +4060,7 @@ ER_MASTER_NET_WRITE 08S01
swe "Fick nätverksfel vid skrivning till master"
ukr "Мережева помилка запиÑу до головного"
ER_FT_MATCHING_KEY_NOT_FOUND
- cze "-BŽádný sloupec nemá vytvořen fulltextový index"
+ cze "Žádný sloupec nemá vytvořen fulltextový index"
dan "Kan ikke finde en FULLTEXT nøgle som svarer til kolonne listen"
nla "Kan geen FULLTEXT index vinden passend bij de kolom lijst"
eng "Can't find FULLTEXT index matching the column list"
@@ -4099,6 +4068,7 @@ ER_FT_MATCHING_KEY_NOT_FOUND
fre "Impossible de trouver un index FULLTEXT correspondant à cette liste de colonnes"
ger "Kann keinen FULLTEXT-Index finden, der der Feldliste entspricht"
ita "Impossibile trovare un indice FULLTEXT che corrisponda all'elenco delle colonne"
+ jpn "列リストã«å¯¾å¿œã™ã‚‹å…¨æ–‡ç´¢å¼•(FULLTEXT)ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。"
por "Não pode encontrar um índice para o texto todo que combine com a lista de colunas"
rus "Ðевозможно отыÑкать полнотекÑтовый (FULLTEXT) индекÑ, ÑоответÑтвующий ÑпиÑку Ñтолбцов"
serbian "Ne mogu da pronađem 'FULLTEXT' indeks koli odgovara listi kolona"
@@ -4106,7 +4076,7 @@ ER_FT_MATCHING_KEY_NOT_FOUND
swe "Hittar inte ett FULLTEXT-index i kolumnlistan"
ukr "Ðе можу знайти FULLTEXT індекÑ, що відповідає переліку Ñтовбців"
ER_LOCK_OR_ACTIVE_TRANSACTION
- cze "Nemohu prov-Bést zadaný příkaz, protože existují aktivní zamÄené tabulky nebo aktivní transakce"
+ cze "Nemohu provést zadaný příkaz, protože existují aktivní zamÄené tabulky nebo aktivní transakce"
dan "Kan ikke udføre den givne kommando fordi der findes aktive, låste tabeller eller fordi der udføres en transaktion"
nla "Kan het gegeven commando niet uitvoeren, want u heeft actieve gelockte tabellen of een actieve transactie"
eng "Can't execute the given command because you have active locked tables or an active transaction"
@@ -4114,6 +4084,7 @@ ER_LOCK_OR_ACTIVE_TRANSACTION
fre "Impossible d'exécuter la commande car vous avez des tables verrouillées ou une transaction active"
ger "Kann den angegebenen Befehl wegen einer aktiven Tabellensperre oder einer aktiven Transaktion nicht ausführen"
ita "Impossibile eseguire il comando richiesto: tabelle sotto lock o transazione in atto"
+ jpn "ã™ã§ã«ã‚¢ã‚¯ãƒ†ã‚£ãƒ–ãªè¡¨ãƒ­ãƒƒã‚¯ã‚„トランザクションãŒã‚ã‚‹ãŸã‚ã€ã‚³ãƒžãƒ³ãƒ‰ã‚’実行ã§ãã¾ã›ã‚“。"
por "Não pode executar o comando dado porque você tem tabelas ativas travadas ou uma transação ativa"
rus "Ðевозможно выполнить указанную команду, поÑкольку у Ð²Ð°Ñ Ð¿Ñ€Ð¸ÑутÑтвуют активно заблокированные таблица или Ð¾Ñ‚ÐºÑ€Ñ‹Ñ‚Ð°Ñ Ñ‚Ñ€Ð°Ð½Ð·Ð°ÐºÑ†Ð¸Ñ"
serbian "Ne mogu da izvrÅ¡im datu komandu zbog toga Å¡to su tabele zakljuÄane ili je transakcija u toku"
@@ -4121,7 +4092,7 @@ ER_LOCK_OR_ACTIVE_TRANSACTION
swe "Kan inte utföra kommandot emedan du har en låst tabell eller an aktiv transaktion"
ukr "Ðе можу виконати подану команду тому, що Ñ‚Ð°Ð±Ð»Ð¸Ñ†Ñ Ð·Ð°Ð±Ð»Ð¾ÐºÐ¾Ð²Ð°Ð½Ð° або виконуєтьÑÑ Ñ‚Ñ€Ð°Ð½Ð·Ð°ÐºÑ†Ñ–Ñ"
ER_UNKNOWN_SYSTEM_VARIABLE
- cze "Nezn-Bámá systémová proměnná '%-.64s'"
+ cze "Neznámá systémová proměnná '%-.64s'"
dan "Ukendt systemvariabel '%-.64s'"
nla "Onbekende systeem variabele '%-.64s'"
eng "Unknown system variable '%-.64s'"
@@ -4129,6 +4100,7 @@ ER_UNKNOWN_SYSTEM_VARIABLE
fre "Variable système '%-.64s' inconnue"
ger "Unbekannte Systemvariable '%-.64s'"
ita "Variabile di sistema '%-.64s' sconosciuta"
+ jpn "'%-.64s' ã¯ä¸æ˜Žãªã‚·ã‚¹ãƒ†ãƒ å¤‰æ•°ã§ã™ã€‚"
por "Variável de sistema '%-.64s' desconhecida"
rus "ÐеизвеÑÑ‚Ð½Ð°Ñ ÑиÑÑ‚ÐµÐ¼Ð½Ð°Ñ Ð¿ÐµÑ€ÐµÐ¼ÐµÐ½Ð½Ð°Ñ '%-.64s'"
serbian "Nepoznata sistemska promenljiva '%-.64s'"
@@ -4136,7 +4108,7 @@ ER_UNKNOWN_SYSTEM_VARIABLE
swe "Okänd systemvariabel: '%-.64s'"
ukr "Ðевідома ÑиÑтемна змінна '%-.64s'"
ER_CRASHED_ON_USAGE
- cze "Tabulka '%-.192s' je ozna-BÄena jako poruÅ¡ená a mÄ›la by být opravena"
+ cze "Tabulka '%-.192s' je oznaÄena jako poruÅ¡ená a mÄ›la by být opravena"
dan "Tabellen '%-.192s' er markeret med fejl og bør repareres"
nla "Tabel '%-.192s' staat als gecrashed gemarkeerd en dient te worden gerepareerd"
eng "Table '%-.192s' is marked as crashed and should be repaired"
@@ -4144,6 +4116,7 @@ ER_CRASHED_ON_USAGE
fre "La table '%-.192s' est marquée 'crashed' et devrait être réparée"
ger "Tabelle '%-.192s' ist als defekt markiert und sollte repariert werden"
ita "La tabella '%-.192s' e` segnalata come corrotta e deve essere riparata"
+ jpn "表 '%-.192s' ã¯å£Šã‚Œã¦ã„ã¾ã™ã€‚修復ãŒå¿…è¦ã§ã™ã€‚"
por "Tabela '%-.192s' está marcada como danificada e deve ser reparada"
rus "Таблица '%-.192s' помечена как иÑÐ¿Ð¾Ñ€Ñ‡ÐµÐ½Ð½Ð°Ñ Ð¸ должна пройти проверку и ремонт"
serbian "Tabela '%-.192s' je markirana kao oštećena i trebala bi biti popravljena"
@@ -4151,7 +4124,7 @@ ER_CRASHED_ON_USAGE
swe "Tabell '%-.192s' är trasig och bör repareras med REPAIR TABLE"
ukr "Таблицю '%-.192s' марковано Ñк зіпÑовану та Ñ—Ñ— потрібно відновити"
ER_CRASHED_ON_REPAIR
- cze "Tabulka '%-.192s' je ozna-BÄena jako poruÅ¡ená a poslední (automatická?) oprava se nezdaÅ™ila"
+ cze "Tabulka '%-.192s' je oznaÄena jako poruÅ¡ená a poslední (automatická?) oprava se nezdaÅ™ila"
dan "Tabellen '%-.192s' er markeret med fejl og sidste (automatiske?) REPAIR fejlede"
nla "Tabel '%-.192s' staat als gecrashed gemarkeerd en de laatste (automatische?) reparatie poging mislukte"
eng "Table '%-.192s' is marked as crashed and last (automatic?) repair failed"
@@ -4159,6 +4132,7 @@ ER_CRASHED_ON_REPAIR
fre "La table '%-.192s' est marquée 'crashed' et le dernier 'repair' a échoué"
ger "Tabelle '%-.192s' ist als defekt markiert und der letzte (automatische?) Reparaturversuch schlug fehl"
ita "La tabella '%-.192s' e` segnalata come corrotta e l'ultima ricostruzione (automatica?) e` fallita"
+ jpn "表 '%-.192s' ã¯å£Šã‚Œã¦ã„ã¾ã™ã€‚修復(自動?)ã«ã‚‚失敗ã—ã¦ã„ã¾ã™ã€‚"
por "Tabela '%-.192s' está marcada como danificada e a última reparação (automática?) falhou"
rus "Таблица '%-.192s' помечена как иÑÐ¿Ð¾Ñ€Ñ‡ÐµÐ½Ð½Ð°Ñ Ð¸ поÑледний (автоматичеÑкий?) ремонт не был уÑпешным"
serbian "Tabela '%-.192s' je markirana kao oštećena, a zadnja (automatska?) popravka je bila neuspela"
@@ -4173,6 +4147,7 @@ ER_WARNING_NOT_COMPLETE_ROLLBACK
fre "Attention: certaines tables ne supportant pas les transactions ont été changées et elles ne pourront pas être restituées"
ger "Änderungen an einigen nicht transaktionalen Tabellen konnten nicht zurückgerollt werden"
ita "Attenzione: Alcune delle modifiche alle tabelle non transazionali non possono essere ripristinate (roll back impossibile)"
+ jpn "トランザクション対応ã§ã¯ãªã„表ã¸ã®å¤‰æ›´ã¯ãƒ­ãƒ¼ãƒ«ãƒãƒƒã‚¯ã•ã‚Œã¾ã›ã‚“。"
por "Aviso: Algumas tabelas não-transacionais alteradas não puderam ser reconstituídas (rolled back)"
rus "Внимание: по некоторым измененным нетранзакционным таблицам невозможно будет произвеÑти откат транзакции"
serbian "Upozorenje: Neke izmenjene tabele ne podržavaju komandu 'ROLLBACK'"
@@ -4187,6 +4162,7 @@ ER_TRANS_CACHE_FULL
fre "Cette transaction à commandes multiples nécessite plus de 'max_binlog_cache_size' octets de stockage, augmentez cette variable de mysqld et réessayez"
ger "Transaktionen, die aus mehreren Befehlen bestehen, benötigten mehr als 'max_binlog_cache_size' Bytes an Speicher. Btte vergrössern Sie diese Server-Variable versuchen Sie es noch einmal"
ita "La transazione a comandi multipli (multi-statement) ha richiesto piu` di 'max_binlog_cache_size' bytes di disco: aumentare questa variabile di mysqld e riprovare"
+ jpn "複数ステートメントã‹ã‚‰æˆã‚‹ãƒˆãƒ©ãƒ³ã‚¶ã‚¯ã‚·ãƒ§ãƒ³ãŒ 'max_binlog_cache_size' 以上ã®å®¹é‡ã‚’å¿…è¦ã¨ã—ã¾ã—ãŸã€‚ã“ã®ã‚·ã‚¹ãƒ†ãƒ å¤‰æ•°ã‚’増加ã—ã¦ã€å†è©¦è¡Œã—ã¦ãã ã•ã„。"
por "Transações multi-declaradas (multi-statement transactions) requeriram mais do que o valor limite (max_binlog_cache_size) de bytes para armazenagem. Aumente o valor desta variável do mysqld e tente novamente"
rus "Транзакции, включающей большое количеÑтво команд, потребовалоÑÑŒ более чем 'max_binlog_cache_size' байт. Увеличьте Ñту переменную Ñервера mysqld и попробуйте еще раз"
spa "Multipla transición necesita mas que 'max_binlog_cache_size' bytes de almacenamiento. Aumente esta variable mysqld y tente de nuevo"
@@ -4212,6 +4188,7 @@ ER_SLAVE_NOT_RUNNING
fre "Cette opération nécessite un esclave actif, configurez les esclaves et faites START SLAVE"
ger "Diese Operation benötigt einen aktiven Slave. Bitte Slave konfigurieren und mittels START SLAVE aktivieren"
ita "Questa operaione richiede un database 'slave', configurarlo ed eseguire START SLAVE"
+ jpn "ã“ã®å‡¦ç†ã¯ã€ç¨¼åƒä¸­ã®ã‚¹ãƒ¬ãƒ¼ãƒ–ã§ãªã‘ã‚Œã°å®Ÿè¡Œã§ãã¾ã›ã‚“。スレーブã®è¨­å®šã‚’ã—ã¦START SLAVEコマンドを実行ã—ã¦ãã ã•ã„。"
por "Esta operação requer um 'slave' em execução. Configure o 'slave' e execute START SLAVE"
rus "Ð”Ð»Ñ Ñтой операции требуетÑÑ Ñ€Ð°Ð±Ð¾Ñ‚Ð°ÑŽÑ‰Ð¸Ð¹ подчиненный Ñервер. Сначала выполните START SLAVE"
serbian "Ova operacija zahteva da je aktivan podređeni server. Konfigurišite prvo podređeni server i onda izvršite komandu 'START SLAVE'"
@@ -4225,6 +4202,7 @@ ER_BAD_SLAVE
fre "Le server n'est pas configuré comme un esclave, changez le fichier de configuration ou utilisez CHANGE MASTER TO"
ger "Der Server ist nicht als Slave konfiguriert. Bitte in der Konfigurationsdatei oder mittels CHANGE MASTER TO beheben"
ita "Il server non e' configurato come 'slave', correggere il file di configurazione cambiando CHANGE MASTER TO"
+ jpn "ã“ã®ã‚µãƒ¼ãƒãƒ¼ã¯ã‚¹ãƒ¬ãƒ¼ãƒ–ã¨ã—ã¦è¨­å®šã•ã‚Œã¦ã„ã¾ã›ã‚“。コンフィグファイルã‹CHANGE MASTER TOコマンドã§è¨­å®šã—ã¦ä¸‹ã•ã„。"
por "O servidor não está configurado como 'slave'. Acerte o arquivo de configuração ou use CHANGE MASTER TO"
rus "Этот Ñервер не наÑтроен как подчиненный. ВнеÑите иÑÐ¿Ñ€Ð°Ð²Ð»ÐµÐ½Ð¸Ñ Ð² конфигурационном файле или Ñ Ð¿Ð¾Ð¼Ð¾Ñ‰ÑŒÑŽ CHANGE MASTER TO"
serbian "Server nije konfigurisan kao podređeni server, ispravite konfiguracioni file ili na njemu izvršite komandu 'CHANGE MASTER TO'"
@@ -4235,15 +4213,17 @@ ER_MASTER_INFO
eng "Could not initialize master info structure for '%.*s'; more error messages can be found in the MariaDB error log"
fre "Impossible d'initialiser les structures d'information de maître '%.*s', vous trouverez des messages d'erreur supplémentaires dans le journal des erreurs de MariaDB"
ger "Konnte Master-Info-Struktur '%.*s' nicht initialisieren. Weitere Fehlermeldungen können im MariaDB-Error-Log eingesehen werden"
+ jpn "'master info '%.*s''構造体ã®åˆæœŸåŒ–ãŒã§ãã¾ã›ã‚“ã§ã—ãŸã€‚MariaDBエラーログã§ã‚¨ãƒ©ãƒ¼ãƒ¡ãƒƒã‚»ãƒ¼ã‚¸ã‚’確èªã—ã¦ãã ã•ã„。"
serbian "Nisam mogao da inicijalizujem informacionu strukturu glavnog servera, proverite da li imam privilegije potrebne za pristup file-u 'master.info' '%.*s'"
swe "Kunde inte initialisera replikationsstrukturerna för '%.*s'. See MariaDB fel fil för mera information"
-ER_SLAVE_THREAD
+ER_SLAVE_THREAD
dan "Kunne ikke danne en slave-tråd; check systemressourcerne"
nla "Kon slave thread niet aanmaken, controleer systeem resources"
eng "Could not create slave thread; check system resources"
fre "Impossible de créer une tâche esclave, vérifiez les ressources système"
ger "Konnte Slave-Thread nicht starten. Bitte System-Ressourcen überprüfen"
ita "Impossibile creare il thread 'slave', controllare le risorse di sistema"
+ jpn "スレーブスレッドを作æˆã§ãã¾ã›ã‚“。システムリソースを確èªã—ã¦ãã ã•ã„。"
por "Não conseguiu criar 'thread' de 'slave'. Verifique os recursos do sistema"
rus "Ðевозможно Ñоздать поток подчиненного Ñервера. Проверьте ÑиÑтемные реÑурÑÑ‹"
serbian "Nisam mogao da startujem thread za podređeni server, proverite sistemske resurse"
@@ -4258,6 +4238,7 @@ ER_TOO_MANY_USER_CONNECTIONS 42000
fre "L'utilisateur %-.64s possède déjà plus de 'max_user_connections' connexions actives"
ger "Benutzer '%-.64s' hat mehr als 'max_user_connections' aktive Verbindungen"
ita "L'utente %-.64s ha gia' piu' di 'max_user_connections' connessioni attive"
+ jpn "ユーザー '%-.64s' ã¯ã™ã§ã« 'max_user_connections' 以上ã®ã‚¢ã‚¯ãƒ†ã‚£ãƒ–ãªæŽ¥ç¶šã‚’è¡Œã£ã¦ã„ã¾ã™ã€‚"
por "Usuário '%-.64s' já possui mais que o valor máximo de conexões (max_user_connections) ativas"
rus "У Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ %-.64s уже больше чем 'max_user_connections' активных Ñоединений"
serbian "Korisnik %-.64s već ima više aktivnih konekcija nego što je to određeno 'max_user_connections' promenljivom"
@@ -4272,6 +4253,7 @@ ER_SET_CONSTANTS_ONLY
fre "Seules les expressions constantes sont autorisées avec SET"
ger "Bei diesem Befehl dürfen nur konstante Ausdrücke verwendet werden"
ita "Si possono usare solo espressioni costanti con SET"
+ jpn "SET処ç†ãŒå¤±æ•—ã—ã¾ã—ãŸã€‚"
por "Você pode usar apenas expressões constantes com SET"
rus "С Ñтой командой вы можете иÑпользовать только конÑтантные выражениÑ"
serbian "Možete upotrebiti samo konstantan iskaz sa komandom 'SET'"
@@ -4286,6 +4268,7 @@ ER_LOCK_WAIT_TIMEOUT
fre "Timeout sur l'obtention du verrou"
ger "Beim Warten auf eine Sperre wurde die zulässige Wartezeit überschritten. Bitte versuchen Sie, die Transaktion neu zu starten"
ita "E' scaduto il timeout per l'attesa del lock"
+ jpn "ロック待ã¡ãŒã‚¿ã‚¤ãƒ ã‚¢ã‚¦ãƒˆã—ã¾ã—ãŸã€‚トランザクションをå†è©¦è¡Œã—ã¦ãã ã•ã„。"
por "Tempo de espera (timeout) de travamento excedido. Tente reiniciar a transação."
rus "Таймаут Ð¾Ð¶Ð¸Ð´Ð°Ð½Ð¸Ñ Ð±Ð»Ð¾ÐºÐ¸Ñ€Ð¾Ð²ÐºÐ¸ иÑтек; попробуйте перезапуÑтить транзакцию"
serbian "Vremenski limit za zakljuÄavanje tabele je istekao; Probajte da ponovo startujete transakciju"
@@ -4300,6 +4283,7 @@ ER_LOCK_TABLE_FULL
fre "Le nombre total de verrou dépasse la taille de la table des verrous"
ger "Die Gesamtzahl der Sperren überschreitet die Größe der Sperrtabelle"
ita "Il numero totale di lock e' maggiore della grandezza della tabella di lock"
+ jpn "ロックã®æ•°ãŒå¤šã™ãŽã¾ã™ã€‚"
por "O número total de travamentos excede o tamanho da tabela de travamentos"
rus "Общее количеÑтво блокировок превыÑило размеры таблицы блокировок"
serbian "Broj totalnih zakljuÄavanja tabele premaÅ¡uje veliÄinu tabele zakljuÄavanja"
@@ -4314,6 +4298,7 @@ ER_READ_ONLY_TRANSACTION 25000
fre "Un verrou en update ne peut être acquit pendant une transaction READ UNCOMMITTED"
ger "Während einer READ-UNCOMMITTED-Transaktion können keine UPDATE-Sperren angefordert werden"
ita "I lock di aggiornamento non possono essere acquisiti durante una transazione 'READ UNCOMMITTED'"
+ jpn "読ã¿è¾¼ã¿å°‚用トランザクションã§ã™ã€‚"
por "Travamentos de atualização não podem ser obtidos durante uma transação de tipo READ UNCOMMITTED"
rus "Блокировки обновлений Ð½ÐµÐ»ÑŒÐ·Ñ Ð¿Ð¾Ð»ÑƒÑ‡Ð¸Ñ‚ÑŒ в процеÑÑе Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ð½Ðµ принÑтой (в режиме READ UNCOMMITTED) транзакции"
serbian "ZakljuÄavanja izmena ne mogu biti realizovana sve dok traje 'READ UNCOMMITTED' transakcija"
@@ -4328,6 +4313,7 @@ ER_DROP_DB_WITH_READ_LOCK
fre "DROP DATABASE n'est pas autorisée pendant qu'une tâche possède un verrou global en lecture"
ger "DROP DATABASE ist nicht erlaubt, solange der Thread eine globale Lesesperre hält"
ita "DROP DATABASE non e' permesso mentre il thread ha un lock globale di lettura"
+ jpn "グローãƒãƒ«ãƒªãƒ¼ãƒ‰ãƒ­ãƒƒã‚¯ã‚’ä¿æŒã—ã¦ã„ã‚‹é–“ã¯ã€DROP DATABASE を実行ã§ãã¾ã›ã‚“。"
por "DROP DATABASE não permitido enquanto uma 'thread' está mantendo um travamento global de leitura"
rus "Ðе допуÑкаетÑÑ DROP DATABASE, пока поток держит глобальную блокировку чтениÑ"
serbian "Komanda 'DROP DATABASE' nije dozvoljena dok thread globalno zakljuÄava Äitanje podataka"
@@ -4342,6 +4328,7 @@ ER_CREATE_DB_WITH_READ_LOCK
fre "CREATE DATABASE n'est pas autorisée pendant qu'une tâche possède un verrou global en lecture"
ger "CREATE DATABASE ist nicht erlaubt, solange der Thread eine globale Lesesperre hält"
ita "CREATE DATABASE non e' permesso mentre il thread ha un lock globale di lettura"
+ jpn "グローãƒãƒ«ãƒªãƒ¼ãƒ‰ãƒ­ãƒƒã‚¯ã‚’ä¿æŒã—ã¦ã„ã‚‹é–“ã¯ã€CREATE DATABASE を実行ã§ãã¾ã›ã‚“。"
por "CREATE DATABASE não permitido enquanto uma 'thread' está mantendo um travamento global de leitura"
rus "Ðе допуÑкаетÑÑ CREATE DATABASE, пока поток держит глобальную блокировку чтениÑ"
serbian "Komanda 'CREATE DATABASE' nije dozvoljena dok thread globalno zakljuÄava Äitanje podataka"
@@ -4355,6 +4342,7 @@ ER_WRONG_ARGUMENTS
fre "Mauvais arguments à %s"
ger "Falsche Argumente für %s"
ita "Argomenti errati a %s"
+ jpn "%s ã®å¼•æ•°ãŒä¸æ­£ã§ã™"
por "Argumentos errados para %s"
rus "Ðеверные параметры Ð´Ð»Ñ %s"
serbian "Pogrešni argumenti prosleđeni na %s"
@@ -4381,6 +4369,7 @@ ER_UNION_TABLES_IN_DIFFERENT_DIR
fre "Définition de table incorrecte; toutes les tables MERGE doivent être dans la même base de donnée"
ger "Falsche Tabellendefinition. Alle MERGE-Tabellen müssen sich in derselben Datenbank befinden"
ita "Definizione della tabella errata; tutte le tabelle di tipo MERGE devono essere nello stesso database"
+ jpn "ä¸æ­£ãªè¡¨å®šç¾©ã§ã™ã€‚MERGE表ã®æ§‹æˆè¡¨ã¯ã™ã¹ã¦åŒã˜ãƒ‡ãƒ¼ã‚¿ãƒ™ãƒ¼ã‚¹å†…ã«ãªã‘ã‚Œã°ãªã‚Šã¾ã›ã‚“。"
por "Definição incorreta da tabela. Todas as tabelas contidas na junção devem estar no mesmo banco de dados."
rus "Ðеверное определение таблицы; Ð’Ñе таблицы в MERGE должны принадлежать одной и той же базе данных"
serbian "Pogrešna definicija tabele; sve 'MERGE' tabele moraju biti u istoj bazi podataka"
@@ -4393,6 +4382,7 @@ ER_LOCK_DEADLOCK 40001
fre "Deadlock découvert en essayant d'obtenir les verrous : essayez de redémarrer la transaction"
ger "Beim Versuch, eine Sperre anzufordern, ist ein Deadlock aufgetreten. Versuchen Sie, die Transaktion neu zu starten"
ita "Trovato deadlock durante il lock; Provare a far ripartire la transazione"
+ jpn "ロックå–得中ã«ãƒ‡ãƒƒãƒ‰ãƒ­ãƒƒã‚¯ãŒæ¤œå‡ºã•ã‚Œã¾ã—ãŸã€‚トランザクションをå†è©¦è¡Œã—ã¦ãã ã•ã„。"
por "Encontrado um travamento fatal (deadlock) quando tentava obter uma trava. Tente reiniciar a transação."
rus "Возникла Ñ‚ÑƒÐ¿Ð¸ÐºÐ¾Ð²Ð°Ñ ÑÐ¸Ñ‚ÑƒÐ°Ñ†Ð¸Ñ Ð² процеÑÑе Ð¿Ð¾Ð»ÑƒÑ‡ÐµÐ½Ð¸Ñ Ð±Ð»Ð¾ÐºÐ¸Ñ€Ð¾Ð²ÐºÐ¸; Попробуйте перезапуÑтить транзакцию"
serbian "Unakrsno zakljuÄavanje pronaÄ‘eno kada sam pokuÅ¡ao da dobijem pravo na zakljuÄavanje; Probajte da restartujete transakciju"
@@ -4417,6 +4407,7 @@ ER_CANNOT_ADD_FOREIGN
fre "Impossible d'ajouter des contraintes d'index externe"
ger "Fremdschlüssel-Beschränkung kann nicht hinzugefügt werden"
ita "Impossibile aggiungere il vincolo di integrita' referenziale (foreign key constraint)"
+ jpn "外部キー制約を追加ã§ãã¾ã›ã‚“。"
por "Não pode acrescentar uma restrição de chave estrangeira"
rus "Ðевозможно добавить Ð¾Ð³Ñ€Ð°Ð½Ð¸Ñ‡ÐµÐ½Ð¸Ñ Ð²Ð½ÐµÑˆÐ½ÐµÐ³Ð¾ ключа"
serbian "Ne mogu da dodam proveru spoljnog kljuÄa"
@@ -4430,6 +4421,7 @@ ER_NO_REFERENCED_ROW 23000
greek "Cannot add a child row: a foreign key constraint fails"
hun "Cannot add a child row: a foreign key constraint fails"
ita "Impossibile aggiungere la riga: un vincolo d'integrita' referenziale non e' soddisfatto"
+ jpn "親キーãŒã‚ã‚Šã¾ã›ã‚“。外部キー制約é•åã§ã™ã€‚"
norwegian-ny "Cannot add a child row: a foreign key constraint fails"
por "Não pode acrescentar uma linha filha: uma restrição de chave estrangeira falhou"
rus "Ðевозможно добавить или обновить дочернюю Ñтроку: проверка ограничений внешнего ключа не выполнÑетÑÑ"
@@ -4442,6 +4434,7 @@ ER_ROW_IS_REFERENCED 23000
greek "Cannot delete a parent row: a foreign key constraint fails"
hun "Cannot delete a parent row: a foreign key constraint fails"
ita "Impossibile cancellare la riga: un vincolo d'integrita' referenziale non e' soddisfatto"
+ jpn "å­ãƒ¬ã‚³ãƒ¼ãƒ‰ãŒã‚ã‚Šã¾ã™ã€‚外部キー制約é•åã§ã™ã€‚"
por "Não pode apagar uma linha pai: uma restrição de chave estrangeira falhou"
rus "Ðевозможно удалить или обновить родительÑкую Ñтроку: проверка ограничений внешнего ключа не выполнÑетÑÑ"
serbian "Ne mogu da izbriÅ¡em roditeljski slog: provera spoljnog kljuÄa je neuspela"
@@ -4452,6 +4445,7 @@ ER_CONNECT_TO_MASTER 08S01
eng "Error connecting to master: %-.128s"
ger "Fehler bei der Verbindung zum Master: %-.128s"
ita "Errore durante la connessione al master: %-.128s"
+ jpn "マスターã¸ã®æŽ¥ç¶šã‚¨ãƒ©ãƒ¼: %-.128s"
por "Erro conectando com o master: %-.128s"
rus "Ошибка ÑÐ¾ÐµÐ´Ð¸Ð½ÐµÐ½Ð¸Ñ Ñ Ð³Ð¾Ð»Ð¾Ð²Ð½Ñ‹Ð¼ Ñервером: %-.128s"
spa "Error de coneccion a master: %-.128s"
@@ -4461,6 +4455,7 @@ ER_QUERY_ON_MASTER
eng "Error running query on master: %-.128s"
ger "Beim Ausführen einer Abfrage auf dem Master trat ein Fehler auf: %-.128s"
ita "Errore eseguendo una query sul master: %-.128s"
+ jpn "マスターã§ã®ã‚¯ã‚¨ãƒªå®Ÿè¡Œã‚¨ãƒ©ãƒ¼: %-.128s"
por "Erro rodando consulta no master: %-.128s"
rus "Ошибка Ð²Ñ‹Ð¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ Ð·Ð°Ð¿Ñ€Ð¾Ñа на головном Ñервере: %-.128s"
spa "Error executando el query en master: %-.128s"
@@ -4471,6 +4466,7 @@ ER_ERROR_WHEN_EXECUTING_COMMAND
est "Viga käsu %s täitmisel: %-.128s"
ger "Fehler beim Ausführen des Befehls %s: %-.128s"
ita "Errore durante l'esecuzione del comando %s: %-.128s"
+ jpn "%s コマンドã®å®Ÿè¡Œã‚¨ãƒ©ãƒ¼: %-.128s"
por "Erro quando executando comando %s: %-.128s"
rus "Ошибка при выполнении команды %s: %-.128s"
serbian "Greška pri izvršavanju komande %s: %-.128s"
@@ -4482,6 +4478,7 @@ ER_WRONG_USAGE
est "Vigane %s ja %s kasutus"
ger "Falsche Verwendung von %s und %s"
ita "Uso errato di %s e %s"
+ jpn "%s ã® %s ã«é–¢ã™ã‚‹ä¸æ­£ãªä½¿ç”¨æ³•ã§ã™ã€‚"
por "Uso errado de %s e %s"
rus "Ðеверное иÑпользование %s и %s"
serbian "Pogrešna upotreba %s i %s"
@@ -4494,6 +4491,7 @@ ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT 21000
est "Tulpade arv kasutatud SELECT lausetes ei kattu"
ger "Die verwendeten SELECT-Befehle liefern unterschiedliche Anzahlen von Feldern zurück"
ita "La SELECT utilizzata ha un numero di colonne differente"
+ jpn "使用ã®SELECTæ–‡ãŒè¿”ã™åˆ—æ•°ãŒé•ã„ã¾ã™ã€‚"
por "Os comandos SELECT usados têm diferente número de colunas"
rus "ИÑпользованные операторы выборки (SELECT) дают разное количеÑтво Ñтолбцов"
serbian "Upotrebljene 'SELECT' komande adresiraju razliÄit broj kolona"
@@ -4505,6 +4503,7 @@ ER_CANT_UPDATE_WITH_READLOCK
est "Ei suuda täita päringut konfliktse luku tõttu"
ger "Augrund eines READ-LOCK-Konflikts kann die Abfrage nicht ausgeführt werden"
ita "Impossibile eseguire la query perche' c'e' un conflitto con in lock di lettura"
+ jpn "競åˆã™ã‚‹ãƒªãƒ¼ãƒ‰ãƒ­ãƒƒã‚¯ã‚’ä¿æŒã—ã¦ã„ã‚‹ã®ã§ã€ã‚¯ã‚¨ãƒªã‚’実行ã§ãã¾ã›ã‚“。"
por "Não posso executar a consulta porque você tem um conflito de travamento de leitura"
rus "Ðевозможно иÑполнить запроÑ, поÑкольку у Ð²Ð°Ñ ÑƒÑтановлены конфликтующие блокировки чтениÑ"
serbian "Ne mogu da izvrÅ¡im upit zbog toga Å¡to imate zakljuÄavanja Äitanja podataka u konfliktu"
@@ -4516,6 +4515,7 @@ ER_MIXING_NOT_ALLOWED
est "Transaktsioone toetavate ning mittetoetavate tabelite kooskasutamine ei ole lubatud"
ger "Die gleichzeitige Verwendung von Tabellen mit und ohne Transaktionsunterstützung ist deaktiviert"
ita "E' disabilitata la possibilita' di mischiare tabelle transazionali e non-transazionali"
+ jpn "トランザクション対応ã®è¡¨ã¨éžå¯¾å¿œã®è¡¨ã®åŒæ™‚使用ã¯ç„¡åŠ¹åŒ–ã•ã‚Œã¦ã„ã¾ã™ã€‚"
por "Mistura de tabelas transacional e não-transacional está desabilitada"
rus "ИÑпользование транзакционных таблиц нарÑду Ñ Ð½ÐµÑ‚Ñ€Ð°Ð½Ð·Ð°ÐºÑ†Ð¸Ð¾Ð½Ð½Ñ‹Ð¼Ð¸ запрещено"
serbian "MeÅ¡anje tabela koje podržavaju transakcije i onih koje ne podržavaju transakcije je iskljuÄeno"
@@ -4527,6 +4527,7 @@ ER_DUP_ARGUMENT
est "Määrangut '%s' on lauses kasutatud topelt"
ger "Option '%s' wird im Befehl zweimal verwendet"
ita "L'opzione '%s' e' stata usata due volte nel comando"
+ jpn "オプション '%s' ãŒ2度使用ã•ã‚Œã¦ã„ã¾ã™ã€‚"
por "Opção '%s' usada duas vezes no comando"
rus "ÐžÐ¿Ñ†Ð¸Ñ '%s' дважды иÑпользована в выражении"
spa "Opción '%s' usada dos veces en el comando"
@@ -4536,6 +4537,7 @@ ER_USER_LIMIT_REACHED 42000
eng "User '%-.64s' has exceeded the '%s' resource (current value: %ld)"
ger "Benutzer '%-.64s' hat die Ressourcenbeschränkung '%s' überschritten (aktueller Wert: %ld)"
ita "L'utente '%-.64s' ha ecceduto la risorsa '%s' (valore corrente: %ld)"
+ jpn "ユーザー '%-.64s' ã¯ãƒªã‚½ãƒ¼ã‚¹ã®ä¸Šé™ '%s' ã«é”ã—ã¾ã—ãŸã€‚(ç¾åœ¨å€¤: %ld)"
por "Usuário '%-.64s' tem excedido o '%s' recurso (atual valor: %ld)"
rus "Пользователь '%-.64s' превыÑил иÑпользование реÑурÑа '%s' (текущее значение: %ld)"
spa "Usuario '%-.64s' ha excedido el recurso '%s' (actual valor: %ld)"
@@ -4545,6 +4547,7 @@ ER_SPECIFIC_ACCESS_DENIED_ERROR 42000
eng "Access denied; you need (at least one of) the %-.128s privilege(s) for this operation"
ger "Kein Zugriff. Hierfür wird die Berechtigung %-.128s benötigt"
ita "Accesso non consentito. Serve il privilegio %-.128s per questa operazione"
+ jpn "アクセスã¯æ‹’å¦ã•ã‚Œã¾ã—ãŸã€‚ã“ã®æ“作ã«ã¯ %-.128s 権é™ãŒ(複数ã®å ´åˆã¯ã©ã‚Œã‹1ã¤)å¿…è¦ã§ã™ã€‚"
por "Acesso negado. Você precisa o privilégio %-.128s para essa operação"
rus "Ð’ доÑтупе отказано. Вам нужны привилегии %-.128s Ð´Ð»Ñ Ñтой операции"
spa "Acceso negado. Usted necesita el privilegio %-.128s para esta operación"
@@ -4555,6 +4558,7 @@ ER_LOCAL_VARIABLE
eng "Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL"
ger "Variable '%-.64s' ist eine lokale Variable und kann nicht mit SET GLOBAL verändert werden"
ita "La variabile '%-.64s' e' una variabile locale ( SESSION ) e non puo' essere cambiata usando SET GLOBAL"
+ jpn "変数 '%-.64s' ã¯ã‚»ãƒƒã‚·ãƒ§ãƒ³å¤‰æ•°ã§ã™ã€‚SET GLOBALã§ã¯ä½¿ç”¨ã§ãã¾ã›ã‚“。"
por "Variável '%-.64s' é uma SESSION variável e não pode ser usada com SET GLOBAL"
rus "ÐŸÐµÑ€ÐµÐ¼ÐµÐ½Ð½Ð°Ñ '%-.64s' ÑвлÑетÑÑ Ð¿Ð¾Ñ‚Ð¾ÐºÐ¾Ð²Ð¾Ð¹ (SESSION) переменной и не может быть изменена Ñ Ð¿Ð¾Ð¼Ð¾Ñ‰ÑŒÑŽ SET GLOBAL"
spa "Variable '%-.64s' es una SESSION variable y no puede ser usada con SET GLOBAL"
@@ -4564,6 +4568,7 @@ ER_GLOBAL_VARIABLE
eng "Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL"
ger "Variable '%-.64s' ist eine globale Variable und muss mit SET GLOBAL verändert werden"
ita "La variabile '%-.64s' e' una variabile globale ( GLOBAL ) e deve essere cambiata usando SET GLOBAL"
+ jpn "変数 '%-.64s' ã¯ã‚°ãƒ­ãƒ¼ãƒãƒ«å¤‰æ•°ã§ã™ã€‚SET GLOBALを使用ã—ã¦ãã ã•ã„。"
por "Variável '%-.64s' é uma GLOBAL variável e deve ser configurada com SET GLOBAL"
rus "ÐŸÐµÑ€ÐµÐ¼ÐµÐ½Ð½Ð°Ñ '%-.64s' ÑвлÑетÑÑ Ð³Ð»Ð¾Ð±Ð°Ð»ÑŒÐ½Ð¾Ð¹ (GLOBAL) переменной, и ее Ñледует изменÑÑ‚ÑŒ Ñ Ð¿Ð¾Ð¼Ð¾Ñ‰ÑŒÑŽ SET GLOBAL"
spa "Variable '%-.64s' es una GLOBAL variable y no puede ser configurada con SET GLOBAL"
@@ -4573,6 +4578,7 @@ ER_NO_DEFAULT 42000
eng "Variable '%-.64s' doesn't have a default value"
ger "Variable '%-.64s' hat keinen Vorgabewert"
ita "La variabile '%-.64s' non ha un valore di default"
+ jpn "変数 '%-.64s' ã«ã¯ãƒ‡ãƒ•ã‚©ãƒ«ãƒˆå€¤ãŒã‚ã‚Šã¾ã›ã‚“。"
por "Variável '%-.64s' não tem um valor padrão"
rus "ÐŸÐµÑ€ÐµÐ¼ÐµÐ½Ð½Ð°Ñ '%-.64s' не имеет Ð·Ð½Ð°Ñ‡ÐµÐ½Ð¸Ñ Ð¿Ð¾ умолчанию"
spa "Variable '%-.64s' no tiene un valor patrón"
@@ -4582,6 +4588,7 @@ ER_WRONG_VALUE_FOR_VAR 42000
eng "Variable '%-.64s' can't be set to the value of '%-.200s'"
ger "Variable '%-.64s' kann nicht auf '%-.200s' gesetzt werden"
ita "Alla variabile '%-.64s' non puo' essere assegato il valore '%-.200s'"
+ jpn "変数 '%-.64s' ã«å€¤ '%-.200s' を設定ã§ãã¾ã›ã‚“。"
por "Variável '%-.64s' não pode ser configurada para o valor de '%-.200s'"
rus "ÐŸÐµÑ€ÐµÐ¼ÐµÐ½Ð½Ð°Ñ '%-.64s' не может быть уÑтановлена в значение '%-.200s'"
spa "Variable '%-.64s' no puede ser configurada para el valor de '%-.200s'"
@@ -4591,6 +4598,7 @@ ER_WRONG_TYPE_FOR_VAR 42000
eng "Incorrect argument type to variable '%-.64s'"
ger "Falscher Argumenttyp für Variable '%-.64s'"
ita "Tipo di valore errato per la variabile '%-.64s'"
+ jpn "変数 '%-.64s' ã¸ã®å€¤ã®åž‹ãŒä¸æ­£ã§ã™ã€‚"
por "Tipo errado de argumento para variável '%-.64s'"
rus "Ðеверный тип аргумента Ð´Ð»Ñ Ð¿ÐµÑ€ÐµÐ¼ÐµÐ½Ð½Ð¾Ð¹ '%-.64s'"
spa "Tipo de argumento equivocado para variable '%-.64s'"
@@ -4600,6 +4608,7 @@ ER_VAR_CANT_BE_READ
eng "Variable '%-.64s' can only be set, not read"
ger "Variable '%-.64s' kann nur verändert, nicht gelesen werden"
ita "Alla variabile '%-.64s' e' di sola scrittura quindi puo' essere solo assegnato un valore, non letto"
+ jpn "変数 '%-.64s' ã¯æ›¸ãè¾¼ã¿å°‚用ã§ã™ã€‚読ã¿è¾¼ã¿ã¯ã§ãã¾ã›ã‚“。"
por "Variável '%-.64s' somente pode ser configurada, não lida"
rus "ÐŸÐµÑ€ÐµÐ¼ÐµÐ½Ð½Ð°Ñ '%-.64s' может быть только уÑтановлена, но не Ñчитана"
spa "Variable '%-.64s' solamente puede ser configurada, no leída"
@@ -4609,6 +4618,7 @@ ER_CANT_USE_OPTION_HERE 42000
eng "Incorrect usage/placement of '%s'"
ger "Falsche Verwendung oder Platzierung von '%s'"
ita "Uso/posizione di '%s' sbagliato"
+ jpn "'%s' ã®ä½¿ç”¨æ³•ã¾ãŸã¯å ´æ‰€ãŒä¸æ­£ã§ã™ã€‚"
por "Errado uso/colocação de '%s'"
rus "Ðеверное иÑпользование или в неверном меÑте указан '%s'"
spa "Equivocado uso/colocación de '%s'"
@@ -4618,6 +4628,7 @@ ER_NOT_SUPPORTED_YET 42000
eng "This version of MariaDB doesn't yet support '%s'"
ger "Diese MariaDB-Version unterstützt '%s' nicht"
ita "Questa versione di MariaDB non supporta ancora '%s'"
+ jpn "このバージョンのMariaDBでは、まだ '%s' を利用できません。"
por "Esta versão de MariaDB não suporta ainda '%s'"
rus "Эта верÑÐ¸Ñ MariaDB пока еще не поддерживает '%s'"
spa "Esta versión de MariaDB no soporta todavia '%s'"
@@ -4627,6 +4638,7 @@ ER_MASTER_FATAL_ERROR_READING_BINLOG
eng "Got fatal error %d from master when reading data from binary log: '%-.320s'"
ger "Schwerer Fehler %d: '%-.320s vom Master beim Lesen des binären Logs"
ita "Errore fatale %d: '%-.320s' dal master leggendo i dati dal log binario"
+ jpn "致命的ãªã‚¨ãƒ©ãƒ¼ %d: '%-.320s' ãŒãƒžã‚¹ã‚¿ãƒ¼ã§ãƒã‚¤ãƒŠãƒªãƒ­ã‚°èª­ã¿è¾¼ã¿ä¸­ã«ç™ºç”Ÿã—ã¾ã—ãŸã€‚"
por "Obteve fatal erro %d: '%-.320s' do master quando lendo dados do binary log"
rus "Получена неиÑÐ¿Ñ€Ð°Ð²Ð¸Ð¼Ð°Ñ Ð¾ÑˆÐ¸Ð±ÐºÐ° %d: '%-.320s' от головного Ñервера в процеÑÑе выборки данных из двоичного журнала"
spa "Recibió fatal error %d: '%-.320s' del master cuando leyendo datos del binary log"
@@ -4634,6 +4646,7 @@ ER_MASTER_FATAL_ERROR_READING_BINLOG
ER_SLAVE_IGNORED_TABLE
eng "Slave SQL thread ignored the query because of replicate-*-table rules"
ger "Slave-SQL-Thread hat die Abfrage aufgrund von replicate-*-table-Regeln ignoriert"
+ jpn "replicate-*-table ルールã«å¾“ã£ã¦ã€ã‚¹ãƒ¬ãƒ¼ãƒ–SQLスレッドã¯ã‚¯ã‚¨ãƒªã‚’無視ã—ã¾ã—ãŸã€‚"
nla "Slave SQL thread negeerde de query vanwege replicate-*-table opties"
por "Slave SQL thread ignorado a consulta devido às normas de replicação-*-tabela"
spa "Slave SQL thread ignorado el query debido a las reglas de replicación-*-tabla"
@@ -4642,12 +4655,14 @@ ER_INCORRECT_GLOBAL_LOCAL_VAR
eng "Variable '%-.192s' is a %s variable"
serbian "Promenljiva '%-.192s' je %s promenljiva"
ger "Variable '%-.192s' ist eine %s-Variable"
+ jpn "変数 '%-.192s' 㯠%s 変数ã§ã™ã€‚"
nla "Variabele '%-.192s' is geen %s variabele"
spa "Variable '%-.192s' es una %s variable"
swe "Variabel '%-.192s' är av typ %s"
ER_WRONG_FK_DEF 42000
eng "Incorrect foreign key definition for '%-.192s': %s"
ger "Falsche Fremdschlüssel-Definition für '%-.192s': %s"
+ jpn "外部キー '%-.192s' ã®å®šç¾©ã®ä¸æ­£: %s"
nla "Incorrecte foreign key definitie voor '%-.192s': %s"
por "Definição errada da chave estrangeira para '%-.192s': %s"
spa "Equivocada definición de llave extranjera para '%-.192s': %s"
@@ -4655,6 +4670,7 @@ ER_WRONG_FK_DEF 42000
ER_KEY_REF_DO_NOT_MATCH_TABLE_REF
eng "Key reference and table reference don't match"
ger "Schlüssel- und Tabellenverweis passen nicht zusammen"
+ jpn "外部キーã®å‚照表ã¨å®šç¾©ãŒä¸€è‡´ã—ã¾ã›ã‚“。"
nla "Sleutel- en tabelreferentie komen niet overeen"
por "Referência da chave e referência da tabela não coincidem"
spa "Referencia de llave y referencia de tabla no coinciden"
@@ -4662,6 +4678,7 @@ ER_KEY_REF_DO_NOT_MATCH_TABLE_REF
ER_OPERAND_COLUMNS 21000
eng "Operand should contain %d column(s)"
ger "Operand sollte %d Spalte(n) enthalten"
+ jpn "オペランド㫠%d 個ã®åˆ—ãŒå¿…è¦ã§ã™ã€‚"
nla "Operand behoort %d kolommen te bevatten"
rus "Операнд должен Ñодержать %d колонок"
spa "Operando debe tener %d columna(s)"
@@ -4669,6 +4686,7 @@ ER_OPERAND_COLUMNS 21000
ER_SUBQUERY_NO_1_ROW 21000
eng "Subquery returns more than 1 row"
ger "Unterabfrage lieferte mehr als einen Datensatz zurück"
+ jpn "サブクエリãŒ2行以上ã®çµæžœã‚’è¿”ã—ã¾ã™ã€‚"
nla "Subquery retourneert meer dan 1 rij"
por "Subconsulta retorna mais que 1 registro"
rus "ÐŸÐ¾Ð´Ð·Ð°Ð¿Ñ€Ð¾Ñ Ð²Ð¾Ð·Ð²Ñ€Ð°Ñ‰Ð°ÐµÑ‚ более одной запиÑи"
@@ -4679,6 +4697,7 @@ ER_UNKNOWN_STMT_HANDLER
dan "Unknown prepared statement handler (%.*s) given to %s"
eng "Unknown prepared statement handler (%.*s) given to %s"
ger "Unbekannter Prepared-Statement-Handler (%.*s) für %s angegeben"
+ jpn "'%.*s' ã¯ãƒ—リペアードステートメントã®ä¸æ˜Žãªãƒãƒ³ãƒ‰ãƒ«ã§ã™ã€‚(%s ã§æŒ‡å®šã•ã‚Œã¾ã—ãŸ)"
nla "Onebekende prepared statement handler (%.*s) voor %s aangegeven"
por "Desconhecido manipulador de declaração preparado (%.*s) determinado para %s"
spa "Desconocido preparado comando handler (%.*s) dado para %s"
@@ -4687,6 +4706,7 @@ ER_UNKNOWN_STMT_HANDLER
ER_CORRUPT_HELP_DB
eng "Help database is corrupt or does not exist"
ger "Die Hilfe-Datenbank ist beschädigt oder existiert nicht"
+ jpn "ヘルプデータベースã¯å£Šã‚Œã¦ã„ã‚‹ã‹å­˜åœ¨ã—ã¾ã›ã‚“。"
nla "Help database is beschadigd of bestaat niet"
por "Banco de dado de ajuda corrupto ou não existente"
spa "Base de datos Help está corrupto o no existe"
@@ -4694,6 +4714,7 @@ ER_CORRUPT_HELP_DB
ER_CYCLIC_REFERENCE
eng "Cyclic reference on subqueries"
ger "Zyklischer Verweis in Unterabfragen"
+ jpn "サブクエリã®å‚ç…§ãŒãƒ«ãƒ¼ãƒ—ã—ã¦ã„ã¾ã™ã€‚"
nla "Cyclische verwijzing in subqueries"
por "Referência cíclica em subconsultas"
rus "ЦикличеÑÐºÐ°Ñ ÑÑылка на подзапроÑ"
@@ -4703,6 +4724,7 @@ ER_CYCLIC_REFERENCE
ER_AUTO_CONVERT
eng "Converting column '%s' from %s to %s"
ger "Feld '%s' wird von %s nach %s umgewandelt"
+ jpn "列 '%s' ã‚’ %s ã‹ã‚‰ %s ã¸å¤‰æ›ã—ã¾ã™ã€‚"
nla "Veld '%s' wordt van %s naar %s geconverteerd"
por "Convertendo coluna '%s' de %s para %s"
rus "Преобразование Ð¿Ð¾Ð»Ñ '%s' из %s в %s"
@@ -4712,6 +4734,7 @@ ER_AUTO_CONVERT
ER_ILLEGAL_REFERENCE 42S22
eng "Reference '%-.64s' not supported (%s)"
ger "Verweis '%-.64s' wird nicht unterstützt (%s)"
+ jpn "'%-.64s' ã®å‚ç…§ã¯ã§ãã¾ã›ã‚“。(%s)"
nla "Verwijzing '%-.64s' niet ondersteund (%s)"
por "Referência '%-.64s' não suportada (%s)"
rus "СÑылка '%-.64s' не поддерживаетÑÑ (%s)"
@@ -4721,6 +4744,7 @@ ER_ILLEGAL_REFERENCE 42S22
ER_DERIVED_MUST_HAVE_ALIAS 42000
eng "Every derived table must have its own alias"
ger "Für jede abgeleitete Tabelle muss ein eigener Alias angegeben werden"
+ jpn "導出表ã«ã¯åˆ¥åãŒå¿…é ˆã§ã™ã€‚"
nla "Voor elke afgeleide tabel moet een unieke alias worden gebruikt"
por "Cada tabela derivada deve ter seu próprio alias"
spa "Cada tabla derivada debe tener su propio alias"
@@ -4728,6 +4752,7 @@ ER_DERIVED_MUST_HAVE_ALIAS 42000
ER_SELECT_REDUCED 01000
eng "Select %u was reduced during optimization"
ger "Select %u wurde während der Optimierung reduziert"
+ jpn "Select %u ã¯æœ€é©åŒ–ã«ã‚ˆã£ã¦æ¸›ã‚‰ã•ã‚Œã¾ã—ãŸã€‚"
nla "Select %u werd geredureerd tijdens optimtalisatie"
por "Select %u foi reduzido durante otimização"
rus "Select %u был упразднен в процеÑÑе оптимизации"
@@ -4737,6 +4762,7 @@ ER_SELECT_REDUCED 01000
ER_TABLENAME_NOT_ALLOWED_HERE 42000
eng "Table '%-.192s' from one of the SELECTs cannot be used in %-.32s"
ger "Tabelle '%-.192s', die in einem der SELECT-Befehle verwendet wurde, kann nicht in %-.32s verwendet werden"
+ jpn "特定ã®SELECTã®ã¿ã§ä½¿ç”¨ã®è¡¨ '%-.192s' 㯠%-.32s ã§ã¯ä½¿ç”¨ã§ãã¾ã›ã‚“。"
nla "Tabel '%-.192s' uit een van de SELECTS kan niet in %-.32s gebruikt worden"
por "Tabela '%-.192s' de um dos SELECTs não pode ser usada em %-.32s"
spa "Tabla '%-.192s' de uno de los SELECT no puede ser usada en %-.32s"
@@ -4744,6 +4770,7 @@ ER_TABLENAME_NOT_ALLOWED_HERE 42000
ER_NOT_SUPPORTED_AUTH_MODE 08004
eng "Client does not support authentication protocol requested by server; consider upgrading MariaDB client"
ger "Client unterstützt das vom Server erwartete Authentifizierungsprotokoll nicht. Bitte aktualisieren Sie Ihren MariaDB-Client"
+ jpn "クライアントã¯ã‚µãƒ¼ãƒãƒ¼ãŒè¦æ±‚ã™ã‚‹èªè¨¼ãƒ—ロトコルã«å¯¾å¿œã§ãã¾ã›ã‚“。MariaDBクライアントã®ã‚¢ãƒƒãƒ—グレードを検討ã—ã¦ãã ã•ã„。"
nla "Client ondersteunt het door de server verwachtte authenticatieprotocol niet. Overweeg een nieuwere MariaDB client te gebruiken"
por "Cliente não suporta o protocolo de autenticação exigido pelo servidor; considere a atualização do cliente MariaDB"
spa "Cliente no soporta protocolo de autenticación solicitado por el servidor; considere actualizar el cliente MariaDB"
@@ -4751,6 +4778,7 @@ ER_NOT_SUPPORTED_AUTH_MODE 08004
ER_SPATIAL_CANT_HAVE_NULL 42000
eng "All parts of a SPATIAL index must be NOT NULL"
ger "Alle Teile eines SPATIAL-Index müssen als NOT NULL deklariert sein"
+ jpn "空間索引ã®ã‚­ãƒ¼åˆ—㯠NOT NULL ã§ãªã‘ã‚Œã°ã„ã‘ã¾ã›ã‚“。"
nla "Alle delete van een SPATIAL index dienen als NOT NULL gedeclareerd te worden"
por "Todas as partes de uma SPATIAL index devem ser NOT NULL"
spa "Todas las partes de una SPATIAL index deben ser NOT NULL"
@@ -4758,6 +4786,7 @@ ER_SPATIAL_CANT_HAVE_NULL 42000
ER_COLLATION_CHARSET_MISMATCH 42000
eng "COLLATION '%s' is not valid for CHARACTER SET '%s'"
ger "COLLATION '%s' ist für CHARACTER SET '%s' ungültig"
+ jpn "COLLATION '%s' 㯠CHARACTER SET '%s' ã«é©ç”¨ã§ãã¾ã›ã‚“。"
nla "COLLATION '%s' is niet geldig voor CHARACTER SET '%s'"
por "COLLATION '%s' não é válida para CHARACTER SET '%s'"
spa "COLLATION '%s' no es válido para CHARACTER SET '%s'"
@@ -4765,6 +4794,7 @@ ER_COLLATION_CHARSET_MISMATCH 42000
ER_SLAVE_WAS_RUNNING
eng "Slave is already running"
ger "Slave läuft bereits"
+ jpn "スレーブã¯ã™ã§ã«ç¨¼åƒä¸­ã§ã™ã€‚"
nla "Slave is reeds actief"
por "O slave já está rodando"
spa "Slave ya está funcionando"
@@ -4772,6 +4802,7 @@ ER_SLAVE_WAS_RUNNING
ER_SLAVE_WAS_NOT_RUNNING
eng "Slave already has been stopped"
ger "Slave wurde bereits angehalten"
+ jpn "スレーブã¯ã™ã§ã«åœæ­¢ã—ã¦ã„ã¾ã™ã€‚"
nla "Slave is reeds gestopt"
por "O slave já está parado"
spa "Slave ya fué parado"
@@ -4779,24 +4810,28 @@ ER_SLAVE_WAS_NOT_RUNNING
ER_TOO_BIG_FOR_UNCOMPRESS
eng "Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)"
ger "Unkomprimierte Daten sind zu groß. Die maximale Größe beträgt %d (wahrscheinlich wurde die Länge der unkomprimierten Daten beschädigt)"
+ jpn "展開後ã®ãƒ‡ãƒ¼ã‚¿ãŒå¤§ãã™ãŽã¾ã™ã€‚最大サイズ㯠%d ã§ã™ã€‚(展開後データã®é•·ã•æƒ…å ±ãŒå£Šã‚Œã¦ã„ã‚‹å¯èƒ½æ€§ã‚‚ã‚ã‚Šã¾ã™ã€‚)"
nla "Ongecomprimeerder data is te groot; de maximum lengte is %d (waarschijnlijk, de lengte van de gecomprimeerde data was beschadigd)"
por "Tamanho muito grande dos dados des comprimidos. O máximo tamanho é %d. (provavelmente, o comprimento dos dados descomprimidos está corrupto)"
spa "Tamaño demasiado grande para datos descomprimidos. El máximo tamaño es %d. (probablemente, extensión de datos descomprimidos fué corrompida)"
ER_ZLIB_Z_MEM_ERROR
eng "ZLIB: Not enough memory"
ger "ZLIB: Nicht genug Speicher"
+ jpn "ZLIB: メモリä¸è¶³ã§ã™ã€‚"
nla "ZLIB: Onvoldoende geheugen"
por "ZLIB: Não suficiente memória disponível"
spa "Z_MEM_ERROR: No suficiente memoria para zlib"
ER_ZLIB_Z_BUF_ERROR
eng "ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)"
ger "ZLIB: Im Ausgabepuffer ist nicht genug Platz vorhanden (wahrscheinlich wurde die Länge der unkomprimierten Daten beschädigt)"
+ jpn "ZLIB: 出力ãƒãƒƒãƒ•ã‚¡ã«å分ãªç©ºããŒã‚ã‚Šã¾ã›ã‚“。(展開後データã®é•·ã•æƒ…å ±ãŒå£Šã‚Œã¦ã„ã‚‹å¯èƒ½æ€§ã‚‚ã‚ã‚Šã¾ã™ã€‚)"
nla "ZLIB: Onvoldoende ruimte in uitgaande buffer (waarschijnlijk, de lengte van de ongecomprimeerde data was beschadigd)"
por "ZLIB: Não suficiente espaço no buffer emissor (provavelmente, o comprimento dos dados descomprimidos está corrupto)"
spa "Z_BUF_ERROR: No suficiente espacio en el búfer de salida para zlib (probablemente, extensión de datos descomprimidos fué corrompida)"
ER_ZLIB_Z_DATA_ERROR
eng "ZLIB: Input data corrupted"
ger "ZLIB: Eingabedaten beschädigt"
+ jpn "ZLIB: 入力データãŒå£Šã‚Œã¦ã„ã¾ã™ã€‚"
nla "ZLIB: Invoer data beschadigd"
por "ZLIB: Dados de entrada está corrupto"
spa "ZLIB: Dato de entrada fué corrompido para zlib"
@@ -4805,18 +4840,21 @@ ER_CUT_VALUE_GROUP_CONCAT
ER_WARN_TOO_FEW_RECORDS 01000
eng "Row %lu doesn't contain data for all columns"
ger "Zeile %lu enthält nicht für alle Felder Daten"
+ jpn "è¡Œ %lu ã¯ã™ã¹ã¦ã®åˆ—ã¸ã®ãƒ‡ãƒ¼ã‚¿ã‚’å«ã‚“ã§ã„ã¾ã›ã‚“。"
nla "Rij %lu bevat niet de data voor alle kolommen"
por "Conta de registro é menor que a conta de coluna na linha %lu"
spa "Línea %lu no contiene datos para todas las columnas"
ER_WARN_TOO_MANY_RECORDS 01000
eng "Row %lu was truncated; it contained more data than there were input columns"
ger "Zeile %lu gekürzt, die Zeile enthielt mehr Daten, als es Eingabefelder gibt"
+ jpn "è¡Œ %lu ã¯ãƒ‡ãƒ¼ã‚¿ã‚’切りæ¨ã¦ã‚‰ã‚Œã¾ã—ãŸã€‚列よりも多ã„データをå«ã‚“ã§ã„ã¾ã—ãŸã€‚"
nla "Regel %lu ingekort, bevatte meer data dan invoer kolommen"
por "Conta de registro é maior que a conta de coluna na linha %lu"
spa "Línea %lu fué truncada; La misma contine mas datos que las que existen en las columnas de entrada"
ER_WARN_NULL_TO_NOTNULL 22004
eng "Column set to default value; NULL supplied to NOT NULL column '%s' at row %lu"
ger "Feld auf Vorgabewert gesetzt, da NULL für NOT-NULL-Feld '%s' in Zeile %lu angegeben"
+ jpn "列ã«ãƒ‡ãƒ•ã‚©ãƒ«ãƒˆå€¤ãŒè¨­å®šã•ã‚Œã¾ã—ãŸã€‚NOT NULLã®åˆ— '%s' ã« è¡Œ %lu 㧠NULL ãŒä¸Žãˆã‚‰ã‚Œã¾ã—ãŸã€‚"
por "Dado truncado, NULL fornecido para NOT NULL coluna '%s' na linha %lu"
spa "Datos truncado, NULL suministrado para NOT NULL columna '%s' en la línea %lu"
ER_WARN_DATA_OUT_OF_RANGE 22003
@@ -4824,17 +4862,20 @@ ER_WARN_DATA_OUT_OF_RANGE 22003
WARN_DATA_TRUNCATED 01000
eng "Data truncated for column '%s' at row %lu"
ger "Daten abgeschnitten für Feld '%s' in Zeile %lu"
+ jpn "列 '%s' ã® è¡Œ %lu ã§ãƒ‡ãƒ¼ã‚¿ãŒåˆ‡ã‚Šæ¨ã¦ã‚‰ã‚Œã¾ã—ãŸã€‚"
por "Dado truncado para coluna '%s' na linha %lu"
spa "Datos truncados para columna '%s' en la línea %lu"
ER_WARN_USING_OTHER_HANDLER
eng "Using storage engine %s for table '%s'"
ger "Für Tabelle '%s' wird Speicher-Engine %s benutzt"
+ jpn "ストレージエンジン %s ãŒè¡¨ '%s' ã«åˆ©ç”¨ã•ã‚Œã¦ã„ã¾ã™ã€‚"
por "Usando engine de armazenamento %s para tabela '%s'"
spa "Usando motor de almacenamiento %s para tabla '%s'"
swe "Använder handler %s för tabell '%s'"
ER_CANT_AGGREGATE_2COLLATIONS
eng "Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'"
ger "Unerlaubte Mischung von Sortierreihenfolgen (%s, %s) und (%s, %s) für Operation '%s'"
+ jpn "ç…§åˆé †åº (%s,%s) 㨠(%s,%s) ã®æ··åœ¨ã¯æ“作 '%s' ã§ã¯ä¸æ­£ã§ã™ã€‚"
por "Combinação ilegal de collations (%s,%s) e (%s,%s) para operação '%s'"
spa "Ilegal mezcla de collations (%s,%s) y (%s,%s) para operación '%s'"
ER_DROP_USER
@@ -4843,42 +4884,50 @@ ER_DROP_USER
ER_REVOKE_GRANTS
eng "Can't revoke all privileges for one or more of the requested users"
ger "Kann nicht alle Berechtigungen widerrufen, die für einen oder mehrere Benutzer gewährt wurden"
+ jpn "指定ã•ã‚ŒãŸãƒ¦ãƒ¼ã‚¶ãƒ¼ã‹ã‚‰æŒ‡å®šã•ã‚ŒãŸå…¨ã¦ã®æ¨©é™ã‚’剥奪ã™ã‚‹ã“ã¨ãŒã§ãã¾ã›ã‚“ã§ã—ãŸã€‚"
por "Não pode revocar todos os privilégios, grant para um ou mais dos usuários pedidos"
spa "No puede revocar todos los privilegios, derecho para uno o mas de los usuarios solicitados"
ER_CANT_AGGREGATE_3COLLATIONS
eng "Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'"
ger "Unerlaubte Mischung von Sortierreihenfolgen (%s, %s), (%s, %s), (%s, %s) für Operation '%s'"
+ jpn "ç…§åˆé †åº (%s,%s), (%s,%s), (%s,%s) ã®æ··åœ¨ã¯æ“作 '%s' ã§ã¯ä¸æ­£ã§ã™ã€‚"
por "Ilegal combinação de collations (%s,%s), (%s,%s), (%s,%s) para operação '%s'"
spa "Ilegal mezcla de collations (%s,%s), (%s,%s), (%s,%s) para operación '%s'"
ER_CANT_AGGREGATE_NCOLLATIONS
eng "Illegal mix of collations for operation '%s'"
ger "Unerlaubte Mischung von Sortierreihenfolgen für Operation '%s'"
+ jpn "æ“作 '%s' ã§ã¯ä¸æ­£ãªç…§åˆé †åºã®æ··åœ¨ã§ã™ã€‚"
por "Ilegal combinação de collations para operação '%s'"
spa "Ilegal mezcla de collations para operación '%s'"
ER_VARIABLE_IS_NOT_STRUCT
eng "Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)"
ger "Variable '%-.64s' ist keine Variablen-Komponente (kann nicht als XXXX.variablen_name verwendet werden)"
+ jpn "変数 '%-.64s' ã¯æ§‹é€ å¤‰æ•°ã®æ§‹æˆè¦ç´ ã§ã¯ã‚ã‚Šã¾ã›ã‚“。(XXXX.変数å ã¨ã„ã†æŒ‡å®šã¯ã§ãã¾ã›ã‚“。)"
por "Variável '%-.64s' não é uma variável componente (Não pode ser usada como XXXX.variável_nome)"
spa "Variable '%-.64s' no es una variable componente (No puede ser usada como XXXX.variable_name)"
ER_UNKNOWN_COLLATION
eng "Unknown collation: '%-.64s'"
ger "Unbekannte Sortierreihenfolge: '%-.64s'"
+ jpn "ä¸æ˜Žãªç…§åˆé †åº: '%-.64s'"
por "Collation desconhecida: '%-.64s'"
spa "Collation desconocida: '%-.64s'"
ER_SLAVE_IGNORED_SSL_PARAMS
eng "SSL parameters in CHANGE MASTER are ignored because this MariaDB slave was compiled without SSL support; they can be used later if MariaDB slave with SSL is started"
ger "SSL-Parameter in CHANGE MASTER werden ignoriert, weil dieser MariaDB-Slave ohne SSL-Unterstützung kompiliert wurde. Sie können aber später verwendet werden, wenn ein MariaDB-Slave mit SSL gestartet wird"
+ jpn "ã“ã®MySQLスレーブã¯SSLサãƒãƒ¼ãƒˆã‚’å«ã‚ã¦ã‚³ãƒ³ãƒ‘イルã•ã‚Œã¦ã„ãªã„ã®ã§ã€CHANGE MASTER ã®SSLパラメータã¯ç„¡è¦–ã•ã‚Œã¾ã—ãŸã€‚今後SSLサãƒãƒ¼ãƒˆã‚’æŒã¤MySQLスレーブを起動ã™ã‚‹éš›ã«åˆ©ç”¨ã•ã‚Œã¾ã™ã€‚"
por "SSL parâmetros em CHANGE MASTER são ignorados porque este escravo MariaDB foi compilado sem o SSL suporte. Os mesmos podem ser usados mais tarde quando o escravo MariaDB com SSL seja iniciado."
spa "Parametros SSL en CHANGE MASTER son ignorados porque este slave MariaDB fue compilado sin soporte SSL; pueden ser usados despues cuando el slave MariaDB con SSL sea inicializado"
ER_SERVER_IS_IN_SECURE_AUTH_MODE
eng "Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format"
ger "Server läuft im Modus --secure-auth, aber '%s'@'%s' hat ein Passwort im alten Format. Bitte Passwort ins neue Format ändern"
+ jpn "サーãƒãƒ¼ã¯ --secure-auth モードã§ç¨¼åƒã—ã¦ã„ã¾ã™ã€‚ã—ã‹ã— '%s'@'%s' ã¯å¤ã„å½¢å¼ã®ãƒ‘スワードを使用ã—ã¦ã„ã¾ã™ã€‚æ–°ã—ã„å½¢å¼ã®ãƒ‘スワードã«å¤‰æ›´ã—ã¦ãã ã•ã„。"
por "Servidor está rodando em --secure-auth modo, porêm '%s'@'%s' tem senha no formato antigo; por favor troque a senha para o novo formato"
rus "Сервер запущен в режиме --secure-auth (безопаÑной авторизации), но Ð´Ð»Ñ Ð¿Ð¾Ð»ÑŒÐ·Ð¾Ð²Ð°Ñ‚ÐµÐ»Ñ '%s'@'%s' пароль Ñохранён в Ñтаром формате; необходимо обновить формат паролÑ"
spa "Servidor está rodando en modo --secure-auth, pero '%s'@'%s' tiene clave en el antiguo formato; por favor cambie la clave para el nuevo formato"
ER_WARN_FIELD_RESOLVED
eng "Field or reference '%-.192s%s%-.192s%s%-.192s' of SELECT #%d was resolved in SELECT #%d"
ger "Feld oder Verweis '%-.192s%s%-.192s%s%-.192s' im SELECT-Befehl Nr. %d wurde im SELECT-Befehl Nr. %d aufgelöst"
+ jpn "フィールドã¾ãŸã¯å‚ç…§ '%-.192s%s%-.192s%s%-.192s' 㯠SELECT #%d ã§ã¯ãªãã€SELECT #%d ã§è§£æ±ºã•ã‚Œã¾ã—ãŸã€‚"
por "Campo ou referência '%-.192s%s%-.192s%s%-.192s' de SELECT #%d foi resolvido em SELECT #%d"
rus "Поле или ÑÑылка '%-.192s%s%-.192s%s%-.192s' из SELECTа #%d была найдена в SELECTе #%d"
spa "Campo o referencia '%-.192s%s%-.192s%s%-.192s' de SELECT #%d fue resolvido en SELECT #%d"
@@ -4886,27 +4935,32 @@ ER_WARN_FIELD_RESOLVED
ER_BAD_SLAVE_UNTIL_COND
eng "Incorrect parameter or combination of parameters for START SLAVE UNTIL"
ger "Falscher Parameter oder falsche Kombination von Parametern für START SLAVE UNTIL"
+ jpn "START SLAVE UNTIL ã¸ã®ãƒ‘ラメータã¾ãŸã¯ãã®çµ„ã¿åˆã‚ã›ãŒä¸æ­£ã§ã™ã€‚"
por "Parâmetro ou combinação de parâmetros errado para START SLAVE UNTIL"
spa "Parametro equivocado o combinación de parametros para START SLAVE UNTIL"
ER_MISSING_SKIP_SLAVE
eng "It is recommended to use --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you will get problems if you get an unexpected slave's mysqld restart"
ger "Es wird empfohlen, mit --skip-slave-start zu starten, wenn mit START SLAVE UNTIL eine Schritt-für-Schritt-Replikation ausgeführt wird. Ansonsten gibt es Probleme, wenn ein Slave-Server unerwartet neu startet"
+ jpn "START SLAVE UNTIL ã§æ®µéšŽçš„ã«ãƒ¬ãƒ—リケーションを行ã†éš›ã«ã¯ã€--skip-slave-start オプションを使ã†ã“ã¨ã‚’推奨ã—ã¾ã™ã€‚使ã‚ãªã„å ´åˆã€ã‚¹ãƒ¬ãƒ¼ãƒ–ã®mysqldãŒä¸æ…®ã®å†èµ·å‹•ã‚’ã™ã‚‹ã¨å•é¡ŒãŒç™ºç”Ÿã—ã¾ã™ã€‚"
por "É recomendado para rodar com --skip-slave-start quando fazendo replicação passo-por-passo com START SLAVE UNTIL, de outra forma você não está seguro em caso de inesperada reinicialição do mysqld escravo"
spa "Es recomendado rodar con --skip-slave-start cuando haciendo replicación step-by-step con START SLAVE UNTIL, a menos que usted no esté seguro en caso de inesperada reinicialización del mysqld slave"
ER_UNTIL_COND_IGNORED
eng "SQL thread is not to be started so UNTIL options are ignored"
ger "SQL-Thread soll nicht gestartet werden. Daher werden UNTIL-Optionen ignoriert"
+ jpn "スレーブSQLスレッドãŒé–‹å§‹ã•ã‚Œãªã„ãŸã‚ã€UNTILオプションã¯ç„¡è¦–ã•ã‚Œã¾ã—ãŸã€‚"
por "Thread SQL não pode ser inicializado tal que opções UNTIL são ignoradas"
spa "SQL thread no es inicializado tal que opciones UNTIL son ignoradas"
ER_WRONG_NAME_FOR_INDEX 42000
eng "Incorrect index name '%-.100s'"
ger "Falscher Indexname '%-.100s'"
+ jpn "索引å '%-.100s' ã¯ä¸æ­£ã§ã™ã€‚"
por "Incorreto nome de índice '%-.100s'"
spa "Nombre de índice incorrecto '%-.100s'"
swe "Felaktigt index namn '%-.100s'"
ER_WRONG_NAME_FOR_CATALOG 42000
eng "Incorrect catalog name '%-.100s'"
ger "Falscher Katalogname '%-.100s'"
+ jpn "カタログå '%-.100s' ã¯ä¸æ­£ã§ã™ã€‚"
por "Incorreto nome de catálogo '%-.100s'"
spa "Nombre de catalog incorrecto '%-.100s'"
swe "Felaktigt katalog namn '%-.100s'"
@@ -4921,33 +4975,39 @@ ER_WARN_QC_RESIZE
ER_BAD_FT_COLUMN
eng "Column '%-.192s' cannot be part of FULLTEXT index"
ger "Feld '%-.192s' kann nicht Teil eines FULLTEXT-Index sein"
+ jpn "列 '%-.192s' ã¯å…¨æ–‡ç´¢å¼•ã®ã‚­ãƒ¼ã«ã¯ã§ãã¾ã›ã‚“。"
por "Coluna '%-.192s' não pode ser parte de índice FULLTEXT"
spa "Columna '%-.192s' no puede ser parte de FULLTEXT index"
swe "Kolumn '%-.192s' kan inte vara del av ett FULLTEXT index"
ER_UNKNOWN_KEY_CACHE
eng "Unknown key cache '%-.100s'"
ger "Unbekannter Schlüssel-Cache '%-.100s'"
+ jpn "'%-.100s' ã¯ä¸æ˜Žãªã‚­ãƒ¼ã‚­ãƒ£ãƒƒã‚·ãƒ¥ã§ã™ã€‚"
por "Key cache desconhecida '%-.100s'"
spa "Desconocida key cache '%-.100s'"
swe "Okänd nyckel cache '%-.100s'"
ER_WARN_HOSTNAME_WONT_WORK
eng "MariaDB is started in --skip-name-resolve mode; you must restart it without this switch for this grant to work"
ger "MariaDB wurde mit --skip-name-resolve gestartet. Diese Option darf nicht verwendet werden, damit diese Rechtevergabe möglich ist"
+ jpn "MariaDB㯠--skip-name-resolve モードã§èµ·å‹•ã—ã¦ã„ã¾ã™ã€‚ã“ã®ã‚ªãƒ—ションを外ã—ã¦å†èµ·å‹•ã—ãªã‘ã‚Œã°ã€ã“ã®æ¨©é™æ“作ã¯æ©Ÿèƒ½ã—ã¾ã›ã‚“。"
por "MariaDB foi inicializado em modo --skip-name-resolve. Você necesita reincializá-lo sem esta opção para este grant funcionar"
spa "MariaDB esta inicializado en modo --skip-name-resolve. Usted necesita reinicializarlo sin esta opción para este derecho funcionar"
ER_UNKNOWN_STORAGE_ENGINE 42000
eng "Unknown storage engine '%s'"
ger "Unbekannte Speicher-Engine '%s'"
+ jpn "'%s' ã¯ä¸æ˜Žãªã‚¹ãƒˆãƒ¬ãƒ¼ã‚¸ã‚¨ãƒ³ã‚¸ãƒ³ã§ã™ã€‚"
por "Motor de tabela desconhecido '%s'"
spa "Desconocido motor de tabla '%s'"
ER_WARN_DEPRECATED_SYNTAX
eng "'%s' is deprecated and will be removed in a future release. Please use %s instead"
ger "'%s' ist veraltet. Bitte benutzen Sie '%s'"
+ jpn "'%s' ã¯å°†æ¥ã®ãƒªãƒªãƒ¼ã‚¹ã§å»ƒæ­¢äºˆå®šã§ã™ã€‚代ã‚ã‚Šã« %s を使用ã—ã¦ãã ã•ã„。"
por "'%s' é desatualizado. Use '%s' em seu lugar"
spa "'%s' está desaprobado, use '%s' en su lugar"
ER_NON_UPDATABLE_TABLE
eng "The target table %-.100s of the %s is not updatable"
ger "Die Zieltabelle %-.100s von %s ist nicht aktualisierbar"
+ jpn "対象表 %-.100s ã¯æ›´æ–°å¯èƒ½ã§ã¯ãªã„ã®ã§ã€%s ã‚’è¡Œãˆã¾ã›ã‚“。"
por "A tabela destino %-.100s do %s não é atualizável"
rus "Таблица %-.100s в %s не может изменÑÑ‚ÑÑ"
spa "La tabla destino %-.100s del %s no es actualizable"
@@ -4956,33 +5016,39 @@ ER_NON_UPDATABLE_TABLE
ER_FEATURE_DISABLED
eng "The '%s' feature is disabled; you need MariaDB built with '%s' to have it working"
ger "Das Feature '%s' ist ausgeschaltet, Sie müssen MariaDB mit '%s' übersetzen, damit es verfügbar ist"
+ jpn "機能 '%s' ã¯ç„¡åŠ¹ã§ã™ã€‚利用ã™ã‚‹ãŸã‚ã«ã¯ '%s' ã‚’å«ã‚ã¦ãƒ“ルドã—ãŸMariaDBãŒå¿…è¦ã§ã™ã€‚"
por "O recurso '%s' foi desativado; você necessita MariaDB construído com '%s' para ter isto funcionando"
spa "El recurso '%s' fue deshabilitado; usted necesita construir MariaDB con '%s' para tener eso funcionando"
swe "'%s' är inte aktiverad; För att aktivera detta måste du bygga om MariaDB med '%s' definierad"
ER_OPTION_PREVENTS_STATEMENT
eng "The MariaDB server is running with the %s option so it cannot execute this statement"
ger "Der MariaDB-Server läuft mit der Option %s und kann diese Anweisung deswegen nicht ausführen"
+ jpn "MariaDBサーãƒãƒ¼ãŒ %s オプションã§å®Ÿè¡Œã•ã‚Œã¦ã„ã‚‹ã®ã§ã€ã“ã®ã‚¹ãƒ†ãƒ¼ãƒˆãƒ¡ãƒ³ãƒˆã¯å®Ÿè¡Œã§ãã¾ã›ã‚“。"
por "O servidor MariaDB está rodando com a opção %s razão pela qual não pode executar esse commando"
spa "El servidor MariaDB está rodando con la opción %s tal que no puede ejecutar este comando"
swe "MariaDB är startad med %s. Pga av detta kan du inte använda detta kommando"
ER_DUPLICATED_VALUE_IN_TYPE
eng "Column '%-.100s' has duplicated value '%-.64s' in %s"
ger "Feld '%-.100s' hat doppelten Wert '%-.64s' in %s"
+ jpn "列 '%-.100s' ã§ã€é‡è¤‡ã™ã‚‹å€¤ '%-.64s' ㌠%s ã«æŒ‡å®šã•ã‚Œã¦ã„ã¾ã™ã€‚"
por "Coluna '%-.100s' tem valor duplicado '%-.64s' em %s"
spa "Columna '%-.100s' tiene valor doblado '%-.64s' en %s"
ER_TRUNCATED_WRONG_VALUE 22007
eng "Truncated incorrect %-.32s value: '%-.128s'"
ger "Falscher %-.32s-Wert gekürzt: '%-.128s'"
+ jpn "ä¸æ­£ãª %-.32s ã®å€¤ãŒåˆ‡ã‚Šæ¨ã¦ã‚‰ã‚Œã¾ã—ãŸã€‚: '%-.128s'"
por "Truncado errado %-.32s valor: '%-.128s'"
spa "Equivocado truncado %-.32s valor: '%-.128s'"
ER_TOO_MUCH_AUTO_TIMESTAMP_COLS
eng "Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause"
ger "Fehlerhafte Tabellendefinition. Es kann nur eine einzige TIMESTAMP-Spalte mit CURRENT_TIMESTAMP als DEFAULT oder in einer ON-UPDATE-Klausel geben"
+ jpn "ä¸æ­£ãªè¡¨å®šç¾©ã§ã™ã€‚DEFAULTå¥ã¾ãŸã¯ON UPDATEå¥ã« CURRENT_TIMESTAMP ã‚’ã¨ã‚‚ãªã†TIMESTAMPåž‹ã®åˆ—ã¯1ã¤ã¾ã§ã§ã™ã€‚"
por "Incorreta definição de tabela; Pode ter somente uma coluna TIMESTAMP com CURRENT_TIMESTAMP em DEFAULT ou ON UPDATE cláusula"
spa "Incorrecta definición de tabla; Solamente debe haber una columna TIMESTAMP con CURRENT_TIMESTAMP en DEFAULT o ON UPDATE cláusula"
ER_INVALID_ON_UPDATE
eng "Invalid ON UPDATE clause for '%-.192s' column"
ger "Ungültige ON-UPDATE-Klausel für Spalte '%-.192s'"
+ jpn "列 '%-.192s' ã« ON UPDATEå¥ã¯ç„¡åŠ¹ã§ã™ã€‚"
por "Inválida cláusula ON UPDATE para campo '%-.192s'"
spa "Inválido ON UPDATE cláusula para campo '%-.192s'"
ER_UNSUPPORTED_PS
@@ -4992,11 +5058,13 @@ ER_GET_ERRMSG
dan "Modtog fejl %d '%-.100s' fra %s"
eng "Got error %d '%-.100s' from %s"
ger "Fehler %d '%-.100s' von %s"
+ jpn "エラー %d '%-.100s' ㌠%s ã‹ã‚‰è¿”ã•ã‚Œã¾ã—ãŸã€‚"
nor "Mottok feil %d '%-.100s' fa %s"
norwegian-ny "Mottok feil %d '%-.100s' fra %s"
ER_GET_TEMPORARY_ERRMSG
dan "Modtog temporary fejl %d '%-.100s' fra %s"
eng "Got temporary error %d '%-.100s' from %s"
+ jpn "一時エラー %d '%-.100s' ㌠%s ã‹ã‚‰è¿”ã•ã‚Œã¾ã—ãŸã€‚"
ger "Temporärer Fehler %d '%-.100s' von %s"
nor "Mottok temporary feil %d '%-.100s' fra %s"
norwegian-ny "Mottok temporary feil %d '%-.100s' fra %s"
@@ -5462,6 +5530,7 @@ ER_TRG_IN_WRONG_SCHEMA
ER_STACK_OVERRUN_NEED_MORE
eng "Thread stack overrun: %ld bytes used of a %ld byte stack, and %ld bytes needed. Use 'mysqld --thread_stack=#' to specify a bigger stack."
ger "Thread-Stack-Überlauf: %ld Bytes eines %ld-Byte-Stacks in Verwendung, und %ld Bytes benötigt. Verwenden Sie 'mysqld --thread_stack=#', um einen größeren Stack anzugeben"
+ jpn "スレッドスタックä¸è¶³ã§ã™(使用: %ld ; サイズ: %ld ; è¦æ±‚: %ld)。より大ãã„値㧠'mysqld --thread_stack=#' ã®æŒ‡å®šã‚’ã—ã¦ãã ã•ã„。"
ER_TOO_LONG_BODY 42000 S1009
eng "Routine body for '%-.100s' is too long"
ger "Routinen-Body für '%-.100s' ist zu lang"
@@ -5567,6 +5636,7 @@ ER_WRONG_STRING_LENGTH
ER_NON_INSERTABLE_TABLE
eng "The target table %-.100s of the %s is not insertable-into"
ger "Die Zieltabelle %-.100s von %s ist nicht einfügbar"
+ jpn "対象表 %-.100s ã¯æŒ¿å…¥å¯èƒ½ã§ã¯ãªã„ã®ã§ã€%s ã‚’è¡Œãˆã¾ã›ã‚“。"
ER_ADMIN_WRONG_MRG_TABLE
eng "Table '%-.64s' is differently defined or of non-MyISAM type or doesn't exist"
ger "Tabelle '%-.64s' ist unterschiedlich definiert, nicht vom Typ MyISAM oder existiert nicht"
@@ -5836,12 +5906,10 @@ ER_EVENT_NEITHER_M_EXPR_NOR_M_AT
ger "Kein DATETIME-Ausdruck angegeben"
ER_UNUSED_2
- eng "Column count of mysql.%s is wrong. Expected %d, found %d. The table is probably corrupted"
- ger "Spaltenanzahl von mysql.%s falsch. %d erwartet, aber %d gefunden. Tabelle ist wahrscheinlich beschädigt"
+ eng ""
ER_UNUSED_3
- eng "Cannot load from mysql.%s. The table is probably corrupted"
- ger "Kann mysql.%s nicht einlesen. Tabelle ist wahrscheinlich beschädigt"
+ eng ""
ER_EVENT_CANNOT_DELETE
eng "Failed to delete the event from mysql.event"
ger "Löschen des Events aus mysql.event fehlgeschlagen"
@@ -5869,8 +5937,7 @@ ER_CANT_LOCK_LOG_TABLE
eng "You can't use locks with log tables."
ger "Log-Tabellen können nicht gesperrt werden."
ER_UNUSED_4
- eng "Upholding foreign key constraints for table '%.192s', entry '%-.192s', key %d would lead to a duplicate entry"
- ger "Aufrechterhalten der Fremdschlüssel-Beschränkungen für Tabelle '%.192s', Eintrag '%-.192s', Schlüssel %d würde zu einem doppelten Eintrag führen"
+ eng ""
ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE
eng "Column count of mysql.%s is wrong. Expected %d, found %d. Created with MariaDB %d, now running %d. Please use mysql_upgrade to fix this error."
ger "Spaltenanzahl von mysql.%s falsch. %d erwartet, aber %d erhalten. Erzeugt mit MariaDB %d, jetzt unter %d. Bitte benutzen Sie mysql_upgrade, um den Fehler zu beheben"
@@ -5963,29 +6030,28 @@ ER_NATIVE_FCT_NAME_COLLISION
# When using this error message, use the ER_DUP_ENTRY error code. See, for
# example, code in handler.cc.
ER_DUP_ENTRY_WITH_KEY_NAME 23000 S1009
- cze "Zvojen-Bý klÃ­Ä '%-.64s' (Äíslo klíÄe '%-.192s')"
+ cze "Zvojený klíč '%-.64s' (číslo klíče '%-.192s')"
dan "Ens værdier '%-.64s' for indeks '%-.192s'"
nla "Dubbele ingang '%-.64s' voor zoeksleutel '%-.192s'"
eng "Duplicate entry '%-.64s' for key '%-.192s'"
- jps "'%-.64s' 㯠key '%-.192s' ã«ãŠã„ã¦é‡è¤‡ã—ã¦ã„ã¾ã™",
est "Kattuv väärtus '%-.64s' võtmele '%-.192s'"
fre "Duplicata du champ '%-.64s' pour la clef '%-.192s'"
ger "Doppelter Eintrag '%-.64s' für Schlüssel '%-.192s'"
greek "Διπλή εγγÏαφή '%-.64s' για το κλειδί '%-.192s'"
hun "Duplikalt bejegyzes '%-.64s' a '%-.192s' kulcs szerint."
ita "Valore duplicato '%-.64s' per la chiave '%-.192s'"
- jpn "'%-.64s' 㯠key '%-.192s' ã«ãŠã„ã¦é‡è¤‡ã—ã¦ã„ã¾ã™"
+ jpn "'%-.64s' は索引 '%-.192s' で重複しています。"
kor "ì¤‘ë³µëœ ìž…ë ¥ ê°’ '%-.64s': key '%-.192s'"
nor "Like verdier '%-.64s' for nøkkel '%-.192s'"
norwegian-ny "Like verdiar '%-.64s' for nykkel '%-.192s'"
- pol "Powtórzone wyst?pienie '%-.64s' dla klucza '%-.192s'"
+ pol "Powtórzone wystąpienie '%-.64s' dla klucza '%-.192s'"
por "Entrada '%-.64s' duplicada para a chave '%-.192s'"
rum "Cimpul '%-.64s' e duplicat pentru cheia '%-.192s'"
rus "ДублирующаÑÑÑ Ð·Ð°Ð¿Ð¸ÑÑŒ '%-.64s' по ключу '%-.192s'"
serbian "Dupliran unos '%-.64s' za kljuÄ '%-.192s'"
slo "Opakovaný kľúč '%-.64s' (číslo kľúča '%-.192s')"
spa "Entrada duplicada '%-.64s' para la clave '%-.192s'"
- swe "Dubbel nyckel '%-.64s' för nyckel '%-.192s'"
+ swe "Dublett '%-.64s' för nyckel '%-.192s'"
ukr "Дублюючий Ð·Ð°Ð¿Ð¸Ñ '%-.64s' Ð´Ð»Ñ ÐºÐ»ÑŽÑ‡Ð° '%-.192s'"
ER_BINLOG_PURGE_EMFILE
eng "Too many files opened, please execute the command again"
@@ -6051,9 +6117,8 @@ ER_TRG_CANT_OPEN_TABLE
ER_CANT_CREATE_SROUTINE
eng "Cannot create stored routine `%-.64s`. Check warnings"
ger "Kann gespeicherte Routine `%-.64s` nicht erzeugen. Beachten Sie die Warnungen"
-ER_NEVER_USED
- eng "Ambiguous slave modes combination. %s"
- ger "Mehrdeutige Kombination von Slave-Modi. %s"
+ER_UNUSED
+ eng ""
ER_NO_FORMAT_DESCRIPTION_EVENT_BEFORE_BINLOG_STATEMENT
eng "The BINLOG statement of type `%s` was not preceded by a format description BINLOG statement."
ger "Der BINLOG-Anweisung vom Typ `%s` ging keine BINLOG-Anweisung zur Formatbeschreibung voran."
@@ -6316,7 +6381,7 @@ ER_VALUES_IS_NOT_INT_TYPE_ERROR
swe "Värden i VALUES för partition '%-.64s' måste ha typen INT"
ER_ACCESS_DENIED_NO_PASSWORD_ERROR 28000
- cze "P-Břístup pro uživatele '%s'@'%s'"
+ cze "Přístup pro uživatele '%s'@'%s'"
dan "Adgang nægtet bruger: '%s'@'%s'"
nla "Toegang geweigerd voor gebruiker: '%s'@'%s'"
eng "Access denied for user '%s'@'%s'"
@@ -6409,7 +6474,6 @@ ER_PLUGIN_NO_UNINSTALL
ER_PLUGIN_NO_INSTALL
eng "Plugin '%s' is marked as not dynamically installable. You have to stop the server to install it."
-
ER_BINLOG_UNSAFE_WRITE_AUTOINC_SELECT
eng "Statements writing to a table with an auto-increment column after selecting from another table are unsafe because the order in which rows are retrieved determines what (if any) rows will be written. This order cannot be predicted and may differ on master and the slave."
@@ -6432,15 +6496,10 @@ ER_BINLOG_UNSAFE_AUTOINC_NOT_FIRST
# End of 5.5 error messages.
#
-ER_COL_COUNT_DOESNT_MATCH_CORRUPTED_V2
- eng "Column count of %s.%s is wrong. Expected %d, found %d. The table is probably corrupted"
- ger "Spaltenanzahl von %s.%s falsch. %d erwartet, aber %d gefunden. Tabelle ist wahrscheinlich beschädigt"
-
ER_CANNOT_LOAD_FROM_TABLE_V2
eng "Cannot load from %s.%s. The table is probably corrupted"
ger "Kann %s.%s nicht einlesen. Tabelle ist wahrscheinlich beschädigt"
-
ER_MASTER_DELAY_VALUE_OUT_OF_RANGE
eng "The requested value %u for the master delay exceeds the maximum %u"
ER_ONLY_FD_AND_RBR_EVENTS_ALLOWED_IN_BINLOG_STATEMENT
@@ -6495,25 +6554,9 @@ ER_PARTITION_CLAUSE_ON_NONPARTITIONED
ER_ROW_DOES_NOT_MATCH_GIVEN_PARTITION_SET
eng "Found a row not matching the given partition set"
swe "Hittade en rad som inte passar i någon given partition"
-ER_NO_SUCH_PARTITION
- cze "partion '%-.64s' neexistuje"
- dan "partition '%-.64s' eksisterer ikke"
- nla "partition '%-.64s' bestaat niet"
- eng "partition '%-.64s' doesn't exist"
- est "partition '%-.64s' ei eksisteeri"
- fre "La partition '%-.64s' n'existe pas"
- ger "Die partition '%-.64s' existiert nicht"
- hun "A '%-.64s' partition nem letezik"
- ita "La tabella particione '%-.64s' non esiste"
- nor "Partition '%-.64s' doesn't exist"
- norwegian-ny "Partition '%-.64s' doesn't exist"
- pol "Partition '%-.64s' doesn't exist"
- por "Particion '%-.64s' n�o existe"
- rum "Partition '%-.64s' nu exista"
- serbian "Partition '%-.64s' ne postoji"
- slo "Partition '%-.64s' doesn't exist"
- spa "Particion '%-.64s' no existe"
- swe "Det finns ingen partition som heter '%-.64s'"
+
+ER_UNUSED_5
+ eng ""
ER_CHANGE_RPL_INFO_REPOSITORY_FAILURE
eng "Failure while changing the type of replication repository: %s."
@@ -6555,18 +6598,13 @@ ER_FOREIGN_DUPLICATE_KEY_WITHOUT_CHILD_INFO 23000 S1009
eng "Foreign key constraint for table '%.192s', record '%-.192s' would lead to a duplicate entry in a child table"
ger "Fremdschlüssel-Beschränkung für Tabelle '%.192s', Datensatz '%-.192s' würde zu einem doppelten Eintrag in einer Kind-Tabelle führen"
swe "FOREIGN KEY constraint för tabell '%.192s', posten '%-.192s' kan inte uppdatera en barntabell på grund av UNIQUE-test"
+
ER_SQLTHREAD_WITH_SECURE_SLAVE
eng "Setting authentication options is not possible when only the Slave SQL Thread is being started."
ER_TABLE_HAS_NO_FT
eng "The table does not have FULLTEXT index to support this query"
-ER_INNODB_FT_LIMIT
- eng "InnoDB presently supports one FULLTEXT index per table"
-
-ER_INNODB_NO_FT_TEMP_TABLE
- eng "Cannot create FULLTEXT index on temporary InnoDB table"
-
ER_VARIABLE_NOT_SETTABLE_IN_SF_OR_TRIGGER
eng "The system variable %.200s cannot be set in stored functions or triggers."
@@ -6604,13 +6642,13 @@ ER_BAD_SLAVE_AUTO_POSITION
eng "Parameters MASTER_LOG_FILE, MASTER_LOG_POS, RELAY_LOG_FILE and RELAY_LOG_POS cannot be set when MASTER_AUTO_POSITION is active."
ER_AUTO_POSITION_REQUIRES_GTID_MODE_ON
- eng "CHANGE MASTER TO AUTO_POSITION = 1 can only be executed when GTID_MODE = ON."
+ eng "CHANGE MASTER TO MASTER_AUTO_POSITION = 1 can only be executed when GTID_MODE = ON."
ER_CANT_DO_IMPLICIT_COMMIT_IN_TRX_WHEN_GTID_NEXT_IS_SET
eng "Cannot execute statements with implicit commit inside a transaction when GTID_NEXT != AUTOMATIC or GTID_NEXT_LIST != NULL."
-ER_GTID_MODE_2_OR_3_REQUIRES_DISABLE_GTID_UNSAFE_STATEMENTS_ON
- eng "GTID_MODE = ON or GTID_MODE = UPGRADE_STEP_2 requires DISABLE_GTID_UNSAFE_STATEMENTS = 1."
+ER_GTID_MODE_2_OR_3_REQUIRES_ENFORCE_GTID_CONSISTENCY_ON
+ eng "GTID_MODE = ON or GTID_MODE = UPGRADE_STEP_2 requires ENFORCE_GTID_CONSISTENCY = 1."
ER_GTID_MODE_REQUIRES_BINLOG
eng "GTID_MODE = ON or UPGRADE_STEP_1 or UPGRADE_STEP_2 requires --log-bin and --log-slave-updates."
@@ -6628,13 +6666,13 @@ ER_FOUND_GTID_EVENT_WHEN_GTID_MODE_IS_OFF
eng "Found a Gtid_log_event or Previous_gtids_log_event when GTID_MODE = OFF."
ER_GTID_UNSAFE_NON_TRANSACTIONAL_TABLE
- eng "Updates to non-transactional tables are forbidden when DISABLE_GTID_UNSAFE_STATEMENTS = 1."
+ eng "When ENFORCE_GTID_CONSISTENCY = 1, updates to non-transactional tables can only be done in either autocommitted statements or single-statement transactions, and never in the same statement as updates to transactional tables."
ER_GTID_UNSAFE_CREATE_SELECT
- eng "CREATE TABLE ... SELECT is forbidden when DISABLE_GTID_UNSAFE_STATEMENTS = 1."
+ eng "CREATE TABLE ... SELECT is forbidden when ENFORCE_GTID_CONSISTENCY = 1."
ER_GTID_UNSAFE_CREATE_DROP_TEMPORARY_TABLE_IN_TRANSACTION
- eng "When DISABLE_GTID_UNSAFE_STATEMENTS = 1, the statements CREATE TEMPORARY TABLE and DROP TEMPORARY TABLE can be executed in a non-transactional context only, and require that AUTOCOMMIT = 1."
+ eng "When ENFORCE_GTID_CONSISTENCY = 1, the statements CREATE TEMPORARY TABLE and DROP TEMPORARY TABLE can be executed in a non-transactional context only, and require that AUTOCOMMIT = 1."
ER_GTID_MODE_CAN_ONLY_CHANGE_ONE_STEP_AT_A_TIME
eng "The value of GTID_MODE can only change one step at a time: OFF <-> UPGRADE_STEP_1 <-> UPGRADE_STEP_2 <-> ON. Also note that this value must be stepped up or down simultaneously on all servers; see the Manual for instructions."
@@ -6652,6 +6690,241 @@ ER_UNKNOWN_EXPLAIN_FORMAT
ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION 25006
eng "Cannot execute statement in a READ ONLY transaction."
+ER_TOO_LONG_TABLE_PARTITION_COMMENT
+ eng "Comment for table partition '%-.64s' is too long (max = %lu)"
+
+ER_SLAVE_CONFIGURATION
+ eng "Slave is not configured or failed to initialize properly. You must at least set --server-id to enable either a master or a slave. Additional error messages can be found in the MySQL error log."
+
+ER_INNODB_FT_LIMIT
+ eng "InnoDB presently supports one FULLTEXT index creation at a time"
+
+ER_INNODB_NO_FT_TEMP_TABLE
+ eng "Cannot create FULLTEXT index on temporary InnoDB table"
+
+ER_INNODB_FT_WRONG_DOCID_COLUMN
+ eng "Column '%-.192s' is of wrong type for an InnoDB FULLTEXT index"
+
+ER_INNODB_FT_WRONG_DOCID_INDEX
+ eng "Index '%-.192s' is of wrong type for an InnoDB FULLTEXT index"
+
+ER_INNODB_ONLINE_LOG_TOO_BIG
+ eng "Creating index '%-.192s' required more than 'innodb_online_alter_log_max_size' bytes of modification log. Please try again."
+
+ER_UNKNOWN_ALTER_ALGORITHM
+ eng "Unknown ALGORITHM '%s'"
+
+ER_UNKNOWN_ALTER_LOCK
+ eng "Unknown LOCK type '%s'"
+
+ER_MTS_CHANGE_MASTER_CANT_RUN_WITH_GAPS
+ eng "CHANGE MASTER cannot be executed when the slave was stopped with an error or killed in MTS mode. Consider using RESET SLAVE or START SLAVE UNTIL."
+
+ER_MTS_RECOVERY_FAILURE
+ eng "Cannot recover after SLAVE errored out in parallel execution mode. Additional error messages can be found in the MySQL error log."
+
+ER_MTS_RESET_WORKERS
+ eng "Cannot clean up worker info tables. Additional error messages can be found in the MySQL error log."
+
+ER_COL_COUNT_DOESNT_MATCH_CORRUPTED_V2
+ eng "Column count of %s.%s is wrong. Expected %d, found %d. The table is probably corrupted"
+ ger "Spaltenanzahl von %s.%s falsch. %d erwartet, aber %d gefunden. Tabelle ist wahrscheinlich beschädigt"
+
+ER_SLAVE_SILENT_RETRY_TRANSACTION
+ eng "Slave must silently retry current transaction"
+
+ER_DISCARD_FK_CHECKS_RUNNING
+ eng "There is a foreign key check running on table '%-.192s'. Cannot discard the table."
+
+ER_TABLE_SCHEMA_MISMATCH
+ eng "Schema mismatch (%s)"
+
+ER_TABLE_IN_SYSTEM_TABLESPACE
+ eng "Table '%-.192s' in system tablespace"
+
+ER_IO_READ_ERROR
+ eng "IO Read error: (%lu, %s) %s"
+
+ER_IO_WRITE_ERROR
+ eng "IO Write error: (%lu, %s) %s"
+
+ER_TABLESPACE_MISSING
+ eng "Tablespace is missing for table '%-.192s'"
+
+ER_TABLESPACE_EXISTS
+ eng "Tablespace for table '%-.192s' exists. Please DISCARD the tablespace before IMPORT."
+
+ER_TABLESPACE_DISCARDED
+ eng "Tablespace has been discarded for table '%-.192s'"
+
+ER_INTERNAL_ERROR
+ eng "Internal error: '%-.192s'"
+
+ER_INNODB_IMPORT_ERROR
+ eng "ALTER TABLE '%-.192s' IMPORT TABLESPACE failed with error %lu : '%s'"
+
+ER_INNODB_INDEX_CORRUPT
+ eng "Index corrupt: %s"
+
+ER_INVALID_YEAR_COLUMN_LENGTH
+ eng "YEAR(%lu) column type is deprecated. Creating YEAR(4) column instead."
+ rus "Тип YEAR(%lu) более не поддерживается, вместо него будет создана колонка с типом YEAR(4)."
+
+ER_NOT_VALID_PASSWORD
+ eng "Your password does not satisfy the current policy requirements"
+
+ER_MUST_CHANGE_PASSWORD
+ eng "You must SET PASSWORD before executing this statement"
+ bgn "Трябва първо да си смените паролата със SET PASSWORD за да можете да изпълните тази команда"
+
+ER_FK_NO_INDEX_CHILD
+ eng "Failed to add the foreign key constraint. Missing index for constraint '%s' in the foreign table '%s'"
+
+ER_FK_NO_INDEX_PARENT
+ eng "Failed to add the foreign key constraint. Missing index for constraint '%s' in the referenced table '%s'"
+
+ER_FK_FAIL_ADD_SYSTEM
+ eng "Failed to add the foreign key constraint '%s' to system tables"
+
+ER_FK_CANNOT_OPEN_PARENT
+ eng "Failed to open the referenced table '%s'"
+
+ER_FK_INCORRECT_OPTION
+ eng "Failed to add the foreign key constraint on table '%s'. Incorrect options in FOREIGN KEY constraint '%s'"
+
+ER_FK_DUP_NAME
+ eng "Duplicate foreign key constraint name '%s'"
+
+ER_PASSWORD_FORMAT
+ eng "The password hash doesn't have the expected format. Check if the correct password algorithm is being used with the PASSWORD() function."
+
+ER_FK_COLUMN_CANNOT_DROP
+ eng "Cannot drop column '%-.192s': needed in a foreign key constraint '%-.192s'"
+ ger "Kann Spalte '%-.192s' nicht löschen: wird für eine Fremdschlüsselbeschränkung '%-.192s' benötigt"
+
+ER_FK_COLUMN_CANNOT_DROP_CHILD
+ eng "Cannot drop column '%-.192s': needed in a foreign key constraint '%-.192s' of table '%-.192s'"
+ ger "Kann Spalte '%-.192s' nicht löschen: wird für eine Fremdschlüsselbeschränkung '%-.192s' der Tabelle '%-.192s' benötigt"
+
+ER_FK_COLUMN_NOT_NULL
+ eng "Column '%-.192s' cannot be NOT NULL: needed in a foreign key constraint '%-.192s' SET NULL"
+ ger "Spalte '%-.192s' kann nicht NOT NULL sein: wird für eine Fremdschlüsselbeschränkung '%-.192s' SET NULL benötigt"
+
+ER_DUP_INDEX
+ eng "Duplicate index '%-.64s' defined on the table '%-.64s.%-.64s'. This is deprecated and will be disallowed in a future release."
+
+ER_FK_COLUMN_CANNOT_CHANGE
+ eng "Cannot change column '%-.192s': used in a foreign key constraint '%-.192s'"
+
+ER_FK_COLUMN_CANNOT_CHANGE_CHILD
+ eng "Cannot change column '%-.192s': used in a foreign key constraint '%-.192s' of table '%-.192s'"
+
+ER_FK_CANNOT_DELETE_PARENT
+ eng "Cannot delete rows from table which is parent in a foreign key constraint '%-.192s' of table '%-.192s'"
+
+ER_MALFORMED_PACKET
+ eng "Malformed communication packet."
+
+ER_READ_ONLY_MODE
+ eng "Running in read-only mode"
+
+ER_GTID_NEXT_TYPE_UNDEFINED_GROUP
+ eng "When GTID_NEXT is set to a GTID, you must explicitly set it again after a COMMIT or ROLLBACK. If you see this error message in the slave SQL thread, it means that a table in the current transaction is transactional on the master and non-transactional on the slave. In a client connection, it means that you executed SET GTID_NEXT before a transaction and forgot to set GTID_NEXT to a different identifier or to 'AUTOMATIC' after COMMIT or ROLLBACK. Current GTID_NEXT is '%s'."
+
+ER_VARIABLE_NOT_SETTABLE_IN_SP
+ eng "The system variable %.200s cannot be set in stored procedures."
+
+ER_CANT_SET_GTID_PURGED_WHEN_GTID_MODE_IS_OFF
+ eng "GTID_PURGED can only be set when GTID_MODE = ON."
+
+ER_CANT_SET_GTID_PURGED_WHEN_GTID_EXECUTED_IS_NOT_EMPTY
+ eng "GTID_PURGED can only be set when GTID_EXECUTED is empty."
+
+ER_CANT_SET_GTID_PURGED_WHEN_OWNED_GTIDS_IS_NOT_EMPTY
+ eng "GTID_PURGED can only be set when there are no ongoing transactions (not even in other clients)."
+
+ER_GTID_PURGED_WAS_CHANGED
+ eng "GTID_PURGED was changed from '%s' to '%s'."
+
+ER_GTID_EXECUTED_WAS_CHANGED
+ eng "GTID_EXECUTED was changed from '%s' to '%s'."
+
+ER_BINLOG_STMT_MODE_AND_NO_REPL_TABLES
+ eng "Cannot execute statement: impossible to write to binary log since BINLOG_FORMAT = STATEMENT, and both replicated and non replicated tables are written to."
+
+ER_ALTER_OPERATION_NOT_SUPPORTED 0A000
+ eng "%s is not supported for this operation. Try %s."
+
+ER_ALTER_OPERATION_NOT_SUPPORTED_REASON 0A000
+ eng "%s is not supported. Reason: %s. Try %s."
+
+ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_COPY
+ eng "COPY algorithm requires a lock"
+
+ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_PARTITION
+ eng "Partition specific operations do not yet support LOCK/ALGORITHM"
+
+ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_RENAME
+ eng "Columns participating in a foreign key are renamed"
+
+ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_COLUMN_TYPE
+ eng "Cannot change column type INPLACE"
+
+ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_CHECK
+ eng "Adding foreign keys needs foreign_key_checks=OFF"
+
+ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_IGNORE
+ eng "Creating unique indexes with IGNORE requires COPY algorithm to remove duplicate rows"
+
+ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOPK
+ eng "Dropping a primary key is not allowed without also adding a new primary key"
+
+ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_AUTOINC
+ eng "Adding an auto-increment column requires a lock"
+
+ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_HIDDEN_FTS
+ eng "Cannot replace hidden FTS_DOC_ID with a user-visible one"
+
+ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_CHANGE_FTS
+ eng "Cannot drop or rename FTS_DOC_ID"
+
+ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FTS
+ eng "Fulltext index creation requires a lock"
+
+ER_SQL_SLAVE_SKIP_COUNTER_NOT_SETTABLE_IN_GTID_MODE
+ eng "sql_slave_skip_counter can not be set when the server is running with GTID_MODE = ON. Instead, for each transaction that you want to skip, generate an empty transaction with the same GTID as the transaction"
+
+ER_DUP_UNKNOWN_IN_INDEX 23000
+ cze "Zdvojený klíč (číslo klíče '%-.192s')"
+ dan "Flere ens nøgler for indeks '%-.192s'"
+ nla "Dubbele ingang voor zoeksleutel '%-.192s'"
+ eng "Duplicate entry for key '%-.192s'"
+ est "Kattuv väärtus võtmele '%-.192s'"
+ fre "Duplicata du champ pour la clef '%-.192s'"
+ ger "Doppelter Eintrag für Schlüssel '%-.192s'"
+ greek "Διπλή εγγραφή για το κλειδί '%-.192s'"
+ hun "Duplikalt bejegyzes a '%-.192s' kulcs szerint."
+ ita "Valore duplicato per la chiave '%-.192s'"
+ jpn "は索引 '%-.192s' で重複しています。"
+ kor "중복된 입력 값: key '%-.192s'"
+ nor "Like verdier for nøkkel '%-.192s'"
+ norwegian-ny "Like verdiar for nykkel '%-.192s'"
+ pol "Powtórzone wystąpienie dla klucza '%-.192s'"
+ por "Entrada duplicada para a chave '%-.192s'"
+ rum "Cimpul e duplicat pentru cheia '%-.192s'"
+ rus "Дублирующаяся запись по ключу '%-.192s'"
+ serbian "Dupliran unos za ključ '%-.192s'"
+ slo "Opakovaný kľúč (číslo kľúča '%-.192s')"
+ spa "Entrada duplicada para la clave '%-.192s'"
+ swe "Dublett för nyckel '%-.192s'"
+ ukr "Дублюючий запис для ключа '%-.192s'"
+
+ER_IDENT_CAUSES_TOO_LONG_PATH
+ eng "Long database name and identifier for object resulted in path length exceeding %d characters. Path: '%s'."
+
+ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOT_NULL
+ eng "cannot silently convert NULL values, as required in this SQL_MODE"
+
#
# MariaDB error messages section starts here
#
@@ -6691,12 +6964,12 @@ ER_UNKNOWN_OPTION
eng "Unknown option '%-.64s'"
ER_BAD_OPTION_VALUE
eng "Incorrect value '%-.64s' for option '%-.64s'"
-ER_NOT_USED_ERROR_MESSAGE
+ER_UNUSED_6
eng ""
-ER_NOT_USED_ERROR_MESSAGE2
+ER_UNUSED_7
+ eng ""
+ER_UNUSED_8
eng ""
-ER_CANT_DO_ONLINE
- eng "Can't execute the given '%s' command as online"
ER_DATA_OVERFLOW 22003
eng "Got overflow when converting '%-.128s' to %-.32s. Value truncated."
ER_DATA_TRUNCATED 22003
@@ -6721,7 +6994,7 @@ ER_VIEW_ORDERBY_IGNORED
eng "View '%-.192s'.'%-.192s' ORDER BY clause ignored because there is other ORDER BY clause already."
ER_CONNECTION_KILLED 70100
eng "Connection was killed"
-ER_INTERNAL_ERROR
+ER_UNSED
eng "Internal error: '%-.192s'"
ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SKIP_REPLICATION
eng "Cannot modify @@session.skip_replication inside a transaction"
diff --git a/sql/signal_handler.cc b/sql/signal_handler.cc
index edc33c4d63b..9437db6c318 100644
--- a/sql/signal_handler.cc
+++ b/sql/signal_handler.cc
@@ -190,7 +190,7 @@ extern "C" sig_handler handle_fatal_signal(int sig)
"Some pointers may be invalid and cause the dump to abort.\n");
my_safe_printf_stderr("Query (%p): ", thd->query());
- my_safe_print_str(thd->query(), min(65536U, thd->query_length()));
+ my_safe_print_str(thd->query(), MY_MIN(65536U, thd->query_length()));
my_safe_printf_stderr("\nConnection ID (thread ID): %lu\n",
(ulong) thd->thread_id);
my_safe_printf_stderr("Status: %s\n\n", kreason);
diff --git a/sql/slave.cc b/sql/slave.cc
index 55fe53345da..d46be570b5e 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -306,7 +306,8 @@ handle_slave_init(void *arg __attribute__((unused)))
sql_print_warning("Failed to load slave replication state from table "
"%s.%s: %u: %s", "mysql",
rpl_gtid_slave_state_table_name.str,
- thd->stmt_da->sql_errno(), thd->stmt_da->message());
+ thd->get_stmt_da()->sql_errno(),
+ thd->get_stmt_da()->message());
mysql_mutex_lock(&LOCK_thread_count);
delete thd;
@@ -473,7 +474,7 @@ int init_recovery(Master_info* mi, const char** errmsg)
Relay_log_info *rli= &mi->rli;
if (rli->group_master_log_name[0])
{
- mi->master_log_pos= max(BIN_LOG_HEADER_SIZE,
+ mi->master_log_pos= MY_MAX(BIN_LOG_HEADER_SIZE,
rli->group_master_log_pos);
strmake_buf(mi->master_log_name, rli->group_master_log_name);
@@ -925,7 +926,8 @@ int start_slave_threads(bool need_slave_mutex, bool wait_for_start,
keep them in case connection with GTID fails and user wants to go
back and continue with previous old-style replication coordinates).
*/
- mi->master_log_pos = max(BIN_LOG_HEADER_SIZE, mi->rli.group_master_log_pos);
+ mi->master_log_pos = MY_MAX(BIN_LOG_HEADER_SIZE,
+ mi->rli.group_master_log_pos);
strmake(mi->master_log_name, mi->rli.group_master_log_name,
sizeof(mi->master_log_name)-1);
purge_relay_logs(&mi->rli, NULL, 0, &errmsg);
@@ -2592,13 +2594,13 @@ static bool send_show_master_info_data(THD *thd, Master_info *mi, bool full,
slave is 2. At SHOW SLAVE STATUS time, assume that the difference
between timestamp of slave and rli->last_master_timestamp is 0
(i.e. they are in the same second), then we get 0-(2-1)=-1 as a result.
- This confuses users, so we don't go below 0: hence the max().
+ This confuses users, so we don't go below 0: hence the MY_MAX().
last_master_timestamp == 0 (an "impossible" timestamp 1970) is a
special marker to say "consider we have caught up".
*/
protocol->store((longlong)(mi->rli.last_master_timestamp ?
- max(0, time_diff) : 0));
+ MY_MAX(0, time_diff) : 0));
}
else
{
@@ -2987,7 +2989,7 @@ static int has_temporary_error(THD *thd)
DBUG_ENTER("has_temporary_error");
DBUG_EXECUTE_IF("all_errors_are_temporary_errors",
- if (thd->stmt_da->is_error())
+ if (thd->get_stmt_da()->is_error())
{
thd->clear_error();
my_error(ER_LOCK_DEADLOCK, MYF(0));
@@ -3006,16 +3008,16 @@ static int has_temporary_error(THD *thd)
currently, InnoDB deadlock detected by InnoDB or lock
wait timeout (innodb_lock_wait_timeout exceeded
*/
- if (thd->stmt_da->sql_errno() == ER_LOCK_DEADLOCK ||
- thd->stmt_da->sql_errno() == ER_LOCK_WAIT_TIMEOUT)
+ if (thd->get_stmt_da()->sql_errno() == ER_LOCK_DEADLOCK ||
+ thd->get_stmt_da()->sql_errno() == ER_LOCK_WAIT_TIMEOUT)
DBUG_RETURN(1);
#ifdef HAVE_NDB_BINLOG
/*
currently temporary error set in ndbcluster
*/
- List_iterator_fast<MYSQL_ERROR> it(thd->warning_info->warn_list());
- MYSQL_ERROR *err;
+ List_iterator_fast<Sql_condition> it(thd->warning_info->warn_list());
+ Sql_condition *err;
while ((err= it++))
{
DBUG_PRINT("info", ("has condition %d %s", err->get_sql_errno(),
@@ -3362,7 +3364,7 @@ static int exec_relay_log_event(THD* thd, Relay_log_info* rli)
exec_res= 0;
rli->cleanup_context(thd, 1);
/* chance for concurrent connection to get more locks */
- slave_sleep(thd, min(rli->trans_retries, MAX_SLAVE_RETRY_PAUSE),
+ slave_sleep(thd, MY_MIN(rli->trans_retries, MAX_SLAVE_RETRY_PAUSE),
sql_slave_killed, rli);
mysql_mutex_lock(&rli->data_lock); // because of SHOW STATUS
rli->trans_retries++;
@@ -3580,9 +3582,10 @@ pthread_handler_t handle_slave_io(void *arg)
/* Load the set of seen GTIDs, if we did not already. */
if (rpl_load_gtid_slave_state(thd))
{
- mi->report(ERROR_LEVEL, thd->stmt_da->sql_errno(),
+ mi->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(),
"Unable to load replication GTID slave state from mysql.%s: %s",
- rpl_gtid_slave_state_table_name.str, thd->stmt_da->message());
+ rpl_gtid_slave_state_table_name.str,
+ thd->get_stmt_da()->message());
/*
If we are using old-style replication, we can continue, even though we
then will not be able to record the GTIDs we receive. But if using GTID,
@@ -4174,18 +4177,19 @@ log '%s' at position %s, relay log '%s' position: %s%s", RPL_LOG_NAME,
if (check_temp_dir(rli->slave_patternload_file))
{
- rli->report(ERROR_LEVEL, thd->stmt_da->sql_errno(),
+ rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(),
"Unable to use slave's temporary directory %s - %s",
- slave_load_tmpdir, thd->stmt_da->message());
+ slave_load_tmpdir, thd->get_stmt_da()->message());
goto err;
}
/* Load the set of seen GTIDs, if we did not already. */
if (rpl_load_gtid_slave_state(thd))
{
- rli->report(ERROR_LEVEL, thd->stmt_da->sql_errno(),
+ rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(),
"Unable to load replication GTID slave state from mysql.%s: %s",
- rpl_gtid_slave_state_table_name.str, thd->stmt_da->message());
+ rpl_gtid_slave_state_table_name.str,
+ thd->get_stmt_da()->message());
/*
If we are using old-style replication, we can continue, even though we
then will not be able to record the GTIDs we receive. But if using GTID,
@@ -4201,7 +4205,7 @@ log '%s' at position %s, relay log '%s' position: %s%s", RPL_LOG_NAME,
execute_init_command(thd, &opt_init_slave, &LOCK_sys_init_slave);
if (thd->is_slave_error)
{
- rli->report(ERROR_LEVEL, thd->stmt_da->sql_errno(),
+ rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(),
"Slave SQL thread aborted. Can't execute init_slave query");
goto err;
}
@@ -4269,20 +4273,20 @@ log '%s' at position %s, relay log '%s' position: %s%s", RPL_LOG_NAME,
if (thd->is_error())
{
- char const *const errmsg= thd->stmt_da->message();
+ char const *const errmsg= thd->get_stmt_da()->message();
DBUG_PRINT("info",
- ("thd->stmt_da->sql_errno()=%d; rli->last_error.number=%d",
- thd->stmt_da->sql_errno(), last_errno));
+ ("thd->get_stmt_da()->sql_errno()=%d; rli->last_error.number=%d",
+ thd->get_stmt_da()->sql_errno(), last_errno));
if (last_errno == 0)
{
/*
This function is reporting an error which was not reported
while executing exec_relay_log_event().
*/
- rli->report(ERROR_LEVEL, thd->stmt_da->sql_errno(), "%s", errmsg);
+ rli->report(ERROR_LEVEL, thd->get_stmt_da()->sql_errno(), "%s", errmsg);
}
- else if (last_errno != thd->stmt_da->sql_errno())
+ else if (last_errno != thd->get_stmt_da()->sql_errno())
{
/*
* An error was reported while executing exec_relay_log_event()
@@ -4291,13 +4295,14 @@ log '%s' at position %s, relay log '%s' position: %s%s", RPL_LOG_NAME,
* what caused the problem.
*/
sql_print_error("Slave (additional info): %s Error_code: %d",
- errmsg, thd->stmt_da->sql_errno());
+ errmsg, thd->get_stmt_da()->sql_errno());
}
}
/* Print any warnings issued */
- List_iterator_fast<MYSQL_ERROR> it(thd->warning_info->warn_list());
- MYSQL_ERROR *err;
+ Diagnostics_area::Sql_condition_iterator it=
+ thd->get_stmt_da()->sql_conditions();
+ const Sql_condition *err;
/*
Added controlled slave thread cancel for replication
of user-defined variables.
@@ -5744,7 +5749,7 @@ static IO_CACHE *reopen_relay_log(Relay_log_info *rli, const char **errmsg)
relay_log_pos Current log pos
pending Number of bytes already processed from the event
*/
- rli->event_relay_log_pos= max(rli->event_relay_log_pos, BIN_LOG_HEADER_SIZE);
+ rli->event_relay_log_pos= MY_MAX(rli->event_relay_log_pos, BIN_LOG_HEADER_SIZE);
my_b_seek(cur_log,rli->event_relay_log_pos);
DBUG_RETURN(cur_log);
}
diff --git a/sql/sp.cc b/sql/sp.cc
index 978d7a2eb13..c1c162267a8 100644
--- a/sql/sp.cc
+++ b/sql/sp.cc
@@ -325,7 +325,7 @@ Stored_routine_creation_ctx::load_from_db(THD *thd,
if (invalid_creation_ctx)
{
push_warning_printf(thd,
- MYSQL_ERROR::WARN_LEVEL_WARN,
+ Sql_condition::WARN_LEVEL_WARN,
ER_SR_INVALID_CREATION_CTX,
ER(ER_SR_INVALID_CREATION_CTX),
(const char *) db_name,
@@ -683,9 +683,9 @@ public:
virtual bool handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
- MYSQL_ERROR::enum_warning_level level,
+ Sql_condition::enum_warning_level level,
const char* msg,
- MYSQL_ERROR ** cond_hdl);
+ Sql_condition ** cond_hdl);
};
bool
@@ -693,13 +693,13 @@ Silence_deprecated_warning::handle_condition(
THD *,
uint sql_errno,
const char*,
- MYSQL_ERROR::enum_warning_level level,
+ Sql_condition::enum_warning_level level,
const char*,
- MYSQL_ERROR ** cond_hdl)
+ Sql_condition ** cond_hdl)
{
*cond_hdl= NULL;
if (sql_errno == ER_WARN_DEPRECATED_SYNTAX &&
- level == MYSQL_ERROR::WARN_LEVEL_WARN)
+ level == Sql_condition::WARN_LEVEL_WARN)
return TRUE;
return FALSE;
@@ -772,9 +772,9 @@ public:
virtual bool handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
- MYSQL_ERROR::enum_warning_level level,
+ Sql_condition::enum_warning_level level,
const char* message,
- MYSQL_ERROR ** cond_hdl);
+ Sql_condition ** cond_hdl);
bool error_caught() const { return m_error_caught; }
@@ -786,9 +786,9 @@ bool
Bad_db_error_handler::handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
- MYSQL_ERROR::enum_warning_level level,
+ Sql_condition::enum_warning_level level,
const char* message,
- MYSQL_ERROR ** cond_hdl)
+ Sql_condition ** cond_hdl)
{
if (sql_errno == ER_BAD_DB_ERROR)
{
@@ -1390,9 +1390,9 @@ public:
bool handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
- MYSQL_ERROR::enum_warning_level level,
+ Sql_condition::enum_warning_level level,
const char* msg,
- MYSQL_ERROR ** cond_hdl)
+ Sql_condition ** cond_hdl)
{
if (sql_errno == ER_NO_SUCH_TABLE ||
sql_errno == ER_NO_SUCH_TABLE_IN_ENGINE ||
@@ -1757,7 +1757,7 @@ sp_exist_routines(THD *thd, TABLE_LIST *routines, bool any)
&thd->sp_proc_cache, FALSE) != NULL ||
sp_find_routine(thd, TYPE_ENUM_FUNCTION, name,
&thd->sp_func_cache, FALSE) != NULL;
- thd->warning_info->clear_warning_info(thd->query_id);
+ thd->get_stmt_da()->clear_warning_info(thd->query_id);
if (sp_object_found)
{
if (any)
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index 6b591edca5e..cb689735925 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -773,7 +773,7 @@ sp_head::~sp_head()
for (uint ip = 0 ; (i = get_instr(ip)) ; ip++)
delete i;
delete_dynamic(&m_instr);
- m_pcont->destroy();
+ delete m_pcont;
free_items();
/*
@@ -976,7 +976,7 @@ subst_spvars(THD *thd, sp_instr *instr, LEX_STRING *query_str)
thd->query_name_consts= 0;
for (Item_splocal **splocal= sp_vars_uses.front();
- splocal < sp_vars_uses.back(); splocal++)
+ splocal <= sp_vars_uses.back(); splocal++)
{
Item *val;
@@ -1079,105 +1079,6 @@ void sp_head::recursion_level_error(THD *thd)
}
-/**
- Find an SQL handler for any condition (warning or error) after execution
- of a stored routine instruction. Basically, this function looks for an
- appropriate SQL handler in RT-contexts. If an SQL handler is found, it is
- remembered in the RT-context for future activation (the context can be
- inactive at the moment).
-
- If there is no pending condition, the function just returns.
-
- If there was an error during the execution, an SQL handler for it will be
- searched within the current and outer scopes.
-
- There might be several errors in the Warning Info (that's possible by using
- SIGNAL/RESIGNAL in nested scopes) -- the function is looking for an SQL
- handler for the latest (current) error only.
-
- If there was a warning during the execution, an SQL handler for it will be
- searched within the current scope only.
-
- If several warnings were thrown during the execution and there are different
- SQL handlers for them, it is not determined which SQL handler will be chosen.
- Only one SQL handler will be executed.
-
- If warnings and errors were thrown during the execution, the error takes
- precedence. I.e. error handler will be executed. If there is no handler
- for that error, condition will remain unhandled.
-
- Once a warning or an error has been handled it is not removed from
- Warning Info.
-
- According to The Standard (quoting PeterG):
-
- An SQL procedure statement works like this ...
- SQL/Foundation 13.5 <SQL procedure statement>
- (General Rules) (greatly summarized) says:
- (1) Empty diagnostics area, thus clearing the condition.
- (2) Execute statement.
- During execution, if Exception Condition occurs,
- set Condition Area = Exception Condition and stop
- statement.
- During execution, if No Data occurs,
- set Condition Area = No Data Condition and continue
- statement.
- During execution, if Warning occurs,
- and Condition Area is not already full due to
- an earlier No Data condition, set Condition Area
- = Warning and continue statement.
- (3) Finish statement.
- At end of execution, if Condition Area is not
- already full due to an earlier No Data or Warning,
- set Condition Area = Successful Completion.
- In effect, this system means there is a precedence:
- Exception trumps No Data, No Data trumps Warning,
- Warning trumps Successful Completion.
-
- NB: "Procedure statements" include any DDL or DML or
- control statements. So CREATE and DELETE and WHILE
- and CALL and RETURN are procedure statements. But
- DECLARE and END are not procedure statements.
-
- @param thd thread handle
- @param ctx runtime context of the stored routine
-*/
-
-static void
-find_handler_after_execution(THD *thd, sp_rcontext *ctx)
-{
- if (thd->is_error())
- {
- ctx->find_handler(thd,
- thd->stmt_da->sql_errno(),
- thd->stmt_da->get_sqlstate(),
- MYSQL_ERROR::WARN_LEVEL_ERROR,
- thd->stmt_da->message());
- }
- else if (thd->warning_info->statement_warn_count())
- {
- List_iterator<MYSQL_ERROR> it(thd->warning_info->warn_list());
- MYSQL_ERROR *err;
- while ((err= it++))
- {
- if ((err->get_level() != MYSQL_ERROR::WARN_LEVEL_WARN &&
- err->get_level() != MYSQL_ERROR::WARN_LEVEL_NOTE) ||
- err->handled())
- continue;
-
- if (ctx->find_handler(thd,
- err->get_sql_errno(),
- err->get_sqlstate(),
- err->get_level(),
- err->get_message_text()))
- {
- err->mark_handled();
- break;
- }
- }
- }
-}
-
/**
Execute the routine. The main instruction jump loop is there.
@@ -1224,8 +1125,8 @@ sp_head::execute(THD *thd, bool merge_da_on_success)
String old_packet;
Reprepare_observer *save_reprepare_observer= thd->m_reprepare_observer;
Object_creation_ctx *saved_creation_ctx;
- Warning_info *saved_warning_info;
- Warning_info warning_info(thd->warning_info->warn_id(), false);
+ Diagnostics_area *da= thd->get_stmt_da();
+ Warning_info sp_wi(da->warning_info_id(), false, true);
/*
Just reporting a stack overrun error
@@ -1296,9 +1197,8 @@ sp_head::execute(THD *thd, bool merge_da_on_success)
old_arena= thd->stmt_arena;
/* Push a new warning information area. */
- warning_info.append_warning_info(thd, thd->warning_info);
- saved_warning_info= thd->warning_info;
- thd->warning_info= &warning_info;
+ da->copy_sql_conditions_to_wi(thd, &sp_wi);
+ da->push_warning_info(&sp_wi);
/*
Switch query context. This has to be done early as this is sometimes
@@ -1398,7 +1298,7 @@ sp_head::execute(THD *thd, bool merge_da_on_success)
}
/* Reset number of warnings for this query. */
- thd->warning_info->reset_for_next_command();
+ thd->get_stmt_da()->reset_for_next_command();
DBUG_PRINT("execute", ("Instruction %u", ip));
@@ -1449,19 +1349,10 @@ sp_head::execute(THD *thd, bool merge_da_on_success)
errors are not catchable by SQL handlers) or the connection has been
killed during execution.
*/
- if (!thd->is_fatal_error && !thd->killed_errno())
+ if (!thd->is_fatal_error && !thd->killed_errno() &&
+ ctx->handle_sql_condition(thd, &ip, i))
{
- /*
- Find SQL handler in the appropriate RT-contexts:
- - warnings can be handled by SQL handlers within
- the current scope only;
- - errors can be handled by any SQL handler from outer scope.
- */
- find_handler_after_execution(thd, ctx);
-
- /* If found, activate handler for the current scope. */
- if (ctx->activate_handler(thd, &ip, i, &execute_arena, &backup_arena))
- err_status= FALSE;
+ err_status= FALSE;
}
/* Reset sp_rcontext::end_partial_result_set flag. */
@@ -1506,9 +1397,40 @@ sp_head::execute(THD *thd, bool merge_da_on_success)
- if there was an exception during execution, warning info should be
propagated to the caller in any case.
*/
+ da->pop_warning_info();
+
if (err_status || merge_da_on_success)
- saved_warning_info->merge_with_routine_info(thd, thd->warning_info);
- thd->warning_info= saved_warning_info;
+ {
+ /*
+ If a routine body is empty or if a routine did not generate any warnings,
+ do not duplicate our own contents by appending the contents of the called
+ routine. We know that the called routine did not change its warning info.
+
+ On the other hand, if the routine body is not empty and some statement in
+ the routine generates a warning or uses tables, warning info is guaranteed
+ to have changed. In this case we know that the routine warning info
+ contains only new warnings, and thus we perform a copy.
+ */
+ if (da->warning_info_changed(&sp_wi))
+ {
+ /*
+ If the invocation of the routine was a standalone statement,
+ rather than a sub-statement, in other words, if it's a CALL
+ of a procedure, rather than invocation of a function or a
+ trigger, we need to clear the current contents of the caller's
+ warning info.
+
+ This is per MySQL rules: if a statement generates a warning,
+ warnings from the previous statement are flushed. Normally
+ it's done in push_warning(). However, here we don't use
+ push_warning() to avoid invocation of condition handlers or
+ escalation of warnings to errors.
+ */
+ da->opt_clear_warning_info(thd->query_id);
+ da->copy_sql_conditions_from_wi(thd, &sp_wi);
+ da->remove_marked_sql_conditions();
+ }
+ }
done:
DBUG_PRINT("info", ("err_status: %d killed: %d is_slave_error: %d report_error: %d",
@@ -1716,8 +1638,7 @@ sp_head::execute_trigger(THD *thd,
init_sql_alloc(&call_mem_root, MEM_ROOT_BLOCK_SIZE, 0, MYF(0));
thd->set_n_backup_active_arena(&call_arena, &backup_arena);
- if (!(nctx= new sp_rcontext(m_pcont, 0, octx)) ||
- nctx->init(thd))
+ if (!(nctx= sp_rcontext::create(thd, m_pcont, NULL)))
{
err_status= TRUE;
goto err_with_cleanup;
@@ -1833,8 +1754,7 @@ sp_head::execute_function(THD *thd, Item **argp, uint argcount,
init_sql_alloc(&call_mem_root, MEM_ROOT_BLOCK_SIZE, 0, MYF(0));
thd->set_n_backup_active_arena(&call_arena, &backup_arena);
- if (!(nctx= new sp_rcontext(m_pcont, return_value_fld, octx)) ||
- nctx->init(thd))
+ if (!(nctx= sp_rcontext::create(thd, m_pcont, return_value_fld)))
{
thd->restore_active_arena(&call_arena, &backup_arena);
err_status= TRUE;
@@ -1962,7 +1882,7 @@ sp_head::execute_function(THD *thd, Item **argp, uint argcount,
if (mysql_bin_log.write(&qinfo) &&
thd->binlog_evt_union.unioned_events_trans)
{
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
"Invoked ROUTINE modified a transactional table but MySQL "
"failed to reflect this change in the binary log");
err_status= TRUE;
@@ -2051,9 +1971,9 @@ sp_head::execute_procedure(THD *thd, List<Item> *args)
if (! octx)
{
/* Create a temporary old context. */
- if (!(octx= new sp_rcontext(m_pcont, NULL, octx)) || octx->init(thd))
+ if (!(octx= sp_rcontext::create(thd, m_pcont, NULL)))
{
- delete octx; /* Delete octx if it was init() that failed. */
+ DBUG_PRINT("error", ("Could not create octx"));
DBUG_RETURN(TRUE);
}
@@ -2066,8 +1986,7 @@ sp_head::execute_procedure(THD *thd, List<Item> *args)
thd->spcont->callers_arena= thd;
}
- if (!(nctx= new sp_rcontext(m_pcont, NULL, octx)) ||
- nctx->init(thd))
+ if (!(nctx= sp_rcontext::create(thd, m_pcont, NULL)))
{
delete nctx; /* Delete nctx if it was init() that failed. */
thd->spcont= save_spcont;
@@ -2090,12 +2009,12 @@ sp_head::execute_procedure(THD *thd, List<Item> *args)
if (!arg_item)
break;
- sp_variable_t *spvar= m_pcont->find_variable(i);
+ sp_variable *spvar= m_pcont->find_variable(i);
if (!spvar)
continue;
- if (spvar->mode != sp_param_in)
+ if (spvar->mode != sp_variable::MODE_IN)
{
Settable_routine_parameter *srp=
arg_item->get_settable_routine_parameter();
@@ -2107,10 +2026,10 @@ sp_head::execute_procedure(THD *thd, List<Item> *args)
break;
}
- srp->set_required_privilege(spvar->mode == sp_param_inout);
+ srp->set_required_privilege(spvar->mode == sp_variable::MODE_INOUT);
}
- if (spvar->mode == sp_param_out)
+ if (spvar->mode == sp_variable::MODE_OUT)
{
Item_null *null_item= new Item_null();
Item *tmp_item= null_item;
@@ -2118,6 +2037,7 @@ sp_head::execute_procedure(THD *thd, List<Item> *args)
if (!null_item ||
nctx->set_variable(thd, i, &tmp_item))
{
+ DBUG_PRINT("error", ("set variable failed"));
err_status= TRUE;
break;
}
@@ -2126,6 +2046,7 @@ sp_head::execute_procedure(THD *thd, List<Item> *args)
{
if (nctx->set_variable(thd, i, it_args.ref()))
{
+ DBUG_PRINT("error", ("set variable 2 failed"));
err_status= TRUE;
break;
}
@@ -2141,9 +2062,9 @@ sp_head::execute_procedure(THD *thd, List<Item> *args)
if (!thd->in_sub_stmt)
{
- thd->stmt_da->can_overwrite_status= TRUE;
+ thd->get_stmt_da()->set_overwrite_status(true);
thd->is_error() ? trans_rollback_stmt(thd) : trans_commit_stmt(thd);
- thd->stmt_da->can_overwrite_status= FALSE;
+ thd->get_stmt_da()->set_overwrite_status(false);
}
thd_proc_info(thd, "closing tables");
@@ -2182,7 +2103,10 @@ sp_head::execute_procedure(THD *thd, List<Item> *args)
#endif
if (!err_status)
+ {
err_status= execute(thd, TRUE);
+ DBUG_PRINT("info", ("execute returned %d", (int) err_status));
+ }
if (save_log_general)
thd->variables.option_bits &= ~OPTION_LOG_OFF;
@@ -2210,9 +2134,9 @@ sp_head::execute_procedure(THD *thd, List<Item> *args)
if (!arg_item)
break;
- sp_variable_t *spvar= m_pcont->find_variable(i);
+ sp_variable *spvar= m_pcont->find_variable(i);
- if (spvar->mode == sp_param_in)
+ if (spvar->mode == sp_variable::MODE_IN)
continue;
Settable_routine_parameter *srp=
@@ -2222,6 +2146,7 @@ sp_head::execute_procedure(THD *thd, List<Item> *args)
if (srp->set_value(thd, octx, nctx->get_item_addr(i)))
{
+ DBUG_PRINT("error", ("set value failed"));
err_status= TRUE;
break;
}
@@ -2372,7 +2297,7 @@ sp_head::restore_lex(THD *thd)
Put the instruction on the backpatch list, associated with the label.
*/
int
-sp_head::push_backpatch(sp_instr *i, sp_label_t *lab)
+sp_head::push_backpatch(sp_instr *i, sp_label *lab)
{
bp_t *bp= (bp_t *)sql_alloc(sizeof(bp_t));
@@ -2388,7 +2313,7 @@ sp_head::push_backpatch(sp_instr *i, sp_label_t *lab)
the current position.
*/
void
-sp_head::backpatch(sp_label_t *lab)
+sp_head::backpatch(sp_label *lab)
{
bp_t *bp;
uint dest= instructions();
@@ -2400,7 +2325,7 @@ sp_head::backpatch(sp_label_t *lab)
if (bp->lab == lab)
{
DBUG_PRINT("info", ("backpatch: (m_ip %d, label 0x%lx <%s>) to dest %d",
- bp->instr->m_ip, (ulong) lab, lab->name, dest));
+ bp->instr->m_ip, (ulong) lab, lab->name.str, dest));
bp->instr->backpatch(dest, lab->ctx);
}
}
@@ -2667,7 +2592,7 @@ sp_head::show_create_routine(THD *thd, int type)
Item_empty_string *stmt_fld=
new Item_empty_string(col3_caption,
- max(m_defstr.length, 1024));
+ MY_MAX(m_defstr.length, 1024));
stmt_fld->maybe_null= TRUE;
@@ -2867,7 +2792,7 @@ sp_head::show_routine_code(THD *thd)
field_list.push_back(new Item_uint("Pos", 9));
// 1024 is for not to confuse old clients
field_list.push_back(new Item_empty_string("Instruction",
- max(buffer.length(), 1024)));
+ MY_MAX(buffer.length(), 1024)));
if (protocol->send_result_set_metadata(&field_list, Protocol::SEND_NUM_ROWS |
Protocol::SEND_EOF))
DBUG_RETURN(1);
@@ -2888,7 +2813,7 @@ sp_head::show_routine_code(THD *thd)
Since this is for debugging purposes only, we don't bother to
introduce a special error code for it.
*/
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, tmp);
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, tmp);
}
protocol->prepare_for_resend();
protocol->store((longlong)ip);
@@ -2995,9 +2920,9 @@ sp_lex_keeper::reset_lex_and_exec_core(THD *thd, uint *nextp,
/* Here we also commit or rollback the current statement. */
if (! thd->in_sub_stmt)
{
- thd->stmt_da->can_overwrite_status= TRUE;
+ thd->get_stmt_da()->set_overwrite_status(true);
thd->is_error() ? trans_rollback_stmt(thd) : trans_commit_stmt(thd);
- thd->stmt_da->can_overwrite_status= FALSE;
+ thd->get_stmt_da()->set_overwrite_status(false);
}
thd_proc_info(thd, "closing tables");
close_thread_tables(thd);
@@ -3031,10 +2956,10 @@ sp_lex_keeper::reset_lex_and_exec_core(THD *thd, uint *nextp,
open_tables stage.
*/
if (!res || !thd->is_error() ||
- (thd->stmt_da->sql_errno() != ER_CANT_REOPEN_TABLE &&
- thd->stmt_da->sql_errno() != ER_NO_SUCH_TABLE &&
- thd->stmt_da->sql_errno() != ER_NO_SUCH_TABLE_IN_ENGINE &&
- thd->stmt_da->sql_errno() != ER_UPDATE_TABLE_USED))
+ (thd->get_stmt_da()->sql_errno() != ER_CANT_REOPEN_TABLE &&
+ thd->get_stmt_da()->sql_errno() != ER_NO_SUCH_TABLE &&
+ thd->get_stmt_da()->sql_errno() != ER_NO_SUCH_TABLE_IN_ENGINE &&
+ thd->get_stmt_da()->sql_errno() != ER_UPDATE_TABLE_USED))
thd->stmt_arena->state= Query_arena::STMT_EXECUTED;
/*
@@ -3067,7 +2992,8 @@ int sp_instr::exec_open_and_lock_tables(THD *thd, TABLE_LIST *tables)
Check whenever we have access to tables for this statement
and open and lock them before executing instructions core function.
*/
- if (check_table_access(thd, SELECT_ACL, tables, FALSE, UINT_MAX, FALSE)
+ if (open_temporary_tables(thd, tables) ||
+ check_table_access(thd, SELECT_ACL, tables, FALSE, UINT_MAX, FALSE)
|| open_and_lock_tables(thd, tables, TRUE, 0))
result= -1;
else
@@ -3079,7 +3005,7 @@ int sp_instr::exec_open_and_lock_tables(THD *thd, TABLE_LIST *tables)
return result;
}
-uint sp_instr::get_cont_dest()
+uint sp_instr::get_cont_dest() const
{
return (m_ip+1);
}
@@ -3121,7 +3047,7 @@ sp_instr_stmt::execute(THD *thd, uint *nextp)
{
res= m_lex_keeper.reset_lex_and_exec_core(thd, nextp, FALSE, this);
- if (thd->stmt_da->is_eof())
+ if (thd->get_stmt_da()->is_eof())
{
/* Finalize server status flags after executing a statement. */
thd->update_server_status();
@@ -3132,7 +3058,8 @@ sp_instr_stmt::execute(THD *thd, uint *nextp)
query_cache_end_of_result(thd);
mysql_audit_general(thd, MYSQL_AUDIT_GENERAL_STATUS,
- thd->stmt_da->is_error() ? thd->stmt_da->sql_errno() : 0,
+ thd->get_stmt_da()->is_error() ?
+ thd->get_stmt_da()->sql_errno() : 0,
command_name[COM_QUERY].str);
if (!res && unlikely(thd->enable_slow_log))
@@ -3144,7 +3071,7 @@ sp_instr_stmt::execute(THD *thd, uint *nextp)
thd->query_name_consts= 0;
if (!thd->is_error())
- thd->stmt_da->reset_diagnostics_area();
+ thd->get_stmt_da()->reset_diagnostics_area();
}
DBUG_RETURN(res || thd->is_error());
}
@@ -3237,7 +3164,7 @@ sp_instr_set::print(String *str)
{
/* set name@offset ... */
int rsrv = SP_INSTR_UINT_MAXLEN+6;
- sp_variable_t *var = m_ctx->find_variable(m_offset);
+ sp_variable *var = m_ctx->find_variable(m_offset);
/* 'var' should always be non-null, but just in case... */
if (var)
@@ -3290,7 +3217,7 @@ sp_instr_set_trigger_field::print(String *str)
sp_instr_opt_meta
*/
-uint sp_instr_opt_meta::get_cont_dest()
+uint sp_instr_opt_meta::get_cont_dest() const
{
return m_cont_dest;
}
@@ -3471,6 +3398,14 @@ int
sp_instr_freturn::exec_core(THD *thd, uint *nextp)
{
/*
+ RETURN is a "procedure statement" (in terms of the SQL standard).
+ That means, Diagnostics Area should be clean before its execution.
+ */
+
+ Diagnostics_area *da= thd->get_stmt_da();
+ da->clear_warning_info(da->warning_info_id());
+
+ /*
Change <next instruction pointer>, so that this will be the last
instruction in the stored function.
*/
@@ -3508,14 +3443,12 @@ int
sp_instr_hpush_jump::execute(THD *thd, uint *nextp)
{
DBUG_ENTER("sp_instr_hpush_jump::execute");
- List_iterator_fast<sp_cond_type_t> li(m_cond);
- sp_cond_type_t *p;
- while ((p= li++))
- thd->spcont->push_handler(p, m_ip+1, m_type);
+ int ret= thd->spcont->push_handler(m_handler, m_ip + 1);
*nextp= m_dest;
- DBUG_RETURN(0);
+
+ DBUG_RETURN(ret);
}
@@ -3525,27 +3458,22 @@ sp_instr_hpush_jump::print(String *str)
/* hpush_jump dest fsize type */
if (str->reserve(SP_INSTR_UINT_MAXLEN*2 + 21))
return;
+
str->qs_append(STRING_WITH_LEN("hpush_jump "));
str->qs_append(m_dest);
str->qs_append(' ');
str->qs_append(m_frame);
- switch (m_type) {
- case SP_HANDLER_NONE:
- str->qs_append(STRING_WITH_LEN(" NONE")); // This would be a bug
- break;
- case SP_HANDLER_EXIT:
+
+ switch (m_handler->type) {
+ case sp_handler::EXIT:
str->qs_append(STRING_WITH_LEN(" EXIT"));
break;
- case SP_HANDLER_CONTINUE:
+ case sp_handler::CONTINUE:
str->qs_append(STRING_WITH_LEN(" CONTINUE"));
break;
- case SP_HANDLER_UNDO:
- str->qs_append(STRING_WITH_LEN(" UNDO"));
- break;
default:
- // This would be a bug as well
- str->qs_append(STRING_WITH_LEN(" UNKNOWN:"));
- str->qs_append(m_type);
+ // The handler type must be either CONTINUE or EXIT.
+ DBUG_ASSERT(0);
}
}
@@ -3573,7 +3501,7 @@ sp_instr_hpush_jump::opt_mark(sp_head *sp, List<sp_instr> *leads)
above, so we start on m_dest+1 here.
m_opt_hpop is the hpop marking the end of the handler scope.
*/
- if (m_type == SP_HANDLER_CONTINUE)
+ if (m_handler->type == sp_handler::CONTINUE)
{
for (uint scope_ip= m_dest+1; scope_ip <= m_opt_hpop; scope_ip++)
sp->add_mark_lead(scope_ip, leads);
@@ -3615,13 +3543,11 @@ int
sp_instr_hreturn::execute(THD *thd, uint *nextp)
{
DBUG_ENTER("sp_instr_hreturn::execute");
- if (m_dest)
- *nextp= m_dest;
- else
- {
- *nextp= thd->spcont->pop_hstack();
- }
- thd->spcont->exit_handler();
+
+ uint continue_ip= thd->spcont->exit_handler(thd->get_stmt_da());
+
+ *nextp= m_dest ? m_dest : continue_ip;
+
DBUG_RETURN(0);
}
@@ -3633,12 +3559,17 @@ sp_instr_hreturn::print(String *str)
if (str->reserve(SP_INSTR_UINT_MAXLEN*2 + 9))
return;
str->qs_append(STRING_WITH_LEN("hreturn "));
- str->qs_append(m_frame);
if (m_dest)
{
- str->qs_append(' ');
+ // NOTE: this is legacy: hreturn instruction for EXIT handler
+ // should print out 0 as frame index.
+ str->qs_append(STRING_WITH_LEN("0 "));
str->qs_append(m_dest);
}
+ else
+ {
+ str->qs_append(m_frame);
+ }
}
@@ -3670,41 +3601,32 @@ sp_instr_hreturn::opt_mark(sp_head *sp, List<sp_instr> *leads)
int
sp_instr_cpush::execute(THD *thd, uint *nextp)
{
- Query_arena backup_arena;
DBUG_ENTER("sp_instr_cpush::execute");
- /*
- We should create cursors in the callers arena, as
- it could be (and usually is) used in several instructions.
- */
- thd->set_n_backup_active_arena(thd->spcont->callers_arena, &backup_arena);
-
- thd->spcont->push_cursor(&m_lex_keeper, this);
-
- thd->restore_active_arena(thd->spcont->callers_arena, &backup_arena);
+ int ret= thd->spcont->push_cursor(&m_lex_keeper, this);
*nextp= m_ip+1;
- DBUG_RETURN(0);
+ DBUG_RETURN(ret);
}
void
sp_instr_cpush::print(String *str)
{
- LEX_STRING n;
- my_bool found= m_ctx->find_cursor(m_cursor, &n);
+ const LEX_STRING *cursor_name= m_ctx->find_cursor(m_cursor);
+
/* cpush name@offset */
uint rsrv= SP_INSTR_UINT_MAXLEN+7;
- if (found)
- rsrv+= n.length;
+ if (cursor_name)
+ rsrv+= cursor_name->length;
if (str->reserve(rsrv))
return;
str->qs_append(STRING_WITH_LEN("cpush "));
- if (found)
+ if (cursor_name)
{
- str->qs_append(n.str, n.length);
+ str->qs_append(cursor_name->str, cursor_name->length);
str->qs_append('@');
}
str->qs_append(m_cursor);
@@ -3792,19 +3714,19 @@ sp_instr_copen::exec_core(THD *thd, uint *nextp)
void
sp_instr_copen::print(String *str)
{
- LEX_STRING n;
- my_bool found= m_ctx->find_cursor(m_cursor, &n);
+ const LEX_STRING *cursor_name= m_ctx->find_cursor(m_cursor);
+
/* copen name@offset */
uint rsrv= SP_INSTR_UINT_MAXLEN+7;
- if (found)
- rsrv+= n.length;
+ if (cursor_name)
+ rsrv+= cursor_name->length;
if (str->reserve(rsrv))
return;
str->qs_append(STRING_WITH_LEN("copen "));
- if (found)
+ if (cursor_name)
{
- str->qs_append(n.str, n.length);
+ str->qs_append(cursor_name->str, cursor_name->length);
str->qs_append('@');
}
str->qs_append(m_cursor);
@@ -3834,19 +3756,19 @@ sp_instr_cclose::execute(THD *thd, uint *nextp)
void
sp_instr_cclose::print(String *str)
{
- LEX_STRING n;
- my_bool found= m_ctx->find_cursor(m_cursor, &n);
+ const LEX_STRING *cursor_name= m_ctx->find_cursor(m_cursor);
+
/* cclose name@offset */
uint rsrv= SP_INSTR_UINT_MAXLEN+8;
- if (found)
- rsrv+= n.length;
+ if (cursor_name)
+ rsrv+= cursor_name->length;
if (str->reserve(rsrv))
return;
str->qs_append(STRING_WITH_LEN("cclose "));
- if (found)
+ if (cursor_name)
{
- str->qs_append(n.str, n.length);
+ str->qs_append(cursor_name->str, cursor_name->length);
str->qs_append('@');
}
str->qs_append(m_cursor);
@@ -3875,21 +3797,21 @@ sp_instr_cfetch::execute(THD *thd, uint *nextp)
void
sp_instr_cfetch::print(String *str)
{
- List_iterator_fast<struct sp_variable> li(m_varlist);
- sp_variable_t *pv;
- LEX_STRING n;
- my_bool found= m_ctx->find_cursor(m_cursor, &n);
+ List_iterator_fast<sp_variable> li(m_varlist);
+ sp_variable *pv;
+ const LEX_STRING *cursor_name= m_ctx->find_cursor(m_cursor);
+
/* cfetch name@offset vars... */
uint rsrv= SP_INSTR_UINT_MAXLEN+8;
- if (found)
- rsrv+= n.length;
+ if (cursor_name)
+ rsrv+= cursor_name->length;
if (str->reserve(rsrv))
return;
str->qs_append(STRING_WITH_LEN("cfetch "));
- if (found)
+ if (cursor_name)
{
- str->qs_append(n.str, n.length);
+ str->qs_append(cursor_name->str, cursor_name->length);
str->qs_append('@');
}
str->qs_append(m_cursor);
diff --git a/sql/sp_head.h b/sql/sp_head.h
index 409db33ef02..77adbf091b8 100644
--- a/sql/sp_head.h
+++ b/sql/sp_head.h
@@ -30,8 +30,9 @@
#include "my_global.h" /* NO_EMBEDDED_ACCESS_CHECKS */
#include "sql_class.h" // THD, set_var.h: THD
#include "set_var.h" // Item
-#include "sp.h"
+#include "sp_pcontext.h" // sp_pcontext
#include <stddef.h>
+#include "sp.h"
/**
@defgroup Stored_Routines Stored Routines
@@ -39,6 +40,11 @@
@{
*/
+// Values for the type enum. This reflects the order of the enum declaration
+// in the CREATE TABLE command.
+//#define TYPE_ENUM_FUNCTION 1 #define TYPE_ENUM_PROCEDURE 2 #define
+//TYPE_ENUM_TRIGGER 3 #define TYPE_ENUM_PROXY 4
+
Item_result
sp_map_result_type(enum enum_field_types type);
@@ -48,12 +54,9 @@ sp_map_item_type(enum enum_field_types type);
uint
sp_get_flags_for_command(LEX *lex);
-struct sp_label;
class sp_instr;
class sp_instr_opt_meta;
class sp_instr_jump_if_not;
-struct sp_cond_type;
-struct sp_variable;
/*************************************************************************/
@@ -274,6 +277,15 @@ public:
*/
Security_context m_security_ctx;
+ /**
+ List of all items (Item_trigger_field objects) representing fields in
+ old/new version of row in trigger. We use this list for checking whenever
+ all such fields are valid at trigger creation time and for binding these
+ fields to TABLE object at table open (although for latter pointer to table
+ being opened is probably enough).
+ */
+ SQL_I_List<Item_trigger_field> m_trg_table_fields;
+
static void *
operator new(size_t size) throw ();
@@ -352,12 +364,12 @@ public:
/// Put the instruction on the backpatch list, associated with the label.
int
- push_backpatch(sp_instr *, struct sp_label *);
+ push_backpatch(sp_instr *, sp_label *);
/// Update all instruction with this label in the backpatch list to
/// the current position.
void
- backpatch(struct sp_label *);
+ backpatch(sp_label *);
/// Start a new cont. backpatch level. If 'i' is NULL, the level is just incr.
int
@@ -493,7 +505,7 @@ private:
DYNAMIC_ARRAY m_instr; ///< The "instructions"
typedef struct
{
- struct sp_label *lab;
+ sp_label *lab;
sp_instr *instr;
} bp_t;
List<bp_t> m_backpatch; ///< Instructions needing backpatching
@@ -593,7 +605,7 @@ public:
Get the continuation destination of this instruction.
@return the continuation destination
*/
- virtual uint get_cont_dest();
+ virtual uint get_cont_dest() const;
/*
Execute core function of instruction after all preparations (e.g.
@@ -865,7 +877,7 @@ public:
virtual void set_destination(uint old_dest, uint new_dest)
= 0;
- virtual uint get_cont_dest();
+ virtual uint get_cont_dest() const;
protected:
@@ -1016,15 +1028,21 @@ class sp_instr_hpush_jump : public sp_instr_jump
public:
- sp_instr_hpush_jump(uint ip, sp_pcontext *ctx, int htype, uint fp)
- : sp_instr_jump(ip, ctx), m_type(htype), m_frame(fp), m_opt_hpop(0)
+ sp_instr_hpush_jump(uint ip,
+ sp_pcontext *ctx,
+ sp_handler *handler)
+ :sp_instr_jump(ip, ctx),
+ m_handler(handler),
+ m_opt_hpop(0),
+ m_frame(ctx->current_var_count())
{
- m_cond.empty();
+ DBUG_ASSERT(m_handler->condition_values.elements == 0);
}
virtual ~sp_instr_hpush_jump()
{
- m_cond.empty();
+ m_handler->condition_values.empty();
+ m_handler= NULL;
}
virtual int execute(THD *thd, uint *nextp);
@@ -1048,17 +1066,24 @@ public:
m_opt_hpop= dest;
}
- inline void add_condition(struct sp_cond_type *cond)
- {
- m_cond.push_front(cond);
- }
+ void add_condition(sp_condition_value *condition_value)
+ { m_handler->condition_values.push_back(condition_value); }
+
+ sp_handler *get_handler()
+ { return m_handler; }
private:
- int m_type; ///< Handler type
+private:
+ /// Handler.
+ sp_handler *m_handler;
+
+ /// hpop marking end of handler scope.
+ uint m_opt_hpop;
+
+ // This attribute is needed for SHOW PROCEDURE CODE only (i.e. it's needed in
+ // debug version only). It's used in print().
uint m_frame;
- uint m_opt_hpop; // hpop marking end of handler scope.
- List<struct sp_cond_type> m_cond;
}; // class sp_instr_hpush_jump : public sp_instr_jump
@@ -1095,8 +1120,9 @@ class sp_instr_hreturn : public sp_instr_jump
public:
- sp_instr_hreturn(uint ip, sp_pcontext *ctx, uint fp)
- : sp_instr_jump(ip, ctx), m_frame(fp)
+ sp_instr_hreturn(uint ip, sp_pcontext *ctx)
+ :sp_instr_jump(ip, ctx),
+ m_frame(ctx->current_var_count())
{}
virtual ~sp_instr_hreturn()
@@ -1251,7 +1277,7 @@ public:
virtual void print(String *str);
- void add_to_varlist(struct sp_variable *var)
+ void add_to_varlist(sp_variable *var)
{
m_varlist.push_back(var);
}
@@ -1259,7 +1285,7 @@ public:
private:
uint m_cursor;
- List<struct sp_variable> m_varlist;
+ List<sp_variable> m_varlist;
}; // class sp_instr_cfetch : public sp_instr
diff --git a/sql/sp_pcontext.cc b/sql/sp_pcontext.cc
index f11daeecb7b..7c44e675811 100644
--- a/sql/sp_pcontext.cc
+++ b/sql/sp_pcontext.cc
@@ -22,133 +22,86 @@
#include "sp_pcontext.h"
#include "sp_head.h"
-/* Initial size for the dynamic arrays in sp_pcontext */
-#define PCONTEXT_ARRAY_INIT_ALLOC 16
-/* Increment size for the dynamic arrays in sp_pcontext */
-#define PCONTEXT_ARRAY_INCREMENT_ALLOC 8
-
-/*
- Sanity check for SQLSTATEs. Will not check if it's really an existing
- state (there are just too many), but will check length and bad characters.
- Returns TRUE if it's ok, FALSE if it's bad.
-*/
-bool
-sp_cond_check(LEX_STRING *sqlstate)
+bool sp_condition_value::equals(const sp_condition_value *cv) const
{
- int i;
- const char *p;
+ DBUG_ASSERT(cv);
- if (sqlstate->length != 5)
- return FALSE;
- for (p= sqlstate->str, i= 0 ; i < 5 ; i++)
+ if (this == cv)
+ return true;
+
+ if (type != cv->type)
+ return false;
+
+ switch (type)
{
- char c = p[i];
+ case sp_condition_value::ERROR_CODE:
+ return (mysqlerr == cv->mysqlerr);
+
+ case sp_condition_value::SQLSTATE:
+ return (strcmp(sql_state, cv->sql_state) == 0);
- if ((c < '0' || '9' < c) &&
- (c < 'A' || 'Z' < c))
- return FALSE;
+ default:
+ return true;
}
- /* SQLSTATE class '00' : completion condition */
- if (strncmp(sqlstate->str, "00", 2) == 0)
- return FALSE;
- return TRUE;
}
+
+void sp_pcontext::init(uint var_offset,
+ uint cursor_offset,
+ int num_case_expressions)
+{
+ m_var_offset= var_offset;
+ m_cursor_offset= cursor_offset;
+ m_num_case_exprs= num_case_expressions;
+
+ m_labels.empty();
+}
+
+
sp_pcontext::sp_pcontext()
: Sql_alloc(),
- m_max_var_index(0), m_max_cursor_index(0), m_max_handler_index(0),
- m_context_handlers(0), m_parent(NULL), m_pboundary(0),
- m_label_scope(LABEL_DEFAULT_SCOPE)
+ m_max_var_index(0), m_max_cursor_index(0),
+ m_parent(NULL), m_pboundary(0),
+ m_scope(REGULAR_SCOPE)
{
- (void) my_init_dynamic_array(&m_vars, sizeof(sp_variable_t *),
- PCONTEXT_ARRAY_INIT_ALLOC,
- PCONTEXT_ARRAY_INCREMENT_ALLOC, MYF(0));
- (void) my_init_dynamic_array(&m_case_expr_id_lst, sizeof(int),
- PCONTEXT_ARRAY_INIT_ALLOC,
- PCONTEXT_ARRAY_INCREMENT_ALLOC, MYF(0));
- (void) my_init_dynamic_array(&m_conds, sizeof(sp_cond_type_t *),
- PCONTEXT_ARRAY_INIT_ALLOC,
- PCONTEXT_ARRAY_INCREMENT_ALLOC, MYF(0));
- (void) my_init_dynamic_array(&m_cursors, sizeof(LEX_STRING),
- PCONTEXT_ARRAY_INIT_ALLOC,
- PCONTEXT_ARRAY_INCREMENT_ALLOC, MYF(0));
- (void) my_init_dynamic_array(&m_handlers, sizeof(sp_cond_type_t *),
- PCONTEXT_ARRAY_INIT_ALLOC,
- PCONTEXT_ARRAY_INCREMENT_ALLOC, MYF(0));
- m_label.empty();
- m_children.empty();
-
- m_var_offset= m_cursor_offset= 0;
- m_num_case_exprs= 0;
+ init(0, 0, 0);
}
-sp_pcontext::sp_pcontext(sp_pcontext *prev, label_scope_type label_scope)
+
+sp_pcontext::sp_pcontext(sp_pcontext *prev, sp_pcontext::enum_scope scope)
: Sql_alloc(),
- m_max_var_index(0), m_max_cursor_index(0), m_max_handler_index(0),
- m_context_handlers(0), m_parent(prev), m_pboundary(0),
- m_label_scope(label_scope)
+ m_max_var_index(0), m_max_cursor_index(0),
+ m_parent(prev), m_pboundary(0),
+ m_scope(scope)
{
- (void) my_init_dynamic_array(&m_vars, sizeof(sp_variable_t *),
- PCONTEXT_ARRAY_INIT_ALLOC,
- PCONTEXT_ARRAY_INCREMENT_ALLOC, MYF(0));
- (void) my_init_dynamic_array(&m_case_expr_id_lst, sizeof(int),
- PCONTEXT_ARRAY_INIT_ALLOC,
- PCONTEXT_ARRAY_INCREMENT_ALLOC, MYF(0));
- (void) my_init_dynamic_array(&m_conds, sizeof(sp_cond_type_t *),
- PCONTEXT_ARRAY_INIT_ALLOC,
- PCONTEXT_ARRAY_INCREMENT_ALLOC, MYF(0));
- (void) my_init_dynamic_array(&m_cursors, sizeof(LEX_STRING),
- PCONTEXT_ARRAY_INIT_ALLOC,
- PCONTEXT_ARRAY_INCREMENT_ALLOC, MYF(0));
- (void) my_init_dynamic_array(&m_handlers, sizeof(sp_cond_type_t *),
- PCONTEXT_ARRAY_INIT_ALLOC,
- PCONTEXT_ARRAY_INCREMENT_ALLOC, MYF(0));
- m_label.empty();
- m_children.empty();
-
- m_var_offset= prev->m_var_offset + prev->m_max_var_index;
- m_cursor_offset= prev->current_cursor_count();
- m_num_case_exprs= prev->get_num_case_exprs();
+ init(prev->m_var_offset + prev->m_max_var_index,
+ prev->current_cursor_count(),
+ prev->get_num_case_exprs());
}
-void
-sp_pcontext::destroy()
+
+sp_pcontext::~sp_pcontext()
{
- List_iterator_fast<sp_pcontext> li(m_children);
- sp_pcontext *child;
-
- while ((child= li++))
- child->destroy();
-
- m_children.empty();
- m_label.empty();
- delete_dynamic(&m_vars);
- delete_dynamic(&m_case_expr_id_lst);
- delete_dynamic(&m_conds);
- delete_dynamic(&m_cursors);
- delete_dynamic(&m_handlers);
+ for (size_t i= 0; i < m_children.elements(); ++i)
+ delete m_children.at(i);
}
-sp_pcontext *
-sp_pcontext::push_context(label_scope_type label_scope)
+
+sp_pcontext *sp_pcontext::push_context(THD *thd, sp_pcontext::enum_scope scope)
{
- sp_pcontext *child= new sp_pcontext(this, label_scope);
+ sp_pcontext *child= new (thd->mem_root) sp_pcontext(this, scope);
if (child)
- m_children.push_back(child);
+ m_children.append(child);
return child;
}
-sp_pcontext *
-sp_pcontext::pop_context()
+
+sp_pcontext *sp_pcontext::pop_context()
{
m_parent->m_max_var_index+= m_max_var_index;
- uint submax= max_handler_index();
- if (submax > m_parent->m_max_handler_index)
- m_parent->m_max_handler_index= submax;
-
- submax= max_cursor_index();
+ uint submax= max_cursor_index();
if (submax > m_parent->m_max_cursor_index)
m_parent->m_max_cursor_index= submax;
@@ -158,142 +111,118 @@ sp_pcontext::pop_context()
return m_parent;
}
-uint
-sp_pcontext::diff_handlers(sp_pcontext *ctx, bool exclusive)
+
+uint sp_pcontext::diff_handlers(const sp_pcontext *ctx, bool exclusive) const
{
uint n= 0;
- sp_pcontext *pctx= this;
- sp_pcontext *last_ctx= NULL;
+ const sp_pcontext *pctx= this;
+ const sp_pcontext *last_ctx= NULL;
while (pctx && pctx != ctx)
{
- n+= pctx->m_context_handlers;
+ n+= pctx->m_handlers.elements();
last_ctx= pctx;
pctx= pctx->parent_context();
}
if (pctx)
- return (exclusive && last_ctx ? n - last_ctx->m_context_handlers : n);
+ return (exclusive && last_ctx ? n - last_ctx->m_handlers.elements() : n);
return 0; // Didn't find ctx
}
-uint
-sp_pcontext::diff_cursors(sp_pcontext *ctx, bool exclusive)
+
+uint sp_pcontext::diff_cursors(const sp_pcontext *ctx, bool exclusive) const
{
uint n= 0;
- sp_pcontext *pctx= this;
- sp_pcontext *last_ctx= NULL;
+ const sp_pcontext *pctx= this;
+ const sp_pcontext *last_ctx= NULL;
while (pctx && pctx != ctx)
{
- n+= pctx->m_cursors.elements;
+ n+= pctx->m_cursors.elements();
last_ctx= pctx;
pctx= pctx->parent_context();
}
if (pctx)
- return (exclusive && last_ctx ? n - last_ctx->m_cursors.elements : n);
+ return (exclusive && last_ctx ? n - last_ctx->m_cursors.elements() : n);
return 0; // Didn't find ctx
}
-/*
- This does a linear search (from newer to older variables, in case
- we have shadowed names).
- It's possible to have a more efficient allocation and search method,
- but it might not be worth it. The typical number of parameters and
- variables will in most cases be low (a handfull).
- ...and, this is only called during parsing.
-*/
-sp_variable_t *
-sp_pcontext::find_variable(LEX_STRING *name, my_bool scoped)
+
+sp_variable *sp_pcontext::find_variable(LEX_STRING name,
+ bool current_scope_only) const
{
- uint i= m_vars.elements - m_pboundary;
+ uint i= m_vars.elements() - m_pboundary;
while (i--)
{
- sp_variable_t *p;
+ sp_variable *p= m_vars.at(i);
- get_dynamic(&m_vars, (uchar*)&p, i);
if (my_strnncoll(system_charset_info,
- (const uchar *)name->str, name->length,
+ (const uchar *)name.str, name.length,
(const uchar *)p->name.str, p->name.length) == 0)
{
return p;
}
}
- if (!scoped && m_parent)
- return m_parent->find_variable(name, scoped);
- return NULL;
+
+ return (!current_scope_only && m_parent) ?
+ m_parent->find_variable(name, false) :
+ NULL;
}
-/*
- Find a variable by offset from the top.
- This used for two things:
- - When evaluating parameters at the beginning, and setting out parameters
- at the end, of invokation. (Top frame only, so no recursion then.)
- - For printing of sp_instr_set. (Debug mode only.)
-*/
-sp_variable_t *
-sp_pcontext::find_variable(uint offset)
+
+sp_variable *sp_pcontext::find_variable(uint offset) const
{
- if (m_var_offset <= offset && offset < m_var_offset + m_vars.elements)
- { // This frame
- sp_variable_t *p;
+ if (m_var_offset <= offset && offset < m_var_offset + m_vars.elements())
+ return m_vars.at(offset - m_var_offset); // This frame
- get_dynamic(&m_vars, (uchar*)&p, offset - m_var_offset);
- return p;
- }
- if (m_parent)
- return m_parent->find_variable(offset); // Some previous frame
- return NULL; // index out of bounds
+ return m_parent ?
+ m_parent->find_variable(offset) : // Some previous frame
+ NULL; // Index out of bounds
}
-sp_variable_t *
-sp_pcontext::push_variable(LEX_STRING *name, enum enum_field_types type,
- sp_param_mode_t mode)
+
+sp_variable *sp_pcontext::add_variable(THD *thd,
+ LEX_STRING name,
+ enum enum_field_types type,
+ sp_variable::enum_mode mode)
{
- sp_variable_t *p= (sp_variable_t *)sql_alloc(sizeof(sp_variable_t));
+ sp_variable *p=
+ new (thd->mem_root) sp_variable(name, type,mode, current_var_count());
if (!p)
return NULL;
++m_max_var_index;
- p->name.str= name->str;
- p->name.length= name->length;
- p->type= type;
- p->mode= mode;
- p->offset= current_var_count();
- p->dflt= NULL;
- if (insert_dynamic(&m_vars, (uchar*)&p))
- return NULL;
- return p;
+ return m_vars.append(p) ? NULL : p;
}
-sp_label_t *
-sp_pcontext::push_label(char *name, uint ip)
+sp_label *sp_pcontext::push_label(THD *thd, LEX_STRING name, uint ip)
{
- sp_label_t *lab = (sp_label_t *)sql_alloc(sizeof(sp_label_t));
+ sp_label *label=
+ new (thd->mem_root) sp_label(name, ip, sp_label::IMPLICIT, this);
- if (lab)
- {
- lab->name= name;
- lab->ip= ip;
- lab->type= SP_LAB_IMPL;
- lab->ctx= this;
- m_label.push_front(lab);
- }
- return lab;
+ if (!label)
+ return NULL;
+
+ m_labels.push_front(label);
+
+ return label;
}
-sp_label_t *
-sp_pcontext::find_label(char *name)
+
+sp_label *sp_pcontext::find_label(LEX_STRING name)
{
- List_iterator_fast<sp_label_t> li(m_label);
- sp_label_t *lab;
+ List_iterator_fast<sp_label> li(m_labels);
+ sp_label *lab;
while ((lab= li++))
- if (my_strcasecmp(system_charset_info, name, lab->name) == 0)
+ {
+ if (my_strcasecmp(system_charset_info, name.str, lab->name.str) == 0)
return lab;
+ }
/*
Note about exception handlers.
@@ -303,159 +232,253 @@ sp_pcontext::find_label(char *name)
In short, a DECLARE HANDLER block can not refer
to labels from the parent context, as they are out of scope.
*/
- if (m_parent && (m_label_scope == LABEL_DEFAULT_SCOPE))
- return m_parent->find_label(name);
- return NULL;
+ return (m_parent && (m_scope == REGULAR_SCOPE)) ?
+ m_parent->find_label(name) :
+ NULL;
}
-int
-sp_pcontext::push_cond(LEX_STRING *name, sp_cond_type_t *val)
+
+bool sp_pcontext::add_condition(THD *thd,
+ LEX_STRING name,
+ sp_condition_value *value)
{
- sp_cond_t *p= (sp_cond_t *)sql_alloc(sizeof(sp_cond_t));
+ sp_condition *p= new (thd->mem_root) sp_condition(name, value);
if (p == NULL)
- return 1;
- p->name.str= name->str;
- p->name.length= name->length;
- p->val= val;
- return insert_dynamic(&m_conds, (uchar *)&p);
+ return true;
+
+ return m_conditions.append(p);
}
-/*
- See comment for find_variable() above
-*/
-sp_cond_type_t *
-sp_pcontext::find_cond(LEX_STRING *name, my_bool scoped)
+
+sp_condition_value *sp_pcontext::find_condition(LEX_STRING name,
+ bool current_scope_only) const
{
- uint i= m_conds.elements;
+ uint i= m_conditions.elements();
while (i--)
{
- sp_cond_t *p;
+ sp_condition *p= m_conditions.at(i);
- get_dynamic(&m_conds, (uchar*)&p, i);
if (my_strnncoll(system_charset_info,
- (const uchar *)name->str, name->length,
- (const uchar *)p->name.str, p->name.length) == 0)
+ (const uchar *) name.str, name.length,
+ (const uchar *) p->name.str, p->name.length) == 0)
{
- return p->val;
+ return p->value;
}
}
- if (!scoped && m_parent)
- return m_parent->find_cond(name, scoped);
- return NULL;
+
+ return (!current_scope_only && m_parent) ?
+ m_parent->find_condition(name, false) :
+ NULL;
}
-/*
- This only searches the current context, for error checking of
- duplicates.
- Returns TRUE if found.
-*/
-bool
-sp_pcontext::find_handler(sp_cond_type_t *cond)
+
+sp_handler *sp_pcontext::add_handler(THD *thd,
+ sp_handler::enum_type type)
{
- uint i= m_handlers.elements;
+ sp_handler *h= new (thd->mem_root) sp_handler(type);
- while (i--)
+ if (!h)
+ return NULL;
+
+ return m_handlers.append(h) ? NULL : h;
+}
+
+
+bool sp_pcontext::check_duplicate_handler(
+ const sp_condition_value *cond_value) const
+{
+ for (size_t i= 0; i < m_handlers.elements(); ++i)
{
- sp_cond_type_t *p;
+ sp_handler *h= m_handlers.at(i);
+
+ List_iterator_fast<sp_condition_value> li(h->condition_values);
+ sp_condition_value *cv;
- get_dynamic(&m_handlers, (uchar*)&p, i);
- if (cond->type == p->type)
+ while ((cv= li++))
{
- switch (p->type)
+ if (cond_value->equals(cv))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+
+sp_handler*
+sp_pcontext::find_handler(const char *sql_state,
+ uint sql_errno,
+ Sql_condition::enum_warning_level level) const
+{
+ sp_handler *found_handler= NULL;
+ sp_condition_value *found_cv= NULL;
+
+ for (size_t i= 0; i < m_handlers.elements(); ++i)
+ {
+ sp_handler *h= m_handlers.at(i);
+
+ List_iterator_fast<sp_condition_value> li(h->condition_values);
+ sp_condition_value *cv;
+
+ while ((cv= li++))
+ {
+ switch (cv->type)
{
- case sp_cond_type_t::number:
- if (cond->mysqlerr == p->mysqlerr)
- return TRUE;
- break;
- case sp_cond_type_t::state:
- if (strcmp(cond->sqlstate, p->sqlstate) == 0)
- return TRUE;
- break;
- default:
- return TRUE;
+ case sp_condition_value::ERROR_CODE:
+ if (sql_errno == cv->mysqlerr &&
+ (!found_cv ||
+ found_cv->type > sp_condition_value::ERROR_CODE))
+ {
+ found_cv= cv;
+ found_handler= h;
+ }
+ break;
+
+ case sp_condition_value::SQLSTATE:
+ if (strcmp(sql_state, cv->sql_state) == 0 &&
+ (!found_cv ||
+ found_cv->type > sp_condition_value::SQLSTATE))
+ {
+ found_cv= cv;
+ found_handler= h;
+ }
+ break;
+
+ case sp_condition_value::WARNING:
+ if ((is_sqlstate_warning(sql_state) ||
+ level == Sql_condition::WARN_LEVEL_WARN) && !found_cv)
+ {
+ found_cv= cv;
+ found_handler= h;
+ }
+ break;
+
+ case sp_condition_value::NOT_FOUND:
+ if (is_sqlstate_not_found(sql_state) && !found_cv)
+ {
+ found_cv= cv;
+ found_handler= h;
+ }
+ break;
+
+ case sp_condition_value::EXCEPTION:
+ if (is_sqlstate_exception(sql_state) &&
+ level == Sql_condition::WARN_LEVEL_ERROR && !found_cv)
+ {
+ found_cv= cv;
+ found_handler= h;
+ }
+ break;
}
}
}
- return FALSE;
+
+ if (found_handler)
+ return found_handler;
+
+
+ // There is no appropriate handler in this parsing context. We need to look up
+ // in parent contexts. There might be two cases here:
+ //
+ // 1. The current context has REGULAR_SCOPE. That means, it's a simple
+ // BEGIN..END block:
+ // ...
+ // BEGIN
+ // ... # We're here.
+ // END
+ // ...
+ // In this case we simply call find_handler() on parent's context recursively.
+ //
+ // 2. The current context has HANDLER_SCOPE. That means, we're inside an
+ // SQL-handler block:
+ // ...
+ // DECLARE ... HANDLER FOR ...
+ // BEGIN
+ // ... # We're here.
+ // END
+ // ...
+ // In this case we can not just call parent's find_handler(), because
+ // parent's handler don't catch conditions from this scope. Instead, we should
+ // try to find first parent context (we might have nested handler
+ // declarations), which has REGULAR_SCOPE (i.e. which is regular BEGIN..END
+ // block).
+
+ const sp_pcontext *p= this;
+
+ while (p && p->m_scope == HANDLER_SCOPE)
+ p= p->m_parent;
+
+ if (!p || !p->m_parent)
+ return NULL;
+
+ return p->m_parent->find_handler(sql_state, sql_errno, level);
}
-int
-sp_pcontext::push_cursor(LEX_STRING *name)
+
+bool sp_pcontext::add_cursor(LEX_STRING name)
{
- LEX_STRING n;
+ if (m_cursors.elements() == m_max_cursor_index)
+ ++m_max_cursor_index;
- if (m_cursors.elements == m_max_cursor_index)
- m_max_cursor_index+= 1;
- n.str= name->str;
- n.length= name->length;
- return insert_dynamic(&m_cursors, (uchar *)&n);
+ return m_cursors.append(name);
}
-/*
- See comment for find_variable() above
-*/
-my_bool
-sp_pcontext::find_cursor(LEX_STRING *name, uint *poff, my_bool scoped)
+
+bool sp_pcontext::find_cursor(LEX_STRING name,
+ uint *poff,
+ bool current_scope_only) const
{
- uint i= m_cursors.elements;
+ uint i= m_cursors.elements();
while (i--)
{
- LEX_STRING n;
+ LEX_STRING n= m_cursors.at(i);
- get_dynamic(&m_cursors, (uchar*)&n, i);
if (my_strnncoll(system_charset_info,
- (const uchar *)name->str, name->length,
- (const uchar *)n.str, n.length) == 0)
+ (const uchar *) name.str, name.length,
+ (const uchar *) n.str, n.length) == 0)
{
*poff= m_cursor_offset + i;
- return TRUE;
+ return true;
}
}
- if (!scoped && m_parent)
- return m_parent->find_cursor(name, poff, scoped);
- return FALSE;
+
+ return (!current_scope_only && m_parent) ?
+ m_parent->find_cursor(name, poff, false) :
+ false;
}
-void
-sp_pcontext::retrieve_field_definitions(List<Create_field> *field_def_lst)
+void sp_pcontext::retrieve_field_definitions(
+ List<Create_field> *field_def_lst) const
{
/* Put local/context fields in the result list. */
- for (uint i = 0; i < m_vars.elements; ++i)
+ for (size_t i= 0; i < m_vars.elements(); ++i)
{
- sp_variable_t *var_def;
- get_dynamic(&m_vars, (uchar*) &var_def, i);
+ sp_variable *var_def= m_vars.at(i);
field_def_lst->push_back(&var_def->field_def);
}
/* Put the fields of the enclosed contexts in the result list. */
- List_iterator_fast<sp_pcontext> li(m_children);
- sp_pcontext *ctx;
-
- while ((ctx = li++))
- ctx->retrieve_field_definitions(field_def_lst);
+ for (size_t i= 0; i < m_children.elements(); ++i)
+ m_children.at(i)->retrieve_field_definitions(field_def_lst);
}
-/*
- Find a cursor by offset from the top.
- This is only used for debugging.
-*/
-my_bool
-sp_pcontext::find_cursor(uint offset, LEX_STRING *n)
+
+const LEX_STRING *sp_pcontext::find_cursor(uint offset) const
{
if (m_cursor_offset <= offset &&
- offset < m_cursor_offset + m_cursors.elements)
- { // This frame
- get_dynamic(&m_cursors, (uchar*)n, offset - m_cursor_offset);
- return TRUE;
+ offset < m_cursor_offset + m_cursors.elements())
+ {
+ return &m_cursors.at(offset - m_cursor_offset); // This frame
}
- if (m_parent)
- return m_parent->find_cursor(offset, n); // Some previous frame
- return FALSE; // index out of bounds
+
+ return m_parent ?
+ m_parent->find_cursor(offset) : // Some previous frame
+ NULL; // Index out of bounds
}
diff --git a/sql/sp_pcontext.h b/sql/sp_pcontext.h
index f1d0d250c47..4d8623108aa 100644
--- a/sql/sp_pcontext.h
+++ b/sql/sp_pcontext.h
@@ -24,438 +24,541 @@
#include "sql_string.h" // LEX_STRING
#include "mysql_com.h" // enum_field_types
#include "field.h" // Create_field
+#include "sql_array.h" // Dynamic_array
-class sp_pcontext;
-typedef enum
-{
- sp_param_in,
- sp_param_out,
- sp_param_inout
-} sp_param_mode_t;
+/// This class represents a stored program variable or a parameter
+/// (also referenced as 'SP-variable').
-typedef struct sp_variable
+class sp_variable : public Sql_alloc
{
- LEX_STRING name;
- enum enum_field_types type;
- sp_param_mode_t mode;
-
- /*
- offset -- this the index to the variable's value in the runtime frame.
- This is calculated during parsing and used when creating sp_instr_set
- instructions and Item_splocal items.
- I.e. values are set/referred by array indexing in runtime.
- */
- uint offset;
-
- Item *dflt;
- Create_field field_def;
-} sp_variable_t;
+public:
+ enum enum_mode
+ {
+ MODE_IN,
+ MODE_OUT,
+ MODE_INOUT
+ };
+ /// Name of the SP-variable.
+ LEX_STRING name;
-#define SP_LAB_IMPL 0 // Implicit label generated by parser
-#define SP_LAB_BEGIN 1 // Label at BEGIN
-#define SP_LAB_ITER 2 // Label at iteration control
+ /// Field-type of the SP-variable.
+ enum enum_field_types type;
-/*
- An SQL/PSM label. Can refer to the identifier used with the
- "label_name:" construct which may precede some SQL/PSM statements, or
- to an implicit implementation-dependent identifier which the parser
- inserts before a high-level flow control statement such as
- IF/WHILE/REPEAT/LOOP, when such statement is rewritten into
- a combination of low-level jump/jump_if instructions and labels.
-*/
+ /// Mode of the SP-variable.
+ enum_mode mode;
-typedef struct sp_label
-{
- char *name;
- uint ip; // Instruction index
- int type; // begin/iter or ref/free
- sp_pcontext *ctx; // The label's context
-} sp_label_t;
+ /// The index to the variable's value in the runtime frame.
+ ///
+ /// It is calculated during parsing and used when creating sp_instr_set
+ /// instructions and Item_splocal items. I.e. values are set/referred by
+ /// array indexing in runtime.
+ uint offset;
-typedef struct sp_cond_type
-{
- enum { number, state, warning, notfound, exception } type;
- char sqlstate[SQLSTATE_LENGTH+1];
- uint mysqlerr;
-} sp_cond_type_t;
+ /// Default value of the SP-variable (if any).
+ Item *default_value;
-/*
- Sanity check for SQLSTATEs. Will not check if it's really an existing
- state (there are just too many), but will check length bad characters.
-*/
-extern bool
-sp_cond_check(LEX_STRING *sqlstate);
+ /// Full type information (field meta-data) of the SP-variable.
+ Create_field field_def;
-typedef struct sp_cond
-{
- LEX_STRING name;
- sp_cond_type_t *val;
-} sp_cond_t;
-
-/**
- The scope of a label in Stored Procedures,
- for name resolution of labels in a parsing context.
-*/
-enum label_scope_type
-{
- /**
- The labels declared in a parent context are in scope.
- */
- LABEL_DEFAULT_SCOPE,
- /**
- The labels declared in a parent context are not in scope.
- */
- LABEL_HANDLER_SCOPE
+public:
+ sp_variable(LEX_STRING _name, enum_field_types _type, enum_mode _mode,
+ uint _offset)
+ :Sql_alloc(),
+ name(_name),
+ type(_type),
+ mode(_mode),
+ offset(_offset),
+ default_value(NULL)
+ { }
};
-/**
- The parse-time context, used to keep track of declared variables/parameters,
- conditions, handlers, cursors and labels, during parsing.
- sp_contexts are organized as a tree, with one object for each begin-end
- block, one object for each exception handler,
- plus a root-context for the parameters.
- This is used during parsing for looking up defined names (e.g. declared
- variables and visible labels), for error checking, and to calculate offsets
- to be used at runtime. (During execution variable values, active handlers
- and cursors, etc, are referred to by an index in a stack.)
- Parsing contexts for exception handlers limit the visibility of labels.
- The pcontext tree is also kept during execution and is used for error
- checking (e.g. correct number of parameters), and in the future, used by
- the debugger.
-*/
+///////////////////////////////////////////////////////////////////////////
-class sp_pcontext : public Sql_alloc
+/// This class represents an SQL/PSM label. Can refer to the identifier
+/// used with the "label_name:" construct which may precede some SQL/PSM
+/// statements, or to an implicit implementation-dependent identifier which
+/// the parser inserts before a high-level flow control statement such as
+/// IF/WHILE/REPEAT/LOOP, when such statement is rewritten into a
+/// combination of low-level jump/jump_if instructions and labels.
+
+class sp_label : public Sql_alloc
{
public:
-
- /**
- Constructor.
- Builds a parsing context root node.
- */
- sp_pcontext();
-
- // Free memory
- void
- destroy();
-
- /**
- Create and push a new context in the tree.
- @param label_scope label scope for the new parsing context
- @return the node created
- */
- sp_pcontext *
- push_context(label_scope_type label_scope);
-
- /**
- Pop a node from the parsing context tree.
- @return the parent node
- */
- sp_pcontext *
- pop_context();
-
- sp_pcontext *
- parent_context()
+ enum enum_type
{
- return m_parent;
- }
+ /// Implicit label generated by parser.
+ IMPLICIT,
- /*
- Number of handlers/cursors to pop between this context and 'ctx'.
- If 'exclusive' is true, don't count the last block we are leaving;
- this is used for LEAVE where we will jump to the cpop/hpop instructions.
- */
- uint
- diff_handlers(sp_pcontext *ctx, bool exclusive);
- uint
- diff_cursors(sp_pcontext *ctx, bool exclusive);
-
-
- //
- // Parameters and variables
- //
-
- /*
- The maximum number of variables used in this and all child contexts
- In the root, this gives us the number of slots needed for variables
- during execution.
- */
- inline uint
- max_var_index()
- {
- return m_max_var_index;
- }
+ /// Label at BEGIN.
+ BEGIN,
- /*
- The current number of variables used in the parents (from the root),
- including this context.
- */
- inline uint
- current_var_count()
- {
- return m_var_offset + m_vars.elements;
- }
+ /// Label at iteration control
+ ITERATION
+ };
- /* The number of variables in this context alone */
- inline uint
- context_var_count()
- {
- return m_vars.elements;
- }
+ /// Name of the label.
+ LEX_STRING name;
- /* Map index in this pcontext to runtime offset */
- inline uint
- var_context2runtime(uint i)
- {
- return m_var_offset + i;
- }
+ /// Instruction pointer of the label.
+ uint ip;
- /* Set type of variable. 'i' is the offset from the top */
- inline void
- set_type(uint i, enum enum_field_types type)
- {
- sp_variable_t *p= find_variable(i);
+ /// Type of the label.
+ enum_type type;
- if (p)
- p->type= type;
- }
+ /// Scope of the label.
+ class sp_pcontext *ctx;
- /* Set default value of variable. 'i' is the offset from the top */
- inline void
- set_default(uint i, Item *it)
- {
- sp_variable_t *p= find_variable(i);
+public:
+ sp_label(LEX_STRING _name, uint _ip, enum_type _type, sp_pcontext *_ctx)
+ :Sql_alloc(),
+ name(_name),
+ ip(_ip),
+ type(_type),
+ ctx(_ctx)
+ { }
+};
- if (p)
- p->dflt= it;
- }
+///////////////////////////////////////////////////////////////////////////
+
+/// This class represents condition-value term in DECLARE CONDITION or
+/// DECLARE HANDLER statements. sp_condition_value has little to do with
+/// SQL-conditions.
+///
+/// In some sense, this class is a union -- a set of filled attributes
+/// depends on the sp_condition_value::type value.
- sp_variable_t *
- push_variable(LEX_STRING *name, enum enum_field_types type,
- sp_param_mode_t mode);
-
- /*
- Retrieve definitions of fields from the current context and its
- children.
- */
- void
- retrieve_field_definitions(List<Create_field> *field_def_lst);
-
- // Find by name
- sp_variable_t *
- find_variable(LEX_STRING *name, my_bool scoped=0);
-
- // Find by offset (from the top)
- sp_variable_t *
- find_variable(uint offset);
-
- /*
- Set the current scope boundary (for default values).
- The argument is the number of variables to skip.
- */
- inline void
- declare_var_boundary(uint n)
+class sp_condition_value : public Sql_alloc
+{
+public:
+ enum enum_type
{
- m_pboundary= n;
- }
+ ERROR_CODE,
+ SQLSTATE,
+ WARNING,
+ NOT_FOUND,
+ EXCEPTION
+ };
- /*
- CASE expressions support.
- */
+ /// Type of the condition value.
+ enum_type type;
- inline int
- register_case_expr()
- {
- return m_num_case_exprs++;
- }
+ /// SQLSTATE of the condition value.
+ char sql_state[SQLSTATE_LENGTH+1];
- inline int
- get_num_case_exprs() const
- {
- return m_num_case_exprs;
- }
+ /// MySQL error code of the condition value.
+ uint mysqlerr;
- inline bool
- push_case_expr_id(int case_expr_id)
+public:
+ sp_condition_value(uint _mysqlerr)
+ :Sql_alloc(),
+ type(ERROR_CODE),
+ mysqlerr(_mysqlerr)
+ { }
+
+ sp_condition_value(const char *_sql_state)
+ :Sql_alloc(),
+ type(SQLSTATE)
{
- return insert_dynamic(&m_case_expr_id_lst, (uchar*) &case_expr_id);
+ memcpy(sql_state, _sql_state, SQLSTATE_LENGTH);
+ sql_state[SQLSTATE_LENGTH]= 0;
}
- inline void
- pop_case_expr_id()
+ sp_condition_value(enum_type _type)
+ :Sql_alloc(),
+ type(_type)
{
- pop_dynamic(&m_case_expr_id_lst);
+ DBUG_ASSERT(type != ERROR_CODE && type != SQLSTATE);
}
- inline int
- get_current_case_expr_id() const
- {
- int case_expr_id;
+ /// Check if two instances of sp_condition_value are equal or not.
+ ///
+ /// @param cv another instance of sp_condition_value to check.
+ ///
+ /// @return true if the instances are equal, false otherwise.
+ bool equals(const sp_condition_value *cv) const;
+};
- get_dynamic((DYNAMIC_ARRAY*)&m_case_expr_id_lst, (uchar*) &case_expr_id,
- m_case_expr_id_lst.elements - 1);
+///////////////////////////////////////////////////////////////////////////
- return case_expr_id;
- }
+/// This class represents 'DECLARE CONDITION' statement.
+/// sp_condition has little to do with SQL-conditions.
- //
- // Labels
- //
+class sp_condition : public Sql_alloc
+{
+public:
+ /// Name of the condition.
+ LEX_STRING name;
- sp_label_t *
- push_label(char *name, uint ip);
+ /// Value of the condition.
+ sp_condition_value *value;
- sp_label_t *
- find_label(char *name);
+public:
+ sp_condition(LEX_STRING _name, sp_condition_value *_value)
+ :Sql_alloc(),
+ name(_name),
+ value(_value)
+ { }
+};
- inline sp_label_t *
- last_label()
- {
- sp_label_t *lab= m_label.head();
+///////////////////////////////////////////////////////////////////////////
- if (!lab && m_parent)
- lab= m_parent->last_label();
- return lab;
- }
+/// This class represents 'DECLARE HANDLER' statement.
- inline sp_label_t *
- pop_label()
+class sp_handler : public Sql_alloc
+{
+public:
+ /// Enumeration of possible handler types.
+ /// Note: UNDO handlers are not (and have never been) supported.
+ enum enum_type
{
- return m_label.pop();
- }
+ EXIT,
+ CONTINUE
+ };
- //
- // Conditions
- //
+ /// Handler type.
+ enum_type type;
- int
- push_cond(LEX_STRING *name, sp_cond_type_t *val);
+ /// Conditions caught by this handler.
+ List<sp_condition_value> condition_values;
- sp_cond_type_t *
- find_cond(LEX_STRING *name, my_bool scoped=0);
+public:
+ /// The constructor.
+ ///
+ /// @param _type SQL-handler type.
+ sp_handler(enum_type _type)
+ :Sql_alloc(),
+ type(_type)
+ { }
+};
- //
- // Handlers
- //
+///////////////////////////////////////////////////////////////////////////
+
+/// The class represents parse-time context, which keeps track of declared
+/// variables/parameters, conditions, handlers, cursors and labels.
+///
+/// sp_pcontext objects are organized in a tree according to the following
+/// rules:
+/// - one sp_pcontext object corresponds for for each BEGIN..END block;
+/// - one sp_pcontext object corresponds for each exception handler;
+/// - one additional sp_pcontext object is created to contain
+/// Stored Program parameters.
+///
+/// sp_pcontext objects are used both at parse-time and at runtime.
+///
+/// During the parsing stage sp_pcontext objects are used:
+/// - to look up defined names (e.g. declared variables and visible
+/// labels);
+/// - to check for duplicates;
+/// - for error checking;
+/// - to calculate offsets to be used at runtime.
+///
+/// During the runtime phase, a tree of sp_pcontext objects is used:
+/// - for error checking (e.g. to check correct number of parameters);
+/// - to resolve SQL-handlers.
- inline void
- push_handler(sp_cond_type_t *cond)
+class sp_pcontext : public Sql_alloc
+{
+public:
+ enum enum_scope
{
- insert_dynamic(&m_handlers, (uchar*)&cond);
- }
-
- bool
- find_handler(sp_cond_type *cond);
+ /// REGULAR_SCOPE designates regular BEGIN ... END blocks.
+ REGULAR_SCOPE,
- inline uint
- max_handler_index()
- {
- return m_max_handler_index + m_context_handlers;
- }
+ /// HANDLER_SCOPE designates SQL-handler blocks.
+ HANDLER_SCOPE
+ };
- inline void
- add_handlers(uint n)
+public:
+ sp_pcontext();
+ ~sp_pcontext();
+
+
+ /// Create and push a new context in the tree.
+
+ /// @param thd thread context.
+ /// @param scope scope of the new parsing context.
+ /// @return the node created.
+ sp_pcontext *push_context(THD *thd, enum_scope scope);
+
+ /// Pop a node from the parsing context tree.
+ /// @return the parent node.
+ sp_pcontext *pop_context();
+
+ sp_pcontext *parent_context() const
+ { return m_parent; }
+
+ /// Calculate and return the number of handlers to pop between the given
+ /// context and this one.
+ ///
+ /// @param ctx the other parsing context.
+ /// @param exclusive specifies if the last scope should be excluded.
+ ///
+ /// @return the number of handlers to pop between the given context and
+ /// this one. If 'exclusive' is true, don't count the last scope we are
+ /// leaving; this is used for LEAVE where we will jump to the hpop
+ /// instructions.
+ uint diff_handlers(const sp_pcontext *ctx, bool exclusive) const;
+
+ /// Calculate and return the number of cursors to pop between the given
+ /// context and this one.
+ ///
+ /// @param ctx the other parsing context.
+ /// @param exclusive specifies if the last scope should be excluded.
+ ///
+ /// @return the number of cursors to pop between the given context and
+ /// this one. If 'exclusive' is true, don't count the last scope we are
+ /// leaving; this is used for LEAVE where we will jump to the cpop
+ /// instructions.
+ uint diff_cursors(const sp_pcontext *ctx, bool exclusive) const;
+
+ /////////////////////////////////////////////////////////////////////////
+ // SP-variables (parameters and variables).
+ /////////////////////////////////////////////////////////////////////////
+
+ /// @return the maximum number of variables used in this and all child
+ /// contexts. For the root parsing context, this gives us the number of
+ /// slots needed for variables during the runtime phase.
+ uint max_var_index() const
+ { return m_max_var_index; }
+
+ /// @return the current number of variables used in the parent contexts
+ /// (from the root), including this context.
+ uint current_var_count() const
+ { return m_var_offset + m_vars.elements(); }
+
+ /// @return the number of variables in this context alone.
+ uint context_var_count() const
+ { return m_vars.elements(); }
+
+ /// @return map index in this parsing context to runtime offset.
+ uint var_context2runtime(uint i) const
+ { return m_var_offset + i; }
+
+ /// Add SP-variable to the parsing context.
+ ///
+ /// @param thd Thread context.
+ /// @param name Name of the SP-variable.
+ /// @param type Type of the SP-variable.
+ /// @param mode Mode of the SP-variable.
+ ///
+ /// @return instance of newly added SP-variable.
+ sp_variable *add_variable(THD *thd,
+ LEX_STRING name,
+ enum enum_field_types type,
+ sp_variable::enum_mode mode);
+
+ /// Retrieve full type information about SP-variables in this parsing
+ /// context and its children.
+ ///
+ /// @param field_def_lst[out] Container to store type information.
+ void retrieve_field_definitions(List<Create_field> *field_def_lst) const;
+
+ /// Find SP-variable by name.
+ ///
+ /// The function does a linear search (from newer to older variables,
+ /// in case we have shadowed names).
+ ///
+ /// The function is called only at parsing time.
+ ///
+ /// @param name Variable name.
+ /// @param current_scope_only A flag if we search only in current scope.
+ ///
+ /// @return instance of found SP-variable, or NULL if not found.
+ sp_variable *find_variable(LEX_STRING name, bool current_scope_only) const;
+
+ /// Find SP-variable by the offset in the root parsing context.
+ ///
+ /// The function is used for two things:
+ /// - When evaluating parameters at the beginning, and setting out parameters
+ /// at the end, of invocation. (Top frame only, so no recursion then.)
+ /// - For printing of sp_instr_set. (Debug mode only.)
+ ///
+ /// @param offset Variable offset in the root parsing context.
+ ///
+ /// @return instance of found SP-variable, or NULL if not found.
+ sp_variable *find_variable(uint offset) const;
+
+ /// Set the current scope boundary (for default values).
+ ///
+ /// @param n The number of variables to skip.
+ void declare_var_boundary(uint n)
+ { m_pboundary= n; }
+
+ /////////////////////////////////////////////////////////////////////////
+ // CASE expressions.
+ /////////////////////////////////////////////////////////////////////////
+
+ int register_case_expr()
+ { return m_num_case_exprs++; }
+
+ int get_num_case_exprs() const
+ { return m_num_case_exprs; }
+
+ bool push_case_expr_id(int case_expr_id)
+ { return m_case_expr_ids.append(case_expr_id); }
+
+ void pop_case_expr_id()
+ { m_case_expr_ids.pop(); }
+
+ int get_current_case_expr_id() const
+ { return *m_case_expr_ids.back(); }
+
+ /////////////////////////////////////////////////////////////////////////
+ // Labels.
+ /////////////////////////////////////////////////////////////////////////
+
+ sp_label *push_label(THD *thd, LEX_STRING name, uint ip);
+
+ sp_label *find_label(LEX_STRING name);
+
+ sp_label *last_label()
{
- m_context_handlers+= n;
- }
-
- //
- // Cursors
- //
+ sp_label *label= m_labels.head();
- int
- push_cursor(LEX_STRING *name);
+ if (!label && m_parent)
+ label= m_parent->last_label();
- my_bool
- find_cursor(LEX_STRING *name, uint *poff, my_bool scoped=0);
-
- /* Find by offset (for debugging only) */
- my_bool
- find_cursor(uint offset, LEX_STRING *n);
-
- inline uint
- max_cursor_index()
- {
- return m_max_cursor_index + m_cursors.elements;
- }
-
- inline uint
- current_cursor_count()
- {
- return m_cursor_offset + m_cursors.elements;
+ return label;
}
-protected:
+ sp_label *pop_label()
+ { return m_labels.pop(); }
+
+ /////////////////////////////////////////////////////////////////////////
+ // Conditions.
+ /////////////////////////////////////////////////////////////////////////
+
+ bool add_condition(THD *thd, LEX_STRING name, sp_condition_value *value);
+
+ /// See comment for find_variable() above.
+ sp_condition_value *find_condition(LEX_STRING name,
+ bool current_scope_only) const;
+
+ /////////////////////////////////////////////////////////////////////////
+ // Handlers.
+ /////////////////////////////////////////////////////////////////////////
+
+ sp_handler *add_handler(THD* thd, sp_handler::enum_type type);
+
+ /// This is an auxilary parsing-time function to check if an SQL-handler
+ /// exists in the current parsing context (current scope) for the given
+ /// SQL-condition. This function is used to check for duplicates during
+ /// the parsing phase.
+ ///
+ /// This function can not be used during the runtime phase to check
+ /// SQL-handler existence because it searches for the SQL-handler in the
+ /// current scope only (during runtime, current and parent scopes
+ /// should be checked according to the SQL-handler resolution rules).
+ ///
+ /// @param condition_value the handler condition value
+ /// (not SQL-condition!).
+ ///
+ /// @retval true if such SQL-handler exists.
+ /// @retval false otherwise.
+ bool check_duplicate_handler(const sp_condition_value *cond_value) const;
+
+ /// Find an SQL handler for the given SQL condition according to the
+ /// SQL-handler resolution rules. This function is used at runtime.
+ ///
+ /// @param sql_state The SQL condition state
+ /// @param sql_errno The error code
+ /// @param level The SQL condition level
+ ///
+ /// @return a pointer to the found SQL-handler or NULL.
+ sp_handler *find_handler(const char *sql_state,
+ uint sql_errno,
+ Sql_condition::enum_warning_level level) const;
+
+ /////////////////////////////////////////////////////////////////////////
+ // Cursors.
+ /////////////////////////////////////////////////////////////////////////
+
+ bool add_cursor(LEX_STRING name);
+
+ /// See comment for find_variable() above.
+ bool find_cursor(LEX_STRING name, uint *poff, bool current_scope_only) const;
+
+ /// Find cursor by offset (for debugging only).
+ const LEX_STRING *find_cursor(uint offset) const;
+
+ uint max_cursor_index() const
+ { return m_max_cursor_index + m_cursors.elements(); }
+
+ uint current_cursor_count() const
+ { return m_cursor_offset + m_cursors.elements(); }
- /**
- Constructor for a tree node.
- @param prev the parent parsing context
- @param label_scope label_scope for this parsing context
- */
- sp_pcontext(sp_pcontext *prev, label_scope_type label_scope);
-
- /*
- m_max_var_index -- number of variables (including all types of arguments)
- in this context including all children contexts.
-
- m_max_var_index >= m_vars.elements.
+private:
+ /// Constructor for a tree node.
+ /// @param prev the parent parsing context
+ /// @param scope scope of this parsing context
+ sp_pcontext(sp_pcontext *prev, enum_scope scope);
- m_max_var_index of the root parsing context contains number of all
- variables (including arguments) in all enclosed contexts.
- */
- uint m_max_var_index;
+ void init(uint var_offset, uint cursor_offset, int num_case_expressions);
- // The maximum sub context's framesizes
- uint m_max_cursor_index;
- uint m_max_handler_index;
- uint m_context_handlers; // No. of handlers in this context
+ /* Prevent use of these */
+ sp_pcontext(const sp_pcontext &);
+ void operator=(sp_pcontext &);
private:
+ /// m_max_var_index -- number of variables (including all types of arguments)
+ /// in this context including all children contexts.
+ ///
+ /// m_max_var_index >= m_vars.elements().
+ ///
+ /// m_max_var_index of the root parsing context contains number of all
+ /// variables (including arguments) in all enclosed contexts.
+ uint m_max_var_index;
+
+ /// The maximum sub context's framesizes.
+ uint m_max_cursor_index;
- sp_pcontext *m_parent; // Parent context
-
- /*
- m_var_offset -- this is an index of the first variable in this
- parsing context.
-
- m_var_offset is 0 for root context.
+ /// Parent context.
+ sp_pcontext *m_parent;
- Since now each variable is stored in separate place, no reuse is done,
- so m_var_offset is different for all enclosed contexts.
- */
+ /// An index of the first SP-variable in this parsing context. The index
+ /// belongs to a runtime table of SP-variables.
+ ///
+ /// Note:
+ /// - m_var_offset is 0 for root parsing context;
+ /// - m_var_offset is different for all nested parsing contexts.
uint m_var_offset;
- uint m_cursor_offset; // Cursor offset for this context
+ /// Cursor offset for this context.
+ uint m_cursor_offset;
- /*
- Boundary for finding variables in this context. This is the number
- of variables currently "invisible" to default clauses.
- This is normally 0, but will be larger during parsing of
- DECLARE ... DEFAULT, to get the scope right for DEFAULT values.
- */
+ /// Boundary for finding variables in this context. This is the number of
+ /// variables currently "invisible" to default clauses. This is normally 0,
+ /// but will be larger during parsing of DECLARE ... DEFAULT, to get the
+ /// scope right for DEFAULT values.
uint m_pboundary;
int m_num_case_exprs;
- DYNAMIC_ARRAY m_vars; // Parameters/variables
- DYNAMIC_ARRAY m_case_expr_id_lst; /* Stack of CASE expression ids. */
- DYNAMIC_ARRAY m_conds; // Conditions
- DYNAMIC_ARRAY m_cursors; // Cursors
- DYNAMIC_ARRAY m_handlers; // Handlers, for checking for duplicates
+ /// SP parameters/variables.
+ Dynamic_array<sp_variable *> m_vars;
- List<sp_label_t> m_label; // The label list
+ /// Stack of CASE expression ids.
+ Dynamic_array<int> m_case_expr_ids;
- List<sp_pcontext> m_children; // Children contexts, used for destruction
+ /// Stack of SQL-conditions.
+ Dynamic_array<sp_condition *> m_conditions;
- /**
- Scope of labels for this parsing context.
- */
- label_scope_type m_label_scope;
+ /// Stack of cursors.
+ Dynamic_array<LEX_STRING> m_cursors;
-private:
- sp_pcontext(const sp_pcontext &); /* Prevent use of these */
- void operator=(sp_pcontext &);
+ /// Stack of SQL-handlers.
+ Dynamic_array<sp_handler *> m_handlers;
+
+ /// List of labels.
+ List<sp_label> m_labels;
+
+ /// Children contexts, used for destruction.
+ Dynamic_array<sp_pcontext *> m_children;
+
+ /// Scope of this parsing context.
+ enum_scope m_scope;
}; // class sp_pcontext : public Sql_alloc
diff --git a/sql/sp_rcontext.cc b/sql/sp_rcontext.cc
index 30acfebabb2..42476f7a596 100644
--- a/sql/sp_rcontext.cc
+++ b/sql/sp_rcontext.cc
@@ -26,23 +26,21 @@
#include "sp_pcontext.h"
#include "sql_select.h" // create_virtual_tmp_table
-sp_rcontext::sp_rcontext(sp_pcontext *root_parsing_ctx,
+///////////////////////////////////////////////////////////////////////////
+// sp_rcontext implementation.
+///////////////////////////////////////////////////////////////////////////
+
+
+sp_rcontext::sp_rcontext(const sp_pcontext *root_parsing_ctx,
Field *return_value_fld,
- sp_rcontext *prev_runtime_ctx)
- :end_partial_result_set(FALSE),
+ bool in_sub_stmt)
+ :end_partial_result_set(false),
m_root_parsing_ctx(root_parsing_ctx),
- m_var_table(0),
- m_var_items(0),
+ m_var_table(NULL),
m_return_value_fld(return_value_fld),
- m_return_value_set(FALSE),
- in_sub_stmt(FALSE),
- m_hcount(0),
- m_hsp(0),
- m_ihsp(0),
- m_hfound(-1),
- m_ccount(0),
- m_case_expr_holders(0),
- m_prev_runtime_ctx(prev_runtime_ctx)
+ m_return_value_set(false),
+ m_in_sub_stmt(in_sub_stmt),
+ m_ccount(0)
{
}
@@ -51,422 +49,324 @@ sp_rcontext::~sp_rcontext()
{
if (m_var_table)
free_blobs(m_var_table);
+
+ // Leave m_handlers, m_handler_call_stack, m_var_items, m_cstack
+ // and m_case_expr_holders untouched.
+ // They are allocated in mem roots and will be freed accordingly.
}
-/*
- Initialize sp_rcontext instance.
+sp_rcontext *sp_rcontext::create(THD *thd,
+ const sp_pcontext *root_parsing_ctx,
+ Field *return_value_fld)
+{
+ sp_rcontext *ctx= new (thd->mem_root) sp_rcontext(root_parsing_ctx,
+ return_value_fld,
+ thd->in_sub_stmt);
- SYNOPSIS
- thd Thread handle
- RETURN
- FALSE on success
- TRUE on error
-*/
+ if (!ctx)
+ return NULL;
-bool sp_rcontext::init(THD *thd)
-{
- uint handler_count= m_root_parsing_ctx->max_handler_index();
-
- in_sub_stmt= thd->in_sub_stmt;
-
- if (init_var_table(thd) || init_var_items())
- return TRUE;
-
- if (!(m_raised_conditions= new (thd->mem_root) Sql_condition_info[handler_count]))
- return TRUE;
-
- return
- !(m_handler=
- (sp_handler_t*)thd->alloc(handler_count * sizeof(sp_handler_t))) ||
- !(m_hstack=
- (uint*)thd->alloc(handler_count * sizeof(uint))) ||
- !(m_in_handler=
- (sp_active_handler_t*)thd->alloc(handler_count *
- sizeof(sp_active_handler_t))) ||
- !(m_cstack=
- (sp_cursor**)thd->alloc(m_root_parsing_ctx->max_cursor_index() *
- sizeof(sp_cursor*))) ||
- !(m_case_expr_holders=
- (Item_cache**)thd->calloc(m_root_parsing_ctx->get_num_case_exprs() *
- sizeof (Item_cache*)));
+ if (ctx->alloc_arrays(thd) ||
+ ctx->init_var_table(thd) ||
+ ctx->init_var_items(thd))
+ {
+ delete ctx;
+ return NULL;
+ }
+
+ return ctx;
}
-/*
- Create and initialize a table to store SP-vars.
+bool sp_rcontext::alloc_arrays(THD *thd)
+{
+ {
+ size_t n= m_root_parsing_ctx->max_cursor_index();
+ m_cstack.reset(
+ static_cast<sp_cursor **> (
+ thd->alloc(n * sizeof (sp_cursor*))),
+ n);
+ }
+
+ {
+ size_t n= m_root_parsing_ctx->get_num_case_exprs();
+ m_case_expr_holders.reset(
+ static_cast<Item_cache **> (
+ thd->calloc(n * sizeof (Item_cache*))),
+ n);
+ }
+
+ return !m_cstack.array() || !m_case_expr_holders.array();
+}
- SYNOPSIS
- thd Thread handler.
- RETURN
- FALSE on success
- TRUE on error
-*/
-bool
-sp_rcontext::init_var_table(THD *thd)
+bool sp_rcontext::init_var_table(THD *thd)
{
List<Create_field> field_def_lst;
if (!m_root_parsing_ctx->max_var_index())
- return FALSE;
+ return false;
m_root_parsing_ctx->retrieve_field_definitions(&field_def_lst);
DBUG_ASSERT(field_def_lst.elements == m_root_parsing_ctx->max_var_index());
-
+
if (!(m_var_table= create_virtual_tmp_table(thd, field_def_lst)))
- return TRUE;
+ return true;
- m_var_table->copy_blobs= TRUE;
- m_var_table->alias.set("", 0, table_alias_charset);
+ m_var_table->copy_blobs= true;
+ m_var_table->alias.set("", 0, m_var_table->alias.charset());
- return FALSE;
+ return false;
}
-/*
- Create and initialize an Item-adapter (Item_field) for each SP-var field.
-
- RETURN
- FALSE on success
- TRUE on error
-*/
-
-bool
-sp_rcontext::init_var_items()
+bool sp_rcontext::init_var_items(THD *thd)
{
- uint idx;
uint num_vars= m_root_parsing_ctx->max_var_index();
- if (!(m_var_items= (Item**) sql_alloc(num_vars * sizeof (Item *))))
- return TRUE;
+ m_var_items.reset(
+ static_cast<Item **> (
+ thd->alloc(num_vars * sizeof (Item *))),
+ num_vars);
+
+ if (!m_var_items.array())
+ return true;
- for (idx = 0; idx < num_vars; ++idx)
+ for (uint idx = 0; idx < num_vars; ++idx)
{
if (!(m_var_items[idx]= new Item_field(m_var_table->field[idx])))
- return TRUE;
+ return true;
}
- return FALSE;
+ return false;
}
-bool
-sp_rcontext::set_return_value(THD *thd, Item **return_value_item)
+bool sp_rcontext::set_return_value(THD *thd, Item **return_value_item)
{
DBUG_ASSERT(m_return_value_fld);
- m_return_value_set = TRUE;
+ m_return_value_set = true;
return sp_eval_expr(thd, m_return_value_fld, return_value_item);
}
-#define IS_WARNING_CONDITION(S) ((S)[0] == '0' && (S)[1] == '1')
-#define IS_NOT_FOUND_CONDITION(S) ((S)[0] == '0' && (S)[1] == '2')
-#define IS_EXCEPTION_CONDITION(S) ((S)[0] != '0' || (S)[1] > '2')
-
-/**
- Find an SQL handler for the given error.
-
- SQL handlers are pushed on the stack m_handler, with the latest/innermost
- one on the top; we then search for matching handlers from the top and
- down.
-
- We search through all the handlers, looking for the most specific one
- (sql_errno more specific than sqlstate more specific than the rest).
- Note that mysql error code handlers is a MySQL extension, not part of
- the standard.
-
- SQL handlers for warnings are searched in the current scope only.
-
- SQL handlers for errors are searched in the current and in outer scopes.
- That's why finding and activation of handler must be separated: an errror
- handler might be located in the outer scope, which is not active at the
- moment. Before such handler can be activated, execution flow should
- unwind to that scope.
-
- Found SQL handler is remembered in m_hfound for future activation.
- If no handler is found, m_hfound is -1.
-
- @param thd Thread handle
- @param sql_errno The error code
- @param sqlstate The error SQL state
- @param level The error level
- @param msg The error message
-
- @retval TRUE if an SQL handler was found
- @retval FALSE otherwise
-*/
-
-bool
-sp_rcontext::find_handler(THD *thd,
- uint sql_errno,
- const char *sqlstate,
- MYSQL_ERROR::enum_warning_level level,
- const char *msg)
+bool sp_rcontext::push_cursor(sp_lex_keeper *lex_keeper,
+ sp_instr_cpush *i)
{
- int i= m_hcount;
-
- /* Reset previously found handler. */
- m_hfound= -1;
-
/*
- If this is a fatal sub-statement error, and this runtime
- context corresponds to a sub-statement, no CONTINUE/EXIT
- handlers from this context are applicable: try to locate one
- in the outer scope.
+ We should create cursors in the callers arena, as
+ it could be (and usually is) used in several instructions.
*/
- if (thd->is_fatal_sub_stmt_error && in_sub_stmt)
- i= 0;
-
- /* Search handlers from the latest (innermost) to the oldest (outermost) */
- while (i--)
- {
- sp_cond_type_t *cond= m_handler[i].cond;
- int j= m_ihsp;
-
- /* Check active handlers, to avoid invoking one recursively */
- while (j--)
- if (m_in_handler[j].ip == m_handler[i].handler)
- break;
- if (j >= 0)
- continue; // Already executing this handler
+ sp_cursor *c= new (callers_arena->mem_root) sp_cursor(lex_keeper, i);
- switch (cond->type)
- {
- case sp_cond_type_t::number:
- if (sql_errno == cond->mysqlerr &&
- (m_hfound < 0 || m_handler[m_hfound].cond->type > sp_cond_type_t::number))
- m_hfound= i; // Always the most specific
- break;
- case sp_cond_type_t::state:
- if (strcmp(sqlstate, cond->sqlstate) == 0 &&
- (m_hfound < 0 || m_handler[m_hfound].cond->type > sp_cond_type_t::state))
- m_hfound= i;
- break;
- case sp_cond_type_t::warning:
- if ((IS_WARNING_CONDITION(sqlstate) ||
- level == MYSQL_ERROR::WARN_LEVEL_WARN) &&
- m_hfound < 0)
- m_hfound= i;
- break;
- case sp_cond_type_t::notfound:
- if (IS_NOT_FOUND_CONDITION(sqlstate) && m_hfound < 0)
- m_hfound= i;
- break;
- case sp_cond_type_t::exception:
- if (IS_EXCEPTION_CONDITION(sqlstate) &&
- level == MYSQL_ERROR::WARN_LEVEL_ERROR &&
- m_hfound < 0)
- m_hfound= i;
- break;
- }
- }
-
- if (m_hfound >= 0)
- {
- DBUG_ASSERT((uint) m_hfound < m_root_parsing_ctx->max_handler_index());
-
- m_raised_conditions[m_hfound].clear();
- m_raised_conditions[m_hfound].set(sql_errno, sqlstate, level, msg);
-
- return TRUE;
- }
+ if (c == NULL)
+ return true;
- /*
- Only "exception conditions" are propagated to handlers in calling
- contexts. If no handler is found locally for a "completion condition"
- (warning or "not found") we will simply resume execution.
- */
- if (m_prev_runtime_ctx && IS_EXCEPTION_CONDITION(sqlstate) &&
- level == MYSQL_ERROR::WARN_LEVEL_ERROR)
- {
- return m_prev_runtime_ctx->find_handler(thd, sql_errno, sqlstate,
- level, msg);
- }
-
- return FALSE;
+ m_cstack[m_ccount++]= c;
+ return false;
}
-void
-sp_rcontext::push_cursor(sp_lex_keeper *lex_keeper, sp_instr_cpush *i)
-{
- DBUG_ENTER("sp_rcontext::push_cursor");
- DBUG_ASSERT(m_ccount < m_root_parsing_ctx->max_cursor_index());
- m_cstack[m_ccount++]= new sp_cursor(lex_keeper, i);
- DBUG_PRINT("info", ("m_ccount: %d", m_ccount));
- DBUG_VOID_RETURN;
-}
-void
-sp_rcontext::pop_cursors(uint count)
+void sp_rcontext::pop_cursors(uint count)
{
- DBUG_ENTER("sp_rcontext::pop_cursors");
DBUG_ASSERT(m_ccount >= count);
+
while (count--)
- {
delete m_cstack[--m_ccount];
- }
- DBUG_PRINT("info", ("m_ccount: %d", m_ccount));
- DBUG_VOID_RETURN;
}
-void
-sp_rcontext::push_handler(struct sp_cond_type *cond, uint h, int type)
-{
- DBUG_ENTER("sp_rcontext::push_handler");
- DBUG_ASSERT(m_hcount < m_root_parsing_ctx->max_handler_index());
-
- m_handler[m_hcount].cond= cond;
- m_handler[m_hcount].handler= h;
- m_handler[m_hcount].type= type;
- m_hcount+= 1;
- DBUG_PRINT("info", ("m_hcount: %d", m_hcount));
- DBUG_VOID_RETURN;
-}
-
-void
-sp_rcontext::pop_handlers(uint count)
+bool sp_rcontext::push_handler(sp_handler *handler, uint first_ip)
{
- DBUG_ENTER("sp_rcontext::pop_handlers");
- DBUG_ASSERT(m_hcount >= count);
+ /*
+ We should create handler entries in the callers arena, as
+ they could be (and usually are) used in several instructions.
+ */
+ sp_handler_entry *he=
+ new (callers_arena->mem_root) sp_handler_entry(handler, first_ip);
- m_hcount-= count;
+ if (he == NULL)
+ return true;
- DBUG_PRINT("info", ("m_hcount: %d", m_hcount));
- DBUG_VOID_RETURN;
+ return m_handlers.append(he);
}
-void
-sp_rcontext::push_hstack(uint h)
-{
- DBUG_ENTER("sp_rcontext::push_hstack");
- DBUG_ASSERT(m_hsp < m_root_parsing_ctx->max_handler_index());
-
- m_hstack[m_hsp++]= h;
- DBUG_PRINT("info", ("m_hsp: %d", m_hsp));
- DBUG_VOID_RETURN;
-}
-
-uint
-sp_rcontext::pop_hstack()
+void sp_rcontext::pop_handlers(size_t count)
{
- uint handler;
- DBUG_ENTER("sp_rcontext::pop_hstack");
- DBUG_ASSERT(m_hsp);
-
- handler= m_hstack[--m_hsp];
+ DBUG_ASSERT(m_handlers.elements() >= count);
- DBUG_PRINT("info", ("m_hsp: %d", m_hsp));
- DBUG_RETURN(handler);
+ for (size_t i= 0; i < count; ++i)
+ m_handlers.pop();
}
-/**
- Prepare found handler to be executed.
-
- @retval TRUE if an SQL handler is activated (was found) and IP of the
- first handler instruction.
- @retval FALSE if there is no active handler
-*/
-bool
-sp_rcontext::activate_handler(THD *thd,
- uint *ip,
- sp_instr *instr,
- Query_arena *execute_arena,
- Query_arena *backup_arena)
+bool sp_rcontext::handle_sql_condition(THD *thd,
+ uint *ip,
+ const sp_instr *cur_spi)
{
- if (m_hfound < 0)
- return FALSE;
+ DBUG_ENTER("sp_rcontext::handle_sql_condition");
- switch (m_handler[m_hfound].type) {
- case SP_HANDLER_NONE:
- break;
+ /*
+ If this is a fatal sub-statement error, and this runtime
+ context corresponds to a sub-statement, no CONTINUE/EXIT
+ handlers from this context are applicable: try to locate one
+ in the outer scope.
+ */
+ if (thd->is_fatal_sub_stmt_error && m_in_sub_stmt)
+ DBUG_RETURN(false);
- case SP_HANDLER_CONTINUE:
- thd->restore_active_arena(execute_arena, backup_arena);
- thd->set_n_backup_active_arena(execute_arena, backup_arena);
- push_hstack(instr->get_cont_dest());
+ Diagnostics_area *da= thd->get_stmt_da();
+ const sp_handler *found_handler= NULL;
+ const Sql_condition *found_condition= NULL;
- /* Fall through */
+ if (thd->is_error())
+ {
+ found_handler=
+ cur_spi->m_ctx->find_handler(da->get_sqlstate(),
+ da->sql_errno(),
+ Sql_condition::WARN_LEVEL_ERROR);
+
+ if (found_handler)
+ found_condition= da->get_error_condition();
+
+ /*
+ Found condition can be NULL if the diagnostics area was full
+ when the error was raised. It can also be NULL if
+ Diagnostics_area::set_error_status(uint sql_error) was used.
+ In these cases, make a temporary Sql_condition here so the
+ error can be handled.
+ */
+ if (!found_condition)
+ {
+ Sql_condition *condition=
+ new (callers_arena->mem_root) Sql_condition(callers_arena->mem_root);
+ condition->set(da->sql_errno(), da->get_sqlstate(),
+ Sql_condition::WARN_LEVEL_ERROR,
+ da->message());
+ found_condition= condition;
+ }
+ }
+ else if (da->current_statement_warn_count())
+ {
+ Diagnostics_area::Sql_condition_iterator it= da->sql_conditions();
+ const Sql_condition *c;
- default:
- /* End aborted result set. */
+ // Here we need to find the last warning/note from the stack.
+ // In MySQL most substantial warning is the last one.
+ // (We could have used a reverse iterator here if one existed)
- if (end_partial_result_set)
- thd->protocol->end_partial_result_set(thd);
+ while ((c= it++))
+ {
+ if (c->get_level() == Sql_condition::WARN_LEVEL_WARN ||
+ c->get_level() == Sql_condition::WARN_LEVEL_NOTE)
+ {
+ const sp_handler *handler=
+ cur_spi->m_ctx->find_handler(c->get_sqlstate(),
+ c->get_sql_errno(),
+ c->get_level());
+ if (handler)
+ {
+ found_handler= handler;
+ found_condition= c;
+ }
+ }
+ }
+ }
- /* Enter handler. */
+ if (!found_handler)
+ DBUG_RETURN(false);
- DBUG_ASSERT(m_ihsp < m_root_parsing_ctx->max_handler_index());
- DBUG_ASSERT(m_hfound >= 0);
+ // At this point, we know that:
+ // - there is a pending SQL-condition (error or warning);
+ // - there is an SQL-handler for it.
- m_in_handler[m_ihsp].ip= m_handler[m_hfound].handler;
- m_in_handler[m_ihsp].index= m_hfound;
- m_ihsp++;
+ DBUG_ASSERT(found_condition);
- DBUG_PRINT("info", ("Entering handler..."));
- DBUG_PRINT("info", ("m_ihsp: %d", m_ihsp));
+ sp_handler_entry *handler_entry= NULL;
+ for (size_t i= 0; i < m_handlers.elements(); ++i)
+ {
+ sp_handler_entry *h= m_handlers.at(i);
- /* Reset error state. */
+ if (h->handler == found_handler)
+ {
+ handler_entry= h;
+ break;
+ }
+ }
- thd->clear_error();
- thd->reset_killed(); // Some errors set thd->killed
- // (e.g. "bad data").
+ /*
+ handler_entry usually should not be NULL here, as that indicates
+ that the parser context thinks a HANDLER should be activated,
+ but the runtime context cannot find it.
+
+ However, this can happen (and this is in line with the Standard)
+ if SQL-condition has been raised before DECLARE HANDLER instruction
+ is processed.
+
+ For example:
+ CREATE PROCEDURE p()
+ BEGIN
+ DECLARE v INT DEFAULT 'get'; -- raises SQL-warning here
+ DECLARE EXIT HANDLER ... -- this handler does not catch the warning
+ END
+ */
+ if (!handler_entry)
+ DBUG_RETURN(false);
- /* Return IP of the activated SQL handler. */
- *ip= m_handler[m_hfound].handler;
+ // Mark active conditions so that they can be deleted when the handler exits.
+ da->mark_sql_conditions_for_removal();
- /* Reset found handler. */
- m_hfound= -1;
- }
+ uint continue_ip= handler_entry->handler->type == sp_handler::CONTINUE ?
+ cur_spi->get_cont_dest() : 0;
- return TRUE;
-}
+ /* End aborted result set. */
+ if (end_partial_result_set)
+ thd->protocol->end_partial_result_set(thd);
-void
-sp_rcontext::exit_handler()
-{
- DBUG_ENTER("sp_rcontext::exit_handler");
- DBUG_ASSERT(m_ihsp);
+ /* Reset error state. */
+ thd->clear_error();
+ thd->killed= NOT_KILLED; // Some errors set thd->killed
+ // (e.g. "bad data").
+
+ /* Add a frame to handler-call-stack. */
+ Sql_condition_info *cond_info=
+ new (callers_arena->mem_root) Sql_condition_info(found_condition,
+ callers_arena);
+ Handler_call_frame *frame=
+ new (callers_arena->mem_root) Handler_call_frame(cond_info, continue_ip);
+ m_handler_call_stack.append(frame);
- uint hindex= m_in_handler[m_ihsp-1].index;
- m_raised_conditions[hindex].clear();
- m_ihsp-= 1;
+ *ip= handler_entry->first_ip;
- DBUG_PRINT("info", ("m_ihsp: %d", m_ihsp));
- DBUG_VOID_RETURN;
+ DBUG_RETURN(true);
}
-Sql_condition_info* sp_rcontext::raised_condition() const
+
+uint sp_rcontext::exit_handler(Diagnostics_area *da)
{
- if (m_ihsp > 0)
- {
- uint hindex= m_in_handler[m_ihsp - 1].index;
- Sql_condition_info *raised= & m_raised_conditions[hindex];
- return raised;
- }
+ DBUG_ENTER("sp_rcontext::exit_handler");
+ DBUG_ASSERT(m_handler_call_stack.elements() > 0);
- if (m_prev_runtime_ctx)
- return m_prev_runtime_ctx->raised_condition();
+ Handler_call_frame *f= m_handler_call_stack.pop();
- return NULL;
-}
+ /*
+ Remove the SQL conditions that were present in DA when the
+ handler was activated.
+ */
+ da->remove_marked_sql_conditions();
+ uint continue_ip= f->continue_ip;
-int
-sp_rcontext::set_variable(THD *thd, uint var_idx, Item **value)
-{
- return set_variable(thd, m_var_table->field[var_idx], value);
+ DBUG_RETURN(continue_ip);
}
-int
-sp_rcontext::set_variable(THD *thd, Field *field, Item **value)
+int sp_rcontext::set_variable(THD *thd, Field *field, Item **value)
{
if (!value)
{
@@ -478,25 +378,47 @@ sp_rcontext::set_variable(THD *thd, Field *field, Item **value)
}
-Item *
-sp_rcontext::get_item(uint var_idx)
+Item_cache *sp_rcontext::create_case_expr_holder(THD *thd,
+ const Item *item) const
{
- return m_var_items[var_idx];
+ Item_cache *holder;
+ Query_arena current_arena;
+
+ thd->set_n_backup_active_arena(thd->spcont->callers_arena, &current_arena);
+
+ holder= Item_cache::get_cache(item);
+
+ thd->restore_active_arena(thd->spcont->callers_arena, &current_arena);
+
+ return holder;
}
-Item **
-sp_rcontext::get_item_addr(uint var_idx)
+bool sp_rcontext::set_case_expr(THD *thd, int case_expr_id,
+ Item **case_expr_item_ptr)
{
- return m_var_items + var_idx;
+ Item *case_expr_item= sp_prepare_func_item(thd, case_expr_item_ptr);
+ if (!case_expr_item)
+ return true;
+
+ if (!m_case_expr_holders[case_expr_id] ||
+ m_case_expr_holders[case_expr_id]->result_type() !=
+ case_expr_item->result_type())
+ {
+ m_case_expr_holders[case_expr_id]=
+ create_case_expr_holder(thd, case_expr_item);
+ }
+
+ m_case_expr_holders[case_expr_id]->store(case_expr_item);
+ m_case_expr_holders[case_expr_id]->cache_value();
+ return false;
}
-/*
- *
- * sp_cursor
- *
- */
+///////////////////////////////////////////////////////////////////////////
+// sp_cursor implementation.
+///////////////////////////////////////////////////////////////////////////
+
sp_cursor::sp_cursor(sp_lex_keeper *lex_keeper, sp_instr_cpush *i)
:m_lex_keeper(lex_keeper),
@@ -523,8 +445,7 @@ sp_cursor::sp_cursor(sp_lex_keeper *lex_keeper, sp_instr_cpush *i)
0 in case of success, -1 otherwise
*/
-int
-sp_cursor::open(THD *thd)
+int sp_cursor::open(THD *thd)
{
if (server_side_cursor)
{
@@ -538,8 +459,7 @@ sp_cursor::open(THD *thd)
}
-int
-sp_cursor::close(THD *thd)
+int sp_cursor::close(THD *thd)
{
if (! server_side_cursor)
{
@@ -551,16 +471,14 @@ sp_cursor::close(THD *thd)
}
-void
-sp_cursor::destroy()
+void sp_cursor::destroy()
{
delete server_side_cursor;
- server_side_cursor= 0;
+ server_side_cursor= NULL;
}
-int
-sp_cursor::fetch(THD *thd, List<struct sp_variable> *vars)
+int sp_cursor::fetch(THD *thd, List<sp_variable> *vars)
{
if (! server_side_cursor)
{
@@ -575,7 +493,7 @@ sp_cursor::fetch(THD *thd, List<struct sp_variable> *vars)
}
DBUG_EXECUTE_IF("bug23032_emit_warning",
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
ER_UNKNOWN_ERROR,
ER(ER_UNKNOWN_ERROR)););
@@ -599,108 +517,13 @@ sp_cursor::fetch(THD *thd, List<struct sp_variable> *vars)
}
-/*
- Create an instance of appropriate Item_cache class depending on the
- specified type in the callers arena.
-
- SYNOPSIS
- thd thread handler
- result_type type of the expression
+///////////////////////////////////////////////////////////////////////////
+// sp_cursor::Select_fetch_into_spvars implementation.
+///////////////////////////////////////////////////////////////////////////
- RETURN
- Pointer to valid object on success
- NULL on error
- NOTE
- We should create cache items in the callers arena, as they are used
- between in several instructions.
-*/
-
-Item_cache *
-sp_rcontext::create_case_expr_holder(THD *thd, const Item *item)
-{
- Item_cache *holder;
- Query_arena current_arena;
-
- thd->set_n_backup_active_arena(thd->spcont->callers_arena, &current_arena);
-
- holder= Item_cache::get_cache(item);
-
- thd->restore_active_arena(thd->spcont->callers_arena, &current_arena);
-
- return holder;
-}
-
-
-/*
- Set CASE expression to the specified value.
-
- SYNOPSIS
- thd thread handler
- case_expr_id identifier of the CASE expression
- case_expr_item a value of the CASE expression
-
- RETURN
- FALSE on success
- TRUE on error
-
- NOTE
- The idea is to reuse Item_cache for the expression of the one CASE
- statement. This optimization takes place when there is CASE statement
- inside of a loop. So, in other words, we will use the same object on each
- iteration instead of creating a new one for each iteration.
-
- TODO
- Hypothetically, a type of CASE expression can be different for each
- iteration. For instance, this can happen if the expression contains a
- session variable (something like @@VAR) and its type is changed from one
- iteration to another.
-
- In order to cope with this problem, we check type each time, when we use
- already created object. If the type does not match, we re-create Item.
- This also can (should?) be optimized.
-*/
-
-int
-sp_rcontext::set_case_expr(THD *thd, int case_expr_id, Item **case_expr_item_ptr)
-{
- Item *case_expr_item= sp_prepare_func_item(thd, case_expr_item_ptr);
- if (!case_expr_item)
- return TRUE;
-
- if (!m_case_expr_holders[case_expr_id] ||
- m_case_expr_holders[case_expr_id]->result_type() !=
- case_expr_item->result_type())
- {
- m_case_expr_holders[case_expr_id]=
- create_case_expr_holder(thd, case_expr_item);
- }
-
- m_case_expr_holders[case_expr_id]->store(case_expr_item);
- m_case_expr_holders[case_expr_id]->cache_value();
- return FALSE;
-}
-
-
-Item *
-sp_rcontext::get_case_expr(int case_expr_id)
-{
- return m_case_expr_holders[case_expr_id];
-}
-
-
-Item **
-sp_rcontext::get_case_expr_addr(int case_expr_id)
-{
- return (Item**) m_case_expr_holders + case_expr_id;
-}
-
-
-/***************************************************************************
- Select_fetch_into_spvars
-****************************************************************************/
-
-int Select_fetch_into_spvars::prepare(List<Item> &fields, SELECT_LEX_UNIT *u)
+int sp_cursor::Select_fetch_into_spvars::prepare(List<Item> &fields,
+ SELECT_LEX_UNIT *u)
{
/*
Cache the number of columns in the result set in order to easily
@@ -711,11 +534,11 @@ int Select_fetch_into_spvars::prepare(List<Item> &fields, SELECT_LEX_UNIT *u)
}
-int Select_fetch_into_spvars::send_data(List<Item> &items)
+int sp_cursor::Select_fetch_into_spvars::send_data(List<Item> &items)
{
- List_iterator_fast<struct sp_variable> spvar_iter(*spvar_list);
+ List_iterator_fast<sp_variable> spvar_iter(*spvar_list);
List_iterator_fast<Item> item_iter(items);
- sp_variable_t *spvar;
+ sp_variable *spvar;
Item *item;
/* Must be ensured by the caller */
@@ -728,7 +551,7 @@ int Select_fetch_into_spvars::send_data(List<Item> &items)
for (; spvar= spvar_iter++, item= item_iter++; )
{
if (thd->spcont->set_variable(thd, spvar->offset, &item))
- return 1;
+ return true;
}
- return 0;
+ return false;
}
diff --git a/sql/sp_rcontext.h b/sql/sp_rcontext.h
index 5008a73d96c..ce692024d0d 100644
--- a/sql/sp_rcontext.h
+++ b/sql/sp_rcontext.h
@@ -22,80 +22,18 @@
#endif
#include "sql_class.h" // select_result_interceptor
+#include "sp_pcontext.h" // sp_condition_value
+
+///////////////////////////////////////////////////////////////////////////
+// sp_rcontext declaration.
+///////////////////////////////////////////////////////////////////////////
-struct sp_cond_type;
class sp_cursor;
-struct sp_variable;
class sp_lex_keeper;
class sp_instr_cpush;
class Query_arena;
class sp_head;
-class sp_pcontext;
class Item_cache;
-typedef class st_select_lex_unit SELECT_LEX_UNIT;
-class Server_side_cursor;
-
-#define SP_HANDLER_NONE 0
-#define SP_HANDLER_EXIT 1
-#define SP_HANDLER_CONTINUE 2
-#define SP_HANDLER_UNDO 3
-
-typedef struct
-{
- /** Condition caught by this HANDLER. */
- struct sp_cond_type *cond;
- /** Location (instruction pointer) of the handler code. */
- uint handler;
- /** Handler type (EXIT, CONTINUE). */
- int type;
-} sp_handler_t;
-
-typedef struct
-{
- /** Instruction pointer of the active handler. */
- uint ip;
- /** Handler index of the active handler. */
- uint index;
-} sp_active_handler_t;
-
-
-class Sql_condition_info : public Sql_alloc
-{
-public:
- /** SQL error code. */
- uint m_sql_errno;
-
- /** Error level. */
- MYSQL_ERROR::enum_warning_level m_level;
-
- /** SQLSTATE. */
- char m_sql_state[SQLSTATE_LENGTH + 1];
-
- /** Text message. */
- char m_message[MYSQL_ERRMSG_SIZE];
-
- void set(uint sql_errno, const char* sqlstate,
- MYSQL_ERROR::enum_warning_level level,
- const char* msg)
- {
- m_sql_errno= sql_errno;
- m_level= level;
-
- memcpy(m_sql_state, sqlstate, SQLSTATE_LENGTH);
- m_sql_state[SQLSTATE_LENGTH]= '\0';
-
- strncpy(m_message, msg, MYSQL_ERRMSG_SIZE);
- }
-
- void clear()
- {
- m_sql_errno= 0;
- m_level= MYSQL_ERROR::WARN_LEVEL_ERROR;
-
- m_sql_state[0]= '\0';
- m_message[0]= '\0';
- }
-};
/*
@@ -119,251 +57,412 @@ public:
class sp_rcontext : public Sql_alloc
{
- sp_rcontext(const sp_rcontext &); /* Prevent use of these */
- void operator=(sp_rcontext &);
-
- public:
-
- /*
- Arena used to (re) allocate items on . E.g. reallocate INOUT/OUT
- SP parameters when they don't fit into prealloced items. This
- is common situation with String items. It is used mainly in
- sp_eval_func_item().
- */
- Query_arena *callers_arena;
-
- /*
- End a open result set before start executing a continue/exit
- handler if one is found as otherwise the client will hang
- due to a violation of the client/server protocol.
- */
- bool end_partial_result_set;
-
-#ifndef DBUG_OFF
- /*
- The routine for which this runtime context is created. Used for checking
- if correct runtime context is used for variable handling.
- */
- sp_head *sp;
-#endif
-
- sp_rcontext(sp_pcontext *root_parsing_ctx, Field *return_value_fld,
- sp_rcontext *prev_runtime_ctx);
- bool init(THD *thd);
+public:
+ /// Construct and properly initialize a new sp_rcontext instance. The static
+ /// create-function is needed because we need a way to return an error from
+ /// the constructor.
+ ///
+ /// @param thd Thread handle.
+ /// @param root_parsing_ctx Top-level parsing context for this stored program.
+ /// @param return_value_fld Field object to store the return value
+ /// (for stored functions only).
+ ///
+ /// @return valid sp_rcontext object or NULL in case of OOM-error.
+ static sp_rcontext *create(THD *thd,
+ const sp_pcontext *root_parsing_ctx,
+ Field *return_value_fld);
~sp_rcontext();
- int
- set_variable(THD *thd, uint var_idx, Item **value);
-
- Item *
- get_item(uint var_idx);
+private:
+ sp_rcontext(const sp_pcontext *root_parsing_ctx,
+ Field *return_value_fld,
+ bool in_sub_stmt);
- Item **
- get_item_addr(uint var_idx);
+ // Prevent use of copying constructor and operator.
+ sp_rcontext(const sp_rcontext &);
+ void operator=(sp_rcontext &);
- bool
- set_return_value(THD *thd, Item **return_value_item);
+private:
+ /// This is an auxiliary class to store entering instruction pointer for an
+ /// SQL-handler.
+ class sp_handler_entry : public Sql_alloc
+ {
+ public:
+ /// Handler definition (from parsing context).
+ const sp_handler *handler;
+
+ /// Instruction pointer to the first instruction.
+ uint first_ip;
+
+ /// The constructor.
+ ///
+ /// @param _handler sp_handler object.
+ /// @param _first_ip first instruction pointer.
+ sp_handler_entry(const sp_handler *_handler, uint _first_ip)
+ :handler(_handler), first_ip(_first_ip)
+ { }
+ };
- inline bool
- is_return_value_set() const
+public:
+ /// This class stores basic information about SQL-condition, such as:
+ /// - SQL error code;
+ /// - error level;
+ /// - SQLSTATE;
+ /// - text message.
+ ///
+ /// It's used to organize runtime SQL-handler call stack.
+ ///
+ /// Standard Sql_condition class can not be used, because we don't always have
+ /// an Sql_condition object for an SQL-condition in Diagnostics_area.
+ ///
+ /// Eventually, this class should be moved to sql_error.h, and be a part of
+ /// standard SQL-condition processing (Diagnostics_area should contain an
+ /// object for active SQL-condition, not just information stored in DA's
+ /// fields).
+ class Sql_condition_info : public Sql_alloc
{
- return m_return_value_set;
- }
+ public:
+ /// SQL error code.
+ uint sql_errno;
+
+ /// Error level.
+ Sql_condition::enum_warning_level level;
+
+ /// SQLSTATE.
+ char sql_state[SQLSTATE_LENGTH + 1];
+
+ /// Text message.
+ char *message;
+
+ /// The constructor.
+ ///
+ /// @param _sql_condition The SQL condition.
+ /// @param arena Query arena for SP
+ Sql_condition_info(const Sql_condition *_sql_condition,
+ Query_arena *arena)
+ :sql_errno(_sql_condition->get_sql_errno()),
+ level(_sql_condition->get_level())
+ {
+ memcpy(sql_state, _sql_condition->get_sqlstate(), SQLSTATE_LENGTH);
+ sql_state[SQLSTATE_LENGTH]= '\0';
+
+ message= strdup_root(arena->mem_root, _sql_condition->get_message_text());
+ }
+ };
- /*
- SQL handlers support.
- */
+private:
+ /// This class represents a call frame of SQL-handler (one invocation of a
+ /// handler). Basically, it's needed to store continue instruction pointer for
+ /// CONTINUE SQL-handlers.
+ class Handler_call_frame : public Sql_alloc
+ {
+ public:
+ /// SQL-condition, triggered handler activation.
+ const Sql_condition_info *sql_condition;
+
+ /// Continue-instruction-pointer for CONTINUE-handlers.
+ /// The attribute contains 0 for EXIT-handlers.
+ uint continue_ip;
+
+ /// The constructor.
+ ///
+ /// @param _sql_condition SQL-condition, triggered handler activation.
+ /// @param _continue_ip Continue instruction pointer.
+ Handler_call_frame(const Sql_condition_info *_sql_condition,
+ uint _continue_ip)
+ :sql_condition(_sql_condition),
+ continue_ip(_continue_ip)
+ { }
+ };
- void push_handler(struct sp_cond_type *cond, uint h, int type);
+public:
+ /// Arena used to (re) allocate items on. E.g. reallocate INOUT/OUT
+ /// SP-variables when they don't fit into prealloced items. This is common
+ /// situation with String items. It is used mainly in sp_eval_func_item().
+ Query_arena *callers_arena;
- void pop_handlers(uint count);
+ /// Flag to end an open result set before start executing an SQL-handler
+ /// (if one is found). Otherwise the client will hang due to a violation
+ /// of the client/server protocol.
+ bool end_partial_result_set;
- bool
- find_handler(THD *thd,
- uint sql_errno,
- const char *sqlstate,
- MYSQL_ERROR::enum_warning_level level,
- const char *msg);
+#ifndef DBUG_OFF
+ /// The stored program for which this runtime context is created. Used for
+ /// checking if correct runtime context is used for variable handling.
+ sp_head *sp;
+#endif
- Sql_condition_info *raised_condition() const;
+ /////////////////////////////////////////////////////////////////////////
+ // SP-variables.
+ /////////////////////////////////////////////////////////////////////////
- void
- push_hstack(uint h);
+ int set_variable(THD *thd, uint var_idx, Item **value)
+ { return set_variable(thd, m_var_table->field[var_idx], value); }
- uint
- pop_hstack();
+ Item *get_item(uint var_idx) const
+ { return m_var_items[var_idx]; }
- bool
- activate_handler(THD *thd,
- uint *ip,
- sp_instr *instr,
- Query_arena *execute_arena,
- Query_arena *backup_arena);
+ Item **get_item_addr(uint var_idx) const
+ { return m_var_items.array() + var_idx; }
+ bool set_return_value(THD *thd, Item **return_value_item);
- void
- exit_handler();
+ bool is_return_value_set() const
+ { return m_return_value_set; }
- void
- push_cursor(sp_lex_keeper *lex_keeper, sp_instr_cpush *i);
+ /////////////////////////////////////////////////////////////////////////
+ // SQL-handlers.
+ /////////////////////////////////////////////////////////////////////////
- void
- pop_cursors(uint count);
+ /// Create a new sp_handler_entry instance and push it to the handler call
+ /// stack.
+ ///
+ /// @param handler SQL-handler object.
+ /// @param first_ip First instruction pointer of the handler.
+ ///
+ /// @return error flag.
+ /// @retval false on success.
+ /// @retval true on error.
+ bool push_handler(sp_handler *handler, uint first_ip);
- inline void
- pop_all_cursors()
- {
- pop_cursors(m_ccount);
- }
+ /// Pop and delete given number of sp_handler_entry instances from the handler
+ /// call stack.
+ ///
+ /// @param count Number of handler entries to pop & delete.
+ void pop_handlers(size_t count);
- inline sp_cursor *
- get_cursor(uint i)
+ const Sql_condition_info *raised_condition() const
{
- return m_cstack[i];
+ return m_handler_call_stack.elements() ?
+ (*m_handler_call_stack.back())->sql_condition : NULL;
}
- /*
- CASE expressions support.
- */
+ /// Handle current SQL condition (if any).
+ ///
+ /// This is the public-interface function to handle SQL conditions in
+ /// stored routines.
+ ///
+ /// @param thd Thread handle.
+ /// @param ip[out] Instruction pointer to the first handler
+ /// instruction.
+ /// @param cur_spi Current SP instruction.
+ ///
+ /// @retval true if an SQL-handler has been activated. That means, all of
+ /// the following conditions are satisfied:
+ /// - the SP-instruction raised SQL-condition(s),
+ /// - and there is an SQL-handler to process at least one of those
+ /// SQL-conditions,
+ /// - and that SQL-handler has been activated.
+ /// Note, that the return value has nothing to do with "error flag"
+ /// semantics.
+ ///
+ /// @retval false otherwise.
+ bool handle_sql_condition(THD *thd,
+ uint *ip,
+ const sp_instr *cur_spi);
+
+ /// Remove latest call frame from the handler call stack.
+ ///
+ /// @param da Diagnostics area containing handled conditions.
+ ///
+ /// @return continue instruction pointer of the removed handler.
+ uint exit_handler(Diagnostics_area *da);
+
+ /////////////////////////////////////////////////////////////////////////
+ // Cursors.
+ /////////////////////////////////////////////////////////////////////////
+
+ /// Create a new sp_cursor instance and push it to the cursor stack.
+ ///
+ /// @param lex_keeper SP-instruction execution helper.
+ /// @param i Cursor-push instruction.
+ ///
+ /// @return error flag.
+ /// @retval false on success.
+ /// @retval true on error.
+ bool push_cursor(sp_lex_keeper *lex_keeper, sp_instr_cpush *i);
+
+ /// Pop and delete given number of sp_cursor instance from the cursor stack.
+ ///
+ /// @param count Number of cursors to pop & delete.
+ void pop_cursors(uint count);
+
+ void pop_all_cursors()
+ { pop_cursors(m_ccount); }
+
+ sp_cursor *get_cursor(uint i) const
+ { return m_cstack[i]; }
+
+ /////////////////////////////////////////////////////////////////////////
+ // CASE expressions.
+ /////////////////////////////////////////////////////////////////////////
+
+ /// Set CASE expression to the specified value.
+ ///
+ /// @param thd Thread handler.
+ /// @param case_expr_id The CASE expression identifier.
+ /// @param case_expr_item The CASE expression value
+ ///
+ /// @return error flag.
+ /// @retval false on success.
+ /// @retval true on error.
+ ///
+ /// @note The idea is to reuse Item_cache for the expression of the one
+ /// CASE statement. This optimization takes place when there is CASE
+ /// statement inside of a loop. So, in other words, we will use the same
+ /// object on each iteration instead of creating a new one for each
+ /// iteration.
+ ///
+ /// TODO
+ /// Hypothetically, a type of CASE expression can be different for each
+ /// iteration. For instance, this can happen if the expression contains
+ /// a session variable (something like @@VAR) and its type is changed
+ /// from one iteration to another.
+ ///
+ /// In order to cope with this problem, we check type each time, when we
+ /// use already created object. If the type does not match, we re-create
+ /// Item. This also can (should?) be optimized.
+ bool set_case_expr(THD *thd, int case_expr_id, Item **case_expr_item_ptr);
+
+ Item *get_case_expr(int case_expr_id) const
+ { return m_case_expr_holders[case_expr_id]; }
+
+ Item ** get_case_expr_addr(int case_expr_id) const
+ { return (Item**) m_case_expr_holders.array() + case_expr_id; }
- int
- set_case_expr(THD *thd, int case_expr_id, Item **case_expr_item_ptr);
+private:
+ /// Internal function to allocate memory for arrays.
+ ///
+ /// @param thd Thread handle.
+ ///
+ /// @return error flag: false on success, true in case of failure.
+ bool alloc_arrays(THD *thd);
+
+ /// Create and initialize a table to store SP-variables.
+ ///
+ /// @param thd Thread handle.
+ ///
+ /// @return error flag.
+ /// @retval false on success.
+ /// @retval true on error.
+ bool init_var_table(THD *thd);
- Item *
- get_case_expr(int case_expr_id);
+ /// Create and initialize an Item-adapter (Item_field) for each SP-var field.
+ ///
+ /// @param thd Thread handle.
+ ///
+ /// @return error flag.
+ /// @retval false on success.
+ /// @retval true on error.
+ bool init_var_items(THD *thd);
+
+ /// Create an instance of appropriate Item_cache class depending on the
+ /// specified type in the callers arena.
+ ///
+ /// @note We should create cache items in the callers arena, as they are
+ /// used in several instructions.
+ ///
+ /// @param thd Thread handler.
+ /// @param item Item to get the expression type.
+ ///
+ /// @return Pointer to valid object on success, or NULL in case of error.
+ Item_cache *create_case_expr_holder(THD *thd, const Item *item) const;
- Item **
- get_case_expr_addr(int case_expr_id);
+ int set_variable(THD *thd, Field *field, Item **value);
private:
- sp_pcontext *m_root_parsing_ctx;
+ /// Top-level (root) parsing context for this runtime context.
+ const sp_pcontext *m_root_parsing_ctx;
- /* Virtual table for storing variables. */
+ /// Virtual table for storing SP-variables.
TABLE *m_var_table;
- /*
- Collection of Item_field proxies, each of them points to the corresponding
- field in m_var_table.
- */
- Item **m_var_items;
+ /// Collection of Item_field proxies, each of them points to the
+ /// corresponding field in m_var_table.
+ Bounds_checked_array<Item *> m_var_items;
- /*
- This is a pointer to a field, which should contain return value for stored
- functions (only). For stored procedures, this pointer is NULL.
- */
+ /// This is a pointer to a field, which should contain return value for
+ /// stored functions (only). For stored procedures, this pointer is NULL.
Field *m_return_value_fld;
- /*
- Indicates whether the return value (in m_return_value_fld) has been set
- during execution.
- */
+ /// Indicates whether the return value (in m_return_value_fld) has been
+ /// set during execution.
bool m_return_value_set;
- /**
- TRUE if the context is created for a sub-statement.
- */
- bool in_sub_stmt;
+ /// Flag to tell if the runtime context is created for a sub-statement.
+ bool m_in_sub_stmt;
- sp_handler_t *m_handler; // Visible handlers
+ /// Stack of visible handlers.
+ Dynamic_array<sp_handler_entry *> m_handlers;
- /**
- SQL conditions caught by each handler.
- This is an array indexed by handler index.
- */
- Sql_condition_info *m_raised_conditions;
+ /// Stack of caught SQL conditions.
+ Dynamic_array<Handler_call_frame *> m_handler_call_stack;
- uint m_hcount; // Stack pointer for m_handler
- uint *m_hstack; // Return stack for continue handlers
- uint m_hsp; // Stack pointer for m_hstack
- /** Active handler stack. */
- sp_active_handler_t *m_in_handler;
- uint m_ihsp; // Stack pointer for m_in_handler
- int m_hfound; // Set by find_handler; -1 if not found
+ /// Stack of cursors.
+ Bounds_checked_array<sp_cursor *> m_cstack;
- sp_cursor **m_cstack;
+ /// Current number of cursors in m_cstack.
uint m_ccount;
- Item_cache **m_case_expr_holders;
-
- /* Previous runtime context (NULL if none) */
- sp_rcontext *m_prev_runtime_ctx;
-
-private:
- bool init_var_table(THD *thd);
- bool init_var_items();
-
- Item_cache *create_case_expr_holder(THD *thd, const Item *item);
-
- int set_variable(THD *thd, Field *field, Item **value);
+ /// Array of CASE expression holders.
+ Bounds_checked_array<Item_cache *> m_case_expr_holders;
}; // class sp_rcontext : public Sql_alloc
+///////////////////////////////////////////////////////////////////////////
+// sp_cursor declaration.
+///////////////////////////////////////////////////////////////////////////
-/*
- An interceptor of cursor result set used to implement
- FETCH <cname> INTO <varlist>.
-*/
-
-class Select_fetch_into_spvars: public select_result_interceptor
-{
- List<struct sp_variable> *spvar_list;
- uint field_count;
-public:
- Select_fetch_into_spvars() {} /* Remove gcc warning */
- uint get_field_count() { return field_count; }
- void set_spvar_list(List<struct sp_variable> *vars) { spvar_list= vars; }
-
- virtual bool send_eof() { return FALSE; }
- virtual int send_data(List<Item> &items);
- virtual int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
-};
-
+class Server_side_cursor;
+typedef class st_select_lex_unit SELECT_LEX_UNIT;
/* A mediator between stored procedures and server side cursors */
class sp_cursor : public Sql_alloc
{
-public:
+private:
+ /// An interceptor of cursor result set used to implement
+ /// FETCH <cname> INTO <varlist>.
+ class Select_fetch_into_spvars: public select_result_interceptor
+ {
+ List<sp_variable> *spvar_list;
+ uint field_count;
+ public:
+ Select_fetch_into_spvars() {} /* Remove gcc warning */
+ uint get_field_count() { return field_count; }
+ void set_spvar_list(List<sp_variable> *vars) { spvar_list= vars; }
+
+ virtual bool send_eof() { return FALSE; }
+ virtual int send_data(List<Item> &items);
+ virtual int prepare(List<Item> &list, SELECT_LEX_UNIT *u);
+};
+public:
sp_cursor(sp_lex_keeper *lex_keeper, sp_instr_cpush *i);
virtual ~sp_cursor()
- {
- destroy();
- }
+ { destroy(); }
- sp_lex_keeper *
- get_lex_keeper() { return m_lex_keeper; }
+ sp_lex_keeper *get_lex_keeper() { return m_lex_keeper; }
- int
- open(THD *thd);
+ int open(THD *thd);
- int
- close(THD *thd);
+ int close(THD *thd);
- inline bool
- is_open()
- {
- return test(server_side_cursor);
- }
+ my_bool is_open()
+ { return test(server_side_cursor); }
- int
- fetch(THD *, List<struct sp_variable> *vars);
+ int fetch(THD *, List<sp_variable> *vars);
- inline sp_instr_cpush *
- get_instr()
- {
- return m_i;
- }
+ sp_instr_cpush *get_instr()
+ { return m_i; }
private:
-
Select_fetch_into_spvars result;
sp_lex_keeper *m_lex_keeper;
Server_side_cursor *server_side_cursor;
sp_instr_cpush *m_i; // My push instruction
- void
- destroy();
+ void destroy();
}; // class sp_cursor : public Sql_alloc
diff --git a/sql/spatial.h b/sql/spatial.h
index 6df6e37e9b8..b0e4b83bf6a 100644
--- a/sql/spatial.h
+++ b/sql/spatial.h
@@ -196,8 +196,8 @@ struct MBR
if (d != mbr->dimension() || d <= 0 || contains(mbr) || within(mbr))
return 0;
- MBR intersection(max(xmin, mbr->xmin), max(ymin, mbr->ymin),
- min(xmax, mbr->xmax), min(ymax, mbr->ymax));
+ MBR intersection(MY_MAX(xmin, mbr->xmin), MY_MAX(ymin, mbr->ymin),
+ MY_MIN(xmax, mbr->xmax), MY_MIN(ymax, mbr->ymax));
return (d == intersection.dimension());
}
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index e2e3647ff2a..80e7d405a04 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -829,7 +829,8 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables)
goto end;
table->use_all_columns();
(void) my_init_dynamic_array(&acl_users,sizeof(ACL_USER), 50, 100, MYF(0));
- username_char_length= min(table->field[1]->char_length(), USERNAME_CHAR_LENGTH);
+ username_char_length= MY_MIN(table->field[1]->char_length(),
+ USERNAME_CHAR_LENGTH);
password_length= table->field[2]->field_length /
table->field[2]->charset()->mbmaxlen;
if (password_length < SCRAMBLED_PASSWORD_CHAR_LENGTH_323)
@@ -1210,9 +1211,9 @@ my_bool acl_reload(THD *thd)
Execution might have been interrupted; only print the error message
if an error condition has been raised.
*/
- if (thd->stmt_da->is_error())
+ if (thd->get_stmt_da()->is_error())
sql_print_error("Fatal error: Can't open and lock privilege tables: %s",
- thd->stmt_da->message());
+ thd->get_stmt_da()->message());
goto end;
}
@@ -1331,7 +1332,7 @@ static ulong get_sort(uint count,...)
chars= 128; // Marker that chars existed
}
}
- sort= (sort << 8) + (wild_pos ? min(wild_pos, 127U) : chars);
+ sort= (sort << 8) + (wild_pos ? MY_MIN(wild_pos, 127U) : chars);
}
va_end(args);
return sort;
@@ -1832,6 +1833,13 @@ bool acl_check_host(const char *host, const char *ip)
}
}
mysql_mutex_unlock(&acl_cache->lock);
+ if (ip != NULL)
+ {
+ /* Increment HOST_CACHE.COUNT_HOST_ACL_ERRORS. */
+ Host_errors errors;
+ errors.m_host_acl= 1;
+ inc_host_errors(ip, &errors);
+ }
return 1; // Host is not allowed
}
@@ -1972,7 +1980,7 @@ bool change_password(THD *thd, const char *host, const char *user,
set_user_plugin(acl_user, new_password_len);
}
else
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_SET_PASSWORD_AUTH_PLUGIN, ER(ER_SET_PASSWORD_AUTH_PLUGIN));
if (update_user_table(thd, table,
@@ -4566,7 +4574,6 @@ bool check_grant(THD *thd, ulong want_access, TABLE_LIST *tables,
uint i;
ulong orig_want_access= want_access;
my_bool locked= 0;
- GRANT_TABLE *grant_table;
DBUG_ENTER("check_grant");
DBUG_ASSERT(number > 0);
@@ -4646,17 +4653,32 @@ bool check_grant(THD *thd, ulong want_access, TABLE_LIST *tables,
continue;
}
+ if (is_temporary_table(tl))
+ {
+ /*
+ If this table list element corresponds to a pre-opened temporary
+ table skip checking of all relevant table-level privileges for it.
+ Note that during creation of temporary table we still need to check
+ if user has CREATE_TMP_ACL.
+ */
+ tl->grant.privilege|= TMP_TABLE_ACLS;
+ tl->grant.want_privilege= 0;
+ continue;
+ }
+
if (!locked)
{
locked= 1;
mysql_rwlock_rdlock(&LOCK_grant);
}
- if (!(grant_table= table_hash_search(sctx->host, sctx->ip,
- tl->get_db_name(),
- sctx->priv_user,
- tl->get_table_name(),
- FALSE)))
+ GRANT_TABLE *grant_table= table_hash_search(sctx->host, sctx->ip,
+ tl->get_db_name(),
+ sctx->priv_user,
+ tl->get_table_name(),
+ FALSE);
+
+ if (!grant_table)
{
want_access &= ~tl->grant.privilege;
goto err; // No grants
@@ -6876,9 +6898,9 @@ public:
virtual bool handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
- MYSQL_ERROR::enum_warning_level level,
+ Sql_condition::enum_warning_level level,
const char* msg,
- MYSQL_ERROR ** cond_hdl);
+ Sql_condition ** cond_hdl);
bool has_errors() { return is_grave; }
@@ -6891,18 +6913,18 @@ Silence_routine_definer_errors::handle_condition(
THD *thd,
uint sql_errno,
const char*,
- MYSQL_ERROR::enum_warning_level level,
+ Sql_condition::enum_warning_level level,
const char* msg,
- MYSQL_ERROR ** cond_hdl)
+ Sql_condition ** cond_hdl)
{
*cond_hdl= NULL;
- if (level == MYSQL_ERROR::WARN_LEVEL_ERROR)
+ if (level == Sql_condition::WARN_LEVEL_ERROR)
{
switch (sql_errno)
{
case ER_NONEXISTING_PROC_GRANT:
/* Convert the error into a warning. */
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
sql_errno, msg);
return TRUE;
default:
@@ -7067,7 +7089,7 @@ bool sp_grant_privileges(THD *thd, const char *sp_db, const char *sp_name,
}
else
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_PASSWD_LENGTH,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_PASSWD_LENGTH,
ER(ER_PASSWD_LENGTH), SCRAMBLED_PASSWORD_CHAR_LENGTH);
return TRUE;
}
@@ -8362,7 +8384,7 @@ static ulong parse_client_handshake_packet(MPVIO_EXT *mpvio,
DBUG_ASSERT(net->read_pos[pkt_len] == 0);
if (mpvio->connect_errors)
- reset_host_errors(thd->main_security_ctx.ip);
+ reset_host_connect_errors(thd->main_security_ctx.ip);
ulong client_capabilities= uint2korr(net->read_pos);
if (client_capabilities & CLIENT_PROTOCOL_41)
@@ -8740,7 +8762,6 @@ static int server_mpvio_read_packet(MYSQL_PLUGIN_VIO *param, uchar **buf)
err:
if (mpvio->status == MPVIO_EXT::FAILURE)
{
- inc_host_errors(mpvio->thd->security_ctx->ip);
if (!mpvio->thd->is_error())
{
if (mpvio->make_it_fail)
@@ -8913,6 +8934,9 @@ static int do_auth_once(THD *thd, const LEX_STRING *auth_plugin_name,
else
{
/* Server cannot load the required plugin. */
+ Host_errors errors;
+ errors.m_no_auth_plugin= 1;
+ inc_host_errors(mpvio->thd->security_ctx->ip, &errors);
my_error(ER_PLUGIN_IS_NOT_LOADED, MYF(0), auth_plugin_name->str);
res= CR_ERROR;
}
@@ -9038,8 +9062,26 @@ bool acl_authenticate(THD *thd, uint connect_errors,
if (res > CR_OK && mpvio.status != MPVIO_EXT::SUCCESS)
{
+ Host_errors errors;
DBUG_ASSERT(mpvio.status == MPVIO_EXT::FAILURE);
-
+ switch (res)
+ {
+ case CR_AUTH_PLUGIN_ERROR:
+ errors.m_auth_plugin= 1;
+ break;
+ case CR_AUTH_HANDSHAKE:
+ errors.m_handshake= 1;
+ break;
+ case CR_AUTH_USER_CREDENTIALS:
+ errors.m_authentication= 1;
+ break;
+ case CR_ERROR:
+ default:
+ /* Unknown or unspecified auth plugin error. */
+ errors.m_auth_plugin= 1;
+ break;
+ }
+ inc_host_errors(mpvio.thd->security_ctx->ip, &errors);
if (!thd->is_error())
login_failed_error(thd);
DBUG_RETURN(1);
@@ -9064,6 +9106,9 @@ bool acl_authenticate(THD *thd, uint connect_errors,
/* we need to find the proxy user, but there was none */
if (!proxy_user)
{
+ Host_errors errors;
+ errors.m_proxy_user= 1;
+ inc_host_errors(mpvio.thd->security_ctx->ip, &errors);
if (!thd->is_error())
login_failed_error(thd);
DBUG_RETURN(1);
@@ -9080,6 +9125,9 @@ bool acl_authenticate(THD *thd, uint connect_errors,
mpvio.auth_info.authenticated_as, TRUE);
if (!acl_proxy_user)
{
+ Host_errors errors;
+ errors.m_proxy_user_acl= 1;
+ inc_host_errors(mpvio.thd->security_ctx->ip, &errors);
if (!thd->is_error())
login_failed_error(thd);
mysql_mutex_unlock(&acl_cache->lock);
@@ -9108,6 +9156,9 @@ bool acl_authenticate(THD *thd, uint connect_errors,
*/
if (acl_check_ssl(thd, acl_user))
{
+ Host_errors errors;
+ errors.m_ssl= 1;
+ inc_host_errors(mpvio.thd->security_ctx->ip, &errors);
login_failed_error(thd);
DBUG_RETURN(1);
}
@@ -9190,15 +9241,14 @@ bool acl_authenticate(THD *thd, uint connect_errors,
sctx->external_user= my_strdup(mpvio.auth_info.external_user, MYF(0));
if (res == CR_OK_HANDSHAKE_COMPLETE)
- thd->stmt_da->disable_status();
+ thd->get_stmt_da()->disable_status();
else
my_ok(thd);
#ifdef HAVE_PSI_THREAD_INTERFACE
- PSI_CALL(set_thread_user_host)(thd->main_security_ctx.user,
- strlen(thd->main_security_ctx.user),
- thd->main_security_ctx.host_or_ip,
- strlen(thd->main_security_ctx.host_or_ip));
+ PSI_THREAD_CALL(set_thread_user_host)
+ (thd->main_security_ctx.user, strlen(thd->main_security_ctx.user),
+ thd->main_security_ctx.host_or_ip, strlen(thd->main_security_ctx.host_or_ip));
#endif
/* Ready to handle queries */
@@ -9228,7 +9278,7 @@ static int native_password_authenticate(MYSQL_PLUGIN_VIO *vio,
create_random_string(thd->scramble, SCRAMBLE_LENGTH, &thd->rand);
/* and send it to the client */
if (mpvio->write_packet(mpvio, (uchar*)thd->scramble, SCRAMBLE_LENGTH + 1))
- DBUG_RETURN(CR_ERROR);
+ DBUG_RETURN(CR_AUTH_HANDSHAKE);
}
/* reply and authenticate */
@@ -9270,7 +9320,7 @@ static int native_password_authenticate(MYSQL_PLUGIN_VIO *vio,
/* read the reply with the encrypted password */
if ((pkt_len= mpvio->read_packet(mpvio, &pkt)) < 0)
- DBUG_RETURN(CR_ERROR);
+ DBUG_RETURN(CR_AUTH_HANDSHAKE);
DBUG_PRINT("info", ("reply read : pkt_len=%d", pkt_len));
#ifdef NO_EMBEDDED_ACCESS_CHECKS
@@ -9278,23 +9328,22 @@ static int native_password_authenticate(MYSQL_PLUGIN_VIO *vio,
#endif
if (pkt_len == 0) /* no password */
- DBUG_RETURN(info->auth_string[0] ? CR_ERROR : CR_OK);
+ DBUG_RETURN(mpvio->acl_user->salt_len != 0 ? CR_AUTH_USER_CREDENTIALS : CR_OK);
info->password_used= PASSWORD_USED_YES;
if (pkt_len == SCRAMBLE_LENGTH)
{
if (!mpvio->acl_user->salt_len)
- DBUG_RETURN(CR_ERROR);
+ DBUG_RETURN(CR_AUTH_USER_CREDENTIALS);
if (check_scramble(pkt, thd->scramble, mpvio->acl_user->salt))
- DBUG_RETURN(CR_ERROR);
+ DBUG_RETURN(CR_AUTH_USER_CREDENTIALS);
else
DBUG_RETURN(CR_OK);
}
- inc_host_errors(mpvio->thd->security_ctx->ip);
my_error(ER_HANDSHAKE_ERROR, MYF(0));
- DBUG_RETURN(CR_ERROR);
+ DBUG_RETURN(CR_AUTH_HANDSHAKE);
}
static int old_password_authenticate(MYSQL_PLUGIN_VIO *vio,
@@ -9311,12 +9360,12 @@ static int old_password_authenticate(MYSQL_PLUGIN_VIO *vio,
create_random_string(thd->scramble, SCRAMBLE_LENGTH, &thd->rand);
/* and send it to the client */
if (mpvio->write_packet(mpvio, (uchar*)thd->scramble, SCRAMBLE_LENGTH + 1))
- return CR_ERROR;
+ return CR_AUTH_HANDSHAKE;
}
/* read the reply and authenticate */
if ((pkt_len= mpvio->read_packet(mpvio, &pkt)) < 0)
- return CR_ERROR;
+ return CR_AUTH_HANDSHAKE;
#ifdef NO_EMBEDDED_ACCESS_CHECKS
return CR_OK;
@@ -9331,26 +9380,25 @@ static int old_password_authenticate(MYSQL_PLUGIN_VIO *vio,
pkt_len= strnlen((char*)pkt, pkt_len);
if (pkt_len == 0) /* no password */
- return info->auth_string[0] ? CR_ERROR : CR_OK;
+ return info->auth_string[0] ? CR_AUTH_USER_CREDENTIALS : CR_OK;
if (secure_auth(thd))
- return CR_ERROR;
+ return CR_AUTH_HANDSHAKE;
info->password_used= PASSWORD_USED_YES;
if (pkt_len == SCRAMBLE_LENGTH_323)
{
if (!mpvio->acl_user->salt_len)
- return CR_ERROR;
+ return CR_AUTH_USER_CREDENTIALS;
return check_scramble_323(pkt, thd->scramble,
(ulong *) mpvio->acl_user->salt) ?
- CR_ERROR : CR_OK;
+ CR_AUTH_USER_CREDENTIALS : CR_OK;
}
- inc_host_errors(mpvio->thd->security_ctx->ip);
my_error(ER_HANDSHAKE_ERROR, MYF(0));
- return CR_ERROR;
+ return CR_AUTH_HANDSHAKE;
}
static struct st_mysql_auth native_password_handler=
@@ -9399,3 +9447,10 @@ maria_declare_plugin(mysql_password)
MariaDB_PLUGIN_MATURITY_BETA /* Maturity */
}
maria_declare_plugin_end;
+
+
+/* called when new user is created or existing password is changed */
+int check_password_policy(String *password)
+{
+ return (0);
+}
diff --git a/sql/sql_acl.h b/sql/sql_acl.h
index 3169746419c..d519279e9c2 100644
--- a/sql/sql_acl.h
+++ b/sql/sql_acl.h
@@ -95,6 +95,14 @@
CREATE_ACL | DROP_ACL | ALTER_ACL | INDEX_ACL | \
TRIGGER_ACL | REFERENCES_ACL | GRANT_ACL | CREATE_VIEW_ACL | SHOW_VIEW_ACL)
+/**
+ Table-level privileges which are automatically "granted" to everyone on
+ existing temporary tables (CREATE_ACL is necessary for ALTER ... RENAME).
+*/
+#define TMP_TABLE_ACLS \
+(SELECT_ACL | INSERT_ACL | UPDATE_ACL | DELETE_ACL | CREATE_ACL | DROP_ACL | \
+ INDEX_ACL | ALTER_ACL)
+
/*
Defines to change the above bits to how things are stored in tables
This is needed as the 'host' and 'db' table is missing a few privileges
@@ -245,7 +253,7 @@ int fill_schema_schema_privileges(THD *thd, TABLE_LIST *tables, COND *cond);
int fill_schema_table_privileges(THD *thd, TABLE_LIST *tables, COND *cond);
int fill_schema_column_privileges(THD *thd, TABLE_LIST *tables, COND *cond);
int wild_case_compare(CHARSET_INFO *cs, const char *str,const char *wildstr);
-
+int check_password_policy(String *password);
#ifdef NO_EMBEDDED_ACCESS_CHECKS
#define check_grant(A,B,C,D,E,F) 0
#define check_grant_db(A,B) 0
diff --git a/sql/sql_admin.cc b/sql/sql_admin.cc
index 0b2a939d0ba..8f3ea0fedb1 100644
--- a/sql/sql_admin.cc
+++ b/sql/sql_admin.cc
@@ -1,4 +1,5 @@
-/* Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2010, 2013, Oracle and/or its affiliates.
+ Copyright (c) 2012, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -42,9 +43,19 @@ static bool admin_recreate_table(THD *thd, TABLE_LIST *table_list)
trans_rollback(thd);
close_thread_tables(thd);
thd->mdl_context.release_transactional_locks();
+
+ /*
+ table_list->table has been closed and freed. Do not reference
+ uninitialized data. open_tables() could fail.
+ */
+ table_list->table= NULL;
+ /* Same applies to MDL ticket. */
+ table_list->mdl_request.ticket= NULL;
+
DEBUG_SYNC(thd, "ha_admin_try_alter");
tmp_disable_binlog(thd); // binlogging is done by caller if wanted
- result_code= mysql_recreate_table(thd, table_list);
+ result_code= (open_temporary_tables(thd, table_list) ||
+ mysql_recreate_table(thd, table_list));
reenable_binlog(thd);
/*
mysql_recreate_table() can push OK or ERROR.
@@ -52,8 +63,8 @@ static bool admin_recreate_table(THD *thd, TABLE_LIST *table_list)
we will store the error message in a result set row
and then clear.
*/
- if (thd->stmt_da->is_ok())
- thd->stmt_da->reset_diagnostics_area();
+ if (thd->get_stmt_da()->is_ok())
+ thd->get_stmt_da()->reset_diagnostics_area();
table_list->table= NULL;
result_code= result_code ? HA_ADMIN_FAILED : HA_ADMIN_OK;
DBUG_RETURN(result_code);
@@ -117,8 +128,7 @@ static int prepare_for_repair(THD *thd, TABLE_LIST *table_list,
MDL_EXCLUSIVE, MDL_TRANSACTION);
if (lock_table_names(thd, table_list, table_list->next_global,
- thd->variables.lock_wait_timeout,
- MYSQL_OPEN_SKIP_TEMPORARY))
+ thd->variables.lock_wait_timeout, 0))
DBUG_RETURN(0);
has_mdl_lock= TRUE;
@@ -198,7 +208,7 @@ static int prepare_for_repair(THD *thd, TABLE_LIST *table_list,
goto end;
/* Close table but don't remove from locked list */
close_all_tables_for_name(thd, table_list->table->s,
- HA_EXTRA_NOT_USED);
+ HA_EXTRA_NOT_USED, NULL);
table_list->table= 0;
}
/*
@@ -338,6 +348,14 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
mysql_ha_rm_tables(thd, tables);
+ /*
+ Close all temporary tables which were pre-open to simplify
+ privilege checking. Clear all references to closed tables.
+ */
+ close_thread_tables(thd);
+ for (table= tables; table; table= table->next_local)
+ table->table= NULL;
+
for (table= tables; table; table= table->next_local)
{
char table_name[SAFE_NAME_LEN*2+2];
@@ -394,14 +412,15 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
because it's already known that the table is badly damaged.
*/
- Warning_info wi(thd->query_id, false);
- Warning_info *wi_saved= thd->warning_info;
+ Diagnostics_area *da= thd->get_stmt_da();
+ Warning_info tmp_wi(thd->query_id, false, true);
- thd->warning_info= &wi;
+ da->push_warning_info(&tmp_wi);
- open_error= open_and_lock_tables(thd, table, TRUE, 0);
+ open_error= (open_temporary_tables(thd, table) ||
+ open_and_lock_tables(thd, table, TRUE, 0));
- thd->warning_info= wi_saved;
+ da->pop_warning_info();
}
else
{
@@ -413,7 +432,8 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
mode. It does make sense for the user to see such errors.
*/
- open_error= open_and_lock_tables(thd, table, TRUE, 0);
+ open_error= (open_temporary_tables(thd, table) ||
+ open_and_lock_tables(thd, table, TRUE, 0));
}
thd->prepare_derived_at_open= FALSE;
@@ -448,7 +468,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
*/
Alter_info *alter_info= &lex->alter_info;
- if (alter_info->flags & ALTER_ADMIN_PARTITION)
+ if (alter_info->flags & Alter_info::ALTER_ADMIN_PARTITION)
{
if (!table->table->part_info)
{
@@ -512,16 +532,16 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
if (!table->table)
{
DBUG_PRINT("admin", ("open table failed"));
- if (thd->warning_info->is_empty())
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ if (thd->get_stmt_da()->is_warning_info_empty())
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
ER_CHECK_NO_SUCH_TABLE, ER(ER_CHECK_NO_SUCH_TABLE));
/* if it was a view will check md5 sum */
if (table->view &&
view_checksum(thd, table) == HA_ADMIN_WRONG_CHECKSUM)
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
ER_VIEW_CHECKSUM, ER(ER_VIEW_CHECKSUM));
- if (thd->stmt_da->is_error() &&
- table_not_corrupt_error(thd->stmt_da->sql_errno()))
+ if (thd->get_stmt_da()->is_error() &&
+ table_not_corrupt_error(thd->get_stmt_da()->sql_errno()))
result_code= HA_ADMIN_FAILED;
else
/* Default failure code is corrupt table */
@@ -569,7 +589,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
table->table=0; // For query cache
if (protocol->write())
goto err;
- thd->stmt_da->reset_diagnostics_area();
+ thd->get_stmt_da()->reset_diagnostics_area();
continue;
/* purecov: end */
}
@@ -742,8 +762,9 @@ send_result:
lex->cleanup_after_one_table_open();
thd->clear_error(); // these errors shouldn't get client
{
- List_iterator_fast<MYSQL_ERROR> it(thd->warning_info->warn_list());
- MYSQL_ERROR *err;
+ Diagnostics_area::Sql_condition_iterator it=
+ thd->get_stmt_da()->sql_conditions();
+ const Sql_condition *err;
while ((err= it++))
{
protocol->prepare_for_resend();
@@ -756,7 +777,7 @@ send_result:
if (protocol->write())
goto err;
}
- thd->warning_info->clear_warning_info(thd->query_id);
+ thd->get_stmt_da()->clear_warning_info(thd->query_id);
}
protocol->prepare_for_resend();
protocol->store(table_name, system_charset_info);
@@ -824,19 +845,10 @@ send_result_message:
case HA_ADMIN_TRY_ALTER:
{
- uint save_flags;
Alter_info *alter_info= &lex->alter_info;
- /* Store the original value of alter_info->flags */
- save_flags= alter_info->flags;
- /*
- This is currently used only by InnoDB. ha_innobase::optimize() answers
- "try with alter", so here we close the table, do an ALTER TABLE,
- reopen the table and do ha_innobase::analyze() on it.
- We have to end the row, so analyze could return more rows.
- */
protocol->store(STRING_WITH_LEN("note"), system_charset_info);
- if(alter_info->flags & ALTER_ADMIN_PARTITION)
+ if (alter_info->flags & Alter_info::ALTER_ADMIN_PARTITION)
{
protocol->store(STRING_WITH_LEN(
"Table does not support optimize on partitions. All partitions "
@@ -848,18 +860,23 @@ send_result_message:
"Table does not support optimize, doing recreate + analyze instead"),
system_charset_info);
}
- if (protocol->write())
+ if (protocol->write())
goto err;
+
DBUG_PRINT("info", ("HA_ADMIN_TRY_ALTER, trying analyze..."));
TABLE_LIST *save_next_local= table->next_local,
*save_next_global= table->next_global;
table->next_local= table->next_global= 0;
- result_code= admin_recreate_table(thd, table);
+ tmp_disable_binlog(thd); // binlogging is done by caller if wanted
+ result_code= admin_recreate_table(thd, table);
+ reenable_binlog(thd);
trans_commit_stmt(thd);
trans_commit(thd);
close_thread_tables(thd);
thd->mdl_context.release_transactional_locks();
+ /* Clear references to TABLE and MDL_ticket after releasing them. */
+ table->mdl_request.ticket= NULL;
if (!result_code) // recreation went ok
{
@@ -867,22 +884,27 @@ send_result_message:
table->mdl_request.ticket= NULL;
DEBUG_SYNC(thd, "ha_admin_open_ltable");
table->mdl_request.set_type(MDL_SHARED_WRITE);
- /*
- Reset the ALTER_ADMIN_PARTITION bit in alter_info->flags
- to force analyze on all partitions.
- */
- alter_info->flags &= ~(ALTER_ADMIN_PARTITION);
- if ((table->table= open_ltable(thd, table, lock_type, 0)))
+ if (!open_temporary_tables(thd, table) &&
+ (table->table= open_ltable(thd, table, lock_type, 0)))
{
+ uint save_flags;
+ /* Store the original value of alter_info->flags */
+ save_flags= alter_info->flags;
+
+ /*
+ Reset the ALTER_ADMIN_PARTITION bit in alter_info->flags
+ to force analyze on all partitions.
+ */
+ alter_info->flags &= ~(Alter_info::ALTER_ADMIN_PARTITION);
result_code= table->table->file->ha_analyze(thd, check_opt);
if (result_code == HA_ADMIN_ALREADY_DONE)
result_code= HA_ADMIN_OK;
else if (result_code) // analyze failed
table->table->file->print_error(result_code, MYF(0));
+ alter_info->flags= save_flags;
}
else
result_code= -1; // open failed
- alter_info->flags= save_flags;
}
/* Start a new row for the final status row */
protocol->prepare_for_resend();
@@ -893,7 +915,7 @@ send_result_message:
DBUG_ASSERT(thd->is_error() || thd->killed);
if (thd->is_error())
{
- const char *err_msg= thd->stmt_da->message();
+ const char *err_msg= thd->get_stmt_da()->message();
if (!thd->vio_ok())
{
sql_print_error("%s", err_msg);
@@ -912,6 +934,9 @@ send_result_message:
}
thd->clear_error();
}
+ /* Make sure this table instance is not reused after the operation. */
+ if (table->table)
+ table->table->m_needs_reopen= true;
}
result_code= result_code ? HA_ADMIN_FAILED : HA_ADMIN_OK;
table->next_local= save_next_local;
@@ -1010,14 +1035,15 @@ send_result_message:
err:
/* Make sure this table instance is not reused after the failure. */
- if (table && table->table)
- table->table->m_needs_reopen= true;
trans_rollback_stmt(thd);
trans_rollback(thd);
+ if (table && table->table)
+ {
+ table->table->m_needs_reopen= true;
+ table->table= 0;
+ }
close_thread_tables(thd); // Shouldn't be needed
thd->mdl_context.release_transactional_locks();
- if (table)
- table->table=0;
DBUG_RETURN(TRUE);
}
@@ -1090,12 +1116,13 @@ bool mysql_preload_keys(THD* thd, TABLE_LIST* tables)
}
-bool Analyze_table_statement::execute(THD *thd)
+bool Sql_cmd_analyze_table::execute(THD *thd)
{
+ LEX *m_lex= thd->lex;
TABLE_LIST *first_table= m_lex->select_lex.table_list.first;
bool res= TRUE;
thr_lock_type lock_type = TL_READ_NO_INSERT;
- DBUG_ENTER("Analyze_table_statement::execute");
+ DBUG_ENTER("Sql_cmd_analyze_table::execute");
if (check_table_access(thd, SELECT_ACL | INSERT_ACL, first_table,
FALSE, UINT_MAX, FALSE))
@@ -1120,12 +1147,13 @@ error:
}
-bool Check_table_statement::execute(THD *thd)
+bool Sql_cmd_check_table::execute(THD *thd)
{
+ LEX *m_lex= thd->lex;
TABLE_LIST *first_table= m_lex->select_lex.table_list.first;
thr_lock_type lock_type = TL_READ_NO_INSERT;
bool res= TRUE;
- DBUG_ENTER("Check_table_statement::execute");
+ DBUG_ENTER("Sql_cmd_check_table::execute");
if (check_table_access(thd, SELECT_ACL, first_table,
TRUE, UINT_MAX, FALSE))
@@ -1144,17 +1172,18 @@ error:
}
-bool Optimize_table_statement::execute(THD *thd)
+bool Sql_cmd_optimize_table::execute(THD *thd)
{
+ LEX *m_lex= thd->lex;
TABLE_LIST *first_table= m_lex->select_lex.table_list.first;
bool res= TRUE;
- DBUG_ENTER("Optimize_table_statement::execute");
+ DBUG_ENTER("Sql_cmd_optimize_table::execute");
if (check_table_access(thd, SELECT_ACL | INSERT_ACL, first_table,
FALSE, UINT_MAX, FALSE))
goto error; /* purecov: inspected */
thd->enable_slow_log= opt_log_slow_admin_statements;
- res= (specialflag & (SPECIAL_SAFE_MODE | SPECIAL_NO_NEW_FUNC)) ?
+ res= (specialflag & SPECIAL_NO_NEW_FUNC) ?
mysql_recreate_table(thd, first_table) :
mysql_admin_table(thd, first_table, &m_lex->check_opt,
"optimize", TL_WRITE, 1, 0, 0, 0,
@@ -1175,11 +1204,12 @@ error:
}
-bool Repair_table_statement::execute(THD *thd)
+bool Sql_cmd_repair_table::execute(THD *thd)
{
+ LEX *m_lex= thd->lex;
TABLE_LIST *first_table= m_lex->select_lex.table_list.first;
bool res= TRUE;
- DBUG_ENTER("Repair_table_statement::execute");
+ DBUG_ENTER("Sql_cmd_repair_table::execute");
if (check_table_access(thd, SELECT_ACL | INSERT_ACL, first_table,
FALSE, UINT_MAX, FALSE))
diff --git a/sql/sql_admin.h b/sql/sql_admin.h
index 5398e3019f1..fa89fc9063f 100644
--- a/sql/sql_admin.h
+++ b/sql/sql_admin.h
@@ -26,109 +26,100 @@ int reassign_keycache_tables(THD* thd, KEY_CACHE *src_cache,
KEY_CACHE *dst_cache);
/**
- Analyze_statement represents the ANALYZE TABLE statement.
+ Sql_cmd_analyze_table represents the ANALYZE TABLE statement.
*/
-class Analyze_table_statement : public Sql_statement
+class Sql_cmd_analyze_table : public Sql_cmd
{
public:
/**
Constructor, used to represent a ANALYZE TABLE statement.
- @param lex the LEX structure for this statement.
*/
- Analyze_table_statement(LEX *lex)
- : Sql_statement(lex)
+ Sql_cmd_analyze_table()
{}
- ~Analyze_table_statement()
+ ~Sql_cmd_analyze_table()
{}
- /**
- Execute a ANALYZE TABLE statement at runtime.
- @param thd the current thread.
- @return false on success.
- */
bool execute(THD *thd);
+
+ virtual enum_sql_command sql_command_code() const
+ {
+ return SQLCOM_ANALYZE;
+ }
};
/**
- Check_table_statement represents the CHECK TABLE statement.
+ Sql_cmd_check_table represents the CHECK TABLE statement.
*/
-class Check_table_statement : public Sql_statement
+class Sql_cmd_check_table : public Sql_cmd
{
public:
/**
Constructor, used to represent a CHECK TABLE statement.
- @param lex the LEX structure for this statement.
*/
- Check_table_statement(LEX *lex)
- : Sql_statement(lex)
+ Sql_cmd_check_table()
{}
- ~Check_table_statement()
+ ~Sql_cmd_check_table()
{}
- /**
- Execute a CHECK TABLE statement at runtime.
- @param thd the current thread.
- @return false on success.
- */
bool execute(THD *thd);
-};
+ virtual enum_sql_command sql_command_code() const
+ {
+ return SQLCOM_CHECK;
+ }
+};
/**
- Optimize_table_statement represents the OPTIMIZE TABLE statement.
+ Sql_cmd_optimize_table represents the OPTIMIZE TABLE statement.
*/
-class Optimize_table_statement : public Sql_statement
+class Sql_cmd_optimize_table : public Sql_cmd
{
public:
/**
Constructor, used to represent a OPTIMIZE TABLE statement.
- @param lex the LEX structure for this statement.
*/
- Optimize_table_statement(LEX *lex)
- : Sql_statement(lex)
+ Sql_cmd_optimize_table()
{}
- ~Optimize_table_statement()
+ ~Sql_cmd_optimize_table()
{}
- /**
- Execute a OPTIMIZE TABLE statement at runtime.
- @param thd the current thread.
- @return false on success.
- */
bool execute(THD *thd);
+
+ virtual enum_sql_command sql_command_code() const
+ {
+ return SQLCOM_OPTIMIZE;
+ }
};
/**
- Repair_table_statement represents the REPAIR TABLE statement.
+ Sql_cmd_repair_table represents the REPAIR TABLE statement.
*/
-class Repair_table_statement : public Sql_statement
+class Sql_cmd_repair_table : public Sql_cmd
{
public:
/**
Constructor, used to represent a REPAIR TABLE statement.
- @param lex the LEX structure for this statement.
*/
- Repair_table_statement(LEX *lex)
- : Sql_statement(lex)
+ Sql_cmd_repair_table()
{}
- ~Repair_table_statement()
+ ~Sql_cmd_repair_table()
{}
- /**
- Execute a REPAIR TABLE statement at runtime.
- @param thd the current thread.
- @return false on success.
- */
bool execute(THD *thd);
+
+ virtual enum_sql_command sql_command_code() const
+ {
+ return SQLCOM_REPAIR;
+ }
};
#endif
diff --git a/sql/sql_alter.cc b/sql/sql_alter.cc
index c6c02773286..01bffaf132f 100644
--- a/sql/sql_alter.cc
+++ b/sql/sql_alter.cc
@@ -16,9 +16,176 @@
#include "sql_parse.h" // check_access
#include "sql_table.h" // mysql_alter_table,
// mysql_exchange_partition
+#include "sql_base.h" // open_temporary_tables
#include "sql_alter.h"
-bool Alter_table_statement::execute(THD *thd)
+Alter_info::Alter_info(const Alter_info &rhs, MEM_ROOT *mem_root)
+ :drop_list(rhs.drop_list, mem_root),
+ alter_list(rhs.alter_list, mem_root),
+ key_list(rhs.key_list, mem_root),
+ create_list(rhs.create_list, mem_root),
+ flags(rhs.flags),
+ keys_onoff(rhs.keys_onoff),
+ partition_names(rhs.partition_names, mem_root),
+ num_parts(rhs.num_parts),
+ requested_algorithm(rhs.requested_algorithm),
+ requested_lock(rhs.requested_lock)
+{
+ /*
+ Make deep copies of used objects.
+ This is not a fully deep copy - clone() implementations
+ of Alter_drop, Alter_column, Key, foreign_key, Key_part_spec
+ do not copy string constants. At the same length the only
+ reason we make a copy currently is that ALTER/CREATE TABLE
+ code changes input Alter_info definitions, but string
+ constants never change.
+ */
+ list_copy_and_replace_each_value(drop_list, mem_root);
+ list_copy_and_replace_each_value(alter_list, mem_root);
+ list_copy_and_replace_each_value(key_list, mem_root);
+ list_copy_and_replace_each_value(create_list, mem_root);
+ /* partition_names are not deeply copied currently */
+}
+
+
+bool Alter_info::set_requested_algorithm(const LEX_STRING *str)
+{
+ // To avoid adding new keywords to the grammar, we match strings here.
+ if (!my_strcasecmp(system_charset_info, str->str, "INPLACE"))
+ requested_algorithm= ALTER_TABLE_ALGORITHM_INPLACE;
+ else if (!my_strcasecmp(system_charset_info, str->str, "COPY"))
+ requested_algorithm= ALTER_TABLE_ALGORITHM_COPY;
+ else if (!my_strcasecmp(system_charset_info, str->str, "DEFAULT"))
+ requested_algorithm= ALTER_TABLE_ALGORITHM_DEFAULT;
+ else
+ return true;
+ return false;
+}
+
+
+bool Alter_info::set_requested_lock(const LEX_STRING *str)
+{
+ // To avoid adding new keywords to the grammar, we match strings here.
+ if (!my_strcasecmp(system_charset_info, str->str, "NONE"))
+ requested_lock= ALTER_TABLE_LOCK_NONE;
+ else if (!my_strcasecmp(system_charset_info, str->str, "SHARED"))
+ requested_lock= ALTER_TABLE_LOCK_SHARED;
+ else if (!my_strcasecmp(system_charset_info, str->str, "EXCLUSIVE"))
+ requested_lock= ALTER_TABLE_LOCK_EXCLUSIVE;
+ else if (!my_strcasecmp(system_charset_info, str->str, "DEFAULT"))
+ requested_lock= ALTER_TABLE_LOCK_DEFAULT;
+ else
+ return true;
+ return false;
+}
+
+
+Alter_table_ctx::Alter_table_ctx()
+ : datetime_field(NULL), error_if_not_empty(false),
+ tables_opened(0),
+ db(NULL), table_name(NULL), alias(NULL),
+ new_db(NULL), new_name(NULL), new_alias(NULL),
+ fk_error_if_delete_row(false), fk_error_id(NULL),
+ fk_error_table(NULL)
+#ifndef DBUG_OFF
+ , tmp_table(false)
+#endif
+{
+}
+
+
+Alter_table_ctx::Alter_table_ctx(THD *thd, TABLE_LIST *table_list,
+ uint tables_opened_arg,
+ char *new_db_arg, char *new_name_arg)
+ : datetime_field(NULL), error_if_not_empty(false),
+ tables_opened(tables_opened_arg),
+ new_db(new_db_arg), new_name(new_name_arg),
+ fk_error_if_delete_row(false), fk_error_id(NULL),
+ fk_error_table(NULL)
+#ifndef DBUG_OFF
+ , tmp_table(false)
+#endif
+{
+ /*
+ Assign members db, table_name, new_db and new_name
+ to simplify further comparisions: we want to see if it's a RENAME
+ later just by comparing the pointers, avoiding the need for strcmp.
+ */
+ db= table_list->db;
+ table_name= table_list->table_name;
+ alias= (lower_case_table_names == 2) ? table_list->alias : table_name;
+
+ if (!new_db || !my_strcasecmp(table_alias_charset, new_db, db))
+ new_db= db;
+
+ if (new_name)
+ {
+ DBUG_PRINT("info", ("new_db.new_name: '%s'.'%s'", new_db, new_name));
+
+ if (lower_case_table_names == 1) // Convert new_name/new_alias to lower case
+ {
+ my_casedn_str(files_charset_info, new_name);
+ new_alias= new_name;
+ }
+ else if (lower_case_table_names == 2) // Convert new_name to lower case
+ {
+ strmov(new_alias= new_alias_buff, new_name);
+ my_casedn_str(files_charset_info, new_name);
+ }
+ else
+ new_alias= new_name; // LCTN=0 => case sensitive + case preserving
+
+ if (!is_database_changed() &&
+ !my_strcasecmp(table_alias_charset, new_name, table_name))
+ {
+ /*
+ Source and destination table names are equal:
+ make is_table_renamed() more efficient.
+ */
+ new_alias= table_name;
+ new_name= table_name;
+ }
+ }
+ else
+ {
+ new_alias= alias;
+ new_name= table_name;
+ }
+
+ my_snprintf(tmp_name, sizeof(tmp_name), "%s-%lx_%lx", tmp_file_prefix,
+ current_pid, thd->thread_id);
+ /* Safety fix for InnoDB */
+ if (lower_case_table_names)
+ my_casedn_str(files_charset_info, tmp_name);
+
+ if (table_list->table->s->tmp_table == NO_TMP_TABLE)
+ {
+ build_table_filename(path, sizeof(path) - 1, db, table_name, "", 0);
+
+ build_table_filename(new_path, sizeof(new_path) - 1, new_db, new_name, "", 0);
+
+ build_table_filename(new_filename, sizeof(new_filename) - 1,
+ new_db, new_name, reg_ext, 0);
+
+ build_table_filename(tmp_path, sizeof(tmp_path) - 1, new_db, tmp_name, "",
+ FN_IS_TMP);
+ }
+ else
+ {
+ /*
+ We are not filling path, new_path and new_filename members if
+ we are altering temporary table as these members are not used in
+ this case. This fact is enforced with assert.
+ */
+ build_tmptable_filename(thd, tmp_path, sizeof(tmp_path));
+#ifndef DBUG_OFF
+ tmp_table= true;
+#endif
+ }
+}
+
+
+bool Sql_cmd_alter_table::execute(THD *thd)
{
LEX *lex= thd->lex;
/* first SELECT_LEX (have special meaning for many of non-SELECTcommands) */
@@ -38,7 +205,7 @@ bool Alter_table_statement::execute(THD *thd)
ulong priv_needed= ALTER_ACL;
bool result;
- DBUG_ENTER("Alter_table_statement::execute");
+ DBUG_ENTER("Sql_cmd_alter_table::execute");
if (thd->is_fatal_error) /* out of memory creating a copy of alter_info */
DBUG_RETURN(TRUE);
@@ -46,12 +213,14 @@ bool Alter_table_statement::execute(THD *thd)
We also require DROP priv for ALTER TABLE ... DROP PARTITION, as well
as for RENAME TO, as being done by SQLCOM_RENAME_TABLE
*/
- if (alter_info.flags & (ALTER_DROP_PARTITION | ALTER_RENAME))
+ if (alter_info.flags & (Alter_info::ALTER_DROP_PARTITION |
+ Alter_info::ALTER_RENAME))
priv_needed|= DROP_ACL;
/* Must be set in the parser */
DBUG_ASSERT(select_lex->db);
- DBUG_ASSERT(!(alter_info.flags & ALTER_ADMIN_PARTITION));
+ DBUG_ASSERT(!(alter_info.flags & Alter_info::ALTER_EXCHANGE_PARTITION));
+ DBUG_ASSERT(!(alter_info.flags & Alter_info::ALTER_ADMIN_PARTITION));
if (check_access(thd, priv_needed, first_table->db,
&first_table->grant.privilege,
&first_table->grant.m_internal,
@@ -63,10 +232,47 @@ bool Alter_table_statement::execute(THD *thd)
DBUG_RETURN(TRUE); /* purecov: inspected */
/* If it is a merge table, check privileges for merge children. */
- if (create_info.merge_list.first &&
- check_table_access(thd, SELECT_ACL | UPDATE_ACL | DELETE_ACL,
- create_info.merge_list.first, FALSE, UINT_MAX, FALSE))
- DBUG_RETURN(TRUE);
+ if (create_info.merge_list.first)
+ {
+ /*
+ The user must have (SELECT_ACL | UPDATE_ACL | DELETE_ACL) on the
+ underlying base tables, even if there are temporary tables with the same
+ names.
+
+ From user's point of view, it might look as if the user must have these
+ privileges on temporary tables to create a merge table over them. This is
+ one of two cases when a set of privileges is required for operations on
+ temporary tables (see also CREATE TABLE).
+
+ The reason for this behavior stems from the following facts:
+
+ - For merge tables, the underlying table privileges are checked only
+ at CREATE TABLE / ALTER TABLE time.
+
+ In other words, once a merge table is created, the privileges of
+ the underlying tables can be revoked, but the user will still have
+ access to the merge table (provided that the user has privileges on
+ the merge table itself).
+
+ - Temporary tables shadow base tables.
+
+ I.e. there might be temporary and base tables with the same name, and
+ the temporary table takes the precedence in all operations.
+
+ - For temporary MERGE tables we do not track if their child tables are
+ base or temporary. As result we can't guarantee that privilege check
+ which was done in presence of temporary child will stay relevant later
+ as this temporary table might be removed.
+
+ If SELECT_ACL | UPDATE_ACL | DELETE_ACL privileges were not checked for
+ the underlying *base* tables, it would create a security breach as in
+ Bug#12771903.
+ */
+
+ if (check_table_access(thd, SELECT_ACL | UPDATE_ACL | DELETE_ACL,
+ create_info.merge_list.first, FALSE, UINT_MAX, FALSE))
+ DBUG_RETURN(TRUE);
+ }
if (check_grant(thd, priv_needed, first_table, FALSE, UINT_MAX, FALSE))
DBUG_RETURN(TRUE); /* purecov: inspected */
@@ -75,7 +281,7 @@ bool Alter_table_statement::execute(THD *thd)
{
// Rename of table
TABLE_LIST tmp_table;
- bzero((char*) &tmp_table,sizeof(tmp_table));
+ memset(&tmp_table, 0, sizeof(tmp_table));
tmp_table.table_name= lex->name.str;
tmp_table.db= select_lex->db;
tmp_table.grant.privilege= priv;
@@ -86,11 +292,11 @@ bool Alter_table_statement::execute(THD *thd)
/* Don't yet allow changing of symlinks with ALTER TABLE */
if (create_info.data_file_name)
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
WARN_OPTION_IGNORED, ER(WARN_OPTION_IGNORED),
"DATA DIRECTORY");
if (create_info.index_file_name)
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
WARN_OPTION_IGNORED, ER(WARN_OPTION_IGNORED),
"INDEX DIRECTORY");
create_info.data_file_name= create_info.index_file_name= NULL;
@@ -103,7 +309,50 @@ bool Alter_table_statement::execute(THD *thd)
&alter_info,
select_lex->order_list.elements,
select_lex->order_list.first,
- lex->ignore, lex->online);
+ lex->ignore);
DBUG_RETURN(result);
}
+
+bool Sql_cmd_discard_import_tablespace::execute(THD *thd)
+{
+ /* first SELECT_LEX (have special meaning for many of non-SELECTcommands) */
+ SELECT_LEX *select_lex= &thd->lex->select_lex;
+ /* first table of first SELECT_LEX */
+ TABLE_LIST *table_list= (TABLE_LIST*) select_lex->table_list.first;
+
+ if (check_access(thd, ALTER_ACL, table_list->db,
+ &table_list->grant.privilege,
+ &table_list->grant.m_internal,
+ 0, 0))
+ return true;
+
+ if (check_grant(thd, ALTER_ACL, table_list, false, UINT_MAX, false))
+ return true;
+
+ thd->enable_slow_log= opt_log_slow_admin_statements;
+
+ /*
+ Check if we attempt to alter mysql.slow_log or
+ mysql.general_log table and return an error if
+ it is the case.
+ TODO: this design is obsolete and will be removed.
+ */
+ int table_kind= check_if_log_table(table_list->db_length, table_list->db,
+ table_list->table_name_length,
+ table_list->table_name, false);
+
+ if (table_kind)
+ {
+ /* Disable alter of enabled log tables */
+ if (logger.is_log_table_enabled(table_kind))
+ {
+ my_error(ER_BAD_LOG_STATEMENT, MYF(0), "ALTER");
+ return true;
+ }
+ }
+
+ return
+ mysql_discard_or_import_tablespace(thd, table_list,
+ m_tablespace_op == DISCARD_TABLESPACE);
+}
diff --git a/sql/sql_alter.h b/sql/sql_alter.h
index 6660748f666..f0c0a873a5c 100644
--- a/sql/sql_alter.h
+++ b/sql/sql_alter.h
@@ -16,51 +16,412 @@
#ifndef SQL_ALTER_TABLE_H
#define SQL_ALTER_TABLE_H
+class Alter_drop;
+class Alter_column;
+class Key;
+
+/**
+ Data describing the table being created by CREATE TABLE or
+ altered by ALTER TABLE.
+*/
+
+class Alter_info
+{
+public:
+ /*
+ These flags are set by the parser and describes the type of
+ operation(s) specified by the ALTER TABLE statement.
+
+ They do *not* describe the type operation(s) to be executed
+ by the storage engine. For example, we don't yet know the
+ type of index to be added/dropped.
+ */
+
+ // Set for ADD [COLUMN]
+ static const uint ALTER_ADD_COLUMN = 1L << 0;
+
+ // Set for DROP [COLUMN]
+ static const uint ALTER_DROP_COLUMN = 1L << 1;
+
+ // Set for CHANGE [COLUMN] | MODIFY [CHANGE]
+ // Set by mysql_recreate_table()
+ static const uint ALTER_CHANGE_COLUMN = 1L << 2;
+
+ // Set for ADD INDEX | ADD KEY | ADD PRIMARY KEY | ADD UNIQUE KEY |
+ // ADD UNIQUE INDEX | ALTER ADD [COLUMN]
+ static const uint ALTER_ADD_INDEX = 1L << 3;
+
+ // Set for DROP PRIMARY KEY | DROP FOREIGN KEY | DROP KEY | DROP INDEX
+ static const uint ALTER_DROP_INDEX = 1L << 4;
+
+ // Set for RENAME [TO]
+ static const uint ALTER_RENAME = 1L << 5;
+
+ // Set for ORDER BY
+ static const uint ALTER_ORDER = 1L << 6;
+
+ // Set for table_options
+ static const uint ALTER_OPTIONS = 1L << 7;
+
+ // Set for ALTER [COLUMN] ... SET DEFAULT ... | DROP DEFAULT
+ static const uint ALTER_CHANGE_COLUMN_DEFAULT = 1L << 8;
+
+ // Set for DISABLE KEYS | ENABLE KEYS
+ static const uint ALTER_KEYS_ONOFF = 1L << 9;
+
+ // Set for CONVERT TO CHARACTER SET
+ static const uint ALTER_CONVERT = 1L << 10;
+
+ // Set for FORCE
+ // Set by mysql_recreate_table()
+ static const uint ALTER_RECREATE = 1L << 11;
+
+ // Set for ADD PARTITION
+ static const uint ALTER_ADD_PARTITION = 1L << 12;
+
+ // Set for DROP PARTITION
+ static const uint ALTER_DROP_PARTITION = 1L << 13;
+
+ // Set for COALESCE PARTITION
+ static const uint ALTER_COALESCE_PARTITION = 1L << 14;
+
+ // Set for REORGANIZE PARTITION ... INTO
+ static const uint ALTER_REORGANIZE_PARTITION = 1L << 15;
+
+ // Set for partition_options
+ static const uint ALTER_PARTITION = 1L << 16;
+
+ // Set for LOAD INDEX INTO CACHE ... PARTITION
+ // Set for CACHE INDEX ... PARTITION
+ static const uint ALTER_ADMIN_PARTITION = 1L << 17;
+
+ // Set for REORGANIZE PARTITION
+ static const uint ALTER_TABLE_REORG = 1L << 18;
+
+ // Set for REBUILD PARTITION
+ static const uint ALTER_REBUILD_PARTITION = 1L << 19;
+
+ // Set for partitioning operations specifying ALL keyword
+ static const uint ALTER_ALL_PARTITION = 1L << 20;
+
+ // Set for REMOVE PARTITIONING
+ static const uint ALTER_REMOVE_PARTITIONING = 1L << 21;
+
+ // Set for ADD FOREIGN KEY
+ static const uint ADD_FOREIGN_KEY = 1L << 22;
+
+ // Set for DROP FOREIGN KEY
+ static const uint DROP_FOREIGN_KEY = 1L << 23;
+
+ // Set for EXCHANGE PARITION
+ static const uint ALTER_EXCHANGE_PARTITION = 1L << 24;
+
+ // Set by Sql_cmd_alter_table_truncate_partition::execute()
+ static const uint ALTER_TRUNCATE_PARTITION = 1L << 25;
+
+ // Set for ADD [COLUMN] FIRST | AFTER
+ static const uint ALTER_COLUMN_ORDER = 1L << 26;
+
+
+ enum enum_enable_or_disable { LEAVE_AS_IS, ENABLE, DISABLE };
+
+ /**
+ The different values of the ALGORITHM clause.
+ Describes which algorithm to use when altering the table.
+ */
+ enum enum_alter_table_algorithm
+ {
+ // In-place if supported, copy otherwise.
+ ALTER_TABLE_ALGORITHM_DEFAULT,
+
+ // In-place if supported, error otherwise.
+ ALTER_TABLE_ALGORITHM_INPLACE,
+
+ // Copy if supported, error otherwise.
+ ALTER_TABLE_ALGORITHM_COPY
+ };
+
+
+ /**
+ The different values of the LOCK clause.
+ Describes the level of concurrency during ALTER TABLE.
+ */
+ enum enum_alter_table_lock
+ {
+ // Maximum supported level of concurency for the given operation.
+ ALTER_TABLE_LOCK_DEFAULT,
+
+ // Allow concurrent reads & writes. If not supported, give erorr.
+ ALTER_TABLE_LOCK_NONE,
+
+ // Allow concurrent reads only. If not supported, give error.
+ ALTER_TABLE_LOCK_SHARED,
+
+ // Block reads and writes.
+ ALTER_TABLE_LOCK_EXCLUSIVE
+ };
+
+
+ // Columns and keys to be dropped.
+ List<Alter_drop> drop_list;
+ // Columns for ALTER_COLUMN_CHANGE_DEFAULT.
+ List<Alter_column> alter_list;
+ // List of keys, used by both CREATE and ALTER TABLE.
+ List<Key> key_list;
+ // List of columns, used by both CREATE and ALTER TABLE.
+ List<Create_field> create_list;
+ // Type of ALTER TABLE operation.
+ uint flags;
+ // Enable or disable keys.
+ enum_enable_or_disable keys_onoff;
+ // List of partitions.
+ List<char> partition_names;
+ // Number of partitions.
+ uint num_parts;
+ // Type of ALTER TABLE algorithm.
+ enum_alter_table_algorithm requested_algorithm;
+ // Type of ALTER TABLE lock.
+ enum_alter_table_lock requested_lock;
+
+
+ Alter_info() :
+ flags(0),
+ keys_onoff(LEAVE_AS_IS),
+ num_parts(0),
+ requested_algorithm(ALTER_TABLE_ALGORITHM_DEFAULT),
+ requested_lock(ALTER_TABLE_LOCK_DEFAULT)
+ {}
+
+ void reset()
+ {
+ drop_list.empty();
+ alter_list.empty();
+ key_list.empty();
+ create_list.empty();
+ flags= 0;
+ keys_onoff= LEAVE_AS_IS;
+ num_parts= 0;
+ partition_names.empty();
+ requested_algorithm= ALTER_TABLE_ALGORITHM_DEFAULT;
+ requested_lock= ALTER_TABLE_LOCK_DEFAULT;
+ }
+
+
+ /**
+ Construct a copy of this object to be used for mysql_alter_table
+ and mysql_create_table.
+
+ Historically, these two functions modify their Alter_info
+ arguments. This behaviour breaks re-execution of prepared
+ statements and stored procedures and is compensated by always
+ supplying a copy of Alter_info to these functions.
+
+ @param rhs Alter_info to make copy of
+ @param mem_root Mem_root for new Alter_info
+
+ @note You need to use check the error in THD for out
+ of memory condition after calling this function.
+ */
+ Alter_info(const Alter_info &rhs, MEM_ROOT *mem_root);
+
+
+ /**
+ Parses the given string and sets requested_algorithm
+ if the string value matches a supported value.
+ Supported values: INPLACE, COPY, DEFAULT
+
+ @param str String containing the supplied value
+ @retval false Supported value found, state updated
+ @retval true Not supported value, no changes made
+ */
+ bool set_requested_algorithm(const LEX_STRING *str);
+
+
+ /**
+ Parses the given string and sets requested_lock
+ if the string value matches a supported value.
+ Supported values: NONE, SHARED, EXCLUSIVE, DEFAULT
+
+ @param str String containing the supplied value
+ @retval false Supported value found, state updated
+ @retval true Not supported value, no changes made
+ */
+
+ bool set_requested_lock(const LEX_STRING *str);
+
+private:
+ Alter_info &operator=(const Alter_info &rhs); // not implemented
+ Alter_info(const Alter_info &rhs); // not implemented
+};
+
+
+/** Runtime context for ALTER TABLE. */
+class Alter_table_ctx
+{
+public:
+ Alter_table_ctx();
+
+ Alter_table_ctx(THD *thd, TABLE_LIST *table_list, uint tables_opened_arg,
+ char *new_db_arg, char *new_name_arg);
+
+ /**
+ @return true if the table is moved to another database, false otherwise.
+ */
+ bool is_database_changed() const
+ { return (new_db != db); };
+
+ /**
+ @return true if the table is renamed, false otherwise.
+ */
+ bool is_table_renamed() const
+ { return (is_database_changed() || new_name != table_name); };
+
+ /**
+ @return filename (including .frm) for the new table.
+ */
+ const char *get_new_filename() const
+ {
+ DBUG_ASSERT(!tmp_table);
+ return new_filename;
+ }
+
+ /**
+ @return path to the original table.
+ */
+ const char *get_path() const
+ {
+ DBUG_ASSERT(!tmp_table);
+ return path;
+ }
+
+ /**
+ @return path to the new table.
+ */
+ const char *get_new_path() const
+ {
+ DBUG_ASSERT(!tmp_table);
+ return new_path;
+ }
+
+ /**
+ @return path to the temporary table created during ALTER TABLE.
+ */
+ const char *get_tmp_path() const
+ { return tmp_path; }
+
+ /**
+ Mark ALTER TABLE as needing to produce foreign key error if
+ it deletes a row from the table being changed.
+ */
+ void set_fk_error_if_delete_row(FOREIGN_KEY_INFO *fk)
+ {
+ fk_error_if_delete_row= true;
+ fk_error_id= fk->foreign_id->str;
+ fk_error_table= fk->foreign_table->str;
+ }
+
+public:
+ Create_field *datetime_field;
+ bool error_if_not_empty;
+ uint tables_opened;
+ char *db;
+ char *table_name;
+ char *alias;
+ char *new_db;
+ char *new_name;
+ char *new_alias;
+ char tmp_name[80];
+ /**
+ Indicates that if a row is deleted during copying of data from old version
+ of table to the new version ER_FK_CANNOT_DELETE_PARENT error should be
+ emitted.
+ */
+ bool fk_error_if_delete_row;
+ /** Name of foreign key for the above error. */
+ const char *fk_error_id;
+ /** Name of table for the above error. */
+ const char *fk_error_table;
+
+private:
+ char new_filename[FN_REFLEN + 1];
+ char new_alias_buff[FN_REFLEN + 1];
+ char path[FN_REFLEN + 1];
+ char new_path[FN_REFLEN + 1];
+ char tmp_path[FN_REFLEN + 1];
+
+#ifndef DBUG_OFF
+ /** Indicates that we are altering temporary table. Used only in asserts. */
+ bool tmp_table;
+#endif
+
+ Alter_table_ctx &operator=(const Alter_table_ctx &rhs); // not implemented
+ Alter_table_ctx(const Alter_table_ctx &rhs); // not implemented
+};
+
+
/**
- Alter_table_common represents the common properties of the ALTER TABLE
+ Sql_cmd_common_alter_table represents the common properties of the ALTER TABLE
statements.
@todo move Alter_info and other ALTER generic structures from Lex here.
*/
-class Alter_table_common : public Sql_statement
+class Sql_cmd_common_alter_table : public Sql_cmd
{
protected:
/**
Constructor.
- @param lex the LEX structure for this statement.
*/
- Alter_table_common(LEX *lex)
- : Sql_statement(lex)
+ Sql_cmd_common_alter_table()
{}
- virtual ~Alter_table_common()
+ virtual ~Sql_cmd_common_alter_table()
{}
+ virtual enum_sql_command sql_command_code() const
+ {
+ return SQLCOM_ALTER_TABLE;
+ }
};
/**
- Alter_table_statement represents the generic ALTER TABLE statement.
+ Sql_cmd_alter_table represents the generic ALTER TABLE statement.
@todo move Alter_info and other ALTER specific structures from Lex here.
*/
-class Alter_table_statement : public Alter_table_common
+class Sql_cmd_alter_table : public Sql_cmd_common_alter_table
{
public:
/**
Constructor, used to represent a ALTER TABLE statement.
- @param lex the LEX structure for this statement.
*/
- Alter_table_statement(LEX *lex)
- : Alter_table_common(lex)
+ Sql_cmd_alter_table()
{}
- ~Alter_table_statement()
+ ~Sql_cmd_alter_table()
{}
- /**
- Execute a ALTER TABLE statement at runtime.
- @param thd the current thread.
- @return false on success.
- */
bool execute(THD *thd);
};
+
+/**
+ Sql_cmd_discard_import_tablespace represents ALTER TABLE
+ IMPORT/DISCARD TABLESPACE statements.
+*/
+class Sql_cmd_discard_import_tablespace : public Sql_cmd_common_alter_table
+{
+public:
+ enum enum_tablespace_op_type
+ {
+ DISCARD_TABLESPACE, IMPORT_TABLESPACE
+ };
+
+ Sql_cmd_discard_import_tablespace(enum_tablespace_op_type tablespace_op_arg)
+ : m_tablespace_op(tablespace_op_arg)
+ {}
+
+ bool execute(THD *thd);
+
+private:
+ const enum_tablespace_op_type m_tablespace_op;
+};
+
#endif
diff --git a/sql/sql_analyse.cc b/sql/sql_analyse.cc
index f287bf47e81..be35340df27 100644
--- a/sql/sql_analyse.cc
+++ b/sql/sql_analyse.cc
@@ -282,16 +282,16 @@ bool get_ev_num_info(EV_NUM_INFO *ev_info, NUM_INFO *info, const char *num)
{
if (((longlong) info->ullval) < 0)
return 0; // Impossible to store as a negative number
- ev_info->llval = -(longlong) max((ulonglong) -ev_info->llval,
+ ev_info->llval = -(longlong) MY_MAX((ulonglong) -ev_info->llval,
info->ullval);
- ev_info->min_dval = (double) -max(-ev_info->min_dval, info->dval);
+ ev_info->min_dval = (double) -MY_MAX(-ev_info->min_dval, info->dval);
}
else // ulonglong is as big as bigint in MySQL
{
if ((check_ulonglong(num, info->integers) == DECIMAL_NUM))
return 0;
- ev_info->ullval = (ulonglong) max(ev_info->ullval, info->ullval);
- ev_info->max_dval = (double) max(ev_info->max_dval, info->dval);
+ ev_info->ullval = (ulonglong) MY_MAX(ev_info->ullval, info->ullval);
+ ev_info->max_dval = (double) MY_MAX(ev_info->max_dval, info->dval);
}
return 1;
} // get_ev_num_info
@@ -1040,7 +1040,7 @@ String *field_decimal::avg(String *s, ha_rows rows)
my_decimal_div(E_DEC_FATAL_ERROR, &avg_val, sum+cur_sum, &num, prec_increment);
/* TODO remove this after decimal_div returns proper frac */
my_decimal_round(E_DEC_FATAL_ERROR, &avg_val,
- min(sum[cur_sum].frac + prec_increment, DECIMAL_MAX_SCALE),
+ MY_MIN(sum[cur_sum].frac + prec_increment, DECIMAL_MAX_SCALE),
FALSE,&rounded_avg);
my_decimal2string(E_DEC_FATAL_ERROR, &rounded_avg, 0, 0, '0', s);
return s;
@@ -1065,7 +1065,7 @@ String *field_decimal::std(String *s, ha_rows rows)
my_decimal_div(E_DEC_FATAL_ERROR, &tmp, &sum2, &num, prec_increment);
my_decimal2double(E_DEC_FATAL_ERROR, &tmp, &std_sqr);
s->set_real(((double) std_sqr <= 0.0 ? 0.0 : sqrt(std_sqr)),
- min(item->decimals + prec_increment, NOT_FIXED_DEC), my_thd_charset);
+ MY_MIN(item->decimals + prec_increment, NOT_FIXED_DEC), my_thd_charset);
return s;
}
@@ -1182,7 +1182,7 @@ bool analyse::change_columns(List<Item> &field_list)
func_items[8] = new Item_proc_string("Std", 255);
func_items[8]->maybe_null = 1;
func_items[9] = new Item_proc_string("Optimal_fieldtype",
- max(64, output_str_length));
+ MY_MAX(64, output_str_length));
for (uint i = 0; i < array_elements(func_items); i++)
field_list.push_back(func_items[i]);
diff --git a/sql/sql_array.h b/sql/sql_array.h
index 43ca4ef4219..697819787f2 100644
--- a/sql/sql_array.h
+++ b/sql/sql_array.h
@@ -92,6 +92,8 @@ private:
/*
A typesafe wrapper around DYNAMIC_ARRAY
+
+ TODO: Change creator to take a THREAD_SPECIFIC option.
*/
template <class Elem> class Dynamic_array
@@ -100,125 +102,113 @@ template <class Elem> class Dynamic_array
public:
Dynamic_array(uint prealloc=16, uint increment=16)
{
+ init(prealloc, increment);
+ }
+
+ void init(uint prealloc=16, uint increment=16)
+ {
my_init_dynamic_array(&array, sizeof(Elem), prealloc, increment,
- MYF(MY_THREAD_SPECIFIC));
+ MYF(0));
}
+ /**
+ @note Though formally this could be declared "const" it would be
+ misleading as it returns a non-const pointer to array's data.
+ */
Elem& at(size_t idx)
{
return *(((Elem*)array.buffer) + idx);
}
-
- Elem *front()
+ /// Const variant of at(), which cannot change data
+ const Elem& at(size_t idx) const
{
- return (Elem*)array.buffer;
- }
-
- Elem *back()
- {
- return ((Elem*)array.buffer) + array.elements;
+ return *(((Elem*)array.buffer) + idx);
}
- bool append(Elem &el)
+ /// @returns pointer to first element; undefined behaviour if array is empty
+ Elem *front()
{
- return (insert_dynamic(&array, (uchar*)&el));
+ DBUG_ASSERT(array.elements >= 1);
+ return (Elem*)array.buffer;
}
- bool append_val(Elem el)
+ /// @returns pointer to first element; undefined behaviour if array is empty
+ const Elem *front() const
{
- return (insert_dynamic(&array, (uchar*)&el));
+ DBUG_ASSERT(array.elements >= 1);
+ return (const Elem*)array.buffer;
}
- size_t elements()
+ /// @returns pointer to last element; undefined behaviour if array is empty.
+ Elem *back()
{
- return array.elements;
+ DBUG_ASSERT(array.elements >= 1);
+ return ((Elem*)array.buffer) + (array.elements - 1);
}
- void set_elements(size_t n)
+ /// @returns pointer to last element; undefined behaviour if array is empty.
+ const Elem *back() const
{
- array.elements= n;
+ DBUG_ASSERT(array.elements >= 1);
+ return ((const Elem*)array.buffer) + (array.elements - 1);
}
- ~Dynamic_array()
+ /**
+ @retval false ok
+ @retval true OOM, @c my_error() has been called.
+ */
+ bool append(const Elem &el)
{
- delete_dynamic(&array);
+ return insert_dynamic(&array, &el);
}
- typedef int (*CMP_FUNC)(const Elem *el1, const Elem *el2);
-
- void sort(CMP_FUNC cmp_func)
+ bool append_val(Elem el)
{
- my_qsort(array.buffer, array.elements, sizeof(Elem), (qsort_cmp)cmp_func);
+ return (insert_dynamic(&array, (uchar*)&el));
}
-};
-/*
- Array of pointers to Elem that uses memory from MEM_ROOT
-
- MEM_ROOT has no realloc() so this is supposed to be used for cases when
- reallocations are rare.
-*/
-
-template <class Elem> class Array
-{
- enum {alloc_increment = 16};
- Elem **buffer;
- uint n_elements, max_element;
-public:
- Array(MEM_ROOT *mem_root, uint prealloc=16)
+ /// Pops the last element. Does nothing if array is empty.
+ Elem& pop()
{
- buffer= (Elem**)alloc_root(mem_root, prealloc * sizeof(Elem**));
- max_element = buffer? prealloc : 0;
- n_elements= 0;
+ return *((Elem*)pop_dynamic(&array));
}
- Elem& at(int idx)
+ void del(uint idx)
{
- return *(((Elem*)buffer) + idx);
+ delete_dynamic_element(&array, idx);
}
- Elem **front()
+ size_t elements() const
{
- return buffer;
+ return array.elements;
}
- Elem **back()
+ void elements(size_t num_elements)
{
- return buffer + n_elements;
+ DBUG_ASSERT(num_elements <= array.max_element);
+ array.elements= num_elements;
}
- bool append(MEM_ROOT *mem_root, Elem *el)
+ void clear()
{
- if (n_elements == max_element)
- {
- Elem **newbuf;
- if (!(newbuf= (Elem**)alloc_root(mem_root, (n_elements + alloc_increment)*
- sizeof(Elem**))))
- {
- return FALSE;
- }
- memcpy(newbuf, buffer, n_elements*sizeof(Elem*));
- buffer= newbuf;
- }
- buffer[n_elements++]= el;
- return FALSE;
+ elements(0);
}
- int elements()
+ void set(uint idx, const Elem &el)
{
- return n_elements;
+ set_dynamic(&array, &el, idx);
}
- void clear()
+ ~Dynamic_array()
{
- n_elements= 0;
+ delete_dynamic(&array);
}
- typedef int (*CMP_FUNC)(Elem * const *el1, Elem *const *el2);
+ typedef int (*CMP_FUNC)(const Elem *el1, const Elem *el2);
void sort(CMP_FUNC cmp_func)
{
- my_qsort(buffer, n_elements, sizeof(Elem*), (qsort_cmp)cmp_func);
+ my_qsort(array.buffer, array.elements, sizeof(Elem), (qsort_cmp)cmp_func);
}
};
diff --git a/sql/sql_audit.h b/sql/sql_audit.h
index 22fdd221e62..1c7d6a1c224 100644
--- a/sql/sql_audit.h
+++ b/sql/sql_audit.h
@@ -134,7 +134,7 @@ void mysql_audit_general(THD *thd, uint event_subtype,
query= thd->query_string;
user= user_buff;
userlen= make_user_name(thd, user_buff);
- rows= thd->warning_info->current_row_for_warning();
+ rows= thd->get_stmt_da()->current_row_for_warning();
}
else
{
@@ -155,9 +155,10 @@ void mysql_audit_notify_connection_connect(THD *thd)
if (mysql_audit_connection_enabled())
{
const Security_context *sctx= thd->security_ctx;
+ Diagnostics_area *da= thd->get_stmt_da();
mysql_audit_notify(thd, MYSQL_AUDIT_CONNECTION_CLASS,
MYSQL_AUDIT_CONNECTION_CONNECT,
- thd->stmt_da->is_error() ? thd->stmt_da->sql_errno() : 0,
+ da->is_error() ? da->sql_errno() : 0,
thd->thread_id,
sctx->user, sctx->user ? strlen(sctx->user) : 0,
sctx->priv_user, strlen(sctx->priv_user),
@@ -188,9 +189,10 @@ void mysql_audit_notify_connection_change_user(THD *thd)
if (mysql_audit_connection_enabled())
{
const Security_context *sctx= thd->security_ctx;
+ Diagnostics_area *da= thd->get_stmt_da();
mysql_audit_notify(thd, MYSQL_AUDIT_CONNECTION_CLASS,
MYSQL_AUDIT_CONNECTION_CHANGE_USER,
- thd->stmt_da->is_error() ? thd->stmt_da->sql_errno() : 0,
+ da->is_error() ? da->sql_errno() : 0,
thd->thread_id,
sctx->user, sctx->user ? strlen(sctx->user) : 0,
sctx->priv_user, strlen(sctx->priv_user),
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index e384fbcc32d..5baf05c7f38 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -66,9 +66,9 @@ bool
No_such_table_error_handler::handle_condition(THD *,
uint sql_errno,
const char*,
- MYSQL_ERROR::enum_warning_level level,
+ Sql_condition::enum_warning_level level,
const char*,
- MYSQL_ERROR ** cond_hdl)
+ Sql_condition ** cond_hdl)
{
*cond_hdl= NULL;
if (sql_errno == ER_NO_SUCH_TABLE || sql_errno == ER_NO_SUCH_TABLE_IN_ENGINE)
@@ -77,7 +77,7 @@ No_such_table_error_handler::handle_condition(THD *,
return TRUE;
}
- if (level == MYSQL_ERROR::WARN_LEVEL_ERROR)
+ if (level == Sql_condition::WARN_LEVEL_ERROR)
m_unhandled_errors++;
return FALSE;
}
@@ -110,9 +110,9 @@ public:
bool handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
- MYSQL_ERROR::enum_warning_level level,
+ Sql_condition::enum_warning_level level,
const char* msg,
- MYSQL_ERROR ** cond_hdl);
+ Sql_condition ** cond_hdl);
/**
Returns TRUE if there were ER_NO_SUCH_/WRONG_MRG_TABLE and there
@@ -140,9 +140,9 @@ bool
Repair_mrg_table_error_handler::handle_condition(THD *,
uint sql_errno,
const char*,
- MYSQL_ERROR::enum_warning_level level,
+ Sql_condition::enum_warning_level level,
const char*,
- MYSQL_ERROR ** cond_hdl)
+ Sql_condition ** cond_hdl)
{
*cond_hdl= NULL;
if (sql_errno == ER_NO_SUCH_TABLE ||
@@ -164,10 +164,7 @@ Repair_mrg_table_error_handler::handle_condition(THD *,
*/
/**
- Protects table_def_hash, used and unused lists in the
- TABLE_SHARE object, LRU lists of used TABLEs and used
- TABLE_SHAREs, refresh_version and the table id counter.
- In particular:
+ LOCK_open protects the following variables/objects:
end_of_unused_share
last_table_id
@@ -204,11 +201,8 @@ static void init_tdc_psi_keys(void)
const char *category= "sql";
int count;
- if (PSI_server == NULL)
- return;
-
count= array_elements(all_tdc_mutexes);
- PSI_server->register_mutex(category, all_tdc_mutexes, count);
+ mysql_mutex_register(category, all_tdc_mutexes, count);
}
#endif /* HAVE_PSI_INTERFACE */
@@ -275,7 +269,7 @@ static void check_unused(THD *thd)
{
share= (TABLE_SHARE*) my_hash_element(&table_def_cache, idx);
- I_P_List_iterator<TABLE, TABLE_share> it(share->free_tables);
+ TABLE_SHARE::TABLE_list::Iterator it(share->free_tables);
while ((entry= it++))
{
/*
@@ -285,11 +279,13 @@ static void check_unused(THD *thd)
/* Merge children should be detached from a merge parent */
if (entry->in_use)
{
- DBUG_PRINT("error",("Used table is in share's list of unused tables")); /* purecov: inspected */
+ /* purecov: begin inspected */
+ DBUG_PRINT("error",("Used table is in share's list of unused tables"));
+ /* purecov: end */
}
/* extra() may assume that in_use is set */
entry->in_use= thd;
- DBUG_ASSERT(! entry->file->extra(HA_EXTRA_IS_ATTACHED_CHILDREN));
+ DBUG_ASSERT(!thd || ! entry->file->extra(HA_EXTRA_IS_ATTACHED_CHILDREN));
entry->in_use= 0;
count--;
@@ -315,18 +311,16 @@ static void check_unused(THD *thd)
#define check_unused(A)
#endif
+/**
+ Create a table cache/table definition cache key
-/*
- Create a table cache key
-
- SYNOPSIS
- create_tmp_table_def_key()
- thd Thread handler
- key Create key here (must be of size MAX_DBKEY_LENGTH)
- db Database name.
- table_name Table name.
+ @param thd Thread context
+ @param key Buffer for the key to be created (must be of
+ size MAX_DBKEY_LENGTH).
+ @param db_name Database name.
+ @param table_name Table name.
- IMPLEMENTATION
+ @note
The table cache_key is created from:
db_name + \0
table_name + \0
@@ -337,8 +331,7 @@ static void check_unused(THD *thd)
4 bytes for master thread id
4 bytes pseudo thread id
- RETURN
- Length of key
+ @return Length of key.
*/
uint create_tmp_table_def_key(THD *thd, char *key,
@@ -352,9 +345,46 @@ uint create_tmp_table_def_key(THD *thd, char *key,
}
+/**
+ Get table cache key for a table list element.
+
+ @param table_list[in] Table list element.
+ @param key[out] On return points to table cache key for the table.
+
+ @note Unlike create_table_def_key() call this function doesn't construct
+ key in a buffer provided by caller. Instead it relies on the fact
+ that table list element for which key is requested has properly
+ initialized MDL_request object and the fact that table definition
+ cache key is suffix of key used in MDL subsystem. So to get table
+ definition key it simply needs to return pointer to appropriate
+ part of MDL_key object nested in this table list element.
+ Indeed, this means that lifetime of key produced by this call is
+ limited by the lifetime of table list element which it got as
+ parameter.
+
+ @return Length of key.
+*/
+
+uint get_table_def_key(const TABLE_LIST *table_list, const char **key)
+{
+ /*
+ This call relies on the fact that TABLE_LIST::mdl_request::key object
+ is properly initialized, so table definition cache can be produced
+ from key used by MDL subsystem.
+ */
+ DBUG_ASSERT(!strcmp(table_list->get_db_name(),
+ table_list->mdl_request.key.db_name()) &&
+ !strcmp(table_list->get_table_name(),
+ table_list->mdl_request.key.name()));
+
+ *key= (const char*)table_list->mdl_request.key.ptr() + 1;
+ return table_list->mdl_request.key.length() - 1;
+}
+
+
/*****************************************************************************
- Functions to handle table definition cach (TABLE_SHARE)
+ Functions to handle table definition cache (TABLE_SHARE)
*****************************************************************************/
extern "C" uchar *table_def_key(const uchar *record, size_t *length,
@@ -399,7 +429,6 @@ bool table_def_init(void)
oldest_unused_share= &end_of_unused_share;
end_of_unused_share.prev= &oldest_unused_share;
-
return my_hash_init(&table_def_cache, &my_charset_bin, table_def_size,
0, 0, table_def_key,
(my_hash_free_key) table_def_free_entry, 0) != 0;
@@ -591,7 +620,7 @@ static void table_def_unuse_table(TABLE *table)
*/
TABLE_SHARE *get_table_share(THD *thd, const char *db, const char *table_name,
- char *key, uint key_length, uint flags,
+ const char *key, uint key_length, uint flags,
my_hash_value_type hash_value)
{
TABLE_SHARE *share;
@@ -630,13 +659,13 @@ TABLE_SHARE *get_table_share(THD *thd, const char *db, const char *table_name,
}
share->ref_count++; // Mark in use
share->error= OPEN_FRM_OPEN_ERROR;
- mysql_mutex_lock(&share->LOCK_ha_data);
+ mysql_mutex_lock(&share->LOCK_share);
mysql_mutex_unlock(&LOCK_open);
/* note that get_table_share() *always* uses discovery */
open_table_def(thd, share, flags | GTS_USE_DISCOVERY);
- mysql_mutex_unlock(&share->LOCK_ha_data);
+ mysql_mutex_unlock(&share->LOCK_share);
mysql_mutex_lock(&LOCK_open);
if (share->error)
@@ -658,8 +687,8 @@ TABLE_SHARE *get_table_share(THD *thd, const char *db, const char *table_name,
DBUG_ASSERT(!(flags & GTS_FORCE_DISCOVERY));
/* make sure that open_table_def() for this share is not running */
- mysql_mutex_lock(&share->LOCK_ha_data);
- mysql_mutex_unlock(&share->LOCK_ha_data);
+ mysql_mutex_lock(&share->LOCK_share);
+ mysql_mutex_unlock(&share->LOCK_share);
/*
We found an existing table definition. Return it if we didn't get
@@ -792,7 +821,7 @@ void release_table_share(TABLE_SHARE *share)
TABLE_SHARE *get_cached_table_share(const char *db, const char *table_name)
{
- char key[SAFE_NAME_LEN*2+2];
+ char key[MAX_DBKEY_LENGTH];
uint key_length;
mysql_mutex_assert_owner(&LOCK_open);
@@ -861,7 +890,7 @@ OPEN_TABLE_LIST *list_open_tables(THD *thd, const char *db, const char *wild)
share->db.str)+1,
share->table_name.str);
(*start_list)->in_use= 0;
- I_P_List_iterator<TABLE, TABLE_share> it(share->used_tables);
+ TABLE_SHARE::TABLE_list::Iterator it(share->used_tables);
while (it++)
++(*start_list)->in_use;
(*start_list)->locked= 0; /* Obsolete. */
@@ -943,7 +972,7 @@ void free_io_cache(TABLE *table)
static void kill_delayed_threads_for_table(TABLE_SHARE *share)
{
- I_P_List_iterator<TABLE, TABLE_share> it(share->used_tables);
+ TABLE_SHARE::TABLE_list::Iterator it(share->used_tables);
TABLE *tab;
mysql_mutex_assert_owner(&LOCK_open);
@@ -1084,7 +1113,7 @@ bool close_cached_tables(THD *thd, TABLE_LIST *tables,
result= TRUE;
goto err_with_reopen;
}
- close_all_tables_for_name(thd, table->s, HA_EXTRA_NOT_USED);
+ close_all_tables_for_name(thd, table->s, HA_EXTRA_NOT_USED, NULL);
}
}
@@ -1157,12 +1186,12 @@ err_with_reopen:
*/
thd->locked_tables_list.reopen_tables(thd);
/*
- Since downgrade_exclusive_lock() won't do anything with shared
+ Since downgrade_lock() won't do anything with shared
metadata lock it is much simpler to go through all open tables rather
than picking only those tables that were flushed.
*/
for (TABLE *tab= thd->open_tables; tab; tab= tab->next)
- tab->mdl_ticket->downgrade_exclusive_lock(MDL_SHARED_NO_READ_WRITE);
+ tab->mdl_ticket->downgrade_lock(MDL_SHARED_NO_READ_WRITE);
}
DBUG_RETURN(result);
}
@@ -1363,7 +1392,8 @@ static void close_open_tables(THD *thd)
void
close_all_tables_for_name(THD *thd, TABLE_SHARE *share,
- ha_extra_function extra)
+ ha_extra_function extra,
+ TABLE *skip_table)
{
char key[MAX_DBKEY_LENGTH];
uint key_length= share->table_cache_key.length;
@@ -1378,7 +1408,8 @@ close_all_tables_for_name(THD *thd, TABLE_SHARE *share,
TABLE *table= *prev;
if (table->s->table_cache_key.length == key_length &&
- !memcmp(table->s->table_cache_key.str, key, key_length))
+ !memcmp(table->s->table_cache_key.str, key, key_length) &&
+ table != skip_table)
{
thd->locked_tables_list.unlink_from_list(thd,
table->pos_in_locked_tables,
@@ -1404,9 +1435,12 @@ close_all_tables_for_name(THD *thd, TABLE_SHARE *share,
prev= &table->next;
}
}
- /* Remove the table share from the cache. */
- tdc_remove_table(thd, TDC_RT_REMOVE_ALL, db, table_name,
- FALSE);
+ if (skip_table == NULL)
+ {
+ /* Remove the table share from the cache. */
+ tdc_remove_table(thd, TDC_RT_REMOVE_ALL, db, table_name,
+ FALSE);
+ }
}
@@ -1556,9 +1590,8 @@ void close_thread_tables(THD *thd)
/* move one table to free list */
-bool close_thread_table(THD *thd, TABLE **table_ptr)
+void close_thread_table(THD *thd, TABLE **table_ptr)
{
- bool found_old_table= 0;
TABLE *table= *table_ptr;
DBUG_ENTER("close_thread_table");
DBUG_PRINT("tcache", ("table: '%s'.'%s' 0x%lx", table->s->db.str,
@@ -1604,10 +1637,7 @@ bool close_thread_table(THD *thd, TABLE **table_ptr)
if (table->s->has_old_version() || table->needs_reopen() ||
table_def_shutdown_in_progress)
- {
free_cache_entry(table);
- found_old_table= 1;
- }
else
{
DBUG_ASSERT(table->file);
@@ -1620,7 +1650,7 @@ bool close_thread_table(THD *thd, TABLE **table_ptr)
free_cache_entry(unused_tables);
}
mysql_mutex_unlock(&LOCK_open);
- DBUG_RETURN(found_old_table);
+ DBUG_VOID_RETURN;
}
@@ -1763,7 +1793,7 @@ bool close_temporary_tables(THD *thd)
qinfo.db_len= db.length();
thd->variables.character_set_client= cs_save;
- thd->stmt_da->can_overwrite_status= TRUE;
+ thd->get_stmt_da()->set_overwrite_status(true);
if ((error= (mysql_bin_log.write(&qinfo) || error)))
{
/*
@@ -1781,7 +1811,7 @@ bool close_temporary_tables(THD *thd)
sql_print_error("Failed to write the DROP statement for "
"temporary tables to binary log");
}
- thd->stmt_da->can_overwrite_status= FALSE;
+ thd->get_stmt_da()->set_overwrite_status(false);
thd->variables.pseudo_thread_id= save_pseudo_thread_id;
thd->thread_specific_used= save_thread_specific_used;
@@ -2067,12 +2097,9 @@ void update_non_unique_table_error(TABLE_LIST *update,
TABLE *find_temporary_table(THD *thd, const char *db, const char *table_name)
{
- TABLE_LIST tl;
-
- tl.db= (char*) db;
- tl.table_name= (char*) table_name;
-
- return find_temporary_table(thd, &tl);
+ char key[MAX_DBKEY_LENGTH];
+ uint key_length= create_tmp_table_def_key(thd, key, db, table_name);
+ return find_temporary_table(thd, key, key_length);
}
@@ -2085,10 +2112,26 @@ TABLE *find_temporary_table(THD *thd, const char *db, const char *table_name)
TABLE *find_temporary_table(THD *thd, const TABLE_LIST *tl)
{
- char key[MAX_DBKEY_LENGTH];
- uint key_length= create_tmp_table_def_key(thd, key, tl->db, tl->table_name);
+ const char *key;
+ uint key_length;
+ char key_suffix[TMP_TABLE_KEY_EXTRA];
+ TABLE *table;
- return find_temporary_table(thd, key, key_length);
+ key_length= get_table_def_key(tl, &key);
+
+ int4store(key_suffix, thd->variables.server_id);
+ int4store(key_suffix + 4, thd->variables.pseudo_thread_id);
+
+ for (table= thd->temporary_tables; table; table= table->next)
+ {
+ if ((table->s->table_cache_key.length == key_length +
+ TMP_TABLE_KEY_EXTRA) &&
+ !memcmp(table->s->table_cache_key.str, key, key_length) &&
+ !memcmp(table->s->table_cache_key.str + key_length, key_suffix,
+ TMP_TABLE_KEY_EXTRA))
+ return table;
+ }
+ return NULL;
}
@@ -2148,14 +2191,15 @@ TABLE *find_temporary_table(THD *thd,
int drop_temporary_table(THD *thd, TABLE_LIST *table_list, bool *is_trans)
{
- TABLE *table;
DBUG_ENTER("drop_temporary_table");
DBUG_PRINT("tmptable", ("closing table: '%s'.'%s'",
table_list->db, table_list->table_name));
- if (!(table= find_temporary_table(thd, table_list)))
+ if (!is_temporary_table(table_list))
DBUG_RETURN(1);
+ TABLE *table= table_list->table;
+
/* Table might be in use by some outer statement. */
if (table->query_id && table->query_id != thd->query_id)
{
@@ -2163,8 +2207,7 @@ int drop_temporary_table(THD *thd, TABLE_LIST *table_list, bool *is_trans)
DBUG_RETURN(-1);
}
- if (is_trans != NULL)
- *is_trans= table->file->has_transactions();
+ *is_trans= table->file->has_transactions();
/*
If LOCK TABLES list is not empty and contains this table,
@@ -2172,6 +2215,7 @@ int drop_temporary_table(THD *thd, TABLE_LIST *table_list, bool *is_trans)
*/
mysql_lock_remove(thd, thd->lock, table);
close_temporary_table(thd, table, 1, 1);
+ table_list->table= NULL;
DBUG_RETURN(0);
}
@@ -2232,13 +2276,6 @@ void close_temporary(TABLE *table, bool free_share, bool delete_table)
DBUG_PRINT("tmptable", ("closing table: '%s'.'%s'",
table->s->db.str, table->s->table_name.str));
- /* in_use is not set for replication temporary tables during shutdown */
- if (table->in_use)
- {
- table->file->update_global_table_stats();
- table->file->update_global_index_stats();
- }
-
free_io_cache(table);
closefrm(table, 0);
if (delete_table)
@@ -2304,8 +2341,9 @@ bool wait_while_table_is_used(THD *thd, TABLE *table,
table->s->table_name.str, (ulong) table->s,
table->db_stat, table->s->version));
- if (thd->mdl_context.upgrade_shared_lock_to_exclusive(
- table->mdl_ticket, thd->variables.lock_wait_timeout))
+ if (thd->mdl_context.upgrade_shared_lock(
+ table->mdl_ticket, MDL_EXCLUSIVE,
+ thd->variables.lock_wait_timeout))
DBUG_RETURN(TRUE);
tdc_remove_table(thd, remove_type,
@@ -2354,8 +2392,8 @@ void drop_open_table(THD *thd, TABLE *table, const char *db_name,
tdc_remove_table(thd, TDC_RT_REMOVE_ALL, db_name, table_name,
FALSE);
/* Remove the table from the storage engine and rm the .frm. */
- quick_rm_table(table_type, db_name, table_name, 0);
- }
+ quick_rm_table(thd, table_type, db_name, table_name, 0);
+ }
DBUG_VOID_RETURN;
}
@@ -2378,9 +2416,9 @@ public:
virtual bool handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
- MYSQL_ERROR::enum_warning_level level,
+ Sql_condition::enum_warning_level level,
const char* msg,
- MYSQL_ERROR ** cond_hdl);
+ Sql_condition ** cond_hdl);
private:
/** Open table context to be used for back-off request. */
@@ -2397,9 +2435,9 @@ private:
bool MDL_deadlock_handler::handle_condition(THD *,
uint sql_errno,
const char*,
- MYSQL_ERROR::enum_warning_level,
+ Sql_condition::enum_warning_level,
const char*,
- MYSQL_ERROR ** cond_hdl)
+ Sql_condition ** cond_hdl)
{
*cond_hdl= NULL;
if (! m_is_active && sql_errno == ER_LOCK_DEADLOCK)
@@ -2584,51 +2622,46 @@ tdc_wait_for_old_version(THD *thd, const char *db, const char *table_name,
}
-/*
- Open a table.
-
- SYNOPSIS
- open_table()
- thd Thread context.
- table_list Open first table in list.
- action INOUT Pointer to variable of enum_open_table_action type
- which will be set according to action which is
- required to remedy problem appeared during attempt
- to open table.
- flags Bitmap of flags to modify how open works:
- MYSQL_OPEN_IGNORE_FLUSH - Open table even if
- someone has done a flush or there is a pending
- exclusive metadata lock requests against it
- (i.e. request high priority metadata lock).
- No version number checking is done.
- MYSQL_OPEN_TEMPORARY_ONLY - Open only temporary
- table not the base table or view.
- MYSQL_OPEN_TAKE_UPGRADABLE_MDL - Obtain upgradable
- metadata lock for tables on which we are going to
- take some kind of write table-level lock.
-
- IMPLEMENTATION
- Uses a cache of open tables to find a table not in use.
-
- If TABLE_LIST::open_strategy is set to OPEN_IF_EXISTS, the table is opened
- only if it exists. If the open strategy is OPEN_STUB, the underlying table
- is never opened. In both cases, metadata locks are always taken according
- to the lock strategy.
-
- RETURN
- TRUE Open failed. "action" parameter may contain type of action
- needed to remedy problem before retrying again.
- FALSE Success. Members of TABLE_LIST structure are filled properly (e.g.
- TABLE_LIST::table is set for real tables and TABLE_LIST::view is
- set for views).
+/**
+ Open a base table.
+
+ @param thd Thread context.
+ @param table_list Open first table in list.
+ @param mem_root Temporary MEM_ROOT to be used for
+ parsing .FRMs for views.
+ @param ot_ctx Context with flags which modify how open works
+ and which is used to recover from a failed
+ open_table() attempt.
+ Some examples of flags:
+ MYSQL_OPEN_IGNORE_FLUSH - Open table even if
+ someone has done a flush. No version number
+ checking is done.
+ MYSQL_OPEN_HAS_MDL_LOCK - instead of acquiring
+ metadata locks rely on that caller already has
+ appropriate ones.
+
+ Uses a cache of open tables to find a TABLE instance not in use.
+
+ If TABLE_LIST::open_strategy is set to OPEN_IF_EXISTS, the table is
+ opened only if it exists. If the open strategy is OPEN_STUB, the
+ underlying table is never opened. In both cases, metadata locks are
+ always taken according to the lock strategy.
+
+ The function used to open temporary tables, but now it opens base tables
+ only.
+
+ @retval TRUE Open failed. "action" parameter may contain type of action
+ needed to remedy problem before retrying again.
+ @retval FALSE Success. Members of TABLE_LIST structure are filled properly
+ (e.g. TABLE_LIST::table is set for real tables and
+ TABLE_LIST::view is set for views).
*/
-
bool open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
Open_table_context *ot_ctx)
{
reg1 TABLE *table;
- char key[MAX_DBKEY_LENGTH];
+ const char *key;
uint key_length;
char *alias= table_list->alias;
uint flags= ot_ctx->get_flags();
@@ -2638,74 +2671,42 @@ bool open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
uint gts_flags;
DBUG_ENTER("open_table");
+ /*
+ The table must not be opened already. The table can be pre-opened for
+ some statements if it is a temporary table.
+
+ open_temporary_table() must be used to open temporary tables.
+ */
+ DBUG_ASSERT(!table_list->table);
+
/* an open table operation needs a lot of the stack space */
if (check_stack_overrun(thd, STACK_MIN_SIZE_FOR_OPEN, (uchar *)&alias))
DBUG_RETURN(TRUE);
- if (thd->killed)
+ if (!(flags & MYSQL_OPEN_IGNORE_KILLED) && thd->killed)
DBUG_RETURN(TRUE);
- key_length= create_tmp_table_def_key(thd, key, table_list->db,
- table_list->table_name) -
- TMP_TABLE_KEY_EXTRA;
-
/*
- Unless requested otherwise, try to resolve this table in the list
- of temporary tables of this thread. In MySQL temporary tables
- are always thread-local and "shadow" possible base tables with the
- same name. This block implements the behaviour.
- TODO: move this block into a separate function.
+ Check if we're trying to take a write lock in a read only transaction.
+
+ Note that we allow write locks on log tables as otherwise logging
+ to general/slow log would be disabled in read only transactions.
*/
- if (table_list->open_type != OT_BASE_ONLY &&
- ! (flags & MYSQL_OPEN_SKIP_TEMPORARY))
+ if (table_list->mdl_request.type >= MDL_SHARED_WRITE &&
+ thd->tx_read_only &&
+ !(flags & (MYSQL_LOCK_LOG_TABLE | MYSQL_OPEN_HAS_MDL_LOCK)))
{
- for (table= thd->temporary_tables; table ; table=table->next)
- {
- if (table->s->table_cache_key.length == key_length +
- TMP_TABLE_KEY_EXTRA &&
- !memcmp(table->s->table_cache_key.str, key,
- key_length + TMP_TABLE_KEY_EXTRA))
- {
- /*
- We're trying to use the same temporary table twice in a query.
- Right now we don't support this because a temporary table
- is always represented by only one TABLE object in THD, and
- it can not be cloned. Emit an error for an unsupported behaviour.
- */
- if (table->query_id)
- {
- DBUG_PRINT("error",
- ("query_id: %lu server_id: %u pseudo_thread_id: %lu",
- (ulong) table->query_id, (uint) thd->variables.server_id,
- (ulong) thd->variables.pseudo_thread_id));
- my_error(ER_CANT_REOPEN_TABLE, MYF(0), table->alias.c_ptr());
- DBUG_RETURN(TRUE);
- }
- table->query_id= thd->query_id;
- thd->thread_specific_used= TRUE;
- DBUG_PRINT("info",("Using temporary table"));
- goto reset;
- }
- }
+ my_error(ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION, MYF(0));
+ DBUG_RETURN(true);
}
- if (table_list->open_type == OT_TEMPORARY_ONLY ||
- (flags & MYSQL_OPEN_TEMPORARY_ONLY))
- {
- if (table_list->open_strategy == TABLE_LIST::OPEN_NORMAL)
- {
- my_error(ER_NO_SUCH_TABLE, MYF(0), table_list->db, table_list->table_name);
- DBUG_RETURN(TRUE);
- }
- else
- DBUG_RETURN(FALSE);
- }
+ key_length= get_table_def_key(table_list, &key);
/*
- The table is not temporary - if we're in pre-locked or LOCK TABLES
- mode, let's try to find the requested table in the list of pre-opened
- and locked tables. If the table is not there, return an error - we can't
- open not pre-opened tables in pre-locked/LOCK TABLES mode.
+ If we're in pre-locked or LOCK TABLES mode, let's try to find the
+ requested table in the list of pre-opened and locked tables. If the
+ table is not there, return an error - we can't open not pre-opened
+ tables in pre-locked/LOCK TABLES mode.
TODO: move this block into a separate function.
*/
if (thd->locked_tables_mode &&
@@ -2791,7 +2792,7 @@ bool open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
if (dd_frm_is_view(thd, path))
{
if (!tdc_open_view(thd, table_list, alias, key, key_length,
- mem_root, 0))
+ mem_root, CHECK_METADATA_VERSION))
{
DBUG_ASSERT(table_list->view != 0);
DBUG_RETURN(FALSE); // VIEW
@@ -2800,7 +2801,7 @@ bool open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
}
/*
No table in the locked tables list. In case of explicit LOCK TABLES
- this can happen if a user did not include the able into the list.
+ this can happen if a user did not include the table into the list.
In case of pre-locked mode locked tables list is generated automatically,
so we may only end up here if the table did not exist when
locked tables list was created.
@@ -2820,19 +2821,6 @@ bool open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
if (! (flags & MYSQL_OPEN_HAS_MDL_LOCK))
{
/*
- Check if we're trying to take a write lock in a read only transaction.
- */
- if (table_list->mdl_request.type >= MDL_SHARED_WRITE &&
- thd->tx_read_only &&
- !(flags & (MYSQL_OPEN_HAS_MDL_LOCK |
- MYSQL_LOCK_LOG_TABLE |
- MYSQL_LOCK_IGNORE_GLOBAL_READ_ONLY)))
- {
- my_error(ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION, MYF(0));
- DBUG_RETURN(true);
- }
-
- /*
We are not under LOCK TABLES and going to acquire write-lock/
modify the base table. We need to acquire protection against
global read lock until end of this statement in order to have
@@ -3013,12 +3001,12 @@ retry_share:
Release our reference to share, wait until old version of
share goes away and then try to get new version of table share.
*/
- MDL_deadlock_handler mdl_deadlock_handler(ot_ctx);
- bool wait_result;
-
release_table_share(share);
mysql_mutex_unlock(&LOCK_open);
+ MDL_deadlock_handler mdl_deadlock_handler(ot_ctx);
+ bool wait_result;
+
thd->push_internal_handler(&mdl_deadlock_handler);
wait_result= tdc_wait_for_old_version(thd, table_list->db,
table_list->table_name,
@@ -3093,10 +3081,8 @@ retry_share:
else if (share->crashed)
(void) ot_ctx->request_backoff_action(Open_table_context::OT_REPAIR,
table_list);
-
goto err_lock;
}
-
if (open_table_entry_fini(thd, share, table))
{
closefrm(table, 0);
@@ -3126,6 +3112,21 @@ retry_share:
table_list->updatable= 1; // It is not derived table nor non-updatable VIEW
table_list->table= table;
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ if (table->part_info)
+ {
+ /* Set all [named] partitions as used. */
+ if (table->part_info->set_partition_bitmaps(table_list))
+ DBUG_RETURN(true);
+ }
+ else if (table_list->partition_names)
+ {
+ /* Don't allow PARTITION () clause on a nonpartitioned table */
+ my_error(ER_PARTITION_CLAUSE_ON_NONPARTITIONED, MYF(0));
+ DBUG_RETURN(true);
+ }
+#endif
+
table->init(thd, table_list);
DBUG_RETURN(FALSE);
@@ -3180,9 +3181,9 @@ TABLE *find_locked_table(TABLE *list, const char *db, const char *table_name)
upgrade the lock and ER_TABLE_NOT_LOCKED_FOR_WRITE will be
reported.
- @return Pointer to TABLE instance with MDL_SHARED_NO_WRITE,
- MDL_SHARED_NO_READ_WRITE, or MDL_EXCLUSIVE metadata
- lock, NULL otherwise.
+ @return Pointer to TABLE instance with MDL_SHARED_UPGRADABLE
+ MDL_SHARED_NO_WRITE, MDL_SHARED_NO_READ_WRITE, or
+ MDL_EXCLUSIVE metadata lock, NULL otherwise.
*/
TABLE *find_table_for_mdl_upgrade(THD *thd, const char *db,
@@ -3756,11 +3757,12 @@ check_and_update_routine_version(THD *thd, Sroutine_hash_entry *rt,
*/
bool tdc_open_view(THD *thd, TABLE_LIST *table_list, const char *alias,
- char *cache_key, uint cache_key_length,
+ const char *cache_key, uint cache_key_length,
MEM_ROOT *mem_root, uint flags)
{
TABLE not_used;
TABLE_SHARE *share;
+ bool err= TRUE;
if (!(share= get_table_share(thd, table_list->db, table_list->table_name,
cache_key, cache_key_length, GTS_VIEW)))
@@ -3768,12 +3770,28 @@ bool tdc_open_view(THD *thd, TABLE_LIST *table_list, const char *alias,
DBUG_ASSERT(share->is_view);
- bool err= open_new_frm(thd, share, alias,
+ if (flags & CHECK_METADATA_VERSION)
+ {
+ /*
+ Check TABLE_SHARE-version of view only if we have been instructed to do
+ so. We do not need to check the version if we're executing CREATE VIEW or
+ ALTER VIEW statements.
+
+ In the future, this functionality should be moved out from
+ tdc_open_view(), and tdc_open_view() should became a part of a clean
+ table-definition-cache interface.
+ */
+ if (check_and_update_table_version(thd, table_list, share))
+ goto ret;
+ }
+
+ err= open_new_frm(thd, share, alias,
(HA_OPEN_KEYFILE | HA_OPEN_RNDFILE |
HA_GET_INDEX | HA_TRY_READ_ONLY),
READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD | flags,
thd->open_options, &not_used, table_list, mem_root);
+ret:
mysql_mutex_lock(&LOCK_open);
release_table_share(share);
mysql_mutex_unlock(&LOCK_open);
@@ -4014,14 +4032,13 @@ recover_from_failed_open(THD *thd)
case OT_DISCOVER:
{
if ((result= lock_table_names(thd, m_failed_table, NULL,
- get_timeout(),
- MYSQL_OPEN_SKIP_TEMPORARY)))
+ get_timeout(), 0)))
break;
tdc_remove_table(thd, TDC_RT_REMOVE_ALL, m_failed_table->db,
m_failed_table->table_name, FALSE);
- thd->warning_info->clear_warning_info(thd->query_id);
+ thd->get_stmt_da()->clear_warning_info(thd->query_id);
thd->clear_error(); // Clear error message
if ((result=
@@ -4036,8 +4053,7 @@ recover_from_failed_open(THD *thd)
case OT_REPAIR:
{
if ((result= lock_table_names(thd, m_failed_table, NULL,
- get_timeout(),
- MYSQL_OPEN_SKIP_TEMPORARY)))
+ get_timeout(), 0)))
break;
tdc_remove_table(thd, TDC_RT_REMOVE_ALL, m_failed_table->db,
@@ -4369,9 +4385,35 @@ open_and_process_table(THD *thd, LEX *lex, TABLE_LIST *tables,
tables->db, tables->table_name, tables)); //psergey: invalid read of size 1 here
(*counter)++;
- /* Not a placeholder: must be a base table or a view. Let us open it. */
- DBUG_ASSERT(!tables->table);
+ /*
+ Not a placeholder: must be a base/temporary table or a view. Let us open it.
+ */
+ if (tables->table)
+ {
+ /*
+ If this TABLE_LIST object has an associated open TABLE object
+ (TABLE_LIST::table is not NULL), that TABLE object must be a pre-opened
+ temporary table.
+ */
+ DBUG_ASSERT(is_temporary_table(tables));
+ }
+ else if (tables->open_type == OT_TEMPORARY_ONLY)
+ {
+ /*
+ OT_TEMPORARY_ONLY means that we are in CREATE TEMPORARY TABLE statement.
+ Also such table list element can't correspond to prelocking placeholder
+ or to underlying table of merge table.
+ So existing temporary table should have been preopened by this moment
+ and we can simply continue without trying to open temporary or base
+ table.
+ */
+ DBUG_ASSERT(tables->open_strategy);
+ DBUG_ASSERT(!tables->prelocking_placeholder);
+ DBUG_ASSERT(!tables->parent_l);
+ DBUG_RETURN(0);
+ }
+ /* Not a placeholder: must be a base table or a view. Let us open it. */
if (tables->prelocking_placeholder)
{
/*
@@ -4382,7 +4424,35 @@ open_and_process_table(THD *thd, LEX *lex, TABLE_LIST *tables,
*/
No_such_table_error_handler no_such_table_handler;
thd->push_internal_handler(&no_such_table_handler);
- error= open_table(thd, tables, new_frm_mem, ot_ctx);
+
+ /*
+ We're opening a table from the prelocking list.
+
+ Since this table list element might have been added after pre-opening
+ of temporary tables we have to try to open temporary table for it.
+
+ We can't simply skip this table list element and postpone opening of
+ temporary table till the execution of the substatement for several reasons:
+ - Temporary table can be a MERGE table with base underlying tables,
+ so its underlying tables have to be properly opened and locked at
+ prelocking stage.
+ - Temporary table can be a MERGE table and we might be in PREPARE
+ phase for a prepared statement. In this case it is important to call
+ HA_ATTACH_CHILDREN for all merge children.
+ This is necessary because merge children remember "TABLE_SHARE ref type"
+ and "TABLE_SHARE def version" in the HA_ATTACH_CHILDREN operation.
+ If HA_ATTACH_CHILDREN is not called, these attributes are not set.
+ Then, during the first EXECUTE, those attributes need to be updated.
+ That would cause statement re-preparing (because changing those
+ attributes during EXECUTE is caught by THD::m_reprepare_observers).
+ The problem is that since those attributes are not set in merge
+ children, another round of PREPARE will not help.
+ */
+ error= open_temporary_table(thd, tables);
+
+ if (!error && !tables->table)
+ error= open_table(thd, tables, new_frm_mem, ot_ctx);
+
thd->pop_internal_handler();
safe_to_ignore_table= no_such_table_handler.safely_trapped_errors();
}
@@ -4396,12 +4466,29 @@ open_and_process_table(THD *thd, LEX *lex, TABLE_LIST *tables,
*/
Repair_mrg_table_error_handler repair_mrg_table_handler;
thd->push_internal_handler(&repair_mrg_table_handler);
- error= open_table(thd, tables, new_frm_mem, ot_ctx);
+
+ error= open_temporary_table(thd, tables);
+ if (!error && !tables->table)
+ error= open_table(thd, tables, new_frm_mem, ot_ctx);
+
thd->pop_internal_handler();
safe_to_ignore_table= repair_mrg_table_handler.safely_trapped_errors();
}
else
- error= open_table(thd, tables, new_frm_mem, ot_ctx);
+ {
+ if (tables->parent_l)
+ {
+ /*
+ Even if we are opening table not from the prelocking list we
+ still might need to look for a temporary table if this table
+ list element corresponds to underlying table of a merge table.
+ */
+ error= open_temporary_table(thd, tables);
+ }
+
+ if (!error && !tables->table)
+ error= open_table(thd, tables, new_frm_mem, ot_ctx);
+ }
free_root(new_frm_mem, MYF(MY_KEEP_PREALLOC));
@@ -4628,27 +4715,25 @@ lock_table_names(THD *thd,
for (table= tables_start; table && table != tables_end;
table= table->next_global)
{
- if (table->mdl_request.type >= MDL_SHARED_NO_WRITE &&
- !(table->open_type == OT_TEMPORARY_ONLY ||
- (flags & MYSQL_OPEN_TEMPORARY_ONLY) ||
- (table->open_type != OT_BASE_ONLY &&
- ! (flags & MYSQL_OPEN_SKIP_TEMPORARY) &&
- find_temporary_table(thd, table))))
+ if (table->mdl_request.type < MDL_SHARED_UPGRADABLE ||
+ table->open_type == OT_TEMPORARY_ONLY ||
+ (table->open_type == OT_TEMPORARY_OR_BASE && is_temporary_table(table)))
{
- /*
- Write lock on normal tables is not allowed in a read only transaction.
- */
- if (thd->tx_read_only)
- {
- my_error(ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION, MYF(0));
- DBUG_RETURN(true);
- }
+ continue;
+ }
- if (! (flags & MYSQL_OPEN_SKIP_SCOPED_MDL_LOCK) &&
- schema_set.insert(table))
- DBUG_RETURN(TRUE);
- mdl_requests.push_front(&table->mdl_request);
+ /* Write lock on normal tables is not allowed in a read only transaction. */
+ if (thd->tx_read_only)
+ {
+ my_error(ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION, MYF(0));
+ DBUG_RETURN(true);
}
+
+ if (! (flags & MYSQL_OPEN_SKIP_SCOPED_MDL_LOCK) &&
+ schema_set.insert(table))
+ DBUG_RETURN(TRUE);
+
+ mdl_requests.push_front(&table->mdl_request);
}
if (mdl_requests.is_empty())
@@ -4712,7 +4797,7 @@ lock_table_names(THD *thd,
{
if (thd->lex->create_info.options & HA_LEX_CREATE_IF_NOT_EXISTS)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_TABLE_EXISTS_ERROR, ER(ER_TABLE_EXISTS_ERROR),
tables_start->table_name);
}
@@ -4761,34 +4846,33 @@ open_tables_check_upgradable_mdl(THD *thd, TABLE_LIST *tables_start,
for (table= tables_start; table && table != tables_end;
table= table->next_global)
{
- if (table->mdl_request.type >= MDL_SHARED_NO_WRITE &&
- !(table->open_type == OT_TEMPORARY_ONLY ||
- (flags & MYSQL_OPEN_TEMPORARY_ONLY) ||
- (table->open_type != OT_BASE_ONLY &&
- ! (flags & MYSQL_OPEN_SKIP_TEMPORARY) &&
- find_temporary_table(thd, table))))
+ if (table->mdl_request.type < MDL_SHARED_UPGRADABLE ||
+ table->open_type == OT_TEMPORARY_ONLY ||
+ (table->open_type == OT_TEMPORARY_OR_BASE && is_temporary_table(table)))
{
- /*
- We don't need to do anything about the found TABLE instance as it
- will be handled later in open_tables(), we only need to check that
- an upgradable lock is already acquired. When we enter LOCK TABLES
- mode, SNRW locks are acquired before all other locks. So if under
- LOCK TABLES we find that there is TABLE instance with upgradeable
- lock, all other instances of TABLE for the same table will have the
- same ticket.
-
- Note that this works OK even for CREATE TABLE statements which
- request X type of metadata lock. This is because under LOCK TABLES
- such statements don't create the table but only check if it exists
- or, in most complex case, only insert into it.
- Thus SNRW lock should be enough.
-
- Note that find_table_for_mdl_upgrade() will report an error if
- no suitable ticket is found.
- */
- if (!find_table_for_mdl_upgrade(thd, table->db, table->table_name, false))
- return TRUE;
+ continue;
}
+
+ /*
+ We don't need to do anything about the found TABLE instance as it
+ will be handled later in open_tables(), we only need to check that
+ an upgradable lock is already acquired. When we enter LOCK TABLES
+ mode, SNRW locks are acquired before all other locks. So if under
+ LOCK TABLES we find that there is TABLE instance with upgradeable
+ lock, all other instances of TABLE for the same table will have the
+ same ticket.
+
+ Note that this works OK even for CREATE TABLE statements which
+ request X type of metadata lock. This is because under LOCK TABLES
+ such statements don't create the table but only check if it exists
+ or, in most complex case, only insert into it.
+ Thus SNRW lock should be enough.
+
+ Note that find_table_for_mdl_upgrade() will report an error if
+ no suitable ticket is found.
+ */
+ if (!find_table_for_mdl_upgrade(thd, table->db, table->table_name, false))
+ return TRUE;
}
return FALSE;
@@ -4828,11 +4912,12 @@ bool open_tables(THD *thd, TABLE_LIST **start, uint *counter, uint flags,
Prelocking_strategy *prelocking_strategy)
{
/*
- We use pointers to "next_global" member in the last processed TABLE_LIST
- element and to the "next" member in the last processed Sroutine_hash_entry
- element as iterators over, correspondingly, the table list and stored routines
- list which stay valid and allow to continue iteration when new elements are
- added to the tail of the lists.
+ We use pointers to "next_global" member in the last processed
+ TABLE_LIST element and to the "next" member in the last processed
+ Sroutine_hash_entry element as iterators over, correspondingly,
+ the table list and stored routines list which stay valid and allow
+ to continue iteration when new elements are added to the tail of
+ the lists.
*/
TABLE_LIST **table_to_open;
Sroutine_hash_entry **sroutine_to_open;
@@ -4921,7 +5006,7 @@ restart:
for (table= *start; table && table != thd->lex->first_not_own_table();
table= table->next_global)
{
- if (table->mdl_request.type >= MDL_SHARED_NO_WRITE)
+ if (table->mdl_request.type >= MDL_SHARED_UPGRADABLE)
table->mdl_request.ticket= NULL;
}
}
@@ -4976,6 +5061,10 @@ restart:
if (ot_ctx.recover_from_failed_open(thd))
goto err;
+ /* Re-open temporary tables after close_tables_for_reopen(). */
+ if (open_temporary_tables(thd, *start))
+ goto err;
+
error= FALSE;
goto restart;
}
@@ -5029,6 +5118,10 @@ restart:
if (ot_ctx.recover_from_failed_open(thd))
goto err;
+ /* Re-open temporary tables after close_tables_for_reopen(). */
+ if (open_temporary_tables(thd, *start))
+ goto err;
+
error= FALSE;
goto restart;
}
@@ -5443,6 +5536,10 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type lock_type,
bool error;
DBUG_ENTER("open_ltable");
+ /* Ignore temporary tables as they have already been opened */
+ if (table_list->table)
+ DBUG_RETURN(table_list->table);
+
/* should not be used in a prelocked_mode context, see NOTE above */
DBUG_ASSERT(thd->locked_tables_mode < LTM_PRELOCKED);
@@ -5452,7 +5549,7 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type lock_type,
table_list->required_type= FRMTYPE_TABLE;
/* This function can't properly handle requests for such metadata locks. */
- DBUG_ASSERT(table_list->mdl_request.type < MDL_SHARED_NO_WRITE);
+ DBUG_ASSERT(table_list->mdl_request.type < MDL_SHARED_UPGRADABLE);
while ((error= open_table(thd, table_list, thd->mem_root, &ot_ctx)) &&
ot_ctx.can_recover_from_failed_open())
@@ -5692,7 +5789,6 @@ bool lock_tables(THD *thd, TABLE_LIST *tables, uint count,
uint flags)
{
TABLE_LIST *table;
-
DBUG_ENTER("lock_tables");
/*
We can't meet statement requiring prelocking if we already
@@ -5960,6 +6056,9 @@ void close_tables_for_reopen(THD *thd, TABLE_LIST **tables,
@param add_to_temporary_tables_list Specifies if the opened TABLE
instance should be linked into
THD::temporary_tables list.
+ @param open_in_engine Indicates that we need to open table
+ in storage engine in addition to
+ constructing TABLE object for it.
@note This function is used:
- by alter_table() to open a temporary table;
@@ -5972,7 +6071,8 @@ void close_tables_for_reopen(THD *thd, TABLE_LIST **tables,
TABLE *open_table_uncached(THD *thd, handlerton *hton,
const char *path, const char *db,
const char *table_name,
- bool add_to_temporary_tables_list)
+ bool add_to_temporary_tables_list,
+ bool open_in_engine)
{
TABLE *tmp_table;
TABLE_SHARE *share;
@@ -5994,6 +6094,13 @@ TABLE *open_table_uncached(THD *thd, handlerton *hton,
MYF(MY_WME))))
DBUG_RETURN(0); /* purecov: inspected */
+#ifndef DBUG_OFF
+ mysql_mutex_lock(&LOCK_open);
+ DBUG_ASSERT(!my_hash_search(&table_def_cache, (uchar*) cache_key,
+ key_length));
+ mysql_mutex_unlock(&LOCK_open);
+#endif
+
share= (TABLE_SHARE*) (tmp_table+1);
tmp_path= (char*) (share+1);
saved_cache_key= strmov(tmp_path, path)+1;
@@ -6014,11 +6121,17 @@ TABLE *open_table_uncached(THD *thd, handlerton *hton,
share->m_psi= PSI_CALL_get_table_share(true, share);
if (open_table_from_share(thd, share, table_name,
+ open_in_engine ?
(uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE |
- HA_GET_INDEX),
+ HA_GET_INDEX) : 0,
READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD,
ha_open_options,
- tmp_table, FALSE))
+ tmp_table,
+ /*
+ Set "is_create_table" if the table does not
+ exist in SE
+ */
+ open_in_engine ? false : true))
{
/* No need to lock share->mutex as this is not needed for tmp tables */
free_table_share(share);
@@ -6027,6 +6140,7 @@ TABLE *open_table_uncached(THD *thd, handlerton *hton,
}
tmp_table->reginfo.lock_type= TL_WRITE; // Simulate locked
+ tmp_table->grant.privilege= TMP_TABLE_ACLS;
share->tmp_table= (tmp_table->file->has_transactions() ?
TRANSACTIONAL_TMP_TABLE : NON_TRANSACTIONAL_TMP_TABLE);
@@ -6149,6 +6263,143 @@ static void update_field_dependencies(THD *thd, Field *field, TABLE *table)
}
+/**
+ Find a temporary table specified by TABLE_LIST instance in the cache and
+ prepare its TABLE instance for use.
+
+ This function tries to resolve this table in the list of temporary tables
+ of this thread. Temporary tables are thread-local and "shadow" base
+ tables with the same name.
+
+ @note In most cases one should use open_temporary_tables() instead
+ of this call.
+
+ @note One should finalize process of opening temporary table for table
+ list element by calling open_and_process_table(). This function
+ is responsible for table version checking and handling of merge
+ tables.
+
+ @note We used to check global_read_lock before opening temporary tables.
+ However, that limitation was artificial and is removed now.
+
+ @return Error status.
+ @retval FALSE On success. If a temporary table exists for the given
+ key, tl->table is set.
+ @retval TRUE On error. my_error() has been called.
+*/
+
+bool open_temporary_table(THD *thd, TABLE_LIST *tl)
+{
+ TABLE *table;
+ DBUG_ENTER("open_temporary_table");
+ DBUG_PRINT("enter", ("table: '%s'.'%s'", tl->db, tl->table_name));
+
+ /*
+ Code in open_table() assumes that TABLE_LIST::table can
+ be non-zero only for pre-opened temporary tables.
+ */
+ DBUG_ASSERT(tl->table == NULL);
+
+ /*
+ This function should not be called for cases when derived or I_S
+ tables can be met since table list elements for such tables can
+ have invalid db or table name.
+ Instead open_temporary_tables() should be used.
+ */
+ DBUG_ASSERT(!tl->derived && !tl->schema_table);
+
+ if (tl->open_type == OT_BASE_ONLY)
+ {
+ DBUG_PRINT("info", ("skip_temporary is set"));
+ DBUG_RETURN(FALSE);
+ }
+
+ if (!(table= find_temporary_table(thd, tl)))
+ {
+ if (tl->open_type == OT_TEMPORARY_ONLY &&
+ tl->open_strategy == TABLE_LIST::OPEN_NORMAL)
+ {
+ my_error(ER_NO_SUCH_TABLE, MYF(0), tl->db, tl->table_name);
+ DBUG_RETURN(TRUE);
+ }
+ DBUG_RETURN(FALSE);
+ }
+
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ if (tl->partition_names)
+ {
+ /* Partitioned temporary tables are not supported. */
+ DBUG_ASSERT(!table->part_info);
+ my_error(ER_PARTITION_CLAUSE_ON_NONPARTITIONED, MYF(0));
+ DBUG_RETURN(true);
+ }
+#endif
+
+ if (table->query_id)
+ {
+ /*
+ We're trying to use the same temporary table twice in a query.
+ Right now we don't support this because a temporary table is always
+ represented by only one TABLE object in THD, and it can not be
+ cloned. Emit an error for an unsupported behaviour.
+ */
+
+ DBUG_PRINT("error",
+ ("query_id: %lu server_id: %u pseudo_thread_id: %lu",
+ (ulong) table->query_id, (uint) thd->variables.server_id,
+ (ulong) thd->variables.pseudo_thread_id));
+ my_error(ER_CANT_REOPEN_TABLE, MYF(0), table->alias.c_ptr());
+ DBUG_RETURN(TRUE);
+ }
+
+ table->query_id= thd->query_id;
+ thd->thread_specific_used= TRUE;
+
+ tl->updatable= 1; // It is not derived table nor non-updatable VIEW.
+ tl->table= table;
+
+ table->init(thd, tl);
+
+ DBUG_PRINT("info", ("Using temporary table"));
+ DBUG_RETURN(FALSE);
+}
+
+
+/**
+ Pre-open temporary tables corresponding to table list elements.
+
+ @note One should finalize process of opening temporary tables
+ by calling open_tables(). This function is responsible
+ for table version checking and handling of merge tables.
+
+ @return Error status.
+ @retval FALSE On success. If a temporary tables exists for the
+ given element, tl->table is set.
+ @retval TRUE On error. my_error() has been called.
+*/
+
+bool open_temporary_tables(THD *thd, TABLE_LIST *tl_list)
+{
+ TABLE_LIST *first_not_own= thd->lex->first_not_own_table();
+ DBUG_ENTER("open_temporary_tables");
+
+ for (TABLE_LIST *tl= tl_list; tl && tl != first_not_own; tl= tl->next_global)
+ {
+ if (tl->derived || tl->schema_table)
+ {
+ /*
+ Derived and I_S tables will be handled by a later call to open_tables().
+ */
+ continue;
+ }
+
+ if (open_temporary_table(thd, tl))
+ DBUG_RETURN(TRUE);
+ }
+
+ DBUG_RETURN(FALSE);
+}
+
/*
Find a field by name in a view that uses merge algorithm.
@@ -8818,7 +9069,7 @@ err_no_arena:
@retval false OK.
*/
-static bool
+bool
fill_record(THD * thd, TABLE *table_arg, List<Item> &fields, List<Item> &values,
bool ignore_errors)
{
@@ -8842,7 +9093,7 @@ fill_record(THD * thd, TABLE *table_arg, List<Item> &fields, List<Item> &values,
thus we safely can take table from the first field.
*/
fld= (Item_field*)f++;
- if (!(field= fld->filed_for_view_update()))
+ if (!(field= fld->field_for_view_update()))
{
my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), fld->name);
goto err;
@@ -8856,7 +9107,7 @@ fill_record(THD * thd, TABLE *table_arg, List<Item> &fields, List<Item> &values,
while ((fld= f++))
{
- if (!(field= fld->filed_for_view_update()))
+ if (!(field= fld->field_for_view_update()))
{
my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), fld->name);
goto err;
@@ -8871,7 +9122,7 @@ fill_record(THD * thd, TABLE *table_arg, List<Item> &fields, List<Item> &values,
value->type() != Item::NULL_ITEM &&
table->s->table_category != TABLE_CATEGORY_TEMPORARY)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARNING_NON_DEFAULT_VALUE_FOR_VIRTUAL_COLUMN,
ER(ER_WARNING_NON_DEFAULT_VALUE_FOR_VIRTUAL_COLUMN),
rfield->field_name, table->s->table_name.str);
@@ -8948,7 +9199,7 @@ fill_record_n_invoke_before_triggers(THD *thd, TABLE *table, List<Item> &fields,
if (fields.elements)
{
fld= (Item_field*)f++;
- item_field= fld->filed_for_view_update();
+ item_field= fld->field_for_view_update();
if (item_field && item_field->field && table && table->vfield)
{
DBUG_ASSERT(table == item_field->field->table);
@@ -9023,7 +9274,7 @@ fill_record(THD *thd, TABLE *table, Field **ptr, List<Item> &values,
value->type() != Item::NULL_ITEM &&
table->s->table_category != TABLE_CATEGORY_TEMPORARY)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARNING_NON_DEFAULT_VALUE_FOR_VIRTUAL_COLUMN,
ER(ER_WARNING_NON_DEFAULT_VALUE_FOR_VIRTUAL_COLUMN),
field->field_name, table->s->table_name.str);
@@ -9282,6 +9533,15 @@ bool mysql_notify_thread_having_shared_lock(THD *thd, THD *in_use,
instances (if there are no
used instances will also
remove TABLE_SHARE).
+ TDC_RT_REMOVE_NOT_OWN_KEEP_SHARE -
+ remove all TABLE instances
+ except those that belong to
+ this thread, but don't mark
+ TABLE_SHARE as old. There
+ should be no TABLE objects
+ used by other threads and
+ caller should have exclusive
+ metadata lock on the table.
@param db Name of database
@param table_name Name of table
@param has_lock If TRUE, LOCK_open is already acquired
@@ -9319,7 +9579,7 @@ void tdc_remove_table(THD *thd, enum_tdc_remove_table_type remove_type,
{
if (share->ref_count)
{
- I_P_List_iterator<TABLE, TABLE_share> it(share->free_tables);
+ TABLE_SHARE::TABLE_list::Iterator it(share->free_tables);
#ifndef DBUG_OFF
if (remove_type == TDC_RT_REMOVE_ALL)
{
@@ -9328,12 +9588,14 @@ void tdc_remove_table(THD *thd, enum_tdc_remove_table_type remove_type,
else if (remove_type == TDC_RT_REMOVE_NOT_OWN ||
remove_type == TDC_RT_REMOVE_NOT_OWN_AND_MARK_NOT_USABLE)
{
- I_P_List_iterator<TABLE, TABLE_share> it2(share->used_tables);
+ TABLE_SHARE::TABLE_list::Iterator it2(share->used_tables);
while ((table= it2++))
+ {
if (table->in_use != thd)
{
DBUG_ASSERT(0);
}
+ }
}
#endif
/*
@@ -9359,7 +9621,10 @@ void tdc_remove_table(THD *thd, enum_tdc_remove_table_type remove_type,
free_cache_entry(table);
}
else
+ {
+ DBUG_ASSERT(remove_type != TDC_RT_REMOVE_NOT_OWN_KEEP_SHARE);
(void) my_hash_delete(&table_def_cache, (uchar*) share);
+ }
}
if (! has_lock)
@@ -9745,7 +10010,7 @@ open_log_table(THD *thd, TABLE_LIST *one_table, Open_tables_backup *backup)
DBUG_ASSERT(table->s->table_category == TABLE_CATEGORY_LOG);
/* Make sure all columns get assigned to a default value */
table->use_all_columns();
- table->no_replicate= 1;
+ DBUG_ASSERT(table->no_replicate);
}
else
thd->restore_backup_open_tables_state(backup);
diff --git a/sql/sql_base.h b/sql/sql_base.h
index 95d9bf21fe8..a4f35b59ba9 100644
--- a/sql/sql_base.h
+++ b/sql/sql_base.h
@@ -62,6 +62,7 @@ enum find_item_error_report_type {REPORT_ALL_ERRORS, REPORT_EXCEPT_NOT_FOUND,
enum enum_tdc_remove_table_type {TDC_RT_REMOVE_ALL, TDC_RT_REMOVE_NOT_OWN,
TDC_RT_REMOVE_UNUSED,
TDC_RT_REMOVE_NOT_OWN_AND_MARK_NOT_USABLE};
+#define TDC_RT_REMOVE_NOT_OWN_KEEP_SHARE TDC_RT_REMOVE_NOT_OWN_AND_MARK_NOT_USABLE
/* bits for last argument to remove_table_from_cache() */
#define RTFC_NO_FLAG 0x0000
@@ -79,13 +80,13 @@ bool table_def_init(void);
void table_def_free(void);
void table_def_start_shutdown(void);
void assign_new_table_id(TABLE_SHARE *share);
-uint cached_open_tables(void);
uint cached_table_definitions(void);
+uint cached_open_tables(void);
/**
Create a table cache key for non-temporary table.
- @param key Buffer for key (must be at least NAME_LEN*2+2 bytes).
+ @param key Buffer for key (must be at least MAX_DBKEY_LENGTH bytes).
@param db Database name.
@param table_name Table name.
@@ -108,8 +109,9 @@ create_table_def_key(char *key, const char *db, const char *table_name)
uint create_tmp_table_def_key(THD *thd, char *key, const char *db,
const char *table_name);
+uint get_table_def_key(const TABLE_LIST *table_list, const char **key);
TABLE_SHARE *get_table_share(THD *thd, const char *db, const char *table_name,
- char *key, uint key_length, uint flags,
+ const char *key, uint key_length, uint flags,
my_hash_value_type hash_value);
void release_table_share(TABLE_SHARE *share);
TABLE_SHARE *get_cached_table_share(const char *db, const char *table_name);
@@ -117,7 +119,7 @@ TABLE_SHARE *get_cached_table_share(const char *db, const char *table_name);
// convenience helper: call get_table_share() without precomputed hash_value
static inline TABLE_SHARE *get_table_share(THD *thd, const char *db,
const char *table_name,
- char *key, uint key_length,
+ const char *key, uint key_length,
uint flags)
{
return get_table_share(thd, db, table_name, key, key_length, flags,
@@ -140,7 +142,7 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type update,
/* mysql_lock_tables() and open_table() flags bits */
#define MYSQL_OPEN_IGNORE_GLOBAL_READ_LOCK 0x0001
#define MYSQL_OPEN_IGNORE_FLUSH 0x0002
-#define MYSQL_OPEN_TEMPORARY_ONLY 0x0004
+/* MYSQL_OPEN_TEMPORARY_ONLY (0x0004) is not used anymore. */
#define MYSQL_LOCK_IGNORE_GLOBAL_READ_ONLY 0x0008
#define MYSQL_LOCK_LOG_TABLE 0x0010
/**
@@ -153,8 +155,7 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type update,
a new instance of the table.
*/
#define MYSQL_OPEN_GET_NEW_TABLE 0x0040
-/** Don't look up the table in the list of temporary tables. */
-#define MYSQL_OPEN_SKIP_TEMPORARY 0x0080
+/* 0x0080 used to be MYSQL_OPEN_SKIP_TEMPORARY */
/** Fail instead of waiting when conficting metadata lock is discovered. */
#define MYSQL_OPEN_FAIL_ON_MDL_CONFLICT 0x0100
/** Open tables using MDL_SHARED lock instead of one specified in parser. */
@@ -176,6 +177,11 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type update,
#define MYSQL_OPEN_SKIP_SCOPED_MDL_LOCK 0x1000
#define MYSQL_LOCK_NOT_TEMPORARY 0x2000
#define MYSQL_OPEN_FOR_REPAIR 0x4000
+/**
+ Only check THD::killed if waits happen (e.g. wait on MDL, wait on
+ table flush, wait on thr_lock.c locks) while opening and locking table.
+*/
+#define MYSQL_OPEN_IGNORE_KILLED 0x8000
/** Please refer to the internals manual. */
#define MYSQL_OPEN_REOPEN (MYSQL_OPEN_IGNORE_FLUSH |\
@@ -183,11 +189,11 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type update,
MYSQL_LOCK_IGNORE_GLOBAL_READ_ONLY |\
MYSQL_LOCK_IGNORE_TIMEOUT |\
MYSQL_OPEN_GET_NEW_TABLE |\
- MYSQL_OPEN_SKIP_TEMPORARY |\
MYSQL_OPEN_HAS_MDL_LOCK)
bool open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root,
Open_table_context *ot_ctx);
+
bool open_new_frm(THD *thd, TABLE_SHARE *share, const char *alias,
uint db_stat, uint prgflag,
uint ha_open_flags, TABLE *outparam, TABLE_LIST *table_desc,
@@ -197,7 +203,8 @@ bool get_key_map_from_key_list(key_map *map, TABLE *table,
List<String> *index_list);
TABLE *open_table_uncached(THD *thd, handlerton *hton, const char *path,
const char *db, const char *table_name,
- bool add_to_temporary_tables_list);
+ bool add_to_temporary_tables_list,
+ bool open_in_engine);
TABLE *find_locked_table(TABLE *list, const char *db, const char *table_name);
TABLE *find_write_locked_table(TABLE *list, const char *db,
const char *table_name);
@@ -239,6 +246,8 @@ bool setup_fields(THD *thd, Item** ref_pointer_array,
List<Item> &item, enum_mark_columns mark_used_columns,
List<Item> *sum_func_list, bool allow_sum_func);
void unfix_fields(List<Item> &items);
+bool fill_record(THD * thd, TABLE *table_arg, List<Item> &fields,
+ List<Item> &values, bool ignore_errors);
bool fill_record(THD *thd, TABLE *table, Field **field, List<Item> &values,
bool ignore_errors, bool use_value);
@@ -310,7 +319,7 @@ bool lock_tables(THD *thd, TABLE_LIST *tables, uint counter, uint flags);
int decide_logging_format(THD *thd, TABLE_LIST *tables);
void free_io_cache(TABLE *entry);
void intern_close_table(TABLE *entry);
-bool close_thread_table(THD *thd, TABLE **table_ptr);
+void close_thread_table(THD *thd, TABLE **table_ptr);
bool close_temporary_tables(THD *thd);
TABLE_LIST *unique_table(THD *thd, TABLE_LIST *table, TABLE_LIST *table_list,
bool check_alias);
@@ -320,6 +329,8 @@ void close_temporary_table(THD *thd, TABLE *table, bool free_share,
void close_temporary(TABLE *table, bool free_share, bool delete_table);
bool rename_temporary_table(THD* thd, TABLE *table, const char *new_db,
const char *table_name);
+bool open_temporary_tables(THD *thd, TABLE_LIST *tl_list);
+bool open_temporary_table(THD *thd, TABLE_LIST *tl);
bool is_equal(const LEX_STRING *a, const LEX_STRING *b);
class Open_tables_backup;
@@ -340,13 +351,14 @@ bool close_cached_tables(THD *thd, TABLE_LIST *tables,
bool wait_for_refresh, ulong timeout);
bool close_cached_connection_tables(THD *thd, LEX_STRING *connect_string);
void close_all_tables_for_name(THD *thd, TABLE_SHARE *share,
- ha_extra_function extra);
+ ha_extra_function extra,
+ TABLE *skip_table);
OPEN_TABLE_LIST *list_open_tables(THD *thd, const char *db, const char *wild);
void tdc_remove_table(THD *thd, enum_tdc_remove_table_type remove_type,
const char *db, const char *table_name,
bool has_lock);
bool tdc_open_view(THD *thd, TABLE_LIST *table_list, const char *alias,
- char *cache_key, uint cache_key_length,
+ const char *cache_key, uint cache_key_length,
MEM_ROOT *mem_root, uint flags);
static inline bool tdc_open_view(THD *thd, TABLE_LIST *table_list,
@@ -516,11 +528,6 @@ class Lock_tables_prelocking_strategy : public DML_prelocking_strategy
class Alter_table_prelocking_strategy : public Prelocking_strategy
{
public:
-
- Alter_table_prelocking_strategy(Alter_info *alter_info)
- : m_alter_info(alter_info)
- {}
-
virtual bool handle_routine(THD *thd, Query_tables_list *prelocking_ctx,
Sroutine_hash_entry *rt, sp_head *sp,
bool *need_prelocking);
@@ -528,9 +535,6 @@ public:
TABLE_LIST *table_list, bool *need_prelocking);
virtual bool handle_view(THD *thd, Query_tables_list *prelocking_ctx,
TABLE_LIST *table_list, bool *need_prelocking);
-
-private:
- Alter_info *m_alter_info;
};
@@ -652,6 +656,30 @@ private:
/**
+ Check if a TABLE_LIST instance represents a pre-opened temporary table.
+*/
+
+inline bool is_temporary_table(TABLE_LIST *tl)
+{
+ if (tl->view || tl->schema_table)
+ return FALSE;
+
+ if (!tl->table)
+ return FALSE;
+
+ /*
+ NOTE: 'table->s' might be NULL for specially constructed TABLE
+ instances. See SHOW TRIGGERS for example.
+ */
+
+ if (!tl->table->s)
+ return FALSE;
+
+ return tl->table->s->tmp_table != NO_TMP_TABLE;
+}
+
+
+/**
This internal handler is used to trap ER_NO_SUCH_TABLE.
*/
@@ -665,9 +693,9 @@ public:
bool handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
- MYSQL_ERROR::enum_warning_level level,
+ Sql_condition::enum_warning_level level,
const char* msg,
- MYSQL_ERROR ** cond_hdl);
+ Sql_condition ** cond_hdl);
/**
Returns TRUE if one or more ER_NO_SUCH_TABLE errors have been
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index 436f7043c49..007a1b3b585 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -336,6 +336,7 @@ TODO list:
#include "sql_acl.h" // SELECT_ACL
#include "sql_base.h" // TMP_TABLE_KEY_EXTRA
#include "debug_sync.h" // DEBUG_SYNC
+#include "sql_table.h"
#ifdef HAVE_QUERY_CACHE
#include <m_ctype.h>
#include <my_dir.h>
@@ -345,6 +346,7 @@ TODO list:
#include "probes_mysql.h"
#include "log_slow.h"
#include "transaction.h"
+#include "strfunc.h"
const uchar *query_state_map;
@@ -1175,7 +1177,7 @@ void Query_cache::end_of_result(THD *thd)
DBUG_VOID_RETURN;
/* Ensure that only complete results are cached. */
- DBUG_ASSERT(thd->stmt_da->is_eof());
+ DBUG_ASSERT(thd->get_stmt_da()->is_eof());
if (thd->killed)
{
@@ -1223,7 +1225,7 @@ void Query_cache::end_of_result(THD *thd)
}
last_result_block= header->result()->prev;
allign_size= ALIGN_SIZE(last_result_block->used);
- len= max(query_cache.min_allocation_unit, allign_size);
+ len= MY_MAX(query_cache.min_allocation_unit, allign_size);
if (last_result_block->length >= query_cache.min_allocation_unit + len)
query_cache.split_block(last_result_block,len);
@@ -1638,6 +1640,41 @@ send_data_in_chunks(NET *net, const uchar *packet, ulong len)
#endif
+/**
+ Build a normalized table name suitable for query cache engine callback
+
+ This consist of normalized directory '/' normalized_file_name
+ followed by suffix.
+ Suffix is needed for partitioned tables.
+*/
+
+size_t build_normalized_name(char *buff, size_t bufflen,
+ const char *db, size_t db_len,
+ const char *table_name, size_t table_len,
+ size_t suffix_len)
+{
+ uint errors;
+ size_t length;
+ char *pos= buff, *end= buff+bufflen;
+ DBUG_ENTER("build_normalized_name");
+
+ (*pos++)= FN_LIBCHAR;
+ length= strconvert(system_charset_info, db, db_len,
+ &my_charset_filename, pos, bufflen - 3,
+ &errors);
+ pos+= length;
+ (*pos++)= FN_LIBCHAR;
+ length= strconvert(system_charset_info, table_name, table_len,
+ &my_charset_filename, pos, (uint) (end - pos),
+ &errors);
+ pos+= length;
+ if (pos + suffix_len < end)
+ pos= strmake(pos, table_name + table_len, suffix_len);
+
+ DBUG_RETURN((size_t) (pos - buff));
+}
+
+
/*
Check if the query is in the cache. If it was cached, send it
to the user.
@@ -2013,35 +2050,50 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d",
}
#endif /*!NO_EMBEDDED_ACCESS_CHECKS*/
engine_data= table->engine_data();
- if (table->callback() &&
- !(*table->callback())(thd, table->db(),
- table->key_length(),
- &engine_data))
+ if (table->callback())
{
- DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s",
- table_list.db, table_list.alias));
- BLOCK_UNLOCK_RD(query_block);
- if (engine_data != table->engine_data())
+ char qcache_se_key_name[FN_REFLEN + 10];
+ uint qcache_se_key_len, db_length= strlen(table->db());
+ engine_data= table->engine_data();
+
+ qcache_se_key_len= build_normalized_name(qcache_se_key_name,
+ sizeof(qcache_se_key_name),
+ table->db(),
+ db_length,
+ table->table(),
+ table->key_length() -
+ db_length - 2 -
+ table->suffix_length(),
+ table->suffix_length());
+
+ if (!(*table->callback())(thd, qcache_se_key_name,
+ qcache_se_key_len, &engine_data))
{
- DBUG_PRINT("qcache",
- ("Handler require invalidation queries of %s.%s %lu-%lu",
- table_list.db, table_list.alias,
- (ulong) engine_data, (ulong) table->engine_data()));
- invalidate_table_internal(thd,
- (uchar *) table->db(),
- table->key_length());
- }
- else
- {
- /*
- As this can change from call to call, don't reset set
- thd->lex->safe_to_cache_query
- */
- thd->query_cache_is_applicable= 0; // Query can't be cached
+ DBUG_PRINT("qcache", ("Handler does not allow caching for %.*s",
+ qcache_se_key_len, qcache_se_key_name));
+ BLOCK_UNLOCK_RD(query_block);
+ if (engine_data != table->engine_data())
+ {
+ DBUG_PRINT("qcache",
+ ("Handler require invalidation queries of %.*s %lu-%lu",
+ qcache_se_key_len, qcache_se_key_name,
+ (ulong) engine_data, (ulong) table->engine_data()));
+ invalidate_table_internal(thd,
+ (uchar *) table->db(),
+ table->key_length());
+ }
+ else
+ {
+ /*
+ As this can change from call to call, don't reset set
+ thd->lex->safe_to_cache_query
+ */
+ thd->query_cache_is_applicable= 0; // Query can't be cached
+ }
+ /* End the statement transaction potentially started by engine. */
+ trans_rollback_stmt(thd);
+ goto err_unlock; // Parse query
}
- /* End the statement transaction potentially started by engine. */
- trans_rollback_stmt(thd);
- goto err_unlock; // Parse query
}
else
DBUG_PRINT("qcache", ("handler allow caching %s,%s",
@@ -2091,8 +2143,8 @@ def_week_frmt: %lu, in_trans: %d, autocommit: %d",
response, we can't handle it anyway.
*/
(void) trans_commit_stmt(thd);
- if (!thd->stmt_da->is_set())
- thd->stmt_da->disable_status();
+ if (!thd->get_stmt_da()->is_set())
+ thd->get_stmt_da()->disable_status();
BLOCK_UNLOCK_RD(query_block);
MYSQL_QUERY_CACHE_HIT(thd->query(), (ulong) thd->limit_found_rows);
@@ -2898,7 +2950,7 @@ Query_cache::write_block_data(ulong data_len, uchar* data,
DBUG_ENTER("Query_cache::write_block_data");
DBUG_PRINT("qcache", ("data: %ld, header: %ld, all header: %ld",
data_len, header_len, all_headers_len));
- Query_cache_block *block= allocate_block(max(align_len,
+ Query_cache_block *block= allocate_block(MY_MAX(align_len,
min_allocation_unit),1, 0);
if (block != 0)
{
@@ -2953,7 +3005,7 @@ Query_cache::append_result_data(Query_cache_block **current_block,
ulong append_min = get_min_append_result_data_size();
if (last_block_free_space < data_len &&
append_next_free_block(last_block,
- max(tail, append_min)))
+ MY_MAX(tail, append_min)))
last_block_free_space = last_block->length - last_block->used;
// If no space in last block (even after join) allocate new block
if (last_block_free_space < data_len)
@@ -2981,7 +3033,7 @@ Query_cache::append_result_data(Query_cache_block **current_block,
// Now finally write data to the last block
if (success && last_block_free_space > 0)
{
- ulong to_copy = min(data_len,last_block_free_space);
+ ulong to_copy = MY_MIN(data_len,last_block_free_space);
DBUG_PRINT("qcache", ("use free space %lub at block 0x%lx to copy %lub",
last_block_free_space, (ulong)last_block, to_copy));
memcpy((uchar*) last_block + last_block->used, data, to_copy);
@@ -3069,8 +3121,8 @@ inline ulong Query_cache::get_min_first_result_data_size()
if (queries_in_cache < QUERY_CACHE_MIN_ESTIMATED_QUERIES_NUMBER)
return min_result_data_size;
ulong avg_result = (query_cache_size - free_memory) / queries_in_cache;
- avg_result = min(avg_result, query_cache_limit);
- return max(min_result_data_size, avg_result);
+ avg_result = MY_MIN(avg_result, query_cache_limit);
+ return MY_MAX(min_result_data_size, avg_result);
}
inline ulong Query_cache::get_min_append_result_data_size()
@@ -3102,7 +3154,7 @@ my_bool Query_cache::allocate_data_chain(Query_cache_block **result_block,
ulong len= data_len + all_headers_len;
ulong align_len= ALIGN_SIZE(len);
- if (!(new_block= allocate_block(max(min_size, align_len),
+ if (!(new_block= allocate_block(MY_MAX(min_size, align_len),
min_result_data_size == 0,
all_headers_len + min_result_data_size)))
{
@@ -3111,7 +3163,7 @@ my_bool Query_cache::allocate_data_chain(Query_cache_block **result_block,
}
new_block->n_tables = 0;
- new_block->used = min(len, new_block->length);
+ new_block->used = MY_MIN(len, new_block->length);
new_block->type = Query_cache_block::RES_INCOMPLETE;
new_block->next = new_block->prev = new_block;
Query_cache_result *header = new_block->result();
@@ -3280,7 +3332,7 @@ Query_cache::register_tables_from_list(THD *thd, TABLE_LIST *tables_used,
There are not callback function for for VIEWs
*/
if (!insert_table(key_length, key, (*block_table),
- tables_used->view_db.length + 1,
+ tables_used->view_db.length + 1, 0,
HA_CACHE_TBL_NONTRANSACT, 0, 0, TRUE))
DBUG_RETURN(0);
/*
@@ -3301,7 +3353,7 @@ Query_cache::register_tables_from_list(THD *thd, TABLE_LIST *tables_used,
if (!insert_table(tables_used->table->s->table_cache_key.length,
tables_used->table->s->table_cache_key.str,
(*block_table),
- tables_used->db_length,
+ tables_used->db_length, 0,
tables_used->table->file->table_cache_type(),
tables_used->callback_func,
tables_used->engine_data,
@@ -3366,7 +3418,8 @@ my_bool Query_cache::register_all_tables(THD *thd,
my_bool
Query_cache::insert_table(uint key_len, char *key,
Query_cache_block_table *node,
- uint32 db_length, uint8 cache_type,
+ uint32 db_length, uint8 suffix_length_arg,
+ uint8 cache_type,
qc_engine_callback callback,
ulonglong engine_data,
my_bool hash)
@@ -3441,6 +3494,7 @@ Query_cache::insert_table(uint key_len, char *key,
char *db= header->db();
header->table(db + db_length + 1);
header->key_length(key_len);
+ header->suffix_length(suffix_length_arg);
header->type(cache_type);
header->callback(callback);
header->engine_data(engine_data);
@@ -3517,7 +3571,7 @@ Query_cache::allocate_block(ulong len, my_bool not_less, ulong min)
DBUG_PRINT("qcache", ("len %lu, not less %d, min %lu",
len, not_less,min));
- if (len >= min(query_cache_size, query_cache_limit))
+ if (len >= MY_MIN(query_cache_size, query_cache_limit))
{
DBUG_PRINT("qcache", ("Query cache hase only %lu memory and limit %lu",
query_cache_size, query_cache_limit));
@@ -4078,13 +4132,13 @@ my_bool Query_cache::ask_handler_allowance(THD *thd,
continue;
handler= table->file;
if (!handler->register_query_cache_table(thd,
- table->s->table_cache_key.str,
- table->s->table_cache_key.length,
+ table->s->normalized_path.str,
+ table->s->normalized_path.length,
&tables_used->callback_func,
&tables_used->engine_data))
{
- DBUG_PRINT("qcache", ("Handler does not allow caching for %s.%s",
- tables_used->db, tables_used->alias));
+ DBUG_PRINT("qcache", ("Handler does not allow caching for %s",
+ table->s->normalized_path.str));
/*
As this can change from call to call, don't reset set
thd->lex->safe_to_cache_query
@@ -4503,7 +4557,7 @@ uint Query_cache::filename_2_table_key (char *key, const char *path,
DBUG_PRINT("qcache", ("table '%-.*s.%s'", *db_length, dbname, filename));
DBUG_RETURN((uint) (strmake(strmake(key, dbname,
- min(*db_length, NAME_LEN)) + 1,
+ MY_MIN(*db_length, NAME_LEN)) + 1,
filename, NAME_LEN) - key) + 1);
}
diff --git a/sql/sql_cache.h b/sql/sql_cache.h
index f35ac889b23..15848dabd33 100644
--- a/sql/sql_cache.h
+++ b/sql/sql_cache.h
@@ -190,6 +190,7 @@ struct Query_cache_table
Query_cache_table() {} /* Remove gcc warning */
char *tbl;
uint32 key_len;
+ uint8 suffix_len; /* For partitioned tables */
uint8 table_type;
/* unique for every engine reference */
qc_engine_callback callback_func;
@@ -210,6 +211,8 @@ struct Query_cache_table
inline void table(char *table_arg) { tbl= table_arg; }
inline uint32 key_length() { return key_len; }
inline void key_length(uint32 len) { key_len= len; }
+ inline uint8 suffix_length() { return suffix_len; }
+ inline void suffix_length(uint8 len) { suffix_len= len; }
inline uint8 type() { return table_type; }
inline void type(uint8 t) { table_type= t; }
inline qc_engine_callback callback() { return callback_func; }
@@ -490,7 +493,8 @@ protected:
unsigned pkt_nr);
my_bool insert_table(uint key_len, char *key,
Query_cache_block_table *node,
- uint32 db_length, uint8 cache_type,
+ uint32 db_length, uint8 suffix_length_arg,
+ uint8 cache_type,
qc_engine_callback callback,
ulonglong engine_data,
my_bool hash);
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 4fc1769ba1f..c9f07c4d036 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -63,6 +63,7 @@
#include "debug_sync.h"
#include "sql_parse.h" // is_update_query
#include "sql_callback.h"
+#include "lock.h"
#include "sql_connect.h"
/*
@@ -72,6 +73,8 @@
char internal_table_name[2]= "*";
char empty_c_string[1]= {0}; /* used for not defined db */
+LEX_STRING EMPTY_STR= { (char *) "", 0 };
+
const char * const THD::DEFAULT_WHERE= "field list";
/****************************************************************************
@@ -128,6 +131,7 @@ Key::Key(const Key &rhs, MEM_ROOT *mem_root)
Foreign_key::Foreign_key(const Foreign_key &rhs, MEM_ROOT *mem_root)
:Key(rhs,mem_root),
+ ref_db(rhs.ref_db),
ref_table(rhs.ref_table),
ref_columns(rhs.ref_columns,mem_root),
delete_opt(rhs.delete_opt),
@@ -583,7 +587,7 @@ void THD::enter_stage(const PSI_stage_info *new_stage,
proc_info= msg;
#ifdef HAVE_PSI_THREAD_INTERFACE
- PSI_CALL(set_thread_state)(msg);
+ PSI_THREAD_CALL(set_thread_state)(msg);
MYSQL_SET_STAGE(m_current_stage_key, calling_file, calling_line);
#endif
}
@@ -682,7 +686,7 @@ int thd_tx_is_read_only(const THD *thd)
extern "C"
void thd_inc_row_count(THD *thd)
{
- thd->warning_info->inc_current_row_for_warning();
+ thd->get_stmt_da()->inc_current_row_for_warning();
}
@@ -761,7 +765,7 @@ char *thd_security_context(THD *thd, char *buffer, unsigned int length,
if (max_query_len < 1)
len= thd->query_length();
else
- len= min(thd->query_length(), max_query_len);
+ len= MY_MIN(thd->query_length(), max_query_len);
str.append('\n');
str.append(thd->query(), len);
}
@@ -776,7 +780,7 @@ char *thd_security_context(THD *thd, char *buffer, unsigned int length,
was reallocated to a larger buffer to be able to fit.
*/
DBUG_ASSERT(buffer != NULL);
- length= min(str.length(), length-1);
+ length= MY_MIN(str.length(), length-1);
memcpy(buffer, str.c_ptr_quick(), length);
/* Make sure that the new string is null terminated */
buffer[length]= '\0';
@@ -801,9 +805,9 @@ char *thd_security_context(THD *thd, char *buffer, unsigned int length,
bool Drop_table_error_handler::handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
- MYSQL_ERROR::enum_warning_level level,
+ Sql_condition::enum_warning_level level,
const char* msg,
- MYSQL_ERROR ** cond_hdl)
+ Sql_condition ** cond_hdl)
{
*cond_hdl= NULL;
return ((sql_errno == EE_DELETE && my_errno == ENOENT) ||
@@ -826,8 +830,6 @@ THD::THD()
stmt_depends_on_first_successful_insert_id_in_prev_stmt(FALSE),
m_examined_row_count(0),
accessed_rows_and_keys(0),
- warning_info(&main_warning_info),
- stmt_da(&main_da),
m_statement_psi(NULL),
m_idle_psi(NULL),
m_server_idle(false),
@@ -847,7 +849,8 @@ THD::THD()
#if defined(ENABLED_DEBUG_SYNC)
debug_sync_control(0),
#endif /* defined(ENABLED_DEBUG_SYNC) */
- main_warning_info(0, false, false)
+ main_da(0, false, false),
+ m_stmt_da(&main_da)
{
ulong tmp;
@@ -859,8 +862,8 @@ THD::THD()
THD *old_THR_THD= current_thd;
set_current_thd(this);
status_var.memory_used= 0;
+ main_da.init();
- main_warning_info.init();
/*
Pass nominal parameters to init_alloc_root only to ensure that
the destructor works OK in case of an error. The main_mem_root
@@ -917,6 +920,7 @@ THD::THD()
mysys_var=0;
binlog_evt_union.do_union= FALSE;
enable_slow_log= 0;
+ durability_property= HA_REGULAR_DURABILITY;
#ifndef DBUG_OFF
dbug_sentry=THD_SENTRY_MAGIC;
@@ -1026,9 +1030,9 @@ void THD::push_internal_handler(Internal_error_handler *handler)
bool THD::handle_condition(uint sql_errno,
const char* sqlstate,
- MYSQL_ERROR::enum_warning_level level,
+ Sql_condition::enum_warning_level level,
const char* msg,
- MYSQL_ERROR ** cond_hdl)
+ Sql_condition ** cond_hdl)
{
if (!m_internal_handler)
{
@@ -1065,7 +1069,7 @@ void THD::raise_error(uint sql_errno)
const char* msg= ER(sql_errno);
(void) raise_condition(sql_errno,
NULL,
- MYSQL_ERROR::WARN_LEVEL_ERROR,
+ Sql_condition::WARN_LEVEL_ERROR,
msg);
}
@@ -1081,7 +1085,7 @@ void THD::raise_error_printf(uint sql_errno, ...)
va_end(args);
(void) raise_condition(sql_errno,
NULL,
- MYSQL_ERROR::WARN_LEVEL_ERROR,
+ Sql_condition::WARN_LEVEL_ERROR,
ebuff);
DBUG_VOID_RETURN;
}
@@ -1091,7 +1095,7 @@ void THD::raise_warning(uint sql_errno)
const char* msg= ER(sql_errno);
(void) raise_condition(sql_errno,
NULL,
- MYSQL_ERROR::WARN_LEVEL_WARN,
+ Sql_condition::WARN_LEVEL_WARN,
msg);
}
@@ -1107,7 +1111,7 @@ void THD::raise_warning_printf(uint sql_errno, ...)
va_end(args);
(void) raise_condition(sql_errno,
NULL,
- MYSQL_ERROR::WARN_LEVEL_WARN,
+ Sql_condition::WARN_LEVEL_WARN,
ebuff);
DBUG_VOID_RETURN;
}
@@ -1121,7 +1125,7 @@ void THD::raise_note(uint sql_errno)
const char* msg= ER(sql_errno);
(void) raise_condition(sql_errno,
NULL,
- MYSQL_ERROR::WARN_LEVEL_NOTE,
+ Sql_condition::WARN_LEVEL_NOTE,
msg);
DBUG_VOID_RETURN;
}
@@ -1140,24 +1144,25 @@ void THD::raise_note_printf(uint sql_errno, ...)
va_end(args);
(void) raise_condition(sql_errno,
NULL,
- MYSQL_ERROR::WARN_LEVEL_NOTE,
+ Sql_condition::WARN_LEVEL_NOTE,
ebuff);
DBUG_VOID_RETURN;
}
-MYSQL_ERROR* THD::raise_condition(uint sql_errno,
+Sql_condition* THD::raise_condition(uint sql_errno,
const char* sqlstate,
- MYSQL_ERROR::enum_warning_level level,
+ Sql_condition::enum_warning_level level,
const char* msg)
{
- MYSQL_ERROR *cond= NULL;
+ Diagnostics_area *da= get_stmt_da();
+ Sql_condition *cond= NULL;
DBUG_ENTER("THD::raise_condition");
if (!(variables.option_bits & OPTION_SQL_NOTES) &&
- (level == MYSQL_ERROR::WARN_LEVEL_NOTE))
+ (level == Sql_condition::WARN_LEVEL_NOTE))
DBUG_RETURN(NULL);
- warning_info->opt_clear_warning_info(query_id);
+ da->opt_clear_warning_info(query_id);
/*
TODO: replace by DBUG_ASSERT(sql_errno != 0) once all bugs similar to
@@ -1171,24 +1176,24 @@ MYSQL_ERROR* THD::raise_condition(uint sql_errno,
if (sqlstate == NULL)
sqlstate= mysql_errno_to_sqlstate(sql_errno);
- if ((level == MYSQL_ERROR::WARN_LEVEL_WARN) &&
+ if ((level == Sql_condition::WARN_LEVEL_WARN) &&
really_abort_on_warning())
{
/*
FIXME:
push_warning and strict SQL_MODE case.
*/
- level= MYSQL_ERROR::WARN_LEVEL_ERROR;
+ level= Sql_condition::WARN_LEVEL_ERROR;
killed= KILL_BAD_DATA;
}
switch (level)
{
- case MYSQL_ERROR::WARN_LEVEL_NOTE:
- case MYSQL_ERROR::WARN_LEVEL_WARN:
+ case Sql_condition::WARN_LEVEL_NOTE:
+ case Sql_condition::WARN_LEVEL_WARN:
got_warning= 1;
break;
- case MYSQL_ERROR::WARN_LEVEL_ERROR:
+ case Sql_condition::WARN_LEVEL_ERROR:
break;
default:
DBUG_ASSERT(FALSE);
@@ -1197,29 +1202,31 @@ MYSQL_ERROR* THD::raise_condition(uint sql_errno,
if (handle_condition(sql_errno, sqlstate, level, msg, &cond))
DBUG_RETURN(cond);
- if (level == MYSQL_ERROR::WARN_LEVEL_ERROR)
+ /*
+ Avoid pushing a condition for fatal out of memory errors as this will
+ require memory allocation and therefore might fail. Non fatal out of
+ memory errors can occur if raised by SIGNAL/RESIGNAL statement.
+ */
+ if (!(is_fatal_error && (sql_errno == EE_OUTOFMEMORY ||
+ sql_errno == ER_OUTOFMEMORY)))
+ {
+ cond= da->push_warning(this, sql_errno, sqlstate, level, msg);
+ }
+
+
+ if (level == Sql_condition::WARN_LEVEL_ERROR)
{
is_slave_error= 1; // needed to catch query errors during replication
- if (! stmt_da->is_error())
+ if (!da->is_error())
{
set_row_count_func(-1);
- stmt_da->set_error_status(this, sql_errno, msg, sqlstate);
+ da->set_error_status(sql_errno, msg, sqlstate, cond);
}
}
query_cache_abort(&query_cache_tls);
- /*
- Avoid pushing a condition for fatal out of memory errors as this will
- require memory allocation and therefore might fail. Non fatal out of
- memory errors can occur if raised by SIGNAL/RESIGNAL statement.
- */
- if (!(is_fatal_error && (sql_errno == EE_OUTOFMEMORY ||
- sql_errno == ER_OUTOFMEMORY)))
- {
- cond= warning_info->push_warning(this, sql_errno, sqlstate, level, msg);
- }
DBUG_RETURN(cond);
}
@@ -1331,6 +1338,7 @@ void THD::init(void)
tx_read_only= variables.tx_read_only;
update_charset();
reset_current_stmt_binlog_format_row();
+ reset_binlog_local_stmt_filter();
set_status_var_init();
bzero((char *) &org_status_var, sizeof(org_status_var));
@@ -1546,7 +1554,6 @@ THD::~THD()
mysql_audit_release(this);
plugin_thdvar_cleanup(this);
- DBUG_PRINT("info", ("freeing security context"));
main_security_ctx.destroy();
my_free(db);
db= NULL;
@@ -1570,12 +1577,14 @@ THD::~THD()
#endif
free_root(&main_mem_root, MYF(0));
- main_warning_info.free_memory();
+ main_da.free_memory();
if (status_var.memory_used != 0)
{
DBUG_PRINT("error", ("memory_used: %lld", status_var.memory_used));
SAFEMALLOC_REPORT_MEMORY(my_thread_dbug_id());
+#ifdef ENABLE_BEFORE_END_OF_MERGE_QQ
DBUG_ASSERT(status_var.memory_used == 0); // Ensure everything is freed
+#endif
}
set_current_thd(orig_thd);
@@ -1805,6 +1814,46 @@ void THD::disconnect()
}
+bool THD::notify_shared_lock(MDL_context_owner *ctx_in_use,
+ bool needs_thr_lock_abort)
+{
+ THD *in_use= ctx_in_use->get_thd();
+ bool signalled= FALSE;
+
+ if ((in_use->system_thread & SYSTEM_THREAD_DELAYED_INSERT) &&
+ !in_use->killed)
+ {
+ in_use->killed= KILL_CONNECTION;
+ mysql_mutex_lock(&in_use->mysys_var->mutex);
+ if (in_use->mysys_var->current_cond)
+ mysql_cond_broadcast(in_use->mysys_var->current_cond);
+ mysql_mutex_unlock(&in_use->mysys_var->mutex);
+ signalled= TRUE;
+ }
+
+ if (needs_thr_lock_abort)
+ {
+ mysql_mutex_lock(&in_use->LOCK_thd_data);
+ for (TABLE *thd_table= in_use->open_tables;
+ thd_table ;
+ thd_table= thd_table->next)
+ {
+ /*
+ Check for TABLE::needs_reopen() is needed since in some places we call
+ handler::close() for table instance (and set TABLE::db_stat to 0)
+ and do not remove such instances from the THD::open_tables
+ for some time, during which other thread can see those instances
+ (e.g. see partitioning code).
+ */
+ if (!thd_table->needs_reopen())
+ signalled|= mysql_lock_abort_for_thread(this, thd_table);
+ }
+ mysql_mutex_unlock(&in_use->LOCK_thd_data);
+ }
+ return signalled;
+}
+
+
/*
Get error number for killed state
Note that the error message can't have any parameters.
@@ -1955,6 +2004,14 @@ void THD::cleanup_after_query()
auto_inc_intervals_forced.empty();
#endif
}
+ /*
+ Forget the binlog stmt filter for the next query.
+ There are some code paths that:
+ - do not call THD::decide_logging_format()
+ - do call THD::binlog_query(),
+ making this reset necessary.
+ */
+ reset_binlog_local_stmt_filter();
if (first_successful_insert_id_in_cur_stmt > 0)
{
/* set what LAST_INSERT_ID() will return */
@@ -2661,7 +2718,7 @@ select_export::prepare(List<Item> &list, SELECT_LEX_UNIT *u)
Non-ASCII separator arguments are not fully supported
*/
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED,
ER(WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED));
}
@@ -2692,7 +2749,7 @@ select_export::prepare(List<Item> &list, SELECT_LEX_UNIT *u)
(exchange->opt_enclosed && non_string_results &&
field_term_length && strchr(NUMERIC_CHARS, field_term_char)))
{
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
ER_AMBIGUOUS_FIELD_TERM, ER(ER_AMBIGUOUS_FIELD_TERM));
is_ambiguous_field_term= TRUE;
}
@@ -2775,7 +2832,7 @@ int select_export::send_data(List<Item> &items)
convert_to_printable(printable_buff, sizeof(printable_buff),
error_pos, res->ptr() + res->length() - error_pos,
res->charset(), 6);
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE_FOR_FIELD,
ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD),
"string", printable_buff,
@@ -2786,7 +2843,7 @@ int select_export::send_data(List<Item> &items)
/*
result is longer than UINT_MAX32 and doesn't fit into String
*/
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
WARN_DATA_TRUNCATED, ER(WARN_DATA_TRUNCATED),
item->full_name(), static_cast<long>(row_count));
}
@@ -2821,7 +2878,7 @@ int select_export::send_data(List<Item> &items)
else
{
if (fixed_row_size)
- used_length=min(res->length(),item->max_length);
+ used_length=MY_MIN(res->length(),item->max_length);
else
used_length=res->length();
if ((result_type == STRING_RESULT || is_unsafe_field_sep) &&
@@ -3562,7 +3619,7 @@ int select_dumpvar::send_data(List<Item> &items)
bool select_dumpvar::send_eof()
{
if (! row_count)
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
ER_SP_FETCH_NO_DATA, ER(ER_SP_FETCH_NO_DATA));
/*
Don't send EOF if we're in error condition (which implies we've already
@@ -3733,6 +3790,7 @@ void Security_context::init()
void Security_context::destroy()
{
+ DBUG_PRINT("info", ("freeing security context"));
// If not pointer to constant
if (host != my_localhost)
{
@@ -3966,7 +4024,7 @@ static void thd_send_progress(THD *thd)
ulonglong report_time= my_interval_timer();
if (report_time > thd->progress.next_report_time)
{
- uint seconds_to_next= max(thd->variables.progress_report_time,
+ uint seconds_to_next= MY_MAX(thd->variables.progress_report_time,
global_system_variables.progress_report_time);
if (seconds_to_next == 0) // Turned off
seconds_to_next= 1; // Check again after 1 second
@@ -4152,6 +4210,41 @@ extern "C" bool thd_sqlcom_can_generate_row_events(const MYSQL_THD thd)
}
+extern "C" enum durability_properties thd_get_durability_property(const MYSQL_THD thd)
+{
+ enum durability_properties ret= HA_REGULAR_DURABILITY;
+
+ if (thd != NULL)
+ ret= thd->durability_property;
+
+ return ret;
+}
+
+/** Get the auto_increment_offset auto_increment_increment.
+Needed by InnoDB.
+@param thd Thread object
+@param off auto_increment_offset
+@param inc auto_increment_increment */
+extern "C" void thd_get_autoinc(const MYSQL_THD thd, ulong* off, ulong* inc)
+{
+ *off = thd->variables.auto_increment_offset;
+ *inc = thd->variables.auto_increment_increment;
+}
+
+
+/**
+ Is strict sql_mode set.
+ Needed by InnoDB.
+ @param thd Thread object
+ @return True if sql_mode has strict mode (all or trans).
+ @retval true sql_mode has strict mode (all or trans).
+ @retval false sql_mode has not strict mode (all or trans).
+*/
+extern "C" bool thd_is_strict_mode(const MYSQL_THD thd)
+{
+ return thd->is_strict_mode();
+}
+
/*
Interface for MySQL Server, plugins and storage engines to report
@@ -4387,7 +4480,7 @@ void THD::inc_status_created_tmp_disk_tables()
{
status_var_increment(status_var.created_tmp_disk_tables_);
#ifdef HAVE_PSI_STATEMENT_INTERFACE
- PSI_CALL(inc_statement_created_tmp_disk_tables)(m_statement_psi, 1);
+ PSI_STATEMENT_CALL(inc_statement_created_tmp_disk_tables)(m_statement_psi, 1);
#endif
}
@@ -4395,7 +4488,7 @@ void THD::inc_status_created_tmp_tables()
{
status_var_increment(status_var.created_tmp_tables_);
#ifdef HAVE_PSI_STATEMENT_INTERFACE
- PSI_CALL(inc_statement_created_tmp_tables)(m_statement_psi, 1);
+ PSI_STATEMENT_CALL(inc_statement_created_tmp_tables)(m_statement_psi, 1);
#endif
}
@@ -4403,7 +4496,7 @@ void THD::inc_status_select_full_join()
{
status_var_increment(status_var.select_full_join_count_);
#ifdef HAVE_PSI_STATEMENT_INTERFACE
- PSI_CALL(inc_statement_select_full_join)(m_statement_psi, 1);
+ PSI_STATEMENT_CALL(inc_statement_select_full_join)(m_statement_psi, 1);
#endif
}
@@ -4411,7 +4504,7 @@ void THD::inc_status_select_full_range_join()
{
status_var_increment(status_var.select_full_range_join_count_);
#ifdef HAVE_PSI_STATEMENT_INTERFACE
- PSI_CALL(inc_statement_select_full_range_join)(m_statement_psi, 1);
+ PSI_STATEMENT_CALL(inc_statement_select_full_range_join)(m_statement_psi, 1);
#endif
}
@@ -4419,7 +4512,7 @@ void THD::inc_status_select_range()
{
status_var_increment(status_var.select_range_count_);
#ifdef HAVE_PSI_STATEMENT_INTERFACE
- PSI_CALL(inc_statement_select_range)(m_statement_psi, 1);
+ PSI_STATEMENT_CALL(inc_statement_select_range)(m_statement_psi, 1);
#endif
}
@@ -4427,7 +4520,7 @@ void THD::inc_status_select_range_check()
{
status_var_increment(status_var.select_range_check_count_);
#ifdef HAVE_PSI_STATEMENT_INTERFACE
- PSI_CALL(inc_statement_select_range_check)(m_statement_psi, 1);
+ PSI_STATEMENT_CALL(inc_statement_select_range_check)(m_statement_psi, 1);
#endif
}
@@ -4435,7 +4528,7 @@ void THD::inc_status_select_scan()
{
status_var_increment(status_var.select_scan_count_);
#ifdef HAVE_PSI_STATEMENT_INTERFACE
- PSI_CALL(inc_statement_select_scan)(m_statement_psi, 1);
+ PSI_STATEMENT_CALL(inc_statement_select_scan)(m_statement_psi, 1);
#endif
}
@@ -4443,7 +4536,7 @@ void THD::inc_status_sort_merge_passes()
{
status_var_increment(status_var.filesort_merge_passes_);
#ifdef HAVE_PSI_STATEMENT_INTERFACE
- PSI_CALL(inc_statement_sort_merge_passes)(m_statement_psi, 1);
+ PSI_STATEMENT_CALL(inc_statement_sort_merge_passes)(m_statement_psi, 1);
#endif
}
@@ -4451,7 +4544,7 @@ void THD::inc_status_sort_range()
{
status_var_increment(status_var.filesort_range_count_);
#ifdef HAVE_PSI_STATEMENT_INTERFACE
- PSI_CALL(inc_statement_sort_range)(m_statement_psi, 1);
+ PSI_STATEMENT_CALL(inc_statement_sort_range)(m_statement_psi, 1);
#endif
}
@@ -4459,7 +4552,7 @@ void THD::inc_status_sort_rows(ha_rows count)
{
statistic_add(status_var.filesort_rows_, count, &LOCK_status);
#ifdef HAVE_PSI_STATEMENT_INTERFACE
- PSI_CALL(inc_statement_sort_rows)(m_statement_psi, count);
+ PSI_STATEMENT_CALL(inc_statement_sort_rows)(m_statement_psi, count);
#endif
}
@@ -4467,7 +4560,7 @@ void THD::inc_status_sort_scan()
{
status_var_increment(status_var.filesort_scan_count_);
#ifdef HAVE_PSI_STATEMENT_INTERFACE
- PSI_CALL(inc_statement_sort_scan)(m_statement_psi, 1);
+ PSI_STATEMENT_CALL(inc_statement_sort_scan)(m_statement_psi, 1);
#endif
}
@@ -4475,7 +4568,7 @@ void THD::set_status_no_index_used()
{
server_status|= SERVER_QUERY_NO_INDEX_USED;
#ifdef HAVE_PSI_STATEMENT_INTERFACE
- PSI_CALL(set_statement_no_index_used)(m_statement_psi);
+ PSI_STATEMENT_CALL(set_statement_no_index_used)(m_statement_psi);
#endif
}
@@ -4483,7 +4576,7 @@ void THD::set_status_no_good_index_used()
{
server_status|= SERVER_QUERY_NO_GOOD_INDEX_USED;
#ifdef HAVE_PSI_STATEMENT_INTERFACE
- PSI_CALL(set_statement_no_good_index_used)(m_statement_psi);
+ PSI_STATEMENT_CALL(set_statement_no_good_index_used)(m_statement_psi);
#endif
}
@@ -4491,7 +4584,7 @@ void THD::set_command(enum enum_server_command command)
{
m_command= command;
#ifdef HAVE_PSI_THREAD_INTERFACE
- PSI_CALL(set_thread_command)(m_command);
+ PSI_STATEMENT_CALL(set_thread_command)(m_command);
#endif
}
@@ -4504,7 +4597,7 @@ void THD::set_query(const CSET_STRING &string_arg)
mysql_mutex_unlock(&LOCK_thd_data);
#ifdef HAVE_PSI_THREAD_INTERFACE
- PSI_CALL(set_thread_info)(query(), query_length());
+ PSI_THREAD_CALL(set_thread_info)(query(), query_length());
#endif
}
@@ -4817,6 +4910,8 @@ int THD::decide_logging_format(TABLE_LIST *tables)
DBUG_PRINT("info", ("lex->get_stmt_unsafe_flags(): 0x%x",
lex->get_stmt_unsafe_flags()));
+ reset_binlog_local_stmt_filter();
+
/*
We should not decide logging format if the binlog is closed or
binlogging is off, or if the statement is filtered out from the
@@ -4859,6 +4954,28 @@ int THD::decide_logging_format(TABLE_LIST *tables)
A pointer to a previous table that was accessed.
*/
TABLE* prev_access_table= NULL;
+ /**
+ The number of tables used in the current statement,
+ that should be replicated.
+ */
+ uint replicated_tables_count= 0;
+ /**
+ The number of tables written to in the current statement,
+ that should not be replicated.
+ A table should not be replicated when it is considered
+ 'local' to a MySQL instance.
+ Currently, these tables are:
+ - mysql.slow_log
+ - mysql.general_log
+ - mysql.slave_relay_log_info
+ - mysql.slave_master_info
+ - mysql.slave_worker_info
+ - performance_schema.*
+ - TODO: information_schema.*
+ In practice, from this list, only performance_schema.* tables
+ are written to by user queries.
+ */
+ uint non_replicated_tables_count= 0;
#ifndef DBUG_OFF
{
@@ -4881,14 +4998,38 @@ int THD::decide_logging_format(TABLE_LIST *tables)
if (table->placeholder())
continue;
- if (table->table->s->table_category == TABLE_CATEGORY_PERFORMANCE ||
- table->table->s->table_category == TABLE_CATEGORY_LOG)
- lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_TABLE);
-
handler::Table_flags const flags= table->table->file->ha_table_flags();
DBUG_PRINT("info", ("table: %s; ha_table_flags: 0x%llx",
table->table_name, flags));
+
+ if (table->table->no_replicate)
+ {
+ /*
+ The statement uses a table that is not replicated.
+ The following properties about the table:
+ - persistent / transient
+ - transactional / non transactional
+ - temporary / permanent
+ - read or write
+ - multiple engines involved because of this table
+ are not relevant, as this table is completely ignored.
+ Because the statement uses a non replicated table,
+ using STATEMENT format in the binlog is impossible.
+ Either this statement will be discarded entirely,
+ or it will be logged (possibly partially) in ROW format.
+ */
+ lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_TABLE);
+
+ if (table->lock_type >= TL_WRITE_ALLOW_WRITE)
+ {
+ non_replicated_tables_count++;
+ continue;
+ }
+ }
+
+ replicated_tables_count++;
+
if (table->lock_type >= TL_WRITE_ALLOW_WRITE)
{
if (prev_write_table && prev_write_table->file->ht !=
@@ -5064,6 +5205,30 @@ int THD::decide_logging_format(TABLE_LIST *tables)
}
}
+ if (non_replicated_tables_count > 0)
+ {
+ if ((replicated_tables_count == 0) || ! is_write)
+ {
+ DBUG_PRINT("info", ("decision: no logging, no replicated table affected"));
+ set_binlog_local_stmt_filter();
+ }
+ else
+ {
+ if (! is_current_stmt_binlog_format_row())
+ {
+ my_error((error= ER_BINLOG_STMT_MODE_AND_NO_REPL_TABLES), MYF(0));
+ }
+ else
+ {
+ clear_binlog_local_stmt_filter();
+ }
+ }
+ }
+ else
+ {
+ clear_binlog_local_stmt_filter();
+ }
+
if (error) {
DBUG_PRINT("info", ("decision: no logging since an error was generated"));
DBUG_RETURN(-1);
@@ -5102,7 +5267,7 @@ int THD::decide_logging_format(TABLE_LIST *tables)
Replace the last ',' with '.' for table_names
*/
table_names.replace(table_names.length()-1, 1, ".", 1);
- push_warning_printf(this, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(this, Sql_condition::WARN_LEVEL_WARN,
ER_UNKNOWN_ERROR,
"Row events are not logged for %s statements "
"that modify BLACKHOLE tables in row format. "
@@ -5679,7 +5844,7 @@ void THD::issue_unsafe_warnings()
{
if ((unsafe_type_flags & (1 << unsafe_type)) != 0)
{
- push_warning_printf(this, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning_printf(this, Sql_condition::WARN_LEVEL_NOTE,
ER_BINLOG_UNSAFE_STATEMENT,
ER(ER_BINLOG_UNSAFE_STATEMENT),
ER(LEX::binlog_stmt_unsafe_errcode[unsafe_type]));
@@ -5729,6 +5894,15 @@ int THD::binlog_query(THD::enum_binlog_query_type qtype, char const *query_arg,
show_query_type(qtype), (int) query_len, query_arg));
DBUG_ASSERT(query_arg && mysql_bin_log.is_open());
+ if (get_binlog_local_stmt_filter() == BINLOG_FILTER_SET)
+ {
+ /*
+ The current statement is to be ignored, and not written to
+ the binlog. Do not call issue_unsafe_warnings().
+ */
+ DBUG_RETURN(0);
+ }
+
/*
If we are not in prelocked mode, mysql_unlock_tables() will be
called after this binlog_query(), so we have to flush the pending
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 2e5e87fd232..889028ce8e5 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -1,6 +1,6 @@
/*
Copyright (c) 2000, 2012, Oracle and/or its affiliates.
- Copyright (c) 2009, 2013, Monty Program Ab
+ Copyright (c) 2009, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -72,7 +72,6 @@ class Rows_log_event;
class Sroutine_hash_entry;
class user_var_entry;
-enum enum_enable_or_disable { LEAVE_AS_IS, ENABLE, DISABLE };
enum enum_ha_read_modes { RFIRST, RNEXT, RPREV, RLAST, RKEY, RNEXT_SAME };
enum enum_duplicates { DUP_ERROR, DUP_REPLACE, DUP_UPDATE };
enum enum_delay_key_write { DELAY_KEY_WRITE_NONE, DELAY_KEY_WRITE_ON,
@@ -122,6 +121,7 @@ enum enum_filetype { FILETYPE_CSV, FILETYPE_XML };
extern char internal_table_name[2];
extern char empty_c_string[1];
+extern LEX_STRING EMPTY_STR;
extern MYSQL_PLUGIN_IMPORT const char **errmesg;
extern bool volatile shutdown_in_progress;
@@ -232,12 +232,15 @@ public:
class Alter_drop :public Sql_alloc {
public:
- enum drop_type {KEY, COLUMN };
+ enum drop_type {KEY, COLUMN, FOREIGN_KEY };
const char *name;
enum drop_type type;
bool drop_if_exists;
Alter_drop(enum drop_type par_type,const char *par_name, bool par_exists)
- :name(par_name), type(par_type), drop_if_exists(par_exists) {}
+ :name(par_name), type(par_type), drop_if_exists(par_exists)
+ {
+ DBUG_ASSERT(par_name != NULL);
+ }
/**
Used to make a clone of this object for ALTER/CREATE TABLE
@sa comment for Key_part_spec::clone
@@ -304,7 +307,6 @@ public:
{ return new (mem_root) Key(*this, mem_root); }
};
-class Table_ident;
class Foreign_key: public Key {
public:
@@ -313,20 +315,25 @@ public:
enum fk_option { FK_OPTION_UNDEF, FK_OPTION_RESTRICT, FK_OPTION_CASCADE,
FK_OPTION_SET_NULL, FK_OPTION_NO_ACTION, FK_OPTION_DEFAULT};
- Table_ident *ref_table;
+ LEX_STRING ref_db;
+ LEX_STRING ref_table;
List<Key_part_spec> ref_columns;
uint delete_opt, update_opt, match_opt;
Foreign_key(const LEX_STRING &name_arg, List<Key_part_spec> &cols,
- Table_ident *table, List<Key_part_spec> &ref_cols,
+ const LEX_STRING &ref_db_arg, const LEX_STRING &ref_table_arg,
+ List<Key_part_spec> &ref_cols,
uint delete_opt_arg, uint update_opt_arg, uint match_opt_arg,
bool if_not_exists_opt)
:Key(FOREIGN_KEY, name_arg, &default_key_create_info, 0, cols, NULL,
if_not_exists_opt),
- ref_table(table), ref_columns(ref_cols),
+ ref_db(ref_db_arg), ref_table(ref_table_arg), ref_columns(ref_cols),
delete_opt(delete_opt_arg), update_opt(update_opt_arg),
match_opt(match_opt_arg)
- {}
- Foreign_key(const Foreign_key &rhs, MEM_ROOT *mem_root);
+ {
+ // We don't check for duplicate FKs.
+ key_create_info.check_for_duplicate_indexes= false;
+ }
+ Foreign_key(const Foreign_key &rhs, MEM_ROOT *mem_root);
/**
Used to make a clone of this object for ALTER/CREATE TABLE
@sa comment for Key_part_spec::clone
@@ -463,6 +470,8 @@ class Time_zone;
#define THD_CHECK_SENTRY(thd) DBUG_ASSERT(thd->dbug_sentry == THD_SENTRY_MAGIC)
+typedef ulonglong sql_mode_t;
+
typedef struct system_variables
{
/*
@@ -486,7 +495,7 @@ typedef struct system_variables
ulonglong tmp_table_size;
ulonglong long_query_time;
ulonglong optimizer_switch;
- ulonglong sql_mode; ///< which non-standard SQL behaviour should be enabled
+ sql_mode_t sql_mode; ///< which non-standard SQL behaviour should be enabled
ulonglong option_bits; ///< OPTION_xxx constants, e.g. OPTION_PROFILING
ulonglong join_buff_space_limit;
ulonglong log_slow_filter;
@@ -1362,9 +1371,9 @@ public:
virtual bool handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
- MYSQL_ERROR::enum_warning_level level,
+ Sql_condition::enum_warning_level level,
const char* msg,
- MYSQL_ERROR ** cond_hdl) = 0;
+ Sql_condition ** cond_hdl) = 0;
private:
Internal_error_handler *m_prev_internal_handler;
@@ -1383,9 +1392,9 @@ public:
bool handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
- MYSQL_ERROR::enum_warning_level level,
+ Sql_condition::enum_warning_level level,
const char* msg,
- MYSQL_ERROR ** cond_hdl)
+ Sql_condition ** cond_hdl)
{
/* Ignore error */
return TRUE;
@@ -1410,9 +1419,9 @@ public:
bool handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
- MYSQL_ERROR::enum_warning_level level,
+ Sql_condition::enum_warning_level level,
const char* msg,
- MYSQL_ERROR ** cond_hdl);
+ Sql_condition ** cond_hdl);
private:
};
@@ -1584,6 +1593,7 @@ void dbug_serve_apcs(THD *thd, int n_calls);
*/
class THD :public Statement,
+ public MDL_context_owner,
public Open_tables_state
{
private:
@@ -1864,8 +1874,46 @@ public:
return current_stmt_binlog_format == BINLOG_FORMAT_ROW;
}
+ enum binlog_filter_state
+ {
+ BINLOG_FILTER_UNKNOWN,
+ BINLOG_FILTER_CLEAR,
+ BINLOG_FILTER_SET
+ };
+
+ inline void reset_binlog_local_stmt_filter()
+ {
+ m_binlog_filter_state= BINLOG_FILTER_UNKNOWN;
+ }
+
+ inline void clear_binlog_local_stmt_filter()
+ {
+ DBUG_ASSERT(m_binlog_filter_state == BINLOG_FILTER_UNKNOWN);
+ m_binlog_filter_state= BINLOG_FILTER_CLEAR;
+ }
+
+ inline void set_binlog_local_stmt_filter()
+ {
+ DBUG_ASSERT(m_binlog_filter_state == BINLOG_FILTER_UNKNOWN);
+ m_binlog_filter_state= BINLOG_FILTER_SET;
+ }
+
+ inline binlog_filter_state get_binlog_local_stmt_filter()
+ {
+ return m_binlog_filter_state;
+ }
+
private:
/**
+ Indicate if the current statement should be discarded
+ instead of written to the binlog.
+ This is used to discard special statements, such as
+ DML or DDL that affects only 'local' (non replicated)
+ tables, such as performance_schema.*
+ */
+ binlog_filter_state m_binlog_filter_state;
+
+ /**
Indicates the format in which the current statement will be
logged. This can only be set from @c decide_logging_format().
*/
@@ -2233,8 +2281,6 @@ public:
USER_CONN *user_connect;
CHARSET_INFO *db_charset;
- Warning_info *warning_info;
- Diagnostics_area *stmt_da;
#if defined(ENABLED_PROFILING)
PROFILING profiling;
#endif
@@ -2312,6 +2358,12 @@ public:
MEM_ROOT *user_var_events_alloc; /* Allocate above array elements here */
/*
+ Define durability properties that engines may check to
+ improve performance. Not yet used in MariaDB
+ */
+ enum durability_properties durability_property;
+
+ /*
If checking this in conjunction with a wait condition, please
include a check after enter_cond() if you want to avoid a race
condition. For details see the implementation of awake(),
@@ -2595,10 +2647,41 @@ public:
mysql_mutex_unlock(&mysys_var->mutex);
return;
}
+ virtual int is_killed() { return killed; }
+ virtual THD* get_thd() { return this; }
+
+ /**
+ A callback to the server internals that is used to address
+ special cases of the locking protocol.
+ Invoked when acquiring an exclusive lock, for each thread that
+ has a conflicting shared metadata lock.
+
+ This function:
+ - aborts waiting of the thread on a data lock, to make it notice
+ the pending exclusive lock and back off.
+ - if the thread is an INSERT DELAYED thread, sends it a KILL
+ signal to terminate it.
+
+ @note This function does not wait for the thread to give away its
+ locks. Waiting is done outside for all threads at once.
+
+ @param ctx_in_use The MDL context owner (thread) to wake up.
+ @param needs_thr_lock_abort Indicates that to wake up thread
+ this call needs to abort its waiting
+ on table-level lock.
+
+ @retval TRUE if the thread was woken up
+ @retval FALSE otherwise.
+ */
+ virtual bool notify_shared_lock(MDL_context_owner *ctx_in_use,
+ bool needs_thr_lock_abort);
+
+ // End implementation of MDL_context_owner interface.
+
inline bool is_strict_mode() const
{
- return variables.sql_mode & (MODE_STRICT_TRANS_TABLES |
- MODE_STRICT_ALL_TABLES);
+ return (bool) (variables.sql_mode & (MODE_STRICT_TRANS_TABLES |
+ MODE_STRICT_ALL_TABLES));
}
inline my_time_t query_start() { query_start_used=1; return start_time; }
inline ulong query_start_sec_part()
@@ -2609,7 +2692,7 @@ public:
start_time= hrtime_to_my_time(hrtime);
start_time_sec_part= hrtime_sec_part(hrtime);
#ifdef HAVE_PSI_THREAD_INTERFACE
- PSI_CALL(set_thread_start_time)(start_time);
+ PSI_THREAD_CALL(set_thread_start_time)(start_time);
#endif
}
inline void set_start_time()
@@ -2619,7 +2702,7 @@ public:
start_time= hrtime_to_my_time(user_time);
start_time_sec_part= hrtime_sec_part(user_time);
#ifdef HAVE_PSI_THREAD_INTERFACE
- PSI_CALL(set_thread_start_time)(start_time);
+ PSI_THREAD_CALL(set_thread_start_time)(start_time);
#endif
}
else
@@ -2779,8 +2862,8 @@ public:
inline void clear_error()
{
DBUG_ENTER("clear_error");
- if (stmt_da->is_error())
- stmt_da->reset_diagnostics_area();
+ if (get_stmt_da()->is_error())
+ get_stmt_da()->reset_diagnostics_area();
is_slave_error= 0;
DBUG_VOID_RETURN;
}
@@ -2807,7 +2890,7 @@ public:
*/
inline void fatal_error()
{
- DBUG_ASSERT(stmt_da->is_error() || killed);
+ DBUG_ASSERT(get_stmt_da()->is_error() || killed);
is_fatal_error= 1;
DBUG_PRINT("error",("Fatal error set"));
}
@@ -2824,11 +2907,19 @@ public:
To raise this flag, use my_error().
*/
- inline bool is_error() const { return stmt_da->is_error(); }
+ inline bool is_error() const { return m_stmt_da->is_error(); }
/// Returns Diagnostics-area for the current statement.
Diagnostics_area *get_stmt_da()
- { return stmt_da; }
+ { return m_stmt_da; }
+
+ /// Returns Diagnostics-area for the current statement.
+ const Diagnostics_area *get_stmt_da() const
+ { return m_stmt_da; }
+
+ /// Sets Diagnostics-area for the current statement.
+ void set_stmt_da(Diagnostics_area *da)
+ { m_stmt_da= da; }
inline CHARSET_INFO *charset() { return variables.character_set_client; }
void update_charset();
@@ -3069,7 +3160,7 @@ public:
mysql_mutex_unlock(&LOCK_thd_data);
#ifdef HAVE_PSI_THREAD_INTERFACE
if (result)
- PSI_CALL(set_thread_db)(new_db, new_db_len);
+ PSI_THREAD_CALL(set_thread_db)(new_db, new_db_len);
#endif
return result;
}
@@ -3094,7 +3185,7 @@ public:
db_length= new_db_len;
mysql_mutex_unlock(&LOCK_thd_data);
#ifdef HAVE_PSI_THREAD_INTERFACE
- PSI_CALL(set_thread_db)(new_db, new_db_len);
+ PSI_THREAD_CALL(set_thread_db)(new_db, new_db_len);
#endif
}
}
@@ -3126,6 +3217,7 @@ public:
*/
void push_internal_handler(Internal_error_handler *handler);
+private:
/**
Handle a sql condition.
@param sql_errno the condition error number
@@ -3135,12 +3227,13 @@ public:
@param[out] cond_hdl the sql condition raised, if any
@return true if the condition is handled
*/
- virtual bool handle_condition(uint sql_errno,
- const char* sqlstate,
- MYSQL_ERROR::enum_warning_level level,
- const char* msg,
- MYSQL_ERROR ** cond_hdl);
+ bool handle_condition(uint sql_errno,
+ const char* sqlstate,
+ Sql_condition::enum_warning_level level,
+ const char* msg,
+ Sql_condition ** cond_hdl);
+public:
/**
Remove the error handler last pushed.
*/
@@ -3190,10 +3283,10 @@ private:
To raise a SQL condition, the code should use the public
raise_error() or raise_warning() methods provided by class THD.
*/
- friend class Signal_common;
- friend class Signal_statement;
- friend class Resignal_statement;
- friend void push_warning(THD*, MYSQL_ERROR::enum_warning_level, uint, const char*);
+ friend class Sql_cmd_common_signal;
+ friend class Sql_cmd_signal;
+ friend class Sql_cmd_resignal;
+ friend void push_warning(THD*, Sql_condition::enum_warning_level, uint, const char*);
friend void my_message_sql(uint, const char *, myf);
/**
@@ -3204,10 +3297,10 @@ private:
@param msg the condition message text
@return The condition raised, or NULL
*/
- MYSQL_ERROR*
+ Sql_condition*
raise_condition(uint sql_errno,
const char* sqlstate,
- MYSQL_ERROR::enum_warning_level level,
+ Sql_condition::enum_warning_level level,
const char* msg);
public:
@@ -3340,8 +3433,8 @@ private:
tree itself is reused between executions and thus is stored elsewhere.
*/
MEM_ROOT main_mem_root;
- Warning_info main_warning_info;
Diagnostics_area main_da;
+ Diagnostics_area *m_stmt_da;
/**
It will be set TURE if CURRENT_USER() is called in account management
@@ -3374,24 +3467,24 @@ private:
};
-/** A short cut for thd->stmt_da->set_ok_status(). */
+/** A short cut for thd->get_stmt_da()->set_ok_status(). */
inline void
my_ok(THD *thd, ulonglong affected_rows= 0, ulonglong id= 0,
const char *message= NULL)
{
thd->set_row_count_func(affected_rows);
- thd->stmt_da->set_ok_status(thd, affected_rows, id, message);
+ thd->get_stmt_da()->set_ok_status(affected_rows, id, message);
}
-/** A short cut for thd->stmt_da->set_eof_status(). */
+/** A short cut for thd->get_stmt_da()->set_eof_status(). */
inline void
my_eof(THD *thd)
{
thd->set_row_count_func(-1);
- thd->stmt_da->set_eof_status(thd);
+ thd->get_stmt_da()->set_eof_status(thd);
}
#define tmp_disable_binlog(A) \
@@ -3406,9 +3499,9 @@ my_eof(THD *thd)
checking for all date handling.
*/
-const my_bool strict_date_checking= 0;
+const my_bool strict_date_checking= 1;
-inline ulonglong sql_mode_for_dates(THD *thd)
+inline sql_mode_t sql_mode_for_dates(THD *thd)
{
if (strict_date_checking)
return (thd->variables.sql_mode &
@@ -3417,7 +3510,7 @@ inline ulonglong sql_mode_for_dates(THD *thd)
return (thd->variables.sql_mode & MODE_INVALID_DATES);
}
-inline ulonglong sql_mode_for_dates()
+inline sql_mode_t sql_mode_for_dates()
{
return sql_mode_for_dates(current_thd);
}
diff --git a/sql/sql_client.cc b/sql/sql_client.cc
index eb6c039c065..e7c555b5947 100644
--- a/sql/sql_client.cc
+++ b/sql/sql_client.cc
@@ -36,7 +36,7 @@ void my_net_local_init(NET *net)
(uint)global_system_variables.net_write_timeout);
net->retry_count= (uint) global_system_variables.net_retry_count;
- net->max_packet_size= max(global_system_variables.net_buffer_length,
+ net->max_packet_size= MY_MAX(global_system_variables.net_buffer_length,
global_system_variables.max_allowed_packet);
#endif
}
diff --git a/sql/sql_cmd.h b/sql/sql_cmd.h
index 794037a0033..de7ef5fc832 100644
--- a/sql/sql_cmd.h
+++ b/sql/sql_cmd.h
@@ -88,6 +88,7 @@ enum enum_sql_command {
SQLCOM_SHOW_PROFILE, SQLCOM_SHOW_PROFILES,
SQLCOM_SIGNAL, SQLCOM_RESIGNAL,
SQLCOM_SHOW_RELAYLOG_EVENTS,
+ SQLCOM_GET_DIAGNOSTICS,
SQLCOM_SHOW_USER_STATS, SQLCOM_SHOW_TABLE_STATS, SQLCOM_SHOW_INDEX_STATS,
SQLCOM_SHOW_CLIENT_STATS,
SQLCOM_SLAVE_ALL_START, SQLCOM_SLAVE_ALL_STOP,
diff --git a/sql/sql_connect.cc b/sql/sql_connect.cc
index 19e02cc7dae..f14c43d4c54 100644
--- a/sql/sql_connect.cc
+++ b/sql/sql_connect.cc
@@ -124,6 +124,7 @@ end:
int check_for_max_user_connections(THD *thd, USER_CONN *uc)
{
int error= 1;
+ Host_errors errors;
DBUG_ENTER("check_for_max_user_connections");
mysql_mutex_lock(&LOCK_user_conn);
@@ -135,6 +136,8 @@ int check_for_max_user_connections(THD *thd, USER_CONN *uc)
!(thd->security_ctx->master_access & SUPER_ACL))
{
my_error(ER_TOO_MANY_USER_CONNECTIONS, MYF(0), uc->user);
+ error=1;
+ errors.m_max_user_connection= 1;
goto end;
}
time_out_user_resource_limits(thd, uc);
@@ -144,6 +147,8 @@ int check_for_max_user_connections(THD *thd, USER_CONN *uc)
my_error(ER_USER_LIMIT_REACHED, MYF(0), uc->user,
"max_user_connections",
(long) uc->user_resources.user_conn);
+ error= 1;
+ errors.m_max_user_connection= 1;
goto end;
}
if (uc->user_resources.conn_per_hour &&
@@ -152,6 +157,8 @@ int check_for_max_user_connections(THD *thd, USER_CONN *uc)
my_error(ER_USER_LIMIT_REACHED, MYF(0), uc->user,
"max_connections_per_hour",
(long) uc->user_resources.conn_per_hour);
+ error=1;
+ errors.m_max_user_connection_per_hour= 1;
goto end;
}
uc->conn_per_hour++;
@@ -169,6 +176,10 @@ end:
thd->user_connect= NULL;
}
mysql_mutex_unlock(&LOCK_user_conn);
+ if (error)
+ {
+ inc_host_errors(thd->main_security_ctx.ip, &errors);
+ }
DBUG_RETURN(error);
}
@@ -431,7 +442,7 @@ void init_user_stats(USER_STATS *user_stats,
DBUG_ENTER("init_user_stats");
DBUG_PRINT("enter", ("user: %s priv_user: %s", user, priv_user));
- user_length= min(user_length, sizeof(user_stats->user)-1);
+ user_length= MY_MIN(user_length, sizeof(user_stats->user)-1);
memcpy(user_stats->user, user, user_length);
user_stats->user[user_length]= 0;
user_stats->user_name_length= user_length;
@@ -867,7 +878,10 @@ bool init_new_connection_handler_thread()
{
pthread_detach_this_thread();
if (my_thread_init())
+ {
+ statistic_increment(connection_errors_internal, &LOCK_status);
return 1;
+ }
return 0;
}
@@ -887,6 +901,7 @@ bool init_new_connection_handler_thread()
static int check_connection(THD *thd)
{
uint connect_errors= 0;
+ int auth_rc;
NET *net= &thd->net;
DBUG_PRINT("info",
@@ -898,48 +913,116 @@ static int check_connection(THD *thd)
if (!thd->main_security_ctx.host) // If TCP/IP connection
{
+ my_bool peer_rc;
char ip[NI_MAXHOST];
- if (vio_peer_addr(net->vio, ip, &thd->peer_port, NI_MAXHOST))
- {
- my_error(ER_BAD_HOST_ERROR, MYF(0));
- return 1;
- }
- /* BEGIN : DEBUG */
- DBUG_EXECUTE_IF("addr_fake_ipv4",
+ peer_rc= vio_peer_addr(net->vio, ip, &thd->peer_port, NI_MAXHOST);
+
+ /*
+ ===========================================================================
+ DEBUG code only (begin)
+ Simulate various output from vio_peer_addr().
+ ===========================================================================
+ */
+
+ DBUG_EXECUTE_IF("vio_peer_addr_error",
+ {
+ peer_rc= 1;
+ }
+ );
+ DBUG_EXECUTE_IF("vio_peer_addr_fake_ipv4",
{
struct sockaddr *sa= (sockaddr *) &net->vio->remote;
sa->sa_family= AF_INET;
- struct in_addr *ip4= &((struct sockaddr_in *)sa)->sin_addr;
- /* See RFC 5737, 192.0.2.0/23 is reserved */
+ struct in_addr *ip4= &((struct sockaddr_in *) sa)->sin_addr;
+ /* See RFC 5737, 192.0.2.0/24 is reserved. */
const char* fake= "192.0.2.4";
ip4->s_addr= inet_addr(fake);
strcpy(ip, fake);
- };);
- /* END : DEBUG */
+ peer_rc= 0;
+ }
+ );
+#ifdef HAVE_IPV6
+ DBUG_EXECUTE_IF("vio_peer_addr_fake_ipv6",
+ {
+ struct sockaddr_in6 *sa= (sockaddr_in6 *) &net->vio->remote;
+ sa->sin6_family= AF_INET6;
+ struct in6_addr *ip6= & sa->sin6_addr;
+ /* See RFC 3849, ipv6 2001:DB8::/32 is reserved. */
+ const char* fake= "2001:db8::6:6";
+ /* inet_pton(AF_INET6, fake, ip6); not available on Windows XP. */
+ ip6->s6_addr[ 0] = 0x20;
+ ip6->s6_addr[ 1] = 0x01;
+ ip6->s6_addr[ 2] = 0x0d;
+ ip6->s6_addr[ 3] = 0xb8;
+ ip6->s6_addr[ 4] = 0x00;
+ ip6->s6_addr[ 5] = 0x00;
+ ip6->s6_addr[ 6] = 0x00;
+ ip6->s6_addr[ 7] = 0x00;
+ ip6->s6_addr[ 8] = 0x00;
+ ip6->s6_addr[ 9] = 0x00;
+ ip6->s6_addr[10] = 0x00;
+ ip6->s6_addr[11] = 0x00;
+ ip6->s6_addr[12] = 0x00;
+ ip6->s6_addr[13] = 0x06;
+ ip6->s6_addr[14] = 0x00;
+ ip6->s6_addr[15] = 0x06;
+ strcpy(ip, fake);
+ peer_rc= 0;
+ }
+ );
+#endif /* HAVE_IPV6 */
+
+ /*
+ ===========================================================================
+ DEBUG code only (end)
+ ===========================================================================
+ */
+
+ if (peer_rc)
+ {
+ /*
+ Since we can not even get the peer IP address,
+ there is nothing to show in the host_cache,
+ so increment the global status variable for peer address errors.
+ */
+ statistic_increment(connection_errors_peer_addr, &LOCK_status);
+ my_error(ER_BAD_HOST_ERROR, MYF(0));
+ return 1;
+ }
if (!(thd->main_security_ctx.ip= my_strdup(ip,MYF(MY_WME))))
+ {
+ /*
+ No error accounting per IP in host_cache,
+ this is treated as a global server OOM error.
+ TODO: remove the need for my_strdup.
+ */
+ statistic_increment(connection_errors_internal, &LOCK_status);
return 1; /* The error is set by my_strdup(). */
+ }
thd->main_security_ctx.host_or_ip= thd->main_security_ctx.ip;
if (!(specialflag & SPECIAL_NO_RESOLVE))
{
- if (ip_to_hostname(&net->vio->remote, thd->main_security_ctx.ip,
- &thd->main_security_ctx.host, &connect_errors))
- {
- my_error(ER_BAD_HOST_ERROR, MYF(0));
- return 1;
- }
+ int rc;
+
+ rc= ip_to_hostname(&net->vio->remote,
+ thd->main_security_ctx.ip,
+ &thd->main_security_ctx.host,
+ &connect_errors);
/* Cut very long hostnames to avoid possible overflows */
if (thd->main_security_ctx.host)
{
if (thd->main_security_ctx.host != my_localhost)
- thd->main_security_ctx.host[min(strlen(thd->main_security_ctx.host),
+ thd->main_security_ctx.host[MY_MIN(strlen(thd->main_security_ctx.host),
HOSTNAME_LENGTH)]= 0;
thd->main_security_ctx.host_or_ip= thd->main_security_ctx.host;
}
- if (connect_errors > max_connect_errors)
+
+ if (rc == RC_BLOCKED_HOST)
{
+ /* HOST_CACHE stats updated by ip_to_hostname(). */
my_error(ER_HOST_IS_BLOCKED, MYF(0), thd->main_security_ctx.host_or_ip);
return 1;
}
@@ -951,6 +1034,7 @@ static int check_connection(THD *thd)
thd->main_security_ctx.ip : "unknown ip")));
if (acl_check_host(thd->main_security_ctx.host, thd->main_security_ctx.ip))
{
+ /* HOST_CACHE stats updated by acl_check_host(). */
my_error(ER_HOST_NOT_PRIVILEGED, MYF(0),
thd->main_security_ctx.host_or_ip);
return 1;
@@ -967,9 +1051,34 @@ static int check_connection(THD *thd)
vio_keepalive(net->vio, TRUE);
if (thd->packet.alloc(thd->variables.net_buffer_length))
+ {
+ /*
+ Important note:
+ net_buffer_length is a SESSION variable,
+ so it may be tempting to account OOM conditions per IP in the HOST_CACHE,
+ in case some clients are more demanding than others ...
+ However, this session variable is *not* initialized with a per client
+ value during the initial connection, it is initialized from the
+ GLOBAL net_buffer_length variable from the server.
+ Hence, there is no reason to account on OOM conditions per client IP,
+ we count failures in the global server status instead.
+ */
+ statistic_increment(connection_errors_internal, &LOCK_status);
return 1; /* The error is set by alloc(). */
+ }
+
+ auth_rc= acl_authenticate(thd, connect_errors, 0);
+ if (auth_rc == 0 && connect_errors != 0)
+ {
+ /*
+ A client connection from this IP was successful,
+ after some previous failures.
+ Reset the connection error counter.
+ */
+ reset_host_connect_errors(thd->main_security_ctx.ip);
+ }
- return acl_authenticate(thd, connect_errors, 0);
+ return auth_rc;
}
@@ -1118,9 +1227,10 @@ void prepare_new_connection_state(THD* thd)
execute_init_command(thd, &opt_init_connect, &LOCK_sys_init_connect);
if (thd->is_error())
{
+ Host_errors errors;
thd->killed= KILL_CONNECTION;
thd->print_aborted_warning(0, "init_connect command failed");
- sql_print_warning("%s", thd->stmt_da->message());
+ sql_print_warning("%s", thd->get_stmt_da()->message());
/*
now let client to send its first command,
@@ -1145,6 +1255,8 @@ void prepare_new_connection_state(THD* thd)
thd->server_status&= ~SERVER_STATUS_CLEAR_SET;
thd->protocol->end_statement();
thd->killed = KILL_CONNECTION;
+ errors.m_init_connect= 1;
+ inc_host_errors(thd->main_security_ctx.ip, &errors);
return;
}
diff --git a/sql/sql_const.h b/sql/sql_const.h
index d0a7a83f3a1..4ad39bad14a 100644
--- a/sql/sql_const.h
+++ b/sql/sql_const.h
@@ -68,7 +68,7 @@
#define PSEUDO_TABLE_BITS (PARAM_TABLE_BIT | OUTER_REF_TABLE_BIT | \
RAND_TABLE_BIT)
#define MAX_FIELDS 4096 /* Limit in the .frm file */
-#define MAX_PARTITIONS 1024
+#define MAX_PARTITIONS 8192
#define MAX_SELECT_NESTING (sizeof(nesting_map)*8-1)
@@ -77,7 +77,6 @@
/* Some portable defines */
-#define portable_sizeof_char_ptr 8
#define STRING_BUFFER_USUAL_SIZE 80
/* Memory allocated when parsing a statement / saving a statement */
@@ -129,6 +128,13 @@
*/
#define TABLE_DEF_CACHE_MIN 400
+/**
+ Maximum number of connections default value.
+ 151 is larger than Apache's default max children,
+ to avoid "too many connections" error in a common setup.
+*/
+#define MAX_CONNECTIONS_DEFAULT 151
+
/*
Stack reservation.
Feel free to raise this by the smallest amount you can to get the
@@ -233,7 +239,7 @@
#define DELAYED_LIMIT 100 /**< pause after xxx inserts */
#define DELAYED_QUEUE_SIZE 1000
#define DELAYED_WAIT_TIMEOUT 5*60 /**< Wait for delayed insert */
-#define MAX_CONNECT_ERRORS 10 ///< errors before disabling host
+#define MAX_CONNECT_ERRORS 100 ///< errors before disabling host
#define LONG_TIMEOUT ((ulong) 3600L*24L*365L)
diff --git a/sql/sql_crypt.h b/sql/sql_crypt.h
index 3a12d603601..3df554e9d31 100644
--- a/sql/sql_crypt.h
+++ b/sql/sql_crypt.h
@@ -22,7 +22,7 @@
#endif
#include "sql_list.h" /* Sql_alloc */
-#include "mysql_com.h" /* rand_struct */
+#include "my_rnd.h" /* rand_struct */
class SQL_CRYPT :public Sql_alloc
{
diff --git a/sql/sql_db.cc b/sql/sql_db.cc
index 39c30959fe4..9e30ed4513e 100644
--- a/sql/sql_db.cc
+++ b/sql/sql_db.cc
@@ -32,6 +32,7 @@
#include "log_event.h" // Query_log_event
#include "sql_base.h" // lock_table_names, tdc_remove_table
#include "sql_handler.h" // mysql_ha_rm_tables
+#include "sql_class.h"
#include <mysys_err.h>
#include "sp_head.h"
#include "sp.h"
@@ -572,7 +573,7 @@ int mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create_info,
error= -1;
goto exit;
}
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_DB_CREATE_EXISTS, ER(ER_DB_CREATE_EXISTS), db);
error= 0;
goto not_silent;
@@ -780,7 +781,7 @@ bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent)
}
else
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_DB_DROP_EXISTS, ER(ER_DB_DROP_EXISTS), db);
error= false;
goto update_binlog;
@@ -809,7 +810,7 @@ bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent)
/* Lock all tables and stored routines about to be dropped. */
if (lock_table_names(thd, tables, NULL, thd->variables.lock_wait_timeout,
- MYSQL_OPEN_SKIP_TEMPORARY) ||
+ 0) ||
lock_db_routines(thd, db))
goto exit;
@@ -1502,7 +1503,7 @@ bool mysql_change_db(THD *thd, const LEX_STRING *new_db_name, bool force_switch)
{
/* Throw a warning and free new_db_file_name. */
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_BAD_DB_ERROR, ER(ER_BAD_DB_ERROR),
new_db_file_name.str);
diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc
index 2992bb0da6e..d9dd538f96d 100644
--- a/sql/sql_derived.cc
+++ b/sql/sql_derived.cc
@@ -675,9 +675,9 @@ exit:
if (derived->view)
{
if (thd->is_error() &&
- (thd->stmt_da->sql_errno() == ER_BAD_FIELD_ERROR ||
- thd->stmt_da->sql_errno() == ER_FUNC_INEXISTENT_NAME_COLLISION ||
- thd->stmt_da->sql_errno() == ER_SP_DOES_NOT_EXIST))
+ (thd->get_stmt_da()->sql_errno() == ER_BAD_FIELD_ERROR ||
+ thd->get_stmt_da()->sql_errno() == ER_FUNC_INEXISTENT_NAME_COLLISION ||
+ thd->get_stmt_da()->sql_errno() == ER_SP_DOES_NOT_EXIST))
{
thd->clear_error();
my_error(ER_VIEW_INVALID, MYF(0), derived->db,
diff --git a/sql/sql_error.cc b/sql/sql_error.cc
index acb61fe68c5..f382f18a983 100644
--- a/sql/sql_error.cc
+++ b/sql/sql_error.cc
@@ -47,12 +47,12 @@ This file contains the implementation of error and warnings related
#include "sp_rcontext.h"
/*
- Design notes about MYSQL_ERROR::m_message_text.
+ Design notes about Sql_condition::m_message_text.
- The member MYSQL_ERROR::m_message_text contains the text associated with
+ The member Sql_condition::m_message_text contains the text associated with
an error, warning or note (which are all SQL 'conditions')
- Producer of MYSQL_ERROR::m_message_text:
+ Producer of Sql_condition::m_message_text:
----------------------------------------
(#1) the server implementation itself, when invoking functions like
@@ -78,16 +78,16 @@ This file contains the implementation of error and warnings related
- a RESIGNAL statement,
the message text is provided by the user logic, and is expressed in UTF8.
- Storage of MYSQL_ERROR::m_message_text:
+ Storage of Sql_condition::m_message_text:
---------------------------------------
- (#4) The class MYSQL_ERROR is used to hold the message text member.
+ (#4) The class Sql_condition is used to hold the message text member.
This class represents a single SQL condition.
(#5) The class Warning_info represents a SQL condition area, and contains
a collection of SQL conditions in the Warning_info::m_warn_list
- Consumer of MYSQL_ERROR::m_message_text:
+ Consumer of Sql_condition::m_message_text:
----------------------------------------
(#6) The statements SHOW WARNINGS and SHOW ERRORS display the content of
@@ -97,9 +97,9 @@ This file contains the implementation of error and warnings related
also read the content of:
- the top level statement condition area (when executed in a query),
- a sub statement (when executed in a stored program)
- and return the data stored in a MYSQL_ERROR.
+ and return the data stored in a Sql_condition.
- (#8) The RESIGNAL statement reads the MYSQL_ERROR caught by an exception
+ (#8) The RESIGNAL statement reads the Sql_condition caught by an exception
handler, to raise a new or modified condition (in #3).
The big picture
@@ -113,7 +113,7 @@ This file contains the implementation of error and warnings related
----------------------------|---------------------------- |
| |
V |
- MYSQL_ERROR(#4) |
+ Sql_condition(#4) |
| |
| |
V |
@@ -151,10 +151,10 @@ This file contains the implementation of error and warnings related
As a result, the design choice for (#4) and (#5) is to store data in
the 'error_message_charset_info' CHARSET, to minimize impact on the code base.
- This is implemented by using 'String MYSQL_ERROR::m_message_text'.
+ This is implemented by using 'String Sql_condition::m_message_text'.
The UTF8 -> error_message_charset_info conversion is implemented in
- Signal_common::eval_signal_informations() (for path #B and #C).
+ Sql_cmd_common_signal::eval_signal_informations() (for path #B and #C).
Future work
-----------
@@ -164,14 +164,14 @@ This file contains the implementation of error and warnings related
- Change (#4 and #5) to store message text in UTF8 natively.
In practice, this means changing the type of the message text to
- '<UTF8 String 128 class> MYSQL_ERROR::m_message_text', and is a direct
+ '<UTF8 String 128 class> Sql_condition::m_message_text', and is a direct
consequence of WL#751.
- Implement (#9) (GET DIAGNOSTICS).
See WL#2111 (Stored Procedures: Implement GET DIAGNOSTICS)
*/
-MYSQL_ERROR::MYSQL_ERROR()
+Sql_condition::Sql_condition()
: Sql_alloc(),
m_class_origin((const char*) NULL, 0, & my_charset_utf8_bin),
m_subclass_origin((const char*) NULL, 0, & my_charset_utf8_bin),
@@ -185,21 +185,20 @@ MYSQL_ERROR::MYSQL_ERROR()
m_cursor_name((const char*) NULL, 0, & my_charset_utf8_bin),
m_message_text(),
m_sql_errno(0),
- m_handled(0),
- m_level(MYSQL_ERROR::WARN_LEVEL_ERROR),
+ m_level(Sql_condition::WARN_LEVEL_ERROR),
m_mem_root(NULL)
{
memset(m_returned_sqlstate, 0, sizeof(m_returned_sqlstate));
}
-void MYSQL_ERROR::init(MEM_ROOT *mem_root)
+void Sql_condition::init(MEM_ROOT *mem_root)
{
DBUG_ASSERT(mem_root != NULL);
DBUG_ASSERT(m_mem_root == NULL);
m_mem_root= mem_root;
}
-void MYSQL_ERROR::clear()
+void Sql_condition::clear()
{
m_class_origin.length(0);
m_subclass_origin.length(0);
@@ -213,11 +212,10 @@ void MYSQL_ERROR::clear()
m_cursor_name.length(0);
m_message_text.length(0);
m_sql_errno= 0;
- m_handled= 0;
- m_level= MYSQL_ERROR::WARN_LEVEL_ERROR;
+ m_level= Sql_condition::WARN_LEVEL_ERROR;
}
-MYSQL_ERROR::MYSQL_ERROR(MEM_ROOT *mem_root)
+Sql_condition::Sql_condition(MEM_ROOT *mem_root)
: Sql_alloc(),
m_class_origin((const char*) NULL, 0, & my_charset_utf8_bin),
m_subclass_origin((const char*) NULL, 0, & my_charset_utf8_bin),
@@ -231,8 +229,7 @@ MYSQL_ERROR::MYSQL_ERROR(MEM_ROOT *mem_root)
m_cursor_name((const char*) NULL, 0, & my_charset_utf8_bin),
m_message_text(),
m_sql_errno(0),
- m_handled(0),
- m_level(MYSQL_ERROR::WARN_LEVEL_ERROR),
+ m_level(Sql_condition::WARN_LEVEL_ERROR),
m_mem_root(mem_root)
{
DBUG_ASSERT(mem_root != NULL);
@@ -257,7 +254,7 @@ static void copy_string(MEM_ROOT *mem_root, String* dst, const String* src)
}
void
-MYSQL_ERROR::copy_opt_attributes(const MYSQL_ERROR *cond)
+Sql_condition::copy_opt_attributes(const Sql_condition *cond)
{
DBUG_ASSERT(this != cond);
copy_string(m_mem_root, & m_class_origin, & cond->m_class_origin);
@@ -270,12 +267,11 @@ MYSQL_ERROR::copy_opt_attributes(const MYSQL_ERROR *cond)
copy_string(m_mem_root, & m_table_name, & cond->m_table_name);
copy_string(m_mem_root, & m_column_name, & cond->m_column_name);
copy_string(m_mem_root, & m_cursor_name, & cond->m_cursor_name);
- m_handled= cond->m_handled;
}
void
-MYSQL_ERROR::set(uint sql_errno, const char* sqlstate,
- MYSQL_ERROR::enum_warning_level level, const char* msg)
+Sql_condition::set(uint sql_errno, const char* sqlstate,
+ Sql_condition::enum_warning_level level, const char* msg)
{
DBUG_ASSERT(sql_errno != 0);
DBUG_ASSERT(sqlstate != NULL);
@@ -290,11 +286,11 @@ MYSQL_ERROR::set(uint sql_errno, const char* sqlstate,
}
void
-MYSQL_ERROR::set_builtin_message_text(const char* str)
+Sql_condition::set_builtin_message_text(const char* str)
{
/*
See the comments
- "Design notes about MYSQL_ERROR::m_message_text."
+ "Design notes about Sql_condition::m_message_text."
*/
const char* copy;
@@ -304,24 +300,42 @@ MYSQL_ERROR::set_builtin_message_text(const char* str)
}
const char*
-MYSQL_ERROR::get_message_text() const
+Sql_condition::get_message_text() const
{
return m_message_text.ptr();
}
int
-MYSQL_ERROR::get_message_octet_length() const
+Sql_condition::get_message_octet_length() const
{
return m_message_text.length();
}
void
-MYSQL_ERROR::set_sqlstate(const char* sqlstate)
+Sql_condition::set_sqlstate(const char* sqlstate)
{
memcpy(m_returned_sqlstate, sqlstate, SQLSTATE_LENGTH);
m_returned_sqlstate[SQLSTATE_LENGTH]= '\0';
}
+Diagnostics_area::Diagnostics_area(bool initialize)
+ : m_main_wi(0, false, initialize)
+{
+ push_warning_info(&m_main_wi);
+
+ reset_diagnostics_area();
+}
+
+Diagnostics_area::Diagnostics_area(ulonglong warning_info_id,
+ bool allow_unlimited_warnings,
+ bool initialize)
+ : m_main_wi(warning_info_id, allow_unlimited_warnings, initialize)
+{
+ push_warning_info(&m_main_wi);
+
+ reset_diagnostics_area();
+}
+
/**
Clear this diagnostics area.
@@ -333,7 +347,7 @@ Diagnostics_area::reset_diagnostics_area()
{
DBUG_ENTER("reset_diagnostics_area");
#ifdef DBUG_OFF
- can_overwrite_status= FALSE;
+ m_can_overwrite_status= FALSE;
/** Don't take chances in production */
m_message[0]= '\0';
m_sql_errno= 0;
@@ -341,7 +355,8 @@ Diagnostics_area::reset_diagnostics_area()
m_last_insert_id= 0;
m_statement_warn_count= 0;
#endif
- is_sent= FALSE;
+ get_warning_info()->clear_error_condition();
+ set_is_sent(false);
/** Tiny reset in debug mode to see garbage right away */
m_status= DA_EMPTY;
DBUG_VOID_RETURN;
@@ -354,9 +369,9 @@ Diagnostics_area::reset_diagnostics_area()
*/
void
-Diagnostics_area::set_ok_status(THD *thd, ulonglong affected_rows_arg,
- ulonglong last_insert_id_arg,
- const char *message_arg)
+Diagnostics_area::set_ok_status(ulonglong affected_rows,
+ ulonglong last_insert_id,
+ const char *message)
{
DBUG_ENTER("set_ok_status");
DBUG_ASSERT(! is_set());
@@ -367,11 +382,11 @@ Diagnostics_area::set_ok_status(THD *thd, ulonglong affected_rows_arg,
if (is_error() || is_disabled())
return;
- m_statement_warn_count= thd->warning_info->statement_warn_count();
- m_affected_rows= affected_rows_arg;
- m_last_insert_id= last_insert_id_arg;
- if (message_arg)
- strmake_buf(m_message, message_arg);
+ m_statement_warn_count= current_statement_warn_count();
+ m_affected_rows= affected_rows;
+ m_last_insert_id= last_insert_id;
+ if (message)
+ strmake_buf(m_message, message);
else
m_message[0]= '\0';
m_status= DA_OK;
@@ -402,20 +417,51 @@ Diagnostics_area::set_eof_status(THD *thd)
anyway.
*/
m_statement_warn_count= (thd->spcont ?
- 0 : thd->warning_info->statement_warn_count());
+ 0 :
+ current_statement_warn_count());
m_status= DA_EOF;
DBUG_VOID_RETURN;
}
/**
- Set ERROR status.
+ Set ERROR status in the Diagnostics Area. This function should be used to
+ report fatal errors (such as out-of-memory errors) when no further
+ processing is possible.
+
+ @param sql_errno SQL-condition error number
*/
void
-Diagnostics_area::set_error_status(THD *thd, uint sql_errno_arg,
- const char *message_arg,
- const char *sqlstate)
+Diagnostics_area::set_error_status(uint sql_errno)
+{
+ set_error_status(sql_errno,
+ ER(sql_errno),
+ mysql_errno_to_sqlstate(sql_errno),
+ NULL);
+}
+
+
+/**
+ Set ERROR status in the Diagnostics Area.
+
+ @note error_condition may be NULL. It happens if a) OOM error is being
+ reported; or b) when Warning_info is full.
+
+ @param sql_errno SQL-condition error number
+ @param message SQL-condition message
+ @param sqlstate SQL-condition state
+ @param error_condition SQL-condition object representing the error state
+
+ @note Note, that error_condition may be NULL. It happens if a) OOM error is
+ being reported; or b) when Warning_info is full.
+*/
+
+void
+Diagnostics_area::set_error_status(uint sql_errno,
+ const char *message,
+ const char *sqlstate,
+ const Sql_condition *error_condition)
{
DBUG_ENTER("set_error_status");
/*
@@ -423,7 +469,14 @@ Diagnostics_area::set_error_status(THD *thd, uint sql_errno_arg,
The only exception is when we flush the message to the client,
an error can happen during the flush.
*/
- DBUG_ASSERT(! is_set() || can_overwrite_status);
+ DBUG_ASSERT(! is_set() || m_can_overwrite_status);
+
+ // message must be set properly by the caller.
+ DBUG_ASSERT(message);
+
+ // sqlstate must be set properly by the caller.
+ DBUG_ASSERT(sqlstate);
+
#ifdef DBUG_OFF
/*
In production, refuse to overwrite a custom response with an
@@ -433,19 +486,17 @@ Diagnostics_area::set_error_status(THD *thd, uint sql_errno_arg,
return;
#endif
- if (sqlstate == NULL)
- sqlstate= mysql_errno_to_sqlstate(sql_errno_arg);
-
- m_sql_errno= sql_errno_arg;
+ m_sql_errno= sql_errno;
memcpy(m_sqlstate, sqlstate, SQLSTATE_LENGTH);
m_sqlstate[SQLSTATE_LENGTH]= '\0';
- strmake_buf(m_message, message_arg);
+ strmake_buf(m_message, message);
+
+ get_warning_info()->set_error_condition(error_condition);
m_status= DA_ERROR;
DBUG_VOID_RETURN;
}
-
/**
Mark the diagnostics area as 'DISABLED'.
@@ -463,15 +514,16 @@ Diagnostics_area::disable_status()
Warning_info::Warning_info(ulonglong warn_id_arg,
bool allow_unlimited_warnings, bool initialize)
- :m_statement_warn_count(0),
+ :m_current_statement_warn_count(0),
m_current_row_for_warning(1),
m_warn_id(warn_id_arg),
+ m_error_condition(NULL),
m_allow_unlimited_warnings(allow_unlimited_warnings),
initialized(0),
m_read_only(FALSE)
{
m_warn_list.empty();
- bzero((char*) m_warn_count, sizeof(m_warn_count));
+ memset(m_warn_count, 0, sizeof(m_warn_count));
if (initialize)
init();
}
@@ -479,6 +531,7 @@ Warning_info::Warning_info(ulonglong warn_id_arg,
void Warning_info::init()
{
/* Initialize sub structures */
+ DBUG_ASSERT(initialized == 0);
init_sql_alloc(&m_warn_root, WARN_ALLOC_BLOCK_SIZE,
WARN_ALLOC_PREALLOC_SIZE, MYF(MY_THREAD_SPECIFIC));
initialized= 1;
@@ -496,92 +549,164 @@ Warning_info::~Warning_info()
}
-/**
- Reset the warning information of this connection.
-*/
+bool Warning_info::has_sql_condition(const char *message_str,
+ ulong message_length) const
+{
+ Diagnostics_area::Sql_condition_iterator it(m_warn_list);
+ const Sql_condition *err;
-void Warning_info::clear_warning_info(ulonglong warn_id_arg)
+ while ((err= it++))
+ {
+ if (strncmp(message_str, err->get_message_text(), message_length) == 0)
+ return true;
+ }
+
+ return false;
+}
+
+
+void Warning_info::clear(ulonglong new_id)
{
- m_warn_id= warn_id_arg;
- free_memory();
- bzero((char*) m_warn_count, sizeof(m_warn_count));
+ id(new_id);
m_warn_list.empty();
- m_statement_warn_count= 0;
+ m_marked_sql_conditions.empty();
+ free_memory();
+ memset(m_warn_count, 0, sizeof(m_warn_count));
+ m_current_statement_warn_count= 0;
m_current_row_for_warning= 1; /* Start counting from the first row */
+ clear_error_condition();
}
-/**
- Append warnings only if the original contents of the routine
- warning info was replaced.
-*/
-void Warning_info::merge_with_routine_info(THD *thd, Warning_info *source)
+void Warning_info::append_warning_info(THD *thd, const Warning_info *source)
{
- /*
- If a routine body is empty or if a routine did not
- generate any warnings (thus m_warn_id didn't change),
- do not duplicate our own contents by appending the
- contents of the called routine. We know that the called
- routine did not change its warning info.
-
- On the other hand, if the routine body is not empty and
- some statement in the routine generates a warning or
- uses tables, m_warn_id is guaranteed to have changed.
- In this case we know that the routine warning info
- contains only new warnings, and thus we perform a copy.
- */
- if (m_warn_id != source->m_warn_id)
+ const Sql_condition *err;
+ Diagnostics_area::Sql_condition_iterator it(source->m_warn_list);
+ const Sql_condition *src_error_condition = source->get_error_condition();
+
+ while ((err= it++))
{
- /*
- If the invocation of the routine was a standalone statement,
- rather than a sub-statement, in other words, if it's a CALL
- of a procedure, rather than invocation of a function or a
- trigger, we need to clear the current contents of the caller's
- warning info.
-
- This is per MySQL rules: if a statement generates a warning,
- warnings from the previous statement are flushed. Normally
- it's done in push_warning(). However, here we don't use
- push_warning() to avoid invocation of condition handlers or
- escalation of warnings to errors.
- */
- opt_clear_warning_info(thd->query_id);
- append_warning_info(thd, source);
+ // Do not use ::push_warning() to avoid invocation of THD-internal-handlers.
+ Sql_condition *new_error= Warning_info::push_warning(thd, err);
+
+ if (src_error_condition && src_error_condition == err)
+ set_error_condition(new_error);
+
+ if (source->is_marked_for_removal(err))
+ mark_condition_for_removal(new_error);
}
}
+
/**
- Add a warning to the list of warnings. Increment the respective
- counters.
+ Copy Sql_conditions that are not WARN_LEVEL_ERROR from the source
+ Warning_info to the current Warning_info.
+
+ @param thd Thread context.
+ @param sp_wi Stored-program Warning_info
+ @param thd Thread context.
+ @param src_wi Warning_info to copy from.
*/
-MYSQL_ERROR *Warning_info::push_warning(THD *thd,
- uint sql_errno, const char* sqlstate,
- MYSQL_ERROR::enum_warning_level level,
- const char *msg)
+void Diagnostics_area::copy_non_errors_from_wi(THD *thd,
+ const Warning_info *src_wi)
{
- MYSQL_ERROR *cond= NULL;
+ Sql_condition_iterator it(src_wi->m_warn_list);
+ const Sql_condition *cond;
+ Warning_info *wi= get_warning_info();
+
+ while ((cond= it++))
+ {
+ if (cond->get_level() == Sql_condition::WARN_LEVEL_ERROR)
+ continue;
+
+ Sql_condition *new_condition= wi->push_warning(thd, cond);
+
+ if (src_wi->is_marked_for_removal(cond))
+ wi->mark_condition_for_removal(new_condition);
+ }
+}
+
+
+void Warning_info::mark_sql_conditions_for_removal()
+{
+ Sql_condition_list::Iterator it(m_warn_list);
+ Sql_condition *cond;
+
+ while ((cond= it++))
+ mark_condition_for_removal(cond);
+}
+
+
+void Warning_info::remove_marked_sql_conditions()
+{
+ List_iterator_fast<Sql_condition> it(m_marked_sql_conditions);
+ Sql_condition *cond;
+
+ while ((cond= it++))
+ {
+ m_warn_list.remove(cond);
+ m_warn_count[cond->get_level()]--;
+ m_current_statement_warn_count--;
+ if (cond == m_error_condition)
+ m_error_condition= NULL;
+ }
+
+ m_marked_sql_conditions.empty();
+}
+
+
+bool Warning_info::is_marked_for_removal(const Sql_condition *cond) const
+{
+ List_iterator_fast<Sql_condition> it(
+ const_cast<List<Sql_condition>&> (m_marked_sql_conditions));
+ Sql_condition *c;
+
+ while ((c= it++))
+ {
+ if (c == cond)
+ return true;
+ }
+
+ return false;
+}
+
+
+void Warning_info::reserve_space(THD *thd, uint count)
+{
+ while (m_warn_list.elements() &&
+ (m_warn_list.elements() + count) > thd->variables.max_error_count)
+ m_warn_list.remove(m_warn_list.front());
+}
+
+Sql_condition *Warning_info::push_warning(THD *thd,
+ uint sql_errno, const char* sqlstate,
+ Sql_condition::enum_warning_level level,
+ const char *msg)
+{
+ Sql_condition *cond= NULL;
if (! m_read_only)
{
if (m_allow_unlimited_warnings ||
- m_warn_list.elements < thd->variables.max_error_count)
+ m_warn_list.elements() < thd->variables.max_error_count)
{
- cond= new (& m_warn_root) MYSQL_ERROR(& m_warn_root);
+ cond= new (& m_warn_root) Sql_condition(& m_warn_root);
if (cond)
{
cond->set(sql_errno, sqlstate, level, msg);
- m_warn_list.push_back(cond, &m_warn_root);
+ m_warn_list.push_back(cond);
}
}
m_warn_count[(uint) level]++;
}
- m_statement_warn_count++;
+ m_current_statement_warn_count++;
return cond;
}
-MYSQL_ERROR *Warning_info::push_warning(THD *thd, const MYSQL_ERROR *sql_condition)
+
+Sql_condition *Warning_info::push_warning(THD *thd, const Sql_condition *sql_condition)
{
- MYSQL_ERROR *new_condition= push_warning(thd,
+ Sql_condition *new_condition= push_warning(thd,
sql_condition->get_sql_errno(),
sql_condition->get_sqlstate(),
sql_condition->get_level(),
@@ -604,7 +729,7 @@ MYSQL_ERROR *Warning_info::push_warning(THD *thd, const MYSQL_ERROR *sql_conditi
msg Clear error message
*/
-void push_warning(THD *thd, MYSQL_ERROR::enum_warning_level level,
+void push_warning(THD *thd, Sql_condition::enum_warning_level level,
uint code, const char *msg)
{
DBUG_ENTER("push_warning");
@@ -615,15 +740,15 @@ void push_warning(THD *thd, MYSQL_ERROR::enum_warning_level level,
WARN_LEVEL_ERROR *is* a bug. Either use my_printf_error(),
my_error(), or WARN_LEVEL_WARN.
*/
- DBUG_ASSERT(level != MYSQL_ERROR::WARN_LEVEL_ERROR);
+ DBUG_ASSERT(level != Sql_condition::WARN_LEVEL_ERROR);
- if (level == MYSQL_ERROR::WARN_LEVEL_ERROR)
- level= MYSQL_ERROR::WARN_LEVEL_WARN;
+ if (level == Sql_condition::WARN_LEVEL_ERROR)
+ level= Sql_condition::WARN_LEVEL_WARN;
(void) thd->raise_condition(code, NULL, level, msg);
/* Make sure we also count warnings pushed after calling set_ok_status(). */
- thd->stmt_da->increment_warning();
+ thd->get_stmt_da()->increment_warning();
DBUG_VOID_RETURN;
}
@@ -640,7 +765,7 @@ void push_warning(THD *thd, MYSQL_ERROR::enum_warning_level level,
msg Clear error message
*/
-void push_warning_printf(THD *thd, MYSQL_ERROR::enum_warning_level level,
+void push_warning_printf(THD *thd, Sql_condition::enum_warning_level level,
uint code, const char *format, ...)
{
va_list args;
@@ -689,7 +814,7 @@ bool mysqld_show_warnings(THD *thd, ulong levels_to_show)
List<Item> field_list;
DBUG_ENTER("mysqld_show_warnings");
- DBUG_ASSERT(thd->warning_info->is_read_only());
+ DBUG_ASSERT(thd->get_stmt_da()->is_warning_info_read_only());
field_list.push_back(new Item_empty_string("Level", 7));
field_list.push_back(new Item_return_int("Code",4, MYSQL_TYPE_LONG));
@@ -699,7 +824,7 @@ bool mysqld_show_warnings(THD *thd, ulong levels_to_show)
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
DBUG_RETURN(TRUE);
- MYSQL_ERROR *err;
+ const Sql_condition *err;
SELECT_LEX *sel= &thd->lex->select_lex;
SELECT_LEX_UNIT *unit= &thd->lex->unit;
ulonglong idx= 0;
@@ -707,7 +832,8 @@ bool mysqld_show_warnings(THD *thd, ulong levels_to_show)
unit->set_limit(sel);
- List_iterator_fast<MYSQL_ERROR> it(thd->warning_info->warn_list());
+ Diagnostics_area::Sql_condition_iterator it=
+ thd->get_stmt_da()->sql_conditions();
while ((err= it++))
{
/* Skip levels that the user is not interested in */
@@ -730,7 +856,7 @@ bool mysqld_show_warnings(THD *thd, ulong levels_to_show)
}
my_eof(thd);
- thd->warning_info->set_read_only(FALSE);
+ thd->get_stmt_da()->set_warning_info_read_only(FALSE);
DBUG_RETURN(FALSE);
}
@@ -838,7 +964,7 @@ uint32 convert_error_message(char *to, uint32 to_length, CHARSET_INFO *to_cs,
if (!to_cs || from_cs == to_cs || to_cs == &my_charset_bin)
{
- length= min(to_length, from_length);
+ length= MY_MIN(to_length, from_length);
memmove(to, from, length);
to[length]= 0;
return length;
@@ -880,3 +1006,32 @@ uint32 convert_error_message(char *to, uint32 to_length, CHARSET_INFO *to_cs,
*errors= error_count;
return (uint32) (to - to_start);
}
+
+
+/**
+ Sanity check for SQLSTATEs. The function does not check if it's really an
+ existing SQL-state (there are just too many), it just checks string length and
+ looks for bad characters.
+
+ @param sqlstate the condition SQLSTATE.
+
+ @retval true if it's ok.
+ @retval false if it's bad.
+*/
+
+bool is_sqlstate_valid(const LEX_STRING *sqlstate)
+{
+ if (sqlstate->length != 5)
+ return false;
+
+ for (int i= 0 ; i < 5 ; ++i)
+ {
+ char c = sqlstate->str[i];
+
+ if ((c < '0' || '9' < c) &&
+ (c < 'A' || 'Z' < c))
+ return false;
+ }
+
+ return true;
+}
diff --git a/sql/sql_error.h b/sql/sql_error.h
index cd1b92a2bcc..0a75d7a392d 100644
--- a/sql/sql_error.h
+++ b/sql/sql_error.h
@@ -19,126 +19,13 @@
#include "sql_list.h" /* Sql_alloc, MEM_ROOT */
#include "m_string.h" /* LEX_STRING */
#include "sql_string.h" /* String */
+#include "sql_plist.h" /* I_P_List */
#include "mysql_com.h" /* MYSQL_ERRMSG_SIZE */
#include "my_time.h" /* MYSQL_TIME */
#include "decimal.h"
class THD;
-
-/**
- Stores status of the currently executed statement.
- Cleared at the beginning of the statement, and then
- can hold either OK, ERROR, or EOF status.
- Can not be assigned twice per statement.
-*/
-
-class Diagnostics_area
-{
-public:
- enum enum_diagnostics_status
- {
- /** The area is cleared at start of a statement. */
- DA_EMPTY= 0,
- /** Set whenever one calls my_ok(). */
- DA_OK,
- /** Set whenever one calls my_eof(). */
- DA_EOF,
- /** Set whenever one calls my_error() or my_message(). */
- DA_ERROR,
- /** Set in case of a custom response, such as one from COM_STMT_PREPARE. */
- DA_DISABLED
- };
- /** True if status information is sent to the client. */
- bool is_sent;
- /** Set to make set_error_status after set_{ok,eof}_status possible. */
- bool can_overwrite_status;
-
- void set_ok_status(THD *thd, ulonglong affected_rows_arg,
- ulonglong last_insert_id_arg,
- const char *message);
- void set_eof_status(THD *thd);
- void set_error_status(THD *thd, uint sql_errno_arg, const char *message_arg,
- const char *sqlstate);
-
- void disable_status();
-
- void reset_diagnostics_area();
-
- bool is_set() const { return m_status != DA_EMPTY; }
- bool is_error() const { return m_status == DA_ERROR; }
- bool is_eof() const { return m_status == DA_EOF; }
- bool is_ok() const { return m_status == DA_OK; }
- bool is_disabled() const { return m_status == DA_DISABLED; }
- enum_diagnostics_status status() const { return m_status; }
-
- const char *message() const
- { DBUG_ASSERT(m_status == DA_ERROR || m_status == DA_OK); return m_message; }
-
- uint sql_errno() const
- { DBUG_ASSERT(m_status == DA_ERROR); return m_sql_errno; }
-
- const char* get_sqlstate() const
- { DBUG_ASSERT(m_status == DA_ERROR); return m_sqlstate; }
-
- ulonglong affected_rows() const
- { DBUG_ASSERT(m_status == DA_OK); return m_affected_rows; }
-
- ulonglong last_insert_id() const
- { DBUG_ASSERT(m_status == DA_OK); return m_last_insert_id; }
-
- uint statement_warn_count() const
- {
- DBUG_ASSERT(m_status == DA_OK || m_status == DA_EOF);
- return m_statement_warn_count;
- }
-
- /* Used to count any warnings pushed after calling set_ok_status(). */
- void increment_warning()
- {
- if (m_status != DA_EMPTY)
- m_statement_warn_count++;
- }
-
- Diagnostics_area() { reset_diagnostics_area(); }
-
-private:
- /** Message buffer. Can be used by OK or ERROR status. */
- char m_message[MYSQL_ERRMSG_SIZE];
- /**
- SQL error number. One of ER_ codes from share/errmsg.txt.
- Set by set_error_status.
- */
- uint m_sql_errno;
-
- char m_sqlstate[SQLSTATE_LENGTH+1];
-
- /**
- The number of rows affected by the last statement. This is
- semantically close to thd->row_count_func, but has a different
- life cycle. thd->row_count_func stores the value returned by
- function ROW_COUNT() and is cleared only by statements that
- update its value, such as INSERT, UPDATE, DELETE and few others.
- This member is cleared at the beginning of the next statement.
-
- We could possibly merge the two, but life cycle of thd->row_count_func
- can not be changed.
- */
- ulonglong m_affected_rows;
- /**
- Similarly to the previous member, this is a replacement of
- thd->first_successful_insert_id_in_prev_stmt, which is used
- to implement LAST_INSERT_ID().
- */
- ulonglong m_last_insert_id;
- /**
- Number of warnings of this last statement. May differ from
- the number of warnings returned by SHOW WARNINGS e.g. in case
- the statement doesn't clear the warnings, and doesn't generate
- them.
- */
- uint m_statement_warn_count;
- enum_diagnostics_status m_status;
-};
+class my_decimal;
///////////////////////////////////////////////////////////////////////////
@@ -146,10 +33,8 @@ private:
Representation of a SQL condition.
A SQL condition can be a completion condition (note, warning),
or an exception condition (error, not found).
- @note This class is named MYSQL_ERROR instead of SQL_condition for
- historical reasons, to facilitate merging code with previous releases.
*/
-class MYSQL_ERROR : public Sql_alloc
+class Sql_condition : public Sql_alloc
{
public:
/*
@@ -160,6 +45,7 @@ public:
*/
enum enum_warning_level
{ WARN_LEVEL_NOTE, WARN_LEVEL_WARN, WARN_LEVEL_ERROR, WARN_LEVEL_END};
+
/**
Get the MESSAGE_TEXT of this condition.
@return the message text.
@@ -190,30 +76,15 @@ public:
Get the error level of this condition.
@return the error level condition item.
*/
- MYSQL_ERROR::enum_warning_level get_level() const
+ Sql_condition::enum_warning_level get_level() const
{ return m_level; }
- /** Destructor. */
- ~MYSQL_ERROR()
- {}
-
- /** check if condition was handled by a condition handler */
- bool handled() const
- {
- return m_handled;
- }
- /** mark that condition was handled */
- void mark_handled()
- {
- m_handled= 1;
- }
-
private:
/*
- The interface of MYSQL_ERROR is mostly private, by design,
+ The interface of Sql_condition is mostly private, by design,
so that only the following code:
- various raise_error() or raise_warning() methods in class THD,
- - the implementation of SIGNAL / RESIGNAL
+ - the implementation of SIGNAL / RESIGNAL / GET DIAGNOSTICS
- catch / re-throw of SQL conditions in stored procedures (sp_rcontext)
is allowed to create / modify a SQL condition.
Enforcing this policy prevents confusion, since the only public
@@ -223,20 +94,21 @@ private:
*/
friend class THD;
friend class Warning_info;
- friend class Signal_common;
- friend class Signal_statement;
- friend class Resignal_statement;
+ friend class Sql_cmd_common_signal;
+ friend class Sql_cmd_signal;
+ friend class Sql_cmd_resignal;
friend class sp_rcontext;
+ friend class Condition_information_item;
/**
Default constructor.
This constructor is usefull when allocating arrays.
- Note that the init() method should be called to complete the MYSQL_ERROR.
+ Note that the init() method should be called to complete the Sql_condition.
*/
- MYSQL_ERROR();
+ Sql_condition();
/**
- Complete the MYSQL_ERROR initialisation.
+ Complete the Sql_condition initialisation.
@param mem_root The memory root to use for the condition items
of this condition
*/
@@ -247,15 +119,17 @@ private:
@param mem_root The memory root to use for the condition items
of this condition
*/
- MYSQL_ERROR(MEM_ROOT *mem_root);
-
+ Sql_condition(MEM_ROOT *mem_root);
+ /** Destructor. */
+ ~Sql_condition()
+ {}
/**
Copy optional condition items attributes.
@param cond the condition to copy.
*/
- void copy_opt_attributes(const MYSQL_ERROR *cond);
+ void copy_opt_attributes(const Sql_condition *cond);
/**
Set this condition area with a fixed message text.
@@ -266,7 +140,7 @@ private:
@param MyFlags additional flags.
*/
void set(uint sql_errno, const char* sqlstate,
- MYSQL_ERROR::enum_warning_level level,
+ Sql_condition::enum_warning_level level,
const char* msg);
/**
@@ -279,6 +153,12 @@ private:
/** Set the SQLSTATE of this condition. */
void set_sqlstate(const char* sqlstate);
+ /** Set the CLASS_ORIGIN of this condition. */
+ void set_class_origin();
+
+ /** Set the SUBCLASS_ORIGIN of this condition. */
+ void set_subclass_origin();
+
/**
Clear this SQL condition.
*/
@@ -321,9 +201,6 @@ private:
/** MySQL extension, MYSQL_ERRNO condition item. */
uint m_sql_errno;
- /** Marker if error/warning was handled by a continue handler */
- bool m_handled;
-
/**
SQL RETURNED_SQLSTATE condition item.
This member is always NUL terminated.
@@ -331,44 +208,48 @@ private:
char m_returned_sqlstate[SQLSTATE_LENGTH+1];
/** Severity (error, warning, note) of this condition. */
- MYSQL_ERROR::enum_warning_level m_level;
+ Sql_condition::enum_warning_level m_level;
+
+ /** Pointers for participating in the list of conditions. */
+ Sql_condition *next_in_wi;
+ Sql_condition **prev_in_wi;
/** Memory root to use to hold condition item values. */
MEM_ROOT *m_mem_root;
};
-class Sql_condition : public MYSQL_ERROR
-{
- /*
- Wrapper class to allow one to use Sql_condition in handlers instead of
- MYSQL_ERROR
- */
-};
-
///////////////////////////////////////////////////////////////////////////
/**
Information about warnings of the current connection.
*/
-
class Warning_info
{
+ /** The type of the counted and doubly linked list of conditions. */
+ typedef I_P_List<Sql_condition,
+ I_P_List_adapter<Sql_condition,
+ &Sql_condition::next_in_wi,
+ &Sql_condition::prev_in_wi>,
+ I_P_List_counter,
+ I_P_List_fast_push_back<Sql_condition> >
+ Sql_condition_list;
+
/** A memory root to allocate warnings and errors */
MEM_ROOT m_warn_root;
/** List of warnings of all severities (levels). */
- List <MYSQL_ERROR> m_warn_list;
+ Sql_condition_list m_warn_list;
/** A break down of the number of warnings per severity (level). */
- uint m_warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_END];
+ uint m_warn_count[(uint) Sql_condition::WARN_LEVEL_END];
/**
The number of warnings of the current statement. Warning_info
life cycle differs from statement life cycle -- it may span
multiple statements. In that case we get
- m_statement_warn_count 0, whereas m_warn_list is not empty.
+ m_current_statement_warn_count 0, whereas m_warn_list is not empty.
*/
- uint m_statement_warn_count;
+ uint m_current_statement_warn_count;
/*
Row counter, to print in errors and warnings. Not increased in
@@ -379,29 +260,67 @@ class Warning_info
/** Used to optionally clear warnings only once per statement. */
ulonglong m_warn_id;
+ /**
+ A pointer to an element of m_warn_list. It determines SQL-condition
+ instance which corresponds to the error state in Diagnostics_area.
+
+ This is needed for properly processing SQL-conditions in SQL-handlers.
+ When an SQL-handler is found for the current error state in Diagnostics_area,
+ this pointer is needed to remove the corresponding SQL-condition from the
+ Warning_info list.
+
+ @note m_error_condition might be NULL in the following cases:
+ - Diagnostics_area set to fatal error state (like OOM);
+ - Max number of Warning_info elements has been reached (thus, there is
+ no corresponding SQL-condition object in Warning_info).
+ */
+ const Sql_condition *m_error_condition;
+
/** Indicates if push_warning() allows unlimited number of warnings. */
bool m_allow_unlimited_warnings;
bool initialized; /* Set to 1 if init() has been called */
-private:
- Warning_info(const Warning_info &rhs); /* Not implemented */
- Warning_info& operator=(const Warning_info &rhs); /* Not implemented */
-public:
+ /** Read only status. */
+ bool m_read_only;
+
+ /** Pointers for participating in the stack of Warning_info objects. */
+ Warning_info *m_next_in_da;
+ Warning_info **m_prev_in_da;
+ List<Sql_condition> m_marked_sql_conditions;
+
+public:
Warning_info(ulonglong warn_id_arg, bool allow_unlimited_warnings,
- bool initialize=true);
+ bool initialized);
~Warning_info();
-
/* Allocate memory for structures */
void init();
void free_memory();
+private:
+ Warning_info(const Warning_info &rhs); /* Not implemented */
+ Warning_info& operator=(const Warning_info &rhs); /* Not implemented */
+
+ /**
+ Checks if Warning_info contains SQL-condition with the given message.
+
+ @param message_str Message string.
+ @param message_length Length of message string.
+
+ @return true if the Warning_info contains an SQL-condition with the given
+ message.
+ */
+ bool has_sql_condition(const char *message_str, ulong message_length) const;
+
/**
Reset the warning information. Clear all warnings,
the number of warnings, reset current row counter
to point to the first row.
+
+ @param new_id new Warning_info id.
*/
- void clear_warning_info(ulonglong warn_id_arg);
+ void clear(ulonglong new_id);
+
/**
Only clear warning info if haven't yet done that already
for the current query. Allows to be issued at any time
@@ -410,46 +329,72 @@ public:
@todo: This is a sign of sloppy coding. Instead we need to
designate one place in a statement life cycle where we call
- clear_warning_info().
+ Warning_info::clear().
+
+ @param query_id Current query id.
*/
- void opt_clear_warning_info(ulonglong query_id)
+ void opt_clear(ulonglong query_id)
{
if (query_id != m_warn_id)
- clear_warning_info(query_id);
- }
-
- void append_warning_info(THD *thd, Warning_info *source)
- {
- append_warnings(thd, & source->warn_list());
+ clear(query_id);
}
/**
Concatenate the list of warnings.
- It's considered tolerable to lose a warning.
- */
- void append_warnings(THD *thd, List<MYSQL_ERROR> *src)
- {
- MYSQL_ERROR *err;
- List_iterator_fast<MYSQL_ERROR> it(*src);
- /*
- Don't use ::push_warning() to avoid invocation of condition
- handlers or escalation of warnings to errors.
- */
- while ((err= it++))
- Warning_info::push_warning(thd, err);
- }
- /**
- Conditional merge of related warning information areas.
+ It's considered tolerable to lose an SQL-condition in case of OOM-error,
+ or if the number of SQL-conditions in the Warning_info reached top limit.
+
+ @param thd Thread context.
+ @param source Warning_info object to copy SQL-conditions from.
*/
- void merge_with_routine_info(THD *thd, Warning_info *source);
+ void append_warning_info(THD *thd, const Warning_info *source);
/**
Reset between two COM_ commands. Warnings are preserved
between commands, but statement_warn_count indicates
the number of warnings of this particular statement only.
*/
- void reset_for_next_command() { m_statement_warn_count= 0; }
+ void reset_for_next_command()
+ { m_current_statement_warn_count= 0; }
+
+ /**
+ Mark active SQL-conditions for later removal.
+ This is done to simulate stacked DAs for HANDLER statements.
+ */
+ void mark_sql_conditions_for_removal();
+
+ /**
+ Unmark SQL-conditions, which were marked for later removal.
+ This is done to simulate stacked DAs for HANDLER statements.
+ */
+ void unmark_sql_conditions_from_removal()
+ { m_marked_sql_conditions.empty(); }
+
+ /**
+ Remove SQL-conditions that are marked for deletion.
+ This is done to simulate stacked DAs for HANDLER statements.
+ */
+ void remove_marked_sql_conditions();
+
+ /**
+ Check if the given SQL-condition is marked for removal in this Warning_info
+ instance.
+
+ @param cond the SQL-condition.
+
+ @retval true if the given SQL-condition is marked for removal in this
+ Warning_info instance.
+ @retval false otherwise.
+ */
+ bool is_marked_for_removal(const Sql_condition *cond) const;
+
+ /**
+ Mark a single SQL-condition for removal (add the given SQL-condition to the
+ removal list of this Warning_info instance).
+ */
+ void mark_condition_for_removal(Sql_condition *cond)
+ { m_marked_sql_conditions.push_back(cond, &m_warn_root); }
/**
Used for @@warning_count system variable, which prints
@@ -458,52 +403,82 @@ public:
ulong warn_count() const
{
/*
- This may be higher than warn_list.elements if we have
+ This may be higher than warn_list.elements() if we have
had more warnings than thd->variables.max_error_count.
*/
- return (m_warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_NOTE] +
- m_warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_ERROR] +
- m_warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_WARN]);
+ return (m_warn_count[(uint) Sql_condition::WARN_LEVEL_NOTE] +
+ m_warn_count[(uint) Sql_condition::WARN_LEVEL_ERROR] +
+ m_warn_count[(uint) Sql_condition::WARN_LEVEL_WARN]);
}
/**
- This is for iteration purposes. We return a non-constant reference
- since List doesn't have constant iterators.
- */
- List<MYSQL_ERROR> &warn_list() { return m_warn_list; }
-
- /**
The number of errors, or number of rows returned by SHOW ERRORS,
also the value of session variable @@error_count.
*/
ulong error_count() const
+ { return m_warn_count[(uint) Sql_condition::WARN_LEVEL_ERROR]; }
+
+ /**
+ The number of conditions (errors, warnings and notes) in the list.
+ */
+ uint cond_count() const
{
- return m_warn_count[(uint) MYSQL_ERROR::WARN_LEVEL_ERROR];
+ return m_warn_list.elements();
}
/** Id of the warning information area. */
- ulonglong warn_id() const { return m_warn_id; }
+ ulonglong id() const { return m_warn_id; }
+
+ /** Set id of the warning information area. */
+ void id(ulonglong id) { m_warn_id= id; }
/** Do we have any errors and warnings that we can *show*? */
- bool is_empty() const { return m_warn_list.elements == 0; }
+ bool is_empty() const { return m_warn_list.is_empty(); }
/** Increment the current row counter to point at the next row. */
void inc_current_row_for_warning() { m_current_row_for_warning++; }
+
/** Reset the current row counter. Start counting from the first row. */
void reset_current_row_for_warning() { m_current_row_for_warning= 1; }
+
/** Return the current counter value. */
ulong current_row_for_warning() const { return m_current_row_for_warning; }
- ulong statement_warn_count() const { return m_statement_warn_count; }
+ /** Return the number of warnings thrown by the current statement. */
+ ulong current_statement_warn_count() const
+ { return m_current_statement_warn_count; }
+
+ /** Make sure there is room for the given number of conditions. */
+ void reserve_space(THD *thd, uint count);
+
+ /**
+ Add a new SQL-condition to the current list and increment the respective
+ counters.
+
+ @param thd Thread context.
+ @param sql_errno SQL-condition error number.
+ @param sqlstate SQL-condition state.
+ @param level SQL-condition level.
+ @param msg SQL-condition message.
+
+ @return a pointer to the added SQL-condition.
+ */
+ Sql_condition *push_warning(THD *thd,
+ uint sql_errno,
+ const char* sqlstate,
+ Sql_condition::enum_warning_level level,
+ const char* msg);
+
+ /**
+ Add a new SQL-condition to the current list and increment the respective
+ counters.
- /** Add a new condition to the current list. */
- MYSQL_ERROR *push_warning(THD *thd,
- uint sql_errno, const char* sqlstate,
- MYSQL_ERROR::enum_warning_level level,
- const char* msg);
+ @param thd Thread context.
+ @param sql_condition SQL-condition to copy values from.
- /** Add a new condition to the current list. */
- MYSQL_ERROR *push_warning(THD *thd, const MYSQL_ERROR *sql_condition);
+ @return a pointer to the added SQL-condition.
+ */
+ Sql_condition *push_warning(THD *thd, const Sql_condition *sql_condition);
/**
Set the read only status for this statement area.
@@ -514,25 +489,51 @@ public:
- SHOW WARNINGS
- SHOW ERRORS
- GET DIAGNOSTICS
- @param read_only the read only property to set
+ @param read_only the read only property to set.
*/
void set_read_only(bool read_only)
{ m_read_only= read_only; }
/**
Read only status.
- @return the read only property
+ @return the read only property.
*/
bool is_read_only() const
{ return m_read_only; }
-private:
- /** Read only status. */
- bool m_read_only;
+ /**
+ @return SQL-condition, which corresponds to the error state in
+ Diagnostics_area.
- friend class Resignal_statement;
+ @see m_error_condition.
+ */
+ const Sql_condition *get_error_condition() const
+ { return m_error_condition; }
+
+ /**
+ Set SQL-condition, which corresponds to the error state in Diagnostics_area.
+
+ @see m_error_condition.
+ */
+ void set_error_condition(const Sql_condition *error_condition)
+ { m_error_condition= error_condition; }
+
+ /**
+ Reset SQL-condition, which corresponds to the error state in
+ Diagnostics_area.
+
+ @see m_error_condition.
+ */
+ void clear_error_condition()
+ { m_error_condition= NULL; }
+
+ // for:
+ // - m_next_in_da / m_prev_in_da
+ // - is_marked_for_removal()
+ friend class Diagnostics_area;
};
+
extern char *err_conv(char *buff, uint to_length, const char *from,
uint from_length, CHARSET_INFO *from_cs);
@@ -606,15 +607,353 @@ public:
}
};
-void push_warning(THD *thd, MYSQL_ERROR::enum_warning_level level,
+///////////////////////////////////////////////////////////////////////////
+
+/**
+ Stores status of the currently executed statement.
+ Cleared at the beginning of the statement, and then
+ can hold either OK, ERROR, or EOF status.
+ Can not be assigned twice per statement.
+*/
+
+class Diagnostics_area
+{
+private:
+ /** The type of the counted and doubly linked list of Warning_info objects. */
+ typedef I_P_List<Warning_info,
+ I_P_List_adapter<Warning_info,
+ &Warning_info::m_next_in_da,
+ &Warning_info::m_prev_in_da>,
+ I_P_List_counter,
+ I_P_List_fast_push_back<Warning_info> >
+ Warning_info_list;
+
+public:
+ /** Const iterator used to iterate through the warning list. */
+ typedef Warning_info::Sql_condition_list::Const_Iterator
+ Sql_condition_iterator;
+
+ enum enum_diagnostics_status
+ {
+ /** The area is cleared at start of a statement. */
+ DA_EMPTY= 0,
+ /** Set whenever one calls my_ok(). */
+ DA_OK,
+ /** Set whenever one calls my_eof(). */
+ DA_EOF,
+ /** Set whenever one calls my_error() or my_message(). */
+ DA_ERROR,
+ /** Set in case of a custom response, such as one from COM_STMT_PREPARE. */
+ DA_DISABLED
+ };
+
+ /** Allow set_error_status() to overwrite an already set OK/EOF status. */
+ void set_overwrite_status(bool can_overwrite_status)
+ { m_can_overwrite_status= can_overwrite_status; }
+
+ /** True if status information is sent to the client. */
+ bool is_sent() const { return m_is_sent; }
+
+ void set_is_sent(bool is_sent) { m_is_sent= is_sent; }
+
+ void set_ok_status(ulonglong affected_rows,
+ ulonglong last_insert_id,
+ const char *message);
+
+ void set_eof_status(THD *thd);
+
+ void set_error_status(uint sql_errno);
+
+ void set_error_status(uint sql_errno,
+ const char *message,
+ const char *sqlstate,
+ const Sql_condition *error_condition);
+
+ void disable_status();
+
+ void reset_diagnostics_area();
+
+ bool is_set() const { return m_status != DA_EMPTY; }
+
+ bool is_error() const { return m_status == DA_ERROR; }
+
+ bool is_eof() const { return m_status == DA_EOF; }
+
+ bool is_ok() const { return m_status == DA_OK; }
+
+ bool is_disabled() const { return m_status == DA_DISABLED; }
+
+ enum_diagnostics_status status() const { return m_status; }
+
+ const char *message() const
+ { DBUG_ASSERT(m_status == DA_ERROR || m_status == DA_OK); return m_message; }
+
+ uint sql_errno() const
+ { DBUG_ASSERT(m_status == DA_ERROR); return m_sql_errno; }
+
+ const char* get_sqlstate() const
+ { DBUG_ASSERT(m_status == DA_ERROR); return m_sqlstate; }
+
+ ulonglong affected_rows() const
+ { DBUG_ASSERT(m_status == DA_OK); return m_affected_rows; }
+
+ ulonglong last_insert_id() const
+ { DBUG_ASSERT(m_status == DA_OK); return m_last_insert_id; }
+
+ uint statement_warn_count() const
+ {
+ DBUG_ASSERT(m_status == DA_OK || m_status == DA_EOF);
+ return m_statement_warn_count;
+ }
+
+ /* Used to count any warnings pushed after calling set_ok_status(). */
+ void increment_warning()
+ {
+ if (m_status != DA_EMPTY)
+ m_statement_warn_count++;
+ }
+
+ Diagnostics_area(bool initialize);
+ Diagnostics_area(ulonglong warning_info_id, bool allow_unlimited_warnings,
+ bool initialize);
+ void init() { m_main_wi.init() ; }
+ void free_memory() { m_main_wi.free_memory() ; }
+
+ /** Push the given Warning_info on top of the stack (it becomes current). */
+ void push_warning_info(Warning_info *wi)
+ { m_wi_stack.push_front(wi); }
+
+ /** Pop the topmost Warning_info from the stack. */
+ void pop_warning_info()
+ {
+ DBUG_ASSERT(m_wi_stack.elements() > 0);
+ m_wi_stack.remove(m_wi_stack.front());
+ }
+
+ void set_warning_info_id(ulonglong id)
+ { get_warning_info()->id(id); }
+
+ ulonglong warning_info_id() const
+ { return get_warning_info()->id(); }
+
+ /**
+ Compare the given warning info with the current warning info
+ and see if they are different. They will be different if
+ warnings have been generated or statements that use tables
+ have been executed. This is checked by comparing m_warn_id.
+
+ @param wi Warning info to compare with current Warning info.
+
+ @return false if they are equal, true if they are not.
+ */
+ bool warning_info_changed(const Warning_info *wi) const
+ { return get_warning_info()->id() != wi->id(); }
+
+ bool is_warning_info_empty() const
+ { return get_warning_info()->is_empty(); }
+
+ ulong current_statement_warn_count() const
+ { return get_warning_info()->current_statement_warn_count(); }
+
+ bool has_sql_condition(const char *message_str, ulong message_length) const
+ { return get_warning_info()->has_sql_condition(message_str, message_length); }
+
+ void reset_for_next_command()
+ { get_warning_info()->reset_for_next_command(); }
+
+ void clear_warning_info(ulonglong id)
+ { get_warning_info()->clear(id); }
+
+ void opt_clear_warning_info(ulonglong query_id)
+ { get_warning_info()->opt_clear(query_id); }
+
+ ulong current_row_for_warning() const
+ { return get_warning_info()->current_row_for_warning(); }
+
+ void inc_current_row_for_warning()
+ { get_warning_info()->inc_current_row_for_warning(); }
+
+ void reset_current_row_for_warning()
+ { get_warning_info()->reset_current_row_for_warning(); }
+
+ bool is_warning_info_read_only() const
+ { return get_warning_info()->is_read_only(); }
+
+ void set_warning_info_read_only(bool read_only)
+ { get_warning_info()->set_read_only(read_only); }
+
+ ulong error_count() const
+ { return get_warning_info()->error_count(); }
+
+ ulong warn_count() const
+ { return get_warning_info()->warn_count(); }
+
+ uint cond_count() const
+ { return get_warning_info()->cond_count(); }
+
+ Sql_condition_iterator sql_conditions() const
+ { return get_warning_info()->m_warn_list; }
+
+ void reserve_space(THD *thd, uint count)
+ { get_warning_info()->reserve_space(thd, count); }
+
+ Sql_condition *push_warning(THD *thd, const Sql_condition *sql_condition)
+ { return get_warning_info()->push_warning(thd, sql_condition); }
+
+ Sql_condition *push_warning(THD *thd,
+ uint sql_errno,
+ const char* sqlstate,
+ Sql_condition::enum_warning_level level,
+ const char* msg)
+ {
+ return get_warning_info()->push_warning(thd,
+ sql_errno, sqlstate, level, msg);
+ }
+
+ void mark_sql_conditions_for_removal()
+ { get_warning_info()->mark_sql_conditions_for_removal(); }
+
+ void unmark_sql_conditions_from_removal()
+ { get_warning_info()->unmark_sql_conditions_from_removal(); }
+
+ void remove_marked_sql_conditions()
+ { get_warning_info()->remove_marked_sql_conditions(); }
+
+ const Sql_condition *get_error_condition() const
+ { return get_warning_info()->get_error_condition(); }
+
+ void copy_sql_conditions_to_wi(THD *thd, Warning_info *dst_wi) const
+ { dst_wi->append_warning_info(thd, get_warning_info()); }
+
+ void copy_sql_conditions_from_wi(THD *thd, const Warning_info *src_wi)
+ { get_warning_info()->append_warning_info(thd, src_wi); }
+
+ void copy_non_errors_from_wi(THD *thd, const Warning_info *src_wi);
+
+private:
+ /** The current (topmost) Warning_info on the stack. */
+ Warning_info *get_warning_info() { return m_wi_stack.front(); }
+
+ const Warning_info *get_warning_info() const { return m_wi_stack.front(); }
+
+private:
+ /** True if status information is sent to the client. */
+ bool m_is_sent;
+
+ /** Set to make set_error_status after set_{ok,eof}_status possible. */
+ bool m_can_overwrite_status;
+
+ /** Message buffer. Can be used by OK or ERROR status. */
+ char m_message[MYSQL_ERRMSG_SIZE];
+
+ /**
+ SQL error number. One of ER_ codes from share/errmsg.txt.
+ Set by set_error_status.
+ */
+ uint m_sql_errno;
+
+ /** SQLSTATE of the error; set together with m_sql_errno. */
+ char m_sqlstate[SQLSTATE_LENGTH+1];
+
+ /**
+ The number of rows affected by the last statement. This is
+ semantically close to thd->row_count_func, but has a different
+ life cycle. thd->row_count_func stores the value returned by
+ function ROW_COUNT() and is cleared only by statements that
+ update its value, such as INSERT, UPDATE, DELETE and few others.
+ This member is cleared at the beginning of the next statement.
+
+ We could possibly merge the two, but life cycle of thd->row_count_func
+ can not be changed.
+ */
+ ulonglong m_affected_rows;
+
+ /**
+ Similarly to the previous member, this is a replacement of
+ thd->first_successful_insert_id_in_prev_stmt, which is used
+ to implement LAST_INSERT_ID().
+ */
+
+ ulonglong m_last_insert_id;
+ /**
+ Number of warnings of this last statement. May differ from
+ the number of warnings returned by SHOW WARNINGS e.g. in case
+ the statement doesn't clear the warnings, and doesn't generate
+ them.
+ */
+ uint m_statement_warn_count;
+
+ /** Current status: DA_EMPTY, DA_OK, DA_EOF, DA_ERROR or DA_DISABLED. */
+ enum_diagnostics_status m_status;
+
+ /** The default Warning_info object owned by this area. */
+ Warning_info m_main_wi;
+
+ /** Stack of Warning_info objects; the front element is the current one. */
+ Warning_info_list m_wi_stack;
+};
+
+///////////////////////////////////////////////////////////////////////////
+
+
+void push_warning(THD *thd, Sql_condition::enum_warning_level level,
uint code, const char *msg);
-void push_warning_printf(THD *thd, MYSQL_ERROR::enum_warning_level level,
- uint code, const char *format, ...);
+
+void push_warning_printf(THD *thd, Sql_condition::enum_warning_level level,
+ uint code, const char *format, ...);
+
bool mysqld_show_warnings(THD *thd, ulong levels_to_show);
-uint32 convert_error_message(char *to, uint32 to_length, CHARSET_INFO *to_cs,
+
+uint32 convert_error_message(char *to, uint32 to_length,
+ CHARSET_INFO *to_cs,
const char *from, uint32 from_length,
CHARSET_INFO *from_cs, uint *errors);
extern const LEX_STRING warning_level_names[];
+bool is_sqlstate_valid(const LEX_STRING *sqlstate);
+/**
+ Checks if the specified SQL-state-string defines COMPLETION condition.
+ This function assumes that the given string contains a valid SQL-state.
+
+ @param s the condition SQLSTATE.
+
+ @retval true if the given string defines COMPLETION condition.
+ @retval false otherwise.
+*/
+inline bool is_sqlstate_completion(const char *s)
+{ return s[0] == '0' && s[1] == '0'; }
+
+
+/**
+ Checks if the specified SQL-state-string defines WARNING condition.
+ This function assumes that the given string contains a valid SQL-state.
+
+ @param s the condition SQLSTATE.
+
+ @retval true if the given string defines WARNING condition.
+ @retval false otherwise.
+*/
+inline bool is_sqlstate_warning(const char *s)
+{ return s[0] == '0' && s[1] == '1'; }
+
+
+/**
+ Checks if the specified SQL-state-string defines NOT FOUND condition.
+ This function assumes that the given string contains a valid SQL-state.
+
+ @param s the condition SQLSTATE.
+
+ @retval true if the given string defines NOT FOUND condition.
+ @retval false otherwise.
+*/
+inline bool is_sqlstate_not_found(const char *s)
+{ return s[0] == '0' && s[1] == '2'; }
+
+
+/**
+ Checks if the specified SQL-state-string defines EXCEPTION condition.
+ This function assumes that the given string contains a valid SQL-state.
+
+ @param s the condition SQLSTATE.
+
+ @retval true if the given string defines EXCEPTION condition.
+ @retval false otherwise.
+*/
+inline bool is_sqlstate_exception(const char *s)
+{ return s[0] != '0' || s[1] > '2'; }
+
+
#endif // SQL_ERROR_H
diff --git a/sql/sql_get_diagnostics.cc b/sql/sql_get_diagnostics.cc
new file mode 100644
index 00000000000..be1e3589cc6
--- /dev/null
+++ b/sql/sql_get_diagnostics.cc
@@ -0,0 +1,340 @@
+/* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1307 USA */
+
+#include "sql_list.h" // Sql_alloc, List, List_iterator
+#include "sql_cmd.h" // Sql_cmd
+#include "sql_class.h" // Diagnostics_area
+#include "sql_get_diagnostics.h" // Sql_cmd_get_diagnostics
+
+/**
+ Execute this GET DIAGNOSTICS statement.
+
+ @param thd The current thread.
+
+ @remark Errors or warnings occurring during the execution of the GET
+ DIAGNOSTICS statement should not affect the diagnostics area
+ of a previous statement as the diagnostics information there
+ would be wiped out. Thus, in order to preserve the contents
+ of the diagnostics area from which information is being
+ retrieved, the GET DIAGNOSTICS statement is executed under
+ a separate diagnostics area. If any errors or warnings occur
+ during the execution of the GET DIAGNOSTICS statement, these
+ error or warnings (conditions) are appended to the list of
+ the original diagnostics area. The only exception to this is
+ fatal errors, which must always cause the statement to fail.
+
+ @retval false on success.
+ @retval true on error
+*/
+
+bool
+Sql_cmd_get_diagnostics::execute(THD *thd)
+{
+ bool rv;
+ Diagnostics_area new_stmt_da(thd->query_id, false, true);
+ Diagnostics_area *save_stmt_da= thd->get_stmt_da();
+ DBUG_ENTER("Sql_cmd_get_diagnostics::execute");
+
+ /* Disable the unneeded read-only mode of the original DA. */
+ save_stmt_da->set_warning_info_read_only(false);
+
+ /* Set new diagnostics area, execute statement and restore. */
+ thd->set_stmt_da(&new_stmt_da);
+ rv= m_info->aggregate(thd, save_stmt_da);
+ thd->set_stmt_da(save_stmt_da);
+
+ /* Bail out early if statement succeeded. */
+ if (! rv)
+ {
+ thd->get_stmt_da()->set_ok_status(0, 0, NULL);
+ DBUG_RETURN(false);
+ }
+
+ /* Statement failed, retrieve the error information for propagation. */
+ uint sql_errno= new_stmt_da.sql_errno();
+ const char *message= new_stmt_da.message();
+ const char *sqlstate= new_stmt_da.get_sqlstate();
+
+ /* In case of a fatal error, set it into the original DA. */
+ if (thd->is_fatal_error)
+ {
+ save_stmt_da->set_error_status(sql_errno, message, sqlstate, NULL);
+ DBUG_RETURN(true);
+ }
+
+ /* Otherwise, just append the new error as an exception condition. */
+ save_stmt_da->push_warning(thd, sql_errno, sqlstate,
+ Sql_condition::WARN_LEVEL_ERROR,
+ message);
+
+ /* Appending might have failed. */
+ if (! (rv= thd->is_error()))
+ thd->get_stmt_da()->set_ok_status(0, 0, NULL);
+
+ DBUG_RETURN(rv);
+}
+
+
+/**
+ Set a value for this item.
+
+ @param thd The current thread.
+ @param value The obtained value.
+
+ @retval false on success.
+ @retval true on error.
+*/
+
+bool
+Diagnostics_information_item::set_value(THD *thd, Item **value)
+{
+ bool rv;
+ Settable_routine_parameter *srp;
+ DBUG_ENTER("Diagnostics_information_item::set_value");
+
+ /* Get a settable reference to the target. */
+ srp= m_target->get_settable_routine_parameter();
+
+ /* NOTE(review): targets are presumably validated at parse time, so a
+ non-settable target (NULL srp) should be impossible here. */
+ DBUG_ASSERT(srp);
+
+ /* Set variable/parameter value. */
+ rv= srp->set_value(thd, thd->spcont, value);
+
+ DBUG_RETURN(rv);
+}
+
+
+/**
+ Obtain statement information in the context of a given diagnostics area.
+
+ @param thd The current thread.
+ @param da The diagnostics area.
+
+ @retval false on success.
+ @retval true on error
+*/
+
+bool
+Statement_information::aggregate(THD *thd, const Diagnostics_area *da)
+{
+ bool rv= false;
+ Statement_information_item *stmt_info_item;
+ List_iterator<Statement_information_item> it(*m_items);
+ DBUG_ENTER("Statement_information::aggregate");
+
+ /*
+ Each specified target gets the value of each given
+ information item obtained from the diagnostics area.
+ Evaluation stops at the first item that fails.
+ */
+ while ((stmt_info_item= it++))
+ {
+ if ((rv= evaluate(thd, stmt_info_item, da)))
+ break;
+ }
+
+ DBUG_RETURN(rv);
+}
+
+
+/**
+ Obtain the value of this statement information item in the context of
+ a given diagnostics area.
+
+ @param thd The current thread.
+ @param da The diagnostics area.
+
+ @retval Item representing the value.
+ @retval NULL on error.
+*/
+
+Item *
+Statement_information_item::get_value(THD *thd, const Diagnostics_area *da)
+{
+ Item *value= NULL;
+ DBUG_ENTER("Statement_information_item::get_value");
+
+ switch (m_name)
+ {
+ /*
+ The number of condition areas that have information. That is,
+ the number of errors and warnings within the diagnostics area.
+ */
+ case NUMBER:
+ {
+ ulong count= da->cond_count();
+ value= new (thd->mem_root) Item_uint(count);
+ break;
+ }
+ /*
+ Number that shows how many rows were directly affected by
+ a data-change statement (INSERT, UPDATE, DELETE, MERGE,
+ REPLACE, LOAD).
+ */
+ case ROW_COUNT:
+ value= new (thd->mem_root) Item_int(thd->get_row_count_func());
+ break;
+ }
+
+ /* NOTE(review): value may still be NULL here if item allocation failed;
+ presumably the caller treats NULL as an error -- confirm. */
+ DBUG_RETURN(value);
+}
+
+
+/**
+ Obtain condition information in the context of a given diagnostics area.
+
+ @param thd The current thread.
+ @param da The diagnostics area.
+
+ @retval false on success.
+ @retval true on error
+*/
+
+bool
+Condition_information::aggregate(THD *thd, const Diagnostics_area *da)
+{
+ bool rv= false;
+ longlong cond_number;
+ const Sql_condition *cond= NULL;
+ Condition_information_item *cond_info_item;
+ Diagnostics_area::Sql_condition_iterator it_conds= da->sql_conditions();
+ List_iterator_fast<Condition_information_item> it_items(*m_items);
+ DBUG_ENTER("Condition_information::aggregate");
+
+ /* Prepare the expression for evaluation. */
+ if (!m_cond_number_expr->fixed &&
+ m_cond_number_expr->fix_fields(thd, &m_cond_number_expr))
+ DBUG_RETURN(true);
+
+ /* Condition numbers are 1-based, as enforced by the range check below. */
+ cond_number= m_cond_number_expr->val_int();
+
+ /*
+ Limit to the number of available conditions. Warning_info::warn_count()
+ is not used because it indicates the number of condition regardless of
+ @@max_error_count, which prevents conditions from being pushed, but not
+ counted.
+ */
+ if (cond_number < 1 || (ulonglong) cond_number > da->cond_count())
+ {
+ my_error(ER_DA_INVALID_CONDITION_NUMBER, MYF(0));
+ DBUG_RETURN(true);
+ }
+
+ /* Advance to the requested condition. */
+ while (cond_number--)
+ cond= it_conds++;
+
+ DBUG_ASSERT(cond);
+
+ /* Evaluate the requested information in the context of the condition. */
+ while ((cond_info_item= it_items++))
+ {
+ if ((rv= evaluate(thd, cond_info_item, cond)))
+ break;
+ }
+
+ DBUG_RETURN(rv);
+}
+
+
+/**
+ Create an UTF-8 string item to represent a condition item string.
+
+ @remark The string might not have an associated charset. For example,
+ this can be the case if the server does not or fails to process
+ the error message file.
+
+ @remark See "Design notes about Sql_condition::m_message_text." in sql_error.cc
+
+ @param thd The current thread.
+ @param str The string to represent (and convert, if needed).
+
+ @return Pointer to a string item, NULL on failure.
+*/
+
+Item *
+Condition_information_item::make_utf8_string_item(THD *thd, const String *str)
+{
+ /* Default is utf8 character set and utf8_general_ci collation. */
+ CHARSET_INFO *to_cs= &my_charset_utf8_general_ci;
+ /* If a charset was not set, assume that no conversion is needed. */
+ CHARSET_INFO *from_cs= str->charset() ? str->charset() : to_cs;
+ Item_string *item= new Item_string(str->ptr(), str->length(), from_cs);
+ /* If necessary, convert the string (ignoring errors), then copy it over. */
+ return item ? item->charset_converter(to_cs, false) : NULL;
+}
+
+
+/**
+ Obtain the value of this condition information item in the context of
+ a given condition.
+
+ @param thd The current thread.
+ @param cond The SQL-condition to read the requested item from.
+
+ @retval Item representing the value.
+ @retval NULL on error.
+*/
+
+Item *
+Condition_information_item::get_value(THD *thd, const Sql_condition *cond)
+{
+ String str;
+ Item *value= NULL;
+ DBUG_ENTER("Condition_information_item::get_value");
+
+ switch (m_name)
+ {
+ case CLASS_ORIGIN:
+ value= make_utf8_string_item(thd, &(cond->m_class_origin));
+ break;
+ case SUBCLASS_ORIGIN:
+ value= make_utf8_string_item(thd, &(cond->m_subclass_origin));
+ break;
+ case CONSTRAINT_CATALOG:
+ value= make_utf8_string_item(thd, &(cond->m_constraint_catalog));
+ break;
+ case CONSTRAINT_SCHEMA:
+ value= make_utf8_string_item(thd, &(cond->m_constraint_schema));
+ break;
+ case CONSTRAINT_NAME:
+ value= make_utf8_string_item(thd, &(cond->m_constraint_name));
+ break;
+ case CATALOG_NAME:
+ value= make_utf8_string_item(thd, &(cond->m_catalog_name));
+ break;
+ case SCHEMA_NAME:
+ value= make_utf8_string_item(thd, &(cond->m_schema_name));
+ break;
+ case TABLE_NAME:
+ value= make_utf8_string_item(thd, &(cond->m_table_name));
+ break;
+ case COLUMN_NAME:
+ value= make_utf8_string_item(thd, &(cond->m_column_name));
+ break;
+ case CURSOR_NAME:
+ value= make_utf8_string_item(thd, &(cond->m_cursor_name));
+ break;
+ case MESSAGE_TEXT:
+ value= make_utf8_string_item(thd, &(cond->m_message_text));
+ break;
+ case MYSQL_ERRNO:
+ value= new (thd->mem_root) Item_uint(cond->m_sql_errno);
+ break;
+ case RETURNED_SQLSTATE:
+ /* SQLSTATE is always ASCII; wrap it in a String before conversion. */
+ str.set_ascii(cond->get_sqlstate(), strlen(cond->get_sqlstate()));
+ value= make_utf8_string_item(thd, &str);
+ break;
+ }
+
+ DBUG_RETURN(value);
+}
+
diff --git a/sql/sql_get_diagnostics.h b/sql/sql_get_diagnostics.h
new file mode 100644
index 00000000000..f34820757f5
--- /dev/null
+++ b/sql/sql_get_diagnostics.h
@@ -0,0 +1,318 @@
+/* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1307 USA */
+
+#ifndef SQL_GET_DIAGNOSTICS_H
+#define SQL_GET_DIAGNOSTICS_H
+
+/** Diagnostics information forward reference. */
+class Diagnostics_information;
+
+
+/**
+ Sql_cmd_get_diagnostics represents a GET DIAGNOSTICS statement.
+
+ The GET DIAGNOSTICS statement retrieves exception or completion
+ condition information from a diagnostics area, usually pertaining
+ to the last non-diagnostic SQL statement that was executed.
+*/
+class Sql_cmd_get_diagnostics : public Sql_cmd
+{
+public:
+ /**
+ Constructor, used to represent a GET DIAGNOSTICS statement.
+
+ @param info Diagnostics information to be obtained.
+ */
+ Sql_cmd_get_diagnostics(Diagnostics_information *info)
+ : m_info(info)
+ {}
+
+ virtual enum_sql_command sql_command_code() const
+ {
+ return SQLCOM_GET_DIAGNOSTICS;
+ }
+
+ virtual bool execute(THD *thd);
+
+private:
+ /** The information to be obtained. */
+ Diagnostics_information *m_info;
+};
+
+
+/**
+ Represents the diagnostics information to be obtained.
+
+ Diagnostic information is made available through statement
+ information and condition information items.
+*/
+class Diagnostics_information : public Sql_alloc
+{
+public:
+ /**
+ Which diagnostics area to access.
+ Only CURRENT is supported for now.
+ */
+ enum Which_area
+ {
+ /** Access the first diagnostics area. */
+ CURRENT_AREA
+ };
+
+ /** Set which diagnostics area to access. */
+ void set_which_da(Which_area area)
+ { m_area= area; }
+
+ /** Get which diagnostics area to access. */
+ Which_area get_which_da(void) const
+ { return m_area; }
+
+ /**
+ Aggregate diagnostics information.
+
+ @param thd The current thread.
+ @param da The diagnostics area.
+
+ @retval false on success.
+ @retval true on error.
+ */
+ virtual bool aggregate(THD *thd, const Diagnostics_area *da) = 0;
+
+protected:
+ /**
+ Diagnostics_information objects are allocated in thd->mem_root.
+ Do not rely on the destructor for any cleanup.
+ */
+ virtual ~Diagnostics_information()
+ {
+ DBUG_ASSERT(false);
+ }
+
+ /**
+ Evaluate a diagnostics information item in a specific context.
+
+ @param thd The current thread.
+ @param diag_item The diagnostics information item.
+ @param ctx The context to evaluate the item.
+
+ @retval false on success.
+ @retval true on error.
+ */
+ template <typename Diag_item, typename Context>
+ bool evaluate(THD *thd, Diag_item *diag_item, Context ctx)
+ {
+ Item *value;
+
+ /* Get this item's value. */
+ if (! (value= diag_item->get_value(thd, ctx)))
+ return true;
+
+ /* Set variable/parameter value. */
+ return diag_item->set_value(thd, &value);
+ }
+
+private:
+ /** Which diagnostics area to access. */
+ Which_area m_area;
+};
+
+
+/**
+ A diagnostics information item. Used to associate a specific
+ diagnostics information item to a target variable.
+*/
+class Diagnostics_information_item : public Sql_alloc
+{
+public:
+ /**
+ Set a value for this item.
+
+ @param thd The current thread.
+ @param value The obtained value.
+
+ @retval false on success.
+ @retval true on error.
+ */
+ bool set_value(THD *thd, Item **value);
+
+protected:
+ /**
+ Constructor, used to represent a diagnostics information item.
+
+ @param target A target that gets the value of this item.
+ */
+ Diagnostics_information_item(Item *target)
+ : m_target(target)
+ {}
+
+ /**
+ Diagnostics_information_item objects are allocated in thd->mem_root.
+ Do not rely on the destructor for any cleanup.
+ */
+ virtual ~Diagnostics_information_item()
+ {
+ DBUG_ASSERT(false);
+ }
+
+private:
+ /** The target variable that will receive the value of this item. */
+ Item *m_target;
+};
+
+
+/**
+ A statement information item.
+*/
+class Statement_information_item : public Diagnostics_information_item
+{
+public:
+ /** The name of a statement information item. */
+ enum Name
+ {
+ NUMBER,
+ ROW_COUNT
+ };
+
+ /**
+ Constructor, used to represent a statement information item.
+
+ @param name The name of this item.
+ @param target A target that gets the value of this item.
+ */
+ Statement_information_item(Name name, Item *target)
+ : Diagnostics_information_item(target), m_name(name)
+ {}
+
+ /** Obtain value of this statement information item. */
+ Item *get_value(THD *thd, const Diagnostics_area *da);
+
+private:
+ /** The name of this statement information item. */
+ Name m_name;
+};
+
+
+/**
+ Statement information.
+
+ @remark Provides information about the execution of a statement.
+*/
+class Statement_information : public Diagnostics_information
+{
+public:
+ /**
+ Constructor, used to represent the statement information of a
+ GET DIAGNOSTICS statement.
+
+ @param items List of requested statement information items.
+ */
+ Statement_information(List<Statement_information_item> *items)
+ : m_items(items)
+ {}
+
+ /** Obtain statement information in the context of a diagnostics area. */
+ bool aggregate(THD *thd, const Diagnostics_area *da);
+
+private:
+ /* List of statement information items. */
+ List<Statement_information_item> *m_items;
+};
+
+
+/**
+ A condition information item.
+*/
+class Condition_information_item : public Diagnostics_information_item
+{
+public:
+ /**
+ The name of a condition information item.
+ */
+ enum Name
+ {
+ CLASS_ORIGIN,
+ SUBCLASS_ORIGIN,
+ CONSTRAINT_CATALOG,
+ CONSTRAINT_SCHEMA,
+ CONSTRAINT_NAME,
+ CATALOG_NAME,
+ SCHEMA_NAME,
+ TABLE_NAME,
+ COLUMN_NAME,
+ CURSOR_NAME,
+ MESSAGE_TEXT,
+ MYSQL_ERRNO,
+ RETURNED_SQLSTATE
+ };
+
+ /**
+ Constructor, used to represent a condition information item.
+
+ @param name The name of this item.
+ @param target A target that gets the value of this item.
+ */
+ Condition_information_item(Name name, Item *target)
+ : Diagnostics_information_item(target), m_name(name)
+ {}
+
+ /** Obtain value of this condition information item. */
+ Item *get_value(THD *thd, const Sql_condition *cond);
+
+private:
+ /** The name of this condition information item. */
+ Name m_name;
+
+ /** Create a string item to represent a condition item string. */
+ Item *make_utf8_string_item(THD *thd, const String *str);
+};
+
+
+/**
+ Condition information.
+
+ @remark Provides information about conditions raised during the
+ execution of a statement.
+*/
+class Condition_information : public Diagnostics_information
+{
+public:
+ /**
+ Constructor, used to represent the condition information of a
+ GET DIAGNOSTICS statement.
+
+ @param cond_number_expr Number that identifies the diagnostic condition.
+ @param items List of requested condition information items.
+ */
+ Condition_information(Item *cond_number_expr,
+ List<Condition_information_item> *items)
+ : m_cond_number_expr(cond_number_expr), m_items(items)
+ {}
+
+ /** Obtain condition information in the context of a diagnostics area. */
+ bool aggregate(THD *thd, const Diagnostics_area *da);
+
+private:
+ /**
+ Number that identifies the diagnostic condition for which
+ information is to be obtained.
+ */
+ Item *m_cond_number_expr;
+
+ /** List of condition information items. */
+ List<Condition_information_item> *m_items;
+};
+
+#endif
+
diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc
index 3c4804c523a..4187327d622 100644
--- a/sql/sql_handler.cc
+++ b/sql/sql_handler.cc
@@ -171,7 +171,7 @@ static void mysql_ha_close_table(SQL_HANDLER *handler)
table->file->ha_index_or_rnd_end();
table->open_by_handler= 0;
- (void) close_thread_table(thd, &table);
+ close_thread_table(thd, &table);
thd->mdl_context.release_lock(handler->mdl_request.ticket);
}
else
@@ -294,7 +294,8 @@ bool mysql_ha_open(THD *thd, TABLE_LIST *tables, SQL_HANDLER *reopen)
open_ltable() or open_table() because we would like to be able
to open a temporary table.
*/
- error= open_tables(thd, &tables, &counter, 0);
+ error= (open_temporary_tables(thd, tables) ||
+ open_tables(thd, &tables, &counter, 0));
if (error)
goto err;
@@ -502,9 +503,9 @@ public:
bool handle_condition(THD *thd,
uint sql_errno,
const char *sqlstate,
- MYSQL_ERROR::enum_warning_level level,
+ Sql_condition::enum_warning_level level,
const char* msg,
- MYSQL_ERROR **cond_hdl);
+ Sql_condition **cond_hdl);
bool need_reopen() const { return m_need_reopen; };
void init() { m_need_reopen= FALSE; };
@@ -523,9 +524,9 @@ Sql_handler_lock_error_handler::
handle_condition(THD *thd,
uint sql_errno,
const char *sqlstate,
- MYSQL_ERROR::enum_warning_level level,
+ Sql_condition::enum_warning_level level,
const char* msg,
- MYSQL_ERROR **cond_hdl)
+ Sql_condition **cond_hdl)
{
*cond_hdl= NULL;
if (sql_errno == ER_LOCK_ABORTED)
@@ -640,9 +641,10 @@ mysql_ha_fix_cond_and_key(SQL_HANDLER *handler,
key_part_map keypart_map;
uint key_len;
- if (key_expr->elements > keyinfo->key_parts)
+ if (key_expr->elements > keyinfo->user_defined_key_parts)
{
- my_error(ER_TOO_MANY_KEY_PARTS, MYF(0), keyinfo->key_parts);
+ my_error(ER_TOO_MANY_KEY_PARTS, MYF(0),
+ keyinfo->user_defined_key_parts);
return 1;
}
for (keypart_map= key_len=0 ; (item=it_ke++) ; key_part++)
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index 2eda80e8b36..b5178f865d1 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -957,7 +957,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
error=write_record(thd, table ,&info);
if (error)
break;
- thd->warning_info->inc_current_row_for_warning();
+ thd->get_stmt_da()->inc_current_row_for_warning();
}
free_underlaid_joins(thd, &thd->lex->select_lex);
@@ -1114,11 +1114,11 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
sprintf(buff, ER(ER_INSERT_INFO), (ulong) info.records,
(lock_type == TL_WRITE_DELAYED) ? (ulong) 0 :
(ulong) (info.records - info.copied),
- (ulong) thd->warning_info->statement_warn_count());
+ (long) thd->get_stmt_da()->current_statement_warn_count());
else
sprintf(buff, ER(ER_INSERT_INFO), (ulong) info.records,
(ulong) (info.deleted + updated),
- (ulong) thd->warning_info->statement_warn_count());
+ (long) thd->get_stmt_da()->current_statement_warn_count());
::my_ok(thd, info.copied + info.deleted + updated, id, buff);
}
thd->abort_on_warning= 0;
@@ -1203,7 +1203,7 @@ static bool check_view_insertability(THD * thd, TABLE_LIST *view)
}
Item_field *field;
/* simple SELECT list entry (field without expression) */
- if (!(field= trans->item->filed_for_view_update()))
+ if (!(field= trans->item->field_for_view_update()))
{
thd->mark_used_columns= save_mark_used_columns;
DBUG_RETURN(TRUE);
@@ -1639,7 +1639,7 @@ int write_record(THD *thd, TABLE *table,COPY_INFO *info)
}
}
key_copy((uchar*) key,table->record[0],table->key_info+key_nr,0);
- key_part_map keypart_map= (1 << table->key_info[key_nr].key_parts) - 1;
+ key_part_map keypart_map= (1 << table->key_info[key_nr].user_defined_key_parts) - 1;
if ((error= (table->file->ha_index_read_idx_map(table->record[1],
key_nr, (uchar*) key,
keypart_map,
@@ -1888,7 +1888,7 @@ int check_that_all_fields_are_given_values(THD *thd, TABLE *entry,
}
if (view)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_NO_DEFAULT_FOR_VIEW_FIELD,
ER(ER_NO_DEFAULT_FOR_VIEW_FIELD),
table_list->view_db.str,
@@ -1896,7 +1896,7 @@ int check_that_all_fields_are_given_values(THD *thd, TABLE *entry,
}
else
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_NO_DEFAULT_FOR_FIELD,
ER(ER_NO_DEFAULT_FOR_FIELD),
(*field)->field_name);
@@ -2246,7 +2246,8 @@ bool delayed_get_table(THD *thd, MDL_request *grl_protection_request,
want to send "Server shutdown in progress" in the
INSERT THREAD.
*/
- my_message(di->thd.stmt_da->sql_errno(), di->thd.stmt_da->message(),
+ my_message(di->thd.get_stmt_da()->sql_errno(),
+ di->thd.get_stmt_da()->message(),
MYF(0));
}
di->unlock();
@@ -2336,7 +2337,8 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd)
if (!thd.is_error())
my_message(ER_QUERY_INTERRUPTED, ER(ER_QUERY_INTERRUPTED), MYF(0));
else
- my_message(thd.stmt_da->sql_errno(), thd.stmt_da->message(), MYF(0));
+ my_message(thd.get_stmt_da()->sql_errno(),
+ thd.get_stmt_da()->message(), MYF(0));
goto error;
}
}
@@ -2758,8 +2760,7 @@ pthread_handler_t handle_delayed_insert(void *arg)
if (my_thread_init())
{
/* Can't use my_error since store_globals has not yet been called */
- thd->stmt_da->set_error_status(thd, ER_OUT_OF_RESOURCES,
- ER(ER_OUT_OF_RESOURCES), NULL);
+ thd->get_stmt_da()->set_error_status(ER_OUT_OF_RESOURCES);
di->handler_thread_initialized= TRUE;
}
else
@@ -2769,8 +2770,7 @@ pthread_handler_t handle_delayed_insert(void *arg)
if (init_thr_lock() || thd->store_globals())
{
/* Can't use my_error since store_globals has perhaps failed */
- thd->stmt_da->set_error_status(thd, ER_OUT_OF_RESOURCES,
- ER(ER_OUT_OF_RESOURCES), NULL);
+ thd->get_stmt_da()->set_error_status(ER_OUT_OF_RESOURCES);
di->handler_thread_initialized= TRUE;
thd->fatal_error();
goto err;
@@ -3160,7 +3160,7 @@ bool Delayed_insert::handle_inserts(void)
{
/* This should never happen */
table->file->print_error(error,MYF(0));
- sql_print_error("%s", thd.stmt_da->message());
+ sql_print_error("%s", thd.get_stmt_da()->message());
DBUG_PRINT("error", ("HA_EXTRA_NO_CACHE failed in loop"));
goto err;
}
@@ -3206,7 +3206,7 @@ bool Delayed_insert::handle_inserts(void)
if ((error=table->file->extra(HA_EXTRA_NO_CACHE)))
{ // This shouldn't happen
table->file->print_error(error,MYF(0));
- sql_print_error("%s", thd.stmt_da->message());
+ sql_print_error("%s", thd.get_stmt_da()->message());
DBUG_PRINT("error", ("HA_EXTRA_NO_CACHE failed after loop"));
goto err;
}
@@ -3644,7 +3644,7 @@ bool select_insert::send_eof()
error= (thd->locked_tables_mode <= LTM_LOCK_TABLES ?
table->file->ha_end_bulk_insert() : 0);
if (!error && thd->is_error())
- error= thd->stmt_da->sql_errno();
+ error= thd->get_stmt_da()->sql_errno();
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
@@ -3697,11 +3697,11 @@ bool select_insert::send_eof()
if (info.ignore)
sprintf(buff, ER(ER_INSERT_INFO), (ulong) info.records,
(ulong) (info.records - info.copied),
- (ulong) thd->warning_info->statement_warn_count());
+ (long) thd->get_stmt_da()->current_statement_warn_count());
else
sprintf(buff, ER(ER_INSERT_INFO), (ulong) info.records,
(ulong) (info.deleted+info.updated),
- (ulong) thd->warning_info->statement_warn_count());
+ (long) thd->get_stmt_da()->current_statement_warn_count());
row_count= info.copied + info.deleted +
((thd->client_capabilities & CLIENT_FOUND_ROWS) ?
info.touched : info.updated);
@@ -3839,7 +3839,6 @@ static TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info,
init_tmp_table_share(thd, &share, "", 0, "", "");
tmp_table.s->db_create_options=0;
- tmp_table.s->blob_ptr_size= portable_sizeof_char_ptr;
tmp_table.null_row= 0;
tmp_table.maybe_null= 0;
@@ -3905,7 +3904,7 @@ static TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info,
*/
if (open_table(thd, create_table, thd->mem_root, &ot_ctx))
{
- quick_rm_table(create_info->db_type, create_table->db,
+ quick_rm_table(thd, create_info->db_type, create_table->db,
table_case_name(create_info, create_table->table_name),
0);
}
@@ -3914,15 +3913,14 @@ static TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info,
}
else
{
- Open_table_context ot_ctx(thd, MYSQL_OPEN_TEMPORARY_ONLY);
- if (open_table(thd, create_table, thd->mem_root, &ot_ctx))
+ if (open_temporary_table(thd, create_table))
{
/*
This shouldn't happen as creation of temporary table should make
- it preparable for open. But let us do close_temporary_table() here
- just in case.
+ it preparable for open. Anyway we can't drop temporary table if
+ we are unable to find it.
*/
- drop_temporary_table(thd, create_table, NULL);
+ DBUG_ASSERT(0);
}
else
table= create_table->table;
diff --git a/sql/sql_join_cache.cc b/sql/sql_join_cache.cc
index 9fca8730cb5..0acccfcee48 100644
--- a/sql/sql_join_cache.cc
+++ b/sql/sql_join_cache.cc
@@ -696,7 +696,7 @@ void JOIN_CACHE::set_constants()
pack_length_with_blob_ptrs= pack_length + blobs*sizeof(uchar *);
min_buff_size= 0;
min_records= 1;
- buff_size= max(join->thd->variables.join_buff_size,
+ buff_size= MY_MAX(join->thd->variables.join_buff_size,
get_min_join_buffer_size());
size_of_rec_ofs= offset_size(buff_size);
size_of_rec_len= blobs ? size_of_rec_ofs : offset_size(len);
@@ -2739,7 +2739,7 @@ int JOIN_CACHE_HASHED::init_hash_table()
key_entries= 0;
/* Calculate the minimal possible value of size_of_key_ofs greater than 1 */
- uint max_size_of_key_ofs= max(2, get_size_of_rec_offset());
+ uint max_size_of_key_ofs= MY_MAX(2, get_size_of_rec_offset());
for (size_of_key_ofs= 2;
size_of_key_ofs <= max_size_of_key_ofs;
size_of_key_ofs+= 2)
diff --git a/sql/sql_join_cache.h b/sql/sql_join_cache.h
index 6953f6881ee..1c56fc9b178 100644
--- a/sql/sql_join_cache.h
+++ b/sql/sql_join_cache.h
@@ -420,7 +420,7 @@ protected:
/* Shall calculate how much space is remaining in the join buffer */
virtual size_t rem_space()
{
- return max(buff_size-(end_pos-buff)-aux_buff_size,0);
+ return MY_MAX(buff_size-(end_pos-buff)-aux_buff_size,0);
}
/*
@@ -943,7 +943,7 @@ protected:
*/
size_t rem_space()
{
- return max(last_key_entry-end_pos-aux_buff_size,0);
+ return MY_MAX(last_key_entry-end_pos-aux_buff_size,0);
}
/*
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index 501d846421d..1bf0d49214e 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -494,12 +494,13 @@ void lex_start(THD *thd)
lex->select_lex.ftfunc_list= &lex->select_lex.ftfunc_list_alloc;
lex->select_lex.group_list.empty();
lex->select_lex.order_list.empty();
+ lex->m_sql_cmd= NULL;
lex->duplicates= DUP_ERROR;
lex->ignore= 0;
lex->spname= NULL;
lex->sphead= NULL;
lex->spcont= NULL;
- lex->m_stmt= NULL;
+ lex->m_sql_cmd= NULL;
lex->proc_list.first= 0;
lex->escape_used= FALSE;
lex->query_tables= 0;
@@ -509,6 +510,7 @@ void lex_start(THD *thd)
lex->parse_vcol_expr= FALSE;
lex->check_exists= FALSE;
lex->verbose= 0;
+ lex->contains_plaintext_password= false;
lex->name.str= 0;
lex->name.length= 0;
@@ -1747,50 +1749,6 @@ int lex_one_token(void *arg, void *yythd)
}
-/**
- Construct a copy of this object to be used for mysql_alter_table
- and mysql_create_table.
-
- Historically, these two functions modify their Alter_info
- arguments. This behaviour breaks re-execution of prepared
- statements and stored procedures and is compensated by always
- supplying a copy of Alter_info to these functions.
-
- @return You need to use check the error in THD for out
- of memory condition after calling this function.
-*/
-
-Alter_info::Alter_info(const Alter_info &rhs, MEM_ROOT *mem_root)
- :drop_list(rhs.drop_list, mem_root),
- alter_list(rhs.alter_list, mem_root),
- key_list(rhs.key_list, mem_root),
- create_list(rhs.create_list, mem_root),
- flags(rhs.flags),
- keys_onoff(rhs.keys_onoff),
- tablespace_op(rhs.tablespace_op),
- partition_names(rhs.partition_names, mem_root),
- num_parts(rhs.num_parts),
- change_level(rhs.change_level),
- datetime_field(rhs.datetime_field),
- error_if_not_empty(rhs.error_if_not_empty)
-{
- /*
- Make deep copies of used objects.
- This is not a fully deep copy - clone() implementations
- of Alter_drop, Alter_column, Key, foreign_key, Key_part_spec
- do not copy string constants. At the same length the only
- reason we make a copy currently is that ALTER/CREATE TABLE
- code changes input Alter_info definitions, but string
- constants never change.
- */
- list_copy_and_replace_each_value(drop_list, mem_root);
- list_copy_and_replace_each_value(alter_list, mem_root);
- list_copy_and_replace_each_value(key_list, mem_root);
- list_copy_and_replace_each_value(create_list, mem_root);
- /* partition_names are not deeply copied currently */
-}
-
-
void trim_whitespace(CHARSET_INFO *cs, LEX_STRING *str)
{
/*
@@ -2200,12 +2158,13 @@ bool st_select_lex_node::inc_in_sum_expr() { return 1; }
uint st_select_lex_node::get_in_sum_expr() { return 0; }
TABLE_LIST* st_select_lex_node::get_table_list() { return 0; }
List<Item>* st_select_lex_node::get_item_list() { return 0; }
-TABLE_LIST *st_select_lex_node::add_table_to_list (THD *thd, Table_ident *table,
+TABLE_LIST *st_select_lex_node::add_table_to_list(THD *thd, Table_ident *table,
LEX_STRING *alias,
ulong table_join_options,
thr_lock_type flags,
enum_mdl_type mdl_type,
List<Index_hint> *hints,
+ List<String> *partition_names,
LEX_STRING *option)
{
return 0;
@@ -4323,8 +4282,8 @@ int st_select_lex_unit::print_explain(select_result_sink *output,
bool LEX::is_partition_management() const
{
return (sql_command == SQLCOM_ALTER_TABLE &&
- (alter_info.flags == ALTER_ADD_PARTITION ||
- alter_info.flags == ALTER_REORGANIZE_PARTITION));
+ (alter_info.flags == Alter_info::ALTER_ADD_PARTITION ||
+ alter_info.flags == Alter_info::ALTER_REORGANIZE_PARTITION));
}
#ifdef MYSQL_SERVER
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 0802adc94aa..59f7c122646 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -27,6 +27,7 @@
#include "thr_lock.h" /* thr_lock_type, TL_UNLOCK */
#include "mem_root_array.h"
#include "sql_cmd.h"
+#include "sql_alter.h" // Alter_info
/* YACC and LEX Definitions */
@@ -44,9 +45,6 @@ class Event_parse_data;
class set_var_base;
class sys_var;
class Item_func_match;
-class Alter_drop;
-class Alter_column;
-class Key;
class File_parser;
class Key_part_spec;
@@ -117,6 +115,7 @@ struct sys_var_with_base
#include "lex_symbol.h"
#if MYSQL_LEX
#include "item_func.h" /* Cast_target used in sql_yacc.h */
+#include "sql_get_diagnostics.h" /* Types used in sql_yacc.h */
#include "sql_yacc.h"
#define LEX_YYSTYPE YYSTYPE *
#else
@@ -265,11 +264,6 @@ enum olap_type
UNSPECIFIED_OLAP_TYPE, CUBE_TYPE, ROLLUP_TYPE
};
-enum tablespace_op_type
-{
- NO_TABLESPACE_OP, DISCARD_TABLESPACE, IMPORT_TABLESPACE
-};
-
/*
String names used to print a statement with index hints.
Keep in sync with index_hint_type.
@@ -513,6 +507,7 @@ public:
thr_lock_type flags= TL_UNLOCK,
enum_mdl_type mdl_type= MDL_SHARED_READ,
List<Index_hint> *hints= 0,
+ List<String> *partition_names= 0,
LEX_STRING *option= 0);
virtual void set_lock_for_tables(thr_lock_type lock_type) {}
@@ -876,6 +871,7 @@ public:
thr_lock_type flags= TL_UNLOCK,
enum_mdl_type mdl_type= MDL_SHARED_READ,
List<Index_hint> *hints= 0,
+ List<String> *partition_names= 0,
LEX_STRING *option= 0);
TABLE_LIST* get_table_list();
bool init_nested_join(THD *thd);
@@ -1007,110 +1003,6 @@ inline bool st_select_lex_unit::is_union ()
first_select()->next_select()->linkage == UNION_TYPE;
}
-#define ALTER_ADD_COLUMN (1L << 0)
-#define ALTER_DROP_COLUMN (1L << 1)
-#define ALTER_CHANGE_COLUMN (1L << 2)
-#define ALTER_ADD_INDEX (1L << 3)
-#define ALTER_DROP_INDEX (1L << 4)
-#define ALTER_RENAME (1L << 5)
-#define ALTER_ORDER (1L << 6)
-#define ALTER_OPTIONS (1L << 7)
-#define ALTER_CHANGE_COLUMN_DEFAULT (1L << 8)
-#define ALTER_KEYS_ONOFF (1L << 9)
-#define ALTER_CONVERT (1L << 10)
-#define ALTER_RECREATE (1L << 11)
-#define ALTER_ADD_PARTITION (1L << 12)
-#define ALTER_DROP_PARTITION (1L << 13)
-#define ALTER_COALESCE_PARTITION (1L << 14)
-#define ALTER_REORGANIZE_PARTITION (1L << 15)
-#define ALTER_PARTITION (1L << 16)
-#define ALTER_ADMIN_PARTITION (1L << 17)
-#define ALTER_TABLE_REORG (1L << 18)
-#define ALTER_REBUILD_PARTITION (1L << 19)
-#define ALTER_ALL_PARTITION (1L << 20)
-#define ALTER_REMOVE_PARTITIONING (1L << 21)
-#define ALTER_FOREIGN_KEY (1L << 22)
-#define ALTER_TRUNCATE_PARTITION (1L << 23)
-
-enum enum_alter_table_change_level
-{
- ALTER_TABLE_METADATA_ONLY= 0,
- ALTER_TABLE_DATA_CHANGED= 1,
- ALTER_TABLE_INDEX_CHANGED= 2
-};
-
-
-/**
- Temporary hack to enable a class bound forward declaration
- of the enum_alter_table_change_level enumeration. To be
- removed once Alter_info is moved to the sql_alter.h
- header.
-*/
-class Alter_table_change_level
-{
-private:
- typedef enum enum_alter_table_change_level enum_type;
- enum_type value;
-public:
- void operator = (enum_type v) { value = v; }
- operator enum_type () { return value; }
-};
-
-
-/**
- @brief Parsing data for CREATE or ALTER TABLE.
-
- This structure contains a list of columns or indexes to be created,
- altered or dropped.
-*/
-
-class Alter_info
-{
-public:
- List<Alter_drop> drop_list;
- List<Alter_column> alter_list;
- List<Key> key_list;
- List<Create_field> create_list;
- uint flags;
- enum enum_enable_or_disable keys_onoff;
- enum tablespace_op_type tablespace_op;
- List<char> partition_names;
- uint num_parts;
- enum_alter_table_change_level change_level;
- Create_field *datetime_field;
- bool error_if_not_empty;
-
-
- Alter_info() :
- flags(0),
- keys_onoff(LEAVE_AS_IS),
- tablespace_op(NO_TABLESPACE_OP),
- num_parts(0),
- change_level(ALTER_TABLE_METADATA_ONLY),
- datetime_field(NULL),
- error_if_not_empty(FALSE)
- {}
-
- void reset()
- {
- drop_list.empty();
- alter_list.empty();
- key_list.empty();
- create_list.empty();
- flags= 0;
- keys_onoff= LEAVE_AS_IS;
- tablespace_op= NO_TABLESPACE_OP;
- num_parts= 0;
- partition_names.empty();
- change_level= ALTER_TABLE_METADATA_ONLY;
- datetime_field= 0;
- error_if_not_empty= FALSE;
- }
- Alter_info(const Alter_info &rhs, MEM_ROOT *mem_root);
-private:
- Alter_info &operator=(const Alter_info &rhs); // not implemented
- Alter_info(const Alter_info &rhs); // not implemented
-};
struct st_sp_chistics
{
@@ -1193,7 +1085,38 @@ public:
Sroutine_hash_entry **sroutines_list_own_last;
uint sroutines_list_own_elements;
- /*
+ /**
+ Locking state of tables in this particular statement.
+
+ If we under LOCK TABLES or in prelocked mode we consider tables
+ for the statement to be "locked" if there was a call to lock_tables()
+ (which called handler::start_stmt()) for tables of this statement
+ and there was no matching close_thread_tables() call.
+
+ As result this state may differ significantly from one represented
+ by Open_tables_state::lock/locked_tables_mode more, which are always
+ "on" under LOCK TABLES or in prelocked mode.
+ */
+ enum enum_lock_tables_state {
+ LTS_NOT_LOCKED = 0,
+ LTS_LOCKED
+ };
+ enum_lock_tables_state lock_tables_state;
+ bool is_query_tables_locked()
+ {
+ return (lock_tables_state == LTS_LOCKED);
+ }
+
+ /**
+ Number of tables which were open by open_tables() and to be locked
+ by lock_tables().
+ Note that we set this member only in some cases, when this value
+ needs to be passed from open_tables() to lock_tables() which are
+ separated by some amount of code.
+ */
+ uint table_count;
+
+ /*
These constructor and destructor serve for creation/destruction
of Query_tables_list instances which are used as backup storage.
*/
@@ -2393,7 +2316,7 @@ struct LEX: public Query_tables_list
*/
nesting_map allow_sum_func;
- Sql_statement *m_stmt;
+ Sql_cmd *m_sql_cmd;
/*
Usually `expr` rule of yacc is quite reused but some commands better
@@ -2456,7 +2379,7 @@ struct LEX: public Query_tables_list
enum enum_yes_no_unknown tx_chain, tx_release;
bool safe_to_cache_query;
- bool subqueries, ignore, online;
+ bool subqueries, ignore;
st_parsing_options parsing_options;
Alter_info alter_info;
/*
@@ -2481,6 +2404,8 @@ struct LEX: public Query_tables_list
bool sp_lex_in_use; /* Keep track on lex usage in SPs for error handling */
bool all_privileges;
bool proxy_priv;
+ bool contains_plaintext_password;
+
sp_pcontext *spcont;
st_sp_chistics sp_chistics;
diff --git a/sql/sql_load.cc b/sql/sql_load.cc
index 78e88e3ede2..0d0efb0c21f 100644
--- a/sql/sql_load.cc
+++ b/sql/sql_load.cc
@@ -218,7 +218,7 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
!field_term->is_ascii() ||
!ex->line_term->is_ascii() || !ex->line_start->is_ascii())
{
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED,
ER(WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED));
}
@@ -588,7 +588,7 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list,
}
sprintf(name, ER(ER_LOAD_INFO), (ulong) info.records, (ulong) info.deleted,
(ulong) (info.records - info.copied),
- (ulong) thd->warning_info->statement_warn_count());
+ (long) thd->get_stmt_da()->current_statement_warn_count());
if (thd->transaction.stmt.modified_non_trans_table)
thd->transaction.all.modified_non_trans_table= TRUE;
@@ -829,10 +829,10 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
if (pos == read_info.row_end)
{
thd->cuted_fields++; /* Not enough fields */
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARN_TOO_FEW_RECORDS,
ER(ER_WARN_TOO_FEW_RECORDS),
- thd->warning_info->current_row_for_warning());
+ thd->get_stmt_da()->current_row_for_warning());
/*
Timestamp fields that are NOT NULL are autoupdated if there is no
corresponding value in the data file.
@@ -859,10 +859,10 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
if (pos != read_info.row_end)
{
thd->cuted_fields++; /* To long row */
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARN_TOO_MANY_RECORDS,
ER(ER_WARN_TOO_MANY_RECORDS),
- thd->warning_info->current_row_for_warning());
+ thd->get_stmt_da()->current_row_for_warning());
}
if (thd->killed ||
@@ -895,12 +895,12 @@ read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
if (read_info.line_cuted)
{
thd->cuted_fields++; /* To long row */
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARN_TOO_MANY_RECORDS,
ER(ER_WARN_TOO_MANY_RECORDS),
- thd->warning_info->current_row_for_warning());
+ thd->get_stmt_da()->current_row_for_warning());
}
- thd->warning_info->inc_current_row_for_warning();
+ thd->get_stmt_da()->inc_current_row_for_warning();
continue_loop:;
}
DBUG_RETURN(test(read_info.error));
@@ -980,7 +980,7 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
if (field->reset())
{
my_error(ER_WARN_NULL_TO_NOTNULL, MYF(0), field->field_name,
- thd->warning_info->current_row_for_warning());
+ thd->get_stmt_da()->current_row_for_warning());
DBUG_RETURN(1);
}
field->set_null();
@@ -993,7 +993,7 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
if (field->type() == MYSQL_TYPE_TIMESTAMP)
field->set_time();
else if (field != table->next_number_field)
- field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
+ field->set_warning(Sql_condition::WARN_LEVEL_WARN,
ER_WARN_NULL_TO_NOTNULL, 1);
}
/* Do not auto-update this field. */
@@ -1059,7 +1059,7 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
if (field->reset())
{
my_error(ER_WARN_NULL_TO_NOTNULL, MYF(0),field->field_name,
- thd->warning_info->current_row_for_warning());
+ thd->get_stmt_da()->current_row_for_warning());
DBUG_RETURN(1);
}
if (!field->maybe_null() && field->type() == FIELD_TYPE_TIMESTAMP)
@@ -1072,10 +1072,10 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
in the end ?)
*/
thd->cuted_fields++;
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARN_TOO_FEW_RECORDS,
ER(ER_WARN_TOO_FEW_RECORDS),
- thd->warning_info->current_row_for_warning());
+ thd->get_stmt_da()->current_row_for_warning());
}
else if (item->type() == Item::STRING_ITEM)
{
@@ -1119,13 +1119,13 @@ read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
if (read_info.line_cuted)
{
thd->cuted_fields++; /* To long row */
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARN_TOO_MANY_RECORDS, ER(ER_WARN_TOO_MANY_RECORDS),
- thd->warning_info->current_row_for_warning());
+ thd->get_stmt_da()->current_row_for_warning());
if (thd->killed)
DBUG_RETURN(1);
}
- thd->warning_info->inc_current_row_for_warning();
+ thd->get_stmt_da()->inc_current_row_for_warning();
continue_loop:;
}
DBUG_RETURN(test(read_info.error));
@@ -1206,7 +1206,7 @@ read_xml_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
if (field->type() == FIELD_TYPE_TIMESTAMP)
field->set_time();
else if (field != table->next_number_field)
- field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN,
+ field->set_warning(Sql_condition::WARN_LEVEL_WARN,
ER_WARN_NULL_TO_NOTNULL, 1);
}
/* Do not auto-update this field. */
@@ -1259,10 +1259,10 @@ read_xml_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
in the end ?)
*/
thd->cuted_fields++;
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARN_TOO_FEW_RECORDS,
ER(ER_WARN_TOO_FEW_RECORDS),
- thd->warning_info->current_row_for_warning());
+ thd->get_stmt_da()->current_row_for_warning());
}
else
((Item_user_var_as_out_param *)item)->set_null_value(cs);
@@ -1293,7 +1293,7 @@ read_xml_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list,
its default value at the beginning of each loop iteration.
*/
thd->transaction.stmt.modified_non_trans_table= no_trans_update_stmt;
- thd->warning_info->inc_current_row_for_warning();
+ thd->get_stmt_da()->inc_current_row_for_warning();
continue_loop:;
}
DBUG_RETURN(test(read_info.error) || thd->is_error());
@@ -1364,7 +1364,7 @@ READ_INFO::READ_INFO(File file_par, uint tot_length, CHARSET_INFO *cs,
line_term_char= line_term_length ? (uchar) line_term_ptr[0] : INT_MAX;
/* Set of a stack for unget if long terminators */
- uint length= max(cs->mbmaxlen, max(field_term_length, line_term_length)) + 1;
+ uint length= MY_MAX(cs->mbmaxlen, MY_MAX(field_term_length, line_term_length)) + 1;
set_if_bigger(length,line_start.length());
stack=stack_pos=(int*) sql_alloc(sizeof(int)*length);
diff --git a/sql/sql_locale.cc b/sql/sql_locale.cc
index 13e00c99f19..b2b112ed4ba 100644
--- a/sql/sql_locale.cc
+++ b/sql/sql_locale.cc
@@ -3422,7 +3422,7 @@ MY_LOCALE *my_locale_by_name(const char *name)
if (thd)
{
// Send a warning to the client
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARN_DEPRECATED_SYNTAX, ER(ER_WARN_DEPRECATED_SYNTAX),
name, locale->name);
}
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index f9554c79305..7d057f4e91a 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -24,7 +24,7 @@
// set_handler_table_locks,
// lock_global_read_lock,
// make_global_read_lock_block_commit
-#include "sql_base.h" // find_temporary_tablesx
+#include "sql_base.h" // find_temporary_table
#include "sql_cache.h" // QUERY_CACHE_FLAGS_SIZE, query_cache_*
#include "sql_show.h" // mysqld_list_*, mysqld_show_*,
// calc_sum_of_all_status
@@ -44,7 +44,6 @@
#include "sql_table.h" // mysql_create_like_table,
// mysql_create_table,
// mysql_alter_table,
- // mysql_recreate_table,
// mysql_backup_table,
// mysql_restore_table
#include "sql_reload.h" // reload_acl_and_cache
@@ -499,6 +498,7 @@ void init_update_queries(void)
sql_command_flags[SQLCOM_SELECT]|= CF_PREOPEN_TMP_TABLES;
sql_command_flags[SQLCOM_SET_OPTION]|= CF_PREOPEN_TMP_TABLES;
sql_command_flags[SQLCOM_DO]|= CF_PREOPEN_TMP_TABLES;
+ sql_command_flags[SQLCOM_HA_OPEN]|= CF_PREOPEN_TMP_TABLES;
sql_command_flags[SQLCOM_CALL]|= CF_PREOPEN_TMP_TABLES;
sql_command_flags[SQLCOM_CHECKSUM]|= CF_PREOPEN_TMP_TABLES;
sql_command_flags[SQLCOM_ANALYZE]|= CF_PREOPEN_TMP_TABLES;
@@ -512,7 +512,7 @@ void init_update_queries(void)
DDL statements that should start with closing opened handlers.
We use this flag only for statements for which open HANDLERs
- have to be closed before emporary tables are pre-opened.
+ have to be closed before temporary tables are pre-opened.
*/
sql_command_flags[SQLCOM_CREATE_TABLE]|= CF_HA_CLOSE;
sql_command_flags[SQLCOM_DROP_TABLE]|= CF_HA_CLOSE;
@@ -888,7 +888,7 @@ bool do_command(THD *thd)
Consider moving to init_connect() instead.
*/
thd->clear_error(); // Clear error message
- thd->stmt_da->reset_diagnostics_area();
+ thd->get_stmt_da()->reset_diagnostics_area();
net_new_transaction(net);
@@ -1158,7 +1158,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
#endif
case COM_CHANGE_USER:
{
- bool rc;
+ int auth_rc;
status_var_increment(thd->status_var.com_other);
thd->change_user();
@@ -1189,13 +1189,13 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
if (thd->failed_com_change_user >= 3)
{
my_message(ER_UNKNOWN_COM_ERROR, ER(ER_UNKNOWN_COM_ERROR), MYF(0));
- rc= 1;
+ auth_rc= 1;
}
else
- rc= acl_authenticate(thd, 0, packet_length);
+ auth_rc= acl_authenticate(thd, 0, packet_length);
mysql_audit_notify_connection_change_user(thd);
- if (rc)
+ if (auth_rc)
{
/* Free user if allocated by acl_authenticate */
my_free(thd->security_ctx->user);
@@ -1294,8 +1294,10 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
query_cache_end_of_result(thd);
mysql_audit_general(thd, MYSQL_AUDIT_GENERAL_STATUS,
- thd->stmt_da->is_error() ? thd->stmt_da->sql_errno()
- : 0, command_name[command].str);
+ thd->get_stmt_da()->is_error()
+ ? thd->get_stmt_da()->sql_errno()
+ : 0,
+ command_name[command].str);
ulong length= (ulong)(packet_end - beginning_of_next_stmt);
@@ -1331,10 +1333,10 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
(char *) thd->security_ctx->host_or_ip);
/* PSI begin */
- thd->m_statement_psi=
- MYSQL_START_STATEMENT(&thd->m_statement_state,
- com_statement_info[command].m_key,
- thd->db, thd->db_length);
+ thd->m_statement_psi= MYSQL_START_STATEMENT(&thd->m_statement_state,
+ com_statement_info[command].m_key,
+ thd->db, thd->db_length,
+ thd->charset());
THD_STAGE_INFO(thd, stage_init);
MYSQL_SET_STATEMENT_TEXT(thd->m_statement_psi, beginning_of_next_stmt,
length);
@@ -1427,6 +1429,9 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
thd->set_query(fields, query_length);
general_log_print(thd, command, "%s %s", table_list.table_name, fields);
+ if (open_temporary_tables(thd, &table_list))
+ break;
+
if (check_table_access(thd, SELECT_ACL, &table_list,
TRUE, UINT_MAX, FALSE))
break;
@@ -1451,7 +1456,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
/* We don't calculate statistics for this command */
general_log_print(thd, command, NullS);
net->error=0; // Don't give 'abort' message
- thd->stmt_da->disable_status(); // Don't send anything back
+ thd->get_stmt_da()->disable_status(); // Don't send anything back
error=TRUE; // End server
break;
#ifndef EMBEDDED_LIBRARY
@@ -1605,7 +1610,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
#else
(void) my_net_write(net, (uchar*) buff, length);
(void) net_flush(net);
- thd->stmt_da->disable_status();
+ thd->get_stmt_da()->disable_status();
#endif
break;
}
@@ -1681,7 +1686,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
mysql_audit_general(thd, MYSQL_AUDIT_GENERAL_RESULT, 0, 0);
mysql_audit_general(thd, MYSQL_AUDIT_GENERAL_STATUS,
- thd->stmt_da->is_error() ? thd->stmt_da->sql_errno() : 0,
+ thd->get_stmt_da()->is_error() ?
+ thd->get_stmt_da()->sql_errno() : 0,
command_name[command].str);
thd->update_all_stats();
@@ -2057,7 +2063,7 @@ bool sp_process_definer(THD *thd)
if (!is_acl_user(lex->definer->host.str, lex->definer->user.str))
{
push_warning_printf(thd,
- MYSQL_ERROR::WARN_LEVEL_NOTE,
+ Sql_condition::WARN_LEVEL_NOTE,
ER_NO_SUCH_USER,
ER(ER_NO_SUCH_USER),
lex->definer->user.str,
@@ -2204,12 +2210,12 @@ mysql_execute_command(THD *thd)
variables, but for now this is probably good enough.
*/
if ((sql_command_flags[lex->sql_command] & CF_DIAGNOSTIC_STMT) != 0)
- thd->warning_info->set_read_only(TRUE);
+ thd->get_stmt_da()->set_warning_info_read_only(TRUE);
else
{
- thd->warning_info->set_read_only(FALSE);
+ thd->get_stmt_da()->set_warning_info_read_only(FALSE);
if (all_tables)
- thd->warning_info->opt_clear_warning_info(thd->query_id);
+ thd->get_stmt_da()->opt_clear_warning_info(thd->query_id);
}
#ifdef HAVE_REPLICATION
@@ -2405,6 +2411,31 @@ mysql_execute_command(THD *thd)
goto error;
}
+ /*
+ Close tables open by HANDLERs before executing DDL statement
+ which is going to affect those tables.
+
+ This should happen before temporary tables are pre-opened as
+ otherwise we will get errors about attempt to re-open tables
+ if table to be changed is open through HANDLER.
+
+ Note that even although this is done before any privilege
+ checks there is no security problem here as closing open
+ HANDLER doesn't require any privileges anyway.
+ */
+ if (sql_command_flags[lex->sql_command] & CF_HA_CLOSE)
+ mysql_ha_rm_tables(thd, all_tables);
+
+ /*
+ Pre-open temporary tables to simplify privilege checking
+ for statements which need this.
+ */
+ if (sql_command_flags[lex->sql_command] & CF_PREOPEN_TMP_TABLES)
+ {
+ if (open_temporary_tables(thd, all_tables))
+ goto error;
+ }
+
switch (lex->sql_command) {
case SQLCOM_SHOW_EVENTS:
@@ -2412,13 +2443,6 @@ mysql_execute_command(THD *thd)
my_error(ER_NOT_SUPPORTED_YET, MYF(0), "embedded server");
break;
#endif
- case SQLCOM_SHOW_STATUS_PROC:
- case SQLCOM_SHOW_STATUS_FUNC:
- if ((res= check_table_access(thd, SELECT_ACL, all_tables, FALSE,
- UINT_MAX, FALSE)))
- goto error;
- res= execute_sqlcom_select(thd, all_tables);
- break;
case SQLCOM_SHOW_STATUS:
{
execute_show_status(thd, all_tables);
@@ -2451,6 +2475,8 @@ mysql_execute_command(THD *thd)
}
/* no break; fall through */
}
+ case SQLCOM_SHOW_STATUS_PROC:
+ case SQLCOM_SHOW_STATUS_FUNC:
case SQLCOM_SHOW_DATABASES:
case SQLCOM_SHOW_TABLES:
case SQLCOM_SHOW_TRIGGERS:
@@ -2555,16 +2581,16 @@ case SQLCOM_PREPARE:
case SQLCOM_SHOW_WARNS:
{
res= mysqld_show_warnings(thd, (ulong)
- ((1L << (uint) MYSQL_ERROR::WARN_LEVEL_NOTE) |
- (1L << (uint) MYSQL_ERROR::WARN_LEVEL_WARN) |
- (1L << (uint) MYSQL_ERROR::WARN_LEVEL_ERROR)
+ ((1L << (uint) Sql_condition::WARN_LEVEL_NOTE) |
+ (1L << (uint) Sql_condition::WARN_LEVEL_WARN) |
+ (1L << (uint) Sql_condition::WARN_LEVEL_ERROR)
));
break;
}
case SQLCOM_SHOW_ERRORS:
{
res= mysqld_show_warnings(thd, (ulong)
- (1L << (uint) MYSQL_ERROR::WARN_LEVEL_ERROR));
+ (1L << (uint) Sql_condition::WARN_LEVEL_ERROR));
break;
}
case SQLCOM_SHOW_PROFILES:
@@ -2634,7 +2660,7 @@ case SQLCOM_PREPARE:
mysql_mutex_lock(&LOCK_active_mi);
mi= master_info_index->get_master_info(&lex_mi->connection_name,
- MYSQL_ERROR::WARN_LEVEL_NOTE);
+ Sql_condition::WARN_LEVEL_NOTE);
if (mi == NULL)
{
@@ -2687,7 +2713,7 @@ case SQLCOM_PREPARE:
LEX_MASTER_INFO *lex_mi= &thd->lex->mi;
Master_info *mi;
mi= master_info_index->get_master_info(&lex_mi->connection_name,
- MYSQL_ERROR::WARN_LEVEL_ERROR);
+ Sql_condition::WARN_LEVEL_ERROR);
if (mi != NULL)
{
res= show_master_info(thd, mi, 0);
@@ -2795,9 +2821,6 @@ case SQLCOM_PREPARE:
}
#endif
- /* Close any open handlers for the table. */
- mysql_ha_rm_tables(thd, create_table);
-
if (select_lex->item_list.elements) // With select
{
select_result *result;
@@ -2845,7 +2868,7 @@ case SQLCOM_PREPARE:
*/
if (splocal_refs != thd->query_name_consts)
push_warning(thd,
- MYSQL_ERROR::WARN_LEVEL_WARN,
+ Sql_condition::WARN_LEVEL_WARN,
ER_UNKNOWN_ERROR,
"Invoked routine ran a statement that may cause problems with "
"binary log, see 'NAME_CONST issues' in 'Binary Logging of Stored Programs' "
@@ -2882,7 +2905,7 @@ case SQLCOM_PREPARE:
{
if (create_info.options & HA_LEX_CREATE_IF_NOT_EXISTS)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_TABLE_EXISTS_ERROR,
ER(ER_TABLE_EXISTS_ERROR),
create_info.alias);
@@ -2992,7 +3015,7 @@ end_with_restore_list:
res= mysql_alter_table(thd, first_table->db, first_table->table_name,
&create_info, first_table, &alter_info,
- 0, (ORDER*) 0, 0, 0);
+ 0, (ORDER*) 0, 0);
break;
}
#ifdef HAVE_REPLICATION
@@ -3008,7 +3031,7 @@ end_with_restore_list:
if ((mi= (master_info_index->
get_master_info(&lex_mi->connection_name,
- MYSQL_ERROR::WARN_LEVEL_ERROR))))
+ Sql_condition::WARN_LEVEL_ERROR))))
{
if (load_error)
{
@@ -3061,7 +3084,7 @@ end_with_restore_list:
mysql_mutex_lock(&LOCK_active_mi);
if ((mi= (master_info_index->
get_master_info(&lex_mi->connection_name,
- MYSQL_ERROR::WARN_LEVEL_ERROR))))
+ Sql_condition::WARN_LEVEL_ERROR))))
if (!stop_slave(thd, mi, 1/* net report*/))
my_ok(thd);
mysql_mutex_unlock(&LOCK_active_mi);
@@ -3152,6 +3175,13 @@ end_with_restore_list:
else
{
/*
+ Temporary tables should be opened for SHOW CREATE TABLE, but not
+ for SHOW CREATE VIEW.
+ */
+ if (open_temporary_tables(thd, all_tables))
+ goto error;
+
+ /*
The fact that check_some_access() returned FALSE does not mean that
access is granted. We need to check if first_table->grant.privilege
contains any table-specific privilege.
@@ -3329,6 +3359,18 @@ end_with_restore_list:
case SQLCOM_INSERT:
{
DBUG_ASSERT(first_table == all_tables && first_table != 0);
+
+ /*
+ Since INSERT DELAYED doesn't support temporary tables, we could
+ not pre-open temporary tables for SQLCOM_INSERT / SQLCOM_REPLACE.
+ Open them here instead.
+ */
+ if (first_table->lock_type != TL_WRITE_DELAYED)
+ {
+ if ((res= open_temporary_tables(thd, all_tables)))
+ break;
+ }
+
if ((res= insert_precheck(thd, all_tables)))
break;
@@ -3668,6 +3710,19 @@ end_with_restore_list:
thd->mdl_context.release_transactional_locks();
if (res)
goto error;
+
+ /*
+ Here we have to pre-open temporary tables for LOCK TABLES.
+
+ CF_PREOPEN_TMP_TABLES is not set for this SQL statement simply
+ because LOCK TABLES calls close_thread_tables() as a first thing
+ (it's called from unlock_locked_tables() above). So even if
+ CF_PREOPEN_TMP_TABLES was set and the tables would be pre-opened
+ in a usual way, they would have been closed.
+ */
+ if (open_temporary_tables(thd, all_tables))
+ goto error;
+
if (check_table_access(thd, LOCK_TABLES_ACL | SELECT_ACL, all_tables,
FALSE, UINT_MAX, FALSE))
goto error;
@@ -3970,7 +4025,7 @@ end_with_restore_list:
goto error;
if (specialflag & SPECIAL_NO_RESOLVE &&
hostname_requires_resolving(user->host.str))
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARN_HOSTNAME_WONT_WORK,
ER(ER_WARN_HOSTNAME_WONT_WORK));
// Are we trying to change a password of another user
@@ -4169,6 +4224,9 @@ end_with_restore_list:
DBUG_ASSERT(first_table == all_tables && first_table != 0);
if (check_table_access(thd, SELECT_ACL, all_tables, FALSE, UINT_MAX, FALSE))
goto error;
+ /* Close temporary tables which were pre-opened for privilege checking. */
+ close_thread_tables(thd);
+ all_tables->table= NULL;
res= mysql_ha_open(thd, first_table, 0);
break;
case SQLCOM_HA_CLOSE:
@@ -4376,7 +4434,7 @@ end_with_restore_list:
{
if (sp_grant_privileges(thd, lex->sphead->m_db.str, name,
lex->sql_command == SQLCOM_CREATE_PROCEDURE))
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
ER_PROC_AUTO_GRANT_FAIL, ER(ER_PROC_AUTO_GRANT_FAIL));
thd->clear_error();
}
@@ -4581,7 +4639,7 @@ create_sp_error:
{
if (lex->check_exists)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_SP_DOES_NOT_EXIST, ER(ER_SP_DOES_NOT_EXIST),
"FUNCTION (UDF)", lex->spname->m_name.str);
res= FALSE;
@@ -4634,7 +4692,7 @@ create_sp_error:
sp_revoke_privileges(thd, db, name,
lex->sql_command == SQLCOM_DROP_PROCEDURE))
{
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
ER_PROC_AUTO_REVOKE_FAIL,
ER(ER_PROC_AUTO_REVOKE_FAIL));
/* If this happens, an error should have been reported. */
@@ -4651,7 +4709,7 @@ create_sp_error:
if (lex->check_exists)
{
res= write_bin_log(thd, TRUE, thd->query(), thd->query_length());
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_SP_DOES_NOT_EXIST, ER(ER_SP_DOES_NOT_EXIST),
SP_COM_STRING(lex), lex->spname->m_qname.str);
if (!res)
@@ -4889,10 +4947,12 @@ create_sp_error:
/* fall through */
case SQLCOM_SIGNAL:
case SQLCOM_RESIGNAL:
- DBUG_ASSERT(lex->m_stmt != NULL);
- res= lex->m_stmt->execute(thd);
+ case SQLCOM_GET_DIAGNOSTICS:
+ DBUG_ASSERT(lex->m_sql_cmd != NULL);
+ res= lex->m_sql_cmd->execute(thd);
break;
default:
+
#ifndef EMBEDDED_LIBRARY
DBUG_ASSERT(0); /* Impossible */
#endif
@@ -4934,7 +4994,7 @@ finish:
if (thd->killed_errno())
{
/* If we already sent 'ok', we can ignore any kill query statements */
- if (! thd->stmt_da->is_set())
+ if (! thd->get_stmt_da()->is_set())
thd->send_kill_message();
}
if (thd->killed < KILL_CONNECTION)
@@ -4948,9 +5008,9 @@ finish:
else
{
/* If commit fails, we should be able to reset the OK status. */
- thd->stmt_da->can_overwrite_status= TRUE;
+ thd->get_stmt_da()->set_overwrite_status(true);
trans_commit_stmt(thd);
- thd->stmt_da->can_overwrite_status= FALSE;
+ thd->get_stmt_da()->set_overwrite_status(false);
}
#ifdef WITH_ARIA_STORAGE_ENGINE
ha_maria::implicit_commit(thd, FALSE);
@@ -4977,10 +5037,10 @@ finish:
/* No transaction control allowed in sub-statements. */
DBUG_ASSERT(! thd->in_sub_stmt);
/* If commit fails, we should be able to reset the OK status. */
- thd->stmt_da->can_overwrite_status= TRUE;
+ thd->get_stmt_da()->set_overwrite_status(true);
/* Commit the normal transaction if one is active. */
trans_commit_implicit(thd);
- thd->stmt_da->can_overwrite_status= FALSE;
+ thd->get_stmt_da()->set_overwrite_status(false);
thd->mdl_context.release_transactional_locks();
}
else if (! thd->in_sub_stmt && ! thd->in_multi_stmt_transaction_mode())
@@ -5046,7 +5106,7 @@ static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables)
mysqld_show_warnings().
*/
thd->lex->unit.print(&str, QT_TO_SYSTEM_CHARSET);
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_YES, str.c_ptr_safe());
}
if (res)
@@ -5504,6 +5564,12 @@ static bool check_show_access(THD *thd, TABLE_LIST *table)
DBUG_ASSERT(dst_table);
+ /*
+ Open temporary tables to be able to detect them during privilege check.
+ */
+ if (open_temporary_tables(thd, dst_table))
+ return TRUE;
+
if (check_access(thd, SELECT_ACL, dst_table->db,
&dst_table->grant.privilege,
&dst_table->grant.m_internal,
@@ -5517,6 +5583,9 @@ static bool check_show_access(THD *thd, TABLE_LIST *table)
if (check_grant(thd, SELECT_ACL, dst_table, TRUE, UINT_MAX, FALSE))
return TRUE; /* Access denied */
+ close_thread_tables(thd);
+ dst_table->table= NULL;
+
/* Access granted */
return FALSE;
}
@@ -5602,10 +5671,10 @@ check_table_access(THD *thd, ulong requirements,TABLE_LIST *tables,
DBUG_PRINT("info", ("derived: %d view: %d", tables->derived != 0,
tables->view != 0));
- if (tables->is_anonymous_derived_table() ||
- (tables->table && tables->table->s &&
- (int)tables->table->s->tmp_table))
+
+ if (tables->is_anonymous_derived_table())
continue;
+
thd->security_ctx= sctx;
if (check_access(thd, want_access, tables->get_db_name(),
@@ -5811,7 +5880,7 @@ bool check_stack_overrun(THD *thd, long margin,
return 1;
}
#ifndef DBUG_OFF
- max_stack_used= max(max_stack_used, stack_used);
+ max_stack_used= MY_MAX(max_stack_used, stack_used);
#endif
return 0;
}
@@ -5879,7 +5948,6 @@ void THD::reset_for_next_command(bool calculate_userstat)
DBUG_ENTER("mysql_reset_thd_for_next_command");
DBUG_ASSERT(!thd->spcont); /* not for substatements of routines */
DBUG_ASSERT(! thd->in_sub_stmt);
- DBUG_ASSERT(thd->transaction.on);
thd->free_list= 0;
thd->select_number= 1;
/*
@@ -5916,8 +5984,8 @@ void THD::reset_for_next_command(bool calculate_userstat)
thd->user_var_events_alloc= thd->mem_root;
}
thd->clear_error();
- thd->stmt_da->reset_diagnostics_area();
- thd->warning_info->reset_for_next_command();
+ thd->get_stmt_da()->reset_diagnostics_area();
+ thd->get_stmt_da()->reset_for_next_command();
thd->rand_used= 0;
thd->m_sent_row_count= thd->m_examined_row_count= 0;
thd->accessed_rows_and_keys= 0;
@@ -6144,7 +6212,7 @@ void mysql_parse(THD *thd, char *rawbuf, uint length,
{
LEX *lex= thd->lex;
- bool err= parse_sql(thd, parser_state, NULL);
+ bool err= parse_sql(thd, parser_state, NULL, true);
if (!err)
{
@@ -6252,7 +6320,7 @@ bool mysql_test_parse_for_slave(THD *thd, char *rawbuf, uint length)
lex_start(thd);
mysql_reset_thd_for_next_command(thd, opt_userstat_running);
- if (!parse_sql(thd, & parser_state, NULL) &&
+ if (!parse_sql(thd, & parser_state, NULL, true) &&
all_tables_not_ok(thd, lex->select_lex.table_list.first))
error= 1; /* Ignore question */
thd->end_statement();
@@ -6442,6 +6510,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
thr_lock_type lock_type,
enum_mdl_type mdl_type,
List<Index_hint> *index_hints_arg,
+ List<String> *partition_names,
LEX_STRING *option)
{
register TABLE_LIST *ptr;
@@ -6586,6 +6655,9 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd,
*/
table_list.link_in_list(ptr, &ptr->next_local);
ptr->next_name_resolution_table= NULL;
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ ptr->partition_names= partition_names;
+#endif /* WITH_PARTITION_STORAGE_ENGINE */
/* Link table in global list (all used tables) */
lex->add_to_query_tables(ptr);
@@ -7291,7 +7363,7 @@ bool check_simple_select()
char command[80];
Lex_input_stream *lip= & thd->m_parser_state->m_lip;
strmake(command, lip->yylval->symbol.str,
- min(lip->yylval->symbol.length, sizeof(command)-1));
+ MY_MIN(lip->yylval->symbol.length, sizeof(command)-1));
my_error(ER_CANT_USE_OPTION_HERE, MYF(0), command);
return 1;
}
@@ -7464,6 +7536,19 @@ bool multi_delete_precheck(THD *thd, TABLE_LIST *tables)
TABLE_LIST **save_query_tables_own_last= thd->lex->query_tables_own_last;
DBUG_ENTER("multi_delete_precheck");
+ /*
+ Temporary tables are pre-opened in 'tables' list only. Here we need to
+ initialize TABLE instances in 'aux_tables' list.
+ */
+ for (TABLE_LIST *tl= aux_tables; tl; tl= tl->next_global)
+ {
+ if (tl->table)
+ continue;
+
+ if (tl->correspondent_table)
+ tl->table= tl->correspondent_table->table;
+ }
+
/* sql_yacc guarantees that tables and aux_tables are not zero */
DBUG_ASSERT(aux_tables != 0);
if (check_table_access(thd, SELECT_ACL, tables, FALSE, UINT_MAX, FALSE))
@@ -7732,9 +7817,9 @@ bool create_table_precheck(THD *thd, TABLE_LIST *tables,
CREATE TABLE ... SELECT, also require INSERT.
*/
- want_priv= ((lex->create_info.options & HA_LEX_CREATE_TMP_TABLE) ?
- CREATE_TMP_ACL : CREATE_ACL) |
- (select_lex->item_list.elements ? INSERT_ACL : 0);
+ want_priv= (lex->create_info.options & HA_LEX_CREATE_TMP_TABLE) ?
+ CREATE_TMP_ACL :
+ (CREATE_ACL | (select_lex->item_list.elements ? INSERT_ACL : 0));
if (check_access(thd, want_priv, create_table->db,
&create_table->grant.privilege,
@@ -7743,11 +7828,48 @@ bool create_table_precheck(THD *thd, TABLE_LIST *tables,
goto err;
/* If it is a merge table, check privileges for merge children. */
- if (lex->create_info.merge_list.first &&
- check_table_access(thd, SELECT_ACL | UPDATE_ACL | DELETE_ACL,
- lex->create_info.merge_list.first,
- FALSE, UINT_MAX, FALSE))
- goto err;
+ if (lex->create_info.merge_list.first)
+ {
+ /*
+ The user must have (SELECT_ACL | UPDATE_ACL | DELETE_ACL) on the
+ underlying base tables, even if there are temporary tables with the same
+ names.
+
+ From user's point of view, it might look as if the user must have these
+ privileges on temporary tables to create a merge table over them. This is
+ one of two cases when a set of privileges is required for operations on
+ temporary tables (see also CREATE TABLE).
+
+ The reason for this behavior stems from the following facts:
+
+ - For merge tables, the underlying table privileges are checked only
+ at CREATE TABLE / ALTER TABLE time.
+
+ In other words, once a merge table is created, the privileges of
+ the underlying tables can be revoked, but the user will still have
+ access to the merge table (provided that the user has privileges on
+ the merge table itself).
+
+ - Temporary tables shadow base tables.
+
+ I.e. there might be temporary and base tables with the same name, and
+ the temporary table takes the precedence in all operations.
+
+ - For temporary MERGE tables we do not track if their child tables are
+ base or temporary. As result we can't guarantee that privilege check
+ which was done in presence of temporary child will stay relevant later
+ as this temporary table might be removed.
+
+ If SELECT_ACL | UPDATE_ACL | DELETE_ACL privileges were not checked for
+ the underlying *base* tables, it would create a security breach as in
+ Bug#12771903.
+ */
+
+ if (check_table_access(thd, SELECT_ACL | UPDATE_ACL | DELETE_ACL,
+ lex->create_info.merge_list.first,
+ FALSE, UINT_MAX, FALSE))
+ goto err;
+ }
if (want_priv != CREATE_TMP_ACL &&
check_grant(thd, want_priv, create_table, FALSE, 1, FALSE))
@@ -8105,14 +8227,13 @@ extern int MYSQLparse(void *thd); // from sql_yacc.cc
@retval TRUE on parsing error.
*/
-bool parse_sql(THD *thd,
- Parser_state *parser_state,
- Object_creation_ctx *creation_ctx)
+bool parse_sql(THD *thd, Parser_state *parser_state,
+ Object_creation_ctx *creation_ctx, bool do_pfs_digest)
{
bool ret_value;
DBUG_ENTER("parse_sql");
DBUG_ASSERT(thd->m_parser_state == NULL);
- DBUG_ASSERT(thd->lex->m_stmt == NULL);
+ DBUG_ASSERT(thd->lex->m_sql_cmd == NULL);
MYSQL_QUERY_PARSE_START(thd->query());
/* Backup creation context. */
@@ -8129,7 +8250,7 @@ bool parse_sql(THD *thd,
#ifdef HAVE_PSI_STATEMENT_DIGEST_INTERFACE
/* Start Digest */
thd->m_parser_state->m_lip.m_digest_psi=
- MYSQL_DIGEST_START(thd->m_statement_psi);
+ MYSQL_DIGEST_START(do_pfs_digest ? thd->m_statement_psi : NULL);
#endif
/* Parse the query. */
diff --git a/sql/sql_parse.h b/sql/sql_parse.h
index 346a3c8899b..84256aa2256 100644
--- a/sql/sql_parse.h
+++ b/sql/sql_parse.h
@@ -47,9 +47,8 @@ bool insert_precheck(THD *thd, TABLE_LIST *tables);
bool create_table_precheck(THD *thd, TABLE_LIST *tables,
TABLE_LIST *create_table);
-bool parse_sql(THD *thd,
- Parser_state *parser_state,
- Object_creation_ctx *creation_ctx);
+bool parse_sql(THD *thd, Parser_state *parser_state,
+ Object_creation_ctx *creation_ctx, bool do_pfs_digest=false);
void free_items(Item *item);
void cleanup_items(Item *item);
diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc
index f51bba83b75..9e4c48b47ff 100644
--- a/sql/sql_partition.cc
+++ b/sql/sql_partition.cc
@@ -59,6 +59,7 @@
#include <m_ctype.h>
#include "my_md5.h"
#include "transaction.h"
+#include "debug_sync.h"
#include "sql_base.h" // close_all_tables_for_name
#include "sql_table.h" // build_table_filename,
@@ -67,6 +68,11 @@
// mysql_*_alter_copy_data
#include "opt_range.h" // store_key_image_to_rec
#include "sql_analyse.h" // append_escaped
+#include "sql_alter.h" // Alter_table_ctx
+
+#include <algorithm>
+using std::max;
+using std::min;
#ifdef WITH_PARTITION_STORAGE_ENGINE
#include "ha_partition.h"
@@ -87,9 +93,7 @@ const LEX_STRING partition_keywords[]=
{ C_STRING_WITH_LEN("KEY") },
{ C_STRING_WITH_LEN("MAXVALUE") },
{ C_STRING_WITH_LEN("LINEAR ") },
- { C_STRING_WITH_LEN(" COLUMNS") },
- { C_STRING_WITH_LEN("ALGORITHM") }
-
+ { C_STRING_WITH_LEN(" COLUMNS") }
};
static const char *part_str= "PARTITION";
static const char *sub_str= "SUB";
@@ -189,7 +193,7 @@ static int cmp_rec_and_tuple_prune(part_column_list_val *val,
item New converted item
*/
-Item* convert_charset_partition_constant(Item *item, CHARSET_INFO *cs)
+Item* convert_charset_partition_constant(Item *item, const CHARSET_INFO *cs)
{
THD *thd= current_thd;
Name_resolution_context *context= &thd->lex->current_select->context;
@@ -207,21 +211,18 @@ Item* convert_charset_partition_constant(Item *item, CHARSET_INFO *cs)
}
-/*
- A support function to check if a name is in a list of strings
+/**
+ A support function to check if a name is in a list of strings.
- SYNOPSIS
- is_name_in_list()
- name String searched for
- list_names A list of names searched in
+ @param name String searched for
+ @param list_names A list of names searched in
- RETURN VALUES
- TRUE String found
- FALSE String not found
+ @return True if if the name is in the list.
+ @retval true String found
+ @retval false String not found
*/
-bool is_name_in_list(char *name,
- List<char> list_names)
+static bool is_name_in_list(char *name, List<char> list_names)
{
List_iterator<char> names_it(list_names);
uint num_names= list_names.elements;
@@ -288,61 +289,6 @@ bool partition_default_handling(TABLE *table, partition_info *part_info,
/*
- Check that the reorganized table will not have duplicate partitions.
-
- SYNOPSIS
- check_reorganise_list()
- new_part_info New partition info
- old_part_info Old partition info
- list_part_names The list of partition names that will go away and
- can be reused in the new table.
-
- RETURN VALUES
- TRUE Inacceptable name conflict detected.
- FALSE New names are OK.
-
- DESCRIPTION
- Can handle that the 'new_part_info' and 'old_part_info' the same
- in which case it checks that the list of names in the partitions
- doesn't contain any duplicated names.
-*/
-
-bool check_reorganise_list(partition_info *new_part_info,
- partition_info *old_part_info,
- List<char> list_part_names)
-{
- uint new_count, old_count;
- uint num_new_parts= new_part_info->partitions.elements;
- uint num_old_parts= old_part_info->partitions.elements;
- List_iterator<partition_element> new_parts_it(new_part_info->partitions);
- bool same_part_info= (new_part_info == old_part_info);
- DBUG_ENTER("check_reorganise_list");
-
- new_count= 0;
- do
- {
- List_iterator<partition_element> old_parts_it(old_part_info->partitions);
- char *new_name= (new_parts_it++)->partition_name;
- new_count++;
- old_count= 0;
- do
- {
- char *old_name= (old_parts_it++)->partition_name;
- old_count++;
- if (same_part_info && old_count == new_count)
- break;
- if (!(my_strcasecmp(system_charset_info, old_name, new_name)))
- {
- if (!is_name_in_list(old_name, list_part_names))
- DBUG_RETURN(TRUE);
- }
- } while (old_count < num_old_parts);
- } while (new_count < num_new_parts);
- DBUG_RETURN(FALSE);
-}
-
-
-/*
A useful routine used by update_row for partition handlers to calculate
the partition ids of the old and the new record.
@@ -370,7 +316,7 @@ int get_parts_for_update(const uchar *old_data, uchar *new_data,
longlong old_func_value;
DBUG_ENTER("get_parts_for_update");
- DBUG_ASSERT(new_data == rec0); // table->record[0]
+ DBUG_ASSERT(new_data == rec0);
set_field_ptr(part_field_array, old_data, rec0);
error= part_info->get_partition_id(part_info, old_part_id,
&old_func_value);
@@ -528,12 +474,12 @@ static bool set_up_field_array(TABLE *table,
}
if (num_fields > MAX_REF_PARTS)
{
- char *err_str;
+ char *ptr;
if (is_sub_part)
- err_str= (char*)"subpartition function";
+ ptr= (char*)"subpartition function";
else
- err_str= (char*)"partition function";
- my_error(ER_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR, MYF(0), err_str);
+ ptr= (char*)"partition function";
+ my_error(ER_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR, MYF(0), ptr);
DBUG_RETURN(TRUE);
}
if (num_fields == 0)
@@ -577,7 +523,13 @@ static bool set_up_field_array(TABLE *table,
} while (++inx < num_fields);
if (inx == num_fields)
{
- mem_alloc_error(1);
+ /*
+ Should not occur since it should already been checked in either
+ add_column_list_values, handle_list_of_fields,
+ check_partition_info etc.
+ */
+ DBUG_ASSERT(0);
+ my_error(ER_FIELD_NOT_FOUND_PART_ERROR, MYF(0));
result= TRUE;
continue;
}
@@ -741,7 +693,7 @@ end:
static void clear_indicator_in_key_fields(KEY *key_info)
{
KEY_PART_INFO *key_part;
- uint key_parts= key_info->key_parts, i;
+ uint key_parts= key_info->user_defined_key_parts, i;
for (i= 0, key_part=key_info->key_part; i < key_parts; i++, key_part++)
key_part->field->flags&= (~GET_FIXED_FIELDS_FLAG);
}
@@ -761,7 +713,7 @@ static void clear_indicator_in_key_fields(KEY *key_info)
static void set_indicator_in_key_fields(KEY *key_info)
{
KEY_PART_INFO *key_part;
- uint key_parts= key_info->key_parts, i;
+ uint key_parts= key_info->user_defined_key_parts, i;
for (i= 0, key_part=key_info->key_part; i < key_parts; i++, key_part++)
key_part->field->flags|= GET_FIXED_FIELDS_FLAG;
}
@@ -881,7 +833,7 @@ static bool handle_list_of_fields(List_iterator<char> it,
uint primary_key= table->s->primary_key;
if (primary_key != MAX_KEY)
{
- uint num_key_parts= table->key_info[primary_key].key_parts, i;
+ uint num_key_parts= table->key_info[primary_key].user_defined_key_parts, i;
/*
In the case of an empty list we use primary key as partition key.
*/
@@ -1074,7 +1026,7 @@ static bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table,
goto end;
}
else
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR,
ER(ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR));
}
@@ -1243,39 +1195,44 @@ void check_range_capable_PF(TABLE *table)
}
-/*
- Set up partition bitmap
+/**
+ Set up partition bitmaps
- SYNOPSIS
- set_up_partition_bitmap()
- thd Thread object
- part_info Reference to partitioning data structure
+ @param thd Thread object
+ @param part_info Reference to partitioning data structure
- RETURN VALUE
- TRUE Memory allocation failure
- FALSE Success
+ @return Operation status
+ @retval TRUE Memory allocation failure
+ @retval FALSE Success
- DESCRIPTION
- Allocate memory for bitmap of the partitioned table
+ Allocate memory for bitmaps of the partitioned table
and initialise it.
*/
-static bool set_up_partition_bitmap(THD *thd, partition_info *part_info)
+static bool set_up_partition_bitmaps(THD *thd, partition_info *part_info)
{
uint32 *bitmap_buf;
uint bitmap_bits= part_info->num_subparts?
(part_info->num_subparts* part_info->num_parts):
part_info->num_parts;
uint bitmap_bytes= bitmap_buffer_size(bitmap_bits);
- DBUG_ENTER("set_up_partition_bitmap");
+ DBUG_ENTER("set_up_partition_bitmaps");
+
+ DBUG_ASSERT(!part_info->bitmaps_are_initialized);
- if (!(bitmap_buf= (uint32*)thd->alloc(bitmap_bytes)))
+ /* Allocate for both read and lock_partitions */
+ if (!(bitmap_buf= (uint32*) alloc_root(&part_info->table->mem_root,
+ bitmap_bytes * 2)))
{
- mem_alloc_error(bitmap_bytes);
+ mem_alloc_error(bitmap_bytes * 2);
DBUG_RETURN(TRUE);
}
- bitmap_init(&part_info->used_partitions, bitmap_buf, bitmap_bytes*8, FALSE);
- bitmap_set_all(&part_info->used_partitions);
+ bitmap_init(&part_info->read_partitions, bitmap_buf, bitmap_bits, FALSE);
+ /* Use the second half of the allocated buffer for lock_partitions */
+ bitmap_init(&part_info->lock_partitions, bitmap_buf + (bitmap_bytes / 4),
+ bitmap_bits, FALSE);
+ part_info->bitmaps_are_initialized= TRUE;
+ part_info->set_partition_bitmaps(NULL);
DBUG_RETURN(FALSE);
}
@@ -1795,7 +1752,7 @@ bool fix_partition_func(THD *thd, TABLE *table,
(table->s->db_type()->partition_flags() & HA_CAN_PARTITION_UNIQUE))) &&
check_unique_keys(table)))
goto end;
- if (unlikely(set_up_partition_bitmap(thd, part_info)))
+ if (unlikely(set_up_partition_bitmaps(thd, part_info)))
goto end;
if (unlikely(part_info->set_up_charset_field_preps()))
{
@@ -1811,6 +1768,7 @@ bool fix_partition_func(THD *thd, TABLE *table,
set_up_partition_key_maps(table, part_info);
set_up_partition_func_pointers(part_info);
set_up_range_analysis_info(part_info);
+ table->file->set_part_info(part_info);
result= FALSE;
end:
thd->mark_used_columns= save_mark_used_columns;
@@ -1982,8 +1940,85 @@ static int add_quoted_string(File fptr, const char *quotestr)
return err + add_string(fptr, "'");
}
+/**
+  @brief Truncate the partition file name from a path if it exists.
+
+  @note A partition file name will contain one or more '#' characters.
+One of the occurrences of '#' will be either "#P#" or "#p#" depending
+on whether the storage engine has converted the filename to lower case.
+*/
+void truncate_partition_filename(char *path)
+{
+ if (path)
+ {
+ char* last_slash= strrchr(path, FN_LIBCHAR);
+
+ if (!last_slash)
+ last_slash= strrchr(path, FN_LIBCHAR2);
+
+ if (last_slash)
+ {
+ /* Look for a partition-type filename */
+ for (char* pound= strchr(last_slash, '#');
+ pound; pound = strchr(pound + 1, '#'))
+ {
+ if ((pound[1] == 'P' || pound[1] == 'p') && pound[2] == '#')
+ {
+ last_slash[0] = '\0'; /* truncate the file name */
+ break;
+ }
+ }
+ }
+ }
+}
+
+
+/**
+ @brief Output a filepath. Similar to add_keyword_string except it
+also converts \ to / on Windows and skips the partition file name at
+the end if found.
+
+  @note When MySQL sends a DATA DIRECTORY from SQL for partitions it does
+not use a file name, but it does for DATA DIRECTORY on a non-partitioned
+table. So when the storage engine is asked for the DATA DIRECTORY string
+after a restart through Handler::update_create_options(), the storage
+engine may include the filename.
+*/
+static int add_keyword_path(File fptr, const char *keyword,
+ const char *path)
+{
+ int err= add_string(fptr, keyword);
+
+ err+= add_space(fptr);
+ err+= add_equal(fptr);
+ err+= add_space(fptr);
+
+ char temp_path[FN_REFLEN];
+ strcpy(temp_path, path);
+#ifdef __WIN__
+ /* Convert \ to / to be able to create table on unix */
+ char *pos, *end;
+ uint length= strlen(temp_path);
+ for (pos= temp_path, end= pos+length ; pos < end ; pos++)
+ {
+ if (*pos == '\\')
+ *pos = '/';
+ }
+#endif
+
+ /*
+ If the partition file name with its "#P#" identifier
+ is found after the last slash, truncate that filename.
+ */
+ truncate_partition_filename(temp_path);
+
+ err+= add_quoted_string(fptr, temp_path);
+
+ return err + add_space(fptr);
+}
+
static int add_keyword_string(File fptr, const char *keyword,
- bool should_use_quotes,
+ bool should_use_quotes,
const char *keystr)
{
int err= add_string(fptr, keyword);
@@ -2034,11 +2069,9 @@ static int add_partition_options(File fptr, partition_element *p_elem)
if (!(current_thd->variables.sql_mode & MODE_NO_DIR_IN_CREATE))
{
if (p_elem->data_file_name)
- err+= add_keyword_string(fptr, "DATA DIRECTORY", TRUE,
- p_elem->data_file_name);
+ err+= add_keyword_path(fptr, "DATA DIRECTORY", p_elem->data_file_name);
if (p_elem->index_file_name)
- err+= add_keyword_string(fptr, "INDEX DIRECTORY", TRUE,
- p_elem->index_file_name);
+ err+= add_keyword_path(fptr, "INDEX DIRECTORY", p_elem->index_file_name);
}
if (p_elem->part_comment)
err+= add_keyword_string(fptr, "COMMENT", TRUE, p_elem->part_comment);
@@ -2188,7 +2221,7 @@ static int add_column_list_values(File fptr, partition_info *part_info,
else
{
String *res;
- CHARSET_INFO *field_cs;
+ const CHARSET_INFO *field_cs;
bool need_cs_check= FALSE;
Item_result result_type= STRING_RESULT;
@@ -2344,58 +2377,6 @@ end:
return err;
}
-
-/**
- Add 'KEY' word, with optional 'ALGORTIHM = N'.
-
- @param fptr File to write to.
- @param part_info partition_info holding the used key_algorithm
- @param current_comment_start NULL, or comment string encapsulating the
- PARTITION BY clause.
-
- @return Operation status.
- @retval 0 Success
- @retval != 0 Failure
-*/
-
-static int add_key_with_algorithm(File fptr, partition_info *part_info,
- const char *current_comment_start)
-{
- int err= 0;
- err+= add_part_key_word(fptr, partition_keywords[PKW_KEY].str);
-
- /*
- current_comment_start is given when called from SHOW CREATE TABLE,
- Then only add ALGORITHM = 1, not the default 2 or non-set 0!
- For .frm current_comment_start is NULL, then add ALGORITHM if != 0.
- */
- if (part_info->key_algorithm == partition_info::KEY_ALGORITHM_51 || // SHOW
- (!current_comment_start && // .frm
- (part_info->key_algorithm != partition_info::KEY_ALGORITHM_NONE)))
- {
- /* If we already are within a comment, end that comment first. */
- if (current_comment_start)
- err+= add_string(fptr, "*/ ");
- err+= add_string(fptr, "/*!50531 ");
- err+= add_part_key_word(fptr, partition_keywords[PKW_ALGORITHM].str);
- err+= add_equal(fptr);
- err+= add_space(fptr);
- err+= add_int(fptr, part_info->key_algorithm);
- err+= add_space(fptr);
- err+= add_string(fptr, "*/ ");
- if (current_comment_start)
- {
- /* Skip new line. */
- if (current_comment_start[0] == '\n')
- current_comment_start++;
- err+= add_string(fptr, current_comment_start);
- err+= add_space(fptr);
- }
- }
- return err;
-}
-
-
/*
Generate the partition syntax from the partition data structure.
Useful for support of generating defaults, SHOW CREATE TABLES
@@ -2440,8 +2421,7 @@ char *generate_partition_syntax(partition_info *part_info,
bool use_sql_alloc,
bool show_partition_options,
HA_CREATE_INFO *create_info,
- Alter_info *alter_info,
- const char *current_comment_start)
+ Alter_info *alter_info)
{
uint i,j, tot_num_parts, num_subparts;
partition_element *part_elem;
@@ -2475,8 +2455,7 @@ char *generate_partition_syntax(partition_info *part_info,
err+= add_string(fptr, partition_keywords[PKW_LINEAR].str);
if (part_info->list_of_part_fields)
{
- err+= add_key_with_algorithm(fptr, part_info,
- current_comment_start);
+ err+= add_part_key_word(fptr, partition_keywords[PKW_KEY].str);
err+= add_part_field_list(fptr, part_info->part_field_list);
}
else
@@ -2516,9 +2495,8 @@ char *generate_partition_syntax(partition_info *part_info,
err+= add_string(fptr, partition_keywords[PKW_LINEAR].str);
if (part_info->list_of_subpart_fields)
{
- err+= add_key_with_algorithm(fptr, part_info,
- current_comment_start);
- err+= add_part_field_list(fptr, part_info->subpart_field_list);
+ add_part_key_word(fptr, partition_keywords[PKW_KEY].str);
+ add_part_field_list(fptr, part_info->subpart_field_list);
}
else
err+= add_part_key_word(fptr, partition_keywords[PKW_HASH].str);
@@ -2702,114 +2680,13 @@ static inline int part_val_int(Item *item_expr, longlong *result)
We have a set of support functions for these 14 variants. There are 4
variants of hash functions and there is a function for each. The KEY
- partitioning uses the function calculate_key_value to calculate the hash
+ partitioning uses the function calculate_key_hash_value to calculate the hash
value based on an array of fields. The linear hash variants uses the
method get_part_id_from_linear_hash to get the partition id using the
hash value and some parameters calculated from the number of partitions.
*/
/*
- Calculate hash value for KEY partitioning using an array of fields.
-
- SYNOPSIS
- calculate_key_value()
- field_array An array of the fields in KEY partitioning
-
- RETURN VALUE
- hash_value calculated
-
- DESCRIPTION
- Uses the hash function on the character set of the field. Integer and
- floating point fields use the binary character set by default.
-*/
-
-static uint32 calculate_key_value(Field **field_array)
-{
- ulong nr1= 1;
- ulong nr2= 4;
- bool use_51_hash;
- use_51_hash= test((*field_array)->table->part_info->key_algorithm ==
- partition_info::KEY_ALGORITHM_51);
-
- do
- {
- Field *field= *field_array;
- if (use_51_hash)
- {
- switch (field->real_type()) {
- case MYSQL_TYPE_TINY:
- case MYSQL_TYPE_SHORT:
- case MYSQL_TYPE_LONG:
- case MYSQL_TYPE_FLOAT:
- case MYSQL_TYPE_DOUBLE:
- case MYSQL_TYPE_NEWDECIMAL:
- case MYSQL_TYPE_TIMESTAMP:
- case MYSQL_TYPE_LONGLONG:
- case MYSQL_TYPE_INT24:
- case MYSQL_TYPE_TIME:
- case MYSQL_TYPE_DATETIME:
- case MYSQL_TYPE_YEAR:
- case MYSQL_TYPE_NEWDATE:
- {
- if (field->is_null())
- {
- nr1^= (nr1 << 1) | 1;
- continue;
- }
- /* Force this to my_hash_sort_bin, which was used in 5.1! */
- uint len= field->pack_length();
- my_charset_bin.coll->hash_sort(&my_charset_bin, field->ptr, len,
- &nr1, &nr2);
- /* Done with this field, continue with next one. */
- continue;
- }
- case MYSQL_TYPE_STRING:
- case MYSQL_TYPE_VARCHAR:
- case MYSQL_TYPE_BIT:
- /* Not affected, same in 5.1 and 5.5 */
- break;
- /*
- ENUM/SET uses my_hash_sort_simple in 5.1 (i.e. my_charset_latin1)
- and my_hash_sort_bin in 5.5!
- */
- case MYSQL_TYPE_ENUM:
- case MYSQL_TYPE_SET:
- {
- if (field->is_null())
- {
- nr1^= (nr1 << 1) | 1;
- continue;
- }
- /* Force this to my_hash_sort_bin, which was used in 5.1! */
- uint len= field->pack_length();
- my_charset_latin1.coll->hash_sort(&my_charset_latin1, field->ptr,
- len, &nr1, &nr2);
- continue;
- }
- /* These types should not be allowed for partitioning! */
- case MYSQL_TYPE_NULL:
- case MYSQL_TYPE_DECIMAL:
- case MYSQL_TYPE_DATE:
- case MYSQL_TYPE_TINY_BLOB:
- case MYSQL_TYPE_MEDIUM_BLOB:
- case MYSQL_TYPE_LONG_BLOB:
- case MYSQL_TYPE_BLOB:
- case MYSQL_TYPE_VAR_STRING:
- case MYSQL_TYPE_GEOMETRY:
- /* fall through. */
- default:
- DBUG_ASSERT(0); // New type?
- /* Fall through for default hashing (5.5). */
- }
- /* fall through, use collation based hashing. */
- }
- field->hash(&nr1, &nr2);
- } while (*(++field_array));
- return (uint32) nr1;
-}
-
-
-/*
A simple support function to calculate part_id given local part and
sub part.
@@ -2896,25 +2773,25 @@ static int get_part_id_linear_hash(partition_info *part_info,
}
-/*
+/**
Calculate part_id for (SUB)PARTITION BY KEY
- SYNOPSIS
- get_part_id_key()
- field_array Array of fields for PARTTION KEY
- num_parts Number of KEY partitions
+ @param file Handler to storage engine
+  @param field_array   Array of fields for PARTITION KEY
+ @param num_parts Number of KEY partitions
+ @param func_value[out] Returns calculated hash value
- RETURN VALUE
- Calculated partition id
+ @return Calculated partition id
*/
inline
-static uint32 get_part_id_key(Field **field_array,
+static uint32 get_part_id_key(handler *file,
+ Field **field_array,
uint num_parts,
longlong *func_value)
{
DBUG_ENTER("get_part_id_key");
- *func_value= calculate_key_value(field_array);
+ *func_value= ha_partition::calculate_key_hash_value(field_array);
DBUG_RETURN((uint32) (*func_value % num_parts));
}
@@ -2941,7 +2818,7 @@ static uint32 get_part_id_linear_key(partition_info *part_info,
{
DBUG_ENTER("get_part_id_linear_key");
- *func_value= calculate_key_value(field_array);
+ *func_value= ha_partition::calculate_key_hash_value(field_array);
DBUG_RETURN(get_part_id_from_linear_hash(*func_value,
part_info->linear_hash_mask,
num_parts));
@@ -3629,7 +3506,8 @@ int get_partition_id_key_nosub(partition_info *part_info,
uint32 *part_id,
longlong *func_value)
{
- *part_id= get_part_id_key(part_info->part_field_array,
+ *part_id= get_part_id_key(part_info->table->file,
+ part_info->part_field_array,
part_info->num_parts, func_value);
return 0;
}
@@ -3719,7 +3597,8 @@ int get_partition_id_key_sub(partition_info *part_info,
uint32 *part_id)
{
longlong func_value;
- *part_id= get_part_id_key(part_info->subpart_field_array,
+ *part_id= get_part_id_key(part_info->table->file,
+ part_info->subpart_field_array,
part_info->num_subparts, &func_value);
return FALSE;
}
@@ -3956,6 +3835,92 @@ void get_full_part_id_from_key(const TABLE *table, uchar *buf,
DBUG_VOID_RETURN;
}
+
+/**
+ @brief Verify that all rows in a table is in the given partition
+
+ @param table Table which contains the data that will be checked if
+ it is matching the partition definition.
+ @param part_table Partitioned table containing the partition to check.
+ @param part_id Which partition to match with.
+
+ @return Operation status
+ @retval TRUE Not all rows match the given partition
+ @retval FALSE OK
+*/
+bool verify_data_with_partition(TABLE *table, TABLE *part_table,
+ uint32 part_id)
+{
+ uint32 found_part_id;
+ longlong func_value; /* Unused */
+ handler *file;
+ int error;
+ uchar *old_rec;
+ partition_info *part_info;
+ DBUG_ENTER("verify_data_with_partition");
+ DBUG_ASSERT(table && table->file && part_table && part_table->part_info &&
+ part_table->file);
+
+ /*
+ Verify all table rows.
+ First implementation uses full scan + evaluates partition functions for
+ every row. TODO: add optimization to use index if possible, see WL#5397.
+
+ 1) Open both tables (already done) and set the row buffers to use
+ the same buffer (to avoid copy).
+ 2) Init rnd on table.
+ 3) loop over all rows.
+ 3.1) verify that partition_id on the row is correct. Break if error.
+ */
+ file= table->file;
+ part_info= part_table->part_info;
+ bitmap_union(table->read_set, &part_info->full_part_field_set);
+ old_rec= part_table->record[0];
+ part_table->record[0]= table->record[0];
+ set_field_ptr(part_info->full_part_field_array, table->record[0], old_rec);
+ if ((error= file->ha_rnd_init(TRUE)))
+ {
+ file->print_error(error, MYF(0));
+ goto err;
+ }
+
+ do
+ {
+ if ((error= file->ha_rnd_next(table->record[0])))
+ {
+ if (error == HA_ERR_RECORD_DELETED)
+ continue;
+ if (error == HA_ERR_END_OF_FILE)
+ error= 0;
+ else
+ file->print_error(error, MYF(0));
+ break;
+ }
+ if ((error= part_info->get_partition_id(part_info, &found_part_id,
+ &func_value)))
+ {
+ part_table->file->print_error(error, MYF(0));
+ break;
+ }
+ DEBUG_SYNC(current_thd, "swap_partition_first_row_read");
+ if (found_part_id != part_id)
+ {
+ my_error(ER_ROW_DOES_NOT_MATCH_PARTITION, MYF(0));
+ error= 1;
+ break;
+ }
+ } while (TRUE);
+ (void) file->ha_rnd_end();
+err:
+ set_field_ptr(part_info->full_part_field_array, old_rec,
+ table->record[0]);
+ part_table->record[0]= old_rec;
+ if (error)
+ DBUG_RETURN(TRUE);
+ DBUG_RETURN(FALSE);
+}
+
+
/*
Prune the set of partitions to use in query
@@ -3966,7 +3931,7 @@ void get_full_part_id_from_key(const TABLE *table, uchar *buf,
DESCRIPTION
This function is called to prune the range of partitions to scan by
- checking the used_partitions bitmap.
+ checking the read_partitions bitmap.
If start_part > end_part at return it means no partition needs to be
scanned. If start_part == end_part it always means a single partition
needs to be scanned.
@@ -3983,7 +3948,7 @@ void prune_partition_set(const TABLE *table, part_id_range *part_spec)
DBUG_ENTER("prune_partition_set");
for (i= part_spec->start_part; i <= part_spec->end_part; i++)
{
- if (bitmap_is_set(&(part_info->used_partitions), i))
+ if (bitmap_is_set(&(part_info->read_partitions), i))
{
DBUG_PRINT("info", ("Partition %d is set", i));
if (last_partition == -1)
@@ -4065,7 +4030,7 @@ void get_partition_set(const TABLE *table, uchar *buf, const uint index,
*/
get_full_part_id_from_key(table,buf,key_info,key_spec,part_spec);
/*
- Check if range can be adjusted by looking in used_partitions
+ Check if range can be adjusted by looking in read_partitions
*/
prune_partition_set(table, part_spec);
DBUG_VOID_RETURN;
@@ -4117,7 +4082,7 @@ void get_partition_set(const TABLE *table, uchar *buf, const uint index,
get_full_part_id_from_key(table,buf,key_info,key_spec,part_spec);
clear_indicator_in_key_fields(key_info);
/*
- Check if range can be adjusted by looking in used_partitions
+ Check if range can be adjusted by looking in read_partitions
*/
prune_partition_set(table, part_spec);
DBUG_VOID_RETURN;
@@ -4187,7 +4152,7 @@ void get_partition_set(const TABLE *table, uchar *buf, const uint index,
if (found_part_field)
clear_indicator_in_key_fields(key_info);
/*
- Check if range can be adjusted by looking in used_partitions
+ Check if range can be adjusted by looking in read_partitions
*/
prune_partition_set(table, part_spec);
DBUG_VOID_RETURN;
@@ -4258,9 +4223,11 @@ bool mysql_unpack_partition(THD *thd,
{
bool result= TRUE;
partition_info *part_info;
- CHARSET_INFO *old_character_set_client= thd->variables.character_set_client;
+ const CHARSET_INFO *old_character_set_client=
+ thd->variables.character_set_client;
LEX *old_lex= thd->lex;
LEX lex;
+ PSI_statement_locker *parent_locker= thd->m_statement_psi;
DBUG_ENTER("mysql_unpack_partition");
thd->variables.character_set_client= system_charset_info;
@@ -4290,12 +4257,16 @@ bool mysql_unpack_partition(THD *thd,
}
part_info= lex.part_info;
DBUG_PRINT("info", ("Parse: %s", part_buf));
+
+ thd->m_statement_psi= NULL;
if (parse_sql(thd, & parser_state, NULL) ||
part_info->fix_parser_data(thd))
{
thd->free_items();
+ thd->m_statement_psi= parent_locker;
goto end;
}
+ thd->m_statement_psi= parent_locker;
/*
The parsed syntax residing in the frm file can still contain defaults.
The reason is that the frm file is sometimes saved outside of this
@@ -4335,6 +4306,7 @@ bool mysql_unpack_partition(THD *thd,
*work_part_info_used= true;
}
table->part_info= part_info;
+ part_info->table= table;
table->file->set_part_info(part_info);
if (!part_info->default_engine_type)
part_info->default_engine_type= default_db_type;
@@ -4552,7 +4524,7 @@ bool set_part_state(Alter_info *alter_info, partition_info *tab_part_info,
do
{
partition_element *part_elem= part_it++;
- if ((alter_info->flags & ALTER_ALL_PARTITION) ||
+ if ((alter_info->flags & Alter_info::ALTER_ALL_PARTITION) ||
(is_name_in_list(part_elem->partition_name,
alter_info->partition_names)))
{
@@ -4571,7 +4543,7 @@ bool set_part_state(Alter_info *alter_info, partition_info *tab_part_info,
} while (++part_count < tab_part_info->num_parts);
if (num_parts_found != alter_info->partition_names.elements &&
- !(alter_info->flags & ALTER_ALL_PARTITION))
+ !(alter_info->flags & Alter_info::ALTER_ALL_PARTITION))
{
/* Not all given partitions found, revert and return failure */
part_it.rewind();
@@ -4588,16 +4560,60 @@ bool set_part_state(Alter_info *alter_info, partition_info *tab_part_info,
/**
+  @brief Check if partition is exchangeable with table by checking table options
+
+ @param table_create_info Table options from table.
+ @param part_elem All the info of the partition.
+
+ @retval FALSE if they are equal, otherwise TRUE.
+
+  @note Any difference that would cause a change in the frm file is prohibited.
+ Such options as data_file_name, index_file_name, min_rows, max_rows etc. are
+ not allowed to differ. But comment is allowed to differ.
+*/
+bool compare_partition_options(HA_CREATE_INFO *table_create_info,
+ partition_element *part_elem)
+{
+#define MAX_COMPARE_PARTITION_OPTION_ERRORS 5
+ const char *option_diffs[MAX_COMPARE_PARTITION_OPTION_ERRORS + 1];
+ int i, errors= 0;
+ DBUG_ENTER("compare_partition_options");
+ DBUG_ASSERT(!part_elem->tablespace_name &&
+ !table_create_info->tablespace);
+
+ /*
+ Note that there are not yet any engine supporting tablespace together
+ with partitioning. TODO: when there are, add compare.
+ */
+ if (part_elem->tablespace_name || table_create_info->tablespace)
+ option_diffs[errors++]= "TABLESPACE";
+ if (part_elem->part_max_rows != table_create_info->max_rows)
+ option_diffs[errors++]= "MAX_ROWS";
+ if (part_elem->part_min_rows != table_create_info->min_rows)
+ option_diffs[errors++]= "MIN_ROWS";
+ if (part_elem->data_file_name || table_create_info->data_file_name)
+ option_diffs[errors++]= "DATA DIRECTORY";
+ if (part_elem->index_file_name || table_create_info->index_file_name)
+ option_diffs[errors++]= "INDEX DIRECTORY";
+
+ for (i= 0; i < errors; i++)
+ my_error(ER_PARTITION_EXCHANGE_DIFFERENT_OPTION, MYF(0),
+ option_diffs[i]);
+ DBUG_RETURN(errors != 0);
+}
+
+
+/*
Prepare for ALTER TABLE of partition structure
@param[in] thd Thread object
@param[in] table Table object
@param[in,out] alter_info Alter information
@param[in,out] create_info Create info for CREATE TABLE
- @param[in] old_db_type Old engine type
+ @param[in] alter_ctx ALTER TABLE runtime context
@param[out] partition_changed Boolean indicating whether partition changed
- @param[out] fast_alter_table Internal temporary table allowing fast
- partition change or NULL if not possible
+ @param[out] fast_alter_table Boolean indicating if fast partition alter is
+ possible.
@return Operation status
@retval TRUE Error
@@ -4615,22 +4631,26 @@ bool set_part_state(Alter_info *alter_info, partition_info *tab_part_info,
uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
HA_CREATE_INFO *create_info,
- handlerton *old_db_type,
+ Alter_table_ctx *alter_ctx,
bool *partition_changed,
- char *db,
- const char *table_name,
- const char *path,
- TABLE **fast_alter_table)
+ bool *fast_alter_table)
{
- TABLE *new_table= NULL;
DBUG_ENTER("prep_alter_part_table");
/* Foreign keys on partitioned tables are not supported, waits for WL#148 */
- if (table->part_info && (alter_info->flags & ALTER_FOREIGN_KEY))
+ if (table->part_info && (alter_info->flags & Alter_info::ADD_FOREIGN_KEY ||
+ alter_info->flags & Alter_info::DROP_FOREIGN_KEY))
{
my_error(ER_FOREIGN_KEY_ON_PARTITIONED, MYF(0));
DBUG_RETURN(TRUE);
}
+ /* Remove partitioning on a not partitioned table is not possible */
+ if (!table->part_info && (alter_info->flags &
+ Alter_info::ALTER_REMOVE_PARTITIONING))
+ {
+ my_error(ER_PARTITION_MGMT_ON_NONPARTITIONED, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
thd->work_part_info= thd->lex->part_info;
@@ -4639,12 +4659,15 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
DBUG_RETURN(TRUE);
/* ALTER_ADMIN_PARTITION is handled in mysql_admin_table */
- DBUG_ASSERT(!(alter_info->flags & ALTER_ADMIN_PARTITION));
+ DBUG_ASSERT(!(alter_info->flags & Alter_info::ALTER_ADMIN_PARTITION));
if (alter_info->flags &
- (ALTER_ADD_PARTITION | ALTER_DROP_PARTITION |
- ALTER_COALESCE_PARTITION | ALTER_REORGANIZE_PARTITION |
- ALTER_TABLE_REORG | ALTER_REBUILD_PARTITION))
+ (Alter_info::ALTER_ADD_PARTITION |
+ Alter_info::ALTER_DROP_PARTITION |
+ Alter_info::ALTER_COALESCE_PARTITION |
+ Alter_info::ALTER_REORGANIZE_PARTITION |
+ Alter_info::ALTER_TABLE_REORG |
+ Alter_info::ALTER_REBUILD_PARTITION))
{
partition_info *tab_part_info;
partition_info *alt_part_info= thd->work_part_info;
@@ -4666,30 +4689,31 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
Open it as a copy of the original table, and modify its partition_info
object to allow fast_alter_partition_table to perform the changes.
*/
- DBUG_ASSERT(thd->mdl_context.is_lock_owner(MDL_key::TABLE, db, table_name,
+ DBUG_ASSERT(thd->mdl_context.is_lock_owner(MDL_key::TABLE,
+ alter_ctx->db,
+ alter_ctx->table_name,
MDL_INTENTION_EXCLUSIVE));
- new_table= open_table_uncached(thd, old_db_type, path, db, table_name, 0);
- if (!new_table)
- DBUG_RETURN(TRUE);
- /*
- This table may be used for copy rows between partitions
- and also read/write columns when fixing the partition_info struct.
- */
- new_table->use_all_columns();
-
- tab_part_info= new_table->part_info;
+ tab_part_info= table->part_info;
- if (alter_info->flags & ALTER_TABLE_REORG)
+ if (alter_info->flags & Alter_info::ALTER_TABLE_REORG)
{
uint new_part_no, curr_part_no;
+ /*
+ 'ALTER TABLE t REORG PARTITION' only allowed with auto partition
+ if default partitioning is used.
+ */
+
if (tab_part_info->part_type != HASH_PARTITION ||
- tab_part_info->use_default_num_partitions)
+ ((table->s->db_type()->partition_flags() & HA_USE_AUTO_PARTITION) &&
+ !tab_part_info->use_default_num_partitions) ||
+ ((!(table->s->db_type()->partition_flags() & HA_USE_AUTO_PARTITION)) &&
+ tab_part_info->use_default_num_partitions))
{
my_error(ER_REORG_NO_PARAM_ERROR, MYF(0));
goto err;
}
- new_part_no= new_table->file->get_default_no_partitions(create_info);
+ new_part_no= table->file->get_default_no_partitions(create_info);
curr_part_no= tab_part_info->num_parts;
if (new_part_no == curr_part_no)
{
@@ -4698,7 +4722,23 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
after the change as before. Thus we can reply ok immediately
without any changes at all.
*/
- *fast_alter_table= new_table;
+ flags= table->file->alter_table_flags(alter_info->flags);
+ if (flags & (HA_FAST_CHANGE_PARTITION | HA_PARTITION_ONE_PHASE))
+ {
+ *fast_alter_table= true;
+ /* Force table re-open for consistency with the main case. */
+ table->m_needs_reopen= true;
+ }
+ else
+ {
+ /*
+ Create copy of partition_info to avoid modifying original
+ TABLE::part_info, to keep it safe for later use.
+ */
+ if (!(tab_part_info= tab_part_info->get_clone()))
+ DBUG_RETURN(TRUE);
+ }
+
thd->work_part_info= tab_part_info;
DBUG_RETURN(FALSE);
}
@@ -4708,7 +4748,7 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
We will add more partitions, we use the ADD PARTITION without
setting the flag for no default number of partitions
*/
- alter_info->flags|= ALTER_ADD_PARTITION;
+ alter_info->flags|= Alter_info::ALTER_ADD_PARTITION;
thd->work_part_info->num_parts= new_part_no - curr_part_no;
}
else
@@ -4717,21 +4757,41 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
We will remove hash partitions, we use the COALESCE PARTITION
without setting the flag for no default number of partitions
*/
- alter_info->flags|= ALTER_COALESCE_PARTITION;
+ alter_info->flags|= Alter_info::ALTER_COALESCE_PARTITION;
alter_info->num_parts= curr_part_no - new_part_no;
}
}
- if (!(flags= new_table->file->alter_table_flags(alter_info->flags)))
+ if (!(flags= table->file->alter_table_flags(alter_info->flags)))
{
my_error(ER_PARTITION_FUNCTION_FAILURE, MYF(0));
goto err;
}
if ((flags & (HA_FAST_CHANGE_PARTITION | HA_PARTITION_ONE_PHASE)) != 0)
- *fast_alter_table= new_table;
- DBUG_PRINT("info", ("*fast_alter_table: %p flags: 0x%x",
- *fast_alter_table, flags));
- if ((alter_info->flags & ALTER_ADD_PARTITION) ||
- (alter_info->flags & ALTER_REORGANIZE_PARTITION))
+ {
+ /*
+ "Fast" change of partitioning is supported in this case.
+ We will change TABLE::part_info (as this is how we pass
+ information to storage engine in this case), so the table
+ must be reopened.
+ */
+ *fast_alter_table= true;
+ table->m_needs_reopen= true;
+ }
+ else
+ {
+ /*
+ "Fast" changing of partitioning is not supported. Create
+ a copy of TABLE::part_info object, so we can modify it safely.
+ Modifying original TABLE::part_info will cause problems when
+ we read data from old version of table using this TABLE object
+ while copying them to new version of table.
+ */
+ if (!(tab_part_info= tab_part_info->get_clone()))
+ DBUG_RETURN(TRUE);
+ }
+ DBUG_PRINT("info", ("*fast_alter_table flags: 0x%x", flags));
+ if ((alter_info->flags & Alter_info::ALTER_ADD_PARTITION) ||
+ (alter_info->flags & Alter_info::ALTER_REORGANIZE_PARTITION))
{
if (thd->work_part_info->part_type != tab_part_info->part_type)
{
@@ -4798,7 +4858,7 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
goto err;
}
}
- if (alter_info->flags & ALTER_ADD_PARTITION)
+ if (alter_info->flags & Alter_info::ALTER_ADD_PARTITION)
{
/*
We start by moving the new partitions to the list of temporary
@@ -4849,7 +4909,7 @@ uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
}
alt_part_info->part_type= tab_part_info->part_type;
alt_part_info->subpart_type= tab_part_info->subpart_type;
- if (alt_part_info->set_up_defaults_for_partitioning(new_table->file, 0,
+ if (alt_part_info->set_up_defaults_for_partitioning(table->file, 0,
tab_part_info->num_parts))
{
goto err;
@@ -5037,7 +5097,7 @@ that are reorganised.
of partitions anymore. We use this code also for Table reorganisations
and here we don't set any default flags to FALSE.
*/
- if (!(alter_info->flags & ALTER_TABLE_REORG))
+ if (!(alter_info->flags & Alter_info::ALTER_TABLE_REORG))
{
if (!alt_part_info->use_default_partitions)
{
@@ -5048,7 +5108,7 @@ that are reorganised.
tab_part_info->is_auto_partitioned= FALSE;
}
}
- else if (alter_info->flags & ALTER_DROP_PARTITION)
+ else if (alter_info->flags & Alter_info::ALTER_DROP_PARTITION)
{
/*
Drop a partition from a range partition and list partitioning is
@@ -5092,14 +5152,14 @@ that are reorganised.
my_error(ER_DROP_PARTITION_NON_EXISTENT, MYF(0), "DROP");
goto err;
}
- if (new_table->file->is_fk_defined_on_table_or_index(MAX_KEY))
+ if (table->file->is_fk_defined_on_table_or_index(MAX_KEY))
{
my_error(ER_ROW_IS_REFERENCED, MYF(0));
goto err;
}
tab_part_info->num_parts-= num_parts_dropped;
}
- else if (alter_info->flags & ALTER_REBUILD_PARTITION)
+ else if (alter_info->flags & Alter_info::ALTER_REBUILD_PARTITION)
{
if (set_part_state(alter_info, tab_part_info, PART_CHANGED))
{
@@ -5108,11 +5168,11 @@ that are reorganised.
}
if (!(*fast_alter_table))
{
- new_table->file->print_error(HA_ERR_WRONG_COMMAND, MYF(0));
+ table->file->print_error(HA_ERR_WRONG_COMMAND, MYF(0));
goto err;
}
}
- else if (alter_info->flags & ALTER_COALESCE_PARTITION)
+ else if (alter_info->flags & Alter_info::ALTER_COALESCE_PARTITION)
{
uint num_parts_coalesced= alter_info->num_parts;
uint num_parts_remain= tab_part_info->num_parts - num_parts_coalesced;
@@ -5210,13 +5270,13 @@ state of p1.
} while (part_count < tab_part_info->num_parts);
tab_part_info->num_parts= num_parts_remain;
}
- if (!(alter_info->flags & ALTER_TABLE_REORG))
+ if (!(alter_info->flags & Alter_info::ALTER_TABLE_REORG))
{
tab_part_info->use_default_num_partitions= FALSE;
tab_part_info->is_auto_partitioned= FALSE;
}
}
- else if (alter_info->flags & ALTER_REORGANIZE_PARTITION)
+ else if (alter_info->flags & Alter_info::ALTER_REORGANIZE_PARTITION)
{
/*
Reorganise partitions takes a number of partitions that are next
@@ -5264,8 +5324,9 @@ state of p1.
alt_part_info->subpart_type= tab_part_info->subpart_type;
alt_part_info->num_subparts= tab_part_info->num_subparts;
DBUG_ASSERT(!alt_part_info->use_default_partitions);
- if (alt_part_info->set_up_defaults_for_partitioning(new_table->file,
- 0, 0))
+ /* We specified partitions explicitly so don't use defaults anymore. */
+ tab_part_info->use_default_partitions= FALSE;
+ if (alt_part_info->set_up_defaults_for_partitioning(table->file, 0, 0))
{
goto err;
}
@@ -5388,8 +5449,8 @@ the generated partition syntax in a correct manner.
}
*partition_changed= TRUE;
thd->work_part_info= tab_part_info;
- if (alter_info->flags & ALTER_ADD_PARTITION ||
- alter_info->flags & ALTER_REORGANIZE_PARTITION)
+ if (alter_info->flags & Alter_info::ALTER_ADD_PARTITION ||
+ alter_info->flags & Alter_info::ALTER_REORGANIZE_PARTITION)
{
if (tab_part_info->use_default_subpartitions &&
!alt_part_info->use_default_subpartitions)
@@ -5398,7 +5459,7 @@ the generated partition syntax in a correct manner.
tab_part_info->use_default_num_subpartitions= FALSE;
}
if (tab_part_info->check_partition_info(thd, (handlerton**)NULL,
- new_table->file, 0, TRUE))
+ table->file, 0, TRUE))
{
goto err;
}
@@ -5407,7 +5468,7 @@ the generated partition syntax in a correct manner.
since this function "fixes" the item trees of the new partitions
to reorganize into
*/
- if (alter_info->flags == ALTER_REORGANIZE_PARTITION &&
+ if (alter_info->flags == Alter_info::ALTER_REORGANIZE_PARTITION &&
tab_part_info->part_type == RANGE_PARTITION &&
((is_last_partition_reorged &&
(tab_part_info->column_list ?
@@ -5486,15 +5547,17 @@ the generated partition syntax in a correct manner.
There was no partitioning before and no partitioning defined.
Obviously no work needed.
*/
- if (table->part_info)
+ partition_info *tab_part_info= table->part_info;
+
+ if (tab_part_info)
{
- if (alter_info->flags & ALTER_REMOVE_PARTITIONING)
+ if (alter_info->flags & Alter_info::ALTER_REMOVE_PARTITIONING)
{
DBUG_PRINT("info", ("Remove partitioning"));
if (!(create_info->used_fields & HA_CREATE_USED_ENGINE))
{
DBUG_PRINT("info", ("No explicit engine used"));
- create_info->db_type= table->part_info->default_engine_type;
+ create_info->db_type= tab_part_info->default_engine_type;
}
DBUG_PRINT("info", ("New engine type: %s",
ha_resolve_storage_engine_name(create_info->db_type)));
@@ -5506,16 +5569,20 @@ the generated partition syntax in a correct manner.
/*
Retain partitioning but possibly with a new storage engine
beneath.
+
+ Create a copy of TABLE::part_info to be able to modify it freely.
*/
- thd->work_part_info= table->part_info;
+ if (!(tab_part_info= tab_part_info->get_clone()))
+ DBUG_RETURN(TRUE);
+ thd->work_part_info= tab_part_info;
if (create_info->used_fields & HA_CREATE_USED_ENGINE &&
- create_info->db_type != table->part_info->default_engine_type)
+ create_info->db_type != tab_part_info->default_engine_type)
{
/*
Make sure change of engine happens to all partitions.
*/
DBUG_PRINT("info", ("partition changed"));
- if (table->part_info->is_auto_partitioned)
+ if (tab_part_info->is_auto_partitioned)
{
/*
If the user originally didn't specify partitioning to be
@@ -5543,25 +5610,14 @@ the generated partition syntax in a correct manner.
Need to cater for engine types that can handle partition without
using the partition handler.
*/
- if (part_info != table->part_info)
+ if (part_info != tab_part_info)
{
- if (part_info->fix_parser_data(thd))
+ DBUG_PRINT("info", ("partition changed"));
+ *partition_changed= TRUE;
+ if (thd->work_part_info->fix_parser_data(thd))
{
goto err;
}
- /*
- Compare the old and new part_info. If only key_algorithm
- change is done, don't consider it as changed partitioning (to avoid
- rebuild). This is to handle KEY (numeric_cols) partitioned tables
- created in 5.1. For more info, see bug#14521864.
- */
- if (alter_info->flags != ALTER_PARTITION ||
- !table->part_info ||
- !table->part_info->has_same_partitioning(part_info))
- {
- DBUG_PRINT("info", ("partition changed"));
- *partition_changed= true;
- }
}
/*
Set up partition default_engine_type either from the create_info
@@ -5571,8 +5627,8 @@ the generated partition syntax in a correct manner.
part_info->default_engine_type= create_info->db_type;
else
{
- if (table->part_info)
- part_info->default_engine_type= table->part_info->default_engine_type;
+ if (tab_part_info)
+ part_info->default_engine_type= tab_part_info->default_engine_type;
else
part_info->default_engine_type= create_info->db_type;
}
@@ -5592,15 +5648,7 @@ the generated partition syntax in a correct manner.
}
DBUG_RETURN(FALSE);
err:
- if (new_table)
- {
- /*
- Only remove the intermediate table object and its share object,
- do not remove the .frm file, since it is the original one.
- */
- close_temporary(new_table, 1, 0);
- }
- *fast_alter_table= NULL;
+ *fast_alter_table= false;
DBUG_RETURN(TRUE);
}
@@ -5641,12 +5689,7 @@ static bool mysql_change_partitions(ALTER_PARTITION_PARAM_TYPE *lpt)
build_table_filename(path, sizeof(path) - 1, lpt->db, lpt->table_name, "", 0);
- /* First lock the original tables */
- if (file->ha_external_lock(thd, F_WRLCK))
- DBUG_RETURN(TRUE);
-
- /* Disable transactions for all new tables */
- if (mysql_trans_prepare_alter_copy_data(thd))
+ if(mysql_trans_prepare_alter_copy_data(thd))
DBUG_RETURN(TRUE);
/* TODO: test if bulk_insert would increase the performance */
@@ -5661,9 +5704,6 @@ static bool mysql_change_partitions(ALTER_PARTITION_PARAM_TYPE *lpt)
if (mysql_trans_commit_alter_copy_data(thd))
error= 1; /* The error has been reported */
- if (file->ha_external_lock(thd, F_UNLCK))
- error= 1;
-
DBUG_RETURN(test(error));
}
@@ -5734,6 +5774,11 @@ static bool mysql_drop_partitions(ALTER_PARTITION_PARAM_TYPE *lpt)
int error;
DBUG_ENTER("mysql_drop_partitions");
+ DBUG_ASSERT(lpt->thd->mdl_context.is_lock_owner(MDL_key::TABLE,
+ lpt->table->s->db.str,
+ lpt->table->s->table_name.str,
+ MDL_EXCLUSIVE));
+
build_table_filename(path, sizeof(path) - 1, lpt->db, lpt->table_name, "", 0);
if ((error= lpt->table->file->ha_drop_partitions(path)))
{
@@ -6315,7 +6360,8 @@ static bool write_log_final_change_partition(ALTER_PARTITION_PARAM_TYPE *lpt)
if (write_log_changed_partitions(lpt, &next_entry, (const char*)path))
goto error;
if (write_log_dropped_partitions(lpt, &next_entry, (const char*)path,
- lpt->alter_info->flags & ALTER_REORGANIZE_PARTITION))
+ lpt->alter_info->flags &
+ Alter_info::ALTER_REORGANIZE_PARTITION))
goto error;
if (write_log_replace_delete_frm(lpt, next_entry, shadow_path, path, TRUE))
goto error;
@@ -6412,47 +6458,54 @@ static void alter_partition_lock_handling(ALTER_PARTITION_PARAM_TYPE *lpt)
{
THD *thd= lpt->thd;
- if (lpt->old_table)
- close_all_tables_for_name(thd, lpt->old_table->s, HA_EXTRA_NOT_USED);
if (lpt->table)
{
/*
- Only remove the intermediate table object and its share object,
- do not remove the .frm file, since it is the original one.
+ Remove all instances of the table and its locks and other resources.
*/
- close_temporary(lpt->table, 1, 0);
+ close_all_tables_for_name(thd, lpt->table->s, HA_EXTRA_NOT_USED, NULL);
}
lpt->table= 0;
- lpt->old_table= 0;
lpt->table_list->table= 0;
- if (thd->locked_tables_list.reopen_tables(thd))
- sql_print_warning("We failed to reacquire LOCKs in ALTER TABLE");
+ if (thd->locked_tables_mode)
+ {
+ Diagnostics_area *stmt_da= NULL;
+ Diagnostics_area tmp_stmt_da(true);
+
+ if (thd->is_error())
+ {
+ /* reopen might fail if we have a previous error, use a temporary da. */
+ stmt_da= thd->get_stmt_da();
+ thd->set_stmt_da(&tmp_stmt_da);
+ }
+
+ if (thd->locked_tables_list.reopen_tables(thd))
+ sql_print_warning("We failed to reacquire LOCKs in ALTER TABLE");
+
+ if (stmt_da)
+ thd->set_stmt_da(stmt_da);
+ }
}
-/*
- Unlock and close table before renaming and dropping partitions
- SYNOPSIS
- alter_close_tables()
- lpt Struct carrying parameters
- close_old Close original table too
- RETURN VALUES
- 0
+/**
+ Unlock and close table before renaming and dropping partitions.
+
+ @param lpt Struct carrying parameters
+
+ @return Always 0.
*/
-static int alter_close_tables(ALTER_PARTITION_PARAM_TYPE *lpt, bool close_old)
+static int alter_close_table(ALTER_PARTITION_PARAM_TYPE *lpt)
{
- DBUG_ENTER("alter_close_tables");
+ DBUG_ENTER("alter_close_table");
+
if (lpt->table->db_stat)
{
+ mysql_lock_remove(lpt->thd, lpt->thd->lock, lpt->table);
lpt->table->file->ha_close();
lpt->table->db_stat= 0; // Mark file closed
}
- if (close_old && lpt->old_table)
- {
- close_all_tables_for_name(lpt->thd, lpt->old_table->s, HA_EXTRA_NOT_USED);
- lpt->old_table= 0;
- }
DBUG_RETURN(0);
}
@@ -6474,23 +6527,54 @@ void handle_alter_part_error(ALTER_PARTITION_PARAM_TYPE *lpt,
bool close_table)
{
partition_info *part_info= lpt->part_info;
+ THD *thd= lpt->thd;
+ TABLE *table= lpt->table;
DBUG_ENTER("handle_alter_part_error");
+ DBUG_ASSERT(table->m_needs_reopen);
if (close_table)
{
/*
- Since the error handling (ddl_log) needs to drop newly created
- partitions they must be closed first to not issue errors.
- But we still need some information from the part_info object,
- so we clone it first to have a copy.
+ All instances of this table needs to be closed.
+ Better to do that here, than leave the cleaning up to others.
+ Aquire EXCLUSIVE mdl lock if not already aquired.
*/
+ if (!thd->mdl_context.is_lock_owner(MDL_key::TABLE, lpt->db,
+ lpt->table_name,
+ MDL_EXCLUSIVE))
+ {
+ if (wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN))
+ {
+ /* At least remove this instance on failure */
+ goto err_exclusive_lock;
+ }
+ }
+ /* Ensure the share is destroyed and reopened. */
part_info= lpt->part_info->get_clone();
- alter_close_tables(lpt, action_completed);
+ close_all_tables_for_name(thd, table->s, HA_EXTRA_NOT_USED, NULL);
+ }
+ else
+ {
+err_exclusive_lock:
+ /*
+ Temporarily remove it from the locked table list, so that it will get
+ reopened.
+ */
+ thd->locked_tables_list.unlink_from_list(thd,
+ table->pos_in_locked_tables,
+ false);
+ /*
+ Make sure that the table is unlocked, closed and removed from
+ the table cache.
+ */
+ mysql_lock_remove(thd, thd->lock, table);
+ part_info= lpt->part_info->get_clone();
+ close_thread_table(thd, &thd->open_tables);
+ lpt->table_list->table= NULL;
}
if (part_info->first_log_entry &&
- execute_ddl_log_entry(lpt->thd,
- part_info->first_log_entry->entry_pos))
+ execute_ddl_log_entry(thd, part_info->first_log_entry->entry_pos))
{
/*
We couldn't recover from error, most likely manual interaction
@@ -6503,14 +6587,14 @@ void handle_alter_part_error(ALTER_PARTITION_PARAM_TYPE *lpt,
if (drop_partition)
{
/* Table is still ok, but we left a shadow frm file behind. */
- push_warning_printf(lpt->thd, MYSQL_ERROR::WARN_LEVEL_WARN, 1,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, 1,
"%s %s",
"Operation was unsuccessful, table is still intact,",
"but it is possible that a shadow frm file was left behind");
}
else
{
- push_warning_printf(lpt->thd, MYSQL_ERROR::WARN_LEVEL_WARN, 1,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, 1,
"%s %s %s %s",
"Operation was unsuccessful, table is still intact,",
"but it is possible that a shadow frm file was left behind.",
@@ -6526,7 +6610,7 @@ void handle_alter_part_error(ALTER_PARTITION_PARAM_TYPE *lpt,
Failed during install of shadow frm file, table isn't intact
and dropped partitions are still there
*/
- push_warning_printf(lpt->thd, MYSQL_ERROR::WARN_LEVEL_WARN, 1,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, 1,
"%s %s %s",
"Failed during alter of partitions, table is no longer intact.",
"The frm file is in an unknown state, and a backup",
@@ -6540,7 +6624,7 @@ void handle_alter_part_error(ALTER_PARTITION_PARAM_TYPE *lpt,
ask the user to perform the action manually. We remove the log
records and ask the user to perform the action manually.
*/
- push_warning_printf(lpt->thd, MYSQL_ERROR::WARN_LEVEL_WARN, 1,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, 1,
"%s %s",
"Failed during drop of partitions, table is intact.",
"Manual drop of remaining partitions is required");
@@ -6552,7 +6636,7 @@ void handle_alter_part_error(ALTER_PARTITION_PARAM_TYPE *lpt,
certainly in a very bad state so we give user warning and disable
the table by writing an ancient frm version into it.
*/
- push_warning_printf(lpt->thd, MYSQL_ERROR::WARN_LEVEL_WARN, 1,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, 1,
"%s %s %s",
"Failed during renaming of partitions. We are now in a position",
"where table is not reusable",
@@ -6581,11 +6665,31 @@ void handle_alter_part_error(ALTER_PARTITION_PARAM_TYPE *lpt,
even though we reported an error the operation was successfully
completed.
*/
- push_warning_printf(lpt->thd, MYSQL_ERROR::WARN_LEVEL_WARN, 1,"%s %s",
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, 1,"%s %s",
"Operation was successfully completed by failure handling,",
"after failure of normal operation");
}
}
+
+ if (thd->locked_tables_mode)
+ {
+ Diagnostics_area *stmt_da= NULL;
+ Diagnostics_area tmp_stmt_da(true);
+
+ if (thd->is_error())
+ {
+ /* reopen might fail if we have a previous error, use a temporary da. */
+ stmt_da= thd->get_stmt_da();
+ thd->set_stmt_da(&tmp_stmt_da);
+ }
+
+ if (thd->locked_tables_list.reopen_tables(thd))
+ sql_print_warning("We failed to reacquire LOCKs in ALTER TABLE");
+
+ if (stmt_da)
+ thd->set_stmt_da(stmt_da);
+ }
+
DBUG_VOID_RETURN;
}
@@ -6602,7 +6706,7 @@ static void downgrade_mdl_if_lock_tables_mode(THD *thd, MDL_ticket *ticket,
enum_mdl_type type)
{
if (thd->locked_tables_mode)
- ticket->downgrade_exclusive_lock(type);
+ ticket->downgrade_lock(type);
}
@@ -6611,13 +6715,12 @@ static void downgrade_mdl_if_lock_tables_mode(THD *thd, MDL_ticket *ticket,
previously prepared.
@param thd Thread object
- @param table Original table object
+ @param table Original table object with new part_info
@param alter_info ALTER TABLE info
@param create_info Create info for CREATE TABLE
@param table_list List of the table involved
@param db Database name of new table
@param table_name Table name of new table
- @param fast_alter_table Prepared table object
@return Operation status
@retval TRUE Error
@@ -6633,8 +6736,7 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
HA_CREATE_INFO *create_info,
TABLE_LIST *table_list,
char *db,
- const char *table_name,
- TABLE *fast_alter_table)
+ const char *table_name)
{
/* Set-up struct used to write frm files */
partition_info *part_info;
@@ -6644,10 +6746,10 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
bool close_table_on_failure= FALSE;
bool frm_install= FALSE;
MDL_ticket *mdl_ticket= table->mdl_ticket;
- DBUG_ASSERT(fast_alter_table);
DBUG_ENTER("fast_alter_partition_table");
+ DBUG_ASSERT(table->m_needs_reopen);
- part_info= fast_alter_table->part_info;
+ part_info= table->part_info;
lpt->thd= thd;
lpt->table_list= table_list;
lpt->part_info= part_info;
@@ -6656,8 +6758,7 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
lpt->db_options= create_info->table_options;
if (create_info->row_type == ROW_TYPE_DYNAMIC)
lpt->db_options|= HA_OPTION_PACK_RECORD;
- lpt->table= fast_alter_table;
- lpt->old_table= table;
+ lpt->table= table;
lpt->key_info_buffer= 0;
lpt->key_count= 0;
lpt->db= db;
@@ -6715,7 +6816,7 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
goto err;
}
}
- else if (alter_info->flags & ALTER_DROP_PARTITION)
+ else if (alter_info->flags & Alter_info::ALTER_DROP_PARTITION)
{
/*
Now after all checks and setting state on dropped partitions we can
@@ -6750,9 +6851,9 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
3) Write the ddl log to ensure that the operation is completed
even in the presence of a MySQL Server crash (the log is executed
before any other threads are started, so there are no locking issues).
- 4) Close all tables that have already been opened but didn't stumble on
+ 4) Close the table that have already been opened but didn't stumble on
the abort locked previously. This is done as part of the
- alter_close_tables call.
+ alter_close_table call.
5) Write the bin log
Unfortunately the writing of the binlog is not synchronised with
other logging activities. So no matter in which order the binlog
@@ -6788,7 +6889,7 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
(action_completed= TRUE, FALSE) ||
ERROR_INJECT_CRASH("crash_drop_partition_4") ||
ERROR_INJECT_ERROR("fail_drop_partition_4") ||
- alter_close_tables(lpt, action_completed) ||
+ alter_close_table(lpt) ||
(close_table_on_failure= FALSE, FALSE) ||
ERROR_INJECT_CRASH("crash_drop_partition_5") ||
ERROR_INJECT_ERROR("fail_drop_partition_5") ||
@@ -6815,7 +6916,7 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
goto err;
}
}
- else if ((alter_info->flags & ALTER_ADD_PARTITION) &&
+ else if ((alter_info->flags & Alter_info::ALTER_ADD_PARTITION) &&
(part_info->part_type == RANGE_PARTITION ||
part_info->part_type == LIST_PARTITION))
{
@@ -6865,7 +6966,7 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
ERROR_INJECT_CRASH("crash_add_partition_5") ||
ERROR_INJECT_ERROR("fail_add_partition_5") ||
(close_table_on_failure= FALSE, FALSE) ||
- alter_close_tables(lpt, action_completed) ||
+ alter_close_table(lpt) ||
ERROR_INJECT_CRASH("crash_add_partition_6") ||
ERROR_INJECT_ERROR("fail_add_partition_6") ||
((!thd->lex->no_write_to_binlog) &&
@@ -6925,27 +7026,27 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
use a lower lock level. This can be handled inside store_lock in the
respective handler.
- 0) Write an entry that removes the shadow frm file if crash occurs
- 1) Write the shadow frm file of new partitioning
+ 0) Write an entry that removes the shadow frm file if crash occurs.
+ 1) Write the shadow frm file of new partitioning.
2) Log such that temporary partitions added in change phase are
- removed in a crash situation
- 3) Add the new partitions
- Copy from the reorganised partitions to the new partitions
+ removed in a crash situation.
+ 3) Add the new partitions.
+ Copy from the reorganised partitions to the new partitions.
4) Get an exclusive metadata lock on the table (waits for all active
transactions using this table). This ensures that we
can release all other locks on the table and since no one can open
the table, there can be no new threads accessing the table. They
will be hanging on this exclusive lock.
- 5) Log that operation is completed and log all complete actions
- needed to complete operation from here
- 6) Write bin log
- 7) Close all instances of the table and remove them from the table cache.
- 8) Prepare handlers for rename and delete of partitions
+ 5) Close the table.
+ 6) Log that operation is completed and log all complete actions
+ needed to complete operation from here.
+ 7) Write bin log.
+ 8) Prepare handlers for rename and delete of partitions.
9) Rename and drop the reorged partitions such that they are no
longer used and rename those added to their real new names.
- 10) Install the shadow frm file
- 11) Reopen the table if under lock tables
- 12) Complete query
+ 10) Install the shadow frm file.
+ 11) Reopen the table if under lock tables.
+ 12) Complete query.
*/
if (write_log_drop_shadow_frm(lpt) ||
ERROR_INJECT_CRASH("crash_change_partition_1") ||
@@ -6963,22 +7064,22 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
wait_while_table_is_used(thd, table, HA_EXTRA_NOT_USED) ||
ERROR_INJECT_CRASH("crash_change_partition_5") ||
ERROR_INJECT_ERROR("fail_change_partition_5") ||
- write_log_final_change_partition(lpt) ||
- (action_completed= TRUE, FALSE) ||
+ alter_close_table(lpt) ||
+ (close_table_on_failure= FALSE, FALSE) ||
ERROR_INJECT_CRASH("crash_change_partition_6") ||
ERROR_INJECT_ERROR("fail_change_partition_6") ||
+ write_log_final_change_partition(lpt) ||
+ (action_completed= TRUE, FALSE) ||
+ ERROR_INJECT_CRASH("crash_change_partition_7") ||
+ ERROR_INJECT_ERROR("fail_change_partition_7") ||
((!thd->lex->no_write_to_binlog) &&
(write_bin_log(thd, FALSE,
thd->query(), thd->query_length()), FALSE)) ||
- ERROR_INJECT_CRASH("crash_change_partition_7") ||
- ERROR_INJECT_ERROR("fail_change_partition_7") ||
+ ERROR_INJECT_CRASH("crash_change_partition_8") ||
+ ERROR_INJECT_ERROR("fail_change_partition_8") ||
((frm_install= TRUE), FALSE) ||
mysql_write_frm(lpt, WFRM_INSTALL_SHADOW) ||
(frm_install= FALSE, FALSE) ||
- ERROR_INJECT_CRASH("crash_change_partition_8") ||
- ERROR_INJECT_ERROR("fail_change_partition_8") ||
- alter_close_tables(lpt, action_completed) ||
- (close_table_on_failure= FALSE, FALSE) ||
ERROR_INJECT_CRASH("crash_change_partition_9") ||
ERROR_INJECT_ERROR("fail_change_partition_9") ||
mysql_drop_partitions(lpt) ||
@@ -7004,22 +7105,6 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
*/
DBUG_RETURN(fast_end_partition(thd, lpt->copied, lpt->deleted, table_list));
err:
- if (action_completed)
- {
- /*
- Although error occurred, the action was forced to retry for completion.
- Therefore we must close+reopen all instances of the table.
- */
- (void) alter_partition_lock_handling(lpt);
- }
- else
- {
- /*
- The failed action was reverted, leave the original table as is and
- close/destroy the intermediate table object and its share.
- */
- close_temporary(lpt->table, 1, 0);
- }
downgrade_mdl_if_lock_tables_mode(thd, mdl_ticket, MDL_SHARED_NO_READ_WRITE);
DBUG_RETURN(TRUE);
}
@@ -7082,7 +7167,7 @@ void set_key_field_ptr(KEY *key_info, const uchar *new_buf,
const uchar *old_buf)
{
KEY_PART_INFO *key_part= key_info->key_part;
- uint key_parts= key_info->key_parts;
+ uint key_parts= key_info->user_defined_key_parts;
uint i= 0;
my_ptrdiff_t diff= (new_buf - old_buf);
DBUG_ENTER("set_key_field_ptr");
@@ -7118,20 +7203,19 @@ void mem_alloc_error(size_t size)
}
#ifdef WITH_PARTITION_STORAGE_ENGINE
-/*
- Return comma-separated list of used partitions in the provided given string
+/**
+ Return comma-separated list of used partitions in the provided given string.
- SYNOPSIS
- make_used_partitions_str()
- part_info IN Partitioning info
- parts_str OUT The string to fill
+ @param part_info Partitioning info
+ @param[out] parts The resulting list of string to fill
- DESCRIPTION
- Generate a list of used partitions (from bits in part_info->used_partitions
- bitmap), asd store it into the provided String object.
+ Generate a list of used partitions (from bits in part_info->read_partitions
+ bitmap), and store it into the provided String object.
- NOTE
+ @note
The produced string must not be longer then MAX_PARTITIONS * (1 + FN_LEN).
+ In case of UPDATE, only the partitions read is given, not the partitions
+ that was written or locked.
*/
void make_used_partitions_str(partition_info *part_info, String *parts_str)
@@ -7149,7 +7233,7 @@ void make_used_partitions_str(partition_info *part_info, String *parts_str)
List_iterator<partition_element> it2(head_pe->subpartitions);
while ((pe= it2++))
{
- if (bitmap_is_set(&part_info->used_partitions, partition_id))
+ if (bitmap_is_set(&part_info->read_partitions, partition_id))
{
if (parts_str->length())
parts_str->append(',');
@@ -7169,7 +7253,7 @@ void make_used_partitions_str(partition_info *part_info, String *parts_str)
{
while ((pe= it++))
{
- if (bitmap_is_set(&part_info->used_partitions, partition_id))
+ if (bitmap_is_set(&part_info->read_partitions, partition_id))
{
if (parts_str->length())
parts_str->append(',');
@@ -8010,8 +8094,7 @@ static uint32 get_next_partition_via_walking(PARTITION_ITERATOR *part_iter)
while (part_iter->field_vals.cur != part_iter->field_vals.end)
{
longlong dummy;
- field->store(part_iter->field_vals.cur++,
- ((Field_num*)field)->unsigned_flag);
+ field->store(part_iter->field_vals.cur++, field->flags & UNSIGNED_FLAG);
if ((part_iter->part_info->is_sub_partitioned() &&
!part_iter->part_info->get_part_partition_id(part_iter->part_info,
&part_id, &dummy)) ||
@@ -8035,12 +8118,11 @@ static uint32 get_next_subpartition_via_walking(PARTITION_ITERATOR *part_iter)
part_iter->field_vals.cur= part_iter->field_vals.start;
return NOT_A_PARTITION_ID;
}
- field->store(part_iter->field_vals.cur++, FALSE);
+ field->store(part_iter->field_vals.cur++, field->flags & UNSIGNED_FLAG);
if (part_iter->part_info->get_subpartition_id(part_iter->part_info,
&res))
return NOT_A_PARTITION_ID;
return res;
-
}
diff --git a/sql/sql_partition.h b/sql/sql_partition.h
index cf532c45c66..7f39ddd7a3f 100644
--- a/sql/sql_partition.h
+++ b/sql/sql_partition.h
@@ -24,6 +24,7 @@
#include "table.h" /* TABLE_LIST */
class Alter_info;
+class Alter_table_ctx;
class Field;
class String;
class handler;
@@ -53,7 +54,6 @@ typedef struct st_lock_param_type
HA_CREATE_INFO *create_info;
Alter_info *alter_info;
TABLE *table;
- TABLE *old_table;
KEY *key_info_buffer;
const char *db;
const char *table_name;
@@ -75,7 +75,7 @@ typedef struct {
} part_id_range;
struct st_partition_iter;
-#define NOT_A_PARTITION_ID ((uint32)-1)
+#define NOT_A_PARTITION_ID UINT_MAX32
bool is_partition_in_list(char *part_name, List<char> list_part_names);
char *are_partitions_in_table(partition_info *new_part_info,
@@ -125,6 +125,7 @@ bool check_part_func_fields(Field **ptr, bool ok_with_charsets);
bool field_is_partition_charset(Field *field);
Item* convert_charset_partition_constant(Item *item, CHARSET_INFO *cs);
void mem_alloc_error(size_t size);
+void truncate_partition_filename(char *path);
/*
A "Get next" function for partition iterator.
@@ -250,24 +251,23 @@ uint fast_alter_partition_table(THD *thd, TABLE *table,
HA_CREATE_INFO *create_info,
TABLE_LIST *table_list,
char *db,
- const char *table_name,
- TABLE *fast_alter_table);
+ const char *table_name);
bool set_part_state(Alter_info *alter_info, partition_info *tab_part_info,
enum partition_state part_state);
uint prep_alter_part_table(THD *thd, TABLE *table, Alter_info *alter_info,
HA_CREATE_INFO *create_info,
- handlerton *old_db_type,
+ Alter_table_ctx *alter_ctx,
bool *partition_changed,
- char *db,
- const char *table_name,
- const char *path,
- TABLE **fast_alter_table);
+ bool *fast_alter_table);
char *generate_partition_syntax(partition_info *part_info,
uint *buf_length, bool use_sql_alloc,
bool show_partition_options,
HA_CREATE_INFO *create_info,
- Alter_info *alter_info,
- const char *current_comment_start);
+ Alter_info *alter_info);
+bool verify_data_with_partition(TABLE *table, TABLE *part_table,
+ uint32 part_id);
+bool compare_partition_options(HA_CREATE_INFO *table_create_info,
+ partition_element *part_elem);
bool partition_key_modified(TABLE *table, const MY_BITMAP *fields);
#else
#define partition_key_modified(X,Y) 0
diff --git a/sql/sql_partition_admin.cc b/sql/sql_partition_admin.cc
index b9bf3dbc217..1a82413bb07 100644
--- a/sql/sql_partition_admin.cc
+++ b/sql/sql_partition_admin.cc
@@ -14,8 +14,15 @@
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
#include "sql_parse.h" // check_one_table_access
+ // check_merge_table_access
+ // check_one_table_access
#include "sql_table.h" // mysql_alter_table, etc.
-#include "sql_lex.h" // Sql_statement
+#include "sql_cmd.h" // Sql_cmd
+#include "sql_alter.h" // Sql_cmd_alter_table
+#include "sql_partition.h" // struct partition_info, etc.
+#include "debug_sync.h" // DEBUG_SYNC
+#include "sql_truncate.h" // mysql_truncate_table,
+ // Sql_cmd_truncate_table
#include "sql_admin.h" // Analyze/Check/.._table_statement
#include "sql_partition_admin.h" // Alter_table_*_partition
#ifdef WITH_PARTITION_STORAGE_ENGINE
@@ -36,41 +43,665 @@ bool Partition_statement_unsupported::execute(THD *)
#else
-bool Alter_table_analyze_partition_statement::execute(THD *thd)
+bool Sql_cmd_alter_table_exchange_partition::execute(THD *thd)
+{
+ /* Moved from mysql_execute_command */
+ LEX *lex= thd->lex;
+ /* first SELECT_LEX (have special meaning for many of non-SELECTcommands) */
+ SELECT_LEX *select_lex= &lex->select_lex;
+ /* first table of first SELECT_LEX */
+ TABLE_LIST *first_table= (TABLE_LIST*) select_lex->table_list.first;
+ /*
+ Code in mysql_alter_table() may modify its HA_CREATE_INFO argument,
+ so we have to use a copy of this structure to make execution
+ prepared statement- safe. A shallow copy is enough as no memory
+ referenced from this structure will be modified.
+ @todo move these into constructor...
+ */
+ HA_CREATE_INFO create_info(lex->create_info);
+ Alter_info alter_info(lex->alter_info, thd->mem_root);
+ ulong priv_needed= ALTER_ACL | DROP_ACL | INSERT_ACL | CREATE_ACL;
+
+ DBUG_ENTER("Sql_cmd_alter_table_exchange_partition::execute");
+
+ if (thd->is_fatal_error) /* out of memory creating a copy of alter_info */
+ DBUG_RETURN(TRUE);
+
+ /* Must be set in the parser */
+ DBUG_ASSERT(select_lex->db);
+ /* also check the table to be exchanged with the partition */
+ DBUG_ASSERT(alter_info.flags & Alter_info::ALTER_EXCHANGE_PARTITION);
+
+ if (check_access(thd, priv_needed, first_table->db,
+ &first_table->grant.privilege,
+ &first_table->grant.m_internal,
+ 0, 0) ||
+ check_access(thd, priv_needed, first_table->next_local->db,
+ &first_table->next_local->grant.privilege,
+ &first_table->next_local->grant.m_internal,
+ 0, 0))
+ DBUG_RETURN(TRUE);
+
+ if (check_grant(thd, priv_needed, first_table, FALSE, UINT_MAX, FALSE))
+ DBUG_RETURN(TRUE);
+
+ /* Not allowed with EXCHANGE PARTITION */
+ DBUG_ASSERT(!create_info.data_file_name && !create_info.index_file_name);
+
+ thd->enable_slow_log= opt_log_slow_admin_statements;
+ DBUG_RETURN(exchange_partition(thd, first_table, &alter_info));
+}
+
+
+/**
+ @brief Checks that the tables will be able to be used for EXCHANGE PARTITION.
+ @param table Non partitioned table.
+ @param part_table Partitioned table.
+
+ @retval FALSE if OK, otherwise error is reported and TRUE is returned.
+*/
+static bool check_exchange_partition(TABLE *table, TABLE *part_table)
+{
+ DBUG_ENTER("check_exchange_partition");
+
+ /* Both tables must exist */
+ if (!part_table || !table)
+ {
+ my_error(ER_CHECK_NO_SUCH_TABLE, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+
+ /* The first table must be partitioned, and the second must not */
+ if (!part_table->part_info)
+ {
+ my_error(ER_PARTITION_MGMT_ON_NONPARTITIONED, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ if (table->part_info)
+ {
+ my_error(ER_PARTITION_EXCHANGE_PART_TABLE, MYF(0),
+ table->s->table_name.str);
+ DBUG_RETURN(TRUE);
+ }
+
+ if (part_table->file->ht != partition_hton)
+ {
+ /*
+ Only allowed on partitioned tables throught the generic ha_partition
+ handler, i.e not yet for native partitioning (NDB).
+ */
+ my_error(ER_PARTITION_MGMT_ON_NONPARTITIONED, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+
+ if (table->file->ht != part_table->part_info->default_engine_type)
+ {
+ my_error(ER_MIX_HANDLER_ERROR, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+
+ /* Verify that table is not tmp table, partitioned tables cannot be tmp. */
+ if (table->s->tmp_table != NO_TMP_TABLE)
+ {
+ my_error(ER_PARTITION_EXCHANGE_TEMP_TABLE, MYF(0),
+ table->s->table_name.str);
+ DBUG_RETURN(TRUE);
+ }
+
+ /* The table cannot have foreign keys constraints or be referenced */
+ if(!table->file->can_switch_engines())
+ {
+ my_error(ER_PARTITION_EXCHANGE_FOREIGN_KEY, MYF(0),
+ table->s->table_name.str);
+ DBUG_RETURN(TRUE);
+ }
+ DBUG_RETURN(FALSE);
+}
+
+
+/**
+ @brief Compare table structure/options between a non partitioned table
+ and a specific partition of a partitioned table.
+
+ @param thd Thread object.
+ @param table Non partitioned table.
+ @param part_table Partitioned table.
+ @param part_elem Partition element to use for partition specific compare.
+*/
+static bool compare_table_with_partition(THD *thd, TABLE *table,
+ TABLE *part_table,
+ partition_element *part_elem)
+{
+ HA_CREATE_INFO table_create_info, part_create_info;
+ Alter_info part_alter_info;
+ Alter_table_ctx part_alter_ctx; // Not used
+ DBUG_ENTER("compare_table_with_partition");
+
+ bool metadata_equal= false;
+ memset(&part_create_info, 0, sizeof(HA_CREATE_INFO));
+ memset(&table_create_info, 0, sizeof(HA_CREATE_INFO));
+
+ update_create_info_from_table(&table_create_info, table);
+ /* get the current auto_increment value */
+ table->file->update_create_info(&table_create_info);
+ /* mark all columns used, since they are used when preparing the new table */
+ part_table->use_all_columns();
+ table->use_all_columns();
+ if (mysql_prepare_alter_table(thd, part_table, &part_create_info,
+ &part_alter_info, &part_alter_ctx))
+ {
+ my_error(ER_TABLES_DIFFERENT_METADATA, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ /* db_type is not set in prepare_alter_table */
+ part_create_info.db_type= part_table->part_info->default_engine_type;
+ /*
+ Since we exchange the partition with the table, allow exchanging
+ auto_increment value as well.
+ */
+ part_create_info.auto_increment_value=
+ table_create_info.auto_increment_value;
+
+ /* Check compatible row_types and set create_info accordingly. */
+ {
+ enum row_type part_row_type= part_table->file->get_row_type();
+ enum row_type table_row_type= table->file->get_row_type();
+ if (part_row_type != table_row_type)
+ {
+ my_error(ER_PARTITION_EXCHANGE_DIFFERENT_OPTION, MYF(0),
+ "ROW_FORMAT");
+ DBUG_RETURN(true);
+ }
+ part_create_info.row_type= table->s->row_type;
+ }
+
+ /*
+ NOTE: ha_blackhole does not support check_if_compatible_data,
+ so this always fail for blackhole tables.
+ ha_myisam compares pointers to verify that DATA/INDEX DIRECTORY is
+ the same, so any table using data/index_file_name will fail.
+ */
+ if (mysql_compare_tables(table, &part_alter_info, &part_create_info,
+ &metadata_equal))
+ {
+ my_error(ER_TABLES_DIFFERENT_METADATA, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+
+ DEBUG_SYNC(thd, "swap_partition_after_compare_tables");
+ if (!metadata_equal)
+ {
+ my_error(ER_TABLES_DIFFERENT_METADATA, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+ DBUG_ASSERT(table->s->db_create_options ==
+ part_table->s->db_create_options);
+ DBUG_ASSERT(table->s->db_options_in_use ==
+ part_table->s->db_options_in_use);
+
+ if (table_create_info.avg_row_length != part_create_info.avg_row_length)
+ {
+ my_error(ER_PARTITION_EXCHANGE_DIFFERENT_OPTION, MYF(0),
+ "AVG_ROW_LENGTH");
+ DBUG_RETURN(TRUE);
+ }
+
+ if (table_create_info.table_options != part_create_info.table_options)
+ {
+ my_error(ER_PARTITION_EXCHANGE_DIFFERENT_OPTION, MYF(0),
+ "TABLE OPTION");
+ DBUG_RETURN(TRUE);
+ }
+
+ if (table->s->table_charset != part_table->s->table_charset)
+ {
+ my_error(ER_PARTITION_EXCHANGE_DIFFERENT_OPTION, MYF(0),
+ "CHARACTER SET");
+ DBUG_RETURN(TRUE);
+ }
+
+ /*
+ NOTE: We do not support update of frm-file, i.e. change
+ max/min_rows, data/index_file_name etc.
+ The workaround is to use REORGANIZE PARTITION to rewrite
+ the frm file and then use EXCHANGE PARTITION when they are the same.
+ */
+ if (compare_partition_options(&table_create_info, part_elem))
+ DBUG_RETURN(TRUE);
+
+ DBUG_RETURN(FALSE);
+}
+
+
+/**
+ @brief Exchange partition/table with ddl log.
+
+ @details How to handle a crash in the middle of the rename (break on error):
+ 1) register in ddl_log that we are going to exchange swap_table with part.
+ 2) do the first rename (swap_table -> tmp-name) and sync the ddl_log.
+ 3) do the second rename (part -> swap_table) and sync the ddl_log.
+ 4) do the last rename (tmp-name -> part).
+ 5) mark the entry done.
+
+ Recover by:
+ 5) is done, All completed. Nothing to recover.
+ 4) is done see 3). (No mark or sync in the ddl_log...)
+ 3) is done -> try rename part -> tmp-name (ignore failure) goto 2).
+ 2) is done -> try rename swap_table -> part (ignore failure) goto 1).
+ 1) is done -> try rename tmp-name -> swap_table (ignore failure).
+ before 1) Nothing to recover...
+
+ @param thd Thread handle
+ @param name name of table/partition 1 (to be exchanged with 2)
+ @param from_name name of table/partition 2 (to be exchanged with 1)
+ @param tmp_name temporary name to use while exchaning
+ @param ht handlerton of the table/partitions
+
+ @return Operation status
+ @retval TRUE Error
+ @retval FALSE Success
+
+ @note ha_heap always succeeds in rename (since it is created upon usage).
+ This is OK when to recover from a crash since all heap are empty and the
+ recover is done early in the startup of the server (right before
+ read_init_file which can populate the tables).
+
+ And if no crash we can trust the syncs in the ddl_log.
+
+ What about if the rename is put into a background thread? That will cause
+ corruption and is avoided by the exlusive metadata lock.
+*/
+static bool exchange_name_with_ddl_log(THD *thd,
+ const char *name,
+ const char *from_name,
+ const char *tmp_name,
+ handlerton *ht)
+{
+ DDL_LOG_ENTRY exchange_entry;
+ DDL_LOG_MEMORY_ENTRY *log_entry= NULL;
+ DDL_LOG_MEMORY_ENTRY *exec_log_entry= NULL;
+ bool error= TRUE;
+ bool error_set= FALSE;
+ handler *file= NULL;
+ DBUG_ENTER("exchange_name_with_ddl_log");
+
+ if (!(file= get_new_handler(NULL, thd->mem_root, ht)))
+ {
+ mem_alloc_error(sizeof(handler));
+ DBUG_RETURN(TRUE);
+ }
+
+ /* prepare the action entry */
+ exchange_entry.entry_type= DDL_LOG_ENTRY_CODE;
+ exchange_entry.action_type= DDL_LOG_EXCHANGE_ACTION;
+ exchange_entry.next_entry= 0;
+ exchange_entry.name= name;
+ exchange_entry.from_name= from_name;
+ exchange_entry.tmp_name= tmp_name;
+ exchange_entry.handler_name= ha_resolve_storage_engine_name(ht);
+ exchange_entry.phase= EXCH_PHASE_NAME_TO_TEMP;
+
+ mysql_mutex_lock(&LOCK_gdl);
+ /*
+ write to the ddl log what to do by:
+ 1) write the action entry (i.e. which names to be exchanged)
+ 2) write the execution entry with a link to the action entry
+ */
+ DBUG_EXECUTE_IF("exchange_partition_fail_1", goto err_no_action_written;);
+ DBUG_EXECUTE_IF("exchange_partition_abort_1", DBUG_SUICIDE(););
+ if (write_ddl_log_entry(&exchange_entry, &log_entry))
+ goto err_no_action_written;
+
+ DBUG_EXECUTE_IF("exchange_partition_fail_2", goto err_no_execute_written;);
+ DBUG_EXECUTE_IF("exchange_partition_abort_2", DBUG_SUICIDE(););
+ if (write_execute_ddl_log_entry(log_entry->entry_pos, FALSE, &exec_log_entry))
+ goto err_no_execute_written;
+ /* ddl_log is written and synced */
+
+ mysql_mutex_unlock(&LOCK_gdl);
+ /*
+ Execute the name exchange.
+ Do one rename, increase the phase, update the action entry and sync.
+ In case of errors in the ddl_log we must fail and let the ddl_log try
+ to revert the changes, since otherwise it could revert the command after
+ we sent OK to the client.
+ */
+ /* call rename table from table to tmp-name */
+ DBUG_EXECUTE_IF("exchange_partition_fail_3",
+ my_error(ER_ERROR_ON_RENAME, MYF(0),
+ name, tmp_name, 0, "n/a");
+ error_set= TRUE;
+ goto err_rename;);
+ DBUG_EXECUTE_IF("exchange_partition_abort_3", DBUG_SUICIDE(););
+ if (file->ha_rename_table(name, tmp_name))
+ {
+ char errbuf[MYSYS_STRERROR_SIZE];
+ my_strerror(errbuf, sizeof(errbuf), my_errno);
+ my_error(ER_ERROR_ON_RENAME, MYF(0), name, tmp_name,
+ my_errno, errbuf);
+ error_set= TRUE;
+ goto err_rename;
+ }
+ DBUG_EXECUTE_IF("exchange_partition_fail_4", goto err_rename;);
+ DBUG_EXECUTE_IF("exchange_partition_abort_4", DBUG_SUICIDE(););
+ if (deactivate_ddl_log_entry(log_entry->entry_pos))
+ goto err_rename;
+
+ /* call rename table from partition to table */
+ DBUG_EXECUTE_IF("exchange_partition_fail_5",
+ my_error(ER_ERROR_ON_RENAME, MYF(0),
+ from_name, name, 0, "n/a");
+ error_set= TRUE;
+ goto err_rename;);
+ DBUG_EXECUTE_IF("exchange_partition_abort_5", DBUG_SUICIDE(););
+ if (file->ha_rename_table(from_name, name))
+ {
+ char errbuf[MYSYS_STRERROR_SIZE];
+ my_strerror(errbuf, sizeof(errbuf), my_errno);
+ my_error(ER_ERROR_ON_RENAME, MYF(0), from_name, name,
+ my_errno, errbuf);
+ error_set= TRUE;
+ goto err_rename;
+ }
+ DBUG_EXECUTE_IF("exchange_partition_fail_6", goto err_rename;);
+ DBUG_EXECUTE_IF("exchange_partition_abort_6", DBUG_SUICIDE(););
+ if (deactivate_ddl_log_entry(log_entry->entry_pos))
+ goto err_rename;
+
+ /* call rename table from tmp-nam to partition */
+ DBUG_EXECUTE_IF("exchange_partition_fail_7",
+ my_error(ER_ERROR_ON_RENAME, MYF(0),
+ tmp_name, from_name, 0, "n/a");
+ error_set= TRUE;
+ goto err_rename;);
+ DBUG_EXECUTE_IF("exchange_partition_abort_7", DBUG_SUICIDE(););
+ if (file->ha_rename_table(tmp_name, from_name))
+ {
+ char errbuf[MYSYS_STRERROR_SIZE];
+ my_strerror(errbuf, sizeof(errbuf), my_errno);
+ my_error(ER_ERROR_ON_RENAME, MYF(0), tmp_name, from_name,
+ my_errno, errbuf);
+ error_set= TRUE;
+ goto err_rename;
+ }
+ DBUG_EXECUTE_IF("exchange_partition_fail_8", goto err_rename;);
+ DBUG_EXECUTE_IF("exchange_partition_abort_8", DBUG_SUICIDE(););
+ if (deactivate_ddl_log_entry(log_entry->entry_pos))
+ goto err_rename;
+
+ /* The exchange is complete and ddl_log is deactivated */
+ DBUG_EXECUTE_IF("exchange_partition_fail_9", goto err_rename;);
+ DBUG_EXECUTE_IF("exchange_partition_abort_9", DBUG_SUICIDE(););
+ /* all OK */
+ error= FALSE;
+ delete file;
+ DBUG_RETURN(error);
+err_rename:
+ /*
+ Nothing to do if any of these commands fails :( the commands itselfs
+ will log to the error log about the failures...
+ */
+ /* execute the ddl log entry to revert the renames */
+ (void) execute_ddl_log_entry(current_thd, log_entry->entry_pos);
+ mysql_mutex_lock(&LOCK_gdl);
+ /* mark the execute log entry done */
+ (void) write_execute_ddl_log_entry(0, TRUE, &exec_log_entry);
+ /* release the execute log entry */
+ (void) release_ddl_log_memory_entry(exec_log_entry);
+err_no_execute_written:
+ /* release the action log entry */
+ (void) release_ddl_log_memory_entry(log_entry);
+err_no_action_written:
+ mysql_mutex_unlock(&LOCK_gdl);
+ delete file;
+ if (!error_set)
+ my_error(ER_DDL_LOG_ERROR, MYF(0));
+ DBUG_RETURN(error);
+}
+
+
+/**
+ @brief Swap places between a partition and a table.
+
+ @details Verify that the tables are compatible (same engine, definition etc),
+ verify that all rows in the table will fit in the partition,
+ if all OK, rename table to tmp name, rename partition to table
+ and finally rename tmp name to partition.
+
+ 1) Take upgradable mdl, open tables and then lock them (inited in parse)
+ 2) Verify that metadata matches
+ 3) verify data
+ 4) Upgrade to exclusive mdl for both tables
+ 5) Rename table <-> partition
+ 6) Rely on close_thread_tables to release mdl and table locks
+
+ @param thd Thread handle
+ @param table_list Table where the partition exists as first table,
+ Table to swap with the partition as second table
+ @param alter_info Contains partition name to swap
+
+ @note This is a DDL operation so triggers will not be used.
+*/
+bool Sql_cmd_alter_table_exchange_partition::
+ exchange_partition(THD *thd, TABLE_LIST *table_list, Alter_info *alter_info)
+{
+ TABLE *part_table, *swap_table;
+ TABLE_LIST *swap_table_list;
+ handlerton *table_hton;
+ partition_element *part_elem;
+ char *partition_name;
+ char temp_name[FN_REFLEN+1];
+ char part_file_name[FN_REFLEN+1];
+ char swap_file_name[FN_REFLEN+1];
+ char temp_file_name[FN_REFLEN+1];
+ uint swap_part_id;
+ uint part_file_name_len;
+ Alter_table_prelocking_strategy alter_prelocking_strategy;
+ MDL_ticket *swap_table_mdl_ticket= NULL;
+ MDL_ticket *part_table_mdl_ticket= NULL;
+ uint table_counter;
+ bool error= TRUE;
+ DBUG_ENTER("mysql_exchange_partition");
+ DBUG_ASSERT(alter_info->flags & Alter_info::ALTER_EXCHANGE_PARTITION);
+
+ /* Don't allow to exchange with log table */
+ swap_table_list= table_list->next_local;
+ if (check_if_log_table(swap_table_list->db_length, swap_table_list->db,
+ swap_table_list->table_name_length,
+ swap_table_list->table_name, 0))
+ {
+ my_error(ER_WRONG_USAGE, MYF(0), "PARTITION", "log table");
+ DBUG_RETURN(TRUE);
+ }
+
+ /*
+ Currently no MDL lock that allows both read and write and is upgradeable
+ to exclusive, so leave the lock type to TL_WRITE_ALLOW_READ also on the
+ partitioned table.
+
+ TODO: add MDL lock that allows both read and write and is upgradable to
+ exclusive lock. This would allow to continue using the partitioned table
+ also with update/insert/delete while the verification of the swap table
+ is running.
+ */
+
+ /*
+ NOTE: It is not possible to exchange a crashed partition/table since
+ we need some info from the engine, which we can only access after open,
+ to be able to verify the structure/metadata.
+ */
+ table_list->mdl_request.set_type(MDL_SHARED_NO_WRITE);
+ if (open_tables(thd, &table_list, &table_counter, 0,
+ &alter_prelocking_strategy))
+ DBUG_RETURN(true);
+
+ part_table= table_list->table;
+ swap_table= swap_table_list->table;
+
+ if (check_exchange_partition(swap_table, part_table))
+ DBUG_RETURN(TRUE);
+
+ /* set lock pruning on first table */
+ partition_name= alter_info->partition_names.head();
+ if (table_list->table->part_info->
+ set_named_partition_bitmap(partition_name, strlen(partition_name)))
+ DBUG_RETURN(true);
+
+ if (lock_tables(thd, table_list, table_counter, 0))
+ DBUG_RETURN(true);
+
+
+ table_hton= swap_table->file->ht;
+
+ THD_STAGE_INFO(thd, stage_verifying_table);
+
+ /* Will append the partition name later in part_info->get_part_elem() */
+ part_file_name_len= build_table_filename(part_file_name,
+ sizeof(part_file_name),
+ table_list->db,
+ table_list->table_name,
+ "", 0);
+ build_table_filename(swap_file_name,
+ sizeof(swap_file_name),
+ swap_table_list->db,
+ swap_table_list->table_name,
+ "", 0);
+ /* create a unique temp name #sqlx-nnnn_nnnn, x for eXchange */
+ my_snprintf(temp_name, sizeof(temp_name), "%sx-%lx_%lx",
+ tmp_file_prefix, current_pid, thd->thread_id);
+ if (lower_case_table_names)
+ my_casedn_str(files_charset_info, temp_name);
+ build_table_filename(temp_file_name, sizeof(temp_file_name),
+ table_list->next_local->db,
+ temp_name, "", FN_IS_TMP);
+
+ if (!(part_elem= part_table->part_info->get_part_elem(partition_name,
+ part_file_name +
+ part_file_name_len,
+ &swap_part_id)))
+ {
+ // my_error(ER_UNKNOWN_PARTITION, MYF(0), partition_name,
+ // part_table->alias);
+ DBUG_RETURN(TRUE);
+ }
+
+ if (swap_part_id == NOT_A_PARTITION_ID)
+ {
+ DBUG_ASSERT(part_table->part_info->is_sub_partitioned());
+ my_error(ER_PARTITION_INSTEAD_OF_SUBPARTITION, MYF(0));
+ DBUG_RETURN(TRUE);
+ }
+
+ if (compare_table_with_partition(thd, swap_table, part_table, part_elem))
+ DBUG_RETURN(TRUE);
+
+ /* Table and partition has same structure/options, OK to exchange */
+
+ thd_proc_info(thd, "verifying data with partition");
+
+ if (verify_data_with_partition(swap_table, part_table, swap_part_id))
+ DBUG_RETURN(TRUE);
+
+ /*
+ Get exclusive mdl lock on both tables, alway the non partitioned table
+ first. Remember the tickets for downgrading locks later.
+ */
+ swap_table_mdl_ticket= swap_table->mdl_ticket;
+ part_table_mdl_ticket= part_table->mdl_ticket;
+
+ /*
+ No need to set used_partitions to only propagate
+ HA_EXTRA_PREPARE_FOR_RENAME to one part since no built in engine uses
+ that flag. And the action would probably be to force close all other
+ instances which is what we are doing any way.
+ */
+ if (wait_while_table_is_used(thd, swap_table, HA_EXTRA_PREPARE_FOR_RENAME) ||
+ wait_while_table_is_used(thd, part_table, HA_EXTRA_PREPARE_FOR_RENAME))
+ goto err;
+
+ DEBUG_SYNC(thd, "swap_partition_after_wait");
+
+ close_all_tables_for_name(thd, swap_table->s, HA_EXTRA_NOT_USED, NULL);
+ close_all_tables_for_name(thd, part_table->s, HA_EXTRA_NOT_USED, NULL);
+
+ DEBUG_SYNC(thd, "swap_partition_before_rename");
+
+ if (exchange_name_with_ddl_log(thd, swap_file_name, part_file_name,
+ temp_file_name, table_hton))
+ goto err;
+
+ /*
+ Reopen tables under LOCK TABLES. Ignore the return value for now. It's
+ better to keep master/slave in consistent state. Alternative would be to
+ try to revert the exchange operation and issue error.
+ */
+ (void) thd->locked_tables_list.reopen_tables(thd);
+
+ if ((error= write_bin_log(thd, TRUE, thd->query(), thd->query_length())))
+ {
+ /*
+ The error is reported in write_bin_log().
+ We try to revert to make it easier to keep the master/slave in sync.
+ */
+ (void) exchange_name_with_ddl_log(thd, part_file_name, swap_file_name,
+ temp_file_name, table_hton);
+ }
+
+err:
+ if (thd->locked_tables_mode)
+ {
+ if (swap_table_mdl_ticket)
+ swap_table_mdl_ticket->downgrade_lock(MDL_SHARED_NO_READ_WRITE);
+ if (part_table_mdl_ticket)
+ part_table_mdl_ticket->downgrade_lock(MDL_SHARED_NO_READ_WRITE);
+ }
+
+ if (!error)
+ my_ok(thd);
+
+ // For query cache
+ table_list->table= NULL;
+ table_list->next_local->table= NULL;
+ query_cache_invalidate3(thd, table_list, FALSE);
+
+ DBUG_RETURN(error);
+}
+
+bool Sql_cmd_alter_table_analyze_partition::execute(THD *thd)
{
bool res;
- DBUG_ENTER("Alter_table_analyze_partition_statement::execute");
+ DBUG_ENTER("Sql_cmd_alter_table_analyze_partition::execute");
/*
Flag that it is an ALTER command which administrates partitions, used
by ha_partition
*/
- m_lex->alter_info.flags|= ALTER_ADMIN_PARTITION;
-
- res= Analyze_table_statement::execute(thd);
+ thd->lex->alter_info.flags|= Alter_info::ALTER_ADMIN_PARTITION;
+ res= Sql_cmd_analyze_table::execute(thd);
+
DBUG_RETURN(res);
}
-bool Alter_table_check_partition_statement::execute(THD *thd)
+bool Sql_cmd_alter_table_check_partition::execute(THD *thd)
{
bool res;
- DBUG_ENTER("Alter_table_check_partition_statement::execute");
+ DBUG_ENTER("Sql_cmd_alter_table_check_partition::execute");
/*
Flag that it is an ALTER command which administrates partitions, used
by ha_partition
*/
- m_lex->alter_info.flags|= ALTER_ADMIN_PARTITION;
+ thd->lex->alter_info.flags|= Alter_info::ALTER_ADMIN_PARTITION;
- res= Check_table_statement::execute(thd);
+ res= Sql_cmd_check_table::execute(thd);
DBUG_RETURN(res);
}
-bool Alter_table_optimize_partition_statement::execute(THD *thd)
+bool Sql_cmd_alter_table_optimize_partition::execute(THD *thd)
{
bool res;
DBUG_ENTER("Alter_table_optimize_partition_statement::execute");
@@ -79,46 +710,49 @@ bool Alter_table_optimize_partition_statement::execute(THD *thd)
Flag that it is an ALTER command which administrates partitions, used
by ha_partition
*/
- m_lex->alter_info.flags|= ALTER_ADMIN_PARTITION;
+ thd->lex->alter_info.flags|= Alter_info::ALTER_ADMIN_PARTITION;
- res= Optimize_table_statement::execute(thd);
+ res= Sql_cmd_optimize_table::execute(thd);
DBUG_RETURN(res);
}
-bool Alter_table_repair_partition_statement::execute(THD *thd)
+bool Sql_cmd_alter_table_repair_partition::execute(THD *thd)
{
bool res;
- DBUG_ENTER("Alter_table_repair_partition_statement::execute");
+ DBUG_ENTER("Sql_cmd_alter_table_repair_partition::execute");
/*
Flag that it is an ALTER command which administrates partitions, used
by ha_partition
*/
- m_lex->alter_info.flags|= ALTER_ADMIN_PARTITION;
+ thd->lex->alter_info.flags|= Alter_info::ALTER_ADMIN_PARTITION;
- res= Repair_table_statement::execute(thd);
+ res= Sql_cmd_repair_table::execute(thd);
DBUG_RETURN(res);
}
-bool Alter_table_truncate_partition_statement::execute(THD *thd)
+bool Sql_cmd_alter_table_truncate_partition::execute(THD *thd)
{
int error;
ha_partition *partition;
ulong timeout= thd->variables.lock_wait_timeout;
TABLE_LIST *first_table= thd->lex->select_lex.table_list.first;
+ Alter_info *alter_info= &thd->lex->alter_info;
+ uint table_counter, i;
+ List<String> partition_names_list;
bool binlog_stmt;
- DBUG_ENTER("Alter_table_truncate_partition_statement::execute");
+ DBUG_ENTER("Sql_cmd_alter_table_truncate_partition::execute");
/*
Flag that it is an ALTER command which administrates partitions, used
by ha_partition.
*/
- m_lex->alter_info.flags|= ALTER_ADMIN_PARTITION |
- ALTER_TRUNCATE_PARTITION;
+ thd->lex->alter_info.flags|= Alter_info::ALTER_ADMIN_PARTITION |
+ Alter_info::ALTER_TRUNCATE_PARTITION;
/* Fix the lock types (not the same as ordinary ALTER TABLE). */
first_table->lock_type= TL_WRITE;
@@ -134,8 +768,8 @@ bool Alter_table_truncate_partition_statement::execute(THD *thd)
if (check_one_table_access(thd, DROP_ACL, first_table))
DBUG_RETURN(TRUE);
- if (open_and_lock_tables(thd, first_table, FALSE, 0))
- DBUG_RETURN(TRUE);
+ if (open_tables(thd, &first_table, &table_counter, 0))
+ DBUG_RETURN(true);
/*
TODO: Add support for TRUNCATE PARTITION for NDB and other
@@ -149,24 +783,45 @@ bool Alter_table_truncate_partition_statement::execute(THD *thd)
DBUG_RETURN(TRUE);
}
+
+ /*
+ Prune all, but named partitions,
+ to avoid excessive calls to external_lock().
+ */
+ List_iterator<char> partition_names_it(alter_info->partition_names);
+ uint num_names= alter_info->partition_names.elements;
+ for (i= 0; i < num_names; i++)
+ {
+ char *partition_name= partition_names_it++;
+ String *str_partition_name= new (thd->mem_root)
+ String(partition_name, system_charset_info);
+ if (!str_partition_name)
+ DBUG_RETURN(true);
+ partition_names_list.push_back(str_partition_name);
+ }
+ first_table->partition_names= &partition_names_list;
+ if (first_table->table->part_info->set_partition_bitmaps(first_table))
+ DBUG_RETURN(true);
+
+ if (lock_tables(thd, first_table, table_counter, 0))
+ DBUG_RETURN(true);
+
/*
Under locked table modes this might still not be an exclusive
lock. Hence, upgrade the lock since the handler truncate method
mandates an exclusive metadata lock.
*/
MDL_ticket *ticket= first_table->table->mdl_ticket;
- if (thd->mdl_context.upgrade_shared_lock_to_exclusive(ticket, timeout))
+ if (thd->mdl_context.upgrade_shared_lock(ticket, MDL_EXCLUSIVE, timeout))
DBUG_RETURN(TRUE);
tdc_remove_table(thd, TDC_RT_REMOVE_NOT_OWN, first_table->db,
first_table->table_name, FALSE);
- partition= (ha_partition *) first_table->table->file;
-
+ partition= (ha_partition*) first_table->table->file;
/* Invoke the handler method responsible for truncating the partition. */
- if ((error= partition->truncate_partition(&thd->lex->alter_info,
- &binlog_stmt)))
- first_table->table->file->print_error(error, MYF(0));
+ if ((error= partition->truncate_partition(alter_info, &binlog_stmt)))
+ partition->print_error(error, MYF(0));
/*
All effects of a truncate operation are committed even if the
@@ -184,11 +839,15 @@ bool Alter_table_truncate_partition_statement::execute(THD *thd)
to a shared one.
*/
if (thd->locked_tables_mode)
- ticket->downgrade_exclusive_lock(MDL_SHARED_NO_READ_WRITE);
+ ticket->downgrade_lock(MDL_SHARED_NO_READ_WRITE);
if (! error)
my_ok(thd);
+ // Invalidate query cache
+ DBUG_ASSERT(!first_table->next_local);
+ query_cache_invalidate3(thd, first_table, FALSE);
+
DBUG_RETURN(error);
}
diff --git a/sql/sql_partition_admin.h b/sql/sql_partition_admin.h
index 479371c3b4d..9c53744d9bc 100644
--- a/sql/sql_partition_admin.h
+++ b/sql/sql_partition_admin.h
@@ -22,214 +22,247 @@
Stub class that returns a error if the partition storage engine is
not supported.
*/
-class Partition_statement_unsupported : public Sql_statement
+class Sql_cmd_partition_unsupported : public Sql_cmd
{
public:
- Partition_statement_unsupported(LEX *lex)
- : Sql_statement(lex)
+ Sql_cmd_partition_unsupported()
{}
- ~Partition_statement_unsupported()
+ ~Sql_cmd_partition_unsupported()
{}
+ /* Override SQLCOM_*, since it is an ALTER command */
+ virtual enum_sql_command sql_command_code() const
+ {
+ return SQLCOM_ALTER_TABLE;
+ }
+
bool execute(THD *thd);
};
-class Alter_table_analyze_partition_statement :
- public Partition_statement_unsupported
+class Sql_cmd_alter_table_exchange_partition :
+ public Sql_cmd_partition_unsupported
{
public:
- Alter_table_analyze_partition_statement(LEX *lex)
- : Partition_statement_unsupported(lex)
+ Sql_cmd_alter_table_exchange_partition()
{}
- ~Alter_table_analyze_partition_statement()
+ ~Sql_cmd_alter_table_exchange_partition()
{}
};
-class Alter_table_check_partition_statement :
- public Partition_statement_unsupported
+class Sql_cmd_alter_table_analyze_partition :
+ public Sql_cmd_partition_unsupported
{
public:
- Alter_table_check_partition_statement(LEX *lex)
- : Partition_statement_unsupported(lex)
+ Sql_cmd_alter_table_analyze_partition()
{}
- ~Alter_table_check_partition_statement()
+ ~Sql_cmd_alter_table_analyze_partition()
{}
};
-class Alter_table_optimize_partition_statement :
- public Partition_statement_unsupported
+class Sql_cmd_alter_table_check_partition :
+ public Sql_cmd_partition_unsupported
{
public:
- Alter_table_optimize_partition_statement(LEX *lex)
- : Partition_statement_unsupported(lex)
+ Sql_cmd_alter_table_check_partition()
{}
- ~Alter_table_optimize_partition_statement()
+ ~Sql_cmd_alter_table_check_partition()
{}
};
-class Alter_table_repair_partition_statement :
- public Partition_statement_unsupported
+class Sql_cmd_alter_table_optimize_partition :
+ public Sql_cmd_partition_unsupported
{
public:
- Alter_table_repair_partition_statement(LEX *lex)
- : Partition_statement_unsupported(lex)
+ Sql_cmd_alter_table_optimize_partition()
{}
- ~Alter_table_repair_partition_statement()
+ ~Sql_cmd_alter_table_optimize_partition()
{}
};
-class Alter_table_truncate_partition_statement :
- public Partition_statement_unsupported
+class Sql_cmd_alter_table_repair_partition :
+ public Sql_cmd_partition_unsupported
{
public:
- Alter_table_truncate_partition_statement(LEX *lex)
- : Partition_statement_unsupported(lex)
+ Sql_cmd_alter_table_repair_partition()
{}
- ~Alter_table_truncate_partition_statement()
+ ~Sql_cmd_alter_table_repair_partition()
{}
};
+class Sql_cmd_alter_table_truncate_partition :
+ public Sql_cmd_partition_unsupported
+{
+public:
+ Sql_cmd_alter_table_truncate_partition()
+ {}
+
+ ~Sql_cmd_alter_table_truncate_partition()
+ {}
+};
+
#else
/**
Class that represents the ALTER TABLE t1 ANALYZE PARTITION p statement.
*/
-class Alter_table_analyze_partition_statement : public Analyze_table_statement
+class Sql_cmd_alter_table_exchange_partition : public Sql_cmd_common_alter_table
{
public:
/**
- Constructor, used to represent a ALTER TABLE ANALYZE PARTITION statement.
- @param lex the LEX structure for this statement.
+ Constructor, used to represent a ALTER TABLE EXCHANGE PARTITION statement.
*/
- Alter_table_analyze_partition_statement(LEX *lex)
- : Analyze_table_statement(lex)
+ Sql_cmd_alter_table_exchange_partition()
+ : Sql_cmd_common_alter_table()
{}
- ~Alter_table_analyze_partition_statement()
+ ~Sql_cmd_alter_table_exchange_partition()
{}
+ bool execute(THD *thd);
+
+private:
+ bool exchange_partition(THD *thd, TABLE_LIST *, Alter_info *);
+};
+
+
+/**
+ Class that represents the ALTER TABLE t1 ANALYZE PARTITION p statement.
+*/
+class Sql_cmd_alter_table_analyze_partition : public Sql_cmd_analyze_table
+{
+public:
/**
- Execute a ALTER TABLE ANALYZE PARTITION statement at runtime.
- @param thd the current thread.
- @return false on success.
+ Constructor, used to represent a ALTER TABLE ANALYZE PARTITION statement.
*/
+ Sql_cmd_alter_table_analyze_partition()
+ : Sql_cmd_analyze_table()
+ {}
+
+ ~Sql_cmd_alter_table_analyze_partition()
+ {}
+
bool execute(THD *thd);
+
+ /* Override SQLCOM_ANALYZE, since it is an ALTER command */
+ virtual enum_sql_command sql_command_code() const
+ {
+ return SQLCOM_ALTER_TABLE;
+ }
};
/**
Class that represents the ALTER TABLE t1 CHECK PARTITION p statement.
*/
-class Alter_table_check_partition_statement : public Check_table_statement
+class Sql_cmd_alter_table_check_partition : public Sql_cmd_check_table
{
public:
/**
Constructor, used to represent a ALTER TABLE CHECK PARTITION statement.
- @param lex the LEX structure for this statement.
*/
- Alter_table_check_partition_statement(LEX *lex)
- : Check_table_statement(lex)
+ Sql_cmd_alter_table_check_partition()
+ : Sql_cmd_check_table()
{}
- ~Alter_table_check_partition_statement()
+ ~Sql_cmd_alter_table_check_partition()
{}
- /**
- Execute a ALTER TABLE CHECK PARTITION statement at runtime.
- @param thd the current thread.
- @return false on success.
- */
bool execute(THD *thd);
+
+ /* Override SQLCOM_CHECK, since it is an ALTER command */
+ virtual enum_sql_command sql_command_code() const
+ {
+ return SQLCOM_ALTER_TABLE;
+ }
};
/**
Class that represents the ALTER TABLE t1 OPTIMIZE PARTITION p statement.
*/
-class Alter_table_optimize_partition_statement : public Optimize_table_statement
+class Sql_cmd_alter_table_optimize_partition : public Sql_cmd_optimize_table
{
public:
/**
Constructor, used to represent a ALTER TABLE OPTIMIZE PARTITION statement.
- @param lex the LEX structure for this statement.
*/
- Alter_table_optimize_partition_statement(LEX *lex)
- : Optimize_table_statement(lex)
+ Sql_cmd_alter_table_optimize_partition()
+ : Sql_cmd_optimize_table()
{}
- ~Alter_table_optimize_partition_statement()
+ ~Sql_cmd_alter_table_optimize_partition()
{}
- /**
- Execute a ALTER TABLE OPTIMIZE PARTITION statement at runtime.
- @param thd the current thread.
- @return false on success.
- */
bool execute(THD *thd);
+
+ /* Override SQLCOM_OPTIMIZE, since it is an ALTER command */
+ virtual enum_sql_command sql_command_code() const
+ {
+ return SQLCOM_ALTER_TABLE;
+ }
};
/**
Class that represents the ALTER TABLE t1 REPAIR PARTITION p statement.
*/
-class Alter_table_repair_partition_statement : public Repair_table_statement
+class Sql_cmd_alter_table_repair_partition : public Sql_cmd_repair_table
{
public:
/**
Constructor, used to represent a ALTER TABLE REPAIR PARTITION statement.
- @param lex the LEX structure for this statement.
*/
- Alter_table_repair_partition_statement(LEX *lex)
- : Repair_table_statement(lex)
+ Sql_cmd_alter_table_repair_partition()
+ : Sql_cmd_repair_table()
{}
- ~Alter_table_repair_partition_statement()
+ ~Sql_cmd_alter_table_repair_partition()
{}
- /**
- Execute a ALTER TABLE REPAIR PARTITION statement at runtime.
- @param thd the current thread.
- @return false on success.
- */
bool execute(THD *thd);
+
+ /* Override SQLCOM_REPAIR, since it is an ALTER command */
+ virtual enum_sql_command sql_command_code() const
+ {
+ return SQLCOM_ALTER_TABLE;
+ }
};
/**
Class that represents the ALTER TABLE t1 TRUNCATE PARTITION p statement.
*/
-class Alter_table_truncate_partition_statement : public Sql_statement
+class Sql_cmd_alter_table_truncate_partition : public Sql_cmd_truncate_table
{
public:
/**
Constructor, used to represent a ALTER TABLE TRUNCATE PARTITION statement.
- @param lex the LEX structure for this statement.
*/
- Alter_table_truncate_partition_statement(LEX *lex)
- : Sql_statement(lex)
+ Sql_cmd_alter_table_truncate_partition()
{}
- virtual ~Alter_table_truncate_partition_statement()
+ virtual ~Sql_cmd_alter_table_truncate_partition()
{}
- /**
- Execute a ALTER TABLE TRUNCATE PARTITION statement at runtime.
- @param thd the current thread.
- @return false on success.
- */
bool execute(THD *thd);
+
+ /* Override SQLCOM_TRUNCATE, since it is an ALTER command */
+ virtual enum_sql_command sql_command_code() const
+ {
+ return SQLCOM_ALTER_TABLE;
+ }
};
#endif /* WITH_PARTITION_STORAGE_ENGINE */
diff --git a/sql/sql_plist.h b/sql/sql_plist.h
index 2b6f1067321..8e8c7fcaefb 100644
--- a/sql/sql_plist.h
+++ b/sql/sql_plist.h
@@ -18,7 +18,7 @@
#include <my_global.h>
-template <typename T, typename B, typename C, typename I>
+template <typename T, typename L>
class I_P_List_iterator;
class I_P_List_null_counter;
template <typename T> class I_P_List_no_push_back;
@@ -151,10 +151,14 @@ public:
I::set_last(&rhs.m_first);
C::swap(rhs);
}
+ typedef B Adapter;
+ typedef I_P_List<T, B, C, I> Base;
+ typedef I_P_List_iterator<T, Base> Iterator;
+ typedef I_P_List_iterator<const T, Base> Const_Iterator;
#ifndef _lint
- friend class I_P_List_iterator<T, B, C, I>;
+ friend class I_P_List_iterator<T, Base>;
+ friend class I_P_List_iterator<const T, Base>;
#endif
- typedef I_P_List_iterator<T, B, C, I> Iterator;
};
@@ -162,33 +166,33 @@ public:
Iterator for I_P_List.
*/
-template <typename T, typename B,
- typename C = I_P_List_null_counter,
- typename I = I_P_List_no_push_back<T> >
+template <typename T, typename L>
class I_P_List_iterator
{
- const I_P_List<T, B, C, I> *list;
+ const L *list;
T *current;
public:
- I_P_List_iterator(const I_P_List<T, B, C, I> &a)
+ I_P_List_iterator(const L &a)
: list(&a), current(a.m_first) {}
- I_P_List_iterator(const I_P_List<T, B, C, I> &a, T* current_arg)
+ I_P_List_iterator(const L &a, T* current_arg)
: list(&a), current(current_arg) {}
- inline void init(const I_P_List<T, B, C, I> &a)
+ inline void init(const L &a)
{
list= &a;
current= a.m_first;
}
+ /* Operator for it++ */
inline T* operator++(int)
{
T *result= current;
if (result)
- current= *B::next_ptr(current);
+ current= *L::Adapter::next_ptr(current);
return result;
}
+ /* Operator for ++it */
inline T* operator++()
{
- current= *B::next_ptr(current);
+ current= *L::Adapter::next_ptr(current);
return current;
}
inline void rewind()
@@ -207,7 +211,7 @@ template <typename T, T* T::*next, T** T::*prev>
struct I_P_List_adapter
{
static inline T **next_ptr(T *el) { return &(el->*next); }
-
+ static inline const T* const* next_ptr(const T *el) { return &(el->*next); }
static inline T ***prev_ptr(T *el) { return &(el->*prev); }
};
diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc
index 1260fb8cb3a..e89054ac849 100644
--- a/sql/sql_plugin.cc
+++ b/sql/sql_plugin.cc
@@ -686,7 +686,7 @@ static my_bool read_maria_plugin_info(struct st_plugin_dl *plugin_dl,
for (i=0;
(old= (struct st_maria_plugin *)(ptr + i * sizeof_st_plugin))->info;
i++)
- memcpy(cur + i, old, min(sizeof(cur[i]), sizeof_st_plugin));
+ memcpy(cur + i, old, MY_MIN(sizeof(cur[i]), sizeof_st_plugin));
sym= cur;
plugin_dl->allocated= true;
@@ -2009,7 +2009,7 @@ static bool finalize_install(THD *thd, TABLE *table, const LEX_STRING *name)
if (tmp->state == PLUGIN_IS_DISABLED)
{
if (global_system_variables.log_warnings)
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_CANT_INITIALIZE_UDF, ER(ER_CANT_INITIALIZE_UDF),
name->str, "Plugin is disabled");
}
@@ -2170,7 +2170,7 @@ static bool do_uninstall(THD *thd, TABLE *table, const LEX_STRING *name)
plugin->state= PLUGIN_IS_DELETED;
if (plugin->ref_count)
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
WARN_PLUGIN_BUSY, ER(WARN_PLUGIN_BUSY));
else
reap_needed= true;
diff --git a/sql/sql_plugin_services.h b/sql/sql_plugin_services.h
index 838b994b6e8..6b70048345a 100644
--- a/sql/sql_plugin_services.h
+++ b/sql/sql_plugin_services.h
@@ -59,6 +59,11 @@ static struct thd_timezone_service_st thd_timezone_handler= {
thd_gmt_sec_to_TIME
};
+static struct my_sha1_service_st my_sha1_handler = {
+ my_sha1,
+ my_sha1_multi
+};
+
static struct st_service_ref list_of_services[]=
{
{ "my_snprintf_service", VERSION_my_snprintf, &my_snprintf_handler },
@@ -68,5 +73,6 @@ static struct st_service_ref list_of_services[]=
{ "debug_sync_service", VERSION_debug_sync, 0 }, // updated in plugin_init()
{ "thd_kill_statement_service", VERSION_kill_statement, &thd_kill_statement_handler },
{ "thd_timezone_service", VERSION_thd_timezone, &thd_timezone_handler },
+ { "my_sha1_service", VERSION_my_sha1, &my_sha1_handler}
};
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index 220a2a16db6..120cfc3e350 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -106,6 +106,7 @@ When one supplies long data for a placeholder:
#include "sp_head.h"
#include "sp.h"
#include "sp_cache.h"
+#include "sql_handler.h" // mysql_ha_rm_tables
#include "probes_mysql.h"
#ifdef EMBEDDED_LIBRARY
/* include MYSQL_BIND headers */
@@ -343,7 +344,7 @@ static bool send_prep_stmt(Prepared_statement *stmt, uint columns)
int2store(buff+5, columns);
int2store(buff+7, stmt->param_count);
buff[9]= 0; // Guard against a 4.1 client
- tmp= min(stmt->thd->warning_info->statement_warn_count(), 65535);
+ tmp= MY_MIN(stmt->thd->get_stmt_da()->current_statement_warn_count(), 65535);
int2store(buff+10, tmp);
/*
@@ -360,7 +361,7 @@ static bool send_prep_stmt(Prepared_statement *stmt, uint columns)
if (!error)
/* Flag that a response has already been sent */
- thd->stmt_da->disable_status();
+ thd->get_stmt_da()->disable_status();
DBUG_RETURN(error);
}
@@ -373,7 +374,7 @@ static bool send_prep_stmt(Prepared_statement *stmt,
thd->client_stmt_id= stmt->id;
thd->client_param_count= stmt->param_count;
thd->clear_error();
- thd->stmt_da->disable_status();
+ thd->get_stmt_da()->disable_status();
return 0;
}
@@ -1253,6 +1254,17 @@ static bool mysql_test_insert(Prepared_statement *stmt,
List_item *values;
DBUG_ENTER("mysql_test_insert");
+ /*
+ Since INSERT DELAYED doesn't support temporary tables, we could
+ not pre-open temporary tables for SQLCOM_INSERT / SQLCOM_REPLACE.
+ Open them here instead.
+ */
+ if (table_list->lock_type != TL_WRITE_DELAYED)
+ {
+ if (open_temporary_tables(thd, table_list))
+ goto error;
+ }
+
if (insert_precheck(thd, table_list))
goto error;
@@ -1820,6 +1832,13 @@ static bool mysql_test_create_view(Prepared_statement *stmt)
if (create_view_precheck(thd, tables, view, lex->create_view_mode))
goto err;
+ /*
+ Since we can't pre-open temporary tables for SQLCOM_CREATE_VIEW,
+ (see mysql_create_view) we have to do it here instead.
+ */
+ if (open_temporary_tables(thd, tables))
+ goto err;
+
if (open_normal_and_derived_tables(thd, tables, MYSQL_OPEN_FORCE_SHARED_MDL,
DT_PREPARE))
goto err;
@@ -2055,7 +2074,20 @@ static bool check_prepared_statement(Prepared_statement *stmt)
/* Reset warning count for each query that uses tables */
if (tables)
- thd->warning_info->opt_clear_warning_info(thd->query_id);
+ thd->get_stmt_da()->opt_clear_warning_info(thd->query_id);
+
+ if (sql_command_flags[sql_command] & CF_HA_CLOSE)
+ mysql_ha_rm_tables(thd, tables);
+
+ /*
+ Open temporary tables that are known now. Temporary tables added by
+ prelocking will be opened afterwards (during open_tables()).
+ */
+ if (sql_command_flags[sql_command] & CF_PREOPEN_TMP_TABLES)
+ {
+ if (open_temporary_tables(thd, tables))
+ goto error;
+ }
switch (sql_command) {
case SQLCOM_REPLACE:
@@ -2859,7 +2891,7 @@ void mysqld_stmt_close(THD *thd, char *packet)
Prepared_statement *stmt;
DBUG_ENTER("mysqld_stmt_close");
- thd->stmt_da->disable_status();
+ thd->get_stmt_da()->disable_status();
if (!(stmt= find_prepared_statement(thd, stmt_id)))
DBUG_VOID_RETURN;
@@ -2935,7 +2967,7 @@ void mysql_stmt_get_longdata(THD *thd, char *packet, ulong packet_length)
status_var_increment(thd->status_var.com_stmt_send_long_data);
- thd->stmt_da->disable_status();
+ thd->get_stmt_da()->disable_status();
#ifndef EMBEDDED_LIBRARY
/* Minimal size of long data packet is 6 bytes */
if (packet_length < MYSQL_LONG_DATA_HEADER)
@@ -2964,26 +2996,23 @@ void mysql_stmt_get_longdata(THD *thd, char *packet, ulong packet_length)
param= stmt->param_array[param_number];
- Diagnostics_area new_stmt_da, *save_stmt_da= thd->stmt_da;
- Warning_info new_warnning_info(thd->query_id, false);
- Warning_info *save_warinig_info= thd->warning_info;
+ Diagnostics_area new_stmt_da(thd->query_id, false, true);
+ Diagnostics_area *save_stmt_da= thd->get_stmt_da();
- thd->stmt_da= &new_stmt_da;
- thd->warning_info= &new_warnning_info;
+ thd->set_stmt_da(&new_stmt_da);
#ifndef EMBEDDED_LIBRARY
param->set_longdata(packet, (ulong) (packet_end - packet));
#else
param->set_longdata(thd->extra_data, thd->extra_length);
#endif
- if (thd->stmt_da->is_error())
+ if (thd->get_stmt_da()->is_error())
{
stmt->state= Query_arena::STMT_ERROR;
- stmt->last_errno= thd->stmt_da->sql_errno();
- strncpy(stmt->last_error, thd->stmt_da->message(), MYSQL_ERRMSG_SIZE);
+ stmt->last_errno= thd->get_stmt_da()->sql_errno();
+ strncpy(stmt->last_error, thd->get_stmt_da()->message(), MYSQL_ERRMSG_SIZE);
}
- thd->stmt_da= save_stmt_da;
- thd->warning_info= save_warinig_info;
+ thd->set_stmt_da(save_stmt_da);
general_log_print(thd, thd->get_command(), NullS);
@@ -3059,8 +3088,7 @@ Reprepare_observer::report_error(THD *thd)
that this thread execution stops and returns to the caller,
backtracking all the way to Prepared_statement::execute_loop().
*/
- thd->stmt_da->set_error_status(thd, ER_NEED_REPREPARE,
- ER(ER_NEED_REPREPARE), "HY000");
+ thd->get_stmt_da()->set_error_status(ER_NEED_REPREPARE);
m_invalidated= TRUE;
return TRUE;
@@ -3526,7 +3554,6 @@ Prepared_statement::execute_loop(String *expanded_query,
Reprepare_observer reprepare_observer;
bool error;
int reprepare_attempt= 0;
- bool need_set_parameters= true;
/* Check if we got an error when sending long data */
if (state == Query_arena::STMT_ERROR)
@@ -3535,20 +3562,19 @@ Prepared_statement::execute_loop(String *expanded_query,
return TRUE;
}
-reexecute:
- if (need_set_parameters &&
- set_parameters(expanded_query, packet, packet_end))
+ if (set_parameters(expanded_query, packet, packet_end))
return TRUE;
- /*
- if set_parameters() has generated warnings,
- we need to repeat it when reexecuting, to recreate these
- warnings.
- */
- need_set_parameters= thd->warning_info->statement_warn_count();
-
- reprepare_observer.reset_reprepare_observer();
+#ifdef NOT_YET_FROM_MYSQL_5_6
+ if (unlikely(thd->security_ctx->password_expired &&
+ !lex->is_change_password))
+ {
+ my_error(ER_MUST_CHANGE_PASSWORD, MYF(0));
+ return true;
+ }
+#endif
+reexecute:
/*
If the free_list is not empty, we'll wrongly free some externally
allocated items when cleaning up after validation of the prepared
@@ -3562,22 +3588,24 @@ reexecute:
the observer method will be invoked to push an error into
the error stack.
*/
- if (sql_command_flags[lex->sql_command] &
- CF_REEXECUTION_FRAGILE)
+
+ if (sql_command_flags[lex->sql_command] & CF_REEXECUTION_FRAGILE)
{
+ reprepare_observer.reset_reprepare_observer();
DBUG_ASSERT(thd->m_reprepare_observer == NULL);
- thd->m_reprepare_observer = &reprepare_observer;
+ thd->m_reprepare_observer= &reprepare_observer;
}
error= execute(expanded_query, open_cursor) || thd->is_error();
thd->m_reprepare_observer= NULL;
- if (error && !thd->is_fatal_error && !thd->killed &&
+ if ((sql_command_flags[lex->sql_command] & CF_REEXECUTION_FRAGILE) &&
+ error && !thd->is_fatal_error && !thd->killed &&
reprepare_observer.is_invalidated() &&
reprepare_attempt++ < MAX_REPREPARE_ATTEMPTS)
{
- DBUG_ASSERT(thd->stmt_da->sql_errno() == ER_NEED_REPREPARE);
+ DBUG_ASSERT(thd->get_stmt_da()->sql_errno() == ER_NEED_REPREPARE);
thd->clear_error();
error= reprepare();
@@ -3679,7 +3707,7 @@ Prepared_statement::reprepare()
Sic: we can't simply silence warnings during reprepare, because if
it's failed, we need to return all the warnings to the user.
*/
- thd->warning_info->clear_warning_info(thd->query_id);
+ thd->get_stmt_da()->clear_warning_info(thd->query_id);
}
return error;
}
@@ -4041,7 +4069,7 @@ Ed_result_set::Ed_result_set(List<Ed_row> *rows_arg,
*/
Ed_connection::Ed_connection(THD *thd)
- :m_warning_info(thd->query_id, false, true),
+ :m_diagnostics_area(thd->query_id, false, true),
m_thd(thd),
m_rsets(0),
m_current_rset(0)
@@ -4067,7 +4095,7 @@ Ed_connection::free_old_result()
}
m_current_rset= m_rsets;
m_diagnostics_area.reset_diagnostics_area();
- m_warning_info.clear_warning_info(m_thd->query_id);
+ m_diagnostics_area.clear_warning_info(m_thd->query_id);
}
@@ -4104,23 +4132,20 @@ bool Ed_connection::execute_direct(Server_runnable *server_runnable)
Protocol_local protocol_local(m_thd, this);
Prepared_statement stmt(m_thd);
Protocol *save_protocol= m_thd->protocol;
- Diagnostics_area *save_diagnostics_area= m_thd->stmt_da;
- Warning_info *save_warning_info= m_thd->warning_info;
+ Diagnostics_area *save_diagnostics_area= m_thd->get_stmt_da();
DBUG_ENTER("Ed_connection::execute_direct");
free_old_result(); /* Delete all data from previous execution, if any */
m_thd->protocol= &protocol_local;
- m_thd->stmt_da= &m_diagnostics_area;
- m_thd->warning_info= &m_warning_info;
+ m_thd->set_stmt_da(&m_diagnostics_area);
rc= stmt.execute_server_runnable(server_runnable);
m_thd->protocol->end_statement();
m_thd->protocol= save_protocol;
- m_thd->stmt_da= save_diagnostics_area;
- m_thd->warning_info= save_warning_info;
+ m_thd->set_stmt_da(save_diagnostics_area);
/*
Protocol_local makes use of m_current_rset to keep
track of the last result set, while adding result sets to the end.
diff --git a/sql/sql_prepare.h b/sql/sql_prepare.h
index e0891bbd188..ea5ebddb561 100644
--- a/sql/sql_prepare.h
+++ b/sql/sql_prepare.h
@@ -253,16 +253,9 @@ public:
*/
ulong get_warn_count() const
{
- return m_warning_info.warn_count();
+ return m_diagnostics_area.warn_count();
}
- /**
- Get the server warnings as a result set.
- The result set has fixed metadata:
- The first column is the level.
- The second is a numeric code.
- The third is warning text.
- */
- List<MYSQL_ERROR> *get_warn_list() { return &m_warning_info.warn_list(); }
+
/**
The following members are only valid if execute_direct()
or move_to_next_result() returned an error.
@@ -311,7 +304,6 @@ public:
~Ed_connection() { free_old_result(); }
private:
Diagnostics_area m_diagnostics_area;
- Warning_info m_warning_info;
/**
Execute direct interface does not support multi-statements, only
multi-results. So we never have a situation when we have
diff --git a/sql/sql_priv.h b/sql/sql_priv.h
index 9891cf1b24e..383888bac30 100644
--- a/sql/sql_priv.h
+++ b/sql/sql_priv.h
@@ -50,7 +50,7 @@
do { \
compile_time_assert(MYSQL_VERSION_ID < VerHi * 10000 + VerLo * 100); \
if (((THD *) Thd) != NULL) \
- push_warning_printf(((THD *) Thd), MYSQL_ERROR::WARN_LEVEL_WARN, \
+ push_warning_printf(((THD *) Thd), Sql_condition::WARN_LEVEL_WARN, \
ER_WARN_DEPRECATED_SYNTAX, \
ER(ER_WARN_DEPRECATED_SYNTAX), \
(Old), (New)); \
diff --git a/sql/sql_profile.cc b/sql/sql_profile.cc
index feb7810fa28..dc7aacb3d94 100644
--- a/sql/sql_profile.cc
+++ b/sql/sql_profile.cc
@@ -288,7 +288,7 @@ void QUERY_PROFILE::set_query_source(char *query_source_arg,
uint query_length_arg)
{
/* Truncate to avoid DoS attacks. */
- uint length= min(MAX_QUERY_LENGTH, query_length_arg);
+ uint length= MY_MIN(MAX_QUERY_LENGTH, query_length_arg);
DBUG_ASSERT(query_source == NULL); /* we don't leak memory */
if (query_source_arg != NULL)
diff --git a/sql/sql_reload.cc b/sql/sql_reload.cc
index f430c1b3a5d..f3eab6b84cf 100644
--- a/sql/sql_reload.cc
+++ b/sql/sql_reload.cc
@@ -176,7 +176,7 @@ bool reload_acl_and_cache(THD *thd, unsigned long long options,
mysql_mutex_lock(&LOCK_active_mi);
if (!(mi= (master_info_index->
get_master_info(&connection_name,
- MYSQL_ERROR::WARN_LEVEL_ERROR))))
+ Sql_condition::WARN_LEVEL_ERROR))))
{
result= 1;
}
@@ -349,7 +349,7 @@ bool reload_acl_and_cache(THD *thd, unsigned long long options,
if (!(mi= (master_info_index->
get_master_info(&lex_mi->connection_name,
- MYSQL_ERROR::WARN_LEVEL_ERROR))))
+ Sql_condition::WARN_LEVEL_ERROR))))
{
result= 1;
}
diff --git a/sql/sql_rename.cc b/sql/sql_rename.cc
index c957076ac4f..78acb4a519f 100644
--- a/sql/sql_rename.cc
+++ b/sql/sql_rename.cc
@@ -39,8 +39,8 @@ static bool do_rename(THD *thd, TABLE_LIST *ren_table, char *new_db,
static TABLE_LIST *reverse_table_list(TABLE_LIST *table_list);
/*
- Every second entry in the table_list is the original name and every
- second entry is the new name.
+ Every two entries in the table_list form a pair of original name and
+ the new name.
*/
bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list, bool silent)
@@ -144,7 +144,7 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list, bool silent)
}
if (lock_table_names(thd, table_list, 0, thd->variables.lock_wait_timeout,
- MYSQL_OPEN_SKIP_TEMPORARY))
+ 0))
goto err;
error=0;
diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc
index 802161a09a9..5a93f3b819a 100644
--- a/sql/sql_repl.cc
+++ b/sql/sql_repl.cc
@@ -2783,7 +2783,7 @@ int start_slave(THD* thd , Master_info* mi, bool net_report)
{
/* Issuing warning then started without --skip-slave-start */
if (!opt_skip_slave_start)
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_MISSING_SKIP_SLAVE,
ER(ER_MISSING_SKIP_SLAVE));
}
@@ -2791,7 +2791,7 @@ int start_slave(THD* thd , Master_info* mi, bool net_report)
mysql_mutex_unlock(&mi->rli.data_lock);
}
else if (thd->lex->mi.pos || thd->lex->mi.relay_log_pos)
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_UNTIL_COND_IGNORED,
+ push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, ER_UNTIL_COND_IGNORED,
ER(ER_UNTIL_COND_IGNORED));
if (!slave_errno)
@@ -2808,7 +2808,7 @@ int start_slave(THD* thd , Master_info* mi, bool net_report)
else
{
/* no error if all threads are already started, only a warning */
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_SLAVE_WAS_RUNNING,
+ push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, ER_SLAVE_WAS_RUNNING,
ER(ER_SLAVE_WAS_RUNNING));
}
@@ -2874,7 +2874,7 @@ int stop_slave(THD* thd, Master_info* mi, bool net_report )
{
//no error if both threads are already stopped, only a warning
slave_errno= 0;
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_SLAVE_WAS_NOT_RUNNING,
+ push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, ER_SLAVE_WAS_NOT_RUNNING,
ER(ER_SLAVE_WAS_NOT_RUNNING));
}
unlock_slave_threads(mi);
@@ -3134,7 +3134,7 @@ bool change_master(THD* thd, Master_info* mi, bool *master_info_added)
/* if new Master_info doesn't exists, add it */
if (!master_info_index->get_master_info(&mi->connection_name,
- MYSQL_ERROR::WARN_LEVEL_NOTE))
+ Sql_condition::WARN_LEVEL_NOTE))
{
if (master_info_index->add_master_info(mi, TRUE))
{
@@ -3216,7 +3216,7 @@ bool change_master(THD* thd, Master_info* mi, bool *master_info_added)
if (lex_mi->heartbeat_opt != LEX_MASTER_INFO::LEX_MI_UNCHANGED)
mi->heartbeat_period = lex_mi->heartbeat_period;
else
- mi->heartbeat_period= (float) min(SLAVE_MAX_HEARTBEAT_PERIOD,
+ mi->heartbeat_period= (float) MY_MIN(SLAVE_MAX_HEARTBEAT_PERIOD,
(slave_net_timeout/2.0));
mi->received_heartbeats= 0; // counter lives until master is CHANGEd
/*
@@ -3273,7 +3273,7 @@ bool change_master(THD* thd, Master_info* mi, bool *master_info_added)
if (lex_mi->ssl || lex_mi->ssl_ca || lex_mi->ssl_capath ||
lex_mi->ssl_cert || lex_mi->ssl_cipher || lex_mi->ssl_key ||
lex_mi->ssl_verify_server_cert || lex_mi->ssl_crl || lex_mi->ssl_crlpath)
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_SLAVE_IGNORED_SSL_PARAMS, ER(ER_SLAVE_IGNORED_SSL_PARAMS));
#endif
@@ -3321,12 +3321,12 @@ bool change_master(THD* thd, Master_info* mi, bool *master_info_added)
{
/*
Sometimes mi->rli.master_log_pos == 0 (it happens when the SQL thread is
- not initialized), so we use a max().
+ not initialized), so we use a MY_MAX().
What happens to mi->rli.master_log_pos during the initialization stages
of replication is not 100% clear, so we guard against problems using
- max().
+ MY_MAX().
*/
- mi->master_log_pos = max(BIN_LOG_HEADER_SIZE,
+ mi->master_log_pos = MY_MAX(BIN_LOG_HEADER_SIZE,
mi->rli.group_master_log_pos);
strmake_buf(mi->master_log_name, mi->rli.group_master_log_name);
}
@@ -3502,7 +3502,7 @@ bool mysql_show_binlog_events(THD* thd)
mysql_mutex_lock(&LOCK_active_mi);
if (!(mi= master_info_index->
get_master_info(&thd->variables.default_master_connection,
- MYSQL_ERROR::WARN_LEVEL_ERROR)))
+ Sql_condition::WARN_LEVEL_ERROR)))
{
mysql_mutex_unlock(&LOCK_active_mi);
DBUG_RETURN(TRUE);
@@ -3515,7 +3515,7 @@ bool mysql_show_binlog_events(THD* thd)
LEX_MASTER_INFO *lex_mi= &thd->lex->mi;
SELECT_LEX_UNIT *unit= &thd->lex->unit;
ha_rows event_count, limit_start, limit_end;
- my_off_t pos = max(BIN_LOG_HEADER_SIZE, lex_mi->pos); // user-friendly
+ my_off_t pos = MY_MAX(BIN_LOG_HEADER_SIZE, lex_mi->pos); // user-friendly
char search_file_name[FN_REFLEN], *name;
const char *log_file_name = lex_mi->log_file_name;
mysql_mutex_t *log_lock = binary_log->get_log_lock();
@@ -3805,14 +3805,14 @@ int log_loaded_block(IO_CACHE* file)
DBUG_RETURN(0);
for (block_len= (uint) (my_b_get_bytes_in_buffer(file)); block_len > 0;
- buffer += min(block_len, max_event_size),
- block_len -= min(block_len, max_event_size))
+ buffer += MY_MIN(block_len, max_event_size),
+ block_len -= MY_MIN(block_len, max_event_size))
{
lf_info->last_pos_in_file= my_b_get_pos_in_file(file);
if (lf_info->wrote_create_file)
{
Append_block_log_event a(lf_info->thd, lf_info->thd->db, buffer,
- min(block_len, max_event_size),
+ MY_MIN(block_len, max_event_size),
lf_info->log_delayed);
if (mysql_bin_log.write(&a))
DBUG_RETURN(1);
@@ -3821,7 +3821,7 @@ int log_loaded_block(IO_CACHE* file)
{
Begin_load_query_log_event b(lf_info->thd, lf_info->thd->db,
buffer,
- min(block_len, max_event_size),
+ MY_MIN(block_len, max_event_size),
lf_info->log_delayed);
if (mysql_bin_log.write(&b))
DBUG_RETURN(1);
@@ -3965,7 +3965,7 @@ rpl_gtid_pos_check(THD *thd, char *str, size_t len)
}
else if (!gave_missing_warning)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_MASTER_GTID_POS_MISSING_DOMAIN,
ER(ER_MASTER_GTID_POS_MISSING_DOMAIN),
binlog_gtid->domain_id, binlog_gtid->domain_id,
@@ -3985,7 +3985,7 @@ rpl_gtid_pos_check(THD *thd, char *str, size_t len)
}
else if (!gave_conflict_warning)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_MASTER_GTID_POS_CONFLICTS_WITH_BINLOG,
ER(ER_MASTER_GTID_POS_CONFLICTS_WITH_BINLOG),
slave_gtid->domain_id, slave_gtid->server_id,
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 2d80767a141..f5baaad5655 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -381,7 +381,7 @@ bool handle_select(THD *thd, LEX *lex, select_result *result,
If LIMIT ROWS EXAMINED interrupted query execution, issue a warning,
continue with normal processing and produce an incomplete query result.
*/
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_QUERY_EXCEEDED_ROWS_EXAMINED_LIMIT,
ER(ER_QUERY_EXCEEDED_ROWS_EXAMINED_LIMIT),
thd->accessed_rows_and_keys,
@@ -1217,15 +1217,16 @@ TODO: make view to decide if it is possible to write to WHERE directly or make S
if (!tbl->embedding)
{
Item *prune_cond= tbl->on_expr? tbl->on_expr : conds;
- tbl->table->no_partitions_used= prune_partitions(thd, tbl->table,
- prune_cond);
- }
+ tbl->table->all_partitions_pruned_away= prune_partitions(thd,
+ tbl->table,
+ prune_cond);
+ }
}
}
#endif
/*
- Try to optimize count(*), min() and max() to const fields if
+ Try to optimize count(*), MY_MIN() and MY_MAX() to const fields if
there is implicit grouping (aggregate functions but no
group_list). In this case, the result set shall only contain one
row.
@@ -3352,9 +3353,9 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
bitmap_clear_all(&table->cond_set);
#ifdef WITH_PARTITION_STORAGE_ENGINE
- const bool no_partitions_used= table->no_partitions_used;
+ const bool all_partitions_pruned_away= table->all_partitions_pruned_away;
#else
- const bool no_partitions_used= FALSE;
+ const bool all_partitions_pruned_away= FALSE;
#endif
DBUG_EXECUTE_IF("bug11747970_raise_error",
@@ -3391,7 +3392,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
if (!table->is_filled_at_execution() &&
((!table->file->stats.records &&
(table->file->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT)) ||
- no_partitions_used) && !embedding)
+ all_partitions_pruned_away) && !embedding)
{ // Empty table
s->dependent= 0; // Ignore LEFT JOIN depend.
no_rows_const_tables |= table->map;
@@ -3435,7 +3436,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
(table->s->system ||
(table->file->stats.records <= 1 &&
(table->file->ha_table_flags() & HA_STATS_RECORDS_IS_EXACT)) ||
- no_partitions_used) &&
+ all_partitions_pruned_away) &&
!s->dependent &&
!table->fulltext_searched && !join->no_const_tables)
{
@@ -3685,7 +3686,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
(!embedding || (embedding->sj_on_expr && !embedding->embedding)))
{
key_map base_part, base_const_ref, base_eq_part;
- base_part.set_prefix(keyinfo->key_parts);
+ base_part.set_prefix(keyinfo->user_defined_key_parts);
base_const_ref= const_ref;
base_const_ref.intersect(base_part);
base_eq_part= eq_part;
@@ -3778,7 +3779,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
This is can't be to high as otherwise we are likely to use
table scan.
*/
- s->worst_seeks= min((double) s->found_records / 10,
+ s->worst_seeks= MY_MIN((double) s->found_records / 10,
(double) s->read_time*3);
if (s->worst_seeks < 2.0) // Fix for small tables
s->worst_seeks=2.0;
@@ -4977,7 +4978,7 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab,
uint and_level,i;
KEY_FIELD *key_fields, *end, *field;
uint sz;
- uint m= max(select_lex->max_equal_elems,1);
+ uint m= MY_MAX(select_lex->max_equal_elems,1);
/*
We use the same piece of memory to store both KEY_FIELD
@@ -5000,7 +5001,7 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab,
can be not more than select_lex->max_equal_elems such
substitutions.
*/
- sz= max(sizeof(KEY_FIELD),sizeof(SARGABLE_PARAM))*
+ sz= MY_MAX(sizeof(KEY_FIELD),sizeof(SARGABLE_PARAM))*
(((thd->lex->current_select->cond_count+1)*2 +
thd->lex->current_select->between_count)*m+1);
if (!(key_fields=(KEY_FIELD*) thd->alloc(sz)))
@@ -5184,7 +5185,7 @@ static void optimize_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse_array)
DBUG_ASSERT(tablenr != Table_map_iterator::BITMAP_END);
TABLE *tmp_table=join->table[tablenr];
if (tmp_table) // already created
- keyuse->ref_table_rows= max(tmp_table->file->stats.records, 100);
+ keyuse->ref_table_rows= MY_MAX(tmp_table->file->stats.records, 100);
}
}
/*
@@ -5663,7 +5664,7 @@ best_access_path(JOIN *join,
tmp= table->file->keyread_time(key, 1, (ha_rows) tmp);
else
tmp= table->file->read_time(key, 1,
- (ha_rows) min(tmp,s->worst_seeks));
+ (ha_rows) MY_MIN(tmp,s->worst_seeks));
tmp*= record_count;
}
}
@@ -5676,7 +5677,7 @@ best_access_path(JOIN *join,
*/
if ((found_part & 1) &&
(!(table->file->index_flags(key, 0, 0) & HA_ONLY_WHOLE_INDEX) ||
- found_part == PREV_BITS(uint,keyinfo->key_parts)))
+ found_part == PREV_BITS(uint,keyinfo->user_defined_key_parts)))
{
max_key_part= max_part_bit(found_part);
/*
@@ -5770,7 +5771,7 @@ best_access_path(JOIN *join,
*/
double rec_per_key;
if (!(rec_per_key=(double)
- keyinfo->rec_per_key[keyinfo->key_parts-1]))
+ keyinfo->rec_per_key[keyinfo->user_defined_key_parts-1]))
rec_per_key=(double) s->records/rec+1;
if (!s->records)
@@ -5780,10 +5781,10 @@ best_access_path(JOIN *join,
else
{
double a=s->records*0.01;
- if (keyinfo->key_parts > 1)
+ if (keyinfo->user_defined_key_parts > 1)
tmp= (max_key_part * (rec_per_key - a) +
- a*keyinfo->key_parts - rec_per_key)/
- (keyinfo->key_parts-1);
+ a*keyinfo->user_defined_key_parts - rec_per_key)/
+ (keyinfo->user_defined_key_parts-1);
else
tmp= a;
set_if_bigger(tmp,1.0);
@@ -5828,7 +5829,7 @@ best_access_path(JOIN *join,
tmp= table->file->keyread_time(key, 1, (ha_rows) tmp);
else
tmp= table->file->read_time(key, 1,
- (ha_rows) min(tmp,s->worst_seeks));
+ (ha_rows) MY_MIN(tmp,s->worst_seeks));
tmp*= record_count;
}
else
@@ -8202,8 +8203,8 @@ static bool create_hj_key_for_table(JOIN *join, JOIN_TAB *join_tab,
!(key_part_info = (KEY_PART_INFO *) thd->alloc(sizeof(KEY_PART_INFO)*
key_parts)))
DBUG_RETURN(TRUE);
- keyinfo->usable_key_parts= keyinfo->key_parts = key_parts;
- keyinfo->ext_key_parts= keyinfo->key_parts;
+ keyinfo->usable_key_parts= keyinfo->user_defined_key_parts = key_parts;
+ keyinfo->ext_key_parts= keyinfo->user_defined_key_parts;
keyinfo->key_part= key_part_info;
keyinfo->key_length=0;
keyinfo->algorithm= HA_KEY_ALG_UNDEF;
@@ -8249,7 +8250,7 @@ static bool create_hj_key_for_table(JOIN *join, JOIN_TAB *join_tab,
keyuse++;
} while (keyuse->table == table && keyuse->is_for_hash_join());
- keyinfo->ext_key_parts= keyinfo->key_parts;
+ keyinfo->ext_key_parts= keyinfo->user_defined_key_parts;
keyinfo->ext_key_flags= keyinfo->flags;
keyinfo->ext_key_part_map= 0;
@@ -8461,9 +8462,9 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j,
ulong key_flags= j->table->actual_key_flags(keyinfo);
if (j->type == JT_CONST)
j->table->const_table= 1;
- else if (!((keyparts == keyinfo->key_parts &&
+ else if (!((keyparts == keyinfo->user_defined_key_parts &&
((key_flags & (HA_NOSAME | HA_NULL_PART_KEY)) == HA_NOSAME)) ||
- (keyparts > keyinfo->key_parts && // true only for extended keys
+ (keyparts > keyinfo->user_defined_key_parts && // true only for extended keys
test(key_flags & HA_EXT_NOSAME) &&
keyparts == keyinfo->ext_key_parts)) ||
null_ref_key)
@@ -10905,7 +10906,7 @@ bool TABLE_REF::tmp_table_index_lookup_init(THD *thd,
bool value,
uint skip)
{
- uint tmp_key_parts= tmp_key->key_parts;
+ uint tmp_key_parts= tmp_key->user_defined_key_parts;
uint i;
DBUG_ENTER("TABLE_REF::tmp_table_index_lookup_init");
@@ -11002,7 +11003,7 @@ bool TABLE_REF::is_access_triggered()
a correlated subquery itself, but has subqueries, we can free it
fully and also free JOINs of all its subqueries. The exception
is a subquery in SELECT list, e.g: @n
- SELECT a, (select max(b) from t1) group by c @n
+ SELECT a, (select MY_MAX(b) from t1) group by c @n
This subquery will not be evaluated at first sweep and its value will
not be inserted into the temporary table. Instead, it's evaluated
when selecting from the temporary table. Therefore, it can't be freed
@@ -12472,7 +12473,7 @@ static int compare_fields_by_table_order(Item *field1,
if (!cmp)
{
KEY *key_info= tab->table->key_info + keyno;
- for (uint i= 0; i < key_info->key_parts; i++)
+ for (uint i= 0; i < key_info->user_defined_key_parts; i++)
{
Field *fld= key_info->key_part[i].field;
if (fld->eq(f2->field))
@@ -14966,7 +14967,6 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
table->s= share;
init_tmp_table_share(thd, share, "", 0, tmpname, tmpname);
share->blob_field= blob_field;
- share->blob_ptr_size= portable_sizeof_char_ptr;
share->table_charset= param->table_charset;
share->primary_key= MAX_KEY; // Indicate no primary key
share->keys_for_keyread.init();
@@ -15207,6 +15207,12 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
if (!table->file)
goto err;
+ if (table->file->set_ha_share_ref(&share->ha_share))
+ {
+ delete table->file;
+ goto err;
+ }
+
if (!using_unique_constraint)
reclength+= group_null_items; // null flag is stored separately
@@ -15368,7 +15374,7 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
share->max_rows= ~(ha_rows) 0;
else
share->max_rows= (ha_rows) (((share->db_type() == heap_hton) ?
- min(thd->variables.tmp_table_size,
+ MY_MIN(thd->variables.tmp_table_size,
thd->variables.max_heap_table_size) :
thd->variables.tmp_table_size) /
share->reclength);
@@ -15395,8 +15401,8 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
keyinfo->key_part=key_part_info;
keyinfo->flags=HA_NOSAME | HA_BINARY_PACK_KEY | HA_PACK_KEY;
keyinfo->ext_key_flags= keyinfo->flags;
- keyinfo->usable_key_parts=keyinfo->key_parts= param->group_parts;
- keyinfo->ext_key_parts= keyinfo->key_parts;
+ keyinfo->usable_key_parts=keyinfo->user_defined_key_parts= param->group_parts;
+ keyinfo->ext_key_parts= keyinfo->user_defined_key_parts;
keyinfo->key_length=0;
keyinfo->rec_per_key=NULL;
keyinfo->read_stats= NULL;
@@ -15496,16 +15502,17 @@ create_tmp_table(THD *thd, TMP_TABLE_PARAM *param, List<Item> &fields,
share->uniques= 1;
}
null_pack_length-=hidden_null_pack_length;
- keyinfo->key_parts= ((field_count-param->hidden_field_count)+
- (share->uniques ? test(null_pack_length) : 0));
- keyinfo->ext_key_parts= keyinfo->key_parts;
+ keyinfo->user_defined_key_parts=
+ ((field_count-param->hidden_field_count)+
+ (share->uniques ? test(null_pack_length) : 0));
+ keyinfo->ext_key_parts= keyinfo->user_defined_key_parts;
table->distinct= 1;
share->keys= 1;
if (!(key_part_info= (KEY_PART_INFO*)
alloc_root(&table->mem_root,
- keyinfo->key_parts * sizeof(KEY_PART_INFO))))
+ keyinfo->user_defined_key_parts * sizeof(KEY_PART_INFO))))
goto err;
- bzero((void*) key_part_info, keyinfo->key_parts * sizeof(KEY_PART_INFO));
+ bzero((void*) key_part_info, keyinfo->user_defined_key_parts * sizeof(KEY_PART_INFO));
table->keys_in_use_for_query.set_bit(0);
share->keys_in_use.set_bit(0);
table->key_info= table->s->key_info= keyinfo;
@@ -15685,7 +15692,6 @@ TABLE *create_virtual_tmp_table(THD *thd, List<Create_field> &field_list)
table->temp_pool_slot= MY_BIT_NONE;
share->blob_field= blob_field;
share->fields= field_count;
- share->blob_ptr_size= portable_sizeof_char_ptr;
setup_tmp_table_column_bitmaps(table, bitmaps);
/* Create all fields and calculate the total length of record */
@@ -15841,13 +15847,13 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
{ // Get keys for ni_create
bool using_unique_constraint=0;
HA_KEYSEG *seg= (HA_KEYSEG*) alloc_root(&table->mem_root,
- sizeof(*seg) * keyinfo->key_parts);
+ sizeof(*seg) * keyinfo->user_defined_key_parts);
if (!seg)
goto err;
- bzero(seg, sizeof(*seg) * keyinfo->key_parts);
+ bzero(seg, sizeof(*seg) * keyinfo->user_defined_key_parts);
if (keyinfo->key_length >= table->file->max_key_length() ||
- keyinfo->key_parts > table->file->max_key_parts() ||
+ keyinfo->user_defined_key_parts > table->file->max_key_parts() ||
share->uniques)
{
if (!share->uniques && !(keyinfo->flags & HA_NOSAME))
@@ -15862,7 +15868,7 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
share->uniques= 1;
using_unique_constraint=1;
bzero((char*) &uniquedef,sizeof(uniquedef));
- uniquedef.keysegs=keyinfo->key_parts;
+ uniquedef.keysegs=keyinfo->user_defined_key_parts;
uniquedef.seg=seg;
uniquedef.null_are_equal=1;
@@ -15878,10 +15884,10 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
/* Create a key */
bzero((char*) &keydef,sizeof(keydef));
keydef.flag= keyinfo->flags & HA_NOSAME;
- keydef.keysegs= keyinfo->key_parts;
+ keydef.keysegs= keyinfo->user_defined_key_parts;
keydef.seg= seg;
}
- for (uint i=0; i < keyinfo->key_parts ; i++,seg++)
+ for (uint i=0; i < keyinfo->user_defined_key_parts ; i++,seg++)
{
Field *field=keyinfo->key_part[i].field;
seg->flag= 0;
@@ -15893,7 +15899,8 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
seg->type=
((keyinfo->key_part[i].key_type & FIELDFLAG_BINARY) ?
HA_KEYTYPE_VARBINARY2 : HA_KEYTYPE_VARTEXT2);
- seg->bit_start= (uint8)(field->pack_length() - share->blob_ptr_size);
+ seg->bit_start= (uint8)(field->pack_length() -
+ portable_sizeof_char_ptr);
seg->flag= HA_BLOB_PART;
seg->length=0; // Whole blob in unique constraint
}
@@ -15947,7 +15954,7 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
start_recinfo,
share->uniques, &uniquedef,
&create_info,
- HA_CREATE_TMP_TABLE)))
+ HA_CREATE_TMP_TABLE | HA_CREATE_INTERNAL_TABLE)))
{
table->file->print_error(error,MYF(0)); /* purecov: inspected */
table->db_stat=0;
@@ -16010,13 +16017,13 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
{ // Get keys for ni_create
bool using_unique_constraint=0;
HA_KEYSEG *seg= (HA_KEYSEG*) alloc_root(&table->mem_root,
- sizeof(*seg) * keyinfo->key_parts);
+ sizeof(*seg) * keyinfo->user_defined_key_parts);
if (!seg)
goto err;
- bzero(seg, sizeof(*seg) * keyinfo->key_parts);
+ bzero(seg, sizeof(*seg) * keyinfo->user_defined_key_parts);
if (keyinfo->key_length >= table->file->max_key_length() ||
- keyinfo->key_parts > table->file->max_key_parts() ||
+ keyinfo->user_defined_key_parts > table->file->max_key_parts() ||
share->uniques)
{
/* Can't create a key; Make a unique constraint instead of a key */
@@ -16024,7 +16031,7 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
share->uniques= 1;
using_unique_constraint=1;
bzero((char*) &uniquedef,sizeof(uniquedef));
- uniquedef.keysegs=keyinfo->key_parts;
+ uniquedef.keysegs=keyinfo->user_defined_key_parts;
uniquedef.seg=seg;
uniquedef.null_are_equal=1;
@@ -16041,10 +16048,10 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
bzero((char*) &keydef,sizeof(keydef));
keydef.flag= ((keyinfo->flags & HA_NOSAME) | HA_BINARY_PACK_KEY |
HA_PACK_KEY);
- keydef.keysegs= keyinfo->key_parts;
+ keydef.keysegs= keyinfo->user_defined_key_parts;
keydef.seg= seg;
}
- for (uint i=0; i < keyinfo->key_parts ; i++,seg++)
+ for (uint i=0; i < keyinfo->user_defined_key_parts ; i++,seg++)
{
Field *field=keyinfo->key_part[i].field;
seg->flag= 0;
@@ -16056,7 +16063,7 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
seg->type=
((keyinfo->key_part[i].key_type & FIELDFLAG_BINARY) ?
HA_KEYTYPE_VARBINARY2 : HA_KEYTYPE_VARTEXT2);
- seg->bit_start= (uint8)(field->pack_length() - share->blob_ptr_size);
+ seg->bit_start= (uint8)(field->pack_length() - portable_sizeof_char_ptr);
seg->flag= HA_BLOB_PART;
seg->length=0; // Whole blob in unique constraint
}
@@ -16093,7 +16100,7 @@ bool create_internal_tmp_table(TABLE *table, KEY *keyinfo,
start_recinfo,
share->uniques, &uniquedef,
&create_info,
- HA_CREATE_TMP_TABLE)))
+ HA_CREATE_TMP_TABLE | HA_CREATE_INTERNAL_TABLE)))
{
table->file->print_error(error,MYF(0)); /* purecov: inspected */
table->db_stat=0;
@@ -16149,6 +16156,12 @@ create_internal_tmp_table_from_heap(THD *thd, TABLE *table,
new_table.s->db_type())))
DBUG_RETURN(1); // End of memory
+ if (new_table.file->set_ha_share_ref(&share.ha_share))
+ {
+ delete new_table.file;
+ DBUG_RETURN(1);
+ }
+
save_proc_info=thd->proc_info;
THD_STAGE_INFO(thd, stage_converting_heap_to_myisam);
@@ -16779,7 +16792,7 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records)
if (join_tab->on_precond && !join_tab->on_precond->val_int())
rc= NESTED_LOOP_NO_MORE_ROWS;
}
- join->thd->warning_info->reset_current_row_for_warning();
+ join->thd->get_stmt_da()->reset_current_row_for_warning();
if (rc != NESTED_LOOP_NO_MORE_ROWS &&
(rc= join_tab_execution_startup(join_tab)) < 0)
@@ -17016,7 +17029,7 @@ evaluate_join_record(JOIN *join, JOIN_TAB *join_tab,
enum enum_nested_loop_state rc;
/* A match from join_tab is found for the current partial join. */
rc= (*join_tab->next_select)(join, join_tab+1, 0);
- join->thd->warning_info->inc_current_row_for_warning();
+ join->thd->get_stmt_da()->inc_current_row_for_warning();
if (rc != NESTED_LOOP_OK && rc != NESTED_LOOP_NO_MORE_ROWS)
DBUG_RETURN(rc);
if (return_tab < join->return_tab)
@@ -17034,7 +17047,7 @@ evaluate_join_record(JOIN *join, JOIN_TAB *join_tab,
}
else
{
- join->thd->warning_info->inc_current_row_for_warning();
+ join->thd->get_stmt_da()->inc_current_row_for_warning();
join_tab->read_record.unlock_row(join_tab);
}
}
@@ -17045,7 +17058,7 @@ evaluate_join_record(JOIN *join, JOIN_TAB *join_tab,
with the beginning coinciding with the current partial join.
*/
join->examined_rows++;
- join->thd->warning_info->inc_current_row_for_warning();
+ join->thd->get_stmt_da()->inc_current_row_for_warning();
join_tab->read_record.unlock_row(join_tab);
}
DBUG_RETURN(NESTED_LOOP_OK);
@@ -17157,13 +17170,8 @@ int report_error(TABLE *table, int error)
*/
if (error != HA_ERR_LOCK_DEADLOCK && error != HA_ERR_LOCK_WAIT_TIMEOUT
&& !table->in_use->killed)
- {
- push_warning_printf(table->in_use, MYSQL_ERROR::WARN_LEVEL_WARN, error,
- "Got error %d when reading table %`s.%`s",
- error, table->s->db.str, table->s->table_name.str);
sql_print_error("Got error %d when reading table '%s'",
error, table->s->path.str);
- }
table->file->print_error(error,MYF(0));
return 1;
}
@@ -18837,7 +18845,7 @@ static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx,
{
KEY_PART_INFO *key_part,*key_part_end;
key_part=table->key_info[idx].key_part;
- key_part_end=key_part+table->key_info[idx].key_parts;
+ key_part_end=key_part+table->key_info[idx].user_defined_key_parts;
key_part_map const_key_parts=table->const_key_parts[idx];
int reverse=0;
uint key_parts;
@@ -18879,7 +18887,7 @@ static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx,
(we have to stop as first not continous primary key part)
*/
for (key_part_end= key_part,
- end= key_part+table->key_info[table->s->primary_key].key_parts;
+ end= key_part+table->key_info[table->s->primary_key].user_defined_key_parts;
key_part_end < end; key_part_end++, pk_part_idx++)
{
/* Found hole in the pk_parts; Abort */
@@ -18896,7 +18904,7 @@ static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx,
Test if the primary key parts were all const (i.e. there's one row).
The sorting doesn't matter.
*/
- if (key_part == start+table->key_info[table->s->primary_key].key_parts &&
+ if (key_part == start+table->key_info[table->s->primary_key].user_defined_key_parts &&
reverse == 0)
{
key_parts= 0;
@@ -18922,7 +18930,7 @@ static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx,
}
if (on_pk_suffix)
{
- uint used_key_parts_secondary= table->key_info[idx].key_parts;
+ uint used_key_parts_secondary= table->key_info[idx].user_defined_key_parts;
uint used_key_parts_pk=
(uint) (key_part - table->key_info[table->s->primary_key].key_part);
key_parts= used_key_parts_pk + used_key_parts_secondary;
@@ -19033,7 +19041,7 @@ test_if_subkey(ORDER *order, TABLE *table, uint ref, uint ref_key_parts,
{
if (usable_keys->is_set(nr) &&
table->key_info[nr].key_length < min_length &&
- table->key_info[nr].key_parts >= ref_key_parts &&
+ table->key_info[nr].user_defined_key_parts >= ref_key_parts &&
is_subkey(table->key_info[nr].key_part, ref_key_part,
ref_key_part_end) &&
test_if_order_by_key(order, table, nr))
@@ -19091,7 +19099,7 @@ list_contains_unique_index(TABLE *table,
KEY_PART_INFO *key_part, *key_part_end;
for (key_part=keyinfo->key_part,
- key_part_end=key_part+ keyinfo->key_parts;
+ key_part_end=key_part+ keyinfo->user_defined_key_parts;
key_part < key_part_end;
key_part++)
{
@@ -19400,7 +19408,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
order_direction= best_key_direction;
/*
saved_best_key_parts is actual number of used keyparts found by the
- test_if_order_by_key function. It could differ from keyinfo->key_parts,
+ test_if_order_by_key function. It could differ from keyinfo->user_defined_key_parts,
thus we have to restore it in case of desc order as it affects
QUICK_SELECT_DESC behaviour.
*/
@@ -20131,7 +20139,7 @@ SORT_FIELD *make_unireg_sortorder(ORDER *order, uint *length,
count++;
if (!sortorder)
sortorder= (SORT_FIELD*) sql_alloc(sizeof(SORT_FIELD) *
- (max(count, *length) + 1));
+ (MY_MAX(count, *length) + 1));
pos= sort= sortorder;
if (!pos)
@@ -20365,7 +20373,7 @@ find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables,
warning so the user knows that the field from the FROM clause
overshadows the column reference from the SELECT list.
*/
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_NON_UNIQ_ERROR,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_NON_UNIQ_ERROR,
ER(ER_NON_UNIQ_ERROR),
((Item_ident*) order_item)->field_name,
current_thd->where);
@@ -23750,12 +23758,12 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
if (group)
{
/*
- Used_key_parts can be larger than keyinfo->key_parts
+ Used_key_parts can be larger than keyinfo->user_defined_key_parts
when using a secondary index clustered with a primary
key (e.g. as in Innodb).
See Bug #28591 for details.
*/
- uint used_index_parts= keyinfo->key_parts;
+ uint used_index_parts= keyinfo->user_defined_key_parts;
uint used_pk_parts= 0;
if (used_key_parts > used_index_parts)
used_pk_parts= used_key_parts-used_index_parts;
@@ -23770,7 +23778,7 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
of the primary key are considered unknown we assume
they are equal to 1.
*/
- if (used_key_parts == pkinfo->key_parts ||
+ if (used_key_parts == pkinfo->user_defined_key_parts ||
pkinfo->rec_per_key[0] == 0)
rec_per_key= 1;
if (rec_per_key > 1)
@@ -23837,7 +23845,7 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
select_limit= (ha_rows) (select_limit *
(double) table_records /
table->quick_condition_rows);
- rec_per_key= keyinfo->actual_rec_per_key(keyinfo->key_parts-1);
+ rec_per_key= keyinfo->actual_rec_per_key(keyinfo->user_defined_key_parts-1);
set_if_bigger(rec_per_key, 1);
/*
Here we take into account the fact that rows are
@@ -23851,7 +23859,7 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
index entry.
*/
index_scan_time= select_limit/rec_per_key *
- min(rec_per_key, table->file->scan_time());
+ MY_MIN(rec_per_key, table->file->scan_time());
if ((ref_key < 0 && (group || table->force_index || is_covering)) ||
index_scan_time < read_time)
{
@@ -23862,13 +23870,13 @@ test_if_cheaper_ordering(const JOIN_TAB *tab, ORDER *order, TABLE *table,
if (table->quick_keys.is_set(nr))
quick_records= table->quick_rows[nr];
if (best_key < 0 ||
- (select_limit <= min(quick_records,best_records) ?
- keyinfo->key_parts < best_key_parts :
+ (select_limit <= MY_MIN(quick_records,best_records) ?
+ keyinfo->user_defined_key_parts < best_key_parts :
quick_records < best_records) ||
(!is_best_covering && is_covering))
{
best_key= nr;
- best_key_parts= keyinfo->key_parts;
+ best_key_parts= keyinfo->user_defined_key_parts;
if (saved_best_key_parts)
*saved_best_key_parts= used_key_parts;
best_records= quick_records;
diff --git a/sql/sql_servers.cc b/sql/sql_servers.cc
index b5b7f9866c5..cf96297391c 100644
--- a/sql/sql_servers.cc
+++ b/sql/sql_servers.cc
@@ -265,9 +265,9 @@ bool servers_reload(THD *thd)
Execution might have been interrupted; only print the error message
if an error condition has been raised.
*/
- if (thd->stmt_da->is_error())
+ if (thd->get_stmt_da()->is_error())
sql_print_error("Can't open and lock privilege tables: %s",
- thd->stmt_da->message());
+ thd->get_stmt_da()->message());
return_val= FALSE;
goto end;
}
@@ -631,7 +631,7 @@ int drop_server(THD *thd, LEX_SERVER_OPTIONS *server_options)
if (close_cached_connection_tables(thd, &name))
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_UNKNOWN_ERROR, "Server connection in use");
}
@@ -1060,7 +1060,7 @@ int alter_server(THD *thd, LEX_SERVER_OPTIONS *server_options)
if (close_cached_connection_tables(thd, &name))
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_UNKNOWN_ERROR, "Server connection in use");
}
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index d5a52ed0b52..587d4b6ebdb 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -910,8 +910,8 @@ public:
}
bool handle_condition(THD *thd, uint sql_errno, const char * /* sqlstate */,
- MYSQL_ERROR::enum_warning_level level,
- const char *message, MYSQL_ERROR ** /* cond_hdl */)
+ Sql_condition::enum_warning_level level,
+ const char *message, Sql_condition ** /* cond_hdl */)
{
/*
The handler does not handle the errors raised by itself.
@@ -942,7 +942,7 @@ public:
case ER_NO_SUCH_TABLE:
case ER_NO_SUCH_TABLE_IN_ENGINE:
/* Established behavior: warn if underlying tables are missing. */
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_VIEW_INVALID,
ER(ER_VIEW_INVALID),
m_top_view->get_db_name(),
@@ -952,7 +952,7 @@ public:
case ER_SP_DOES_NOT_EXIST:
/* Established behavior: warn if underlying functions are missing. */
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_VIEW_INVALID,
ER(ER_VIEW_INVALID),
m_top_view->get_db_name(),
@@ -1046,7 +1046,7 @@ mysqld_show_create(THD *thd, TABLE_LIST *table_list)
{
field_list.push_back(new Item_empty_string("View",NAME_CHAR_LEN));
field_list.push_back(new Item_empty_string("Create View",
- max(buffer.length(),1024)));
+ MY_MAX(buffer.length(),1024)));
field_list.push_back(new Item_empty_string("character_set_client",
MY_CS_NAME_SIZE));
field_list.push_back(new Item_empty_string("collation_connection",
@@ -1057,7 +1057,7 @@ mysqld_show_create(THD *thd, TABLE_LIST *table_list)
field_list.push_back(new Item_empty_string("Table",NAME_CHAR_LEN));
// 1024 is for not to confuse old clients
field_list.push_back(new Item_empty_string("Create Table",
- max(buffer.length(),1024)));
+ MY_MAX(buffer.length(),1024)));
}
if (protocol->send_result_set_metadata(&field_list,
@@ -1739,7 +1739,7 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
packet->append(STRING_WITH_LEN(" ("));
- for (uint j=0 ; j < key_info->key_parts ; j++,key_part++)
+ for (uint j=0 ; j < key_info->user_defined_key_parts ; j++,key_part++)
{
if (j)
packet->append(',');
@@ -1881,6 +1881,22 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
packet->append(STRING_WITH_LEN(" PACK_KEYS=1"));
if (create_info.options & HA_OPTION_NO_PACK_KEYS)
packet->append(STRING_WITH_LEN(" PACK_KEYS=0"));
+ if (share->db_create_options & HA_OPTION_STATS_PERSISTENT)
+ packet->append(STRING_WITH_LEN(" STATS_PERSISTENT=1"));
+ if (share->db_create_options & HA_OPTION_NO_STATS_PERSISTENT)
+ packet->append(STRING_WITH_LEN(" STATS_PERSISTENT=0"));
+ if (share->stats_auto_recalc == HA_STATS_AUTO_RECALC_ON)
+ packet->append(STRING_WITH_LEN(" STATS_AUTO_RECALC=1"));
+ else if (share->stats_auto_recalc == HA_STATS_AUTO_RECALC_OFF)
+ packet->append(STRING_WITH_LEN(" STATS_AUTO_RECALC=0"));
+ if (share->stats_sample_pages != 0)
+ {
+ char *end;
+ packet->append(STRING_WITH_LEN(" STATS_SAMPLE_PAGES="));
+ end= longlong10_to_str(share->stats_sample_pages, buff, 10);
+ packet->append(buff, (uint) (end - buff));
+ }
+
/* We use CHECKSUM, instead of TABLE_CHECKSUM, for backward compability */
if (create_info.options & HA_OPTION_CHECKSUM)
packet->append(STRING_WITH_LEN(" CHECKSUM=1"));
@@ -1940,8 +1956,7 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
&part_syntax_len,
FALSE,
show_table_options,
- NULL, NULL,
- comment_start.c_ptr())))
+ NULL, NULL)))
{
packet->append(comment_start);
if (packet->append(part_syntax, part_syntax_len) ||
@@ -2257,7 +2272,7 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose)
/* Lock THD mutex that protects its data when looking at it. */
if (tmp->query())
{
- uint length= min(max_query_length, tmp->query_length());
+ uint length= MY_MIN(max_query_length, tmp->query_length());
char *q= thd->strmake(tmp->query(),length);
/* Safety: in case strmake failed, we set length to 0. */
thd_info->query_string=
@@ -2270,7 +2285,7 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose)
*/
if (tmp->progress.max_counter)
{
- uint max_stage= max(tmp->progress.max_stage, 1);
+ uint max_stage= MY_MAX(tmp->progress.max_stage, 1);
thd_info->progress= (((tmp->progress.stage / (double) max_stage) +
((tmp->progress.counter /
(double) tmp->progress.max_counter) /
@@ -2479,7 +2494,7 @@ int fill_show_explain(THD *thd, TABLE_LIST *table, COND *cond)
else
warning_text= explain_req.query_str.c_ptr_safe();
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_YES, warning_text);
}
DBUG_RETURN(bres);
@@ -2583,7 +2598,7 @@ int fill_schema_processlist(THD* thd, TABLE_LIST* tables, COND* cond)
if (tmp->query())
{
table->field[7]->store(tmp->query(),
- min(PROCESS_LIST_INFO_WIDTH,
+ MY_MIN(PROCESS_LIST_INFO_WIDTH,
tmp->query_length()), cs);
table->field[7]->set_notnull();
}
@@ -3030,7 +3045,7 @@ static int aggregate_user_stats(HASH *all_user_stats, HASH *agg_user_stats)
{
DBUG_ENTER("aggregate_user_stats");
if (my_hash_init(agg_user_stats, system_charset_info,
- max(all_user_stats->records, 1),
+ MY_MAX(all_user_stats->records, 1),
0, 0, (my_hash_get_key)get_key_user_stats,
(my_hash_free_key)free_user_stats, 0))
{
@@ -4069,12 +4084,13 @@ fill_schema_table_by_open(THD *thd, bool is_show_fields_or_keys,
'only_view_structure()'.
*/
lex->sql_command= SQLCOM_SHOW_FIELDS;
- result= open_normal_and_derived_tables(thd, table_list,
- (MYSQL_OPEN_IGNORE_FLUSH |
- MYSQL_OPEN_FORCE_SHARED_HIGH_PRIO_MDL |
- (can_deadlock ?
- MYSQL_OPEN_FAIL_ON_MDL_CONFLICT : 0)),
- DT_PREPARE | DT_CREATE);
+ result= (open_temporary_tables(thd, table_list) ||
+ open_normal_and_derived_tables(thd, table_list,
+ (MYSQL_OPEN_IGNORE_FLUSH |
+ MYSQL_OPEN_FORCE_SHARED_HIGH_PRIO_MDL |
+ (can_deadlock ?
+ MYSQL_OPEN_FAIL_ON_MDL_CONFLICT : 0)),
+ DT_PREPARE | DT_CREATE));
/*
Restore old value of sql_command back as it is being looked at in
process_table() function.
@@ -4095,8 +4111,8 @@ fill_schema_table_by_open(THD *thd, bool is_show_fields_or_keys,
of backward compatibility.
*/
if (!is_show_fields_or_keys && result && thd->is_error() &&
- (thd->stmt_da->sql_errno() == ER_NO_SUCH_TABLE ||
- thd->stmt_da->sql_errno() == ER_WRONG_OBJECT))
+ (thd->get_stmt_da()->sql_errno() == ER_NO_SUCH_TABLE ||
+ thd->get_stmt_da()->sql_errno() == ER_WRONG_OBJECT))
{
/*
Hide error for a non-existing table.
@@ -4183,7 +4199,7 @@ static int fill_schema_table_names(THD *thd, TABLE_LIST *tables,
else
table->field[3]->store(STRING_WITH_LEN("ERROR"), cs);
- if (thd->is_error() && thd->stmt_da->sql_errno() == ER_NO_SUCH_TABLE)
+ if (thd->is_error() && thd->get_stmt_da()->sql_errno() == ER_NO_SUCH_TABLE)
{
thd->clear_error();
return 0;
@@ -4227,7 +4243,7 @@ uint get_table_open_method(TABLE_LIST *tables,
for (ptr=tables->table->field; (field= *ptr) ; ptr++)
{
star_table_open_method=
- min(star_table_open_method,
+ MY_MIN(star_table_open_method,
schema_table->fields_info[field_indx].open_method);
if (bitmap_is_set(tables->table->read_set, field->field_index))
{
@@ -4386,7 +4402,7 @@ static int fill_schema_table_from_frm(THD *thd, TABLE_LIST *tables,
*/
DBUG_ASSERT(can_deadlock);
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARN_I_S_SKIPPED_TABLE,
ER(ER_WARN_I_S_SKIPPED_TABLE),
table_list.db, table_list.table_name);
@@ -4514,9 +4530,9 @@ public:
bool handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
- MYSQL_ERROR::enum_warning_level level,
+ Sql_condition::enum_warning_level level,
const char* msg,
- MYSQL_ERROR ** cond_hdl)
+ Sql_condition ** cond_hdl)
{
if (sql_errno == ER_PARSE_ERROR ||
sql_errno == ER_TRG_NO_DEFINER ||
@@ -4892,7 +4908,7 @@ static int get_schema_tables_record(THD *thd, TABLE_LIST *tables,
}
else
{
- char option_buff[350];
+ char option_buff[512];
String str(option_buff,sizeof(option_buff), system_charset_info);
TABLE *show_table= tables->table;
TABLE_SHARE *share= show_table->s;
@@ -4957,6 +4973,23 @@ static int get_schema_tables_record(THD *thd, TABLE_LIST *tables,
if (share->db_create_options & HA_OPTION_NO_PACK_KEYS)
str.qs_append(STRING_WITH_LEN(" pack_keys=0"));
+ if (share->db_create_options & HA_OPTION_STATS_PERSISTENT)
+ str.qs_append(STRING_WITH_LEN(" stats_persistent=1"));
+
+ if (share->db_create_options & HA_OPTION_NO_STATS_PERSISTENT)
+ str.qs_append(STRING_WITH_LEN(" stats_persistent=0"));
+
+ if (share->stats_auto_recalc == HA_STATS_AUTO_RECALC_ON)
+ str.qs_append(STRING_WITH_LEN(" stats_auto_recalc=1"));
+ else if (share->stats_auto_recalc == HA_STATS_AUTO_RECALC_OFF)
+ str.qs_append(STRING_WITH_LEN(" stats_auto_recalc=0"));
+
+ if (share->stats_sample_pages != 0)
+ {
+ str.qs_append(STRING_WITH_LEN(" stats_sample_pages="));
+ str.qs_append(share->stats_sample_pages);
+ }
+
/* We use CHECKSUM, instead of TABLE_CHECKSUM, for backward compability */
if (share->db_create_options & HA_OPTION_CHECKSUM)
str.qs_append(STRING_WITH_LEN(" checksum=1"));
@@ -5104,13 +5137,14 @@ err:
column with the error text, and clear the error so that the operation
can continue.
*/
- const char *error= thd->is_error() ? thd->stmt_da->message() : "";
+ const char *error= thd->is_error() ? thd->get_stmt_da()->message() : "";
table->field[20]->store(error, strlen(error), cs);
if (thd->is_error())
{
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- thd->stmt_da->sql_errno(), thd->stmt_da->message());
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
+ thd->get_stmt_da()->sql_errno(),
+ thd->get_stmt_da()->message());
thd->clear_error();
}
}
@@ -5274,8 +5308,9 @@ static int get_schema_column_record(THD *thd, TABLE_LIST *tables,
rather than in SHOW COLUMNS
*/
if (thd->is_error())
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- thd->stmt_da->sql_errno(), thd->stmt_da->message());
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
+ thd->get_stmt_da()->sql_errno(),
+ thd->get_stmt_da()->message());
thd->clear_error();
res= 0;
}
@@ -5671,16 +5706,16 @@ bool store_schema_params(THD *thd, TABLE *table, TABLE *proc_table,
for (uint i= 0 ; i < params ; i++)
{
const char *tmp_buff;
- sp_variable_t *spvar= spcont->find_variable(i);
+ sp_variable *spvar= spcont->find_variable(i);
field_def= &spvar->field_def;
switch (spvar->mode) {
- case sp_param_in:
+ case sp_variable::MODE_IN:
tmp_buff= "IN";
break;
- case sp_param_out:
+ case sp_variable::MODE_OUT:
tmp_buff= "OUT";
break;
- case sp_param_inout:
+ case sp_variable::MODE_INOUT:
tmp_buff= "INOUT";
break;
default:
@@ -5945,8 +5980,9 @@ static int get_schema_stat_record(THD *thd, TABLE_LIST *tables,
rather than in SHOW KEYS
*/
if (thd->is_error())
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- thd->stmt_da->sql_errno(), thd->stmt_da->message());
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
+ thd->get_stmt_da()->sql_errno(),
+ thd->get_stmt_da()->message());
thd->clear_error();
res= 0;
}
@@ -5967,7 +6003,7 @@ static int get_schema_stat_record(THD *thd, TABLE_LIST *tables,
{
KEY_PART_INFO *key_part= key_info->key_part;
const char *str;
- for (uint j=0 ; j < key_info->key_parts ; j++,key_part++)
+ for (uint j=0 ; j < key_info->user_defined_key_parts ; j++,key_part++)
{
restore_record(table, s->default_values);
table->field[0]->store(STRING_WITH_LEN("def"), cs);
@@ -6130,7 +6166,7 @@ static int get_schema_views_record(THD *thd, TABLE_LIST *tables,
*/
while ((item= it++))
{
- if ((field= item->filed_for_view_update()) && field->field &&
+ if ((field= item->field_for_view_update()) && field->field &&
!field->field->table->pos_in_table_list->schema_table)
{
updatable_view= 1;
@@ -6167,8 +6203,9 @@ static int get_schema_views_record(THD *thd, TABLE_LIST *tables,
if (schema_table_store_record(thd, table))
DBUG_RETURN(1);
if (res && thd->is_error())
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- thd->stmt_da->sql_errno(), thd->stmt_da->message());
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
+ thd->get_stmt_da()->sql_errno(),
+ thd->get_stmt_da()->message());
}
if (res)
thd->clear_error();
@@ -6201,8 +6238,9 @@ static int get_schema_constraints_record(THD *thd, TABLE_LIST *tables,
if (res)
{
if (thd->is_error())
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- thd->stmt_da->sql_errno(), thd->stmt_da->message());
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
+ thd->get_stmt_da()->sql_errno(),
+ thd->get_stmt_da()->message());
thd->clear_error();
DBUG_RETURN(0);
}
@@ -6307,8 +6345,9 @@ static int get_schema_triggers_record(THD *thd, TABLE_LIST *tables,
if (res)
{
if (thd->is_error())
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- thd->stmt_da->sql_errno(), thd->stmt_da->message());
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
+ thd->get_stmt_da()->sql_errno(),
+ thd->get_stmt_da()->message());
thd->clear_error();
DBUG_RETURN(0);
}
@@ -6388,8 +6427,9 @@ static int get_schema_key_column_usage_record(THD *thd,
if (res)
{
if (thd->is_error())
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- thd->stmt_da->sql_errno(), thd->stmt_da->message());
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
+ thd->get_stmt_da()->sql_errno(),
+ thd->get_stmt_da()->message());
thd->clear_error();
DBUG_RETURN(0);
}
@@ -6408,7 +6448,7 @@ static int get_schema_key_column_usage_record(THD *thd,
continue;
uint f_idx= 0;
KEY_PART_INFO *key_part= key_info->key_part;
- for (uint j=0 ; j < key_info->key_parts ; j++,key_part++)
+ for (uint j=0 ; j < key_info->user_defined_key_parts ; j++,key_part++)
{
if (key_part->field)
{
@@ -6603,7 +6643,7 @@ static void store_schema_partitions_record(THD *thd, TABLE *schema_table,
strlen(part_elem->tablespace_name), cs);
else
{
- char *ts= showing_table->file->get_tablespace_name(thd,0,0);
+ char *ts= showing_table->s->tablespace;
if(ts)
table->field[24]->store(ts, strlen(ts), cs);
else
@@ -6678,8 +6718,9 @@ static int get_schema_partitions_record(THD *thd, TABLE_LIST *tables,
if (res)
{
if (thd->is_error())
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- thd->stmt_da->sql_errno(), thd->stmt_da->message());
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
+ thd->get_stmt_da()->sql_errno(),
+ thd->get_stmt_da()->message());
thd->clear_error();
DBUG_RETURN(0);
}
@@ -7210,8 +7251,9 @@ get_referential_constraints_record(THD *thd, TABLE_LIST *tables,
if (res)
{
if (thd->is_error())
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
- thd->stmt_da->sql_errno(), thd->stmt_da->message());
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
+ thd->get_stmt_da()->sql_errno(),
+ thd->get_stmt_da()->message());
thd->clear_error();
DBUG_RETURN(0);
}
@@ -7919,41 +7961,32 @@ static bool do_fill_table(THD *thd,
// Warning_info, so "useful warnings" get rejected. In order to avoid
// that problem we create a Warning_info instance, which is capable of
// storing "unlimited" number of warnings.
- Warning_info wi(thd->query_id, true);
- Warning_info *wi_saved= thd->warning_info;
+ Diagnostics_area *da= thd->get_stmt_da();
+ Warning_info wi_tmp(thd->query_id, true, true);
- thd->warning_info= &wi;
+ da->push_warning_info(&wi_tmp);
bool res= table_list->schema_table->fill_table(
thd, table_list, join_table->select_cond);
- thd->warning_info= wi_saved;
+ da->pop_warning_info();
// Pass an error if any.
- if (thd->stmt_da->is_error())
+ if (da->is_error())
{
- thd->warning_info->push_warning(thd,
- thd->stmt_da->sql_errno(),
- thd->stmt_da->get_sqlstate(),
- MYSQL_ERROR::WARN_LEVEL_ERROR,
- thd->stmt_da->message());
+ da->push_warning(thd,
+ da->sql_errno(),
+ da->get_sqlstate(),
+ Sql_condition::WARN_LEVEL_ERROR,
+ da->message());
}
// Pass warnings (if any).
//
// Filter out warnings with WARN_LEVEL_ERROR level, because they
// correspond to the errors which were filtered out in fill_table().
-
-
- List_iterator_fast<MYSQL_ERROR> it(wi.warn_list());
- MYSQL_ERROR *err;
-
- while ((err= it++))
- {
- if (err->get_level() != MYSQL_ERROR::WARN_LEVEL_ERROR)
- thd->warning_info->push_warning(thd, err);
- }
+ da->copy_non_errors_from_wi(thd, &wi_tmp);
return res;
}
@@ -9110,7 +9143,7 @@ static bool show_create_trigger_impl(THD *thd,
Item_empty_string *stmt_fld=
new Item_empty_string("SQL Original Statement",
- max(trg_sql_original_stmt.length, 1024));
+ MY_MAX(trg_sql_original_stmt.length, 1024));
stmt_fld->maybe_null= TRUE;
diff --git a/sql/sql_signal.cc b/sql/sql_signal.cc
index ed4d2c23d53..a0a47b77591 100644
--- a/sql/sql_signal.cc
+++ b/sql/sql_signal.cc
@@ -88,9 +88,10 @@ void Set_signal_information::clear()
memset(m_item, 0, sizeof(m_item));
}
-void Signal_common::assign_defaults(MYSQL_ERROR *cond,
+void Sql_cmd_common_signal::assign_defaults(
+ Sql_condition *cond,
bool set_level_code,
- MYSQL_ERROR::enum_warning_level level,
+ Sql_condition::enum_warning_level level,
int sqlcode)
{
if (set_level_code)
@@ -102,7 +103,7 @@ void Signal_common::assign_defaults(MYSQL_ERROR *cond,
cond->set_builtin_message_text(ER(sqlcode));
}
-void Signal_common::eval_defaults(THD *thd, MYSQL_ERROR *cond)
+void Sql_cmd_common_signal::eval_defaults(THD *thd, Sql_condition *cond)
{
DBUG_ASSERT(cond);
@@ -114,8 +115,8 @@ void Signal_common::eval_defaults(THD *thd, MYSQL_ERROR *cond)
/*
SIGNAL is restricted in sql_yacc.yy to only signal SQLSTATE conditions.
*/
- DBUG_ASSERT(m_cond->type == sp_cond_type::state);
- sqlstate= m_cond->sqlstate;
+ DBUG_ASSERT(m_cond->type == sp_condition_value::SQLSTATE);
+ sqlstate= m_cond->sql_state;
cond->set_sqlstate(sqlstate);
}
else
@@ -129,19 +130,19 @@ void Signal_common::eval_defaults(THD *thd, MYSQL_ERROR *cond)
{
/* SQLSTATE class "01": warning. */
assign_defaults(cond, set_defaults,
- MYSQL_ERROR::WARN_LEVEL_WARN, ER_SIGNAL_WARN);
+ Sql_condition::WARN_LEVEL_WARN, ER_SIGNAL_WARN);
}
else if ((sqlstate[0] == '0') && (sqlstate[1] == '2'))
{
/* SQLSTATE class "02": not found. */
assign_defaults(cond, set_defaults,
- MYSQL_ERROR::WARN_LEVEL_ERROR, ER_SIGNAL_NOT_FOUND);
+ Sql_condition::WARN_LEVEL_ERROR, ER_SIGNAL_NOT_FOUND);
}
else
{
/* other SQLSTATE classes : error. */
assign_defaults(cond, set_defaults,
- MYSQL_ERROR::WARN_LEVEL_ERROR, ER_SIGNAL_EXCEPTION);
+ Sql_condition::WARN_LEVEL_ERROR, ER_SIGNAL_EXCEPTION);
}
}
@@ -256,26 +257,26 @@ static int assign_condition_item(MEM_ROOT *mem_root, const char* name, THD *thd,
}
-int Signal_common::eval_signal_informations(THD *thd, MYSQL_ERROR *cond)
+int Sql_cmd_common_signal::eval_signal_informations(THD *thd, Sql_condition *cond)
{
struct cond_item_map
{
enum enum_diag_condition_item_name m_item;
- String MYSQL_ERROR::*m_member;
+ String Sql_condition::*m_member;
};
static cond_item_map map[]=
{
- { DIAG_CLASS_ORIGIN, & MYSQL_ERROR::m_class_origin },
- { DIAG_SUBCLASS_ORIGIN, & MYSQL_ERROR::m_subclass_origin },
- { DIAG_CONSTRAINT_CATALOG, & MYSQL_ERROR::m_constraint_catalog },
- { DIAG_CONSTRAINT_SCHEMA, & MYSQL_ERROR::m_constraint_schema },
- { DIAG_CONSTRAINT_NAME, & MYSQL_ERROR::m_constraint_name },
- { DIAG_CATALOG_NAME, & MYSQL_ERROR::m_catalog_name },
- { DIAG_SCHEMA_NAME, & MYSQL_ERROR::m_schema_name },
- { DIAG_TABLE_NAME, & MYSQL_ERROR::m_table_name },
- { DIAG_COLUMN_NAME, & MYSQL_ERROR::m_column_name },
- { DIAG_CURSOR_NAME, & MYSQL_ERROR::m_cursor_name }
+ { DIAG_CLASS_ORIGIN, & Sql_condition::m_class_origin },
+ { DIAG_SUBCLASS_ORIGIN, & Sql_condition::m_subclass_origin },
+ { DIAG_CONSTRAINT_CATALOG, & Sql_condition::m_constraint_catalog },
+ { DIAG_CONSTRAINT_SCHEMA, & Sql_condition::m_constraint_schema },
+ { DIAG_CONSTRAINT_NAME, & Sql_condition::m_constraint_name },
+ { DIAG_CATALOG_NAME, & Sql_condition::m_catalog_name },
+ { DIAG_SCHEMA_NAME, & Sql_condition::m_schema_name },
+ { DIAG_TABLE_NAME, & Sql_condition::m_table_name },
+ { DIAG_COLUMN_NAME, & Sql_condition::m_column_name },
+ { DIAG_CURSOR_NAME, & Sql_condition::m_cursor_name }
};
Item *set;
@@ -288,7 +289,7 @@ int Signal_common::eval_signal_informations(THD *thd, MYSQL_ERROR *cond)
String *member;
const LEX_STRING *name;
- DBUG_ENTER("Signal_common::eval_signal_informations");
+ DBUG_ENTER("Sql_cmd_common_signal::eval_signal_informations");
for (i= FIRST_DIAG_SET_PROPERTY;
i <= LAST_DIAG_SET_PROPERTY;
@@ -360,7 +361,7 @@ int Signal_common::eval_signal_informations(THD *thd, MYSQL_ERROR *cond)
/*
See the comments
- "Design notes about MYSQL_ERROR::m_message_text."
+ "Design notes about Sql_condition::m_message_text."
in file sql_error.cc
*/
String converted_text;
@@ -413,23 +414,23 @@ end:
DBUG_RETURN(result);
}
-bool Signal_common::raise_condition(THD *thd, MYSQL_ERROR *cond)
+bool Sql_cmd_common_signal::raise_condition(THD *thd, Sql_condition *cond)
{
bool result= TRUE;
- DBUG_ENTER("Signal_common::raise_condition");
+ DBUG_ENTER("Sql_cmd_common_signal::raise_condition");
- DBUG_ASSERT(m_lex->query_tables == NULL);
+ DBUG_ASSERT(thd->lex->query_tables == NULL);
eval_defaults(thd, cond);
if (eval_signal_informations(thd, cond))
DBUG_RETURN(result);
/* SIGNAL should not signal WARN_LEVEL_NOTE */
- DBUG_ASSERT((cond->m_level == MYSQL_ERROR::WARN_LEVEL_WARN) ||
- (cond->m_level == MYSQL_ERROR::WARN_LEVEL_ERROR));
+ DBUG_ASSERT((cond->m_level == Sql_condition::WARN_LEVEL_WARN) ||
+ (cond->m_level == Sql_condition::WARN_LEVEL_ERROR));
- MYSQL_ERROR *raised= NULL;
+ Sql_condition *raised= NULL;
raised= thd->raise_condition(cond->get_sql_errno(),
cond->get_sqlstate(),
cond->get_level(),
@@ -437,7 +438,7 @@ bool Signal_common::raise_condition(THD *thd, MYSQL_ERROR *cond)
if (raised)
raised->copy_opt_attributes(cond);
- if (cond->m_level == MYSQL_ERROR::WARN_LEVEL_WARN)
+ if (cond->m_level == Sql_condition::WARN_LEVEL_WARN)
{
my_ok(thd);
result= FALSE;
@@ -446,12 +447,12 @@ bool Signal_common::raise_condition(THD *thd, MYSQL_ERROR *cond)
DBUG_RETURN(result);
}
-bool Signal_statement::execute(THD *thd)
+bool Sql_cmd_signal::execute(THD *thd)
{
bool result= TRUE;
- MYSQL_ERROR cond(thd->mem_root);
+ Sql_condition cond(thd->mem_root);
- DBUG_ENTER("Signal_statement::execute");
+ DBUG_ENTER("Sql_cmd_signal::execute");
/*
WL#2110 SIGNAL specification says:
@@ -465,9 +466,9 @@ bool Signal_statement::execute(THD *thd)
This has roots in the SQL standard specification for SIGNAL.
*/
- thd->stmt_da->reset_diagnostics_area();
+ thd->get_stmt_da()->reset_diagnostics_area();
thd->set_row_count_func(0);
- thd->warning_info->clear_warning_info(thd->query_id);
+ thd->get_stmt_da()->clear_warning_info(thd->query_id);
result= raise_condition(thd, &cond);
@@ -475,14 +476,27 @@ bool Signal_statement::execute(THD *thd)
}
-bool Resignal_statement::execute(THD *thd)
+/**
+ Execute RESIGNAL SQL-statement.
+
+ @param thd Thread context.
+
+ @return Error status
+ @retval true in case of error
+ @retval false on success
+*/
+
+bool Sql_cmd_resignal::execute(THD *thd)
{
- Sql_condition_info *signaled;
+ Diagnostics_area *da= thd->get_stmt_da();
+ const sp_rcontext::Sql_condition_info *signaled;
int result= TRUE;
DBUG_ENTER("Resignal_statement::execute");
- thd->warning_info->m_warn_id= thd->query_id;
+ // This is a way to force sql_conditions from the current Warning_info to be
+ // passed to the caller's Warning_info.
+ da->set_warning_info_id(thd->query_id);
if (! thd->spcont || ! (signaled= thd->spcont->raised_condition()))
{
@@ -490,22 +504,38 @@ bool Resignal_statement::execute(THD *thd)
DBUG_RETURN(result);
}
- MYSQL_ERROR signaled_err(thd->mem_root);
- signaled_err.set(signaled->m_sql_errno,
- signaled->m_sql_state,
- signaled->m_level,
- signaled->m_message);
+ Sql_condition signaled_err(thd->mem_root);
+ signaled_err.set(signaled->sql_errno,
+ signaled->sql_state,
+ signaled->level,
+ signaled->message);
- if (m_cond == NULL)
+ if (m_cond)
{
- /* RESIGNAL without signal_value */
- result= raise_condition(thd, &signaled_err);
- DBUG_RETURN(result);
+ query_cache_abort(&thd->query_cache_tls);
+
+ /* Keep handled conditions. */
+ da->unmark_sql_conditions_from_removal();
+
+ /* Check if the old condition still exists. */
+ if (da->has_sql_condition(signaled->message, strlen(signaled->message)))
+ {
+ /* Make room for the new RESIGNAL condition. */
+ da->reserve_space(thd, 1);
+ }
+ else
+ {
+ /* Make room for old condition + the new RESIGNAL condition. */
+ da->reserve_space(thd, 2);
+
+ da->push_warning(thd, &signaled_err);
+ }
}
/* RESIGNAL with signal_value */
result= raise_condition(thd, &signaled_err);
DBUG_RETURN(result);
+
}
diff --git a/sql/sql_signal.h b/sql/sql_signal.h
index 058457a3639..2a508eed5bf 100644
--- a/sql/sql_signal.h
+++ b/sql/sql_signal.h
@@ -18,27 +18,25 @@
#define SQL_SIGNAL_H
/**
- Signal_common represents the common properties of the SIGNAL and RESIGNAL
- statements.
+ Sql_cmd_common_signal represents the common properties of the
+ SIGNAL and RESIGNAL statements.
*/
-class Signal_common : public Sql_statement
+class Sql_cmd_common_signal : public Sql_cmd
{
protected:
/**
Constructor.
- @param lex the LEX structure for this statement.
@param cond the condition signaled if any, or NULL.
@param set collection of signal condition item assignments.
*/
- Signal_common(LEX *lex,
- const sp_cond_type_t *cond,
- const Set_signal_information& set)
- : Sql_statement(lex),
+ Sql_cmd_common_signal(const sp_condition_value *cond,
+ const Set_signal_information& set)
+ : Sql_cmd(),
m_cond(cond),
m_set_signal_information(set)
{}
- virtual ~Signal_common()
+ virtual ~Sql_cmd_common_signal()
{}
/**
@@ -49,9 +47,9 @@ protected:
@param level the level to assign
@param sqlcode the sql code to assign
*/
- static void assign_defaults(MYSQL_ERROR *cond,
+ static void assign_defaults(Sql_condition *cond,
bool set_level_code,
- MYSQL_ERROR::enum_warning_level level,
+ Sql_condition::enum_warning_level level,
int sqlcode);
/**
@@ -60,7 +58,7 @@ protected:
@param thd the current thread.
@param cond the condition to update.
*/
- void eval_defaults(THD *thd, MYSQL_ERROR *cond);
+ void eval_defaults(THD *thd, Sql_condition *cond);
/**
Evaluate each signal condition items for this statement.
@@ -68,7 +66,7 @@ protected:
@param cond the condition to update.
@return 0 on success.
*/
- int eval_signal_informations(THD *thd, MYSQL_ERROR *cond);
+ int eval_signal_informations(THD *thd, Sql_condition *cond);
/**
Raise a SQL condition.
@@ -76,13 +74,13 @@ protected:
@param cond the condition to raise.
@return false on success.
*/
- bool raise_condition(THD *thd, MYSQL_ERROR *cond);
+ bool raise_condition(THD *thd, Sql_condition *cond);
/**
The condition to signal or resignal.
This member is optional and can be NULL (RESIGNAL).
*/
- const sp_cond_type_t *m_cond;
+ const sp_condition_value *m_cond;
/**
Collection of 'SET item = value' assignments in the
@@ -92,60 +90,56 @@ protected:
};
/**
- Signal_statement represents a SIGNAL statement.
+ Sql_cmd_signal represents a SIGNAL statement.
*/
-class Signal_statement : public Signal_common
+class Sql_cmd_signal : public Sql_cmd_common_signal
{
public:
/**
Constructor, used to represent a SIGNAL statement.
- @param lex the LEX structure for this statement.
@param cond the SQL condition to signal (required).
@param set the collection of signal informations to signal.
*/
- Signal_statement(LEX *lex,
- const sp_cond_type_t *cond,
- const Set_signal_information& set)
- : Signal_common(lex, cond, set)
+ Sql_cmd_signal(const sp_condition_value *cond,
+ const Set_signal_information& set)
+ : Sql_cmd_common_signal(cond, set)
{}
- virtual ~Signal_statement()
+ virtual ~Sql_cmd_signal()
{}
- /**
- Execute a SIGNAL statement at runtime.
- @param thd the current thread.
- @return false on success.
- */
+ virtual enum_sql_command sql_command_code() const
+ {
+ return SQLCOM_SIGNAL;
+ }
+
virtual bool execute(THD *thd);
};
/**
- Resignal_statement represents a RESIGNAL statement.
+ Sql_cmd_resignal represents a RESIGNAL statement.
*/
-class Resignal_statement : public Signal_common
+class Sql_cmd_resignal : public Sql_cmd_common_signal
{
public:
/**
Constructor, used to represent a RESIGNAL statement.
- @param lex the LEX structure for this statement.
@param cond the SQL condition to resignal (optional, may be NULL).
@param set the collection of signal informations to resignal.
*/
- Resignal_statement(LEX *lex,
- const sp_cond_type_t *cond,
- const Set_signal_information& set)
- : Signal_common(lex, cond, set)
+ Sql_cmd_resignal(const sp_condition_value *cond,
+ const Set_signal_information& set)
+ : Sql_cmd_common_signal(cond, set)
{}
- virtual ~Resignal_statement()
+ virtual ~Sql_cmd_resignal()
{}
- /**
- Execute a RESIGNAL statement at runtime.
- @param thd the current thread.
- @return 0 on success.
- */
+ virtual enum_sql_command sql_command_code() const
+ {
+ return SQLCOM_RESIGNAL;
+ }
+
virtual bool execute(THD *thd);
};
diff --git a/sql/sql_statistics.cc b/sql/sql_statistics.cc
index 2e2886a1d3f..94cbf3b946a 100644
--- a/sql/sql_statistics.cc
+++ b/sql/sql_statistics.cc
@@ -1548,7 +1548,7 @@ public:
is_single_comp_pk= FALSE;
uint pk= table->s->primary_key;
if ((uint) (table->key_info - key_info) == pk &&
- table->key_info[pk].key_parts == 1)
+ table->key_info[pk].user_defined_key_parts == 1)
{
prefixes= 1;
is_single_comp_pk= TRUE;
@@ -1990,12 +1990,12 @@ int alloc_statistics_for_table_share(THD* thd, TABLE_SHARE *table_share,
DBUG_RETURN(1);
if (!is_safe)
- mysql_mutex_lock(&table_share->LOCK_ha_data);
+ mysql_mutex_lock(&table_share->LOCK_share);
if (stats_cb->stats_can_be_read)
{
if (!is_safe)
- mysql_mutex_unlock(&table_share->LOCK_ha_data);
+ mysql_mutex_unlock(&table_share->LOCK_share);
DBUG_RETURN(0);
}
@@ -2007,7 +2007,7 @@ int alloc_statistics_for_table_share(THD* thd, TABLE_SHARE *table_share,
if (!table_stats)
{
if (!is_safe)
- mysql_mutex_unlock(&table_share->LOCK_ha_data);
+ mysql_mutex_unlock(&table_share->LOCK_share);
DBUG_RETURN(1);
}
memset(table_stats, 0, sizeof(Table_statistics));
@@ -2080,7 +2080,7 @@ int alloc_statistics_for_table_share(THD* thd, TABLE_SHARE *table_share,
stats_cb->stats_can_be_read= TRUE;
if (!is_safe)
- mysql_mutex_unlock(&table_share->LOCK_ha_data);
+ mysql_mutex_unlock(&table_share->LOCK_share);
DBUG_RETURN(0);
}
@@ -2124,12 +2124,12 @@ int alloc_histograms_for_table_share(THD* thd, TABLE_SHARE *table_share,
DBUG_ENTER("alloc_histograms_for_table_share");
if (!is_safe)
- mysql_mutex_lock(&table_share->LOCK_ha_data);
+ mysql_mutex_lock(&table_share->LOCK_share);
if (stats_cb->histograms_can_be_read)
{
if (!is_safe)
- mysql_mutex_unlock(&table_share->LOCK_ha_data);
+ mysql_mutex_unlock(&table_share->LOCK_share);
DBUG_RETURN(0);
}
@@ -2143,7 +2143,7 @@ int alloc_histograms_for_table_share(THD* thd, TABLE_SHARE *table_share,
if (!histograms)
{
if (!is_safe)
- mysql_mutex_unlock(&table_share->LOCK_ha_data);
+ mysql_mutex_unlock(&table_share->LOCK_share);
DBUG_RETURN(1);
}
memset(histograms, 0, total_hist_size);
@@ -2152,7 +2152,7 @@ int alloc_histograms_for_table_share(THD* thd, TABLE_SHARE *table_share,
}
if (!is_safe)
- mysql_mutex_unlock(&table_share->LOCK_ha_data);
+ mysql_mutex_unlock(&table_share->LOCK_share);
DBUG_RETURN(0);
@@ -2177,7 +2177,7 @@ void Column_statistics_collected::init(THD *thd, Field *table_field)
is_single_pk_col= FALSE;
- if (pk != MAX_KEY && table->key_info[pk].key_parts == 1 &&
+ if (pk != MAX_KEY && table->key_info[pk].user_defined_key_parts == 1 &&
table->key_info[pk].key_part[0].fieldnr == table_field->field_index + 1)
is_single_pk_col= TRUE;
@@ -2727,12 +2727,12 @@ int read_statistics_for_table(THD *thd, TABLE *table, TABLE_LIST *stat_tables)
}
key_part_map ext_key_part_map= key_info->ext_key_part_map;
- if (key_info->key_parts != key_info->ext_key_parts &&
- key_info->read_stats->get_avg_frequency(key_info->key_parts) == 0)
+ if (key_info->user_defined_key_parts != key_info->ext_key_parts &&
+ key_info->read_stats->get_avg_frequency(key_info->user_defined_key_parts) == 0)
{
KEY *pk_key_info= table_share->key_info + table_share->primary_key;
- uint k= key_info->key_parts;
- uint pk_parts= pk_key_info->key_parts;
+ uint k= key_info->user_defined_key_parts;
+ uint pk_parts= pk_key_info->user_defined_key_parts;
ha_rows n_rows= read_stats->cardinality;
double k_dist= n_rows / key_info->read_stats->get_avg_frequency(k-1);
uint m= 0;
@@ -3193,7 +3193,7 @@ int delete_statistics_for_index(THD *thd, TABLE *tab, KEY *key_info,
}
else
{
- for (uint i= key_info->key_parts; i < key_info->ext_key_parts; i++)
+ for (uint i= key_info->user_defined_key_parts; i < key_info->ext_key_parts; i++)
{
index_stat.set_key_fields(key_info, i+1);
if (index_stat.find_next_stat_for_prefix(4))
@@ -3341,7 +3341,10 @@ int rename_column_in_stat_tables(THD *thd, TABLE *tab, Field *col,
int rc= 0;
DBUG_ENTER("rename_column_in_stat_tables");
-
+
+ if (tab->s->tmp_table != NO_TMP_TABLE)
+ DBUG_RETURN(0);
+
if (open_single_stat_table(thd, &tables, &stat_table_name[1],
&open_tables_backup, TRUE))
{
diff --git a/sql/sql_statistics.h b/sql/sql_statistics.h
index c6a72478c34..c1c80921861 100644
--- a/sql/sql_statistics.h
+++ b/sql/sql_statistics.h
@@ -258,6 +258,17 @@ public:
class Columns_statistics;
class Index_statistics;
+static inline
+int rename_table_in_stat_tables(THD *thd, const char *db, const char *tab,
+ const char *new_db, const char *new_tab)
+{
+ LEX_STRING od= { const_cast<char*>(db), strlen(db) };
+ LEX_STRING ot= { const_cast<char*>(tab), strlen(tab) };
+ LEX_STRING nd= { const_cast<char*>(new_db), strlen(new_db) };
+ LEX_STRING nt= { const_cast<char*>(new_tab), strlen(new_tab) };
+ return rename_table_in_stat_tables(thd, &od, &ot, &nd, &nt);
+}
+
/* Statistical data on a table */
diff --git a/sql/sql_string.cc b/sql/sql_string.cc
index f1cb5e07eca..ddac315f80f 100644
--- a/sql/sql_string.cc
+++ b/sql/sql_string.cc
@@ -661,7 +661,7 @@ int String::reserve(uint32 space_needed, uint32 grow_by)
{
if (Alloced_length < str_length + space_needed)
{
- if (realloc(Alloced_length + max(space_needed, grow_by) - 1))
+ if (realloc(Alloced_length + MY_MAX(space_needed, grow_by) - 1))
return TRUE;
}
return FALSE;
@@ -748,7 +748,7 @@ int sortcmp(const String *s,const String *t, CHARSET_INFO *cs)
int stringcmp(const String *s,const String *t)
{
- uint32 s_len=s->length(),t_len=t->length(),len=min(s_len,t_len);
+ uint32 s_len=s->length(),t_len=t->length(),len=MY_MIN(s_len,t_len);
int cmp= memcmp(s->ptr(), t->ptr(), len);
return (cmp) ? cmp : (int) (s_len - t_len);
}
@@ -765,7 +765,7 @@ String *copy_if_not_alloced(String *to,String *from,uint32 from_length)
}
if (to->realloc(from_length))
return from; // Actually an error
- if ((to->str_length=min(from->str_length,from_length)))
+ if ((to->str_length=MY_MIN(from->str_length,from_length)))
memcpy(to->Ptr,from->Ptr,to->str_length);
to->str_charset=from->str_charset;
return to;
@@ -776,67 +776,6 @@ String *copy_if_not_alloced(String *to,String *from,uint32 from_length)
Help functions
****************************************************************************/
-
-
-/*
- Optimized for quick copying of ASCII characters in the range 0x00..0x7F.
-*/
-uint32
-copy_and_convert(char *to, uint32 to_length, CHARSET_INFO *to_cs,
- const char *from, uint32 from_length, CHARSET_INFO *from_cs,
- uint *errors)
-{
- /*
- If any of the character sets is not ASCII compatible,
- immediately switch to slow mb_wc->wc_mb method.
- */
- if ((to_cs->state | from_cs->state) & MY_CS_NONASCII)
- return copy_and_convert_extended(to, to_length, to_cs,
- from, from_length, from_cs, errors);
-
- uint32 length= min(to_length, from_length), length2= length;
-
-#if defined(__i386__) || defined(__x86_64__)
- /*
- Special loop for i386, it allows to refer to a
- non-aligned memory block as UINT32, which makes
- it possible to copy four bytes at once. This
- gives about 10% performance improvement comparing
- to byte-by-byte loop.
- */
- for ( ; length >= 4; length-= 4, from+= 4, to+= 4)
- {
- if ((*(uint32*)from) & 0x80808080)
- break;
- *((uint32*) to)= *((const uint32*) from);
- }
-#endif
-
- for (; ; *to++= *from++, length--)
- {
- if (!length)
- {
- *errors= 0;
- return length2;
- }
- if (*((unsigned char*) from) > 0x7F) /* A non-ASCII character */
- {
- uint32 copied_length= length2 - length;
- to_length-= copied_length;
- from_length-= copied_length;
- return copied_length + copy_and_convert_extended(to, to_length,
- to_cs,
- from, from_length,
- from_cs,
- errors);
- }
- }
-
- DBUG_ASSERT(FALSE); // Should never get to here
- return 0; // Make compiler happy
-}
-
-
/**
Copy string with HEX-encoding of "bad" characters.
@@ -954,7 +893,7 @@ well_formed_copy_nchars(CHARSET_INFO *to_cs,
if (to_cs == &my_charset_bin)
{
- res= min(min(nchars, to_length), from_length);
+ res= MY_MIN(MY_MIN(nchars, to_length), from_length);
memmove(to, from, res);
*from_end_pos= from + res;
*well_formed_error_pos= NULL;
@@ -1155,7 +1094,7 @@ uint convert_to_printable(char *to, size_t to_len,
char *t= to;
char *t_end= to + to_len - 1; // '- 1' is for the '\0' at the end
const char *f= from;
- const char *f_end= from + (nbytes ? min(from_len, nbytes) : from_len);
+ const char *f_end= from + (nbytes ? MY_MIN(from_len, nbytes) : from_len);
char *dots= to; // last safe place to append '...'
if (!f || t == t_end)
diff --git a/sql/sql_string.h b/sql/sql_string.h
index 1979ac6e4af..352dfbe9fa3 100644
--- a/sql/sql_string.h
+++ b/sql/sql_string.h
@@ -34,9 +34,13 @@ typedef struct st_mem_root MEM_ROOT;
int sortcmp(const String *a,const String *b, CHARSET_INFO *cs);
String *copy_if_not_alloced(String *a,String *b,uint32 arg_length);
-uint32 copy_and_convert(char *to, uint32 to_length, CHARSET_INFO *to_cs,
- const char *from, uint32 from_length,
- CHARSET_INFO *from_cs, uint *errors);
+inline uint32 copy_and_convert(char *to, uint32 to_length,
+ const CHARSET_INFO *to_cs,
+ const char *from, uint32 from_length,
+ const CHARSET_INFO *from_cs, uint *errors)
+{
+ return my_convert(to, to_length, to_cs, from, from_length, from_cs, errors);
+}
uint32 well_formed_copy_nchars(CHARSET_INFO *to_cs,
char *to, uint to_length,
CHARSET_INFO *from_cs,
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 35a486a960a..79c6d4cbaf9 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -32,6 +32,7 @@
#include "sql_partition.h" // mem_alloc_error,
// generate_partition_syntax,
// partition_info
+ // NOT_A_PARTITION_ID
#include "sql_db.h" // load_db_opt_by_name
#include "sql_time.h" // make_truncated_value_warning
#include "records.h" // init_read_record, end_read_record
@@ -63,10 +64,12 @@ const char *primary_key_name="PRIMARY";
static bool check_if_keyname_exists(const char *name,KEY *start, KEY *end);
static char *make_unique_key_name(const char *field_name,KEY *start,KEY *end);
-static int copy_data_between_tables(THD *thd, TABLE *,TABLE *,
- List<Create_field> &, bool,
- uint, ORDER *, ha_rows *,ha_rows *,
- enum enum_enable_or_disable, bool);
+static int copy_data_between_tables(THD *thd, TABLE *from,TABLE *to,
+ List<Create_field> &create, bool ignore,
+ uint order_num, ORDER *order,
+ ha_rows *copied,ha_rows *deleted,
+ Alter_info::enum_enable_or_disable keys_onoff,
+ Alter_table_ctx *alter_ctx);
static bool prepare_blob_field(THD *thd, Create_field *sql_field);
static bool check_engine(THD *, const char *, const char *, HA_CREATE_INFO *);
@@ -100,7 +103,8 @@ static char* add_identifier(THD* thd, char *to_p, const char * end_p,
tmp_name[name_len]= 0;
conv_name= tmp_name;
}
- res= strconvert(&my_charset_filename, conv_name, system_charset_info,
+ res= strconvert(&my_charset_filename, conv_name, name_len,
+ system_charset_info,
conv_string, FN_REFLEN, &errors);
if (!res || errors)
{
@@ -376,7 +380,7 @@ uint filename_to_tablename(const char *from, char *to, uint to_length
DBUG_ENTER("filename_to_tablename");
DBUG_PRINT("enter", ("from '%s'", from));
- res= strconvert(&my_charset_filename, from,
+ res= strconvert(&my_charset_filename, from, FN_REFLEN,
system_charset_info, to, to_length, &errors);
if (errors) // Old 5.0 name
{
@@ -467,7 +471,7 @@ uint tablename_to_filename(const char *from, char *to, uint to_length)
}
DBUG_RETURN(length);
}
- length= strconvert(system_charset_info, from,
+ length= strconvert(system_charset_info, from, FN_REFLEN,
&my_charset_filename, to, to_length, &errors);
if (check_if_legal_tablename(to) &&
length + 4 < to_length)
@@ -523,7 +527,7 @@ uint build_table_filename(char *buff, size_t bufflen, const char *db,
db, table_name, ext, flags));
if (flags & FN_IS_TMP) // FN_FROM_IS_TMP | FN_TO_IS_TMP
- strnmov(tbbuff, table_name, sizeof(tbbuff));
+ strmake(tbbuff, table_name, sizeof(tbbuff)-1);
else
(void) tablename_to_filename(table_name, tbbuff, sizeof(tbbuff));
@@ -538,8 +542,11 @@ uint build_table_filename(char *buff, size_t bufflen, const char *db,
pos= strnmov(pos, FN_ROOTDIR, end - pos);
pos= strxnmov(pos, end - pos, dbbuff, FN_ROOTDIR, NullS);
#ifdef USE_SYMDIR
- unpack_dirname(buff, buff);
- pos= strend(buff);
+ if (!(flags & SKIP_SYMDIR_ACCESS))
+ {
+ unpack_dirname(buff, buff);
+ pos= strend(buff);
+ }
#endif
pos= strxnmov(pos, end - pos, tbbuff, ext, NullS);
@@ -548,22 +555,19 @@ uint build_table_filename(char *buff, size_t bufflen, const char *db,
}
-/*
- Creates path to a file: mysql_tmpdir/#sql1234_12_1.ext
-
- SYNOPSIS
- build_tmptable_filename()
- thd The thread handle.
- buff Where to write result in my_charset_filename.
- bufflen buff size
+/**
+ Create path to a temporary table mysql_tmpdir/#sql1234_12_1
+ (i.e. to its .FRM file but without an extension).
- NOTES
+ @param thd The thread handle.
+ @param buff Where to write result in my_charset_filename.
+ @param bufflen buff size
+ @note
Uses current_pid, thread_id, and tmp_table counter to create
a file name in mysql_tmpdir.
- RETURN
- path length
+ @return Path length.
*/
uint build_tmptable_filename(THD* thd, char *buff, size_t bufflen)
@@ -571,9 +575,9 @@ uint build_tmptable_filename(THD* thd, char *buff, size_t bufflen)
DBUG_ENTER("build_tmptable_filename");
char *p= strnmov(buff, mysql_tmpdir, bufflen);
- my_snprintf(p, bufflen - (p - buff), "/%s%lx_%lx_%x%s",
+ my_snprintf(p, bufflen - (p - buff), "/%s%lx_%lx_%x",
tmp_file_prefix, current_pid,
- thd->thread_id, thd->tmp_table++, reg_ext);
+ thd->thread_id, thd->tmp_table++);
if (lower_case_table_names)
{
@@ -612,9 +616,15 @@ uint build_tmptable_filename(THD* thd, char *buff, size_t bufflen)
--------------------------------------------------------------------------
*/
-
struct st_global_ddl_log
{
+ /*
+ We need to adjust buffer size to be able to handle downgrades/upgrades
+ where IO_SIZE has changed. We'll set the buffer size such that we can
+ handle that the buffer size was upto 4 times bigger in the version
+ that wrote the DDL log.
+ */
+ char file_entry_buf[4*IO_SIZE];
char file_name_str[FN_REFLEN];
char *file_name;
DDL_LOG_MEMORY_ENTRY *first_free;
@@ -642,31 +652,28 @@ mysql_mutex_t LOCK_gdl;
#define DDL_LOG_NUM_ENTRY_POS 0
#define DDL_LOG_NAME_LEN_POS 4
#define DDL_LOG_IO_SIZE_POS 8
-#define DDL_LOG_HEADER_SIZE 12
/**
Read one entry from ddl log file.
- @param[out] file_entry_buf Buffer to read into
- @param entry_no Entry number to read
- @param size Number of bytes of the entry to read
+
+ @param entry_no Entry number to read
@return Operation status
@retval true Error
@retval false Success
*/
-static bool read_ddl_log_file_entry(uchar *file_entry_buf,
- uint entry_no,
- uint size)
+static bool read_ddl_log_file_entry(uint entry_no)
{
bool error= FALSE;
File file_id= global_ddl_log.file_id;
+ uchar *file_entry_buf= (uchar*)global_ddl_log.file_entry_buf;
uint io_size= global_ddl_log.io_size;
DBUG_ENTER("read_ddl_log_file_entry");
- DBUG_ASSERT(io_size >= size);
- if (mysql_file_pread(file_id, file_entry_buf, size, io_size * entry_no,
- MYF(MY_WME)) != size)
+ mysql_mutex_assert_owner(&LOCK_gdl);
+ if (mysql_file_pread(file_id, file_entry_buf, io_size, io_size * entry_no,
+ MYF(MY_WME)) != io_size)
error= TRUE;
DBUG_RETURN(error);
}
@@ -675,75 +682,77 @@ static bool read_ddl_log_file_entry(uchar *file_entry_buf,
/**
Write one entry to ddl log file.
- @param file_entry_buf Buffer to write
- @param entry_no Entry number to write
- @param size Number of bytes of the entry to write
+ @param entry_no Entry number to write
@return Operation status
@retval true Error
@retval false Success
*/
-static bool write_ddl_log_file_entry(uchar *file_entry_buf,
- uint entry_no,
- uint size)
+static bool write_ddl_log_file_entry(uint entry_no)
{
bool error= FALSE;
File file_id= global_ddl_log.file_id;
- uint io_size= global_ddl_log.io_size;
+ uchar *file_entry_buf= (uchar*)global_ddl_log.file_entry_buf;
DBUG_ENTER("write_ddl_log_file_entry");
- DBUG_ASSERT(io_size >= size);
- if (mysql_file_pwrite(file_id, file_entry_buf, size,
- io_size * entry_no, MYF(MY_WME)) != size)
+ mysql_mutex_assert_owner(&LOCK_gdl);
+ if (mysql_file_pwrite(file_id, file_entry_buf,
+ IO_SIZE, IO_SIZE * entry_no, MYF(MY_WME)) != IO_SIZE)
error= TRUE;
DBUG_RETURN(error);
}
-/*
- Write ddl log header
- SYNOPSIS
- write_ddl_log_header()
- RETURN VALUES
- TRUE Error
- FALSE Success
+/**
+ Sync the ddl log file.
+
+ @return Operation status
+ @retval FALSE Success
+ @retval TRUE Error
+*/
+
+
+static bool sync_ddl_log_file()
+{
+ DBUG_ENTER("sync_ddl_log_file");
+ DBUG_RETURN(mysql_file_sync(global_ddl_log.file_id, MYF(MY_WME)));
+}
+
+
+/**
+ Write ddl log header.
+
+ @return Operation status
+ @retval TRUE Error
+ @retval FALSE Success
*/
static bool write_ddl_log_header()
{
uint16 const_var;
- bool error= FALSE;
- uchar file_entry_buf[DDL_LOG_HEADER_SIZE];
DBUG_ENTER("write_ddl_log_header");
- DBUG_ASSERT((DDL_LOG_NAME_POS + 3 * global_ddl_log.name_len)
- <= global_ddl_log.io_size);
- int4store(&file_entry_buf[DDL_LOG_NUM_ENTRY_POS],
+ int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NUM_ENTRY_POS],
global_ddl_log.num_entries);
- const_var= global_ddl_log.name_len;
- int4store(&file_entry_buf[DDL_LOG_NAME_LEN_POS],
+ const_var= FN_REFLEN;
+ int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_LEN_POS],
(ulong) const_var);
- const_var= global_ddl_log.io_size;
- int4store(&file_entry_buf[DDL_LOG_IO_SIZE_POS],
+ const_var= IO_SIZE;
+ int4store(&global_ddl_log.file_entry_buf[DDL_LOG_IO_SIZE_POS],
(ulong) const_var);
- if (write_ddl_log_file_entry(file_entry_buf, 0UL, DDL_LOG_HEADER_SIZE))
+ if (write_ddl_log_file_entry(0UL))
{
sql_print_error("Error writing ddl log header");
DBUG_RETURN(TRUE);
}
- (void) sync_ddl_log();
- DBUG_RETURN(error);
+ DBUG_RETURN(sync_ddl_log_file());
}
-/*
- Create ddl log file name
- SYNOPSIS
- create_ddl_log_file_name()
- file_name Filename setup
- RETURN VALUES
- NONE
+/**
+ Create ddl log file name.
+ @param file_name Filename setup
*/
static inline void create_ddl_log_file_name(char *file_name)
@@ -752,35 +761,32 @@ static inline void create_ddl_log_file_name(char *file_name)
}
-/*
- Read header of ddl log file
- SYNOPSIS
- read_ddl_log_header()
- RETURN VALUES
- > 0 Last entry in ddl log
- 0 No entries in ddl log
- DESCRIPTION
- When we read the ddl log header we get information about maximum sizes
- of names in the ddl log and we also get information about the number
- of entries in the ddl log.
+/**
+ Read header of ddl log file.
+
+ When we read the ddl log header we get information about maximum sizes
+ of names in the ddl log and we also get information about the number
+ of entries in the ddl log.
+
+ @return Last entry in ddl log (0 if no entries)
*/
static uint read_ddl_log_header()
{
- char file_entry_buf[DDL_LOG_HEADER_SIZE];
+ uchar *file_entry_buf= (uchar*)global_ddl_log.file_entry_buf;
char file_name[FN_REFLEN];
uint entry_no;
bool successful_open= FALSE;
DBUG_ENTER("read_ddl_log_header");
- DBUG_ASSERT(global_ddl_log.io_size <= IO_SIZE);
+ mysql_mutex_init(key_LOCK_gdl, &LOCK_gdl, MY_MUTEX_INIT_SLOW);
+ mysql_mutex_lock(&LOCK_gdl);
create_ddl_log_file_name(file_name);
if ((global_ddl_log.file_id= mysql_file_open(key_file_global_ddl_log,
file_name,
O_RDWR | O_BINARY, MYF(0))) >= 0)
{
- if (read_ddl_log_file_entry((uchar *) file_entry_buf, 0UL,
- DDL_LOG_HEADER_SIZE))
+ if (read_ddl_log_file_entry(0UL))
{
/* Write message into error log */
sql_print_error("Failed to read ddl log file in recovery");
@@ -793,6 +799,8 @@ static uint read_ddl_log_header()
entry_no= uint4korr(&file_entry_buf[DDL_LOG_NUM_ENTRY_POS]);
global_ddl_log.name_len= uint4korr(&file_entry_buf[DDL_LOG_NAME_LEN_POS]);
global_ddl_log.io_size= uint4korr(&file_entry_buf[DDL_LOG_IO_SIZE_POS]);
+ DBUG_ASSERT(global_ddl_log.io_size <=
+ sizeof(global_ddl_log.file_entry_buf));
}
else
{
@@ -801,28 +809,72 @@ static uint read_ddl_log_header()
global_ddl_log.first_free= NULL;
global_ddl_log.first_used= NULL;
global_ddl_log.num_entries= 0;
- mysql_mutex_init(key_LOCK_gdl, &LOCK_gdl, MY_MUTEX_INIT_FAST);
global_ddl_log.do_release= true;
+ mysql_mutex_unlock(&LOCK_gdl);
DBUG_RETURN(entry_no);
}
/**
- Set ddl log entry struct from buffer
- @param read_entry Entry number
- @param file_entry_buf Buffer to use
- @param ddl_log_entry Entry to be set
+ Convert from ddl_log_entry struct to file_entry_buf binary blob.
- @note Pointers in ddl_log_entry will point into file_entry_buf!
+ @param ddl_log_entry filled in ddl_log_entry struct.
*/
-static void set_ddl_log_entry_from_buf(uint read_entry,
- uchar *file_entry_buf,
- DDL_LOG_ENTRY *ddl_log_entry)
+static void set_global_from_ddl_log_entry(const DDL_LOG_ENTRY *ddl_log_entry)
{
+ mysql_mutex_assert_owner(&LOCK_gdl);
+ global_ddl_log.file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]=
+ (char)DDL_LOG_ENTRY_CODE;
+ global_ddl_log.file_entry_buf[DDL_LOG_ACTION_TYPE_POS]=
+ (char)ddl_log_entry->action_type;
+ global_ddl_log.file_entry_buf[DDL_LOG_PHASE_POS]= 0;
+ int4store(&global_ddl_log.file_entry_buf[DDL_LOG_NEXT_ENTRY_POS],
+ ddl_log_entry->next_entry);
+ DBUG_ASSERT(strlen(ddl_log_entry->name) < FN_REFLEN);
+ strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS],
+ ddl_log_entry->name, FN_REFLEN - 1);
+ if (ddl_log_entry->action_type == DDL_LOG_RENAME_ACTION ||
+ ddl_log_entry->action_type == DDL_LOG_REPLACE_ACTION ||
+ ddl_log_entry->action_type == DDL_LOG_EXCHANGE_ACTION)
+ {
+ DBUG_ASSERT(strlen(ddl_log_entry->from_name) < FN_REFLEN);
+ strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + FN_REFLEN],
+ ddl_log_entry->from_name, FN_REFLEN - 1);
+ }
+ else
+ global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + FN_REFLEN]= 0;
+ DBUG_ASSERT(strlen(ddl_log_entry->handler_name) < FN_REFLEN);
+ strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + (2*FN_REFLEN)],
+ ddl_log_entry->handler_name, FN_REFLEN - 1);
+ if (ddl_log_entry->action_type == DDL_LOG_EXCHANGE_ACTION)
+ {
+ DBUG_ASSERT(strlen(ddl_log_entry->tmp_name) < FN_REFLEN);
+ strmake(&global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + (3*FN_REFLEN)],
+ ddl_log_entry->tmp_name, FN_REFLEN - 1);
+ }
+ else
+ global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS + (3*FN_REFLEN)]= 0;
+}
+
+
+/**
+ Convert from file_entry_buf binary blob to ddl_log_entry struct.
+
+ @param[out] ddl_log_entry struct to fill in.
+
+ @note Strings (names) are pointing to the global_ddl_log structure,
+ so LOCK_gdl needs to be hold until they are read or copied.
+*/
+
+static void set_ddl_log_entry_from_global(DDL_LOG_ENTRY *ddl_log_entry,
+ const uint read_entry)
+{
+ char *file_entry_buf= (char*) global_ddl_log.file_entry_buf;
uint inx;
uchar single_char;
- DBUG_ENTER("set_ddl_log_entry_from_buf");
+
+ mysql_mutex_assert_owner(&LOCK_gdl);
ddl_log_entry->entry_pos= read_entry;
single_char= file_entry_buf[DDL_LOG_ENTRY_TYPE_POS];
ddl_log_entry->entry_type= (enum ddl_log_entry_code)single_char;
@@ -830,27 +882,56 @@ static void set_ddl_log_entry_from_buf(uint read_entry,
ddl_log_entry->action_type= (enum ddl_log_action_code)single_char;
ddl_log_entry->phase= file_entry_buf[DDL_LOG_PHASE_POS];
ddl_log_entry->next_entry= uint4korr(&file_entry_buf[DDL_LOG_NEXT_ENTRY_POS]);
- ddl_log_entry->name= (char*) &file_entry_buf[DDL_LOG_NAME_POS];
+ ddl_log_entry->name= &file_entry_buf[DDL_LOG_NAME_POS];
inx= DDL_LOG_NAME_POS + global_ddl_log.name_len;
- ddl_log_entry->from_name= (char*) &file_entry_buf[inx];
+ ddl_log_entry->from_name= &file_entry_buf[inx];
inx+= global_ddl_log.name_len;
- ddl_log_entry->handler_name= (char*) &file_entry_buf[inx];
- DBUG_VOID_RETURN;
+ ddl_log_entry->handler_name= &file_entry_buf[inx];
+ if (ddl_log_entry->action_type == DDL_LOG_EXCHANGE_ACTION)
+ {
+ inx+= global_ddl_log.name_len;
+ ddl_log_entry->tmp_name= &file_entry_buf[inx];
+ }
+ else
+ ddl_log_entry->tmp_name= NULL;
}
-
-/*
- Initialise ddl log
- SYNOPSIS
- init_ddl_log()
- DESCRIPTION
- Write the header of the ddl log file and length of names. Also set
- number of entries to zero.
+/**
+ Read a ddl log entry.
- RETURN VALUES
- TRUE Error
- FALSE Success
+ Read a specified entry in the ddl log.
+
+ @param read_entry Number of entry to read
+ @param[out] entry_info Information from entry
+
+ @return Operation status
+ @retval TRUE Error
+ @retval FALSE Success
+*/
+
+static bool read_ddl_log_entry(uint read_entry, DDL_LOG_ENTRY *ddl_log_entry)
+{
+ DBUG_ENTER("read_ddl_log_entry");
+
+ if (read_ddl_log_file_entry(read_entry))
+ {
+ DBUG_RETURN(TRUE);
+ }
+ set_ddl_log_entry_from_global(ddl_log_entry, read_entry);
+ DBUG_RETURN(FALSE);
+}
+
+
+/**
+ Initialise ddl log.
+
+ Write the header of the ddl log file and length of names. Also set
+ number of entries to zero.
+
+ @return Operation status
+ @retval TRUE Error
+ @retval FALSE Success
*/
static bool init_ddl_log()
@@ -862,7 +943,7 @@ static bool init_ddl_log()
goto end;
global_ddl_log.io_size= IO_SIZE;
- global_ddl_log.name_len= FN_LEN;
+ global_ddl_log.name_len= FN_REFLEN;
create_ddl_log_file_name(file_name);
if ((global_ddl_log.file_id= mysql_file_create(key_file_global_ddl_log,
file_name, CREATE_MODE,
@@ -886,14 +967,116 @@ end:
}
-/*
+/**
+ Sync ddl log file.
+
+ @return Operation status
+ @retval TRUE Error
+ @retval FALSE Success
+*/
+
+static bool sync_ddl_log_no_lock()
+{
+ DBUG_ENTER("sync_ddl_log_no_lock");
+
+ mysql_mutex_assert_owner(&LOCK_gdl);
+ if ((!global_ddl_log.recovery_phase) &&
+ init_ddl_log())
+ {
+ DBUG_RETURN(TRUE);
+ }
+ DBUG_RETURN(sync_ddl_log_file());
+}
+
+
+/**
+ @brief Deactivate an individual entry.
+
+ @details For complex rename operations we need to deactivate individual
+ entries.
+
+ During replace operations where we start with an existing table called
+ t1 and a replacement table called t1#temp or something else and where
+ we want to delete t1 and rename t1#temp to t1 this is not possible to
+ do in a safe manner unless the ddl log is informed of the phases in
+ the change.
+
+ Delete actions are 1-phase actions that can be ignored immediately after
+ being executed.
+ Rename actions from x to y is also a 1-phase action since there is no
+ interaction with any other handlers named x and y.
+ Replace action where drop y and x -> y happens needs to be a two-phase
+ action. Thus the first phase will drop y and the second phase will
+ rename x -> y.
+
+ @param entry_no Entry position of record to change
+
+ @return Operation status
+ @retval TRUE Error
+ @retval FALSE Success
+*/
+
+static bool deactivate_ddl_log_entry_no_lock(uint entry_no)
+{
+ uchar *file_entry_buf= (uchar*)global_ddl_log.file_entry_buf;
+ DBUG_ENTER("deactivate_ddl_log_entry_no_lock");
+
+ mysql_mutex_assert_owner(&LOCK_gdl);
+ if (!read_ddl_log_file_entry(entry_no))
+ {
+ if (file_entry_buf[DDL_LOG_ENTRY_TYPE_POS] == DDL_LOG_ENTRY_CODE)
+ {
+ /*
+ Log entry, if complete mark it done (IGNORE).
+ Otherwise increase the phase by one.
+ */
+ if (file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_DELETE_ACTION ||
+ file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_RENAME_ACTION ||
+ (file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_REPLACE_ACTION &&
+ file_entry_buf[DDL_LOG_PHASE_POS] == 1) ||
+ (file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_EXCHANGE_ACTION &&
+ file_entry_buf[DDL_LOG_PHASE_POS] >= EXCH_PHASE_TEMP_TO_FROM))
+ file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= DDL_IGNORE_LOG_ENTRY_CODE;
+ else if (file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_REPLACE_ACTION)
+ {
+ DBUG_ASSERT(file_entry_buf[DDL_LOG_PHASE_POS] == 0);
+ file_entry_buf[DDL_LOG_PHASE_POS]= 1;
+ }
+ else if (file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_EXCHANGE_ACTION)
+ {
+ DBUG_ASSERT(file_entry_buf[DDL_LOG_PHASE_POS] <=
+ EXCH_PHASE_FROM_TO_NAME);
+ file_entry_buf[DDL_LOG_PHASE_POS]++;
+ }
+ else
+ {
+ DBUG_ASSERT(0);
+ }
+ if (write_ddl_log_file_entry(entry_no))
+ {
+ sql_print_error("Error in deactivating log entry. Position = %u",
+ entry_no);
+ DBUG_RETURN(TRUE);
+ }
+ }
+ }
+ else
+ {
+ sql_print_error("Failed in reading entry before deactivating it");
+ DBUG_RETURN(TRUE);
+ }
+ DBUG_RETURN(FALSE);
+}
+
+
+/**
Execute one action in a ddl log entry
- SYNOPSIS
- execute_ddl_log_action()
- ddl_log_entry Information in action entry to execute
- RETURN VALUES
- TRUE Error
- FALSE Success
+
+ @param ddl_log_entry Information in action entry to execute
+
+ @return Operation status
+ @retval TRUE Error
+ @retval FALSE Success
*/
static int execute_ddl_log_action(THD *thd, DDL_LOG_ENTRY *ddl_log_entry)
@@ -911,17 +1094,20 @@ static int execute_ddl_log_action(THD *thd, DDL_LOG_ENTRY *ddl_log_entry)
handlerton *hton;
DBUG_ENTER("execute_ddl_log_action");
+ mysql_mutex_assert_owner(&LOCK_gdl);
if (ddl_log_entry->entry_type == DDL_IGNORE_LOG_ENTRY_CODE)
{
DBUG_RETURN(FALSE);
}
DBUG_PRINT("ddl_log",
- ("execute type %c next %u name '%s' from_name '%s' handler '%s'",
+ ("execute type %c next %u name '%s' from_name '%s' handler '%s'"
+ " tmp_name '%s'",
ddl_log_entry->action_type,
ddl_log_entry->next_entry,
ddl_log_entry->name,
ddl_log_entry->from_name,
- ddl_log_entry->handler_name));
+ ddl_log_entry->handler_name,
+ ddl_log_entry->tmp_name));
handler_name.str= (char*)ddl_log_entry->handler_name;
handler_name.length= strlen(ddl_log_entry->handler_name);
init_sql_alloc(&mem_root, TABLE_ALLOC_BLOCK_SIZE, 0, MYF(MY_THREAD_SPECIFIC));
@@ -935,7 +1121,7 @@ static int execute_ddl_log_action(THD *thd, DDL_LOG_ENTRY *ddl_log_entry)
my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), ddl_log_entry->handler_name);
goto error;
}
- hton= plugin_hton(plugin);
+ hton= plugin_data(plugin, handlerton*);
file= get_new_handler((TABLE_SHARE*)0, &mem_root, hton);
if (!file)
{
@@ -971,9 +1157,9 @@ static int execute_ddl_log_action(THD *thd, DDL_LOG_ENTRY *ddl_log_entry)
break;
}
}
- if ((deactivate_ddl_log_entry(ddl_log_entry->entry_pos)))
+ if ((deactivate_ddl_log_entry_no_lock(ddl_log_entry->entry_pos)))
break;
- (void) sync_ddl_log();
+ (void) sync_ddl_log_no_lock();
error= FALSE;
if (ddl_log_entry->action_type == DDL_LOG_DELETE_ACTION)
break;
@@ -1006,12 +1192,64 @@ static int execute_ddl_log_action(THD *thd, DDL_LOG_ENTRY *ddl_log_entry)
ddl_log_entry->name))
break;
}
- if ((deactivate_ddl_log_entry(ddl_log_entry->entry_pos)))
+ if ((deactivate_ddl_log_entry_no_lock(ddl_log_entry->entry_pos)))
break;
- (void) sync_ddl_log();
+ (void) sync_ddl_log_no_lock();
error= FALSE;
break;
}
+ case DDL_LOG_EXCHANGE_ACTION:
+ {
+ /* We hold LOCK_gdl, so we can alter global_ddl_log.file_entry_buf */
+ char *file_entry_buf= (char*)&global_ddl_log.file_entry_buf;
+ /* not yet implemented for frm */
+ DBUG_ASSERT(!frm_action);
+ /*
+ Using a case-switch here to revert all currently done phases,
+ since it will fall through until the first phase is undone.
+ */
+ switch (ddl_log_entry->phase) {
+ case EXCH_PHASE_TEMP_TO_FROM:
+ /* tmp_name -> from_name possibly done */
+ (void) file->ha_rename_table(ddl_log_entry->from_name,
+ ddl_log_entry->tmp_name);
+ /* decrease the phase and sync */
+ file_entry_buf[DDL_LOG_PHASE_POS]--;
+ if (write_ddl_log_file_entry(ddl_log_entry->entry_pos))
+ break;
+ if (sync_ddl_log_no_lock())
+ break;
+ /* fall through */
+ case EXCH_PHASE_FROM_TO_NAME:
+ /* from_name -> name possibly done */
+ (void) file->ha_rename_table(ddl_log_entry->name,
+ ddl_log_entry->from_name);
+ /* decrease the phase and sync */
+ file_entry_buf[DDL_LOG_PHASE_POS]--;
+ if (write_ddl_log_file_entry(ddl_log_entry->entry_pos))
+ break;
+ if (sync_ddl_log_no_lock())
+ break;
+ /* fall through */
+ case EXCH_PHASE_NAME_TO_TEMP:
+ /* name -> tmp_name possibly done */
+ (void) file->ha_rename_table(ddl_log_entry->tmp_name,
+ ddl_log_entry->name);
+ /* disable the entry and sync */
+ file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= DDL_IGNORE_LOG_ENTRY_CODE;
+ if (write_ddl_log_file_entry(ddl_log_entry->entry_pos))
+ break;
+ if (sync_ddl_log_no_lock())
+ break;
+ error= FALSE;
+ break;
+ default:
+ DBUG_ASSERT(0);
+ break;
+ }
+
+ break;
+ }
default:
DBUG_ASSERT(0);
break;
@@ -1023,14 +1261,14 @@ error:
}
-/*
+/**
Get a free entry in the ddl log
- SYNOPSIS
- get_free_ddl_log_entry()
- out:active_entry A ddl log memory entry returned
- RETURN VALUES
- TRUE Error
- FALSE Success
+
+ @param[out] active_entry A ddl log memory entry returned
+
+ @return Operation status
+ @retval TRUE Error
+ @retval FALSE Success
*/
static bool get_free_ddl_log_entry(DDL_LOG_MEMORY_ENTRY **active_entry,
@@ -1040,7 +1278,6 @@ static bool get_free_ddl_log_entry(DDL_LOG_MEMORY_ENTRY **active_entry,
DDL_LOG_MEMORY_ENTRY *first_used= global_ddl_log.first_used;
DBUG_ENTER("get_free_ddl_log_entry");
- mysql_mutex_assert_owner(&LOCK_gdl);
if (global_ddl_log.first_free == NULL)
{
if (!(used_entry= (DDL_LOG_MEMORY_ENTRY*)my_malloc(
@@ -1074,76 +1311,99 @@ static bool get_free_ddl_log_entry(DDL_LOG_MEMORY_ENTRY **active_entry,
}
+/**
+ Execute one entry in the ddl log.
+
+ Executing an entry means executing a linked list of actions.
+
+ @param first_entry Reference to first action in entry
+
+ @return Operation status
+ @retval TRUE Error
+ @retval FALSE Success
+*/
+
+static bool execute_ddl_log_entry_no_lock(THD *thd, uint first_entry)
+{
+ DDL_LOG_ENTRY ddl_log_entry;
+ uint read_entry= first_entry;
+ DBUG_ENTER("execute_ddl_log_entry_no_lock");
+
+ mysql_mutex_assert_owner(&LOCK_gdl);
+ do
+ {
+ if (read_ddl_log_entry(read_entry, &ddl_log_entry))
+ {
+ /* Write to error log and continue with next log entry */
+ sql_print_error("Failed to read entry = %u from ddl log",
+ read_entry);
+ break;
+ }
+ DBUG_ASSERT(ddl_log_entry.entry_type == DDL_LOG_ENTRY_CODE ||
+ ddl_log_entry.entry_type == DDL_IGNORE_LOG_ENTRY_CODE);
+
+ if (execute_ddl_log_action(thd, &ddl_log_entry))
+ {
+ /* Write to error log and continue with next log entry */
+ sql_print_error("Failed to execute action for entry = %u from ddl log",
+ read_entry);
+ break;
+ }
+ read_entry= ddl_log_entry.next_entry;
+ } while (read_entry);
+ DBUG_RETURN(FALSE);
+}
+
+
/*
External interface methods for the DDL log Module
---------------------------------------------------
*/
-/*
- SYNOPSIS
- write_ddl_log_entry()
- ddl_log_entry Information about log entry
- out:entry_written Entry information written into
+/**
+ Write a ddl log entry.
- RETURN VALUES
- TRUE Error
- FALSE Success
+ A careful write of the ddl log is performed to ensure that we can
+ handle crashes occurring during CREATE and ALTER TABLE processing.
- DESCRIPTION
- A careful write of the ddl log is performed to ensure that we can
- handle crashes occurring during CREATE and ALTER TABLE processing.
+ @param ddl_log_entry Information about log entry
+ @param[out] entry_written Entry information written into
+
+ @return Operation status
+ @retval TRUE Error
+ @retval FALSE Success
*/
bool write_ddl_log_entry(DDL_LOG_ENTRY *ddl_log_entry,
DDL_LOG_MEMORY_ENTRY **active_entry)
{
bool error, write_header;
- char file_entry_buf[IO_SIZE];
DBUG_ENTER("write_ddl_log_entry");
+ mysql_mutex_assert_owner(&LOCK_gdl);
if (init_ddl_log())
{
DBUG_RETURN(TRUE);
}
- memset(file_entry_buf, 0, sizeof(file_entry_buf));
- file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]=
- (char)DDL_LOG_ENTRY_CODE;
- file_entry_buf[DDL_LOG_ACTION_TYPE_POS]=
- (char)ddl_log_entry->action_type;
- file_entry_buf[DDL_LOG_PHASE_POS]= 0;
- int4store(&file_entry_buf[DDL_LOG_NEXT_ENTRY_POS],
- ddl_log_entry->next_entry);
- DBUG_ASSERT(strlen(ddl_log_entry->name) < global_ddl_log.name_len);
- strmake(&file_entry_buf[DDL_LOG_NAME_POS], ddl_log_entry->name,
- global_ddl_log.name_len - 1);
- if (ddl_log_entry->action_type == DDL_LOG_RENAME_ACTION ||
- ddl_log_entry->action_type == DDL_LOG_REPLACE_ACTION)
- {
- DBUG_ASSERT(strlen(ddl_log_entry->from_name) < global_ddl_log.name_len);
- strmake(&file_entry_buf[DDL_LOG_NAME_POS + global_ddl_log.name_len],
- ddl_log_entry->from_name, global_ddl_log.name_len - 1);
- }
- else
- file_entry_buf[DDL_LOG_NAME_POS + global_ddl_log.name_len]= 0;
- DBUG_ASSERT(strlen(ddl_log_entry->handler_name) < global_ddl_log.name_len);
- strmake(&file_entry_buf[DDL_LOG_NAME_POS + (2*global_ddl_log.name_len)],
- ddl_log_entry->handler_name, global_ddl_log.name_len - 1);
+ set_global_from_ddl_log_entry(ddl_log_entry);
if (get_free_ddl_log_entry(active_entry, &write_header))
{
DBUG_RETURN(TRUE);
}
error= FALSE;
DBUG_PRINT("ddl_log",
- ("write type %c next %u name '%s' from_name '%s' handler '%s'",
- (char) file_entry_buf[DDL_LOG_ACTION_TYPE_POS],
+ ("write type %c next %u name '%s' from_name '%s' handler '%s'"
+ " tmp_name '%s'",
+ (char) global_ddl_log.file_entry_buf[DDL_LOG_ACTION_TYPE_POS],
ddl_log_entry->next_entry,
- (char*) &file_entry_buf[DDL_LOG_NAME_POS],
- (char*) &file_entry_buf[DDL_LOG_NAME_POS +
- global_ddl_log.name_len],
- (char*) &file_entry_buf[DDL_LOG_NAME_POS +
- (2*global_ddl_log.name_len)]));
- if (write_ddl_log_file_entry((uchar*) file_entry_buf,
- (*active_entry)->entry_pos, IO_SIZE))
+ (char*) &global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS],
+ (char*) &global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS
+ + FN_REFLEN],
+ (char*) &global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS
+ + (2*FN_REFLEN)],
+ (char*) &global_ddl_log.file_entry_buf[DDL_LOG_NAME_POS
+ + (3*FN_REFLEN)]));
+ if (write_ddl_log_file_entry((*active_entry)->entry_pos))
{
error= TRUE;
sql_print_error("Failed to write entry_no = %u",
@@ -1151,7 +1411,7 @@ bool write_ddl_log_entry(DDL_LOG_ENTRY *ddl_log_entry,
}
if (write_header && !error)
{
- (void) sync_ddl_log();
+ (void) sync_ddl_log_no_lock();
if (write_ddl_log_header())
error= TRUE;
}
@@ -1161,31 +1421,30 @@ bool write_ddl_log_entry(DDL_LOG_ENTRY *ddl_log_entry,
}
-/*
- Write final entry in the ddl log
- SYNOPSIS
- write_execute_ddl_log_entry()
- first_entry First entry in linked list of entries
+/**
+ @brief Write final entry in the ddl log.
+
+ @details This is the last write in the ddl log. The previous log entries
+ have already been written but not yet synched to disk.
+ We write a couple of log entries that describes action to perform.
+ This entries are set-up in a linked list, however only when a first
+ execute entry is put as the first entry these will be executed.
+ This routine writes this first.
+
+ @param first_entry First entry in linked list of entries
to execute, if 0 = NULL it means that
the entry is removed and the entries
are put into the free list.
- complete Flag indicating we are simply writing
+ @param complete Flag indicating we are simply writing
info about that entry has been completed
- in:out:active_entry Entry to execute, 0 = NULL if the entry
+ @param[in,out] active_entry Entry to execute, 0 = NULL if the entry
is written first time and needs to be
returned. In this case the entry written
is returned in this parameter
- RETURN VALUES
- TRUE Error
- FALSE Success
- DESCRIPTION
- This is the last write in the ddl log. The previous log entries have
- already been written but not yet synched to disk.
- We write a couple of log entries that describes action to perform.
- This entries are set-up in a linked list, however only when a first
- execute entry is put as the first entry these will be executed.
- This routine writes this first
+ @return Operation status
+ @retval TRUE Error
+ @retval FALSE Success
*/
bool write_execute_ddl_log_entry(uint first_entry,
@@ -1193,14 +1452,14 @@ bool write_execute_ddl_log_entry(uint first_entry,
DDL_LOG_MEMORY_ENTRY **active_entry)
{
bool write_header= FALSE;
- char file_entry_buf[IO_SIZE];
+ char *file_entry_buf= (char*)global_ddl_log.file_entry_buf;
DBUG_ENTER("write_execute_ddl_log_entry");
+ mysql_mutex_assert_owner(&LOCK_gdl);
if (init_ddl_log())
{
DBUG_RETURN(TRUE);
}
- memset(file_entry_buf, 0, sizeof(file_entry_buf));
if (!complete)
{
/*
@@ -1209,28 +1468,32 @@ bool write_execute_ddl_log_entry(uint first_entry,
any log entries before, we are only here to write the execute
entry to indicate it is done.
*/
- (void) sync_ddl_log();
+ (void) sync_ddl_log_no_lock();
file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= (char)DDL_LOG_EXECUTE_CODE;
}
else
file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= (char)DDL_IGNORE_LOG_ENTRY_CODE;
+ file_entry_buf[DDL_LOG_ACTION_TYPE_POS]= 0; /* Ignored for execute entries */
+ file_entry_buf[DDL_LOG_PHASE_POS]= 0;
int4store(&file_entry_buf[DDL_LOG_NEXT_ENTRY_POS], first_entry);
+ file_entry_buf[DDL_LOG_NAME_POS]= 0;
+ file_entry_buf[DDL_LOG_NAME_POS + FN_REFLEN]= 0;
+ file_entry_buf[DDL_LOG_NAME_POS + 2*FN_REFLEN]= 0;
if (!(*active_entry))
{
if (get_free_ddl_log_entry(active_entry, &write_header))
{
DBUG_RETURN(TRUE);
}
+ write_header= TRUE;
}
- if (write_ddl_log_file_entry((uchar*) file_entry_buf,
- (*active_entry)->entry_pos,
- IO_SIZE))
+ if (write_ddl_log_file_entry((*active_entry)->entry_pos))
{
sql_print_error("Error writing execute entry in ddl log");
release_ddl_log_memory_entry(*active_entry);
DBUG_RETURN(TRUE);
}
- (void) sync_ddl_log();
+ (void) sync_ddl_log_no_lock();
if (write_header)
{
if (write_ddl_log_header())
@@ -1243,112 +1506,54 @@ bool write_execute_ddl_log_entry(uint first_entry,
}
-/*
- For complex rename operations we need to deactivate individual entries.
- SYNOPSIS
- deactivate_ddl_log_entry()
- entry_no Entry position of record to change
- RETURN VALUES
- TRUE Error
- FALSE Success
- DESCRIPTION
- During replace operations where we start with an existing table called
- t1 and a replacement table called t1#temp or something else and where
- we want to delete t1 and rename t1#temp to t1 this is not possible to
- do in a safe manner unless the ddl log is informed of the phases in
- the change.
-
- Delete actions are 1-phase actions that can be ignored immediately after
- being executed.
- Rename actions from x to y is also a 1-phase action since there is no
- interaction with any other handlers named x and y.
- Replace action where drop y and x -> y happens needs to be a two-phase
- action. Thus the first phase will drop y and the second phase will
- rename x -> y.
+/**
+ Deactivate an individual entry.
+
+ @details see deactivate_ddl_log_entry_no_lock.
+
+ @param entry_no Entry position of record to change
+
+ @return Operation status
+ @retval TRUE Error
+ @retval FALSE Success
*/
bool deactivate_ddl_log_entry(uint entry_no)
{
- uchar file_entry_buf[DDL_LOG_NAME_POS];
+ bool error;
DBUG_ENTER("deactivate_ddl_log_entry");
-
- /*
- Only need to read and write the first bytes of the entry, where
- ENTRY_TYPE, ACTION_TYPE and PHASE reside. Using DDL_LOG_NAME_POS
- to include all info except for the names.
- */
- if (!read_ddl_log_file_entry(file_entry_buf, entry_no, DDL_LOG_NAME_POS))
- {
- if (file_entry_buf[DDL_LOG_ENTRY_TYPE_POS] == DDL_LOG_ENTRY_CODE)
- {
- if (file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_DELETE_ACTION ||
- file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_RENAME_ACTION ||
- (file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_REPLACE_ACTION &&
- file_entry_buf[DDL_LOG_PHASE_POS] == 1))
- file_entry_buf[DDL_LOG_ENTRY_TYPE_POS]= DDL_IGNORE_LOG_ENTRY_CODE;
- else if (file_entry_buf[DDL_LOG_ACTION_TYPE_POS] == DDL_LOG_REPLACE_ACTION)
- {
- DBUG_ASSERT(file_entry_buf[DDL_LOG_PHASE_POS] == 0);
- file_entry_buf[DDL_LOG_PHASE_POS]= 1;
- }
- else
- {
- DBUG_ASSERT(0);
- }
- if (write_ddl_log_file_entry(file_entry_buf, entry_no, DDL_LOG_NAME_POS))
- {
- sql_print_error("Error in deactivating log entry. Position = %u",
- entry_no);
- DBUG_RETURN(TRUE);
- }
- }
- }
- else
- {
- sql_print_error("Failed in reading entry before deactivating it");
- DBUG_RETURN(TRUE);
- }
- DBUG_RETURN(FALSE);
+ mysql_mutex_lock(&LOCK_gdl);
+ error= deactivate_ddl_log_entry_no_lock(entry_no);
+ mysql_mutex_unlock(&LOCK_gdl);
+ DBUG_RETURN(error);
}
-/*
- Sync ddl log file
- SYNOPSIS
- sync_ddl_log()
- RETURN VALUES
- TRUE Error
- FALSE Success
+/**
+ Sync ddl log file.
+
+ @return Operation status
+ @retval TRUE Error
+ @retval FALSE Success
*/
bool sync_ddl_log()
{
- bool error= FALSE;
+ bool error;
DBUG_ENTER("sync_ddl_log");
- if ((!global_ddl_log.recovery_phase) &&
- init_ddl_log())
- {
- DBUG_RETURN(TRUE);
- }
- if (mysql_file_sync(global_ddl_log.file_id, MYF(0)))
- {
- /* Write to error log */
- sql_print_error("Failed to sync ddl log");
- error= TRUE;
- }
+ mysql_mutex_lock(&LOCK_gdl);
+ error= sync_ddl_log_no_lock();
+ mysql_mutex_unlock(&LOCK_gdl);
+
DBUG_RETURN(error);
}
-/*
- Release a log memory entry
- SYNOPSIS
- release_ddl_log_memory_entry()
- log_memory_entry Log memory entry to release
- RETURN VALUES
- NONE
+/**
+ Release a log memory entry.
+ @param log_memory_entry Log memory entry to release
*/
void release_ddl_log_memory_entry(DDL_LOG_MEMORY_ENTRY *log_entry)
@@ -1357,8 +1562,8 @@ void release_ddl_log_memory_entry(DDL_LOG_MEMORY_ENTRY *log_entry)
DDL_LOG_MEMORY_ENTRY *next_log_entry= log_entry->next_log_entry;
DDL_LOG_MEMORY_ENTRY *prev_log_entry= log_entry->prev_log_entry;
DBUG_ENTER("release_ddl_log_memory_entry");
- mysql_mutex_assert_owner(&LOCK_gdl);
+ mysql_mutex_assert_owner(&LOCK_gdl);
global_ddl_log.first_free= log_entry;
log_entry->next_log_entry= first_free;
@@ -1372,58 +1577,32 @@ void release_ddl_log_memory_entry(DDL_LOG_MEMORY_ENTRY *log_entry)
}
-/*
- Execute one entry in the ddl log. Executing an entry means executing
- a linked list of actions.
- SYNOPSIS
- execute_ddl_log_entry()
- first_entry Reference to first action in entry
- RETURN VALUES
- TRUE Error
- FALSE Success
+/**
+ Execute one entry in the ddl log.
+
+ Executing an entry means executing a linked list of actions.
+
+ @param first_entry Reference to first action in entry
+
+ @return Operation status
+ @retval TRUE Error
+ @retval FALSE Success
*/
bool execute_ddl_log_entry(THD *thd, uint first_entry)
{
- DDL_LOG_ENTRY ddl_log_entry;
- uint read_entry= first_entry;
- uchar file_entry_buf[IO_SIZE];
+ bool error;
DBUG_ENTER("execute_ddl_log_entry");
mysql_mutex_lock(&LOCK_gdl);
- do
- {
- if (read_ddl_log_file_entry(file_entry_buf, read_entry, IO_SIZE))
- {
- /* Print the error to the log and continue with next log entry */
- sql_print_error("Failed to read entry = %u from ddl log",
- read_entry);
- break;
- }
- set_ddl_log_entry_from_buf(read_entry, file_entry_buf, &ddl_log_entry);
- DBUG_ASSERT(ddl_log_entry.entry_type == DDL_LOG_ENTRY_CODE ||
- ddl_log_entry.entry_type == DDL_IGNORE_LOG_ENTRY_CODE);
-
- if (execute_ddl_log_action(thd, &ddl_log_entry))
- {
- /* Print the error to the log and continue with next log entry */
- sql_print_error("Failed to execute action for entry = %u from ddl log",
- read_entry);
- break;
- }
- read_entry= ddl_log_entry.next_entry;
- } while (read_entry);
+ error= execute_ddl_log_entry_no_lock(thd, first_entry);
mysql_mutex_unlock(&LOCK_gdl);
- DBUG_RETURN(FALSE);
+ DBUG_RETURN(error);
}
-/*
- Close the ddl log
- SYNOPSIS
- close_ddl_log()
- RETURN VALUES
- NONE
+/**
+ Close the ddl log.
*/
static void close_ddl_log()
@@ -1438,12 +1617,8 @@ static void close_ddl_log()
}
-/*
- Execute the ddl log at recovery of MySQL Server
- SYNOPSIS
- execute_ddl_log_recovery()
- RETURN VALUES
- NONE
+/**
+ Execute the ddl log at recovery of MySQL Server.
*/
void execute_ddl_log_recovery()
@@ -1451,8 +1626,6 @@ void execute_ddl_log_recovery()
uint num_entries, i;
THD *thd;
DDL_LOG_ENTRY ddl_log_entry;
- uchar *file_entry_buf;
- uint io_size;
char file_name[FN_REFLEN];
static char recover_query_string[]= "INTERNAL DDL LOG RECOVER IN PROGRESS";
DBUG_ENTER("execute_ddl_log_recovery");
@@ -1460,6 +1633,7 @@ void execute_ddl_log_recovery()
/*
Initialise global_ddl_log struct
*/
+ bzero(global_ddl_log.file_entry_buf, sizeof(global_ddl_log.file_entry_buf));
global_ddl_log.inited= FALSE;
global_ddl_log.recovery_phase= TRUE;
global_ddl_log.io_size= IO_SIZE;
@@ -1477,26 +1651,18 @@ void execute_ddl_log_recovery()
/* this also initialize LOCK_gdl */
num_entries= read_ddl_log_header();
- io_size= global_ddl_log.io_size;
- file_entry_buf= (uchar*) my_malloc(io_size, MYF(0));
- if (!file_entry_buf)
- {
- sql_print_error("Failed to allocate buffer for recover ddl log");
- DBUG_VOID_RETURN;
- }
+ mysql_mutex_lock(&LOCK_gdl);
for (i= 1; i < num_entries + 1; i++)
{
- if (read_ddl_log_file_entry(file_entry_buf, i, io_size))
+ if (read_ddl_log_entry(i, &ddl_log_entry))
{
sql_print_error("Failed to read entry no = %u from ddl log",
i);
continue;
}
-
- set_ddl_log_entry_from_buf(i, file_entry_buf, &ddl_log_entry);
if (ddl_log_entry.entry_type == DDL_LOG_EXECUTE_CODE)
{
- if (execute_ddl_log_entry(thd, ddl_log_entry.next_entry))
+ if (execute_ddl_log_entry_no_lock(thd, ddl_log_entry.next_entry))
{
/* Real unpleasant scenario but we continue anyways. */
continue;
@@ -1507,20 +1673,16 @@ void execute_ddl_log_recovery()
create_ddl_log_file_name(file_name);
(void) mysql_file_delete(key_file_global_ddl_log, file_name, MYF(0));
global_ddl_log.recovery_phase= FALSE;
+ mysql_mutex_unlock(&LOCK_gdl);
delete thd;
- my_free(file_entry_buf);
/* Remember that we don't have a THD */
set_current_thd(0);
DBUG_VOID_RETURN;
}
-/*
- Release all memory allocated to the ddl log
- SYNOPSIS
- release_ddl_log()
- RETURN VALUES
- NONE
+/**
+ Release all memory allocated to the ddl log.
*/
void release_ddl_log()
@@ -1659,8 +1821,7 @@ bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags)
&syntax_len,
TRUE, TRUE,
lpt->create_info,
- lpt->alter_info,
- NULL)))
+ lpt->alter_info)))
{
DBUG_RETURN(TRUE);
}
@@ -1763,8 +1924,7 @@ bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags)
&syntax_len,
TRUE, TRUE,
lpt->create_info,
- lpt->alter_info,
- NULL)))
+ lpt->alter_info)))
{
error= 1;
goto err;
@@ -1896,22 +2056,20 @@ bool mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists,
(void) delete_statistics_for_table(thd, &db_name, &table_name);
}
}
-
- mysql_ha_rm_tables(thd, tables);
if (!drop_temporary)
{
if (!thd->locked_tables_mode)
{
- if (lock_table_names(thd, tables, NULL, thd->variables.lock_wait_timeout,
- MYSQL_OPEN_SKIP_TEMPORARY))
+ if (lock_table_names(thd, tables, NULL,
+ thd->variables.lock_wait_timeout, 0))
DBUG_RETURN(true);
}
else
{
for (table= tables; table; table= table->next_local)
- if (table->open_type != OT_BASE_ONLY &&
- find_temporary_table(thd, table))
+ {
+ if (is_temporary_table(table))
{
/*
A temporary table.
@@ -1943,6 +2101,7 @@ bool mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists,
DBUG_RETURN(true);
table->mdl_request.ticket= table->table->mdl_ticket;
}
+ }
}
}
@@ -2022,6 +2181,9 @@ static uint32 comment_length(THD *thd, uint32 comment_pos,
@note This function assumes that metadata locks have already been taken.
It is also assumed that the tables have been removed from TDC.
+ @note This function assumes that temporary tables to be dropped have
+ been pre-opened using corresponding table list elements.
+
@todo When logging to the binary log, we should log
tmp_tables and transactional tables as separate statements if we
are in a transaction; This is needed to get these tables into the
@@ -2036,9 +2198,10 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists,
bool dont_log_query)
{
TABLE_LIST *table;
- char path[FN_REFLEN + 1], *alias= NULL;
+ char path[FN_REFLEN + 1], wrong_tables_buff[160], *alias= NULL;
+ String wrong_tables(wrong_tables_buff, sizeof(wrong_tables_buff)-1,
+ system_charset_info);
uint path_length= 0;
- String wrong_tables;
int error= 0;
int non_temp_tables_count= 0;
bool foreign_key_error=0;
@@ -2049,6 +2212,7 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists,
String built_trans_tmp_query, built_non_trans_tmp_query;
DBUG_ENTER("mysql_rm_table_no_locks");
+ wrong_tables.length(0);
/*
Prepares the drop statements that will be written into the binary
log as follows:
@@ -2247,9 +2411,17 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists,
. ./sql/datadict.cc +32 /Alfranio - TODO: We need to test this.
*/
if (if_exists)
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
- ER_BAD_TABLE_ERROR, ER(ER_BAD_TABLE_ERROR),
- table->table_name);
+ {
+ char buff[FN_REFLEN];
+ String tbl_name(buff, sizeof(buff), system_charset_info);
+ tbl_name.length(0);
+ tbl_name.append(db);
+ tbl_name.append('.');
+ tbl_name.append(table->table_name);
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
+ ER_BAD_TABLE_ERROR, ER(ER_BAD_TABLE_ERROR),
+ tbl_name.c_ptr_safe());
+ }
else
{
non_tmp_error = (drop_temporary ? non_tmp_error : TRUE);
@@ -2282,7 +2454,7 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists,
}
/* the following internally does TDC_RT_REMOVE_ALL */
close_all_tables_for_name(thd, table->table->s,
- HA_EXTRA_PREPARE_FOR_DROP);
+ HA_EXTRA_PREPARE_FOR_DROP, NULL);
table->table= 0;
}
else
@@ -2335,7 +2507,9 @@ int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists,
{
if (wrong_tables.length())
wrong_tables.append(',');
- wrong_tables.append(String(table->table_name,system_charset_info));
+ wrong_tables.append(db);
+ wrong_tables.append('.');
+ wrong_tables.append(table->table_name);
}
else
{
@@ -2451,22 +2625,20 @@ end:
}
-/*
+/**
Quickly remove a table.
- SYNOPSIS
- quick_rm_table()
- base The handlerton handle.
- db The database name.
- table_name The table name.
- flags flags for build_table_filename().
+ @param thd Thread context.
+ @param base The handlerton handle.
+ @param db The database name.
+ @param table_name The table name.
+ @param flags Flags for build_table_filename() as well as describing
+ if handler files / .FRM should be deleted as well.
- RETURN
- 0 OK
- != 0 Error
+ @return False in case of success, True otherwise.
*/
-bool quick_rm_table(handlerton *base,const char *db,
+bool quick_rm_table(THD *thd, handlerton *base, const char *db,
const char *table_name, uint flags)
{
char path[FN_REFLEN + 1];
@@ -2478,7 +2650,15 @@ bool quick_rm_table(handlerton *base,const char *db,
if (mysql_file_delete(key_file_frm, path, MYF(0)))
error= 1; /* purecov: inspected */
path[path_length - reg_ext_length]= '\0'; // Remove reg_ext
- if (!(flags & FRM_ONLY))
+ if (flags & NO_HA_TABLE)
+ {
+ handler *file= get_new_handler((TABLE_SHARE*) 0, thd->mem_root, base);
+ if (!file)
+ DBUG_RETURN(true);
+ (void) file->ha_create_partitioning_metadata(path, NULL, CHF_DELETE_FLAG);
+ delete file;
+ }
+ if (!(flags & (FRM_ONLY|NO_HA_TABLE)))
error|= ha_delete_table(current_thd, base, path, db, table_name, 0);
if (likely(error == 0))
@@ -2490,6 +2670,7 @@ bool quick_rm_table(handlerton *base,const char *db,
DBUG_RETURN(error);
}
+
/*
Sort keys in the following order:
- PRIMARY KEY
@@ -2583,7 +2764,7 @@ bool check_duplicates_in_interval(const char *set_or_name,
name, err.ptr(), set_or_name);
return 1;
}
- push_warning_printf(thd,MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning_printf(thd,Sql_condition::WARN_LEVEL_NOTE,
ER_DUPLICATED_VALUE_IN_TYPE,
ER(ER_DUPLICATED_VALUE_IN_TYPE),
name, err.ptr(), set_or_name);
@@ -2651,7 +2832,7 @@ int prepare_create_field(Create_field *sql_field,
longlong table_flags)
{
unsigned int dup_val_count;
- DBUG_ENTER("prepare_field");
+ DBUG_ENTER("prepare_create_field");
/*
This code came from mysql_prepare_create_table.
@@ -2828,21 +3009,6 @@ CHARSET_INFO* get_sql_field_charset(Create_field *sql_field,
}
-bool check_duplicate_warning(THD *thd, char *msg, ulong length)
-{
- List_iterator_fast<MYSQL_ERROR> it(thd->warning_info->warn_list());
- MYSQL_ERROR *err;
- while ((err= it++))
- {
- if (strncmp(msg, err->get_message_text(), length) == 0)
- {
- return true;
- }
- }
- return false;
-}
-
-
/**
Modifies the first column definition whose SQL type is TIMESTAMP
by adding the features DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP.
@@ -2939,6 +3105,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
executing a prepared statement for the second time.
*/
sql_field->length= sql_field->char_length;
+ /* Set field charset. */
save_cs= sql_field->charset= get_sql_field_charset(sql_field,
create_info);
if ((sql_field->flags & BINCMP_FLAG) &&
@@ -3405,7 +3572,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
if (key->generated)
key_info->flags|= HA_GENERATED_KEY;
- key_info->key_parts=(uint8) key->columns.elements;
+ key_info->user_defined_key_parts=(uint8) key->columns.elements;
key_info->key_part=key_part_info;
key_info->usable_key_parts= key_number;
key_info->algorithm= key->key_create_info.algorithm;
@@ -3440,7 +3607,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
my_error(ER_TABLE_CANT_HANDLE_SPKEYS, MYF(0), file->table_type());
DBUG_RETURN(TRUE);
}
- if (key_info->key_parts != 1)
+ if (key_info->user_defined_key_parts != 1)
{
my_error(ER_WRONG_ARGUMENTS, MYF(0), "SPATIAL INDEX");
DBUG_RETURN(TRUE);
@@ -3449,7 +3616,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
else if (key_info->algorithm == HA_KEY_ALG_RTREE)
{
#ifdef HAVE_RTREE_KEYS
- if ((key_info->key_parts & 1) == 1)
+ if ((key_info->user_defined_key_parts & 1) == 1)
{
my_error(ER_WRONG_ARGUMENTS, MYF(0), "RTREE INDEX");
DBUG_RETURN(TRUE);
@@ -3633,14 +3800,14 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
if ((length=column->length) > max_key_length ||
length > file->max_key_part_length())
{
- length=min(max_key_length, file->max_key_part_length());
+ length=MY_MIN(max_key_length, file->max_key_part_length());
if (key->type == Key::MULTIPLE)
{
/* not a critical problem */
char warn_buff[MYSQL_ERRMSG_SIZE];
my_snprintf(warn_buff, sizeof(warn_buff), ER(ER_TOO_LONG_KEY),
length);
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
ER_TOO_LONG_KEY, warn_buff);
/* Align key length to multibyte char boundary */
length-= length % sql_field->charset->mbmaxlen;
@@ -3688,7 +3855,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
char warn_buff[MYSQL_ERRMSG_SIZE];
my_snprintf(warn_buff, sizeof(warn_buff), ER(ER_TOO_LONG_KEY),
length);
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
ER_TOO_LONG_KEY, warn_buff);
/* Align key length to multibyte char boundary */
length-= length % sql_field->charset->mbmaxlen;
@@ -3778,8 +3945,8 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
my_snprintf(warn_buff, sizeof(warn_buff), ER(ER_TOO_LONG_INDEX_COMMENT),
key_info->name, static_cast<ulong>(INDEX_COMMENT_MAXLEN));
/* do not push duplicate warnings */
- if (!check_duplicate_warning(thd, warn_buff, strlen(warn_buff)))
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ if (!thd->get_stmt_da()->has_sql_condition(warn_buff, strlen(warn_buff)))
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
ER_TOO_LONG_INDEX_COMMENT, warn_buff);
key->key_create_info.comment.length= tmp_len;
@@ -3842,7 +4009,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
}
if (create_info->tmp_table())
- create_info->table_options|=HA_CREATE_DELAY_KEY_WRITE;
+ create_info->options|=HA_CREATE_DELAY_KEY_WRITE;
/* Give warnings for not supported table options */
#if defined(WITH_ARIA_STORAGE_ENGINE)
@@ -3850,7 +4017,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
if (file->ht != maria_hton)
#endif
if (create_info->transactional)
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
ER(ER_ILLEGAL_HA_CREATE_OPTION),
file->engine_name()->str,
@@ -3933,7 +4100,7 @@ static bool prepare_blob_field(THD *thd, Create_field *sql_field)
my_snprintf(warn_buff, sizeof(warn_buff), ER(ER_AUTO_CONVERT), sql_field->field_name,
(sql_field->charset == &my_charset_bin) ? "VARBINARY" : "VARCHAR",
(sql_field->charset == &my_charset_bin) ? "BLOB" : "TEXT");
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_AUTO_CONVERT,
+ push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, ER_AUTO_CONVERT,
warn_buff);
}
@@ -4004,56 +4171,15 @@ void sp_prepare_create_field(THD *thd, Create_field *sql_field)
}
-#ifdef WITH_PARTITION_STORAGE_ENGINE
-/**
- Auxiliary function which allows to check if freshly created .FRM
- file for table can be opened.
-
- @retval FALSE - Success.
- @retval TRUE - Failure.
-*/
-
-static bool check_if_created_table_can_be_opened(THD *thd,
- const char *path,
- const char *db,
- const char *table_name,
- HA_CREATE_INFO *create_info,
- handler *file)
-{
- TABLE table;
- TABLE_SHARE share;
- bool result;
-
- /*
- It is impossible to open definition of partitioned table without .par file.
- */
- if (file->ha_create_partitioning_metadata(path, NULL, CHF_CREATE_FLAG))
- return TRUE;
-
- init_tmp_table_share(thd, &share, db, 0, table_name, path);
- share.db_plugin= ha_lock_engine(thd, file->ht);
-
- result= (open_table_def(thd, &share) ||
- open_table_from_share(thd, &share, "", 0, (uint) READ_ALL,
- 0, &table, TRUE));
- if (! result)
- (void) closefrm(&table, 0);
-
- free_table_share(&share);
- (void) file->ha_create_partitioning_metadata(path, NULL, CHF_DELETE_FLAG);
- return result;
-}
-#endif
-
-
handler *mysql_create_frm_image(THD *thd,
const char *db, const char *table_name,
HA_CREATE_INFO *create_info,
Alter_info *alter_info, int create_table_mode,
+ KEY **key_info,
+ uint *key_count,
LEX_CUSTRING *frm)
{
- uint db_options, key_count;
- KEY *key_info_buffer;
+ uint db_options;
handler *file;
DBUG_ENTER("mysql_create_frm_image");
@@ -4073,6 +4199,7 @@ handler *mysql_create_frm_image(THD *thd,
create_info->row_type != ROW_TYPE_FIXED &&
create_info->row_type != ROW_TYPE_DEFAULT)
db_options|= HA_OPTION_PACK_RECORD;
+
if (!(file= get_new_handler((TABLE_SHARE*) 0, thd->mem_root,
create_info->db_type)))
{
@@ -4109,12 +4236,7 @@ handler *mysql_create_frm_image(THD *thd,
partitions also in the call to check_partition_info. We transport
this information in the default_db_type variable, it is either
DB_TYPE_DEFAULT or the engine set in the ALTER TABLE command.
-
- Check that we don't use foreign keys in the table since it won't
- work even with InnoDB beneath it.
*/
- List_iterator<Key> key_iterator(alter_info->key_list);
- Key *key;
handlerton *part_engine_type= create_info->db_type;
char *part_syntax_buf;
uint syntax_len;
@@ -4124,15 +4246,6 @@ handler *mysql_create_frm_image(THD *thd,
my_error(ER_PARTITION_NO_TEMPORARY, MYF(0));
goto err;
}
- while ((key= key_iterator++))
- {
- if (key->type == Key::FOREIGN_KEY &&
- !part_info->is_auto_partitioned)
- {
- my_error(ER_FOREIGN_KEY_ON_PARTITIONED, MYF(0));
- goto err;
- }
- }
if ((part_engine_type == partition_hton) &&
part_info->default_engine_type)
{
@@ -4174,8 +4287,7 @@ handler *mysql_create_frm_image(THD *thd,
&syntax_len,
TRUE, TRUE,
create_info,
- alter_info,
- NULL)))
+ alter_info)))
goto err;
part_info->part_info_string= part_syntax_buf;
part_info->part_info_len= syntax_len;
@@ -4239,17 +4351,37 @@ handler *mysql_create_frm_image(THD *thd,
}
}
}
+ /*
+ Unless table's storage engine supports partitioning natively
+ don't allow foreign keys on partitioned tables (they won't
+ work work even with InnoDB beneath of partitioning engine).
+ If storage engine handles partitioning natively (like NDB)
+ foreign keys support is possible, so we let the engine decide.
+ */
+ if (create_info->db_type == partition_hton)
+ {
+ List_iterator_fast<Key> key_iterator(alter_info->key_list);
+ Key *key;
+ while ((key= key_iterator++))
+ {
+ if (key->type == Key::FOREIGN_KEY)
+ {
+ my_error(ER_FOREIGN_KEY_ON_PARTITIONED, MYF(0));
+ goto err;
+ }
+ }
+ }
#endif
if (mysql_prepare_create_table(thd, create_info, alter_info, &db_options,
- file, &key_info_buffer, &key_count,
+ file, key_info, key_count,
create_table_mode))
goto err;
create_info->table_options=db_options;
*frm= build_frm_image(thd, table_name, create_info,
- alter_info->create_list, key_count,
- key_info_buffer, file);
+ alter_info->create_list, *key_count,
+ *key_info, file);
if (frm->str)
DBUG_RETURN(file);
@@ -4260,52 +4392,52 @@ err:
}
-/*
+/**
Create a table
- SYNOPSIS
- mysql_create_table_no_lock()
- thd Thread object
- db Database
- table_name Table name
- create_info Create information (like MAX_ROWS)
- fields List of fields to create
- keys List of keys to create
- is_trans identifies the type of engine where the table
- was created: either trans or non-trans.
- create_table_mode C_ORDINARY_CREATE, C_ALTER_TABLE, C_ASSISTED_DISCOVERY
- or any positive number (for C_CREATE_SELECT).
-
- DESCRIPTION
- If one creates a temporary table, this is automatically opened
-
- Note that this function assumes that caller already have taken
- exclusive metadata lock on table being created or used some other
- way to ensure that concurrent operations won't intervene.
- mysql_create_table() is a wrapper that can be used for this.
-
- select_field_count is also used for CREATE ... SELECT,
- and must be zero for standard create of table.
-
- RETURN VALUES
- FALSE OK
- TRUE error
+ @param thd Thread object
+ @param db Database
+ @param table_name Table name
+ @param path Path to table (i.e. to its .FRM file without
+ the extension).
+ @param create_info Create information (like MAX_ROWS)
+ @param alter_info Description of fields and keys for new table
+ @param create_table_mode C_ORDINARY_CREATE, C_ALTER_TABLE, C_ASSISTED_DISCOVERY
+ or any positive number (for C_CREATE_SELECT).
+ @param[out] is_trans Identifies the type of engine where the table
+ was created: either trans or non-trans.
+ @param[out] key_info Array of KEY objects describing keys in table
+ which was created.
+ @param[out] key_count Number of keys in table which was created.
+
+ If one creates a temporary table, this is automatically opened
+
+ Note that this function assumes that caller already have taken
+ exclusive metadata lock on table being created or used some other
+ way to ensure that concurrent operations won't intervene.
+ mysql_create_table() is a wrapper that can be used for this.
+
+ @retval false OK
+ @retval true error
*/
-bool mysql_create_table_no_lock(THD *thd,
- const char *db, const char *table_name,
- HA_CREATE_INFO *create_info,
- Alter_info *alter_info, bool *is_trans,
- int create_table_mode)
+static
+bool create_table_impl(THD *thd,
+ const char *db, const char *table_name,
+ const char *path,
+ HA_CREATE_INFO *create_info,
+ Alter_info *alter_info,
+ int create_table_mode,
+ bool *is_trans,
+ KEY **key_info,
+ uint *key_count,
+ LEX_CUSTRING *frm)
{
- char path[FN_REFLEN + 1];
- uint path_length;
const char *alias;
handler *file= 0;
- LEX_CUSTRING frm= {0,0};
bool error= TRUE;
- bool internal_tmp_table= create_table_mode == C_ALTER_TABLE ||
- create_table_mode == C_ALTER_TABLE_FRM_ONLY;
+ bool frm_only= create_table_mode == C_ALTER_TABLE_FRM_ONLY;
+ bool internal_tmp_table= create_table_mode == C_ALTER_TABLE || frm_only;
DBUG_ENTER("mysql_create_table_no_lock");
DBUG_PRINT("enter", ("db: '%s' table: '%s' tmp: %d",
db, table_name, internal_tmp_table));
@@ -4313,11 +4445,11 @@ bool mysql_create_table_no_lock(THD *thd,
if (!my_use_symdir || (thd->variables.sql_mode & MODE_NO_DIR_IN_CREATE))
{
if (create_info->data_file_name)
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
WARN_OPTION_IGNORED, ER(WARN_OPTION_IGNORED),
"DATA DIRECTORY");
if (create_info->index_file_name)
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
WARN_OPTION_IGNORED, ER(WARN_OPTION_IGNORED),
"INDEX DIRECTORY");
create_info->data_file_name= create_info->index_file_name= 0;
@@ -4333,9 +4465,6 @@ bool mysql_create_table_no_lock(THD *thd,
/* Check if table exists */
if (create_info->tmp_table())
{
- path_length= build_tmptable_filename(thd, path, sizeof(path));
- path[path_length - reg_ext_length]= '\0'; // Remove .frm extension
-
if (find_temporary_table(thd, db, table_name))
{
if (create_info->options & HA_LEX_CREATE_IF_NOT_EXISTS)
@@ -4346,9 +4475,6 @@ bool mysql_create_table_no_lock(THD *thd,
}
else
{
- path_length= build_table_filename(path, sizeof(path) - 1, db, alias, "",
- internal_tmp_table ? FN_IS_TMP : 0);
-
if (!internal_tmp_table && ha_table_exists(thd, db, table_name))
{
if (create_info->options & HA_LEX_CREATE_IF_NOT_EXISTS)
@@ -4410,15 +4536,15 @@ bool mysql_create_table_no_lock(THD *thd,
else
{
file= mysql_create_frm_image(thd, db, table_name, create_info, alter_info,
- create_table_mode, &frm);
+ create_table_mode, key_info, key_count, frm);
if (!file)
goto err;
- if (rea_create_table(thd, &frm, path, db, table_name, create_info,
- create_table_mode == C_ALTER_TABLE_FRM_ONLY ? 0 : file))
+ if (rea_create_table(thd, frm, path, db, table_name, create_info,
+ file, frm_only))
goto err;
}
- if (create_info->tmp_table())
+ if (!frm_only && create_info->tmp_table())
{
/*
Open a table (skipping table cache) and add it into
@@ -4426,7 +4552,7 @@ bool mysql_create_table_no_lock(THD *thd,
*/
TABLE *table= open_table_uncached(thd, create_info->db_type, path,
- db, table_name, TRUE);
+ db, table_name, true, true);
if (!table)
{
@@ -4440,7 +4566,7 @@ bool mysql_create_table_no_lock(THD *thd,
thd->thread_specific_used= TRUE;
}
#ifdef WITH_PARTITION_STORAGE_ENGINE
- else if (thd->work_part_info && create_table_mode == C_ALTER_TABLE_FRM_ONLY)
+ else if (thd->work_part_info && frm_only)
{
/*
For partitioned tables we can't find some problems with table
@@ -4452,12 +4578,25 @@ bool mysql_create_table_no_lock(THD *thd,
In cases when we create .FRM without SE part we have to open
table explicitly.
*/
- if (check_if_created_table_can_be_opened(thd, path, db, table_name,
- create_info, file))
+ TABLE table;
+ TABLE_SHARE share;
+
+ init_tmp_table_share(thd, &share, db, 0, table_name, path);
+
+ bool result= (open_table_def(thd, &share, GTS_TABLE) ||
+ open_table_from_share(thd, &share, "", 0, (uint) READ_ALL,
+ 0, &table, true));
+ if (!result)
+ (void) closefrm(&table, 0);
+
+ free_table_share(&share);
+
+ if (result)
{
char frm_name[FN_REFLEN];
strxnmov(frm_name, sizeof(frm_name), path, reg_ext, NullS);
(void) mysql_file_delete(key_file_frm, frm_name, MYF(0));
+ (void) file->ha_create_partitioning_metadata(path, NULL, CHF_DELETE_FLAG);
goto err;
}
}
@@ -4466,19 +4605,56 @@ bool mysql_create_table_no_lock(THD *thd,
error= FALSE;
err:
THD_STAGE_INFO(thd, stage_after_create);
- my_free(const_cast<uchar*>(frm.str));
delete file;
DBUG_RETURN(error);
warn:
error= FALSE;
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_TABLE_EXISTS_ERROR, ER(ER_TABLE_EXISTS_ERROR),
alias);
goto err;
}
/**
+ Simple wrapper around create_table_impl() to be used
+ in various version of CREATE TABLE statement.
+*/
+bool mysql_create_table_no_lock(THD *thd,
+ const char *db, const char *table_name,
+ HA_CREATE_INFO *create_info,
+ Alter_info *alter_info, bool *is_trans,
+ int create_table_mode)
+{
+ KEY *not_used_1;
+ uint not_used_2;
+ char path[FN_REFLEN + 1];
+ LEX_CUSTRING frm= {0,0};
+
+ if (create_info->tmp_table())
+ build_tmptable_filename(thd, path, sizeof(path));
+ else
+ {
+ int length;
+ const char *alias= table_case_name(create_info, table_name);
+ length= build_table_filename(path, sizeof(path) - 1, db, alias,
+ "", 0);
+ // Check if we hit FN_REFLEN bytes along with file extension.
+ if (length+reg_ext_length > FN_REFLEN)
+ {
+ my_error(ER_IDENT_CAUSES_TOO_LONG_PATH, MYF(0), sizeof(path)-1, path);
+ return true;
+ }
+ }
+
+ bool res= create_table_impl(thd, db, table_name, path, create_info,
+ alter_info, create_table_mode, is_trans,
+ &not_used_1, &not_used_2, &frm);
+ my_free(const_cast<uchar*>(frm.str));
+ return res;
+}
+
+/**
Implementation of SQLCOM_CREATE_TABLE.
Take the metadata locks (including a shared lock on the affected
@@ -4572,25 +4748,23 @@ make_unique_key_name(const char *field_name,KEY *start,KEY *end)
****************************************************************************/
-/*
+/**
Rename a table.
- SYNOPSIS
- mysql_rename_table()
- base The handlerton handle.
- old_db The old database name.
- old_name The old table name.
- new_db The new database name.
- new_name The new table name.
- flags flags for build_table_filename().
- FN_FROM_IS_TMP old_name is temporary.
- FN_TO_IS_TMP new_name is temporary.
- NO_FRM_RENAME Don't rename the FRM file
- but only the table in the storage engine.
-
- RETURN
- FALSE OK
- TRUE Error
+ @param base The handlerton handle.
+ @param old_db The old database name.
+ @param old_name The old table name.
+ @param new_db The new database name.
+ @param new_name The new table name.
+ @param flags flags
+ FN_FROM_IS_TMP old_name is temporary.
+ FN_TO_IS_TMP new_name is temporary.
+ NO_FRM_RENAME Don't rename the FRM file
+ but only the table in the storage engine.
+ NO_HA_TABLE Don't rename table in engine.
+
+ @return false OK
+ @return true Error
*/
bool
@@ -4605,6 +4779,7 @@ mysql_rename_table(handlerton *base, const char *old_db,
char tmp_name[SAFE_NAME_LEN+1];
handler *file;
int error=0;
+ int length;
DBUG_ENTER("mysql_rename_table");
DBUG_PRINT("enter", ("old: '%s'.'%s' new: '%s'.'%s'",
old_db, old_name, new_db, new_name));
@@ -4614,8 +4789,14 @@ mysql_rename_table(handlerton *base, const char *old_db,
build_table_filename(from, sizeof(from) - 1, old_db, old_name, "",
flags & FN_FROM_IS_TMP);
- build_table_filename(to, sizeof(to) - 1, new_db, new_name, "",
- flags & FN_TO_IS_TMP);
+ length= build_table_filename(to, sizeof(to) - 1, new_db, new_name, "",
+ flags & FN_TO_IS_TMP);
+ // Check if we hit FN_REFLEN bytes along with file extension.
+ if (length+reg_ext_length > FN_REFLEN)
+ {
+ my_error(ER_IDENT_CAUSES_TOO_LONG_PATH, MYF(0), sizeof(to)-1, to);
+ DBUG_RETURN(TRUE);
+ }
/*
If lower_case_table_names == 2 (case-preserving but case-insensitive
@@ -4638,7 +4819,13 @@ mysql_rename_table(handlerton *base, const char *old_db,
to_base= lc_to;
}
- if (!file || !(error=file->ha_rename_table(from_base, to_base)))
+ if (flags & NO_HA_TABLE)
+ {
+ if (rename_file_ext(from,to,reg_ext))
+ error= my_errno;
+ (void) file->ha_create_partitioning_metadata(to, from, CHF_RENAME_FLAG);
+ }
+ else if (!file || !(error=file->ha_rename_table(from_base, to_base)))
{
if (!(flags & NO_FRM_RENAME) && rename_file_ext(from,to,reg_ext))
{
@@ -4695,6 +4882,7 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, TABLE_LIST* src_table,
{
HA_CREATE_INFO local_create_info;
Alter_info local_alter_info;
+ Alter_table_ctx local_alter_ctx; // Not used
bool res= TRUE;
bool is_trans= FALSE;
uint not_used;
@@ -4726,7 +4914,7 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, TABLE_LIST* src_table,
local_create_info.db_type= src_table->table->s->db_type();
local_create_info.row_type= src_table->table->s->row_type;
if (mysql_prepare_alter_table(thd, src_table->table, &local_create_info,
- &local_alter_info))
+ &local_alter_info, &local_alter_ctx))
goto err;
#ifdef WITH_PARTITION_STORAGE_ENGINE
/* Partition info is not handled by mysql_prepare_alter_table() call. */
@@ -4800,6 +4988,7 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, TABLE_LIST* src_table,
String query(buf, sizeof(buf), system_charset_info);
query.length(0); // Have to zero it since constructor doesn't
Open_table_context ot_ctx(thd, MYSQL_OPEN_REOPEN);
+ bool new_table= FALSE; // Whether newly created table is open.
/*
The condition avoids a crash as described in BUG#48506. Other
@@ -4808,14 +4997,21 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, TABLE_LIST* src_table,
*/
if (!table->view)
{
- /*
- Here we open the destination table, on which we already have
- exclusive metadata lock. This is needed for store_create_info()
- to work. The table will be closed by close_thread_table() at
- the end of this branch.
- */
- if (open_table(thd, table, thd->mem_root, &ot_ctx))
- goto err;
+ if (!table->table)
+ {
+
+ /*
+ In order for store_create_info() to work we need to open
+ destination table if it is not already open (i.e. if it
+ has not existed before). We don't need acquire metadata
+ lock in order to do this as we already hold exclusive
+ lock on this table. The table will be closed by
+ close_thread_table() at the end of this branch.
+ */
+ if (open_table(thd, table, thd->mem_root, &ot_ctx))
+ goto err;
+ new_table= TRUE;
+ }
int result __attribute__((unused))=
store_create_info(thd, table, &query,
@@ -4825,13 +5021,16 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, TABLE_LIST* src_table,
if (write_bin_log(thd, TRUE, query.ptr(), query.length()))
goto err;
- DBUG_ASSERT(thd->open_tables == table->table);
- /*
- When opening the table, we ignored the locked tables
- (MYSQL_OPEN_GET_NEW_TABLE). Now we can close the table without
- risking to close some locked table.
- */
- close_thread_table(thd, &thd->open_tables);
+ if (new_table)
+ {
+ DBUG_ASSERT(thd->open_tables == table->table);
+ /*
+ When opening the table, we ignored the locked tables
+ (MYSQL_OPEN_GET_NEW_TABLE). Now we can close the table
+ without risking to close some locked table.
+ */
+ close_thread_table(thd, &thd->open_tables);
+ }
}
}
else // Case 1
@@ -4851,16 +5050,16 @@ err:
/* table_list should contain just one table */
-static int
-mysql_discard_or_import_tablespace(THD *thd,
- TABLE_LIST *table_list,
- enum tablespace_op_type tablespace_op)
+int mysql_discard_or_import_tablespace(THD *thd,
+ TABLE_LIST *table_list,
+ bool discard)
{
- TABLE *table;
- my_bool discard;
+ Alter_table_prelocking_strategy alter_prelocking_strategy;
int error;
DBUG_ENTER("mysql_discard_or_import_tablespace");
+ mysql_audit_alter_table(thd, table_list);
+
/*
Note that DISCARD/IMPORT TABLESPACE always is the only operation in an
ALTER TABLE
@@ -4868,21 +5067,28 @@ mysql_discard_or_import_tablespace(THD *thd,
THD_STAGE_INFO(thd, stage_discard_or_import_tablespace);
- discard= test(tablespace_op == DISCARD_TABLESPACE);
-
/*
We set this flag so that ha_innobase::open and ::external_lock() do
not complain when we lock the table
*/
thd->tablespace_op= TRUE;
- table_list->mdl_request.set_type(MDL_SHARED_WRITE);
- if (!(table=open_ltable(thd, table_list, TL_WRITE, 0)))
+ /*
+ Adjust values of table-level and metadata which was set in parser
+ for the case general ALTER TABLE.
+ */
+ table_list->mdl_request.set_type(MDL_EXCLUSIVE);
+ table_list->lock_type= TL_WRITE;
+ /* Do not open views. */
+ table_list->required_type= FRMTYPE_TABLE;
+
+ if (open_and_lock_tables(thd, table_list, FALSE, 0,
+ &alter_prelocking_strategy))
{
thd->tablespace_op=FALSE;
DBUG_RETURN(-1);
}
- error= table->file->ha_discard_or_import_tablespace(discard);
+ error= table_list->table->file->ha_discard_or_import_tablespace(discard);
THD_STAGE_INFO(thd, stage_end);
@@ -4913,48 +5119,31 @@ err:
DBUG_RETURN(0);
}
- table->file->print_error(error, MYF(0));
+ table_list->table->file->print_error(error, MYF(0));
DBUG_RETURN(-1);
}
+
/**
- @brief Check if both DROP and CREATE are present for an index in ALTER TABLE
-
- @details Checks if any index is being modified (present as both DROP INDEX
- and ADD INDEX) in the current ALTER TABLE statement. Needed for disabling
- in-place ALTER TABLE.
-
- @param table The table being altered
- @param alter_info The ALTER TABLE structure
- @return presence of index being altered
- @retval FALSE No such index
- @retval TRUE Have at least 1 index modified
+ Check if key is a candidate key, i.e. a unique index with no index
+ fields partial or nullable.
*/
-static bool
-is_index_maintenance_unique (TABLE *table, Alter_info *alter_info)
+static bool is_candidate_key(KEY *key)
{
- List_iterator<Key> key_it(alter_info->key_list);
- List_iterator<Alter_drop> drop_it(alter_info->drop_list);
- Key *key;
+ KEY_PART_INFO *key_part;
+ KEY_PART_INFO *key_part_end= key->key_part + key->user_defined_key_parts;
- while ((key= key_it++))
- {
- if (key->name.str)
- {
- Alter_drop *drop;
+ if (!(key->flags & HA_NOSAME) || (key->flags & HA_NULL_PART_KEY))
+ return false;
- drop_it.rewind();
- while ((drop= drop_it++))
- {
- if (drop->type == Alter_drop::KEY &&
- !my_strcasecmp(system_charset_info, key->name.str, drop->name))
- return TRUE;
- }
- }
+ for (key_part= key->key_part; key_part < key_part_end; key_part++)
+ {
+ if (key_part->key_part_flag & HA_PART_KEY_SEG)
+ return false;
}
- return FALSE;
+ return true;
}
@@ -4999,15 +5188,15 @@ handle_if_exists_options(THD *thd, TABLE *table, Alter_info *alter_info)
if (my_strcasecmp(system_charset_info,
sql_field->field_name, (*f_ptr)->field_name) == 0)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_DUP_FIELDNAME, ER(ER_DUP_FIELDNAME),
sql_field->field_name);
it.remove();
if (alter_info->create_list.is_empty())
{
- alter_info->flags&= ~ALTER_ADD_COLUMN;
+ alter_info->flags&= ~Alter_info::ALTER_ADD_COLUMN;
if (alter_info->key_list.is_empty())
- alter_info->flags&= ~ALTER_ADD_INDEX;
+ alter_info->flags&= ~Alter_info::ALTER_ADD_INDEX;
}
break;
}
@@ -5038,15 +5227,16 @@ handle_if_exists_options(THD *thd, TABLE *table, Alter_info *alter_info)
}
if (*f_ptr == NULL)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_BAD_FIELD_ERROR, ER(ER_BAD_FIELD_ERROR),
sql_field->change, table->s->table_name.str);
it.remove();
if (alter_info->create_list.is_empty())
{
- alter_info->flags&= ~(ALTER_ADD_COLUMN | ALTER_CHANGE_COLUMN);
+ alter_info->flags&= ~(Alter_info::ALTER_ADD_COLUMN |
+ Alter_info::ALTER_CHANGE_COLUMN);
if (alter_info->key_list.is_empty())
- alter_info->flags&= ~ALTER_ADD_INDEX;
+ alter_info->flags&= ~Alter_info::ALTER_ADD_INDEX;
}
}
}
@@ -5093,12 +5283,13 @@ handle_if_exists_options(THD *thd, TABLE *table, Alter_info *alter_info)
}
if (remove_drop)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_CANT_DROP_FIELD_OR_KEY, ER(ER_CANT_DROP_FIELD_OR_KEY),
drop->name);
drop_it.remove();
if (alter_info->drop_list.is_empty())
- alter_info->flags&= ~(ALTER_DROP_COLUMN | ALTER_DROP_INDEX);
+ alter_info->flags&= ~(Alter_info::ALTER_DROP_COLUMN |
+ Alter_info::ALTER_DROP_INDEX);
}
}
}
@@ -5118,7 +5309,7 @@ handle_if_exists_options(THD *thd, TABLE *table, Alter_info *alter_info)
if (my_strcasecmp(system_charset_info,
key->name.str, table->key_info[n_key].name) == 0)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_DUP_KEYNAME, ER(ER_DUP_KEYNAME), key->name.str);
key_it.remove();
if (key->type == Key::FOREIGN_KEY)
@@ -5127,7 +5318,7 @@ handle_if_exists_options(THD *thd, TABLE *table, Alter_info *alter_info)
key_it.remove();
}
if (alter_info->key_list.is_empty())
- alter_info->flags&= ~ALTER_ADD_INDEX;
+ alter_info->flags&= ~Alter_info::ALTER_ADD_INDEX;
break;
}
}
@@ -5139,7 +5330,7 @@ handle_if_exists_options(THD *thd, TABLE *table, Alter_info *alter_info)
if (tab_part_info && thd->lex->check_exists)
{
/* ALTER TABLE ADD PARTITION IF NOT EXISTS */
- if (alter_info->flags & ALTER_ADD_PARTITION)
+ if (alter_info->flags & Alter_info::ALTER_ADD_PARTITION)
{
partition_info *alt_part_info= thd->lex->part_info;
if (alt_part_info)
@@ -5150,10 +5341,10 @@ handle_if_exists_options(THD *thd, TABLE *table, Alter_info *alter_info)
{
if (!tab_part_info->has_unique_name(pe))
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_SAME_NAME_PARTITION, ER(ER_SAME_NAME_PARTITION),
pe->partition_name);
- alter_info->flags&= ~ALTER_ADD_PARTITION;
+ alter_info->flags&= ~Alter_info::ALTER_ADD_PARTITION;
thd->lex->part_info= NULL;
break;
}
@@ -5161,7 +5352,7 @@ handle_if_exists_options(THD *thd, TABLE *table, Alter_info *alter_info)
}
}
/* ALTER TABLE DROP PARTITION IF EXISTS */
- if (alter_info->flags & ALTER_DROP_PARTITION)
+ if (alter_info->flags & Alter_info::ALTER_DROP_PARTITION)
{
List_iterator<char> names_it(alter_info->partition_names);
char *name;
@@ -5178,293 +5369,332 @@ handle_if_exists_options(THD *thd, TABLE *table, Alter_info *alter_info)
}
if (!part_elem)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_DROP_PARTITION_NON_EXISTENT,
ER(ER_DROP_PARTITION_NON_EXISTENT), "DROP");
names_it.remove();
}
}
if (alter_info->partition_names.elements == 0)
- alter_info->flags&= ~ALTER_DROP_PARTITION;
+ alter_info->flags&= ~Alter_info::ALTER_DROP_PARTITION;
}
}
#endif /*WITH_PARTITION_STORAGE_ENGINE*/
- /* Clear the ALTER_FOREIGN_KEY flag if nothing other than that set. */
- if (alter_info->flags == ALTER_FOREIGN_KEY)
- alter_info->flags= 0;
-
DBUG_VOID_RETURN;
}
-/*
- SYNOPSIS
- mysql_compare_tables()
- table The original table.
- alter_info Alter options, fields and keys for the new
- table.
- create_info Create options for the new table.
- order_num Number of order list elements.
- need_copy_table OUT Result of the comparison. Undefined if error.
- Otherwise is one of:
- ALTER_TABLE_METADATA_ONLY No copy needed
- ALTER_TABLE_DATA_CHANGED Data changes,
- copy needed
- ALTER_TABLE_INDEX_CHANGED Index changes,
- copy might be needed
- key_info_buffer OUT An array of KEY structs for new indexes
- index_drop_buffer OUT An array of offsets into table->key_info.
- index_drop_count OUT The number of elements in the array.
- index_add_buffer OUT An array of offsets into key_info_buffer.
- index_add_count OUT The number of elements in the array.
- candidate_key_count OUT The number of candidate keys in original table.
+/**
+ Get Create_field object for newly created table by field index.
- DESCRIPTION
- 'table' (first argument) contains information of the original
- table, which includes all corresponding parts that the new
- table has in arguments create_list, key_list and create_info.
+ @param alter_info Alter_info describing newly created table.
+ @param idx Field index.
+*/
- By comparing the changes between the original and new table
- we can determine how much it has changed after ALTER TABLE
- and whether we need to make a copy of the table, or just change
- the .frm file.
+static Create_field *get_field_by_index(Alter_info *alter_info, uint idx)
+{
+ List_iterator_fast<Create_field> field_it(alter_info->create_list);
+ uint field_idx= 0;
+ Create_field *field;
- If there are no data changes, but index changes, 'index_drop_buffer'
- and/or 'index_add_buffer' are populated with offsets into
- table->key_info or key_info_buffer respectively for the indexes
- that need to be dropped and/or (re-)created.
+ while ((field= field_it++) && field_idx < idx)
+ { field_idx++; }
- RETURN VALUES
- TRUE The tables are not compatible; We have to do a full alter table
- FALSE The tables are compatible; We only have to modify the .frm
+ return field;
+}
+
+
+static int compare_uint(const uint *s, const uint *t)
+{
+ return (*s < *t) ? -1 : ((*s > *t) ? 1 : 0);
+}
+
+
+/**
+ Compare original and new versions of a table and fill Alter_inplace_info
+ describing differences between those versions.
+
+ @param thd Thread
+ @param table The original table.
+ @param varchar Indicates that new definition has new
+ VARCHAR column.
+ @param[in/out] ha_alter_info Data structure which already contains
+ basic information about create options,
+ field and keys for the new version of
+ table and which should be completed with
+ more detailed information needed for
+ in-place ALTER.
+
+ First argument 'table' contains information of the original
+ table, which includes all corresponding parts that the new
+ table has in arguments create_list, key_list and create_info.
+
+ Compare the changes between the original and new table definitions.
+ The result of this comparison is then passed to SE which determines
+ whether it can carry out these changes in-place.
+
+ Mark any changes detected in the ha_alter_flags.
+ We generally try to specify handler flags only if there are real
+ changes. But in cases when it is cumbersome to determine if some
+ attribute has really changed we might choose to set flag
+ pessimistically, for example, relying on parser output only.
+
+ If there are no data changes, but index changes, 'index_drop_buffer'
+ and/or 'index_add_buffer' are populated with offsets into
+ table->key_info or key_info_buffer respectively for the indexes
+ that need to be dropped and/or (re-)created.
+
+ Note that this function assumes that it is OK to change Alter_info
+ and HA_CREATE_INFO which it gets. It is caller who is responsible
+ for creating copies for this structures if he needs them unchanged.
+
+ @retval true error
+ @retval false success
*/
-bool
-mysql_compare_tables(TABLE *table,
- Alter_info *alter_info,
- HA_CREATE_INFO *create_info,
- uint order_num,
- enum_alter_table_change_level *need_copy_table,
- KEY **key_info_buffer,
- uint **index_drop_buffer, uint *index_drop_count,
- uint **index_add_buffer, uint *index_add_count,
- uint *candidate_key_count)
+static bool fill_alter_inplace_info(THD *thd,
+ TABLE *table,
+ bool varchar,
+ Alter_inplace_info *ha_alter_info)
{
Field **f_ptr, *field;
- uint changes= 0, tmp;
- uint key_count;
- List_iterator_fast<Create_field> new_field_it, tmp_new_field_it;
- Create_field *new_field, *tmp_new_field;
- KEY_PART_INFO *key_part;
+ List_iterator_fast<Create_field> new_field_it;
+ Create_field *new_field;
+ KEY_PART_INFO *key_part, *new_part;
KEY_PART_INFO *end;
- THD *thd= table->in_use;
- uint i;
+ uint candidate_key_count= 0;
+ Alter_info *alter_info= ha_alter_info->alter_info;
+ DBUG_ENTER("fill_alter_inplace_info");
+
+ /* Allocate result buffers. */
+ if (! (ha_alter_info->index_drop_buffer=
+ (KEY**) thd->alloc(sizeof(KEY*) * table->s->keys)) ||
+ ! (ha_alter_info->index_add_buffer=
+ (uint*) thd->alloc(sizeof(uint) *
+ alter_info->key_list.elements)))
+ DBUG_RETURN(true);
+
+ /* First we setup ha_alter_flags based on what was detected by parser. */
+ if (alter_info->flags & Alter_info::ALTER_ADD_COLUMN)
+ ha_alter_info->handler_flags|= Alter_inplace_info::ADD_COLUMN;
+ if (alter_info->flags & Alter_info::ALTER_DROP_COLUMN)
+ ha_alter_info->handler_flags|= Alter_inplace_info::DROP_COLUMN;
/*
- Remember if the new definition has new VARCHAR column;
- create_info->varchar will be reset in mysql_prepare_create_table.
+ Comparing new and old default values of column is cumbersome.
+ So instead of using such a comparison for detecting if default
+ has really changed we rely on flags set by parser to get an
+ approximate value for storage engine flag.
*/
- bool varchar= create_info->varchar;
- bool not_nullable= true;
- DBUG_ENTER("mysql_compare_tables");
+ if (alter_info->flags & (Alter_info::ALTER_CHANGE_COLUMN |
+ Alter_info::ALTER_CHANGE_COLUMN_DEFAULT))
+ ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_COLUMN_DEFAULT;
+ if (alter_info->flags & Alter_info::ADD_FOREIGN_KEY)
+ ha_alter_info->handler_flags|= Alter_inplace_info::ADD_FOREIGN_KEY;
+ if (alter_info->flags & Alter_info::DROP_FOREIGN_KEY)
+ ha_alter_info->handler_flags|= Alter_inplace_info::DROP_FOREIGN_KEY;
+ if (alter_info->flags & Alter_info::ALTER_OPTIONS)
+ ha_alter_info->handler_flags|= Alter_inplace_info::CHANGE_CREATE_OPTION;
+ if (alter_info->flags & Alter_info::ALTER_RENAME)
+ ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_RENAME;
+ /* Check partition changes */
+ if (alter_info->flags & Alter_info::ALTER_ADD_PARTITION)
+ ha_alter_info->handler_flags|= Alter_inplace_info::ADD_PARTITION;
+ if (alter_info->flags & Alter_info::ALTER_DROP_PARTITION)
+ ha_alter_info->handler_flags|= Alter_inplace_info::DROP_PARTITION;
+ if (alter_info->flags & Alter_info::ALTER_PARTITION)
+ ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_PARTITION;
+ if (alter_info->flags & Alter_info::ALTER_COALESCE_PARTITION)
+ ha_alter_info->handler_flags|= Alter_inplace_info::COALESCE_PARTITION;
+ if (alter_info->flags & Alter_info::ALTER_REORGANIZE_PARTITION)
+ ha_alter_info->handler_flags|= Alter_inplace_info::REORGANIZE_PARTITION;
+ if (alter_info->flags & Alter_info::ALTER_TABLE_REORG)
+ ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_TABLE_REORG;
+ if (alter_info->flags & Alter_info::ALTER_REMOVE_PARTITIONING)
+ ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_REMOVE_PARTITIONING;
+ if (alter_info->flags & Alter_info::ALTER_ALL_PARTITION)
+ ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_ALL_PARTITION;
/*
- Create a copy of alter_info.
- To compare the new and old table definitions, we need to "prepare"
- the new definition - transform it from parser output to a format
- that describes the final table layout (all column defaults are
- initialized, duplicate columns are removed). This is done by
- mysql_prepare_create_table. Unfortunately,
- mysql_prepare_create_table performs its transformations
- "in-place", that is, modifies the argument. Since we would
- like to keep mysql_compare_tables() idempotent (not altering any
- of the arguments) we create a copy of alter_info here and
- pass it to mysql_prepare_create_table, then use the result
- to evaluate possibility of in-place ALTER TABLE, and then
- destroy the copy.
+ If we altering table with old VARCHAR fields we will be automatically
+ upgrading VARCHAR column types.
*/
- Alter_info tmp_alter_info(*alter_info, thd->mem_root);
- uint db_options= 0; /* not used */
-
- /* Set default value for return value (to ensure it's always set) */
- *need_copy_table= ALTER_TABLE_DATA_CHANGED;
+ if (table->s->frm_version < FRM_VER_TRUE_VARCHAR && varchar)
+ ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_COLUMN_TYPE;
- /* Create the prepared information. */
- int create_table_mode= table->s->tmp_table == NO_TMP_TABLE ?
- C_ORDINARY_CREATE : C_ALTER_TABLE;
- if (mysql_prepare_create_table(thd, create_info, &tmp_alter_info,
- &db_options, table->file, key_info_buffer,
- &key_count, create_table_mode))
- DBUG_RETURN(1);
- /* Allocate result buffers. */
- if (! (*index_drop_buffer=
- (uint*) thd->alloc(sizeof(uint) * table->s->keys)) ||
- ! (*index_add_buffer=
- (uint*) thd->alloc(sizeof(uint) * tmp_alter_info.key_list.elements)))
- DBUG_RETURN(1);
-
/*
- Some very basic checks. If number of fields changes, or the
- handler, we need to run full ALTER TABLE. In the future
- new fields can be added and old dropped without copy, but
- not yet.
+ Go through fields in old version of table and detect changes to them.
+ We don't want to rely solely on Alter_info flags for this since:
+ a) new definition of column can be fully identical to the old one
+ despite the fact that this column is mentioned in MODIFY clause.
+ b) even if new column type differs from its old column from metadata
+ point of view, it might be identical from storage engine point
+ of view (e.g. when ENUM('a','b') is changed to ENUM('a','b',c')).
+ c) flags passed to storage engine contain more detailed information
+ about nature of changes than those provided from parser.
+ */
+ for (f_ptr= table->field; (field= *f_ptr); f_ptr++)
+ {
+ /* Clear marker for renamed or dropped field
+ which we are going to set later. */
+ field->flags&= ~(FIELD_IS_RENAMED | FIELD_IS_DROPPED);
- Test also that engine was not given during ALTER TABLE, or
- we are force to run regular alter table (copy).
- E.g. ALTER TABLE tbl_name ENGINE=MyISAM.
+ /* Use transformed info to evaluate flags for storage engine. */
+ uint new_field_index= 0;
+ new_field_it.init(alter_info->create_list);
+ while ((new_field= new_field_it++))
+ {
+ if (new_field->field == field)
+ break;
+ new_field_index++;
+ }
- For the following ones we also want to run regular alter table:
- ALTER TABLE tbl_name ORDER BY ..
- ALTER TABLE tbl_name CONVERT TO CHARACTER SET ..
+ if (new_field)
+ {
+ ha_alter_info->create_info->fields_option_struct[f_ptr - table->field]=
+ new_field->option_struct;
- At the moment we can't handle altering temporary tables without a copy.
- We also test if OPTIMIZE TABLE was given and was mapped to alter table.
- In that case we always do full copy.
+ /* Field is not dropped. Evaluate changes bitmap for it. */
- There was a bug prior to mysql-4.0.25. Number of null fields was
- calculated incorrectly. As a result frm and data files gets out of
- sync after in-place alter table. There is no way to determine by which
- mysql version (in 4.0 and 4.1 branches) table was created, thus we
- disable in-place alter table for all tables created by mysql versions
- prior to 5.0 branch.
- See BUG#6236.
- */
- if (table->s->fields != alter_info->create_list.elements ||
- table->s->db_type() != create_info->db_type ||
- table->s->tmp_table ||
- create_info->used_fields & HA_CREATE_USED_ENGINE ||
- create_info->used_fields & HA_CREATE_USED_CHARSET ||
- create_info->used_fields & HA_CREATE_USED_DEFAULT_CHARSET ||
- (table->s->row_type != create_info->row_type) ||
- create_info->used_fields & HA_CREATE_USED_PAGE_CHECKSUM ||
- create_info->used_fields & HA_CREATE_USED_TRANSACTIONAL ||
- create_info->used_fields & HA_CREATE_USED_PACK_KEYS ||
- create_info->used_fields & HA_CREATE_USED_MAX_ROWS ||
- (alter_info->flags & (ALTER_RECREATE | ALTER_FOREIGN_KEY)) ||
- order_num ||
- !table->s->mysql_version ||
- (table->s->frm_version < FRM_VER_TRUE_VARCHAR && varchar))
- {
- DBUG_PRINT("info", ("Basic checks -> ALTER_TABLE_DATA_CHANGED"));
- DBUG_RETURN(0);
- }
+ /*
+ Check if type of column has changed to some incompatible type.
+ */
+ switch (field->is_equal(new_field))
+ {
+ case IS_EQUAL_NO:
+ /* New column type is incompatible with old one. */
+ ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_COLUMN_TYPE;
+ if (table->s->tmp_table == NO_TMP_TABLE)
+ {
+ delete_statistics_for_column(thd, table, field);
+ KEY *key_info= table->key_info;
+ for (uint i=0; i < table->s->keys; i++, key_info++)
+ {
+ if (field->part_of_key.is_set(i))
+ {
+ uint key_parts= table->actual_n_key_parts(key_info);
+ for (uint j= 0; j < key_parts; j++)
+ {
+ if (key_info->key_part[j].fieldnr-1 == field->field_index)
+ {
+ delete_statistics_for_index(thd, table, key_info,
+ j >= key_info->user_defined_key_parts);
+ break;
+ }
+ }
+ }
+ }
+ }
+ break;
+ case IS_EQUAL_YES:
+ /*
+ New column is the same as the old one or the fully compatible with
+ it (for example, ENUM('a','b') was changed to ENUM('a','b','c')).
+ Such a change if any can ALWAYS be carried out by simply updating
+ data-dictionary without even informing storage engine.
+ No flag is set in this case.
+ */
+ break;
+ case IS_EQUAL_PACK_LENGTH:
+ /*
+ New column type differs from the old one, but has compatible packed
+ data representation. Depending on storage engine, such a change can
+ be carried out by simply updating data dictionary without changing
+ actual data (for example, VARCHAR(300) is changed to VARCHAR(400)).
+ */
+ ha_alter_info->handler_flags|= Alter_inplace_info::
+ ALTER_COLUMN_EQUAL_PACK_LENGTH;
+ break;
+ default:
+ DBUG_ASSERT(0);
+ /* Safety. */
+ ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_COLUMN_TYPE;
+ }
- if ((create_info->fields_option_struct= (ha_field_option_struct**)
- thd->calloc(sizeof(void*) * table->s->fields)) == NULL ||
- (create_info->indexes_option_struct= (ha_index_option_struct**)
- thd->calloc(sizeof(void*) * table->s->keys)) == NULL)
- DBUG_RETURN(1);
+ /*
+ Check if the altered column is computed and either
+ is stored or is used in the partitioning expression.
+ TODO: Mark such a column with an alter flag only if
+ the defining expression has changed.
+ */
+ if (field->vcol_info &&
+ (field->stored_in_db || field->vcol_info->is_in_partitioning_expr()))
+ {
+ ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_COLUMN_VCOL;
+ }
- tmp_new_field_it.init(tmp_alter_info.create_list);
- for (i= 0, f_ptr= table->field, tmp_new_field= tmp_new_field_it++;
- (field= *f_ptr);
- i++, f_ptr++, tmp_new_field= tmp_new_field_it++)
- {
- if (field->is_equal(tmp_new_field) == IS_EQUAL_NO &&
- table->s->tmp_table == NO_TMP_TABLE)
- (void) delete_statistics_for_column(thd, table, field);
- else if (my_strcasecmp(system_charset_info,
- field->field_name,
- tmp_new_field->field_name))
- (void) rename_column_in_stat_tables(thd, table, field,
- tmp_new_field->field_name);
- }
+ /* Check if field was renamed */
+ if (my_strcasecmp(system_charset_info, field->field_name,
+ new_field->field_name))
+ {
+ field->flags|= FIELD_IS_RENAMED;
+ ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_COLUMN_NAME;
+ rename_column_in_stat_tables(thd, table, field,
+ new_field->field_name);
+ }
- /*
- Use transformed info to evaluate possibility of in-place ALTER TABLE
- but use the preserved field to persist modifications.
- */
- new_field_it.init(alter_info->create_list);
- tmp_new_field_it.init(tmp_alter_info.create_list);
+ /* Check that NULL behavior is same for old and new fields */
+ if ((new_field->flags & NOT_NULL_FLAG) !=
+ (uint) (field->flags & NOT_NULL_FLAG))
+ {
+ if (new_field->flags & NOT_NULL_FLAG)
+ ha_alter_info->handler_flags|=
+ Alter_inplace_info::ALTER_COLUMN_NOT_NULLABLE;
+ else
+ ha_alter_info->handler_flags|=
+ Alter_inplace_info::ALTER_COLUMN_NULLABLE;
+ }
- /*
- Go through fields and check if the original ones are compatible
- with new table.
- */
- for (i= 0, f_ptr= table->field, new_field= new_field_it++,
- tmp_new_field= tmp_new_field_it++;
- (field= *f_ptr);
- i++, f_ptr++, new_field= new_field_it++,
- tmp_new_field= tmp_new_field_it++)
- {
- DBUG_ASSERT(i < table->s->fields);
- create_info->fields_option_struct[i]= tmp_new_field->option_struct;
+ /*
+ We do not detect changes to default values in this loop.
+ See comment above for more details.
+ */
- /* reset common markers of how field changed */
- field->flags&= ~(FIELD_IS_RENAMED | FIELD_IN_ADD_INDEX);
+ /*
+ Detect changes in column order.
+ */
+ if (field->field_index != new_field_index)
+ ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_COLUMN_ORDER;
- /* Make sure we have at least the default charset in use. */
- if (!new_field->charset)
- new_field->charset= create_info->default_table_charset;
+ /* Detect changes in storage type of column */
+ if (new_field->field_storage_type() != field->field_storage_type())
+ ha_alter_info->handler_flags|=
+ Alter_inplace_info::ALTER_COLUMN_STORAGE_TYPE;
- /* Check that NULL behavior is same for old and new fields */
- if ((tmp_new_field->flags & NOT_NULL_FLAG) !=
- (uint) (field->flags & NOT_NULL_FLAG))
- {
- DBUG_PRINT("info", ("NULL behaviour difference in field '%s' -> "
- "ALTER_TABLE_DATA_CHANGED", new_field->field_name));
- DBUG_RETURN(0);
+ /* Detect changes in column format of column */
+ if (new_field->column_format() != field->column_format())
+ ha_alter_info->handler_flags|=
+ Alter_inplace_info::ALTER_COLUMN_COLUMN_FORMAT;
}
-
- /*
- Check if the altered column is computed and either
- is stored or is used in the partitioning expression.
- TODO: Mark such a column with an alter flag only if
- the defining expression has changed.
- */
- if (field->vcol_info &&
- (field->stored_in_db || field->vcol_info->is_in_partitioning_expr()))
+ else
{
- *need_copy_table= ALTER_TABLE_DATA_CHANGED;
- DBUG_RETURN(0);
+ /*
+ Field is not present in new version of table and therefore was dropped.
+ Corresponding storage engine flag should be already set.
+ */
+ DBUG_ASSERT(ha_alter_info->handler_flags & Alter_inplace_info::DROP_COLUMN);
+ field->flags|= FIELD_IS_DROPPED;
}
+ }
- /* Don't pack rows in old tables if the user has requested this. */
- if (create_info->row_type == ROW_TYPE_DYNAMIC ||
- (tmp_new_field->flags & BLOB_FLAG) ||
- (tmp_new_field->sql_type == MYSQL_TYPE_VARCHAR &&
- create_info->row_type != ROW_TYPE_FIXED))
- create_info->table_options|= HA_OPTION_PACK_RECORD;
-
- /* Check if field was renamed */
- if (my_strcasecmp(system_charset_info,
- field->field_name,
- tmp_new_field->field_name))
+ new_field_it.init(alter_info->create_list);
+ while ((new_field= new_field_it++))
+ {
+ if (! new_field->field)
{
- field->flags|= FIELD_IS_RENAMED;
- if (table->s->tmp_table == NO_TMP_TABLE)
- rename_column_in_stat_tables(thd, table, field,
- tmp_new_field->field_name);
- }
+ /*
+ Field is not present in old version of table and therefore was added.
+ Again corresponding storage engine flag should be already set.
+ */
+ DBUG_ASSERT(ha_alter_info->handler_flags & Alter_inplace_info::ADD_COLUMN);
- /* Evaluate changes bitmap and send to check_if_incompatible_data() */
- if (!(tmp= field->is_equal(tmp_new_field)))
- {
- if (table->s->tmp_table == NO_TMP_TABLE)
+ if (new_field->vcol_info &&
+ (new_field->stored_in_db || new_field->vcol_info->is_in_partitioning_expr()))
{
- KEY *key_info= table->key_info;
- for (uint i=0; i < table->s->keys; i++, key_info++)
- {
- if (field->part_of_key.is_set(i))
- {
- uint key_parts= table->actual_n_key_parts(key_info);
- for (uint j= 0; j < key_parts; j++)
- {
- if (key_info->key_part[j].fieldnr-1 == field->field_index)
- {
- (void) delete_statistics_for_index(thd, table, key_info,
- j >= key_info->key_parts);
- break;
- }
- }
- }
- }
+ ha_alter_info->handler_flags|= Alter_inplace_info::ALTER_COLUMN_VCOL;
}
- DBUG_PRINT("info", ("!field_is_equal('%s') -> ALTER_TABLE_DATA_CHANGED",
- new_field->field_name));
- DBUG_RETURN(0);
+ break;
}
- changes|= tmp;
}
/*
@@ -5474,105 +5704,86 @@ mysql_compare_tables(TABLE *table,
KEY *table_key;
KEY *table_key_end= table->key_info + table->s->keys;
KEY *new_key;
- KEY *new_key_end= *key_info_buffer + key_count;
+ KEY *new_key_end=
+ ha_alter_info->key_info_buffer + ha_alter_info->key_count;
DBUG_PRINT("info", ("index count old: %d new: %d",
- table->s->keys, key_count));
+ table->s->keys, ha_alter_info->key_count));
+
/*
Step through all keys of the old table and search matching new keys.
*/
- *index_drop_count= 0;
- *index_add_count= 0;
- *candidate_key_count= 0;
+ ha_alter_info->index_drop_count= 0;
+ ha_alter_info->index_add_count= 0;
for (table_key= table->key_info; table_key < table_key_end; table_key++)
{
- KEY_PART_INFO *table_part;
- KEY_PART_INFO *table_part_end= table_key->key_part + table_key->key_parts;
- KEY_PART_INFO *new_part;
-
- /*
- Check if key is a candidate key, i.e. a unique index with no index
- fields nullable, then key is either already primary key or could
- be promoted to primary key if the original primary key is dropped.
- Count all candidate keys.
- */
- not_nullable= true;
- for (table_part= table_key->key_part;
- table_part < table_part_end;
- table_part++)
- {
- not_nullable= not_nullable && (! table_part->field->maybe_null());
- }
- if ((table_key->flags & HA_NOSAME) && not_nullable)
- (*candidate_key_count)++;
-
/* Search a new key with the same name. */
- for (new_key= *key_info_buffer; new_key < new_key_end; new_key++)
+ for (new_key= ha_alter_info->key_info_buffer;
+ new_key < new_key_end;
+ new_key++)
{
if (! strcmp(table_key->name, new_key->name))
break;
}
if (new_key >= new_key_end)
{
- /* Key not found. Add the offset of the key to the drop buffer. */
- (*index_drop_buffer)[(*index_drop_count)++]= table_key - table->key_info;
+ /* Key not found. Add the key to the drop buffer. */
+ ha_alter_info->index_drop_buffer
+ [ha_alter_info->index_drop_count++]=
+ table_key;
DBUG_PRINT("info", ("index dropped: '%s'", table_key->name));
continue;
}
/* Check that the key types are compatible between old and new tables. */
if ((table_key->algorithm != new_key->algorithm) ||
- ((table_key->flags & HA_KEYFLAG_MASK) !=
+ ((table_key->flags & HA_KEYFLAG_MASK) !=
(new_key->flags & HA_KEYFLAG_MASK)) ||
- (table_key->key_parts != new_key->key_parts))
+ (table_key->user_defined_key_parts !=
+ new_key->user_defined_key_parts))
goto index_changed;
/*
Check that the key parts remain compatible between the old and
new tables.
*/
- for (table_part= table_key->key_part, new_part= new_key->key_part;
- table_part < table_part_end;
- table_part++, new_part++)
+ end= table_key->key_part + table_key->user_defined_key_parts;
+ for (key_part= table_key->key_part, new_part= new_key->key_part;
+ key_part < end;
+ key_part++, new_part++)
{
/*
- Key definition has changed if we are using a different field or
- if the used key part length is different. We know that the fields
- did not change. Comparing field numbers is sufficient.
+ Key definition has changed if we are using a different field or
+ if the used key part length is different. It makes sense to
+ check lengths first as in case when fields differ it is likely
+ that lengths differ too and checking fields is more expensive
+ in general case.
*/
- if ((table_part->length != new_part->length) ||
- (table_part->fieldnr - 1 != new_part->fieldnr))
- goto index_changed;
+ if (key_part->length != new_part->length)
+ goto index_changed;
+
+ new_field= get_field_by_index(alter_info, new_part->fieldnr);
+
+ /*
+ For prefix keys KEY_PART_INFO::field points to cloned Field
+ object with adjusted length. So below we have to check field
+ indexes instead of simply comparing pointers to Field objects.
+ */
+ if (! new_field->field ||
+ new_field->field->field_index != key_part->fieldnr - 1)
+ goto index_changed;
}
continue;
index_changed:
- /* Key modified. Add the offset of the key to both buffers. */
- (*index_drop_buffer)[(*index_drop_count)++]= table_key - table->key_info;
- (*index_add_buffer)[(*index_add_count)++]= new_key - *key_info_buffer;
- key_part= new_key->key_part;
- end= key_part + new_key->key_parts;
- for(; key_part != end; key_part++)
- {
- // Mark field to be part of new key
- field= table->field[key_part->fieldnr];
- field->flags|= FIELD_IN_ADD_INDEX;
- }
- if (table->s->tmp_table == NO_TMP_TABLE)
- {
- (void) delete_statistics_for_index(thd, table, table_key, FALSE);
- if ((uint) (table_key - table->key_info) == table->s->primary_key)
- {
- KEY *tab_key_info= table->key_info;
- for (uint j=0; j < table->s->keys; j++, tab_key_info++)
- {
- if (tab_key_info->key_parts != tab_key_info->ext_key_parts)
- (void) delete_statistics_for_index(thd, table, tab_key_info,
- TRUE);
- }
- }
- }
-
+ /* Key modified. Add the key / key offset to both buffers. */
+ ha_alter_info->index_drop_buffer
+ [ha_alter_info->index_drop_count++]=
+ table_key;
+ ha_alter_info->index_add_buffer
+ [ha_alter_info->index_add_count++]=
+ new_key - ha_alter_info->key_info_buffer;
+ /* Mark all old fields which are used in newly created index. */
DBUG_PRINT("info", ("index changed: '%s'", table_key->name));
}
/*end of for (; table_key < table_key_end;) */
@@ -5580,12 +5791,12 @@ mysql_compare_tables(TABLE *table,
/*
Step through all keys of the new table and find matching old keys.
*/
- for (new_key= *key_info_buffer; new_key < new_key_end; new_key++)
+ for (new_key= ha_alter_info->key_info_buffer;
+ new_key < new_key_end;
+ new_key++)
{
/* Search an old key with the same name. */
- for (i= 0, table_key= table->key_info;
- table_key < table_key_end;
- i++, table_key++)
+ for (table_key= table->key_info; table_key < table_key_end; table_key++)
{
if (! strcmp(table_key->name, new_key->name))
break;
@@ -5593,44 +5804,309 @@ mysql_compare_tables(TABLE *table,
if (table_key >= table_key_end)
{
/* Key not found. Add the offset of the key to the add buffer. */
- (*index_add_buffer)[(*index_add_count)++]= new_key - *key_info_buffer;
- key_part= new_key->key_part;
- end= key_part + new_key->key_parts;
- for(; key_part != end; key_part++)
+ ha_alter_info->index_add_buffer
+ [ha_alter_info->index_add_count++]=
+ new_key - ha_alter_info->key_info_buffer;
+ DBUG_PRINT("info", ("index added: '%s'", new_key->name));
+ }
+ else
+ ha_alter_info->create_info->indexes_option_struct[table_key - table->key_info]=
+ new_key->option_struct;
+ }
+
+ /*
+ Sort index_add_buffer according to how key_info_buffer is sorted.
+ I.e. with primary keys first - see sort_keys().
+ */
+ my_qsort(ha_alter_info->index_add_buffer,
+ ha_alter_info->index_add_count,
+ sizeof(uint), (qsort_cmp) compare_uint);
+
+ /* Now let us calculate flags for storage engine API. */
+
+ /* Count all existing candidate keys. */
+ for (table_key= table->key_info; table_key < table_key_end; table_key++)
+ {
+ /*
+ Check if key is a candidate key, This key is either already primary key
+ or could be promoted to primary key if the original primary key is
+ dropped.
+ In MySQL one is allowed to create primary key with partial fields (i.e.
+ primary key which is not considered candidate). For simplicity we count
+ such key as a candidate key here.
+ */
+ if (((uint) (table_key - table->key_info) == table->s->primary_key) ||
+ is_candidate_key(table_key))
+ candidate_key_count++;
+ }
+
+ /* Figure out what kind of indexes we are dropping. */
+ KEY **dropped_key;
+ KEY **dropped_key_end= ha_alter_info->index_drop_buffer +
+ ha_alter_info->index_drop_count;
+
+ for (dropped_key= ha_alter_info->index_drop_buffer;
+ dropped_key < dropped_key_end; dropped_key++)
+ {
+ table_key= *dropped_key;
+
+ if (table_key->flags & HA_NOSAME)
+ {
+ /*
+ Unique key. Check for PRIMARY KEY. Also see comment about primary
+ and candidate keys above.
+ */
+ if ((uint) (table_key - table->key_info) == table->s->primary_key)
{
- // Mark field to be part of new key
- field= table->field[key_part->fieldnr];
- field->flags|= FIELD_IN_ADD_INDEX;
+ ha_alter_info->handler_flags|= Alter_inplace_info::DROP_PK_INDEX;
+ candidate_key_count--;
+ }
+ else
+ {
+ ha_alter_info->handler_flags|= Alter_inplace_info::DROP_UNIQUE_INDEX;
+ if (is_candidate_key(table_key))
+ candidate_key_count--;
}
- DBUG_PRINT("info", ("index added: '%s'", new_key->name));
}
else
+ ha_alter_info->handler_flags|= Alter_inplace_info::DROP_INDEX;
+ }
+
+ /* Now figure out what kind of indexes we are adding. */
+ for (uint add_key_idx= 0; add_key_idx < ha_alter_info->index_add_count; add_key_idx++)
+ {
+ new_key= ha_alter_info->key_info_buffer + ha_alter_info->index_add_buffer[add_key_idx];
+
+ if (new_key->flags & HA_NOSAME)
{
- DBUG_ASSERT(i < table->s->keys);
- create_info->indexes_option_struct[i]= new_key->option_struct;
+ bool is_pk= !my_strcasecmp(system_charset_info, new_key->name, primary_key_name);
+
+ if ((!(new_key->flags & HA_KEY_HAS_PART_KEY_SEG) &&
+ !(new_key->flags & HA_NULL_PART_KEY)) ||
+ is_pk)
+ {
+ /* Candidate key or primary key! */
+ if (candidate_key_count == 0 || is_pk)
+ ha_alter_info->handler_flags|= Alter_inplace_info::ADD_PK_INDEX;
+ else
+ ha_alter_info->handler_flags|= Alter_inplace_info::ADD_UNIQUE_INDEX;
+ candidate_key_count++;
+ }
+ else
+ {
+ ha_alter_info->handler_flags|= Alter_inplace_info::ADD_UNIQUE_INDEX;
+ }
}
+ else
+ ha_alter_info->handler_flags|= Alter_inplace_info::ADD_INDEX;
}
- /* Check if changes are compatible with current handler without a copy */
+ DBUG_RETURN(false);
+}
+
+
+/**
+ Mark fields participating in newly added indexes in TABLE object which
+ corresponds to new version of altered table.
+
+ @param ha_alter_info Alter_inplace_info describing in-place ALTER.
+ @param altered_table TABLE object for new version of TABLE in which
+ fields should be marked.
+*/
+
+static void update_altered_table(const Alter_inplace_info &ha_alter_info,
+ TABLE *altered_table)
+{
+ uint field_idx, add_key_idx;
+ KEY *key;
+ KEY_PART_INFO *end, *key_part;
+
+ /*
+ Clear marker for all fields, as we are going to set it only
+ for fields which participate in new indexes.
+ */
+ for (field_idx= 0; field_idx < altered_table->s->fields; ++field_idx)
+ altered_table->field[field_idx]->flags&= ~FIELD_IN_ADD_INDEX;
+
+ /*
+ Go through array of newly added indexes and mark fields
+ participating in them.
+ */
+ for (add_key_idx= 0; add_key_idx < ha_alter_info.index_add_count;
+ add_key_idx++)
+ {
+ key= ha_alter_info.key_info_buffer +
+ ha_alter_info.index_add_buffer[add_key_idx];
+
+ end= key->key_part + key->user_defined_key_parts;
+ for (key_part= key->key_part; key_part < end; key_part++)
+ altered_table->field[key_part->fieldnr]->flags|= FIELD_IN_ADD_INDEX;
+ }
+}
+
+
+/**
+ Compare two tables to see if their metadata are compatible.
+ One table specified by a TABLE instance, the other using Alter_info
+ and HA_CREATE_INFO.
+
+ @param[in] table The first table.
+ @param[in] alter_info Alter options, fields and keys for the
+ second table.
+ @param[in] create_info Create options for the second table.
+ @param[out] metadata_equal Result of comparison.
+
+ @retval true error
+ @retval false success
+*/
+
+bool mysql_compare_tables(TABLE *table,
+ Alter_info *alter_info,
+ HA_CREATE_INFO *create_info,
+ bool *metadata_equal)
+{
+ DBUG_ENTER("mysql_compare_tables");
+
+ uint changes= IS_EQUAL_NO;
+ uint key_count;
+ List_iterator_fast<Create_field> tmp_new_field_it;
+ THD *thd= table->in_use;
+ *metadata_equal= false;
+
+ /*
+ Create a copy of alter_info.
+ To compare definitions, we need to "prepare" the definition - transform it
+ from parser output to a format that describes the table layout (all column
+ defaults are initialized, duplicate columns are removed). This is done by
+ mysql_prepare_create_table. Unfortunately, mysql_prepare_create_table
+ performs its transformations "in-place", that is, modifies the argument.
+ Since we would like to keep mysql_compare_tables() idempotent (not altering
+ any of the arguments) we create a copy of alter_info here and pass it to
+ mysql_prepare_create_table, then use the result to compare the tables, and
+ then destroy the copy.
+ */
+ Alter_info tmp_alter_info(*alter_info, thd->mem_root);
+ uint db_options= 0; /* not used */
+ KEY *key_info_buffer= NULL;
+
+ /* Create the prepared information. */
+ int create_table_mode= table->s->tmp_table == NO_TMP_TABLE ?
+ C_ORDINARY_CREATE : C_ALTER_TABLE;
+ if (mysql_prepare_create_table(thd, create_info, &tmp_alter_info,
+ &db_options, table->file, &key_info_buffer,
+ &key_count, create_table_mode))
+ DBUG_RETURN(1);
+
+ /* Some very basic checks. */
+ if (table->s->fields != alter_info->create_list.elements ||
+ table->s->db_type() != create_info->db_type ||
+ table->s->tmp_table ||
+ (table->s->row_type != create_info->row_type))
+ DBUG_RETURN(false);
+
+ /* Go through fields and check if they are compatible. */
+ tmp_new_field_it.init(tmp_alter_info.create_list);
+ for (Field **f_ptr= table->field; *f_ptr; f_ptr++)
+ {
+ Field *field= *f_ptr;
+ Create_field *tmp_new_field= tmp_new_field_it++;
+
+ /* Check that NULL behavior is the same. */
+ if ((tmp_new_field->flags & NOT_NULL_FLAG) !=
+ (uint) (field->flags & NOT_NULL_FLAG))
+ DBUG_RETURN(false);
+
+ /*
+ mysql_prepare_alter_table() clears HA_OPTION_PACK_RECORD bit when
+ preparing description of existing table. In ALTER TABLE it is later
+ updated to correct value by create_table_impl() call.
+ So to get correct value of this bit in this function we have to
+ mimic behavior of create_table_impl().
+ */
+ if (create_info->row_type == ROW_TYPE_DYNAMIC ||
+ (tmp_new_field->flags & BLOB_FLAG) ||
+ (tmp_new_field->sql_type == MYSQL_TYPE_VARCHAR &&
+ create_info->row_type != ROW_TYPE_FIXED))
+ create_info->table_options|= HA_OPTION_PACK_RECORD;
+
+ /* Check if field was renamed */
+ if (my_strcasecmp(system_charset_info,
+ field->field_name,
+ tmp_new_field->field_name))
+ DBUG_RETURN(false);
+
+ /* Evaluate changes bitmap and send to check_if_incompatible_data() */
+ uint field_changes= field->is_equal(tmp_new_field);
+ if (field_changes != IS_EQUAL_YES)
+ DBUG_RETURN(false);
+
+ changes|= field_changes;
+ }
+
+ /* Check if changes are compatible with current handler. */
if (table->file->check_if_incompatible_data(create_info, changes))
+ DBUG_RETURN(false);
+
+ /* Go through keys and check if they are compatible. */
+ KEY *table_key;
+ KEY *table_key_end= table->key_info + table->s->keys;
+ KEY *new_key;
+ KEY *new_key_end= key_info_buffer + key_count;
+
+ /* Step through all keys of the first table and search matching keys. */
+ for (table_key= table->key_info; table_key < table_key_end; table_key++)
{
- DBUG_PRINT("info", ("check_if_incompatible_data() -> "
- "ALTER_TABLE_DATA_CHANGED"));
- DBUG_RETURN(0);
+ /* Search a key with the same name. */
+ for (new_key= key_info_buffer; new_key < new_key_end; new_key++)
+ {
+ if (! strcmp(table_key->name, new_key->name))
+ break;
+ }
+ if (new_key >= new_key_end)
+ DBUG_RETURN(false);
+
+ /* Check that the key types are compatible. */
+ if ((table_key->algorithm != new_key->algorithm) ||
+ ((table_key->flags & HA_KEYFLAG_MASK) !=
+ (new_key->flags & HA_KEYFLAG_MASK)) ||
+ (table_key->user_defined_key_parts !=
+ new_key->user_defined_key_parts))
+ DBUG_RETURN(false);
+
+ /* Check that the key parts remain compatible. */
+ KEY_PART_INFO *table_part;
+ KEY_PART_INFO *table_part_end= table_key->key_part + table_key->user_defined_key_parts;
+ KEY_PART_INFO *new_part;
+ for (table_part= table_key->key_part, new_part= new_key->key_part;
+ table_part < table_part_end;
+ table_part++, new_part++)
+ {
+ /*
+ Key definition is different if we are using a different field or
+ if the used key part length is different. We know that the fields
+ are equal. Comparing field numbers is sufficient.
+ */
+ if ((table_part->length != new_part->length) ||
+ (table_part->fieldnr - 1 != new_part->fieldnr))
+ DBUG_RETURN(false);
+ }
}
- if (*index_drop_count || *index_add_count)
+ /* Step through all keys of the second table and find matching keys. */
+ for (new_key= key_info_buffer; new_key < new_key_end; new_key++)
{
- DBUG_PRINT("info", ("Index dropped=%u added=%u -> "
- "ALTER_TABLE_INDEX_CHANGED",
- *index_drop_count, *index_add_count));
- *need_copy_table= ALTER_TABLE_INDEX_CHANGED;
- DBUG_RETURN(0);
+ /* Search a key with the same name. */
+ for (table_key= table->key_info; table_key < table_key_end; table_key++)
+ {
+ if (! strcmp(table_key->name, new_key->name))
+ break;
+ }
+ if (table_key >= table_key_end)
+ DBUG_RETURN(false);
}
- DBUG_PRINT("info", (" -> ALTER_TABLE_METADATA_ONLY"));
- *need_copy_table= ALTER_TABLE_METADATA_ONLY; // Tables are compatible
- DBUG_RETURN(0);
+ *metadata_equal= true; // Tables are compatible
+ DBUG_RETURN(false);
}
@@ -5651,7 +6127,7 @@ mysql_compare_tables(TABLE *table,
static
bool alter_table_manage_keys(TABLE *table, int indexes_were_disabled,
- enum enum_enable_or_disable keys_onoff)
+ Alter_info::enum_enable_or_disable keys_onoff)
{
int error= 0;
DBUG_ENTER("alter_table_manage_keys");
@@ -5659,21 +6135,21 @@ bool alter_table_manage_keys(TABLE *table, int indexes_were_disabled,
table, indexes_were_disabled, keys_onoff));
switch (keys_onoff) {
- case ENABLE:
+ case Alter_info::ENABLE:
DEBUG_SYNC(table->in_use, "alter_table_enable_indexes");
error= table->file->ha_enable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE);
break;
- case LEAVE_AS_IS:
+ case Alter_info::LEAVE_AS_IS:
if (!indexes_were_disabled)
break;
/* fall-through: disabled indexes */
- case DISABLE:
+ case Alter_info::DISABLE:
error= table->file->ha_disable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE);
}
if (error == HA_ERR_WRONG_COMMAND)
{
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_NOTE,
ER_ILLEGAL_HA, ER(ER_ILLEGAL_HA),
table->file->table_type(),
table->s->db.str, table->s->table_name.str);
@@ -5684,6 +6160,401 @@ bool alter_table_manage_keys(TABLE *table, int indexes_were_disabled,
DBUG_RETURN(error);
}
+
+/**
+ Check if the pending ALTER TABLE operations support the in-place
+ algorithm based on restrictions in the SQL layer or given the
+ nature of the operations themselves. If in-place isn't supported,
+ it won't be necessary to check with the storage engine.
+
+ @param table The original TABLE.
+ @param create_info Information from the parsing phase about new
+ table properties.
+ @param alter_info Data related to detected changes.
+
+ @return false In-place is possible, check with storage engine.
+ @return true Incompatible operations, must use table copy.
+*/
+
+static bool is_inplace_alter_impossible(TABLE *table,
+ HA_CREATE_INFO *create_info,
+ const Alter_info *alter_info)
+{
+ DBUG_ENTER("is_inplace_alter_impossible");
+
+ /* At the moment we can't handle altering temporary tables without a copy. */
+ if (table->s->tmp_table)
+ DBUG_RETURN(true);
+
+
+ /*
+ We also test if OPTIMIZE TABLE was given and was mapped to alter table.
+ In that case we always do full copy (ALTER_RECREATE is set in this case).
+
+ For the ALTER TABLE tbl_name ORDER BY ... we also always use copy
+ algorithm. In theory, this operation can be done in-place by some
+ engine, but since a) no current engine does this and b) our current
+ API lacks infrastructure for passing information about table ordering
+ to storage engine we simply always do copy now.
+
+ ENABLE/DISABLE KEYS is a MyISAM/Heap specific operation that is
+ not supported for in-place in combination with other operations.
+ Alone, it will be done by simple_rename_or_index_change().
+ */
+ if (alter_info->flags & (Alter_info::ALTER_RECREATE |
+ Alter_info::ALTER_ORDER |
+ Alter_info::ALTER_KEYS_ONOFF))
+ DBUG_RETURN(true);
+
+ /*
+ Test also that engine was not given during ALTER TABLE, or
+ we are force to run regular alter table (copy).
+ E.g. ALTER TABLE tbl_name ENGINE=MyISAM.
+ Note that in addition to checking flag in HA_CREATE_INFO we
+ also check HA_CREATE_INFO::db_type value. This is done
+ to cover cases in which engine is changed implicitly
+ (e.g. when non-partitioned table becomes partitioned).
+
+ Note that we do copy even if the table is already using the
+ given engine. Many users and tools depend on using ENGINE
+ to force a table rebuild.
+ */
+ if (create_info->db_type != table->s->db_type() ||
+ create_info->used_fields & HA_CREATE_USED_ENGINE)
+ DBUG_RETURN(true);
+
+ /*
+ There was a bug prior to mysql-4.0.25. Number of null fields was
+ calculated incorrectly. As a result frm and data files gets out of
+ sync after fast alter table. There is no way to determine by which
+ mysql version (in 4.0 and 4.1 branches) table was created, thus we
+ disable fast alter table for all tables created by mysql versions
+ prior to 5.0 branch.
+ See BUG#6236.
+ */
+ if (!table->s->mysql_version)
+ DBUG_RETURN(true);
+
+ DBUG_RETURN(false);
+}
+
+
+/**
+ Perform in-place alter table.
+
+ @param thd Thread handle.
+ @param table_list TABLE_LIST for the table to change.
+ @param table The original TABLE.
+ @param altered_table TABLE object for new version of the table.
+ @param ha_alter_info Structure describing ALTER TABLE to be carried
+ out and serving as a storage place for data
+ used during different phases.
+ @param inplace_supported Enum describing the locking requirements.
+ @param target_mdl_request Metadata request/lock on the target table name.
+ @param alter_ctx ALTER TABLE runtime context.
+
+ @retval true Error
+ @retval false Success
+
+ @note
+ If mysql_alter_table does not need to copy the table, it is
+ either an alter table where the storage engine does not
+ need to know about the change, only the frm will change,
+ or the storage engine supports performing the alter table
+ operation directly, in-place without mysql having to copy
+ the table.
+
+ @note This function frees the TABLE object associated with the new version of
+ the table and removes the .FRM file for it in case of both success and
+ failure.
+*/
+
+static bool mysql_inplace_alter_table(THD *thd,
+ TABLE_LIST *table_list,
+ TABLE *table,
+ TABLE *altered_table,
+ Alter_inplace_info *ha_alter_info,
+ enum_alter_inplace_result inplace_supported,
+ MDL_request *target_mdl_request,
+ Alter_table_ctx *alter_ctx)
+{
+ Open_table_context ot_ctx(thd, MYSQL_OPEN_REOPEN);
+ handlerton *db_type= table->s->db_type();
+ MDL_ticket *mdl_ticket= table->mdl_ticket;
+ HA_CREATE_INFO *create_info= ha_alter_info->create_info;
+ Alter_info *alter_info= ha_alter_info->alter_info;
+ bool reopen_tables= false;
+
+ DBUG_ENTER("mysql_inplace_alter_table");
+
+ /*
+ Upgrade to EXCLUSIVE lock if:
+ - This is requested by the storage engine
+ - Or the storage engine needs exclusive lock for just the prepare
+ phase
+ - Or requested by the user
+
+ Note that we handle situation when storage engine needs exclusive
+ lock for prepare phase under LOCK TABLES in the same way as when
+ exclusive lock is required for duration of the whole statement.
+ */
+ if (inplace_supported == HA_ALTER_INPLACE_EXCLUSIVE_LOCK ||
+ ((inplace_supported == HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE ||
+ inplace_supported == HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE) &&
+ (thd->locked_tables_mode == LTM_LOCK_TABLES ||
+ thd->locked_tables_mode == LTM_PRELOCKED_UNDER_LOCK_TABLES)) ||
+ alter_info->requested_lock == Alter_info::ALTER_TABLE_LOCK_EXCLUSIVE)
+ {
+ if (wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN))
+ goto cleanup;
+ /*
+ Get rid of all TABLE instances belonging to this thread
+ except one to be used for in-place ALTER TABLE.
+
+ This is mostly needed to satisfy InnoDB assumptions/asserts.
+ */
+ close_all_tables_for_name(thd, table->s,
+ alter_ctx->is_table_renamed() ?
+ HA_EXTRA_PREPARE_FOR_RENAME :
+ HA_EXTRA_NOT_USED,
+ table);
+ /*
+ If we are under LOCK TABLES we will need to reopen tables which we
+ just have closed in case of error.
+ */
+ reopen_tables= true;
+ }
+ else if (inplace_supported == HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE ||
+ inplace_supported == HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE)
+ {
+ /*
+ Storage engine has requested exclusive lock only for prepare phase
+ and we are not under LOCK TABLES.
+ Don't mark TABLE_SHARE as old in this case, as this won't allow opening
+ of table by other threads during main phase of in-place ALTER TABLE.
+ */
+ if (thd->mdl_context.upgrade_shared_lock(table->mdl_ticket, MDL_EXCLUSIVE,
+ thd->variables.lock_wait_timeout))
+ goto cleanup;
+
+ tdc_remove_table(thd, TDC_RT_REMOVE_NOT_OWN_KEEP_SHARE,
+ table->s->db.str, table->s->table_name.str,
+ false);
+ }
+
+ /*
+ Upgrade to SHARED_NO_WRITE lock if:
+ - The storage engine needs writes blocked for the whole duration
+ - Or this is requested by the user
+ Note that under LOCK TABLES, we will already have SHARED_NO_READ_WRITE.
+ */
+ if ((inplace_supported == HA_ALTER_INPLACE_SHARED_LOCK ||
+ alter_info->requested_lock == Alter_info::ALTER_TABLE_LOCK_SHARED) &&
+ thd->mdl_context.upgrade_shared_lock(table->mdl_ticket,
+ MDL_SHARED_NO_WRITE,
+ thd->variables.lock_wait_timeout))
+ {
+ goto cleanup;
+ }
+
+ // It's now safe to take the table level lock.
+ if (lock_tables(thd, table_list, alter_ctx->tables_opened, 0))
+ goto cleanup;
+
+ DEBUG_SYNC(thd, "alter_table_inplace_after_lock_upgrade");
+ THD_STAGE_INFO(thd, stage_alter_inplace_prepare);
+
+ switch (inplace_supported) {
+ case HA_ALTER_ERROR:
+ case HA_ALTER_INPLACE_NOT_SUPPORTED:
+ DBUG_ASSERT(0);
+ // fall through
+ case HA_ALTER_INPLACE_NO_LOCK:
+ case HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE:
+ switch (alter_info->requested_lock) {
+ case Alter_info::ALTER_TABLE_LOCK_DEFAULT:
+ case Alter_info::ALTER_TABLE_LOCK_NONE:
+ ha_alter_info->online= true;
+ break;
+ case Alter_info::ALTER_TABLE_LOCK_SHARED:
+ case Alter_info::ALTER_TABLE_LOCK_EXCLUSIVE:
+ break;
+ }
+ break;
+ case HA_ALTER_INPLACE_EXCLUSIVE_LOCK:
+ case HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE:
+ case HA_ALTER_INPLACE_SHARED_LOCK:
+ break;
+ }
+
+ if (table->file->ha_prepare_inplace_alter_table(altered_table,
+ ha_alter_info))
+ {
+ goto rollback;
+ }
+
+ /*
+ Downgrade the lock if storage engine has told us that exclusive lock was
+ necessary only for prepare phase (unless we are not under LOCK TABLES) and
+ user has not explicitly requested exclusive lock.
+ */
+ if ((inplace_supported == HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE ||
+ inplace_supported == HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE) &&
+ !(thd->locked_tables_mode == LTM_LOCK_TABLES ||
+ thd->locked_tables_mode == LTM_PRELOCKED_UNDER_LOCK_TABLES) &&
+ (alter_info->requested_lock != Alter_info::ALTER_TABLE_LOCK_EXCLUSIVE))
+ {
+ /* If storage engine or user requested shared lock downgrade to SNW. */
+ if (inplace_supported == HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE ||
+ alter_info->requested_lock == Alter_info::ALTER_TABLE_LOCK_SHARED)
+ table->mdl_ticket->downgrade_lock(MDL_SHARED_NO_WRITE);
+ else
+ {
+ DBUG_ASSERT(inplace_supported == HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE);
+ table->mdl_ticket->downgrade_lock(MDL_SHARED_UPGRADABLE);
+ }
+ }
+
+ DEBUG_SYNC(thd, "alter_table_inplace_after_lock_downgrade");
+ THD_STAGE_INFO(thd, stage_alter_inplace);
+
+ if (table->file->ha_inplace_alter_table(altered_table,
+ ha_alter_info))
+ {
+ goto rollback;
+ }
+
+ // Upgrade to EXCLUSIVE before commit.
+ if (wait_while_table_is_used(thd, table, HA_EXTRA_PREPARE_FOR_RENAME))
+ goto rollback;
+
+ /*
+ If we are killed after this point, we should ignore and continue.
+ We have mostly completed the operation at this point, there should
+ be no long waits left.
+ */
+
+ DBUG_EXECUTE_IF("alter_table_rollback_new_index", {
+ table->file->ha_commit_inplace_alter_table(altered_table,
+ ha_alter_info,
+ false);
+ my_error(ER_UNKNOWN_ERROR, MYF(0));
+ goto cleanup;
+ });
+
+ DEBUG_SYNC(thd, "alter_table_inplace_before_commit");
+ THD_STAGE_INFO(thd, stage_alter_inplace_commit);
+
+ if (table->file->ha_commit_inplace_alter_table(altered_table,
+ ha_alter_info,
+ true))
+ {
+ goto rollback;
+ }
+
+ close_all_tables_for_name(thd, table->s,
+ alter_ctx->is_table_renamed() ?
+ HA_EXTRA_PREPARE_FOR_RENAME :
+ HA_EXTRA_NOT_USED,
+ NULL);
+ table_list->table= table= NULL;
+ close_temporary_table(thd, altered_table, true, false);
+
+ /*
+ Replace the old .FRM with the new .FRM, but keep the old name for now.
+ Rename to the new name (if needed) will be handled separately below.
+ */
+ if (mysql_rename_table(db_type, alter_ctx->new_db, alter_ctx->tmp_name,
+ alter_ctx->db, alter_ctx->alias,
+ FN_FROM_IS_TMP | NO_HA_TABLE))
+ {
+ // Since changes were done in-place, we can't revert them.
+ (void) quick_rm_table(thd, db_type,
+ alter_ctx->new_db, alter_ctx->tmp_name,
+ FN_IS_TMP | NO_HA_TABLE);
+ DBUG_RETURN(true);
+ }
+
+ table_list->mdl_request.ticket= mdl_ticket;
+ if (open_table(thd, table_list, thd->mem_root, &ot_ctx))
+ DBUG_RETURN(true);
+
+ /*
+ Tell the handler that the changed frm is on disk and table
+ has been re-opened
+ */
+ table_list->table->file->ha_notify_table_changed();
+
+ /*
+ We might be going to reopen table down on the road, so we have to
+ restore state of the TABLE object which we used for obtaining of
+ handler object to make it usable for later reopening.
+ */
+ close_thread_table(thd, &thd->open_tables);
+ table_list->table= NULL;
+
+ // Rename altered table if requested.
+ if (alter_ctx->is_table_renamed())
+ {
+ // Remove TABLE and TABLE_SHARE for old name from TDC.
+ tdc_remove_table(thd, TDC_RT_REMOVE_ALL,
+ alter_ctx->db, alter_ctx->table_name, false);
+
+ if (mysql_rename_table(db_type, alter_ctx->db, alter_ctx->table_name,
+ alter_ctx->new_db, alter_ctx->new_alias, 0))
+ {
+ /*
+ If the rename fails we will still have a working table
+ with the old name, but with other changes applied.
+ */
+ DBUG_RETURN(true);
+ }
+ if (Table_triggers_list::change_table_name(thd,
+ alter_ctx->db,
+ alter_ctx->alias,
+ alter_ctx->table_name,
+ alter_ctx->new_db,
+ alter_ctx->new_alias))
+ {
+ /*
+ If the rename of trigger files fails, try to rename the table
+ back so we at least have matching table and trigger files.
+ */
+ (void) mysql_rename_table(db_type,
+ alter_ctx->new_db, alter_ctx->new_alias,
+ alter_ctx->db, alter_ctx->alias, 0);
+ DBUG_RETURN(true);
+ }
+ rename_table_in_stat_tables(thd, alter_ctx->db,alter_ctx->alias,
+ alter_ctx->new_db, alter_ctx->new_alias);
+ }
+
+ DBUG_RETURN(false);
+
+ rollback:
+ table->file->ha_commit_inplace_alter_table(altered_table,
+ ha_alter_info,
+ false);
+ cleanup:
+ if (reopen_tables)
+ {
+ /* Close the only table instance which is still around. */
+ close_all_tables_for_name(thd, table->s,
+ alter_ctx->is_table_renamed() ?
+ HA_EXTRA_PREPARE_FOR_RENAME :
+ HA_EXTRA_NOT_USED,
+ NULL);
+ if (thd->locked_tables_list.reopen_tables(thd))
+ thd->locked_tables_list.unlink_all_closed_tables(thd, NULL, 0);
+ /* QQ; do something about metadata locks ? */
+ }
+ close_temporary_table(thd, altered_table, true, false);
+ // Delete temporary .frm/.par
+ (void) quick_rm_table(thd, create_info->db_type, alter_ctx->new_db,
+ alter_ctx->tmp_name, FN_IS_TMP | NO_HA_TABLE);
+ DBUG_RETURN(true);
+}
+
/**
maximum possible length for certain blob types.
@@ -5742,6 +6613,7 @@ blob_length_by_type(enum_field_types type)
But since ALTER might end-up doing CREATE,
this distinction is gone and we just carry
around two structures.
+ @param[in,out] alter_ctx Runtime context for ALTER TABLE.
@return
Fills various create_info members based on information retrieved
@@ -5758,7 +6630,8 @@ blob_length_by_type(enum_field_types type)
bool
mysql_prepare_alter_table(THD *thd, TABLE *table,
HA_CREATE_INFO *create_info,
- Alter_info *alter_info)
+ Alter_info *alter_info,
+ Alter_table_ctx *alter_ctx)
{
/* New column definitions are added here */
List<Create_field> new_create_list;
@@ -5797,15 +6670,30 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
table->file->info(HA_STATUS_AUTO);
create_info->auto_increment_value= table->file->stats.auto_increment_value;
}
+
if (!(used_fields & HA_CREATE_USED_KEY_BLOCK_SIZE))
create_info->key_block_size= table->s->key_block_size;
+
+ if (!(used_fields & HA_CREATE_USED_STATS_SAMPLE_PAGES))
+ create_info->stats_sample_pages= table->s->stats_sample_pages;
+
+ if (!(used_fields & HA_CREATE_USED_STATS_AUTO_RECALC))
+ create_info->stats_auto_recalc= table->s->stats_auto_recalc;
+
if (!(used_fields & HA_CREATE_USED_TRANSACTIONAL))
create_info->transactional= table->s->transactional;
restore_record(table, s->default_values); // Empty record for DEFAULT
+ if ((create_info->fields_option_struct= (ha_field_option_struct**)
+ thd->calloc(sizeof(void*) * table->s->fields)) == NULL ||
+ (create_info->indexes_option_struct= (ha_index_option_struct**)
+ thd->calloc(sizeof(void*) * table->s->keys)) == NULL)
+ DBUG_RETURN(1);
+
create_info->option_list= merge_engine_table_options(table->s->option_list,
create_info->option_list, thd->mem_root);
+
/*
First collect all fields from table which isn't in drop_list
*/
@@ -5836,12 +6724,6 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
if (table->s->tmp_table == NO_TMP_TABLE)
(void) delete_statistics_for_column(thd, table, field);
drop_it.remove();
- /*
- ALTER TABLE DROP COLUMN always changes table data even in cases
- when new version of the table has the same structure as the old
- one.
- */
- alter_info->change_level= ALTER_TABLE_DATA_CHANGED;
continue;
}
/* Check if field is changed */
@@ -5855,6 +6737,12 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
if (def)
{ // Field is changed
def->field=field;
+ /*
+ Add column being updated to the list of new columns.
+ Note that columns with AFTER clauses are added to the end
+ of the list for now. Their positions will be corrected later.
+ */
+ new_create_list.push_back(def);
if (field->stored_in_db != def->stored_in_db)
{
my_error(ER_UNSUPPORTED_ACTION_ON_VIRTUAL_COLUMN, MYF(0));
@@ -5862,7 +6750,13 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
}
if (!def->after)
{
- new_create_list.push_back(def);
+ /*
+ If this ALTER TABLE doesn't have an AFTER clause for the modified
+ column then remove this column from the list of columns to be
+ processed. So later we can iterate over the columns remaining
+ in this list and process modified columns with AFTER clause or
+ add new columns.
+ */
def_it.remove();
}
}
@@ -5916,45 +6810,58 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
def->sql_type == MYSQL_TYPE_NEWDATE ||
def->sql_type == MYSQL_TYPE_DATETIME ||
def->sql_type == MYSQL_TYPE_DATETIME2) &&
- !alter_info->datetime_field &&
+ !alter_ctx->datetime_field &&
!(~def->flags & (NO_DEFAULT_VALUE_FLAG | NOT_NULL_FLAG)) &&
thd->variables.sql_mode & MODE_NO_ZERO_DATE)
{
- alter_info->datetime_field= def;
- alter_info->error_if_not_empty= TRUE;
+ alter_ctx->datetime_field= def;
+ alter_ctx->error_if_not_empty= TRUE;
}
if (!def->after)
new_create_list.push_back(def);
- else if (def->after == first_keyword)
- {
- new_create_list.push_front(def);
- /*
- Re-ordering columns in table can't be done using in-place algorithm
- as it always changes table data.
- */
- alter_info->change_level= ALTER_TABLE_DATA_CHANGED;
- }
else
{
Create_field *find;
- find_it.rewind();
- while ((find=find_it++)) // Add new columns
+ if (def->change)
{
- if (!my_strcasecmp(system_charset_info,def->after, find->field_name))
- break;
+ find_it.rewind();
+ /*
+ For columns being modified with AFTER clause we should first remove
+ these columns from the list and then add them back at their correct
+ positions.
+ */
+ while ((find=find_it++))
+ {
+ /*
+ Create_fields representing changed columns are added directly
+ from Alter_info::create_list to new_create_list. We can therefore
+ safely use pointer equality rather than name matching here.
+ This prevents removing the wrong column in case of column rename.
+ */
+ if (find == def)
+ {
+ find_it.remove();
+ break;
+ }
+ }
}
- if (!find)
+ if (def->after == first_keyword)
+ new_create_list.push_front(def);
+ else
{
- my_error(ER_BAD_FIELD_ERROR, MYF(0), def->after,
- table->s->table_name.str);
- goto err;
+ find_it.rewind();
+ while ((find=find_it++))
+ {
+ if (!my_strcasecmp(system_charset_info, def->after, find->field_name))
+ break;
+ }
+ if (!find)
+ {
+ my_error(ER_BAD_FIELD_ERROR, MYF(0), def->after, table->s->table_name.str);
+ goto err;
+ }
+ find_it.after(def); // Put column after this
}
- find_it.after(def); // Put element after this
- /*
- Re-ordering columns in table can't be done using in-place algorithm
- as it always changes table data.
- */
- alter_info->change_level= ALTER_TABLE_DATA_CHANGED;
}
}
if (alter_info->alter_list.elements)
@@ -5996,7 +6903,8 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
KEY *tab_key_info= table->key_info;
for (uint j=0; j < table->s->keys; j++, tab_key_info++)
{
- if (tab_key_info->key_parts != tab_key_info->ext_key_parts)
+ if (tab_key_info->user_defined_key_parts !=
+ tab_key_info->ext_key_parts)
(void) delete_statistics_for_index(thd, table, tab_key_info,
TRUE);
}
@@ -6009,7 +6917,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
KEY_PART_INFO *key_part= key_info->key_part;
key_parts.empty();
bool delete_index_stat= FALSE;
- for (uint j=0 ; j < key_info->key_parts ; j++,key_part++)
+ for (uint j=0 ; j < key_info->user_defined_key_parts ; j++,key_part++)
{
if (!key_part->field)
continue; // Wrong field (from UNIREG)
@@ -6083,7 +6991,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
if (delete_index_stat)
(void) delete_statistics_for_index(thd, table, key_info, FALSE);
else if (modified_primary_key &&
- key_info->key_parts != key_info->ext_key_parts)
+ key_info->user_defined_key_parts != key_info->ext_key_parts)
(void) delete_statistics_for_index(thd, table, key_info, TRUE);
}
@@ -6130,8 +7038,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
if (key->type == Key::FOREIGN_KEY &&
((Foreign_key *)key)->validate(new_create_list))
goto err;
- if (key->type != Key::FOREIGN_KEY)
- new_key_list.push_back(key);
+ new_key_list.push_back(key);
if (key->name.str &&
!my_strcasecmp(system_charset_info, key->name.str, primary_key_name))
{
@@ -6143,9 +7050,20 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
if (alter_info->drop_list.elements)
{
- my_error(ER_CANT_DROP_FIELD_OR_KEY, MYF(0),
- alter_info->drop_list.head()->name);
- goto err;
+ Alter_drop *drop;
+ drop_it.rewind();
+ while ((drop=drop_it++)) {
+ switch (drop->type) {
+ case Alter_drop::KEY:
+ case Alter_drop::COLUMN:
+ my_error(ER_CANT_DROP_FIELD_OR_KEY, MYF(0),
+ alter_info->drop_list.head()->name);
+ goto err;
+ case Alter_drop::FOREIGN_KEY:
+ // Leave the DROP FOREIGN KEY names in the alter_info->drop_list.
+ break;
+ }
+ }
}
if (alter_info->alter_list.elements)
{
@@ -6165,6 +7083,11 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
(HA_OPTION_PACK_KEYS | HA_OPTION_NO_PACK_KEYS)) ||
(used_fields & HA_CREATE_USED_PACK_KEYS))
db_create_options&= ~(HA_OPTION_PACK_KEYS | HA_OPTION_NO_PACK_KEYS);
+ if ((create_info->table_options &
+ (HA_OPTION_STATS_PERSISTENT | HA_OPTION_NO_STATS_PERSISTENT)) ||
+ (used_fields & HA_CREATE_USED_STATS_PERSISTENT))
+ db_create_options&= ~(HA_OPTION_STATS_PERSISTENT | HA_OPTION_NO_STATS_PERSISTENT);
+
if (create_info->table_options &
(HA_OPTION_CHECKSUM | HA_OPTION_NO_CHECKSUM))
db_create_options&= ~(HA_OPTION_CHECKSUM | HA_OPTION_NO_CHECKSUM);
@@ -6185,88 +7108,495 @@ err:
}
-/*
- Alter table
+/**
+ Get Create_field object for newly created table by its name
+ in the old version of table.
- SYNOPSIS
- mysql_alter_table()
- thd Thread handle
- new_db If there is a RENAME clause
- new_name If there is a RENAME clause
- create_info Information from the parsing phase about new
- table properties.
- table_list The table to change.
- alter_info Lists of fields, keys to be changed, added
- or dropped.
- order_num How many ORDER BY fields has been specified.
- order List of fields to ORDER BY.
- ignore Whether we have ALTER IGNORE TABLE
- require_online Give an error if we can't do operation online
+ @param alter_info Alter_info describing newly created table.
+ @param old_name Name of field in old table.
- DESCRIPTION
- This is a veery long function and is everything but the kitchen sink :)
- It is used to alter a table and not only by ALTER TABLE but also
- CREATE|DROP INDEX are mapped on this function.
-
- When the ALTER TABLE statement just does a RENAME or ENABLE|DISABLE KEYS,
- or both, then this function short cuts its operation by renaming
- the table and/or enabling/disabling the keys. In this case, the FRM is
- not changed, directly by mysql_alter_table. However, if there is a
- RENAME + change of a field, or an index, the short cut is not used.
- See how `create_list` is used to generate the new FRM regarding the
- structure of the fields. The same is done for the indices of the table.
-
- Important is the fact, that this function tries to do as little work as
- possible, by finding out whether a intermediate table is needed to copy
- data into and when finishing the altering to use it as the original table.
- For this reason the function mysql_compare_tables() is called, which decides
- based on all kind of data how similar are the new and the original
- tables.
+ @returns Pointer to Create_field object, NULL - if field is
+ not present in new version of table.
+*/
- RETURN VALUES
- FALSE OK
- TRUE Error
+static Create_field *get_field_by_old_name(Alter_info *alter_info,
+ const char *old_name)
+{
+ List_iterator_fast<Create_field> new_field_it(alter_info->create_list);
+ Create_field *new_field;
+
+ while ((new_field= new_field_it++))
+ {
+ if (new_field->field &&
+ (my_strcasecmp(system_charset_info,
+ new_field->field->field_name,
+ old_name) == 0))
+ break;
+ }
+ return new_field;
+}
+
+
+/** Type of change to foreign key column, */
+
+enum fk_column_change_type
+{
+ FK_COLUMN_NO_CHANGE, FK_COLUMN_DATA_CHANGE,
+ FK_COLUMN_RENAMED, FK_COLUMN_DROPPED
+};
+
+/**
+ Check that ALTER TABLE's changes on columns of a foreign key are allowed.
+
+ @param[in] thd Thread context.
+ @param[in] alter_info Alter_info describing changes to be done
+ by ALTER TABLE.
+ @param[in] fk_columns List of columns of the foreign key to check.
+ @param[out] bad_column_name Name of field on which ALTER TABLE tries to
+ do prohibited operation.
+
+ @note This function takes into account value of @@foreign_key_checks
+ setting.
+
+ @retval FK_COLUMN_NO_CHANGE No significant changes are to be done on
+ foreign key columns.
+ @retval FK_COLUMN_DATA_CHANGE ALTER TABLE might result in value
+ change in foreign key column (and
+ foreign_key_checks is on).
+ @retval FK_COLUMN_RENAMED Foreign key column is renamed.
+ @retval FK_COLUMN_DROPPED Foreign key column is dropped.
+*/
+
+static enum fk_column_change_type
+fk_check_column_changes(THD *thd, Alter_info *alter_info,
+ List<LEX_STRING> &fk_columns,
+ const char **bad_column_name)
+{
+ List_iterator_fast<LEX_STRING> column_it(fk_columns);
+ LEX_STRING *column;
+
+ *bad_column_name= NULL;
+
+ while ((column= column_it++))
+ {
+ Create_field *new_field= get_field_by_old_name(alter_info, column->str);
+
+ if (new_field)
+ {
+ Field *old_field= new_field->field;
+
+ if (my_strcasecmp(system_charset_info, old_field->field_name,
+ new_field->field_name))
+ {
+ /*
+ Copy algorithm doesn't support proper renaming of columns in
+ the foreign key yet. At the moment we lack API which will tell
+ SE that foreign keys should be updated to use new name of column
+ like it happens in case of in-place algorithm.
+ */
+ *bad_column_name= column->str;
+ return FK_COLUMN_RENAMED;
+ }
+
+ if ((old_field->is_equal(new_field) == IS_EQUAL_NO) ||
+ ((new_field->flags & NOT_NULL_FLAG) &&
+ !(old_field->flags & NOT_NULL_FLAG)))
+ {
+ if (!(thd->variables.option_bits & OPTION_NO_FOREIGN_KEY_CHECKS))
+ {
+ /*
+ Column in a FK has changed significantly. Unless
+ foreign_key_checks are off we prohibit this since this
+ means values in this column might be changed by ALTER
+ and thus referential integrity might be broken,
+ */
+ *bad_column_name= column->str;
+ return FK_COLUMN_DATA_CHANGE;
+ }
+ }
+ }
+ else
+ {
+ /*
+ Column in FK was dropped. Most likely this will break
+ integrity constraints of InnoDB data-dictionary (and thus
+ InnoDB will emit an error), so we prohibit this right away
+ even if foreign_key_checks are off.
+ This also includes a rare case when another field replaces
+ field being dropped since it is easy to break referential
+ integrity in this case.
+ */
+ *bad_column_name= column->str;
+ return FK_COLUMN_DROPPED;
+ }
+ }
+
+ return FK_COLUMN_NO_CHANGE;
+}
+
+
+/**
+ Check if ALTER TABLE we are about to execute using COPY algorithm
+ is not supported as it might break referential integrity.
+
+ @note If foreign_key_checks is disabled (=0), we allow to break
+ referential integrity. But we still disallow some operations
+ like dropping or renaming columns in foreign key since they
+ are likely to break consistency of InnoDB data-dictionary
+ and thus will end-up in error anyway.
+
+ @param[in] thd Thread context.
+ @param[in] table Table to be altered.
+ @param[in] alter_info Lists of fields, keys to be changed, added
+ or dropped.
+ @param[out] alter_ctx ALTER TABLE runtime context.
+ Alter_table_ctx::fk_error_if_delete flag
+ is set if deletion during alter can break
+ foreign key integrity.
+
+ @retval false Success.
+ @retval true Error, ALTER - tries to do change which is not compatible
+ with foreign key definitions on the table.
+*/
+
+static bool fk_prepare_copy_alter_table(THD *thd, TABLE *table,
+ Alter_info *alter_info,
+ Alter_table_ctx *alter_ctx)
+{
+ List <FOREIGN_KEY_INFO> fk_parent_key_list;
+ List <FOREIGN_KEY_INFO> fk_child_key_list;
+ FOREIGN_KEY_INFO *f_key;
+
+ DBUG_ENTER("fk_prepare_copy_alter_table");
+
+ table->file->get_parent_foreign_key_list(thd, &fk_parent_key_list);
+
+ /* OOM when building list. */
+ if (thd->is_error())
+ DBUG_RETURN(true);
+
+ /*
+ Remove from the list all foreign keys in which table participates as
+ parent which are to be dropped by this ALTER TABLE. This is possible
+ when a foreign key has the same table as child and parent.
+ */
+ List_iterator<FOREIGN_KEY_INFO> fk_parent_key_it(fk_parent_key_list);
+
+ while ((f_key= fk_parent_key_it++))
+ {
+ Alter_drop *drop;
+ List_iterator_fast<Alter_drop> drop_it(alter_info->drop_list);
+
+ while ((drop= drop_it++))
+ {
+ /*
+ InnoDB treats foreign key names in case-insensitive fashion.
+ So we do it here too. For database and table name type of
+ comparison used depends on lower-case-table-names setting.
+ For l_c_t_n = 0 we use case-sensitive comparison, for
+ l_c_t_n > 0 modes case-insensitive comparison is used.
+ */
+ if ((drop->type == Alter_drop::FOREIGN_KEY) &&
+ (my_strcasecmp(system_charset_info, f_key->foreign_id->str,
+ drop->name) == 0) &&
+ (my_strcasecmp(table_alias_charset, f_key->foreign_db->str,
+ table->s->db.str) == 0) &&
+ (my_strcasecmp(table_alias_charset, f_key->foreign_table->str,
+ table->s->table_name.str) == 0))
+ fk_parent_key_it.remove();
+ }
+ }
+
+ /*
+ If there are FKs in which this table is parent which were not
+ dropped we need to prevent ALTER deleting rows from the table,
+ as it might break referential integrity. OTOH it is OK to do
+ so if foreign_key_checks are disabled.
+ */
+ if (!fk_parent_key_list.is_empty() &&
+ !(thd->variables.option_bits & OPTION_NO_FOREIGN_KEY_CHECKS))
+ alter_ctx->set_fk_error_if_delete_row(fk_parent_key_list.head());
+
+ fk_parent_key_it.rewind();
+ while ((f_key= fk_parent_key_it++))
+ {
+ enum fk_column_change_type changes;
+ const char *bad_column_name;
+
+ changes= fk_check_column_changes(thd, alter_info,
+ f_key->referenced_fields,
+ &bad_column_name);
+
+ switch(changes)
+ {
+ case FK_COLUMN_NO_CHANGE:
+ /* No significant changes. We can proceed with ALTER! */
+ break;
+ case FK_COLUMN_DATA_CHANGE:
+ {
+ char buff[NAME_LEN*2+2];
+ strxnmov(buff, sizeof(buff)-1, f_key->foreign_db->str, ".",
+ f_key->foreign_table->str, NullS);
+ my_error(ER_FK_COLUMN_CANNOT_CHANGE_CHILD, MYF(0), bad_column_name,
+ f_key->foreign_id->str, buff);
+ DBUG_RETURN(true);
+ }
+ case FK_COLUMN_RENAMED:
+ my_error(ER_ALTER_OPERATION_NOT_SUPPORTED_REASON, MYF(0),
+ "ALGORITHM=COPY",
+ ER(ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_RENAME),
+ "ALGORITHM=INPLACE");
+ DBUG_RETURN(true);
+ case FK_COLUMN_DROPPED:
+ {
+ char buff[NAME_LEN*2+2];
+ strxnmov(buff, sizeof(buff)-1, f_key->foreign_db->str, ".",
+ f_key->foreign_table->str, NullS);
+ my_error(ER_FK_COLUMN_CANNOT_DROP_CHILD, MYF(0), bad_column_name,
+ f_key->foreign_id->str, buff);
+ DBUG_RETURN(true);
+ }
+ default:
+ DBUG_ASSERT(0);
+ }
+ }
+
+ table->file->get_foreign_key_list(thd, &fk_child_key_list);
+
+ /* OOM when building list. */
+ if (thd->is_error())
+ DBUG_RETURN(true);
+
+ /*
+ Remove from the list all foreign keys which are to be dropped
+ by this ALTER TABLE.
+ */
+ List_iterator<FOREIGN_KEY_INFO> fk_key_it(fk_child_key_list);
+
+ while ((f_key= fk_key_it++))
+ {
+ Alter_drop *drop;
+ List_iterator_fast<Alter_drop> drop_it(alter_info->drop_list);
+
+ while ((drop= drop_it++))
+ {
+ /* Names of foreign keys in InnoDB are case-insensitive. */
+ if ((drop->type == Alter_drop::FOREIGN_KEY) &&
+ (my_strcasecmp(system_charset_info, f_key->foreign_id->str,
+ drop->name) == 0))
+ fk_key_it.remove();
+ }
+ }
+
+ fk_key_it.rewind();
+ while ((f_key= fk_key_it++))
+ {
+ enum fk_column_change_type changes;
+ const char *bad_column_name;
+
+ changes= fk_check_column_changes(thd, alter_info,
+ f_key->foreign_fields,
+ &bad_column_name);
+
+ switch(changes)
+ {
+ case FK_COLUMN_NO_CHANGE:
+ /* No significant changes. We can proceed with ALTER! */
+ break;
+ case FK_COLUMN_DATA_CHANGE:
+ my_error(ER_FK_COLUMN_CANNOT_CHANGE, MYF(0), bad_column_name,
+ f_key->foreign_id->str);
+ DBUG_RETURN(true);
+ case FK_COLUMN_RENAMED:
+ my_error(ER_ALTER_OPERATION_NOT_SUPPORTED_REASON, MYF(0),
+ "ALGORITHM=COPY",
+ ER(ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_RENAME),
+ "ALGORITHM=INPLACE");
+ DBUG_RETURN(true);
+ case FK_COLUMN_DROPPED:
+ my_error(ER_FK_COLUMN_CANNOT_DROP, MYF(0), bad_column_name,
+ f_key->foreign_id->str);
+ DBUG_RETURN(true);
+ default:
+ DBUG_ASSERT(0);
+ }
+ }
+
+ DBUG_RETURN(false);
+}
+
+
+/**
+ Rename table and/or turn indexes on/off without touching .FRM
+
+ @param thd Thread handler
+ @param table_list TABLE_LIST for the table to change
+ @param keys_onoff ENABLE or DISABLE KEYS?
+ @param alter_ctx ALTER TABLE runtime context.
+
+ @return Operation status
+ @retval false Success
+ @retval true Failure
+*/
+
+static bool
+simple_rename_or_index_change(THD *thd, TABLE_LIST *table_list,
+ Alter_info::enum_enable_or_disable keys_onoff,
+ Alter_table_ctx *alter_ctx)
+{
+ TABLE *table= table_list->table;
+ MDL_ticket *mdl_ticket= table->mdl_ticket;
+ int error= 0;
+ enum ha_extra_function extra_func= thd->locked_tables_mode
+ ? HA_EXTRA_NOT_USED
+ : HA_EXTRA_FORCE_REOPEN;
+ DBUG_ENTER("simple_rename_or_index_change");
+
+ if (keys_onoff != Alter_info::LEAVE_AS_IS)
+ {
+ if (wait_while_table_is_used(thd, table, extra_func))
+ DBUG_RETURN(true);
+
+ // It's now safe to take the table level lock.
+ if (lock_tables(thd, table_list, alter_ctx->tables_opened, 0))
+ DBUG_RETURN(true);
+
+ if (keys_onoff == Alter_info::ENABLE)
+ {
+ DEBUG_SYNC(thd,"alter_table_enable_indexes");
+ DBUG_EXECUTE_IF("sleep_alter_enable_indexes", my_sleep(6000000););
+ error= table->file->ha_enable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE);
+ }
+ else if (keys_onoff == Alter_info::DISABLE)
+ error=table->file->ha_disable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE);
+
+ if (error == HA_ERR_WRONG_COMMAND)
+ {
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
+ ER_ILLEGAL_HA, ER(ER_ILLEGAL_HA),
+ table->file->table_type(),
+ table->s->db.str, table->s->table_name.str);
+ error= 0;
+ }
+ else if (error > 0)
+ {
+ table->file->print_error(error, MYF(0));
+ error= -1;
+ }
+ }
+
+ if (!error && alter_ctx->is_table_renamed())
+ {
+ THD_STAGE_INFO(thd, stage_rename);
+ handlerton *old_db_type= table->s->db_type();
+ /*
+ Then do a 'simple' rename of the table. First we need to close all
+ instances of 'source' table.
+ Note that if wait_while_table_is_used() returns error here (i.e. if
+ this thread was killed) then it must be that previous step of
+ simple rename did nothing and therefore we can safely return
+ without additional clean-up.
+ */
+ if (wait_while_table_is_used(thd, table, extra_func))
+ DBUG_RETURN(true);
+ close_all_tables_for_name(thd, table->s, HA_EXTRA_PREPARE_FOR_RENAME, NULL);
+
+ LEX_STRING old_db_name= { alter_ctx->db, strlen(alter_ctx->db) };
+ LEX_STRING old_table_name=
+ { alter_ctx->table_name, strlen(alter_ctx->table_name) };
+ LEX_STRING new_db_name= { alter_ctx->new_db, strlen(alter_ctx->new_db) };
+ LEX_STRING new_table_name=
+ { alter_ctx->new_alias, strlen(alter_ctx->new_alias) };
+ (void) rename_table_in_stat_tables(thd, &old_db_name, &old_table_name,
+ &new_db_name, &new_table_name);
+
+ if (mysql_rename_table(old_db_type, alter_ctx->db, alter_ctx->table_name,
+ alter_ctx->new_db, alter_ctx->new_alias, 0))
+ error= -1;
+ else if (Table_triggers_list::change_table_name(thd,
+ alter_ctx->db,
+ alter_ctx->alias,
+ alter_ctx->table_name,
+ alter_ctx->new_db,
+ alter_ctx->new_alias))
+ {
+ (void) mysql_rename_table(old_db_type,
+ alter_ctx->new_db, alter_ctx->new_alias,
+ alter_ctx->db, alter_ctx->table_name, 0);
+ error= -1;
+ }
+ }
+
+ if (!error)
+ {
+ error= write_bin_log(thd, TRUE, thd->query(), thd->query_length());
+ if (!error)
+ my_ok(thd);
+ }
+ table_list->table= NULL; // For query cache
+ query_cache_invalidate3(thd, table_list, 0);
+
+ if ((thd->locked_tables_mode == LTM_LOCK_TABLES ||
+ thd->locked_tables_mode == LTM_PRELOCKED_UNDER_LOCK_TABLES))
+ {
+ /*
+ Under LOCK TABLES we should adjust meta-data locks before finishing
+ statement. Otherwise we can rely on them being released
+ along with the implicit commit.
+ */
+ if (alter_ctx->is_table_renamed())
+ thd->mdl_context.release_all_locks_for_name(mdl_ticket);
+ else
+ mdl_ticket->downgrade_lock(MDL_SHARED_NO_READ_WRITE);
+ }
+ DBUG_RETURN(error != 0);
+}
+
+
+/**
+ Alter table
+
+ @param thd Thread handle
+ @param new_db If there is a RENAME clause
+ @param new_name If there is a RENAME clause
+ @param create_info Information from the parsing phase about new
+ table properties.
+ @param table_list The table to change.
+ @param alter_info Lists of fields, keys to be changed, added
+ or dropped.
+ @param order_num How many ORDER BY fields has been specified.
+ @param order List of fields to ORDER BY.
+ @param ignore Whether we have ALTER IGNORE TABLE
+
+ @retval true Error
+ @retval false Success
+
+ This is a veery long function and is everything but the kitchen sink :)
+ It is used to alter a table and not only by ALTER TABLE but also
+ CREATE|DROP INDEX are mapped on this function.
+
+ When the ALTER TABLE statement just does a RENAME or ENABLE|DISABLE KEYS,
+ or both, then this function short cuts its operation by renaming
+ the table and/or enabling/disabling the keys. In this case, the FRM is
+ not changed, directly by mysql_alter_table. However, if there is a
+ RENAME + change of a field, or an index, the short cut is not used.
+ See how `create_list` is used to generate the new FRM regarding the
+ structure of the fields. The same is done for the indices of the table.
+
+ Altering a table can be done in two ways. The table can be modified
+ directly using an in-place algorithm, or the changes can be done using
+ an intermediate temporary table (copy). In-place is the preferred
+ algorithm as it avoids copying table data. The storage engine
+ selects which algorithm to use in check_if_supported_inplace_alter()
+ based on information about the table changes from fill_alter_inplace_info().
*/
bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
HA_CREATE_INFO *create_info,
TABLE_LIST *table_list,
Alter_info *alter_info,
- uint order_num, ORDER *order, bool ignore,
- bool require_online)
+ uint order_num, ORDER *order, bool ignore)
{
- TABLE *table, *new_table= 0;
- MDL_ticket *mdl_ticket;
- MDL_request target_mdl_request;
- int error= 0, create_table_mode= C_ALTER_TABLE;
- char tmp_name[80],old_name[32],new_name_buff[FN_REFLEN + 1];
- char new_alias_buff[FN_REFLEN], *table_name, *db, *new_alias, *alias;
- char index_file[FN_REFLEN], data_file[FN_REFLEN];
- char path[FN_REFLEN + 1];
- ha_rows copied,deleted;
- handlerton *old_db_type, *new_db_type, *save_old_db_type;
- enum_alter_table_change_level need_copy_table= ALTER_TABLE_METADATA_ONLY;
-#ifdef WITH_PARTITION_STORAGE_ENGINE
- TABLE *table_for_fast_alter_partition= NULL;
- bool partition_changed= FALSE;
-#endif
- bool need_lock_for_indexes __attribute__((unused)) = TRUE;
- KEY *key_info_buffer;
- uint index_drop_count= 0;
- uint *index_drop_buffer= NULL;
- uint index_add_count= 0;
- handler_add_index *add= NULL;
- bool pending_inplace_add_index= false;
- uint *index_add_buffer= NULL;
- uint candidate_key_count= 0;
- bool no_pk;
- ulong explicit_used_fields= 0;
- enum ha_extra_function extra_func= thd->locked_tables_mode
- ? HA_EXTRA_NOT_USED
- : HA_EXTRA_FORCE_REOPEN;
- LEX_STRING old_db_name= { table_list->db, table_list->db_length };
- LEX_STRING old_table_name= { table_list->table_name,
- table_list->table_name_length };
DBUG_ENTER("mysql_alter_table");
/*
@@ -6275,68 +7605,39 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
it is the case.
TODO: this design is obsolete and will be removed.
*/
- if (table_list && table_list->db && table_list->table_name)
- {
- int table_kind= 0;
+ int table_kind= check_if_log_table(table_list->db_length, table_list->db,
+ table_list->table_name_length,
+ table_list->table_name, false);
- table_kind= check_if_log_table(table_list->db_length, table_list->db,
- table_list->table_name_length,
- table_list->table_name, 0);
-
- if (table_kind)
+ if (table_kind)
+ {
+ /* Disable alter of enabled log tables */
+ if (logger.is_log_table_enabled(table_kind))
{
- /* Disable alter of enabled log tables */
- if (logger.is_log_table_enabled(table_kind))
- {
- my_error(ER_BAD_LOG_STATEMENT, MYF(0), "ALTER");
- DBUG_RETURN(TRUE);
- }
+ my_error(ER_BAD_LOG_STATEMENT, MYF(0), "ALTER");
+ DBUG_RETURN(true);
+ }
- /* Disable alter of log tables to unsupported engine */
- if ((create_info->used_fields & HA_CREATE_USED_ENGINE) &&
- (!create_info->db_type || /* unknown engine */
- !(create_info->db_type->flags & HTON_SUPPORT_LOG_TABLES)))
- {
- my_error(ER_UNSUPORTED_LOG_ENGINE, MYF(0),
- hton_name(create_info->db_type)->str);
- DBUG_RETURN(TRUE);
- }
+ /* Disable alter of log tables to unsupported engine */
+ if ((create_info->used_fields & HA_CREATE_USED_ENGINE) &&
+ (!create_info->db_type || /* unknown engine */
+ !(create_info->db_type->flags & HTON_SUPPORT_LOG_TABLES)))
+ {
+ my_error(ER_UNSUPORTED_LOG_ENGINE, MYF(0),
+ hton_name(create_info->db_type)->str);
+ DBUG_RETURN(true);
+ }
#ifdef WITH_PARTITION_STORAGE_ENGINE
- if (alter_info->flags & ALTER_PARTITION)
- {
- my_error(ER_WRONG_USAGE, MYF(0), "PARTITION", "log table");
- DBUG_RETURN(TRUE);
- }
-#endif
+ if (alter_info->flags & Alter_info::ALTER_PARTITION)
+ {
+ my_error(ER_WRONG_USAGE, MYF(0), "PARTITION", "log table");
+ DBUG_RETURN(true);
}
+#endif
}
- /*
- Assign variables table_name, new_name, db, new_db, path,
- to simplify further comparisions: we want to see if it's a RENAME
- later just by comparing the pointers, avoiding the need for strcmp.
- */
THD_STAGE_INFO(thd, stage_init);
- table_name=table_list->table_name;
- alias= (lower_case_table_names == 2) ? table_list->alias : table_name;
- db=table_list->db;
- if (!new_db || !my_strcasecmp(table_alias_charset, new_db, db))
- new_db= db;
- build_table_filename(path, sizeof(path) - 1, db, table_name, "", 0);
-
- mysql_ha_rm_tables(thd, table_list);
-
- /* DISCARD/IMPORT TABLESPACE is always alone in an ALTER TABLE */
- if (alter_info->tablespace_op != NO_TABLESPACE_OP)
- {
- mysql_audit_alter_table(thd, table_list);
-
- /* Conditionally writes to binlog. */
- bool ret= mysql_discard_or_import_tablespace(thd,table_list,
- alter_info->tablespace_op);
- DBUG_RETURN(ret);
- }
/*
Code below can handle only base tables so ensure that we won't open a view.
@@ -6345,20 +7646,21 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
*/
table_list->required_type= FRMTYPE_TABLE;
- Alter_table_prelocking_strategy alter_prelocking_strategy(alter_info);
+ Alter_table_prelocking_strategy alter_prelocking_strategy;
DEBUG_SYNC(thd, "alter_table_before_open_tables");
- error= open_and_lock_tables(thd, table_list, FALSE, 0,
- &alter_prelocking_strategy);
+ uint tables_opened;
+ bool error= open_tables(thd, &table_list, &tables_opened, 0,
+ &alter_prelocking_strategy);
+
+ DEBUG_SYNC(thd, "alter_opened_table");
if (error)
- {
- DBUG_RETURN(TRUE);
- }
+ DBUG_RETURN(true);
- table= table_list->table;
+ TABLE *table= table_list->table;
table->use_all_columns();
- mdl_ticket= table->mdl_ticket;
+ MDL_ticket *mdl_ticket= table->mdl_ticket;
/*
Prohibit changing of the UNION list of a non-temporary MERGE table
@@ -6372,100 +7674,73 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
(table->s->tmp_table == NO_TMP_TABLE))
{
my_error(ER_LOCK_OR_ACTIVE_TRANSACTION, MYF(0));
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(true);
}
+ Alter_table_ctx alter_ctx(thd, table_list, tables_opened, new_db, new_name);
+
+ MDL_request target_mdl_request;
+
/* Check that we are not trying to rename to an existing table */
- if (new_name)
+ if (alter_ctx.is_table_renamed())
{
- DBUG_PRINT("info", ("new_db.new_name: '%s'.'%s'", new_db, new_name));
- strmov(new_name_buff,new_name);
- strmov(new_alias= new_alias_buff, new_name);
- if (lower_case_table_names)
+ if (table->s->tmp_table != NO_TMP_TABLE)
{
- if (lower_case_table_names != 2)
+ if (find_temporary_table(thd, alter_ctx.new_db, alter_ctx.new_name))
{
- my_casedn_str(files_charset_info, new_name_buff);
- new_alias= new_name; // Create lower case table name
+ my_error(ER_TABLE_EXISTS_ERROR, MYF(0), alter_ctx.new_alias);
+ DBUG_RETURN(true);
}
- my_casedn_str(files_charset_info, new_name);
}
- if (new_db == db &&
- !my_strcasecmp(table_alias_charset, new_name_buff, table_name))
+ else
{
+ MDL_request_list mdl_requests;
+ MDL_request target_db_mdl_request;
+
+ target_mdl_request.init(MDL_key::TABLE,
+ alter_ctx.new_db, alter_ctx.new_name,
+ MDL_EXCLUSIVE, MDL_TRANSACTION);
+ mdl_requests.push_front(&target_mdl_request);
+
/*
- Source and destination table names are equal: make later check
- easier.
+ If we are moving the table to a different database, we also
+ need IX lock on the database name so that the target database
+ is protected by MDL while the table is moved.
*/
- new_alias= new_name= table_name;
- }
- else
- {
- if (table->s->tmp_table != NO_TMP_TABLE)
+ if (alter_ctx.is_database_changed())
{
- if (find_temporary_table(thd,new_db,new_name_buff))
- {
- my_error(ER_TABLE_EXISTS_ERROR, MYF(0), new_name_buff);
- DBUG_RETURN(TRUE);
- }
+ target_db_mdl_request.init(MDL_key::SCHEMA, alter_ctx.new_db, "",
+ MDL_INTENTION_EXCLUSIVE,
+ MDL_TRANSACTION);
+ mdl_requests.push_front(&target_db_mdl_request);
}
- else
- {
- MDL_request_list mdl_requests;
- MDL_request target_db_mdl_request;
-
- target_mdl_request.init(MDL_key::TABLE, new_db, new_name,
- MDL_EXCLUSIVE, MDL_TRANSACTION);
- mdl_requests.push_front(&target_mdl_request);
-
- /*
- If we are moving the table to a different database, we also
- need IX lock on the database name so that the target database
- is protected by MDL while the table is moved.
- */
- if (new_db != db)
- {
- target_db_mdl_request.init(MDL_key::SCHEMA, new_db, "",
- MDL_INTENTION_EXCLUSIVE,
- MDL_TRANSACTION);
- mdl_requests.push_front(&target_db_mdl_request);
- }
- /*
- Global intention exclusive lock must have been already acquired when
- table to be altered was open, so there is no need to do it here.
- */
- DBUG_ASSERT(thd->mdl_context.is_lock_owner(MDL_key::GLOBAL,
- "", "",
- MDL_INTENTION_EXCLUSIVE));
+ /*
+ Global intention exclusive lock must have been already acquired when
+ table to be altered was open, so there is no need to do it here.
+ */
+ DBUG_ASSERT(thd->mdl_context.is_lock_owner(MDL_key::GLOBAL,
+ "", "",
+ MDL_INTENTION_EXCLUSIVE));
- if (thd->mdl_context.acquire_locks(&mdl_requests,
- thd->variables.lock_wait_timeout))
- DBUG_RETURN(TRUE);
+ if (thd->mdl_context.acquire_locks(&mdl_requests,
+ thd->variables.lock_wait_timeout))
+ DBUG_RETURN(true);
- DEBUG_SYNC(thd, "locked_table_name");
- /*
- Table maybe does not exist, but we got an exclusive lock
- on the name, now we can safely try to find out for sure.
- */
- build_table_filename(new_name_buff, sizeof(new_name_buff) - 1,
- new_db, new_name_buff, reg_ext, 0);
- if (!access(new_name_buff, F_OK))
- {
- /* Table will be closed in do_command() */
- my_error(ER_TABLE_EXISTS_ERROR, MYF(0), new_alias);
- goto err;
- }
+ DEBUG_SYNC(thd, "locked_table_name");
+ /*
+ Table maybe does not exist, but we got an exclusive lock
+ on the name, now we can safely try to find out for sure.
+ */
+ if (!access(alter_ctx.get_new_filename(), F_OK))
+ {
+ /* Table will be closed in do_command() */
+ my_error(ER_TABLE_EXISTS_ERROR, MYF(0), alter_ctx.new_alias);
+ DBUG_RETURN(true);
}
}
}
- else
- {
- new_alias= (lower_case_table_names == 2) ? alias : table_name;
- new_name= table_name;
- }
- old_db_type= table->s->db_type();
if (!create_info->db_type)
{
#ifdef WITH_PARTITION_STORAGE_ENGINE
@@ -6483,161 +7758,78 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
}
else
#endif
- create_info->db_type= old_db_type;
+ create_info->db_type= table->s->db_type();
}
- if (check_engine(thd, new_db, new_name, create_info))
- goto err;
- new_db_type= create_info->db_type;
+ if (check_engine(thd, alter_ctx.new_db, alter_ctx.new_name, create_info))
+ DBUG_RETURN(true);
- if ((new_db_type != old_db_type ||
- alter_info->flags & ALTER_PARTITION) &&
+ if ((create_info->db_type != table->s->db_type() ||
+ alter_info->flags & Alter_info::ALTER_PARTITION) &&
!table->file->can_switch_engines())
{
my_error(ER_ROW_IS_REFERENCED, MYF(0));
- goto err;
+ DBUG_RETURN(true);
}
/*
- If this is an ALTER TABLE and no explicit row type specified reuse
- the table's row type.
- Note: this is the same as if the row type was specified explicitly and
- we must thus set HA_CREATE_USED_ROW_FORMAT!
+ If this is an ALTER TABLE and no explicit row type specified reuse
+ the table's row type.
+ Note : this is the same as if the row type was specified explicitly.
*/
if (create_info->row_type == ROW_TYPE_NOT_USED)
{
/* ALTER TABLE without explicit row type */
create_info->row_type= table->s->row_type;
- /*
- We have to mark the row type as used, as otherwise the engine may
- change the row format in update_create_info().
- */
- create_info->used_fields|= HA_CREATE_USED_ROW_FORMAT;
- explicit_used_fields|= HA_CREATE_USED_ROW_FORMAT;
+ }
+ else
+ {
+ /* ALTER TABLE with specific row type */
+ create_info->used_fields |= HA_CREATE_USED_ROW_FORMAT;
}
DBUG_PRINT("info", ("old type: %s new type: %s",
- ha_resolve_storage_engine_name(old_db_type),
- ha_resolve_storage_engine_name(new_db_type)));
- if (ha_check_storage_engine_flag(old_db_type, HTON_ALTER_NOT_SUPPORTED))
+ ha_resolve_storage_engine_name(table->s->db_type()),
+ ha_resolve_storage_engine_name(create_info->db_type)));
+ if (ha_check_storage_engine_flag(table->s->db_type(), HTON_ALTER_NOT_SUPPORTED))
{
DBUG_PRINT("info", ("doesn't support alter"));
- my_error(ER_ILLEGAL_HA, MYF(0), hton_name(old_db_type)->str,
- db, table_name);
- goto err;
+ my_error(ER_ILLEGAL_HA, MYF(0), hton_name(table->s->db_type())->str,
+ alter_ctx.db, alter_ctx.table_name);
+ DBUG_RETURN(true);
}
- if (ha_check_storage_engine_flag(new_db_type, HTON_ALTER_NOT_SUPPORTED))
+ if (ha_check_storage_engine_flag(create_info->db_type,
+ HTON_ALTER_NOT_SUPPORTED))
{
DBUG_PRINT("info", ("doesn't support alter"));
- my_error(ER_ILLEGAL_HA, MYF(0), hton_name(new_db_type)->str,
- new_db, new_name);
- goto err;
+ my_error(ER_ILLEGAL_HA, MYF(0), hton_name(create_info->db_type)->str,
+ alter_ctx.new_db, alter_ctx.new_name);
+ DBUG_RETURN(true);
}
if (table->s->tmp_table == NO_TMP_TABLE)
mysql_audit_alter_table(thd, table_list);
-
+
THD_STAGE_INFO(thd, stage_setup);
- if (!(alter_info->flags & ~(ALTER_RENAME | ALTER_KEYS_ONOFF)) &&
+ if (!(alter_info->flags & ~(Alter_info::ALTER_RENAME |
+ Alter_info::ALTER_KEYS_ONOFF)) &&
+ alter_info->requested_algorithm !=
+ Alter_info::ALTER_TABLE_ALGORITHM_COPY &&
!table->s->tmp_table) // no need to touch frm
{
- if (alter_info->keys_onoff != LEAVE_AS_IS)
- {
- if (wait_while_table_is_used(thd, table, extra_func,
- TDC_RT_REMOVE_NOT_OWN_AND_MARK_NOT_USABLE))
- goto err;
- error= alter_table_manage_keys(table, 0, alter_info->keys_onoff);
- table->s->allow_access_to_protected_table();
- }
-
- if (!error && (new_name != table_name || new_db != db))
- {
- THD_STAGE_INFO(thd, stage_rename);
- /*
- Then do a 'simple' rename of the table. First we need to close all
- instances of 'source' table.
- Note that if wait_while_table_is_used() returns error here (i.e. if
- this thread was killed) then it must be that previous step of
- simple rename did nothing and therefore we can safely return
- without additional clean-up.
- */
- if (wait_while_table_is_used(thd, table, extra_func,
- TDC_RT_REMOVE_NOT_OWN_AND_MARK_NOT_USABLE))
- goto err;
- close_all_tables_for_name(thd, table->s, HA_EXTRA_PREPARE_FOR_RENAME);
- /*
- Then, we want check once again that target table does not exist.
- Actually the order of these two steps does not matter since
- earlier we took exclusive metadata lock on the target table, so
- we do them in this particular order only to be consistent with 5.0,
- in which we don't take this lock and where this order really matters.
- TODO: Investigate if we need this access() check at all.
- */
- if (!access(new_name_buff,F_OK))
- {
- my_error(ER_TABLE_EXISTS_ERROR, MYF(0), new_name);
- error= -1;
- }
- else
- {
- *fn_ext(new_name)=0;
-
- LEX_STRING new_db_name= { new_db, strlen(new_db) };
- LEX_STRING new_table_name= { new_alias, strlen(new_alias) };
- (void) rename_table_in_stat_tables(thd, &old_db_name, &old_table_name,
- &new_db_name, &new_table_name);
-
- if (mysql_rename_table(old_db_type,db,table_name,new_db,new_alias, 0))
- error= -1;
- else if (Table_triggers_list::change_table_name(thd, db,
- alias, table_name,
- new_db, new_alias))
- {
- (void) mysql_rename_table(old_db_type, new_db, new_alias, db,
- table_name, 0);
- error= -1;
- }
- }
- }
-
- if (error == HA_ERR_WRONG_COMMAND)
+ // This requires X-lock, no other lock levels supported.
+ if (alter_info->requested_lock != Alter_info::ALTER_TABLE_LOCK_DEFAULT &&
+ alter_info->requested_lock != Alter_info::ALTER_TABLE_LOCK_EXCLUSIVE)
{
- error= 0;
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
- ER_ILLEGAL_HA, ER(ER_ILLEGAL_HA),
- table->file->table_type(),
- table->s->db.str, table->s->table_name.str);
- }
-
- if (!error)
- {
- error= write_bin_log(thd, TRUE, thd->query(), thd->query_length());
- if (!error)
- my_ok(thd);
- }
- else if (error > 0)
- {
- table->file->print_error(error, MYF(0));
- error= -1;
- }
- table_list->table= NULL; // For query cache
- query_cache_invalidate3(thd, table_list, 0);
-
- if ((thd->locked_tables_mode == LTM_LOCK_TABLES ||
- thd->locked_tables_mode == LTM_PRELOCKED_UNDER_LOCK_TABLES))
- {
- /*
- Under LOCK TABLES we should adjust meta-data locks before finishing
- statement. Otherwise we can rely on them being released
- along with the implicit commit.
- */
- if (new_name != table_name || new_db != db)
- thd->mdl_context.release_all_locks_for_name(mdl_ticket);
- else
- mdl_ticket->downgrade_exclusive_lock(MDL_SHARED_NO_READ_WRITE);
+ my_error(ER_ALTER_OPERATION_NOT_SUPPORTED, MYF(0),
+ "LOCK=NONE/SHARED", "LOCK=EXCLUSIVE");
+ DBUG_RETURN(true);
}
- DBUG_RETURN(error);
+ bool res= simple_rename_or_index_change(thd, table_list,
+ alter_info->keys_onoff,
+ &alter_ctx);
+ DBUG_RETURN(res);
}
handle_if_exists_options(thd, table, alter_info);
@@ -6647,297 +7839,127 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
/* the IF (NOT) EXISTS options. */
if (alter_info->flags == 0)
{
- copied= deleted= 0;
- goto end_temporary;
+ my_snprintf(alter_ctx.tmp_name, sizeof(alter_ctx.tmp_name),
+ ER(ER_INSERT_INFO), 0L, 0L, 0L);
+ my_ok(thd, 0L, 0L, alter_ctx.tmp_name);
+ DBUG_RETURN(false);
}
/* We have to do full alter table. */
#ifdef WITH_PARTITION_STORAGE_ENGINE
- if (prep_alter_part_table(thd, table, alter_info, create_info, old_db_type,
- &partition_changed,
- db, table_name, path,
- &table_for_fast_alter_partition))
- goto err;
+ bool partition_changed= false;
+ bool fast_alter_partition= false;
+ {
+ if (prep_alter_part_table(thd, table, alter_info, create_info,
+ &alter_ctx, &partition_changed,
+ &fast_alter_partition))
+ {
+ DBUG_RETURN(true);
+ }
+ }
#endif
- /*
- If the old table had partitions and we are doing ALTER TABLE ...
- engine= <new_engine>, the new table must preserve the original
- partitioning. That means that the new engine is still the
- partitioning engine, not the engine specified in the parser.
- This is discovered in prep_alter_part_table, which in such case
- updates create_info->db_type.
- Now we need to update the stack copy of create_info->db_type,
- as otherwise we won't be able to correctly move the files of the
- temporary table to the result table files.
- */
- new_db_type= create_info->db_type;
-
- if (is_index_maintenance_unique (table, alter_info))
- need_copy_table= ALTER_TABLE_DATA_CHANGED;
-
- if (mysql_prepare_alter_table(thd, table, create_info, alter_info))
- goto err;
-
- /* Remove markers set for update_create_info */
- create_info->used_fields&= ~explicit_used_fields;
- if (need_copy_table == ALTER_TABLE_METADATA_ONLY)
- need_copy_table= alter_info->change_level;
+ if (mysql_prepare_alter_table(thd, table, create_info, alter_info,
+ &alter_ctx))
+ {
+ DBUG_RETURN(true);
+ }
- set_table_default_charset(thd, create_info, db);
+ set_table_default_charset(thd, create_info, alter_ctx.db);
promote_first_timestamp_column(&alter_info->create_list);
- if (thd->variables.old_alter_table
- || (table->s->db_type() != create_info->db_type)
#ifdef WITH_PARTITION_STORAGE_ENGINE
- || partition_changed
-#endif
- )
- need_copy_table= ALTER_TABLE_DATA_CHANGED;
- else
+ if (fast_alter_partition)
{
- enum_alter_table_change_level need_copy_table_res;
- /* Check how much the tables differ. */
- if (mysql_compare_tables(table, alter_info,
- create_info, order_num,
- &need_copy_table_res,
- &key_info_buffer,
- &index_drop_buffer, &index_drop_count,
- &index_add_buffer, &index_add_count,
- &candidate_key_count))
- goto err;
-
- DBUG_EXECUTE_IF("alter_table_only_metadata_change", {
- if (need_copy_table_res != ALTER_TABLE_METADATA_ONLY)
- goto err; });
- DBUG_EXECUTE_IF("alter_table_only_index_change", {
- if (need_copy_table_res != ALTER_TABLE_INDEX_CHANGED)
- goto err; });
-
- if (need_copy_table == ALTER_TABLE_METADATA_ONLY)
- need_copy_table= need_copy_table_res;
- }
-
- /*
- If there are index changes only, try to do them in-place. "Index
- changes only" means also that the handler for the table does not
- change. The table is open and locked. The handler can be accessed.
- */
- if (need_copy_table == ALTER_TABLE_INDEX_CHANGED)
- {
- int pk_changed= 0;
- ulong alter_flags= 0;
- ulong needed_inplace_with_read_flags= 0;
- ulong needed_inplace_flags= 0;
- KEY *key;
- uint *idx_p;
- uint *idx_end_p;
-
- alter_flags= table->file->alter_table_flags(alter_info->flags);
- DBUG_PRINT("info", ("alter_flags: %lu", alter_flags));
- /* Check dropped indexes. */
- for (idx_p= index_drop_buffer, idx_end_p= idx_p + index_drop_count;
- idx_p < idx_end_p;
- idx_p++)
- {
- key= table->key_info + *idx_p;
- DBUG_PRINT("info", ("index dropped: '%s'", key->name));
- if (key->flags & HA_NOSAME)
- {
- /*
- Unique key. Check for "PRIMARY".
- or if dropping last unique key
- */
- if ((uint) (key - table->key_info) == table->s->primary_key)
- {
- DBUG_PRINT("info", ("Dropping primary key"));
- /* Primary key. */
- needed_inplace_with_read_flags|= HA_INPLACE_DROP_PK_INDEX_NO_WRITE;
- needed_inplace_flags|= HA_INPLACE_DROP_PK_INDEX_NO_READ_WRITE;
- pk_changed++;
- candidate_key_count--;
- }
- else
- {
- KEY_PART_INFO *part_end= key->key_part + key->key_parts;
- bool is_candidate_key= true;
-
- /* Non-primary unique key. */
- needed_inplace_with_read_flags|=
- HA_INPLACE_DROP_UNIQUE_INDEX_NO_WRITE;
- needed_inplace_flags|= HA_INPLACE_DROP_UNIQUE_INDEX_NO_READ_WRITE;
-
- /*
- Check if all fields in key are declared
- NOT NULL and adjust candidate_key_count
- */
- for (KEY_PART_INFO *key_part= key->key_part;
- key_part < part_end;
- key_part++)
- is_candidate_key=
- (is_candidate_key &&
- (! table->field[key_part->fieldnr-1]->maybe_null()));
- if (is_candidate_key)
- candidate_key_count--;
- }
- }
- else
- {
- /* Non-unique key. */
- needed_inplace_with_read_flags|= HA_INPLACE_DROP_INDEX_NO_WRITE;
- needed_inplace_flags|= HA_INPLACE_DROP_INDEX_NO_READ_WRITE;
- }
- }
- no_pk= ((table->s->primary_key == MAX_KEY) ||
- (needed_inplace_with_read_flags &
- HA_INPLACE_DROP_PK_INDEX_NO_WRITE));
- /* Check added indexes. */
- for (idx_p= index_add_buffer, idx_end_p= idx_p + index_add_count;
- idx_p < idx_end_p;
- idx_p++)
- {
- key= key_info_buffer + *idx_p;
- DBUG_PRINT("info", ("index added: '%s'", key->name));
- if (key->flags & HA_NOSAME)
- {
- /* Unique key */
-
- KEY_PART_INFO *part_end= key->key_part + key->key_parts;
- bool is_candidate_key= true;
-
- /*
- Check if all fields in key are declared
- NOT NULL
- */
- for (KEY_PART_INFO *key_part= key->key_part;
- key_part < part_end;
- key_part++)
- is_candidate_key=
- (is_candidate_key &&
- (! table->field[key_part->fieldnr]->maybe_null()));
-
- /*
- Check for "PRIMARY"
- or if adding first unique key
- defined on non-nullable fields
- */
-
- if ((!my_strcasecmp(system_charset_info,
- key->name, primary_key_name)) ||
- (no_pk && candidate_key_count == 0 && is_candidate_key))
- {
- DBUG_PRINT("info", ("Adding primary key"));
- /* Primary key. */
- needed_inplace_with_read_flags|= HA_INPLACE_ADD_PK_INDEX_NO_WRITE;
- needed_inplace_flags|= HA_INPLACE_ADD_PK_INDEX_NO_READ_WRITE;
- pk_changed++;
- no_pk= false;
- }
- else
- {
- /* Non-primary unique key. */
- needed_inplace_with_read_flags|= HA_INPLACE_ADD_UNIQUE_INDEX_NO_WRITE;
- needed_inplace_flags|= HA_INPLACE_ADD_UNIQUE_INDEX_NO_READ_WRITE;
- if (ignore)
- {
- /*
- If ignore is used, we have to remove all duplicate rows,
- which require a full table copy.
- */
- need_copy_table= ALTER_TABLE_DATA_CHANGED;
- pk_changed= 2; // Don't change need_copy_table
- break;
- }
- }
- }
- else
- {
- /* Non-unique key. */
- needed_inplace_with_read_flags|= HA_INPLACE_ADD_INDEX_NO_WRITE;
- needed_inplace_flags|= HA_INPLACE_ADD_INDEX_NO_READ_WRITE;
- }
+ /*
+ ALGORITHM and LOCK clauses are generally not allowed by the
+ parser for operations related to partitioning.
+ The exceptions are ALTER_PARTITION and ALTER_REMOVE_PARTITIONING.
+ For consistency, we report ER_ALTER_OPERATION_NOT_SUPPORTED here.
+ */
+ if (alter_info->requested_lock !=
+ Alter_info::ALTER_TABLE_LOCK_DEFAULT)
+ {
+ my_error(ER_ALTER_OPERATION_NOT_SUPPORTED_REASON, MYF(0),
+ "LOCK=NONE/SHARED/EXCLUSIVE",
+ ER(ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_PARTITION),
+ "LOCK=DEFAULT");
+ DBUG_RETURN(true);
}
-
- if ((candidate_key_count > 0) &&
- (needed_inplace_with_read_flags & HA_INPLACE_DROP_PK_INDEX_NO_WRITE))
+ else if (alter_info->requested_algorithm !=
+ Alter_info::ALTER_TABLE_ALGORITHM_DEFAULT)
{
- /*
- Dropped primary key when there is some other unique
- not null key that should be converted to primary key
- */
- needed_inplace_with_read_flags|= HA_INPLACE_ADD_PK_INDEX_NO_WRITE;
- needed_inplace_flags|= HA_INPLACE_ADD_PK_INDEX_NO_READ_WRITE;
- pk_changed= 2;
+ my_error(ER_ALTER_OPERATION_NOT_SUPPORTED_REASON, MYF(0),
+ "ALGORITHM=COPY/INPLACE",
+ ER(ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_PARTITION),
+ "ALGORITHM=DEFAULT");
+ DBUG_RETURN(true);
}
- DBUG_PRINT("info",
- ("needed_inplace_with_read_flags: 0x%lx, needed_inplace_flags: 0x%lx",
- needed_inplace_with_read_flags, needed_inplace_flags));
/*
- In-place add/drop index is possible only if
- the primary key is not added and dropped in the same statement.
- Otherwise we have to recreate the table.
- need_copy_table is no-zero at this place.
-
- Also, in-place is not possible if we add a primary key
- and drop another key in the same statement. If the drop fails,
- we will not be able to revert adding of primary key.
+ Upgrade from MDL_SHARED_UPGRADABLE to MDL_SHARED_NO_WRITE.
+ Afterwards it's safe to take the table level lock.
*/
- if ( pk_changed < 2 )
+ if (thd->mdl_context.upgrade_shared_lock(mdl_ticket, MDL_SHARED_NO_WRITE,
+ thd->variables.lock_wait_timeout)
+ || lock_tables(thd, table_list, alter_ctx.tables_opened, 0))
{
- if ((needed_inplace_with_read_flags & HA_INPLACE_ADD_PK_INDEX_NO_WRITE) &&
- index_drop_count > 0)
- {
- /*
- Do copy, not in-place ALTER.
- Avoid setting ALTER_TABLE_METADATA_ONLY.
- */
- }
- else if ((alter_flags & needed_inplace_with_read_flags) ==
- needed_inplace_with_read_flags)
- {
- /* All required in-place flags to allow concurrent reads are present. */
- need_copy_table= ALTER_TABLE_METADATA_ONLY;
- need_lock_for_indexes= FALSE;
- }
- else if ((alter_flags & needed_inplace_flags) == needed_inplace_flags)
- {
- /* All required in-place flags are present. */
- need_copy_table= ALTER_TABLE_METADATA_ONLY;
- }
+ DBUG_RETURN(true);
}
- DBUG_PRINT("info", ("need_copy_table: %u need_lock: %d",
- need_copy_table, need_lock_for_indexes));
- }
- if (need_copy_table == ALTER_TABLE_METADATA_ONLY)
- {
- char frm_name[FN_REFLEN+1];
- strxnmov(frm_name, sizeof(frm_name), path, reg_ext, NullS);
- /*
- C_ALTER_TABLE_FRM_ONLY can only be used if old frm exists.
- discovering frm-less engines cannot enjoy this optimization.
- */
- if (!my_access(frm_name, F_OK))
- create_table_mode= C_ALTER_TABLE_FRM_ONLY;
+ // In-place execution of ALTER TABLE for partitioning.
+ DBUG_RETURN(fast_alter_partition_table(thd, table, alter_info,
+ create_info, table_list,
+ alter_ctx.db,
+ alter_ctx.table_name));
}
+#endif
+ /*
+ Use copy algorithm if:
+ - old_alter_table system variable is set without in-place requested using
+ the ALGORITHM clause.
+ - Or if in-place is impossible for given operation.
+ - Changes to partitioning which were not handled by fast_alter_part_table()
+ needs to be handled using table copying algorithm unless the engine
+ supports auto-partitioning as such engines can do some changes
+ using in-place API.
+ */
+ if ((thd->variables.old_alter_table &&
+ alter_info->requested_algorithm !=
+ Alter_info::ALTER_TABLE_ALGORITHM_INPLACE)
+ || is_inplace_alter_impossible(table, create_info, alter_info)
#ifdef WITH_PARTITION_STORAGE_ENGINE
- if (table_for_fast_alter_partition)
+ || (partition_changed &&
+ !(table->s->db_type()->partition_flags() & HA_USE_AUTO_PARTITION))
+#endif
+ )
{
- DBUG_RETURN(fast_alter_partition_table(thd, table, alter_info,
- create_info, table_list,
- db, table_name,
- table_for_fast_alter_partition));
+ if (alter_info->requested_algorithm ==
+ Alter_info::ALTER_TABLE_ALGORITHM_INPLACE)
+ {
+ my_error(ER_ALTER_OPERATION_NOT_SUPPORTED, MYF(0),
+ "ALGORITHM=INPLACE", "ALGORITHM=COPY");
+ DBUG_RETURN(true);
+ }
+ alter_info->requested_algorithm= Alter_info::ALTER_TABLE_ALGORITHM_COPY;
}
-#endif
- my_snprintf(tmp_name, sizeof(tmp_name), "%s-%lx_%lx", tmp_file_prefix,
- current_pid, thd->thread_id);
- /* Safety fix for innodb */
- if (lower_case_table_names)
- my_casedn_str(files_charset_info, tmp_name);
+ /*
+ If the old table had partitions and we are doing ALTER TABLE ...
+ engine= <new_engine>, the new table must preserve the original
+ partitioning. This means that the new engine is still the
+ partitioning engine, not the engine specified in the parser.
+ This is discovered in prep_alter_part_table, which in such case
+ updates create_info->db_type.
+ It's therefore important that the assignment below is done
+ after prep_alter_part_table.
+ */
+ handlerton *new_db_type= create_info->db_type;
+ handlerton *old_db_type= table->s->db_type();
+ TABLE *new_table= NULL;
+ ha_rows copied=0,deleted=0;
/*
Handling of symlinked tables:
@@ -6963,292 +7985,346 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
Copy data.
Remove old table and symlinks.
*/
- if (!strcmp(db, new_db)) // Ignore symlink if db changed
+ char index_file[FN_REFLEN], data_file[FN_REFLEN];
+
+ if (!alter_ctx.is_database_changed())
{
if (create_info->index_file_name)
{
/* Fix index_file_name to have 'tmp_name' as basename */
- strmov(index_file, tmp_name);
+ strmov(index_file, alter_ctx.tmp_name);
create_info->index_file_name=fn_same(index_file,
- create_info->index_file_name,
- 1);
+ create_info->index_file_name,
+ 1);
}
if (create_info->data_file_name)
{
/* Fix data_file_name to have 'tmp_name' as basename */
- strmov(data_file, tmp_name);
+ strmov(data_file, alter_ctx.tmp_name);
create_info->data_file_name=fn_same(data_file,
- create_info->data_file_name,
- 1);
+ create_info->data_file_name,
+ 1);
}
}
else
+ {
+ /* Ignore symlink if db is changed. */
create_info->data_file_name=create_info->index_file_name=0;
+ }
DEBUG_SYNC(thd, "alter_table_before_create_table_no_lock");
+ /* We can abort alter table for any table type */
+ thd->abort_on_warning= !ignore && thd->is_strict_mode();
+
/*
- Create a table with a temporary name.
- With C_ALTER_TABLE_FRM_ONLY this creates a .frm file only and
- we keep the original row format.
+ Create .FRM for new version of table with a temporary name.
We don't log the statement, it will be logged later.
+
+ Keep information about keys in newly created table as it
+ will be used later to construct Alter_inplace_info object
+ and by fill_alter_inplace_info() call.
*/
- if (need_copy_table == ALTER_TABLE_METADATA_ONLY)
- {
- DBUG_ASSERT(create_table_mode == C_ALTER_TABLE_FRM_ONLY);
- /* Ensure we keep the original table format */
- create_info->table_options= ((create_info->table_options &
- ~HA_OPTION_PACK_RECORD) |
- (table->s->db_create_options &
- HA_OPTION_PACK_RECORD));
- }
+ KEY *key_info;
+ uint key_count;
+ /*
+ Remember if the new definition has new VARCHAR column;
+ create_info->varchar will be reset in create_table_impl()/
+ mysql_prepare_create_table().
+ */
+ bool varchar= create_info->varchar;
+ LEX_CUSTRING frm= {0,0};
+
tmp_disable_binlog(thd);
create_info->options|=HA_CREATE_TMP_ALTER;
- error= mysql_create_table_no_lock(thd, new_db, tmp_name, create_info,
- alter_info, NULL, create_table_mode);
+ error= create_table_impl(thd, alter_ctx.new_db, alter_ctx.tmp_name,
+ alter_ctx.get_tmp_path(),
+ create_info, alter_info,
+ C_ALTER_TABLE_FRM_ONLY, NULL,
+ &key_info, &key_count, &frm);
reenable_binlog(thd);
+ thd->abort_on_warning= false;
if (error)
- goto err;
+ {
+ my_free(const_cast<uchar*>(frm.str));
+ DBUG_RETURN(true);
+ }
+
+ /* Remember that we have not created table in storage engine yet. */
+ bool no_ha_table= true;
- /* Open the table if we need to copy the data. */
- DBUG_PRINT("info", ("need_copy_table: %u", need_copy_table));
- if (need_copy_table != ALTER_TABLE_METADATA_ONLY)
+ if (alter_info->requested_algorithm != Alter_info::ALTER_TABLE_ALGORITHM_COPY)
{
- if (table->s->tmp_table)
+ Alter_inplace_info ha_alter_info(create_info, alter_info,
+ key_info, key_count,
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ thd->work_part_info,
+#else
+ NULL,
+#endif
+ ignore);
+ TABLE *altered_table= NULL;
+ bool use_inplace= true;
+
+ /* Fill the Alter_inplace_info structure. */
+ if (fill_alter_inplace_info(thd, table, varchar, &ha_alter_info))
+ goto err_new_table_cleanup;
+
+ if (ha_alter_info.handler_flags == 0)
{
- Open_table_context ot_ctx(thd, (MYSQL_OPEN_IGNORE_FLUSH |
- MYSQL_OPEN_FOR_REPAIR |
- MYSQL_LOCK_IGNORE_TIMEOUT));
- TABLE_LIST tbl;
- bzero((void*) &tbl, sizeof(tbl));
- tbl.db= new_db;
- tbl.table_name= tbl.alias= tmp_name;
- /* Table is in thd->temporary_tables */
- (void) open_table(thd, &tbl, thd->mem_root, &ot_ctx);
- new_table= tbl.table;
+ /*
+ No-op ALTER, no need to call handler API functions.
+
+ If this code path is entered for an ALTER statement that
+ should not be a real no-op, new handler flags should be added
+ and fill_alter_inplace_info() adjusted.
+
+ Note that we can end up here if an ALTER statement has clauses
+      that cancel each other out (e.g. ADD/DROP of an identical index).
+
+ Also note that we ignore the LOCK clause here.
+
+ TODO don't create the frm in the first place
+ */
+ deletefrm(alter_ctx.get_tmp_path());
+ my_free(const_cast<uchar*>(frm.str));
+ goto end_inplace;
+ }
+
+ // We assume that the table is non-temporary.
+ DBUG_ASSERT(!table->s->tmp_table);
+
+ if (!(altered_table= open_table_uncached(thd, new_db_type,
+ alter_ctx.get_tmp_path(),
+ alter_ctx.new_db,
+ alter_ctx.tmp_name,
+ true, false)))
+ goto err_new_table_cleanup;
+
+ /* Set markers for fields in TABLE object for altered table. */
+ update_altered_table(ha_alter_info, altered_table);
+
+ /*
+ Mark all columns in 'altered_table' as used to allow usage
+ of its record[0] buffer and Field objects during in-place
+ ALTER TABLE.
+ */
+ altered_table->column_bitmaps_set_no_signal(&altered_table->s->all_set,
+ &altered_table->s->all_set);
+
+ // Ask storage engine whether to use copy or in-place
+ enum_alter_inplace_result inplace_supported=
+ table->file->check_if_supported_inplace_alter(altered_table,
+ &ha_alter_info);
+
+ switch (inplace_supported) {
+ case HA_ALTER_INPLACE_EXCLUSIVE_LOCK:
+ // If SHARED lock and no particular algorithm was requested, use COPY.
+ if (alter_info->requested_lock ==
+ Alter_info::ALTER_TABLE_LOCK_SHARED &&
+ alter_info->requested_algorithm ==
+ Alter_info::ALTER_TABLE_ALGORITHM_DEFAULT)
+ {
+ use_inplace= false;
+ }
+      // Otherwise, if a weaker lock was requested, report error.
+ else if (alter_info->requested_lock ==
+ Alter_info::ALTER_TABLE_LOCK_NONE ||
+ alter_info->requested_lock ==
+ Alter_info::ALTER_TABLE_LOCK_SHARED)
+ {
+ ha_alter_info.report_unsupported_error("LOCK=NONE/SHARED",
+ "LOCK=EXCLUSIVE");
+ close_temporary_table(thd, altered_table, true, false);
+ goto err_new_table_cleanup;
+ }
+ break;
+ case HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE:
+ case HA_ALTER_INPLACE_SHARED_LOCK:
+      // If a weaker lock was requested, report error.
+ if (alter_info->requested_lock ==
+ Alter_info::ALTER_TABLE_LOCK_NONE)
+ {
+ ha_alter_info.report_unsupported_error("LOCK=NONE", "LOCK=SHARED");
+ close_temporary_table(thd, altered_table, true, false);
+ goto err_new_table_cleanup;
+ }
+ break;
+ case HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE:
+ case HA_ALTER_INPLACE_NO_LOCK:
+ break;
+ case HA_ALTER_INPLACE_NOT_SUPPORTED:
+ // If INPLACE was requested, report error.
+ if (alter_info->requested_algorithm ==
+ Alter_info::ALTER_TABLE_ALGORITHM_INPLACE)
+ {
+ ha_alter_info.report_unsupported_error("ALGORITHM=INPLACE",
+ "ALGORITHM=COPY");
+ close_temporary_table(thd, altered_table, true, false);
+ goto err_new_table_cleanup;
+ }
+ // COPY with LOCK=NONE is not supported, no point in trying.
+ if (alter_info->requested_lock ==
+ Alter_info::ALTER_TABLE_LOCK_NONE)
+ {
+ ha_alter_info.report_unsupported_error("LOCK=NONE", "LOCK=SHARED");
+ close_temporary_table(thd, altered_table, true, false);
+ goto err_new_table_cleanup;
+ }
+ // Otherwise use COPY
+ use_inplace= false;
+ break;
+ case HA_ALTER_ERROR:
+ default:
+ close_temporary_table(thd, altered_table, true, false);
+ goto err_new_table_cleanup;
+ }
+
+ if (use_inplace)
+ {
+ my_free(const_cast<uchar*>(frm.str));
+ if (mysql_inplace_alter_table(thd, table_list, table,
+ altered_table,
+ &ha_alter_info,
+ inplace_supported, &target_mdl_request,
+ &alter_ctx))
+ {
+ DBUG_RETURN(true);
+ }
+
+ goto end_inplace;
}
else
{
- char path[FN_REFLEN + 1];
- /* table is a normal table: Create temporary table in same directory */
- build_table_filename(path, sizeof(path) - 1, new_db, tmp_name, "",
- FN_IS_TMP);
- /* Open our intermediate table. */
- new_table= open_table_uncached(thd, new_db_type, path,
- new_db, tmp_name, TRUE);
+ close_temporary_table(thd, altered_table, true, false);
+ }
+ }
+
+ /* ALTER TABLE using copy algorithm. */
+
+ /* Check if ALTER TABLE is compatible with foreign key definitions. */
+ if (fk_prepare_copy_alter_table(thd, table, alter_info, &alter_ctx))
+ goto err_new_table_cleanup;
+
+ if (!table->s->tmp_table)
+ {
+ // COPY algorithm doesn't work with concurrent writes.
+ if (alter_info->requested_lock == Alter_info::ALTER_TABLE_LOCK_NONE)
+ {
+ my_error(ER_ALTER_OPERATION_NOT_SUPPORTED_REASON, MYF(0),
+ "LOCK=NONE",
+ ER(ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_COPY),
+ "LOCK=SHARED");
+ goto err_new_table_cleanup;
}
- if (!new_table)
+
+ // If EXCLUSIVE lock is requested, upgrade already.
+ if (alter_info->requested_lock == Alter_info::ALTER_TABLE_LOCK_EXCLUSIVE &&
+ wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN))
goto err_new_table_cleanup;
+
/*
- Note: In case of MERGE table, we do not attach children. We do not
- copy data for MERGE tables. Only the children have data.
+ Otherwise upgrade to SHARED_NO_WRITE.
+ Note that under LOCK TABLES, we will already have SHARED_NO_READ_WRITE.
*/
+ if (alter_info->requested_lock != Alter_info::ALTER_TABLE_LOCK_EXCLUSIVE &&
+ thd->mdl_context.upgrade_shared_lock(mdl_ticket, MDL_SHARED_NO_WRITE,
+ thd->variables.lock_wait_timeout))
+ goto err_new_table_cleanup;
+
+ DEBUG_SYNC(thd, "alter_table_copy_after_lock_upgrade");
}
- /* Check if we can do the ALTER TABLE as online */
- if (require_online)
+ // It's now safe to take the table level lock.
+ if (lock_tables(thd, table_list, alter_ctx.tables_opened, 0))
+ goto err_new_table_cleanup;
+
{
- if (index_add_count || index_drop_count ||
- (new_table &&
- !(new_table->file->ha_table_flags() & HA_NO_COPY_ON_ALTER)))
- {
- my_error(ER_CANT_DO_ONLINE, MYF(0), "ALTER");
+ if (ha_create_table(thd, alter_ctx.get_tmp_path(),
+ alter_ctx.new_db, alter_ctx.tmp_name,
+ create_info, &frm))
goto err_new_table_cleanup;
+
+ /* Mark that we have created table in storage engine. */
+ no_ha_table= false;
+
+ if (create_info->tmp_table())
+ {
+ if (!open_table_uncached(thd, new_db_type,
+ alter_ctx.get_tmp_path(),
+ alter_ctx.new_db, alter_ctx.tmp_name,
+ true, true))
+ goto err_new_table_cleanup;
}
}
+
+ /* Open the table since we need to copy the data. */
+ if (table->s->tmp_table != NO_TMP_TABLE)
+ {
+ TABLE_LIST tbl;
+ tbl.init_one_table(alter_ctx.new_db, strlen(alter_ctx.new_db),
+ alter_ctx.tmp_name, strlen(alter_ctx.tmp_name),
+ alter_ctx.tmp_name, TL_READ_NO_INSERT);
+ /* Table is in thd->temporary_tables */
+ (void) open_temporary_table(thd, &tbl);
+ new_table= tbl.table;
+ }
+ else
+ {
+ /* table is a normal table: Create temporary table in same directory */
+ /* Open our intermediate table. */
+ new_table= open_table_uncached(thd, new_db_type, alter_ctx.get_tmp_path(),
+ alter_ctx.new_db, alter_ctx.tmp_name,
+ true, true);
+ }
+ if (!new_table)
+ goto err_new_table_cleanup;
+ /*
+ Note: In case of MERGE table, we do not attach children. We do not
+ copy data for MERGE tables. Only the children have data.
+ */
+
/* Copy the data if necessary. */
thd->count_cuted_fields= CHECK_FIELD_WARN; // calc cuted fields
thd->cuted_fields=0L;
- copied=deleted=0;
/*
We do not copy data for MERGE tables. Only the children have data.
MERGE tables have HA_NO_COPY_ON_ALTER set.
*/
- if (new_table && !(new_table->file->ha_table_flags() & HA_NO_COPY_ON_ALTER))
+ if (!(new_table->file->ha_table_flags() & HA_NO_COPY_ON_ALTER))
{
new_table->next_number_field=new_table->found_next_number_field;
+ THD_STAGE_INFO(thd, stage_copy_to_tmp_table);
DBUG_EXECUTE_IF("abort_copy_table", {
my_error(ER_LOCK_WAIT_TIMEOUT, MYF(0));
goto err_new_table_cleanup;
});
- error= copy_data_between_tables(thd, table, new_table,
- alter_info->create_list, ignore,
- order_num, order, &copied, &deleted,
- alter_info->keys_onoff,
- alter_info->error_if_not_empty);
+ if (copy_data_between_tables(thd, table, new_table,
+ alter_info->create_list, ignore,
+ order_num, order, &copied, &deleted,
+ alter_info->keys_onoff,
+ &alter_ctx))
+ goto err_new_table_cleanup;
}
else
{
- /*
- Ensure that we will upgrade the metadata lock if
- handler::enable/disable_indexes() will be called.
- */
- if (alter_info->keys_onoff != LEAVE_AS_IS ||
- table->file->indexes_are_disabled())
- need_lock_for_indexes= true;
- if (!table->s->tmp_table && need_lock_for_indexes &&
- wait_while_table_is_used(thd, table, extra_func,
- TDC_RT_REMOVE_NOT_OWN_AND_MARK_NOT_USABLE))
+ /* Should be MERGE only */
+ DBUG_ASSERT(new_table->file->ht->db_type == DB_TYPE_MRG_MYISAM);
+ if (!table->s->tmp_table &&
+ wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN))
goto err_new_table_cleanup;
THD_STAGE_INFO(thd, stage_manage_keys);
DEBUG_SYNC(thd, "alter_table_manage_keys");
alter_table_manage_keys(table, table->file->indexes_are_disabled(),
alter_info->keys_onoff);
- error= trans_commit_stmt(thd);
- if (trans_commit_implicit(thd))
- error= 1;
- /*
- If the table was locked, allow one to still run SHOW commands against it
- */
- if (table->s->protected_against_usage())
- table->s->allow_access_to_protected_table();
- }
- thd->count_cuted_fields= CHECK_FIELD_IGNORE;
-
- if (error)
- goto err_new_table_cleanup;
-
- /* If we did not need to copy, we might still need to add/drop indexes. */
- if (! new_table)
- {
- uint *key_numbers;
- uint *keyno_p;
- KEY *key_info;
- KEY *key;
- uint *idx_p;
- uint *idx_end_p;
- KEY_PART_INFO *key_part;
- KEY_PART_INFO *part_end;
- DBUG_PRINT("info", ("No new_table, checking add/drop index"));
-
- table->file->ha_prepare_for_alter();
- if (index_add_count)
- {
- /* The add_index() method takes an array of KEY structs. */
- key_info= (KEY*) thd->alloc(sizeof(KEY) * index_add_count);
- key= key_info;
- for (idx_p= index_add_buffer, idx_end_p= idx_p + index_add_count;
- idx_p < idx_end_p;
- idx_p++, key++)
- {
- /* Copy the KEY struct. */
- *key= key_info_buffer[*idx_p];
- /* Fix the key parts. */
- part_end= key->key_part + key->key_parts;
- for (key_part= key->key_part; key_part < part_end; key_part++)
- key_part->field= table->field[key_part->fieldnr];
- }
- /* Add the indexes. */
- if ((error= table->file->add_index(table, key_info, index_add_count,
- &add)))
- {
- /* Only report error if handler has not already reported an error */
- if (!thd->is_error())
- {
- /*
- Exchange the key_info for the error message. If we exchange
- key number by key name in the message later, we need correct info.
- */
- KEY *save_key_info= table->key_info;
- table->key_info= key_info;
- table->file->print_error(error, MYF(0));
- table->key_info= save_key_info;
- }
- goto err_new_table_cleanup;
- }
- pending_inplace_add_index= true;
- }
- /*end of if (index_add_count)*/
-
- if (index_drop_count)
- {
- /* Currently we must finalize add index if we also drop indexes */
- if (pending_inplace_add_index)
- {
- /* Committing index changes needs exclusive metadata lock. */
- DBUG_ASSERT(thd->mdl_context.is_lock_owner(MDL_key::TABLE,
- table_list->db,
- table_list->table_name,
- MDL_EXCLUSIVE));
- if ((error= table->file->final_add_index(add, true)))
- {
- table->file->print_error(error, MYF(0));
- goto err_new_table_cleanup;
- }
- pending_inplace_add_index= false;
- }
- /* The prepare_drop_index() method takes an array of key numbers. */
- key_numbers= (uint*) thd->alloc(sizeof(uint) * index_drop_count);
- keyno_p= key_numbers;
- /* Get the number of each key. */
- for (idx_p= index_drop_buffer, idx_end_p= idx_p + index_drop_count;
- idx_p < idx_end_p;
- idx_p++, keyno_p++)
- *keyno_p= *idx_p;
- /*
- Tell the handler to prepare for drop indexes.
- This re-numbers the indexes to get rid of gaps.
- */
- error= table->file->prepare_drop_index(table, key_numbers,
- index_drop_count);
- if (!error)
- {
- /* Tell the handler to finally drop the indexes. */
- error= table->file->final_drop_index(table);
- }
-
- if (error)
- {
- table->file->print_error(error, MYF(0));
- if (index_add_count) // Drop any new indexes added.
- {
- /*
- Temporarily set table-key_info to include information about the
- indexes added above that we now need to drop.
- */
- KEY *save_key_info= table->key_info;
- table->key_info= key_info_buffer;
- if ((error= table->file->prepare_drop_index(table, index_add_buffer,
- index_add_count)))
- table->file->print_error(error, MYF(0));
- else if ((error= table->file->final_drop_index(table)))
- table->file->print_error(error, MYF(0));
- table->key_info= save_key_info;
- }
-
- /*
- Mark this TABLE instance as stale to avoid
- out-of-sync index information.
- */
- table->m_needs_reopen= true;
- goto err_new_table_cleanup;
- }
- }
- /*end of if (index_drop_count)*/
-
- /*
- The final .frm file is already created as a temporary file
- and will be renamed to the original table name later.
- */
-
- /* Need to commit before a table is unlocked (NDB requirement). */
- DBUG_PRINT("info", ("Committing before unlocking table"));
if (trans_commit_stmt(thd) || trans_commit_implicit(thd))
goto err_new_table_cleanup;
}
- /*end of if (! new_table) for add/drop index*/
-
- DBUG_ASSERT(error == 0);
+ thd->count_cuted_fields= CHECK_FIELD_IGNORE;
if (table->s->tmp_table != NO_TMP_TABLE)
{
- /*
- In-place operations are not supported for temporary tables, so
- we don't have to call final_add_index() in this case. The assert
- verifies that in-place add index has not been done.
- */
- DBUG_ASSERT(!pending_inplace_add_index);
/* Close lock if this is a transactional table */
if (thd->lock)
{
@@ -7256,7 +8332,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
thd->locked_tables_mode != LTM_PRELOCKED_UNDER_LOCK_TABLES)
{
mysql_unlock_tables(thd, thd->lock);
- thd->lock=0;
+ thd->lock= NULL;
}
else
{
@@ -7268,14 +8344,16 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
}
}
/* Remove link to old table and rename the new one */
- close_temporary_table(thd, table, 1, 1);
+ close_temporary_table(thd, table, true, true);
/* Should pass the 'new_name' as we store table name in the cache */
- if (rename_temporary_table(thd, new_table, new_db, new_name))
+ if (rename_temporary_table(thd, new_table,
+ alter_ctx.new_db, alter_ctx.new_name))
goto err_new_table_cleanup;
/* We don't replicate alter table statement on temporary tables */
if (!thd->is_current_stmt_binlog_format_row() &&
- write_bin_log(thd, TRUE, thd->query(), thd->query_length()))
- DBUG_RETURN(TRUE);
+ write_bin_log(thd, true, thd->query(), thd->query_length()))
+ DBUG_RETURN(true);
+ my_free(const_cast<uchar*>(frm.str));
goto end_temporary;
}
@@ -7284,11 +8362,9 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
not delete it! Even altough MERGE tables do not have their children
attached here it is safe to call close_temporary_table().
*/
- if (new_table)
- {
- close_temporary_table(thd, new_table, 1, 0);
- new_table= 0;
- }
+ close_temporary_table(thd, new_table, true, false);
+ new_table= NULL;
+
DEBUG_SYNC(thd, "alter_table_before_rename_result_table");
/*
@@ -7309,243 +8385,164 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
*/
THD_STAGE_INFO(thd, stage_rename_result_table);
- my_snprintf(old_name, sizeof(old_name), "%s2-%lx-%lx", tmp_file_prefix,
- current_pid, thd->thread_id);
- if (lower_case_table_names)
- my_casedn_str(files_charset_info, old_name);
- if (wait_while_table_is_used(thd, table, HA_EXTRA_PREPARE_FOR_RENAME,
- TDC_RT_REMOVE_NOT_OWN_AND_MARK_NOT_USABLE))
- {
- if (pending_inplace_add_index)
- {
- pending_inplace_add_index= false;
- table->file->final_add_index(add, false);
- }
- // Mark this TABLE instance as stale to avoid out-of-sync index information.
- table->m_needs_reopen= true;
+ if (wait_while_table_is_used(thd, table, HA_EXTRA_PREPARE_FOR_RENAME))
goto err_new_table_cleanup;
- }
- if (pending_inplace_add_index)
- {
- pending_inplace_add_index= false;
- DBUG_EXECUTE_IF("alter_table_rollback_new_index", {
- table->file->final_add_index(add, false);
- my_error(ER_UNKNOWN_ERROR, MYF(0));
- goto err_new_table_cleanup;
- });
- if ((error= table->file->final_add_index(add, true)))
- {
- table->file->print_error(error, MYF(0));
- goto err_new_table_cleanup;
- }
- }
close_all_tables_for_name(thd, table->s,
- new_name != table_name || new_db != db ?
- HA_EXTRA_PREPARE_FOR_RENAME :
- HA_EXTRA_NOT_USED);
-
- error=0;
- table_list->table= table= 0; /* Safety */
- save_old_db_type= old_db_type;
+ alter_ctx.is_table_renamed() ?
+ HA_EXTRA_PREPARE_FOR_RENAME:
+ HA_EXTRA_NOT_USED,
+ NULL);
+ table_list->table= table= NULL; /* Safety */
+ my_free(const_cast<uchar*>(frm.str));
/*
- This leads to the storage engine (SE) not being notified for renames in
- mysql_rename_table(), because we just juggle with the FRM and nothing
- more. If we have an intermediate table, then we notify the SE that
- it should become the actual table. Later, we will recycle the old table.
- However, in case of ALTER TABLE RENAME there might be no intermediate
- table. This is when the old and new tables are compatible, according to
- mysql_compare_table(). Then, we need one additional call to
- mysql_rename_table() with flag NO_FRM_RENAME, which does nothing else but
- actual rename in the SE and the FRM is not touched. Note that, if the
- table is renamed and the SE is also changed, then an intermediate table
- is created and the additional call will not take place.
+ Rename the old table to temporary name to have a backup in case
+ anything goes wrong while renaming the new table.
*/
-
- if (new_name != table_name || new_db != db)
+ char backup_name[32];
+ my_snprintf(backup_name, sizeof(backup_name), "%s2-%lx-%lx", tmp_file_prefix,
+ current_pid, thd->thread_id);
+ if (lower_case_table_names)
+ my_casedn_str(files_charset_info, backup_name);
+ if (mysql_rename_table(old_db_type, alter_ctx.db, alter_ctx.table_name,
+ alter_ctx.db, backup_name, FN_TO_IS_TMP))
{
- LEX_STRING new_db_name= { new_db, strlen(new_db) };
- LEX_STRING new_table_name= { new_name, strlen(new_name) };
- (void) rename_table_in_stat_tables(thd, &old_db_name, &old_table_name,
- &new_db_name, &new_table_name);
+ // Rename to temporary name failed, delete the new table, abort ALTER.
+ (void) quick_rm_table(thd, new_db_type, alter_ctx.new_db,
+ alter_ctx.tmp_name, FN_IS_TMP);
+ goto err_with_mdl;
}
- if (need_copy_table == ALTER_TABLE_METADATA_ONLY)
- {
- DBUG_ASSERT(new_db_type == old_db_type);
- /* This type cannot happen in regular ALTER. */
- new_db_type= old_db_type= NULL;
- }
- if (mysql_rename_table(old_db_type, db, table_name, db, old_name,
- FN_TO_IS_TMP))
- {
- error=1;
- (void) quick_rm_table(new_db_type, new_db, tmp_name, FN_IS_TMP);
- }
- else if (mysql_rename_table(new_db_type, new_db, tmp_name, new_db,
- new_alias, FN_FROM_IS_TMP))
+ // Rename the new table to the correct name.
+ if (mysql_rename_table(new_db_type, alter_ctx.new_db, alter_ctx.tmp_name,
+ alter_ctx.new_db, alter_ctx.new_alias,
+ FN_FROM_IS_TMP))
{
- /* Try to get everything back. */
- error= 1;
- (void) quick_rm_table(new_db_type, new_db, tmp_name, FN_IS_TMP);
- (void) mysql_rename_table(old_db_type, db, old_name, db, alias,
- FN_FROM_IS_TMP);
+ // Rename failed, delete the temporary table.
+ (void) quick_rm_table(thd, new_db_type, alter_ctx.new_db,
+ alter_ctx.tmp_name, FN_IS_TMP);
+ // Restore the backup of the original table to the old name.
+ (void) mysql_rename_table(old_db_type, alter_ctx.db, backup_name,
+ alter_ctx.db, alter_ctx.alias, FN_FROM_IS_TMP);
+ goto err_with_mdl;
}
- else if (new_name != table_name || new_db != db)
+
+ // Check if we renamed the table and if so update trigger files.
+ if (alter_ctx.is_table_renamed())
{
- if (need_copy_table == ALTER_TABLE_METADATA_ONLY &&
- mysql_rename_table(save_old_db_type, db, table_name, new_db,
- new_alias, NO_FRM_RENAME))
+ if (Table_triggers_list::change_table_name(thd,
+ alter_ctx.db,
+ alter_ctx.alias,
+ alter_ctx.table_name,
+ alter_ctx.new_db,
+ alter_ctx.new_alias))
{
- /* Try to get everything back. */
- error= 1;
- (void) quick_rm_table(new_db_type, new_db, new_alias, 0);
- (void) mysql_rename_table(old_db_type, db, old_name, db, alias,
- FN_FROM_IS_TMP);
- }
- else if (Table_triggers_list::change_table_name(thd, db, alias,
- table_name, new_db,
- new_alias))
- {
- /* Try to get everything back. */
- error= 1;
- (void) quick_rm_table(new_db_type, new_db, new_alias, 0);
- (void) mysql_rename_table(old_db_type, db, old_name, db,
- alias, FN_FROM_IS_TMP);
- /*
- If we were performing "fast"/in-place ALTER TABLE we also need
- to restore old name of table in storage engine as a separate
- step, as the above rename affects .FRM only.
- */
- if (need_copy_table == ALTER_TABLE_METADATA_ONLY)
- {
- (void) mysql_rename_table(save_old_db_type, new_db, new_alias,
- db, table_name, NO_FRM_RENAME);
- }
+ // Rename succeeded, delete the new table.
+ (void) quick_rm_table(thd, new_db_type,
+ alter_ctx.new_db, alter_ctx.new_alias, 0);
+ // Restore the backup of the original table to the old name.
+ (void) mysql_rename_table(old_db_type, alter_ctx.db, backup_name,
+ alter_ctx.db, alter_ctx.alias, FN_FROM_IS_TMP);
+ goto err_with_mdl;
}
+ rename_table_in_stat_tables(thd, alter_ctx.db,alter_ctx.alias,
+ alter_ctx.new_db, alter_ctx.new_alias);
}
- if (! error)
- (void) quick_rm_table(old_db_type, db, old_name, FN_IS_TMP);
-
- if (error)
- {
- /* This shouldn't happen. But let us play it safe. */
- goto err_with_mdl;
- }
-
- if (need_copy_table == ALTER_TABLE_METADATA_ONLY)
+ // ALTER TABLE succeeded, delete the backup of the old table.
+ if (quick_rm_table(thd, old_db_type, alter_ctx.db, backup_name, FN_IS_TMP))
{
/*
- Now we have to inform handler that new .FRM file is in place.
- To do this we need to obtain a handler object for it.
- NO need to tamper with MERGE tables. The real open is done later.
+ The fact that deletion of the backup failed is not critical
+ error, but still worth reporting as it might indicate serious
+ problem with server.
*/
- Open_table_context ot_ctx(thd, MYSQL_OPEN_REOPEN);
- TABLE_LIST temp_table_list;
- TABLE_LIST *t_table_list;
- if (new_name != table_name || new_db != db)
- {
- temp_table_list.init_one_table(new_db, strlen(new_db),
- new_name, strlen(new_name),
- new_name, TL_READ_NO_INSERT);
- temp_table_list.mdl_request.ticket= target_mdl_request.ticket;
- t_table_list= &temp_table_list;
- }
- else
- {
- /*
- Under LOCK TABLES, we have a different mdl_lock_ticket
- points to a different instance than the one set initially
- to request the lock.
- */
- table_list->mdl_request.ticket= mdl_ticket;
- t_table_list= table_list;
- }
- if (open_table(thd, t_table_list, thd->mem_root, &ot_ctx))
- {
- goto err_with_mdl;
- }
-
- /* Tell the handler that a new frm file is in place. */
- error= t_table_list->table->file->ha_create_partitioning_metadata(path, NULL,
- CHF_INDEX_FLAG);
+ goto err_with_mdl;
+ }
- DBUG_ASSERT(thd->open_tables == t_table_list->table);
- close_thread_table(thd, &thd->open_tables);
- t_table_list->table= NULL;
+end_inplace:
- if (error)
- goto err_with_mdl;
- }
if (thd->locked_tables_list.reopen_tables(thd))
goto err_with_mdl;
THD_STAGE_INFO(thd, stage_end);
- DBUG_EXECUTE_IF("sleep_alter_before_main_binlog", my_sleep(6000000););
DEBUG_SYNC(thd, "alter_table_before_main_binlog");
ha_binlog_log_query(thd, create_info->db_type, LOGCOM_ALTER_TABLE,
thd->query(), thd->query_length(),
- db, table_name);
+ alter_ctx.db, alter_ctx.table_name);
DBUG_ASSERT(!(mysql_bin_log.is_open() &&
thd->is_current_stmt_binlog_format_row() &&
(create_info->tmp_table())));
- if (write_bin_log(thd, TRUE, thd->query(), thd->query_length()))
- DBUG_RETURN(TRUE);
+ if (write_bin_log(thd, true, thd->query(), thd->query_length()))
+ DBUG_RETURN(true);
- table_list->table=0; // For query cache
- query_cache_invalidate3(thd, table_list, 0);
+ if (ha_check_storage_engine_flag(old_db_type, HTON_FLUSH_AFTER_RENAME))
+ {
+ /*
+ For the alter table to be properly flushed to the logs, we
+ have to open the new table. If not, we get a problem on server
+ shutdown. But we do not need to attach MERGE children.
+ */
+ TABLE *t_table;
+ t_table= open_table_uncached(thd, new_db_type, alter_ctx.get_new_path(),
+ alter_ctx.new_db, alter_ctx.new_name,
+ false, true);
+ if (t_table)
+ intern_close_table(t_table);
+ else
+ sql_print_warning("Could not open table %s.%s after rename\n",
+ alter_ctx.new_db, alter_ctx.table_name);
+ ha_flush_logs(old_db_type);
+ }
+ table_list->table= NULL; // For query cache
+ query_cache_invalidate3(thd, table_list, false);
if (thd->locked_tables_mode == LTM_LOCK_TABLES ||
thd->locked_tables_mode == LTM_PRELOCKED_UNDER_LOCK_TABLES)
{
- if ((new_name != table_name || new_db != db))
+ if (alter_ctx.is_table_renamed())
thd->mdl_context.release_all_locks_for_name(mdl_ticket);
else
- mdl_ticket->downgrade_exclusive_lock(MDL_SHARED_NO_READ_WRITE);
+ mdl_ticket->downgrade_lock(MDL_SHARED_NO_READ_WRITE);
}
end_temporary:
- my_snprintf(tmp_name, sizeof(tmp_name), ER(ER_INSERT_INFO),
+ my_snprintf(alter_ctx.tmp_name, sizeof(alter_ctx.tmp_name),
+ ER(ER_INSERT_INFO),
(ulong) (copied + deleted), (ulong) deleted,
- (ulong) thd->warning_info->statement_warn_count());
- my_ok(thd, copied + deleted, 0L, tmp_name);
- DBUG_RETURN(FALSE);
+ (ulong) thd->get_stmt_da()->current_statement_warn_count());
+ my_ok(thd, copied + deleted, 0L, alter_ctx.tmp_name);
+ DBUG_RETURN(false);
err_new_table_cleanup:
+ my_free(const_cast<uchar*>(frm.str));
if (new_table)
{
/* close_temporary_table() frees the new_table pointer. */
- close_temporary_table(thd, new_table, 1, 1);
+ close_temporary_table(thd, new_table, true, true);
}
else
- (void) quick_rm_table(new_db_type, new_db, tmp_name,
- create_table_mode == C_ALTER_TABLE_FRM_ONLY ?
- FN_IS_TMP | FRM_ONLY : FN_IS_TMP);
+ (void) quick_rm_table(thd, new_db_type,
+ alter_ctx.new_db, alter_ctx.tmp_name,
+ (FN_IS_TMP | (no_ha_table ? NO_HA_TABLE : 0)));
-err:
-#ifdef WITH_PARTITION_STORAGE_ENGINE
- /* If prep_alter_part_table created an intermediate table, destroy it. */
- if (table_for_fast_alter_partition)
- close_temporary(table_for_fast_alter_partition, 1, 0);
-#endif /* WITH_PARTITION_STORAGE_ENGINE */
/*
No default value was provided for a DATE/DATETIME field, the
current sql_mode doesn't allow the '0000-00-00' value and
the table to be altered isn't empty.
Report error here.
*/
- if (alter_info->error_if_not_empty &&
- thd->warning_info->current_row_for_warning())
+ if (alter_ctx.error_if_not_empty &&
+ thd->get_stmt_da()->current_row_for_warning())
{
const char *f_val= 0;
enum enum_mysql_timestamp_type t_type= MYSQL_TIMESTAMP_DATE;
- switch (alter_info->datetime_field->sql_type)
+ switch (alter_ctx.datetime_field->sql_type)
{
case MYSQL_TYPE_DATE:
case MYSQL_TYPE_NEWDATE:
@@ -7562,14 +8559,14 @@ err:
DBUG_ASSERT(0);
}
bool save_abort_on_warning= thd->abort_on_warning;
- thd->abort_on_warning= TRUE;
- make_truncated_value_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ thd->abort_on_warning= true;
+ make_truncated_value_warning(thd, Sql_condition::WARN_LEVEL_WARN,
f_val, strlength(f_val), t_type,
- alter_info->datetime_field->field_name);
+ alter_ctx.datetime_field->field_name);
thd->abort_on_warning= save_abort_on_warning;
}
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(true);
err_with_mdl:
/*
@@ -7580,13 +8577,10 @@ err_with_mdl:
*/
thd->locked_tables_list.unlink_all_closed_tables(thd, NULL, 0);
thd->mdl_context.release_all_locks_for_name(mdl_ticket);
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(true);
}
-/* Copy all rows from one table to another */
-
-
/**
Prepare the transaction for the alter table's copy phase.
@@ -7635,16 +8629,14 @@ bool mysql_trans_commit_alter_copy_data(THD *thd)
static int
-copy_data_between_tables(THD *thd, TABLE *from,TABLE *to,
- List<Create_field> &create,
- bool ignore,
+copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
+ List<Create_field> &create, bool ignore,
uint order_num, ORDER *order,
- ha_rows *copied,
- ha_rows *deleted,
- enum enum_enable_or_disable keys_onoff,
- bool error_if_not_empty)
+ ha_rows *copied, ha_rows *deleted,
+ Alter_info::enum_enable_or_disable keys_onoff,
+ Alter_table_ctx *alter_ctx)
{
- int error= 1, errpos= 0;
+ int error= 1;
Copy_field *copy= NULL, *copy_end;
ha_rows found_count= 0, delete_count= 0;
uint length= 0;
@@ -7658,8 +8650,6 @@ copy_data_between_tables(THD *thd, TABLE *from,TABLE *to,
bool auto_increment_field_copied= 0;
ulonglong save_sql_mode= thd->variables.sql_mode;
ulonglong prev_insert_id, time_to_report_progress;
- List_iterator<Create_field> it(create);
- Create_field *def;
Field **dfield_ptr= to->default_field;
DBUG_ENTER("copy_data_between_tables");
@@ -7667,16 +8657,14 @@ copy_data_between_tables(THD *thd, TABLE *from,TABLE *to,
thd_progress_init(thd, 2 + test(order));
if (mysql_trans_prepare_alter_copy_data(thd))
- goto err;
- errpos=1;
+ DBUG_RETURN(-1);
if (!(copy= new Copy_field[to->s->fields]))
- goto err; /* purecov: inspected */
+ DBUG_RETURN(-1); /* purecov: inspected */
/* We need external lock before we can disable/enable keys */
if (to->file->ha_external_lock(thd, F_WRLCK))
- goto err;
- errpos= 2;
+ DBUG_RETURN(-1);
alter_table_manage_keys(to, from->file->indexes_are_disabled(), keys_onoff);
@@ -7686,8 +8674,9 @@ copy_data_between_tables(THD *thd, TABLE *from,TABLE *to,
from->file->info(HA_STATUS_VARIABLE);
to->file->ha_start_bulk_insert(from->file->stats.records,
ignore ? 0 : HA_CREATE_UNIQUE_INDEX_BY_SORT);
- errpos= 3;
+ List_iterator<Create_field> it(create);
+ Create_field *def;
copy_end=copy;
to->s->default_fields= 0;
for (Field **ptr=to->field ; *ptr ; ptr++)
@@ -7736,7 +8725,7 @@ copy_data_between_tables(THD *thd, TABLE *from,TABLE *to,
my_snprintf(warn_buff, sizeof(warn_buff),
"ORDER BY ignored as there is a user-defined clustered index"
" in the table '%-.192s'", from->s->table_name.str);
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
warn_buff);
}
else
@@ -7769,11 +8758,13 @@ copy_data_between_tables(THD *thd, TABLE *from,TABLE *to,
to->use_all_columns();
to->mark_virtual_columns_for_write(TRUE);
if (init_read_record(&info, thd, from, (SQL_SELECT *) 0, 1, 1, FALSE))
+ {
+ error= 1;
goto err;
- errpos= 4;
- if (ignore)
+ }
+ if (ignore && !alter_ctx->fk_error_if_delete_row)
to->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
- thd->warning_info->reset_current_row_for_warning();
+ thd->get_stmt_da()->reset_current_row_for_warning();
restore_record(to, s->default_values); // Create empty record
thd->progress.max_counter= from->file->records();
@@ -7797,7 +8788,7 @@ copy_data_between_tables(THD *thd, TABLE *from,TABLE *to,
}
/* Return error if source table isn't empty. */
- if (error_if_not_empty)
+ if (alter_ctx->error_if_not_empty)
{
error= 1;
break;
@@ -7831,38 +8822,62 @@ copy_data_between_tables(THD *thd, TABLE *from,TABLE *to,
to->auto_increment_field_not_null= FALSE;
if (error)
{
- if (!ignore ||
- to->file->is_fatal_error(error, HA_CHECK_DUP))
+ if (to->file->is_fatal_error(error, HA_CHECK_DUP))
{
- if (!to->file->is_fatal_error(error, HA_CHECK_DUP))
- {
- uint key_nr= to->file->get_dup_key(error);
- if ((int) key_nr >= 0)
- {
- const char *err_msg= ER(ER_DUP_ENTRY_WITH_KEY_NAME);
- if (key_nr == 0 &&
- (to->key_info[0].key_part[0].field->flags &
- AUTO_INCREMENT_FLAG))
- err_msg= ER(ER_DUP_ENTRY_AUTOINCREMENT_CASE);
- to->file->print_keydup_error(key_nr, err_msg, MYF(0));
- break;
- }
- }
-
- to->file->print_error(error,MYF(0));
+ /* Not a duplicate key error. */
+ to->file->print_error(error, MYF(0));
break;
}
- to->file->restore_auto_increment(prev_insert_id);
- delete_count++;
+ else
+ {
+ /* Duplicate key error. */
+ if (alter_ctx->fk_error_if_delete_row)
+ {
+ /*
+ We are trying to omit a row from the table which serves as parent
+ in a foreign key. This might have broken referential integrity so
+ emit an error. Note that we can't ignore this error even if we are
+ executing ALTER IGNORE TABLE. IGNORE allows to skip rows, but
+ doesn't allow to break unique or foreign key constraints,
+ */
+ my_error(ER_FK_CANNOT_DELETE_PARENT, MYF(0),
+ alter_ctx->fk_error_id,
+ alter_ctx->fk_error_table);
+ break;
+ }
+
+ if (ignore)
+ {
+ /* This ALTER IGNORE TABLE. Simply skip row and continue. */
+ to->file->restore_auto_increment(prev_insert_id);
+ delete_count++;
+ }
+ else
+ {
+ /* Ordinary ALTER TABLE. Report duplicate key error. */
+ uint key_nr= to->file->get_dup_key(error);
+ if ((int) key_nr >= 0)
+ {
+ const char *err_msg= ER(ER_DUP_ENTRY_WITH_KEY_NAME);
+ if (key_nr == 0 &&
+ (to->key_info[0].key_part[0].field->flags &
+ AUTO_INCREMENT_FLAG))
+ err_msg= ER(ER_DUP_ENTRY_AUTOINCREMENT_CASE);
+ print_keydup_error(to, key_nr == MAX_KEY ? NULL :
+ &to->key_info[key_nr],
+ err_msg, MYF(0));
+ }
+ else
+ to->file->print_error(error, MYF(0));
+ break;
+ }
+ }
}
else
found_count++;
- thd->warning_info->inc_current_row_for_warning();
+ thd->get_stmt_da()->inc_current_row_for_warning();
}
-
-err:
- if (errpos >= 4)
- end_read_record(&info);
+ end_read_record(&info);
free_io_cache(from);
delete [] copy;
@@ -7874,22 +8889,23 @@ err:
/* We are going to drop the temporary table */
to->file->extra(HA_EXTRA_PREPARE_FOR_DROP);
}
- if (errpos >= 3 && to->file->ha_end_bulk_insert() && error <= 0)
+ if (to->file->ha_end_bulk_insert() && error <= 0)
{
to->file->print_error(my_errno,MYF(0));
error= 1;
}
to->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
- if (errpos >= 1 && mysql_trans_commit_alter_copy_data(thd))
+ if (mysql_trans_commit_alter_copy_data(thd))
error= 1;
+ err:
thd->variables.sql_mode= save_sql_mode;
thd->abort_on_warning= 0;
*copied= found_count;
*deleted=delete_count;
to->file->ha_release_auto_increment();
- if (errpos >= 2 && to->file->ha_external_lock(thd,F_UNLCK))
+ if (to->file->ha_external_lock(thd,F_UNLCK))
error=1;
if (error < 0 && to->file->extra(HA_EXTRA_PREPARE_FOR_RENAME))
error= 1;
@@ -7909,20 +8925,14 @@ err:
RETURN
Like mysql_alter_table().
*/
+
bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list)
{
HA_CREATE_INFO create_info;
Alter_info alter_info;
-
DBUG_ENTER("mysql_recreate_table");
DBUG_ASSERT(!table_list->next_global);
- /*
- table_list->table has been closed and freed. Do not reference
- uninitialized data. open_tables() could fail.
- */
- table_list->table= NULL;
- /* Same applies to MDL ticket. */
- table_list->mdl_request.ticket= NULL;
+
/* Set lock type which is appropriate for ALTER TABLE. */
table_list->lock_type= TL_READ_NO_INSERT;
/* Same applies to MDL request. */
@@ -7932,10 +8942,11 @@ bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list)
create_info.row_type=ROW_TYPE_NOT_USED;
create_info.default_table_charset=default_charset_info;
/* Force alter table to recreate table */
- alter_info.flags= (ALTER_CHANGE_COLUMN | ALTER_RECREATE);
+ alter_info.flags= (Alter_info::ALTER_CHANGE_COLUMN |
+ Alter_info::ALTER_RECREATE);
DBUG_RETURN(mysql_alter_table(thd, NullS, NullS, &create_info,
table_list, &alter_info, 0,
- (ORDER *) 0, 0, 0));
+ (ORDER *) 0, 0));
}
@@ -7950,23 +8961,48 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables,
field_list.push_back(item = new Item_empty_string("Table", NAME_LEN*2));
item->maybe_null= 1;
- field_list.push_back(item= new Item_int("Checksum", (longlong) 1,
+ field_list.push_back(item= new Item_int("Checksum",
+ (longlong) 1,
MY_INT64_NUM_DECIMAL_DIGITS));
item->maybe_null= 1;
if (protocol->send_result_set_metadata(&field_list,
Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
DBUG_RETURN(TRUE);
+ /*
+ Close all temporary tables which were pre-open to simplify
+ privilege checking. Clear all references to closed tables.
+ */
+ close_thread_tables(thd);
+ for (table= tables; table; table= table->next_local)
+ table->table= NULL;
+
/* Open one table after the other to keep lock time as short as possible. */
for (table= tables; table; table= table->next_local)
{
char table_name[SAFE_NAME_LEN*2+2];
TABLE *t;
+ TABLE_LIST *save_next_global;
strxmov(table_name, table->db ,".", table->table_name, NullS);
- t= table->table= open_n_lock_single_table(thd, table, TL_READ, 0);
- thd->clear_error(); // these errors shouldn't get client
+ /* Remember old 'next' pointer and break the list. */
+ save_next_global= table->next_global;
+ table->next_global= NULL;
+ table->lock_type= TL_READ;
+ /* Allow to open real tables only. */
+ table->required_type= FRMTYPE_TABLE;
+
+ if (open_temporary_tables(thd, table) ||
+ open_and_lock_tables(thd, table, FALSE, 0))
+ {
+ t= NULL;
+ thd->clear_error(); // these errors shouldn't get client
+ }
+ else
+ t= table->table;
+
+ table->next_global= save_next_global;
protocol->prepare_for_resend();
protocol->store(table_name, system_charset_info);
@@ -8067,11 +9103,6 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables,
if (! thd->in_sub_stmt)
trans_rollback_stmt(thd);
close_thread_tables(thd);
- /*
- Don't release metadata locks, this will be done at
- statement end.
- */
- table->table=0; // For query cache
}
if (protocol->write())
goto err;
@@ -8113,7 +9144,7 @@ static bool check_engine(THD *thd, const char *db_name,
if (req_engine && req_engine != *new_engine)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_WARN_USING_OTHER_HANDLER,
ER(ER_WARN_USING_OTHER_HANDLER),
ha_resolve_storage_engine_name(*new_engine),
diff --git a/sql/sql_table.h b/sql/sql_table.h
index 8bc6865decd..6bd111cae6d 100644
--- a/sql/sql_table.h
+++ b/sql/sql_table.h
@@ -21,6 +21,7 @@
#include "my_sys.h" // pthread_mutex_t
class Alter_info;
+class Alter_table_ctx;
class Create_field;
struct TABLE_LIST;
class THD;
@@ -33,7 +34,6 @@ typedef struct st_key KEY;
typedef struct st_key_cache KEY_CACHE;
typedef struct st_lock_param_type ALTER_PARTITION_PARAM_TYPE;
typedef struct st_order ORDER;
-class Alter_table_change_level;
enum ddl_log_entry_code
{
@@ -65,10 +65,19 @@ enum ddl_log_action_code
DDL_LOG_REPLACE_ACTION:
Rename an entity after removing the previous entry with the
new name, that is replace this entry.
+ DDL_LOG_EXCHANGE_ACTION:
+ Exchange two entities by renaming them a -> tmp, b -> a, tmp -> b.
*/
DDL_LOG_DELETE_ACTION = 'd',
DDL_LOG_RENAME_ACTION = 'r',
- DDL_LOG_REPLACE_ACTION = 's'
+ DDL_LOG_REPLACE_ACTION = 's',
+ DDL_LOG_EXCHANGE_ACTION = 'e'
+};
+
+enum enum_ddl_log_exchange_phase {
+ EXCH_PHASE_NAME_TO_TEMP= 0,
+ EXCH_PHASE_FROM_TO_NAME= 1,
+ EXCH_PHASE_TEMP_TO_FROM= 2
};
@@ -77,6 +86,7 @@ typedef struct st_ddl_log_entry
const char *name;
const char *from_name;
const char *handler_name;
+ const char *tmp_name;
uint next_entry;
uint entry_pos;
enum ddl_log_entry_code entry_type;
@@ -115,11 +125,15 @@ enum enum_explain_filename_mode
#define WFRM_KEEP_SHARE 8
/* Flags for conversion functions. */
-#define FN_FROM_IS_TMP (1 << 0)
-#define FN_TO_IS_TMP (1 << 1)
-#define FN_IS_TMP (FN_FROM_IS_TMP | FN_TO_IS_TMP)
-#define NO_FRM_RENAME (1 << 2)
-#define FRM_ONLY (1 << 3)
+static const uint FN_FROM_IS_TMP= 1 << 0;
+static const uint FN_TO_IS_TMP= 1 << 1;
+static const uint FN_IS_TMP= FN_FROM_IS_TMP | FN_TO_IS_TMP;
+static const uint NO_FRM_RENAME= 1 << 2;
+static const uint FRM_ONLY= 1 << 3;
+/** Don't remove table in engine. Remove only .FRM and maybe .PAR files. */
+static const uint NO_HA_TABLE= 1 << 4;
+/** Don't resolve MySQL's fake "foo.sym" symbolic directory names. */
+static const uint SKIP_SYMDIR_ACCESS= 1 << 5;
uint filename_to_tablename(const char *from, char *to, uint to_length
#ifndef DBUG_OFF
@@ -133,6 +147,7 @@ uint build_table_filename(char *buff, size_t bufflen, const char *db,
const char *table, const char *ext, uint flags);
uint build_table_shadow_filename(char *buff, size_t bufflen,
ALTER_PARTITION_PARAM_TYPE *lpt);
+uint build_tmptable_filename(THD* thd, char *buff, size_t bufflen);
bool mysql_create_table(THD *thd, TABLE_LIST *create_table,
HA_CREATE_INFO *create_info,
Alter_info *alter_info);
@@ -183,27 +198,30 @@ handler *mysql_create_frm_image(THD *thd,
const char *db, const char *table_name,
HA_CREATE_INFO *create_info,
Alter_info *alter_info,
- int create_table_mode, LEX_CUSTRING *frm);
+ int create_table_mode,
+ KEY **key_info,
+ uint *key_count,
+ LEX_CUSTRING *frm);
+
+int mysql_discard_or_import_tablespace(THD *thd,
+ TABLE_LIST *table_list,
+ bool discard);
+
bool mysql_prepare_alter_table(THD *thd, TABLE *table,
HA_CREATE_INFO *create_info,
- Alter_info *alter_info);
+ Alter_info *alter_info,
+ Alter_table_ctx *alter_ctx);
bool mysql_trans_prepare_alter_copy_data(THD *thd);
bool mysql_trans_commit_alter_copy_data(THD *thd);
bool mysql_alter_table(THD *thd, char *new_db, char *new_name,
HA_CREATE_INFO *create_info,
TABLE_LIST *table_list,
Alter_info *alter_info,
- uint order_num, ORDER *order, bool ignore,
- bool require_online);
+ uint order_num, ORDER *order, bool ignore);
bool mysql_compare_tables(TABLE *table,
Alter_info *alter_info,
HA_CREATE_INFO *create_info,
- uint order_num,
- Alter_table_change_level *need_copy_table,
- KEY **key_info_buffer,
- uint **index_drop_buffer, uint *index_drop_count,
- uint **index_add_buffer, uint *index_add_count,
- uint *candidate_key_count);
+ bool *metadata_equal);
bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list);
bool mysql_create_like_table(THD *thd, TABLE_LIST *table,
TABLE_LIST *src_table,
@@ -222,7 +240,7 @@ bool mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists,
int mysql_rm_table_no_locks(THD *thd, TABLE_LIST *tables, bool if_exists,
bool drop_temporary, bool drop_view,
bool log_query);
-bool quick_rm_table(handlerton *base,const char *db,
+bool quick_rm_table(THD *thd, handlerton *base, const char *db,
const char *table_name, uint flags);
void close_cached_table(THD *thd, TABLE *table);
void sp_prepare_create_field(THD *thd, Create_field *sql_field);
@@ -246,6 +264,9 @@ bool sync_ddl_log();
void release_ddl_log();
void execute_ddl_log_recovery();
bool execute_ddl_log_entry(THD *thd, uint first_entry);
+bool validate_comment_length(THD *thd, const char *comment_str,
+ size_t *comment_len, uint max_len,
+ uint err_code, const char *comment_name);
bool check_duplicate_warning(THD *thd, char *msg, ulong length);
template<typename T> class List;
diff --git a/sql/sql_tablespace.cc b/sql/sql_tablespace.cc
index 3f6daf7a9ec..48eeb94f7c9 100644
--- a/sql/sql_tablespace.cc
+++ b/sql/sql_tablespace.cc
@@ -35,7 +35,7 @@ int mysql_alter_tablespace(THD *thd, st_alter_tablespace *ts_info)
{
hton= ha_default_handlerton(thd);
if (ts_info->storage_engine != 0)
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARN_USING_OTHER_HANDLER,
ER(ER_WARN_USING_OTHER_HANDLER),
hton_name(hton)->str,
@@ -65,7 +65,7 @@ int mysql_alter_tablespace(THD *thd, st_alter_tablespace *ts_info)
}
else
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_ILLEGAL_HA_CREATE_OPTION,
ER(ER_ILLEGAL_HA_CREATE_OPTION),
hton_name(hton)->str,
diff --git a/sql/sql_test.cc b/sql/sql_test.cc
index 93b35b4918f..867d49808e1 100644
--- a/sql/sql_test.cc
+++ b/sql/sql_test.cc
@@ -92,7 +92,7 @@ static void print_cached_tables(void)
{
share= (TABLE_SHARE*) my_hash_element(&table_def_cache, idx);
- I_P_List_iterator<TABLE, TABLE_share> it(share->used_tables);
+ TABLE_SHARE::TABLE_list::Iterator it(share->used_tables);
while ((entry= it++))
{
printf("%-14.14s %-32s%6ld%8ld%6d %s\n",
diff --git a/sql/sql_time.cc b/sql/sql_time.cc
index a67768d4c34..f4612ec517e 100644
--- a/sql/sql_time.cc
+++ b/sql/sql_time.cc
@@ -222,7 +222,7 @@ check_date_with_warn(const MYSQL_TIME *ltime, ulonglong fuzzy_date,
if (check_date(ltime, fuzzy_date, &unused))
{
ErrConvTime str(ltime);
- make_truncated_value_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ make_truncated_value_warning(current_thd, Sql_condition::WARN_LEVEL_WARN,
&str, ts_type, 0);
return true;
}
@@ -309,15 +309,13 @@ str_to_datetime_with_warn(CHARSET_INFO *cs,
{
MYSQL_TIME_STATUS status;
THD *thd= current_thd;
- bool ret_val= str_to_datetime(cs, str, length, l_time,
- (flags | (sql_mode_for_dates(thd))),
- &status);
+ bool ret_val= str_to_datetime(cs, str, length, l_time, flags, &status);
if (ret_val || status.warnings)
- make_truncated_value_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ make_truncated_value_warning(thd, Sql_condition::WARN_LEVEL_WARN,
str, length, flags & TIME_TIME_ONLY ?
MYSQL_TIMESTAMP_TIME : l_time->time_type, NullS);
DBUG_EXECUTE_IF("str_to_datetime_warn",
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_YES, str););
return ret_val;
}
@@ -360,7 +358,7 @@ static bool number_to_time_with_warn(bool neg, ulonglong nr, ulong sec_part,
if (res < 0 || (was_cut && (fuzzydate & TIME_NO_ZERO_IN_DATE)))
{
make_truncated_value_warning(current_thd,
- MYSQL_ERROR::WARN_LEVEL_WARN, str,
+ Sql_condition::WARN_LEVEL_WARN, str,
res < 0 ? MYSQL_TIMESTAMP_ERROR
: mysql_type_to_time_type(f_type),
field_name);
@@ -814,7 +812,7 @@ const char *get_date_time_format_str(KNOWN_DATE_TIME_FORMAT *format,
}
void make_truncated_value_warning(THD *thd,
- MYSQL_ERROR::enum_warning_level level,
+ Sql_condition::enum_warning_level level,
const ErrConv *sval,
timestamp_type time_type,
const char *field_name)
@@ -839,7 +837,7 @@ void make_truncated_value_warning(THD *thd,
cs->cset->snprintf(cs, warn_buff, sizeof(warn_buff),
ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD),
type_str, sval->ptr(), field_name,
- (ulong) thd->warning_info->current_row_for_warning());
+ (ulong) thd->get_stmt_da()->current_row_for_warning());
else
{
if (time_type > MYSQL_TIMESTAMP_ERROR)
@@ -968,7 +966,7 @@ bool date_add_interval(MYSQL_TIME *ltime, interval_type int_type,
return 0; // Ok
invalid_date:
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_DATETIME_FUNCTION_OVERFLOW,
ER(ER_DATETIME_FUNCTION_OVERFLOW),
ltime->time_type == MYSQL_TIMESTAMP_TIME ?
diff --git a/sql/sql_time.h b/sql/sql_time.h
index cf029f143b3..47b300d51cc 100644
--- a/sql/sql_time.h
+++ b/sql/sql_time.h
@@ -19,7 +19,7 @@
#include "my_global.h" /* ulong */
#include "my_time.h"
#include "mysql_time.h" /* timestamp_type */
-#include "sql_error.h" /* MYSQL_ERROR */
+#include "sql_error.h" /* Sql_condition */
#include "structs.h" /* INTERVAL */
typedef enum enum_mysql_timestamp_type timestamp_type;
@@ -48,13 +48,14 @@ bool int_to_datetime_with_warn(longlong value, MYSQL_TIME *ltime,
ulonglong fuzzydate,
const char *name);
-void make_truncated_value_warning(THD *thd, MYSQL_ERROR::enum_warning_level level,
+void make_truncated_value_warning(THD *thd,
+ Sql_condition::enum_warning_level level,
const ErrConv *str_val,
timestamp_type time_type,
const char *field_name);
static inline void make_truncated_value_warning(THD *thd,
- MYSQL_ERROR::enum_warning_level level, const char *str_val,
+ Sql_condition::enum_warning_level level, const char *str_val,
uint str_length, timestamp_type time_type,
const char *field_name)
{
diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc
index 022c4ff4ea5..bc4986bebee 100644
--- a/sql/sql_trigger.cc
+++ b/sql/sql_trigger.cc
@@ -157,7 +157,7 @@ Trigger_creation_ctx::create(THD *thd,
if (invalid_creation_ctx)
{
push_warning_printf(thd,
- MYSQL_ERROR::WARN_LEVEL_WARN,
+ Sql_condition::WARN_LEVEL_WARN,
ER_TRG_INVALID_CREATION_CTX,
ER(ER_TRG_INVALID_CREATION_CTX),
(const char *) db_name,
@@ -329,9 +329,9 @@ public:
virtual bool handle_condition(THD *thd,
uint sql_errno,
const char* sqlstate,
- MYSQL_ERROR::enum_warning_level level,
+ Sql_condition::enum_warning_level level,
const char* message,
- MYSQL_ERROR ** cond_hdl)
+ Sql_condition ** cond_hdl)
{
if (sql_errno != EE_OUTOFMEMORY &&
sql_errno != ER_OUT_OF_RESOURCES)
@@ -561,7 +561,7 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create)
if (result)
goto end;
- close_all_tables_for_name(thd, table->s, HA_EXTRA_NOT_USED);
+ close_all_tables_for_name(thd, table->s, HA_EXTRA_NOT_USED, NULL);
/*
Reopen the table if we were under LOCK TABLES.
Ignore the return value for now. It's better to
@@ -588,7 +588,7 @@ end:
with the implicit commit.
*/
if (thd->locked_tables_mode && tables && lock_upgrade_done)
- mdl_ticket->downgrade_exclusive_lock(MDL_SHARED_NO_READ_WRITE);
+ mdl_ticket->downgrade_lock(MDL_SHARED_NO_READ_WRITE);
/* Restore the query table list. Used only for drop trigger. */
if (!create)
@@ -799,7 +799,7 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables,
lex->definer->user.str))
{
push_warning_printf(thd,
- MYSQL_ERROR::WARN_LEVEL_NOTE,
+ Sql_condition::WARN_LEVEL_NOTE,
ER_NO_SUCH_USER,
ER(ER_NO_SUCH_USER),
lex->definer->user.str,
@@ -1274,7 +1274,7 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
DBUG_RETURN(1); // EOM
}
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_TRG_NO_CREATION_CTX,
ER(ER_TRG_NO_CREATION_CTX),
(const char*) db,
@@ -1458,7 +1458,7 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
warning here.
*/
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_TRG_NO_DEFINER, ER(ER_TRG_NO_DEFINER),
(const char*) db,
(const char*) sp->m_name.str);
@@ -1731,7 +1731,7 @@ bool add_table_for_trigger(THD *thd,
if (if_exists)
{
push_warning_printf(thd,
- MYSQL_ERROR::WARN_LEVEL_NOTE,
+ Sql_condition::WARN_LEVEL_NOTE,
ER_TRG_DOES_NOT_EXIST,
ER(ER_TRG_DOES_NOT_EXIST));
@@ -2211,6 +2211,37 @@ add_tables_and_routines_for_triggers(THD *thd,
/**
+ Check if any of the marked fields are used in the trigger.
+
+ @param used_fields Bitmap over fields to check
+ @param event_type Type of event triggers for which we are going to inspect
+ @param action_time Type of trigger action time we are going to inspect
+*/
+
+bool Table_triggers_list::is_fields_updated_in_trigger(MY_BITMAP *used_fields,
+ trg_event_type event_type,
+ trg_action_time_type action_time)
+{
+ Item_trigger_field *trg_field;
+ sp_head *sp= bodies[event_type][action_time];
+ DBUG_ASSERT(used_fields->n_bits == trigger_table->s->fields);
+
+ for (trg_field= sp->m_trg_table_fields.first; trg_field;
+ trg_field= trg_field->next_trg_field)
+ {
+ /* We cannot check fields which does not present in table. */
+ if (trg_field->field_idx != (uint)-1)
+ {
+ if (bitmap_is_set(used_fields, trg_field->field_idx) &&
+ trg_field->get_settable_routine_parameter())
+ return true;
+ }
+ }
+ return false;
+}
+
+
+/**
Mark fields of subject table which we read/set in its triggers
as such.
@@ -2302,7 +2333,7 @@ Handle_old_incorrect_sql_modes_hook::process_unknown_string(char *&unknown_key,
DBUG_PRINT("info", ("sql_modes affected by BUG#14090 detected"));
push_warning_printf(current_thd,
- MYSQL_ERROR::WARN_LEVEL_NOTE,
+ Sql_condition::WARN_LEVEL_NOTE,
ER_OLD_FILE_FORMAT,
ER(ER_OLD_FILE_FORMAT),
(char *)path, "TRIGGER");
@@ -2343,7 +2374,7 @@ process_unknown_string(char *&unknown_key, uchar* base, MEM_ROOT *mem_root,
DBUG_PRINT("info", ("trigger_table affected by BUG#15921 detected"));
push_warning_printf(current_thd,
- MYSQL_ERROR::WARN_LEVEL_NOTE,
+ Sql_condition::WARN_LEVEL_NOTE,
ER_OLD_FILE_FORMAT,
ER(ER_OLD_FILE_FORMAT),
(char *)path, "TRIGGER");
diff --git a/sql/sql_trigger.h b/sql/sql_trigger.h
index 47b1d19ae54..52892550d35 100644
--- a/sql/sql_trigger.h
+++ b/sql/sql_trigger.h
@@ -207,6 +207,10 @@ public:
Query_tables_list *prelocking_ctx,
TABLE_LIST *table_list);
+ bool is_fields_updated_in_trigger(MY_BITMAP *used_fields,
+ trg_event_type event_type,
+ trg_action_time_type action_time);
+
private:
bool prepare_record1_accessors(TABLE *table);
LEX_STRING* change_table_name_in_trignames(const char *old_db_name,
diff --git a/sql/sql_truncate.cc b/sql/sql_truncate.cc
index 19ce553f5ce..9cd984a6663 100644
--- a/sql/sql_truncate.cc
+++ b/sql/sql_truncate.cc
@@ -18,9 +18,8 @@
#include "sql_class.h" // THD
#include "sql_base.h" // open_and_lock_tables
#include "sql_table.h" // write_bin_log
-#include "sql_handler.h" // mysql_ha_rm_tables
#include "datadict.h" // dd_recreate_table()
-#include "lock.h" // MYSQL_OPEN_TEMPORARY_ONLY
+#include "lock.h" // MYSQL_OPEN_* flags
#include "sql_acl.h" // DROP_ACL
#include "sql_parse.h" // check_one_table_access()
#include "sql_truncate.h"
@@ -186,12 +185,12 @@ fk_truncate_illegal_if_parent(THD *thd, TABLE *table)
@retval > 0 Error code.
*/
-int Truncate_statement::handler_truncate(THD *thd, TABLE_LIST *table_ref,
- bool is_tmp_table)
+int Sql_cmd_truncate_table::handler_truncate(THD *thd, TABLE_LIST *table_ref,
+ bool is_tmp_table)
{
int error= 0;
uint flags;
- DBUG_ENTER("Truncate_statement::handler_truncate");
+ DBUG_ENTER("Sql_cmd_truncate_table::handler_truncate");
/*
Can't recreate, the engine must mechanically delete all rows
@@ -199,9 +198,7 @@ int Truncate_statement::handler_truncate(THD *thd, TABLE_LIST *table_ref,
*/
/* If it is a temporary table, no need to take locks. */
- if (is_tmp_table)
- flags= MYSQL_OPEN_TEMPORARY_ONLY;
- else
+ if (!is_tmp_table)
{
/* We don't need to load triggers. */
DBUG_ASSERT(table_ref->trg_event_map == 0);
@@ -216,7 +213,7 @@ int Truncate_statement::handler_truncate(THD *thd, TABLE_LIST *table_ref,
the MDL lock taken above and otherwise there is no way to
wait for FLUSH TABLES in deadlock-free fashion.
*/
- flags= MYSQL_OPEN_IGNORE_FLUSH | MYSQL_OPEN_SKIP_TEMPORARY;
+ flags= MYSQL_OPEN_IGNORE_FLUSH;
/*
Even though we have an MDL lock on the table here, we don't
pass MYSQL_OPEN_HAS_MDL_LOCK to open_and_lock_tables
@@ -270,7 +267,7 @@ static bool recreate_temporary_table(THD *thd, TABLE *table)
share->normalized_path.str);
if (open_table_uncached(thd, table_type, share->path.str, share->db.str,
- share->table_name.str, TRUE))
+ share->table_name.str, true, true))
{
error= FALSE;
thd->thread_specific_used= TRUE;
@@ -298,11 +295,11 @@ static bool recreate_temporary_table(THD *thd, TABLE *table)
@retval TRUE Error.
*/
-bool Truncate_statement::lock_table(THD *thd, TABLE_LIST *table_ref,
- bool *hton_can_recreate)
+bool Sql_cmd_truncate_table::lock_table(THD *thd, TABLE_LIST *table_ref,
+ bool *hton_can_recreate)
{
TABLE *table= NULL;
- DBUG_ENTER("Truncate_statement::lock_table");
+ DBUG_ENTER("Sql_cmd_truncate_table::lock_table");
/* Lock types are set in the parser. */
DBUG_ASSERT(table_ref->lock_type == TL_WRITE);
@@ -337,8 +334,7 @@ bool Truncate_statement::lock_table(THD *thd, TABLE_LIST *table_ref,
/* Acquire an exclusive lock. */
DBUG_ASSERT(table_ref->next_global == NULL);
if (lock_table_names(thd, table_ref, NULL,
- thd->variables.lock_wait_timeout,
- MYSQL_OPEN_SKIP_TEMPORARY))
+ thd->variables.lock_wait_timeout, 0))
DBUG_RETURN(TRUE);
handlerton *hton;
@@ -379,7 +375,7 @@ bool Truncate_statement::lock_table(THD *thd, TABLE_LIST *table_ref,
m_ticket_downgrade= table->mdl_ticket;
/* Close if table is going to be recreated. */
if (*hton_can_recreate)
- close_all_tables_for_name(thd, table->s, HA_EXTRA_NOT_USED);
+ close_all_tables_for_name(thd, table->s, HA_EXTRA_NOT_USED, NULL);
}
else
{
@@ -406,29 +402,31 @@ bool Truncate_statement::lock_table(THD *thd, TABLE_LIST *table_ref,
@retval TRUE Error.
*/
-bool Truncate_statement::truncate_table(THD *thd, TABLE_LIST *table_ref)
+bool Sql_cmd_truncate_table::truncate_table(THD *thd, TABLE_LIST *table_ref)
{
int error;
- TABLE *table;
bool binlog_stmt;
- DBUG_ENTER("Truncate_statement::truncate_table");
+ DBUG_ENTER("Sql_cmd_truncate_table::truncate_table");
+
+ DBUG_ASSERT((!table_ref->table) ||
+ (table_ref->table && table_ref->table->s));
/* Initialize, or reinitialize in case of reexecution (SP). */
m_ticket_downgrade= NULL;
- /* Remove table from the HANDLER's hash. */
- mysql_ha_rm_tables(thd, table_ref);
-
/* If it is a temporary table, no need to take locks. */
- if ((table= find_temporary_table(thd, table_ref)))
+ if (is_temporary_table(table_ref))
{
+ TABLE *tmp_table= table_ref->table;
+
/* In RBR, the statement is not binlogged if the table is temporary. */
binlog_stmt= !thd->is_current_stmt_binlog_format_row();
/* Note that a temporary table cannot be partitioned. */
- if (ha_check_storage_engine_flag(table->s->db_type(), HTON_CAN_RECREATE))
+ if (ha_check_storage_engine_flag(tmp_table->s->db_type(),
+ HTON_CAN_RECREATE))
{
- if ((error= recreate_temporary_table(thd, table)))
+ if ((error= recreate_temporary_table(thd, tmp_table)))
binlog_stmt= FALSE; /* No need to binlog failed truncate-by-recreate. */
DBUG_ASSERT(! thd->transaction.stmt.modified_non_trans_table);
@@ -508,7 +506,7 @@ bool Truncate_statement::truncate_table(THD *thd, TABLE_LIST *table_ref)
to a shared one.
*/
if (m_ticket_downgrade)
- m_ticket_downgrade->downgrade_exclusive_lock(MDL_SHARED_NO_READ_WRITE);
+ m_ticket_downgrade->downgrade_lock(MDL_SHARED_NO_READ_WRITE);
DBUG_RETURN(error);
}
@@ -522,11 +520,11 @@ bool Truncate_statement::truncate_table(THD *thd, TABLE_LIST *table_ref)
@return FALSE on success.
*/
-bool Truncate_statement::execute(THD *thd)
+bool Sql_cmd_truncate_table::execute(THD *thd)
{
bool res= TRUE;
TABLE_LIST *first_table= thd->lex->select_lex.table_list.first;
- DBUG_ENTER("Truncate_statement::execute");
+ DBUG_ENTER("Sql_cmd_truncate_table::execute");
if (check_one_table_access(thd, DROP_ACL, first_table))
DBUG_RETURN(res);
diff --git a/sql/sql_truncate.h b/sql/sql_truncate.h
index 95a2f35df4f..061c561b8ea 100644
--- a/sql/sql_truncate.h
+++ b/sql/sql_truncate.h
@@ -19,9 +19,9 @@ class THD;
struct TABLE_LIST;
/**
- Truncate_statement represents the TRUNCATE statement.
+ Sql_cmd_truncate_table represents the TRUNCATE statement.
*/
-class Truncate_statement : public Sql_statement
+class Sql_cmd_truncate_table : public Sql_cmd
{
private:
/* Set if a lock must be downgraded after truncate is done. */
@@ -29,14 +29,12 @@ private:
public:
/**
- Constructor, used to represent a ALTER TABLE statement.
- @param lex the LEX structure for this statement.
+ Constructor, used to represent a TRUNCATE statement.
*/
- Truncate_statement(LEX *lex)
- : Sql_statement(lex)
+ Sql_cmd_truncate_table()
{}
- virtual ~Truncate_statement()
+ virtual ~Sql_cmd_truncate_table()
{}
/**
@@ -46,6 +44,11 @@ public:
*/
bool execute(THD *thd);
+ virtual enum_sql_command sql_command_code() const
+ {
+ return SQLCOM_TRUNCATE;
+ }
+
protected:
/** Handle locking a base table for truncate. */
bool lock_table(THD *, TABLE_LIST *, bool *);
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index 3cf7f576cbf..a835c182c86 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -714,7 +714,7 @@ bool st_select_lex_unit::exec()
Stop execution of the remaining queries in the UNIONS, and produce
the current result.
*/
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_QUERY_EXCEEDED_ROWS_EXAMINED_LIMIT,
ER(ER_QUERY_EXCEEDED_ROWS_EXAMINED_LIMIT),
thd->accessed_rows_and_keys,
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 5edccd4e937..b91215bcedd 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -138,7 +138,7 @@ static bool check_fields(THD *thd, List<Item> &items)
while ((item= it++))
{
- if (!(field= item->filed_for_view_update()))
+ if (!(field= item->field_for_view_update()))
{
/* item has name, because it comes from VIEW SELECT list */
my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), item->name);
@@ -841,7 +841,7 @@ int mysql_update(THD *thd,
error= 1;
break;
}
- thd->warning_info->inc_current_row_for_warning();
+ thd->get_stmt_da()->inc_current_row_for_warning();
if (thd->is_error())
{
error= 1;
@@ -949,7 +949,7 @@ int mysql_update(THD *thd,
char buff[MYSQL_ERRMSG_SIZE];
my_snprintf(buff, sizeof(buff), ER(ER_UPDATE_INFO), (ulong) found,
(ulong) updated,
- (ulong) thd->warning_info->statement_warn_count());
+ (ulong) thd->get_stmt_da()->current_statement_warn_count());
my_ok(thd, (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated,
id, buff);
DBUG_PRINT("info",("%ld records updated", (long) updated));
@@ -1134,7 +1134,7 @@ bool unsafe_key_update(List<TABLE_LIST> leaves, table_map tables_for_update)
// The primary key can cover multiple columns
KEY key_info= table1->key_info[table1->s->primary_key];
KEY_PART_INFO *key_part= key_info.key_part;
- KEY_PART_INFO *key_part_end= key_part + key_info.key_parts;
+ KEY_PART_INFO *key_part_end= key_part + key_info.user_defined_key_parts;
for (;key_part != key_part_end; ++key_part)
{
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index 01bbd5ecb9f..e0a567420ba 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -350,7 +350,7 @@ bool create_view_precheck(THD *thd, TABLE_LIST *tables, TABLE_LIST *view,
while ((item= it++))
{
Item_field *field;
- if ((field= item->filed_for_view_update()))
+ if ((field= item->field_for_view_update()))
{
/*
any_privileges may be reset later by the Item_field::set_field
@@ -432,7 +432,8 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views,
lex->link_first_table_back(view, link_to_local);
view->open_type= OT_BASE_ONLY;
- if (open_and_lock_tables(thd, lex->query_tables, TRUE, 0))
+ if (open_temporary_tables(thd, lex->query_tables) ||
+ open_and_lock_tables(thd, lex->query_tables, TRUE, 0))
{
view= lex->unlink_first_table(&link_to_local);
res= TRUE;
@@ -511,7 +512,7 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views,
if (!is_acl_user(lex->definer->host.str,
lex->definer->user.str))
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_NO_SUCH_USER,
ER(ER_NO_SUCH_USER),
lex->definer->user.str,
@@ -636,7 +637,7 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views,
Item *item;
while ((item= it++))
{
- Item_field *fld= item->filed_for_view_update();
+ Item_field *fld= item->field_for_view_update();
uint priv= (get_column_grant(thd, &view->grant, view->db,
view->table_name, item->name) &
VIEW_ANY_ACL);
@@ -887,7 +888,7 @@ static int mysql_register_view(THD *thd, TABLE_LIST *view,
if (lex->create_view_algorithm == VIEW_ALGORITHM_MERGE &&
!lex->can_be_merged())
{
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_VIEW_MERGE,
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_WARN_VIEW_MERGE,
ER(ER_WARN_VIEW_MERGE));
lex->create_view_algorithm= DTYPE_ALGORITHM_UNDEFINED;
}
@@ -1165,7 +1166,7 @@ bool mysql_make_view(THD *thd, File_parser *parser, TABLE_LIST *table,
DBUG_ASSERT(!table->definer.host.str &&
!table->definer.user.length &&
!table->definer.host.length);
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_VIEW_FRM_NO_USER, ER(ER_VIEW_FRM_NO_USER),
table->db, table->table_name);
get_default_definer(thd, &table->definer);
@@ -1566,7 +1567,7 @@ bool mysql_make_view(THD *thd, File_parser *parser, TABLE_LIST *table,
lex->select_lex.order_list.elements &&
!table->select_lex->master_unit()->is_union())
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_VIEW_ORDERBY_IGNORED,
ER(ER_VIEW_ORDERBY_IGNORED),
table->db, table->table_name);
@@ -1664,8 +1665,7 @@ bool mysql_drop_view(THD *thd, TABLE_LIST *views, enum_drop_mode drop_mode)
DBUG_RETURN(TRUE);
}
- if (lock_table_names(thd, views, 0, thd->variables.lock_wait_timeout,
- MYSQL_OPEN_SKIP_TEMPORARY))
+ if (lock_table_names(thd, views, 0, thd->variables.lock_wait_timeout, 0))
DBUG_RETURN(TRUE);
for (view= views; view; view= view->next_local)
@@ -1680,7 +1680,7 @@ bool mysql_drop_view(THD *thd, TABLE_LIST *views, enum_drop_mode drop_mode)
my_snprintf(name, sizeof(name), "%s.%s", view->db, view->table_name);
if (thd->lex->check_exists)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_BAD_TABLE_ERROR, ER(ER_BAD_TABLE_ERROR),
name);
continue;
@@ -1815,7 +1815,7 @@ bool check_key_in_view(THD *thd, TABLE_LIST *view)
if ((key_info->flags & (HA_NOSAME | HA_NULL_PART_KEY)) == HA_NOSAME)
{
KEY_PART_INFO *key_part= key_info->key_part;
- KEY_PART_INFO *key_part_end= key_part + key_info->key_parts;
+ KEY_PART_INFO *key_part_end= key_part + key_info->user_defined_key_parts;
/* check that all key parts are used */
for (;;)
@@ -1824,7 +1824,7 @@ bool check_key_in_view(THD *thd, TABLE_LIST *view)
for (k= trans; k < end_of_trans; k++)
{
Item_field *field;
- if ((field= k->item->filed_for_view_update()) &&
+ if ((field= k->item->field_for_view_update()) &&
field->field == key_part->field)
break;
}
@@ -1846,7 +1846,7 @@ bool check_key_in_view(THD *thd, TABLE_LIST *view)
for (fld= trans; fld < end_of_trans; fld++)
{
Item_field *field;
- if ((field= fld->item->filed_for_view_update()) &&
+ if ((field= fld->item->field_for_view_update()) &&
field->field == *field_ptr)
break;
}
@@ -1860,7 +1860,7 @@ bool check_key_in_view(THD *thd, TABLE_LIST *view)
if (thd->variables.updatable_views_with_limit)
{
/* update allowed, but issue warning */
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_WARN_VIEW_WITHOUT_KEY, ER(ER_WARN_VIEW_WITHOUT_KEY));
DBUG_RETURN(FALSE);
}
@@ -1900,7 +1900,7 @@ bool insert_view_fields(THD *thd, List<Item> *list, TABLE_LIST *view)
for (Field_translator *entry= trans; entry < trans_end; entry++)
{
Item_field *fld;
- if ((fld= entry->item->filed_for_view_update()))
+ if ((fld= entry->item->field_for_view_update()))
list->push_back(fld);
else
{
diff --git a/sql/sql_view.h b/sql/sql_view.h
index 2e9c77252e8..abe95c63e6e 100644
--- a/sql/sql_view.h
+++ b/sql/sql_view.h
@@ -37,6 +37,7 @@ bool mysql_create_view(THD *thd, TABLE_LIST *view,
bool mysql_make_view(THD *thd, File_parser *parser, TABLE_LIST *table,
uint flags);
+
bool mysql_drop_view(THD *thd, TABLE_LIST *view, enum_drop_mode drop_mode);
bool check_key_in_view(THD *thd, TABLE_LIST * view);
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index da8750a7ba4..07666822acf 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -55,11 +55,13 @@
#include "sp_pcontext.h"
#include "sp_rcontext.h"
#include "sp.h"
-#include "sql_alter.h" // Alter_table*_statement
-#include "sql_truncate.h" // Truncate_statement
-#include "sql_admin.h" // Analyze/Check..._table_stmt
-#include "sql_partition_admin.h" // Alter_table_*_partition_stmt
+#include "sql_alter.h" // Sql_cmd_alter_table*
+#include "sql_truncate.h" // Sql_cmd_truncate_table
+#include "sql_admin.h" // Sql_cmd_analyze/Check..._table
+#include "sql_partition_admin.h" // Sql_cmd_alter_table_*_part.
+#include "sql_handler.h" // Sql_cmd_handler_*
#include "sql_signal.h"
+#include "sql_get_diagnostics.h" // Sql_cmd_get_diagnostics
#include "event_parse_data.h"
#include "create_options.h"
#include <myisam.h>
@@ -283,7 +285,7 @@ void case_stmt_action_case(LEX *lex)
(Instruction 12 in the example)
*/
- lex->spcont->push_label((char *)"", lex->sphead->instructions());
+ lex->spcont->push_label(current_thd, EMPTY_STR, lex->sphead->instructions());
}
/**
@@ -352,7 +354,7 @@ int case_stmt_action_when(LEX *lex, Item *when, bool simple)
*/
return !test(i) ||
- sp->push_backpatch(i, ctx->push_label((char *)"", 0)) ||
+ sp->push_backpatch(i, ctx->push_label(current_thd, EMPTY_STR, 0)) ||
sp->add_cont_backpatch(i) ||
sp->add_instr(i);
}
@@ -468,7 +470,7 @@ set_system_variable(THD *thd, struct sys_var_with_base *tmp,
*/
static bool
-set_local_variable(THD *thd, sp_variable_t *spv, Item *val)
+set_local_variable(THD *thd, sp_variable *spv, Item *val)
{
Item *it;
LEX *lex= thd->lex;
@@ -476,8 +478,8 @@ set_local_variable(THD *thd, sp_variable_t *spv, Item *val)
if (val)
it= val;
- else if (spv->dflt)
- it= spv->dflt;
+ else if (spv->default_value)
+ it= spv->default_value;
else
{
it= new (thd->mem_root) Item_null();
@@ -543,6 +545,57 @@ set_trigger_new_row(THD *thd, LEX_STRING *name, Item *val)
/**
+ Create an object to represent a SP variable in the Item-hierarchy.
+
+ @param thd The current thread.
+ @param name The SP variable name.
+ @param spvar The SP variable (optional).
+ @param start_in_q Start position of the SP variable name in the query.
+ @param end_in_q End position of the SP variable name in the query.
+
+ @remark If spvar is not specified, the name is used to search for the
+ variable in the parse-time context. If the variable does not
+ exist, a error is set and NULL is returned to the caller.
+
+ @return An Item_splocal object representing the SP variable, or NULL on error.
+*/
+static Item_splocal*
+create_item_for_sp_var(THD *thd, LEX_STRING name, sp_variable *spvar,
+ const char *start_in_q, const char *end_in_q)
+{
+ Item_splocal *item;
+ LEX *lex= thd->lex;
+ uint pos_in_q, len_in_q;
+ sp_pcontext *spc = lex->spcont;
+
+ /* If necessary, look for the variable. */
+ if (spc && !spvar)
+ spvar= spc->find_variable(name, false);
+
+ if (!spvar)
+ {
+ my_error(ER_SP_UNDECLARED_VAR, MYF(0), name.str);
+ return NULL;
+ }
+
+ DBUG_ASSERT(spc && spvar);
+
+ /* Position and length of the SP variable name in the query. */
+ pos_in_q= start_in_q - lex->sphead->m_tmp_query;
+ len_in_q= end_in_q - start_in_q;
+
+ item= new (thd->mem_root)
+ Item_splocal(name, spvar->offset, spvar->type, pos_in_q, len_in_q);
+
+#ifndef DBUG_OFF
+ if (item)
+ item->m_sp= lex->sphead;
+#endif
+
+ return item;
+}
+
+/**
Helper to resolve the SQL:2003 Syntax exception 1) in <in predicate>.
See SQL:2003, Part 2, section 8.4 <in predicate>, Note 184, page 383.
This function returns the proper item for the SQL expression
@@ -707,10 +760,10 @@ static bool add_create_index_prepare (LEX *lex, Table_ident *table)
if (!lex->current_select->add_table_to_list(lex->thd, table, NULL,
TL_OPTION_UPDATING,
TL_READ_NO_INSERT,
- MDL_SHARED_NO_WRITE))
+ MDL_SHARED_UPGRADABLE))
return TRUE;
lex->alter_info.reset();
- lex->alter_info.flags= ALTER_ADD_INDEX;
+ lex->alter_info.flags= Alter_info::ALTER_ADD_INDEX;
lex->col_list.empty();
lex->change= NullS;
lex->option_list= NULL;
@@ -879,7 +932,7 @@ static bool sp_create_assignment_instr(THD *thd, bool no_lookahead)
timestamp_type date_time_type;
st_select_lex *select_lex;
chooser_compare_func_creator boolfunc2creator;
- struct sp_cond_type *spcondtype;
+ class sp_condition_value *spcondvalue;
struct { int vars, conds, hndlrs, curs; } spblock;
sp_name *spname;
LEX *lex;
@@ -890,6 +943,14 @@ static bool sp_create_assignment_instr(THD *thd, bool no_lookahead)
enum Foreign_key::fk_option m_fk_option;
enum enum_yes_no_unknown m_yes_no_unk;
Diag_condition_item_name diag_condition_item_name;
+ Diagnostics_information::Which_area diag_area;
+ Diagnostics_information *diag_info;
+ Statement_information_item *stmt_info_item;
+ Statement_information_item::Name stmt_info_item_name;
+ List<Statement_information_item> *stmt_info_list;
+ Condition_information_item *cond_info_item;
+ Condition_information_item::Name cond_info_item_name;
+ List<Condition_information_item> *cond_info_list;
DYNCALL_CREATE_DEF *dyncol_def;
List<DYNCALL_CREATE_DEF> *dyncol_def_list;
bool is_not_empty;
@@ -901,10 +962,10 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%pure_parser /* We have threads */
/*
- Currently there are 189 shift/reduce conflicts.
+ Currently there are 185 shift/reduce conflicts.
We should not introduce new conflicts any more.
*/
-%expect 189
+%expect 185
/*
Comments for TOKENS.
@@ -1027,6 +1088,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token CROSS /* SQL-2003-R */
%token CUBE_SYM /* SQL-2003-R */
%token CURDATE /* MYSQL-FUNC */
+%token CURRENT_SYM /* SQL-2003-R */
%token CURRENT_USER /* SQL-2003-R */
%token CURRENT_POS_SYM
%token CURSOR_SYM /* SQL-2003-R */
@@ -1058,6 +1120,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token DESCRIBE /* SQL-2003-R */
%token DES_KEY_FILE
%token DETERMINISTIC_SYM /* SQL-2003-R */
+%token DIAGNOSTICS_SYM /* SQL-2003-N */
%token DIRECTORY_SYM
%token DISABLE_SYM
%token DISCARD
@@ -1091,6 +1154,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token EVENTS_SYM
%token EVENT_SYM
%token EVERY_SYM /* SQL-2003-N */
+%token EXCHANGE_SYM
%token EXAMINED_SYM
%token EXECUTE_SYM /* SQL-2003-R */
%token EXISTS /* SQL-2003-R */
@@ -1123,6 +1187,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token GEOMETRYCOLLECTION
%token GEOMETRY_SYM
%token GET_FORMAT /* MYSQL-FUNC */
+%token GET_SYM /* SQL-2003-R */
%token GLOBAL_SYM /* SQL-2003-R */
%token GRANT /* SQL-2003-R */
%token GRANTS
@@ -1283,6 +1348,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token NO_WRITE_TO_BINLOG
%token NULL_SYM /* SQL-2003-R */
%token NUM
+%token NUMBER_SYM /* SQL-2003-N */
%token NUMERIC_SYM /* SQL-2003-R */
%token NVARCHAR_SYM
%token OFFSET_SYM
@@ -1311,9 +1377,9 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token PARSER_SYM
%token PARSE_VCOL_EXPR_SYM
%token PARTIAL /* SQL-2003-N */
-%token PARTITIONING_SYM
-%token PARTITIONS_SYM
%token PARTITION_SYM /* SQL-2003-R */
+%token PARTITIONS_SYM
+%token PARTITIONING_SYM
%token PASSWORD
%token PERSISTENT_SYM
%token PHASE_SYM
@@ -1374,6 +1440,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token RESTORE_SYM
%token RESTRICT
%token RESUME_SYM
+%token RETURNED_SQLSTATE_SYM /* SQL-2003-N */
%token RETURNS_SYM /* SQL-2003-R */
%token RETURN_SYM /* SQL-2003-R */
%token REVOKE /* SQL-2003-R */
@@ -1384,6 +1451,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token ROWS_SYM /* SQL-2003-R */
%token ROW_FORMAT_SYM
%token ROW_SYM /* SQL-2003-R */
+%token ROW_COUNT_SYM /* SQL-2003-N */
%token RTREE_SYM
%token SAVEPOINT_SYM /* SQL-2003-R */
%token SCHEDULE_SYM
@@ -1437,6 +1505,9 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
%token STARTING
%token STARTS_SYM
%token START_SYM /* SQL-2003-R */
+%token STATS_AUTO_RECALC_SYM
+%token STATS_PERSISTENT_SYM
+%token STATS_SAMPLE_PAGES_SYM
%token STATUS_SYM
%token STDDEV_SAMP_SYM /* SQL-2003-N */
%token STD_SYM
@@ -1647,6 +1718,8 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
function_call_generic
function_call_conflict kill_expr
signal_allowed_expr
+ simple_target_specification
+ condition_number
%type <item_num>
NUM_literal
@@ -1665,7 +1738,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
btree_or_rtree
%type <string_list>
- using_list
+ using_list opt_use_partition use_partition
%type <key_part>
key_part
@@ -1777,7 +1850,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
key_using_alg
part_column_list
server_def server_options_list server_option
- definer_opt no_definer definer
+ definer_opt no_definer definer get_diagnostics
parse_vcol_expr vcol_opt_specifier vcol_opt_attribute
vcol_opt_attribute_list vcol_attribute
END_OF_INPUT
@@ -1793,7 +1866,7 @@ END_OF_INPUT
%type <NONE> case_stmt_specification simple_case_stmt searched_case_stmt
%type <num> sp_decl_idents sp_opt_inout sp_handler_type sp_hcond_list
-%type <spcondtype> sp_cond sp_hcond sqlstate signal_value opt_signal_value
+%type <spcondvalue> sp_cond sp_hcond sqlstate signal_value opt_signal_value
%type <spblock> sp_decls sp_decl
%type <lex> sp_cursor_stmt
%type <spname> sp_name
@@ -1804,6 +1877,15 @@ END_OF_INPUT
%type <NONE> signal_stmt resignal_stmt
%type <diag_condition_item_name> signal_condition_information_item_name
+%type <diag_area> which_area;
+%type <diag_info> diagnostics_information;
+%type <stmt_info_item> statement_information_item;
+%type <stmt_info_item_name> statement_information_item_name;
+%type <stmt_info_list> statement_information;
+%type <cond_info_item> condition_information_item;
+%type <cond_info_item_name> condition_information_item_name;
+%type <cond_info_list> condition_information;
+
%type <NONE>
'-' '+' '*' '/' '%' '(' ')'
',' '!' '{' '}' '&' '|' AND_SYM OR_SYM OR_OR_SYM BETWEEN_SYM CASE_SYM
@@ -1908,6 +1990,7 @@ statement:
| drop
| execute
| flush
+ | get_diagnostics
| grant
| handler
| help
@@ -2069,6 +2152,7 @@ master_def:
| MASTER_PASSWORD_SYM EQ TEXT_STRING_sys
{
Lex->mi.password = $3.str;
+ Lex->contains_plaintext_password= true;
}
| MASTER_PORT_SYM EQ ulong_num
{
@@ -2131,7 +2215,7 @@ master_def:
}
if (Lex->mi.heartbeat_period > slave_net_timeout)
{
- push_warning_printf(YYTHD, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(YYTHD, Sql_condition::WARN_LEVEL_WARN,
ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX,
ER(ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX));
}
@@ -2139,7 +2223,7 @@ master_def:
{
if (Lex->mi.heartbeat_period != 0.0)
{
- push_warning_printf(YYTHD, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(YYTHD, Sql_condition::WARN_LEVEL_WARN,
ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MIN,
ER(ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MIN));
Lex->mi.heartbeat_period= 0.0;
@@ -2187,7 +2271,7 @@ master_file_def:
from 0" (4 in fact), unspecified means "don't change the position
(keep the preceding value)").
*/
- Lex->mi.pos = max(BIN_LOG_HEADER_SIZE, Lex->mi.pos);
+ Lex->mi.pos= MY_MAX(BIN_LOG_HEADER_SIZE, Lex->mi.pos);
}
| RELAY_LOG_FILE_SYM EQ TEXT_STRING_sys
{
@@ -2197,7 +2281,7 @@ master_file_def:
{
Lex->mi.relay_log_pos = $3;
/* Adjust if < BIN_LOG_HEADER_SIZE (same comment as Lex->mi.pos) */
- Lex->mi.relay_log_pos = max(BIN_LOG_HEADER_SIZE, Lex->mi.relay_log_pos);
+ Lex->mi.relay_log_pos= MY_MAX(BIN_LOG_HEADER_SIZE, Lex->mi.relay_log_pos);
}
| MASTER_USE_GTID_SYM EQ CURRENT_POS_SYM
{
@@ -2289,7 +2373,7 @@ create:
!lex->create_info.db_type)
{
lex->create_info.db_type= ha_default_handlerton(YYTHD);
- push_warning_printf(YYTHD, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(YYTHD, Sql_condition::WARN_LEVEL_WARN,
ER_WARN_USING_OTHER_HANDLER,
ER(ER_WARN_USING_OTHER_HANDLER),
hton_name(lex->create_info.db_type)->str,
@@ -2307,6 +2391,7 @@ create:
if (add_create_index(Lex, $2, $5))
MYSQL_YYABORT;
}
+ opt_index_lock_algorithm { }
| CREATE fulltext INDEX_SYM opt_if_not_exists ident init_key_options ON
table_ident
{
@@ -2318,6 +2403,7 @@ create:
if (add_create_index(Lex, $2, $5))
MYSQL_YYABORT;
}
+ opt_index_lock_algorithm { }
| CREATE spatial INDEX_SYM opt_if_not_exists ident init_key_options ON
table_ident
{
@@ -2329,6 +2415,7 @@ create:
if (add_create_index(Lex, $2, $5))
MYSQL_YYABORT;
}
+ opt_index_lock_algorithm { }
| CREATE DATABASE opt_if_not_exists ident
{
Lex->create_info.default_table_charset= NULL;
@@ -2405,6 +2492,7 @@ server_option:
| PASSWORD TEXT_STRING_sys
{
Lex->server_options.password= $2.str;
+ Lex->contains_plaintext_password= true;
}
| SOCKET_SYM TEXT_STRING_sys
{
@@ -2777,14 +2865,16 @@ sp_fdparam:
LEX *lex= Lex;
sp_pcontext *spc= lex->spcont;
- if (spc->find_variable(&$1, TRUE))
+ if (spc->find_variable($1, TRUE))
{
my_error(ER_SP_DUP_PARAM, MYF(0), $1.str);
MYSQL_YYABORT;
}
- sp_variable_t *spvar= spc->push_variable(&$1,
- (enum enum_field_types)$3,
- sp_param_in);
+
+ sp_variable *spvar= spc->add_variable(YYTHD,
+ $1,
+ (enum enum_field_types) $3,
+ sp_variable::MODE_IN);
if (lex->sphead->fill_field_definition(YYTHD, lex,
(enum enum_field_types) $3,
@@ -2814,14 +2904,15 @@ sp_pdparam:
LEX *lex= Lex;
sp_pcontext *spc= lex->spcont;
- if (spc->find_variable(&$3, TRUE))
+ if (spc->find_variable($3, TRUE))
{
my_error(ER_SP_DUP_PARAM, MYF(0), $3.str);
MYSQL_YYABORT;
}
- sp_variable_t *spvar= spc->push_variable(&$3,
- (enum enum_field_types)$4,
- (sp_param_mode_t)$1);
+ sp_variable *spvar= spc->add_variable(YYTHD,
+ $3,
+ (enum enum_field_types) $4,
+ (sp_variable::enum_mode) $1);
if (lex->sphead->fill_field_definition(YYTHD, lex,
(enum enum_field_types) $4,
@@ -2835,10 +2926,10 @@ sp_pdparam:
;
sp_opt_inout:
- /* Empty */ { $$= sp_param_in; }
- | IN_SYM { $$= sp_param_in; }
- | OUT_SYM { $$= sp_param_out; }
- | INOUT_SYM { $$= sp_param_inout; }
+ /* Empty */ { $$= sp_variable::MODE_IN; }
+ | IN_SYM { $$= sp_variable::MODE_IN; }
+ | OUT_SYM { $$= sp_variable::MODE_OUT; }
+ | INOUT_SYM { $$= sp_variable::MODE_INOUT; }
;
sp_proc_stmts:
@@ -2910,13 +3001,13 @@ sp_decl:
for (uint i = num_vars-$2 ; i < num_vars ; i++)
{
uint var_idx= pctx->var_context2runtime(i);
- sp_variable_t *spvar= pctx->find_variable(var_idx);
+ sp_variable *spvar= pctx->find_variable(var_idx);
if (!spvar)
MYSQL_YYABORT;
spvar->type= var_type;
- spvar->dflt= dflt_value_item;
+ spvar->default_value= dflt_value_item;
if (lex->sphead->fill_field_definition(YYTHD, lex, var_type,
&spvar->field_def))
@@ -2952,36 +3043,41 @@ sp_decl:
LEX *lex= Lex;
sp_pcontext *spc= lex->spcont;
- if (spc->find_cond(&$2, TRUE))
+ if (spc->find_condition($2, TRUE))
{
my_error(ER_SP_DUP_COND, MYF(0), $2.str);
MYSQL_YYABORT;
}
- if(YYTHD->lex->spcont->push_cond(&$2, $5))
+ if(spc->add_condition(YYTHD, $2, $5))
MYSQL_YYABORT;
$$.vars= $$.hndlrs= $$.curs= 0;
$$.conds= 1;
}
| DECLARE_SYM sp_handler_type HANDLER_SYM FOR_SYM
{
+ THD *thd= YYTHD;
LEX *lex= Lex;
sp_head *sp= lex->sphead;
- lex->spcont= lex->spcont->push_context(LABEL_HANDLER_SCOPE);
+ sp_handler *h= lex->spcont->add_handler(thd,
+ (sp_handler::enum_type) $2);
+
+ lex->spcont= lex->spcont->push_context(thd,
+ sp_pcontext::HANDLER_SCOPE);
sp_pcontext *ctx= lex->spcont;
sp_instr_hpush_jump *i=
- new sp_instr_hpush_jump(sp->instructions(), ctx, $2,
- ctx->current_var_count());
+ new sp_instr_hpush_jump(sp->instructions(), ctx, h);
+
if (i == NULL || sp->add_instr(i))
MYSQL_YYABORT;
/* For continue handlers, mark end of handler scope. */
- if ($2 == SP_HANDLER_CONTINUE &&
+ if ($2 == sp_handler::CONTINUE &&
sp->push_backpatch(i, ctx->last_label()))
MYSQL_YYABORT;
- if (sp->push_backpatch(i, ctx->push_label(empty_c_string, 0)))
+ if (sp->push_backpatch(i, ctx->push_label(thd, EMPTY_STR, 0)))
MYSQL_YYABORT;
}
sp_hcond_list sp_proc_stmt
@@ -2989,20 +3085,19 @@ sp_decl:
LEX *lex= Lex;
sp_head *sp= lex->sphead;
sp_pcontext *ctx= lex->spcont;
- sp_label_t *hlab= lex->spcont->pop_label(); /* After this hdlr */
+ sp_label *hlab= lex->spcont->pop_label(); /* After this hdlr */
sp_instr_hreturn *i;
- if ($2 == SP_HANDLER_CONTINUE)
+ if ($2 == sp_handler::CONTINUE)
{
- i= new sp_instr_hreturn(sp->instructions(), ctx,
- ctx->current_var_count());
+ i= new sp_instr_hreturn(sp->instructions(), ctx);
if (i == NULL ||
sp->add_instr(i))
MYSQL_YYABORT;
}
else
{ /* EXIT or UNDO handler, just jump to the end of the block */
- i= new sp_instr_hreturn(sp->instructions(), ctx, 0);
+ i= new sp_instr_hreturn(sp->instructions(), ctx);
if (i == NULL ||
sp->add_instr(i) ||
sp->push_backpatch(i, lex->spcont->last_label())) /* Block end */
@@ -3013,8 +3108,7 @@ sp_decl:
lex->spcont= ctx->pop_context();
$$.vars= $$.conds= $$.curs= 0;
- $$.hndlrs= $6;
- lex->spcont->add_handlers($6);
+ $$.hndlrs= 1;
}
| DECLARE_SYM ident CURSOR_SYM FOR_SYM sp_cursor_stmt
{
@@ -3024,7 +3118,7 @@ sp_decl:
uint offp;
sp_instr_cpush *i;
- if (ctx->find_cursor(&$2, &offp, TRUE))
+ if (ctx->find_cursor($2, &offp, TRUE))
{
my_error(ER_SP_DUP_CURS, MYF(0), $2.str);
delete $5;
@@ -3034,7 +3128,7 @@ sp_decl:
ctx->current_cursor_count());
if (i == NULL ||
sp->add_instr(i) ||
- ctx->push_cursor(&$2))
+ ctx->add_cursor($2))
MYSQL_YYABORT;
$$.vars= $$.conds= $$.hndlrs= 0;
$$.curs= 1;
@@ -3065,9 +3159,9 @@ sp_cursor_stmt:
;
sp_handler_type:
- EXIT_SYM { $$= SP_HANDLER_EXIT; }
- | CONTINUE_SYM { $$= SP_HANDLER_CONTINUE; }
- /*| UNDO_SYM { QQ No yet } */
+ EXIT_SYM { $$= sp_handler::EXIT; }
+ | CONTINUE_SYM { $$= sp_handler::CONTINUE; }
+ /*| UNDO_SYM { QQ Not yet } */
;
sp_hcond_list:
@@ -3084,7 +3178,7 @@ sp_hcond_element:
sp_head *sp= lex->sphead;
sp_pcontext *ctx= lex->spcont->parent_context();
- if (ctx->find_handler($1))
+ if (ctx->check_duplicate_handler($1))
{
my_message(ER_SP_DUP_HANDLER, ER(ER_SP_DUP_HANDLER), MYF(0));
MYSQL_YYABORT;
@@ -3095,7 +3189,6 @@ sp_hcond_element:
(sp_instr_hpush_jump *)sp->last_instruction();
i->add_condition($1);
- ctx->push_handler($1);
}
}
;
@@ -3108,11 +3201,9 @@ sp_cond:
my_error(ER_WRONG_VALUE, MYF(0), "CONDITION", "0");
MYSQL_YYABORT;
}
- $$= (sp_cond_type_t *)YYTHD->alloc(sizeof(sp_cond_type_t));
+ $$= new (YYTHD->mem_root) sp_condition_value($1);
if ($$ == NULL)
MYSQL_YYABORT;
- $$->type= sp_cond_type_t::number;
- $$->mysqlerr= $1;
}
| sqlstate
;
@@ -3120,17 +3211,22 @@ sp_cond:
sqlstate:
SQLSTATE_SYM opt_value TEXT_STRING_literal
{ /* SQLSTATE */
- if (!sp_cond_check(&$3))
+
+ /*
+ An error is triggered:
+ - if the specified string is not a valid SQLSTATE,
+ - or if it represents the completion condition -- it is not
+ allowed to SIGNAL, or declare a handler for the completion
+ condition.
+ */
+ if (!is_sqlstate_valid(&$3) || is_sqlstate_completion($3.str))
{
my_error(ER_SP_BAD_SQLSTATE, MYF(0), $3.str);
MYSQL_YYABORT;
}
- $$= (sp_cond_type_t *)YYTHD->alloc(sizeof(sp_cond_type_t));
+ $$= new (YYTHD->mem_root) sp_condition_value($3.str);
if ($$ == NULL)
MYSQL_YYABORT;
- $$->type= sp_cond_type_t::state;
- memcpy($$->sqlstate, $3.str, SQLSTATE_LENGTH);
- $$->sqlstate[SQLSTATE_LENGTH]= '\0';
}
;
@@ -3146,7 +3242,7 @@ sp_hcond:
}
| ident /* CONDITION name */
{
- $$= Lex->spcont->find_cond(&$1);
+ $$= Lex->spcont->find_condition($1, false);
if ($$ == NULL)
{
my_error(ER_SP_COND_MISMATCH, MYF(0), $1.str);
@@ -3155,24 +3251,22 @@ sp_hcond:
}
| SQLWARNING_SYM /* SQLSTATEs 01??? */
{
- $$= (sp_cond_type_t *)YYTHD->alloc(sizeof(sp_cond_type_t));
+ $$= new (YYTHD->mem_root) sp_condition_value(sp_condition_value::WARNING);
if ($$ == NULL)
MYSQL_YYABORT;
- $$->type= sp_cond_type_t::warning;
}
| not FOUND_SYM /* SQLSTATEs 02??? */
{
- $$= (sp_cond_type_t *)YYTHD->alloc(sizeof(sp_cond_type_t));
+ $$= new (YYTHD->mem_root) sp_condition_value(sp_condition_value::NOT_FOUND);
if ($$ == NULL)
MYSQL_YYABORT;
- $$->type= sp_cond_type_t::notfound;
}
| SQLEXCEPTION_SYM /* All other SQLSTATEs */
{
- $$= (sp_cond_type_t *)YYTHD->alloc(sizeof(sp_cond_type_t));
+ $$= new (YYTHD->mem_root) sp_condition_value(sp_condition_value::EXCEPTION);
if ($$ == NULL)
MYSQL_YYABORT;
- $$->type= sp_cond_type_t::exception;
}
;
@@ -3184,9 +3278,9 @@ signal_stmt:
Yacc_state *state= & thd->m_parser_state->m_yacc;
lex->sql_command= SQLCOM_SIGNAL;
- lex->m_stmt= new (thd->mem_root) Signal_statement(lex, $2,
- state->m_set_signal_info);
- if (lex->m_stmt == NULL)
+ lex->m_sql_cmd=
+ new (thd->mem_root) Sql_cmd_signal($2, state->m_set_signal_info);
+ if (lex->m_sql_cmd == NULL)
MYSQL_YYABORT;
}
;
@@ -3195,20 +3289,20 @@ signal_value:
ident
{
LEX *lex= Lex;
- sp_cond_type_t *cond;
+ sp_condition_value *cond;
if (lex->spcont == NULL)
{
/* SIGNAL foo cannot be used outside of stored programs */
my_error(ER_SP_COND_MISMATCH, MYF(0), $1.str);
MYSQL_YYABORT;
}
- cond= lex->spcont->find_cond(&$1);
+ cond= lex->spcont->find_condition($1, false);
if (cond == NULL)
{
my_error(ER_SP_COND_MISMATCH, MYF(0), $1.str);
MYSQL_YYABORT;
}
- if (cond->type != sp_cond_type_t::state)
+ if (cond->type != sp_condition_value::SQLSTATE)
{
my_error(ER_SIGNAL_BAD_CONDITION_TYPE, MYF(0));
MYSQL_YYABORT;
@@ -3323,13 +3417,160 @@ resignal_stmt:
Yacc_state *state= & thd->m_parser_state->m_yacc;
lex->sql_command= SQLCOM_RESIGNAL;
- lex->m_stmt= new (thd->mem_root) Resignal_statement(lex, $2,
- state->m_set_signal_info);
- if (lex->m_stmt == NULL)
+ lex->m_sql_cmd=
+ new (thd->mem_root) Sql_cmd_resignal($2,
+ state->m_set_signal_info);
+ if (lex->m_sql_cmd == NULL)
MYSQL_YYABORT;
}
;
+get_diagnostics:
+ GET_SYM which_area DIAGNOSTICS_SYM diagnostics_information
+ {
+ Diagnostics_information *info= $4;
+
+ info->set_which_da($2);
+
+ Lex->sql_command= SQLCOM_GET_DIAGNOSTICS;
+ Lex->m_sql_cmd= new (YYTHD->mem_root) Sql_cmd_get_diagnostics(info);
+
+ if (Lex->m_sql_cmd == NULL)
+ MYSQL_YYABORT;
+ }
+ ;
+
+which_area:
+ /* If <which area> is not specified, then CURRENT is implicit. */
+ { $$= Diagnostics_information::CURRENT_AREA; }
+ | CURRENT_SYM
+ { $$= Diagnostics_information::CURRENT_AREA; }
+ ;
+
+diagnostics_information:
+ statement_information
+ {
+ $$= new (YYTHD->mem_root) Statement_information($1);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ | CONDITION_SYM condition_number condition_information
+ {
+ $$= new (YYTHD->mem_root) Condition_information($2, $3);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ ;
+
+statement_information:
+ statement_information_item
+ {
+ $$= new (YYTHD->mem_root) List<Statement_information_item>;
+ if ($$ == NULL || $$->push_back($1))
+ MYSQL_YYABORT;
+ }
+ | statement_information ',' statement_information_item
+ {
+ if ($1->push_back($3))
+ MYSQL_YYABORT;
+ $$= $1;
+ }
+ ;
+
+statement_information_item:
+ simple_target_specification EQ statement_information_item_name
+ {
+ $$= new (YYTHD->mem_root) Statement_information_item($3, $1);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+
+simple_target_specification:
+ ident
+ {
+ Lex_input_stream *lip= &YYTHD->m_parser_state->m_lip;
+ $$= create_item_for_sp_var(YYTHD, $1, NULL,
+ lip->get_tok_start(), lip->get_ptr());
+
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ | '@' ident_or_text
+ {
+ $$= new (YYTHD->mem_root) Item_func_get_user_var($2);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+ ;
+
+statement_information_item_name:
+ NUMBER_SYM
+ { $$= Statement_information_item::NUMBER; }
+ | ROW_COUNT_SYM
+ { $$= Statement_information_item::ROW_COUNT; }
+ ;
+
+/*
+ Only a limited subset of <expr> are allowed in GET DIAGNOSTICS
+ <condition number>, same subset as for SIGNAL/RESIGNAL.
+*/
+condition_number:
+ signal_allowed_expr
+ { $$= $1; }
+ ;
+
+condition_information:
+ condition_information_item
+ {
+ $$= new (YYTHD->mem_root) List<Condition_information_item>;
+ if ($$ == NULL || $$->push_back($1))
+ MYSQL_YYABORT;
+ }
+ | condition_information ',' condition_information_item
+ {
+ if ($1->push_back($3))
+ MYSQL_YYABORT;
+ $$= $1;
+ }
+ ;
+
+condition_information_item:
+ simple_target_specification EQ condition_information_item_name
+ {
+ $$= new (YYTHD->mem_root) Condition_information_item($3, $1);
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ }
+
+condition_information_item_name:
+ CLASS_ORIGIN_SYM
+ { $$= Condition_information_item::CLASS_ORIGIN; }
+ | SUBCLASS_ORIGIN_SYM
+ { $$= Condition_information_item::SUBCLASS_ORIGIN; }
+ | CONSTRAINT_CATALOG_SYM
+ { $$= Condition_information_item::CONSTRAINT_CATALOG; }
+ | CONSTRAINT_SCHEMA_SYM
+ { $$= Condition_information_item::CONSTRAINT_SCHEMA; }
+ | CONSTRAINT_NAME_SYM
+ { $$= Condition_information_item::CONSTRAINT_NAME; }
+ | CATALOG_NAME_SYM
+ { $$= Condition_information_item::CATALOG_NAME; }
+ | SCHEMA_NAME_SYM
+ { $$= Condition_information_item::SCHEMA_NAME; }
+ | TABLE_NAME_SYM
+ { $$= Condition_information_item::TABLE_NAME; }
+ | COLUMN_NAME_SYM
+ { $$= Condition_information_item::COLUMN_NAME; }
+ | CURSOR_NAME_SYM
+ { $$= Condition_information_item::CURSOR_NAME; }
+ | MESSAGE_TEXT_SYM
+ { $$= Condition_information_item::MESSAGE_TEXT; }
+ | MYSQL_ERRNO_SYM
+ { $$= Condition_information_item::MYSQL_ERRNO; }
+ | RETURNED_SQLSTATE_SYM
+ { $$= Condition_information_item::RETURNED_SQLSTATE; }
+ ;
+
sp_decl_idents:
ident
{
@@ -3338,12 +3579,15 @@ sp_decl_idents:
LEX *lex= Lex;
sp_pcontext *spc= lex->spcont;
- if (spc->find_variable(&$1, TRUE))
+ if (spc->find_variable($1, TRUE))
{
my_error(ER_SP_DUP_VAR, MYF(0), $1.str);
MYSQL_YYABORT;
}
- spc->push_variable(&$1, (enum_field_types)0, sp_param_in);
+ spc->add_variable(YYTHD,
+ $1,
+ MYSQL_TYPE_DECIMAL,
+ sp_variable::MODE_IN);
$$= 1;
}
| sp_decl_idents ',' ident
@@ -3353,12 +3597,15 @@ sp_decl_idents:
LEX *lex= Lex;
sp_pcontext *spc= lex->spcont;
- if (spc->find_variable(&$3, TRUE))
+ if (spc->find_variable($3, TRUE))
{
my_error(ER_SP_DUP_VAR, MYF(0), $3.str);
MYSQL_YYABORT;
}
- spc->push_variable(&$3, (enum_field_types)0, sp_param_in);
+ spc->add_variable(YYTHD,
+ $3,
+ MYSQL_TYPE_DECIMAL,
+ sp_variable::MODE_IN);
$$= $1 + 1;
}
;
@@ -3480,7 +3727,9 @@ sp_proc_stmt_unlabeled:
{ /* Unlabeled controls get a secret label. */
LEX *lex= Lex;
- lex->spcont->push_label((char *)"", lex->sphead->instructions());
+ lex->spcont->push_label(YYTHD,
+ EMPTY_STR,
+ lex->sphead->instructions());
}
sp_unlabeled_control
{
@@ -3496,7 +3745,7 @@ sp_proc_stmt_leave:
LEX *lex= Lex;
sp_head *sp = lex->sphead;
sp_pcontext *ctx= lex->spcont;
- sp_label_t *lab= ctx->find_label($2.str);
+ sp_label *lab= ctx->find_label($2);
if (! lab)
{
@@ -3516,7 +3765,7 @@ sp_proc_stmt_leave:
there are no hpop/cpop at the jump destination,
so we should include the block context here for cleanup.
*/
- bool exclusive= (lab->type == SP_LAB_BEGIN);
+ bool exclusive= (lab->type == sp_label::BEGIN);
n= ctx->diff_handlers(lab->ctx, exclusive);
if (n)
@@ -3549,9 +3798,9 @@ sp_proc_stmt_iterate:
LEX *lex= Lex;
sp_head *sp= lex->sphead;
sp_pcontext *ctx= lex->spcont;
- sp_label_t *lab= ctx->find_label($2.str);
+ sp_label *lab= ctx->find_label($2);
- if (! lab || lab->type != SP_LAB_ITER)
+ if (! lab || lab->type != sp_label::ITERATION)
{
my_error(ER_SP_LILABEL_MISMATCH, MYF(0), "ITERATE", $2.str);
MYSQL_YYABORT;
@@ -3594,7 +3843,7 @@ sp_proc_stmt_open:
uint offset;
sp_instr_copen *i;
- if (! lex->spcont->find_cursor(&$2, &offset))
+ if (! lex->spcont->find_cursor($2, &offset, false))
{
my_error(ER_SP_CURSOR_MISMATCH, MYF(0), $2.str);
MYSQL_YYABORT;
@@ -3614,7 +3863,7 @@ sp_proc_stmt_fetch:
uint offset;
sp_instr_cfetch *i;
- if (! lex->spcont->find_cursor(&$3, &offset))
+ if (! lex->spcont->find_cursor($3, &offset, false))
{
my_error(ER_SP_CURSOR_MISMATCH, MYF(0), $3.str);
MYSQL_YYABORT;
@@ -3636,7 +3885,7 @@ sp_proc_stmt_close:
uint offset;
sp_instr_cclose *i;
- if (! lex->spcont->find_cursor(&$2, &offset))
+ if (! lex->spcont->find_cursor($2, &offset, false))
{
my_error(ER_SP_CURSOR_MISMATCH, MYF(0), $2.str);
MYSQL_YYABORT;
@@ -3660,9 +3909,9 @@ sp_fetch_list:
LEX *lex= Lex;
sp_head *sp= lex->sphead;
sp_pcontext *spc= lex->spcont;
- sp_variable_t *spv;
+ sp_variable *spv;
- if (!spc || !(spv = spc->find_variable(&$1)))
+ if (!spc || !(spv = spc->find_variable($1, false)))
{
my_error(ER_SP_UNDECLARED_VAR, MYF(0), $1.str);
MYSQL_YYABORT;
@@ -3680,9 +3929,9 @@ sp_fetch_list:
LEX *lex= Lex;
sp_head *sp= lex->sphead;
sp_pcontext *spc= lex->spcont;
- sp_variable_t *spv;
+ sp_variable *spv;
- if (!spc || !(spv = spc->find_variable(&$3)))
+ if (!spc || !(spv = spc->find_variable($3, false)))
{
my_error(ER_SP_UNDECLARED_VAR, MYF(0), $3.str);
MYSQL_YYABORT;
@@ -3708,7 +3957,7 @@ sp_if:
sp_instr_jump_if_not *i = new sp_instr_jump_if_not(ip, ctx,
$2, lex);
if (i == NULL ||
- sp->push_backpatch(i, ctx->push_label((char *)"", 0)) ||
+ sp->push_backpatch(i, ctx->push_label(YYTHD, EMPTY_STR, 0)) ||
sp->add_cont_backpatch(i) ||
sp->add_instr(i))
MYSQL_YYABORT;
@@ -3725,7 +3974,7 @@ sp_if:
sp->add_instr(i))
MYSQL_YYABORT;
sp->backpatch(ctx->pop_label());
- sp->push_backpatch(i, ctx->push_label((char *)"", 0));
+ sp->push_backpatch(i, ctx->push_label(YYTHD, EMPTY_STR, 0));
}
sp_elseifs
{
@@ -3867,7 +4116,7 @@ sp_labeled_control:
{
LEX *lex= Lex;
sp_pcontext *ctx= lex->spcont;
- sp_label_t *lab= ctx->find_label($1.str);
+ sp_label *lab= ctx->find_label($1);
if (lab)
{
@@ -3876,19 +4125,18 @@ sp_labeled_control:
}
else
{
- lab= lex->spcont->push_label($1.str,
- lex->sphead->instructions());
- lab->type= SP_LAB_ITER;
+ lab= lex->spcont->push_label(YYTHD, $1, lex->sphead->instructions());
+ lab->type= sp_label::ITERATION;
}
}
sp_unlabeled_control sp_opt_label
{
LEX *lex= Lex;
- sp_label_t *lab= lex->spcont->pop_label();
+ sp_label *lab= lex->spcont->pop_label();
if ($5.str)
{
- if (my_strcasecmp(system_charset_info, $5.str, lab->name) != 0)
+ if (my_strcasecmp(system_charset_info, $5.str, lab->name.str) != 0)
{
my_error(ER_SP_LABEL_MISMATCH, MYF(0), $5.str);
MYSQL_YYABORT;
@@ -3908,7 +4156,7 @@ sp_labeled_block:
{
LEX *lex= Lex;
sp_pcontext *ctx= lex->spcont;
- sp_label_t *lab= ctx->find_label($1.str);
+ sp_label *lab= ctx->find_label($1);
if (lab)
{
@@ -3916,18 +4164,17 @@ sp_labeled_block:
MYSQL_YYABORT;
}
- lab= lex->spcont->push_label($1.str,
- lex->sphead->instructions());
- lab->type= SP_LAB_BEGIN;
+ lab= lex->spcont->push_label(YYTHD, $1, lex->sphead->instructions());
+ lab->type= sp_label::BEGIN;
}
sp_block_content sp_opt_label
{
LEX *lex= Lex;
- sp_label_t *lab= lex->spcont->pop_label();
+ sp_label *lab= lex->spcont->pop_label();
if ($5.str)
{
- if (my_strcasecmp(system_charset_info, $5.str, lab->name) != 0)
+ if (my_strcasecmp(system_charset_info, $5.str, lab->name.str) != 0)
{
my_error(ER_SP_LABEL_MISMATCH, MYF(0), $5.str);
MYSQL_YYABORT;
@@ -3940,8 +4187,8 @@ sp_unlabeled_block:
{ /* Unlabeled blocks get a secret label. */
LEX *lex= Lex;
uint ip= lex->sphead->instructions();
- sp_label_t *lab= lex->spcont->push_label((char *)"", ip);
- lab->type= SP_LAB_BEGIN;
+ sp_label *lab= lex->spcont->push_label(YYTHD, EMPTY_STR, ip);
+ lab->type= sp_label::BEGIN;
}
sp_block_content
{
@@ -3956,7 +4203,8 @@ sp_block_content:
together. No [[NOT] ATOMIC] yet, and we need to figure out how
make it coexist with the existing BEGIN COMMIT/ROLLBACK. */
LEX *lex= Lex;
- lex->spcont= lex->spcont->push_context(LABEL_DEFAULT_SCOPE);
+ lex->spcont= lex->spcont->push_context(YYTHD,
+ sp_pcontext::REGULAR_SCOPE);
}
sp_decls
sp_proc_stmts
@@ -3992,7 +4240,7 @@ sp_unlabeled_control:
{
LEX *lex= Lex;
uint ip= lex->sphead->instructions();
- sp_label_t *lab= lex->spcont->last_label(); /* Jumping back */
+ sp_label *lab= lex->spcont->last_label(); /* Jumping back */
sp_instr_jump *i = new sp_instr_jump(ip, lex->spcont, lab->ip);
if (i == NULL ||
lex->sphead->add_instr(i))
@@ -4020,7 +4268,7 @@ sp_unlabeled_control:
{
LEX *lex= Lex;
uint ip= lex->sphead->instructions();
- sp_label_t *lab= lex->spcont->last_label(); /* Jumping back */
+ sp_label *lab= lex->spcont->last_label(); /* Jumping back */
sp_instr_jump *i = new sp_instr_jump(ip, lex->spcont, lab->ip);
if (i == NULL ||
lex->sphead->add_instr(i))
@@ -4033,7 +4281,7 @@ sp_unlabeled_control:
{
LEX *lex= Lex;
uint ip= lex->sphead->instructions();
- sp_label_t *lab= lex->spcont->last_label(); /* Jumping back */
+ sp_label *lab= lex->spcont->last_label(); /* Jumping back */
sp_instr_jump_if_not *i = new sp_instr_jump_if_not(ip, lex->spcont,
$5, lab->ip,
lex);
@@ -4549,7 +4797,7 @@ partitioning:
}
if (lex->sql_command == SQLCOM_ALTER_TABLE)
{
- lex->alter_info.flags|= ALTER_PARTITION;
+ lex->alter_info.flags|= Alter_info::ALTER_PARTITION;
}
}
partition
@@ -5349,6 +5597,70 @@ create_table_option:
~(HA_OPTION_PACK_KEYS | HA_OPTION_NO_PACK_KEYS);
Lex->create_info.used_fields|= HA_CREATE_USED_PACK_KEYS;
}
+ | STATS_AUTO_RECALC_SYM opt_equal ulong_num
+ {
+ switch($3) {
+ case 0:
+ Lex->create_info.stats_auto_recalc= HA_STATS_AUTO_RECALC_OFF;
+ break;
+ case 1:
+ Lex->create_info.stats_auto_recalc= HA_STATS_AUTO_RECALC_ON;
+ break;
+ default:
+ my_parse_error(ER(ER_SYNTAX_ERROR));
+ MYSQL_YYABORT;
+ }
+ Lex->create_info.used_fields|= HA_CREATE_USED_STATS_AUTO_RECALC;
+ }
+ | STATS_AUTO_RECALC_SYM opt_equal DEFAULT
+ {
+ Lex->create_info.stats_auto_recalc= HA_STATS_AUTO_RECALC_DEFAULT;
+ Lex->create_info.used_fields|= HA_CREATE_USED_STATS_AUTO_RECALC;
+ }
+ | STATS_PERSISTENT_SYM opt_equal ulong_num
+ {
+ switch($3) {
+ case 0:
+ Lex->create_info.table_options|= HA_OPTION_NO_STATS_PERSISTENT;
+ break;
+ case 1:
+ Lex->create_info.table_options|= HA_OPTION_STATS_PERSISTENT;
+ break;
+ default:
+ my_parse_error(ER(ER_SYNTAX_ERROR));
+ MYSQL_YYABORT;
+ }
+ Lex->create_info.used_fields|= HA_CREATE_USED_STATS_PERSISTENT;
+ }
+ | STATS_PERSISTENT_SYM opt_equal DEFAULT
+ {
+ Lex->create_info.table_options&=
+ ~(HA_OPTION_STATS_PERSISTENT | HA_OPTION_NO_STATS_PERSISTENT);
+ Lex->create_info.used_fields|= HA_CREATE_USED_STATS_PERSISTENT;
+ }
+ | STATS_SAMPLE_PAGES_SYM opt_equal ulong_num
+ {
+ /* From user point of view STATS_SAMPLE_PAGES can be specified as
+ STATS_SAMPLE_PAGES=N (where 0<N<=65535, it does not make sense to
+ scan 0 pages) or STATS_SAMPLE_PAGES=default. Internally we record
+ =default as 0. See create_frm() in sql/table.cc, we use only two
+ bytes for stats_sample_pages and this is why we do not allow
+ larger values. 65535 pages, 16kb each means to sample 1GB, which
+ is impractical. If at some point this needs to be extended, then
+ we can store the higher bits from stats_sample_pages in .frm too. */
+ if ($3 == 0 || $3 > 0xffff)
+ {
+ my_parse_error(ER(ER_SYNTAX_ERROR));
+ MYSQL_YYABORT;
+ }
+ Lex->create_info.stats_sample_pages=$3;
+ Lex->create_info.used_fields|= HA_CREATE_USED_STATS_SAMPLE_PAGES;
+ }
+ | STATS_SAMPLE_PAGES_SYM opt_equal DEFAULT
+ {
+ Lex->create_info.stats_sample_pages=0;
+ Lex->create_info.used_fields|= HA_CREATE_USED_STATS_SAMPLE_PAGES;
+ }
| CHECKSUM_SYM opt_equal ulong_num
{
Lex->create_info.table_options|= $3 ? HA_OPTION_CHECKSUM : HA_OPTION_NO_CHECKSUM;
@@ -5515,7 +5827,7 @@ storage_engines:
MYSQL_YYABORT;
}
$$= 0;
- push_warning_printf(YYTHD, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(YYTHD, Sql_condition::WARN_LEVEL_WARN,
ER_UNKNOWN_STORAGE_ENGINE,
ER(ER_UNKNOWN_STORAGE_ENGINE),
$1.str);
@@ -5627,7 +5939,8 @@ key_def:
{
LEX *lex=Lex;
Key *key= new Foreign_key($4.str ? $4 : $1, lex->col_list,
- $8,
+ $8->db,
+ $8->table,
lex->ref_list,
lex->fk_delete_opt,
lex->fk_update_opt,
@@ -5641,7 +5954,7 @@ key_def:
&default_key_create_info, 1))
MYSQL_YYABORT;
/* Only used for ALTER TABLE. Ignored otherwise. */
- lex->alter_info.flags|= ALTER_FOREIGN_KEY;
+ lex->alter_info.flags|= Alter_info::ADD_FOREIGN_KEY;
}
| opt_constraint check_constraint
{
@@ -5739,13 +6052,13 @@ vcol_attribute:
{
LEX *lex=Lex;
lex->type|= UNIQUE_FLAG;
- lex->alter_info.flags|= ALTER_ADD_INDEX;
+ lex->alter_info.flags|= Alter_info::ALTER_ADD_INDEX;
}
| UNIQUE_SYM KEY_SYM
{
LEX *lex=Lex;
lex->type|= UNIQUE_KEY_FLAG;
- lex->alter_info.flags|= ALTER_ADD_INDEX;
+ lex->alter_info.flags|= Alter_info::ALTER_ADD_INDEX;
}
| COMMENT_SYM TEXT_STRING_sys { Lex->comment= $2; }
;
@@ -5860,7 +6173,7 @@ type:
{
char buff[sizeof("YEAR()") + MY_INT64_NUM_DECIMAL_DIGITS + 1];
my_snprintf(buff, sizeof(buff), "YEAR(%lu)", length);
- push_warning_printf(YYTHD, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning_printf(YYTHD, Sql_condition::WARN_LEVEL_NOTE,
ER_WARN_DEPRECATED_SYNTAX,
ER(ER_WARN_DEPRECATED_SYNTAX),
buff, "YEAR(4)");
@@ -6091,25 +6404,25 @@ attribute:
{
LEX *lex=Lex;
lex->type|= AUTO_INCREMENT_FLAG | NOT_NULL_FLAG | UNIQUE_FLAG;
- lex->alter_info.flags|= ALTER_ADD_INDEX;
+ lex->alter_info.flags|= Alter_info::ALTER_ADD_INDEX;
}
| opt_primary KEY_SYM
{
LEX *lex=Lex;
lex->type|= PRI_KEY_FLAG | NOT_NULL_FLAG;
- lex->alter_info.flags|= ALTER_ADD_INDEX;
+ lex->alter_info.flags|= Alter_info::ALTER_ADD_INDEX;
}
| UNIQUE_SYM
{
LEX *lex=Lex;
lex->type|= UNIQUE_FLAG;
- lex->alter_info.flags|= ALTER_ADD_INDEX;
+ lex->alter_info.flags|= Alter_info::ALTER_ADD_INDEX;
}
| UNIQUE_SYM KEY_SYM
{
LEX *lex=Lex;
lex->type|= UNIQUE_KEY_FLAG;
- lex->alter_info.flags|= ALTER_ADD_INDEX;
+ lex->alter_info.flags|= Alter_info::ALTER_ADD_INDEX;
}
| COMMENT_SYM TEXT_STRING_sys { Lex->comment= $2; }
| COLLATE_SYM collation_name
@@ -6323,7 +6636,6 @@ opt_bin_mod:
| BINARY { Lex->type|= BINCMP_FLAG; }
;
-
opt_primary:
/* empty */
| PRIMARY_SYM
@@ -6632,41 +6944,40 @@ string_list:
*/
alter:
- ALTER alter_options TABLE_SYM table_ident
+ ALTER
+ {
+ Lex->name.str= 0;
+ Lex->name.length= 0;
+ Lex->sql_command= SQLCOM_ALTER_TABLE;
+ Lex->duplicates= DUP_ERROR;
+ Lex->col_list.empty();
+ Lex->select_lex.init_order();
+ bzero(&Lex->create_info, sizeof(Lex->create_info));
+ Lex->create_info.db_type= 0;
+ Lex->create_info.default_table_charset= NULL;
+ Lex->create_info.row_type= ROW_TYPE_NOT_USED;
+ Lex->alter_info.reset();
+ Lex->no_write_to_binlog= 0;
+ Lex->create_info.storage_media= HA_SM_DEFAULT;
+ DBUG_ASSERT(!Lex->m_sql_cmd);
+ }
+ alter_options TABLE_SYM table_ident
{
- THD *thd= YYTHD;
- LEX *lex= thd->lex;
- lex->name.str= 0;
- lex->name.length= 0;
- lex->sql_command= SQLCOM_ALTER_TABLE;
- lex->duplicates= DUP_ERROR;
- if (!lex->select_lex.add_table_to_list(thd, $4, NULL,
+ if (!Lex->select_lex.add_table_to_list(YYTHD, $5, NULL,
TL_OPTION_UPDATING,
TL_READ_NO_INSERT,
- MDL_SHARED_NO_WRITE))
+ MDL_SHARED_UPGRADABLE))
MYSQL_YYABORT;
- lex->col_list.empty();
- lex->select_lex.init_order();
- lex->select_lex.db= (lex->select_lex.table_list.first)->db;
- bzero((char*) &lex->create_info,sizeof(lex->create_info));
- lex->create_info.db_type= 0;
- lex->create_info.default_table_charset= NULL;
- lex->create_info.row_type= ROW_TYPE_NOT_USED;
- lex->alter_info.reset();
- lex->no_write_to_binlog= 0;
- lex->create_info.storage_media= HA_SM_DEFAULT;
- lex->create_last_non_select_table= lex->last_table();
- DBUG_ASSERT(!lex->m_stmt);
+ Lex->select_lex.db= (Lex->select_lex.table_list.first)->db;
+ Lex->create_last_non_select_table= Lex->last_table();
}
alter_commands
{
- THD *thd= YYTHD;
- LEX *lex= thd->lex;
- if (!lex->m_stmt)
+ if (!Lex->m_sql_cmd)
{
/* Create a generic ALTER TABLE statment. */
- lex->m_stmt= new (thd->mem_root) Alter_table_statement(lex);
- if (lex->m_stmt == NULL)
+ Lex->m_sql_cmd= new (YYTHD->mem_root) Sql_cmd_alter_table();
+ if (Lex->m_sql_cmd == NULL)
MYSQL_YYABORT;
}
}
@@ -6857,8 +7168,22 @@ ident_or_empty:
alter_commands:
/* empty */
- | DISCARD TABLESPACE { Lex->alter_info.tablespace_op= DISCARD_TABLESPACE; }
- | IMPORT TABLESPACE { Lex->alter_info.tablespace_op= IMPORT_TABLESPACE; }
+ | DISCARD TABLESPACE
+ {
+ Lex->m_sql_cmd= new (YYTHD->mem_root)
+ Sql_cmd_discard_import_tablespace(
+ Sql_cmd_discard_import_tablespace::DISCARD_TABLESPACE);
+ if (Lex->m_sql_cmd == NULL)
+ MYSQL_YYABORT;
+ }
+ | IMPORT TABLESPACE
+ {
+ Lex->m_sql_cmd= new (YYTHD->mem_root)
+ Sql_cmd_discard_import_tablespace(
+ Sql_cmd_discard_import_tablespace::IMPORT_TABLESPACE);
+ if (Lex->m_sql_cmd == NULL)
+ MYSQL_YYABORT;
+ }
| alter_list
opt_partitioning
| alter_list
@@ -6870,19 +7195,18 @@ alter_commands:
From here we insert a number of commands to manage the partitions of a
partitioned table such as adding partitions, dropping partitions,
reorganising partitions in various manners. In future releases the list
- will be longer and also include moving partitions to a
- new table and so forth.
+ will be longer.
*/
| add_partition_rule
| DROP PARTITION_SYM opt_if_exists alt_part_name_list
{
- Lex->alter_info.flags|= ALTER_DROP_PARTITION;
+ Lex->alter_info.flags|= Alter_info::ALTER_DROP_PARTITION;
}
| REBUILD_SYM PARTITION_SYM opt_no_write_to_binlog
all_or_alt_part_name_list
{
LEX *lex= Lex;
- lex->alter_info.flags|= ALTER_REBUILD_PARTITION;
+ lex->alter_info.flags|= Alter_info::ALTER_REBUILD_PARTITION;
lex->no_write_to_binlog= $3;
}
| OPTIMIZE PARTITION_SYM opt_no_write_to_binlog
@@ -6892,10 +7216,10 @@ alter_commands:
LEX *lex= thd->lex;
lex->no_write_to_binlog= $3;
lex->check_opt.init();
- DBUG_ASSERT(!lex->m_stmt);
- lex->m_stmt= new (thd->mem_root)
- Alter_table_optimize_partition_statement(lex);
- if (lex->m_stmt == NULL)
+ DBUG_ASSERT(!lex->m_sql_cmd);
+ lex->m_sql_cmd= new (thd->mem_root)
+ Sql_cmd_alter_table_optimize_partition();
+ if (lex->m_sql_cmd == NULL)
MYSQL_YYABORT;
}
opt_no_write_to_binlog
@@ -6906,21 +7230,21 @@ alter_commands:
LEX *lex= thd->lex;
lex->no_write_to_binlog= $3;
lex->check_opt.init();
- DBUG_ASSERT(!lex->m_stmt);
- lex->m_stmt= new (thd->mem_root)
- Alter_table_analyze_partition_statement(lex);
- if (lex->m_stmt == NULL)
- MYSQL_YYABORT;
+ DBUG_ASSERT(!lex->m_sql_cmd);
+ lex->m_sql_cmd= new (thd->mem_root)
+ Sql_cmd_alter_table_analyze_partition();
+ if (lex->m_sql_cmd == NULL)
+ MYSQL_YYABORT;
}
| CHECK_SYM PARTITION_SYM all_or_alt_part_name_list
{
THD *thd= YYTHD;
LEX *lex= thd->lex;
lex->check_opt.init();
- DBUG_ASSERT(!lex->m_stmt);
- lex->m_stmt= new (thd->mem_root)
- Alter_table_check_partition_statement(lex);
- if (lex->m_stmt == NULL)
+ DBUG_ASSERT(!lex->m_sql_cmd);
+ lex->m_sql_cmd= new (thd->mem_root)
+ Sql_cmd_alter_table_check_partition();
+ if (lex->m_sql_cmd == NULL)
MYSQL_YYABORT;
}
opt_mi_check_type
@@ -6931,17 +7255,17 @@ alter_commands:
LEX *lex= thd->lex;
lex->no_write_to_binlog= $3;
lex->check_opt.init();
- DBUG_ASSERT(!lex->m_stmt);
- lex->m_stmt= new (thd->mem_root)
- Alter_table_repair_partition_statement(lex);
- if (lex->m_stmt == NULL)
+ DBUG_ASSERT(!lex->m_sql_cmd);
+ lex->m_sql_cmd= new (thd->mem_root)
+ Sql_cmd_alter_table_repair_partition();
+ if (lex->m_sql_cmd == NULL)
MYSQL_YYABORT;
}
opt_mi_repair_type
| COALESCE PARTITION_SYM opt_no_write_to_binlog real_ulong_num
{
LEX *lex= Lex;
- lex->alter_info.flags|= ALTER_COALESCE_PARTITION;
+ lex->alter_info.flags|= Alter_info::ALTER_COALESCE_PARTITION;
lex->no_write_to_binlog= $3;
lex->alter_info.num_parts= $4;
}
@@ -6950,26 +7274,51 @@ alter_commands:
THD *thd= YYTHD;
LEX *lex= thd->lex;
lex->check_opt.init();
- DBUG_ASSERT(!lex->m_stmt);
- lex->m_stmt= new (thd->mem_root)
- Alter_table_truncate_partition_statement(lex);
- if (lex->m_stmt == NULL)
+ DBUG_ASSERT(!lex->m_sql_cmd);
+ lex->m_sql_cmd= new (thd->mem_root)
+ Sql_cmd_alter_table_truncate_partition();
+ if (lex->m_sql_cmd == NULL)
MYSQL_YYABORT;
}
| reorg_partition_rule
+ | EXCHANGE_SYM PARTITION_SYM alt_part_name_item
+ WITH TABLE_SYM table_ident have_partitioning
+ {
+ THD *thd= YYTHD;
+ LEX *lex= thd->lex;
+ size_t dummy;
+ lex->select_lex.db=$6->db.str;
+ if (lex->select_lex.db == NULL &&
+ lex->copy_db_to(&lex->select_lex.db, &dummy))
+ {
+ MYSQL_YYABORT;
+ }
+ lex->name= $6->table;
+ lex->alter_info.flags|= Alter_info::ALTER_EXCHANGE_PARTITION;
+ if (!lex->select_lex.add_table_to_list(thd, $6, NULL,
+ TL_OPTION_UPDATING,
+ TL_READ_NO_INSERT,
+ MDL_SHARED_NO_WRITE))
+ MYSQL_YYABORT;
+ DBUG_ASSERT(!lex->m_sql_cmd);
+ lex->m_sql_cmd= new (thd->mem_root)
+ Sql_cmd_alter_table_exchange_partition();
+ if (lex->m_sql_cmd == NULL)
+ MYSQL_YYABORT;
+ }
;
remove_partitioning:
REMOVE_SYM PARTITIONING_SYM
{
- Lex->alter_info.flags|= ALTER_REMOVE_PARTITIONING;
+ Lex->alter_info.flags|= Alter_info::ALTER_REMOVE_PARTITIONING;
}
;
all_or_alt_part_name_list:
ALL
{
- Lex->alter_info.flags|= ALTER_ALL_PARTITION;
+ Lex->alter_info.flags|= Alter_info::ALTER_ALL_PARTITION;
}
| alt_part_name_list
;
@@ -6984,7 +7333,7 @@ add_partition_rule:
mem_alloc_error(sizeof(partition_info));
MYSQL_YYABORT;
}
- lex->alter_info.flags|= ALTER_ADD_PARTITION;
+ lex->alter_info.flags|= Alter_info::ALTER_ADD_PARTITION;
lex->no_write_to_binlog= $4;
}
add_part_extra
@@ -7022,11 +7371,11 @@ reorg_partition_rule:
reorg_parts_rule:
/* empty */
{
- Lex->alter_info.flags|= ALTER_TABLE_REORG;
+ Lex->alter_info.flags|= Alter_info::ALTER_TABLE_REORG;
}
| alt_part_name_list
{
- Lex->alter_info.flags|= ALTER_REORGANIZE_PARTITION;
+ Lex->alter_info.flags|= Alter_info::ALTER_REORGANIZE_PARTITION;
}
INTO '(' part_def_list ')'
{
@@ -7065,7 +7414,7 @@ add_column:
{
LEX *lex=Lex;
lex->change=0;
- lex->alter_info.flags|= ALTER_ADD_COLUMN;
+ lex->alter_info.flags|= Alter_info::ALTER_ADD_COLUMN;
}
;
@@ -7077,17 +7426,18 @@ alter_list_item:
| ADD key_def
{
Lex->create_last_non_select_table= Lex->last_table();
- Lex->alter_info.flags|= ALTER_ADD_INDEX;
+ Lex->alter_info.flags|= Alter_info::ALTER_ADD_INDEX;
}
| add_column '(' create_field_list ')'
{
- Lex->alter_info.flags|= ALTER_ADD_COLUMN | ALTER_ADD_INDEX;
+ Lex->alter_info.flags|= Alter_info::ALTER_ADD_COLUMN |
+ Alter_info::ALTER_ADD_INDEX;
}
| CHANGE opt_column opt_if_exists field_ident
{
LEX *lex=Lex;
lex->change= $4.str;
- lex->alter_info.flags|= ALTER_CHANGE_COLUMN;
+ lex->alter_info.flags|= Alter_info::ALTER_CHANGE_COLUMN;
lex->option_list= NULL;
}
field_spec opt_place
@@ -7101,7 +7451,7 @@ alter_list_item:
lex->default_value= lex->on_update_value= 0;
lex->comment=null_lex_str;
lex->charset= NULL;
- lex->alter_info.flags|= ALTER_CHANGE_COLUMN;
+ lex->alter_info.flags|= Alter_info::ALTER_CHANGE_COLUMN;
lex->vcol_info= 0;
lex->option_list= NULL;
}
@@ -7129,11 +7479,16 @@ alter_list_item:
if (ad == NULL)
MYSQL_YYABORT;
lex->alter_info.drop_list.push_back(ad);
- lex->alter_info.flags|= ALTER_DROP_COLUMN;
+ lex->alter_info.flags|= Alter_info::ALTER_DROP_COLUMN;
}
- | DROP FOREIGN KEY_SYM opt_if_exists opt_ident
+ | DROP FOREIGN KEY_SYM opt_if_exists field_ident
{
- Lex->alter_info.flags|= ALTER_DROP_INDEX | ALTER_FOREIGN_KEY;
+ LEX *lex=Lex;
+ Alter_drop *ad= new Alter_drop(Alter_drop::FOREIGN_KEY, $5.str, $4);
+ if (ad == NULL)
+ MYSQL_YYABORT;
+ lex->alter_info.drop_list.push_back(ad);
+ lex->alter_info.flags|= Alter_info::DROP_FOREIGN_KEY;
}
| DROP PRIMARY_SYM KEY_SYM
{
@@ -7143,7 +7498,7 @@ alter_list_item:
if (ad == NULL)
MYSQL_YYABORT;
lex->alter_info.drop_list.push_back(ad);
- lex->alter_info.flags|= ALTER_DROP_INDEX;
+ lex->alter_info.flags|= Alter_info::ALTER_DROP_INDEX;
}
| DROP key_or_index opt_if_exists field_ident
{
@@ -7152,19 +7507,19 @@ alter_list_item:
if (ad == NULL)
MYSQL_YYABORT;
lex->alter_info.drop_list.push_back(ad);
- lex->alter_info.flags|= ALTER_DROP_INDEX;
+ lex->alter_info.flags|= Alter_info::ALTER_DROP_INDEX;
}
| DISABLE_SYM KEYS
{
LEX *lex=Lex;
- lex->alter_info.keys_onoff= DISABLE;
- lex->alter_info.flags|= ALTER_KEYS_ONOFF;
+ lex->alter_info.keys_onoff= Alter_info::DISABLE;
+ lex->alter_info.flags|= Alter_info::ALTER_KEYS_ONOFF;
}
| ENABLE_SYM KEYS
{
LEX *lex=Lex;
- lex->alter_info.keys_onoff= ENABLE;
- lex->alter_info.flags|= ALTER_KEYS_ONOFF;
+ lex->alter_info.keys_onoff= Alter_info::ENABLE;
+ lex->alter_info.flags|= Alter_info::ALTER_KEYS_ONOFF;
}
| ALTER opt_column field_ident SET DEFAULT signed_literal
{
@@ -7173,7 +7528,7 @@ alter_list_item:
if (ac == NULL)
MYSQL_YYABORT;
lex->alter_info.alter_list.push_back(ac);
- lex->alter_info.flags|= ALTER_CHANGE_COLUMN_DEFAULT;
+ lex->alter_info.flags|= Alter_info::ALTER_CHANGE_COLUMN_DEFAULT;
}
| ALTER opt_column field_ident DROP DEFAULT
{
@@ -7182,7 +7537,7 @@ alter_list_item:
if (ac == NULL)
MYSQL_YYABORT;
lex->alter_info.alter_list.push_back(ac);
- lex->alter_info.flags|= ALTER_CHANGE_COLUMN_DEFAULT;
+ lex->alter_info.flags|= Alter_info::ALTER_CHANGE_COLUMN_DEFAULT;
}
| RENAME opt_to table_ident
{
@@ -7201,7 +7556,7 @@ alter_list_item:
MYSQL_YYABORT;
}
lex->name= $3->table;
- lex->alter_info.flags|= ALTER_RENAME;
+ lex->alter_info.flags|= Alter_info::ALTER_RENAME;
}
| CONVERT_SYM TO_SYM charset charset_name_or_default opt_collate
{
@@ -7222,12 +7577,12 @@ alter_list_item:
lex->create_info.default_table_charset= $5;
lex->create_info.used_fields|= (HA_CREATE_USED_CHARSET |
HA_CREATE_USED_DEFAULT_CHARSET);
- lex->alter_info.flags|= ALTER_CONVERT;
+ lex->alter_info.flags|= Alter_info::ALTER_CONVERT;
}
| create_table_options_space_separated
{
LEX *lex=Lex;
- lex->alter_info.flags|= ALTER_OPTIONS;
+ lex->alter_info.flags|= Alter_info::ALTER_OPTIONS;
if ((lex->create_info.used_fields & HA_CREATE_USED_ENGINE) &&
!lex->create_info.db_type)
{
@@ -7236,12 +7591,53 @@ alter_list_item:
}
| FORCE_SYM
{
- Lex->alter_info.flags|= ALTER_RECREATE;
+ Lex->alter_info.flags|= Alter_info::ALTER_RECREATE;
}
| alter_order_clause
{
LEX *lex=Lex;
- lex->alter_info.flags|= ALTER_ORDER;
+ lex->alter_info.flags|= Alter_info::ALTER_ORDER;
+ }
+ | alter_algorithm_option
+ | alter_lock_option
+ ;
+
+opt_index_lock_algorithm:
+ /* empty */
+ | alter_lock_option
+ | alter_algorithm_option
+ | alter_lock_option alter_algorithm_option
+ | alter_algorithm_option alter_lock_option
+
+alter_algorithm_option:
+ ALGORITHM_SYM opt_equal DEFAULT
+ {
+ Lex->alter_info.requested_algorithm=
+ Alter_info::ALTER_TABLE_ALGORITHM_DEFAULT;
+ }
+ | ALGORITHM_SYM opt_equal ident
+ {
+ if (Lex->alter_info.set_requested_algorithm(&$3))
+ {
+ my_error(ER_UNKNOWN_ALTER_ALGORITHM, MYF(0), $3.str);
+ MYSQL_YYABORT;
+ }
+ }
+ ;
+
+alter_lock_option:
+ LOCK_SYM opt_equal DEFAULT
+ {
+ Lex->alter_info.requested_lock=
+ Alter_info::ALTER_TABLE_LOCK_DEFAULT;
+ }
+ | LOCK_SYM opt_equal ident
+ {
+ if (Lex->alter_info.set_requested_lock(&$3))
+ {
+ my_error(ER_UNKNOWN_ALTER_LOCK, MYF(0), $3.str);
+ MYSQL_YYABORT;
+ }
}
;
@@ -7256,7 +7652,7 @@ opt_ignore:
;
alter_options:
- { Lex->ignore= Lex->online= 0;} alter_options_part2
+ { Lex->ignore= 0;} alter_options_part2
;
alter_options_part2:
@@ -7271,7 +7667,11 @@ alter_option_list:
alter_option:
IGNORE_SYM { Lex->ignore= 1;}
- | ONLINE_SYM { Lex->online= 1;}
+ | ONLINE_SYM
+ {
+ Lex->alter_info.requested_algorithm=
+ Alter_info::ALTER_TABLE_ALGORITHM_INPLACE;
+ }
opt_restrict:
@@ -7282,8 +7682,16 @@ opt_restrict:
opt_place:
/* empty */ {}
- | AFTER_SYM ident { store_position_for_column($2.str); }
- | FIRST_SYM { store_position_for_column(first_keyword); }
+ | AFTER_SYM ident
+ {
+ store_position_for_column($2.str);
+ Lex->alter_info.flags |= Alter_info::ALTER_COLUMN_ORDER;
+ }
+ | FIRST_SYM
+ {
+ store_position_for_column(first_keyword);
+ Lex->alter_info.flags |= Alter_info::ALTER_COLUMN_ORDER;
+ }
;
opt_to:
@@ -7473,9 +7881,9 @@ repair:
{
THD *thd= YYTHD;
LEX* lex= thd->lex;
- DBUG_ASSERT(!lex->m_stmt);
- lex->m_stmt= new (thd->mem_root) Repair_table_statement(lex);
- if (lex->m_stmt == NULL)
+ DBUG_ASSERT(!lex->m_sql_cmd);
+ lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_repair_table();
+ if (lex->m_sql_cmd == NULL)
MYSQL_YYABORT;
}
;
@@ -7511,9 +7919,9 @@ analyze:
{
THD *thd= YYTHD;
LEX* lex= thd->lex;
- DBUG_ASSERT(!lex->m_stmt);
- lex->m_stmt= new (thd->mem_root) Analyze_table_statement(lex);
- if (lex->m_stmt == NULL)
+ DBUG_ASSERT(!lex->m_sql_cmd);
+ lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_analyze_table();
+ if (lex->m_sql_cmd == NULL)
MYSQL_YYABORT;
}
;
@@ -7636,9 +8044,9 @@ check:
{
THD *thd= YYTHD;
LEX* lex= thd->lex;
- DBUG_ASSERT(!lex->m_stmt);
- lex->m_stmt= new (thd->mem_root) Check_table_statement(lex);
- if (lex->m_stmt == NULL)
+ DBUG_ASSERT(!lex->m_sql_cmd);
+ lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_check_table();
+ if (lex->m_sql_cmd == NULL)
MYSQL_YYABORT;
}
;
@@ -7677,9 +8085,9 @@ optimize:
{
THD *thd= YYTHD;
LEX* lex= thd->lex;
- DBUG_ASSERT(!lex->m_stmt);
- lex->m_stmt= new (thd->mem_root) Optimize_table_statement(lex);
- if (lex->m_stmt == NULL)
+ DBUG_ASSERT(!lex->m_sql_cmd);
+ lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_optimize_table();
+ if (lex->m_sql_cmd == NULL)
MYSQL_YYABORT;
}
;
@@ -7826,7 +8234,7 @@ preload_keys_parts:
adm_partition:
PARTITION_SYM have_partitioning
{
- Lex->alter_info.flags|= ALTER_ADMIN_PARTITION;
+ Lex->alter_info.flags|= Alter_info::ALTER_ADMIN_PARTITION;
}
'(' all_or_alt_part_name_list ')'
;
@@ -9320,6 +9728,7 @@ function_call_conflict:
| OLD_PASSWORD '(' expr ')'
{
$$= new (YYTHD->mem_root) Item_func_old_password($3);
+ Lex->contains_plaintext_password= true;
if ($$ == NULL)
MYSQL_YYABORT;
}
@@ -9327,7 +9736,8 @@ function_call_conflict:
{
THD *thd= YYTHD;
Item* i1;
- if (thd->variables.old_passwords)
+ Lex->contains_plaintext_password= true;
+ if (thd->variables.old_passwords == 1)
i1= new (thd->mem_root) Item_func_old_password($3);
else
i1= new (thd->mem_root) Item_func_password($3);
@@ -9353,6 +9763,14 @@ function_call_conflict:
if ($$ == NULL)
MYSQL_YYABORT;
}
+ | ROW_COUNT_SYM '(' ')'
+ {
+ $$= new (YYTHD->mem_root) Item_func_row_count();
+ if ($$ == NULL)
+ MYSQL_YYABORT;
+ Lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_SYSTEM_FUNCTION);
+ Lex->safe_to_cache_query= 0;
+ }
| TRUNCATE_SYM '(' expr ',' expr ')'
{
$$= new (YYTHD->mem_root) Item_func_round($3,$5,1);
@@ -10163,6 +10581,22 @@ normal_join:
| CROSS JOIN_SYM { $$ = 0; }
;
+/*
+ table PARTITION (list of partitions), reusing using_list instead of creating
+ a new rule for partition_list.
+*/
+opt_use_partition:
+ /* empty */ { $$= 0;}
+ | use_partition
+ ;
+
+use_partition:
+ PARTITION_SYM '(' using_list ')' have_partitioning
+ {
+ $$= $3;
+ }
+ ;
+
/*
This is a flattening of the rules <table factor> and <table primary>
in the SQL:2003 standard, since we don't have <sample clause>
@@ -10176,13 +10610,14 @@ table_factor:
SELECT_LEX *sel= Select;
sel->table_join_options= 0;
}
- table_ident opt_table_alias opt_key_definition
+ table_ident opt_use_partition opt_table_alias opt_key_definition
{
- if (!($$= Select->add_table_to_list(YYTHD, $2, $3,
+ if (!($$= Select->add_table_to_list(YYTHD, $2, $4,
Select->get_table_join_options(),
YYPS->m_lock_type,
YYPS->m_mdl_type,
- Select->pop_index_hints())))
+ Select->pop_index_hints(),
+ $3)))
MYSQL_YYABORT;
Select->add_joined_table($$);
}
@@ -10252,7 +10687,7 @@ table_factor:
if (ti == NULL)
MYSQL_YYABORT;
if (!($$= sel->add_table_to_list(lex->thd,
- new Table_ident(unit), $5, 0,
+ ti, $5, 0,
TL_READ, MDL_SHARED_READ)))
MYSQL_YYABORT;
@@ -10844,9 +11279,9 @@ limit_option:
THD *thd= YYTHD;
LEX *lex= thd->lex;
Lex_input_stream *lip= & thd->m_parser_state->m_lip;
- sp_variable_t *spv;
+ sp_variable *spv;
sp_pcontext *spc = lex->spcont;
- if (spc && (spv = spc->find_variable(&$1)))
+ if (spc && (spv = spc->find_variable($1, false)))
{
splocal= new (thd->mem_root)
Item_splocal($1, spv->offset, spv->type,
@@ -11066,9 +11501,9 @@ select_var_ident:
| ident_or_text
{
LEX *lex=Lex;
- sp_variable_t *t;
+ sp_variable *t;
- if (!lex->spcont || !(t=lex->spcont->find_variable(&$1)))
+ if (!lex->spcont || !(t=lex->spcont->find_variable($1, false)))
{
my_error(ER_SP_UNDECLARED_VAR, MYF(0), $1.str);
MYSQL_YYABORT;
@@ -11177,12 +11612,12 @@ drop:
MYSQL_YYABORT;
lex->sql_command= SQLCOM_DROP_INDEX;
lex->alter_info.reset();
- lex->alter_info.flags= ALTER_DROP_INDEX;
+ lex->alter_info.flags= Alter_info::ALTER_DROP_INDEX;
lex->alter_info.drop_list.push_back(ad);
if (!lex->current_select->add_table_to_list(lex->thd, $6, NULL,
TL_OPTION_UPDATING,
TL_READ_NO_INSERT,
- MDL_SHARED_NO_WRITE))
+ MDL_SHARED_UPGRADABLE))
MYSQL_YYABORT;
}
| DROP DATABASE opt_if_exists ident
@@ -11310,6 +11745,19 @@ table_name:
}
;
+table_name_with_opt_use_partition:
+ table_ident opt_use_partition
+ {
+ if (!Select->add_table_to_list(YYTHD, $1, NULL,
+ TL_OPTION_UPDATING,
+ YYPS->m_lock_type,
+ YYPS->m_mdl_type,
+ NULL,
+ $2))
+ MYSQL_YYABORT;
+ }
+ ;
+
table_alias_ref_list:
table_alias_ref
| table_alias_ref_list ',' table_alias_ref
@@ -11422,7 +11870,7 @@ insert2:
;
insert_table:
- table_name
+ table_name_with_opt_use_partition
{
LEX *lex=Lex;
lex->field_list.empty();
@@ -11622,11 +12070,13 @@ delete:
;
single_multi:
- FROM table_ident
+ FROM table_ident opt_use_partition
{
if (!Select->add_table_to_list(YYTHD, $2, NULL, TL_OPTION_UPDATING,
YYPS->m_lock_type,
- YYPS->m_mdl_type))
+ YYPS->m_mdl_type,
+ NULL,
+ $3))
MYSQL_YYABORT;
YYPS->m_lock_type= TL_READ_DEFAULT;
YYPS->m_mdl_type= MDL_SHARED_READ;
@@ -11723,9 +12173,9 @@ truncate:
{
THD *thd= YYTHD;
LEX* lex= thd->lex;
- DBUG_ASSERT(!lex->m_stmt);
- lex->m_stmt= new (thd->mem_root) Truncate_statement(lex);
- if (lex->m_stmt == NULL)
+ DBUG_ASSERT(!lex->m_sql_cmd);
+ lex->m_sql_cmd= new (thd->mem_root) Sql_cmd_truncate_table();
+ if (lex->m_sql_cmd == NULL)
MYSQL_YYABORT;
}
;
@@ -11930,7 +12380,7 @@ show_param:
{
LEX *lex=Lex;
lex->sql_command= SQLCOM_SHOW_AUTHORS;
- push_warning_printf(YYTHD, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(YYTHD, Sql_condition::WARN_LEVEL_WARN,
ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT,
ER(ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT),
"SHOW AUTHORS");
@@ -11939,7 +12389,7 @@ show_param:
{
LEX *lex=Lex;
lex->sql_command= SQLCOM_SHOW_CONTRIBUTORS;
- push_warning_printf(YYTHD, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(YYTHD, Sql_condition::WARN_LEVEL_WARN,
ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT,
ER(ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT),
"SHOW CONTRIBUTORS");
@@ -12520,18 +12970,18 @@ load:
if (!(lex->exchange= new sql_exchange($7.str, 0, $2)))
MYSQL_YYABORT;
}
- opt_duplicate INTO TABLE_SYM table_ident
+ opt_duplicate INTO TABLE_SYM table_ident opt_use_partition
{
LEX *lex=Lex;
if (!Select->add_table_to_list(YYTHD, $12, NULL, TL_OPTION_UPDATING,
- $4, MDL_SHARED_WRITE))
+ $4, MDL_SHARED_WRITE, NULL, $13))
MYSQL_YYABORT;
lex->field_list.empty();
lex->update_list.empty();
lex->value_list.empty();
}
opt_load_data_charset
- { Lex->exchange->cs= $14; }
+ { Lex->exchange->cs= $15; }
opt_xml_rows_identified_by
opt_field_term opt_line_term opt_ignore_lines opt_field_or_var_spec
opt_load_data_set_spec
@@ -13046,9 +13496,9 @@ simple_ident:
THD *thd= YYTHD;
LEX *lex= thd->lex;
Lex_input_stream *lip= YYLIP;
- sp_variable_t *spv;
+ sp_variable *spv;
sp_pcontext *spc = lex->spcont;
- if (spc && (spv = spc->find_variable(&$1)))
+ if (spc && (spv = spc->find_variable($1, false)))
{
/* We're compiling a stored procedure and found a variable */
if (! lex->parsing_options.allows_variable)
@@ -13526,7 +13976,6 @@ keyword:
| OPTIONS_SYM {}
| OWNER_SYM {}
| PARSER_SYM {}
- | PARTITION_SYM {}
| PORT_SYM {}
| PREPARE_SYM {}
| REMOVE_SYM {}
@@ -13608,6 +14057,11 @@ keyword_sp:
| CURRENT_POS_SYM {}
| CPU_SYM {}
| CUBE_SYM {}
+ /*
+ Although a reserved keyword in SQL:2003 (and :2008),
+ not reserved in MySQL per WL#2111 specification.
+ */
+ | CURRENT_SYM {}
| CURSOR_NAME_SYM {}
| DATA_SYM {}
| DATAFILE_SYM {}
@@ -13617,6 +14071,7 @@ keyword_sp:
| DEFINER_SYM {}
| DELAY_KEY_WRITE_SYM {}
| DES_KEY_FILE {}
+ | DIAGNOSTICS_SYM {}
| DIRECTORY_SYM {}
| DISABLE_SYM {}
| DISCARD {}
@@ -13634,6 +14089,7 @@ keyword_sp:
| EVENT_SYM {}
| EVENTS_SYM {}
| EVERY_SYM {}
+ | EXCHANGE_SYM {}
| EXPANSION_SYM {}
| EXTENDED_SYM {}
| EXTENT_SIZE_SYM {}
@@ -13732,6 +14188,7 @@ keyword_sp:
| NO_WAIT_SYM {}
| NODEGROUP_SYM {}
| NONE_SYM {}
+ | NUMBER_SYM {}
| NVARCHAR_SYM {}
| OFFSET_SYM {}
| OLD_PASSWORD {}
@@ -13778,10 +14235,12 @@ keyword_sp:
| REPLICATION {}
| RESOURCES {}
| RESUME_SYM {}
+ | RETURNED_SQLSTATE_SYM {}
| RETURNS_SYM {}
| ROLLUP_SYM {}
| ROUTINE_SYM {}
| ROWS_SYM {}
+ | ROW_COUNT_SYM {}
| ROW_FORMAT_SYM {}
| ROW_SYM {}
| RTREE_SYM {}
@@ -14012,12 +14471,11 @@ option_value_no_option_type:
{
THD *thd= YYTHD;
LEX *lex= Lex;
- LEX_STRING *name= &$1.base_name;
if ($1.var == trg_new_row_fake_var)
{
/* We are in trigger and assigning value to field of new row */
- if (set_trigger_new_row(YYTHD, name, $3))
+ if (set_trigger_new_row(YYTHD, &$1.base_name, $3))
MYSQL_YYABORT;
}
else if ($1.var)
@@ -14029,7 +14487,7 @@ option_value_no_option_type:
else
{
sp_pcontext *spc= lex->spcont;
- sp_variable *spv= spc->find_variable(name, false);
+ sp_variable *spv= spc->find_variable($1.base_name, false);
/* It is a local variable. */
if (set_local_variable(thd, spv, $3))
@@ -14082,7 +14540,7 @@ option_value_no_option_type:
names.str= (char *)"names";
names.length= 5;
- if (spc && spc->find_variable(&names, false))
+ if (spc && spc->find_variable(names, false))
my_error(ER_SP_BAD_VAR_SHADOW, MYF(0), names.str);
else
my_parse_error(ER(ER_SYNTAX_ERROR));
@@ -14118,7 +14576,7 @@ option_value_no_option_type:
pw.str= (char *)"password";
pw.length= 8;
- if (spc && spc->find_variable(&pw, false))
+ if (spc && spc->find_variable(pw, false))
{
my_error(ER_SP_BAD_VAR_SHADOW, MYF(0), pw.str);
MYSQL_YYABORT;
@@ -14153,10 +14611,10 @@ internal_variable_name:
{
THD *thd= YYTHD;
sp_pcontext *spc= thd->lex->spcont;
- sp_variable_t *spv;
+ sp_variable *spv;
/* Best effort lookup for system variable. */
- if (!spc || !(spv = spc->find_variable(&$1)))
+ if (!spc || !(spv = spc->find_variable($1, false)))
{
struct sys_var_with_base tmp= {NULL, $1};
@@ -14291,24 +14749,33 @@ text_or_password:
TEXT_STRING { $$=$1.str;}
| PASSWORD '(' TEXT_STRING ')'
{
- $$= $3.length ? YYTHD->variables.old_passwords ?
- Item_func_old_password::alloc(YYTHD, $3.str, $3.length) :
- Item_func_password::alloc(YYTHD, $3.str, $3.length) :
- $3.str;
+ if ($3.length == 0)
+ $$= $3.str;
+ else
+ switch (YYTHD->variables.old_passwords) {
+ case 1: $$= Item_func_old_password::
+ alloc(YYTHD, $3.str, $3.length);
+ break;
+ case 0:
+ case 2: $$= Item_func_password::
+ create_password_hash_buffer(YYTHD, $3.str, $3.length);
+ break;
+ }
if ($$ == NULL)
MYSQL_YYABORT;
+ Lex->contains_plaintext_password= true;
}
| OLD_PASSWORD '(' TEXT_STRING ')'
{
- $$= $3.length ? Item_func_old_password::alloc(YYTHD, $3.str,
- $3.length) :
+ $$= $3.length ? Item_func_old_password::
+ alloc(YYTHD, $3.str, $3.length) :
$3.str;
if ($$ == NULL)
MYSQL_YYABORT;
+ Lex->contains_plaintext_password= true;
}
;
-
set_expr_or_default:
expr { $$=$1; }
| DEFAULT { $$=0; }
@@ -14789,9 +15256,11 @@ grant_user:
user IDENTIFIED_SYM BY TEXT_STRING
{
$$=$1; $1->password=$4;
+ if (Lex->sql_command == SQLCOM_REVOKE)
+ MYSQL_YYABORT;
if ($4.length)
{
- if (YYTHD->variables.old_passwords)
+ if (YYTHD->variables.old_passwords == 1)
{
char *buff=
(char *) YYTHD->alloc(SCRAMBLED_PASSWORD_CHAR_LENGTH_323+1);
@@ -14807,7 +15276,7 @@ grant_user:
(char *) YYTHD->alloc(SCRAMBLED_PASSWORD_CHAR_LENGTH+1);
if (buff == NULL)
MYSQL_YYABORT;
- my_make_scrambled_password(buff, $4.str, $4.length);
+ my_make_scrambled_password_sha1(buff, $4.str, $4.length);
$1->password.str= buff;
$1->password.length= SCRAMBLED_PASSWORD_CHAR_LENGTH;
}
@@ -15637,7 +16106,7 @@ sf_tail:
If a collision exists, it should not be silenced but fixed.
*/
push_warning_printf(thd,
- MYSQL_ERROR::WARN_LEVEL_NOTE,
+ Sql_condition::WARN_LEVEL_NOTE,
ER_NATIVE_FCT_NAME_COLLISION,
ER(ER_NATIVE_FCT_NAME_COLLISION),
sp->m_name.str);
diff --git a/sql/strfunc.cc b/sql/strfunc.cc
index 48c77c7c99f..a5a64c065ce 100644
--- a/sql/strfunc.cc
+++ b/sql/strfunc.cc
@@ -265,27 +265,22 @@ uint check_word(TYPELIB *lib, const char *val, const char *end,
*/
-uint strconvert(CHARSET_INFO *from_cs, const char *from,
+uint strconvert(CHARSET_INFO *from_cs, const char *from, uint from_length,
CHARSET_INFO *to_cs, char *to, uint to_length, uint *errors)
{
int cnvres;
my_wc_t wc;
char *to_start= to;
uchar *to_end= (uchar*) to + to_length - 1;
+ const uchar *from_end= (const uchar*) from + from_length;
my_charset_conv_mb_wc mb_wc= from_cs->cset->mb_wc;
my_charset_conv_wc_mb wc_mb= to_cs->cset->wc_mb;
uint error_count= 0;
while (1)
{
- /*
- Using 'from + 10' is safe:
- - it is enough to scan a single character in any character set.
- - if remaining string is shorter than 10, then mb_wc will return
- with error because of unexpected '\0' character.
- */
if ((cnvres= (*mb_wc)(from_cs, &wc,
- (uchar*) from, (uchar*) from + 10)) > 0)
+ (uchar*) from, from_end)) > 0)
{
if (!wc)
break;
diff --git a/sql/strfunc.h b/sql/strfunc.h
index 57c5427fcd0..7b031710c76 100644
--- a/sql/strfunc.h
+++ b/sql/strfunc.h
@@ -43,7 +43,7 @@ char *set_to_string(THD *thd, LEX_STRING *result, ulonglong set,
/*
These functions were protected by INNODB_COMPATIBILITY_HOOKS
*/
-uint strconvert(CHARSET_INFO *from_cs, const char *from,
+uint strconvert(CHARSET_INFO *from_cs, const char *from, uint from_length,
CHARSET_INFO *to_cs, char *to, uint to_length, uint *errors);
#endif /* STRFUNC_INCLUDED */
diff --git a/sql/structs.h b/sql/structs.h
index a3a54c524e6..e5e65e01064 100644
--- a/sql/structs.h
+++ b/sql/structs.h
@@ -89,8 +89,8 @@ struct ha_index_option_struct;
typedef struct st_key {
uint key_length; /* Tot length of key */
ulong flags; /* dupp key and pack flags */
- uint key_parts; /* How many key_parts */
- uint usable_key_parts; /* Should normally be = key_parts */
+ uint user_defined_key_parts; /* How many key_parts */
+ uint usable_key_parts; /* Should normally be = user_defined_key_parts */
uint ext_key_parts; /* Number of key parts in extended key */
ulong ext_key_flags; /* Flags for extended key */
key_part_map ext_key_part_map; /* Bitmap of pk key parts in extension */
@@ -256,10 +256,10 @@ typedef struct user_conn {
typedef struct st_user_stats
{
- char user[max(USERNAME_LENGTH, LIST_PROCESS_HOST_LEN) + 1];
+ char user[MY_MAX(USERNAME_LENGTH, LIST_PROCESS_HOST_LEN) + 1];
// Account name the user is mapped to when this is a user from mapped_user.
// Otherwise, the same value as user.
- char priv_user[max(USERNAME_LENGTH, LIST_PROCESS_HOST_LEN) + 1];
+ char priv_user[MY_MAX(USERNAME_LENGTH, LIST_PROCESS_HOST_LEN) + 1];
uint user_name_length;
uint total_connections;
uint concurrent_connections;
diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc
index 34ff98f3e78..457636629a1 100644
--- a/sql/sys_vars.cc
+++ b/sql/sys_vars.cc
@@ -48,6 +48,7 @@
// mysql_user_table_is_in_short_password_format
#include "derror.h" // read_texts
#include "sql_base.h" // close_cached_tables
+#include "hostname.h" // host_cache_size
#include <myisam.h>
#include "log_slow.h"
#include "debug_sync.h" // DEBUG_SYNC
@@ -75,22 +76,24 @@ static Sys_var_mybool Sys_pfs_enabled(
"performance_schema",
"Enable the performance schema.",
PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_enabled),
- CMD_LINE(OPT_ARG), DEFAULT(FALSE));
+ CMD_LINE(OPT_ARG), DEFAULT(TRUE));
-static Sys_var_ulong Sys_pfs_events_waits_history_long_size(
+static Sys_var_long Sys_pfs_events_waits_history_long_size(
"performance_schema_events_waits_history_long_size",
- "Number of rows in EVENTS_WAITS_HISTORY_LONG.",
+ "Number of rows in EVENTS_WAITS_HISTORY_LONG."
+ " Use 0 to disable, -1 for automated sizing.",
PARSED_EARLY READ_ONLY
GLOBAL_VAR(pfs_param.m_events_waits_history_long_sizing),
- CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 1024*1024),
- DEFAULT(PFS_WAITS_HISTORY_LONG_SIZE), BLOCK_SIZE(1));
+ CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 1024*1024),
+ DEFAULT(-1), BLOCK_SIZE(1));
-static Sys_var_ulong Sys_pfs_events_waits_history_size(
+static Sys_var_long Sys_pfs_events_waits_history_size(
"performance_schema_events_waits_history_size",
- "Number of rows per thread in EVENTS_WAITS_HISTORY.",
+ "Number of rows per thread in EVENTS_WAITS_HISTORY."
+ " Use 0 to disable, -1 for automated sizing.",
PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_events_waits_history_sizing),
- CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 1024),
- DEFAULT(PFS_WAITS_HISTORY_SIZE), BLOCK_SIZE(1));
+ CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 1024),
+ DEFAULT(-1), BLOCK_SIZE(1));
static Sys_var_ulong Sys_pfs_max_cond_classes(
"performance_schema_max_cond_classes",
@@ -99,12 +102,13 @@ static Sys_var_ulong Sys_pfs_max_cond_classes(
CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 256),
DEFAULT(PFS_MAX_COND_CLASS), BLOCK_SIZE(1));
-static Sys_var_ulong Sys_pfs_max_cond_instances(
+static Sys_var_long Sys_pfs_max_cond_instances(
"performance_schema_max_cond_instances",
- "Maximum number of instrumented condition objects.",
+ "Maximum number of instrumented condition objects."
+ " Use 0 to disable, -1 for automated sizing.",
PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_cond_sizing),
- CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 1024*1024),
- DEFAULT(PFS_MAX_COND), BLOCK_SIZE(1));
+ CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 1024*1024),
+ DEFAULT(-1), BLOCK_SIZE(1));
static Sys_var_ulong Sys_pfs_max_file_classes(
"performance_schema_max_file_classes",
@@ -120,19 +124,21 @@ static Sys_var_ulong Sys_pfs_max_file_handles(
CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 1024*1024),
DEFAULT(PFS_MAX_FILE_HANDLE), BLOCK_SIZE(1));
-static Sys_var_ulong Sys_pfs_max_file_instances(
+static Sys_var_long Sys_pfs_max_file_instances(
"performance_schema_max_file_instances",
- "Maximum number of instrumented files.",
+ "Maximum number of instrumented files."
+ " Use 0 to disable, -1 for automated sizing.",
PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_file_sizing),
- CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 1024*1024),
- DEFAULT(PFS_MAX_FILE), BLOCK_SIZE(1));
+ CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 1024*1024),
+ DEFAULT(-1), BLOCK_SIZE(1));
-static Sys_var_ulong Sys_pfs_max_sockets(
+static Sys_var_long Sys_pfs_max_sockets(
"performance_schema_max_socket_instances",
- "Maximum number of opened instrumented sockets.",
+ "Maximum number of opened instrumented sockets."
+ " Use 0 to disable, -1 for automated sizing.",
PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_socket_sizing),
- CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 1024*1024),
- DEFAULT(PFS_MAX_SOCKETS),
+ CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 1024*1024),
+ DEFAULT(-1),
BLOCK_SIZE(1));
static Sys_var_ulong Sys_pfs_max_socket_classes(
@@ -150,12 +156,13 @@ static Sys_var_ulong Sys_pfs_max_mutex_classes(
CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 256),
DEFAULT(PFS_MAX_MUTEX_CLASS), BLOCK_SIZE(1));
-static Sys_var_ulong Sys_pfs_max_mutex_instances(
+static Sys_var_long Sys_pfs_max_mutex_instances(
"performance_schema_max_mutex_instances",
- "Maximum number of instrumented MUTEX objects.",
+ "Maximum number of instrumented MUTEX objects."
+ " Use 0 to disable, -1 for automated sizing.",
PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_mutex_sizing),
- CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 100*1024*1024),
- DEFAULT(PFS_MAX_MUTEX), BLOCK_SIZE(1));
+ CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 100*1024*1024),
+ DEFAULT(-1), BLOCK_SIZE(1));
static Sys_var_ulong Sys_pfs_max_rwlock_classes(
"performance_schema_max_rwlock_classes",
@@ -164,26 +171,29 @@ static Sys_var_ulong Sys_pfs_max_rwlock_classes(
CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 256),
DEFAULT(PFS_MAX_RWLOCK_CLASS), BLOCK_SIZE(1));
-static Sys_var_ulong Sys_pfs_max_rwlock_instances(
+static Sys_var_long Sys_pfs_max_rwlock_instances(
"performance_schema_max_rwlock_instances",
- "Maximum number of instrumented RWLOCK objects.",
+ "Maximum number of instrumented RWLOCK objects."
+ " Use 0 to disable, -1 for automated sizing.",
PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_rwlock_sizing),
- CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 100*1024*1024),
- DEFAULT(PFS_MAX_RWLOCK), BLOCK_SIZE(1));
+ CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 100*1024*1024),
+ DEFAULT(-1), BLOCK_SIZE(1));
-static Sys_var_ulong Sys_pfs_max_table_handles(
+static Sys_var_long Sys_pfs_max_table_handles(
"performance_schema_max_table_handles",
- "Maximum number of opened instrumented tables.",
+ "Maximum number of opened instrumented tables."
+ " Use 0 to disable, -1 for automated sizing.",
PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_table_sizing),
- CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 1024*1024),
- DEFAULT(PFS_MAX_TABLE), BLOCK_SIZE(1));
+ CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 1024*1024),
+ DEFAULT(-1), BLOCK_SIZE(1));
-static Sys_var_ulong Sys_pfs_max_table_instances(
+static Sys_var_long Sys_pfs_max_table_instances(
"performance_schema_max_table_instances",
- "Maximum number of instrumented tables.",
+ "Maximum number of instrumented tables."
+ " Use 0 to disable, -1 for automated sizing.",
PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_table_share_sizing),
- CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 1024*1024),
- DEFAULT(PFS_MAX_TABLE_SHARE), BLOCK_SIZE(1));
+ CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 1024*1024),
+ DEFAULT(-1), BLOCK_SIZE(1));
static Sys_var_ulong Sys_pfs_max_thread_classes(
"performance_schema_max_thread_classes",
@@ -192,12 +202,13 @@ static Sys_var_ulong Sys_pfs_max_thread_classes(
CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 256),
DEFAULT(PFS_MAX_THREAD_CLASS), BLOCK_SIZE(1));
-static Sys_var_ulong Sys_pfs_max_thread_instances(
+static Sys_var_long Sys_pfs_max_thread_instances(
"performance_schema_max_thread_instances",
- "Maximum number of instrumented threads.",
+ "Maximum number of instrumented threads."
+ " Use 0 to disable, -1 for automated sizing.",
PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_thread_sizing),
- CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 1024*1024),
- DEFAULT(PFS_MAX_THREAD), BLOCK_SIZE(1));
+ CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 1024*1024),
+ DEFAULT(-1), BLOCK_SIZE(1));
static Sys_var_ulong Sys_pfs_setup_actors_size(
"performance_schema_setup_actors_size",
@@ -215,28 +226,31 @@ static Sys_var_ulong Sys_pfs_setup_objects_size(
DEFAULT(PFS_MAX_SETUP_OBJECT),
BLOCK_SIZE(1));
-static Sys_var_ulong Sys_pfs_accounts_size(
+static Sys_var_long Sys_pfs_accounts_size(
"performance_schema_accounts_size",
- "Maximum number of instrumented user@host accounts.",
+ "Maximum number of instrumented user@host accounts."
+ " Use 0 to disable, -1 for automated sizing.",
PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_account_sizing),
- CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 1024*1024),
- DEFAULT(PFS_MAX_ACCOUNT),
+ CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 1024*1024),
+ DEFAULT(-1),
BLOCK_SIZE(1));
-static Sys_var_ulong Sys_pfs_hosts_size(
+static Sys_var_long Sys_pfs_hosts_size(
"performance_schema_hosts_size",
- "Maximum number of instrumented hosts.",
+ "Maximum number of instrumented hosts."
+ " Use 0 to disable, -1 for automated sizing.",
PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_host_sizing),
- CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 1024*1024),
- DEFAULT(PFS_MAX_HOST),
+ CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 1024*1024),
+ DEFAULT(-1),
BLOCK_SIZE(1));
-static Sys_var_ulong Sys_pfs_users_size(
+static Sys_var_long Sys_pfs_users_size(
"performance_schema_users_size",
- "Maximum number of instrumented users.",
+ "Maximum number of instrumented users."
+ " Use 0 to disable, -1 for automated sizing.",
PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_user_sizing),
- CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 1024*1024),
- DEFAULT(PFS_MAX_USER),
+ CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 1024*1024),
+ DEFAULT(-1),
BLOCK_SIZE(1));
static Sys_var_ulong Sys_pfs_max_stage_classes(
@@ -247,20 +261,22 @@ static Sys_var_ulong Sys_pfs_max_stage_classes(
DEFAULT(PFS_MAX_STAGE_CLASS),
BLOCK_SIZE(1));
-static Sys_var_ulong Sys_pfs_events_stages_history_long_size(
+static Sys_var_long Sys_pfs_events_stages_history_long_size(
"performance_schema_events_stages_history_long_size",
- "Number of rows in EVENTS_STAGES_HISTORY_LONG.",
+ "Number of rows in EVENTS_STAGES_HISTORY_LONG."
+ " Use 0 to disable, -1 for automated sizing.",
PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_events_stages_history_long_sizing),
- CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 1024*1024),
- DEFAULT(PFS_STAGES_HISTORY_LONG_SIZE),
+ CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 1024*1024),
+ DEFAULT(-1),
BLOCK_SIZE(1));
-static Sys_var_ulong Sys_pfs_events_stages_history_size(
+static Sys_var_long Sys_pfs_events_stages_history_size(
"performance_schema_events_stages_history_size",
- "Number of rows per thread in EVENTS_STAGES_HISTORY.",
+ "Number of rows per thread in EVENTS_STAGES_HISTORY."
+ " Use 0 to disable, -1 for automated sizing.",
PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_events_stages_history_sizing),
- CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 1024),
- DEFAULT(PFS_STAGES_HISTORY_SIZE),
+ CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 1024),
+ DEFAULT(-1),
BLOCK_SIZE(1));
/**
@@ -280,28 +296,41 @@ static Sys_var_ulong Sys_pfs_max_statement_classes(
DEFAULT((ulong) SQLCOM_END + (ulong) COM_END + 3),
BLOCK_SIZE(1));
-static Sys_var_ulong Sys_pfs_events_statements_history_long_size(
+static Sys_var_long Sys_pfs_events_statements_history_long_size(
"performance_schema_events_statements_history_long_size",
- "Number of rows in EVENTS_STATEMENTS_HISTORY_LONG.",
+ "Number of rows in EVENTS_STATEMENTS_HISTORY_LONG."
+ " Use 0 to disable, -1 for automated sizing.",
PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_events_statements_history_long_sizing),
- CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 1024*1024),
- DEFAULT(PFS_STATEMENTS_HISTORY_LONG_SIZE),
+ CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 1024*1024),
+ DEFAULT(-1),
BLOCK_SIZE(1));
-static Sys_var_ulong Sys_pfs_events_statements_history_size(
+static Sys_var_long Sys_pfs_events_statements_history_size(
"performance_schema_events_statements_history_size",
- "Number of rows per thread in EVENTS_STATEMENTS_HISTORY.",
+ "Number of rows per thread in EVENTS_STATEMENTS_HISTORY."
+ " Use 0 to disable, -1 for automated sizing.",
PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_events_statements_history_sizing),
- CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 1024),
- DEFAULT(PFS_STATEMENTS_HISTORY_SIZE),
+ CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 1024),
+ DEFAULT(-1),
BLOCK_SIZE(1));
-static Sys_var_ulong Sys_pfs_digest_size(
+static Sys_var_long Sys_pfs_digest_size(
"performance_schema_digests_size",
- "Size of the statement digest.",
+ "Size of the statement digest."
+ " Use 0 to disable, -1 for automated sizing.",
PARSED_EARLY READ_ONLY GLOBAL_VAR(pfs_param.m_digest_sizing),
- CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 200),
- DEFAULT(PFS_DIGEST_SIZE),
+ CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 200),
+ DEFAULT(-1),
+ BLOCK_SIZE(1));
+
+static Sys_var_long Sys_pfs_connect_attrs_size(
+ "performance_schema_session_connect_attrs_size",
+ "Size of session attribute string buffer per thread."
+ " Use 0 to disable, -1 for automated sizing.",
+ PARSED_EARLY READ_ONLY
+ GLOBAL_VAR(pfs_param.m_session_connect_attrs_sizing),
+ CMD_LINE(REQUIRED_ARG), VALID_RANGE(-1, 1024 * 1024),
+ DEFAULT(-1),
BLOCK_SIZE(1));
#endif /* WITH_PERFSCHEMA_STORAGE_ENGINE */
@@ -1195,7 +1224,7 @@ static bool check_max_allowed_packet(sys_var *self, THD *thd, set_var *var)
val= var->save_result.ulonglong_value;
if (val < (longlong) global_system_variables.net_buffer_length)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
WARN_OPTION_BELOW_LIMIT, ER(WARN_OPTION_BELOW_LIMIT),
"max_allowed_packet", "net_buffer_length");
}
@@ -1262,8 +1291,9 @@ static bool fix_max_connections(sys_var *self, THD *thd, enum_var_type type)
// children, to avoid "too many connections" error in a common setup
static Sys_var_ulong Sys_max_connections(
"max_connections", "The number of simultaneous clients allowed",
- GLOBAL_VAR(max_connections), CMD_LINE(REQUIRED_ARG),
- VALID_RANGE(1, 100000), DEFAULT(151), BLOCK_SIZE(1), NO_MUTEX_GUARD,
+ PARSED_EARLY GLOBAL_VAR(max_connections), CMD_LINE(REQUIRED_ARG),
+ VALID_RANGE(1, 100000),
+ DEFAULT(MAX_CONNECTIONS_DEFAULT), BLOCK_SIZE(1), NO_MUTEX_GUARD,
NOT_IN_BINLOG, ON_CHECK(0), ON_UPDATE(fix_max_connections));
static Sys_var_ulong Sys_max_connect_errors(
@@ -1320,6 +1350,12 @@ static Sys_var_ulong Sys_metadata_locks_cache_size(
VALID_RANGE(1, 1024*1024), DEFAULT(MDL_LOCKS_CACHE_SIZE_DEFAULT),
BLOCK_SIZE(1));
+static Sys_var_ulong Sys_metadata_locks_hash_instances(
+ "metadata_locks_hash_instances", "Number of metadata locks hash instances",
+ READ_ONLY GLOBAL_VAR(mdl_locks_hash_partitions), CMD_LINE(REQUIRED_ARG),
+ VALID_RANGE(1, 1024), DEFAULT(MDL_LOCKS_HASH_PARTITIONS_DEFAULT),
+ BLOCK_SIZE(1));
+
/*
"pseudo_thread_id" variable used in the test suite to detect 32/64bit
systems. If you change it to something else then ulong then fix the tests
@@ -1695,7 +1731,7 @@ static bool check_net_buffer_length(sys_var *self, THD *thd, set_var *var)
val= var->save_result.ulonglong_value;
if (val > (longlong) global_system_variables.max_allowed_packet)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
WARN_OPTION_BELOW_LIMIT, ER(WARN_OPTION_BELOW_LIMIT),
"max_allowed_packet", "net_buffer_length");
}
@@ -2258,7 +2294,7 @@ static bool fix_query_cache_size(sys_var *self, THD *thd, enum_var_type type)
requested cache size. See also query_cache_size_arg
*/
if (query_cache_size != new_cache_size)
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARN_QC_RESIZE, ER(ER_WARN_QC_RESIZE),
query_cache_size, new_cache_size);
@@ -2976,7 +3012,7 @@ static bool fix_autocommit(sys_var *self, THD *thd, enum_var_type type)
Don't close thread tables or release metadata locks: if we do so, we
risk releasing locks/closing tables of expressions used to assign
other variables, as in:
- set @var=my_stored_function1(), @@autocommit=1, @var2=(select max(a)
+ set @var=my_stored_function1(), @@autocommit=1, @var2=(select MY_MAX(a)
from my_table), ...
The locks will be released at statement end anyway, as SET
statement that assigns autocommit is marked to commit
@@ -3315,7 +3351,7 @@ static Sys_var_session_special Sys_rand_seed2(
static ulonglong read_error_count(THD *thd)
{
- return thd->warning_info->error_count();
+ return thd->get_stmt_da()->error_count();
}
// this really belongs to the SHOW STATUS
static Sys_var_session_special Sys_error_count(
@@ -3327,7 +3363,7 @@ static Sys_var_session_special Sys_error_count(
static ulonglong read_warning_count(THD *thd)
{
- return thd->warning_info->warn_count();
+ return thd->get_stmt_da()->warn_count();
}
// this really belongs to the SHOW STATUS
static Sys_var_session_special Sys_warning_count(
@@ -3424,6 +3460,14 @@ static bool check_log_path(sys_var *self, THD *thd, set_var *var)
if (!path_length)
return true;
+ if (!is_filename_allowed(var->save_result.string_value.str,
+ var->save_result.string_value.length, TRUE))
+ {
+ my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0),
+ self->name.str, var->save_result.string_value.str);
+ return true;
+ }
+
MY_STAT f_stat;
if (my_stat(path, &f_stat, MYF(0)))
@@ -3703,13 +3747,13 @@ bool Sys_var_rpl_filter::global_update(THD *thd, set_var *var)
{
mi= master_info_index->
get_master_info(&thd->variables.default_master_connection,
- MYSQL_ERROR::WARN_LEVEL_ERROR);
+ Sql_condition::WARN_LEVEL_ERROR);
}
else // has base name
{
mi= master_info_index->
get_master_info(&var->base,
- MYSQL_ERROR::WARN_LEVEL_WARN);
+ Sql_condition::WARN_LEVEL_WARN);
}
if (mi)
@@ -3775,13 +3819,13 @@ uchar *Sys_var_rpl_filter::global_value_ptr(THD *thd, LEX_STRING *base)
{
mi= master_info_index->
get_master_info(&thd->variables.default_master_connection,
- MYSQL_ERROR::WARN_LEVEL_ERROR);
+ Sql_condition::WARN_LEVEL_ERROR);
}
else // has base name
{
mi= master_info_index->
get_master_info(base,
- MYSQL_ERROR::WARN_LEVEL_WARN);
+ Sql_condition::WARN_LEVEL_WARN);
}
mysql_mutex_lock(&LOCK_global_system_variables);
@@ -3889,7 +3933,7 @@ get_master_info_uint_value(THD *thd, ptrdiff_t offset)
mysql_mutex_lock(&LOCK_active_mi);
mi= master_info_index->
get_master_info(&thd->variables.default_master_connection,
- MYSQL_ERROR::WARN_LEVEL_WARN);
+ Sql_condition::WARN_LEVEL_WARN);
if (mi)
{
mysql_mutex_lock(&mi->rli.data_lock);
@@ -3914,7 +3958,7 @@ bool update_multi_source_variable(sys_var *self_var, THD *thd,
mysql_mutex_lock(&LOCK_active_mi);
mi= master_info_index->
get_master_info(&thd->variables.default_master_connection,
- MYSQL_ERROR::WARN_LEVEL_ERROR);
+ Sql_condition::WARN_LEVEL_ERROR);
if (mi)
{
mysql_mutex_lock(&mi->rli.run_lock);
@@ -4063,7 +4107,7 @@ static bool check_locale(sys_var *self, THD *thd, set_var *var)
mysql_mutex_unlock(&LOCK_error_messages);
if (res)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR,
"Can't process error message file for locale '%s'",
locale->name);
return true;
@@ -4091,6 +4135,22 @@ static Sys_var_tz Sys_time_zone(
SESSION_VAR(time_zone), NO_CMD_LINE,
DEFAULT(&default_tz), NO_MUTEX_GUARD, IN_BINLOG);
+static bool fix_host_cache_size(sys_var *, THD *, enum_var_type)
+{
+ hostname_cache_resize((uint) host_cache_size);
+ return false;
+}
+
+static Sys_var_ulong Sys_host_cache_size(
+ "host_cache_size",
+ "How many host names should be cached to avoid resolving.",
+ GLOBAL_VAR(host_cache_size),
+ CMD_LINE(REQUIRED_ARG), VALID_RANGE(0, 65536),
+ DEFAULT(HOST_CACHE_SIZE),
+ BLOCK_SIZE(1),
+ NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(NULL),
+ ON_UPDATE(fix_host_cache_size));
+
static Sys_var_charptr Sys_ignore_db_dirs(
"ignore_db_dirs",
"Specifies a directory to add to the ignore list when collecting "
@@ -4354,7 +4414,7 @@ static bool check_pseudo_slave_mode(sys_var *self, THD *thd, set_var *var)
else if (previous_val && val)
goto ineffective;
else if (!previous_val && val)
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
ER_WRONG_VALUE_FOR_VAR,
"'pseudo_slave_mode' is already ON.");
}
@@ -4363,7 +4423,7 @@ static bool check_pseudo_slave_mode(sys_var *self, THD *thd, set_var *var)
if (!previous_val && !val)
goto ineffective;
else if (previous_val && !val)
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
ER_WRONG_VALUE_FOR_VAR,
"Slave applier execution mode not active, "
"statement ineffective.");
@@ -4371,7 +4431,7 @@ static bool check_pseudo_slave_mode(sys_var *self, THD *thd, set_var *var)
goto end;
ineffective:
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
ER_WRONG_VALUE_FOR_VAR,
"'pseudo_slave_mode' change was ineffective.");
diff --git a/sql/sys_vars.h b/sql/sys_vars.h
index bf17040e65c..179069040ff 100644
--- a/sql/sys_vars.h
+++ b/sql/sys_vars.h
@@ -224,6 +224,8 @@ typedef Sys_var_integer<uint, GET_UINT, SHOW_UINT> Sys_var_uint;
typedef Sys_var_integer<ulong, GET_ULONG, SHOW_ULONG> Sys_var_ulong;
typedef Sys_var_integer<ha_rows, GET_HA_ROWS, SHOW_HA_ROWS> Sys_var_harows;
typedef Sys_var_integer<ulonglong, GET_ULL, SHOW_ULONGLONG> Sys_var_ulonglong;
+typedef Sys_var_integer<long, GET_LONG, SHOW_LONG> Sys_var_long;
+
/**
Helper class for variables that take values from a TYPELIB
diff --git a/sql/table.cc b/sql/table.cc
index 0be93aaec65..266749d98a2 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -148,7 +148,7 @@ View_creation_ctx * View_creation_ctx::create(THD *thd,
if (!view->view_client_cs_name.str ||
!view->view_connection_cl_name.str)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_VIEW_NO_CREATION_CTX,
ER(ER_VIEW_NO_CREATION_CTX),
(const char *) view->db,
@@ -182,7 +182,7 @@ View_creation_ctx * View_creation_ctx::create(THD *thd,
(const char *) view->view_client_cs_name.str,
(const char *) view->view_connection_cl_name.str);
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_VIEW_INVALID_CREATION_CTX,
ER(ER_VIEW_INVALID_CREATION_CTX),
(const char *) view->db,
@@ -273,7 +273,7 @@ TABLE_CATEGORY get_table_category(const LEX_STRING *db, const LEX_STRING *name)
/*
- Allocate a setup TABLE_SHARE structure
+ Allocate and setup a TABLE_SHARE structure
SYNOPSIS
alloc_table_share()
@@ -287,7 +287,7 @@ TABLE_CATEGORY get_table_category(const LEX_STRING *db, const LEX_STRING *name)
*/
TABLE_SHARE *alloc_table_share(const char *db, const char *table_name,
- char *key, uint key_length)
+ const char *key, uint key_length)
{
MEM_ROOT mem_root;
TABLE_SHARE *share;
@@ -336,6 +336,8 @@ TABLE_SHARE *alloc_table_share(const char *db, const char *table_name,
init_sql_alloc(&share->stats_cb.mem_root, TABLE_ALLOC_BLOCK_SIZE, 0, MYF(0));
memcpy((char*) &share->mem_root, (char*) &mem_root, sizeof(mem_root));
+ mysql_mutex_init(key_TABLE_SHARE_LOCK_share,
+ &share->LOCK_share, MY_MUTEX_INIT_SLOW);
mysql_mutex_init(key_TABLE_SHARE_LOCK_ha_data,
&share->LOCK_ha_data, MY_MUTEX_INIT_FAST);
}
@@ -419,20 +421,27 @@ void TABLE_SHARE::destroy()
{
uint idx;
KEY *info_it;
+ DBUG_ENTER("TABLE_SHARE::destroy");
+ DBUG_PRINT("info", ("db: %s table: %s", db.str, table_name.str));
+
+ if (ha_share)
+ {
+ delete ha_share;
+ ha_share= NULL; // Safety
+ }
- if (tmp_table == NO_TMP_TABLE)
- mysql_mutex_lock(&LOCK_ha_data);
free_root(&stats_cb.mem_root, MYF(0));
stats_cb.stats_can_be_read= FALSE;
stats_cb.stats_is_read= FALSE;
stats_cb.histograms_can_be_read= FALSE;
stats_cb.histograms_are_read= FALSE;
- if (tmp_table == NO_TMP_TABLE)
- mysql_mutex_unlock(&LOCK_ha_data);
- /* The mutex is initialized only for shares that are part of the TDC */
+ /* The mutexes are initialized only for shares that are part of the TDC */
if (tmp_table == NO_TMP_TABLE)
+ {
+ mysql_mutex_destroy(&LOCK_share);
mysql_mutex_destroy(&LOCK_ha_data);
+ }
my_hash_free(&name_hash);
plugin_unlock(NULL, db_plugin);
@@ -448,25 +457,20 @@ void TABLE_SHARE::destroy()
info_it->flags= 0;
}
}
- if (ha_data_destroy)
- {
- ha_data_destroy(ha_data);
- ha_data_destroy= NULL;
- }
+
#ifdef WITH_PARTITION_STORAGE_ENGINE
plugin_unlock(NULL, default_part_plugin);
- if (ha_part_data_destroy)
- {
- ha_part_data_destroy(ha_part_data);
- ha_part_data_destroy= NULL;
- }
#endif /* WITH_PARTITION_STORAGE_ENGINE */
+
+ PSI_CALL_release_table_share(m_psi);
+
/*
Make a copy since the share is allocated in its own root,
and free_root() updates its argument after freeing the memory.
*/
MEM_ROOT own_root= mem_root;
free_root(&own_root, MYF(0));
+ DBUG_VOID_RETURN;
}
/*
@@ -876,8 +880,10 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
share->page_checksum= (ha_choice) ((frm_image[39] >> 2) & 3);
share->row_type= (enum row_type) frm_image[40];
share->table_charset= get_charset((((uint) frm_image[41]) << 8) +
- (uint) frm_image[38],MYF(0));
+ (uint) frm_image[38], MYF(0));
share->null_field_first= 1;
+ share->stats_sample_pages= uint2korr(frm_image+42);
+ share->stats_auto_recalc= (enum_stats_auto_recalc)(frm_image[44]);
}
if (!share->table_charset)
{
@@ -893,8 +899,6 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
share->table_charset= default_charset_info;
}
share->db_record_offset= 1;
- if (db_create_options & HA_OPTION_LONG_BLOB_PTR)
- share->blob_ptr_size= portable_sizeof_char_ptr;
share->max_rows= uint4korr(frm_image+18);
share->min_rows= uint4korr(frm_image+22);
@@ -957,7 +961,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
goto err;
keyinfo->flags= (uint) uint2korr(strpos) ^ HA_NOSAME;
keyinfo->key_length= (uint) uint2korr(strpos+2);
- keyinfo->key_parts= (uint) strpos[4];
+ keyinfo->user_defined_key_parts= (uint) strpos[4];
keyinfo->algorithm= (enum ha_key_alg) strpos[5];
keyinfo->block_size= uint2korr(strpos+6);
strpos+=8;
@@ -968,7 +972,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
goto err;
keyinfo->flags= ((uint) strpos[0]) ^ HA_NOSAME;
keyinfo->key_length= (uint) uint2korr(strpos+1);
- keyinfo->key_parts= (uint) strpos[3];
+ keyinfo->user_defined_key_parts= (uint) strpos[3];
keyinfo->algorithm= HA_KEY_ALG_UNDEF;
strpos+=4;
}
@@ -976,7 +980,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
if (i == 0)
{
ext_key_parts= key_parts +
- (share->use_ext_keys ? first_keyinfo.key_parts*(keys-1) : 0);
+ (share->use_ext_keys ? first_keyinfo.user_defined_key_parts*(keys-1) : 0);
n_length=keys * sizeof(KEY) + ext_key_parts * sizeof(KEY_PART_INFO);
if (!(keyinfo= (KEY*) alloc_root(&share->mem_root,
@@ -990,10 +994,10 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
sizeof(ulong) * ext_key_parts)))
goto err;
first_key_part= key_part;
- first_key_parts= first_keyinfo.key_parts;
+ first_key_parts= first_keyinfo.user_defined_key_parts;
keyinfo->flags= first_keyinfo.flags;
keyinfo->key_length= first_keyinfo.key_length;
- keyinfo->key_parts= first_keyinfo.key_parts;
+ keyinfo->user_defined_key_parts= first_keyinfo.user_defined_key_parts;
keyinfo->algorithm= first_keyinfo.algorithm;
if (new_frm_ver >= 3)
keyinfo->block_size= first_keyinfo.block_size;
@@ -1001,7 +1005,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
keyinfo->key_part= key_part;
keyinfo->rec_per_key= rec_per_key;
- for (j=keyinfo->key_parts ; j-- ; key_part++)
+ for (j=keyinfo->user_defined_key_parts ; j-- ; key_part++)
{
if (strpos + (new_frm_ver >= 1 ? 9 : 7) >= frm_image_end)
goto err;
@@ -1029,17 +1033,22 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
}
key_part->store_length=key_part->length;
}
- keyinfo->ext_key_parts= keyinfo->key_parts;
+
+ /*
+ Add primary key to end of extended keys for non unique keys for
+ storage engines that supports it.
+ */
+ keyinfo->ext_key_parts= keyinfo->user_defined_key_parts;
keyinfo->ext_key_flags= keyinfo->flags;
keyinfo->ext_key_part_map= 0;
- if (share->use_ext_keys && i)
+ if (share->use_ext_keys && i && !(keyinfo->flags & HA_NOSAME))
{
keyinfo->ext_key_part_map= 0;
for (j= 0;
j < first_key_parts && keyinfo->ext_key_parts < MAX_REF_PARTS;
j++)
{
- uint key_parts= keyinfo->key_parts;
+ uint key_parts= keyinfo->user_defined_key_parts;
KEY_PART_INFO* curr_key_part= keyinfo->key_part;
KEY_PART_INFO* curr_key_part_end= curr_key_part+key_parts;
for ( ; curr_key_part < curr_key_part_end; curr_key_part++)
@@ -1359,6 +1368,9 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
plugin_hton(se_plugin))))
goto err;
+ if (handler_file->set_ha_share_ref(&share->ha_share))
+ goto err;
+
record= share->default_values-1; /* Fieldstart = 1 */
null_bits_are_used= share->null_fields != 0;
if (share->null_field_first)
@@ -1556,7 +1568,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
"Please do \"ALTER TABLE '%s' FORCE\" to fix it!",
share->fieldnames.type_names[i], share->table_name.str,
share->table_name.str);
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_CRASHED_ON_USAGE,
"Found incompatible DECIMAL field '%s' in %s; "
"Please do \"ALTER TABLE '%s' FORCE\" to fix it!",
@@ -1687,7 +1699,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
Do not extend the key that contains a component
defined over the beginning of a field.
*/
- for (i= 0; i < keyinfo->key_parts; i++)
+ for (i= 0; i < keyinfo->user_defined_key_parts; i++)
{
uint fieldnr= keyinfo->key_part[i].fieldnr;
if (share->field[fieldnr-1]->key_length() !=
@@ -1698,11 +1710,11 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
}
}
- if (add_first_key_parts < keyinfo->ext_key_parts-keyinfo->key_parts)
+ if (add_first_key_parts < keyinfo->ext_key_parts-keyinfo->user_defined_key_parts)
{
share->ext_key_parts-= keyinfo->ext_key_parts;
key_part_map ext_key_part_map= keyinfo->ext_key_part_map;
- keyinfo->ext_key_parts= keyinfo->key_parts;
+ keyinfo->ext_key_parts= keyinfo->user_defined_key_parts;
keyinfo->ext_key_flags= keyinfo->flags;
keyinfo->ext_key_part_map= 0;
for (i= 0; i < add_first_key_parts; i++)
@@ -1735,7 +1747,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
*/
primary_key=key;
key_part= keyinfo->key_part;
- for (i=0 ; i < keyinfo->key_parts ;i++)
+ for (i=0 ; i < keyinfo->user_defined_key_parts ;i++)
{
uint fieldnr= key_part[i].fieldnr;
if (!fieldnr ||
@@ -1751,7 +1763,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
key_part= keyinfo->key_part;
uint key_parts= share->use_ext_keys ? keyinfo->ext_key_parts :
- keyinfo->key_parts;
+ keyinfo->user_defined_key_parts;
for (i=0; i < key_parts; key_part++, i++)
{
Field *field;
@@ -1791,7 +1803,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
if (i == 0 && key != primary_key)
field->flags |= (((keyinfo->flags & HA_NOSAME) &&
- (keyinfo->key_parts == 1)) ?
+ (keyinfo->user_defined_key_parts == 1)) ?
UNIQUE_KEY_FLAG : MULTIPLE_KEY_FLAG);
if (i == 0)
field->key_start.set_bit(key);
@@ -1802,7 +1814,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
{
share->keys_for_keyread.set_bit(key);
field->part_of_key.set_bit(key);
- if (i < keyinfo->key_parts)
+ if (i < keyinfo->user_defined_key_parts)
field->part_of_key_not_clustered.set_bit(key);
}
if (handler_file->index_flags(key, i, 1) & HA_READ_ORDER)
@@ -1848,7 +1860,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
"Please do \"ALTER TABLE '%s' FORCE \" to fix it!",
share->table_name.str,
share->table_name.str);
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_CRASHED_ON_USAGE,
"Found wrong key definition in %s; "
"Please do \"ALTER TABLE '%s' FORCE\" to fix "
@@ -1876,7 +1888,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
keyinfo->usable_key_parts= usable_parts; // Filesort
set_if_bigger(share->max_key_length,keyinfo->key_length+
- keyinfo->key_parts);
+ keyinfo->user_defined_key_parts);
share->total_key_length+= keyinfo->key_length;
/*
MERGE tables do not have unique indexes. But every key could be
@@ -1894,7 +1906,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
If we are using an integer as the primary key then allow the user to
refer to it as '_rowid'
*/
- if (share->key_info[primary_key].key_parts == 1)
+ if (share->key_info[primary_key].user_defined_key_parts == 1)
{
Field *field= share->key_info[primary_key].key_part[0].field;
if (field && field->result_type() == INT_RESULT)
@@ -1993,18 +2005,6 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
delete handler_file;
plugin_unlock(0, se_plugin);
my_hash_free(&share->name_hash);
- if (share->ha_data_destroy)
- {
- share->ha_data_destroy(share->ha_data);
- share->ha_data_destroy= NULL;
- }
-#ifdef WITH_PARTITION_STORAGE_ENGINE
- if (share->ha_part_data_destroy)
- {
- share->ha_part_data_destroy(share->ha_part_data);
- share->ha_data_destroy= NULL;
- }
-#endif /* WITH_PARTITION_STORAGE_ENGINE */
if (!thd->is_error())
open_table_error(share, OPEN_FRM_CORRUPTED, share->open_errno);
@@ -2064,6 +2064,8 @@ int TABLE_SHARE::init_from_sql_statement_string(THD *thd, bool write,
LEX *old_lex;
Query_arena *arena, backup;
LEX tmp_lex;
+ KEY *unused1;
+ uint unused2;
LEX_CUSTRING frm= {0,0};
DBUG_ENTER("TABLE_SHARE::init_from_sql_statement_string");
@@ -2105,7 +2107,7 @@ int TABLE_SHARE::init_from_sql_statement_string(THD *thd, bool write,
file= mysql_create_frm_image(thd, db.str, table_name.str,
&thd->lex->create_info, &thd->lex->alter_info,
- C_ORDINARY_CREATE, &frm);
+ C_ORDINARY_CREATE, &unused1, &unused2, &frm);
error|= file == 0;
delete file;
@@ -2516,6 +2518,9 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share,
if (!(outparam->file= get_new_handler(share, &outparam->mem_root,
share->db_type())))
goto err;
+
+ if (outparam->file->set_ha_share_ref(&share->ha_share))
+ goto err;
}
else
{
@@ -2615,7 +2620,7 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share,
key_info->key_part= key_part;
key_part_end= key_part + (share->use_ext_keys ? key_info->ext_key_parts :
- key_info->key_parts) ;
+ key_info->user_defined_key_parts) ;
for ( ; key_part < key_part_end; key_part++)
{
Field *field= key_part->field= outparam->field[key_part->fieldnr - 1];
@@ -2633,7 +2638,7 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share,
}
}
if (!share->use_ext_keys)
- key_part+= key_info->ext_key_parts - key_info->key_parts;
+ key_part+= key_info->ext_key_parts - key_info->user_defined_key_parts;
}
}
@@ -2726,8 +2731,9 @@ enum open_frm_error open_table_from_share(THD *thd, TABLE_SHARE *share,
}
outparam->part_info->is_auto_partitioned= share->auto_partitioned;
DBUG_PRINT("info", ("autopartitioned: %u", share->auto_partitioned));
- /* we should perform the fix_partition_func in either local or
- caller's arena depending on work_part_info_used value
+ /*
+ We should perform the fix_partition_func in either local or
+ caller's arena depending on work_part_info_used value.
*/
if (!work_part_info_used)
tmp= fix_partition_func(thd, outparam, is_create_table);
@@ -2836,9 +2842,22 @@ partititon_err:
bzero((char*) bitmaps, bitmap_size*3);
#endif
- outparam->no_replicate= outparam->file &&
- test(outparam->file->ha_table_flags() &
- HA_HAS_OWN_BINLOGGING);
+ if (share->table_category == TABLE_CATEGORY_LOG)
+ {
+ outparam->no_replicate= TRUE;
+ }
+ else if (outparam->file)
+ {
+ handler::Table_flags flags= outparam->file->ha_table_flags();
+ outparam->no_replicate= ! test(flags & (HA_BINLOG_STMT_CAPABLE
+ | HA_BINLOG_ROW_CAPABLE))
+ || test(flags & HA_HAS_OWN_BINLOGGING);
+ }
+ else
+ {
+ outparam->no_replicate= FALSE;
+ }
+
thd->status_var.opened_tables++;
thd->lex->context_analysis_only= save_context_analysis_only;
@@ -2898,6 +2917,7 @@ int closefrm(register TABLE *table, bool free_share)
#ifdef WITH_PARTITION_STORAGE_ENGINE
if (table->part_info)
{
+ /* Allocated through table->mem_root, freed below */
free_items(table->part_info->item_free_list);
table->part_info->item_free_list= 0;
table->part_info= 0;
@@ -3236,11 +3256,10 @@ void prepare_frm_header(THD *thd, uint reclength, uchar *fileinfo,
fileinfo[39]= (uchar) ((uint) create_info->transactional |
((uint) create_info->page_checksum << 2));
fileinfo[40]= (uchar) create_info->row_type;
- /* Next few bytes where for RAID support */
+ /* Bytes 41-46 were for RAID support; now reused for other purposes */
fileinfo[41]= (uchar) (csid >> 8);
- fileinfo[42]= 0;
- fileinfo[43]= 0;
- fileinfo[44]= 0;
+ int2store(fileinfo+42, create_info->stats_sample_pages & 0xffff);
+ fileinfo[44]= (uchar) create_info->stats_auto_recalc;
fileinfo[45]= 0;
fileinfo[46]= 0;
int4store(fileinfo+47, key_length);
@@ -3665,18 +3684,18 @@ Table_check_intact::check(TABLE *table, const TABLE_FIELD_DEF *table_def)
else
{
KEY *pk= &table->s->key_info[table->s->primary_key];
- if (pk->key_parts != table_def->primary_key_parts)
+ if (pk->user_defined_key_parts != table_def->primary_key_parts)
{
report_error(0, "Incorrect definition of table %s.%s: "
"Expected primary key to have %u columns, but instead "
"found %u columns.", table->s->db.str,
table->alias.c_ptr(), table_def->primary_key_parts,
- pk->key_parts);
+ pk->user_defined_key_parts);
error= TRUE;
}
else
{
- for (i= 0; i < pk->key_parts; ++i)
+ for (i= 0; i < pk->user_defined_key_parts; ++i)
{
if (table_def->primary_key_columns[i] + 1 != pk->key_part[i].fieldnr)
{
@@ -3750,7 +3769,7 @@ bool TABLE_SHARE::visit_subgraph(Wait_for_flush *wait_for_flush,
if (gvisitor->m_lock_open_count++ == 0)
mysql_mutex_lock(&LOCK_open);
- I_P_List_iterator <TABLE, TABLE_share> tables_it(used_tables);
+ TABLE_list::Iterator tables_it(used_tables);
/*
In case of multiple searches running in parallel, avoid going
@@ -4418,27 +4437,32 @@ void TABLE_LIST::hide_view_error(THD *thd)
return;
/* Hide "Unknown column" or "Unknown function" error */
DBUG_ASSERT(thd->is_error());
+ switch (thd->get_stmt_da()->sql_errno()) {
+ case ER_BAD_FIELD_ERROR:
+ case ER_SP_DOES_NOT_EXIST:
+ case ER_FUNC_INEXISTENT_NAME_COLLISION:
+ case ER_PROCACCESS_DENIED_ERROR:
+ case ER_COLUMNACCESS_DENIED_ERROR:
+ case ER_TABLEACCESS_DENIED_ERROR:
+ case ER_TABLE_NOT_LOCKED:
+ case ER_NO_SUCH_TABLE:
+ {
+ TABLE_LIST *top= top_table();
+ thd->clear_error();
+ my_error(ER_VIEW_INVALID, MYF(0),
+ top->view_db.str, top->view_name.str);
+ break;
+ }
- if (thd->stmt_da->sql_errno() == ER_BAD_FIELD_ERROR ||
- thd->stmt_da->sql_errno() == ER_SP_DOES_NOT_EXIST ||
- thd->stmt_da->sql_errno() == ER_FUNC_INEXISTENT_NAME_COLLISION ||
- thd->stmt_da->sql_errno() == ER_PROCACCESS_DENIED_ERROR ||
- thd->stmt_da->sql_errno() == ER_COLUMNACCESS_DENIED_ERROR ||
- thd->stmt_da->sql_errno() == ER_TABLEACCESS_DENIED_ERROR ||
- thd->stmt_da->sql_errno() == ER_TABLE_NOT_LOCKED ||
- thd->stmt_da->sql_errno() == ER_NO_SUCH_TABLE)
- {
- TABLE_LIST *top= top_table();
- thd->clear_error();
- my_error(ER_VIEW_INVALID, MYF(0), top->view_db.str, top->view_name.str);
- }
- else if (thd->stmt_da->sql_errno() == ER_NO_DEFAULT_FOR_FIELD)
- {
- TABLE_LIST *top= top_table();
- thd->clear_error();
- // TODO: make correct error message
- my_error(ER_NO_DEFAULT_FOR_VIEW_FIELD, MYF(0),
- top->view_db.str, top->view_name.str);
+ case ER_NO_DEFAULT_FOR_FIELD:
+ {
+ TABLE_LIST *top= top_table();
+ thd->clear_error();
+ // TODO: make correct error message
+ my_error(ER_NO_DEFAULT_FOR_VIEW_FIELD, MYF(0),
+ top->view_db.str, top->view_name.str);
+ break;
+ }
}
}
@@ -4514,7 +4538,7 @@ int TABLE_LIST::view_check_option(THD *thd, bool ignore_failure)
TABLE_LIST *main_view= top_table();
if (ignore_failure)
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_VIEW_CHECK_FAILED, ER(ER_VIEW_CHECK_FAILED),
main_view->view_db.str, main_view->view_name.str);
return(VIEW_CHECK_SKIP);
@@ -4807,7 +4831,7 @@ bool TABLE_LIST::prepare_view_security_context(THD *thd)
if ((thd->lex->sql_command == SQLCOM_SHOW_CREATE) ||
(thd->lex->sql_command == SQLCOM_SHOW_FIELDS))
{
- push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_NO_SUCH_USER,
ER(ER_NO_SUCH_USER),
definer.user.str, definer.host.str);
@@ -4836,6 +4860,7 @@ bool TABLE_LIST::prepare_view_security_context(THD *thd)
}
}
DBUG_RETURN(FALSE);
+
}
#endif
@@ -5602,7 +5627,7 @@ void TABLE::mark_columns_used_by_index_no_reset(uint index,
{
KEY_PART_INFO *key_part= key_info[index].key_part;
KEY_PART_INFO *key_part_end= (key_part +
- key_info[index].key_parts);
+ key_info[index].user_defined_key_parts);
for (;key_part != key_part_end; key_part++)
{
bitmap_set_bit(bitmap, key_part->fieldnr-1);
@@ -6032,8 +6057,8 @@ bool TABLE::add_tmp_key(uint key, uint key_parts,
return TRUE;
keyinfo= key_info + key;
keyinfo->key_part= key_part_info;
- keyinfo->usable_key_parts= keyinfo->key_parts = key_parts;
- keyinfo->ext_key_parts= keyinfo->key_parts;
+ keyinfo->usable_key_parts= keyinfo->user_defined_key_parts = key_parts;
+ keyinfo->ext_key_parts= keyinfo->user_defined_key_parts;
keyinfo->key_length=0;
keyinfo->algorithm= HA_KEY_ALG_UNDEF;
keyinfo->flags= HA_GENERATED_KEY;
@@ -6132,7 +6157,7 @@ bool TABLE::is_filled_at_execution()
uint TABLE::actual_n_key_parts(KEY *keyinfo)
{
return optimizer_flag(in_use, OPTIMIZER_SWITCH_EXTENDED_KEYS) ?
- keyinfo->ext_key_parts : keyinfo->key_parts;
+ keyinfo->ext_key_parts : keyinfo->user_defined_key_parts;
}
@@ -6449,7 +6474,7 @@ bool TABLE::update_const_key_parts(COND *conds)
for (uint index= 0; index < s->keys; index++)
{
KEY_PART_INFO *keyinfo= key_info[index].key_part;
- KEY_PART_INFO *keyinfo_end= keyinfo + key_info[index].key_parts;
+ KEY_PART_INFO *keyinfo_end= keyinfo + key_info[index].user_defined_key_parts;
for (key_part_map part_map= (key_part_map)1;
keyinfo < keyinfo_end;
diff --git a/sql/table.h b/sql/table.h
index c7282cee093..9079d6fa847 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -498,19 +498,6 @@ typedef struct st_table_field_def
} TABLE_FIELD_DEF;
-#ifdef WITH_PARTITION_STORAGE_ENGINE
-/**
- Partition specific ha_data struct.
-*/
-typedef struct st_ha_data_partition
-{
- bool auto_inc_initialized;
- mysql_mutex_t LOCK_auto_inc; /**< protecting auto_inc val */
- ulonglong next_auto_inc_val; /**< first non reserved value */
-} HA_DATA_PARTITION;
-#endif
-
-
class Table_check_intact
{
protected:
@@ -611,14 +598,16 @@ struct TABLE_SHARE
TYPELIB fieldnames; /* Pointer to fieldnames */
TYPELIB *intervals; /* pointer to interval info */
mysql_mutex_t LOCK_ha_data; /* To protect access to ha_data */
+ mysql_mutex_t LOCK_share; /* To protect TABLE_SHARE */
TABLE_SHARE *next, **prev; /* Link to unused shares */
/*
Doubly-linked (back-linked) lists of used and unused TABLE objects
for this share.
*/
- I_P_List <TABLE, TABLE_share> used_tables;
- I_P_List <TABLE, TABLE_share> free_tables;
+ typedef I_P_List <TABLE, TABLE_share> TABLE_list;
+ TABLE_list used_tables;
+ TABLE_list free_tables;
LEX_CUSTRING tabledef_version;
@@ -663,7 +652,8 @@ struct TABLE_SHARE
key_map keys_for_keyread;
ha_rows min_rows, max_rows; /* create information */
ulong avg_row_length; /* create information */
- ulong version, mysql_version;
+ ulong version;
+ ulong mysql_version; /* 0 if .frm is created before 5.0 */
ulong reclength; /* Recordlength */
/* Stored record length. No generated-only virtual fields are included */
ulong stored_rec_length;
@@ -683,8 +673,10 @@ struct TABLE_SHARE
enum ha_choice page_checksum;
uint ref_count; /* How many TABLE objects uses this */
- uint blob_ptr_size; /* 4 or 8 */
uint key_block_size; /* create key_block_size, if used */
+ uint stats_sample_pages; /* number of pages to sample during
+ stats estimation, if used, otherwise 0. */
+ enum_stats_auto_recalc stats_auto_recalc; /* Automatic recalc of stats. */
uint null_bytes, last_null_bit_pos;
/*
Same as null_bytes, except that if there is only a 'delete-marker' in
@@ -735,6 +727,9 @@ struct TABLE_SHARE
*/
int cached_row_logging_check;
+ /* Name of the tablespace used for this table */
+ char *tablespace;
+
#ifdef WITH_PARTITION_STORAGE_ENGINE
/* filled in when reading from frm */
bool auto_partitioned;
@@ -756,16 +751,8 @@ struct TABLE_SHARE
*/
const TABLE_FIELD_DEF *table_field_def_cache;
- /** place to store storage engine specific data */
- void *ha_data;
- void (*ha_data_destroy)(void *); /* An optional destructor for ha_data */
-
-#ifdef WITH_PARTITION_STORAGE_ENGINE
- /** place to store partition specific data, LOCK_ha_data hold while init. */
- HA_DATA_PARTITION *ha_part_data;
- /* Destructor for ha_part_data */
- void (*ha_part_data_destroy)(HA_DATA_PARTITION *);
-#endif
+ /** Main handler's share */
+ Handler_share *ha_share;
/** Instrumentation for this table share. */
PSI_table_share *m_psi;
@@ -1248,6 +1235,9 @@ public:
*/
bool key_read;
bool no_keyread;
+ /**
+ If set, indicate that the table is not replicated by the server.
+ */
bool locked_by_logger;
bool no_replicate;
bool locked_by_name;
@@ -1281,7 +1271,8 @@ public:
Query_arena *expr_arena;
#ifdef WITH_PARTITION_STORAGE_ENGINE
partition_info *part_info; /* Partition related information */
- bool no_partitions_used; /* If true, all partitions have been pruned away */
+ /* If true, all partitions have been pruned away */
+ bool all_partitions_pruned_away;
#endif
uint max_keys; /* Size of allocated key_info array. */
bool stats_is_read; /* Persistent statistics is read for the table */
@@ -2051,6 +2042,11 @@ struct TABLE_LIST
MDL_request mdl_request;
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+ /* List to carry partition names from PARTITION (...) clause in statement */
+ List<String> *partition_names;
+#endif /* WITH_PARTITION_STORAGE_ENGINE */
+
void calc_md5(char *buffer);
int view_check_option(THD *thd, bool ignore_failure);
bool create_field_translation(THD *thd);
@@ -2206,7 +2202,7 @@ struct TABLE_LIST
@brief Returns the name of the database that the referenced table belongs
to.
*/
- char *get_db_name() { return view != NULL ? view_db.str : db; }
+ char *get_db_name() const { return view != NULL ? view_db.str : db; }
/**
@brief Returns the name of the table that this TABLE_LIST represents.
@@ -2214,7 +2210,7 @@ struct TABLE_LIST
@details The unqualified table name or view name for a table or view,
respectively.
*/
- char *get_table_name() { return view != NULL ? view_name.str : table_name; }
+ char *get_table_name() const { return view != NULL ? view_name.str : table_name; }
bool is_active_sjm();
bool is_jtbm() { return test(jtbm_subselect!=NULL); }
st_select_lex_unit *get_unit();
@@ -2511,7 +2507,7 @@ bool unpack_vcol_info_from_frm(THD *thd, MEM_ROOT *mem_root,
TABLE *table, Field *field,
LEX_STRING *vcol_expr, bool *error_reported);
TABLE_SHARE *alloc_table_share(const char *db, const char *table_name,
- char *key, uint key_length);
+ const char *key, uint key_length);
void init_tmp_table_share(THD *thd, TABLE_SHARE *share, const char *key,
uint key_length,
const char *table_name, const char *path);
diff --git a/sql/thr_malloc.cc b/sql/thr_malloc.cc
index 8c7db0673ac..a14ed36837b 100644
--- a/sql/thr_malloc.cc
+++ b/sql/thr_malloc.cc
@@ -46,10 +46,7 @@ extern "C" {
returned in the error packet.
- SHOW ERROR/SHOW WARNINGS may be empty.
*/
- thd->stmt_da->set_error_status(thd,
- ER_OUT_OF_RESOURCES,
- ER(ER_OUT_OF_RESOURCES),
- NULL);
+ thd->get_stmt_da()->set_error_status(ER_OUT_OF_RESOURCES);
}
}
@@ -134,7 +131,7 @@ char *sql_strmake_with_convert(const char *str, size_t arg_length,
if ((from_cs == &my_charset_bin) || (to_cs == &my_charset_bin))
{
// Safety if to_cs->mbmaxlen > 0
- new_length= min(arg_length, max_res_length);
+ new_length= MY_MIN(arg_length, max_res_length);
memcpy(pos, str, new_length);
}
else
diff --git a/sql/transaction.cc b/sql/transaction.cc
index 09de480e236..239fdef7064 100644
--- a/sql/transaction.cc
+++ b/sql/transaction.cc
@@ -524,7 +524,7 @@ bool trans_rollback_to_savepoint(THD *thd, LEX_STRING name)
else if (((thd->variables.option_bits & OPTION_KEEP_LOG) ||
thd->transaction.all.modified_non_trans_table) &&
!thd->slave_thread)
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
ER_WARNING_NOT_COMPLETE_ROLLBACK,
ER(ER_WARNING_NOT_COMPLETE_ROLLBACK));
@@ -815,7 +815,7 @@ bool trans_xa_rollback(THD *thd)
ha_commit_or_rollback_by_xid(thd->lex->xid, 0);
xid_cache_delete(xs);
}
- DBUG_RETURN(thd->stmt_da->is_error());
+ DBUG_RETURN(thd->get_stmt_da()->is_error());
}
if (xa_state != XA_IDLE && xa_state != XA_PREPARED && xa_state != XA_ROLLBACK_ONLY)
diff --git a/sql/tztime.cc b/sql/tztime.cc
index f5e9182522e..272dfb6381b 100644
--- a/sql/tztime.cc
+++ b/sql/tztime.cc
@@ -176,7 +176,7 @@ tz_load(const char *name, TIME_ZONE_INFO *sp, MEM_ROOT *storage)
uchar buf[sizeof(struct tzhead) + sizeof(my_time_t) * TZ_MAX_TIMES +
TZ_MAX_TIMES + sizeof(TRAN_TYPE_INFO) * TZ_MAX_TYPES +
#ifdef ABBR_ARE_USED
- max(TZ_MAX_CHARS + 1, (2 * (MY_TZNAME_MAX + 1))) +
+ MY_MAX(TZ_MAX_CHARS + 1, (2 * (MY_TZNAME_MAX + 1))) +
#endif
sizeof(LS_INFO) * TZ_MAX_LEAPS];
} u;
@@ -405,7 +405,7 @@ prepare_tz_info(TIME_ZONE_INFO *sp, MEM_ROOT *storage)
Let us choose end_t as point before next time type change or leap
second correction.
*/
- end_t= min((next_trans_idx < sp->timecnt) ? sp->ats[next_trans_idx] - 1:
+ end_t= MY_MIN((next_trans_idx < sp->timecnt) ? sp->ats[next_trans_idx] - 1:
MY_TIME_T_MAX,
(next_leap_idx < sp->leapcnt) ?
sp->lsis[next_leap_idx].ls_trans - 1: MY_TIME_T_MAX);
@@ -1690,7 +1690,8 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
MYSQL_OPEN_IGNORE_FLUSH | MYSQL_LOCK_IGNORE_TIMEOUT))
{
sql_print_warning("Can't open and lock time zone table: %s "
- "trying to live without them", thd->stmt_da->message());
+ "trying to live without them",
+ thd->get_stmt_da()->message());
/* We will try emulate that everything is ok */
return_val= time_zone_tables_exist= 0;
goto end_with_setting_default_tz;
@@ -1876,7 +1877,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
uchar types[TZ_MAX_TIMES];
TRAN_TYPE_INFO ttis[TZ_MAX_TYPES];
#ifdef ABBR_ARE_USED
- char chars[max(TZ_MAX_CHARS + 1, (2 * (MY_TZNAME_MAX + 1)))];
+ char chars[MY_MAX(TZ_MAX_CHARS + 1, (2 * (MY_TZNAME_MAX + 1)))];
#endif
/*
Used as a temporary tz_info until we decide that we actually want to
@@ -1927,7 +1928,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
field->store((longlong) tzid, TRUE);
DBUG_ASSERT(field->key_length() <= sizeof(keybuff));
field->get_key_image(keybuff,
- min(field->key_length(), sizeof(keybuff)),
+ MY_MIN(field->key_length(), sizeof(keybuff)),
Field::itRAW);
if (table->file->ha_index_init(0, 1))
goto end;
@@ -1960,7 +1961,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
field->store((longlong) tzid, TRUE);
DBUG_ASSERT(field->key_length() <= sizeof(keybuff));
field->get_key_image(keybuff,
- min(field->key_length(), sizeof(keybuff)),
+ MY_MIN(field->key_length(), sizeof(keybuff)),
Field::itRAW);
if (table->file->ha_index_init(0, 1))
goto end;
diff --git a/sql/unireg.cc b/sql/unireg.cc
index 388aa2863af..7bb943dc9b0 100644
--- a/sql/unireg.cc
+++ b/sql/unireg.cc
@@ -191,7 +191,7 @@ LEX_CUSTRING build_frm_image(THD *thd, const char *table,
char warn_buff[MYSQL_ERRMSG_SIZE];
my_snprintf(warn_buff, sizeof(warn_buff), ER(ER_TOO_LONG_TABLE_COMMENT),
real_table_name, TABLE_COMMENT_MAXLEN);
- push_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ push_warning(current_thd, Sql_condition::WARN_LEVEL_WARN,
ER_TOO_LONG_TABLE_COMMENT, warn_buff);
create_info->comment.length= tmp_len;
}
@@ -369,59 +369,53 @@ err:
}
-/*
+/**
Create a frm (table definition) file and the tables
- SYNOPSIS
- rea_create_table()
- thd Thread handler
- frm binary frm image of the table to create
- path Name of file (including database, without .frm)
- db Data base name
- table_name Table name
- create_info create info parameters
- file Handler to use or NULL if only frm needs to be created
-
- RETURN
- 0 ok
- 1 error
+ @param thd Thread handler
+ @param frm Binary frm image of the table to create
+ @param path Name of file (including database, without .frm)
+ @param db Data base name
+ @param table_name Table name
+ @param create_info create info parameters
+ @param file Handler to use or NULL if only frm needs to be created
+
+ @retval 0 ok
+ @retval 1 error
*/
int rea_create_table(THD *thd, LEX_CUSTRING *frm,
const char *path, const char *db, const char *table_name,
- HA_CREATE_INFO *create_info, handler *file)
+ HA_CREATE_INFO *create_info, handler *file,
+ bool no_ha_create_table)
{
DBUG_ENTER("rea_create_table");
- if (file)
+ // TODO don't write frm for temp tables
+ if (no_ha_create_table || create_info->tmp_table())
{
- // TODO don't write frm for temp tables
- if (create_info->tmp_table() &&
- writefrm(path, db, table_name, true, frm->str, frm->length))
- goto err_handler;
+ if (writefrm(path, db, table_name, true, frm->str, frm->length))
+ goto err_frm;
+ }
- if (thd->variables.keep_files_on_create)
- create_info->options|= HA_CREATE_KEEP_FILES;
+ if (thd->variables.keep_files_on_create)
+ create_info->options|= HA_CREATE_KEEP_FILES;
- if (file->ha_create_partitioning_metadata(path, NULL, CHF_CREATE_FLAG) ||
- ha_create_table(thd, path, db, table_name, create_info, frm))
- {
- file->ha_create_partitioning_metadata(path, NULL, CHF_DELETE_FLAG);
- goto err_handler;
- }
- }
- else
+ if (file->ha_create_partitioning_metadata(path, NULL, CHF_CREATE_FLAG))
+ goto err_part;
+
+ if (!no_ha_create_table)
{
- if (writefrm(path, db, table_name, false, frm->str, frm->length))
- goto err_handler;
+ if (ha_create_table(thd, path, db, table_name, create_info, frm))
+ goto err_part;
}
DBUG_RETURN(0);
-err_handler:
- char frm_name[FN_REFLEN];
- strxmov(frm_name, path, reg_ext, NullS);
- mysql_file_delete(key_file_frm, frm_name, MYF(0));
+err_part:
+ file->ha_create_partitioning_metadata(path, NULL, CHF_DELETE_FLAG);
+err_frm:
+ deletefrm(path);
DBUG_RETURN(1);
} /* rea_create_table */
@@ -443,15 +437,15 @@ static uint pack_keys(uchar *keybuff, uint key_count, KEY *keyinfo,
{
int2store(pos, (key->flags ^ HA_NOSAME));
int2store(pos+2,key->key_length);
- pos[4]= (uchar) key->key_parts;
+ pos[4]= (uchar) key->user_defined_key_parts;
pos[5]= (uchar) key->algorithm;
int2store(pos+6, key->block_size);
pos+=8;
- key_parts+=key->key_parts;
+ key_parts+=key->user_defined_key_parts;
DBUG_PRINT("loop", ("flags: %lu key_parts: %d key_part: 0x%lx",
- key->flags, key->key_parts,
+ key->flags, key->user_defined_key_parts,
(long) key->key_part));
- for (key_part=key->key_part,key_part_end=key_part+key->key_parts ;
+ for (key_part=key->key_part,key_part_end=key_part+key->user_defined_key_parts ;
key_part != key_part_end ;
key_part++)
@@ -660,7 +654,7 @@ static bool pack_header(uchar *forminfo, List<Create_field> &create_fields,
DBUG_RETURN(1);
}
/* Hack to avoid bugs with small static rows in MySQL */
- reclength=max(file->min_record_length(table_options),reclength);
+ reclength=MY_MAX(file->min_record_length(table_options),reclength);
if ((ulong) create_fields.elements*FCOMP+FRM_FORMINFO_SIZE+
n_length+int_length+com_length+vcol_info_length > 65535L ||
int_count > 255)
@@ -940,7 +934,6 @@ static bool make_empty_rec(THD *thd, uchar *buff, uint table_options,
table.s= &share;
table.in_use= thd;
- table.s->blob_ptr_size= portable_sizeof_char_ptr;
null_count=0;
if (!(table_options & HA_OPTION_PACK_RECORD))
diff --git a/sql/unireg.h b/sql/unireg.h
index c867f50197d..9b40b7b0779 100644
--- a/sql/unireg.h
+++ b/sql/unireg.h
@@ -132,6 +132,10 @@
The flag means that I_S table uses optimization algorithm.
*/
#define OPTIMIZE_I_S_TABLE OPEN_VIEW_FULL*2
+/**
+ This flag is used to instruct tdc_open_view() to check metadata version.
+*/
+#define CHECK_METADATA_VERSION OPEN_TRIGGER_ONLY*2
/*
The flag means that we need to process trigger files only.
@@ -190,7 +194,8 @@ enum extra2_frm_value_type {
int rea_create_table(THD *thd, LEX_CUSTRING *frm,
const char *path, const char *db, const char *table_name,
- HA_CREATE_INFO *create_info, handler *file);
+ HA_CREATE_INFO *create_info, handler *file,
+ bool no_ha_create_table);
LEX_CUSTRING build_frm_image(THD *thd, const char *table,
HA_CREATE_INFO *create_info,
List<Create_field> &create_fields,
diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc
index 1d1d6b7b743..4122e76bbef 100644
--- a/storage/archive/ha_archive.cc
+++ b/storage/archive/ha_archive.cc
@@ -99,13 +99,19 @@
-Brian
*/
-/* Variables for archive share methods */
-mysql_mutex_t archive_mutex;
-static HASH archive_open_tables;
/* The file extension */
#define ARZ ".ARZ" // The data file
#define ARN ".ARN" // Files used during an optimize call
+#define ARM ".ARM" // Meta file (deprecated)
+
+/* 5.0 compatibility */
+#define META_V1_OFFSET_CHECK_HEADER 0
+#define META_V1_OFFSET_VERSION 1
+#define META_V1_OFFSET_ROWS_RECORDED 2
+#define META_V1_OFFSET_CHECK_POINT 10
+#define META_V1_OFFSET_CRASHED 18
+#define META_V1_LENGTH 19
/*
uchar + uchar
@@ -140,23 +146,12 @@ static handler *archive_create_handler(handlerton *hton,
return new (mem_root) ha_archive(hton, table);
}
-/*
- Used for hash table that tracks open tables.
-*/
-static uchar* archive_get_key(ARCHIVE_SHARE *share, size_t *length,
- my_bool not_used __attribute__((unused)))
-{
- *length=share->table_name_length;
- return (uchar*) share->table_name;
-}
-
#ifdef HAVE_PSI_INTERFACE
-PSI_mutex_key az_key_mutex_archive_mutex, az_key_mutex_ARCHIVE_SHARE_mutex;
+PSI_mutex_key az_key_mutex_Archive_share_mutex;
static PSI_mutex_info all_archive_mutexes[]=
{
- { &az_key_mutex_archive_mutex, "archive_mutex", PSI_FLAG_GLOBAL},
- { &az_key_mutex_ARCHIVE_SHARE_mutex, "ARCHIVE_SHARE::mutex", 0}
+ { &az_key_mutex_Archive_share_mutex, "Archive_share::mutex", 0}
};
PSI_file_key arch_key_file_metadata, arch_key_file_data;
@@ -220,39 +215,24 @@ int archive_db_init(void *p)
archive_hton->discover_table= archive_discover;
archive_hton->tablefile_extensions= ha_archive_exts;
- if (mysql_mutex_init(az_key_mutex_archive_mutex,
- &archive_mutex, MY_MUTEX_INIT_FAST))
- goto error;
- if (my_hash_init(&archive_open_tables, table_alias_charset, 32, 0, 0,
- (my_hash_get_key) archive_get_key, 0, 0))
- {
- mysql_mutex_destroy(&archive_mutex);
- }
- else
- {
- DBUG_RETURN(FALSE);
- }
-error:
- DBUG_RETURN(TRUE);
+ DBUG_RETURN(0);
}
-/*
- Release the archive handler.
-
- SYNOPSIS
- archive_db_done()
- void
-
- RETURN
- FALSE OK
-*/
-int archive_db_done(void *p)
+Archive_share::Archive_share()
{
- my_hash_free(&archive_open_tables);
- mysql_mutex_destroy(&archive_mutex);
-
- return 0;
+ crashed= false;
+ in_optimize= false;
+ archive_write_open= false;
+ dirty= false;
+ DBUG_PRINT("ha_archive", ("Archive_share: %p",
+ this));
+ thr_lock_init(&lock);
+ /*
+ We will use this lock for rows.
+ */
+ mysql_mutex_init(az_key_mutex_Archive_share_mutex,
+ &mutex, MY_MUTEX_INIT_FAST);
}
@@ -309,6 +289,103 @@ ret:
DBUG_RETURN(my_errno);
}
+/**
+ @brief Read version 1 meta file (5.0 compatibility routine).
+
+ @return Completion status
+ @retval 0 Success
+ @retval !0 Failure
+*/
+
+int Archive_share::read_v1_metafile()
+{
+ char file_name[FN_REFLEN];
+ uchar buf[META_V1_LENGTH];
+ File fd;
+ DBUG_ENTER("Archive_share::read_v1_metafile");
+
+ fn_format(file_name, data_file_name, "", ARM, MY_REPLACE_EXT);
+ if ((fd= mysql_file_open(arch_key_file_metadata, file_name, O_RDONLY, MYF(0))) == -1)
+ DBUG_RETURN(-1);
+
+ if (mysql_file_read(fd, buf, sizeof(buf), MYF(0)) != sizeof(buf))
+ {
+ mysql_file_close(fd, MYF(0));
+ DBUG_RETURN(-1);
+ }
+
+ rows_recorded= uint8korr(buf + META_V1_OFFSET_ROWS_RECORDED);
+ crashed= buf[META_V1_OFFSET_CRASHED];
+ mysql_file_close(fd, MYF(0));
+ DBUG_RETURN(0);
+}
+
+
+/**
+ @brief Write version 1 meta file (5.0 compatibility routine).
+
+ @return Completion status
+ @retval 0 Success
+ @retval !0 Failure
+*/
+
+int Archive_share::write_v1_metafile()
+{
+ char file_name[FN_REFLEN];
+ uchar buf[META_V1_LENGTH];
+ File fd;
+ DBUG_ENTER("Archive_share::write_v1_metafile");
+
+ buf[META_V1_OFFSET_CHECK_HEADER]= ARCHIVE_CHECK_HEADER;
+ buf[META_V1_OFFSET_VERSION]= 1;
+ int8store(buf + META_V1_OFFSET_ROWS_RECORDED, rows_recorded);
+ int8store(buf + META_V1_OFFSET_CHECK_POINT, (ulonglong) 0);
+ buf[META_V1_OFFSET_CRASHED]= crashed;
+
+ fn_format(file_name, data_file_name, "", ARM, MY_REPLACE_EXT);
+ if ((fd= mysql_file_open(arch_key_file_metadata, file_name, O_WRONLY, MYF(0))) == -1)
+ DBUG_RETURN(-1);
+
+ if (mysql_file_write(fd, buf, sizeof(buf), MYF(0)) != sizeof(buf))
+ {
+ mysql_file_close(fd, MYF(0));
+ DBUG_RETURN(-1);
+ }
+
+ mysql_file_close(fd, MYF(0));
+ DBUG_RETURN(0);
+}
+
+/**
+ @brief Pack version 1 row (5.0 compatibility routine).
+
+ @param[in] record the record to pack
+
+ @return Length of packed row
+*/
+
+unsigned int ha_archive::pack_row_v1(uchar *record)
+{
+ uint *blob, *end;
+ uchar *pos;
+ DBUG_ENTER("pack_row_v1");
+ memcpy(record_buffer->buffer, record, table->s->reclength);
+ pos= record_buffer->buffer + table->s->reclength;
+ for (blob= table->s->blob_field, end= blob + table->s->blob_fields;
+ blob != end; blob++)
+ {
+ uint32 length= ((Field_blob *) table->field[*blob])->get_length();
+ if (length)
+ {
+ uchar *data_ptr;
+ ((Field_blob *) table->field[*blob])->get_ptr(&data_ptr);
+ memcpy(pos, data_ptr, length);
+ pos+= length;
+ }
+ }
+ DBUG_RETURN(pos - record_buffer->buffer);
+}
+
/*
This method reads the header of a datafile and returns whether or not it was successful.
*/
@@ -361,159 +438,102 @@ int ha_archive::read_data_header(azio_stream *file_to_read)
See ha_example.cc for a longer description.
*/
-ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, int *rc)
+Archive_share *ha_archive::get_share(const char *table_name, int *rc)
{
- uint length;
- DBUG_ENTER("ha_archive::get_share");
+ Archive_share *tmp_share;
- mysql_mutex_lock(&archive_mutex);
- length=(uint) strlen(table_name);
+ DBUG_ENTER("ha_archive::get_share");
- if (!(share=(ARCHIVE_SHARE*) my_hash_search(&archive_open_tables,
- (uchar*) table_name,
- length)))
+ lock_shared_ha_data();
+ if (!(tmp_share= static_cast<Archive_share*>(get_ha_share_ptr())))
{
- char *tmp_name;
azio_stream archive_tmp;
- if (!my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
- &share, sizeof(*share),
- &tmp_name, length+1,
- NullS))
+ tmp_share= new Archive_share;
+
+ if (!tmp_share)
{
- mysql_mutex_unlock(&archive_mutex);
*rc= HA_ERR_OUT_OF_MEM;
- DBUG_RETURN(NULL);
+ goto err;
}
+ DBUG_PRINT("ha_archive", ("new Archive_share: %p",
+ tmp_share));
- share->use_count= 0;
- share->table_name_length= length;
- share->table_name= tmp_name;
- share->crashed= FALSE;
- share->archive_write_open= FALSE;
- fn_format(share->data_file_name, table_name, "",
+ fn_format(tmp_share->data_file_name, table_name, "",
ARZ, MY_REPLACE_EXT | MY_UNPACK_FILENAME);
- strmov(share->table_name, table_name);
- DBUG_PRINT("ha_archive", ("Data File %s",
- share->data_file_name));
- /*
- We will use this lock for rows.
- */
- mysql_mutex_init(az_key_mutex_ARCHIVE_SHARE_mutex,
- &share->mutex, MY_MUTEX_INIT_FAST);
-
+ strmov(tmp_share->table_name, table_name);
+ DBUG_PRINT("ha_archive", ("Data File %s",
+ tmp_share->data_file_name));
+
/*
We read the meta file, but do not mark it dirty. Since we are not
doing a write we won't mark it dirty (and we won't open it for
anything but reading... open it for write and we will generate null
compression writes).
*/
- if (!(azopen(&archive_tmp, share->data_file_name, O_RDONLY|O_BINARY)))
- {
- *rc= my_errno ? my_errno : -1;
- mysql_mutex_unlock(&archive_mutex);
- mysql_mutex_destroy(&share->mutex);
- my_free(share);
- DBUG_RETURN(NULL);
- }
- share->version= archive_tmp.version;
- if (archive_tmp.version == ARCHIVE_VERSION)
- {
- stats.auto_increment_value= archive_tmp.auto_increment + 1;
- share->rows_recorded= (ha_rows)archive_tmp.rows;
- share->crashed= archive_tmp.dirty;
- }
- else
+ if (!(azopen(&archive_tmp, tmp_share->data_file_name, O_RDONLY|O_BINARY)))
{
- /* Used by repair */
- share->rows_recorded= ~(ha_rows) 0;
- stats.auto_increment_value= 0;
+ delete tmp_share;
+ *rc= my_errno ? my_errno : HA_ERR_CRASHED;
+ tmp_share= NULL;
+ goto err;
}
- /*
- If archive version is less than 3, It should be upgraded before
- use.
- */
- if (archive_tmp.version < ARCHIVE_VERSION)
- *rc= HA_ERR_TABLE_NEEDS_UPGRADE;
- else if (frm_compare(&archive_tmp))
- *rc= HA_ERR_TABLE_DEF_CHANGED;
-
+ stats.auto_increment_value= archive_tmp.auto_increment + 1;
+ tmp_share->rows_recorded= (ha_rows)archive_tmp.rows;
+ tmp_share->crashed= archive_tmp.dirty;
+ share= tmp_share;
+ if (archive_tmp.version == 1)
+ share->read_v1_metafile();
azclose(&archive_tmp);
- (void) my_hash_insert(&archive_open_tables, (uchar*) share);
- thr_lock_init(&share->lock);
+ set_ha_share_ptr(static_cast<Handler_share*>(tmp_share));
}
- share->use_count++;
- DBUG_PRINT("ha_archive", ("archive table %.*s has %d open handles now",
- share->table_name_length, share->table_name,
- share->use_count));
- if (share->crashed)
+ if (tmp_share->crashed)
*rc= HA_ERR_CRASHED_ON_USAGE;
- mysql_mutex_unlock(&archive_mutex);
-
- DBUG_RETURN(share);
-}
+err:
+ unlock_shared_ha_data();
+ DBUG_ASSERT(tmp_share || *rc);
-/*
- Free the share.
- See ha_example.cc for a description.
-*/
-int ha_archive::free_share()
-{
- int rc= 0;
- DBUG_ENTER("ha_archive::free_share");
- DBUG_PRINT("ha_archive",
- ("archive table %.*s has %d open handles on entrance",
- share->table_name_length, share->table_name,
- share->use_count));
-
- mysql_mutex_lock(&archive_mutex);
- if (!--share->use_count)
- {
- my_hash_delete(&archive_open_tables, (uchar*) share);
- thr_lock_delete(&share->lock);
- mysql_mutex_destroy(&share->mutex);
- /*
- We need to make sure we don't reset the crashed state.
- If we open a crashed file, wee need to close it as crashed unless
- it has been repaired.
- Since we will close the data down after this, we go on and count
- the flush on close;
- */
- if (share->archive_write_open)
- {
- if (azclose(&(share->archive_write)))
- rc= 1;
- }
- my_free(share);
- }
- mysql_mutex_unlock(&archive_mutex);
-
- DBUG_RETURN(rc);
+ DBUG_RETURN(tmp_share);
}
-int ha_archive::init_archive_writer()
+
+int Archive_share::init_archive_writer()
{
- DBUG_ENTER("ha_archive::init_archive_writer");
- /*
+ DBUG_ENTER("Archive_share::init_archive_writer");
+ /*
It is expensive to open and close the data files and since you can't have
a gzip file that can be both read and written we keep a writer open
that is shared amoung all open tables.
*/
- if (!(azopen(&(share->archive_write), share->data_file_name,
+ if (!(azopen(&archive_write, data_file_name,
O_RDWR|O_BINARY)))
{
DBUG_PRINT("ha_archive", ("Could not open archive write file"));
- share->crashed= TRUE;
+ crashed= true;
DBUG_RETURN(1);
}
- share->archive_write_open= TRUE;
+ archive_write_open= true;
DBUG_RETURN(0);
}
+void Archive_share::close_archive_writer()
+{
+ mysql_mutex_assert_owner(&mutex);
+ if (archive_write_open)
+ {
+ if (archive_write.version == 1)
+ (void) write_v1_metafile();
+ azclose(&archive_write);
+ archive_write_open= false;
+ dirty= false;
+ }
+}
+
+
/*
No locks are required because it is associated with just one handler instance
*/
@@ -523,7 +543,8 @@ int ha_archive::init_archive_reader()
/*
It is expensive to open and close the data files and since you can't have
a gzip file that can be both read and written we keep a writer open
- that is shared amoung all open tables.
+ that is shared amoung all open tables, but have one reader open for
+ each handler instance.
*/
if (!archive_reader_open)
{
@@ -554,6 +575,8 @@ int ha_archive::open(const char *name, int mode, uint open_options)
DBUG_PRINT("ha_archive", ("archive table was opened for crash: %s",
(open_options & HA_OPEN_FOR_REPAIR) ? "yes" : "no"));
share= get_share(name, &rc);
+ if (!share)
+ DBUG_RETURN(rc);
/*
Allow open on crashed table in repair mode only.
@@ -574,7 +597,6 @@ int ha_archive::open(const char *name, int mode, uint open_options)
rc= 0;
break;
}
- free_share();
/* fall through */
default:
DBUG_RETURN(rc);
@@ -586,13 +608,17 @@ int ha_archive::open(const char *name, int mode, uint open_options)
ARCHIVE_ROW_HEADER_SIZE);
if (!record_buffer)
- {
- free_share();
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
- }
thr_lock_data_init(&share->lock, &lock, NULL);
+ DBUG_PRINT("ha_archive", ("archive table was crashed %s",
+ rc == HA_ERR_CRASHED_ON_USAGE ? "yes" : "no"));
+ if (rc == HA_ERR_CRASHED_ON_USAGE && open_options & HA_OPEN_FOR_REPAIR)
+ {
+ DBUG_RETURN(0);
+ }
+
DBUG_RETURN(rc);
}
@@ -627,8 +653,6 @@ int ha_archive::close(void)
if (azclose(&archive))
rc= 1;
}
- /* then also close share */
- rc|= free_share();
DBUG_RETURN(rc);
}
@@ -709,7 +733,7 @@ int ha_archive::create(const char *name, TABLE *table_arg,
{
KEY *pos= table_arg->key_info+key;
KEY_PART_INFO *key_part= pos->key_part;
- KEY_PART_INFO *key_part_end= key_part + pos->key_parts;
+ KEY_PART_INFO *key_part_end= key_part + pos->user_defined_key_parts;
for (; key_part != key_part_end; key_part++)
{
@@ -806,7 +830,7 @@ int ha_archive::real_write_row(uchar *buf, azio_stream *writer)
DBUG_ENTER("ha_archive::real_write_row");
/* We pack the row for writing */
- r_pack_length= pack_row(buf);
+ r_pack_length= pack_row(buf, writer);
written= azwrite(writer, record_buffer->buffer, r_pack_length);
if (written != r_pack_length)
@@ -847,7 +871,7 @@ uint32 ha_archive::max_row_length(const uchar *buf)
}
-unsigned int ha_archive::pack_row(uchar *record)
+unsigned int ha_archive::pack_row(uchar *record, azio_stream *writer)
{
uchar *ptr;
@@ -857,6 +881,9 @@ unsigned int ha_archive::pack_row(uchar *record)
if (fix_rec_buff(max_row_length(record)))
DBUG_RETURN(HA_ERR_OUT_OF_MEM); /* purecov: inspected */
+ if (writer->version == 1)
+ DBUG_RETURN(pack_row_v1(record));
+
/* Copy null bits */
memcpy(record_buffer->buffer+ARCHIVE_ROW_HEADER_SIZE,
record, table->s->null_bytes);
@@ -900,7 +927,7 @@ int ha_archive::write_row(uchar *buf)
mysql_mutex_lock(&share->mutex);
- if (!share->archive_write_open && init_archive_writer())
+ if (!share->archive_write_open && share->init_archive_writer())
{
rc= errno;
goto error;
@@ -1368,11 +1395,26 @@ end:
DBUG_RETURN(rc);
}
+
+/**
+ @brief Check for upgrade
+
+ @param[in] check_opt check options
+
+ @return Completion status
+ @retval HA_ADMIN_OK No upgrade required
+ @retval HA_ADMIN_CORRUPT Cannot read meta-data
+ @retval HA_ADMIN_NEEDS_UPGRADE Upgrade required
+*/
+
int ha_archive::check_for_upgrade(HA_CHECK_OPT *check_opt)
{
- if (share->version < ARCHIVE_VERSION)
- return HA_ADMIN_NEEDS_ALTER;
- return 0;
+ DBUG_ENTER("ha_archive::check_for_upgrade");
+ if (init_archive_reader())
+ DBUG_RETURN(HA_ADMIN_CORRUPT);
+ if (archive.version < ARCHIVE_VERSION)
+ DBUG_RETURN(HA_ADMIN_NEEDS_UPGRADE);
+ DBUG_RETURN(HA_ADMIN_OK);
}
@@ -1571,6 +1613,7 @@ THR_LOCK_DATA **ha_archive::store_lock(THD *thd,
void ha_archive::update_create_info(HA_CREATE_INFO *create_info)
{
+ char tmp_real_path[FN_REFLEN];
DBUG_ENTER("ha_archive::update_create_info");
ha_archive::info(HA_STATUS_AUTO);
@@ -1579,8 +1622,8 @@ void ha_archive::update_create_info(HA_CREATE_INFO *create_info)
create_info->auto_increment_value= stats.auto_increment_value;
}
- if (!(my_readlink(share->real_path, share->data_file_name, MYF(0))))
- create_info->data_file_name= share->real_path;
+ if (!(my_readlink(tmp_real_path, share->data_file_name, MYF(0))))
+ create_info->data_file_name= sql_strdup(tmp_real_path);
DBUG_VOID_RETURN;
}
@@ -1804,6 +1847,20 @@ void ha_archive::destroy_record_buffer(archive_record_buffer *r)
DBUG_VOID_RETURN;
}
+bool ha_archive::check_if_incompatible_data(HA_CREATE_INFO *info,
+ uint table_changes)
+{
+ if (info->auto_increment_value != stats.auto_increment_value ||
+ (info->used_fields & HA_CREATE_USED_DATADIR) ||
+ info->data_file_name ||
+ (info->used_fields & HA_CREATE_USED_COMMENT) ||
+ table_changes != IS_EQUAL_YES)
+ return COMPATIBLE_DATA_NO;
+
+ return COMPATIBLE_DATA_YES;
+}
+
+
struct st_mysql_storage_engine archive_storage_engine=
{ MYSQL_HANDLERTON_INTERFACE_VERSION };
@@ -1816,7 +1873,7 @@ maria_declare_plugin(archive)
"Archive storage engine",
PLUGIN_LICENSE_GPL,
archive_db_init, /* Plugin Init */
- archive_db_done, /* Plugin Deinit */
+ NULL, /* Plugin Deinit */
0x0300 /* 3.0 */,
NULL, /* status variables */
NULL, /* system variables */
diff --git a/storage/archive/ha_archive.h b/storage/archive/ha_archive.h
index 7e8d5cee47b..47ee89198e6 100644
--- a/storage/archive/ha_archive.h
+++ b/storage/archive/ha_archive.h
@@ -32,20 +32,38 @@ typedef struct st_archive_record_buffer {
} archive_record_buffer;
-typedef struct st_archive_share {
- char *table_name;
- char data_file_name[FN_REFLEN];
- uint table_name_length,use_count, version;
+class Archive_share : public Handler_share
+{
+public:
mysql_mutex_t mutex;
THR_LOCK lock;
azio_stream archive_write; /* Archive file we are working with */
+ ha_rows rows_recorded; /* Number of rows in tables */
+ char table_name[FN_REFLEN];
+ char data_file_name[FN_REFLEN];
+ bool in_optimize;
bool archive_write_open;
bool dirty; /* Flag for if a flush should occur */
bool crashed; /* Meta file is crashed */
- ha_rows rows_recorded; /* Number of rows in tables */
- ulonglong mean_rec_length;
- char real_path[FN_REFLEN];
-} ARCHIVE_SHARE;
+ Archive_share();
+ ~Archive_share()
+ {
+ DBUG_PRINT("ha_archive", ("~Archive_share: %p",
+ this));
+ if (archive_write_open)
+ {
+ mysql_mutex_lock(&mutex);
+ (void) close_archive_writer();
+ mysql_mutex_unlock(&mutex);
+ }
+ thr_lock_delete(&lock);
+ mysql_mutex_destroy(&mutex);
+ }
+ int init_archive_writer();
+ void close_archive_writer();
+ int write_v1_metafile();
+ int read_v1_metafile();
+};
/*
Version for file format.
@@ -58,7 +76,7 @@ typedef struct st_archive_share {
class ha_archive: public handler
{
THR_LOCK_DATA lock; /* MySQL lock */
- ARCHIVE_SHARE *share; /* Shared lock info */
+ Archive_share *share; /* Shared lock info */
azio_stream archive; /* Archive file we are working with */
my_off_t current_position; /* The position of the row we just read */
@@ -77,6 +95,7 @@ class ha_archive: public handler
void destroy_record_buffer(archive_record_buffer *r);
int frm_copy(azio_stream *src, azio_stream *dst);
int frm_compare(azio_stream *src);
+ unsigned int pack_row_v1(uchar *record);
public:
ha_archive(handlerton *hton, TABLE_SHARE *table_arg);
@@ -121,9 +140,7 @@ public:
int get_row(azio_stream *file_to_read, uchar *buf);
int get_row_version2(azio_stream *file_to_read, uchar *buf);
int get_row_version3(azio_stream *file_to_read, uchar *buf);
- ARCHIVE_SHARE *get_share(const char *table_name, int *rc);
- int free_share();
- int init_archive_writer();
+ Archive_share *get_share(const char *table_name, int *rc);
int init_archive_reader();
// Always try auto_repair in case of HA_ERR_CRASHED_ON_USAGE
bool auto_repair(int error) const
@@ -150,6 +167,7 @@ public:
uint32 max_row_length(const uchar *buf);
bool fix_rec_buff(unsigned int length);
int unpack_row(azio_stream *file_to_read, uchar *record);
- unsigned int pack_row(uchar *record);
+ unsigned int pack_row(uchar *record, azio_stream *writer);
+ bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes);
};
diff --git a/storage/cassandra/ha_cassandra.cc b/storage/cassandra/ha_cassandra.cc
index b24c0adb0fb..3fbe1834f89 100644
--- a/storage/cassandra/ha_cassandra.cc
+++ b/storage/cassandra/ha_cassandra.cc
@@ -509,7 +509,7 @@ int ha_cassandra::create(const char *name, TABLE *table_arg,
DBUG_ENTER("ha_cassandra::create");
if (table_arg->s->keys != 1 || table_arg->s->primary_key !=0 ||
- table_arg->key_info[0].key_parts != 1 ||
+ table_arg->key_info[0].user_defined_key_parts != 1 ||
table_arg->key_info[0].key_part[0].fieldnr != 1)
{
my_error(ER_WRONG_COLUMN_NAME, MYF(0),
@@ -2342,7 +2342,7 @@ int ha_cassandra::multi_range_read_explain_info(uint mrr_mode, char *str, size_t
if (!(mrr_mode & HA_MRR_USE_DEFAULT_IMPL))
{
uint mrr_str_len= strlen(mrr_str);
- uint copy_len= min(mrr_str_len, size);
+ uint copy_len= MY_MIN(mrr_str_len, size);
memcpy(str, mrr_str, size);
return copy_len;
}
diff --git a/storage/csv/ha_tina.cc b/storage/csv/ha_tina.cc
index c97cfc57bdb..b49cb3fc5ab 100644
--- a/storage/csv/ha_tina.cc
+++ b/storage/csv/ha_tina.cc
@@ -1304,7 +1304,7 @@ bool ha_tina::get_write_pos(my_off_t *end_pos, tina_set *closest_hole)
if (closest_hole == chain_ptr) /* no more chains */
*end_pos= file_buff->end();
else
- *end_pos= min(file_buff->end(), closest_hole->begin);
+ *end_pos= MY_MIN(file_buff->end(), closest_hole->begin);
return (closest_hole != chain_ptr) && (*end_pos == closest_hole->begin);
}
@@ -1541,7 +1541,7 @@ int ha_tina::repair(THD* thd, HA_CHECK_OPT* check_opt)
/* write repaired file */
while (1)
{
- write_end= min(file_buff->end(), current_position);
+ write_end= MY_MIN(file_buff->end(), current_position);
if ((write_end - write_begin) &&
(mysql_file_write(repair_file, (uchar*)file_buff->ptr(),
(size_t) (write_end - write_begin), MYF_RW)))
diff --git a/storage/example/ha_example.cc b/storage/example/ha_example.cc
index 588cf11f641..10dd3b1c33f 100644
--- a/storage/example/ha_example.cc
+++ b/storage/example/ha_example.cc
@@ -107,17 +107,6 @@ static handler *example_create_handler(handlerton *hton,
handlerton *example_hton;
-/* Variables for example share methods */
-
-/*
- Hash used to track the number of open tables; variable for example share
- methods
-*/
-static HASH example_open_tables;
-
-/* The mutex used to init the hash; variable for example share methods */
-mysql_mutex_t example_mutex;
-
static MYSQL_THDVAR_ULONG(varopt_default, PLUGIN_VAR_RQCMDARG,
"default value of the VAROPT table option", NULL, NULL, 5, 0, 100, 0);
@@ -208,20 +197,12 @@ ha_create_table_option example_field_option_list[]=
Function we use in the creation of our hash to get key.
*/
-static uchar* example_get_key(EXAMPLE_SHARE *share, size_t *length,
- my_bool not_used __attribute__((unused)))
-{
- *length=share->table_name_length;
- return (uchar*) share->table_name;
-}
-
#ifdef HAVE_PSI_INTERFACE
-static PSI_mutex_key ex_key_mutex_example, ex_key_mutex_EXAMPLE_SHARE_mutex;
+static PSI_mutex_key ex_key_mutex_Example_share_mutex;
static PSI_mutex_info all_example_mutexes[]=
{
- { &ex_key_mutex_example, "example", PSI_FLAG_GLOBAL},
- { &ex_key_mutex_EXAMPLE_SHARE_mutex, "EXAMPLE_SHARE::mutex", 0}
+ { &ex_key_mutex_Example_share_mutex, "Example_share::mutex", 0}
};
static void init_example_psi_keys()
@@ -229,11 +210,8 @@ static void init_example_psi_keys()
const char* category= "example";
int count;
- if (PSI_server == NULL)
- return;
-
count= array_elements(all_example_mutexes);
- PSI_server->register_mutex(category, all_example_mutexes, count);
+ mysql_mutex_register(category, all_example_mutexes, count);
}
#endif
@@ -259,6 +237,15 @@ static void init_example_psi_keys()
static const char *ha_example_exts[] = {
NullS
};
+
+Example_share::Example_share()
+{
+ thr_lock_init(&lock);
+ mysql_mutex_init(ex_key_mutex_Example_share_mutex,
+ &mutex, MY_MUTEX_INIT_FAST);
+}
+
+
static int example_init_func(void *p)
{
DBUG_ENTER("example_init_func");
@@ -268,10 +255,6 @@ static int example_init_func(void *p)
#endif
example_hton= (handlerton *)p;
- mysql_mutex_init(ex_key_mutex_example, &example_mutex, MY_MUTEX_INIT_FAST);
- (void) my_hash_init(&example_open_tables,system_charset_info,32,0,0,
- (my_hash_get_key) example_get_key,0,0);
-
example_hton->state= SHOW_OPTION_YES;
example_hton->create= example_create_handler;
example_hton->flags= HTON_CAN_RECREATE;
@@ -283,20 +266,6 @@ static int example_init_func(void *p)
}
-static int example_done_func(void *p)
-{
- int error= 0;
- DBUG_ENTER("example_done_func");
-
- if (example_open_tables.records)
- error= 1;
- my_hash_free(&example_open_tables);
- mysql_mutex_destroy(&example_mutex);
-
- DBUG_RETURN(error);
-}
-
-
/**
@brief
Example of simple lock controls. The "share" it creates is a
@@ -305,71 +274,24 @@ static int example_done_func(void *p)
they are needed to function.
*/
-static EXAMPLE_SHARE *get_share(const char *table_name, TABLE *table)
+Example_share *ha_example::get_share()
{
- EXAMPLE_SHARE *share;
- uint length;
- char *tmp_name;
+ Example_share *tmp_share;
- mysql_mutex_lock(&example_mutex);
- length=(uint) strlen(table_name);
+ DBUG_ENTER("ha_example::get_share()");
- if (!(share=(EXAMPLE_SHARE*) my_hash_search(&example_open_tables,
- (uchar*) table_name,
- length)))
+ lock_shared_ha_data();
+ if (!(tmp_share= static_cast<Example_share*>(get_ha_share_ptr())))
{
- if (!(share=(EXAMPLE_SHARE *)
- my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
- &share, sizeof(*share),
- &tmp_name, length+1,
- NullS)))
- {
- mysql_mutex_unlock(&example_mutex);
- return NULL;
- }
+ tmp_share= new Example_share;
+ if (!tmp_share)
+ goto err;
- share->use_count=0;
- share->table_name_length=length;
- share->table_name=tmp_name;
- strmov(share->table_name,table_name);
- if (my_hash_insert(&example_open_tables, (uchar*) share))
- goto error;
- thr_lock_init(&share->lock);
- mysql_mutex_init(ex_key_mutex_EXAMPLE_SHARE_mutex,
- &share->mutex, MY_MUTEX_INIT_FAST);
+ set_ha_share_ptr(static_cast<Handler_share*>(tmp_share));
}
- share->use_count++;
- mysql_mutex_unlock(&example_mutex);
-
- return share;
-
-error:
- mysql_mutex_destroy(&share->mutex);
- my_free(share);
-
- return NULL;
-}
-
-
-/**
- @brief
- Free lock controls. We call this whenever we close a table. If the table had
- the last reference to the share, then we free memory associated with it.
-*/
-
-static int free_share(EXAMPLE_SHARE *share)
-{
- mysql_mutex_lock(&example_mutex);
- if (!--share->use_count)
- {
- my_hash_delete(&example_open_tables, (uchar*) share);
- thr_lock_delete(&share->lock);
- mysql_mutex_destroy(&share->mutex);
- my_free(share);
- }
- mysql_mutex_unlock(&example_mutex);
-
- return 0;
+err:
+ unlock_shared_ha_data();
+ DBUG_RETURN(tmp_share);
}
static handler* example_create_handler(handlerton *hton,
@@ -404,7 +326,7 @@ int ha_example::open(const char *name, int mode, uint test_if_locked)
{
DBUG_ENTER("ha_example::open");
- if (!(share = get_share(name, table)))
+ if (!(share = get_share()))
DBUG_RETURN(1);
thr_lock_data_init(&share->lock,&lock,NULL);
@@ -424,8 +346,7 @@ int ha_example::open(const char *name, int mode, uint test_if_locked)
/**
@brief
- Closes a table. We call the free_share() function to free any resources
- that we have allocated in the "shared" structure.
+ Closes a table.
@details
Called from sql_base.cc, sql_select.cc, and table.cc. In sql_select.cc it is
@@ -441,7 +362,7 @@ int ha_example::open(const char *name, int mode, uint test_if_locked)
int ha_example::close(void)
{
DBUG_ENTER("ha_example::close");
- DBUG_RETURN(free_share(share));
+ DBUG_RETURN(0);
}
@@ -1022,10 +943,26 @@ bool ha_example::check_if_incompatible_data(HA_CREATE_INFO *info,
for this example engine, we'll assume that changing ullparam or
boolparam requires a table to be rebuilt, while changing strparam
or enumparam - does not.
+
+ For debugging purposes we'll announce this to the client
+ (don't do it in your engine!)
+
*/
- if (param_new->ullparam != param_old->ullparam ||
- param_new->boolparam != param_old->boolparam)
+ if (param_new->ullparam != param_old->ullparam)
+ {
+ push_warning_printf(ha_thd(), Sql_condition::WARN_LEVEL_NOTE,
+ ER_UNKNOWN_ERROR, "EXAMPLE DEBUG: ULL %llu -> %llu",
+ param_old->ullparam, param_new->ullparam);
+ DBUG_RETURN(COMPATIBLE_DATA_NO);
+ }
+
+ if (param_new->boolparam != param_old->boolparam)
+ {
+ push_warning_printf(ha_thd(), Sql_condition::WARN_LEVEL_NOTE,
+ ER_UNKNOWN_ERROR, "EXAMPLE DEBUG: YESNO %u -> %u",
+ param_old->boolparam, param_new->boolparam);
DBUG_RETURN(COMPATIBLE_DATA_NO);
+ }
#ifndef DBUG_OFF
for (uint i= 0; i < table->s->fields; i++)
@@ -1033,17 +970,14 @@ bool ha_example::check_if_incompatible_data(HA_CREATE_INFO *info,
ha_field_option_struct *f_old, *f_new;
f_old= table->s->field[i]->option_struct;
DBUG_ASSERT(f_old);
- DBUG_PRINT("info", ("old field: %u old complex: '%-.64s'", i,
- (f_old->complex_param_to_parse_it_in_engine ?
- f_old->complex_param_to_parse_it_in_engine :
- "<NULL>")));
if (info->fields_option_struct[i])
{
f_new= info->fields_option_struct[i];
- DBUG_PRINT("info", ("old field: %u new complex: '%-.64s'", i,
- (f_new->complex_param_to_parse_it_in_engine ?
- f_new->complex_param_to_parse_it_in_engine :
- "<NULL>")));
+ push_warning_printf(ha_thd(), Sql_condition::WARN_LEVEL_NOTE,
+ ER_UNKNOWN_ERROR, "EXAMPLE DEBUG: Field %`s COMPLEX '%s' -> '%s'",
+ table->s->field[i]->field_name,
+ f_old->complex_param_to_parse_it_in_engine,
+ f_new->complex_param_to_parse_it_in_engine);
}
else
DBUG_PRINT("info", ("old field %i did not changed", i));
@@ -1131,7 +1065,7 @@ mysql_declare_plugin(example)
"Example storage engine",
PLUGIN_LICENSE_GPL,
example_init_func, /* Plugin Init */
- example_done_func, /* Plugin Deinit */
+ NULL, /* Plugin Deinit */
0x0001 /* 0.1 */,
func_status, /* status variables */
example_system_variables, /* system variables */
@@ -1148,7 +1082,7 @@ maria_declare_plugin(example)
"Example storage engine",
PLUGIN_LICENSE_GPL,
example_init_func, /* Plugin Init */
- example_done_func, /* Plugin Deinit */
+ NULL, /* Plugin Deinit */
0x0001, /* version number (0.1) */
func_status, /* status variables */
example_system_variables, /* system variables */
diff --git a/storage/example/ha_example.h b/storage/example/ha_example.h
index d3d31893781..d25541f7422 100644
--- a/storage/example/ha_example.h
+++ b/storage/example/ha_example.h
@@ -42,15 +42,20 @@
#include "my_base.h" /* ha_rows */
/** @brief
- EXAMPLE_SHARE is a structure that will be shared among all open handlers.
+ Example_share is a class that will be shared among all open handlers.
This example implements the minimum of what you will probably need.
*/
-typedef struct st_example_share {
- char *table_name;
- uint table_name_length,use_count;
+class Example_share : public Handler_share {
+public:
mysql_mutex_t mutex;
THR_LOCK lock;
-} EXAMPLE_SHARE;
+ Example_share();
+ ~Example_share()
+ {
+ thr_lock_delete(&lock);
+ mysql_mutex_destroy(&mutex);
+ }
+};
/** @brief
Class definition for the storage engine
@@ -58,7 +63,8 @@ typedef struct st_example_share {
class ha_example: public handler
{
THR_LOCK_DATA lock; ///< MySQL lock
- EXAMPLE_SHARE *share; ///< Shared lock info
+ Example_share *share; ///< Shared lock info
+ Example_share *get_share(); ///< Get the share
public:
ha_example(handlerton *hton, TABLE_SHARE *table_arg);
diff --git a/storage/federated/ha_federated.cc b/storage/federated/ha_federated.cc
index 035af2d348e..e5eb8fb7bd7 100644
--- a/storage/federated/ha_federated.cc
+++ b/storage/federated/ha_federated.cc
@@ -584,7 +584,7 @@ static int parse_url_error(FEDERATED_SHARE *share, TABLE *table, int error_num)
size_t buf_len;
DBUG_ENTER("ha_federated parse_url_error");
- buf_len= min(table->s->connect_string.length,
+ buf_len= MY_MIN(table->s->connect_string.length,
FEDERATED_QUERY_BUFFER_SIZE-1);
strmake(buf, table->s->connect_string.str, buf_len);
my_error(error_num, MYF(0), buf, 14);
@@ -1317,7 +1317,7 @@ bool ha_federated::create_where_from_key(String *to,
}
for (key_part= key_info->key_part,
- remainder= key_info->key_parts,
+ remainder= key_info->user_defined_key_parts,
length= ranges[i]->length,
ptr= ranges[i]->key; ;
remainder--,
@@ -1325,7 +1325,7 @@ bool ha_federated::create_where_from_key(String *to,
{
Field *field= key_part->field;
uint store_length= key_part->store_length;
- uint part_length= min(store_length, length);
+ uint part_length= MY_MIN(store_length, length);
needs_quotes= field->str_needs_quotes();
DBUG_DUMP("key, start of loop", ptr, length);
diff --git a/storage/federatedx/ha_federatedx.cc b/storage/federatedx/ha_federatedx.cc
index 1c9db78da4f..f8ec026be66 100644
--- a/storage/federatedx/ha_federatedx.cc
+++ b/storage/federatedx/ha_federatedx.cc
@@ -524,8 +524,8 @@ static int parse_url_error(FEDERATEDX_SHARE *share, TABLE_SHARE *table_s,
int buf_len;
DBUG_ENTER("ha_federatedx parse_url_error");
- buf_len= min(table_s->connect_string.length,
- FEDERATEDX_QUERY_BUFFER_SIZE-1);
+ buf_len= MY_MIN(table_s->connect_string.length,
+ FEDERATEDX_QUERY_BUFFER_SIZE-1);
strmake(buf, table_s->connect_string.str, buf_len);
my_error(error_num, MYF(0), buf, 14);
DBUG_RETURN(error_num);
@@ -1237,16 +1237,16 @@ bool ha_federatedx::create_where_from_key(String *to,
tmp.append(STRING_WITH_LEN(" ("));
}
- for (key_part= key_info->key_part,
- remainder= key_info->key_parts,
- length= ranges[i]->length,
- ptr= ranges[i]->key; ;
+ for (key_part= key_info->key_part,
+ remainder= key_info->user_defined_key_parts,
+ length= ranges[i]->length,
+ ptr= ranges[i]->key; ;
remainder--,
- key_part++)
+ key_part++)
{
Field *field= key_part->field;
uint store_length= key_part->store_length;
- uint part_length= min(store_length, length);
+ uint part_length= MY_MIN(store_length, length);
needs_quotes= field->str_needs_quotes();
DBUG_DUMP("key, start of loop", ptr, length);
diff --git a/storage/heap/ha_heap.cc b/storage/heap/ha_heap.cc
index 916abaa60ea..b9ff9d28159 100644
--- a/storage/heap/ha_heap.cc
+++ b/storage/heap/ha_heap.cc
@@ -211,14 +211,14 @@ void ha_heap::update_key_stats()
if (key->algorithm != HA_KEY_ALG_BTREE)
{
if (key->flags & HA_NOSAME)
- key->rec_per_key[key->key_parts-1]= 1;
+ key->rec_per_key[key->user_defined_key_parts-1]= 1;
else
{
ha_rows hash_buckets= file->s->keydef[i].hash_buckets;
uint no_records= hash_buckets ? (uint) (file->s->records/hash_buckets) : 2;
if (no_records < 2)
no_records= 2;
- key->rec_per_key[key->key_parts-1]= no_records;
+ key->rec_per_key[key->user_defined_key_parts-1]= no_records;
}
}
}
@@ -601,7 +601,7 @@ ha_rows ha_heap::records_in_range(uint inx, key_range *min_key,
/* Assert that info() did run. We need current statistics here. */
DBUG_ASSERT(key_stat_version == file->s->key_stat_version);
- return key->rec_per_key[key->key_parts-1];
+ return key->rec_per_key[key->user_defined_key_parts-1];
}
@@ -620,7 +620,7 @@ heap_prepare_hp_create_info(TABLE *table_arg, bool internal_table,
bzero(hp_create_info, sizeof(*hp_create_info));
for (key= parts= 0; key < keys; key++)
- parts+= table_arg->key_info[key].key_parts;
+ parts+= table_arg->key_info[key].user_defined_key_parts;
if (!(keydef= (HP_KEYDEF*) my_malloc(keys * sizeof(HP_KEYDEF) +
parts * sizeof(HA_KEYSEG),
@@ -631,9 +631,9 @@ heap_prepare_hp_create_info(TABLE *table_arg, bool internal_table,
{
KEY *pos= table_arg->key_info+key;
KEY_PART_INFO *key_part= pos->key_part;
- KEY_PART_INFO *key_part_end= key_part + pos->key_parts;
+ KEY_PART_INFO *key_part_end= key_part + pos->user_defined_key_parts;
- keydef[key].keysegs= (uint) pos->key_parts;
+ keydef[key].keysegs= (uint) pos->user_defined_key_parts;
keydef[key].flag= (pos->flags & (HA_NOSAME | HA_NULL_ARE_EQUAL));
keydef[key].seg= seg;
diff --git a/storage/heap/hp_create.c b/storage/heap/hp_create.c
index a8bc8e63810..e286ff69e61 100644
--- a/storage/heap/hp_create.c
+++ b/storage/heap/hp_create.c
@@ -254,18 +254,18 @@ static void init_block(HP_BLOCK *block, uint reclength, ulong min_records,
If not min_records and max_records are given, optimize for 1000 rows
*/
if (!min_records)
- min_records= min(1000, max_records);
+ min_records= MY_MIN(1000, max_records);
if (!max_records)
- max_records= max(min_records, 1000);
+ max_records= MY_MAX(min_records, 1000);
/*
We don't want too few records_in_block as otherwise the overhead of
of the HP_PTRS block will be too notable
*/
- records_in_block= max(1000, min_records);
- records_in_block= min(records_in_block, max_records);
+ records_in_block= MY_MAX(1000, min_records);
+ records_in_block= MY_MIN(records_in_block, max_records);
/* If big max_records is given, allocate bigger blocks */
- records_in_block= max(records_in_block, max_records / 10);
+ records_in_block= MY_MAX(records_in_block, max_records / 10);
/* We don't want too few blocks per row either */
if (records_in_block < 10)
records_in_block= 10;
diff --git a/storage/heap/hp_test2.c b/storage/heap/hp_test2.c
index 058a2904697..13b49fbb7ec 100644
--- a/storage/heap/hp_test2.c
+++ b/storage/heap/hp_test2.c
@@ -132,7 +132,7 @@ int main(int argc, char *argv[])
for (i=0 ; i < recant ; i++)
{
- n1=rnd(1000); n2=rnd(100); n3=rnd(min(recant*5,MAX_RECORDS));
+ n1=rnd(1000); n2=rnd(100); n3=rnd(MY_MIN(recant*5,MAX_RECORDS));
make_record(record,n1,n2,n3,"Pos",write_count);
if (heap_write(file,record))
@@ -208,7 +208,7 @@ int main(int argc, char *argv[])
printf("- Update\n");
for (i=0 ; i < write_count/10 ; i++)
{
- n1=rnd(1000); n2=rnd(100); n3=rnd(min(recant*2,MAX_RECORDS));
+ n1=rnd(1000); n2=rnd(100); n3=rnd(MY_MIN(recant*2,MAX_RECORDS));
make_record(record2, n1, n2, n3, "XXX", update);
if (rnd(2) == 1)
{
diff --git a/storage/innobase/CMakeLists.txt b/storage/innobase/CMakeLists.txt
index 318b45e43ae..ee8758a08d2 100644
--- a/storage/innobase/CMakeLists.txt
+++ b/storage/innobase/CMakeLists.txt
@@ -54,6 +54,8 @@ SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DUNIV_DEBUG -DUNIV_SYNC_DEB
#SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wconversion")
#ENDIF()
+CHECK_FUNCTION_EXISTS(sched_getcpu HAVE_SCHED_GETCPU)
+
IF(NOT MSVC)
# either define HAVE_IB_GCC_ATOMIC_BUILTINS or not
IF(NOT CMAKE_CROSSCOMPILING)
@@ -95,12 +97,36 @@ IF(NOT CMAKE_CROSSCOMPILING)
}"
HAVE_IB_GCC_ATOMIC_BUILTINS
)
+ CHECK_C_SOURCE_RUNS(
+ "#include<stdint.h>
+ int main()
+ {
+ int64_t x,y,res;
+
+ x = 10;
+ y = 123;
+ res = __sync_sub_and_fetch(&y, x);
+ if (res != y || y != 113) {
+ return(1);
+ }
+ res = __sync_add_and_fetch(&y, x);
+ if (res != y || y != 123) {
+ return(1);
+ }
+ return(0);
+ }"
+ HAVE_IB_GCC_ATOMIC_BUILTINS_64
+ )
ENDIF()
IF(HAVE_IB_GCC_ATOMIC_BUILTINS)
ADD_DEFINITIONS(-DHAVE_IB_GCC_ATOMIC_BUILTINS=1)
ENDIF()
+IF(HAVE_IB_GCC_ATOMIC_BUILTINS_64)
+ ADD_DEFINITIONS(-DHAVE_IB_GCC_ATOMIC_BUILTINS_64=1)
+ENDIF()
+
# either define HAVE_IB_ATOMIC_PTHREAD_T_GCC or not
IF(NOT CMAKE_CROSSCOMPILING)
CHECK_C_SOURCE_RUNS(
@@ -129,7 +155,8 @@ ENDIF()
ENDIF(NOT MSVC)
-SET(LINKER_SCRIPT)
+CHECK_FUNCTION_EXISTS(asprintf HAVE_ASPRINTF)
+CHECK_FUNCTION_EXISTS(vasprintf HAVE_VASPRINTF)
# Solaris atomics
IF(CMAKE_SYSTEM_NAME STREQUAL "SunOS")
@@ -150,10 +177,6 @@ IF(CMAKE_SYSTEM_NAME STREQUAL "SunOS")
ADD_DEFINITIONS(-DHAVE_IB_SOLARIS_ATOMICS=1)
ENDIF()
- IF(CMAKE_COMPILER_IS_GNUCC AND NOT HAVE_VISIBILITY_HIDDEN)
- SET(LINKER_SCRIPT "-Wl,-M${CMAKE_CURRENT_SOURCE_DIR}/plugin_exports")
- ENDIF()
-
IF(NOT CMAKE_CROSSCOMPILING)
# either define HAVE_IB_ATOMIC_PTHREAD_T_SOLARIS or not
CHECK_C_SOURCE_COMPILES(
@@ -233,13 +256,16 @@ ENDIF()
IF(MSVC)
# Avoid "unreferenced label" warning in generated file
GET_FILENAME_COMPONENT(_SRC_DIR ${CMAKE_CURRENT_LIST_FILE} PATH)
- SET_SOURCE_FILES_PROPERTIES(${_SRC_DIR}/pars/pars0grm.cc
+ SET_SOURCE_FILES_PROPERTIES(${_SRC_DIR}/pars/pars0grm.c
PROPERTIES COMPILE_FLAGS "/wd4102")
- SET_SOURCE_FILES_PROPERTIES(${_SRC_DIR}/pars/lexyy.cc
+ SET_SOURCE_FILES_PROPERTIES(${_SRC_DIR}/pars/lexyy.c
PROPERTIES COMPILE_FLAGS "/wd4003")
ENDIF()
-
+
+
SET(INNOBASE_SOURCES
+ api/api0api.cc
+ api/api0misc.cc
btr/btr0btr.cc
btr/btr0cur.cc
btr/btr0pcur.cc
@@ -260,6 +286,7 @@ SET(INNOBASE_SOURCES
dict/dict0load.cc
dict/dict0mem.cc
dict/dict0stats.cc
+ dict/dict0stats_bg.cc
dyn/dyn0dyn.cc
eval/eval0eval.cc
eval/eval0proc.cc
@@ -311,9 +338,11 @@ SET(INNOBASE_SOURCES
rem/rem0rec.cc
row/row0ext.cc
row/row0ftsort.cc
+ row/row0import.cc
row/row0ins.cc
row/row0merge.cc
row/row0mysql.cc
+ row/row0log.cc
row/row0purge.cc
row/row0row.cc
row/row0sel.cc
@@ -321,6 +350,7 @@ SET(INNOBASE_SOURCES
row/row0umod.cc
row/row0undo.cc
row/row0upd.cc
+ row/row0quiesce.cc
row/row0vers.cc
srv/srv0conc.cc
srv/srv0mon.cc
@@ -355,7 +385,18 @@ IF(WITH_INNODB)
SET(WITH_INNOBASE_STORAGE_ENGINE TRUE)
ENDIF()
+
+# On solaris, reduce symbol visibility, so loader does not mix
+# the same symbols from builtin innodb and from shared one.
+# Only required for old GCC (3.4.3) that does not support hidden visibility
+IF(CMAKE_SYSTEM_NAME MATCHES "SunOS" AND CMAKE_COMPILER_IS_GNUCC
+ AND NOT HAVE_VISIBILITY_HIDDEN)
+ SET(LINKER_SCRIPT "-Wl,-M${CMAKE_CURRENT_SOURCE_DIR}/plugin_exports")
+ELSE()
+ SET(LINKER_SCRIPT)
+ENDIF()
+
MYSQL_ADD_PLUGIN(innobase ${INNOBASE_SOURCES} STORAGE_ENGINE
DEFAULT
MODULE_OUTPUT_NAME ha_innodb
- LINK_LIBRARIES ${ZLIB_LIBRARY})
+ LINK_LIBRARIES ${ZLIB_LIBRARY} ${LINKER_SCRIPT})
diff --git a/storage/innobase/api/api0api.cc b/storage/innobase/api/api0api.cc
new file mode 100644
index 00000000000..5f9762a1846
--- /dev/null
+++ b/storage/innobase/api/api0api.cc
@@ -0,0 +1,3859 @@
+/*****************************************************************************
+
+Copyright (c) 2008, 2012, Oracle and/or its affiliates. All Rights Reserved.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
+
+*****************************************************************************/
+
+/**************************************************//**
+@file api/api0api.cc
+InnoDB Native API
+
+2008-08-01 Created Sunny Bains
+3/20/2011 Jimmy Yang extracted from Embedded InnoDB
+*******************************************************/
+
+#include "univ.i"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#include "api0api.h"
+#include "api0misc.h"
+#include "srv0start.h"
+#include "dict0dict.h"
+#include "btr0pcur.h"
+#include "row0ins.h"
+#include "row0upd.h"
+#include "row0vers.h"
+#include "trx0roll.h"
+#include "dict0crea.h"
+#include "row0merge.h"
+#include "pars0pars.h"
+#include "lock0types.h"
+#include "row0sel.h"
+#include "lock0lock.h"
+#include "rem0cmp.h"
+#include "ut0dbg.h"
+#include "dict0priv.h"
+#include "ut0ut.h"
+#include "ha_prototypes.h"
+#include "trx0roll.h"
+
+/** configure variable for binlog option with InnoDB APIs */
+my_bool ib_binlog_enabled = FALSE;
+
+/** configure variable for MDL option with InnoDB APIs */
+my_bool ib_mdl_enabled = FALSE;
+
+/** configure variable for disable rowlock with InnoDB APIs */
+my_bool ib_disable_row_lock = FALSE;
+
+/** configure variable for Transaction isolation levels */
+ulong ib_trx_level_setting = IB_TRX_READ_UNCOMMITTED;
+
+/** configure variable for background commit interval in seconds */
+ulong ib_bk_commit_interval = 0;
+
+/** InnoDB tuple types. */
+enum ib_tuple_type_t{
+ TPL_TYPE_ROW, /*!< Data row tuple */
+ TPL_TYPE_KEY /*!< Index key tuple */
+};
+
+/** Query types supported. */
+enum ib_qry_type_t{
+ QRY_NON, /*!< None/Sentinel */
+ QRY_INS, /*!< Insert operation */
+ QRY_UPD, /*!< Update operation */
+ QRY_SEL /*!< Select operation */
+};
+
+/** Query graph types. */
+struct ib_qry_grph_t {
+ que_fork_t* ins; /*!< Innobase SQL query graph used
+ in inserts */
+ que_fork_t* upd; /*!< Innobase SQL query graph used
+ in updates or deletes */
+ que_fork_t* sel; /*!< dummy query graph used in
+ selects */
+};
+
+/** Query node types. */
+struct ib_qry_node_t {
+ ins_node_t* ins; /*!< Innobase SQL insert node
+ used to perform inserts to the table */
+ upd_node_t* upd; /*!< Innobase SQL update node
+ used to perform updates and deletes */
+ sel_node_t* sel; /*!< Innobase SQL select node
+ used to perform selects on the table */
+};
+
+/** Query processing fields. */
+struct ib_qry_proc_t {
+
+ ib_qry_node_t node; /*!< Query node*/
+
+ ib_qry_grph_t grph; /*!< Query graph */
+};
+
+/** Cursor instance for traversing tables/indexes. This will eventually
+become row_prebuilt_t. */
+struct ib_cursor_t {
+ mem_heap_t* heap; /*!< Instance heap */
+
+ mem_heap_t* query_heap; /*!< Heap to use for query graphs */
+
+ ib_qry_proc_t q_proc; /*!< Query processing info */
+
+ ib_match_mode_t match_mode; /*!< ib_cursor_moveto match mode */
+
+ row_prebuilt_t* prebuilt; /*!< For reading rows */
+
+ bool valid_trx; /*!< Valid transaction attached */
+};
+
+/** InnoDB table columns used during table and index schema creation. */
+struct ib_col_t {
+ const char* name; /*!< Name of column */
+
+ ib_col_type_t ib_col_type; /*!< Main type of the column */
+
+ ulint len; /*!< Length of the column */
+
+ ib_col_attr_t ib_col_attr; /*!< Column attributes */
+
+};
+
+/** InnoDB index columns used during index and index schema creation. */
+struct ib_key_col_t {
+ const char* name; /*!< Name of column */
+
+ ulint prefix_len; /*!< Column index prefix len or 0 */
+};
+
+struct ib_table_def_t;
+
+/** InnoDB index schema used during index creation */
+struct ib_index_def_t {
+ mem_heap_t* heap; /*!< Heap used to build this and all
+ its columns in the list */
+
+ const char* name; /*!< Index name */
+
+ dict_table_t* table; /*!< Parent InnoDB table */
+
+ ib_table_def_t* schema; /*!< Parent table schema that owns
+ this instance */
+
+ ibool clustered; /*!< True if clustered index */
+
+ ibool unique; /*!< True if unique index */
+
+ ib_vector_t* cols; /*!< Vector of columns */
+
+ trx_t* usr_trx; /*!< User transacton covering the
+ DDL operations */
+};
+
+/** InnoDB table schema used during table creation */
+struct ib_table_def_t {
+ mem_heap_t* heap; /*!< Heap used to build this and all
+ its columns in the list */
+ const char* name; /*!< Table name */
+
+ ib_tbl_fmt_t ib_tbl_fmt; /*!< Row format */
+
+ ulint page_size; /*!< Page size */
+
+ ib_vector_t* cols; /*!< Vector of columns */
+
+ ib_vector_t* indexes; /*!< Vector of indexes */
+
+ dict_table_t* table; /* Table read from or NULL */
+};
+
+/** InnoDB tuple used for key operations. */
+struct ib_tuple_t {
+ mem_heap_t* heap; /*!< Heap used to build
+ this and for copying
+ the column values. */
+
+ ib_tuple_type_t type; /*!< Tuple discriminitor. */
+
+ const dict_index_t* index; /*!< Index for tuple can be either
+ secondary or cluster index. */
+
+ dtuple_t* ptr; /*!< The internal tuple
+ instance */
+};
+
+/** The following counter is used to convey information to InnoDB
+about server activity: in selects it is not sensible to call
+srv_active_wake_master_thread after each fetch or search, we only do
+it every INNOBASE_WAKE_INTERVAL'th step. */
+
+#define INNOBASE_WAKE_INTERVAL 32
+
+/*****************************************************************//**
+Check whether the Innodb persistent cursor is positioned.
+@return IB_TRUE if positioned */
+UNIV_INLINE
+ib_bool_t
+ib_btr_cursor_is_positioned(
+/*========================*/
+ btr_pcur_t* pcur) /*!< in: InnoDB persistent cursor */
+{
+ return(pcur->old_stored == BTR_PCUR_OLD_STORED
+ && (pcur->pos_state == BTR_PCUR_IS_POSITIONED
+ || pcur->pos_state == BTR_PCUR_WAS_POSITIONED));
+}
+
+
+/********************************************************************//**
+Open a table using the table id, if found then increment table ref count.
+@return table instance if found */
+static
+dict_table_t*
+ib_open_table_by_id(
+/*================*/
+ ib_id_u64_t tid, /*!< in: table id to lookup */
+ ib_bool_t locked) /*!< in: TRUE if own dict mutex */
+{
+ dict_table_t* table;
+ table_id_t table_id;
+
+ table_id = tid;
+
+ if (!locked) {
+ dict_mutex_enter_for_mysql();
+ }
+
+ table = dict_table_open_on_id(table_id, FALSE, FALSE);
+
+ if (table != NULL && table->ibd_file_missing) {
+ table = NULL;
+ }
+
+ if (!locked) {
+ dict_mutex_exit_for_mysql();
+ }
+
+ return(table);
+}
+
+/********************************************************************//**
+Open a table using the table name, if found then increment table ref count.
+@return table instance if found */
+UNIV_INTERN
+void*
+ib_open_table_by_name(
+/*==================*/
+ const char* name) /*!< in: table name to lookup */
+{
+ dict_table_t* table;
+
+ table = dict_table_open_on_name(name, FALSE, FALSE,
+ DICT_ERR_IGNORE_NONE);
+
+ if (table != NULL && table->ibd_file_missing) {
+ table = NULL;
+ }
+
+ return(table);
+}
+
+/********************************************************************//**
+Find table using table name.
+@return table instance if found */
+static
+dict_table_t*
+ib_lookup_table_by_name(
+/*====================*/
+ const char* name) /*!< in: table name to lookup */
+{
+ dict_table_t* table;
+
+ table = dict_table_get_low(name);
+
+ if (table != NULL && table->ibd_file_missing) {
+ table = NULL;
+ }
+
+ return(table);
+}
+
+/********************************************************************//**
+Increments innobase_active_counter and every INNOBASE_WAKE_INTERVALth
+time calls srv_active_wake_master_thread. This function should be used
+when a single database operation may introduce a small need for
+server utility activity, like checkpointing. */
+UNIV_INLINE
+void
+ib_wake_master_thread(void)
+/*=======================*/
+{
+ static ulint ib_signal_counter = 0;
+
+ ++ib_signal_counter;
+
+ if ((ib_signal_counter % INNOBASE_WAKE_INTERVAL) == 0) {
+ srv_active_wake_master_thread();
+ }
+}
+
+/*********************************************************************//**
+Calculate the max row size of the columns in a cluster index.
+@return max row length */
+UNIV_INLINE
+ulint
+ib_get_max_row_len(
+/*===============*/
+ dict_index_t* cluster) /*!< in: cluster index */
+{
+ ulint i;
+ ulint max_len = 0;
+ ulint n_fields = cluster->n_fields;
+
+ /* Add the size of the ordering columns in the
+ clustered index. */
+ for (i = 0; i < n_fields; ++i) {
+ const dict_col_t* col;
+
+ col = dict_index_get_nth_col(cluster, i);
+
+ /* Use the maximum output size of
+ mach_write_compressed(), although the encoded
+ length should always fit in 2 bytes. */
+ max_len += dict_col_get_max_size(col);
+ }
+
+ return(max_len);
+}
+
+/*****************************************************************//**
+Read the columns from a rec into a tuple. */
+static
+void
+ib_read_tuple(
+/*==========*/
+ const rec_t* rec, /*!< in: Record to read */
+ ib_bool_t page_format, /*!< in: IB_TRUE if compressed format */
+ ib_tuple_t* tuple) /*!< in: tuple to read into */
+{
+ ulint i;
+ void* ptr;
+ rec_t* copy;
+ ulint rec_meta_data;
+ ulint n_index_fields;
+ ulint offsets_[REC_OFFS_NORMAL_SIZE];
+ ulint* offsets = offsets_;
+ dtuple_t* dtuple = tuple->ptr;
+ const dict_index_t* index = tuple->index;
+
+ rec_offs_init(offsets_);
+
+ offsets = rec_get_offsets(
+ rec, index, offsets, ULINT_UNDEFINED, &tuple->heap);
+
+ rec_meta_data = rec_get_info_bits(rec, page_format);
+ dtuple_set_info_bits(dtuple, rec_meta_data);
+
+ /* Make a copy of the rec. */
+ ptr = mem_heap_alloc(tuple->heap, rec_offs_size(offsets));
+ copy = rec_copy(ptr, rec, offsets);
+
+ n_index_fields = ut_min(
+ rec_offs_n_fields(offsets), dtuple_get_n_fields(dtuple));
+
+ for (i = 0; i < n_index_fields; ++i) {
+ ulint len;
+ const byte* data;
+ dfield_t* dfield;
+
+ if (tuple->type == TPL_TYPE_ROW) {
+ const dict_col_t* col;
+ ulint col_no;
+ const dict_field_t* index_field;
+
+ index_field = dict_index_get_nth_field(index, i);
+ col = dict_field_get_col(index_field);
+ col_no = dict_col_get_no(col);
+
+ dfield = dtuple_get_nth_field(dtuple, col_no);
+ } else {
+ dfield = dtuple_get_nth_field(dtuple, i);
+ }
+
+ data = rec_get_nth_field(copy, offsets, i, &len);
+
+ /* Fetch and copy any externally stored column. */
+ if (rec_offs_nth_extern(offsets, i)) {
+
+ ulint zip_size;
+
+ zip_size = dict_table_zip_size(index->table);
+
+ data = btr_rec_copy_externally_stored_field(
+ copy, offsets, zip_size, i, &len,
+ tuple->heap);
+
+ ut_a(len != UNIV_SQL_NULL);
+ }
+
+ dfield_set_data(dfield, data, len);
+ }
+}
+
+/*****************************************************************//**
+Create an InnoDB key tuple.
+@return tuple instance created, or NULL */
+static
+ib_tpl_t
+ib_key_tuple_new_low(
+/*=================*/
+ const dict_index_t* index, /*!< in: index for which tuple
+ required */
+ ulint n_cols, /*!< in: no. of user defined cols */
+ mem_heap_t* heap) /*!< in: memory heap */
+{
+ ib_tuple_t* tuple;
+ ulint i;
+ ulint n_cmp_cols;
+
+ tuple = static_cast<ib_tuple_t*>(
+ mem_heap_alloc(heap, sizeof(*tuple)));
+
+ if (tuple == NULL) {
+ mem_heap_free(heap);
+ return(NULL);
+ }
+
+ tuple->heap = heap;
+ tuple->index = index;
+ tuple->type = TPL_TYPE_KEY;
+
+ /* Is it a generated clustered index ? */
+ if (n_cols == 0) {
+ ++n_cols;
+ }
+
+ tuple->ptr = dtuple_create(heap, n_cols);
+
+ /* Copy types and set to SQL_NULL. */
+ dict_index_copy_types(tuple->ptr, index, n_cols);
+
+ for (i = 0; i < n_cols; i++) {
+
+ dfield_t* dfield;
+
+ dfield = dtuple_get_nth_field(tuple->ptr, i);
+ dfield_set_null(dfield);
+ }
+
+ n_cmp_cols = dict_index_get_n_ordering_defined_by_user(index);
+
+ dtuple_set_n_fields_cmp(tuple->ptr, n_cmp_cols);
+
+ return((ib_tpl_t) tuple);
+}
+
+/*****************************************************************//**
+Create an InnoDB key tuple. Allocates the backing heap and hands it
+off to ib_key_tuple_new_low(), which frees it on failure.
+@return tuple instance created, or NULL */
+static
+ib_tpl_t
+ib_key_tuple_new(
+/*=============*/
+	const dict_index_t*	index,	/*!< in: index of tuple */
+	ulint			n_cols)	/*!< in: no. of user defined cols */
+{
+	mem_heap_t*	tuple_heap = mem_heap_create(64);
+
+	if (tuple_heap == NULL) {
+		return(NULL);
+	}
+
+	return(ib_key_tuple_new_low(index, n_cols, tuple_heap));
+}
+
+/*****************************************************************//**
+Create an InnoDB row tuple.
+@return tuple instance, or NULL */
+static
+ib_tpl_t
+ib_row_tuple_new_low(
+/*=================*/
+	const dict_index_t*	index,	/*!< in: index of tuple */
+	ulint			n_cols,	/*!< in: no. of cols in tuple */
+	mem_heap_t*		heap)	/*!< in: memory heap; ownership
+					passes to the tuple on success,
+					freed here on failure */
+{
+	ib_tuple_t*	tuple;
+
+	tuple = static_cast<ib_tuple_t*>(mem_heap_alloc(heap, sizeof(*tuple)));
+
+	if (tuple == NULL) {
+		/* On failure this function owns and releases the heap. */
+		mem_heap_free(heap);
+		return(NULL);
+	}
+
+	tuple->heap = heap;
+	tuple->index = index;
+	tuple->type = TPL_TYPE_ROW;
+
+	tuple->ptr = dtuple_create(heap, n_cols);
+
+	/* Copy types and set to SQL_NULL. */
+	dict_table_copy_types(tuple->ptr, index->table);
+
+	return((ib_tpl_t) tuple);
+}
+
+/*****************************************************************//**
+Create an InnoDB row tuple. Allocates the backing heap and hands it
+off to ib_row_tuple_new_low(), which frees it on failure.
+@return tuple instance, or NULL */
+static
+ib_tpl_t
+ib_row_tuple_new(
+/*=============*/
+	const dict_index_t*	index,	/*!< in: index of tuple */
+	ulint			n_cols)	/*!< in: no. of cols in tuple */
+{
+	mem_heap_t*	tuple_heap = mem_heap_create(64);
+
+	if (tuple_heap == NULL) {
+		return(NULL);
+	}
+
+	return(ib_row_tuple_new_low(index, n_cols, tuple_heap));
+}
+
+/*****************************************************************//**
+Begin a transaction: start the trx if necessary, set its isolation
+level and attach the (optional) client THD.
+@return innobase txn handle */
+UNIV_INTERN
+ib_err_t
+ib_trx_start(
+/*=========*/
+	ib_trx_t	ib_trx,		/*!< in: transaction to restart */
+	ib_trx_level_t	ib_trx_level,	/*!< in: trx isolation level */
+	void*		thd)		/*!< in: THD */
+{
+	ib_err_t	err = DB_SUCCESS;
+	trx_t*		trx = (trx_t*) ib_trx;
+
+	ut_a(ib_trx_level <= IB_TRX_SERIALIZABLE);
+
+	/* No-op if the trx is already in the active state. */
+	trx_start_if_not_started(trx);
+
+	trx->isolation_level = ib_trx_level;
+
+	/* FIXME: This is a place holder, we should add an arg that comes
+	from the client. */
+	trx->mysql_thd = static_cast<THD*>(thd);
+
+	return(err);
+}
+
+/*****************************************************************//**
+Begin a transaction. This will allocate a new transaction handle and
+put the transaction in the active state.
+@return innobase txn handle */
+UNIV_INTERN
+ib_trx_t
+ib_trx_begin(
+/*=========*/
+	ib_trx_level_t	ib_trx_level)	/*!< in: trx isolation level */
+{
+	trx_t*		trx;
+	ib_err_t	err;
+
+	trx = trx_allocate_for_mysql();
+
+	/* ib_trx_start() returns an ib_err_t, not a boolean; compare
+	against DB_SUCCESS explicitly instead of relying on the error
+	code's numeric value happening to be non-zero. */
+	err = ib_trx_start((ib_trx_t) trx, ib_trx_level, NULL);
+	ut_a(err == DB_SUCCESS);
+
+	return((ib_trx_t) trx);
+}
+
+/*****************************************************************//**
+Get the transaction's state.
+@return transaction state */
+UNIV_INTERN
+ib_trx_state_t
+ib_trx_state(
+/*=========*/
+	ib_trx_t	ib_trx)		/*!< in: trx handle */
+{
+	const trx_t*	trx = (const trx_t*) ib_trx;
+
+	/* Internal trx state codes map directly onto the public
+	ib_trx_state_t values. */
+	return((ib_trx_state_t) trx->state);
+}
+
+/*****************************************************************//**
+Get a trx start time.
+@return trx start_time */
+UNIV_INTERN
+ib_u64_t
+ib_trx_get_start_time(
+/*==================*/
+	ib_trx_t	ib_trx)		/*!< in: transaction */
+{
+	const trx_t*	trx = (const trx_t*) ib_trx;
+
+	return(static_cast<ib_u64_t>(trx->start_time));
+}
+/*****************************************************************//**
+Release the resources of the transaction. The handle must not be
+used by the caller after this call.
+@return DB_SUCCESS or err code */
+UNIV_INTERN
+ib_err_t
+ib_trx_release(
+/*===========*/
+	ib_trx_t	ib_trx)		/*!< in: trx handle */
+{
+	trx_t*	t = (trx_t*) ib_trx;
+
+	ut_ad(t != NULL);
+	trx_free_for_mysql(t);
+
+	return(DB_SUCCESS);
+}
+
+/*****************************************************************//**
+Commit a transaction. This function will also release the schema
+latches too.
+@return DB_SUCCESS or err code */
+
+ib_err_t
+ib_trx_commit(
+/*==========*/
+	ib_trx_t	ib_trx)		/*!< in: trx handle */
+{
+	ib_err_t	err = DB_SUCCESS;
+	trx_t*		trx = (trx_t*) ib_trx;
+
+	/* A trx that never started has nothing to commit; just
+	release its resources. */
+	if (trx->state == TRX_STATE_NOT_STARTED) {
+		err = ib_trx_release(ib_trx);
+		return(err);
+	}
+
+	trx_commit(trx);
+
+	/* The handle is freed here; the caller must not reuse it. */
+	err = ib_trx_release(ib_trx);
+	ut_a(err == DB_SUCCESS);
+
+	return(DB_SUCCESS);
+}
+
+/*****************************************************************//**
+Rollback a transaction. This function will also release the schema
+latches too.
+@return DB_SUCCESS or err code */
+UNIV_INTERN
+ib_err_t
+ib_trx_rollback(
+/*============*/
+	ib_trx_t	ib_trx)		/*!< in: trx handle */
+{
+	ib_err_t	err;
+	trx_t*		trx = (trx_t*) ib_trx;
+
+	err = static_cast<ib_err_t>(trx_rollback_for_mysql(trx));
+
+	/* It should always succeed */
+	ut_a(err == DB_SUCCESS);
+
+	/* The handle is freed here; the caller must not reuse it. */
+	err = ib_trx_release(ib_trx);
+	ut_a(err == DB_SUCCESS);
+
+	/* Let the master thread process any pending background work. */
+	ib_wake_master_thread();
+
+	return(err);
+}
+
+/*****************************************************************//**
+Find an index definition from the index vector using index name.
+@return index def. if found else NULL */
+UNIV_INLINE
+const ib_index_def_t*
+ib_table_find_index(
+/*================*/
+	ib_vector_t*	indexes,	/*!< in: vector of indexes */
+	const char*	name)		/*!< in: index name */
+{
+	ulint	n_defs = ib_vector_size(indexes);
+
+	for (ulint i = 0; i < n_defs; ++i) {
+		const ib_index_def_t*	def;
+
+		def = (ib_index_def_t*) ib_vector_get(indexes, i);
+
+		/* Index names compare case-insensitively. */
+		if (innobase_strcasecmp(name, def->name) == 0) {
+			return(def);
+		}
+	}
+
+	return(NULL);
+}
+
+/*****************************************************************//**
+Get the InnoDB internal precise type from the schema column definition.
+@return precise type in api format */
+UNIV_INLINE
+ulint
+ib_col_get_prtype(
+/*==============*/
+	const ib_col_t*	ib_col)		/*!< in: column definition */
+{
+	ulint	prtype = 0;
+
+	if (ib_col->ib_col_attr & IB_COL_UNSIGNED) {
+		/* Only integer columns may be declared unsigned. */
+		ut_a(ib_col->ib_col_type == IB_INT);
+
+		prtype |= DATA_UNSIGNED;
+	}
+
+	if (ib_col->ib_col_attr & IB_COL_NOT_NULL) {
+		prtype |= DATA_NOT_NULL;
+	}
+
+	return(prtype);
+}
+
+/*****************************************************************//**
+Get the InnoDB internal main type from the schema column definition.
+@return column main type */
+UNIV_INLINE
+ulint
+ib_col_get_mtype(
+/*==============*/
+	const ib_col_t*	ib_col)		/*!< in: column definition */
+{
+	/* The public api0api.h type codes map one-to-one onto the
+	internal numeric main-type codes, so no translation is done. */
+	return(ib_col->ib_col_type);
+}
+
+/*****************************************************************//**
+Find a column in the column vector with the same name.
+@return col. def. if found else NULL */
+UNIV_INLINE
+const ib_col_t*
+ib_table_find_col(
+/*==============*/
+	const ib_vector_t*	cols,	/*!< in: column list head */
+	const char*		name)	/*!< in: column name to find */
+{
+	ulint	n_cols = ib_vector_size(cols);
+
+	for (ulint i = 0; i < n_cols; ++i) {
+		const ib_col_t*	col;
+
+		col = static_cast<const ib_col_t*>(
+			ib_vector_get((ib_vector_t*) cols, i));
+
+		/* Column names compare case-insensitively. */
+		if (innobase_strcasecmp(col->name, name) == 0) {
+			return(col);
+		}
+	}
+
+	return(NULL);
+}
+
+/*****************************************************************//**
+Find a column in the column list with the same name.
+@return col. def. if found else NULL */
+UNIV_INLINE
+const ib_key_col_t*
+ib_index_find_col(
+/*==============*/
+	ib_vector_t*	cols,		/*!< in: column list head */
+	const char*	name)		/*!< in: column name to find */
+{
+	ulint	n_cols = ib_vector_size(cols);
+
+	for (ulint i = 0; i < n_cols; ++i) {
+		const ib_key_col_t*	key_col;
+
+		key_col = static_cast<ib_key_col_t*>(ib_vector_get(cols, i));
+
+		/* Column names compare case-insensitively. */
+		if (innobase_strcasecmp(key_col->name, name) == 0) {
+			return(key_col);
+		}
+	}
+
+	return(NULL);
+}
+
+#ifdef __WIN__
+/*****************************************************************//**
+Convert a NUL-terminated string to lower case, in place. */
+static
+void
+ib_to_lower_case(
+/*=============*/
+	char*	ptr)		/*!< string to convert to lower case */
+{
+	for (; *ptr != '\0'; ++ptr) {
+		*ptr = tolower(*ptr);
+	}
+}
+#endif /* __WIN__ */
+
+/*****************************************************************//**
+Normalizes a table name string. A normalized name consists of the
+database name catenated to '/' and table name. An example:
+test/mytable. On Windows normalization puts both the database name and the
+table name always to lower case. This function can be called for system
+tables and they don't have a database component. For tables that don't have
+a database component, we don't normalize them to lower case on Windows.
+The assumption is that they are system tables that reside in the system
+table space. */
+static
+void
+ib_normalize_table_name(
+/*====================*/
+	char*		norm_name,	/*!< out: normalized name as a
+					null-terminated string; the caller
+					must provide a buffer at least
+					strlen(name) + 1 bytes long */
+	const char*	name)		/*!< in: table name string */
+{
+	const char*	ptr = name;
+
+	/* Scan name from the end */
+
+	ptr += ut_strlen(name) - 1;
+
+	/* Find the start of the table name: back up to the last
+	path separator, accepting both '/' and '\\'. */
+	while (ptr >= name && *ptr != '\\' && *ptr != '/' && ptr > name) {
+		--ptr;
+	}
+
+
+	/* For system tables there is no '/' or dbname. */
+	ut_a(ptr >= name);
+
+	if (ptr > name) {
+		const char*	db_name;
+		const char*	table_name;
+
+		table_name = ptr + 1;
+
+		--ptr;
+
+		/* Back up once more to find the start of the
+		database name component. */
+		while (ptr >= name && *ptr != '\\' && *ptr != '/') {
+			ptr--;
+		}
+
+		db_name = ptr + 1;
+
+		/* Copy "dbname<sep>tablename" including the
+		terminating NUL, then overwrite the separator. */
+		memcpy(norm_name, db_name,
+		       ut_strlen(name) + 1 - (db_name - name));
+
+		norm_name[table_name - db_name - 1] = '/';
+#ifdef __WIN__
+		ib_to_lower_case(norm_name);
+#endif
+	} else {
+		/* No separator found: assumed to be a system table
+		name, copied verbatim (no lower-casing on Windows). */
+		ut_strcpy(norm_name, name);
+	}
+}
+
+/*****************************************************************//**
+Check whether the table name conforms to our requirements. Currently
+we only do a simple check for the presence of a '/'.
+@return DB_SUCCESS or err code */
+UNIV_INTERN
+ib_err_t
+ib_table_name_check(
+/*================*/
+	const char*	name)	/*!< in: table name to check */
+{
+	const char*	slash = NULL;
+	ulint		len = ut_strlen(name);
+
+	/* Reject empty-ish names and names that begin or end with a
+	separator or a relative-path prefix. Note: when len == 2 the
+	name[2] read below is the NUL terminator, so it is safe. */
+	if (len < 2
+	    || *name == '/'
+	    || name[len - 1] == '/'
+	    || (name[0] == '.' && name[1] == '/')
+	    || (name[0] == '.' && name[1] == '.' && name[2] == '/')) {
+
+		return(DB_DATA_MISMATCH);
+	}
+
+	for ( ; *name; ++name) {
+#ifdef __WIN__
+		/* Check for reserved characters in DOS filenames. */
+		switch (*name) {
+		case ':':
+		case '|':
+		case '"':
+		case '*':
+		case '<':
+		case '>':
+			return(DB_DATA_MISMATCH);
+		}
+#endif /* __WIN__ */
+		if (*name == '/') {
+			/* Exactly one '/' is allowed: it separates the
+			database name from the table name. */
+			if (slash) {
+				return(DB_DATA_MISMATCH);
+			}
+			slash = name;
+		}
+	}
+
+	return(slash ? DB_SUCCESS : DB_DATA_MISMATCH);
+}
+
+
+
+/*****************************************************************//**
+Get an index definition that is tagged as a clustered index.
+@return cluster index schema */
+UNIV_INLINE
+ib_index_def_t*
+ib_find_clustered_index(
+/*====================*/
+	ib_vector_t*	indexes)	/*!< in: index defs. to search */
+{
+	ulint	n_defs = ib_vector_size(indexes);
+
+	for (ulint i = 0; i < n_defs; ++i) {
+		ib_index_def_t*	def;
+
+		def = static_cast<ib_index_def_t*>(
+			ib_vector_get(indexes, i));
+
+		if (def->clustered) {
+			return(def);
+		}
+	}
+
+	/* No definition was tagged as clustered. */
+	return(NULL);
+}
+
+/*****************************************************************//**
+Get a table id. The caller must have acquired the dictionary mutex.
+@return DB_SUCCESS if found */
+static
+ib_err_t
+ib_table_get_id_low(
+/*================*/
+	const char*	table_name,	/*!< in: table to find */
+	ib_id_u64_t*	table_id)	/*!< out: table id if found,
+					0 otherwise */
+{
+	dict_table_t*	table;
+
+	*table_id = 0;
+
+	table = ib_lookup_table_by_name(table_name);
+
+	if (table == NULL) {
+		return(DB_TABLE_NOT_FOUND);
+	}
+
+	*table_id = table->id;
+
+	return(DB_SUCCESS);
+}
+
+/*****************************************************************//**
+Create an internal cursor instance.
+@return DB_SUCCESS or err code */
+static
+ib_err_t
+ib_create_cursor(
+/*=============*/
+	ib_crsr_t*	ib_crsr,	/*!< out: InnoDB cursor */
+	dict_table_t*	table,		/*!< in: table instance */
+	dict_index_t*	index,		/*!< in: index to use */
+	trx_t*		trx)		/*!< in: transaction; may be NULL */
+{
+	mem_heap_t*	heap;
+	ib_cursor_t*	cursor;
+	ib_err_t	err = DB_SUCCESS;
+
+	/* The cursor struct itself lives on this heap; the heap is
+	released in ib_cursor_close(). */
+	heap = mem_heap_create(sizeof(*cursor) * 2);
+
+	if (heap != NULL) {
+		row_prebuilt_t*	prebuilt;
+
+		cursor = static_cast<ib_cursor_t*>(
+			mem_heap_zalloc(heap, sizeof(*cursor)));
+
+		cursor->heap = heap;
+
+		/* Separate heap for query graphs; emptied on reset. */
+		cursor->query_heap = mem_heap_create(64);
+
+		if (cursor->query_heap == NULL) {
+			mem_heap_free(heap);
+
+			return(DB_OUT_OF_MEMORY);
+		}
+
+		cursor->prebuilt = row_create_prebuilt(table, 0);
+
+		prebuilt = cursor->prebuilt;
+
+		prebuilt->trx = trx;
+
+		cursor->valid_trx = TRUE;
+
+		prebuilt->table = table;
+		prebuilt->select_lock_type = LOCK_NONE;
+		prebuilt->innodb_api = TRUE;
+
+		prebuilt->index = index;
+
+		ut_a(prebuilt->index != NULL);
+
+		if (prebuilt->trx != NULL) {
+			/* Balanced by the decrement in
+			ib_cursor_reset()/ib_cursor_close(). */
+			++prebuilt->trx->n_mysql_tables_in_use;
+
+			prebuilt->index_usable =
+				row_merge_is_index_usable(
+					prebuilt->trx, prebuilt->index);
+
+			/* Assign a read view if the transaction does
+			not have it yet */
+
+			trx_assign_read_view(prebuilt->trx);
+		}
+
+		*ib_crsr = (ib_crsr_t) cursor;
+	} else {
+		err = DB_OUT_OF_MEMORY;
+	}
+
+	return(err);
+}
+
+/*****************************************************************//**
+Create an internal cursor instance, and set prebuilt->index to index
+with supplied index_id.
+@return DB_SUCCESS or err code */
+static
+ib_err_t
+ib_create_cursor_with_index_id(
+/*===========================*/
+	ib_crsr_t*	ib_crsr,	/*!< out: InnoDB cursor */
+	dict_table_t*	table,		/*!< in: table instance */
+	ib_id_u64_t	index_id,	/*!< in: index id or 0 */
+	trx_t*		trx)		/*!< in: transaction */
+{
+	dict_index_t*	index;
+
+	if (index_id == 0) {
+		/* Default to the clustered index. */
+		index = dict_table_get_first_index(table);
+	} else {
+		/* Lookup by id requires holding the dict mutex. */
+		mutex_enter(&dict_sys->mutex);
+		index = dict_index_find_on_id_low(index_id);
+		mutex_exit(&dict_sys->mutex);
+	}
+
+	return(ib_create_cursor(ib_crsr, table, index, trx));
+}
+
+/*****************************************************************//**
+Open an InnoDB table and return a cursor handle to it.
+@return DB_SUCCESS or err code */
+UNIV_INTERN
+ib_err_t
+ib_cursor_open_table_using_id(
+/*==========================*/
+	ib_id_u64_t	table_id,	/*!< in: table id of table to open */
+	ib_trx_t	ib_trx,		/*!< in: Current transaction handle
+					can be NULL */
+	ib_crsr_t*	ib_crsr)	/*!< out,own: InnoDB cursor */
+{
+	ib_err_t	err;
+	dict_table_t*	table;
+	ib_bool_t	locked;
+
+	/* The dictionary is already latched iff the caller holds an
+	exclusive schema lock on this transaction. */
+	locked = (ib_trx != NULL && ib_schema_lock_is_exclusive(ib_trx));
+
+	table = ib_open_table_by_id(table_id, locked);
+
+	if (table == NULL) {
+
+		return(DB_TABLE_NOT_FOUND);
+	}
+
+	err = ib_create_cursor_with_index_id(ib_crsr, table, 0,
+					     (trx_t*) ib_trx);
+
+	return(err);
+}
+
+/*****************************************************************//**
+Open an InnoDB index and return a cursor handle to it.
+@return DB_SUCCESS or err code */
+UNIV_INTERN
+ib_err_t
+ib_cursor_open_index_using_id(
+/*==========================*/
+	ib_id_u64_t	index_id,	/*!< in: index id of index to open;
+					high 32 bits encode the table id */
+	ib_trx_t	ib_trx,		/*!< in: Current transaction handle
+					can be NULL */
+	ib_crsr_t*	ib_crsr)	/*!< out: InnoDB cursor */
+{
+	ib_err_t	err;
+	dict_table_t*	table;
+	/* NOTE(review): on 32-bit builds ulint is 32 bits, so this
+	truncates the table id extracted from the upper half of
+	index_id — confirm against the id-packing convention used by
+	the callers. */
+	ulint		table_id = (ulint)( index_id >> 32);
+
+	if (ib_trx == NULL || !ib_schema_lock_is_exclusive(ib_trx)) {
+		table = ib_open_table_by_id(table_id, FALSE);
+	} else {
+		table = ib_open_table_by_id(table_id, TRUE);
+	}
+
+	if (table == NULL) {
+
+		return(DB_TABLE_NOT_FOUND);
+	}
+
+	/* We only return the lower 32 bits of the dulint. */
+	err = ib_create_cursor_with_index_id(
+		ib_crsr, table, index_id, (trx_t*) ib_trx);
+
+	if (ib_crsr != NULL) {
+		const ib_cursor_t*	cursor;
+
+		cursor = *(ib_cursor_t**) ib_crsr;
+
+		/* If the index could not be resolved, undo the open
+		and hand back a NULL cursor. */
+		if (cursor->prebuilt->index == NULL) {
+			ib_err_t	crsr_err;
+
+			crsr_err = ib_cursor_close(*ib_crsr);
+			ut_a(crsr_err == DB_SUCCESS);
+
+			*ib_crsr = NULL;
+		}
+	}
+
+	return(err);
+}
+
+/*****************************************************************//**
+Open an InnoDB secondary index cursor and return a cursor handle to it.
+@return DB_SUCCESS or err code */
+UNIV_INTERN
+ib_err_t
+ib_cursor_open_index_using_name(
+/*============================*/
+	ib_crsr_t	ib_open_crsr,	/*!< in: open/active cursor */
+	const char*	index_name,	/*!< in: secondary index name */
+	ib_crsr_t*	ib_crsr,	/*!< out,own: InnoDB index cursor */
+	int*		idx_type,	/*!< out: index is cluster index */
+	ib_id_u64_t*	idx_id)		/*!< out: index id */
+{
+	dict_table_t*	table;
+	dict_index_t*	index;
+	index_id_t	index_id = 0;
+	ib_err_t	err = DB_TABLE_NOT_FOUND;
+	ib_cursor_t*	cursor = (ib_cursor_t*) ib_open_crsr;
+
+	*idx_type = 0;
+	*idx_id = 0;
+	*ib_crsr = NULL;
+
+	/* We want to increment the ref count, so we do a redundant search. */
+	table = dict_table_open_on_id(cursor->prebuilt->table->id,
+				      FALSE, FALSE);
+	ut_a(table != NULL);
+
+	/* The first index is always the cluster index. */
+	index = dict_table_get_first_index(table);
+
+	/* Traverse the user defined indexes. */
+	while (index != NULL) {
+		if (innobase_strcasecmp(index->name, index_name) == 0) {
+			index_id = index->id;
+			*idx_type = index->type;
+			*idx_id = index_id;
+			break;
+		}
+		index = UT_LIST_GET_NEXT(indexes, index);
+	}
+
+	if (!index_id) {
+		/* Name not found: drop the extra table reference
+		taken above before reporting the error. */
+		dict_table_close(table, FALSE, FALSE);
+		return(DB_ERROR);
+	}
+
+	if (index_id > 0) {
+		ut_ad(index->id == index_id);
+		err = ib_create_cursor(
+			ib_crsr, table, index, cursor->prebuilt->trx);
+	}
+
+	if (*ib_crsr != NULL) {
+		/* NOTE: shadows the outer 'cursor' deliberately; this
+		one refers to the newly created index cursor. */
+		const ib_cursor_t*	cursor;
+
+		cursor = *(ib_cursor_t**) ib_crsr;
+
+		if (cursor->prebuilt->index == NULL) {
+			err = ib_cursor_close(*ib_crsr);
+			ut_a(err == DB_SUCCESS);
+			*ib_crsr = NULL;
+		}
+	}
+
+	return(err);
+}
+
+/*****************************************************************//**
+Open an InnoDB table and return a cursor handle to it.
+@return DB_SUCCESS or err code */
+UNIV_INTERN
+ib_err_t
+ib_cursor_open_table(
+/*=================*/
+	const char*	name,		/*!< in: table name */
+	ib_trx_t	ib_trx,		/*!< in: Current transaction handle
+					can be NULL */
+	ib_crsr_t*	ib_crsr)	/*!< out,own: InnoDB cursor */
+{
+	ib_err_t	err;
+	dict_table_t*	table;
+	char*		normalized_name;
+
+	/* Convert "db.table" style input to the internal
+	"db/table" form (lower-cased on Windows). */
+	normalized_name = static_cast<char*>(mem_alloc(ut_strlen(name) + 1));
+	ib_normalize_table_name(normalized_name, name);
+
+	if (ib_trx != NULL) {
+		if (!ib_schema_lock_is_exclusive(ib_trx)) {
+			table = (dict_table_t*)ib_open_table_by_name(
+				normalized_name);
+		} else {
+			/* NOTE: We do not acquire MySQL metadata lock */
+			table = ib_lookup_table_by_name(normalized_name);
+		}
+	} else {
+		table = (dict_table_t*)ib_open_table_by_name(normalized_name);
+	}
+
+	mem_free(normalized_name);
+	normalized_name = NULL;
+
+	/* It can happen that another thread has created the table but
+	not the cluster index or it's a broken table definition. Refuse to
+	open if that's the case. */
+	if (table != NULL && dict_table_get_first_index(table) == NULL) {
+		table = NULL;
+	}
+
+	if (table != NULL) {
+		err = ib_create_cursor_with_index_id(ib_crsr, table, 0,
+						     (trx_t*) ib_trx);
+	} else {
+		err = DB_TABLE_NOT_FOUND;
+	}
+
+	return(err);
+}
+
+/********************************************************************//**
+Free a context struct for a table handle: releases the insert, update
+and select query graphs, then zeroes the struct for reuse. */
+static
+void
+ib_qry_proc_free(
+/*=============*/
+	ib_qry_proc_t*	q_proc)		/*!< in, own: qproc struct */
+{
+	que_graph_free_recursive(q_proc->grph.ins);
+	que_graph_free_recursive(q_proc->grph.upd);
+	que_graph_free_recursive(q_proc->grph.sel);
+
+	/* Reset so the graphs are lazily rebuilt on next use. */
+	memset(q_proc, 0x0, sizeof(*q_proc));
+}
+
+/*****************************************************************//**
+Detach the transaction from a cursor by setting its trx to NULL. */
+UNIV_INTERN
+void
+ib_cursor_clear_trx(
+/*================*/
+	ib_crsr_t	ib_crsr)	/*!< in/out: InnoDB cursor */
+{
+	ib_cursor_t*	crsr = (ib_cursor_t*) ib_crsr;
+
+	crsr->prebuilt->trx = NULL;
+}
+
+/*****************************************************************//**
+Reset the cursor: drop the table-in-use count on the attached trx
+(if any) and discard any cached query graphs.
+@return DB_SUCCESS or err code */
+UNIV_INTERN
+ib_err_t
+ib_cursor_reset(
+/*============*/
+	ib_crsr_t	ib_crsr)	/*!< in/out: InnoDB cursor */
+{
+	ib_cursor_t*	crsr = (ib_cursor_t*) ib_crsr;
+	row_prebuilt_t*	pb = crsr->prebuilt;
+
+	if (crsr->valid_trx
+	    && pb->trx != NULL
+	    && pb->trx->n_mysql_tables_in_use > 0) {
+
+		--pb->trx->n_mysql_tables_in_use;
+	}
+
+	/* The fields in this data structure are allocated from
+	the query heap and so need to be reset too. */
+	ib_qry_proc_free(&crsr->q_proc);
+
+	mem_heap_empty(crsr->query_heap);
+
+	return(DB_SUCCESS);
+}
+
+/*****************************************************************//**
+Update the cursor with a new transaction and also reset the cursor.
+@return DB_SUCCESS or err code */
+ib_err_t
+ib_cursor_new_trx(
+/*==============*/
+	ib_crsr_t	ib_crsr,	/*!< in/out: InnoDB cursor */
+	ib_trx_t	ib_trx)		/*!< in: transaction */
+{
+	ib_err_t	err = DB_SUCCESS;
+	ib_cursor_t*	cursor = (ib_cursor_t*) ib_crsr;
+	trx_t*		trx = (trx_t*) ib_trx;
+
+	row_prebuilt_t*	prebuilt = cursor->prebuilt;
+
+	/* Swap the prebuilt struct over to the new trx. */
+	row_update_prebuilt_trx(prebuilt, trx);
+
+	cursor->valid_trx = TRUE;
+
+	trx_assign_read_view(prebuilt->trx);
+
+	/* Cached query graphs reference the old trx; discard them. */
+	ib_qry_proc_free(&cursor->q_proc);
+
+	mem_heap_empty(cursor->query_heap);
+
+	return(err);
+}
+
+/*****************************************************************//**
+Commit the transaction attached to a cursor and detach it; the trx
+handle is freed by the commit.
+@return DB_SUCCESS or err code */
+ib_err_t
+ib_cursor_commit_trx(
+/*=================*/
+	ib_crsr_t	ib_crsr,	/*!< in/out: InnoDB cursor */
+	ib_trx_t	ib_trx)		/*!< in: transaction; must be the
+					one attached to the cursor */
+{
+	ib_err_t	err = DB_SUCCESS;
+	ib_cursor_t*	cursor = (ib_cursor_t*) ib_crsr;
+	row_prebuilt_t*	prebuilt = cursor->prebuilt;
+
+	ut_ad(prebuilt->trx == (trx_t*) ib_trx);
+	err = ib_trx_commit(ib_trx);
+	/* The trx was released by ib_trx_commit(); clear the dangling
+	reference and mark it invalid on the cursor. */
+	prebuilt->trx = NULL;
+	cursor->valid_trx = FALSE;
+	return(err);
+}
+
+/*****************************************************************//**
+Close an InnoDB table and free the cursor.
+@return DB_SUCCESS or err code */
+UNIV_INTERN
+ib_err_t
+ib_cursor_close(
+/*============*/
+	ib_crsr_t	ib_crsr)	/*!< in,own: InnoDB cursor */
+{
+	ib_cursor_t*	cursor = (ib_cursor_t*) ib_crsr;
+	row_prebuilt_t*	prebuilt;
+	trx_t*		trx;
+
+	if (!cursor) {
+		return(DB_SUCCESS);
+	}
+
+	prebuilt = cursor->prebuilt;
+	trx = prebuilt->trx;
+
+	/* Free query graphs before the prebuilt struct they refer to. */
+	ib_qry_proc_free(&cursor->q_proc);
+
+	/* The transaction could have been detached from the cursor. */
+	if (cursor->valid_trx && trx != NULL
+	    && trx->n_mysql_tables_in_use > 0) {
+		--trx->n_mysql_tables_in_use;
+	}
+
+	row_prebuilt_free(prebuilt, FALSE);
+	cursor->prebuilt = NULL;
+
+	/* cursor->heap also holds the cursor struct itself, so it
+	must be released last. */
+	mem_heap_free(cursor->query_heap);
+	mem_heap_free(cursor->heap);
+	cursor = NULL;
+
+	return(DB_SUCCESS);
+}
+
+/*****************************************************************//**
+Close the table, decrement n_ref_count count.
+@return DB_SUCCESS or err code */
+UNIV_INTERN
+ib_err_t
+ib_cursor_close_table(
+/*==================*/
+	ib_crsr_t	ib_crsr)	/*!< in,own: InnoDB cursor */
+{
+	ib_cursor_t*	crsr = (ib_cursor_t*) ib_crsr;
+	row_prebuilt_t*	pb = crsr->prebuilt;
+
+	if (pb != NULL && pb->table != NULL) {
+		dict_table_close(pb->table, FALSE, FALSE);
+	}
+
+	return(DB_SUCCESS);
+}
+/**********************************************************************//**
+Run the insert query and do error handling, retrying the insert step
+for as long as the error handler reports a lock wait.
+@return DB_SUCCESS or error code */
+UNIV_INLINE
+ib_err_t
+ib_insert_row_with_lock_retry(
+/*==========================*/
+	que_thr_t*	thr,		/*!< in: insert query graph */
+	ins_node_t*	node,		/*!< in: insert node for the query */
+	trx_savept_t*	savept)		/*!< in: savepoint to rollback to
+					in case of an error */
+{
+	trx_t*		trx;
+	ib_err_t	err;
+	ib_bool_t	lock_wait;
+
+	trx = thr_get_trx(thr);
+
+	do {
+		/* Rewind the graph to the insert node on each retry. */
+		thr->run_node = node;
+		thr->prev_node = node;
+
+		row_ins_step(thr);
+
+		err = trx->error_state;
+
+		if (err != DB_SUCCESS) {
+			que_thr_stop_for_mysql(thr);
+
+			/* ib_handle_errors() returns TRUE if the error
+			was a lock wait and we should retry. */
+			thr->lock_state = QUE_THR_LOCK_ROW;
+			lock_wait = ib_handle_errors(&err, trx, thr, savept);
+			thr->lock_state = QUE_THR_LOCK_NOLOCK;
+		} else {
+			lock_wait = FALSE;
+		}
+	} while (lock_wait);
+
+	return(err);
+}
+
+/*****************************************************************//**
+Write a row by executing the prepared insert query graph.
+@return DB_SUCCESS or err code */
+static
+ib_err_t
+ib_execute_insert_query_graph(
+/*==========================*/
+	dict_table_t*	table,		/*!< in: table where to insert */
+	que_fork_t*	ins_graph,	/*!< in: query graph */
+	ins_node_t*	node)		/*!< in: insert node */
+{
+	trx_t*		trx;
+	que_thr_t*	thr;
+	trx_savept_t	savept;
+	ib_err_t	err = DB_SUCCESS;
+
+	trx = ins_graph->trx;
+
+	/* Savepoint to roll back to if the insert fails part-way. */
+	savept = trx_savept_take(trx);
+
+	thr = que_fork_get_first_thr(ins_graph);
+
+	que_thr_move_to_run_state_for_mysql(thr, trx);
+
+	err = ib_insert_row_with_lock_retry(thr, node, &savept);
+
+	if (err == DB_SUCCESS) {
+		que_thr_stop_for_mysql_no_error(thr, trx);
+
+		/* Maintain the approximate row count and the server
+		insert statistics. */
+		dict_table_n_rows_inc(table);
+
+		srv_stats.n_rows_inserted.inc();
+	}
+
+	trx->op_info = "";
+
+	return(err);
+}
+
+/*****************************************************************//**
+Create an insert query graph node, lazily: the graph is built once
+per cursor and cached in cursor->q_proc until the cursor is reset. */
+static
+void
+ib_insert_query_graph_create(
+/*==========================*/
+	ib_cursor_t*	cursor)		/*!< in: Cursor instance */
+{
+	ib_qry_proc_t*	q_proc = &cursor->q_proc;
+	ib_qry_node_t*	node = &q_proc->node;
+	trx_t*		trx = cursor->prebuilt->trx;
+
+	/* The attached transaction must already be active. */
+	ut_a(trx->state != TRX_STATE_NOT_STARTED);
+
+	if (node->ins == NULL) {
+		dtuple_t*	row;
+		ib_qry_grph_t*	grph = &q_proc->grph;
+		mem_heap_t*	heap = cursor->query_heap;
+		dict_table_t*	table = cursor->prebuilt->table;
+
+		node->ins = ins_node_create(INS_DIRECT, table, heap);
+
+		node->ins->select = NULL;
+		node->ins->values_list = NULL;
+
+		/* The row template covers all columns, including the
+		system columns. */
+		row = dtuple_create(heap, dict_table_get_n_cols(table));
+		dict_table_copy_types(row, table);
+
+		ins_node_set_new_row(node->ins, row);
+
+		grph->ins = static_cast<que_fork_t*>(
+			que_node_get_parent(
+				pars_complete_graph_for_exec(node->ins, trx,
+							     heap)));
+
+		grph->ins->state = QUE_FORK_ACTIVE;
+	}
+}
+
+/*****************************************************************//**
+Insert a row to a table.
+@return DB_SUCCESS or err code; DB_DATA_MISMATCH if a NOT NULL
+column in the tuple is SQL NULL */
+UNIV_INTERN
+ib_err_t
+ib_cursor_insert_row(
+/*=================*/
+	ib_crsr_t	ib_crsr,	/*!< in/out: InnoDB cursor instance */
+	const ib_tpl_t	ib_tpl)		/*!< in: tuple to insert */
+{
+	ib_ulint_t	i;
+	ib_qry_node_t*	node;
+	ib_qry_proc_t*	q_proc;
+	ulint		n_fields;
+	dtuple_t*	dst_dtuple;
+	ib_err_t	err = DB_SUCCESS;
+	ib_cursor_t*	cursor = (ib_cursor_t*) ib_crsr;
+	const ib_tuple_t* src_tuple = (const ib_tuple_t*) ib_tpl;
+
+	/* Build (or reuse) the cached insert query graph. */
+	ib_insert_query_graph_create(cursor);
+
+	ut_ad(src_tuple->type == TPL_TYPE_ROW);
+
+	q_proc = &cursor->q_proc;
+	node = &q_proc->node;
+
+	node->ins->state = INS_NODE_ALLOC_ROW_ID;
+	dst_dtuple = node->ins->row;
+
+	n_fields = dtuple_get_n_fields(src_tuple->ptr);
+	ut_ad(n_fields == dtuple_get_n_fields(dst_dtuple));
+
+	/* Do a shallow copy of the data fields and check for NULL
+	constraints on columns. */
+	for (i = 0; i < n_fields; i++) {
+		ulint		mtype;
+		dfield_t*	src_field;
+		dfield_t*	dst_field;
+
+		src_field = dtuple_get_nth_field(src_tuple->ptr, i);
+
+		mtype = dtype_get_mtype(dfield_get_type(src_field));
+
+		/* Don't touch the system columns. */
+		if (mtype != DATA_SYS) {
+			ulint	prtype;
+
+			prtype = dtype_get_prtype(dfield_get_type(src_field));
+
+			/* Reject SQL NULL in a NOT NULL column. */
+			if ((prtype & DATA_NOT_NULL)
+			    && dfield_is_null(src_field)) {
+
+				err = DB_DATA_MISMATCH;
+				break;
+			}
+
+			dst_field = dtuple_get_nth_field(dst_dtuple, i);
+			ut_ad(mtype
+			      == dtype_get_mtype(dfield_get_type(dst_field)));
+
+			/* Do a shallow copy. */
+			dfield_set_data(
+				dst_field, src_field->data, src_field->len);
+
+			if (dst_field->len != IB_SQL_NULL) {
+				UNIV_MEM_ASSERT_RW(dst_field->data,
+						   dst_field->len);
+			}
+		}
+	}
+
+	if (err == DB_SUCCESS) {
+		err = ib_execute_insert_query_graph(
+			src_tuple->index->table, q_proc->grph.ins, node->ins);
+	}
+
+	return(err);
+}
+
+/*********************************************************************//**
+Gets pointer to a prebuilt update vector used in updates.
+@return update vector */
+UNIV_INLINE
+upd_t*
+ib_update_vector_create(
+/*====================*/
+	ib_cursor_t*	cursor)		/*!< in: current cursor */
+{
+	trx_t*		trx = cursor->prebuilt->trx;
+	mem_heap_t*	heap = cursor->query_heap;
+	dict_table_t*	table = cursor->prebuilt->table;
+	ib_qry_proc_t*	q_proc = &cursor->q_proc;
+	ib_qry_grph_t*	grph = &q_proc->grph;
+	ib_qry_node_t*	node = &q_proc->node;
+
+	ut_a(trx->state != TRX_STATE_NOT_STARTED);
+
+	/* The update node is created once per cursor ... */
+	if (node->upd == NULL) {
+		node->upd = static_cast<upd_node_t*>(
+			row_create_update_node_for_mysql(table, heap));
+	}
+
+	/* ... but the graph is (re)built on every call. NOTE(review):
+	each rebuild allocates from the query heap, which grows until
+	the cursor is reset — confirm this is intended. */
+	grph->upd = static_cast<que_fork_t*>(
+		que_node_get_parent(
+			pars_complete_graph_for_exec(node->upd, trx, heap)));
+
+	grph->upd->state = QUE_FORK_ACTIVE;
+
+	return(node->upd->update);
+}
+
+/**********************************************************************//**
+Note that a column has changed: record its new value and its position
+in the clustered index record in the update field. */
+static
+void
+ib_update_col(
+/*==========*/
+
+	ib_cursor_t*	cursor,		/*!< in: current cursor */
+	upd_field_t*	upd_field,	/*!< in/out: update field */
+	ulint		col_no,		/*!< in: column number */
+	dfield_t*	dfield)		/*!< in: updated dfield */
+{
+	dict_table_t*	table = cursor->prebuilt->table;
+	dict_index_t*	clust_index = dict_table_get_first_index(table);
+	ulint		new_len = dfield_get_len(dfield);
+
+	if (new_len == UNIV_SQL_NULL) {
+		dfield_set_null(&upd_field->new_val);
+	} else {
+		dfield_copy_data(&upd_field->new_val, dfield);
+	}
+
+	upd_field->exp = NULL;
+
+	upd_field->orig_len = 0;
+
+	/* Map the table column number to its position in the
+	clustered index record. */
+	upd_field->field_no = dict_col_get_clust_pos(
+		&table->cols[col_no], clust_index);
+}
+
+/**********************************************************************//**
+Checks which fields have changed in a row and stores the new data
+to an update vector.
+@return DB_SUCCESS or err code; DB_DATA_MISMATCH if a NOT NULL
+column would be set to SQL NULL */
+static
+ib_err_t
+ib_calc_diff(
+/*=========*/
+	ib_cursor_t*	cursor,		/*!< in: current cursor */
+	upd_t*		upd,		/*!< in/out: update vector */
+	const ib_tuple_t*old_tuple,	/*!< in: Old tuple in table */
+	const ib_tuple_t*new_tuple)	/*!< in: New tuple to update */
+{
+	ulint		i;
+	ulint		n_changed = 0;
+	ib_err_t	err = DB_SUCCESS;
+	ulint		n_fields = dtuple_get_n_fields(new_tuple->ptr);
+
+	ut_a(old_tuple->type == TPL_TYPE_ROW);
+	ut_a(new_tuple->type == TPL_TYPE_ROW);
+	ut_a(old_tuple->index->table == new_tuple->index->table);
+
+	for (i = 0; i < n_fields; ++i) {
+		ulint		mtype;
+		ulint		prtype;
+		upd_field_t*	upd_field;
+		dfield_t*	new_dfield;
+		dfield_t*	old_dfield;
+
+		new_dfield = dtuple_get_nth_field(new_tuple->ptr, i);
+		old_dfield = dtuple_get_nth_field(old_tuple->ptr, i);
+
+		mtype = dtype_get_mtype(dfield_get_type(old_dfield));
+		prtype = dtype_get_prtype(dfield_get_type(old_dfield));
+
+		/* Skip the system columns */
+		if (mtype == DATA_SYS) {
+			continue;
+
+		} else if ((prtype & DATA_NOT_NULL)
+			   && dfield_is_null(new_dfield)) {
+
+			err = DB_DATA_MISMATCH;
+			break;
+		}
+
+		/* A field changed if its length differs, or if the
+		(non-NULL) contents differ byte-wise. Equal lengths
+		with old NULL implies new is NULL too, i.e. no change. */
+		if (dfield_get_len(new_dfield) != dfield_get_len(old_dfield)
+		    || (!dfield_is_null(old_dfield)
+			&& memcmp(dfield_get_data(new_dfield),
+				  dfield_get_data(old_dfield),
+				  dfield_get_len(old_dfield)) != 0)) {
+
+			upd_field = &upd->fields[n_changed];
+
+			ib_update_col(cursor, upd_field, i, new_dfield);
+
+			++n_changed;
+		}
+	}
+
+	if (err == DB_SUCCESS) {
+		upd->info_bits = 0;
+		upd->n_fields = n_changed;
+	}
+
+	return(err);
+}
+
+/**********************************************************************//**
+Run the update query and do error handling, retrying the update step
+for as long as the error handler reports a lock wait. A missing
+record is reported to the caller without retry.
+@return DB_SUCCESS or error code */
+UNIV_INLINE
+ib_err_t
+ib_update_row_with_lock_retry(
+/*==========================*/
+	que_thr_t*	thr,		/*!< in: Update query graph */
+	upd_node_t*	node,		/*!< in: Update node for the query */
+	trx_savept_t*	savept)		/*!< in: savepoint to rollback to
+					in case of an error */
+
+{
+	trx_t*		trx;
+	ib_err_t	err;
+	ib_bool_t	lock_wait;
+
+	trx = thr_get_trx(thr);
+
+	do {
+		/* Rewind the graph to the update node on each retry. */
+		thr->run_node = node;
+		thr->prev_node = node;
+
+		row_upd_step(thr);
+
+		err = trx->error_state;
+
+		if (err != DB_SUCCESS) {
+			que_thr_stop_for_mysql(thr);
+
+			/* DB_RECORD_NOT_FOUND is final, not retryable. */
+			if (err != DB_RECORD_NOT_FOUND) {
+				thr->lock_state = QUE_THR_LOCK_ROW;
+
+				lock_wait = ib_handle_errors(
+					&err, trx, thr, savept);
+
+				thr->lock_state = QUE_THR_LOCK_NOLOCK;
+			} else {
+				lock_wait = FALSE;
+			}
+		} else {
+			lock_wait = FALSE;
+		}
+	} while (lock_wait);
+
+	return(err);
+}
+
/*********************************************************************//**
Does an update or delete of a row.
The caller must have prepared the update node in cursor->q_proc (update
vector built, is_delete flag set) and pcur must be positioned on the
clustered index record targeted by the operation.
@return DB_SUCCESS or err code */
UNIV_INLINE
ib_err_t
ib_execute_update_query_graph(
/*==========================*/
	ib_cursor_t*	cursor,	/*!< in: Cursor instance */
	btr_pcur_t*	pcur)	/*!< in: Btree persistent cursor */
{
	ib_err_t	err;
	que_thr_t*	thr;
	upd_node_t*	node;
	trx_savept_t	savept;
	trx_t*		trx = cursor->prebuilt->trx;
	dict_table_t*	table = cursor->prebuilt->table;
	ib_qry_proc_t*	q_proc = &cursor->q_proc;

	/* The transaction must be running. */
	ut_a(trx->state != TRX_STATE_NOT_STARTED);

	node = q_proc->node.upd;

	/* Updates/deletes must go through the clustered index; hand the
	stored position of pcur over to the update node's own cursor. */
	ut_a(dict_index_is_clust(pcur->btr_cur.index));
	btr_pcur_copy_stored_position(node->pcur, pcur);

	ut_a(node->pcur->rel_pos == BTR_PCUR_ON);

	/* Take a savepoint so that a lock-wait retry can roll back the
	partial work of a failed attempt. */
	savept = trx_savept_take(trx);

	thr = que_fork_get_first_thr(q_proc->grph.upd);

	node->state = UPD_NODE_UPDATE_CLUSTERED;

	que_thr_move_to_run_state_for_mysql(thr, trx);

	err = ib_update_row_with_lock_retry(thr, node, &savept);

	if (err == DB_SUCCESS) {

		que_thr_stop_for_mysql_no_error(thr, trx);

		if (node->is_delete) {

			/* Keep the cached row count and the server
			statistics in sync with the delete. */
			dict_table_n_rows_dec(table);

			srv_stats.n_rows_deleted.inc();
		} else {
			srv_stats.n_rows_updated.inc();
		}

	} else if (err == DB_RECORD_NOT_FOUND) {
		/* "Row vanished" is reported to the caller but must not
		poison the transaction's error state. */
		trx->error_state = DB_SUCCESS;
	}

	trx->op_info = "";

	return(err);
}
+
+/*****************************************************************//**
+Update a row in a table.
+@return DB_SUCCESS or err code */
+UNIV_INTERN
+ib_err_t
+ib_cursor_update_row(
+/*=================*/
+ ib_crsr_t ib_crsr, /*!< in: InnoDB cursor instance */
+ const ib_tpl_t ib_old_tpl, /*!< in: Old tuple in table */
+ const ib_tpl_t ib_new_tpl) /*!< in: New tuple to update */
+{
+ upd_t* upd;
+ ib_err_t err;
+ btr_pcur_t* pcur;
+ ib_cursor_t* cursor = (ib_cursor_t*) ib_crsr;
+ row_prebuilt_t* prebuilt = cursor->prebuilt;
+ const ib_tuple_t*old_tuple = (const ib_tuple_t*) ib_old_tpl;
+ const ib_tuple_t*new_tuple = (const ib_tuple_t*) ib_new_tpl;
+
+ if (dict_index_is_clust(prebuilt->index)) {
+ pcur = &cursor->prebuilt->pcur;
+ } else if (prebuilt->need_to_access_clustered) {
+ pcur = &cursor->prebuilt->clust_pcur;
+ } else {
+ return(DB_ERROR);
+ }
+
+ ut_a(old_tuple->type == TPL_TYPE_ROW);
+ ut_a(new_tuple->type == TPL_TYPE_ROW);
+
+ upd = ib_update_vector_create(cursor);
+
+ err = ib_calc_diff(cursor, upd, old_tuple, new_tuple);
+
+ if (err == DB_SUCCESS) {
+ /* Note that this is not a delete. */
+ cursor->q_proc.node.upd->is_delete = FALSE;
+
+ err = ib_execute_update_query_graph(cursor, pcur);
+ }
+
+ return(err);
+}
+
+/**********************************************************************//**
+Build the update query graph to delete a row from an index.
+@return DB_SUCCESS or err code */
+static
+ib_err_t
+ib_delete_row(
+/*==========*/
+ ib_cursor_t* cursor, /*!< in: current cursor */
+ btr_pcur_t* pcur, /*!< in: Btree persistent cursor */
+ const rec_t* rec) /*!< in: record to delete */
+{
+ ulint i;
+ upd_t* upd;
+ ib_err_t err;
+ ib_tuple_t* tuple;
+ ib_tpl_t ib_tpl;
+ ulint n_cols;
+ upd_field_t* upd_field;
+ ib_bool_t page_format;
+ dict_table_t* table = cursor->prebuilt->table;
+ dict_index_t* index = dict_table_get_first_index(table);
+
+ n_cols = dict_index_get_n_ordering_defined_by_user(index);
+ ib_tpl = ib_key_tuple_new(index, n_cols);
+
+ if (!ib_tpl) {
+ return(DB_OUT_OF_MEMORY);
+ }
+
+ tuple = (ib_tuple_t*) ib_tpl;
+
+ upd = ib_update_vector_create(cursor);
+
+ page_format = dict_table_is_comp(index->table);
+ ib_read_tuple(rec, page_format, tuple);
+
+ upd->n_fields = ib_tuple_get_n_cols(ib_tpl);
+
+ for (i = 0; i < upd->n_fields; ++i) {
+ dfield_t* dfield;
+
+ upd_field = &upd->fields[i];
+ dfield = dtuple_get_nth_field(tuple->ptr, i);
+
+ dfield_copy_data(&upd_field->new_val, dfield);
+
+ upd_field->exp = NULL;
+
+ upd_field->orig_len = 0;
+
+ upd->info_bits = 0;
+
+ upd_field->field_no = dict_col_get_clust_pos(
+ &table->cols[i], index);
+ }
+
+ /* Note that this is a delete. */
+ cursor->q_proc.node.upd->is_delete = TRUE;
+
+ err = ib_execute_update_query_graph(cursor, pcur);
+
+ ib_tuple_delete(ib_tpl);
+
+ return(err);
+}
+
/*****************************************************************//**
Delete a row in a table.
Restores the persistent cursor position inside a mini-transaction to
fetch the record, then (outside the mtr) builds and runs the delete.
@return DB_SUCCESS or err code */
UNIV_INTERN
ib_err_t
ib_cursor_delete_row(
/*=================*/
	ib_crsr_t	ib_crsr)	/*!< in: InnoDB cursor instance */
{
	ib_err_t	err;
	btr_pcur_t*	pcur;
	dict_index_t*	index;
	ib_cursor_t*	cursor = (ib_cursor_t*) ib_crsr;
	row_prebuilt_t*	prebuilt = cursor->prebuilt;

	index = dict_table_get_first_index(prebuilt->index->table);

	/* Check whether this is a secondary index cursor */
	if (index != prebuilt->index) {
		if (prebuilt->need_to_access_clustered) {
			pcur = &prebuilt->clust_pcur;
		} else {
			/* A secondary-index cursor that cannot reach the
			clustered record cannot delete. */
			return(DB_ERROR);
		}
	} else {
		pcur = &prebuilt->pcur;
	}

	if (ib_btr_cursor_is_positioned(pcur)) {
		const rec_t*	rec;
		ib_bool_t	page_format;
		mtr_t		mtr;

		page_format = dict_table_is_comp(index->table);

		mtr_start(&mtr);

		/* Re-latch the page and re-find the record the cursor
		was on; the position may have been lost since. */
		if (btr_pcur_restore_position(
			BTR_SEARCH_LEAF, pcur, &mtr)) {

			rec = btr_pcur_get_rec(pcur);
		} else {
			rec = NULL;
		}

		/* NOTE(review): rec is read after mtr_commit() below only
		through ib_delete_row(), which re-reads it into its own
		tuple; the raw pointer itself is not dereferenced after the
		page latch is released by ib_delete_row()'s graph — the
		dereference in rec_get_deleted_flag() happens after commit,
		which appears to rely on the cursor's block still being
		buffer-fixed; confirm against btr_pcur semantics. */
		mtr_commit(&mtr);

		if (rec && !rec_get_deleted_flag(rec, page_format)) {
			err = ib_delete_row(cursor, pcur, rec);
		} else {
			err = DB_RECORD_NOT_FOUND;
		}
	} else {
		err = DB_RECORD_NOT_FOUND;
	}

	return(err);
}
+
/*****************************************************************//**
Read current row.
Restores the persistent cursor position inside a mini-transaction and
copies the current record's columns into the caller's tuple.
@return DB_SUCCESS or err code */
UNIV_INTERN
ib_err_t
ib_cursor_read_row(
/*===============*/
	ib_crsr_t	ib_crsr,	/*!< in: InnoDB cursor instance */
	ib_tpl_t	ib_tpl)		/*!< out: read cols into this tuple */
{
	ib_err_t	err;
	ib_tuple_t*	tuple = (ib_tuple_t*) ib_tpl;
	ib_cursor_t*	cursor = (ib_cursor_t*) ib_crsr;

	ut_a(cursor->prebuilt->trx->state != TRX_STATE_NOT_STARTED);

	/* When searching with IB_EXACT_MATCH set, row_search_for_mysql()
	will not position the persistent cursor but will copy the record
	found into the row cache. It should be the only entry. */
	if (!ib_cursor_is_positioned(ib_crsr) ) {
		err = DB_RECORD_NOT_FOUND;
	} else {
		mtr_t		mtr;
		btr_pcur_t*	pcur;
		row_prebuilt_t*	prebuilt = cursor->prebuilt;

		/* A full-row read through a secondary index must use the
		clustered-index cursor; key reads use the index's own. */
		if (prebuilt->need_to_access_clustered
		    && tuple->type == TPL_TYPE_ROW) {
			pcur = &prebuilt->clust_pcur;
		} else {
			pcur = &prebuilt->pcur;
		}

		if (pcur == NULL) {
			return(DB_ERROR);
		}

		mtr_start(&mtr);

		if (btr_pcur_restore_position(BTR_SEARCH_LEAF, pcur, &mtr)) {
			const rec_t*	rec;
			ib_bool_t	page_format;

			page_format = dict_table_is_comp(tuple->index->table);
			rec = btr_pcur_get_rec(pcur);

			/* Prefer the record cached by the memcached/native
			API path if it differs from the cursor record —
			presumably set by row_search_for_mysql(); verify. */
			if (prebuilt->innodb_api_rec &&
			    prebuilt->innodb_api_rec != rec) {
				rec = prebuilt->innodb_api_rec;
			}

			if (!rec_get_deleted_flag(rec, page_format)) {
				ib_read_tuple(rec, page_format, tuple);
				err = DB_SUCCESS;
			} else{
				/* Delete-marked: treat as not found. */
				err = DB_RECORD_NOT_FOUND;
			}

		} else {
			err = DB_RECORD_NOT_FOUND;
		}

		mtr_commit(&mtr);
	}

	return(err);
}
+
+/*****************************************************************//**
+Move cursor to the first record in the table.
+@return DB_SUCCESS or err code */
+UNIV_INLINE
+ib_err_t
+ib_cursor_position(
+/*===============*/
+ ib_cursor_t* cursor, /*!< in: InnoDB cursor instance */
+ ib_srch_mode_t mode) /*!< in: Search mode */
+{
+ ib_err_t err;
+ row_prebuilt_t* prebuilt = cursor->prebuilt;
+ unsigned char* buf;
+
+ buf = static_cast<unsigned char*>(mem_alloc(UNIV_PAGE_SIZE));
+
+ /* We want to position at one of the ends, row_search_for_mysql()
+ uses the search_tuple fields to work out what to do. */
+ dtuple_set_n_fields(prebuilt->search_tuple, 0);
+
+ err = static_cast<ib_err_t>(row_search_for_mysql(
+ buf, mode, prebuilt, 0, 0));
+
+ mem_free(buf);
+
+ return(err);
+}
+
+/*****************************************************************//**
+Move cursor to the first record in the table.
+@return DB_SUCCESS or err code */
+UNIV_INTERN
+ib_err_t
+ib_cursor_first(
+/*============*/
+ ib_crsr_t ib_crsr) /*!< in: InnoDB cursor instance */
+{
+ ib_cursor_t* cursor = (ib_cursor_t*) ib_crsr;
+
+ return(ib_cursor_position(cursor, IB_CUR_G));
+}
+
+/*****************************************************************//**
+Move cursor to the last record in the table.
+@return DB_SUCCESS or err code */
+UNIV_INTERN
+ib_err_t
+ib_cursor_last(
+/*===========*/
+ ib_crsr_t ib_crsr) /*!< in: InnoDB cursor instance */
+{
+ ib_cursor_t* cursor = (ib_cursor_t*) ib_crsr;
+
+ return(ib_cursor_position(cursor, IB_CUR_L));
+}
+
+/*****************************************************************//**
+Move cursor to the next user record in the table.
+@return DB_SUCCESS or err code */
+UNIV_INTERN
+ib_err_t
+ib_cursor_next(
+/*===========*/
+ ib_crsr_t ib_crsr) /*!< in: InnoDB cursor instance */
+{
+ ib_err_t err;
+ ib_cursor_t* cursor = (ib_cursor_t*) ib_crsr;
+ row_prebuilt_t* prebuilt = cursor->prebuilt;
+ byte buf[UNIV_PAGE_SIZE_MAX];
+
+ /* We want to move to the next record */
+ dtuple_set_n_fields(prebuilt->search_tuple, 0);
+
+ err = static_cast<ib_err_t>(row_search_for_mysql(
+ buf, PAGE_CUR_G, prebuilt, 0, ROW_SEL_NEXT));
+
+ return(err);
+}
+
+/*****************************************************************//**
+Search for key.
+@return DB_SUCCESS or err code */
+UNIV_INTERN
+ib_err_t
+ib_cursor_moveto(
+/*=============*/
+ ib_crsr_t ib_crsr, /*!< in: InnoDB cursor instance */
+ ib_tpl_t ib_tpl, /*!< in: Key to search for */
+ ib_srch_mode_t ib_srch_mode) /*!< in: search mode */
+{
+ ulint i;
+ ulint n_fields;
+ ib_err_t err = DB_SUCCESS;
+ ib_tuple_t* tuple = (ib_tuple_t*) ib_tpl;
+ ib_cursor_t* cursor = (ib_cursor_t*) ib_crsr;
+ row_prebuilt_t* prebuilt = cursor->prebuilt;
+ dtuple_t* search_tuple = prebuilt->search_tuple;
+ unsigned char* buf;
+
+ ut_a(tuple->type == TPL_TYPE_KEY);
+
+ n_fields = dict_index_get_n_ordering_defined_by_user(prebuilt->index);
+
+ dtuple_set_n_fields(search_tuple, n_fields);
+ dtuple_set_n_fields_cmp(search_tuple, n_fields);
+
+ /* Do a shallow copy */
+ for (i = 0; i < n_fields; ++i) {
+ dfield_copy(dtuple_get_nth_field(search_tuple, i),
+ dtuple_get_nth_field(tuple->ptr, i));
+ }
+
+ ut_a(prebuilt->select_lock_type <= LOCK_NUM);
+
+ prebuilt->innodb_api_rec = NULL;
+
+ buf = static_cast<unsigned char*>(mem_alloc(UNIV_PAGE_SIZE));
+
+ err = static_cast<ib_err_t>(row_search_for_mysql(
+ buf, ib_srch_mode, prebuilt, cursor->match_mode, 0));
+
+ mem_free(buf);
+
+ return(err);
+}
+
+/*****************************************************************//**
+Set the cursor search mode. */
+UNIV_INTERN
+void
+ib_cursor_set_match_mode(
+/*=====================*/
+ ib_crsr_t ib_crsr, /*!< in: Cursor instance */
+ ib_match_mode_t match_mode) /*!< in: ib_cursor_moveto match mode */
+{
+ ib_cursor_t* cursor = (ib_cursor_t*) ib_crsr;
+
+ cursor->match_mode = match_mode;
+}
+
+/*****************************************************************//**
+Get the dfield instance for the column in the tuple.
+@return dfield instance in tuple */
+UNIV_INLINE
+dfield_t*
+ib_col_get_dfield(
+/*==============*/
+ ib_tuple_t* tuple, /*!< in: tuple instance */
+ ulint col_no) /*!< in: col no. in tuple */
+{
+ dfield_t* dfield;
+
+ dfield = dtuple_get_nth_field(tuple->ptr, col_no);
+
+ return(dfield);
+}
+
+/*****************************************************************//**
+Predicate to check whether a column type contains variable length data.
+@return DB_SUCCESS or error code */
+UNIV_INLINE
+ib_err_t
+ib_col_is_capped(
+/*==============*/
+ const dtype_t* dtype) /*!< in: column type */
+{
+ return(static_cast<ib_err_t>(
+ (dtype_get_mtype(dtype) == DATA_VARCHAR
+ || dtype_get_mtype(dtype) == DATA_CHAR
+ || dtype_get_mtype(dtype) == DATA_MYSQL
+ || dtype_get_mtype(dtype) == DATA_VARMYSQL
+ || dtype_get_mtype(dtype) == DATA_FIXBINARY
+ || dtype_get_mtype(dtype) == DATA_BINARY)
+ && dtype_get_len(dtype) > 0));
+}
+
/*****************************************************************//**
Set a column of the tuple. Make a copy using the tuple's heap.
Converts the client value in src to InnoDB storage format according
to the column's main type; allocates from the tuple's heap when the
existing field buffer is absent or too small.
@return DB_SUCCESS or error code */
UNIV_INTERN
ib_err_t
ib_col_set_value(
/*=============*/
	ib_tpl_t	ib_tpl,		/*!< in: tuple instance */
	ib_ulint_t	col_no,		/*!< in: column index in tuple */
	const void*	src,		/*!< in: data value */
	ib_ulint_t	len)		/*!< in: data value len */
{
	const dtype_t*  dtype;
	dfield_t*	dfield;
	void*		dst = NULL;
	ib_tuple_t*	tuple = (ib_tuple_t*) ib_tpl;

	dfield = ib_col_get_dfield(tuple, col_no);

	/* User wants to set the column to NULL. */
	if (len == IB_SQL_NULL) {
		dfield_set_null(dfield);
		return(DB_SUCCESS);
	}

	dtype = dfield_get_type(dfield);

	/* Not allowed to update system columns. */
	if (dtype_get_mtype(dtype) == DATA_SYS) {
		return(DB_DATA_MISMATCH);
	}

	dst = dfield_get_data(dfield);

	/* Since TEXT/CLOB also map to DATA_VARCHAR we need to make an
	exception. Perhaps we need to set the precise type and check
	for that. */
	if (ib_col_is_capped(dtype)) {

		/* Clamp to the declared maximum; allocate the full
		declared length so CHAR padding below always fits. */
		len = ut_min(len, dtype_get_len(dtype));

		if (dst == NULL || len > dfield_get_len(dfield)) {
			dst = mem_heap_alloc(tuple->heap, dtype_get_len(dtype));
			ut_a(dst != NULL);
		}
	} else if (dst == NULL || len > dfield_get_len(dfield)) {
		dst = mem_heap_alloc(tuple->heap, len);
	}

	if (dst == NULL) {
		return(DB_OUT_OF_MEMORY);
	}

	/* Convert src into storage format, dispatching on main type. */
	switch (dtype_get_mtype(dtype)) {
	case DATA_INT: {

		/* Integer length must match the column width exactly. */
		if (dtype_get_len(dtype) == len) {
			ibool		usign;

			usign = dtype_get_prtype(dtype) & DATA_UNSIGNED;
			mach_write_int_type(static_cast<byte*>(dst),
					    static_cast<const byte*>(src),
					    len, usign);

		} else {
			return(DB_DATA_MISMATCH);
		}
		break;
	}

	case DATA_FLOAT:
		if (len == sizeof(float)) {
			mach_float_write(static_cast<byte*>(dst), *(float*)src);
		} else {
			return(DB_DATA_MISMATCH);
		}
		break;

	case DATA_DOUBLE:
		if (len == sizeof(double)) {
			mach_double_write(static_cast<byte*>(dst),
					  *(double*)src);
		} else {
			return(DB_DATA_MISMATCH);
		}
		break;

	case DATA_SYS:
		/* Already rejected above; unreachable. */
		ut_error;
		break;

	case DATA_CHAR: {
		ulint	pad_char = ULINT_UNDEFINED;

		pad_char = dtype_get_pad_char(
			dtype_get_mtype(dtype), dtype_get_prtype(dtype));

		ut_a(pad_char != ULINT_UNDEFINED);

		/* Space-pad the tail out to the fixed column length;
		the capped branch above guarantees len <= declared len
		and that dst holds the full declared length. */
		memset((byte*) dst + len,
		       pad_char,
		       dtype_get_len(dtype) - len);

		memcpy(dst, src, len);

		len = dtype_get_len(dtype);
		break;
	}
	case DATA_BLOB:
	case DATA_BINARY:
	case DATA_MYSQL:
	case DATA_DECIMAL:
	case DATA_VARCHAR:
	case DATA_VARMYSQL:
	case DATA_FIXBINARY:
		/* Variable-length types: raw byte copy. */
		memcpy(dst, src, len);
		break;

	default:
		ut_error;
	}

	if (dst != dfield_get_data(dfield)) {
		dfield_set_data(dfield, dst, len);
	} else {
		dfield_set_len(dfield, len);
	}

	return(DB_SUCCESS);
}
+
+/*****************************************************************//**
+Get the size of the data available in a column of the tuple.
+@return bytes avail or IB_SQL_NULL */
+UNIV_INTERN
+ib_ulint_t
+ib_col_get_len(
+/*===========*/
+ ib_tpl_t ib_tpl, /*!< in: tuple instance */
+ ib_ulint_t i) /*!< in: column index in tuple */
+{
+ const dfield_t* dfield;
+ ulint data_len;
+ ib_tuple_t* tuple = (ib_tuple_t*) ib_tpl;
+
+ dfield = ib_col_get_dfield(tuple, i);
+
+ data_len = dfield_get_len(dfield);
+
+ return(data_len == UNIV_SQL_NULL ? IB_SQL_NULL : data_len);
+}
+
+/*****************************************************************//**
+Copy a column value from the tuple.
+@return bytes copied or IB_SQL_NULL */
+UNIV_INLINE
+ib_ulint_t
+ib_col_copy_value_low(
+/*==================*/
+ ib_tpl_t ib_tpl, /*!< in: tuple instance */
+ ib_ulint_t i, /*!< in: column index in tuple */
+ void* dst, /*!< out: copied data value */
+ ib_ulint_t len) /*!< in: max data value len to copy */
+{
+ const void* data;
+ const dfield_t* dfield;
+ ulint data_len;
+ ib_tuple_t* tuple = (ib_tuple_t*) ib_tpl;
+
+ dfield = ib_col_get_dfield(tuple, i);
+
+ data = dfield_get_data(dfield);
+ data_len = dfield_get_len(dfield);
+
+ if (data_len != UNIV_SQL_NULL) {
+
+ const dtype_t* dtype = dfield_get_type(dfield);
+
+ switch (dtype_get_mtype(dfield_get_type(dfield))) {
+ case DATA_INT: {
+ ibool usign;
+ ullint ret;
+
+ ut_a(data_len == len);
+
+ usign = dtype_get_prtype(dtype) & DATA_UNSIGNED;
+ ret = mach_read_int_type(static_cast<const byte*>(data),
+ data_len, usign);
+
+ if (usign) {
+ if (len == 2) {
+ *(ib_i16_t*)dst = (ib_i16_t)ret;
+ } else if (len == 4) {
+ *(ib_i32_t*)dst = (ib_i32_t)ret;
+ } else {
+ *(ib_i64_t*)dst = (ib_i64_t)ret;
+ }
+ } else {
+ if (len == 2) {
+ *(ib_u16_t*)dst = (ib_i16_t)ret;
+ } else if (len == 4) {
+ *(ib_u32_t*)dst = (ib_i32_t)ret;
+ } else {
+ *(ib_u64_t*)dst = (ib_i64_t)ret;
+ }
+ }
+
+ break;
+ }
+ case DATA_FLOAT:
+ if (len == data_len) {
+ float f;
+
+ ut_a(data_len == sizeof(f));
+ f = mach_float_read(static_cast<const byte*>(
+ data));
+ memcpy(dst, &f, sizeof(f));
+ } else {
+ data_len = 0;
+ }
+ break;
+ case DATA_DOUBLE:
+ if (len == data_len) {
+ double d;
+
+ ut_a(data_len == sizeof(d));
+ d = mach_double_read(static_cast<const byte*>(
+ data));
+ memcpy(dst, &d, sizeof(d));
+ } else {
+ data_len = 0;
+ }
+ break;
+ default:
+ data_len = ut_min(data_len, len);
+ memcpy(dst, data, data_len);
+ }
+ } else {
+ data_len = IB_SQL_NULL;
+ }
+
+ return(data_len);
+}
+
+/*****************************************************************//**
+Copy a column value from the tuple.
+@return bytes copied or IB_SQL_NULL */
+UNIV_INTERN
+ib_ulint_t
+ib_col_copy_value(
+/*==============*/
+ ib_tpl_t ib_tpl, /*!< in: tuple instance */
+ ib_ulint_t i, /*!< in: column index in tuple */
+ void* dst, /*!< out: copied data value */
+ ib_ulint_t len) /*!< in: max data value len to copy */
+{
+ return(ib_col_copy_value_low(ib_tpl, i, dst, len));
+}
+
+/*****************************************************************//**
+Get the InnoDB column attribute from the internal column precise type.
+@return precise type in api format */
+UNIV_INLINE
+ib_col_attr_t
+ib_col_get_attr(
+/*============*/
+ ulint prtype) /*!< in: column definition */
+{
+ ib_col_attr_t attr = IB_COL_NONE;
+
+ if (prtype & DATA_UNSIGNED) {
+ attr = static_cast<ib_col_attr_t>(attr | IB_COL_UNSIGNED);
+ }
+
+ if (prtype & DATA_NOT_NULL) {
+ attr = static_cast<ib_col_attr_t>(attr | IB_COL_NOT_NULL);
+ }
+
+ return(attr);
+}
+
+/*****************************************************************//**
+Get a column name from the tuple.
+@return name of the column */
+UNIV_INTERN
+const char*
+ib_col_get_name(
+/*============*/
+ ib_crsr_t ib_crsr, /*!< in: InnoDB cursor instance */
+ ib_ulint_t i) /*!< in: column index in tuple */
+{
+ const char* name;
+ ib_cursor_t* cursor = (ib_cursor_t*) ib_crsr;
+ dict_table_t* table = cursor->prebuilt->table;
+ dict_col_t* col = dict_table_get_nth_col(table, i);
+ ulint col_no = dict_col_get_no(col);
+
+ name = dict_table_get_col_name(table, col_no);
+
+ return(name);
+}
+
+/*****************************************************************//**
+Get an index field name from the cursor.
+@return name of the field */
+UNIV_INTERN
+const char*
+ib_get_idx_field_name(
+/*==================*/
+ ib_crsr_t ib_crsr, /*!< in: InnoDB cursor instance */
+ ib_ulint_t i) /*!< in: column index in tuple */
+{
+ ib_cursor_t* cursor = (ib_cursor_t*) ib_crsr;
+ dict_index_t* index = cursor->prebuilt->index;
+ dict_field_t* field;
+
+ if (index) {
+ field = dict_index_get_nth_field(cursor->prebuilt->index, i);
+
+ if (field) {
+ return(field->name);
+ }
+ }
+
+ return(NULL);
+}
+
+/*****************************************************************//**
+Get a column type, length and attributes from the tuple.
+@return len of column data */
+UNIV_INLINE
+ib_ulint_t
+ib_col_get_meta_low(
+/*================*/
+ ib_tpl_t ib_tpl, /*!< in: tuple instance */
+ ib_ulint_t i, /*!< in: column index in tuple */
+ ib_col_meta_t* ib_col_meta) /*!< out: column meta data */
+{
+ ib_u16_t prtype;
+ const dfield_t* dfield;
+ ulint data_len;
+ ib_tuple_t* tuple = (ib_tuple_t*) ib_tpl;
+
+ dfield = ib_col_get_dfield(tuple, i);
+
+ data_len = dfield_get_len(dfield);
+
+ /* We assume 1-1 mapping between the ENUM and internal type codes. */
+ ib_col_meta->type = static_cast<ib_col_type_t>(
+ dtype_get_mtype(dfield_get_type(dfield)));
+
+ ib_col_meta->type_len = dtype_get_len(dfield_get_type(dfield));
+
+ prtype = (ib_u16_t) dtype_get_prtype(dfield_get_type(dfield));
+
+ ib_col_meta->attr = ib_col_get_attr(prtype);
+ ib_col_meta->client_type = prtype & DATA_MYSQL_TYPE_MASK;
+
+ return(data_len);
+}
+
/*************************************************************//**
Validate that tuple column i is an integer column compatible with a
read of the given signedness and byte width. */
UNIV_INLINE
ib_err_t
ib_tuple_check_int(
/*===============*/
	ib_tpl_t	ib_tpl,		/*!< in: InnoDB tuple */
	ib_ulint_t	i,		/*!< in: column number */
	ib_bool_t	usign,		/*!< in: true if unsigned */
	ulint		size)		/*!< in: size of integer */
{
	ib_col_meta_t	ib_col_meta;

	ib_col_get_meta_low(ib_tpl, i, &ib_col_meta);

	if (ib_col_meta.type != IB_INT) {
		return(DB_DATA_MISMATCH);
	} else if (ib_col_meta.type_len == IB_SQL_NULL) {
		/* NOTE(review): this compares the declared type length —
		not the data length — against IB_SQL_NULL; it looks like
		it was meant to detect a NULL value. Confirm against the
		ib_col_meta_t contract before relying on DB_UNDERFLOW. */
		return(DB_UNDERFLOW);
	} else if (ib_col_meta.type_len != size) {
		/* Caller's integer width must match the column exactly. */
		return(DB_DATA_MISMATCH);
	} else if ((ib_col_meta.attr & IB_COL_UNSIGNED) && !usign) {
		/* Reading an unsigned column through a signed reader. */
		return(DB_DATA_MISMATCH);
	}

	return(DB_SUCCESS);
}
+
+/*************************************************************//**
+Read a signed int 8 bit column from an InnoDB tuple.
+@return DB_SUCCESS or error */
+UNIV_INTERN
+ib_err_t
+ib_tuple_read_i8(
+/*=============*/
+ ib_tpl_t ib_tpl, /*!< in: InnoDB tuple */
+ ib_ulint_t i, /*!< in: column number */
+ ib_i8_t* ival) /*!< out: integer value */
+{
+ ib_err_t err;
+
+ err = ib_tuple_check_int(ib_tpl, i, IB_FALSE, sizeof(*ival));
+
+ if (err == DB_SUCCESS) {
+ ib_col_copy_value_low(ib_tpl, i, ival, sizeof(*ival));
+ }
+
+ return(err);
+}
+
+/*************************************************************//**
+Read an unsigned int 8 bit column from an InnoDB tuple.
+@return DB_SUCCESS or error */
+UNIV_INTERN
+ib_err_t
+ib_tuple_read_u8(
+/*=============*/
+ ib_tpl_t ib_tpl, /*!< in: InnoDB tuple */
+ ib_ulint_t i, /*!< in: column number */
+ ib_u8_t* ival) /*!< out: integer value */
+{
+ ib_err_t err;
+
+ err = ib_tuple_check_int(ib_tpl, i, IB_TRUE, sizeof(*ival));
+
+ if (err == DB_SUCCESS) {
+ ib_col_copy_value_low(ib_tpl, i, ival, sizeof(*ival));
+ }
+
+ return(err);
+}
+
+/*************************************************************//**
+Read a signed int 16 bit column from an InnoDB tuple.
+@return DB_SUCCESS or error */
+UNIV_INTERN
+ib_err_t
+ib_tuple_read_i16(
+/*==============*/
+ ib_tpl_t ib_tpl, /*!< in: InnoDB tuple */
+ ib_ulint_t i, /*!< in: column number */
+ ib_i16_t* ival) /*!< out: integer value */
+{
+ ib_err_t err;
+
+ err = ib_tuple_check_int(ib_tpl, i, FALSE, sizeof(*ival));
+
+ if (err == DB_SUCCESS) {
+ ib_col_copy_value_low(ib_tpl, i, ival, sizeof(*ival));
+ }
+
+ return(err);
+}
+
+/*************************************************************//**
+Read an unsigned int 16 bit column from an InnoDB tuple.
+@return DB_SUCCESS or error */
+UNIV_INTERN
+ib_err_t
+ib_tuple_read_u16(
+/*==============*/
+ ib_tpl_t ib_tpl, /*!< in: InnoDB tuple */
+ ib_ulint_t i, /*!< in: column number */
+ ib_u16_t* ival) /*!< out: integer value */
+{
+ ib_err_t err;
+
+ err = ib_tuple_check_int(ib_tpl, i, IB_TRUE, sizeof(*ival));
+
+ if (err == DB_SUCCESS) {
+ ib_col_copy_value_low(ib_tpl, i, ival, sizeof(*ival));
+ }
+
+ return(err);
+}
+
+/*************************************************************//**
+Read a signed int 32 bit column from an InnoDB tuple.
+@return DB_SUCCESS or error */
+UNIV_INTERN
+ib_err_t
+ib_tuple_read_i32(
+/*==============*/
+ ib_tpl_t ib_tpl, /*!< in: InnoDB tuple */
+ ib_ulint_t i, /*!< in: column number */
+ ib_i32_t* ival) /*!< out: integer value */
+{
+ ib_err_t err;
+
+ err = ib_tuple_check_int(ib_tpl, i, FALSE, sizeof(*ival));
+
+ if (err == DB_SUCCESS) {
+ ib_col_copy_value_low(ib_tpl, i, ival, sizeof(*ival));
+ }
+
+ return(err);
+}
+
+/*************************************************************//**
+Read an unsigned int 32 bit column from an InnoDB tuple.
+@return DB_SUCCESS or error */
+UNIV_INTERN
+ib_err_t
+ib_tuple_read_u32(
+/*==============*/
+ ib_tpl_t ib_tpl, /*!< in: InnoDB tuple */
+ ib_ulint_t i, /*!< in: column number */
+ ib_u32_t* ival) /*!< out: integer value */
+{
+ ib_err_t err;
+
+ err = ib_tuple_check_int(ib_tpl, i, IB_TRUE, sizeof(*ival));
+
+ if (err == DB_SUCCESS) {
+ ib_col_copy_value_low(ib_tpl, i, ival, sizeof(*ival));
+ }
+
+ return(err);
+}
+
+/*************************************************************//**
+Read a signed int 64 bit column from an InnoDB tuple.
+@return DB_SUCCESS or error */
+UNIV_INTERN
+ib_err_t
+ib_tuple_read_i64(
+/*==============*/
+ ib_tpl_t ib_tpl, /*!< in: InnoDB tuple */
+ ib_ulint_t i, /*!< in: column number */
+ ib_i64_t* ival) /*!< out: integer value */
+{
+ ib_err_t err;
+
+ err = ib_tuple_check_int(ib_tpl, i, FALSE, sizeof(*ival));
+
+ if (err == DB_SUCCESS) {
+ ib_col_copy_value_low(ib_tpl, i, ival, sizeof(*ival));
+ }
+
+ return(err);
+}
+
+/*************************************************************//**
+Read an unsigned int 64 bit column from an InnoDB tuple.
+@return DB_SUCCESS or error */
+UNIV_INTERN
+ib_err_t
+ib_tuple_read_u64(
+/*==============*/
+ ib_tpl_t ib_tpl, /*!< in: InnoDB tuple */
+ ib_ulint_t i, /*!< in: column number */
+ ib_u64_t* ival) /*!< out: integer value */
+{
+ ib_err_t err;
+
+ err = ib_tuple_check_int(ib_tpl, i, IB_TRUE, sizeof(*ival));
+
+ if (err == DB_SUCCESS) {
+ ib_col_copy_value_low(ib_tpl, i, ival, sizeof(*ival));
+ }
+
+ return(err);
+}
+
+/*****************************************************************//**
+Get a column value pointer from the tuple.
+@return NULL or pointer to buffer */
+UNIV_INTERN
+const void*
+ib_col_get_value(
+/*=============*/
+ ib_tpl_t ib_tpl, /*!< in: tuple instance */
+ ib_ulint_t i) /*!< in: column index in tuple */
+{
+ const void* data;
+ const dfield_t* dfield;
+ ulint data_len;
+ ib_tuple_t* tuple = (ib_tuple_t*) ib_tpl;
+
+ dfield = ib_col_get_dfield(tuple, i);
+
+ data = dfield_get_data(dfield);
+ data_len = dfield_get_len(dfield);
+
+ return(data_len != UNIV_SQL_NULL ? data : NULL);
+}
+
+/*****************************************************************//**
+Get a column type, length and attributes from the tuple.
+@return len of column data */
+UNIV_INTERN
+ib_ulint_t
+ib_col_get_meta(
+/*============*/
+ ib_tpl_t ib_tpl, /*!< in: tuple instance */
+ ib_ulint_t i, /*!< in: column index in tuple */
+ ib_col_meta_t* ib_col_meta) /*!< out: column meta data */
+{
+ return(ib_col_get_meta_low(ib_tpl, i, ib_col_meta));
+}
+
+/*****************************************************************//**
+"Clear" or reset an InnoDB tuple. We free the heap and recreate the tuple.
+@return new tuple, or NULL */
+UNIV_INTERN
+ib_tpl_t
+ib_tuple_clear(
+/*============*/
+ ib_tpl_t ib_tpl) /*!< in,own: tuple (will be freed) */
+{
+ const dict_index_t* index;
+ ulint n_cols;
+ ib_tuple_t* tuple = (ib_tuple_t*) ib_tpl;
+ ib_tuple_type_t type = tuple->type;
+ mem_heap_t* heap = tuple->heap;
+
+ index = tuple->index;
+ n_cols = dtuple_get_n_fields(tuple->ptr);
+
+ mem_heap_empty(heap);
+
+ if (type == TPL_TYPE_ROW) {
+ return(ib_row_tuple_new_low(index, n_cols, heap));
+ } else {
+ return(ib_key_tuple_new_low(index, n_cols, heap));
+ }
+}
+
/*****************************************************************//**
Create a new cluster key search tuple and copy the contents of the
secondary index key tuple columns that refer to the cluster index record
to the cluster key. It does a deep copy of the column data.
@return DB_SUCCESS or error code */
UNIV_INTERN
ib_err_t
ib_tuple_get_cluster_key(
/*=====================*/
	ib_crsr_t	ib_crsr,	/*!< in: secondary index cursor */
	ib_tpl_t*	ib_dst_tpl,	/*!< out,own: destination tuple */
	const ib_tpl_t	ib_src_tpl)	/*!< in: source tuple */
{
	ulint		i;
	ulint		n_fields;
	ib_err_t	err = DB_SUCCESS;
	ib_tuple_t*	dst_tuple = NULL;
	ib_cursor_t*	cursor = (ib_cursor_t*) ib_crsr;
	ib_tuple_t*	src_tuple = (ib_tuple_t*) ib_src_tpl;
	dict_index_t*	clust_index;

	clust_index = dict_table_get_first_index(cursor->prebuilt->table);

	/* We need to ensure that the src tuple belongs to the same table
	as the open cursor and that it's not a tuple for a cluster index. */
	if (src_tuple->type != TPL_TYPE_KEY) {
		return(DB_ERROR);
	} else if (src_tuple->index->table != cursor->prebuilt->table) {
		return(DB_DATA_MISMATCH);
	} else if (src_tuple->index == clust_index) {
		return(DB_ERROR);
	}

	/* Create the cluster index key search tuple. */
	*ib_dst_tpl = ib_clust_search_tuple_create(ib_crsr);

	if (!*ib_dst_tpl) {
		return(DB_OUT_OF_MEMORY);
	}

	dst_tuple = (ib_tuple_t*) *ib_dst_tpl;
	ut_a(dst_tuple->index == clust_index);

	/* Copy only the unique (ordering) fields of the cluster key. */
	n_fields = dict_index_get_n_unique(dst_tuple->index);

	/* Do a deep copy of the data fields. */
	for (i = 0; i < n_fields; i++) {
		ulint		pos;
		dfield_t*	src_field;
		dfield_t*	dst_field;

		/* Locate where the i'th cluster key field appears
		inside the secondary index tuple. */
		pos = dict_index_get_nth_field_pos(
			src_tuple->index, dst_tuple->index, i);

		ut_a(pos != ULINT_UNDEFINED);

		src_field = dtuple_get_nth_field(src_tuple->ptr, pos);
		dst_field = dtuple_get_nth_field(dst_tuple->ptr, i);

		if (!dfield_is_null(src_field)) {
			UNIV_MEM_ASSERT_RW(src_field->data, src_field->len);

			/* Deep copy: duplicate the bytes into the
			destination tuple's own heap. */
			dst_field->data = mem_heap_dup(
				dst_tuple->heap,
				src_field->data,
				src_field->len);

			dst_field->len = src_field->len;
		} else {
			dfield_set_null(dst_field);
		}
	}

	return(err);
}
+
+/*****************************************************************//**
+Copy the contents of source tuple to destination tuple. The tuples
+must be of the same type and belong to the same table/index.
+@return DB_SUCCESS or error code */
+UNIV_INTERN
+ib_err_t
+ib_tuple_copy(
+/*==========*/
+ ib_tpl_t ib_dst_tpl, /*!< in: destination tuple */
+ const ib_tpl_t ib_src_tpl) /*!< in: source tuple */
+{
+ ulint i;
+ ulint n_fields;
+ ib_err_t err = DB_SUCCESS;
+ const ib_tuple_t*src_tuple = (const ib_tuple_t*) ib_src_tpl;
+ ib_tuple_t* dst_tuple = (ib_tuple_t*) ib_dst_tpl;
+
+ /* Make sure src and dst are not the same. */
+ ut_a(src_tuple != dst_tuple);
+
+ /* Make sure they are the same type and refer to the same index. */
+ if (src_tuple->type != dst_tuple->type
+ || src_tuple->index != dst_tuple->index) {
+
+ return(DB_DATA_MISMATCH);
+ }
+
+ n_fields = dtuple_get_n_fields(src_tuple->ptr);
+ ut_ad(n_fields == dtuple_get_n_fields(dst_tuple->ptr));
+
+ /* Do a deep copy of the data fields. */
+ for (i = 0; i < n_fields; ++i) {
+ dfield_t* src_field;
+ dfield_t* dst_field;
+
+ src_field = dtuple_get_nth_field(src_tuple->ptr, i);
+ dst_field = dtuple_get_nth_field(dst_tuple->ptr, i);
+
+ if (!dfield_is_null(src_field)) {
+ UNIV_MEM_ASSERT_RW(src_field->data, src_field->len);
+
+ dst_field->data = mem_heap_dup(
+ dst_tuple->heap,
+ src_field->data,
+ src_field->len);
+
+ dst_field->len = src_field->len;
+ } else {
+ dfield_set_null(dst_field);
+ }
+ }
+
+ return(err);
+}
+
+/*****************************************************************//**
+Create an InnoDB tuple used for index/table search.
+@return own: Tuple for current index */
+UNIV_INTERN
+ib_tpl_t
+ib_sec_search_tuple_create(
+/*=======================*/
+ ib_crsr_t ib_crsr) /*!< in: Cursor instance */
+{
+ ulint n_cols;
+ ib_cursor_t* cursor = (ib_cursor_t*) ib_crsr;
+ dict_index_t* index = cursor->prebuilt->index;
+
+ n_cols = dict_index_get_n_unique_in_tree(index);
+ return(ib_key_tuple_new(index, n_cols));
+}
+
+/*****************************************************************//**
+Create an InnoDB row tuple for reading records of the cursor's current
+index. (The original header said "search" — this is a read tuple
+covering all fields of the index.)
+@return own: Tuple for current index */
+UNIV_INTERN
+ib_tpl_t
+ib_sec_read_tuple_create(
+/*=====================*/
+	ib_crsr_t	ib_crsr)	/*!< in: Cursor instance */
+{
+	ulint		n_cols;
+	ib_cursor_t*	cursor = (ib_cursor_t*) ib_crsr;
+	dict_index_t*	index = cursor->prebuilt->index;
+
+	n_cols = dict_index_get_n_fields(index);
+	return(ib_row_tuple_new(index, n_cols));
+}
+
+/*****************************************************************//**
+Create an InnoDB key (search) tuple for the table's clustered index.
+The tuple has one field per user-defined ordering column.
+@return own: Tuple for current table */
+UNIV_INTERN
+ib_tpl_t
+ib_clust_search_tuple_create(
+/*=========================*/
+	ib_crsr_t	ib_crsr)	/*!< in: Cursor instance */
+{
+	ulint		n_cols;
+	ib_cursor_t*	cursor = (ib_cursor_t*) ib_crsr;
+	dict_index_t*	index;
+
+	/* The first index of a table is always its clustered index. */
+	index = dict_table_get_first_index(cursor->prebuilt->table);
+
+	n_cols = dict_index_get_n_ordering_defined_by_user(index);
+	return(ib_key_tuple_new(index, n_cols));
+}
+
+/*****************************************************************//**
+Create an InnoDB row tuple for reading full rows via the table's
+clustered index.
+@return own: Tuple for current table */
+UNIV_INTERN
+ib_tpl_t
+ib_clust_read_tuple_create(
+/*=======================*/
+	ib_crsr_t	ib_crsr)	/*!< in: Cursor instance */
+{
+	ulint		n_cols;
+	ib_cursor_t*	cursor = (ib_cursor_t*) ib_crsr;
+	dict_index_t*	index;
+
+	/* The first index of a table is always its clustered index. */
+	index = dict_table_get_first_index(cursor->prebuilt->table);
+
+	/* NOTE(review): dict_table_get_n_cols() presumably includes the
+	InnoDB system columns in addition to user columns — verify this is
+	the intended width for the read tuple. */
+	n_cols = dict_table_get_n_cols(cursor->prebuilt->table);
+	return(ib_row_tuple_new(index, n_cols));
+}
+
+/*****************************************************************//**
+Return the number of user columns in the tuple definition. For row
+tuples this is the table's user column count; for key tuples it is the
+number of user-defined ordering columns of the index.
+@return number of user columns */
+UNIV_INTERN
+ib_ulint_t
+ib_tuple_get_n_user_cols(
+/*=====================*/
+	const ib_tpl_t	ib_tpl)		/*!< in: Tuple for current table */
+{
+	const ib_tuple_t*	tuple = (const ib_tuple_t*) ib_tpl;
+
+	if (tuple->type == TPL_TYPE_ROW) {
+		return(dict_table_get_n_user_cols(tuple->index->table));
+	}
+
+	return(dict_index_get_n_ordering_defined_by_user(tuple->index));
+}
+
+/*****************************************************************//**
+Return the number of columns (fields) in the tuple definition, as
+stored in the underlying dtuple.
+@return number of columns */
+UNIV_INTERN
+ib_ulint_t
+ib_tuple_get_n_cols(
+/*================*/
+	const ib_tpl_t	ib_tpl)		/*!< in: Tuple for table/index */
+{
+	const ib_tuple_t*	tuple = (const ib_tuple_t*) ib_tpl;
+
+	return(dtuple_get_n_fields(tuple->ptr));
+}
+
+/*****************************************************************//**
+Destroy an InnoDB tuple. Freeing the tuple's heap releases the tuple
+itself and all field data allocated from it. A NULL handle is a no-op. */
+UNIV_INTERN
+void
+ib_tuple_delete(
+/*============*/
+	ib_tpl_t	ib_tpl)		/*!< in,own: Tuple instance to delete */
+{
+	/* Casting before the NULL check is safe: the pointer is not
+	dereferenced until after the check below. */
+	ib_tuple_t*	tuple = (ib_tuple_t*) ib_tpl;
+
+	if (!ib_tpl) {
+		return;
+	}
+
+	mem_heap_free(tuple->heap);
+}
+
+/*****************************************************************//**
+Get a table id by table name. This function will acquire (and release)
+the dictionary mutex around the lookup.
+@return DB_SUCCESS if found */
+UNIV_INTERN
+ib_err_t
+ib_table_get_id(
+/*============*/
+	const char*	table_name,	/*!< in: table to find */
+	ib_id_u64_t*	table_id)	/*!< out: table id if found */
+{
+	ib_err_t	err;
+
+	dict_mutex_enter_for_mysql();
+
+	err = ib_table_get_id_low(table_name, table_id);
+
+	dict_mutex_exit_for_mysql();
+
+	return(err);
+}
+
+/*****************************************************************//**
+Get an index id by table and index name. On success the returned id
+packs the table id in the high 32 bits and the index id in the low 32
+bits. *index_id is set to 0 on entry.
+@return DB_SUCCESS if found */
+UNIV_INTERN
+ib_err_t
+ib_index_get_id(
+/*============*/
+	const char*	table_name,	/*!< in: find index for this table */
+	const char*	index_name,	/*!< in: index to find */
+	ib_id_u64_t*	index_id)	/*!< out: index id if found */
+{
+	dict_table_t*	table;
+	char*		normalized_name;
+	ib_err_t	err = DB_TABLE_NOT_FOUND;
+
+	*index_id = 0;
+
+	/* The normalized name is never longer than the input name, so
+	strlen(table_name) + 1 bytes suffice for the buffer. */
+	normalized_name = static_cast<char*>(
+		mem_alloc(ut_strlen(table_name) + 1));
+	ib_normalize_table_name(normalized_name, table_name);
+
+	table = ib_lookup_table_by_name(normalized_name);
+
+	mem_free(normalized_name);
+	normalized_name = NULL;
+
+	if (table != NULL) {
+		dict_index_t*	index;
+
+		index = dict_table_get_index_on_name(table, index_name);
+
+		if (index != NULL) {
+			/* We only support 32 bit table and index ids. Because
+			we need to pack the table id into the index id.
+			NOTE(review): ids wider than 32 bits would be silently
+			truncated by this packing. */
+
+			*index_id = (table->id);
+			*index_id <<= 32;
+			*index_id |= (index->id);
+
+			err = DB_SUCCESS;
+		}
+	}
+
+	return(err);
+}
+
+#ifdef __WIN__
+#define SRV_PATH_SEPARATOR '\\'
+#else
+#define SRV_PATH_SEPARATOR '/'
+#endif
+
+
+/*****************************************************************//**
+Check if the cursor's persistent B-tree cursor is currently positioned
+on a record.
+@return IB_TRUE if positioned */
+UNIV_INTERN
+ib_bool_t
+ib_cursor_is_positioned(
+/*====================*/
+	const ib_crsr_t	ib_crsr)	/*!< in: InnoDB cursor instance */
+{
+	const ib_cursor_t*	cursor = (const ib_cursor_t*) ib_crsr;
+	row_prebuilt_t*		prebuilt = cursor->prebuilt;
+
+	return(ib_btr_cursor_is_positioned(&prebuilt->pcur));
+}
+
+
+/*****************************************************************//**
+Checks if the data dictionary is latched in exclusive mode by the
+given transaction.
+@return TRUE if exclusive latch */
+UNIV_INTERN
+ib_bool_t
+ib_schema_lock_is_exclusive(
+/*========================*/
+	const ib_trx_t	ib_trx)		/*!< in: transaction */
+{
+	const trx_t*	trx = (const trx_t*) ib_trx;
+
+	return(trx->dict_operation_lock_mode == RW_X_LATCH);
+}
+
+/*****************************************************************//**
+Checks if the data dictionary is latched in shared mode by the given
+transaction.
+@return TRUE if shared latch */
+UNIV_INTERN
+ib_bool_t
+ib_schema_lock_is_shared(
+/*=====================*/
+	const ib_trx_t	ib_trx)		/*!< in: transaction */
+{
+	const trx_t*	trx = (const trx_t*) ib_trx;
+
+	return(trx->dict_operation_lock_mode == RW_S_LATCH);
+}
+
+/*****************************************************************//**
+Lock the table underlying an InnoDB cursor in the given mode,
+retrying on lock wait. (Header grammar fixed: was "Set the Lock an
+InnoDB cursor/table".)
+@return DB_SUCCESS or error code */
+UNIV_INTERN
+ib_err_t
+ib_cursor_lock(
+/*===========*/
+	ib_crsr_t	ib_crsr,	/*!< in/out: InnoDB cursor */
+	ib_lck_mode_t	ib_lck_mode)	/*!< in: InnoDB lock mode */
+{
+	ib_cursor_t*	cursor = (ib_cursor_t*) ib_crsr;
+	row_prebuilt_t*	prebuilt = cursor->prebuilt;
+	trx_t*		trx = prebuilt->trx;
+	dict_table_t*	table = prebuilt->table;
+
+	return(ib_trx_lock_table_with_retry(
+		trx, table, (enum lock_mode) ib_lck_mode));
+}
+
+/*****************************************************************//**
+Lock an InnoDB table, looked up by its table id, in intention mode
+(only IB_LOCK_IS or IB_LOCK_IX are accepted). (Header grammar fixed:
+was "Set the Lock an InnoDB table using the table id".)
+@return DB_SUCCESS or error code */
+UNIV_INTERN
+ib_err_t
+ib_table_lock(
+/*==========*/
+	ib_trx_t	ib_trx,		/*!< in/out: transaction */
+	ib_id_u64_t	table_id,	/*!< in: table id */
+	ib_lck_mode_t	ib_lck_mode)	/*!< in: InnoDB lock mode */
+{
+	ib_err_t	err;
+	que_thr_t*	thr;
+	mem_heap_t*	heap;
+	dict_table_t*	table;
+	ib_qry_proc_t	q_proc;
+	trx_t*		trx = (trx_t*) ib_trx;
+
+	/* The transaction must already have been started by the caller. */
+	ut_a(trx->state != TRX_STATE_NOT_STARTED);
+
+	table = ib_open_table_by_id(table_id, FALSE);
+
+	if (table == NULL) {
+		return(DB_TABLE_NOT_FOUND);
+	}
+
+	ut_a(ib_lck_mode <= static_cast<ib_lck_mode_t>(LOCK_NUM));
+
+	/* Build a dummy SELECT query graph: lock_table() requires a query
+	thread, and the graph is allocated entirely on this heap. */
+	heap = mem_heap_create(128);
+
+	q_proc.node.sel = sel_node_create(heap);
+
+	thr = pars_complete_graph_for_exec(q_proc.node.sel, trx, heap);
+
+	q_proc.grph.sel = static_cast<que_fork_t*>(que_node_get_parent(thr));
+	q_proc.grph.sel->state = QUE_FORK_ACTIVE;
+
+	trx->op_info = "setting table lock";
+
+	ut_a(ib_lck_mode == IB_LOCK_IS || ib_lck_mode == IB_LOCK_IX);
+	err = static_cast<ib_err_t>(
+		lock_table(0, table, (enum lock_mode) ib_lck_mode, thr));
+
+	trx->error_state = err;
+
+	/* Freeing the heap releases the dummy query graph as well.
+	NOTE(review): the table opened via ib_open_table_by_id() is not
+	closed here, and trx->op_info is not reset to "" — confirm both
+	are intentional. */
+	mem_heap_free(heap);
+
+	return(err);
+}
+
+/*****************************************************************//**
+Unlock an InnoDB table. Decrements the transaction's MySQL table-lock
+count; returns DB_ERROR if no table was counted as locked.
+NOTE(review): only the counter is adjusted here — presumably the actual
+lock release happens elsewhere (e.g. at transaction end); confirm.
+@return DB_SUCCESS or error code */
+UNIV_INTERN
+ib_err_t
+ib_cursor_unlock(
+/*=============*/
+	ib_crsr_t	ib_crsr)	/*!< in/out: InnoDB cursor */
+{
+	ib_err_t	err = DB_SUCCESS;
+	ib_cursor_t*	cursor = (ib_cursor_t*) ib_crsr;
+	row_prebuilt_t*	prebuilt = cursor->prebuilt;
+
+	if (prebuilt->trx->mysql_n_tables_locked > 0) {
+		--prebuilt->trx->mysql_n_tables_locked;
+	} else {
+		err = DB_ERROR;
+	}
+
+	return(err);
+}
+
+/*****************************************************************//**
+Set the lock mode of the cursor. A row lock mode of IB_LOCK_X (resp.
+IB_LOCK_S) first takes an IX (resp. IS) intention lock on the table;
+on success the prebuilt select lock type is set to the requested mode.
+@return DB_SUCCESS or error code */
+UNIV_INTERN
+ib_err_t
+ib_cursor_set_lock_mode(
+/*====================*/
+	ib_crsr_t	ib_crsr,	/*!< in/out: InnoDB cursor */
+	ib_lck_mode_t	ib_lck_mode)	/*!< in: InnoDB lock mode */
+{
+	ib_err_t	err = DB_SUCCESS;
+	ib_cursor_t*	cursor = (ib_cursor_t*) ib_crsr;
+	row_prebuilt_t*	prebuilt = cursor->prebuilt;
+
+	ut_a(ib_lck_mode <= static_cast<ib_lck_mode_t>(LOCK_NUM));
+
+	if (ib_lck_mode == IB_LOCK_X) {
+		err = ib_cursor_lock(ib_crsr, IB_LOCK_IX);
+	} else if (ib_lck_mode == IB_LOCK_S) {
+		err = ib_cursor_lock(ib_crsr, IB_LOCK_IS);
+	}
+
+	if (err == DB_SUCCESS) {
+		prebuilt->select_lock_type = (enum lock_mode) ib_lck_mode;
+		ut_a(prebuilt->trx->state != TRX_STATE_NOT_STARTED);
+	}
+
+	return(err);
+}
+
+/*****************************************************************//**
+Mark that the cursor needs to access the clustered index record,
+even when positioned on a secondary index. */
+UNIV_INTERN
+void
+ib_cursor_set_cluster_access(
+/*=========================*/
+	ib_crsr_t	ib_crsr)	/*!< in/out: InnoDB cursor */
+{
+	ib_cursor_t*	cursor = (ib_cursor_t*) ib_crsr;
+	row_prebuilt_t*	prebuilt = cursor->prebuilt;
+
+	prebuilt->need_to_access_clustered = TRUE;
+}
+
+/*************************************************************//**
+Convert and write an INT column value to an InnoDB tuple. Validates
+that the target column is of integer type and that the caller's value
+size matches the current field length.
+@return DB_SUCCESS or error */
+UNIV_INLINE
+ib_err_t
+ib_tuple_write_int(
+/*===============*/
+	ib_tpl_t	ib_tpl,		/*!< in/out: tuple to write to */
+	ulint		col_no,		/*!< in: column number */
+	const void*	value,		/*!< in: integer value */
+	ulint		value_len)	/*!< in: sizeof value type */
+{
+	const dfield_t*	dfield;
+	ulint		data_len;
+	ulint		type_len;
+	ib_tuple_t*	tuple = (ib_tuple_t*) ib_tpl;
+
+	ut_a(col_no < ib_tuple_get_n_cols(ib_tpl));
+
+	dfield = ib_col_get_dfield(tuple, col_no);
+
+	data_len = dfield_get_len(dfield);
+	type_len = dtype_get_len(dfield_get_type(dfield));
+
+	if (dtype_get_mtype(dfield_get_type(dfield)) != DATA_INT
+	    || value_len != data_len) {
+
+		return(DB_DATA_MISMATCH);
+	}
+
+	/* NOTE(review): value_len is validated against data_len above,
+	but type_len (the declared column length) is what is passed on —
+	confirm the two cannot diverge for DATA_INT columns. */
+	return(ib_col_set_value(ib_tpl, col_no, value, type_len));
+}
+
+/*****************************************************************//**
+Write an integer value to a column. Integers are stored in big-endian
+format and will need to be converted from the host format.
+@return DB_SUCCESS or error */
+UNIV_INTERN
+ib_err_t
+ib_tuple_write_i8(
+/*==============*/
+	ib_tpl_t	ib_tpl,		/*!< in/out: tuple to write to */
+	int		col_no,		/*!< in: column number */
+	ib_i8_t		val)		/*!< in: value to write */
+{
+	return(ib_col_set_value(ib_tpl, col_no, &val, sizeof(val)));
+}
+
+/*****************************************************************//**
+Write an integer value to a column. Integers are stored in big-endian
+format and will need to be converted from the host format.
+@return DB_SUCCESS or error */
+UNIV_INTERN
+ib_err_t
+ib_tuple_write_i16(
+/*===============*/
+	ib_tpl_t	ib_tpl,		/*!< in/out: tuple to write to */
+	int		col_no,		/*!< in: column number */
+	ib_i16_t	val)		/*!< in: value to write */
+{
+	return(ib_col_set_value(ib_tpl, col_no, &val, sizeof(val)));
+}
+
+/*****************************************************************//**
+Write an integer value to a column. Integers are stored in big-endian
+format and will need to be converted from the host format.
+@return DB_SUCCESS or error */
+UNIV_INTERN
+ib_err_t
+ib_tuple_write_i32(
+/*===============*/
+	ib_tpl_t	ib_tpl,		/*!< in/out: tuple to write to */
+	int		col_no,		/*!< in: column number */
+	ib_i32_t	val)		/*!< in: value to write */
+{
+	return(ib_col_set_value(ib_tpl, col_no, &val, sizeof(val)));
+}
+
+/*****************************************************************//**
+Write an integer value to a column. Integers are stored in big-endian
+format and will need to be converted from the host format.
+@return DB_SUCCESS or error */
+UNIV_INTERN
+ib_err_t
+ib_tuple_write_i64(
+/*===============*/
+	ib_tpl_t	ib_tpl,		/*!< in/out: tuple to write to */
+	int		col_no,		/*!< in: column number */
+	ib_i64_t	val)		/*!< in: value to write */
+{
+	return(ib_col_set_value(ib_tpl, col_no, &val, sizeof(val)));
+}
+
+/*****************************************************************//**
+Write an integer value to a column. Integers are stored in big-endian
+format and will need to be converted from the host format.
+@return DB_SUCCESS or error */
+UNIV_INTERN
+ib_err_t
+ib_tuple_write_u8(
+/*==============*/
+	ib_tpl_t	ib_tpl,		/*!< in/out: tuple to write to */
+	int		col_no,		/*!< in: column number */
+	ib_u8_t		val)		/*!< in: value to write */
+{
+	return(ib_col_set_value(ib_tpl, col_no, &val, sizeof(val)));
+}
+
+/*****************************************************************//**
+Write an integer value to a column. Integers are stored in big-endian
+format and will need to be converted from the host format.
+@return DB_SUCCESS or error */
+UNIV_INTERN
+ib_err_t
+ib_tuple_write_u16(
+/*===============*/
+	ib_tpl_t	ib_tpl,		/*!< in/out: tuple to write to */
+	int		col_no,		/*!< in: column number */
+	ib_u16_t	val)		/*!< in: value to write */
+{
+	return(ib_col_set_value(ib_tpl, col_no, &val, sizeof(val)));
+}
+
+/*****************************************************************//**
+Write an integer value to a column. Integers are stored in big-endian
+format and will need to be converted from the host format.
+@return DB_SUCCESS or error */
+UNIV_INTERN
+ib_err_t
+ib_tuple_write_u32(
+/*===============*/
+	ib_tpl_t	ib_tpl,		/*!< in/out: tuple to write to */
+	int		col_no,		/*!< in: column number */
+	ib_u32_t	val)		/*!< in: value to write */
+{
+	return(ib_col_set_value(ib_tpl, col_no, &val, sizeof(val)));
+}
+
+/*****************************************************************//**
+Write an integer value to a column. Integers are stored in big-endian
+format and will need to be converted from the host format.
+@return DB_SUCCESS or error */
+UNIV_INTERN
+ib_err_t
+ib_tuple_write_u64(
+/*===============*/
+	ib_tpl_t	ib_tpl,		/*!< in/out: tuple to write to */
+	int		col_no,		/*!< in: column number */
+	ib_u64_t	val)		/*!< in: value to write */
+{
+	return(ib_col_set_value(ib_tpl, col_no, &val, sizeof(val)));
+}
+
+/*****************************************************************//**
+Inform the cursor that it's the start of an SQL statement, so the
+prebuilt struct re-initializes its per-statement state. */
+UNIV_INTERN
+void
+ib_cursor_stmt_begin(
+/*=================*/
+	ib_crsr_t	ib_crsr)	/*!< in: cursor */
+{
+	ib_cursor_t*	cursor = (ib_cursor_t*) ib_crsr;
+
+	cursor->prebuilt->sql_stat_start = TRUE;
+}
+
+/*****************************************************************//**
+Write a double value to a column. Fails with DB_DATA_MISMATCH if the
+target column is not of type DATA_DOUBLE.
+@return DB_SUCCESS or error */
+UNIV_INTERN
+ib_err_t
+ib_tuple_write_double(
+/*==================*/
+	ib_tpl_t	ib_tpl,		/*!< in/out: tuple to write to */
+	int		col_no,		/*!< in: column number */
+	double		val)		/*!< in: value to write */
+{
+	const dfield_t*	dfield;
+	ib_tuple_t*	tuple = (ib_tuple_t*) ib_tpl;
+
+	dfield = ib_col_get_dfield(tuple, col_no);
+
+	if (dtype_get_mtype(dfield_get_type(dfield)) == DATA_DOUBLE) {
+		return(ib_col_set_value(ib_tpl, col_no, &val, sizeof(val)));
+	} else {
+		return(DB_DATA_MISMATCH);
+	}
+}
+
+/*************************************************************//**
+Read a double column value from an InnoDB tuple. Fails with
+DB_DATA_MISMATCH if the column is not of type DATA_DOUBLE.
+@return DB_SUCCESS or error */
+UNIV_INTERN
+ib_err_t
+ib_tuple_read_double(
+/*=================*/
+	ib_tpl_t	ib_tpl,		/*!< in: InnoDB tuple */
+	ib_ulint_t	col_no,		/*!< in: column number */
+	double*		dval)		/*!< out: double value */
+{
+	ib_err_t	err;
+	const dfield_t*	dfield;
+	ib_tuple_t*	tuple = (ib_tuple_t*) ib_tpl;
+
+	dfield = ib_col_get_dfield(tuple, col_no);
+
+	if (dtype_get_mtype(dfield_get_type(dfield)) == DATA_DOUBLE) {
+		ib_col_copy_value_low(ib_tpl, col_no, dval, sizeof(*dval));
+		err = DB_SUCCESS;
+	} else {
+		err = DB_DATA_MISMATCH;
+	}
+
+	return(err);
+}
+
+/*****************************************************************//**
+Write a float value to a column. Fails with DB_DATA_MISMATCH if the
+target column is not of type DATA_FLOAT.
+@return DB_SUCCESS or error */
+UNIV_INTERN
+ib_err_t
+ib_tuple_write_float(
+/*=================*/
+	ib_tpl_t	ib_tpl,		/*!< in/out: tuple to write to */
+	int		col_no,		/*!< in: column number */
+	float		val)		/*!< in: value to write */
+{
+	const dfield_t*	dfield;
+	ib_tuple_t*	tuple = (ib_tuple_t*) ib_tpl;
+
+	dfield = ib_col_get_dfield(tuple, col_no);
+
+	if (dtype_get_mtype(dfield_get_type(dfield)) == DATA_FLOAT) {
+		return(ib_col_set_value(ib_tpl, col_no, &val, sizeof(val)));
+	} else {
+		return(DB_DATA_MISMATCH);
+	}
+}
+
+/*************************************************************//**
+Read a float value from an InnoDB tuple. Fails with DB_DATA_MISMATCH
+if the column is not of type DATA_FLOAT.
+@return DB_SUCCESS or error */
+UNIV_INTERN
+ib_err_t
+ib_tuple_read_float(
+/*================*/
+	ib_tpl_t	ib_tpl,		/*!< in: InnoDB tuple */
+	ib_ulint_t	col_no,		/*!< in: column number */
+	float*		fval)		/*!< out: float value */
+{
+	ib_err_t	err;
+	const dfield_t*	dfield;
+	ib_tuple_t*	tuple = (ib_tuple_t*) ib_tpl;
+
+	dfield = ib_col_get_dfield(tuple, col_no);
+
+	if (dtype_get_mtype(dfield_get_type(dfield)) == DATA_FLOAT) {
+		ib_col_copy_value_low(ib_tpl, col_no, fval, sizeof(*fval));
+		err = DB_SUCCESS;
+	} else {
+		err = DB_DATA_MISMATCH;
+	}
+
+	return(err);
+}
+
+/*****************************************************************//**
+Truncate a table. The cursor handle will be closed and set to NULL
+on success (the caller must not use it afterwards). On success the
+new table id is returned in *table_id; the underlying routine commits
+the transaction.
+@return DB_SUCCESS or error code */
+UNIV_INTERN
+ib_err_t
+ib_cursor_truncate(
+/*===============*/
+	ib_crsr_t*	ib_crsr,	/*!< in/out: cursor for table
+					to truncate */
+	ib_id_u64_t*	table_id)	/*!< out: new table id */
+{
+	ib_err_t	err;
+	ib_cursor_t*	cursor = *(ib_cursor_t**) ib_crsr;
+	row_prebuilt_t*	prebuilt = cursor->prebuilt;
+
+	*table_id = 0;
+
+	err = ib_cursor_lock(*ib_crsr, IB_LOCK_X);
+
+	if (err == DB_SUCCESS) {
+		trx_t*		trx;
+		dict_table_t*	table = prebuilt->table;
+
+		/* We are going to free the cursor and the prebuilt. Store
+		the transaction handle locally. */
+		trx = prebuilt->trx;
+		err = ib_cursor_close(*ib_crsr);
+		ut_a(err == DB_SUCCESS);
+
+		*ib_crsr = NULL;
+
+		/* A temp go around for assertion in trx_start_for_ddl_low
+		we already start the trx */
+		if (trx->state == TRX_STATE_ACTIVE) {
+#ifdef UNIV_DEBUG
+			trx->start_file = 0;
+#endif /* UNIV_DEBUG */
+			trx->dict_operation = TRX_DICT_OP_TABLE;
+		}
+
+		/* This function currently commits the transaction
+		on success. */
+		err = static_cast<ib_err_t>(
+			row_truncate_table_for_mysql(table, trx));
+
+		if (err == DB_SUCCESS) {
+			*table_id = (table->id);
+		}
+	}
+
+	return(err);
+}
+
+/*****************************************************************//**
+Truncate a table by name: opens the table, creates a temporary cursor
+and serializable transaction, truncates through ib_cursor_truncate(),
+then releases or rolls back the transaction depending on the outcome.
+@return DB_SUCCESS or error code */
+UNIV_INTERN
+ib_err_t
+ib_table_truncate(
+/*==============*/
+	const char*	table_name,	/*!< in: table name */
+	ib_id_u64_t*	table_id)	/*!< out: new table id */
+{
+	ib_err_t	err;
+	dict_table_t*	table;
+	ib_err_t	trunc_err;
+	ib_trx_t	ib_trx = NULL;
+	ib_crsr_t	ib_crsr = NULL;
+
+	ib_trx = ib_trx_begin(IB_TRX_SERIALIZABLE);
+
+	dict_mutex_enter_for_mysql();
+
+	table = dict_table_open_on_name(table_name, TRUE, FALSE,
+					DICT_ERR_IGNORE_NONE);
+
+	if (table != NULL && dict_table_get_first_index(table)) {
+		err = ib_create_cursor_with_index_id(&ib_crsr, table, 0,
+						     (trx_t*) ib_trx);
+	} else {
+		err = DB_TABLE_NOT_FOUND;
+	}
+
+	dict_mutex_exit_for_mysql();
+
+	if (err == DB_SUCCESS) {
+		trunc_err = ib_cursor_truncate(&ib_crsr, table_id);
+		/* NOTE(review): this assertion re-checks `err`, which was
+		already known to be DB_SUCCESS from the branch condition —
+		it was presumably meant to check `trunc_err` instead. */
+		ut_a(err == DB_SUCCESS);
+	} else {
+		trunc_err = err;
+	}
+
+	/* ib_cursor_truncate() NULLs the handle on success; only close
+	it here if it is still open (i.e. the truncate path failed early). */
+	if (ib_crsr != NULL) {
+		err = ib_cursor_close(ib_crsr);
+		ut_a(err == DB_SUCCESS);
+	}
+
+	if (trunc_err == DB_SUCCESS) {
+		/* The truncate committed the transaction already. */
+		ut_a(ib_trx_state(ib_trx) == static_cast<ib_trx_state_t>(
+			TRX_STATE_NOT_STARTED));
+
+		err = ib_trx_release(ib_trx);
+		ut_a(err == DB_SUCCESS);
+	} else {
+		err = ib_trx_rollback(ib_trx);
+		ut_a(err == DB_SUCCESS);
+	}
+
+	return(trunc_err);
+}
+
+/*****************************************************************//**
+Frees a possible InnoDB trx object associated with the current THD.
+Always reports success.
+@return 0 or error number */
+UNIV_INTERN
+ib_err_t
+ib_close_thd(
+/*=========*/
+	void*		thd)	/*!< in: handle to the MySQL thread of the user
+				whose resources should be free'd */
+{
+	innobase_close_thd(static_cast<THD*>(thd));
+
+	return(DB_SUCCESS);
+}
+
+/*****************************************************************//**
+Return isolation configuration set by "innodb_api_trx_level"
+@return trx isolation level */
+UNIV_INTERN
+ib_trx_state_t
+ib_cfg_trx_level()
+/*==============*/
+{
+	return(static_cast<ib_trx_state_t>(ib_trx_level_setting));
+}
+
+/*****************************************************************//**
+Return configured value for the background commit interval (in
+seconds), as set by "innodb_api_bk_commit_interval".
+@return background commit interval (in seconds) */
+UNIV_INTERN
+ib_ulint_t
+ib_cfg_bk_commit_interval()
+/*=======================*/
+{
+	return(static_cast<ib_ulint_t>(ib_bk_commit_interval));
+}
+
+/*****************************************************************//**
+Get generic configuration status as a bitmask of IB_CFG_* flags
+(binlog enabled, MDL enabled, row locks disabled).
+@return configure status */
+UNIV_INTERN
+int
+ib_cfg_get_cfg()
+/*============*/
+{
+	int	cfg_status;
+
+	cfg_status = (ib_binlog_enabled) ? IB_CFG_BINLOG_ENABLED : 0;
+
+	if (ib_mdl_enabled) {
+		cfg_status |= IB_CFG_MDL_ENABLED;
+	}
+
+	if (ib_disable_row_lock) {
+		cfg_status |= IB_CFG_DISABLE_ROWLOCK;
+	}
+
+	return(cfg_status);
+}
diff --git a/storage/innobase/api/api0misc.cc b/storage/innobase/api/api0misc.cc
new file mode 100644
index 00000000000..b2370105938
--- /dev/null
+++ b/storage/innobase/api/api0misc.cc
@@ -0,0 +1,206 @@
+/*****************************************************************************
+
+Copyright (c) 2008, 2012, Oracle and/or its affiliates. All Rights Reserved.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
+
+*****************************************************************************/
+
+/**************************************************//**
+@file api/api0misc.cc
+InnoDB Native API
+
+2008-08-01 Created by Sunny Bains
+3/20/2011 Jimmy Yang extracted from Embedded InnoDB
+*******************************************************/
+
+#include <errno.h>
+
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif /* HAVE_UNISTD_H */
+
+#include "api0misc.h"
+#include "trx0roll.h"
+#include "srv0srv.h"
+#include "dict0mem.h"
+#include "dict0dict.h"
+#include "pars0pars.h"
+#include "row0sel.h"
+#include "lock0lock.h"
+#include "ha_prototypes.h"
+#include <m_ctype.h>
+#include <mysys_err.h>
+#include <mysql/plugin.h>
+
+/*********************************************************************//**
+Sets a lock on a table, retrying as long as ib_handle_errors() reports
+a resolvable lock wait. Builds a dummy SELECT query graph on a local
+heap because the lock module requires a query thread; the graph is
+freed before returning.
+@return error code or DB_SUCCESS */
+UNIV_INTERN
+dberr_t
+ib_trx_lock_table_with_retry(
+/*=========================*/
+	trx_t*		trx,		/*!< in/out: transaction */
+	dict_table_t*	table,		/*!< in: table to lock */
+	enum lock_mode	mode)		/*!< in: LOCK_X or LOCK_S */
+{
+	que_thr_t*	thr;
+	dberr_t		err;
+	mem_heap_t*	heap;
+	sel_node_t*	node;
+
+	heap = mem_heap_create(512);
+
+	trx->op_info = "setting table lock";
+
+	node = sel_node_create(heap);
+	thr = pars_complete_graph_for_exec(node, trx, heap);
+	thr->graph->state = QUE_FORK_ACTIVE;
+
+	/* We use the select query graph as the dummy graph needed
+	in the lock module call */
+
+	thr = que_fork_get_first_thr(static_cast<que_fork_t*>(
+			que_node_get_parent(thr)));
+	que_thr_move_to_run_state_for_mysql(thr, trx);
+
+run_again:
+	thr->run_node = thr;
+	thr->prev_node = thr->common.parent;
+
+	err = lock_table(0, table, mode, thr);
+
+	trx->error_state = err;
+
+	if (UNIV_LIKELY(err == DB_SUCCESS)) {
+		que_thr_stop_for_mysql_no_error(thr, trx);
+	} else {
+		que_thr_stop_for_mysql(thr);
+
+		if (err != DB_QUE_THR_SUSPENDED) {
+			ibool	was_lock_wait;
+
+			/* Retry if the failure was a lock wait that has
+			since been resolved. */
+			was_lock_wait = ib_handle_errors(&err, trx, thr, NULL);
+
+			if (was_lock_wait) {
+				goto run_again;
+			}
+		} else {
+			que_thr_t*	run_thr;
+			que_node_t*	parent;
+
+			parent = que_node_get_parent(thr);
+			run_thr = que_fork_start_command(
+				static_cast<que_fork_t*>(parent));
+
+			ut_a(run_thr == thr);
+
+			/* There was a lock wait but the thread was not
+			in a ready to run or running state. */
+			trx->error_state = DB_LOCK_WAIT;
+
+			goto run_again;
+		}
+	}
+
+	que_graph_free(thr->graph);
+	trx->op_info = "";
+
+	return(err);
+}
+/****************************************************************//**
+Handles user errors and lock waits detected by the database engine.
+Clears trx->error_state, rolls back as required by the error class,
+and for DB_LOCK_WAIT suspends the thread until the wait resolves.
+@return TRUE if it was a lock wait and we should continue running
+the query thread */
+UNIV_INTERN
+ibool
+ib_handle_errors(
+/*=============*/
+	dberr_t*	new_err,/*!< out: possible new error encountered in
+				lock wait, or if no new error, the value
+				of trx->error_state at the entry of this
+				function */
+	trx_t*		trx,	/*!< in: transaction */
+	que_thr_t*	thr,	/*!< in: query thread */
+	trx_savept_t*	savept)	/*!< in: savepoint or NULL */
+{
+	dberr_t		err;
+handle_new_error:
+	err = trx->error_state;
+
+	ut_a(err != DB_SUCCESS);
+
+	trx->error_state = DB_SUCCESS;
+
+	switch (err) {
+	case DB_LOCK_WAIT_TIMEOUT:
+		/* Timeout rolls back the whole transaction. */
+		trx_rollback_for_mysql(trx);
+		break;
+		/* NOTE(review): the "fall through" comment that stood here
+		was stale — the unconditional break above makes fall-through
+		into the next cases impossible. */
+	case DB_DUPLICATE_KEY:
+	case DB_FOREIGN_DUPLICATE_KEY:
+	case DB_TOO_BIG_RECORD:
+	case DB_ROW_IS_REFERENCED:
+	case DB_NO_REFERENCED_ROW:
+	case DB_CANNOT_ADD_CONSTRAINT:
+	case DB_TOO_MANY_CONCURRENT_TRXS:
+	case DB_OUT_OF_FILE_SPACE:
+		if (savept) {
+			/* Roll back the latest, possibly incomplete
+			insertion or update */
+
+			trx_rollback_to_savepoint(trx, savept);
+		}
+		break;
+	case DB_LOCK_WAIT:
+		/* Suspend until the lock wait ends, then re-examine the
+		transaction's error state. */
+		lock_wait_suspend_thread(thr);
+
+		if (trx->error_state != DB_SUCCESS) {
+			que_thr_stop_for_mysql(thr);
+
+			goto handle_new_error;
+		}
+
+		*new_err = err;
+
+		return(TRUE); /* Operation needs to be retried. */
+
+	case DB_DEADLOCK:
+	case DB_LOCK_TABLE_FULL:
+		/* Roll back the whole transaction; this resolution was added
+		to version 3.23.43 */
+
+		trx_rollback_for_mysql(trx);
+		break;
+
+	case DB_MUST_GET_MORE_FILE_SPACE:
+		/* Fatal: the server cannot continue without more space. */
+		exit(1);
+
+	case DB_CORRUPTION:
+	case DB_FOREIGN_EXCEED_MAX_CASCADE:
+		break;
+	default:
+		ut_error;
+	}
+
+	if (trx->error_state != DB_SUCCESS) {
+		*new_err = trx->error_state;
+	} else {
+		*new_err = err;
+	}
+
+	trx->error_state = DB_SUCCESS;
+
+	return(FALSE);
+}
diff --git a/storage/innobase/btr/btr0btr.cc b/storage/innobase/btr/btr0btr.cc
index 8eae3c7e3bc..e3e127c3ace 100644
--- a/storage/innobase/btr/btr0btr.cc
+++ b/storage/innobase/btr/btr0btr.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1994, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2012, Facebook Inc.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -697,14 +698,16 @@ btr_root_fseg_validate(
#endif /* UNIV_BTR_DEBUG */
/**************************************************************//**
-Gets the root node of a tree and x-latches it.
-@return root page, x-latched */
+Gets the root node of a tree and x- or s-latches it.
+@return root page, x- or s-latched */
static
buf_block_t*
btr_root_block_get(
/*===============*/
- dict_index_t* index, /*!< in: index tree */
- mtr_t* mtr) /*!< in: mtr */
+ const dict_index_t* index, /*!< in: index tree */
+ ulint mode, /*!< in: either RW_S_LATCH
+ or RW_X_LATCH */
+ mtr_t* mtr) /*!< in: mtr */
{
ulint space;
ulint zip_size;
@@ -715,8 +718,7 @@ btr_root_block_get(
zip_size = dict_table_zip_size(index->table);
root_page_no = dict_index_get_page(index);
- block = btr_block_get(space, zip_size, root_page_no, RW_X_LATCH,
- index, mtr);
+ block = btr_block_get(space, zip_size, root_page_no, mode, index, mtr);
btr_assert_not_corrupted(block, index);
#ifdef UNIV_BTR_DEBUG
if (!dict_index_is_ibuf(index)) {
@@ -739,10 +741,162 @@ UNIV_INTERN
page_t*
btr_root_get(
/*=========*/
+ const dict_index_t* index, /*!< in: index tree */
+ mtr_t* mtr) /*!< in: mtr */
+{
+ return(buf_block_get_frame(btr_root_block_get(index, RW_X_LATCH,
+ mtr)));
+}
+
+/**************************************************************//**
+Gets the height of the B-tree (the level of the root, when the leaf
+level is assumed to be 0). The caller must hold an S or X latch on
+the index.
+@return tree height (level of the root) */
+UNIV_INTERN
+ulint
+btr_height_get(
+/*===========*/
dict_index_t* index, /*!< in: index tree */
- mtr_t* mtr) /*!< in: mtr */
+ mtr_t* mtr) /*!< in/out: mini-transaction */
+{
+ ulint height;
+ buf_block_t* root_block;
+
+ ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index),
+ MTR_MEMO_S_LOCK)
+ || mtr_memo_contains(mtr, dict_index_get_lock(index),
+ MTR_MEMO_X_LOCK));
+
+ /* S latches the page */
+ root_block = btr_root_block_get(index, RW_S_LATCH, mtr);
+
+ height = btr_page_get_level(buf_block_get_frame(root_block), mtr);
+
+ /* Release the S latch on the root page. */
+ mtr_memo_release(mtr, root_block, MTR_MEMO_PAGE_S_FIX);
+#ifdef UNIV_SYNC_DEBUG
+ sync_thread_reset_level(&root_block->lock);
+#endif /* UNIV_SYNC_DEBUG */
+
+ return(height);
+}
+
+/**************************************************************//**
+Checks a file segment header within a B-tree root page and updates
+the segment header space id to the given tablespace id. Writes via
+the compressed-page header for zip pages, or a redo-logged write
+otherwise.
+NOTE(review): the return type is bool but the values used are the
+TRUE/FALSE macros — harmless, but inconsistent with the declaration.
+@return TRUE if valid */
+static
+bool
+btr_root_fseg_adjust_on_import(
+/*===========================*/
+	fseg_header_t*	seg_header,	/*!< in/out: segment header */
+	page_zip_des_t*	page_zip,	/*!< in/out: compressed page,
+					or NULL */
+	ulint		space,		/*!< in: tablespace identifier */
+	mtr_t*		mtr)		/*!< in/out: mini-transaction */
+{
+	ulint	offset = mach_read_from_2(seg_header + FSEG_HDR_OFFSET);
+
+	/* Reject segment header offsets outside the usable page area. */
+	if (offset < FIL_PAGE_DATA
+	    || offset > UNIV_PAGE_SIZE - FIL_PAGE_DATA_END) {
+
+		return(FALSE);
+
+	} else if (page_zip) {
+		mach_write_to_4(seg_header + FSEG_HDR_SPACE, space);
+		page_zip_write_header(page_zip, seg_header + FSEG_HDR_SPACE,
+				      4, mtr);
+	} else {
+		mlog_write_ulint(seg_header + FSEG_HDR_SPACE,
+				 space, MLOG_4BYTES, mtr);
+	}
+
+	return(TRUE);
+}
+
+/**************************************************************//**
+Checks and adjusts the root node of a tree during IMPORT TABLESPACE.
+@return error code, or DB_SUCCESS */
+UNIV_INTERN
+dberr_t
+btr_root_adjust_on_import(
+/*======================*/
+ const dict_index_t* index) /*!< in: index tree */
{
- return(buf_block_get_frame(btr_root_block_get(index, mtr)));
+ dberr_t err;
+ mtr_t mtr;
+ page_t* page;
+ buf_block_t* block;
+ page_zip_des_t* page_zip;
+ dict_table_t* table = index->table;
+ ulint space_id = dict_index_get_space(index);
+ ulint zip_size = dict_table_zip_size(table);
+ ulint root_page_no = dict_index_get_page(index);
+
+ mtr_start(&mtr);
+
+ mtr_set_log_mode(&mtr, MTR_LOG_NO_REDO);
+
+ DBUG_EXECUTE_IF("ib_import_trigger_corruption_3",
+ return(DB_CORRUPTION););
+
+ block = btr_block_get(
+ space_id, zip_size, root_page_no, RW_X_LATCH, index, &mtr);
+
+ page = buf_block_get_frame(block);
+ page_zip = buf_block_get_page_zip(block);
+
+ /* Check that this is a B-tree page and both the PREV and NEXT
+ pointers are FIL_NULL, because the root page does not have any
+ siblings. */
+ if (fil_page_get_type(page) != FIL_PAGE_INDEX
+ || fil_page_get_prev(page) != FIL_NULL
+ || fil_page_get_next(page) != FIL_NULL) {
+
+ err = DB_CORRUPTION;
+
+ } else if (dict_index_is_clust(index)) {
+ bool page_is_compact_format;
+
+ page_is_compact_format = page_is_comp(page) > 0;
+
+ /* Check if the page format and table format agree. */
+ if (page_is_compact_format != dict_table_is_comp(table)) {
+ err = DB_CORRUPTION;
+ } else {
+
+ /* Check that the table flags and the tablespace
+ flags match. */
+ ulint flags = fil_space_get_flags(table->space);
+
+ if (flags
+ && flags != dict_tf_to_fsp_flags(table->flags)) {
+
+ err = DB_CORRUPTION;
+ } else {
+ err = DB_SUCCESS;
+ }
+ }
+ } else {
+ err = DB_SUCCESS;
+ }
+
+ /* Check and adjust the file segment headers, if all OK so far. */
+ if (err == DB_SUCCESS
+ && (!btr_root_fseg_adjust_on_import(
+ FIL_PAGE_DATA + PAGE_BTR_SEG_LEAF
+ + page, page_zip, space_id, &mtr)
+ || !btr_root_fseg_adjust_on_import(
+ FIL_PAGE_DATA + PAGE_BTR_SEG_TOP
+ + page, page_zip, space_id, &mtr))) {
+
+ err = DB_CORRUPTION;
+ }
+
+ mtr_commit(&mtr);
+
+ return(err);
}
/*************************************************************//**
@@ -1033,8 +1187,7 @@ btr_get_size(
ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index),
MTR_MEMO_S_LOCK));
- if (index->page == FIL_NULL
- || index->to_be_dropped
+ if (index->page == FIL_NULL || dict_index_is_online_ddl(index)
|| *index->name == TEMP_INDEX_PREFIX) {
return(ULINT_UNDEFINED);
}
@@ -1584,6 +1737,8 @@ btr_page_reorganize_low(
there cannot exist locks on the
page, and a hash index should not be
dropped: it cannot exist */
+ ulint compression_level,/*!< in: compression level to be used
+ if dealing with compressed page */
buf_block_t* block, /*!< in: page to be reorganized */
dict_index_t* index, /*!< in: record descriptor */
mtr_t* mtr) /*!< in: mtr */
@@ -1601,6 +1756,8 @@ btr_page_reorganize_low(
ulint max_ins_size1;
ulint max_ins_size2;
ibool success = FALSE;
+ byte type;
+ byte* log_ptr;
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
btr_assert_not_corrupted(block, index);
@@ -1612,9 +1769,23 @@ btr_page_reorganize_low(
#ifndef UNIV_HOTBACKUP
/* Write the log record */
- mlog_open_and_write_index(mtr, page, index, page_is_comp(page)
- ? MLOG_COMP_PAGE_REORGANIZE
- : MLOG_PAGE_REORGANIZE, 0);
+ if (page_zip) {
+ type = MLOG_ZIP_PAGE_REORGANIZE;
+ } else if (page_is_comp(page)) {
+ type = MLOG_COMP_PAGE_REORGANIZE;
+ } else {
+ type = MLOG_PAGE_REORGANIZE;
+ }
+
+ log_ptr = mlog_open_and_write_index(
+ mtr, page, index, type, page_zip ? 1 : 0);
+
+ /* For compressed pages write the compression level. */
+ if (log_ptr && page_zip) {
+ mach_write_to_1(log_ptr, compression_level);
+ mlog_close(mtr, log_ptr + 1);
+ }
+
#endif /* !UNIV_HOTBACKUP */
/* Turn logging off */
@@ -1662,7 +1833,9 @@ btr_page_reorganize_low(
ut_ad(max_trx_id != 0 || recovery);
}
- if (page_zip && !page_zip_compress(page_zip, page, index, NULL)) {
+ if (page_zip
+ && !page_zip_compress(page_zip, page, index,
+ compression_level, NULL)) {
/* Restore the old page and exit. */
btr_blob_dbg_restore(page, temp_page, index,
@@ -1750,7 +1923,8 @@ btr_page_reorganize(
dict_index_t* index, /*!< in: record descriptor */
mtr_t* mtr) /*!< in: mtr */
{
- return(btr_page_reorganize_low(FALSE, block, index, mtr));
+ return(btr_page_reorganize_low(FALSE, page_compression_level,
+ block, index, mtr));
}
#endif /* !UNIV_HOTBACKUP */
@@ -1762,18 +1936,32 @@ byte*
btr_parse_page_reorganize(
/*======================*/
byte* ptr, /*!< in: buffer */
- byte* end_ptr __attribute__((unused)),
- /*!< in: buffer end */
+ byte* end_ptr,/*!< in: buffer end */
dict_index_t* index, /*!< in: record descriptor */
+ bool compressed,/*!< in: true if compressed page */
buf_block_t* block, /*!< in: page to be reorganized, or NULL */
mtr_t* mtr) /*!< in: mtr or NULL */
{
+ ulint level = page_compression_level;
+
ut_ad(ptr && end_ptr);
- /* The record is empty, except for the record initial part */
+ /* If dealing with a compressed page the record has the
+ compression level used during original compression written in
+ one byte. Otherwise record is empty. */
+ if (compressed) {
+ if (ptr == end_ptr) {
+ return(NULL);
+ }
+
+ level = (ulint)mach_read_from_1(ptr);
+
+ ut_a(level <= 9);
+ ++ptr;
+ }
if (block != NULL) {
- btr_page_reorganize_low(TRUE, block, index, mtr);
+ btr_page_reorganize_low(TRUE, level, block, index, mtr);
}
return(ptr);
@@ -1827,10 +2015,13 @@ UNIV_INTERN
rec_t*
btr_root_raise_and_insert(
/*======================*/
+ ulint flags, /*!< in: undo logging and locking flags */
btr_cur_t* cursor, /*!< in: cursor at which to insert: must be
on the root page; when the function returns,
the cursor is positioned on the predecessor
of the inserted record */
+ ulint** offsets,/*!< out: offsets on inserted record */
+ mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */
const dtuple_t* tuple, /*!< in: tuple to insert */
ulint n_ext, /*!< in: number of externally stored columns */
mtr_t* mtr) /*!< in: mtr */
@@ -1840,7 +2031,6 @@ btr_root_raise_and_insert(
page_t* new_page;
ulint new_page_no;
rec_t* rec;
- mem_heap_t* heap;
dtuple_t* node_ptr;
ulint level;
rec_t* node_ptr_rec;
@@ -1926,7 +2116,9 @@ btr_root_raise_and_insert(
lock_update_root_raise(new_block, root_block);
/* Create a memory heap where the node pointer is stored */
- heap = mem_heap_create(100);
+ if (!*heap) {
+ *heap = mem_heap_create(1000);
+ }
rec = page_rec_get_next(page_get_infimum_rec(new_page));
new_page_no = buf_block_get_page_no(new_block);
@@ -1934,8 +2126,8 @@ btr_root_raise_and_insert(
/* Build the node pointer (= node key and page address) for the
child */
- node_ptr = dict_index_build_node_ptr(index, rec, new_page_no, heap,
- level);
+ node_ptr = dict_index_build_node_ptr(
+ index, rec, new_page_no, *heap, level);
/* The node pointer must be marked as the predefined minimum record,
as there is no lower alphabetical limit to records in the leftmost
node of a level: */
@@ -1961,15 +2153,12 @@ btr_root_raise_and_insert(
page_cur_set_before_first(root_block, page_cursor);
node_ptr_rec = page_cur_tuple_insert(page_cursor, node_ptr,
- index, 0, mtr);
+ index, offsets, heap, 0, mtr);
/* The root page should only contain the node pointer
to new_page at this point. Thus, the data should fit. */
ut_a(node_ptr_rec);
- /* Free the memory heap */
- mem_heap_free(heap);
-
/* We play safe and reset the free bits for the new page */
#if 0
@@ -1985,7 +2174,8 @@ btr_root_raise_and_insert(
PAGE_CUR_LE, page_cursor);
/* Split the child and insert tuple */
- return(btr_page_split_and_insert(cursor, tuple, n_ext, mtr));
+ return(btr_page_split_and_insert(flags, cursor, offsets, heap,
+ tuple, n_ext, mtr));
}
/*************************************************************//**
@@ -2213,9 +2403,9 @@ func_exit:
/*************************************************************//**
Returns TRUE if the insert fits on the appropriate half-page with the
chosen split_rec.
-@return TRUE if fits */
-static
-ibool
+@return true if fits */
+static __attribute__((nonnull(1,3,4,6), warn_unused_result))
+bool
btr_page_insert_fits(
/*=================*/
btr_cur_t* cursor, /*!< in: cursor at which insert
@@ -2223,11 +2413,11 @@ btr_page_insert_fits(
const rec_t* split_rec,/*!< in: suggestion for first record
on upper half-page, or NULL if
tuple to be inserted should be first */
- const ulint* offsets,/*!< in: rec_get_offsets(
- split_rec, cursor->index) */
+ ulint** offsets,/*!< in: rec_get_offsets(
+ split_rec, cursor->index); out: garbage */
const dtuple_t* tuple, /*!< in: tuple to insert */
ulint n_ext, /*!< in: number of externally stored columns */
- mem_heap_t* heap) /*!< in: temporary memory heap */
+ mem_heap_t** heap) /*!< in: temporary memory heap */
{
page_t* page;
ulint insert_size;
@@ -2236,15 +2426,13 @@ btr_page_insert_fits(
ulint total_n_recs;
const rec_t* rec;
const rec_t* end_rec;
- ulint* offs;
page = btr_cur_get_page(cursor);
- ut_ad(!split_rec == !offsets);
- ut_ad(!offsets
- || !page_is_comp(page) == !rec_offs_comp(offsets));
- ut_ad(!offsets
- || rec_offs_validate(split_rec, cursor->index, offsets));
+ ut_ad(!split_rec
+ || !page_is_comp(page) == !rec_offs_comp(*offsets));
+ ut_ad(!split_rec
+ || rec_offs_validate(split_rec, cursor->index, *offsets));
insert_size = rec_get_converted_size(cursor->index, tuple, n_ext);
free_space = page_get_free_space_of_empty(page_is_comp(page));
@@ -2262,7 +2450,7 @@ btr_page_insert_fits(
rec = page_rec_get_next(page_get_infimum_rec(page));
end_rec = page_rec_get_next(btr_cur_get_rec(cursor));
- } else if (cmp_dtuple_rec(tuple, split_rec, offsets) >= 0) {
+ } else if (cmp_dtuple_rec(tuple, split_rec, *offsets) >= 0) {
rec = page_rec_get_next(page_get_infimum_rec(page));
end_rec = split_rec;
@@ -2277,19 +2465,17 @@ btr_page_insert_fits(
/* Ok, there will be enough available space on the
half page where the tuple is inserted */
- return(TRUE);
+ return(true);
}
- offs = NULL;
-
while (rec != end_rec) {
/* In this loop we calculate the amount of reserved
space after rec is removed from page. */
- offs = rec_get_offsets(rec, cursor->index, offs,
- ULINT_UNDEFINED, &heap);
+ *offsets = rec_get_offsets(rec, cursor->index, *offsets,
+ ULINT_UNDEFINED, heap);
- total_data -= rec_offs_size(offs);
+ total_data -= rec_offs_size(*offsets);
total_n_recs--;
if (total_data + page_dir_calc_reserved_space(total_n_recs)
@@ -2298,13 +2484,13 @@ btr_page_insert_fits(
/* Ok, there will be enough available space on the
half page where the tuple is inserted */
- return(TRUE);
+ return(true);
}
rec = page_rec_get_next_const(rec);
}
- return(FALSE);
+ return(false);
}
/*******************************************************//**
@@ -2314,6 +2500,7 @@ UNIV_INTERN
void
btr_insert_on_non_leaf_level_func(
/*==============================*/
+ ulint flags, /*!< in: undo logging and locking flags */
dict_index_t* index, /*!< in: index */
ulint level, /*!< in: level, must be > 0 */
dtuple_t* tuple, /*!< in: the record to be inserted */
@@ -2323,8 +2510,10 @@ btr_insert_on_non_leaf_level_func(
{
big_rec_t* dummy_big_rec;
btr_cur_t cursor;
- ulint err;
+ dberr_t err;
rec_t* rec;
+ ulint* offsets = NULL;
+ mem_heap_t* heap = NULL;
ut_ad(level > 0);
@@ -2335,26 +2524,35 @@ btr_insert_on_non_leaf_level_func(
ut_ad(cursor.flag == BTR_CUR_BINARY);
err = btr_cur_optimistic_insert(
- BTR_NO_LOCKING_FLAG | BTR_KEEP_SYS_FLAG
- | BTR_NO_UNDO_LOG_FLAG, &cursor, tuple, &rec,
- &dummy_big_rec, 0, NULL, mtr);
+ flags
+ | BTR_NO_LOCKING_FLAG
+ | BTR_KEEP_SYS_FLAG
+ | BTR_NO_UNDO_LOG_FLAG,
+ &cursor, &offsets, &heap,
+ tuple, &rec, &dummy_big_rec, 0, NULL, mtr);
if (err == DB_FAIL) {
- err = btr_cur_pessimistic_insert(
- BTR_NO_LOCKING_FLAG | BTR_KEEP_SYS_FLAG
- | BTR_NO_UNDO_LOG_FLAG,
- &cursor, tuple, &rec, &dummy_big_rec, 0, NULL, mtr);
+ err = btr_cur_pessimistic_insert(flags
+ | BTR_NO_LOCKING_FLAG
+ | BTR_KEEP_SYS_FLAG
+ | BTR_NO_UNDO_LOG_FLAG,
+ &cursor, &offsets, &heap,
+ tuple, &rec,
+ &dummy_big_rec, 0, NULL, mtr);
ut_a(err == DB_SUCCESS);
}
+ mem_heap_free(heap);
}
/**************************************************************//**
Attaches the halves of an index page on the appropriate level in an
index tree. */
-static
+static __attribute__((nonnull))
void
btr_attach_half_pages(
/*==================*/
+ ulint flags, /*!< in: undo logging and
+ locking flags */
dict_index_t* index, /*!< in: the index tree */
buf_block_t* block, /*!< in/out: page to be split */
const rec_t* split_rec, /*!< in: first record on upper
@@ -2432,7 +2630,8 @@ btr_attach_half_pages(
/* Insert it next to the pointer to the lower half. Note that this
may generate recursion leading to a split on the higher level. */
- btr_insert_on_non_leaf_level(index, level + 1, node_ptr_upper, mtr);
+ btr_insert_on_non_leaf_level(flags, index, level + 1,
+ node_ptr_upper, mtr);
/* Free the memory heap */
mem_heap_free(heap);
@@ -2484,13 +2683,13 @@ btr_attach_half_pages(
/*************************************************************//**
Determine if a tuple is smaller than any record on the page.
@return TRUE if smaller */
-static
-ibool
+static __attribute__((nonnull, warn_unused_result))
+bool
btr_page_tuple_smaller(
/*===================*/
btr_cur_t* cursor, /*!< in: b-tree cursor */
const dtuple_t* tuple, /*!< in: tuple to consider */
- ulint* offsets,/*!< in/out: temporary storage */
+ ulint** offsets,/*!< in/out: temporary storage */
ulint n_uniq, /*!< in: number of unique fields
in the index page records */
mem_heap_t** heap) /*!< in/out: heap for offsets */
@@ -2505,11 +2704,11 @@ btr_page_tuple_smaller(
page_cur_move_to_next(&pcur);
first_rec = page_cur_get_rec(&pcur);
- offsets = rec_get_offsets(
- first_rec, cursor->index, offsets,
+ *offsets = rec_get_offsets(
+ first_rec, cursor->index, *offsets,
n_uniq, heap);
- return(cmp_dtuple_rec(tuple, first_rec, offsets) < 0);
+ return(cmp_dtuple_rec(tuple, first_rec, *offsets) < 0);
}
/*************************************************************//**
@@ -2525,9 +2724,12 @@ UNIV_INTERN
rec_t*
btr_page_split_and_insert(
/*======================*/
+ ulint flags, /*!< in: undo logging and locking flags */
btr_cur_t* cursor, /*!< in: cursor at which to insert; when the
function returns, the cursor is positioned
on the predecessor of the inserted record */
+ ulint** offsets,/*!< out: offsets on inserted record */
+ mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */
const dtuple_t* tuple, /*!< in: tuple to insert */
ulint n_ext, /*!< in: number of externally stored columns */
mtr_t* mtr) /*!< in: mtr */
@@ -2553,18 +2755,21 @@ btr_page_split_and_insert(
ibool insert_left;
ulint n_iterations = 0;
rec_t* rec;
- mem_heap_t* heap;
ulint n_uniq;
- ulint* offsets;
- heap = mem_heap_create(1024);
+ if (!*heap) {
+ *heap = mem_heap_create(1024);
+ }
n_uniq = dict_index_get_n_unique_in_tree(cursor->index);
func_start:
- mem_heap_empty(heap);
- offsets = NULL;
+ mem_heap_empty(*heap);
+ *offsets = NULL;
ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(cursor->index),
MTR_MEMO_X_LOCK));
+ ut_ad(!dict_index_is_online_ddl(cursor->index)
+ || (flags & BTR_CREATE_FLAG)
+ || dict_index_is_clust(cursor->index));
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(dict_index_get_lock(cursor->index), RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
@@ -2590,7 +2795,7 @@ func_start:
if (split_rec == NULL) {
insert_left = btr_page_tuple_smaller(
- cursor, tuple, offsets, n_uniq, &heap);
+ cursor, tuple, offsets, n_uniq, heap);
}
} else if (btr_page_get_split_rec_to_right(cursor, &split_rec)) {
direction = FSP_UP;
@@ -2612,7 +2817,7 @@ func_start:
if (page_get_n_recs(page) > 1) {
split_rec = page_get_middle_rec(page);
} else if (btr_page_tuple_smaller(cursor, tuple,
- offsets, n_uniq, &heap)) {
+ offsets, n_uniq, heap)) {
split_rec = page_rec_get_next(
page_get_infimum_rec(page));
} else {
@@ -2635,10 +2840,10 @@ func_start:
if (split_rec) {
first_rec = move_limit = split_rec;
- offsets = rec_get_offsets(split_rec, cursor->index, offsets,
- n_uniq, &heap);
+ *offsets = rec_get_offsets(split_rec, cursor->index, *offsets,
+ n_uniq, heap);
- insert_left = cmp_dtuple_rec(tuple, split_rec, offsets) < 0;
+ insert_left = cmp_dtuple_rec(tuple, split_rec, *offsets) < 0;
if (!insert_left && new_page_zip && n_iterations > 0) {
/* If a compressed page has already been split,
@@ -2665,7 +2870,7 @@ insert_empty:
/* 4. Do first the modifications in the tree structure */
- btr_attach_half_pages(cursor->index, block,
+ btr_attach_half_pages(flags, cursor->index, block,
first_rec, new_block, direction, mtr);
/* If the split is made on the leaf level and the insert will fit
@@ -2685,10 +2890,11 @@ insert_empty:
insert_will_fit = !new_page_zip
&& btr_page_insert_fits(cursor, NULL,
- NULL, tuple, n_ext, heap);
+ offsets, tuple, n_ext, heap);
}
- if (insert_will_fit && page_is_leaf(page)) {
+ if (insert_will_fit && page_is_leaf(page)
+ && !dict_index_is_online_ddl(cursor->index)) {
mtr_memo_release(mtr, dict_index_get_lock(cursor->index),
MTR_MEMO_X_LOCK);
@@ -2805,8 +3011,8 @@ insert_empty:
page_cur_search(insert_block, cursor->index, tuple,
PAGE_CUR_LE, page_cursor);
- rec = page_cur_tuple_insert(page_cursor, tuple,
- cursor->index, n_ext, mtr);
+ rec = page_cur_tuple_insert(page_cursor, tuple, cursor->index,
+ offsets, heap, n_ext, mtr);
#ifdef UNIV_ZIP_DEBUG
{
@@ -2837,7 +3043,7 @@ insert_empty:
page_cur_search(insert_block, cursor->index, tuple,
PAGE_CUR_LE, page_cursor);
rec = page_cur_tuple_insert(page_cursor, tuple, cursor->index,
- n_ext, mtr);
+ offsets, heap, n_ext, mtr);
if (rec == NULL) {
/* The insert did not fit on the page: loop back to the
@@ -2878,7 +3084,7 @@ func_exit:
ut_ad(page_validate(buf_block_get_frame(left_block), cursor->index));
ut_ad(page_validate(buf_block_get_frame(right_block), cursor->index));
- mem_heap_free(heap);
+ ut_ad(!rec || rec_offs_validate(rec, cursor->index, *offsets));
return(rec);
}
@@ -3058,15 +3264,15 @@ btr_node_ptr_delete(
{
btr_cur_t cursor;
ibool compressed;
- ulint err;
+ dberr_t err;
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
/* Delete node pointer on father page */
btr_page_get_father(index, block, mtr, &cursor);
- compressed = btr_cur_pessimistic_delete(&err, TRUE, &cursor, RB_NONE,
- mtr);
+ compressed = btr_cur_pessimistic_delete(&err, TRUE, &cursor,
+ BTR_CREATE_FLAG, RB_NONE, mtr);
ut_a(err == DB_SUCCESS);
if (!compressed) {
@@ -3098,7 +3304,7 @@ btr_lift_page_up(
buf_block_t* blocks[BTR_MAX_LEVELS];
ulint n_blocks; /*!< last used index in blocks[] */
ulint i;
- ibool lift_father_up = FALSE;
+ bool lift_father_up;
buf_block_t* block_orig = block;
ut_ad(btr_page_get_prev(page, mtr) == FIL_NULL);
@@ -3140,7 +3346,8 @@ btr_lift_page_up(
blocks[n_blocks++] = b = btr_cur_get_block(&cursor);
}
- if (n_blocks && page_level == 0) {
+ lift_father_up = (n_blocks && page_level == 0);
+ if (lift_father_up) {
/* The father page also should be the only on its level (not
root). We should lift up the father page at first.
Because the leaf page should be lifted up only for root page.
@@ -3149,7 +3356,6 @@ btr_lift_page_up(
later freeing of the page doesn't find the page allocation
to be freed.*/
- lift_father_up = TRUE;
block = father_block;
page = buf_block_get_frame(block);
page_level = btr_page_get_level(page, mtr);
@@ -3295,6 +3501,7 @@ btr_compress(
if (adjust) {
nth_rec = page_rec_get_n_recs_before(btr_cur_get_rec(cursor));
+ ut_ad(nth_rec > 0);
}
/* Decide the page to which we try to merge and which will inherit
@@ -3351,6 +3558,16 @@ err_exit:
return(FALSE);
}
+ /* If compression padding tells us that merging will result in
+ too packed up page i.e.: which is likely to cause compression
+ failure then don't merge the pages. */
+ if (zip_size && page_is_leaf(merge_page)
+ && (page_get_data_size(merge_page) + data_size
+ >= dict_index_zip_pad_optimal_page_size(index))) {
+
+ goto err_exit;
+ }
+
ut_ad(page_validate(merge_page, index));
max_ins_size = page_get_max_insert_size(merge_page, n_recs);
@@ -3530,6 +3747,7 @@ func_exit:
mem_heap_free(heap);
if (adjust) {
+ ut_ad(nth_rec > 0);
btr_cur_position(
index,
page_rec_get_nth(merge_block->frame, nth_rec),
@@ -3846,7 +4064,7 @@ btr_print_index(
mtr_start(&mtr);
- root = btr_root_block_get(index, &mtr);
+ root = btr_root_block_get(index, RW_X_LATCH, &mtr);
btr_print_recursive(index, root, width, &heap, &offsets, &mtr);
if (heap) {
@@ -3855,7 +4073,7 @@ btr_print_index(
mtr_commit(&mtr);
- btr_validate_index(index, NULL);
+ btr_validate_index(index, 0);
}
#endif /* UNIV_BTR_PRINT */
@@ -4041,8 +4259,22 @@ btr_index_page_validate(
{
page_cur_t cur;
ibool ret = TRUE;
+#ifndef DBUG_OFF
+ ulint nth = 1;
+#endif /* !DBUG_OFF */
page_cur_set_before_first(block, &cur);
+
+ /* Directory slot 0 should only contain the infimum record. */
+ DBUG_EXECUTE_IF("check_table_rec_next",
+ ut_a(page_rec_get_nth_const(
+ page_cur_get_page(&cur), 0)
+ == cur.rec);
+ ut_a(page_dir_slot_get_n_owned(
+ page_dir_get_nth_slot(
+ page_cur_get_page(&cur), 0))
+ == 1););
+
page_cur_move_to_next(&cur);
for (;;) {
@@ -4056,6 +4288,16 @@ btr_index_page_validate(
return(FALSE);
}
+ /* Verify that page_rec_get_nth_const() is correctly
+ retrieving each record. */
+ DBUG_EXECUTE_IF("check_table_rec_next",
+ ut_a(cur.rec == page_rec_get_nth_const(
+ page_cur_get_page(&cur),
+ page_rec_get_n_recs_before(
+ cur.rec)));
+ ut_a(nth++ == page_rec_get_n_recs_before(
+ cur.rec)););
+
page_cur_move_to_next(&cur);
}
@@ -4106,14 +4348,15 @@ btr_validate_report2(
Validates index tree level.
@return TRUE if ok */
static
-ibool
+bool
btr_validate_level(
/*===============*/
dict_index_t* index, /*!< in: index tree */
- trx_t* trx, /*!< in: transaction or NULL */
+ const trx_t* trx, /*!< in: transaction or NULL */
ulint level) /*!< in: level number */
{
ulint space;
+ ulint space_flags;
ulint zip_size;
buf_block_t* block;
page_t* page;
@@ -4127,9 +4370,10 @@ btr_validate_level(
ulint left_page_no;
page_cur_t cursor;
dtuple_t* node_ptr_tuple;
- ibool ret = TRUE;
+ bool ret = true;
mtr_t mtr;
mem_heap_t* heap = mem_heap_create(256);
+ fseg_header_t* seg;
ulint* offsets = NULL;
ulint* offsets2= NULL;
#ifdef UNIV_ZIP_DEBUG
@@ -4140,15 +4384,39 @@ btr_validate_level(
mtr_x_lock(dict_index_get_lock(index), &mtr);
- block = btr_root_block_get(index, &mtr);
+ block = btr_root_block_get(index, RW_X_LATCH, &mtr);
page = buf_block_get_frame(block);
+ seg = page + PAGE_HEADER + PAGE_BTR_SEG_TOP;
space = dict_index_get_space(index);
zip_size = dict_table_zip_size(index->table);
+ fil_space_get_latch(space, &space_flags);
+
+ if (zip_size != dict_tf_get_zip_size(space_flags)) {
+
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Flags mismatch: table=%lu, tablespace=%lu",
+ (ulint) index->table->flags, (ulint) space_flags);
+
+ mtr_commit(&mtr);
+
+ return(false);
+ }
+
while (level != btr_page_get_level(page, &mtr)) {
const rec_t* node_ptr;
+ if (fseg_page_is_free(seg,
+ block->page.space, block->page.offset)) {
+
+ btr_validate_report1(index, level, block);
+
+ ib_logf(IB_LOG_LEVEL_WARN, "page is free");
+
+ ret = false;
+ }
+
ut_a(space == buf_block_get_space(block));
ut_a(space == page_get_space_id(page));
#ifdef UNIV_ZIP_DEBUG
@@ -4169,12 +4437,13 @@ btr_validate_level(
/* Now we are on the desired level. Loop through the pages on that
level. */
-loop:
- if (trx_is_interrupted(trx)) {
- mtr_commit(&mtr);
- mem_heap_free(heap);
- return(ret);
+
+ if (level == 0) {
+ /* Leaf pages are managed in their own file segment. */
+ seg -= PAGE_BTR_SEG_TOP - PAGE_BTR_SEG_LEAF;
}
+
+loop:
mem_heap_empty(heap);
offsets = offsets2 = NULL;
mtr_x_lock(dict_index_get_lock(index), &mtr);
@@ -4184,20 +4453,35 @@ loop:
ut_a(!page_zip || page_zip_validate(page_zip, page, index));
#endif /* UNIV_ZIP_DEBUG */
- /* Check ordering etc. of records */
+ ut_a(block->page.space == space);
+
+ if (fseg_page_is_free(seg, block->page.space, block->page.offset)) {
- if (!page_validate(page, index)) {
btr_validate_report1(index, level, block);
- ret = FALSE;
- } else if (level == 0) {
+ ib_logf(IB_LOG_LEVEL_WARN, "Page is marked as free");
+ ret = false;
+
+ } else if (btr_page_get_index_id(page) != index->id) {
+
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Page index id " IB_ID_FMT " != data dictionary "
+ "index id " IB_ID_FMT,
+ btr_page_get_index_id(page), index->id);
+
+ ret = false;
+
+ } else if (!page_validate(page, index)) {
+
+ btr_validate_report1(index, level, block);
+ ret = false;
+
+ } else if (level == 0 && !btr_index_page_validate(block, index)) {
+
/* We are on level 0. Check that the records have the right
number of fields, and field lengths are right. */
- if (!btr_index_page_validate(block, index)) {
-
- ret = FALSE;
- }
+ ret = false;
}
ut_a(btr_page_get_level(page, &mtr) == level);
@@ -4223,7 +4507,7 @@ loop:
buf_page_print(page, 0, BUF_PAGE_PRINT_NO_CRASH);
buf_page_print(right_page, 0, BUF_PAGE_PRINT_NO_CRASH);
- ret = FALSE;
+ ret = false;
}
if (page_is_comp(right_page) != page_is_comp(page)) {
@@ -4232,7 +4516,7 @@ loop:
buf_page_print(page, 0, BUF_PAGE_PRINT_NO_CRASH);
buf_page_print(right_page, 0, BUF_PAGE_PRINT_NO_CRASH);
- ret = FALSE;
+ ret = false;
goto node_ptr_fails;
}
@@ -4265,7 +4549,7 @@ loop:
rec_print(stderr, rec, index);
putc('\n', stderr);
- ret = FALSE;
+ ret = false;
}
}
@@ -4316,7 +4600,7 @@ loop:
fputs("InnoDB: record on page ", stderr);
rec_print_new(stderr, rec, offsets);
putc('\n', stderr);
- ret = FALSE;
+ ret = false;
goto node_ptr_fails;
}
@@ -4346,7 +4630,7 @@ loop:
fputs("InnoDB: first rec ", stderr);
rec_print(stderr, first_rec, index);
putc('\n', stderr);
- ret = FALSE;
+ ret = false;
goto node_ptr_fails;
}
@@ -4374,7 +4658,7 @@ loop:
if (btr_cur_get_rec(&right_node_cur)
!= right_node_ptr) {
- ret = FALSE;
+ ret = false;
fputs("InnoDB: node pointer to"
" the right page is wrong\n",
stderr);
@@ -4400,7 +4684,7 @@ loop:
!= page_rec_get_next(
page_get_infimum_rec(
right_father_page))) {
- ret = FALSE;
+ ret = false;
fputs("InnoDB: node pointer 2 to"
" the right page is wrong\n",
stderr);
@@ -4425,7 +4709,7 @@ loop:
if (page_get_page_no(right_father_page)
!= btr_page_get_next(father_page, &mtr)) {
- ret = FALSE;
+ ret = false;
fputs("InnoDB: node pointer 3 to"
" the right page is wrong\n",
stderr);
@@ -4456,17 +4740,23 @@ node_ptr_fails:
on the next loop. The page has already been checked. */
mtr_commit(&mtr);
- if (right_page_no != FIL_NULL) {
+ if (trx_is_interrupted(trx)) {
+ /* On interrupt, return the current status. */
+ } else if (right_page_no != FIL_NULL) {
+
mtr_start(&mtr);
- block = btr_block_get(space, zip_size, right_page_no,
- RW_X_LATCH, index, &mtr);
+ block = btr_block_get(
+ space, zip_size, right_page_no,
+ RW_X_LATCH, index, &mtr);
+
page = buf_block_get_frame(block);
goto loop;
}
mem_heap_free(heap);
+
return(ret);
}
@@ -4474,40 +4764,39 @@ node_ptr_fails:
Checks the consistency of an index tree.
@return TRUE if ok */
UNIV_INTERN
-ibool
+bool
btr_validate_index(
/*===============*/
dict_index_t* index, /*!< in: index */
- trx_t* trx) /*!< in: transaction or NULL */
+ const trx_t* trx) /*!< in: transaction or NULL */
{
- mtr_t mtr;
- page_t* root;
- ulint i;
- ulint n;
-
/* Full Text index are implemented by auxiliary tables,
not the B-tree */
- if (index->type & DICT_FTS) {
- return(TRUE);
+ if (dict_index_is_online_ddl(index) || (index->type & DICT_FTS)) {
+ return(true);
}
+ mtr_t mtr;
+
mtr_start(&mtr);
- mtr_x_lock(dict_index_get_lock(index), &mtr);
- root = btr_root_get(index, &mtr);
- n = btr_page_get_level(root, &mtr);
+ mtr_x_lock(dict_index_get_lock(index), &mtr);
- for (i = 0; i <= n && !trx_is_interrupted(trx); i++) {
- if (!btr_validate_level(index, trx, n - i)) {
+ bool ok = true;
+ page_t* root = btr_root_get(index, &mtr);
+ ulint n = btr_page_get_level(root, &mtr);
- mtr_commit(&mtr);
+ for (ulint i = 0; i <= n; ++i) {
- return(FALSE);
+ if (!btr_validate_level(index, trx, n - i)) {
+ ok = false;
+ break;
}
}
mtr_commit(&mtr);
- return(TRUE);
+ return(ok);
}
+
#endif /* !UNIV_HOTBACKUP */
diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc
index 56cce411bba..913b2088f24 100644
--- a/storage/innobase/btr/btr0cur.cc
+++ b/storage/innobase/btr/btr0cur.cc
@@ -2,6 +2,7 @@
Copyright (c) 1994, 2012, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
+Copyright (c) 2012, Facebook Inc.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -57,6 +58,7 @@ Created 10/16/1994 Heikki Tuuri
#include "buf0lru.h"
#include "btr0btr.h"
#include "btr0sea.h"
+#include "row0log.h"
#include "row0purge.h"
#include "row0upd.h"
#include "trx0rec.h"
@@ -69,13 +71,13 @@ Created 10/16/1994 Heikki Tuuri
#include "zlib.h"
/** Buffered B-tree operation types, introduced as part of delete buffering. */
-typedef enum btr_op_enum {
+enum btr_op_t {
BTR_NO_OP = 0, /*!< Not buffered */
BTR_INSERT_OP, /*!< Insert, do not ignore UNIQUE */
BTR_INSERT_IGNORE_UNIQUE_OP, /*!< Insert, ignoring UNIQUE */
BTR_DELETE_OP, /*!< Purge a delete-marked record */
BTR_DELMARK_OP /*!< Mark a record for deletion */
-} btr_op_t;
+};
#ifdef UNIV_DEBUG
/** If the following is set to TRUE, this module prints a lot of
@@ -430,6 +432,14 @@ btr_cur_search_to_nth_level(
cursor->low_match = ULINT_UNDEFINED;
#endif
+ ibool s_latch_by_caller;
+
+ s_latch_by_caller = latch_mode & BTR_ALREADY_S_LATCHED;
+
+ ut_ad(!s_latch_by_caller
+ || mtr_memo_contains(mtr, dict_index_get_lock(index),
+ MTR_MEMO_S_LOCK));
+
/* These flags are mutually exclusive, they are lumped together
with the latch mode for historical reasons. It's possible for
none of the flags to be set. */
@@ -465,11 +475,11 @@ btr_cur_search_to_nth_level(
estimate = latch_mode & BTR_ESTIMATE;
/* Turn the flags unrelated to the latch mode off. */
- latch_mode &= ~(BTR_INSERT
- | BTR_DELETE_MARK
- | BTR_DELETE
- | BTR_ESTIMATE
- | BTR_IGNORE_SEC_UNIQUE);
+ latch_mode = BTR_LATCH_MODE_WITHOUT_FLAGS(latch_mode);
+
+ ut_ad(!s_latch_by_caller
+ || latch_mode == BTR_SEARCH_LEAF
+ || latch_mode == BTR_MODIFY_LEAF);
cursor->flag = BTR_CUR_BINARY;
cursor->index = index;
@@ -483,16 +493,16 @@ btr_cur_search_to_nth_level(
#ifdef BTR_CUR_HASH_ADAPT
-#ifdef UNIV_SEARCH_PERF_STAT
+# ifdef UNIV_SEARCH_PERF_STAT
info->n_searches++;
-#endif
+# endif
if (rw_lock_get_writer(&btr_search_latch) == RW_LOCK_NOT_LOCKED
&& latch_mode <= BTR_MODIFY_LEAF
&& info->last_hash_succ
&& !estimate
-#ifdef PAGE_CUR_LE_OR_EXTENDS
+# ifdef PAGE_CUR_LE_OR_EXTENDS
&& mode != PAGE_CUR_LE_OR_EXTENDS
-#endif /* PAGE_CUR_LE_OR_EXTENDS */
+# endif /* PAGE_CUR_LE_OR_EXTENDS */
/* If !has_search_latch, we do a dirty read of
btr_search_enabled below, and btr_search_guess_on_hash()
will have to check it again. */
@@ -513,7 +523,7 @@ btr_cur_search_to_nth_level(
return;
}
-#endif /* BTR_CUR_HASH_ADAPT */
+# endif /* BTR_CUR_HASH_ADAPT */
#endif /* BTR_CUR_ADAPT */
btr_cur_n_non_sea++;
@@ -530,15 +540,19 @@ btr_cur_search_to_nth_level(
savepoint = mtr_set_savepoint(mtr);
- if (latch_mode == BTR_MODIFY_TREE) {
+ switch (latch_mode) {
+ case BTR_MODIFY_TREE:
mtr_x_lock(dict_index_get_lock(index), mtr);
-
- } else if (latch_mode == BTR_CONT_MODIFY_TREE) {
+ break;
+ case BTR_CONT_MODIFY_TREE:
/* Do nothing */
ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index),
MTR_MEMO_X_LOCK));
- } else {
- mtr_s_lock(dict_index_get_lock(index), mtr);
+ break;
+ default:
+ if (!s_latch_by_caller) {
+ mtr_s_lock(dict_index_get_lock(index), mtr);
+ }
}
page_cursor = btr_cur_get_page_cur(cursor);
@@ -692,6 +706,7 @@ retry_page_get:
? SYNC_IBUF_TREE_NODE : SYNC_TREE_NODE);
}
+ ut_ad(fil_page_get_type(page) == FIL_PAGE_INDEX);
ut_ad(index->id == btr_page_get_index_id(page));
if (UNIV_UNLIKELY(height == ULINT_UNDEFINED)) {
@@ -716,13 +731,17 @@ retry_page_get:
cursor, mtr);
}
- if (latch_mode != BTR_MODIFY_TREE
- && latch_mode != BTR_CONT_MODIFY_TREE) {
-
- /* Release the tree s-latch */
-
- mtr_release_s_latch_at_savepoint(
- mtr, savepoint, dict_index_get_lock(index));
+ switch (latch_mode) {
+ case BTR_MODIFY_TREE:
+ case BTR_CONT_MODIFY_TREE:
+ break;
+ default:
+ if (!s_latch_by_caller) {
+ /* Release the tree s-latch */
+ mtr_release_s_latch_at_savepoint(
+ mtr, savepoint,
+ dict_index_get_lock(index));
+ }
}
page_mode = mode;
@@ -789,8 +808,7 @@ retry_page_get:
will properly check btr_search_enabled again in
btr_search_build_page_hash_index() before building a
page hash index, while holding btr_search_latch. */
- if (UNIV_LIKELY(btr_search_enabled)) {
-
+ if (btr_search_enabled) {
btr_search_info_update(index, cursor);
}
#endif
@@ -820,14 +838,16 @@ UNIV_INTERN
void
btr_cur_open_at_index_side_func(
/*============================*/
- ibool from_left, /*!< in: TRUE if open to the low end,
- FALSE if to the high end */
+ bool from_left, /*!< in: true if open to the low end,
+ false if to the high end */
dict_index_t* index, /*!< in: index */
ulint latch_mode, /*!< in: latch mode */
- btr_cur_t* cursor, /*!< in: cursor */
+ btr_cur_t* cursor, /*!< in/out: cursor */
+ ulint level, /*!< in: level to search for
+ (0=leaf). */
const char* file, /*!< in: file name */
ulint line, /*!< in: line where called */
- mtr_t* mtr) /*!< in: mtr */
+ mtr_t* mtr) /*!< in/out: mini-transaction */
{
page_cur_t* page_cursor;
ulint page_no;
@@ -844,16 +864,27 @@ btr_cur_open_at_index_side_func(
rec_offs_init(offsets_);
estimate = latch_mode & BTR_ESTIMATE;
- latch_mode = latch_mode & ~BTR_ESTIMATE;
+ latch_mode &= ~BTR_ESTIMATE;
+
+ ut_ad(level != ULINT_UNDEFINED);
/* Store the position of the tree latch we push to mtr so that we
know how to release it when we have latched the leaf node */
savepoint = mtr_set_savepoint(mtr);
- if (latch_mode == BTR_MODIFY_TREE) {
+ switch (latch_mode) {
+ case BTR_CONT_MODIFY_TREE:
+ break;
+ case BTR_MODIFY_TREE:
mtr_x_lock(dict_index_get_lock(index), mtr);
- } else {
+ break;
+ case BTR_SEARCH_LEAF | BTR_ALREADY_S_LATCHED:
+ case BTR_MODIFY_LEAF | BTR_ALREADY_S_LATCHED:
+ ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index),
+ MTR_MEMO_S_LOCK));
+ break;
+ default:
mtr_s_lock(dict_index_get_lock(index), mtr);
}
@@ -873,6 +904,7 @@ btr_cur_open_at_index_side_func(
RW_NO_LATCH, NULL, BUF_GET,
file, line, mtr);
page = buf_block_get_frame(block);
+ ut_ad(fil_page_get_type(page) == FIL_PAGE_INDEX);
ut_ad(index->id == btr_page_get_index_id(page));
block->check_index_page_at_flush = TRUE;
@@ -882,26 +914,40 @@ btr_cur_open_at_index_side_func(
height = btr_page_get_level(page, mtr);
root_height = height;
+ ut_a(height >= level);
+ } else {
+ /* TODO: flag the index corrupted if this fails */
+ ut_ad(height == btr_page_get_level(page, mtr));
}
- if (height == 0) {
- btr_cur_latch_leaves(page, space, zip_size, page_no,
- latch_mode, cursor, mtr);
-
- /* In versions <= 3.23.52 we had forgotten to
- release the tree latch here. If in an index scan
- we had to scan far to find a record visible to the
- current transaction, that could starve others
- waiting for the tree latch. */
-
- if ((latch_mode != BTR_MODIFY_TREE)
- && (latch_mode != BTR_CONT_MODIFY_TREE)) {
+ if (height == level) {
+ btr_cur_latch_leaves(
+ page, space, zip_size, page_no,
+ latch_mode & ~BTR_ALREADY_S_LATCHED,
+ cursor, mtr);
- /* Release the tree s-latch */
+ if (height == 0) {
+ /* In versions <= 3.23.52 we had
+ forgotten to release the tree latch
+ here. If in an index scan we had to
+ scan far to find a record visible to
+ the current transaction, that could
+ starve others waiting for the tree
+ latch. */
+
+ switch (latch_mode) {
+ case BTR_MODIFY_TREE:
+ case BTR_CONT_MODIFY_TREE:
+ case BTR_SEARCH_LEAF | BTR_ALREADY_S_LATCHED:
+ case BTR_MODIFY_LEAF | BTR_ALREADY_S_LATCHED:
+ break;
+ default:
+ /* Release the tree s-latch */
- mtr_release_s_latch_at_savepoint(
- mtr, savepoint,
- dict_index_get_lock(index));
+ mtr_release_s_latch_at_savepoint(
+ mtr, savepoint,
+ dict_index_get_lock(index));
+ }
}
}
@@ -911,7 +957,7 @@ btr_cur_open_at_index_side_func(
page_cur_set_after_last(block, page_cursor);
}
- if (height == 0) {
+ if (height == level) {
if (estimate) {
btr_cur_add_path_info(cursor, height,
root_height);
@@ -970,9 +1016,12 @@ btr_cur_open_at_rnd_pos_func(
ulint* offsets = offsets_;
rec_offs_init(offsets_);
- if (latch_mode == BTR_MODIFY_TREE) {
+ switch (latch_mode) {
+ case BTR_MODIFY_TREE:
mtr_x_lock(dict_index_get_lock(index), mtr);
- } else {
+ break;
+ default:
+ ut_ad(latch_mode != BTR_CONT_MODIFY_TREE);
mtr_s_lock(dict_index_get_lock(index), mtr);
}
@@ -993,6 +1042,7 @@ btr_cur_open_at_rnd_pos_func(
RW_NO_LATCH, NULL, BUF_GET,
file, line, mtr);
page = buf_block_get_frame(block);
+ ut_ad(fil_page_get_type(page) == FIL_PAGE_INDEX);
ut_ad(index->id == btr_page_get_index_id(page));
if (height == ULINT_UNDEFINED) {
@@ -1037,7 +1087,7 @@ be freed by reorganizing. Differs from btr_cur_optimistic_insert because
no heuristics is applied to whether it pays to use CPU time for
reorganizing the page or not.
@return pointer to inserted record if succeed, else NULL */
-static
+static __attribute__((nonnull, warn_unused_result))
rec_t*
btr_cur_insert_if_possible(
/*=======================*/
@@ -1045,6 +1095,8 @@ btr_cur_insert_if_possible(
cursor stays valid */
const dtuple_t* tuple, /*!< in: tuple to insert; the size info need not
have been stored to tuple */
+ ulint** offsets,/*!< out: offsets on *rec */
+ mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */
ulint n_ext, /*!< in: number of externally stored columns */
mtr_t* mtr) /*!< in: mtr */
{
@@ -1060,8 +1112,8 @@ btr_cur_insert_if_possible(
page_cursor = btr_cur_get_page_cur(cursor);
/* Now, try the insert */
- rec = page_cur_tuple_insert(page_cursor, tuple,
- cursor->index, n_ext, mtr);
+ rec = page_cur_tuple_insert(page_cursor, tuple, cursor->index,
+ offsets, heap, n_ext, mtr);
if (UNIV_UNLIKELY(!rec)) {
/* If record did not fit, reorganize */
@@ -1071,19 +1123,21 @@ btr_cur_insert_if_possible(
page_cur_search(block, cursor->index, tuple,
PAGE_CUR_LE, page_cursor);
- rec = page_cur_tuple_insert(page_cursor, tuple,
- cursor->index, n_ext, mtr);
+ rec = page_cur_tuple_insert(
+ page_cursor, tuple, cursor->index,
+ offsets, heap, n_ext, mtr);
}
}
+ ut_ad(!rec || rec_offs_validate(rec, cursor->index, *offsets));
return(rec);
}
/*************************************************************//**
For an insert, checks the locks and does the undo logging if desired.
@return DB_SUCCESS, DB_WAIT_LOCK, DB_FAIL, or error number */
-UNIV_INLINE
-ulint
+UNIV_INLINE __attribute__((warn_unused_result, nonnull(2,3,5,6)))
+dberr_t
btr_cur_ins_lock_and_undo(
/*======================*/
ulint flags, /*!< in: undo logging and locking flags: if
@@ -1098,7 +1152,7 @@ btr_cur_ins_lock_and_undo(
successor record */
{
dict_index_t* index;
- ulint err;
+ dberr_t err;
rec_t* rec;
roll_ptr_t roll_ptr;
@@ -1108,6 +1162,10 @@ btr_cur_ins_lock_and_undo(
rec = btr_cur_get_rec(cursor);
index = cursor->index;
+ ut_ad(!dict_index_is_online_ddl(index)
+ || dict_index_is_clust(index)
+ || (flags & BTR_CREATE_FLAG));
+
err = lock_rec_insert_check_and_lock(flags, rec,
btr_cur_get_block(cursor),
index, thr, mtr, inherit);
@@ -1120,7 +1178,7 @@ btr_cur_ins_lock_and_undo(
err = trx_undo_report_row_operation(flags, TRX_UNDO_INSERT_OP,
thr, index, entry,
- NULL, 0, NULL,
+ NULL, 0, NULL, NULL,
&roll_ptr);
if (err != DB_SUCCESS) {
@@ -1145,13 +1203,13 @@ static
void
btr_cur_trx_report(
/*===============*/
- trx_t* trx, /*!< in: transaction */
+ trx_id_t trx_id, /*!< in: transaction id */
const dict_index_t* index, /*!< in: index */
const char* op) /*!< in: operation */
{
- fprintf(stderr, "Trx with id " TRX_ID_FMT " going to ", trx->id);
+ fprintf(stderr, "Trx with id " TRX_ID_FMT " going to ", trx_id);
fputs(op, stderr);
- dict_index_name_print(stderr, trx, index);
+ dict_index_name_print(stderr, NULL, index);
putc('\n', stderr);
}
#endif /* UNIV_DEBUG */
@@ -1164,7 +1222,7 @@ one record on the page, the insert will always succeed; this is to
prevent trying to split a page with just one record.
@return DB_SUCCESS, DB_WAIT_LOCK, DB_FAIL, or error number */
UNIV_INTERN
-ulint
+dberr_t
btr_cur_optimistic_insert(
/*======================*/
ulint flags, /*!< in: undo logging and locking flags: if not
@@ -1172,6 +1230,8 @@ btr_cur_optimistic_insert(
specified */
btr_cur_t* cursor, /*!< in: cursor on page after which to insert;
cursor stays valid */
+ ulint** offsets,/*!< out: offsets on *rec */
+ mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */
dtuple_t* entry, /*!< in/out: entry to insert */
rec_t** rec, /*!< out: pointer to inserted record if
succeed */
@@ -1198,13 +1258,16 @@ btr_cur_optimistic_insert(
ibool inherit;
ulint zip_size;
ulint rec_size;
- ulint err;
+ dberr_t err;
*big_rec = NULL;
block = btr_cur_get_block(cursor);
page = buf_block_get_frame(block);
index = cursor->index;
+ ut_ad(!dict_index_is_online_ddl(index)
+ || dict_index_is_clust(index)
+ || (flags & BTR_CREATE_FLAG));
zip_size = buf_block_get_zip_size(block);
#ifdef UNIV_DEBUG_VALGRIND
if (zip_size) {
@@ -1219,7 +1282,7 @@ btr_cur_optimistic_insert(
}
#ifdef UNIV_DEBUG
if (btr_cur_print_record_ops && thr) {
- btr_cur_trx_report(thr_get_trx(thr), index, "insert into ");
+ btr_cur_trx_report(thr_get_trx(thr)->id, index, "insert ");
dtuple_print(stderr, entry);
}
#endif /* UNIV_DEBUG */
@@ -1313,6 +1376,15 @@ fail_err:
goto fail;
}
+ /* If compression padding tells us that insertion will result in
+ too packed up page i.e.: which is likely to cause compression
+ failure then don't do an optimistic insertion. */
+ if (zip_size && leaf
+ && (page_get_data_size(page) + rec_size
+ >= dict_index_zip_pad_optimal_page_size(index))) {
+
+ goto fail;
+ }
/* Check locks and write to the undo log, if specified */
err = btr_cur_ins_lock_and_undo(flags, cursor, entry,
thr, mtr, &inherit);
@@ -1329,7 +1401,7 @@ fail_err:
{
const rec_t* page_cursor_rec = page_cur_get_rec(page_cursor);
*rec = page_cur_tuple_insert(page_cursor, entry, index,
- n_ext, mtr);
+ offsets, heap, n_ext, mtr);
reorg = page_cursor_rec != page_cur_get_rec(page_cursor);
if (UNIV_UNLIKELY(reorg)) {
@@ -1359,7 +1431,7 @@ fail_err:
page_cur_search(block, index, entry, PAGE_CUR_LE, page_cursor);
*rec = page_cur_tuple_insert(page_cursor, entry, index,
- n_ext, mtr);
+ offsets, heap, n_ext, mtr);
if (UNIV_UNLIKELY(!*rec)) {
if (zip_size != 0) {
@@ -1434,7 +1506,7 @@ made on the leaf level, to avoid deadlocks, mtr must also own x-latches
to brothers of page, if those brothers exist.
@return DB_SUCCESS or error number */
UNIV_INTERN
-ulint
+dberr_t
btr_cur_pessimistic_insert(
/*=======================*/
ulint flags, /*!< in: undo logging and locking flags: if not
@@ -1445,6 +1517,9 @@ btr_cur_pessimistic_insert(
insertion will certainly succeed */
btr_cur_t* cursor, /*!< in: cursor after which to insert;
cursor stays valid */
+ ulint** offsets,/*!< out: offsets on *rec */
+ mem_heap_t** heap, /*!< in/out: pointer to memory heap
+ that can be emptied, or NULL */
dtuple_t* entry, /*!< in/out: entry to insert */
rec_t** rec, /*!< out: pointer to inserted record if
succeed */
@@ -1458,8 +1533,7 @@ btr_cur_pessimistic_insert(
dict_index_t* index = cursor->index;
ulint zip_size = dict_table_zip_size(index->table);
big_rec_t* big_rec_vec = NULL;
- mem_heap_t* heap = NULL;
- ulint err;
+ dberr_t err;
ibool dummy_inh;
ibool success;
ulint n_extents = 0;
@@ -1474,6 +1548,9 @@ btr_cur_pessimistic_insert(
MTR_MEMO_X_LOCK));
ut_ad(mtr_memo_contains(mtr, btr_cur_get_block(cursor),
MTR_MEMO_PAGE_X_FIX));
+ ut_ad(!dict_index_is_online_ddl(index)
+ || dict_index_is_clust(index)
+ || (flags & BTR_CREATE_FLAG));
cursor->flag = BTR_CUR_BINARY;
@@ -1531,13 +1608,11 @@ btr_cur_pessimistic_insert(
== buf_block_get_page_no(btr_cur_get_block(cursor))) {
/* The page is the root page */
- *rec = btr_root_raise_and_insert(cursor, entry, n_ext, mtr);
+ *rec = btr_root_raise_and_insert(
+ flags, cursor, offsets, heap, entry, n_ext, mtr);
} else {
- *rec = btr_page_split_and_insert(cursor, entry, n_ext, mtr);
- }
-
- if (UNIV_LIKELY_NULL(heap)) {
- mem_heap_free(heap);
+ *rec = btr_page_split_and_insert(
+ flags, cursor, offsets, heap, entry, n_ext, mtr);
}
ut_ad(page_rec_get_next(btr_cur_get_rec(cursor)) == *rec);
@@ -1564,29 +1639,36 @@ btr_cur_pessimistic_insert(
/*************************************************************//**
For an update, checks the locks and does the undo logging.
@return DB_SUCCESS, DB_WAIT_LOCK, or error number */
-UNIV_INLINE
-ulint
+UNIV_INLINE __attribute__((warn_unused_result, nonnull(2,3,6,7)))
+dberr_t
btr_cur_upd_lock_and_undo(
/*======================*/
ulint flags, /*!< in: undo logging and locking flags */
btr_cur_t* cursor, /*!< in: cursor on record to update */
+ const ulint* offsets,/*!< in: rec_get_offsets() on cursor */
const upd_t* update, /*!< in: update vector */
ulint cmpl_info,/*!< in: compiler info on secondary index
updates */
- que_thr_t* thr, /*!< in: query thread */
+ que_thr_t* thr, /*!< in: query thread
+ (can be NULL if BTR_NO_LOCKING_FLAG) */
mtr_t* mtr, /*!< in/out: mini-transaction */
roll_ptr_t* roll_ptr)/*!< out: roll pointer */
{
dict_index_t* index;
- rec_t* rec;
- ulint err;
+ const rec_t* rec;
+ dberr_t err;
- ut_ad(cursor && update && thr && roll_ptr);
+ ut_ad(thr || (flags & BTR_NO_LOCKING_FLAG));
rec = btr_cur_get_rec(cursor);
index = cursor->index;
+ ut_ad(rec_offs_validate(rec, index, offsets));
+
if (!dict_index_is_clust(index)) {
+ ut_ad(dict_index_is_online_ddl(index)
+ == !!(flags & BTR_CREATE_FLAG));
+
/* We do undo logging only when we update a clustered index
record */
return(lock_sec_rec_modify_check_and_lock(
@@ -1597,50 +1679,39 @@ btr_cur_upd_lock_and_undo(
/* Check if we have to wait for a lock: enqueue an explicit lock
request if yes */
- err = DB_SUCCESS;
-
if (!(flags & BTR_NO_LOCKING_FLAG)) {
- mem_heap_t* heap = NULL;
- ulint offsets_[REC_OFFS_NORMAL_SIZE];
- rec_offs_init(offsets_);
-
err = lock_clust_rec_modify_check_and_lock(
flags, btr_cur_get_block(cursor), rec, index,
- rec_get_offsets(rec, index, offsets_,
- ULINT_UNDEFINED, &heap), thr);
- if (UNIV_LIKELY_NULL(heap)) {
- mem_heap_free(heap);
- }
+ offsets, thr);
if (err != DB_SUCCESS) {
-
return(err);
}
}
/* Append the info about the update in the undo log */
- err = trx_undo_report_row_operation(flags, TRX_UNDO_MODIFY_OP, thr,
- index, NULL, update,
- cmpl_info, rec, roll_ptr);
- return(err);
+ return(trx_undo_report_row_operation(
+ flags, TRX_UNDO_MODIFY_OP, thr,
+ index, NULL, update,
+ cmpl_info, rec, offsets, roll_ptr));
}
/***********************************************************//**
Writes a redo log record of updating a record in-place. */
-UNIV_INLINE
+UNIV_INLINE __attribute__((nonnull))
void
btr_cur_update_in_place_log(
/*========================*/
ulint flags, /*!< in: flags */
- rec_t* rec, /*!< in: record */
- dict_index_t* index, /*!< in: index where cursor positioned */
+ const rec_t* rec, /*!< in: record */
+ dict_index_t* index, /*!< in: index of the record */
const upd_t* update, /*!< in: update vector */
- trx_t* trx, /*!< in: transaction */
+ trx_id_t trx_id, /*!< in: transaction id */
roll_ptr_t roll_ptr, /*!< in: roll ptr */
mtr_t* mtr) /*!< in: mtr */
{
- byte* log_ptr;
- page_t* page = page_align(rec);
+ byte* log_ptr;
+ const page_t* page = page_align(rec);
ut_ad(flags < 256);
ut_ad(!!page_is_comp(page) == dict_table_is_comp(index->table));
@@ -1665,8 +1736,8 @@ btr_cur_update_in_place_log(
mach_write_to_1(log_ptr, flags);
log_ptr++;
- log_ptr = row_upd_write_sys_vals_to_log(index, trx, roll_ptr, log_ptr,
- mtr);
+ log_ptr = row_upd_write_sys_vals_to_log(
+ index, trx_id, roll_ptr, log_ptr, mtr);
mach_write_to_2(log_ptr, page_offset(rec));
log_ptr += 2;
@@ -1769,6 +1840,13 @@ btr_cur_update_alloc_zip(
FALSE=update-in-place */
mtr_t* mtr) /*!< in: mini-transaction */
{
+
+ /* Have a local copy of the variables as these can change
+ dynamically. */
+ bool log_compressed = page_log_compressed_pages;
+ ulint compression_level = page_compression_level;
+ page_t* page = buf_block_get_frame(block);
+
ut_a(page_zip == buf_block_get_page_zip(block));
ut_ad(page_zip);
ut_ad(!dict_index_is_ibuf(index));
@@ -1784,12 +1862,27 @@ btr_cur_update_alloc_zip(
return(FALSE);
}
- if (!page_zip_compress(page_zip, buf_block_get_frame(block),
- index, mtr)) {
+ page = buf_block_get_frame(block);
+
+ if (create && page_is_leaf(page)
+ && (length + page_get_data_size(page)
+ >= dict_index_zip_pad_optimal_page_size(index))) {
+
+ return(FALSE);
+ }
+
+ if (!page_zip_compress(
+ page_zip, page, index, compression_level,
+ log_compressed ? mtr : NULL)) {
/* Unable to compress the page */
return(FALSE);
}
+ if (mtr && !log_compressed) {
+ page_zip_compress_write_log_no_data(
+ compression_level, page, index, mtr);
+ }
+
/* After recompressing a page, we must make sure that the free
bits in the insert buffer bitmap will not exceed the free
space on the page. Because this function will not attempt
@@ -1803,8 +1896,7 @@ btr_cur_update_alloc_zip(
if (!page_zip_available(page_zip, dict_index_is_clust(index),
length, create)) {
/* Out of space: reset the free bits. */
- if (!dict_index_is_clust(index)
- && page_is_leaf(buf_block_get_frame(block))) {
+ if (!dict_index_is_clust(index) && page_is_leaf(page)) {
ibuf_reset_free_bits(block);
}
return(FALSE);
@@ -1818,45 +1910,50 @@ Updates a record when the update causes no size changes in its fields.
We assume here that the ordering fields of the record do not change.
@return DB_SUCCESS or error number */
UNIV_INTERN
-ulint
+dberr_t
btr_cur_update_in_place(
/*====================*/
ulint flags, /*!< in: undo logging and locking flags */
btr_cur_t* cursor, /*!< in: cursor on the record to update;
cursor stays valid and positioned on the
same record */
+ const ulint* offsets,/*!< in: offsets on cursor->page_cur.rec */
const upd_t* update, /*!< in: update vector */
ulint cmpl_info,/*!< in: compiler info on secondary index
updates */
- que_thr_t* thr, /*!< in: query thread */
+ que_thr_t* thr, /*!< in: query thread, or NULL if
+ appropriate flags are set */
+ trx_id_t trx_id, /*!< in: transaction id */
mtr_t* mtr) /*!< in: mtr; must be committed before
latching any further pages */
{
dict_index_t* index;
buf_block_t* block;
page_zip_des_t* page_zip;
- ulint err;
+ dberr_t err;
rec_t* rec;
roll_ptr_t roll_ptr = 0;
- trx_t* trx;
ulint was_delete_marked;
ibool is_hashed;
- mem_heap_t* heap = NULL;
- ulint offsets_[REC_OFFS_NORMAL_SIZE];
- ulint* offsets = offsets_;
- rec_offs_init(offsets_);
rec = btr_cur_get_rec(cursor);
index = cursor->index;
+ ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(!!page_rec_is_comp(rec) == dict_table_is_comp(index->table));
/* The insert buffer tree should never be updated in place. */
ut_ad(!dict_index_is_ibuf(index));
+ ut_ad(dict_index_is_online_ddl(index) == !!(flags & BTR_CREATE_FLAG)
+ || dict_index_is_clust(index));
+ ut_ad(!thr || thr_get_trx(thr)->id == trx_id);
+ ut_ad(thr || (flags & ~BTR_KEEP_POS_FLAG)
+ == (BTR_NO_UNDO_LOG_FLAG | BTR_NO_LOCKING_FLAG
+ | BTR_CREATE_FLAG | BTR_KEEP_SYS_FLAG));
+ ut_ad(fil_page_get_type(btr_cur_get_page(cursor)) == FIL_PAGE_INDEX);
+ ut_ad(btr_page_get_index_id(btr_cur_get_page(cursor)) == index->id);
- trx = thr_get_trx(thr);
- offsets = rec_get_offsets(rec, index, offsets, ULINT_UNDEFINED, &heap);
#ifdef UNIV_DEBUG
- if (btr_cur_print_record_ops && thr) {
- btr_cur_trx_report(trx, index, "update ");
+ if (btr_cur_print_record_ops) {
+ btr_cur_trx_report(trx_id, index, "update ");
rec_print_new(stderr, rec, offsets);
}
#endif /* UNIV_DEBUG */
@@ -1872,19 +1969,17 @@ btr_cur_update_in_place(
}
/* Do lock checking and undo logging */
- err = btr_cur_upd_lock_and_undo(flags, cursor, update, cmpl_info,
+ err = btr_cur_upd_lock_and_undo(flags, cursor, offsets,
+ update, cmpl_info,
thr, mtr, &roll_ptr);
if (UNIV_UNLIKELY(err != DB_SUCCESS)) {
- if (UNIV_LIKELY_NULL(heap)) {
- mem_heap_free(heap);
- }
return(err);
}
if (!(flags & BTR_KEEP_SYS_FLAG)) {
- row_upd_rec_sys_fields(rec, NULL,
- index, offsets, trx, roll_ptr);
+ row_upd_rec_sys_fields(rec, NULL, index, offsets,
+ thr_get_trx(thr), roll_ptr);
}
was_delete_marked = rec_get_deleted_flag(
@@ -1925,7 +2020,7 @@ btr_cur_update_in_place(
}
btr_cur_update_in_place_log(flags, rec, index, update,
- trx, roll_ptr, mtr);
+ trx_id, roll_ptr, mtr);
if (was_delete_marked
&& !rec_get_deleted_flag(
@@ -1937,9 +2032,6 @@ btr_cur_update_in_place(
rec, index, offsets, mtr);
}
- if (UNIV_LIKELY_NULL(heap)) {
- mem_heap_free(heap);
- }
return(DB_SUCCESS);
}
@@ -1953,24 +2045,28 @@ fields of the record do not change.
DB_UNDERFLOW if the page would become too empty, or DB_ZIP_OVERFLOW if
there is not enough space left on the compressed page */
UNIV_INTERN
-ulint
+dberr_t
btr_cur_optimistic_update(
/*======================*/
ulint flags, /*!< in: undo logging and locking flags */
btr_cur_t* cursor, /*!< in: cursor on the record to update;
cursor stays valid and positioned on the
same record */
+ ulint** offsets,/*!< out: offsets on cursor->page_cur.rec */
+ mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */
const upd_t* update, /*!< in: update vector; this must also
contain trx id and roll ptr fields */
ulint cmpl_info,/*!< in: compiler info on secondary index
updates */
- que_thr_t* thr, /*!< in: query thread */
+ que_thr_t* thr, /*!< in: query thread, or NULL if
+ appropriate flags are set */
+ trx_id_t trx_id, /*!< in: transaction id */
mtr_t* mtr) /*!< in: mtr; must be committed before
latching any further pages */
{
dict_index_t* index;
page_cur_t* page_cursor;
- ulint err;
+ dberr_t err;
buf_block_t* block;
page_t* page;
page_zip_des_t* page_zip;
@@ -1980,10 +2076,8 @@ btr_cur_optimistic_update(
ulint old_rec_size;
dtuple_t* new_entry;
roll_ptr_t roll_ptr;
- mem_heap_t* heap;
ulint i;
ulint n_ext;
- ulint* offsets;
block = btr_cur_get_block(cursor);
page = buf_block_get_frame(block);
@@ -1993,39 +2087,46 @@ btr_cur_optimistic_update(
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
/* The insert buffer tree should never be updated in place. */
ut_ad(!dict_index_is_ibuf(index));
-
- heap = mem_heap_create(1024);
- offsets = rec_get_offsets(rec, index, NULL, ULINT_UNDEFINED, &heap);
+ ut_ad(dict_index_is_online_ddl(index) == !!(flags & BTR_CREATE_FLAG)
+ || dict_index_is_clust(index));
+ ut_ad(!thr || thr_get_trx(thr)->id == trx_id);
+ ut_ad(thr || (flags & ~BTR_KEEP_POS_FLAG)
+ == (BTR_NO_UNDO_LOG_FLAG | BTR_NO_LOCKING_FLAG
+ | BTR_CREATE_FLAG | BTR_KEEP_SYS_FLAG));
+ ut_ad(fil_page_get_type(page) == FIL_PAGE_INDEX);
+ ut_ad(btr_page_get_index_id(page) == index->id);
+
+ *offsets = rec_get_offsets(rec, index, *offsets,
+ ULINT_UNDEFINED, heap);
#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
- ut_a(!rec_offs_any_null_extern(rec, offsets)
+ ut_a(!rec_offs_any_null_extern(rec, *offsets)
|| trx_is_recv(thr_get_trx(thr)));
#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */
#ifdef UNIV_DEBUG
- if (btr_cur_print_record_ops && thr) {
- btr_cur_trx_report(thr_get_trx(thr), index, "update ");
- rec_print_new(stderr, rec, offsets);
+ if (btr_cur_print_record_ops) {
+ btr_cur_trx_report(trx_id, index, "update ");
+ rec_print_new(stderr, rec, *offsets);
}
#endif /* UNIV_DEBUG */
- if (!row_upd_changes_field_size_or_external(index, offsets, update)) {
+ if (!row_upd_changes_field_size_or_external(index, *offsets, update)) {
/* The simplest and the most common case: the update does not
change the size of any field and none of the updated fields is
externally stored in rec or update, and there is enough space
on the compressed page to log the update. */
- mem_heap_free(heap);
- return(btr_cur_update_in_place(flags, cursor, update,
- cmpl_info, thr, mtr));
+ return(btr_cur_update_in_place(
+ flags, cursor, *offsets, update,
+ cmpl_info, thr, trx_id, mtr));
}
- if (rec_offs_any_extern(offsets)) {
+ if (rec_offs_any_extern(*offsets)) {
any_extern:
/* Externally stored fields are treated in pessimistic
update */
- mem_heap_free(heap);
return(DB_OVERFLOW);
}
@@ -2038,8 +2139,14 @@ any_extern:
page_cursor = btr_cur_get_page_cur(cursor);
- new_entry = row_rec_to_index_entry(ROW_COPY_DATA, rec, index, offsets,
- &n_ext, heap);
+ if (!*heap) {
+ *heap = mem_heap_create(
+ rec_offs_size(*offsets)
+ + DTUPLE_EST_ALLOC(rec_offs_n_fields(*offsets)));
+ }
+
+ new_entry = row_rec_to_index_entry(rec, index, *offsets,
+ &n_ext, *heap);
/* We checked above that there are no externally stored fields. */
ut_a(!n_ext);
@@ -2047,8 +2154,8 @@ any_extern:
corresponding to new_entry is latched in mtr.
Thus the following call is safe. */
row_upd_index_replace_new_col_vals_index_pos(new_entry, index, update,
- FALSE, heap);
- old_rec_size = rec_offs_size(offsets);
+ FALSE, *heap);
+ old_rec_size = rec_offs_size(*offsets);
new_rec_size = rec_get_converted_size(index, new_entry, 0);
page_zip = buf_block_get_page_zip(block);
@@ -2059,16 +2166,14 @@ any_extern:
if (page_zip
&& !btr_cur_update_alloc_zip(page_zip, block, index,
new_rec_size, TRUE, mtr)) {
- err = DB_ZIP_OVERFLOW;
- goto err_exit;
+ return(DB_ZIP_OVERFLOW);
}
if (UNIV_UNLIKELY(new_rec_size
>= (page_get_free_space_of_empty(page_is_comp(page))
/ 2))) {
- err = DB_OVERFLOW;
- goto err_exit;
+ return(DB_OVERFLOW);
}
if (UNIV_UNLIKELY(page_get_data_size(page)
@@ -2077,8 +2182,7 @@ any_extern:
/* The page would become too empty */
- err = DB_UNDERFLOW;
- goto err_exit;
+ return(DB_UNDERFLOW);
}
/* We do not attempt to reorganize if the page is compressed.
@@ -2096,16 +2200,16 @@ any_extern:
reorganize: for simplicity, we decide what to do assuming a
reorganization is needed, though it might not be necessary */
- err = DB_OVERFLOW;
- goto err_exit;
+ return(DB_OVERFLOW);
}
/* Do lock checking and undo logging */
- err = btr_cur_upd_lock_and_undo(flags, cursor, update, cmpl_info,
+ err = btr_cur_upd_lock_and_undo(flags, cursor, *offsets,
+ update, cmpl_info,
thr, mtr, &roll_ptr);
if (err != DB_SUCCESS) {
- goto err_exit;
+ return(err);
}
/* Ok, we may do the replacement. Store on the page infimum the
@@ -2116,13 +2220,7 @@ any_extern:
btr_search_update_hash_on_delete(cursor);
- /* The call to row_rec_to_index_entry(ROW_COPY_DATA, ...) above
- invokes rec_offs_make_valid() to point to the copied record that
- the fields of new_entry point to. We have to undo it here. */
- ut_ad(rec_offs_validate(NULL, index, offsets));
- rec_offs_make_valid(page_cur_get_rec(page_cursor), index, offsets);
-
- page_cur_delete_rec(page_cursor, index, offsets, mtr);
+ page_cur_delete_rec(page_cursor, index, *offsets, mtr);
page_cur_move_to_prev(page_cursor);
@@ -2130,11 +2228,12 @@ any_extern:
row_upd_index_entry_sys_field(new_entry, index, DATA_ROLL_PTR,
roll_ptr);
row_upd_index_entry_sys_field(new_entry, index, DATA_TRX_ID,
- thr_get_trx(thr)->id);
+ trx_id);
}
/* There are no externally stored columns in new_entry */
- rec = btr_cur_insert_if_possible(cursor, new_entry, 0/*n_ext*/, mtr);
+ rec = btr_cur_insert_if_possible(
+ cursor, new_entry, offsets, heap, 0/*n_ext*/, mtr);
ut_a(rec); /* <- We calculated above the insert would fit */
if (page_zip && !dict_index_is_clust(index)
@@ -2149,10 +2248,7 @@ any_extern:
page_cur_move_to_next(page_cursor);
- err = DB_SUCCESS;
-err_exit:
- mem_heap_free(heap);
- return(err);
+ return(DB_SUCCESS);
}
/*************************************************************//**
@@ -2211,7 +2307,7 @@ own x-latches to brothers of page, if those brothers exist. We assume
here that the ordering fields of the record do not change.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
btr_cur_pessimistic_update(
/*=======================*/
ulint flags, /*!< in: undo logging, locking, and rollback
@@ -2219,7 +2315,13 @@ btr_cur_pessimistic_update(
btr_cur_t* cursor, /*!< in/out: cursor on the record to update;
cursor may become invalid if *big_rec == NULL
|| !(flags & BTR_KEEP_POS_FLAG) */
- mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */
+ ulint** offsets,/*!< out: offsets on cursor->page_cur.rec */
+ mem_heap_t** offsets_heap,
+ /*!< in/out: pointer to memory heap
+ that can be emptied, or NULL */
+ mem_heap_t* entry_heap,
+ /*!< in/out: memory heap for allocating
+ big_rec and the index tuple */
big_rec_t** big_rec,/*!< out: big rec vector whose fields have to
be stored externally by the caller, or NULL */
const upd_t* update, /*!< in: update vector; this is allowed also
@@ -2227,7 +2329,9 @@ btr_cur_pessimistic_update(
the values in update vector have no effect */
ulint cmpl_info,/*!< in: compiler info on secondary index
updates */
- que_thr_t* thr, /*!< in: query thread */
+ que_thr_t* thr, /*!< in: query thread, or NULL if
+ appropriate flags are set */
+ trx_id_t trx_id, /*!< in: transaction id */
mtr_t* mtr) /*!< in: mtr; must be committed before
latching any further pages */
{
@@ -2239,17 +2343,15 @@ btr_cur_pessimistic_update(
page_zip_des_t* page_zip;
rec_t* rec;
page_cur_t* page_cursor;
- dtuple_t* new_entry;
- ulint err;
- ulint optim_err;
+ dberr_t err;
+ dberr_t optim_err;
roll_ptr_t roll_ptr;
- trx_t* trx;
ibool was_first;
ulint n_extents = 0;
ulint n_reserved;
ulint n_ext;
- ulint* offsets = NULL;
+ *offsets = NULL;
*big_rec = NULL;
block = btr_cur_get_block(cursor);
@@ -2266,9 +2368,16 @@ btr_cur_pessimistic_update(
#endif /* UNIV_ZIP_DEBUG */
/* The insert buffer tree should never be updated in place. */
ut_ad(!dict_index_is_ibuf(index));
+ ut_ad(dict_index_is_online_ddl(index) == !!(flags & BTR_CREATE_FLAG)
+ || dict_index_is_clust(index));
+ ut_ad(!thr || thr_get_trx(thr)->id == trx_id);
+ ut_ad(thr || (flags & ~BTR_KEEP_POS_FLAG)
+ == (BTR_NO_UNDO_LOG_FLAG | BTR_NO_LOCKING_FLAG
+ | BTR_CREATE_FLAG | BTR_KEEP_SYS_FLAG));
- optim_err = btr_cur_optimistic_update(flags, cursor, update,
- cmpl_info, thr, mtr);
+ optim_err = btr_cur_optimistic_update(
+ flags, cursor, offsets, offsets_heap, update,
+ cmpl_info, thr, trx_id, mtr);
switch (optim_err) {
case DB_UNDERFLOW:
@@ -2280,7 +2389,8 @@ btr_cur_pessimistic_update(
}
/* Do lock checking and undo logging */
- err = btr_cur_upd_lock_and_undo(flags, cursor, update, cmpl_info,
+ err = btr_cur_upd_lock_and_undo(flags, cursor, *offsets,
+ update, cmpl_info,
thr, mtr, &roll_ptr);
if (err != DB_SUCCESS) {
@@ -2308,20 +2418,11 @@ btr_cur_pessimistic_update(
}
}
- if (!*heap) {
- *heap = mem_heap_create(1024);
- }
- offsets = rec_get_offsets(rec, index, NULL, ULINT_UNDEFINED, heap);
-
- trx = thr_get_trx(thr);
+ *offsets = rec_get_offsets(
+ rec, index, *offsets, ULINT_UNDEFINED, offsets_heap);
- new_entry = row_rec_to_index_entry(ROW_COPY_DATA, rec, index, offsets,
- &n_ext, *heap);
- /* The call to row_rec_to_index_entry(ROW_COPY_DATA, ...) above
- invokes rec_offs_make_valid() to point to the copied record that
- the fields of new_entry point to. We have to undo it here. */
- ut_ad(rec_offs_validate(NULL, index, offsets));
- rec_offs_make_valid(rec, index, offsets);
+ dtuple_t* new_entry = row_rec_to_index_entry(
+ rec, index, *offsets, &n_ext, entry_heap);
/* The page containing the clustered index record
corresponding to new_entry is latched in mtr. If the
@@ -2330,15 +2431,15 @@ btr_cur_pessimistic_update(
purge would also have removed the clustered index record
itself. Thus the following call is safe. */
row_upd_index_replace_new_col_vals_index_pos(new_entry, index, update,
- FALSE, *heap);
+ FALSE, entry_heap);
if (!(flags & BTR_KEEP_SYS_FLAG)) {
row_upd_index_entry_sys_field(new_entry, index, DATA_ROLL_PTR,
roll_ptr);
row_upd_index_entry_sys_field(new_entry, index, DATA_TRX_ID,
- trx->id);
+ trx_id);
}
- if ((flags & BTR_NO_UNDO_LOG_FLAG) && rec_offs_any_extern(offsets)) {
+ if ((flags & BTR_NO_UNDO_LOG_FLAG) && rec_offs_any_extern(*offsets)) {
/* We are in a transaction rollback undoing a row
update: we must free possible externally stored fields
which got new values in the update, if they are not
@@ -2349,16 +2450,17 @@ btr_cur_pessimistic_update(
ut_ad(big_rec_vec == NULL);
btr_rec_free_updated_extern_fields(
- index, rec, page_zip, offsets, update,
- trx_is_recv(trx) ? RB_RECOVERY : RB_NORMAL, mtr);
+ index, rec, page_zip, *offsets, update,
+ trx_is_recv(thr_get_trx(thr))
+ ? RB_RECOVERY : RB_NORMAL, mtr);
}
/* We have to set appropriate extern storage bits in the new
record to be inserted: we have to remember which fields were such */
ut_ad(!page_is_comp(page) || !rec_get_node_ptr_flag(rec));
- ut_ad(rec_offs_validate(rec, index, offsets));
- n_ext += btr_push_update_extern_fields(new_entry, update, *heap);
+ ut_ad(rec_offs_validate(rec, index, *offsets));
+ n_ext += btr_push_update_extern_fields(new_entry, update, entry_heap);
if (page_zip) {
ut_ad(page_is_comp(page));
@@ -2404,11 +2506,12 @@ make_external:
#endif /* UNIV_ZIP_DEBUG */
page_cursor = btr_cur_get_page_cur(cursor);
- page_cur_delete_rec(page_cursor, index, offsets, mtr);
+ page_cur_delete_rec(page_cursor, index, *offsets, mtr);
page_cur_move_to_prev(page_cursor);
- rec = btr_cur_insert_if_possible(cursor, new_entry, n_ext, mtr);
+ rec = btr_cur_insert_if_possible(cursor, new_entry,
+ offsets, offsets_heap, n_ext, mtr);
if (rec) {
page_cursor->rec = rec;
@@ -2416,20 +2519,19 @@ make_external:
lock_rec_restore_from_page_infimum(btr_cur_get_block(cursor),
rec, block);
- offsets = rec_get_offsets(rec, index, offsets,
- ULINT_UNDEFINED, heap);
-
- if (!rec_get_deleted_flag(rec, rec_offs_comp(offsets))) {
+ if (!rec_get_deleted_flag(rec, rec_offs_comp(*offsets))) {
/* The new inserted record owns its possible externally
stored fields */
- btr_cur_unmark_extern_fields(page_zip,
- rec, index, offsets, mtr);
+ btr_cur_unmark_extern_fields(
+ page_zip, rec, index, *offsets, mtr);
}
- btr_cur_compress_if_useful(
- cursor,
- big_rec_vec != NULL && (flags & BTR_KEEP_POS_FLAG),
- mtr);
+ bool adjust = big_rec_vec && (flags & BTR_KEEP_POS_FLAG);
+
+ if (btr_cur_compress_if_useful(cursor, adjust, mtr)
+ && adjust) {
+ rec_offs_make_valid(page_cursor->rec, index, *offsets);
+ }
if (page_zip && !dict_index_is_clust(index)
&& page_is_leaf(page)) {
@@ -2448,8 +2550,7 @@ make_external:
ut_a(page_zip || optim_err != DB_UNDERFLOW);
/* Out of space: reset the free bits. */
- if (!dict_index_is_clust(index)
- && page_is_leaf(page)) {
+ if (!dict_index_is_clust(index) && page_is_leaf(page)) {
ibuf_reset_free_bits(block);
}
}
@@ -2481,11 +2582,13 @@ make_external:
err = btr_cur_pessimistic_insert(BTR_NO_UNDO_LOG_FLAG
| BTR_NO_LOCKING_FLAG
| BTR_KEEP_SYS_FLAG,
- cursor, new_entry, &rec,
+ cursor, offsets, offsets_heap,
+ new_entry, &rec,
&dummy_big_rec, n_ext, NULL, mtr);
ut_a(rec);
ut_a(err == DB_SUCCESS);
ut_a(dummy_big_rec == NULL);
+ ut_ad(rec_offs_validate(rec, cursor->index, *offsets));
page_cursor->rec = rec;
if (dict_index_is_sec_or_ibuf(index)) {
@@ -2498,10 +2601,10 @@ make_external:
page_update_max_trx_id(rec_block,
buf_block_get_page_zip(rec_block),
- trx->id, mtr);
+ trx_id, mtr);
}
- if (!rec_get_deleted_flag(rec, rec_offs_comp(offsets))) {
+ if (!rec_get_deleted_flag(rec, rec_offs_comp(*offsets))) {
/* The new inserted record owns its possible externally
stored fields */
buf_block_t* rec_block = btr_cur_get_block(cursor);
@@ -2512,10 +2615,8 @@ make_external:
#endif /* UNIV_ZIP_DEBUG */
page_zip = buf_block_get_page_zip(rec_block);
- offsets = rec_get_offsets(rec, index, offsets,
- ULINT_UNDEFINED, heap);
btr_cur_unmark_extern_fields(page_zip,
- rec, index, offsets, mtr);
+ rec, index, *offsets, mtr);
}
lock_rec_restore_from_page_infimum(btr_cur_get_block(cursor),
@@ -2554,17 +2655,13 @@ UNIV_INLINE
void
btr_cur_del_mark_set_clust_rec_log(
/*===============================*/
- ulint flags, /*!< in: flags */
rec_t* rec, /*!< in: record */
dict_index_t* index, /*!< in: index of the record */
- ibool val, /*!< in: value to set */
- trx_t* trx, /*!< in: deleting transaction */
+ trx_id_t trx_id, /*!< in: transaction id */
roll_ptr_t roll_ptr,/*!< in: roll ptr to the undo log record */
mtr_t* mtr) /*!< in: mtr */
{
byte* log_ptr;
- ut_ad(flags < 256);
- ut_ad(val <= 1);
ut_ad(!!page_rec_is_comp(rec) == dict_table_is_comp(index->table));
@@ -2580,13 +2677,11 @@ btr_cur_del_mark_set_clust_rec_log(
return;
}
- mach_write_to_1(log_ptr, flags);
- log_ptr++;
- mach_write_to_1(log_ptr, val);
- log_ptr++;
+ *log_ptr++ = 0;
+ *log_ptr++ = 1;
- log_ptr = row_upd_write_sys_vals_to_log(index, trx, roll_ptr, log_ptr,
- mtr);
+ log_ptr = row_upd_write_sys_vals_to_log(
+ index, trx_id, roll_ptr, log_ptr, mtr);
mach_write_to_2(log_ptr, page_offset(rec));
log_ptr += 2;
@@ -2683,20 +2778,18 @@ of the deleting transaction, and in the roll ptr field pointer to the
undo log record created.
@return DB_SUCCESS, DB_LOCK_WAIT, or error number */
UNIV_INTERN
-ulint
+dberr_t
btr_cur_del_mark_set_clust_rec(
/*===========================*/
- ulint flags, /*!< in: undo logging and locking flags */
buf_block_t* block, /*!< in/out: buffer block of the record */
rec_t* rec, /*!< in/out: record */
dict_index_t* index, /*!< in: clustered index of the record */
const ulint* offsets,/*!< in: rec_get_offsets(rec) */
- ibool val, /*!< in: value to set */
que_thr_t* thr, /*!< in: query thread */
mtr_t* mtr) /*!< in: mtr */
{
roll_ptr_t roll_ptr;
- ulint err;
+ dberr_t err;
page_zip_des_t* page_zip;
trx_t* trx;
@@ -2708,7 +2801,7 @@ btr_cur_del_mark_set_clust_rec(
#ifdef UNIV_DEBUG
if (btr_cur_print_record_ops && thr) {
- btr_cur_trx_report(thr_get_trx(thr), index, "del mark ");
+ btr_cur_trx_report(thr_get_trx(thr)->id, index, "del mark ");
rec_print_new(stderr, rec, offsets);
}
#endif /* UNIV_DEBUG */
@@ -2716,7 +2809,7 @@ btr_cur_del_mark_set_clust_rec(
ut_ad(dict_index_is_clust(index));
ut_ad(!rec_get_deleted_flag(rec, rec_offs_comp(offsets)));
- err = lock_clust_rec_modify_check_and_lock(flags, block,
+ err = lock_clust_rec_modify_check_and_lock(BTR_NO_LOCKING_FLAG, block,
rec, index, offsets, thr);
if (err != DB_SUCCESS) {
@@ -2724,8 +2817,8 @@ btr_cur_del_mark_set_clust_rec(
return(err);
}
- err = trx_undo_report_row_operation(flags, TRX_UNDO_MODIFY_OP, thr,
- index, NULL, NULL, 0, rec,
+ err = trx_undo_report_row_operation(0, TRX_UNDO_MODIFY_OP, thr,
+ index, NULL, NULL, 0, rec, offsets,
&roll_ptr);
if (err != DB_SUCCESS) {
@@ -2738,17 +2831,21 @@ btr_cur_del_mark_set_clust_rec(
page_zip = buf_block_get_page_zip(block);
- btr_blob_dbg_set_deleted_flag(rec, index, offsets, val);
- btr_rec_set_deleted_flag(rec, page_zip, val);
+ btr_blob_dbg_set_deleted_flag(rec, index, offsets, TRUE);
+ btr_rec_set_deleted_flag(rec, page_zip, TRUE);
trx = thr_get_trx(thr);
- if (!(flags & BTR_KEEP_SYS_FLAG)) {
- row_upd_rec_sys_fields(rec, page_zip,
- index, offsets, trx, roll_ptr);
+ if (dict_index_is_online_ddl(index)) {
+ row_log_table_delete(
+ rec, index, offsets,
+ trx_read_trx_id(row_get_trx_id_offset(index, offsets)
+ + rec));
}
- btr_cur_del_mark_set_clust_rec_log(flags, rec, index, val, trx,
+ row_upd_rec_sys_fields(rec, page_zip, index, offsets, trx, roll_ptr);
+
+ btr_cur_del_mark_set_clust_rec_log(rec, index, trx->id,
roll_ptr, mtr);
return(err);
@@ -2837,7 +2934,7 @@ btr_cur_parse_del_mark_set_sec_rec(
Sets a secondary index record delete mark to TRUE or FALSE.
@return DB_SUCCESS, DB_LOCK_WAIT, or error number */
UNIV_INTERN
-ulint
+dberr_t
btr_cur_del_mark_set_sec_rec(
/*=========================*/
ulint flags, /*!< in: locking flag */
@@ -2848,14 +2945,14 @@ btr_cur_del_mark_set_sec_rec(
{
buf_block_t* block;
rec_t* rec;
- ulint err;
+ dberr_t err;
block = btr_cur_get_block(cursor);
rec = btr_cur_get_rec(cursor);
#ifdef UNIV_DEBUG
if (btr_cur_print_record_ops && thr) {
- btr_cur_trx_report(thr_get_trx(thr), cursor->index,
+ btr_cur_trx_report(thr_get_trx(thr)->id, cursor->index,
"del mark ");
rec_print(stderr, rec, cursor->index);
}
@@ -2945,12 +3042,15 @@ positioned, but no latch on the whole tree.
@return TRUE if success, i.e., the page did not become too empty */
UNIV_INTERN
ibool
-btr_cur_optimistic_delete(
-/*======================*/
+btr_cur_optimistic_delete_func(
+/*===========================*/
btr_cur_t* cursor, /*!< in: cursor on leaf page, on the record to
delete; cursor stays valid: if deletion
succeeds, on function exit it points to the
successor of the deleted record */
+#ifdef UNIV_DEBUG
+ ulint flags, /*!< in: BTR_CREATE_FLAG or 0 */
+#endif /* UNIV_DEBUG */
mtr_t* mtr) /*!< in: mtr; if this function returns
TRUE on a leaf page of a secondary
index, the mtr must be committed
@@ -2964,6 +3064,7 @@ btr_cur_optimistic_delete(
ibool no_compress_needed;
rec_offs_init(offsets_);
+ ut_ad(flags == 0 || flags == BTR_CREATE_FLAG);
ut_ad(mtr_memo_contains(mtr, btr_cur_get_block(cursor),
MTR_MEMO_PAGE_X_FIX));
/* This is intended only for leaf page deletions */
@@ -2971,6 +3072,9 @@ btr_cur_optimistic_delete(
block = btr_cur_get_block(cursor);
ut_ad(page_is_leaf(buf_block_get_frame(block)));
+ ut_ad(!dict_index_is_online_ddl(cursor->index)
+ || dict_index_is_clust(cursor->index)
+ || (flags & BTR_CREATE_FLAG));
rec = btr_cur_get_rec(cursor);
offsets = rec_get_offsets(rec, cursor->index, offsets,
@@ -3038,7 +3142,7 @@ UNIV_INTERN
ibool
btr_cur_pessimistic_delete(
/*=======================*/
- ulint* err, /*!< out: DB_SUCCESS or DB_OUT_OF_FILE_SPACE;
+ dberr_t* err, /*!< out: DB_SUCCESS or DB_OUT_OF_FILE_SPACE;
the latter may occur because we may have
to update node pointers on upper levels,
and in the case of variable length keys
@@ -3051,6 +3155,7 @@ btr_cur_pessimistic_delete(
if compression does not occur, the cursor
stays valid: it points to successor of
deleted record on function exit */
+ ulint flags, /*!< in: BTR_CREATE_FLAG or 0 */
enum trx_rb_ctx rb_ctx, /*!< in: rollback context */
mtr_t* mtr) /*!< in: mtr */
{
@@ -3059,7 +3164,6 @@ btr_cur_pessimistic_delete(
page_zip_des_t* page_zip;
dict_index_t* index;
rec_t* rec;
- dtuple_t* node_ptr;
ulint n_extents = 0;
ulint n_reserved;
ibool success;
@@ -3072,6 +3176,10 @@ btr_cur_pessimistic_delete(
page = buf_block_get_frame(block);
index = btr_cur_get_index(cursor);
+ ut_ad(flags == 0 || flags == BTR_CREATE_FLAG);
+ ut_ad(!dict_index_is_online_ddl(index)
+ || dict_index_is_clust(index)
+ || (flags & BTR_CREATE_FLAG));
ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index),
MTR_MEMO_X_LOCK));
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
@@ -3120,13 +3228,15 @@ btr_cur_pessimistic_delete(
btr_discard_page(cursor, mtr);
- *err = DB_SUCCESS;
ret = TRUE;
goto return_after_reservations;
}
- lock_update_delete(block, rec);
+ if (flags == 0) {
+ lock_update_delete(block, rec);
+ }
+
level = btr_page_get_level(page, mtr);
if (level > 0
@@ -3155,12 +3265,12 @@ btr_cur_pessimistic_delete(
btr_node_ptr_delete(index, block, mtr);
- node_ptr = dict_index_build_node_ptr(
+ dtuple_t* node_ptr = dict_index_build_node_ptr(
index, next_rec, buf_block_get_page_no(block),
heap, level);
- btr_insert_on_non_leaf_level(index,
- level + 1, node_ptr, mtr);
+ btr_insert_on_non_leaf_level(
+ flags, index, level + 1, node_ptr, mtr);
}
}
@@ -3173,9 +3283,9 @@ btr_cur_pessimistic_delete(
ut_ad(btr_check_node_ptr(index, block, mtr));
+return_after_reservations:
*err = DB_SUCCESS;
-return_after_reservations:
mem_heap_free(heap);
if (ret == FALSE) {
@@ -3202,8 +3312,8 @@ btr_cur_add_path_info(
ulint root_height) /*!< in: root node height in tree */
{
btr_path_t* slot;
- rec_t* rec;
- page_t* page;
+ const rec_t* rec;
+ const page_t* page;
ut_a(cursor->path_arr);
@@ -3415,6 +3525,9 @@ btr_estimate_n_rows_in_range(
ibool is_n_rows_exact;
ulint i;
mtr_t mtr;
+ ib_int64_t table_n_rows;
+
+ table_n_rows = dict_table_get_n_rows(index->table);
mtr_start(&mtr);
@@ -3427,9 +3540,9 @@ btr_estimate_n_rows_in_range(
&cursor, 0,
__FILE__, __LINE__, &mtr);
} else {
- btr_cur_open_at_index_side(TRUE, index,
+ btr_cur_open_at_index_side(true, index,
BTR_SEARCH_LEAF | BTR_ESTIMATE,
- &cursor, &mtr);
+ &cursor, 0, &mtr);
}
mtr_commit(&mtr);
@@ -3445,9 +3558,9 @@ btr_estimate_n_rows_in_range(
&cursor, 0,
__FILE__, __LINE__, &mtr);
} else {
- btr_cur_open_at_index_side(FALSE, index,
+ btr_cur_open_at_index_side(false, index,
BTR_SEARCH_LEAF | BTR_ESTIMATE,
- &cursor, &mtr);
+ &cursor, 0, &mtr);
}
mtr_commit(&mtr);
@@ -3479,20 +3592,21 @@ btr_estimate_n_rows_in_range(
n_rows = n_rows * 2;
}
+ DBUG_EXECUTE_IF("bug14007649", return(n_rows););
+
/* Do not estimate the number of rows in the range
to over 1 / 2 of the estimated rows in the whole
table */
- if (n_rows > index->table->stat_n_rows / 2
- && !is_n_rows_exact) {
+ if (n_rows > table_n_rows / 2 && !is_n_rows_exact) {
- n_rows = index->table->stat_n_rows / 2;
+ n_rows = table_n_rows / 2;
/* If there are just 0 or 1 rows in the table,
then we estimate all rows are in the range */
if (n_rows == 0) {
- n_rows = index->table->stat_n_rows;
+ n_rows = table_n_rows;
}
}
@@ -3552,9 +3666,9 @@ btr_estimate_n_rows_in_range(
/*******************************************************************//**
Record the number of non_null key values in a given index for
-each n-column prefix of the index where n < dict_index_get_n_unique(index).
+each n-column prefix of the index where 1 <= n <= dict_index_get_n_unique(index).
The estimates are eventually stored in the array:
-index->stat_n_non_null_key_vals. */
+index->stat_n_non_null_key_vals[], which is indexed from 0 to n-1. */
static
void
btr_record_not_null_field_in_rec(
@@ -3565,7 +3679,7 @@ btr_record_not_null_field_in_rec(
const ulint* offsets, /*!< in: rec_get_offsets(rec, index),
its size could be for all fields or
that of "n_unique" */
- ib_int64_t* n_not_null) /*!< in/out: array to record number of
+ ib_uint64_t* n_not_null) /*!< in/out: array to record number of
not null rows for n-column prefix */
{
ulint i;
@@ -3587,11 +3701,12 @@ btr_record_not_null_field_in_rec(
/*******************************************************************//**
Estimates the number of different key values in a given index, for
-each n-column prefix of the index where n <= dict_index_get_n_unique(index).
-The estimates are stored in the array index->stat_n_diff_key_vals[] and
-the number of pages that were sampled is saved in index->stat_n_sample_sizes[].
-If innodb_stats_method is "nulls_ignored", we also record the number of
-non-null values for each prefix and store the estimates in
+each n-column prefix of the index where 1 <= n <= dict_index_get_n_unique(index).
+The estimates are stored in the array index->stat_n_diff_key_vals[] (indexed
+0..n_uniq-1) and the number of pages that were sampled is saved in
+index->stat_n_sample_sizes[].
+If innodb_stats_method is nulls_ignored, we also record the number of
+non-null values for each prefix and stored the estimates in
array index->stat_n_non_null_key_vals. */
UNIV_INTERN
void
@@ -3605,8 +3720,8 @@ btr_estimate_number_of_different_key_vals(
ulint n_cols;
ulint matched_fields;
ulint matched_bytes;
- ib_int64_t* n_diff;
- ib_int64_t* n_not_null;
+ ib_uint64_t* n_diff;
+ ib_uint64_t* n_not_null;
ibool stats_null_not_equal;
ullint n_sample_pages; /* number of pages to sample */
ulint not_empty_flag = 0;
@@ -3622,13 +3737,13 @@ btr_estimate_number_of_different_key_vals(
n_cols = dict_index_get_n_unique(index);
heap = mem_heap_create((sizeof *n_diff + sizeof *n_not_null)
- * (n_cols + 1)
+ * n_cols
+ dict_index_get_n_fields(index)
* (sizeof *offsets_rec
+ sizeof *offsets_next_rec));
- n_diff = (ib_int64_t*) mem_heap_zalloc(heap, (n_cols + 1)
- * sizeof(ib_int64_t));
+ n_diff = (ib_uint64_t*) mem_heap_zalloc(
+ heap, n_cols * sizeof(ib_int64_t));
n_not_null = NULL;
@@ -3637,8 +3752,8 @@ btr_estimate_number_of_different_key_vals(
considered equal (by setting stats_null_not_equal value) */
switch (srv_innodb_stats_method) {
case SRV_STATS_NULLS_IGNORED:
- n_not_null = (ib_int64_t*) mem_heap_zalloc(heap, (n_cols + 1)
- * sizeof *n_not_null);
+ n_not_null = (ib_uint64_t*) mem_heap_zalloc(
+ heap, n_cols * sizeof *n_not_null);
/* fall through */
case SRV_STATS_NULLS_UNEQUAL:
@@ -3689,7 +3804,7 @@ btr_estimate_number_of_different_key_vals(
offsets_rec = rec_get_offsets(rec, index, offsets_rec,
ULINT_UNDEFINED, &heap);
- if (n_not_null) {
+ if (n_not_null != NULL) {
btr_record_not_null_field_in_rec(
n_cols, offsets_rec, n_not_null);
}
@@ -3717,14 +3832,14 @@ btr_estimate_number_of_different_key_vals(
&matched_fields,
&matched_bytes);
- for (j = matched_fields + 1; j <= n_cols; j++) {
+ for (j = matched_fields; j < n_cols; j++) {
/* We add one if this index record has
a different prefix from the previous */
n_diff[j]++;
}
- if (n_not_null) {
+ if (n_not_null != NULL) {
btr_record_not_null_field_in_rec(
n_cols, offsets_next_rec, n_not_null);
}
@@ -3759,7 +3874,7 @@ btr_estimate_number_of_different_key_vals(
if (btr_page_get_prev(page, &mtr) != FIL_NULL
|| btr_page_get_next(page, &mtr) != FIL_NULL) {
- n_diff[n_cols]++;
+ n_diff[n_cols - 1]++;
}
}
@@ -3774,7 +3889,7 @@ btr_estimate_number_of_different_key_vals(
also the pages used for external storage of fields (those pages are
included in index->stat_n_leaf_pages) */
- for (j = 0; j <= n_cols; j++) {
+ for (j = 0; j < n_cols; j++) {
index->stat_n_diff_key_vals[j]
= BTR_TABLE_STATS_FROM_SAMPLE(
n_diff[j], index, n_sample_pages,
@@ -3804,7 +3919,7 @@ btr_estimate_number_of_different_key_vals(
sampled result. stat_n_non_null_key_vals[] is created
and initialized to zero in dict_index_add_to_cache(),
along with stat_n_diff_key_vals[] array */
- if (n_not_null != NULL && (j < n_cols)) {
+ if (n_not_null != NULL) {
index->stat_n_non_null_key_vals[j] =
BTR_TABLE_STATS_FROM_SAMPLE(
n_not_null[j], index, n_sample_pages,
@@ -4154,7 +4269,7 @@ The fields are stored on pages allocated from leaf node
file segment of the index tree.
@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */
UNIV_INTERN
-enum db_err
+dberr_t
btr_store_big_rec_extern_fields(
/*============================*/
dict_index_t* index, /*!< in: index of rec; the index tree
@@ -4188,7 +4303,7 @@ btr_store_big_rec_extern_fields(
z_stream c_stream;
buf_block_t** freed_pages = NULL;
ulint n_freed_pages = 0;
- enum db_err error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(rec_offs_any_extern(offsets));
@@ -4219,7 +4334,7 @@ btr_store_big_rec_extern_fields(
heap = mem_heap_create(250000);
page_zip_set_alloc(&c_stream, heap);
- err = deflateInit2(&c_stream, Z_DEFAULT_COMPRESSION,
+ err = deflateInit2(&c_stream, page_compression_level,
Z_DEFLATED, 15, 7, Z_DEFAULT_STRATEGY);
ut_a(err == Z_OK);
}
@@ -5091,6 +5206,7 @@ btr_copy_zblob_prefix(
" page %lu space %lu\n",
(ulong) fil_page_get_type(bpage->zip.data),
(ulong) page_no, (ulong) space_id);
+ ut_ad(0);
goto end_of_blob;
}
diff --git a/storage/innobase/btr/btr0pcur.cc b/storage/innobase/btr/btr0pcur.cc
index 5a67afc7e69..aceb6bd1d41 100644
--- a/storage/innobase/btr/btr0pcur.cc
+++ b/storage/innobase/btr/btr0pcur.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -133,6 +133,8 @@ btr_pcur_store_position(
ut_a(btr_page_get_next(page, mtr) == FIL_NULL);
ut_a(btr_page_get_prev(page, mtr) == FIL_NULL);
+ ut_ad(page_is_leaf(page));
+ ut_ad(page_get_page_no(page) == index->page);
cursor->old_stored = BTR_PCUR_OLD_STORED;
@@ -258,7 +260,8 @@ btr_pcur_restore_position_func(
btr_cur_open_at_index_side(
cursor->rel_pos == BTR_PCUR_BEFORE_FIRST_IN_TREE,
- index, latch_mode, btr_pcur_get_btr_cur(cursor), mtr);
+ index, latch_mode,
+ btr_pcur_get_btr_cur(cursor), 0, mtr);
cursor->latch_mode = latch_mode;
cursor->pos_state = BTR_PCUR_IS_POSITIONED;
@@ -326,13 +329,19 @@ btr_pcur_restore_position_func(
/* Save the old search mode of the cursor */
old_mode = cursor->search_mode;
- if (UNIV_LIKELY(cursor->rel_pos == BTR_PCUR_ON)) {
+ switch (cursor->rel_pos) {
+ case BTR_PCUR_ON:
mode = PAGE_CUR_LE;
- } else if (cursor->rel_pos == BTR_PCUR_AFTER) {
+ break;
+ case BTR_PCUR_AFTER:
mode = PAGE_CUR_G;
- } else {
- ut_ad(cursor->rel_pos == BTR_PCUR_BEFORE);
+ break;
+ case BTR_PCUR_BEFORE:
mode = PAGE_CUR_L;
+ break;
+ default:
+ ut_error;
+ mode = 0;
}
btr_pcur_open_with_no_init_func(index, tuple, mode, latch_mode,
@@ -341,25 +350,39 @@ btr_pcur_restore_position_func(
/* Restore the old search mode */
cursor->search_mode = old_mode;
- if (cursor->rel_pos == BTR_PCUR_ON
- && btr_pcur_is_on_user_rec(cursor)
- && 0 == cmp_dtuple_rec(tuple, btr_pcur_get_rec(cursor),
- rec_get_offsets(
- btr_pcur_get_rec(cursor), index,
- NULL, ULINT_UNDEFINED, &heap))) {
-
- /* We have to store the NEW value for the modify clock, since
- the cursor can now be on a different page! But we can retain
- the value of old_rec */
-
- cursor->block_when_stored = btr_pcur_get_block(cursor);
- cursor->modify_clock = buf_block_get_modify_clock(
- cursor->block_when_stored);
- cursor->old_stored = BTR_PCUR_OLD_STORED;
-
- mem_heap_free(heap);
-
- return(TRUE);
+ switch (cursor->rel_pos) {
+ case BTR_PCUR_ON:
+ if (btr_pcur_is_on_user_rec(cursor)
+ && !cmp_dtuple_rec(
+ tuple, btr_pcur_get_rec(cursor),
+ rec_get_offsets(btr_pcur_get_rec(cursor),
+ index, NULL,
+ ULINT_UNDEFINED, &heap))) {
+
+ /* We have to store the NEW value for
+ the modify clock, since the cursor can
+ now be on a different page! But we can
+ retain the value of old_rec */
+
+ cursor->block_when_stored =
+ btr_pcur_get_block(cursor);
+ cursor->modify_clock =
+ buf_block_get_modify_clock(
+ cursor->block_when_stored);
+ cursor->old_stored = BTR_PCUR_OLD_STORED;
+
+ mem_heap_free(heap);
+
+ return(TRUE);
+ }
+#ifdef UNIV_DEBUG
+ /* fall through */
+ case BTR_PCUR_BEFORE:
+ case BTR_PCUR_AFTER:
+ break;
+ default:
+ ut_error;
+#endif /* UNIV_DEBUG */
}
mem_heap_free(heap);
diff --git a/storage/innobase/btr/btr0sea.cc b/storage/innobase/btr/btr0sea.cc
index 7e6e2ef1cb1..dcb508a7f29 100644
--- a/storage/innobase/btr/btr0sea.cc
+++ b/storage/innobase/btr/btr0sea.cc
@@ -42,7 +42,6 @@ Created 2/17/1996 Heikki Tuuri
#include "btr0pcur.h"
#include "btr0btr.h"
#include "ha0ha.h"
-#include "srv0mon.h"
/** Flag: has the search system been enabled?
Protected by btr_search_latch. */
@@ -352,7 +351,7 @@ void
btr_search_info_update_hash(
/*========================*/
btr_search_t* info, /*!< in/out: search info */
- btr_cur_t* cursor) /*!< in: cursor which was just positioned */
+ const btr_cur_t* cursor)/*!< in: cursor which was just positioned */
{
dict_index_t* index;
ulint n_unique;
@@ -621,7 +620,7 @@ void
btr_search_info_update_slow(
/*========================*/
btr_search_t* info, /*!< in/out: search info */
- btr_cur_t* cursor) /*!< in: cursor which was just positioned */
+ btr_cur_t* cursor) /*!< in: cursor which was just positioned */
{
buf_block_t* block;
ibool build_index;
@@ -865,7 +864,7 @@ btr_search_guess_on_hash(
{
buf_pool_t* buf_pool;
buf_block_t* block;
- rec_t* rec;
+ const rec_t* rec;
ulint fold;
index_id_t index_id;
#ifdef notdefined
@@ -951,7 +950,7 @@ btr_search_guess_on_hash(
ut_ad(page_rec_is_user_rec(rec));
- btr_cur_position(index, rec, block, cursor);
+ btr_cur_position(index, (rec_t*) rec, block, cursor);
/* Check the validity of the guess within the page */
@@ -1077,6 +1076,7 @@ btr_search_drop_page_hash_index(
mem_heap_t* heap;
const dict_index_t* index;
ulint* offsets;
+ btr_search_t* info;
#ifdef UNIV_SYNC_DEBUG
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
@@ -1102,6 +1102,27 @@ retry:
}
ut_a(!dict_index_is_ibuf(index));
+#ifdef UNIV_DEBUG
+ switch (dict_index_get_online_status(index)) {
+ case ONLINE_INDEX_CREATION:
+ /* The index is being created (bulk loaded). */
+ case ONLINE_INDEX_COMPLETE:
+ /* The index has been published. */
+ case ONLINE_INDEX_ABORTED:
+ /* Either the index creation was aborted due to an
+ error observed by InnoDB (in which case there should
+ not be any adaptive hash index entries), or it was
+ completed and then flagged aborted in
+ rollback_inplace_alter_table(). */
+ break;
+ case ONLINE_INDEX_ABORTED_DROPPED:
+ /* The index should have been dropped from the tablespace
+ already, and the adaptive hash index entries should have
+ been dropped as well. */
+ ut_error;
+ }
+#endif /* UNIV_DEBUG */
+
table = btr_search_sys->hash_index;
#ifdef UNIV_SYNC_DEBUG
@@ -1196,8 +1217,9 @@ next_rec:
ha_remove_all_nodes_to_page(table, folds[i], page);
}
- ut_a(index->search_info->ref_count > 0);
- index->search_info->ref_count--;
+ info = btr_search_get_info(block->index);
+ ut_a(info->ref_count > 0);
+ info->ref_count--;
block->index = NULL;
diff --git a/storage/innobase/buf/buf0buddy.cc b/storage/innobase/buf/buf0buddy.cc
index b6774aede8e..e34216dbc8f 100644
--- a/storage/innobase/buf/buf0buddy.cc
+++ b/storage/innobase/buf/buf0buddy.cc
@@ -335,7 +335,7 @@ buf_buddy_relocate(
{
buf_page_t* bpage;
const ulint size = BUF_BUDDY_LOW << i;
- mutex_t* mutex;
+ ib_mutex_t* mutex;
ulint space;
ulint page_no;
diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc
index 28d5a472531..6efa14e6791 100644
--- a/storage/innobase/buf/buf0buf.cc
+++ b/storage/innobase/buf/buf0buf.cc
@@ -53,7 +53,6 @@ Created 11/5/1995 Heikki Tuuri
#include "page0zip.h"
#include "srv0mon.h"
#include "buf0checksum.h"
-#include "buf0dblwr.h"
/*
IMPLEMENTATION OF THE BUFFER POOL
@@ -372,10 +371,6 @@ buf_get_total_list_len(
buf_pool = buf_pool_from_array(i);
- if (!buf_pool) {
- continue;
- }
-
*LRU_len += UT_LIST_GET_LEN(buf_pool->LRU);
*free_len += UT_LIST_GET_LEN(buf_pool->free);
*flush_list_len += UT_LIST_GET_LEN(buf_pool->flush_list);
@@ -391,11 +386,10 @@ buf_get_total_list_size_in_bytes(
buf_pools_list_size_t* buf_pools_list_size) /*!< out: list sizes
in all buffer pools */
{
- ulint i;
ut_ad(buf_pools_list_size);
memset(buf_pools_list_size, 0, sizeof(*buf_pools_list_size));
- for (i = 0; i < srv_buf_pool_instances; i++) {
+ for (ulint i = 0; i < srv_buf_pool_instances; i++) {
buf_pool_t* buf_pool;
buf_pool = buf_pool_from_array(i);
@@ -427,10 +421,6 @@ buf_get_total_stat(
buf_pool = buf_pool_from_array(i);
- if (!buf_pool) {
- continue;
- }
-
buf_stat = &buf_pool->stat;
tot_stat->n_page_gets += buf_stat->n_page_gets;
tot_stat->n_pages_read += buf_stat->n_pages_read;
@@ -483,6 +473,8 @@ UNIV_INTERN
ibool
buf_page_is_corrupted(
/*==================*/
+ bool check_lsn, /*!< in: true if we need to check
+ and complain about the LSN */
const byte* read_buf, /*!< in: a database page */
ulint zip_size) /*!< in: size of compressed page;
0 for uncompressed pages */
@@ -507,14 +499,17 @@ buf_page_is_corrupted(
if (recv_lsn_checks_on) {
lsn_t current_lsn;
- if (log_peek_lsn(&current_lsn)
- && UNIV_UNLIKELY
- (current_lsn
- < mach_read_from_8(read_buf + FIL_PAGE_LSN))) {
+ /* Since we are going to reset the page LSN during the import
+ phase it makes no sense to spam the log with error messages. */
+
+ if (check_lsn
+ && log_peek_lsn(&current_lsn)
+ && current_lsn
+ < mach_read_from_8(read_buf + FIL_PAGE_LSN)) {
ut_print_timestamp(stderr);
fprintf(stderr,
- " InnoDB: Error: page %lu log sequence number"
+ " InnoDB: Error: page %lu log sequence number"
" " LSN_PF "\n"
"InnoDB: is in the future! Current system "
"log sequence number " LSN_PF ".\n"
@@ -700,6 +695,8 @@ buf_page_is_corrupted(
is added and not handled here */
}
+ DBUG_EXECUTE_IF("buf_page_is_corrupt_failure", return(TRUE); );
+
return(FALSE);
}
@@ -912,7 +909,7 @@ pfs_register_buffer_block(
PFS_MAX_BUFFER_MUTEX_LOCK_REGISTER);
for (i = 0; i < num_to_register; i++) {
- mutex_t* mutex;
+ ib_mutex_t* mutex;
rw_lock_t* rwlock;
# ifdef UNIV_PFS_MUTEX
@@ -1294,7 +1291,7 @@ buf_pool_init_instance(
SYNC_BUF_FLUSH_LIST);
for (i = BUF_FLUSH_LRU; i < BUF_FLUSH_N_TYPES; i++) {
- buf_pool->no_flush[i] = os_event_create(NULL);
+ buf_pool->no_flush[i] = os_event_create();
}
buf_pool->watch = (buf_page_t*) mem_zalloc(
@@ -1361,7 +1358,7 @@ buf_pool_free_instance(
Creates the buffer pool.
@return DB_SUCCESS if success, DB_ERROR if not enough memory or error */
UNIV_INTERN
-ulint
+dberr_t
buf_pool_init(
/*==========*/
ulint total_size, /*!< in: size of the total pool in bytes */
@@ -1758,7 +1755,7 @@ buf_pool_watch_unset(
ut_a(bpage);
if (UNIV_UNLIKELY(!buf_pool_watch_is_sentinel(buf_pool, bpage))) {
- mutex_t* mutex = buf_page_get_mutex(bpage);
+ ib_mutex_t* mutex = buf_page_get_mutex(bpage);
mutex_enter(mutex);
ut_a(bpage->buf_fix_count > 0);
@@ -1897,7 +1894,7 @@ buf_page_set_file_page_was_freed(
&hash_lock);
if (bpage) {
- mutex_t* block_mutex = buf_page_get_mutex(bpage);
+ ib_mutex_t* block_mutex = buf_page_get_mutex(bpage);
ut_ad(!buf_pool_watch_is_sentinel(buf_pool, bpage));
mutex_enter(block_mutex);
rw_lock_s_unlock(hash_lock);
@@ -1930,7 +1927,7 @@ buf_page_reset_file_page_was_freed(
bpage = buf_page_hash_get_s_locked(buf_pool, space, offset,
&hash_lock);
if (bpage) {
- mutex_t* block_mutex = buf_page_get_mutex(bpage);
+ ib_mutex_t* block_mutex = buf_page_get_mutex(bpage);
ut_ad(!buf_pool_watch_is_sentinel(buf_pool, bpage));
mutex_enter(block_mutex);
rw_lock_s_unlock(hash_lock);
@@ -1991,7 +1988,7 @@ buf_page_get_zip(
ulint offset) /*!< in: page number */
{
buf_page_t* bpage;
- mutex_t* block_mutex;
+ ib_mutex_t* block_mutex;
rw_lock_t* hash_lock;
ibool discard_attempted = FALSE;
ibool must_read;
@@ -2390,6 +2387,28 @@ buf_block_is_uncompressed(
return(buf_pointer_is_block_field_instance(buf_pool, (void*) block));
}
+#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
+/********************************************************************//**
+Return true if probe is enabled.
+@return true if probe enabled. */
+static
+bool
+buf_debug_execute_is_force_flush()
+/*==============================*/
+{
+ DBUG_EXECUTE_IF("ib_buf_force_flush", return(true); );
+
+ /* This is used during queisce testing, we want to ensure maximum
+ buffering by the change buffer. */
+
+ if (srv_ibuf_disable_background_merge) {
+ return(true);
+ }
+
+ return(false);
+}
+#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
+
/********************************************************************//**
This is the general function used to get access to a database page.
@return pointer to the block or NULL */
@@ -2416,7 +2435,7 @@ buf_page_get_gen(
ulint fix_type;
ibool must_read;
rw_lock_t* hash_lock;
- mutex_t* block_mutex;
+ ib_mutex_t* block_mutex;
buf_page_t* hash_bpage;
ulint retries = 0;
buf_pool_t* buf_pool = buf_pool_get(space, offset);
@@ -2684,11 +2703,12 @@ wait_until_unfixed:
block->page.buf_fix_count = 1;
buf_block_set_io_fix(block, BUF_IO_READ);
- rw_lock_x_lock_func(&block->lock, 0, file, line);
+ rw_lock_x_lock_inline(&block->lock, 0, file, line);
UNIV_MEM_INVALID(bpage, sizeof *bpage);
rw_lock_x_unlock(hash_lock);
+
buf_pool->n_pend_unzip++;
buf_pool_mutex_exit(buf_pool);
@@ -2701,7 +2721,10 @@ wait_until_unfixed:
/* Decompress the page while not holding
buf_pool->mutex or block->mutex. */
- ut_a(buf_zip_decompress(block, TRUE));
+ /* Page checksum verification is already done when
+ the page is read from disk. Hence page checksum
+ verification is not necessary when decompressing the page. */
+ ut_a(buf_zip_decompress(block, FALSE));
if (UNIV_LIKELY(!recv_no_ibuf_operations)) {
if (access_time) {
@@ -2748,8 +2771,9 @@ wait_until_unfixed:
UNIV_MEM_ASSERT_RW(&block->page, sizeof block->page);
#endif
#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
+
if ((mode == BUF_GET_IF_IN_POOL || mode == BUF_GET_IF_IN_POOL_OR_WATCH)
- && ibuf_debug) {
+ && (ibuf_debug || buf_debug_execute_is_force_flush())) {
/* Try to evict the block from the buffer pool, to use the
insert buffer (change buffer) as much as possible. */
@@ -2784,19 +2808,18 @@ wait_until_unfixed:
buf_pool, space, offset, fold);
}
- if (UNIV_LIKELY_NULL(block)) {
- block_mutex = buf_page_get_mutex(
- &block->page);
- /* The page entered the buffer
- pool for some reason. Try to
- evict it again. */
- mutex_enter(block_mutex);
- rw_lock_x_unlock(hash_lock);
+ rw_lock_x_unlock(hash_lock);
- goto got_block;
+ if (UNIV_LIKELY_NULL(block)) {
+ /* Either the page has been read in or
+ a watch was set on that in the window
+ where we released the buf_pool::mutex
+ and before we acquire the hash_lock
+ above. Try again. */
+ guess = block;
+ goto loop;
}
- rw_lock_x_unlock(hash_lock);
fprintf(stderr,
"innodb_change_buffering_debug evict %u %u\n",
(unsigned) space, (unsigned) offset);
@@ -2868,14 +2891,14 @@ wait_until_unfixed:
break;
case RW_S_LATCH:
- rw_lock_s_lock_func(&(block->lock), 0, file, line);
+ rw_lock_s_lock_inline(&(block->lock), 0, file, line);
fix_type = MTR_MEMO_PAGE_S_FIX;
break;
default:
ut_ad(rw_latch == RW_X_LATCH);
- rw_lock_x_lock_func(&(block->lock), 0, file, line);
+ rw_lock_x_lock_inline(&(block->lock), 0, file, line);
fix_type = MTR_MEMO_PAGE_X_FIX;
break;
@@ -2956,8 +2979,8 @@ buf_page_optimistic_get(
file, line);
fix_type = MTR_MEMO_PAGE_S_FIX;
} else {
- success = rw_lock_x_lock_func_nowait(&(block->lock),
- file, line);
+ success = rw_lock_x_lock_func_nowait_inline(&(block->lock),
+ file, line);
fix_type = MTR_MEMO_PAGE_X_FIX;
}
@@ -3079,8 +3102,8 @@ buf_page_get_known_nowait(
file, line);
fix_type = MTR_MEMO_PAGE_S_FIX;
} else {
- success = rw_lock_x_lock_func_nowait(&(block->lock),
- file, line);
+ success = rw_lock_x_lock_func_nowait_inline(&(block->lock),
+ file, line);
fix_type = MTR_MEMO_PAGE_X_FIX;
}
@@ -3181,8 +3204,8 @@ buf_page_try_get_func(
S-latch. */
fix_type = MTR_MEMO_PAGE_X_FIX;
- success = rw_lock_x_lock_func_nowait(&block->lock,
- file, line);
+ success = rw_lock_x_lock_func_nowait_inline(&block->lock,
+ file, line);
}
if (!success) {
@@ -3336,7 +3359,7 @@ UNIV_INTERN
buf_page_t*
buf_page_init_for_read(
/*===================*/
- ulint* err, /*!< out: DB_SUCCESS or DB_TABLESPACE_DELETED */
+ dberr_t* err, /*!< out: DB_SUCCESS or DB_TABLESPACE_DELETED */
ulint mode, /*!< in: BUF_READ_IBUF_PAGES_ONLY, ... */
ulint space, /*!< in: space id */
ulint zip_size,/*!< in: compressed page size, or 0 */
@@ -3544,7 +3567,7 @@ err_exit:
rw_lock_x_unlock(hash_lock);
- /* The block must be put to the LRU list, to the old blocks
+ /* The block must be put to the LRU list, to the old blocks.
The zip_size is already set into the page zip */
buf_LRU_add_block(bpage, TRUE/* to old blocks */);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
@@ -3666,6 +3689,7 @@ buf_page_create(
buf_page_set_io_fix(&block->page, BUF_IO_READ);
rw_lock_x_lock(&block->lock);
+
mutex_exit(&block->mutex);
/* buf_pool->mutex may be released and reacquired by
buf_buddy_alloc(). Thus, we must release block->mutex
@@ -3863,6 +3887,8 @@ buf_mark_space_corrupt(
BUF_IO_READ);
}
+ mutex_exit(buf_page_get_mutex(bpage));
+
/* Find the table with specified space id, and mark it corrupted */
if (dict_set_corrupted_by_space(space)) {
buf_LRU_free_one_page(bpage);
@@ -3873,7 +3899,6 @@ buf_mark_space_corrupt(
ut_ad(buf_pool->n_pend_reads > 0);
buf_pool->n_pend_reads--;
- mutex_exit(buf_page_get_mutex(bpage));
buf_pool_mutex_exit(buf_pool);
return(ret);
@@ -3882,9 +3907,9 @@ buf_mark_space_corrupt(
/********************************************************************//**
Completes an asynchronous read or write request of a file page to or from
the buffer pool.
-@return TRUE if successful */
+@return true if successful */
UNIV_INTERN
-ibool
+bool
buf_page_io_complete(
/*=================*/
buf_page_t* bpage) /*!< in: pointer to the block in question */
@@ -3966,8 +3991,20 @@ buf_page_io_complete(
/* From version 3.23.38 up we store the page checksum
to the 4 first bytes of the page end lsn field */
- if (buf_page_is_corrupted(frame,
+ if (buf_page_is_corrupted(true, frame,
buf_page_get_zip_size(bpage))) {
+
+ /* Not a real corruption if it was triggered by
+ error injection */
+ DBUG_EXECUTE_IF("buf_page_is_corrupt_failure",
+ if (bpage->space > TRX_SYS_SPACE
+ && buf_mark_space_corrupt(bpage)) {
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Simulated page corruption");
+ return(true);
+ }
+ goto page_not_corrupt;
+ ;);
corrupt:
fprintf(stderr,
"InnoDB: Database page corruption on disk"
@@ -4011,7 +4048,7 @@ corrupt:
table as corrupted instead of crashing server */
if (bpage->space > TRX_SYS_SPACE
&& buf_mark_space_corrupt(bpage)) {
- return(FALSE);
+ return(false);
} else {
fputs("InnoDB: Ending processing"
" because of"
@@ -4022,6 +4059,9 @@ corrupt:
}
}
+ DBUG_EXECUTE_IF("buf_page_is_corrupt_failure",
+ page_not_corrupt: bpage = bpage; );
+
if (recv_recovery_is_on()) {
/* Pages must be uncompressed for crash recovery. */
ut_a(uncompressed);
@@ -4104,7 +4144,7 @@ corrupt:
mutex_exit(buf_page_get_mutex(bpage));
buf_pool_mutex_exit(buf_pool);
- return(TRUE);
+ return(true);
}
/*********************************************************************//**
@@ -5132,9 +5172,7 @@ void
buf_refresh_io_stats_all(void)
/*==========================*/
{
- ulint i;
-
- for (i = 0; i < srv_buf_pool_instances; i++) {
+ for (ulint i = 0; i < srv_buf_pool_instances; i++) {
buf_pool_t* buf_pool;
buf_pool = buf_pool_from_array(i);
@@ -5151,9 +5189,7 @@ ibool
buf_all_freed(void)
/*===============*/
{
- ulint i;
-
- for (i = 0; i < srv_buf_pool_instances; i++) {
+ for (ulint i = 0; i < srv_buf_pool_instances; i++) {
buf_pool_t* buf_pool;
buf_pool = buf_pool_from_array(i);
diff --git a/storage/innobase/buf/buf0dblwr.cc b/storage/innobase/buf/buf0dblwr.cc
index ad6ef7c4cef..fb853fe1543 100644
--- a/storage/innobase/buf/buf0dblwr.cc
+++ b/storage/innobase/buf/buf0dblwr.cc
@@ -25,16 +25,16 @@ Created 2011/12/19
#include "buf0dblwr.h"
+#ifdef UNIV_NONINL
+#include "buf0buf.ic"
+#endif
+
#include "buf0buf.h"
-#include "buf0lru.h"
-#include "buf0flu.h"
#include "buf0checksum.h"
#include "srv0start.h"
#include "srv0srv.h"
#include "page0zip.h"
#include "trx0sys.h"
-#include "page0page.h"
-#include "mtr0log.h"
#ifndef UNIV_HOTBACKUP
@@ -195,22 +195,20 @@ start_again:
return;
}
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Doublewrite buffer not found:"
- " creating new\n");
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Doublewrite buffer not found: creating new");
if (buf_pool_get_curr_size()
< ((2 * TRX_SYS_DOUBLEWRITE_BLOCK_SIZE
+ FSP_EXTENT_SIZE / 2 + 100)
* UNIV_PAGE_SIZE)) {
- fprintf(stderr,
- "InnoDB: Cannot create doublewrite buffer:"
- " you must\n"
- "InnoDB: increase your buffer pool size.\n"
- "InnoDB: Cannot continue operation.\n");
- exit(1);
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Cannot create doublewrite buffer: you must "
+ "increase your buffer pool size. Cannot continue "
+ "operation.");
+
+ exit(EXIT_FAILURE);
}
block2 = fseg_create(TRX_SYS_SPACE, TRX_SYS_PAGE_NO,
@@ -223,16 +221,15 @@ start_again:
buf_block_dbg_add_level(block2, SYNC_NO_ORDER_CHECK);
if (block2 == NULL) {
- fprintf(stderr,
- "InnoDB: Cannot create doublewrite buffer:"
- " you must\n"
- "InnoDB: increase your tablespace size.\n"
- "InnoDB: Cannot continue operation.\n");
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Cannot create doublewrite buffer: you must "
+ "increase your tablespace size. "
+ "Cannot continue operation.");
/* We exit without committing the mtr to prevent
its modifications to the database getting to disk */
- exit(1);
+ exit(EXIT_FAILURE);
}
fseg_header = doublewrite + TRX_SYS_DOUBLEWRITE_FSEG;
@@ -243,15 +240,12 @@ start_again:
new_block = fseg_alloc_free_page(
fseg_header, prev_page_no + 1, FSP_UP, &mtr);
if (new_block == NULL) {
- fprintf(stderr,
- "InnoDB: Cannot create doublewrite"
- " buffer: you must\n"
- "InnoDB: increase your"
- " tablespace size.\n"
- "InnoDB: Cannot continue operation.\n"
- );
-
- exit(1);
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Cannot create doublewrite buffer: you must "
+ "increase your tablespace size. "
+ "Cannot continue operation.");
+
+ exit(EXIT_FAILURE);
}
/* We read the allocated pages to the buffer pool;
@@ -331,8 +325,7 @@ start_again:
/* Remove doublewrite pages from LRU */
buf_pool_invalidate();
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Doublewrite buffer created\n");
+ ib_logf(IB_LOG_LEVEL_INFO, "Doublewrite buffer created");
goto start_again;
}
@@ -391,7 +384,7 @@ buf_dblwr_init_or_restore_pages(
}
if (mach_read_from_4(doublewrite + TRX_SYS_DOUBLEWRITE_SPACE_ID_STORED)
- != TRX_SYS_DOUBLEWRITE_SPACE_ID_STORED_N) {
+ != TRX_SYS_DOUBLEWRITE_SPACE_ID_STORED_N) {
/* We are upgrading from a version < 4.1.x to a version where
multiple tablespaces are supported. We must reset the space id
@@ -401,9 +394,8 @@ buf_dblwr_init_or_restore_pages(
reset_space_ids = TRUE;
- fprintf(stderr,
- "InnoDB: Resetting space id's in the"
- " doublewrite buffer\n");
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Resetting space id's in the doublewrite buffer");
}
/* Read the pages from the doublewrite buffer to memory */
@@ -459,12 +451,11 @@ buf_dblwr_init_or_restore_pages(
} else if (!fil_check_adress_in_tablespace(space_id,
page_no)) {
- fprintf(stderr,
- "InnoDB: Warning: a page in the"
- " doublewrite buffer is not within space\n"
- "InnoDB: bounds; space id %lu"
- " page number %lu, page %lu in"
- " doublewrite buf.\n",
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "A page in the doublewrite buffer is not "
+ "within space bounds; space id %lu "
+ "page number %lu, page %lu in "
+ "doublewrite buf.",
(ulong) space_id, (ulong) page_no, (ulong) i);
} else if (space_id == TRX_SYS_SPACE
@@ -489,8 +480,7 @@ buf_dblwr_init_or_restore_pages(
/* Check if the page is corrupt */
- if (UNIV_UNLIKELY
- (buf_page_is_corrupted(read_buf, zip_size))) {
+ if (buf_page_is_corrupted(true, read_buf, zip_size)) {
fprintf(stderr,
"InnoDB: Warning: database page"
@@ -501,7 +491,8 @@ buf_dblwr_init_or_restore_pages(
" the doublewrite buffer.\n",
(ulong) space_id, (ulong) page_no);
- if (buf_page_is_corrupted(page, zip_size)) {
+ if (buf_page_is_corrupted(true,
+ page, zip_size)) {
fprintf(stderr,
"InnoDB: Dump of the page:\n");
buf_page_print(
@@ -538,9 +529,10 @@ buf_dblwr_init_or_restore_pages(
zip_size, page_no, 0,
zip_size ? zip_size : UNIV_PAGE_SIZE,
page, NULL);
- fprintf(stderr,
- "InnoDB: Recovered the page from"
- " the doublewrite buffer.\n");
+
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Recovered the page from"
+ " the doublewrite buffer.");
}
}
@@ -595,6 +587,7 @@ buf_dblwr_update(void)
ut_ad(buf_dblwr->batch_running);
ut_ad(buf_dblwr->b_reserved > 0);
+ ut_ad(buf_dblwr->b_reserved <= buf_dblwr->first_free);
buf_dblwr->b_reserved--;
if (buf_dblwr->b_reserved == 0) {
@@ -705,23 +698,29 @@ static
void
buf_dblwr_write_block_to_datafile(
/*==============================*/
- const buf_block_t* block) /*!< in: block to write */
+ const buf_page_t* bpage) /*!< in: page to write */
{
- ut_a(block);
- ut_a(buf_page_in_file(&block->page));
+ ut_a(bpage);
+ ut_a(buf_page_in_file(bpage));
- if (block->page.zip.data) {
+ /* Increment the counter of I/O operations used
+ for selecting LRU policy. */
+ buf_LRU_stat_inc_io();
+
+ if (bpage->zip.data) {
fil_io(OS_FILE_WRITE | OS_AIO_SIMULATED_WAKE_LATER,
- FALSE, buf_page_get_space(&block->page),
- buf_page_get_zip_size(&block->page),
- buf_page_get_page_no(&block->page), 0,
- buf_page_get_zip_size(&block->page),
- (void*) block->page.zip.data,
- (void*) block);
-
- goto exit;
+ FALSE, buf_page_get_space(bpage),
+ buf_page_get_zip_size(bpage),
+ buf_page_get_page_no(bpage), 0,
+ buf_page_get_zip_size(bpage),
+ (void*) bpage->zip.data,
+ (void*) bpage);
+
+ return;
}
+
+ const buf_block_t* block = (buf_block_t*) bpage;
ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
buf_dblwr_check_page_lsn(block->frame);
@@ -729,11 +728,6 @@ buf_dblwr_write_block_to_datafile(
FALSE, buf_block_get_space(block), 0,
buf_block_get_page_no(block), 0, UNIV_PAGE_SIZE,
(void*) block->frame, (void*) block);
-
-exit:
- /* Increment the counter of I/O operations used
- for selecting LRU policy. */
- buf_LRU_stat_inc_io();
}
/********************************************************************//**
@@ -748,9 +742,8 @@ buf_dblwr_flush_buffered_writes(void)
/*=================================*/
{
byte* write_buf;
+ ulint first_free;
ulint len;
- ulint len2;
- ulint i;
if (!srv_use_doublewrite_buf || buf_dblwr == NULL) {
/* Sync the writes to the disk. */
@@ -782,10 +775,12 @@ try_again:
}
ut_a(!buf_dblwr->batch_running);
+ ut_ad(buf_dblwr->first_free == buf_dblwr->b_reserved);
/* Disallow anyone else to post to doublewrite buffer or to
start another batch of flushing. */
buf_dblwr->batch_running = TRUE;
+ first_free = buf_dblwr->first_free;
/* Now safe to release the mutex. Note that though no other
thread is allowed to post to the doublewrite batch flushing
@@ -795,7 +790,7 @@ try_again:
write_buf = buf_dblwr->write_buf;
- for (len2 = 0, i = 0;
+ for (ulint len2 = 0, i = 0;
i < buf_dblwr->first_free;
len2 += UNIV_PAGE_SIZE, i++) {
@@ -845,8 +840,8 @@ try_again:
flush:
/* increment the doublewrite flushed pages counter */
- srv_dblwr_pages_written += buf_dblwr->first_free;
- srv_dblwr_writes++;
+ srv_stats.dblwr_pages_written.add(buf_dblwr->first_free);
+ srv_stats.dblwr_writes.inc();
/* Now flush the doublewrite buffer data to disk */
fil_flush(TRX_SYS_SPACE);
@@ -855,11 +850,21 @@ flush:
and in recovery we will find them in the doublewrite buffer
blocks. Next do the writes to the intended positions. */
- for (i = 0; i < buf_dblwr->first_free; i++) {
- const buf_block_t* block = (buf_block_t*)
- buf_dblwr->buf_block_arr[i];
-
- buf_dblwr_write_block_to_datafile(block);
+ /* Up to this point first_free and buf_dblwr->first_free are
+ same because we have set the buf_dblwr->batch_running flag
+ disallowing any other thread to post any request but we
+ can't safely access buf_dblwr->first_free in the loop below.
+ This is so because it is possible that after we are done with
+ the last iteration and before we terminate the loop, the batch
+ gets finished in the IO helper thread and another thread posts
+ a new batch setting buf_dblwr->first_free to a higher value.
+ If this happens and we are using buf_dblwr->first_free in the
+ loop termination condition then we'll end up dispatching
+ the same block twice from two different threads. */
+ ut_ad(first_free == buf_dblwr->first_free);
+ for (ulint i = 0; i < first_free; i++) {
+ buf_dblwr_write_block_to_datafile(
+ buf_dblwr->buf_block_arr[i]);
}
/* Wake possible simulated aio thread to actually post the
@@ -935,6 +940,8 @@ try_again:
buf_dblwr->first_free++;
buf_dblwr->b_reserved++;
+ ut_ad(!buf_dblwr->batch_running);
+ ut_ad(buf_dblwr->first_free == buf_dblwr->b_reserved);
ut_ad(buf_dblwr->b_reserved <= srv_doublewrite_batch_size);
if (buf_dblwr->first_free == srv_doublewrite_batch_size) {
@@ -1065,7 +1072,7 @@ retry:
/* We know that the write has been flushed to disk now
and during recovery we will find it in the doublewrite buffer
blocks. Next do the write to the intended position. */
- buf_dblwr_write_block_to_datafile((buf_block_t*) bpage);
+ buf_dblwr_write_block_to_datafile(bpage);
/* Sync the writes to the disk. */
buf_flush_sync_datafiles();
@@ -1077,8 +1084,8 @@ retry:
buf_dblwr->in_use[i] = FALSE;
/* increment the doublewrite flushed pages counter */
- srv_dblwr_pages_written += buf_dblwr->first_free;
- srv_dblwr_writes++;
+ srv_stats.dblwr_pages_written.inc();
+ srv_stats.dblwr_writes.inc();
mutex_exit(&(buf_dblwr->mutex));
diff --git a/storage/innobase/buf/buf0dump.cc b/storage/innobase/buf/buf0dump.cc
index 27757241c3e..467f817a2d1 100644
--- a/storage/innobase/buf/buf0dump.cc
+++ b/storage/innobase/buf/buf0dump.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2011, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2011, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -23,14 +23,14 @@ Implements a buffer pool dump/load.
Created April 08, 2011 Vasil Dimov
*******************************************************/
+#include "univ.i"
+
#include <stdarg.h> /* va_* */
#include <string.h> /* strerror() */
-#include "univ.i"
-
#include "buf0buf.h" /* buf_pool_mutex_enter(), srv_buf_pool_instances */
#include "buf0dump.h"
-#include "db0err.h" /* enum db_err */
+#include "db0err.h"
#include "dict0dict.h" /* dict_operation_lock */
#include "os0file.h" /* OS_FILE_MAX_PATH */
#include "os0sync.h" /* os_event* */
@@ -40,7 +40,6 @@ Created April 08, 2011 Vasil Dimov
#include "sync0rw.h" /* rw_lock_s_lock() */
#include "ut0byte.h" /* ut_ull_create() */
#include "ut0sort.h" /* UT_SORT_FUNCTION_BODY */
-#include "buf0rea.h" /* buf_read_page_async() */
enum status_severity {
STATUS_INFO,
@@ -579,6 +578,8 @@ DECLARE_THREAD(buf_dump_thread)(
void* arg __attribute__((unused))) /*!< in: a dummy parameter
required by os_thread_create */
{
+ ut_ad(!srv_read_only_mode);
+
srv_buf_dump_thread_active = TRUE;
buf_dump_status(STATUS_INFO, "not started");
diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc
index 577878ef964..542c1669667 100644
--- a/storage/innobase/buf/buf0flu.cc
+++ b/storage/innobase/buf/buf0flu.cc
@@ -25,6 +25,10 @@ Created 11/11/1995 Heikki Tuuri
#include "buf0flu.h"
+#ifdef UNIV_NONINL
+#include "buf0flu.ic"
+#endif
+
#include "buf0buf.h"
#include "buf0checksum.h"
#include "srv0start.h"
@@ -44,39 +48,6 @@ Created 11/11/1995 Heikki Tuuri
#include "srv0mon.h"
#include "mysql/plugin.h"
#include "mysql/service_thd_wait.h"
-#include "buf0dblwr.h"
-
-#ifdef UNIV_NONINL
-#include "buf0flu.ic"
-#endif
-
-/**********************************************************************
-These statistics are generated for heuristics used in estimating the
-rate at which we should flush the dirty blocks to avoid bursty IO
-activity. Note that the rate of flushing not only depends on how many
-dirty pages we have in the buffer pool but it is also a fucntion of
-how much redo the workload is generating and at what rate. */
-/* @{ */
-
-/** Number of intervals for which we keep the history of these stats.
-Each interval is 1 second, defined by the rate at which
-srv_error_monitor_thread() calls buf_flush_stat_update(). */
-#define BUF_FLUSH_STAT_N_INTERVAL 20
-
-/** Sampled values buf_flush_stat_cur.
-Not protected by any mutex. Updated by buf_flush_stat_update(). */
-static buf_flush_stat_t buf_flush_stat_arr[BUF_FLUSH_STAT_N_INTERVAL];
-
-/** Cursor to buf_flush_stat_arr[]. Updated in a round-robin fashion. */
-static ulint buf_flush_stat_arr_ind;
-
-/** Values at start of the current interval. Reset by
-buf_flush_stat_update(). */
-static buf_flush_stat_t buf_flush_stat_cur;
-
-/** Running sum of past values of buf_flush_stat_cur.
-Updated by buf_flush_stat_update(). Not protected by any mutex. */
-static buf_flush_stat_t buf_flush_stat_sum;
/** Number of pages flushed through non flush_list flushes. */
static ulint buf_lru_flush_page_count = 0;
@@ -114,9 +85,8 @@ incr_flush_list_size_in_bytes(
buf_block_t* block, /*!< in: control block */
buf_pool_t* buf_pool) /*!< in: buffer pool instance */
{
- ulint zip_size;
ut_ad(buf_flush_list_mutex_own(buf_pool));
- zip_size = page_zip_get_size(&block->page.zip);
+ ulint zip_size = page_zip_get_size(&block->page.zip);
buf_pool->stat.flush_list_bytes += zip_size ? zip_size : UNIV_PAGE_SIZE;
ut_ad(buf_pool->stat.flush_list_bytes <= buf_pool->curr_pool_size);
}
@@ -455,8 +425,6 @@ buf_flush_insert_sorted_into_flush_list(
prev_b, &block->page);
}
- MONITOR_INC(MONITOR_PAGE_INFLUSH);
-
incr_flush_list_size_in_bytes(block, buf_pool);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
@@ -606,8 +574,6 @@ buf_flush_remove(
ut_a(buf_flush_validate_skip(buf_pool));
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
- MONITOR_DEC(MONITOR_PAGE_INFLUSH);
-
buf_flush_list_mutex_exit(buf_pool);
}
@@ -630,7 +596,7 @@ buf_flush_relocate_on_flush_list(
buf_page_t* dpage) /*!< in/out: destination block */
{
buf_page_t* prev;
- buf_page_t* prev_b = NULL;
+ buf_page_t* prev_b = NULL;
buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
ut_ad(buf_pool_mutex_own(buf_pool));
@@ -734,6 +700,27 @@ buf_flush_write_complete(
#endif /* !UNIV_HOTBACKUP */
/********************************************************************//**
+Calculate the checksum of a page from compressed table and update the page. */
+UNIV_INTERN
+void
+buf_flush_update_zip_checksum(
+/*==========================*/
+ buf_frame_t* page, /*!< in/out: Page to update */
+ ulint zip_size, /*!< in: Compressed page size */
+ lsn_t lsn) /*!< in: Lsn to stamp on the page */
+{
+ ut_a(zip_size > 0);
+
+ ib_uint32_t checksum = page_zip_calc_checksum(
+ page, zip_size,
+ static_cast<srv_checksum_algorithm_t>(srv_checksum_algorithm));
+
+ mach_write_to_8(page + FIL_PAGE_LSN, lsn);
+ memset(page + FIL_PAGE_FILE_FLUSH_LSN, 0, 8);
+ mach_write_to_4(page + FIL_PAGE_SPACE_OR_CHKSUM, checksum);
+}
+
+/********************************************************************//**
Initializes a page for writing to the tablespace. */
UNIV_INTERN
void
@@ -771,17 +758,10 @@ buf_flush_init_for_writing(
case FIL_PAGE_TYPE_ZBLOB:
case FIL_PAGE_TYPE_ZBLOB2:
case FIL_PAGE_INDEX:
- checksum = page_zip_calc_checksum(
- page_zip->data, zip_size,
- static_cast<srv_checksum_algorithm_t>(
- srv_checksum_algorithm));
-
- mach_write_to_8(page_zip->data
- + FIL_PAGE_LSN, newest_lsn);
- memset(page_zip->data + FIL_PAGE_FILE_FLUSH_LSN, 0, 8);
- mach_write_to_4(page_zip->data
- + FIL_PAGE_SPACE_OR_CHKSUM,
- checksum);
+
+ buf_flush_update_zip_checksum(
+ page_zip->data, zip_size, newest_lsn);
+
return;
}
@@ -889,7 +869,7 @@ buf_flush_write_block_low(
#endif
#ifdef UNIV_LOG_DEBUG
- static ibool univ_log_debug_warned;
+ static ibool univ_log_debug_warned;
#endif /* UNIV_LOG_DEBUG */
ut_ad(buf_page_in_file(bpage));
@@ -973,15 +953,15 @@ os_aio_simulated_wake_handler_threads after we have posted a batch of
writes! NOTE: buf_pool->mutex and buf_page_get_mutex(bpage) must be
held upon entering this function, and they will be released by this
function. */
-static
+UNIV_INTERN
void
buf_flush_page(
/*===========*/
buf_pool_t* buf_pool, /*!< in: buffer pool instance */
buf_page_t* bpage, /*!< in: buffer control block */
- enum buf_flush flush_type) /*!< in: type of flush */
+ buf_flush flush_type) /*!< in: type of flush */
{
- mutex_t* block_mutex;
+ ib_mutex_t* block_mutex;
ibool is_uncompressed;
ut_ad(flush_type < BUF_FLUSH_N_TYPES);
@@ -1115,6 +1095,56 @@ buf_flush_page_try(
}
# endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
/***********************************************************//**
+Check the page is in buffer pool and can be flushed.
+@return true if the page can be flushed. */
+static
+bool
+buf_flush_check_neighbor(
+/*=====================*/
+ ulint space, /*!< in: space id */
+ ulint offset, /*!< in: page offset */
+ enum buf_flush flush_type) /*!< in: BUF_FLUSH_LRU or
+ BUF_FLUSH_LIST */
+{
+ buf_page_t* bpage;
+ buf_pool_t* buf_pool = buf_pool_get(space, offset);
+ bool ret;
+
+ ut_ad(flush_type == BUF_FLUSH_LRU
+ || flush_type == BUF_FLUSH_LIST);
+
+ buf_pool_mutex_enter(buf_pool);
+
+ /* We only want to flush pages from this buffer pool. */
+ bpage = buf_page_hash_get(buf_pool, space, offset);
+
+ if (!bpage) {
+
+ buf_pool_mutex_exit(buf_pool);
+ return(false);
+ }
+
+ ut_a(buf_page_in_file(bpage));
+
+ /* We avoid flushing 'non-old' blocks in an LRU flush,
+ because the flushed blocks are soon freed */
+
+ ret = false;
+ if (flush_type != BUF_FLUSH_LRU || buf_page_is_old(bpage)) {
+ ib_mutex_t* block_mutex = buf_page_get_mutex(bpage);
+
+ mutex_enter(block_mutex);
+ if (buf_flush_ready_for_flush(bpage, flush_type)) {
+ ret = true;
+ }
+ mutex_exit(block_mutex);
+ }
+ buf_pool_mutex_exit(buf_pool);
+
+ return(ret);
+}
+
+/***********************************************************//**
Flushes to disk all flushable pages within the flush area.
@return number of pages flushed */
static
@@ -1139,7 +1169,7 @@ buf_flush_try_neighbors(
ut_ad(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST);
if (UT_LIST_GET_LEN(buf_pool->LRU) < BUF_LRU_OLD_MIN_LEN
- || !srv_flush_neighbors) {
+ || srv_flush_neighbors == 0) {
/* If there is little space or neighbor flushing is
not enabled then just flush the victim. */
low = offset;
@@ -1157,6 +1187,30 @@ buf_flush_try_neighbors(
low = (offset / buf_flush_area) * buf_flush_area;
high = (offset / buf_flush_area + 1) * buf_flush_area;
+
+ if (srv_flush_neighbors == 1) {
+ /* adjust 'low' and 'high' to limit
+ for contiguous dirty area */
+ if (offset > low) {
+ for (i = offset - 1;
+ i >= low
+ && buf_flush_check_neighbor(
+ space, i, flush_type);
+ i--) {
+ /* do nothing */
+ }
+ low = i + 1;
+ }
+
+ for (i = offset + 1;
+ i < high
+ && buf_flush_check_neighbor(
+ space, i, flush_type);
+ i++) {
+ /* do nothing */
+ }
+ high = i;
+ }
}
/* fprintf(stderr, "Flush area: low %lu high %lu\n", low, high); */
@@ -1205,7 +1259,7 @@ buf_flush_try_neighbors(
if (flush_type != BUF_FLUSH_LRU
|| i == offset
|| buf_page_is_old(bpage)) {
- mutex_t* block_mutex = buf_page_get_mutex(bpage);
+ ib_mutex_t* block_mutex = buf_page_get_mutex(bpage);
mutex_enter(block_mutex);
@@ -1264,7 +1318,7 @@ buf_flush_page_and_try_neighbors(
ulint* count) /*!< in/out: number of pages
flushed */
{
- mutex_t* block_mutex;
+ ib_mutex_t* block_mutex;
ibool flushed = FALSE;
#ifdef UNIV_DEBUG
buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
@@ -1398,7 +1452,7 @@ buf_flush_LRU_list_batch(
&& free_len < srv_LRU_scan_depth
&& lru_len > BUF_LRU_MIN_LEN) {
- mutex_t* block_mutex = buf_page_get_mutex(bpage);
+ ib_mutex_t* block_mutex = buf_page_get_mutex(bpage);
ibool evict;
mutex_enter(block_mutex);
@@ -1600,8 +1654,7 @@ NOTE 1: in the case of an LRU flush the calling thread may own latches to
pages: to avoid deadlocks, this function must be written so that it cannot
end up waiting for these latches! NOTE 2: in the case of a flush list flush,
the calling thread is not allowed to own any latches on pages!
-@return number of blocks for which the write request was queued;
-ULINT_UNDEFINED if there was a flush of the same type already running */
+@return number of blocks for which the write request was queued */
static
ulint
buf_flush_batch(
@@ -1645,8 +1698,6 @@ buf_flush_batch(
buf_pool_mutex_exit(buf_pool);
- buf_dblwr_flush_buffered_writes();
-
#ifdef UNIV_DEBUG
if (buf_debug_prints && count > 0) {
fprintf(stderr, flush_type == BUF_FLUSH_LRU
@@ -1656,8 +1707,6 @@ buf_flush_batch(
}
#endif /* UNIV_DEBUG */
- srv_buf_pool_flushed += count;
-
return(count);
}
@@ -1683,14 +1732,7 @@ buf_flush_common(
}
#endif /* UNIV_DEBUG */
- srv_buf_pool_flushed += page_count;
-
- if (flush_type == BUF_FLUSH_LRU) {
- /* We keep track of all flushes happening as part of LRU
- flush. When estimating the desired rate at which flush_list
- should be flushed we factor in this value. */
- buf_lru_flush_page_count += page_count;
- }
+ srv_stats.buf_pool_flushed.add(page_count);
}
/******************************************************************//**
@@ -1774,7 +1816,7 @@ buf_flush_wait_batch_end(
}
} else {
thd_wait_begin(NULL, THD_WAIT_DISKIO);
- os_event_wait(buf_pool->no_flush[type]);
+ os_event_wait(buf_pool->no_flush[type]);
thd_wait_end(NULL);
}
}
@@ -1784,21 +1826,28 @@ This utility flushes dirty blocks from the end of the LRU list and also
puts replaceable clean pages from the end of the LRU list to the free
list.
NOTE: The calling thread is not allowed to own any latches on pages!
-@return number of blocks for which the write request was queued;
-ULINT_UNDEFINED if there was a flush of the same type already running */
+@return true if a batch was queued successfully. false if another batch
+of same type was already running. */
static
-ulint
+bool
buf_flush_LRU(
/*==========*/
buf_pool_t* buf_pool, /*!< in/out: buffer pool instance */
- ulint min_n) /*!< in: wished minimum mumber of blocks
+ ulint min_n, /*!< in: wished minimum mumber of blocks
flushed (it is not guaranteed that the
actual number is that big, though) */
+ ulint* n_processed) /*!< out: the number of pages
+ which were processed is passed
+ back to caller. Ignored if NULL */
{
ulint page_count;
+ if (n_processed) {
+ *n_processed = 0;
+ }
+
if (!buf_flush_start(buf_pool, BUF_FLUSH_LRU)) {
- return(ULINT_UNDEFINED);
+ return(false);
}
page_count = buf_flush_batch(buf_pool, BUF_FLUSH_LRU, min_n, 0);
@@ -1807,31 +1856,43 @@ buf_flush_LRU(
buf_flush_common(BUF_FLUSH_LRU, page_count);
- return(page_count);
+ if (n_processed) {
+ *n_processed = page_count;
+ }
+
+ return(true);
}
/*******************************************************************//**
This utility flushes dirty blocks from the end of the flush list of
all buffer pool instances.
NOTE: The calling thread is not allowed to own any latches on pages!
-@return number of blocks for which the write request was queued;
-ULINT_UNDEFINED if there was a flush of the same type already running */
+@return true if a batch was queued successfully for each buffer pool
+instance. false if another batch of same type was already running in
+at least one of the buffer pool instance */
UNIV_INTERN
-ulint
+bool
buf_flush_list(
/*===========*/
ulint min_n, /*!< in: wished minimum mumber of blocks
flushed (it is not guaranteed that the
actual number is that big, though) */
- lsn_t lsn_limit) /*!< in the case BUF_FLUSH_LIST all
+ lsn_t lsn_limit, /*!< in the case BUF_FLUSH_LIST all
blocks whose oldest_modification is
smaller than this should be flushed
(if their number does not exceed
min_n), otherwise ignored */
+ ulint* n_processed) /*!< out: the number of pages
+ which were processed is passed
+ back to caller. Ignored if NULL */
+
{
ulint i;
- ulint total_page_count = 0;
- ibool skipped = FALSE;
+ bool success = true;
+
+ if (n_processed) {
+ *n_processed = 0;
+ }
if (min_n != ULINT_MAX) {
/* Ensure that flushing is spread evenly amongst the
@@ -1860,7 +1921,7 @@ buf_flush_list(
pools based on the assumption that it will
help in the retry which will follow the
failure. */
- skipped = TRUE;
+ success = false;
continue;
}
@@ -1872,7 +1933,9 @@ buf_flush_list(
buf_flush_common(BUF_FLUSH_LIST, page_count);
- total_page_count += page_count;
+ if (n_processed) {
+ *n_processed += page_count;
+ }
if (page_count) {
MONITOR_INC_VALUE_CUMULATIVE(
@@ -1883,8 +1946,7 @@ buf_flush_list(
}
}
- return(lsn_limit != LSN_MAX && skipped
- ? ULINT_UNDEFINED : total_page_count);
+ return(success);
}
/******************************************************************//**
@@ -1903,7 +1965,7 @@ buf_flush_single_page_from_LRU(
{
ulint scanned;
buf_page_t* bpage;
- mutex_t* block_mutex;
+ ib_mutex_t* block_mutex;
ibool freed;
ibool evict_zip;
@@ -1981,128 +2043,6 @@ buf_flush_single_page_from_LRU(
return(freed);
}
-/*********************************************************************
-Update the historical stats that we are collecting for flush rate
-heuristics at the end of each interval.
-Flush rate heuristic depends on (a) rate of redo log generation and
-(b) the rate at which LRU flush is happening. */
-UNIV_INTERN
-void
-buf_flush_stat_update(void)
-/*=======================*/
-{
- buf_flush_stat_t* item;
- lsn_t lsn_diff;
- lsn_t lsn;
- ulint n_flushed;
-
- lsn = log_get_lsn();
- if (buf_flush_stat_cur.redo == 0) {
- /* First time around. Just update the current LSN
- and return. */
- buf_flush_stat_cur.redo = lsn;
- return;
- }
-
- item = &buf_flush_stat_arr[buf_flush_stat_arr_ind];
-
- /* values for this interval */
- lsn_diff = lsn - buf_flush_stat_cur.redo;
- n_flushed = buf_lru_flush_page_count
- - buf_flush_stat_cur.n_flushed;
-
- /* add the current value and subtract the obsolete entry. */
- buf_flush_stat_sum.redo += lsn_diff - item->redo;
- buf_flush_stat_sum.n_flushed += n_flushed - item->n_flushed;
-
- /* put current entry in the array. */
- item->redo = lsn_diff;
- item->n_flushed = n_flushed;
-
- /* update the index */
- buf_flush_stat_arr_ind++;
- buf_flush_stat_arr_ind %= BUF_FLUSH_STAT_N_INTERVAL;
-
- /* reset the current entry. */
- buf_flush_stat_cur.redo = lsn;
- buf_flush_stat_cur.n_flushed = buf_lru_flush_page_count;
-}
-
-/*********************************************************************
-Determines the fraction of dirty pages that need to be flushed based
-on the speed at which we generate redo log. Note that if redo log
-is generated at a significant rate without corresponding increase
-in the number of dirty pages (for example, an in-memory workload)
-it can cause IO bursts of flushing. This function implements heuristics
-to avoid this burstiness.
-@return number of dirty pages to be flushed / second */
-static
-ulint
-buf_flush_get_desired_flush_rate(void)
-/*==================================*/
-{
- ulint i;
- lsn_t redo_avg;
- ulint n_dirty = 0;
- ib_uint64_t n_flush_req;
- ib_uint64_t lru_flush_avg;
- lsn_t lsn = log_get_lsn();
- lsn_t log_capacity = log_get_capacity();
-
- /* log_capacity should never be zero after the initialization
- of log subsystem. */
- ut_ad(log_capacity != 0);
-
- /* Get total number of dirty pages. It is OK to access
- flush_list without holding any mutex as we are using this
- only for heuristics. */
- for (i = 0; i < srv_buf_pool_instances; i++) {
- buf_pool_t* buf_pool;
-
- buf_pool = buf_pool_from_array(i);
- n_dirty += UT_LIST_GET_LEN(buf_pool->flush_list);
- }
-
- /* An overflow can happen if we generate more than 2^32 bytes
- of redo in this interval i.e.: 4G of redo in 1 second. We can
- safely consider this as infinity because if we ever come close
- to 4G we'll start a synchronous flush of dirty pages. */
- /* redo_avg below is average at which redo is generated in
- past BUF_FLUSH_STAT_N_INTERVAL + redo generated in the current
- interval. */
- redo_avg = buf_flush_stat_sum.redo / BUF_FLUSH_STAT_N_INTERVAL
- + (lsn - buf_flush_stat_cur.redo);
-
- /* An overflow can happen possibly if we flush more than 2^32
- pages in BUF_FLUSH_STAT_N_INTERVAL. This is a very very
- unlikely scenario. Even when this happens it means that our
- flush rate will be off the mark. It won't affect correctness
- of any subsystem. */
- /* lru_flush_avg below is rate at which pages are flushed as
- part of LRU flush in past BUF_FLUSH_STAT_N_INTERVAL + the
- number of pages flushed in the current interval. */
- lru_flush_avg = buf_flush_stat_sum.n_flushed
- / BUF_FLUSH_STAT_N_INTERVAL
- + (buf_lru_flush_page_count
- - buf_flush_stat_cur.n_flushed);
-
- n_flush_req = (n_dirty * redo_avg) / log_capacity;
-
- /* The number of pages that we want to flush from the flush
- list is the difference between the required rate and the
- number of pages that we are historically flushing from the
- LRU list */
- if (n_flush_req <= lru_flush_avg) {
- return(0);
- } else {
- ib_uint64_t rate;
-
- rate = n_flush_req - lru_flush_avg;
-
- return((ulint) (rate < PCT_IO(100) ? rate : PCT_IO(100)));
- }
-}
-
/*********************************************************************//**
Clears up tail of the LRU lists:
* Put replaceable pages at the tail of LRU to the free list
@@ -2110,36 +2050,35 @@ Clears up tail of the LRU lists:
The depth to which we scan each buffer pool is controlled by dynamic
config parameter innodb_LRU_scan_depth.
@return total pages flushed */
-UNIV_INLINE
+UNIV_INTERN
ulint
-page_cleaner_flush_LRU_tail(void)
-/*=============================*/
+buf_flush_LRU_tail(void)
+/*====================*/
{
- ulint i;
- ulint j;
ulint total_flushed = 0;
- for (i = 0; i < srv_buf_pool_instances; i++) {
+ for (ulint i = 0; i < srv_buf_pool_instances; i++) {
buf_pool_t* buf_pool = buf_pool_from_array(i);
/* We divide LRU flush into smaller chunks because
there may be user threads waiting for the flush to
end in buf_LRU_get_free_block(). */
- for (j = 0;
+ for (ulint j = 0;
j < srv_LRU_scan_depth;
j += PAGE_CLEANER_LRU_BATCH_CHUNK_SIZE) {
- ulint n_flushed = buf_flush_LRU(buf_pool,
- PAGE_CLEANER_LRU_BATCH_CHUNK_SIZE);
+ ulint n_flushed = 0;
/* Currently page_cleaner is the only thread
that can trigger an LRU flush. It is possible
that a batch triggered during last iteration is
still running, */
- if (n_flushed != ULINT_UNDEFINED) {
- total_flushed += n_flushed;
- }
+ buf_flush_LRU(buf_pool,
+ PAGE_CLEANER_LRU_BATCH_CHUNK_SIZE,
+ &n_flushed);
+
+ total_flushed += n_flushed;
}
}
@@ -2156,14 +2095,12 @@ page_cleaner_flush_LRU_tail(void)
/*********************************************************************//**
Wait for any possible LRU flushes that are in progress to end. */
-UNIV_INLINE
+UNIV_INTERN
void
-page_cleaner_wait_LRU_flush(void)
-/*=============================*/
+buf_flush_wait_LRU_batch_end(void)
+/*==============================*/
{
- ulint i;
-
- for (i = 0; i < srv_buf_pool_instances; i++) {
+ for (ulint i = 0; i < srv_buf_pool_instances; i++) {
buf_pool_t* buf_pool;
buf_pool = buf_pool_from_array(i);
@@ -2190,22 +2127,87 @@ ulint
page_cleaner_do_flush_batch(
/*========================*/
ulint n_to_flush, /*!< in: number of pages that
- we should attempt to flush. If
- an lsn_limit is provided then
- this value will have no affect */
+ we should attempt to flush. */
lsn_t lsn_limit) /*!< in: LSN up to which flushing
must happen */
{
ulint n_flushed;
- ut_ad(n_to_flush == ULINT_MAX || lsn_limit == LSN_MAX);
+ buf_flush_list(n_to_flush, lsn_limit, &n_flushed);
+
+ return(n_flushed);
+}
+
+/*********************************************************************//**
+Calculates if flushing is required based on number of dirty pages in
+the buffer pool.
+@return percent of io_capacity to flush to manage dirty page ratio */
+static
+ulint
+af_get_pct_for_dirty()
+/*==================*/
+{
+ ulint dirty_pct = buf_get_modified_ratio_pct();
+
+ ut_a(srv_max_dirty_pages_pct_lwm
+ <= srv_max_buf_pool_modified_pct);
+
+ if (srv_max_dirty_pages_pct_lwm == 0) {
+ /* The user has not set the option to preflush dirty
+ pages as we approach the high water mark. */
+ if (dirty_pct > srv_max_buf_pool_modified_pct) {
+ /* We have crossed the high water mark of dirty
+ pages In this case we start flushing at 100% of
+ innodb_io_capacity. */
+ return(100);
+ }
+ } else if (dirty_pct > srv_max_dirty_pages_pct_lwm) {
+ /* We should start flushing pages gradually. */
+ return((dirty_pct * 100)
+ / (srv_max_buf_pool_modified_pct + 1));
+ }
+
+ return(0);
+}
+
+/*********************************************************************//**
+Calculates if flushing is required based on redo generation rate.
+@return percent of io_capacity to flush to manage redo space */
+static
+ulint
+af_get_pct_for_lsn(
+/*===============*/
+ lsn_t age) /*!< in: current age of LSN. */
+{
+ lsn_t max_async_age;
+ lsn_t lsn_age_factor;
+ lsn_t af_lwm = (srv_adaptive_flushing_lwm
+ * log_get_capacity()) / 100;
- n_flushed = buf_flush_list(n_to_flush, lsn_limit);
- if (n_flushed == ULINT_UNDEFINED) {
- n_flushed = 0;
+ if (age < af_lwm) {
+ /* No adaptive flushing. */
+ return(0);
}
- return(n_flushed);
+ max_async_age = log_get_max_modified_age_async();
+
+ if (age < max_async_age && !srv_adaptive_flushing) {
+ /* We have still not reached the max_async point and
+ the user has disabled adaptive flushing. */
+ return(0);
+ }
+
+ /* If we are here then we know that either:
+ 1) User has enabled adaptive flushing
+ 2) User may have disabled adaptive flushing but we have reached
+ max_async_age. */
+ lsn_age_factor = (age * 100) / max_async_age;
+
+ ut_ad(srv_max_io_capacity >= srv_io_capacity);
+ return(static_cast<ulint>(
+ ((srv_max_io_capacity / srv_io_capacity)
+ * (lsn_age_factor * sqrt((double)lsn_age_factor)))
+ / 7.5));
}
/*********************************************************************//**
@@ -2219,78 +2221,103 @@ ulint
page_cleaner_flush_pages_if_needed(void)
/*====================================*/
{
- ulint n_pages_flushed = 0;
- lsn_t lsn_limit = log_async_flush_lsn();
+ static lsn_t lsn_avg_rate = 0;
+ static lsn_t prev_lsn = 0;
+ static lsn_t last_lsn = 0;
+ static ulint sum_pages = 0;
+ static ulint last_pages = 0;
+ static ulint prev_pages = 0;
+ static ulint avg_page_rate = 0;
+ static ulint n_iterations = 0;
+ lsn_t oldest_lsn;
+ lsn_t cur_lsn;
+ lsn_t age;
+ lsn_t lsn_rate;
+ ulint n_pages = 0;
+ ulint pct_for_dirty = 0;
+ ulint pct_for_lsn = 0;
+ ulint pct_total = 0;
+ int age_factor = 0;
+
+ cur_lsn = log_get_lsn();
+
+ if (prev_lsn == 0) {
+ /* First time around. */
+ prev_lsn = cur_lsn;
+ return(0);
+ }
+
+ if (prev_lsn == cur_lsn) {
+ return(0);
+ }
- /* Currently we decide whether or not to flush and how much to
- flush based on three factors.
+ /* We update our variables every srv_flushing_avg_loops
+ iterations to smooth out transition in workload. */
+ if (++n_iterations >= srv_flushing_avg_loops) {
- 1) If the amount of LSN for which pages are not flushed to disk
- yet is greater than log_sys->max_modified_age_async. This is
- the most urgent type of flush and we attempt to cleanup enough
- of the tail of the flush_list to avoid flushing inside user
- threads.
+ avg_page_rate = ((sum_pages / srv_flushing_avg_loops)
+ + avg_page_rate) / 2;
- 2) If modified page ratio is greater than the one specified by
- the user. In that case we flush full 100% IO_CAPACITY of the
- server. Note that 1 and 2 are not mutually exclusive. We can
- end up executing both steps.
+ /* How much LSN we have generated since last call. */
+ lsn_rate = (cur_lsn - prev_lsn) / srv_flushing_avg_loops;
- 3) If adaptive_flushing is set by the user and neither of 1
- or 2 has occurred above then we flush a batch based on our
- heuristics. */
+ lsn_avg_rate = (lsn_avg_rate + lsn_rate) / 2;
- if (lsn_limit != LSN_MAX) {
+ prev_lsn = cur_lsn;
- /* async flushing is requested */
- n_pages_flushed = page_cleaner_do_flush_batch(ULINT_MAX,
- lsn_limit);
+ n_iterations = 0;
- MONITOR_INC_VALUE_CUMULATIVE(
- MONITOR_FLUSH_ASYNC_TOTAL_PAGE,
- MONITOR_FLUSH_ASYNC_COUNT,
- MONITOR_FLUSH_ASYNC_PAGES,
- n_pages_flushed);
+ sum_pages = 0;
}
- if (UNIV_UNLIKELY(n_pages_flushed < PCT_IO(100)
- && buf_get_modified_ratio_pct()
- > srv_max_buf_pool_modified_pct)) {
+ oldest_lsn = buf_pool_get_oldest_modification();
- /* Try to keep the number of modified pages in the
- buffer pool under the limit wished by the user */
+ ut_ad(oldest_lsn <= cur_lsn);
- n_pages_flushed += page_cleaner_do_flush_batch(PCT_IO(100),
- LSN_MAX);
+ age = cur_lsn - oldest_lsn;
- MONITOR_INC_VALUE_CUMULATIVE(
- MONITOR_FLUSH_MAX_DIRTY_TOTAL_PAGE,
- MONITOR_FLUSH_MAX_DIRTY_COUNT,
- MONITOR_FLUSH_MAX_DIRTY_PAGES,
- n_pages_flushed);
+ pct_for_dirty = af_get_pct_for_dirty();
+ pct_for_lsn = af_get_pct_for_lsn(age);
+
+ pct_total = ut_max(pct_for_dirty, pct_for_lsn);
+
+ /* Cap the maximum IO capacity that we are going to use by
+ max_io_capacity. */
+ n_pages = (PCT_IO(pct_total) + avg_page_rate) / 2;
+
+ if (n_pages > srv_max_io_capacity) {
+ n_pages = srv_max_io_capacity;
}
- if (srv_adaptive_flushing && n_pages_flushed == 0) {
+ if (last_pages && cur_lsn - last_lsn > lsn_avg_rate / 2) {
+ age_factor = prev_pages / last_pages;
+ }
- /* Try to keep the rate of flushing of dirty
- pages such that redo log generation does not
- produce bursts of IO at checkpoint time. */
- ulint n_flush = buf_flush_get_desired_flush_rate();
+ MONITOR_SET(MONITOR_FLUSH_N_TO_FLUSH_REQUESTED, n_pages);
- ut_ad(n_flush <= PCT_IO(100));
- if (n_flush) {
- n_pages_flushed = page_cleaner_do_flush_batch(
- n_flush, LSN_MAX);
+ prev_pages = n_pages;
+ n_pages = page_cleaner_do_flush_batch(
+ n_pages, oldest_lsn + lsn_avg_rate * (age_factor + 1));
- MONITOR_INC_VALUE_CUMULATIVE(
- MONITOR_FLUSH_ADAPTIVE_TOTAL_PAGE,
- MONITOR_FLUSH_ADAPTIVE_COUNT,
- MONITOR_FLUSH_ADAPTIVE_PAGES,
- n_pages_flushed);
- }
+ last_lsn= cur_lsn;
+ last_pages= n_pages + 1;
+
+ MONITOR_SET(MONITOR_FLUSH_AVG_PAGE_RATE, avg_page_rate);
+ MONITOR_SET(MONITOR_FLUSH_LSN_AVG_RATE, lsn_avg_rate);
+ MONITOR_SET(MONITOR_FLUSH_PCT_FOR_DIRTY, pct_for_dirty);
+ MONITOR_SET(MONITOR_FLUSH_PCT_FOR_LSN, pct_for_lsn);
+
+ if (n_pages) {
+ MONITOR_INC_VALUE_CUMULATIVE(
+ MONITOR_FLUSH_ADAPTIVE_TOTAL_PAGE,
+ MONITOR_FLUSH_ADAPTIVE_COUNT,
+ MONITOR_FLUSH_ADAPTIVE_PAGES,
+ n_pages);
+
+ sum_pages += n_pages;
}
- return(n_pages_flushed);
+ return(n_pages);
}
/*********************************************************************//**
@@ -2330,7 +2357,8 @@ DECLARE_THREAD(buf_flush_page_cleaner_thread)(
ulint next_loop_time = ut_time_ms() + 1000;
ulint n_flushed = 0;
ulint last_activity = srv_get_activity_count();
- ulint i;
+
+ ut_ad(!srv_read_only_mode);
#ifdef UNIV_PFS_THREAD
pfs_register_thread(buf_page_cleaner_thread_key);
@@ -2360,7 +2388,7 @@ DECLARE_THREAD(buf_flush_page_cleaner_thread)(
last_activity = srv_get_activity_count();
/* Flush pages from end of LRU if required */
- n_flushed = page_cleaner_flush_LRU_tail();
+ n_flushed = buf_flush_LRU_tail();
/* Flush pages from flush_list if required */
n_flushed += page_cleaner_flush_pages_if_needed();
@@ -2420,19 +2448,21 @@ DECLARE_THREAD(buf_flush_page_cleaner_thread)(
sweep and we'll come out of the loop leaving behind dirty pages
in the flush_list */
buf_flush_wait_batch_end(NULL, BUF_FLUSH_LIST);
- page_cleaner_wait_LRU_flush();
+ buf_flush_wait_LRU_batch_end();
+
+ bool success;
do {
- n_flushed = buf_flush_list(PCT_IO(100), LSN_MAX);
+ success = buf_flush_list(PCT_IO(100), LSN_MAX, &n_flushed);
buf_flush_wait_batch_end(NULL, BUF_FLUSH_LIST);
- } while (n_flushed > 0);
+ } while (!success || n_flushed > 0);
/* Some sanity checks */
ut_a(srv_get_active_thread_type() == SRV_NONE);
ut_a(srv_shutdown_state == SRV_SHUTDOWN_FLUSH_PHASE);
- for (i = 0; i < srv_buf_pool_instances; i++) {
+ for (ulint i = 0; i < srv_buf_pool_instances; i++) {
buf_pool_t* buf_pool = buf_pool_from_array(i);
ut_a(UT_LIST_GET_LEN(buf_pool->flush_list) == 0);
}
@@ -2545,3 +2575,66 @@ buf_flush_validate(
}
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
#endif /* !UNIV_HOTBACKUP */
+
+#ifdef UNIV_DEBUG
+/******************************************************************//**
+Check if there are any dirty pages that belong to a space id in the flush
+list in a particular buffer pool.
+@return number of dirty pages present in a single buffer pool */
+UNIV_INTERN
+ulint
+buf_pool_get_dirty_pages_count(
+/*===========================*/
+ buf_pool_t* buf_pool, /*!< in: buffer pool */
+ ulint id) /*!< in: space id to check */
+
+{
+ ulint count = 0;
+
+ buf_pool_mutex_enter(buf_pool);
+ buf_flush_list_mutex_enter(buf_pool);
+
+ buf_page_t* bpage;
+
+ for (bpage = UT_LIST_GET_FIRST(buf_pool->flush_list);
+ bpage != 0;
+ bpage = UT_LIST_GET_NEXT(list, bpage)) {
+
+ ut_ad(buf_page_in_file(bpage));
+ ut_ad(bpage->in_flush_list);
+ ut_ad(bpage->oldest_modification > 0);
+
+ if (buf_page_get_space(bpage) == id) {
+ ++count;
+ }
+ }
+
+ buf_flush_list_mutex_exit(buf_pool);
+ buf_pool_mutex_exit(buf_pool);
+
+ return(count);
+}
+
+/******************************************************************//**
+Check if there are any dirty pages that belong to a space id in the flush list.
+@return number of dirty pages present in all the buffer pools */
+UNIV_INTERN
+ulint
+buf_flush_get_dirty_pages_count(
+/*============================*/
+ ulint id) /*!< in: space id to check */
+
+{
+ ulint count = 0;
+
+ for (ulint i = 0; i < srv_buf_pool_instances; ++i) {
+ buf_pool_t* buf_pool;
+
+ buf_pool = buf_pool_from_array(i);
+
+ count += buf_pool_get_dirty_pages_count(buf_pool, id);
+ }
+
+ return(count);
+}
+#endif /* UNIV_DEBUG */
diff --git a/storage/innobase/buf/buf0lru.cc b/storage/innobase/buf/buf0lru.cc
index 5f0c0cae96c..270263d95f1 100644
--- a/storage/innobase/buf/buf0lru.cc
+++ b/storage/innobase/buf/buf0lru.cc
@@ -51,6 +51,9 @@ Created 11/5/1995 Heikki Tuuri
#include "log0recv.h"
#include "srv0srv.h"
#include "srv0mon.h"
+#include "lock0lock.h"
+
+#include "ha_prototypes.h"
/** The number of blocks from the LRU_old pointer onward, including
the block pointed to, must be buf_pool->LRU_old_ratio/BUF_LRU_OLD_RATIO_DIV
@@ -167,9 +170,8 @@ incr_LRU_size_in_bytes(
buf_page_t* bpage, /*!< in: control block */
buf_pool_t* buf_pool) /*!< in: buffer pool instance */
{
- ulint zip_size;
ut_ad(buf_pool_mutex_own(buf_pool));
- zip_size = page_zip_get_size(&bpage->zip);
+ ulint zip_size = page_zip_get_size(&bpage->zip);
buf_pool->stat.LRU_bytes += zip_size ? zip_size : UNIV_PAGE_SIZE;
ut_ad(buf_pool->stat.LRU_bytes <= buf_pool->curr_pool_size);
}
@@ -359,39 +361,338 @@ next_page:
}
/******************************************************************//**
+While flushing (or removing dirty) pages from a tablespace we don't
+want to hog the CPU and resources. Release the buffer pool and block
+mutex and try to force a context switch. Then reacquire the same mutexes.
+The current page is "fixed" before the release of the mutexes and then
+"unfixed" again once we have reacquired the mutexes. */
+static __attribute__((nonnull))
+void
+buf_flush_yield(
+/*============*/
+ buf_pool_t* buf_pool, /*!< in/out: buffer pool instance */
+ buf_page_t* bpage) /*!< in/out: current page */
+{
+ ib_mutex_t* block_mutex;
+
+ ut_ad(buf_pool_mutex_own(buf_pool));
+ ut_ad(buf_page_in_file(bpage));
+
+ block_mutex = buf_page_get_mutex(bpage);
+
+ mutex_enter(block_mutex);
+ /* "Fix" the block so that the position cannot be
+ changed after we release the buffer pool and
+ block mutexes. */
+ buf_page_set_sticky(bpage);
+
+ /* Now it is safe to release the buf_pool->mutex. */
+ buf_pool_mutex_exit(buf_pool);
+
+ mutex_exit(block_mutex);
+ /* Try and force a context switch. */
+ os_thread_yield();
+
+ buf_pool_mutex_enter(buf_pool);
+
+ mutex_enter(block_mutex);
+ /* "Unfix" the block now that we have both the
+ buffer pool and block mutex again. */
+ buf_page_unset_sticky(bpage);
+ mutex_exit(block_mutex);
+}
+
+/******************************************************************//**
+If we have hogged the resources for too long then release the buffer
+pool and flush list mutex and do a thread yield. Set the current page
+to "sticky" so that it is not relocated during the yield.
+@return true if yielded */
+static __attribute__((nonnull(1), warn_unused_result))
+bool
+buf_flush_try_yield(
+/*================*/
+ buf_pool_t* buf_pool, /*!< in/out: buffer pool instance */
+ buf_page_t* bpage, /*!< in/out: bpage to remove */
+ ulint processed) /*!< in: number of pages processed */
+{
+ /* Every BUF_LRU_DROP_SEARCH_SIZE iterations in the
+ loop we release buf_pool->mutex to let other threads
+ do their job but only if the block is not IO fixed. This
+ ensures that the block stays in its position in the
+ flush_list. */
+
+ if (bpage != NULL
+ && processed >= BUF_LRU_DROP_SEARCH_SIZE
+ && buf_page_get_io_fix(bpage) == BUF_IO_NONE) {
+
+ buf_flush_list_mutex_exit(buf_pool);
+
+ /* Release the buffer pool and block mutex
+ to give the other threads a go. */
+
+ buf_flush_yield(buf_pool, bpage);
+
+ buf_flush_list_mutex_enter(buf_pool);
+
+ /* Should not have been removed from the flush
+ list during the yield. However, this check is
+ not sufficient to catch a remove -> add. */
+
+ ut_ad(bpage->in_flush_list);
+
+ return(true);
+ }
+
+ return(false);
+}
+
+/******************************************************************//**
+Removes a single page from a given tablespace inside a specific
+buffer pool instance.
+@return true if page was removed. */
+static __attribute__((nonnull, warn_unused_result))
+bool
+buf_flush_or_remove_page(
+/*=====================*/
+ buf_pool_t* buf_pool, /*!< in/out: buffer pool instance */
+ buf_page_t* bpage, /*!< in/out: bpage to remove */
+ bool flush) /*!< in: flush to disk if true but
+ don't remove else remove without
+ flushing to disk */
+{
+ ib_mutex_t* block_mutex;
+ bool processed = false;
+
+ ut_ad(buf_pool_mutex_own(buf_pool));
+ ut_ad(buf_flush_list_mutex_own(buf_pool));
+
+ block_mutex = buf_page_get_mutex(bpage);
+
+ /* bpage->space and bpage->io_fix are protected by
+ buf_pool->mutex and block_mutex. It is safe to check
+ them while holding buf_pool->mutex only. */
+
+ if (buf_page_get_io_fix(bpage) != BUF_IO_NONE) {
+
+ /* We cannot remove this page during this scan
+ yet; maybe the system is currently reading it
+ in, or flushing the modifications to the file */
+
+ } else {
+
+ /* We have to release the flush_list_mutex to obey the
+ latching order. We are however guaranteed that the page
+ will stay in the flush_list because buf_flush_remove()
+ needs buf_pool->mutex as well (for the non-flush case). */
+
+ buf_flush_list_mutex_exit(buf_pool);
+
+ mutex_enter(block_mutex);
+
+ ut_ad(bpage->oldest_modification != 0);
+
+ if (bpage->buf_fix_count > 0) {
+
+ mutex_exit(block_mutex);
+
+ /* We cannot remove this page yet;
+ maybe the system is currently reading
+ it in, or flushing the modifications
+ to the file */
+
+ } else if (!flush) {
+
+ buf_flush_remove(bpage);
+
+ mutex_exit(block_mutex);
+
+ processed = true;
+
+ } else if (buf_page_get_io_fix(bpage) != BUF_IO_NONE) {
+
+ /* Check the status again after releasing the flush
+ list mutex and acquiring the block mutex. The background
+ flush thread may be in the process of flushing this
+ page when we released the flush list mutex. */
+
+ /* The following call will release the buffer pool
+ and block mutex. */
+ buf_flush_page(buf_pool, bpage, BUF_FLUSH_SINGLE_PAGE);
+
+ /* Wake possible simulated aio thread to actually
+ post the writes to the operating system */
+ os_aio_simulated_wake_handler_threads();
+
+ buf_pool_mutex_enter(buf_pool);
+
+ processed = true;
+ } else {
+ mutex_exit(block_mutex);
+ }
+
+ buf_flush_list_mutex_enter(buf_pool);
+ }
+
+ ut_ad(!mutex_own(block_mutex));
+
+ return(processed);
+}
+
+/******************************************************************//**
Remove all dirty pages belonging to a given tablespace inside a specific
buffer pool instance when we are deleting the data file(s) of that
tablespace. The pages still remain a part of LRU and are evicted from
-the list as they age towards the tail of the LRU. */
-static
+the list as they age towards the tail of the LRU.
+@retval DB_SUCCESS if all freed
+@retval DB_FAIL if not all freed
+@retval DB_INTERRUPTED if the transaction was interrupted */
+static __attribute__((nonnull(1), warn_unused_result))
+dberr_t
+buf_flush_or_remove_pages(
+/*======================*/
+ buf_pool_t* buf_pool, /*!< buffer pool instance */
+ ulint id, /*!< in: target space id for which
+ to remove or flush pages */
+ bool flush, /*!< in: flush to disk if true but
+ don't remove else remove without
+ flushing to disk */
+ const trx_t* trx) /*!< to check if the operation must
+ be interrupted, can be 0 */
+{
+ buf_page_t* prev;
+ buf_page_t* bpage;
+ ulint processed = 0;
+ bool all_freed = true;
+
+ buf_flush_list_mutex_enter(buf_pool);
+
+ for (bpage = UT_LIST_GET_LAST(buf_pool->flush_list);
+ bpage != NULL;
+ bpage = prev) {
+
+ ut_a(buf_page_in_file(bpage));
+
+ /* Save the previous link because once we free the
+ page we can't rely on the links. */
+
+ prev = UT_LIST_GET_PREV(list, bpage);
+
+ if (buf_page_get_space(bpage) != id) {
+
+ /* Skip this block, as it does not belong to
+ the target space. */
+
+ } else if (!buf_flush_or_remove_page(buf_pool, bpage, flush)) {
+
+ /* Remove was unsuccessful, we have to try again
+ by scanning the entire list from the end. */
+
+ all_freed = false;
+ }
+
+ ++processed;
+
+ /* Yield if we have hogged the CPU and mutexes for too long. */
+ if (buf_flush_try_yield(buf_pool, prev, processed)) {
+
+ /* Reset the batch size counter if we had to yield. */
+
+ processed = 0;
+ }
+
+#ifdef DBUG_OFF
+ if (flush) {
+ DBUG_EXECUTE_IF("ib_export_flush_crash",
+ static ulint n_pages;
+ if (++n_pages == 4) {DBUG_SUICIDE();});
+ }
+#endif /* DBUG_OFF */
+
+ /* The check for trx is interrupted is expensive, we want
+ to check every N iterations. */
+ if (!processed && trx && trx_is_interrupted(trx)) {
+ buf_flush_list_mutex_exit(buf_pool);
+ return(DB_INTERRUPTED);
+ }
+ }
+
+ buf_flush_list_mutex_exit(buf_pool);
+
+ return(all_freed ? DB_SUCCESS : DB_FAIL);
+}
+
+/******************************************************************//**
+Remove or flush all the dirty pages that belong to a given tablespace
+inside a specific buffer pool instance. The pages will remain in the LRU
+list and will be evicted from the LRU list as they age and move towards
+the tail of the LRU list. */
+static __attribute__((nonnull(1)))
void
-buf_LRU_remove_dirty_pages_for_tablespace(
-/*======================================*/
+buf_flush_dirty_pages(
+/*==================*/
+ buf_pool_t* buf_pool, /*!< buffer pool instance */
+ ulint id, /*!< in: space id */
+ bool flush, /*!< in: flush to disk if true otherwise
+ remove the pages without flushing */
+ const trx_t* trx) /*!< to check if the operation must
+ be interrupted */
+{
+ dberr_t err;
+
+ do {
+ buf_pool_mutex_enter(buf_pool);
+
+ err = buf_flush_or_remove_pages(buf_pool, id, flush, trx);
+
+ buf_pool_mutex_exit(buf_pool);
+
+ ut_ad(buf_flush_validate(buf_pool));
+
+ if (err == DB_FAIL) {
+ os_thread_sleep(20000);
+ }
+
+ /* DB_FAIL is a soft error, it means that the task wasn't
+ completed, needs to be retried. */
+
+ ut_ad(buf_flush_validate(buf_pool));
+
+ } while (err == DB_FAIL);
+}
+
+/******************************************************************//**
+Remove all pages that belong to a given tablespace inside a specific
+buffer pool instance when we are DISCARDing the tablespace. */
+static __attribute__((nonnull))
+void
+buf_LRU_remove_all_pages(
+/*=====================*/
buf_pool_t* buf_pool, /*!< buffer pool instance */
ulint id) /*!< in: space id */
{
buf_page_t* bpage;
ibool all_freed;
- ulint i;
scan_again:
buf_pool_mutex_enter(buf_pool);
- buf_flush_list_mutex_enter(buf_pool);
all_freed = TRUE;
- for (bpage = UT_LIST_GET_LAST(buf_pool->flush_list), i = 0;
- bpage != NULL; ++i) {
+ for (bpage = UT_LIST_GET_LAST(buf_pool->LRU);
+ bpage != NULL;
+ /* No op */) {
+ rw_lock_t* hash_lock;
buf_page_t* prev_bpage;
- mutex_t* block_mutex = NULL;
+ ib_mutex_t* block_mutex = NULL;
ut_a(buf_page_in_file(bpage));
+ ut_ad(bpage->in_LRU_list);
- prev_bpage = UT_LIST_GET_PREV(list, bpage);
+ prev_bpage = UT_LIST_GET_PREV(LRU, bpage);
/* bpage->space and bpage->io_fix are protected by
- buf_pool->mutex and block_mutex. It is safe to check
+ buf_pool->mutex and the block_mutex. It is safe to check
them while holding buf_pool->mutex only. */
if (buf_page_get_space(bpage) != id) {
@@ -405,83 +706,103 @@ scan_again:
all_freed = FALSE;
goto next_page;
- }
+ } else {
+ ulint fold = buf_page_address_fold(
+ bpage->space, bpage->offset);
- /* We have to release the flush_list_mutex to obey the
- latching order. We are however guaranteed that the page
- will stay in the flush_list because buf_flush_remove()
- needs buf_pool->mutex as well. */
- buf_flush_list_mutex_exit(buf_pool);
- block_mutex = buf_page_get_mutex(bpage);
- mutex_enter(block_mutex);
+ hash_lock = buf_page_hash_lock_get(buf_pool, fold);
- if (bpage->buf_fix_count > 0) {
- mutex_exit(block_mutex);
- buf_flush_list_mutex_enter(buf_pool);
+ rw_lock_x_lock(hash_lock);
- /* We cannot remove this page during
- this scan yet; maybe the system is
- currently reading it in, or flushing
- the modifications to the file */
+ block_mutex = buf_page_get_mutex(bpage);
+ mutex_enter(block_mutex);
- all_freed = FALSE;
- goto next_page;
- }
+ if (bpage->buf_fix_count > 0) {
- ut_ad(bpage->oldest_modification != 0);
+ mutex_exit(block_mutex);
- buf_flush_remove(bpage);
+ rw_lock_x_unlock(hash_lock);
- mutex_exit(block_mutex);
- buf_flush_list_mutex_enter(buf_pool);
-next_page:
- bpage = prev_bpage;
+ /* We cannot remove this page during
+ this scan yet; maybe the system is
+ currently reading it in, or flushing
+ the modifications to the file */
- if (!bpage) {
- break;
+ all_freed = FALSE;
+
+ goto next_page;
+ }
}
- /* Every BUF_LRU_DROP_SEARCH_SIZE iterations in the
- loop we release buf_pool->mutex to let other threads
- do their job. */
- if (i < BUF_LRU_DROP_SEARCH_SIZE) {
- continue;
+ ut_ad(mutex_own(block_mutex));
+
+#ifdef UNIV_DEBUG
+ if (buf_debug_prints) {
+ fprintf(stderr,
+ "Dropping space %lu page %lu\n",
+ (ulong) buf_page_get_space(bpage),
+ (ulong) buf_page_get_page_no(bpage));
}
+#endif
+ if (buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE) {
+ /* Do nothing, because the adaptive hash index
+ covers uncompressed pages only. */
+ } else if (((buf_block_t*) bpage)->index) {
+ ulint page_no;
+ ulint zip_size;
- /* We IO-fix the block to make sure that the block
- stays in its position in the flush_list. */
- if (buf_page_get_io_fix(bpage) != BUF_IO_NONE) {
- /* Block is already IO-fixed. We don't
- want to change the value. Lets leave
- this block alone. */
- continue;
+ buf_pool_mutex_exit(buf_pool);
+
+ zip_size = buf_page_get_zip_size(bpage);
+ page_no = buf_page_get_page_no(bpage);
+
+ rw_lock_x_unlock(hash_lock);
+
+ mutex_exit(block_mutex);
+
+ /* Note that the following call will acquire
+ and release block->lock X-latch. */
+
+ btr_search_drop_page_hash_when_freed(
+ id, zip_size, page_no);
+
+ goto scan_again;
}
- buf_flush_list_mutex_exit(buf_pool);
- block_mutex = buf_page_get_mutex(bpage);
- mutex_enter(block_mutex);
- buf_page_set_sticky(bpage);
- mutex_exit(block_mutex);
+ if (bpage->oldest_modification != 0) {
- /* Now it is safe to release the buf_pool->mutex. */
- buf_pool_mutex_exit(buf_pool);
- os_thread_yield();
- buf_pool_mutex_enter(buf_pool);
+ buf_flush_remove(bpage);
+ }
- mutex_enter(block_mutex);
- buf_page_unset_sticky(bpage);
- mutex_exit(block_mutex);
+ ut_ad(!bpage->in_flush_list);
- buf_flush_list_mutex_enter(buf_pool);
- ut_ad(bpage->in_flush_list);
+ /* Remove from the LRU list. */
+
+ if (buf_LRU_block_remove_hashed_page(bpage, TRUE)
+ != BUF_BLOCK_ZIP_FREE) {
+
+ buf_LRU_block_free_hashed_page((buf_block_t*) bpage);
+
+ } else {
+ /* The block_mutex should have been released
+ by buf_LRU_block_remove_hashed_page() when it
+ returns BUF_BLOCK_ZIP_FREE. */
+ ut_ad(block_mutex == &buf_pool->zip_mutex);
+ }
+
+ ut_ad(!mutex_own(block_mutex));
+
+#ifdef UNIV_SYNC_DEBUG
+ /* buf_LRU_block_remove_hashed_page() releases the hash_lock */
+ ut_ad(!rw_lock_own(hash_lock, RW_LOCK_EX));
+ ut_ad(!rw_lock_own(hash_lock, RW_LOCK_SHARED));
+#endif /* UNIV_SYNC_DEBUG */
- i = 0;
+next_page:
+ bpage = prev_bpage;
}
buf_pool_mutex_exit(buf_pool);
- buf_flush_list_mutex_exit(buf_pool);
-
- ut_ad(buf_flush_validate(buf_pool));
if (!all_freed) {
os_thread_sleep(20000);
@@ -491,15 +812,60 @@ next_page:
}
/******************************************************************//**
-Invalidates all pages belonging to a given tablespace when we are deleting
-the data file(s) of that tablespace. */
+Remove pages belonging to a given tablespace inside a specific
+buffer pool instance when we are deleting the data file(s) of that
+tablespace. The pages still remain a part of LRU and are evicted from
+the list as they age towards the tail of the LRU only if buf_remove
+is BUF_REMOVE_FLUSH_NO_WRITE. */
+static __attribute__((nonnull(1)))
+void
+buf_LRU_remove_pages(
+/*=================*/
+ buf_pool_t* buf_pool, /*!< buffer pool instance */
+ ulint id, /*!< in: space id */
+ buf_remove_t buf_remove, /*!< in: remove or flush strategy */
+ const trx_t* trx) /*!< to check if the operation must
+ be interrupted */
+{
+ switch (buf_remove) {
+ case BUF_REMOVE_ALL_NO_WRITE:
+ buf_LRU_remove_all_pages(buf_pool, id);
+ break;
+
+ case BUF_REMOVE_FLUSH_NO_WRITE:
+ ut_a(trx == 0);
+ buf_flush_dirty_pages(buf_pool, id, false, NULL);
+ ut_ad(trx_is_interrupted(trx)
+ || buf_pool_get_dirty_pages_count(buf_pool, id) == 0);
+ break;
+
+ case BUF_REMOVE_FLUSH_WRITE:
+ ut_a(trx != 0);
+ buf_flush_dirty_pages(buf_pool, id, true, trx);
+ ut_ad(trx_is_interrupted(trx)
+ || buf_pool_get_dirty_pages_count(buf_pool, id) == 0);
+ /* Ensure that all asynchronous IO is completed. */
+ os_aio_wait_until_no_pending_writes();
+ fil_flush(id);
+ break;
+ }
+}
+
+/******************************************************************//**
+Flushes all dirty pages or removes all pages belonging
+to a given tablespace. A PROBLEM: if readahead is being started, what
+guarantees that it will not try to read in pages after this operation
+has completed? */
UNIV_INTERN
void
-buf_LRU_invalidate_tablespace(
+buf_LRU_flush_or_remove_pages(
/*==========================*/
- ulint id) /*!< in: space id */
+ ulint id, /*!< in: space id */
+ buf_remove_t buf_remove, /*!< in: remove or flush strategy */
+ const trx_t* trx) /*!< to check if the operation must
+ be interrupted */
{
- ulint i;
+ ulint i;
/* Before we attempt to drop pages one by one we first
attempt to drop page hash index entries in batches to make
@@ -511,9 +877,28 @@ buf_LRU_invalidate_tablespace(
buf_pool_t* buf_pool;
buf_pool = buf_pool_from_array(i);
- buf_LRU_drop_page_hash_for_tablespace(buf_pool, id);
- buf_LRU_remove_dirty_pages_for_tablespace(buf_pool, id);
+
+ switch (buf_remove) {
+ case BUF_REMOVE_ALL_NO_WRITE:
+ case BUF_REMOVE_FLUSH_NO_WRITE:
+ buf_LRU_drop_page_hash_for_tablespace(buf_pool, id);
+ break;
+
+ case BUF_REMOVE_FLUSH_WRITE:
+ /* We allow read-only queries against the
+ table, there is no need to drop the AHI entries. */
+ break;
+ }
+
+ buf_LRU_remove_pages(buf_pool, id, buf_remove, trx);
}
+
+#ifdef UNIV_DEBUG
+ if (trx != 0 && id != 0) {
+ ut_ad(trx_is_interrupted(trx)
+ || buf_flush_get_dirty_pages_count(id) == 0);
+ }
+#endif /* UNIV_DEBUG */
}
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
@@ -807,7 +1192,7 @@ buf_LRU_check_size_of_non_data_objects(
buf_lru_switched_on_innodb_mon = TRUE;
srv_print_innodb_monitor = TRUE;
- os_event_set(srv_timeout_event);
+ os_event_set(lock_sys->timeout_event);
}
} else if (buf_lru_switched_on_innodb_mon) {
@@ -955,7 +1340,7 @@ loop:
mon_value_was = srv_print_innodb_monitor;
started_monitor = TRUE;
srv_print_innodb_monitor = TRUE;
- os_event_set(srv_timeout_event);
+ os_event_set(lock_sys->timeout_event);
}
/* If we have scanned the whole LRU and still are unable to
@@ -982,7 +1367,7 @@ loop:
++flush_failures;
}
- ++srv_buf_pool_wait_free;
+ srv_stats.buf_pool_wait_free.add(n_iterations, 1);
n_iterations++;
@@ -1425,7 +1810,7 @@ buf_LRU_free_block(
bpage->offset);
rw_lock_t* hash_lock = buf_page_hash_lock_get(buf_pool, fold);
- mutex_t* block_mutex = buf_page_get_mutex(bpage);
+ ib_mutex_t* block_mutex = buf_page_get_mutex(bpage);
ut_ad(buf_pool_mutex_own(buf_pool));
ut_ad(buf_page_in_file(bpage));
@@ -2031,24 +2416,28 @@ buf_LRU_free_one_page(
be in a state where it can be freed; there
may or may not be a hash index to the page */
{
-#ifdef UNIV_DEBUG
buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
-#endif
- mutex_t* block_mutex = buf_page_get_mutex(bpage);
+ const ulint fold = buf_page_address_fold(bpage->space,
+ bpage->offset);
+ rw_lock_t* hash_lock = buf_page_hash_lock_get(buf_pool, fold);
+ ib_mutex_t* block_mutex = buf_page_get_mutex(bpage);
ut_ad(buf_pool_mutex_own(buf_pool));
- ut_ad(mutex_own(block_mutex));
+
+ rw_lock_x_lock(hash_lock);
+ mutex_enter(block_mutex);
if (buf_LRU_block_remove_hashed_page(bpage, TRUE)
!= BUF_BLOCK_ZIP_FREE) {
buf_LRU_block_free_hashed_page((buf_block_t*) bpage);
- } else {
- /* The block_mutex should have been released by
- buf_LRU_block_remove_hashed_page() when it returns
- BUF_BLOCK_ZIP_FREE. */
- ut_ad(block_mutex == &buf_pool->zip_mutex);
- mutex_enter(block_mutex);
}
+
+ /* buf_LRU_block_remove_hashed_page() releases hash_lock and block_mutex */
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(!rw_lock_own(hash_lock, RW_LOCK_EX)
+ && !rw_lock_own(hash_lock, RW_LOCK_SHARED));
+#endif /* UNIV_SYNC_DEBUG */
+ ut_ad(!mutex_own(block_mutex));
}
/**********************************************************************//**
diff --git a/storage/innobase/buf/buf0rea.cc b/storage/innobase/buf/buf0rea.cc
index 227cb083725..3a579e251ff 100644
--- a/storage/innobase/buf/buf0rea.cc
+++ b/storage/innobase/buf/buf0rea.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -61,7 +61,7 @@ buf_read_page_handle_error(
buf_page_t* bpage) /*!< in: pointer to the block */
{
buf_pool_t* buf_pool = buf_pool_from_bpage(bpage);
- const ibool uncompressed = (buf_page_get_state(bpage)
+ const bool uncompressed = (buf_page_get_state(bpage)
== BUF_BLOCK_FILE_PAGE);
/* First unfix and release lock on the bpage */
@@ -79,13 +79,14 @@ buf_read_page_handle_error(
BUF_IO_READ);
}
+ mutex_exit(buf_page_get_mutex(bpage));
+
/* remove the block from LRU list */
buf_LRU_free_one_page(bpage);
ut_ad(buf_pool->n_pend_reads > 0);
buf_pool->n_pend_reads--;
- mutex_exit(buf_page_get_mutex(bpage));
buf_pool_mutex_exit(buf_pool);
}
@@ -103,7 +104,7 @@ static
ulint
buf_read_page_low(
/*==============*/
- ulint* err, /*!< out: DB_SUCCESS or DB_TABLESPACE_DELETED if we are
+ dberr_t* err, /*!< out: DB_SUCCESS or DB_TABLESPACE_DELETED if we are
trying to read from a non-existent tablespace, or a
tablespace which is just now being dropped */
ibool sync, /*!< in: TRUE if synchronous aio is desired */
@@ -192,13 +193,9 @@ buf_read_page_low(
}
thd_wait_end(NULL);
- if (*err == DB_TABLESPACE_DELETED) {
- buf_read_page_handle_error(bpage);
- return(0);
- }
-
if (*err != DB_SUCCESS) {
- if (ignore_nonexistent_pages) {
+ if (ignore_nonexistent_pages || *err == DB_TABLESPACE_DELETED) {
+ buf_read_page_handle_error(bpage);
return(0);
}
/* else */
@@ -248,7 +245,7 @@ buf_read_ahead_random(
ulint ibuf_mode;
ulint count;
ulint low, high;
- ulint err;
+ dberr_t err;
ulint i;
const ulint buf_read_ahead_random_area
= BUF_READ_AHEAD_AREA(buf_pool);
@@ -377,7 +374,7 @@ read_ahead:
buf_LRU_stat_inc_io();
buf_pool->stat.n_ra_pages_read_rnd += count;
- srv_buf_pool_reads += count;
+ srv_stats.buf_pool_reads.add(count);
return(count);
}
@@ -397,7 +394,7 @@ buf_read_page(
{
ib_int64_t tablespace_version;
ulint count;
- ulint err;
+ dberr_t err;
tablespace_version = fil_space_get_version(space);
@@ -407,7 +404,7 @@ buf_read_page(
count = buf_read_page_low(&err, TRUE, BUF_READ_ANY_PAGE, space,
zip_size, FALSE,
tablespace_version, offset);
- srv_buf_pool_reads += count;
+ srv_stats.buf_pool_reads.add(count);
if (err == DB_TABLESPACE_DELETED) {
ut_print_timestamp(stderr);
fprintf(stderr,
@@ -440,7 +437,7 @@ buf_read_page_async(
ulint zip_size;
ib_int64_t tablespace_version;
ulint count;
- ulint err;
+ dberr_t err;
zip_size = fil_space_get_zip_size(space);
@@ -455,7 +452,7 @@ buf_read_page_async(
| BUF_READ_IGNORE_NONEXISTENT_PAGES,
space, zip_size, FALSE,
tablespace_version, offset);
- srv_buf_pool_reads += count;
+ srv_stats.buf_pool_reads.add(count);
/* We do not increment number of I/O operations used for LRU policy
here (buf_LRU_stat_inc_io()). We use this in heuristics to decide
@@ -513,7 +510,7 @@ buf_read_ahead_linear(
ulint fail_count;
ulint ibuf_mode;
ulint low, high;
- ulint err;
+ dberr_t err;
ulint i;
const ulint buf_read_ahead_linear_area
= BUF_READ_AHEAD_AREA(buf_pool);
@@ -784,7 +781,7 @@ buf_read_ibuf_merge_pages(
#endif
for (i = 0; i < n_stored; i++) {
- ulint err;
+ dberr_t err;
buf_pool_t* buf_pool;
ulint zip_size = fil_space_get_zip_size(space_ids[i]);
@@ -850,7 +847,7 @@ buf_read_recv_pages(
{
ib_int64_t tablespace_version;
ulint count;
- ulint err;
+ dberr_t err;
ulint i;
zip_size = fil_space_get_zip_size(space);
diff --git a/storage/innobase/dict/dict0boot.cc b/storage/innobase/dict/dict0boot.cc
index 8e305364ac8..eea10759fcd 100644
--- a/storage/innobase/dict/dict0boot.cc
+++ b/storage/innobase/dict/dict0boot.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -241,9 +241,10 @@ dict_hdr_create(
/*****************************************************************//**
Initializes the data dictionary memory structures when the database is
-started. This function is also called when the data dictionary is created. */
+started. This function is also called when the data dictionary is created.
+@return DB_SUCCESS or error code. */
UNIV_INTERN
-void
+dberr_t
dict_boot(void)
/*===========*/
{
@@ -252,7 +253,7 @@ dict_boot(void)
dict_hdr_t* dict_hdr;
mem_heap_t* heap;
mtr_t mtr;
- ulint error;
+ dberr_t error;
/* Be sure these constants do not ever change. To avoid bloat,
only check the *NUM_FIELDS* in each table */
@@ -307,9 +308,7 @@ dict_boot(void)
dict_mem_table_add_col(table, heap, "ID", DATA_BINARY, 0, 0);
/* ROW_FORMAT = (N_COLS >> 31) ? COMPACT : REDUNDANT */
dict_mem_table_add_col(table, heap, "N_COLS", DATA_INT, 0, 4);
- /* If the format is UNIV_FORMAT_A, table->flags == 0, and
- TYPE == 1, which is defined as SYS_TABLE_TYPE_ANTELOPE.
- The low order bit of TYPE is always set to 1. If the format
+ /* The low order bit of TYPE is always set to 1. If the format
is UNIV_FORMAT_B or higher, this field matches table->flags. */
dict_mem_table_add_col(table, heap, "TYPE", DATA_INT, 0, 4);
dict_mem_table_add_col(table, heap, "MIX_ID", DATA_BINARY, 0, 0);
@@ -454,14 +453,27 @@ dict_boot(void)
ibuf_init_at_db_start();
- /* Load definitions of other indexes on system tables */
+ dberr_t err = DB_SUCCESS;
+
+ if (srv_read_only_mode && !ibuf_is_empty()) {
+
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Change buffer must be empty when --innodb-read-only "
+ "is set!");
- dict_load_sys_table(dict_sys->sys_tables);
- dict_load_sys_table(dict_sys->sys_columns);
- dict_load_sys_table(dict_sys->sys_indexes);
- dict_load_sys_table(dict_sys->sys_fields);
+ err = DB_ERROR;
+ } else {
+ /* Load definitions of other indexes on system tables */
+
+ dict_load_sys_table(dict_sys->sys_tables);
+ dict_load_sys_table(dict_sys->sys_columns);
+ dict_load_sys_table(dict_sys->sys_indexes);
+ dict_load_sys_table(dict_sys->sys_fields);
+ }
mutex_exit(&(dict_sys->mutex));
+
+ return(err);
}
/*****************************************************************//**
@@ -476,9 +488,10 @@ dict_insert_initial_data(void)
}
/*****************************************************************//**
-Creates and initializes the data dictionary at the database creation. */
+Creates and initializes the data dictionary at the server bootstrap.
+@return DB_SUCCESS or error code. */
UNIV_INTERN
-void
+dberr_t
dict_create(void)
/*=============*/
{
@@ -490,7 +503,11 @@ dict_create(void)
mtr_commit(&mtr);
- dict_boot();
+ dberr_t err = dict_boot();
+
+ if (err == DB_SUCCESS) {
+ dict_insert_initial_data();
+ }
- dict_insert_initial_data();
+ return(err);
}
diff --git a/storage/innobase/dict/dict0crea.cc b/storage/innobase/dict/dict0crea.cc
index d58b304ab92..864150b324a 100644
--- a/storage/innobase/dict/dict0crea.cc
+++ b/storage/innobase/dict/dict0crea.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -43,6 +43,7 @@ Created 1/8/1996 Heikki Tuuri
#include "usr0sess.h"
#include "ut0vec.h"
#include "dict0priv.h"
+#include "fts0priv.h"
/*****************************************************************//**
Based on a table object, this function builds the entry to be inserted
@@ -244,8 +245,8 @@ dict_create_sys_columns_tuple(
/***************************************************************//**
Builds a table definition to insert.
@return DB_SUCCESS or error code */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
dict_build_table_def_step(
/*======================*/
que_thr_t* thr, /*!< in: query thread */
@@ -253,9 +254,8 @@ dict_build_table_def_step(
{
dict_table_t* table;
dtuple_t* row;
- ulint error;
- const char* path_or_name;
- ibool is_path;
+ dberr_t error;
+ const char* path;
mtr_t mtr;
ulint space = 0;
bool use_tablespace;
@@ -263,7 +263,7 @@ dict_build_table_def_step(
ut_ad(mutex_own(&(dict_sys->mutex)));
table = node->table;
- use_tablespace = !!(table->flags2 & DICT_TF2_USE_TABLESPACE);
+ use_tablespace = DICT_TF2_FLAG_IS_SET(table, DICT_TF2_USE_TABLESPACE);
dict_hdr_get_new_id(&table->id, NULL, NULL);
@@ -274,6 +274,11 @@ dict_build_table_def_step(
Get a new space id. */
dict_hdr_get_new_id(NULL, NULL, &space);
+ DBUG_EXECUTE_IF(
+ "ib_create_table_fail_out_of_space_ids",
+ space = ULINT_UNDEFINED;
+ );
+
if (UNIV_UNLIKELY(space == ULINT_UNDEFINED)) {
return(DB_ERROR);
}
@@ -286,26 +291,19 @@ dict_build_table_def_step(
- page 3 will contain the root of the clustered index of the
table we create here. */
- if (table->dir_path_of_temp_table) {
- /* We place tables created with CREATE TEMPORARY
- TABLE in the tmp dir of mysqld server */
-
- path_or_name = table->dir_path_of_temp_table;
- is_path = TRUE;
- } else {
- path_or_name = table->name;
- is_path = FALSE;
- }
+ path = table->data_dir_path ? table->data_dir_path
+ : table->dir_path_of_temp_table;
ut_ad(dict_table_get_format(table) <= UNIV_FORMAT_MAX);
ut_ad(!dict_table_zip_size(table)
|| dict_table_get_format(table) >= UNIV_FORMAT_B);
error = fil_create_new_single_table_tablespace(
- space, path_or_name, is_path,
+ space, table->name, path,
dict_tf_to_fsp_flags(table->flags),
table->flags2,
FIL_IBD_FILE_INITIAL_SIZE);
+
table->space = (unsigned int) space;
if (error != DB_SUCCESS) {
@@ -333,10 +331,9 @@ dict_build_table_def_step(
}
/***************************************************************//**
-Builds a column definition to insert.
-@return DB_SUCCESS */
+Builds a column definition to insert. */
static
-ulint
+void
dict_build_col_def_step(
/*====================*/
tab_node_t* node) /*!< in: table create node */
@@ -346,8 +343,6 @@ dict_build_col_def_step(
row = dict_create_sys_columns_tuple(node->table, node->col_no,
node->heap);
ins_node_set_new_row(node->col_def, row);
-
- return(DB_SUCCESS);
}
/*****************************************************************//**
@@ -571,8 +566,8 @@ dict_create_search_tuple(
/***************************************************************//**
Builds an index definition row to insert.
@return DB_SUCCESS or error code */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
dict_build_index_def_step(
/*======================*/
que_thr_t* thr, /*!< in: query thread */
@@ -595,7 +590,10 @@ dict_build_index_def_step(
return(DB_TABLE_NOT_FOUND);
}
- trx->table_id = table->id;
+ if (!trx->table_id) {
+ /* Record only the first table id. */
+ trx->table_id = table->id;
+ }
node->table = table;
@@ -616,15 +614,16 @@ dict_build_index_def_step(
/* Note that the index was created by this transaction. */
index->trx_id = trx->id;
+ ut_ad(table->def_trx_id <= trx->id);
+ table->def_trx_id = trx->id;
return(DB_SUCCESS);
}
/***************************************************************//**
-Builds a field definition row to insert.
-@return DB_SUCCESS */
+Builds a field definition row to insert. */
static
-ulint
+void
dict_build_field_def_step(
/*======================*/
ind_node_t* node) /*!< in: index create node */
@@ -637,15 +636,13 @@ dict_build_field_def_step(
row = dict_create_sys_fields_tuple(index, node->field_no, node->heap);
ins_node_set_new_row(node->field_def, row);
-
- return(DB_SUCCESS);
}
/***************************************************************//**
Creates an index tree for the index if it is not a member of a cluster.
@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
dict_create_index_tree_step(
/*========================*/
ind_node_t* node) /*!< in: index create node */
@@ -653,7 +650,6 @@ dict_create_index_tree_step(
dict_index_t* index;
dict_table_t* sys_indexes;
dtuple_t* search_tuple;
- ulint zip_size;
btr_pcur_t pcur;
mtr_t mtr;
@@ -682,25 +678,37 @@ dict_create_index_tree_step(
btr_pcur_move_to_next_user_rec(&pcur, &mtr);
- zip_size = dict_table_zip_size(index->table);
- node->page_no = btr_create(index->type, index->space, zip_size,
- index->id, index, &mtr);
- /* printf("Created a new index tree in space %lu root page %lu\n",
- index->space, node->page_no); */
+ dberr_t err = DB_SUCCESS;
+ ulint zip_size = dict_table_zip_size(index->table);
- page_rec_write_field(btr_pcur_get_rec(&pcur),
- DICT_FLD__SYS_INDEXES__PAGE_NO,
- node->page_no, &mtr);
- btr_pcur_close(&pcur);
- mtr_commit(&mtr);
+ if (node->index->table->ibd_file_missing
+ || dict_table_is_discarded(node->index->table)) {
+
+ node->page_no = FIL_NULL;
+ } else {
+ node->page_no = btr_create(
+ index->type, index->space, zip_size,
+ index->id, index, &mtr);
- if (node->page_no == FIL_NULL) {
+ if (node->page_no == FIL_NULL) {
+ err = DB_OUT_OF_FILE_SPACE;
+ }
- return(DB_OUT_OF_FILE_SPACE);
+ DBUG_EXECUTE_IF("ib_import_create_index_failure_1",
+ node->page_no = FIL_NULL;
+ err = DB_OUT_OF_FILE_SPACE; );
}
- return(DB_SUCCESS);
+ page_rec_write_field(
+ btr_pcur_get_rec(&pcur), DICT_FLD__SYS_INDEXES__PAGE_NO,
+ node->page_no, &mtr);
+
+ btr_pcur_close(&pcur);
+
+ mtr_commit(&mtr);
+
+ return(err);
}
/*******************************************************************//**
@@ -883,7 +891,7 @@ create:
for (index = UT_LIST_GET_FIRST(table->indexes);
index;
index = UT_LIST_GET_NEXT(indexes, index)) {
- if (index->id == index_id) {
+ if (index->id == index_id && !(index->type & DICT_FTS)) {
root_page_no = btr_create(type, space, zip_size,
index_id, index, mtr);
index->page = (unsigned int) root_page_no;
@@ -910,7 +918,9 @@ tab_create_graph_create(
/*====================*/
dict_table_t* table, /*!< in: table to create, built as a memory data
structure */
- mem_heap_t* heap) /*!< in: heap where created */
+ mem_heap_t* heap, /*!< in: heap where created */
+ bool commit) /*!< in: true if the commit node should be
+ added to the query graph */
{
tab_node_t* node;
@@ -932,8 +942,12 @@ tab_create_graph_create(
heap);
node->col_def->common.parent = node;
- node->commit_node = trx_commit_node_create(heap);
- node->commit_node->common.parent = node;
+ if (commit) {
+ node->commit_node = trx_commit_node_create(heap);
+ node->commit_node->common.parent = node;
+ } else {
+ node->commit_node = 0;
+ }
return(node);
}
@@ -947,7 +961,9 @@ ind_create_graph_create(
/*====================*/
dict_index_t* index, /*!< in: index to create, built as a memory data
structure */
- mem_heap_t* heap) /*!< in: heap where created */
+ mem_heap_t* heap, /*!< in: heap where created */
+ bool commit) /*!< in: true if the commit node should be
+ added to the query graph */
{
ind_node_t* node;
@@ -970,8 +986,12 @@ ind_create_graph_create(
dict_sys->sys_fields, heap);
node->field_def->common.parent = node;
- node->commit_node = trx_commit_node_create(heap);
- node->commit_node->common.parent = node;
+ if (commit) {
+ node->commit_node = trx_commit_node_create(heap);
+ node->commit_node->common.parent = node;
+ } else {
+ node->commit_node = 0;
+ }
return(node);
}
@@ -986,7 +1006,7 @@ dict_create_table_step(
que_thr_t* thr) /*!< in: query thread */
{
tab_node_t* node;
- ulint err = DB_ERROR;
+ dberr_t err = DB_ERROR;
trx_t* trx;
ut_ad(thr);
@@ -1025,12 +1045,7 @@ dict_create_table_step(
if (node->col_no < (node->table)->n_def) {
- err = dict_build_col_def_step(node);
-
- if (err != DB_SUCCESS) {
-
- goto function_exit;
- }
+ dict_build_col_def_step(node);
node->col_no++;
@@ -1063,7 +1078,7 @@ dict_create_table_step(
}
function_exit:
- trx->error_state = (enum db_err) err;
+ trx->error_state = err;
if (err == DB_SUCCESS) {
/* Ok: do nothing */
@@ -1093,7 +1108,7 @@ dict_create_index_step(
que_thr_t* thr) /*!< in: query thread */
{
ind_node_t* node;
- ulint err = DB_ERROR;
+ dberr_t err = DB_ERROR;
trx_t* trx;
ut_ad(thr);
@@ -1130,12 +1145,7 @@ dict_create_index_step(
if (node->field_no < (node->index)->n_fields) {
- err = dict_build_field_def_step(node);
-
- if (err != DB_SUCCESS) {
-
- goto function_exit;
- }
+ dict_build_field_def_step(node);
node->field_no++;
@@ -1172,7 +1182,37 @@ dict_create_index_step(
err = dict_create_index_tree_step(node);
+ DBUG_EXECUTE_IF("ib_dict_create_index_tree_fail",
+ err = DB_OUT_OF_MEMORY;);
+
if (err != DB_SUCCESS) {
+ /* If this is a FTS index, we will need to remove
+ it from fts->cache->indexes list as well */
+ if ((node->index->type & DICT_FTS)
+ && node->table->fts) {
+ fts_index_cache_t* index_cache;
+
+ rw_lock_x_lock(
+ &node->table->fts->cache->init_lock);
+
+ index_cache = (fts_index_cache_t*)
+ fts_find_index_cache(
+ node->table->fts->cache,
+ node->index);
+
+ if (index_cache->words) {
+ rbt_free(index_cache->words);
+ index_cache->words = 0;
+ }
+
+ ib_vector_remove(
+ node->table->fts->cache->indexes,
+ *reinterpret_cast<void**>(index_cache));
+
+ rw_lock_x_unlock(
+ &node->table->fts->cache->init_lock);
+ }
+
dict_index_remove_from_cache(node->table, node->index);
node->index = NULL;
@@ -1180,6 +1220,11 @@ dict_create_index_step(
}
node->index->page = node->page_no;
+ /* These should have been set in
+ dict_build_index_def_step() and
+ dict_index_add_to_cache(). */
+ ut_ad(node->index->trx_id == trx->id);
+ ut_ad(node->index->table->def_trx_id == trx->id);
node->state = INDEX_COMMIT_WORK;
}
@@ -1197,7 +1242,7 @@ dict_create_index_step(
}
function_exit:
- trx->error_state = static_cast<enum db_err>(err);
+ trx->error_state = err;
if (err == DB_SUCCESS) {
/* Ok: do nothing */
@@ -1217,93 +1262,107 @@ function_exit:
}
/****************************************************************//**
-Check whether the system foreign key tables exist. Additionally, If
-they exist then move them to non-LRU end of the table LRU list.
-@return TRUE if they exist. */
+Check whether a system table exists. Additionally, if it exists,
+move it to the non-LRU end of the table LRU list. This is only used
+for system tables that can be upgraded or added to an older database,
+which include SYS_FOREIGN, SYS_FOREIGN_COLS, SYS_TABLESPACES and
+SYS_DATAFILES.
+@return DB_SUCCESS if the sys table exists, DB_CORRUPTION if it exists
+but is not current, DB_TABLE_NOT_FOUND if it does not exist*/
static
-ibool
-dict_check_sys_foreign_tables_exist(void)
-/*=====================================*/
+dberr_t
+dict_check_if_system_table_exists(
+/*==============================*/
+ const char* tablename, /*!< in: name of table */
+ ulint num_fields, /*!< in: number of fields */
+ ulint num_indexes) /*!< in: number of indexes */
{
- dict_table_t* sys_foreign;
- ibool exists = FALSE;
- dict_table_t* sys_foreign_cols;
+ dict_table_t* sys_table;
+ dberr_t error = DB_SUCCESS;
ut_a(srv_get_active_thread_type() == SRV_NONE);
mutex_enter(&dict_sys->mutex);
- sys_foreign = dict_table_get_low("SYS_FOREIGN");
- sys_foreign_cols = dict_table_get_low("SYS_FOREIGN_COLS");
+ sys_table = dict_table_get_low(tablename);
- if (sys_foreign != NULL
- && sys_foreign_cols != NULL
- && UT_LIST_GET_LEN(sys_foreign->indexes) == 3
- && UT_LIST_GET_LEN(sys_foreign_cols->indexes) == 1) {
+ if (sys_table == NULL) {
+ error = DB_TABLE_NOT_FOUND;
- /* Foreign constraint system tables have already been
- created, and they are ok. Ensure that they can't be
- evicted from the table LRU cache. */
+ } else if (UT_LIST_GET_LEN(sys_table->indexes) != num_indexes
+ || sys_table->n_cols != num_fields) {
+ error = DB_CORRUPTION;
- dict_table_move_from_lru_to_non_lru(sys_foreign);
- dict_table_move_from_lru_to_non_lru(sys_foreign_cols);
+ } else {
+ /* This table has already been created, and it is OK.
+ Ensure that it can't be evicted from the table LRU cache. */
- exists = TRUE;
+ dict_table_move_from_lru_to_non_lru(sys_table);
}
mutex_exit(&dict_sys->mutex);
- return(exists);
+ return(error);
}
/****************************************************************//**
Creates the foreign key constraints system tables inside InnoDB
-at database creation or database start if they are not found or are
+at server bootstrap or server start if they are not found or are
not of the right form.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
dict_create_or_check_foreign_constraint_tables(void)
/*================================================*/
{
trx_t* trx;
- ulint error;
- ibool success;
- ibool srv_file_per_table_backup;
+ my_bool srv_file_per_table_backup;
+ dberr_t err;
+ dberr_t sys_foreign_err;
+ dberr_t sys_foreign_cols_err;
ut_a(srv_get_active_thread_type() == SRV_NONE);
/* Note: The master thread has not been started at this point. */
- if (dict_check_sys_foreign_tables_exist()) {
+
+ sys_foreign_err = dict_check_if_system_table_exists(
+ "SYS_FOREIGN", DICT_NUM_FIELDS__SYS_FOREIGN + 1, 3);
+ sys_foreign_cols_err = dict_check_if_system_table_exists(
+ "SYS_FOREIGN_COLS", DICT_NUM_FIELDS__SYS_FOREIGN_COLS + 1, 1);
+
+ if (sys_foreign_err == DB_SUCCESS
+ && sys_foreign_cols_err == DB_SUCCESS) {
return(DB_SUCCESS);
}
trx = trx_allocate_for_mysql();
+ trx_set_dict_operation(trx, TRX_DICT_OP_TABLE);
+
trx->op_info = "creating foreign key sys tables";
row_mysql_lock_data_dictionary(trx);
/* Check which incomplete table definition to drop. */
- if (dict_table_get_low("SYS_FOREIGN") != NULL) {
- fprintf(stderr,
- "InnoDB: dropping incompletely created"
- " SYS_FOREIGN table\n");
+ if (sys_foreign_err == DB_CORRUPTION) {
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Dropping incompletely created "
+ "SYS_FOREIGN table.");
row_drop_table_for_mysql("SYS_FOREIGN", trx, TRUE);
}
- if (dict_table_get_low("SYS_FOREIGN_COLS") != NULL) {
- fprintf(stderr,
- "InnoDB: dropping incompletely created"
- " SYS_FOREIGN_COLS table\n");
+ if (sys_foreign_cols_err == DB_CORRUPTION) {
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Dropping incompletely created "
+ "SYS_FOREIGN_COLS table.");
row_drop_table_for_mysql("SYS_FOREIGN_COLS", trx, TRUE);
}
- fprintf(stderr,
- "InnoDB: Creating foreign key constraint system tables\n");
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Creating foreign key constraint system tables.");
/* NOTE: in dict_load_foreigns we use the fact that
there are 2 secondary indexes on SYS_FOREIGN, and they
@@ -1315,50 +1374,50 @@ dict_create_or_check_foreign_constraint_tables(void)
VARBINARY, like in other InnoDB system tables, to get a clean
design. */
- srv_file_per_table_backup = (ibool) srv_file_per_table;
+ srv_file_per_table_backup = srv_file_per_table;
/* We always want SYSTEM tables to be created inside the system
tablespace. */
srv_file_per_table = 0;
- error = que_eval_sql(NULL,
- "PROCEDURE CREATE_FOREIGN_SYS_TABLES_PROC () IS\n"
- "BEGIN\n"
- "CREATE TABLE\n"
- "SYS_FOREIGN(ID CHAR, FOR_NAME CHAR,"
- " REF_NAME CHAR, N_COLS INT);\n"
- "CREATE UNIQUE CLUSTERED INDEX ID_IND"
- " ON SYS_FOREIGN (ID);\n"
- "CREATE INDEX FOR_IND"
- " ON SYS_FOREIGN (FOR_NAME);\n"
- "CREATE INDEX REF_IND"
- " ON SYS_FOREIGN (REF_NAME);\n"
- "CREATE TABLE\n"
- "SYS_FOREIGN_COLS(ID CHAR, POS INT,"
- " FOR_COL_NAME CHAR, REF_COL_NAME CHAR);\n"
- "CREATE UNIQUE CLUSTERED INDEX ID_IND"
- " ON SYS_FOREIGN_COLS (ID, POS);\n"
- "END;\n"
- , FALSE, trx);
-
- if (error != DB_SUCCESS) {
- fprintf(stderr, "InnoDB: error %lu in creation\n",
- (ulong) error);
-
- ut_a(error == DB_OUT_OF_FILE_SPACE
- || error == DB_TOO_MANY_CONCURRENT_TRXS);
-
- fprintf(stderr,
- "InnoDB: creation failed\n"
- "InnoDB: tablespace is full\n"
- "InnoDB: dropping incompletely created"
- " SYS_FOREIGN tables\n");
+ err = que_eval_sql(
+ NULL,
+ "PROCEDURE CREATE_FOREIGN_SYS_TABLES_PROC () IS\n"
+ "BEGIN\n"
+ "CREATE TABLE\n"
+ "SYS_FOREIGN(ID CHAR, FOR_NAME CHAR,"
+ " REF_NAME CHAR, N_COLS INT);\n"
+ "CREATE UNIQUE CLUSTERED INDEX ID_IND"
+ " ON SYS_FOREIGN (ID);\n"
+ "CREATE INDEX FOR_IND"
+ " ON SYS_FOREIGN (FOR_NAME);\n"
+ "CREATE INDEX REF_IND"
+ " ON SYS_FOREIGN (REF_NAME);\n"
+ "CREATE TABLE\n"
+ "SYS_FOREIGN_COLS(ID CHAR, POS INT,"
+ " FOR_COL_NAME CHAR, REF_COL_NAME CHAR);\n"
+ "CREATE UNIQUE CLUSTERED INDEX ID_IND"
+ " ON SYS_FOREIGN_COLS (ID, POS);\n"
+ "END;\n",
+ FALSE, trx);
+
+ if (err != DB_SUCCESS) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Creation of SYS_FOREIGN and SYS_FOREIGN_COLS "
+ "has failed with error %lu. Tablespace is full. "
+ "Dropping incompletely created tables.",
+ (ulong) err);
+
+ ut_ad(err == DB_OUT_OF_FILE_SPACE
+ || err == DB_TOO_MANY_CONCURRENT_TRXS);
row_drop_table_for_mysql("SYS_FOREIGN", trx, TRUE);
row_drop_table_for_mysql("SYS_FOREIGN_COLS", trx, TRUE);
- error = DB_MUST_GET_MORE_FILE_SPACE;
+ if (err == DB_OUT_OF_FILE_SPACE) {
+ err = DB_MUST_GET_MORE_FILE_SPACE;
+ }
}
trx_commit_for_mysql(trx);
@@ -1367,28 +1426,31 @@ dict_create_or_check_foreign_constraint_tables(void)
trx_free_for_mysql(trx);
- if (error == DB_SUCCESS) {
- fprintf(stderr,
- "InnoDB: Foreign key constraint system tables"
- " created\n");
+ srv_file_per_table = srv_file_per_table_backup;
+
+ if (err == DB_SUCCESS) {
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Foreign key constraint system tables created");
}
/* Note: The master thread has not been started at this point. */
/* Confirm and move to the non-LRU part of the table LRU list. */
+ sys_foreign_err = dict_check_if_system_table_exists(
+ "SYS_FOREIGN", DICT_NUM_FIELDS__SYS_FOREIGN + 1, 3);
+ ut_a(sys_foreign_err == DB_SUCCESS);
- success = dict_check_sys_foreign_tables_exist();
- ut_a(success);
-
- srv_file_per_table = (my_bool) srv_file_per_table_backup;
+ sys_foreign_cols_err = dict_check_if_system_table_exists(
+ "SYS_FOREIGN_COLS", DICT_NUM_FIELDS__SYS_FOREIGN_COLS + 1, 1);
+ ut_a(sys_foreign_cols_err == DB_SUCCESS);
- return(error);
+ return(err);
}
/****************************************************************//**
Evaluate the given foreign key SQL statement.
@return error code or DB_SUCCESS */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
dict_foreign_eval_sql(
/*==================*/
pars_info_t* info, /*!< in: info struct, or NULL */
@@ -1397,8 +1459,8 @@ dict_foreign_eval_sql(
dict_foreign_t* foreign,/*!< in: foreign */
trx_t* trx) /*!< in: transaction */
{
- ulint error;
- FILE* ef = dict_foreign_err_file;
+ dberr_t error;
+ FILE* ef = dict_foreign_err_file;
error = que_eval_sql(info, sql, FALSE, trx);
@@ -1453,8 +1515,8 @@ dict_foreign_eval_sql(
Add a single foreign key field definition to the data dictionary tables in
the database.
@return error code or DB_SUCCESS */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
dict_create_add_foreign_field_to_dictionary(
/*========================================*/
ulint field_nr, /*!< in: foreign field number */
@@ -1492,17 +1554,17 @@ databasename/tablename_ibfk_NUMBER, where the numbers start from 1, and
are given locally for this table, that is, the number is not global, as in
the old format constraints < 4.0.18 it used to be.
@return error code or DB_SUCCESS */
-static
-ulint
+UNIV_INTERN
+dberr_t
dict_create_add_foreign_to_dictionary(
/*==================================*/
ulint* id_nr, /*!< in/out: number to use in id generation;
incremented if used */
dict_table_t* table, /*!< in: table */
dict_foreign_t* foreign,/*!< in: foreign */
- trx_t* trx) /*!< in: transaction */
+ trx_t* trx) /*!< in/out: dictionary transaction */
{
- ulint error;
+ dberr_t error;
ulint i;
pars_info_t* info = pars_info_create();
@@ -1553,12 +1615,6 @@ dict_create_add_foreign_to_dictionary(
}
}
- trx->op_info = "committing foreign key definitions";
-
- trx_commit(trx);
-
- trx->op_info = "";
-
return(error);
}
@@ -1566,7 +1622,7 @@ dict_create_add_foreign_to_dictionary(
Adds foreign key definitions to data dictionary tables in the database.
@return error code or DB_SUCCESS */
UNIV_INTERN
-ulint
+dberr_t
dict_create_add_foreigns_to_dictionary(
/*===================================*/
ulint start_id,/*!< in: if we are actually doing ALTER TABLE
@@ -1582,7 +1638,7 @@ dict_create_add_foreigns_to_dictionary(
{
dict_foreign_t* foreign;
ulint number = start_id + 1;
- ulint error;
+ dberr_t error;
ut_ad(mutex_own(&(dict_sys->mutex)));
@@ -1607,5 +1663,188 @@ dict_create_add_foreigns_to_dictionary(
}
}
+ trx->op_info = "committing foreign key definitions";
+
+ trx_commit(trx);
+
+ trx->op_info = "";
+
return(DB_SUCCESS);
}
+
+/****************************************************************//**
+Creates the tablespaces and datafiles system tables inside InnoDB
+at server bootstrap or server start if they are not found or are
+not of the right form.
+@return DB_SUCCESS or error code */
+UNIV_INTERN
+dberr_t
+dict_create_or_check_sys_tablespace(void)
+/*=====================================*/
+{
+ trx_t* trx;
+ my_bool srv_file_per_table_backup;
+ dberr_t err;
+ dberr_t sys_tablespaces_err;
+ dberr_t sys_datafiles_err;
+
+ ut_a(srv_get_active_thread_type() == SRV_NONE);
+
+ /* Note: The master thread has not been started at this point. */
+
+ sys_tablespaces_err = dict_check_if_system_table_exists(
+ "SYS_TABLESPACES", DICT_NUM_FIELDS__SYS_TABLESPACES + 1, 1);
+ sys_datafiles_err = dict_check_if_system_table_exists(
+ "SYS_DATAFILES", DICT_NUM_FIELDS__SYS_DATAFILES + 1, 1);
+
+ if (sys_tablespaces_err == DB_SUCCESS
+ && sys_datafiles_err == DB_SUCCESS) {
+ return(DB_SUCCESS);
+ }
+
+ trx = trx_allocate_for_mysql();
+
+ trx_set_dict_operation(trx, TRX_DICT_OP_TABLE);
+
+ trx->op_info = "creating tablepace and datafile sys tables";
+
+ row_mysql_lock_data_dictionary(trx);
+
+ /* Check which incomplete table definition to drop. */
+
+ if (sys_tablespaces_err == DB_CORRUPTION) {
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Dropping incompletely created "
+ "SYS_TABLESPACES table.");
+ row_drop_table_for_mysql("SYS_TABLESPACES", trx, TRUE);
+ }
+
+ if (sys_datafiles_err == DB_CORRUPTION) {
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Dropping incompletely created "
+ "SYS_DATAFILES table.");
+
+ row_drop_table_for_mysql("SYS_DATAFILES", trx, TRUE);
+ }
+
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Creating tablespace and datafile system tables.");
+
+ /* We always want SYSTEM tables to be created inside the system
+ tablespace. */
+ srv_file_per_table_backup = srv_file_per_table;
+ srv_file_per_table = 0;
+
+ err = que_eval_sql(
+ NULL,
+ "PROCEDURE CREATE_SYS_TABLESPACE_PROC () IS\n"
+ "BEGIN\n"
+ "CREATE TABLE SYS_TABLESPACES(\n"
+ " SPACE INT, NAME CHAR, FLAGS INT);\n"
+ "CREATE UNIQUE CLUSTERED INDEX SYS_TABLESPACES_SPACE"
+ " ON SYS_TABLESPACES (SPACE);\n"
+ "CREATE TABLE SYS_DATAFILES(\n"
+ " SPACE INT, PATH CHAR);\n"
+ "CREATE UNIQUE CLUSTERED INDEX SYS_DATAFILES_SPACE"
+ " ON SYS_DATAFILES (SPACE);\n"
+ "END;\n",
+ FALSE, trx);
+
+ if (err != DB_SUCCESS) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Creation of SYS_TABLESPACES and SYS_DATAFILES "
+ "has failed with error %lu. Tablespace is full. "
+ "Dropping incompletely created tables.",
+ (ulong) err);
+
+ ut_a(err == DB_OUT_OF_FILE_SPACE
+ || err == DB_TOO_MANY_CONCURRENT_TRXS);
+
+ row_drop_table_for_mysql("SYS_TABLESPACES", trx, TRUE);
+ row_drop_table_for_mysql("SYS_DATAFILES", trx, TRUE);
+
+ if (err == DB_OUT_OF_FILE_SPACE) {
+ err = DB_MUST_GET_MORE_FILE_SPACE;
+ }
+ }
+
+ trx_commit_for_mysql(trx);
+
+ row_mysql_unlock_data_dictionary(trx);
+
+ trx_free_for_mysql(trx);
+
+ srv_file_per_table = srv_file_per_table_backup;
+
+ if (err == DB_SUCCESS) {
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Tablespace and datafile system tables created.");
+ }
+
+ /* Note: The master thread has not been started at this point. */
+ /* Confirm and move to the non-LRU part of the table LRU list. */
+
+ sys_tablespaces_err = dict_check_if_system_table_exists(
+ "SYS_TABLESPACES", DICT_NUM_FIELDS__SYS_TABLESPACES + 1, 1);
+ ut_a(sys_tablespaces_err == DB_SUCCESS);
+
+ sys_datafiles_err = dict_check_if_system_table_exists(
+ "SYS_DATAFILES", DICT_NUM_FIELDS__SYS_DATAFILES + 1, 1);
+ ut_a(sys_datafiles_err == DB_SUCCESS);
+
+ return(err);
+}
+
+/********************************************************************//**
+Add a single tablespace definition to the data dictionary tables in the
+database.
+@return error code or DB_SUCCESS */
+UNIV_INTERN
+dberr_t
+dict_create_add_tablespace_to_dictionary(
+/*=====================================*/
+ ulint space, /*!< in: tablespace id */
+ const char* name, /*!< in: tablespace name */
+ ulint flags, /*!< in: tablespace flags */
+ const char* path, /*!< in: tablespace path */
+ trx_t* trx, /*!< in/out: transaction */
+ bool commit) /*!< in: if true then commit the
+ transaction */
+{
+ dberr_t error;
+
+ pars_info_t* info = pars_info_create();
+
+ ut_a(space > TRX_SYS_SPACE);
+
+ pars_info_add_int4_literal(info, "space", space);
+
+ pars_info_add_str_literal(info, "name", name);
+
+ pars_info_add_int4_literal(info, "flags", flags);
+
+ pars_info_add_str_literal(info, "path", path);
+
+ error = que_eval_sql(info,
+ "PROCEDURE P () IS\n"
+ "BEGIN\n"
+ "INSERT INTO SYS_TABLESPACES VALUES"
+ "(:space, :name, :flags);\n"
+ "INSERT INTO SYS_DATAFILES VALUES"
+ "(:space, :path);\n"
+ "END;\n",
+ FALSE, trx);
+
+ if (error != DB_SUCCESS) {
+ return(error);
+ }
+
+ if (commit) {
+ trx->op_info = "committing tablespace and datafile definition";
+ trx_commit(trx);
+ }
+
+ trx->op_info = "";
+
+ return(error);
+}
diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc
index 8282dafda0c..a560dc54eac 100644
--- a/storage/innobase/dict/dict0dict.cc
+++ b/storage/innobase/dict/dict0dict.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
-Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2012, Facebook Inc.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -25,6 +26,7 @@ Created 1/8/1996 Heikki Tuuri
#include "dict0dict.h"
#include "fts0fts.h"
+#include "fil0fil.h"
#ifdef UNIV_NONINL
#include "dict0dict.ic"
@@ -56,7 +58,6 @@ UNIV_INTERN dict_index_t* dict_ind_compact;
#include "rem0cmp.h"
#include "fts0fts.h"
#include "fts0types.h"
-#include "row0merge.h"
#include "m_ctype.h" /* my_isspace() */
#include "ha_prototypes.h" /* innobase_strcasecmp(), innobase_casedn_str() */
#include "srv0mon.h"
@@ -64,6 +65,14 @@ UNIV_INTERN dict_index_t* dict_ind_compact;
#include "lock0lock.h"
#include "dict0priv.h"
#include "row0upd.h"
+#include "row0mysql.h"
+#include "row0merge.h"
+#include "row0log.h"
+#include "ut0ut.h" /* ut_format_name() */
+#include "m_string.h"
+#include "my_sys.h"
+#include "mysqld.h" /* system_charset_info */
+#include "strfunc.h" /* strconvert() */
#include <ctype.h>
@@ -77,17 +86,27 @@ backround operations purge, rollback, foreign key checks reserve this
in S-mode; we cannot trust that MySQL protects implicit or background
operations a table drop since MySQL does not know of them; therefore
we need this; NOTE: a transaction which reserves this must keep book
-on the mode in trx_struct::dict_operation_lock_mode */
+on the mode in trx_t::dict_operation_lock_mode */
UNIV_INTERN rw_lock_t dict_operation_lock;
+/** Percentage of compression failures that are allowed in a single
+round */
+UNIV_INTERN ulong zip_failure_threshold_pct = 5;
+
+/** Maximum percentage of a page that can be allowed as a pad to avoid
+compression failures */
+UNIV_INTERN ulong zip_pad_max = 50;
+
/* Keys to register rwlocks and mutexes with performance schema */
#ifdef UNIV_PFS_RWLOCK
UNIV_INTERN mysql_pfs_key_t dict_operation_lock_key;
UNIV_INTERN mysql_pfs_key_t index_tree_rw_lock_key;
+UNIV_INTERN mysql_pfs_key_t index_online_log_key;
UNIV_INTERN mysql_pfs_key_t dict_table_stats_latch_key;
#endif /* UNIV_PFS_RWLOCK */
#ifdef UNIV_PFS_MUTEX
+UNIV_INTERN mysql_pfs_key_t zip_pad_mutex_key;
UNIV_INTERN mysql_pfs_key_t dict_sys_mutex_key;
UNIV_INTERN mysql_pfs_key_t dict_foreign_err_mutex_key;
#endif /* UNIV_PFS_MUTEX */
@@ -157,13 +176,6 @@ dict_index_build_internal_fts(
dict_table_t* table, /*!< in: table */
dict_index_t* index); /*!< in: user representation of an FTS index */
/**********************************************************************//**
-Removes a foreign constraint struct from the dictionary cache. */
-static
-void
-dict_foreign_remove_from_cache(
-/*===========================*/
- dict_foreign_t* foreign); /*!< in, own: foreign constraint */
-/**********************************************************************//**
Prints a column data. */
static
void
@@ -185,14 +197,6 @@ void
dict_field_print_low(
/*=================*/
const dict_field_t* field); /*!< in: field */
-#ifndef UNIV_HOTBACKUP
-/*********************************************************************//**
-Frees a foreign key struct. */
-static
-void
-dict_foreign_free(
-/*==============*/
- dict_foreign_t* foreign); /*!< in, own: foreign key struct */
/**********************************************************************//**
Removes an index from the dictionary cache. */
@@ -216,14 +220,14 @@ dict_table_remove_from_cache_low(
/**********************************************************************//**
Validate the dictionary table LRU list.
@return TRUE if validate OK */
-UNIV_INTERN
+static
ibool
dict_lru_validate(void);
/*===================*/
/**********************************************************************//**
Check if table is in the dictionary table LRU list.
@return TRUE if table found */
-UNIV_INTERN
+static
ibool
dict_lru_find_table(
/*================*/
@@ -239,11 +243,11 @@ dict_non_lru_find_table(
#endif /* UNIV_DEBUG */
/* Stream for storing detailed information about the latest foreign key
-and unique key errors */
+and unique key errors. Only created if !srv_read_only_mode */
UNIV_INTERN FILE* dict_foreign_err_file = NULL;
/* mutex protecting the foreign and unique error buffers */
-UNIV_INTERN mutex_t dict_foreign_err_mutex;
-#endif /* !UNIV_HOTBACKUP */
+UNIV_INTERN ib_mutex_t dict_foreign_err_mutex;
+
/******************************************************************//**
Makes all characters in a NUL-terminated UTF-8 string lower case. */
UNIV_INTERN
@@ -330,7 +334,7 @@ dict_mutex_exit_for_mysql(void)
/** Get the latch that protects the stats of a given table */
#define GET_TABLE_STATS_LATCH(table) \
- (&dict_table_stats_latches[ut_fold_ull(table->id) \
+ (&dict_table_stats_latches[ut_fold_ull((ib_uint64_t) table) \
% DICT_TABLE_STATS_LATCHES_SIZE])
/**********************************************************************//**
@@ -389,6 +393,75 @@ dict_table_stats_unlock(
}
}
+/**********************************************************************//**
+Try to drop any indexes after an aborted index creation.
+This can also be after a server kill during DROP INDEX. */
+static
+void
+dict_table_try_drop_aborted(
+/*========================*/
+ dict_table_t* table, /*!< in: table, or NULL if it
+ needs to be looked up again */
+ table_id_t table_id, /*!< in: table identifier */
+ ulint ref_count) /*!< in: expected table->n_ref_count */
+{
+ trx_t* trx;
+
+ trx = trx_allocate_for_background();
+ trx->op_info = "try to drop any indexes after an aborted index creation";
+ row_mysql_lock_data_dictionary(trx);
+ trx_set_dict_operation(trx, TRX_DICT_OP_INDEX);
+
+ if (table == NULL) {
+ table = dict_table_open_on_id_low(table_id);
+ } else {
+ ut_ad(table->id == table_id);
+ }
+
+ if (table && table->n_ref_count == ref_count && table->drop_aborted) {
+ /* Silence a debug assertion in row_merge_drop_indexes(). */
+ ut_d(table->n_ref_count++);
+ row_merge_drop_indexes(trx, table, TRUE);
+ ut_d(table->n_ref_count--);
+ ut_ad(table->n_ref_count == ref_count);
+ trx_commit_for_mysql(trx);
+ }
+
+ row_mysql_unlock_data_dictionary(trx);
+ trx_free_for_background(trx);
+}
+
+/**********************************************************************//**
+When opening a table,
+try to drop any indexes after an aborted index creation.
+Release the dict_sys->mutex. */
+static
+void
+dict_table_try_drop_aborted_and_mutex_exit(
+/*=======================================*/
+ dict_table_t* table, /*!< in: table (may be NULL) */
+	ibool		try_drop)	/*!< in: TRUE if we should try to
+					drop indexes whose online creation
+					was aborted */
+{
+ if (try_drop
+ && table != NULL
+ && table->drop_aborted
+ && table->n_ref_count == 1
+ && dict_table_get_first_index(table)) {
+
+ /* Attempt to drop the indexes whose online creation
+ was aborted. */
+ table_id_t table_id = table->id;
+
+ mutex_exit(&dict_sys->mutex);
+
+ dict_table_try_drop_aborted(table, table_id, 1);
+ } else {
+ mutex_exit(&dict_sys->mutex);
+ }
+}
+
/********************************************************************//**
Decrements the count of open handles to a table. */
UNIV_INTERN
@@ -396,7 +469,10 @@ void
dict_table_close(
/*=============*/
dict_table_t* table, /*!< in/out: table */
- ibool dict_locked) /*!< in: TRUE=data dictionary locked */
+ ibool dict_locked, /*!< in: TRUE=data dictionary locked */
+ ibool try_drop) /*!< in: TRUE=try to drop any orphan
+ indexes after an aborted online
+ index creation */
{
if (!dict_locked) {
mutex_enter(&dict_sys->mutex);
@@ -407,6 +483,18 @@ dict_table_close(
--table->n_ref_count;
+ /* Force persistent stats re-read upon next open of the table
+ so that FLUSH TABLE can be used to forcibly fetch stats from disk
+ if they have been manually modified. We reset table->stat_initialized
+ only if table reference count is 0 because we do not want too frequent
+ stats re-reads (e.g. in other cases than FLUSH TABLE). */
+ if (strchr(table->name, '/') != NULL
+ && table->n_ref_count == 0
+ && dict_stats_is_persistent_enabled(table)) {
+
+ dict_stats_deinit(table);
+ }
+
MONITOR_DEC(MONITOR_TABLE_REFERENCE);
ut_ad(dict_lru_validate());
@@ -420,7 +508,19 @@ dict_table_close(
#endif /* UNIV_DEBUG */
if (!dict_locked) {
+ table_id_t table_id = table->id;
+ ibool drop_aborted;
+
+ drop_aborted = try_drop
+ && table->drop_aborted
+ && table->n_ref_count == 1
+ && dict_table_get_first_index(table);
+
mutex_exit(&dict_sys->mutex);
+
+ if (drop_aborted) {
+ dict_table_try_drop_aborted(NULL, table_id, 0);
+ }
}
}
#endif /* !UNIV_HOTBACKUP */
@@ -550,33 +650,6 @@ dict_table_autoinc_unlock(
{
mutex_exit(&table->autoinc_mutex);
}
-
-/**********************************************************************//**
-Looks for an index with the given table and index id.
-Note: Does not reserve the dictionary mutex.
-@return index or NULL if not found in cache */
-UNIV_INTERN
-dict_index_t*
-dict_index_get_on_id_low(
-/*=====================*/
- dict_table_t* table, /*!< in: table */
- index_id_t id) /*!< in: index id */
-{
- dict_index_t* index;
-
- for (index = dict_table_get_first_index(table);
- index != NULL;
- index = dict_table_get_next_index(index)) {
-
- if (id == index->id) {
- /* Found */
-
- return(index);
- }
- }
-
- return(NULL);
-}
#endif /* !UNIV_HOTBACKUP */
/********************************************************************//**
@@ -712,7 +785,10 @@ dict_table_t*
dict_table_open_on_id(
/*==================*/
table_id_t table_id, /*!< in: table id */
- ibool dict_locked) /*!< in: TRUE=data dictionary locked */
+ ibool dict_locked, /*!< in: TRUE=data dictionary locked */
+ ibool try_drop) /*!< in: TRUE=try to drop any orphan
+ indexes after an aborted online
+ index creation */
{
dict_table_t* table;
@@ -736,7 +812,7 @@ dict_table_open_on_id(
}
if (!dict_locked) {
- mutex_exit(&dict_sys->mutex);
+ dict_table_try_drop_aborted_and_mutex_exit(table, try_drop);
}
return(table);
@@ -815,11 +891,13 @@ dict_init(void)
rw_lock_create(dict_operation_lock_key,
&dict_operation_lock, SYNC_DICT_OPERATION);
- dict_foreign_err_file = os_file_create_tmpfile();
- ut_a(dict_foreign_err_file);
+ if (!srv_read_only_mode) {
+ dict_foreign_err_file = os_file_create_tmpfile();
+ ut_a(dict_foreign_err_file);
- mutex_create(dict_foreign_err_mutex_key,
- &dict_foreign_err_mutex, SYNC_NO_ORDER_CHECK);
+ mutex_create(dict_foreign_err_mutex_key,
+ &dict_foreign_err_mutex, SYNC_NO_ORDER_CHECK);
+ }
for (i = 0; i < DICT_TABLE_STATS_LATCHES_SIZE; i++) {
rw_lock_create(dict_table_stats_latch_key,
@@ -849,14 +927,20 @@ dict_move_to_mru(
}
/**********************************************************************//**
-Returns a table object and increments its open handle count.
+Returns a table object and increment its open handle count.
+NOTE! This is a high-level function to be used mainly from outside the
+'dict' module. Inside this module dict_table_get_low
+is usually the appropriate function.
@return table, NULL if does not exist */
-static
+UNIV_INTERN
dict_table_t*
-dict_table_open_on_name_low(
-/*========================*/
+dict_table_open_on_name(
+/*====================*/
const char* table_name, /*!< in: table name */
ibool dict_locked, /*!< in: TRUE=data dictionary locked */
+ ibool try_drop, /*!< in: TRUE=try to drop any orphan
+ indexes after an aborted online
+ index creation */
dict_err_ignore_t
ignore_err) /*!< in: error to be ignored when
loading a table definition */
@@ -915,61 +999,11 @@ dict_table_open_on_name_low(
ut_ad(dict_lru_validate());
if (!dict_locked) {
- mutex_exit(&(dict_sys->mutex));
+ dict_table_try_drop_aborted_and_mutex_exit(table, try_drop);
}
return(table);
}
-
-/**********************************************************************//**
-Returns a table object and increment its open handle count.
-NOTE! This is a high-level function to be used mainly from outside the
-'dict' directory. Inside this directory dict_table_get_low
-is usually the appropriate function.
-@return table, NULL if does not exist */
-UNIV_INTERN
-dict_table_t*
-dict_table_open_on_name(
-/*====================*/
- const char* table_name, /*!< in: table name */
- ibool dict_locked) /*!< in: TRUE=data dictionary locked */
-{
- dict_table_t* table;
-
- table = dict_table_open_on_name_low(table_name, dict_locked,
- DICT_ERR_IGNORE_NONE);
-
- if (table != NULL) {
- /* If table->ibd_file_missing == TRUE, this will
- print an error message and return without doing
- anything. */
- dict_stats_update(table,
- DICT_STATS_FETCH_ONLY_IF_NOT_IN_MEMORY,
- dict_locked);
- }
-
- return(table);
-}
-
-/**********************************************************************//**
-Returns a table object and increment its open handle count. Table
-statistics will not be updated if they are not initialized.
-Call this function when dropping a table.
-@return table, NULL if does not exist */
-UNIV_INTERN
-dict_table_t*
-dict_table_open_on_name_no_stats(
-/*=============================*/
- const char* table_name, /*!< in: table name */
- ibool dict_locked, /*!< in: TRUE=data dictionary locked */
- dict_err_ignore_t
- ignore_err) /*!< in: error to be ignored during
- table open */
-{
- return(dict_table_open_on_name_low(table_name, dict_locked,
- ignore_err));
-}
-
#endif /* !UNIV_HOTBACKUP */
/**********************************************************************//**
@@ -1156,7 +1190,7 @@ dict_table_can_be_evicted(
index != NULL;
index = dict_table_get_next_index(index)) {
- btr_search_t* info = index->search_info;
+ btr_search_t* info = btr_search_get_info(index);
/* We are not allowed to free the in-memory index
struct dict_index_t until all entries in the adaptive
@@ -1358,7 +1392,7 @@ dict_index_find_on_id_low(
Renames a table object.
@return TRUE if success */
UNIV_INTERN
-ibool
+dberr_t
dict_table_rename_in_cache(
/*=======================*/
dict_table_t* table, /*!< in/out: table */
@@ -1372,7 +1406,6 @@ dict_table_rename_in_cache(
ulint fold;
char old_name[MAX_FULL_NAME_LEN + 1];
- ut_ad(table);
ut_ad(mutex_own(&(dict_sys->mutex)));
/* store the old/current name to an automatic variable */
@@ -1389,28 +1422,59 @@ dict_table_rename_in_cache(
fold = ut_fold_string(new_name);
/* Look for a table with the same name: error if such exists */
- {
- dict_table_t* table2;
- HASH_SEARCH(name_hash, dict_sys->table_hash, fold,
- dict_table_t*, table2, ut_ad(table2->cached),
- (ut_strcmp(table2->name, new_name) == 0));
- if (UNIV_LIKELY_NULL(table2)) {
- ut_print_timestamp(stderr);
- fputs(" InnoDB: Error: dictionary cache"
- " already contains a table ", stderr);
- ut_print_name(stderr, NULL, TRUE, new_name);
- fputs("\n"
- "InnoDB: cannot rename table ", stderr);
- ut_print_name(stderr, NULL, TRUE, old_name);
- putc('\n', stderr);
- return(FALSE);
- }
+ dict_table_t* table2;
+ HASH_SEARCH(name_hash, dict_sys->table_hash, fold,
+ dict_table_t*, table2, ut_ad(table2->cached),
+ (ut_strcmp(table2->name, new_name) == 0));
+ DBUG_EXECUTE_IF("dict_table_rename_in_cache_failure",
+ if (table2 == NULL) {
+ table2 = (dict_table_t*) -1;
+ } );
+ if (table2) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Cannot rename table '%s' to '%s' since the "
+ "dictionary cache already contains '%s'.",
+ old_name, new_name, new_name);
+ return(DB_ERROR);
}
/* If the table is stored in a single-table tablespace, rename the
- .ibd file */
+ .ibd file and rebuild the .isl file if needed. */
+
+ if (dict_table_is_discarded(table)) {
+ os_file_type_t type;
+ ibool exists;
+ char* filepath;
+
+ ut_ad(table->space != TRX_SYS_SPACE);
+
+ if (DICT_TF_HAS_DATA_DIR(table->flags)) {
+
+ dict_get_and_save_data_dir_path(table, true);
+ ut_a(table->data_dir_path);
+
+ filepath = os_file_make_remote_pathname(
+ table->data_dir_path, table->name, "ibd");
+ } else {
+ filepath = fil_make_ibd_name(table->name, false);
+ }
+
+ fil_delete_tablespace(table->space, BUF_REMOVE_FLUSH_NO_WRITE);
+
+ /* Delete any temp file hanging around. */
+ if (os_file_status(filepath, &exists, &type)
+ && exists
+ && !os_file_delete_if_exists(filepath)) {
+
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Delete of %s failed.", filepath);
+ }
+
+ mem_free(filepath);
+
+ } else if (table->space != TRX_SYS_SPACE) {
+ char* new_path = NULL;
- if (table->space != 0) {
if (table->dir_path_of_temp_table != NULL) {
ut_print_timestamp(stderr);
fputs(" InnoDB: Error: trying to rename a"
@@ -1420,10 +1484,40 @@ dict_table_rename_in_cache(
ut_print_filename(stderr,
table->dir_path_of_temp_table);
fputs(" )\n", stderr);
- return(FALSE);
- } else if (!fil_rename_tablespace(old_name, table->space,
- new_name)) {
- return(FALSE);
+ return(DB_ERROR);
+
+ } else if (DICT_TF_HAS_DATA_DIR(table->flags)) {
+ char* old_path;
+
+ old_path = fil_space_get_first_path(table->space);
+
+ new_path = os_file_make_new_pathname(
+ old_path, new_name);
+
+ mem_free(old_path);
+
+ dberr_t err = fil_create_link_file(
+ new_name, new_path);
+
+ if (err != DB_SUCCESS) {
+ mem_free(new_path);
+ return(DB_TABLESPACE_EXISTS);
+ }
+ }
+
+ ibool success = fil_rename_tablespace(
+ old_name, table->space, new_name, new_path);
+
+ /* If the tablespace is remote, a new .isl file was created
+ If success, delete the old one. If not, delete the new one. */
+ if (new_path) {
+
+ mem_free(new_path);
+ fil_delete_link_file(success ? old_name : new_name);
+ }
+
+ if (!success) {
+ return(DB_ERROR);
}
}
@@ -1450,12 +1544,11 @@ dict_table_rename_in_cache(
ut_a(dict_sys->size > 0);
/* Update the table_name field in indexes */
- index = dict_table_get_first_index(table);
+ for (index = dict_table_get_first_index(table);
+ index != NULL;
+ index = dict_table_get_next_index(index)) {
- while (index != NULL) {
index->table_name = table->name;
-
- index = dict_table_get_next_index(index);
}
if (!rename_also_foreigns) {
@@ -1490,7 +1583,7 @@ dict_table_rename_in_cache(
UT_LIST_INIT(table->referenced_list);
- return(TRUE);
+ return(DB_SUCCESS);
}
/* Update the table name fields in foreign constraints, and update also
@@ -1571,9 +1664,10 @@ dict_table_rename_in_cache(
foreign = UT_LIST_GET_NEXT(foreign_list, foreign);
}
- foreign = UT_LIST_GET_FIRST(table->referenced_list);
+ for (foreign = UT_LIST_GET_FIRST(table->referenced_list);
+ foreign != NULL;
+ foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) {
- while (foreign != NULL) {
if (ut_strlen(foreign->referenced_table_name)
< ut_strlen(table->name)) {
/* Allocate a longer name buffer;
@@ -1581,16 +1675,19 @@ dict_table_rename_in_cache(
foreign->referenced_table_name = mem_heap_strdup(
foreign->heap, table->name);
- dict_mem_referenced_table_name_lookup_set(foreign, TRUE);
+
+ dict_mem_referenced_table_name_lookup_set(
+ foreign, TRUE);
} else {
/* Use the same buffer */
strcpy(foreign->referenced_table_name, table->name);
- dict_mem_referenced_table_name_lookup_set(foreign, FALSE);
+
+ dict_mem_referenced_table_name_lookup_set(
+ foreign, FALSE);
}
- foreign = UT_LIST_GET_NEXT(referenced_list, foreign);
}
- return(TRUE);
+ return(DB_SUCCESS);
}
/**********************************************************************//**
@@ -1692,6 +1789,30 @@ dict_table_remove_from_cache_low(
ut_ad(dict_lru_validate());
+ if (lru_evict && table->drop_aborted) {
+ /* Do as dict_table_try_drop_aborted() does. */
+
+ trx_t* trx = trx_allocate_for_background();
+
+ ut_ad(mutex_own(&dict_sys->mutex));
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+ /* Mimic row_mysql_lock_data_dictionary(). */
+ trx->dict_operation_lock_mode = RW_X_LATCH;
+
+ trx_set_dict_operation(trx, TRX_DICT_OP_INDEX);
+
+ /* Silence a debug assertion in row_merge_drop_indexes(). */
+ ut_d(table->n_ref_count++);
+ row_merge_drop_indexes(trx, table, TRUE);
+ ut_d(table->n_ref_count--);
+ ut_ad(table->n_ref_count == 0);
+ trx_commit_for_mysql(trx);
+ trx->dict_operation_lock_mode = 0;
+ trx_free_for_background(trx);
+ }
+
size = mem_heap_get_size(table->heap) + strlen(table->name) + 1;
ut_ad(dict_sys->size >= size);
@@ -1777,6 +1898,12 @@ dict_index_too_big_for_undo(
+ 10 + FIL_PAGE_DATA_END /* trx_undo_left() */
+ 2/* pointer to previous undo log record */;
+ /* FTS index consists of auxiliary tables, they shall be excluded from
+ index row size check */
+ if (new_index->type & DICT_FTS) {
+ return(false);
+ }
+
if (!clust_index) {
ut_a(dict_index_is_clust(new_index));
clust_index = new_index;
@@ -1900,6 +2027,12 @@ dict_index_too_big_for_tree(
/* maximum allowed size of a node pointer record */
ulint page_ptr_max;
+ /* FTS index consists of auxiliary tables, they shall be excluded from
+ index row size check */
+ if (new_index->type & DICT_FTS) {
+ return(false);
+ }
+
comp = dict_table_is_comp(table);
zip_size = dict_table_zip_size(table);
@@ -2032,7 +2165,7 @@ add_field_size:
Adds an index to the dictionary cache.
@return DB_SUCCESS, DB_TOO_BIG_RECORD, or DB_CORRUPTION */
UNIV_INTERN
-ulint
+dberr_t
dict_index_add_to_cache(
/*====================*/
dict_table_t* table, /*!< in: table on which the index is */
@@ -2051,6 +2184,7 @@ dict_index_add_to_cache(
ut_ad(mutex_own(&(dict_sys->mutex)));
ut_ad(index->n_def == index->n_fields);
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
+ ut_ad(!dict_index_is_online_ddl(index));
ut_ad(mem_heap_validate(index->heap));
ut_a(!dict_index_is_clust(index)
@@ -2077,6 +2211,7 @@ dict_index_add_to_cache(
number of fields in the cache internal representation */
new_index->n_fields = new_index->n_def;
+ new_index->trx_id = index->trx_id;
if (strict && dict_index_too_big_for_tree(table, new_index)) {
too_big:
@@ -2169,51 +2304,41 @@ undo_size_ok:
}
}
- /* Add the new index as the last index for the table */
-
- UT_LIST_ADD_LAST(indexes, table->indexes, new_index);
- new_index->table = table;
- new_index->table_name = table->name;
-
- new_index->search_info = btr_search_info_create(new_index->heap);
-
- new_index->stat_index_size = 1;
- new_index->stat_n_leaf_pages = 1;
-
- new_index->page = page_no;
- rw_lock_create(index_tree_rw_lock_key, &new_index->lock,
- dict_index_is_ibuf(index)
- ? SYNC_IBUF_INDEX_TREE : SYNC_INDEX_TREE);
-
if (!dict_index_is_univ(new_index)) {
new_index->stat_n_diff_key_vals =
- static_cast<ib_uint64_t*>(mem_heap_alloc(
+ static_cast<ib_uint64_t*>(mem_heap_zalloc(
new_index->heap,
- (1 + dict_index_get_n_unique(new_index))
+ dict_index_get_n_unique(new_index)
* sizeof(*new_index->stat_n_diff_key_vals)));
new_index->stat_n_sample_sizes =
- static_cast<ib_uint64_t*>(mem_heap_alloc(
+ static_cast<ib_uint64_t*>(mem_heap_zalloc(
new_index->heap,
- (1 + dict_index_get_n_unique(new_index))
+ dict_index_get_n_unique(new_index)
* sizeof(*new_index->stat_n_sample_sizes)));
new_index->stat_n_non_null_key_vals =
static_cast<ib_uint64_t*>(mem_heap_zalloc(
new_index->heap,
- (1 + dict_index_get_n_unique(new_index))
+ dict_index_get_n_unique(new_index)
* sizeof(*new_index->stat_n_non_null_key_vals)));
+ }
- /* Give some sensible values to stat_n_... in case we do
- not calculate statistics quickly enough */
+ new_index->stat_index_size = 1;
+ new_index->stat_n_leaf_pages = 1;
- for (i = 0; i <= dict_index_get_n_unique(new_index); i++) {
+ /* Add the new index as the last index for the table */
- new_index->stat_n_diff_key_vals[i] = 100;
- new_index->stat_n_sample_sizes[i] = 0;
- }
- }
+ UT_LIST_ADD_LAST(indexes, table->indexes, new_index);
+ new_index->table = table;
+ new_index->table_name = table->name;
+ new_index->search_info = btr_search_info_create(new_index->heap);
+
+ new_index->page = page_no;
+ rw_lock_create(index_tree_rw_lock_key, &new_index->lock,
+ dict_index_is_ibuf(index)
+ ? SYNC_IBUF_INDEX_TREE : SYNC_INDEX_TREE);
dict_sys->size += mem_heap_get_size(new_index->heap);
@@ -2242,9 +2367,17 @@ dict_index_remove_from_cache_low(
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);
ut_ad(mutex_own(&(dict_sys->mutex)));
+ /* No need to acquire the dict_index_t::lock here because
+ there can't be any active operations on this index (or table). */
+
+ if (index->online_log) {
+ ut_ad(index->online_status == ONLINE_INDEX_CREATION);
+ row_log_free(index->online_log);
+ }
+
/* We always create search info whether or not adaptive
hash index is enabled or not. */
- info = index->search_info;
+ info = btr_search_get_info(index);
ut_ad(info);
/* We are not allowed to free the in-memory index struct
@@ -2270,15 +2403,15 @@ dict_index_remove_from_cache_low(
if (retries % 500 == 0) {
/* No luck after 5 seconds of wait. */
fprintf(stderr, "InnoDB: Error: Waited for"
- " %lu secs for hash index"
- " ref_count (%lu) to drop"
- " to 0.\n"
- "index: \"%s\""
- " table: \"%s\"\n",
- retries/100,
- ref_count,
- index->name,
- table->name);
+ " %lu secs for hash index"
+ " ref_count (%lu) to drop"
+ " to 0.\n"
+ "index: \"%s\""
+ " table: \"%s\"\n",
+ retries/100,
+ ref_count,
+ index->name,
+ table->name);
}
/* To avoid a hang here we commit suicide if the
@@ -2821,8 +2954,6 @@ dict_index_build_internal_fts(
return(new_index);
}
-
-#ifndef UNIV_HOTBACKUP
/*====================== FOREIGN KEY PROCESSING ========================*/
/*********************************************************************//**
@@ -2889,8 +3020,7 @@ dict_table_get_foreign_constraint(
foreign;
foreign = UT_LIST_GET_NEXT(foreign_list, foreign)) {
- if (foreign->foreign_index == index
- || foreign->referenced_index == index) {
+ if (foreign->foreign_index == index) {
return(foreign);
}
@@ -2901,7 +3031,7 @@ dict_table_get_foreign_constraint(
/*********************************************************************//**
Frees a foreign key struct. */
-static
+UNIV_INTERN
void
dict_foreign_free(
/*==============*/
@@ -2912,7 +3042,7 @@ dict_foreign_free(
/**********************************************************************//**
Removes a foreign constraint struct from the dictionary cache. */
-static
+UNIV_INTERN
void
dict_foreign_remove_from_cache(
/*===========================*/
@@ -2976,84 +3106,50 @@ dict_foreign_find(
return(NULL);
}
+
/*********************************************************************//**
Tries to find an index whose first fields are the columns in the array,
in the same order and is not marked for deletion and is not the same
as types_idx.
@return matching index, NULL if not found */
-static
+UNIV_INTERN
dict_index_t*
dict_foreign_find_index(
/*====================*/
- dict_table_t* table, /*!< in: table */
- const char** columns,/*!< in: array of column names */
- ulint n_cols, /*!< in: number of columns */
- dict_index_t* types_idx, /*!< in: NULL or an index to whose types the
- column types must match */
- ibool check_charsets,
- /*!< in: whether to check charsets.
- only has an effect if types_idx != NULL */
- ulint check_null)
- /*!< in: nonzero if none of the columns must
- be declared NOT NULL */
+ const dict_table_t* table, /*!< in: table */
+ const char** columns,/*!< in: array of column names */
+ ulint n_cols, /*!< in: number of columns */
+ const dict_index_t* types_idx,
+ /*!< in: NULL or an index
+ whose types the column types
+ must match */
+ ibool check_charsets,
+ /*!< in: whether to check
+ charsets. only has an effect
+ if types_idx != NULL */
+ ulint check_null)
+ /*!< in: nonzero if none of
+ the columns must be declared
+ NOT NULL */
{
dict_index_t* index;
+ ut_ad(mutex_own(&dict_sys->mutex));
+
index = dict_table_get_first_index(table);
while (index != NULL) {
/* Ignore matches that refer to the same instance
- or the index is to be dropped */
- if (index->to_be_dropped || types_idx == index
- || index->type & DICT_FTS) {
+ (or the index is to be dropped) */
+ if (types_idx == index || index->type & DICT_FTS
+ || index->to_be_dropped) {
goto next_rec;
- } else if (dict_index_get_n_fields(index) >= n_cols) {
- ulint i;
-
- for (i = 0; i < n_cols; i++) {
- dict_field_t* field;
- const char* col_name;
-
- field = dict_index_get_nth_field(index, i);
-
- col_name = dict_table_get_col_name(
- table, dict_col_get_no(field->col));
-
- if (field->prefix_len != 0) {
- /* We do not accept column prefix
- indexes here */
-
- break;
- }
-
- if (0 != innobase_strcasecmp(columns[i],
- col_name)) {
- break;
- }
-
- if (check_null
- && (field->col->prtype & DATA_NOT_NULL)) {
-
- return(NULL);
- }
-
- if (types_idx && !cmp_cols_are_equal(
- dict_index_get_nth_col(index, i),
- dict_index_get_nth_col(types_idx,
- i),
- check_charsets)) {
-
- break;
- }
- }
-
- if (i == n_cols) {
- /* We found a matching index */
-
- return(index);
- }
+ } else if (dict_foreign_qualify_index(
+ table, columns, n_cols, index, types_idx,
+ check_charsets, check_null)) {
+ return(index);
}
next_rec:
@@ -3064,90 +3160,6 @@ next_rec:
}
/**********************************************************************//**
-Find an index that is equivalent to the one passed in and is not marked
-for deletion.
-@return index equivalent to foreign->foreign_index, or NULL */
-UNIV_INTERN
-dict_index_t*
-dict_foreign_find_equiv_index(
-/*==========================*/
- dict_foreign_t* foreign)/*!< in: foreign key */
-{
- ut_a(foreign != NULL);
-
- /* Try to find an index which contains the columns as the
- first fields and in the right order, and the types are the
- same as in foreign->foreign_index */
-
- return(dict_foreign_find_index(
- foreign->foreign_table,
- foreign->foreign_col_names, foreign->n_fields,
- foreign->foreign_index, TRUE, /* check types */
- FALSE/* allow columns to be NULL */));
-}
-
-#endif /* !UNIV_HOTBACKUP */
-/**********************************************************************//**
-Returns an index object by matching on the name and column names and
-if more than one index matches return the index with the max id
-@return matching index, NULL if not found */
-UNIV_INTERN
-dict_index_t*
-dict_table_get_index_by_max_id(
-/*===========================*/
- dict_table_t* table, /*!< in: table */
- const char* name, /*!< in: the index name to find */
- const char** columns,/*!< in: array of column names */
- ulint n_cols) /*!< in: number of columns */
-{
- dict_index_t* index;
- dict_index_t* found;
-
- found = NULL;
- index = dict_table_get_first_index(table);
-
- while (index != NULL) {
- if (ut_strcmp(index->name, name) == 0
- && dict_index_get_n_ordering_defined_by_user(index)
- == n_cols) {
-
- ulint i;
-
- for (i = 0; i < n_cols; i++) {
- dict_field_t* field;
- const char* col_name;
-
- field = dict_index_get_nth_field(index, i);
-
- col_name = dict_table_get_col_name(
- table, dict_col_get_no(field->col));
-
- if (0 != innobase_strcasecmp(
- columns[i], col_name)) {
-
- break;
- }
- }
-
- if (i == n_cols) {
- /* We found a matching index, select
- the index with the higher id*/
-
- if (!found || index->id > found->id) {
-
- found = index;
- }
- }
- }
-
- index = dict_table_get_next_index(index);
- }
-
- return(found);
-}
-
-#ifndef UNIV_HOTBACKUP
-/**********************************************************************//**
Report an error in a foreign key definition. */
static
void
@@ -3196,7 +3208,7 @@ At least one of the foreign table and the referenced table must already
be in the dictionary cache!
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
dict_foreign_add_to_cache(
/*======================*/
dict_foreign_t* foreign, /*!< in, own: foreign key constraint */
@@ -3325,7 +3337,6 @@ dict_foreign_add_to_cache(
return(DB_SUCCESS);
}
-#endif /* !UNIV_HOTBACKUP */
/*********************************************************************//**
Scans from pointer onwards. Stops if is at the start of a copy of
'string' where characters are compared without case sensitivity, and
@@ -3579,6 +3590,67 @@ dict_scan_col(
return(ptr);
}
+
+/*********************************************************************//**
+Open a table from its database and table name, this is currently used by
+foreign constraint parser to get the referenced table.
+@return complete table name with database and table name, allocated from
+heap memory passed in */
+UNIV_INTERN
+char*
+dict_get_referenced_table(
+/*======================*/
+ const char* name, /*!< in: foreign key table name */
+ const char* database_name, /*!< in: table db name */
+ ulint database_name_len, /*!< in: db name length */
+ const char* table_name, /*!< in: table name */
+ ulint table_name_len, /*!< in: table name length */
+ dict_table_t** table, /*!< out: table object or NULL */
+ mem_heap_t* heap) /*!< in/out: heap memory */
+{
+ char* ref;
+ const char* db_name;
+
+ if (!database_name) {
+ /* Use the database name of the foreign key table */
+
+ db_name = name;
+ database_name_len = dict_get_db_name_len(name);
+ } else {
+ db_name = database_name;
+ }
+
+ /* Copy database_name, '/', table_name, '\0' */
+ ref = static_cast<char*>(
+ mem_heap_alloc(heap, database_name_len + table_name_len + 2));
+
+ memcpy(ref, db_name, database_name_len);
+ ref[database_name_len] = '/';
+ memcpy(ref + database_name_len + 1, table_name, table_name_len + 1);
+
+	/* Values: 0 = Store and compare as given; case sensitive
+ 1 = Store and compare in lower; case insensitive
+ 2 = Store as given, compare in lower; case semi-sensitive */
+ if (innobase_get_lower_case_table_names() == 2) {
+ innobase_casedn_str(ref);
+ *table = dict_table_get_low(ref);
+ memcpy(ref, db_name, database_name_len);
+ ref[database_name_len] = '/';
+ memcpy(ref + database_name_len + 1, table_name, table_name_len + 1);
+
+ } else {
+#ifndef __WIN__
+ if (innobase_get_lower_case_table_names() == 1) {
+ innobase_casedn_str(ref);
+ }
+#else
+ innobase_casedn_str(ref);
+#endif /* !__WIN__ */
+ *table = dict_table_get_low(ref);
+ }
+
+ return(ref);
+}
/*********************************************************************//**
Scans a table name from an SQL string.
@return scanned to */
@@ -3598,9 +3670,7 @@ dict_scan_table_name(
const char* database_name = NULL;
ulint database_name_len = 0;
const char* table_name = NULL;
- ulint table_name_len;
const char* scan_name;
- char* ref;
*success = FALSE;
*table = NULL;
@@ -3648,46 +3718,11 @@ dict_scan_table_name(
table_name = scan_name;
}
- if (database_name == NULL) {
- /* Use the database name of the foreign key table */
-
- database_name = name;
- database_name_len = dict_get_db_name_len(name);
- }
-
- table_name_len = strlen(table_name);
-
- /* Copy database_name, '/', table_name, '\0' */
- ref = static_cast<char*>(
- mem_heap_alloc(heap, database_name_len + table_name_len + 2));
-
- memcpy(ref, database_name, database_name_len);
- ref[database_name_len] = '/';
- memcpy(ref + database_name_len + 1, table_name, table_name_len + 1);
-
- /* Values; 0 = Store and compare as given; case sensitive
- 1 = Store and compare in lower; case insensitive
- 2 = Store as given, compare in lower; case semi-sensitive */
- if (innobase_get_lower_case_table_names() == 2) {
- innobase_casedn_str(ref);
- *table = dict_table_get_low(ref);
- memcpy(ref, database_name, database_name_len);
- ref[database_name_len] = '/';
- memcpy(ref + database_name_len + 1, table_name, table_name_len + 1);
-
- } else {
-#ifndef __WIN__
- if (innobase_get_lower_case_table_names() == 1) {
- innobase_casedn_str(ref);
- }
-#else
- innobase_casedn_str(ref);
-#endif /* !__WIN__ */
- *table = dict_table_get_low(ref);
- }
+ *ref_name = dict_get_referenced_table(
+ name, database_name, database_name_len,
+ table_name, strlen(table_name), table, heap);
*success = TRUE;
- *ref_name = ref;
return(ptr);
}
@@ -3810,13 +3845,12 @@ end_of_string:
}
}
-#ifndef UNIV_HOTBACKUP
/*********************************************************************//**
Finds the highest [number] for foreign key constraints of the table. Looks
only at the >= 4.0.18-format id's, which are of the form
databasename/tablename_ibfk_[number].
@return highest number, 0 if table has no new format foreign key constraints */
-static
+UNIV_INTERN
ulint
dict_table_get_highest_foreign_id(
/*==============================*/
@@ -3871,6 +3905,8 @@ dict_foreign_report_syntax_err(
in the SQL string */
const char* ptr) /*!< in: place of the syntax error */
{
+ ut_ad(!srv_read_only_mode);
+
FILE* ef = dict_foreign_err_file;
mutex_enter(&dict_foreign_err_mutex);
@@ -3888,7 +3924,7 @@ be accompanied with indexes in both participating tables. The indexes are
allowed to contain more fields than mentioned in the constraint.
@return error code or DB_SUCCESS */
static
-ulint
+dberr_t
dict_create_foreign_constraints_low(
/*================================*/
trx_t* trx, /*!< in: transaction */
@@ -3919,7 +3955,7 @@ dict_create_foreign_constraints_low(
FILE* ef = dict_foreign_err_file;
const char* constraint_name;
ibool success;
- ulint error;
+ dberr_t error;
const char* ptr1;
const char* ptr2;
ulint i;
@@ -3931,6 +3967,7 @@ dict_create_foreign_constraints_low(
const char* column_names[500];
const char* referenced_table_name;
+ ut_ad(!srv_read_only_mode);
ut_ad(mutex_own(&(dict_sys->mutex)));
table = dict_table_get_low(name);
@@ -4470,11 +4507,11 @@ UNIV_INTERN
ibool
dict_str_starts_with_keyword(
/*=========================*/
- void* mysql_thd, /*!< in: MySQL thread handle */
+ THD* thd, /*!< in: MySQL thread handle */
const char* str, /*!< in: string to scan for keyword */
const char* keyword) /*!< in: keyword to look for */
{
- struct charset_info_st* cs = innobase_get_charset(mysql_thd);
+ struct charset_info_st* cs = innobase_get_charset(thd);
ibool success;
dict_accept(cs, str, keyword, &success);
@@ -4489,7 +4526,7 @@ be accompanied with indexes in both participating tables. The indexes are
allowed to contain more fields than mentioned in the constraint.
@return error code or DB_SUCCESS */
UNIV_INTERN
-ulint
+dberr_t
dict_create_foreign_constraints(
/*============================*/
trx_t* trx, /*!< in: transaction */
@@ -4509,9 +4546,9 @@ dict_create_foreign_constraints(
code DB_CANNOT_ADD_CONSTRAINT if
any foreign keys are found. */
{
- char* str;
- ulint err;
- mem_heap_t* heap;
+ char* str;
+ dberr_t err;
+ mem_heap_t* heap;
ut_a(trx);
ut_a(trx->mysql_thd);
@@ -4534,7 +4571,7 @@ Parses the CONSTRAINT id's to be dropped in an ALTER TABLE statement.
@return DB_SUCCESS or DB_CANNOT_DROP_CONSTRAINT if syntax error or the
constraint id does not match */
UNIV_INTERN
-ulint
+dberr_t
dict_foreign_parse_drop_constraints(
/*================================*/
mem_heap_t* heap, /*!< in: heap from which we can
@@ -4552,7 +4589,6 @@ dict_foreign_parse_drop_constraints(
size_t len;
const char* ptr;
const char* id;
- FILE* ef = dict_foreign_err_file;
struct charset_info_st* cs;
ut_a(trx);
@@ -4618,10 +4654,11 @@ loop:
foreign = UT_LIST_GET_FIRST(table->foreign_list);
while (foreign != NULL) {
- if (0 == strcmp(foreign->id, id)
+ if (0 == innobase_strcasecmp(foreign->id, id)
|| (strchr(foreign->id, '/')
- && 0 == strcmp(id,
- dict_remove_db_name(foreign->id)))) {
+ && 0 == innobase_strcasecmp(
+ id,
+ dict_remove_db_name(foreign->id)))) {
/* Found */
break;
}
@@ -4629,20 +4666,26 @@ loop:
foreign = UT_LIST_GET_NEXT(foreign_list, foreign);
}
+
if (foreign == NULL) {
- mutex_enter(&dict_foreign_err_mutex);
- rewind(ef);
- ut_print_timestamp(ef);
- fputs(" Error in dropping of a foreign key constraint"
- " of table ", ef);
- ut_print_name(ef, NULL, TRUE, table->name);
- fputs(",\n"
- "in SQL command\n", ef);
- fputs(str, ef);
- fputs("\nCannot find a constraint with the given id ", ef);
- ut_print_name(ef, NULL, FALSE, id);
- fputs(".\n", ef);
- mutex_exit(&dict_foreign_err_mutex);
+
+ if (!srv_read_only_mode) {
+ FILE* ef = dict_foreign_err_file;
+
+ mutex_enter(&dict_foreign_err_mutex);
+ rewind(ef);
+ ut_print_timestamp(ef);
+ fputs(" Error in dropping of a foreign key "
+ "constraint of table ", ef);
+ ut_print_name(ef, NULL, TRUE, table->name);
+ fputs(",\nin SQL command\n", ef);
+ fputs(str, ef);
+ fputs("\nCannot find a constraint with the "
+ "given id ", ef);
+ ut_print_name(ef, NULL, FALSE, id);
+ fputs(".\n", ef);
+ mutex_exit(&dict_foreign_err_mutex);
+ }
mem_free(str);
@@ -4652,15 +4695,19 @@ loop:
goto loop;
syntax_error:
- mutex_enter(&dict_foreign_err_mutex);
- rewind(ef);
- ut_print_timestamp(ef);
- fputs(" Syntax error in dropping of a"
- " foreign key constraint of table ", ef);
- ut_print_name(ef, NULL, TRUE, table->name);
- fprintf(ef, ",\n"
- "close to:\n%s\n in SQL command\n%s\n", ptr, str);
- mutex_exit(&dict_foreign_err_mutex);
+ if (!srv_read_only_mode) {
+ FILE* ef = dict_foreign_err_file;
+
+ mutex_enter(&dict_foreign_err_mutex);
+ rewind(ef);
+ ut_print_timestamp(ef);
+ fputs(" Syntax error in dropping of a"
+ " foreign key constraint of table ", ef);
+ ut_print_name(ef, NULL, TRUE, table->name);
+ fprintf(ef, ",\n"
+ "close to:\n%s\n in SQL command\n%s\n", ptr, str);
+ mutex_exit(&dict_foreign_err_mutex);
+ }
mem_free(str);
@@ -4668,7 +4715,7 @@ syntax_error:
}
/*==================== END OF FOREIGN KEY PROCESSING ====================*/
-#endif /* !UNIV_HOTBACKUP */
+
/**********************************************************************//**
Returns an index object if it is found in the dictionary cache.
Assumes that dict_sys->mutex is already being held.
@@ -4908,7 +4955,6 @@ dict_index_calc_min_rec_len(
return(sum);
}
-#ifndef UNIV_HOTBACKUP
/**********************************************************************//**
Prints info of a foreign key constraint. */
static
@@ -4939,7 +4985,6 @@ dict_foreign_print_low(
fputs(" )\n", stderr);
}
-#endif /* !UNIV_HOTBACKUP */
/**********************************************************************//**
Prints a table data. */
UNIV_INTERN
@@ -4948,60 +4993,29 @@ dict_table_print(
/*=============*/
dict_table_t* table) /*!< in: table */
{
- mutex_enter(&(dict_sys->mutex));
- dict_table_print_low(table);
- mutex_exit(&(dict_sys->mutex));
-}
-
-/**********************************************************************//**
-Prints a table data when we know the table name. */
-UNIV_INTERN
-void
-dict_table_print_by_name(
-/*=====================*/
- const char* name) /*!< in: table name */
-{
- dict_table_t* table;
-
- mutex_enter(&(dict_sys->mutex));
-
- table = dict_table_get_low(name);
-
- ut_a(table);
-
- dict_table_print_low(table);
- mutex_exit(&(dict_sys->mutex));
-}
-
-/**********************************************************************//**
-Prints a table data. */
-UNIV_INTERN
-void
-dict_table_print_low(
-/*=================*/
- dict_table_t* table) /*!< in: table */
-{
dict_index_t* index;
dict_foreign_t* foreign;
ulint i;
ut_ad(mutex_own(&(dict_sys->mutex)));
- dict_stats_update(table, DICT_STATS_FETCH, TRUE);
+ dict_table_stats_lock(table, RW_X_LATCH);
- dict_table_stats_lock(table, RW_S_LATCH);
+ if (!table->stat_initialized) {
+ dict_stats_update_transient(table);
+ }
fprintf(stderr,
"--------------------------------------\n"
"TABLE: name %s, id %llu, flags %lx, columns %lu,"
- " indexes %lu, appr.rows %lu\n"
+ " indexes %lu, appr.rows " UINT64PF "\n"
" COLUMNS: ",
table->name,
(ullint) table->id,
(ulong) table->flags,
(ulong) table->n_cols,
(ulong) UT_LIST_GET_LEN(table->indexes),
- (ulong) table->stat_n_rows);
+ table->stat_n_rows);
for (i = 0; i < (ulint) table->n_cols; i++) {
dict_col_print_low(table, dict_table_get_nth_col(table, i));
@@ -5017,7 +5031,9 @@ dict_table_print_low(
index = UT_LIST_GET_NEXT(indexes, index);
}
- dict_table_stats_unlock(table, RW_S_LATCH);
+ table->stat_initialized = FALSE;
+
+ dict_table_stats_unlock(table, RW_X_LATCH);
foreign = UT_LIST_GET_FIRST(table->foreign_list);
@@ -5065,13 +5081,15 @@ dict_index_print_low(
ib_int64_t n_vals;
ulint i;
+ ut_a(index->table->stat_initialized);
+
ut_ad(mutex_own(&(dict_sys->mutex)));
if (index->n_user_defined_cols > 0) {
n_vals = index->stat_n_diff_key_vals[
- index->n_user_defined_cols];
+ index->n_user_defined_cols - 1];
} else {
- n_vals = index->stat_n_diff_key_vals[1];
+ n_vals = index->stat_n_diff_key_vals[0];
}
fprintf(stderr,
@@ -5121,7 +5139,6 @@ dict_field_print_low(
}
}
-#ifndef UNIV_HOTBACKUP
/**********************************************************************//**
Outputs info on a foreign key of a table in a format suitable for
CREATE TABLE. */
@@ -5310,7 +5327,6 @@ dict_print_info_on_foreign_keys(
mutex_exit(&(dict_sys->mutex));
}
-#endif /* !UNIV_HOTBACKUP */
/********************************************************************//**
Displays the names of the index and the table. */
UNIV_INTERN
@@ -5318,7 +5334,7 @@ void
dict_index_name_print(
/*==================*/
FILE* file, /*!< in: output stream */
- trx_t* trx, /*!< in: transaction */
+ const trx_t* trx, /*!< in: transaction */
const dict_index_t* index) /*!< in: index to print */
{
fputs("index ", file);
@@ -5393,7 +5409,9 @@ UNIV_INTERN
void
dict_set_corrupted(
/*===============*/
- dict_index_t* index) /*!< in/out: index */
+ dict_index_t* index, /*!< in/out: index */
+ trx_t* trx, /*!< in/out: transaction */
+ const char* ctx) /*!< in: context */
{
mem_heap_t* heap;
mtr_t mtr;
@@ -5401,8 +5419,14 @@ dict_set_corrupted(
dtuple_t* tuple;
dfield_t* dfield;
byte* buf;
+ char* table_name;
const char* status;
btr_cur_t cursor;
+ bool locked = RW_X_LATCH == trx->dict_operation_lock_mode;
+
+ if (!locked) {
+ row_mysql_lock_data_dictionary(trx);
+ }
ut_ad(index);
ut_ad(mutex_own(&dict_sys->mutex));
@@ -5422,7 +5446,7 @@ dict_set_corrupted(
if (index->type & DICT_CORRUPT) {
/* The index was already flagged corrupted. */
ut_ad(!dict_index_is_clust(index) || index->table->corrupted);
- return;
+ goto func_exit;
}
heap = mem_heap_create(sizeof(dtuple_t) + 2 * (sizeof(dfield_t)
@@ -5463,19 +5487,29 @@ dict_set_corrupted(
goto fail;
}
mlog_write_ulint(field, index->type, MLOG_4BYTES, &mtr);
- status = " InnoDB: Flagged corruption of ";
+ status = "Flagged";
} else {
fail:
- status = " InnoDB: Unable to flag corruption of ";
+ status = "Unable to flag";
}
mtr_commit(&mtr);
+ mem_heap_empty(heap);
+ table_name = static_cast<char*>(mem_heap_alloc(heap, FN_REFLEN + 1));
+ *innobase_convert_name(
+ table_name, FN_REFLEN,
+ index->table_name, strlen(index->table_name),
+ NULL, TRUE) = 0;
+
+ ib_logf(IB_LOG_LEVEL_ERROR, "%s corruption of %s in table %s in %s",
+ status, index->name, table_name, ctx);
+
mem_heap_free(heap);
- ut_print_timestamp(stderr);
- fputs(status, stderr);
- dict_index_name_print(stderr, NULL, index);
- putc('\n', stderr);
+func_exit:
+ if (!locked) {
+ row_mysql_unlock_data_dictionary(trx);
+ }
}
/**********************************************************************//**
@@ -5582,7 +5616,7 @@ dict_table_get_index_on_name(
/* If name is NULL, just return */
if (!name) {
- return NULL;
+ return(NULL);
}
index = dict_table_get_first_index(table);
@@ -5597,42 +5631,47 @@ dict_table_get_index_on_name(
}
return(NULL);
-
}
/**********************************************************************//**
-Replace the index passed in with another equivalent index in the tables
-foreign key list. */
+Replace the index passed in with another equivalent index in the
+foreign key lists of the table. */
UNIV_INTERN
void
-dict_table_replace_index_in_foreign_list(
-/*=====================================*/
- dict_table_t* table, /*!< in/out: table */
- dict_index_t* index, /*!< in: index to be replaced */
- const trx_t* trx) /*!< in: transaction handle */
+dict_foreign_replace_index(
+/*=======================*/
+ dict_table_t* table, /*!< in/out: table */
+ const dict_index_t* index, /*!< in: index to be replaced */
+ const trx_t* trx) /*!< in: transaction handle */
{
dict_foreign_t* foreign;
+ ut_ad(index->to_be_dropped);
+
for (foreign = UT_LIST_GET_FIRST(table->foreign_list);
foreign;
foreign = UT_LIST_GET_NEXT(foreign_list, foreign)) {
- if (foreign->foreign_index == index) {
- dict_index_t* new_index
- = dict_foreign_find_equiv_index(foreign);
+ dict_index_t* new_index;
- /* There must exist an alternative index if
- check_foreigns (FOREIGN_KEY_CHECKS) is on,
- since ha_innobase::prepare_drop_index had done
- the check before we reach here. */
+ if (foreign->foreign_index == index) {
+ ut_ad(foreign->foreign_table == index->table);
+ new_index = dict_foreign_find_index(
+ foreign->foreign_table,
+ foreign->foreign_col_names,
+ foreign->n_fields, index,
+ /*check_charsets=*/TRUE, /*check_null=*/FALSE);
+ /* There must exist an alternative index,
+ since this must have been checked earlier. */
ut_a(new_index || !trx->check_foreigns);
+ ut_ad(!new_index || new_index->table == index->table);
+ ut_ad(!new_index || !new_index->to_be_dropped);
foreign->foreign_index = new_index;
}
}
-
for (foreign = UT_LIST_GET_FIRST(table->referenced_list);
foreign;
foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) {
@@ -5647,8 +5686,11 @@ dict_table_replace_index_in_foreign_list(
foreign->referenced_col_names,
foreign->n_fields, index,
/*check_charsets=*/TRUE, /*check_null=*/FALSE);
- ut_ad(new_index || !trx->check_foreigns);
+ /* There must exist an alternative index,
+ since this must have been checked earlier. */
+ ut_a(new_index || !trx->check_foreigns);
ut_ad(!new_index || new_index->table == index->table);
+ ut_ad(!new_index || !new_index->to_be_dropped);
foreign->referenced_index = new_index;
}
@@ -5696,8 +5738,8 @@ dict_table_check_for_dup_indexes(
/*=============================*/
const dict_table_t* table, /*!< in: Check for dup indexes
in this table */
- ibool tmp_ok) /*!< in: TRUE=allow temporary
- index names */
+ enum check_name check) /*!< in: whether and when to allow
+ temporary index names */
{
/* Check for duplicates, ignoring indexes that are marked
as to be dropped */
@@ -5713,17 +5755,32 @@ dict_table_check_for_dup_indexes(
index1 = UT_LIST_GET_FIRST(table->indexes);
do {
- ut_ad(tmp_ok || *index1->name != TEMP_INDEX_PREFIX);
-
- index2 = UT_LIST_GET_NEXT(indexes, index1);
-
- while (index2) {
-
- if (!index2->to_be_dropped) {
- ut_ad(ut_strcmp(index1->name, index2->name));
+ if (*index1->name == TEMP_INDEX_PREFIX) {
+ ut_a(!dict_index_is_clust(index1));
+
+ switch (check) {
+ case CHECK_ALL_COMPLETE:
+ ut_error;
+ case CHECK_ABORTED_OK:
+ switch (dict_index_get_online_status(index1)) {
+ case ONLINE_INDEX_COMPLETE:
+ case ONLINE_INDEX_CREATION:
+ ut_error;
+ break;
+ case ONLINE_INDEX_ABORTED:
+ case ONLINE_INDEX_ABORTED_DROPPED:
+ break;
+ }
+ /* fall through */
+ case CHECK_PARTIAL_OK:
+ break;
}
+ }
- index2 = UT_LIST_GET_NEXT(indexes, index2);
+ for (index2 = UT_LIST_GET_NEXT(indexes, index1);
+ index2 != NULL;
+ index2 = UT_LIST_GET_NEXT(indexes, index2)) {
+ ut_ad(ut_strcmp(index1->name, index2->name));
}
index1 = UT_LIST_GET_NEXT(indexes, index1);
@@ -5739,17 +5796,17 @@ The caller must own the dictionary mutex.
dict_table_schema_check() @{
@return DB_SUCCESS if the table exists and contains the necessary columns */
UNIV_INTERN
-enum db_err
+dberr_t
dict_table_schema_check(
/*====================*/
dict_table_schema_t* req_schema, /*!< in/out: required table
schema */
char* errstr, /*!< out: human readable error
- message if != DB_SUCCESS and
- != DB_TABLE_NOT_FOUND is
+ message if != DB_SUCCESS is
returned */
size_t errstr_sz) /*!< in: errstr size */
{
+ char buf[MAX_FULL_NAME_LEN];
dict_table_t* table;
ulint i;
@@ -5757,8 +5814,24 @@ dict_table_schema_check(
table = dict_table_get_low(req_schema->table_name);
- if (table == NULL || table->ibd_file_missing) {
- /* no such table or missing tablespace */
+ if (table == NULL) {
+ /* no such table */
+
+ ut_snprintf(errstr, errstr_sz,
+ "Table %s not found.",
+ ut_format_name(req_schema->table_name,
+ TRUE, buf, sizeof(buf)));
+
+ return(DB_TABLE_NOT_FOUND);
+ }
+
+ if (table->ibd_file_missing) {
+ /* missing tablespace */
+
+ ut_snprintf(errstr, errstr_sz,
+ "Tablespace for table %s is missing.",
+ ut_format_name(req_schema->table_name,
+ TRUE, buf, sizeof(buf)));
return(DB_TABLE_NOT_FOUND);
}
@@ -5769,7 +5842,8 @@ dict_table_schema_check(
ut_snprintf(errstr, errstr_sz,
"%s has %d columns but should have %lu.",
- req_schema->table_name,
+ ut_format_name(req_schema->table_name,
+ TRUE, buf, sizeof(buf)),
table->n_def - DATA_N_SYS_COLS,
req_schema->n_cols);
@@ -5814,9 +5888,12 @@ dict_table_schema_check(
if (j == table->n_def) {
ut_snprintf(errstr, errstr_sz,
- "required column %s.%s not found.",
- req_schema->table_name,
- req_schema->columns[i].name);
+ "required column %s "
+ "not found in table %s.",
+ req_schema->columns[i].name,
+ ut_format_name(
+ req_schema->table_name,
+ TRUE, buf, sizeof(buf)));
return(DB_ERROR);
}
@@ -5839,23 +5916,33 @@ dict_table_schema_check(
if (req_schema->columns[i].len != table->cols[j].len) {
ut_snprintf(errstr, errstr_sz,
- "Column %s.%s is %s but should be %s "
- "(length mismatch).",
- req_schema->table_name,
+ "Column %s in table %s is %s "
+ "but should be %s (length mismatch).",
req_schema->columns[i].name,
+ ut_format_name(req_schema->table_name,
+ TRUE, buf, sizeof(buf)),
actual_type, req_type);
return(DB_ERROR);
}
- /* check mtype for exact match */
- if (req_schema->columns[i].mtype != table->cols[j].mtype) {
-
+	/*
+	Check mtype for an exact match.
+	This check is relaxed to allow us to use TIMESTAMP
+	(i.e. INT) for last_update instead of DATA_BINARY.
+	We have to test for both values as the innodb_table_stats
+	table may come from MySQL and have the old type.
+	*/
+ if (req_schema->columns[i].mtype != table->cols[j].mtype &&
+ !(req_schema->columns[i].mtype == DATA_INT &&
+ table->cols[j].mtype == DATA_FIXBINARY))
+ {
ut_snprintf(errstr, errstr_sz,
- "Column %s.%s is %s but should be %s "
- "(type mismatch).",
- req_schema->table_name,
+ "Column %s in table %s is %s "
+ "but should be %s (type mismatch).",
req_schema->columns[i].name,
+ ut_format_name(req_schema->table_name,
+ TRUE, buf, sizeof(buf)),
actual_type, req_type);
return(DB_ERROR);
@@ -5868,20 +5955,110 @@ dict_table_schema_check(
!= req_schema->columns[i].prtype_mask) {
ut_snprintf(errstr, errstr_sz,
- "Column %s.%s is %s but should be %s "
- "(flags mismatch).",
- req_schema->table_name,
+ "Column %s in table %s is %s "
+ "but should be %s (flags mismatch).",
req_schema->columns[i].name,
+ ut_format_name(req_schema->table_name,
+ TRUE, buf, sizeof(buf)),
actual_type, req_type);
return(DB_ERROR);
}
}
+ if (req_schema->n_foreign != UT_LIST_GET_LEN(table->foreign_list)) {
+ ut_snprintf(
+ errstr, errstr_sz,
+ "Table %s has %lu foreign key(s) pointing to other "
+ "tables, but it must have %lu.",
+ ut_format_name(req_schema->table_name,
+ TRUE, buf, sizeof(buf)),
+ UT_LIST_GET_LEN(table->foreign_list),
+ req_schema->n_foreign);
+ return(DB_ERROR);
+ }
+
+ if (req_schema->n_referenced != UT_LIST_GET_LEN(table->referenced_list)) {
+ ut_snprintf(
+ errstr, errstr_sz,
+ "There are %lu foreign key(s) pointing to %s, "
+ "but there must be %lu.",
+ UT_LIST_GET_LEN(table->referenced_list),
+ ut_format_name(req_schema->table_name,
+ TRUE, buf, sizeof(buf)),
+ req_schema->n_referenced);
+ return(DB_ERROR);
+ }
+
return(DB_SUCCESS);
}
/* @} */
+/*********************************************************************//**
+Converts a database and table name from filesystem encoding
+(e.g. d@i1b/a@q1b@1Kc, same format as used in dict_table_t::name) in two
+strings in UTF8 encoding (e.g. dцb and aюbØc). The output buffers must be
+at least MAX_DB_UTF8_LEN and MAX_TABLE_UTF8_LEN bytes. */
+UNIV_INTERN
+void
+dict_fs2utf8(
+/*=========*/
+ const char* db_and_table, /*!< in: database and table names,
+ e.g. d@i1b/a@q1b@1Kc */
+ char* db_utf8, /*!< out: database name, e.g. dцb */
+ size_t db_utf8_size, /*!< in: dbname_utf8 size */
+ char* table_utf8, /*!< out: table name, e.g. aюbØc */
+ size_t table_utf8_size)/*!< in: table_utf8 size */
+{
+ char db[MAX_DATABASE_NAME_LEN + 1];
+ ulint db_len;
+ uint errors;
+
+ db_len = dict_get_db_name_len(db_and_table);
+
+ ut_a(db_len <= sizeof(db));
+
+ memcpy(db, db_and_table, db_len);
+ db[db_len] = '\0';
+
+ strconvert(
+ &my_charset_filename, db, db_len,
+ system_charset_info, db_utf8, db_utf8_size,
+ &errors);
+
+ /* convert each # to @0023 in table name and store the result in buf */
+ const char* table = dict_remove_db_name(db_and_table);
+ const char* table_p;
+ char buf[MAX_TABLE_NAME_LEN * 5 + 1];
+ char* buf_p;
+ for (table_p = table, buf_p = buf; table_p[0] != '\0'; table_p++) {
+ if (table_p[0] != '#') {
+ buf_p[0] = table_p[0];
+ buf_p++;
+ } else {
+ buf_p[0] = '@';
+ buf_p[1] = '0';
+ buf_p[2] = '0';
+ buf_p[3] = '2';
+ buf_p[4] = '3';
+ buf_p += 5;
+ }
+ ut_a((size_t) (buf_p - buf) < sizeof(buf));
+ }
+ buf_p[0] = '\0';
+
+ errors = 0;
+ strconvert(
+ &my_charset_filename, buf, (uint) (buf_p - buf),
+ system_charset_info, table_utf8, table_utf8_size,
+ &errors);
+
+ if (errors != 0) {
+ ut_snprintf(table_utf8, table_utf8_size, "%s%s",
+ srv_mysql50_table_name_prefix, table);
+ }
+}
+
/**********************************************************************//**
Closes the data dictionary module. */
UNIV_INTERN
@@ -5929,7 +6106,9 @@ dict_close(void)
rw_lock_free(&dict_operation_lock);
memset(&dict_operation_lock, 0x0, sizeof(dict_operation_lock));
- mutex_free(&dict_foreign_err_mutex);
+ if (!srv_read_only_mode) {
+ mutex_free(&dict_foreign_err_mutex);
+ }
mem_free(dict_sys);
dict_sys = NULL;
@@ -5943,7 +6122,7 @@ dict_close(void)
/**********************************************************************//**
Validate the dictionary table LRU list.
@return TRUE if valid */
-UNIV_INTERN
+static
ibool
dict_lru_validate(void)
/*===================*/
@@ -5972,7 +6151,7 @@ dict_lru_validate(void)
/**********************************************************************//**
Check if a table exists in the dict table LRU list.
@return TRUE if table found in LRU list */
-UNIV_INTERN
+static
ibool
dict_lru_find_table(
/*================*/
@@ -6025,4 +6204,279 @@ dict_non_lru_find_table(
return(FALSE);
}
# endif /* UNIV_DEBUG */
+/*********************************************************************//**
+Check an index to see whether its first fields are the columns in the array,
+in the same order and is not marked for deletion and is not the same
+as types_idx.
+@return true if the index qualifies, otherwise false */
+UNIV_INTERN
+bool
+dict_foreign_qualify_index(
+/*=======================*/
+	const dict_table_t*	table,	/*!< in: table */
+	const char**		columns,/*!< in: array of column names */
+	ulint			n_cols,	/*!< in: number of columns */
+	const dict_index_t*	index,	/*!< in: index to check */
+	const dict_index_t*	types_idx,
+					/*!< in: NULL or an index
+					whose types the column types
+					must match */
+	ibool			check_charsets,
+					/*!< in: whether to check
+					charsets.  only has an effect
+					if types_idx != NULL */
+	ulint			check_null)
+					/*!< in: nonzero if none of
+					the columns must be declared
+					NOT NULL */
+{
+	ulint	i;
+
+	/* The index cannot qualify if it has fewer fields than
+	the column list being matched. */
+	if (dict_index_get_n_fields(index) < n_cols) {
+		return(false);
+	}
+
+	/* Each disqualifying condition below breaks out of the loop,
+	leaving i < n_cols; the index qualifies iff the loop ran to
+	completion. */
+	for (i= 0; i < n_cols; i++) {
+		dict_field_t*	field;
+		const char*	col_name;
+
+		field = dict_index_get_nth_field(index, i);
+
+		col_name = dict_table_get_col_name(
+			table, dict_col_get_no(field->col));
+
+		if (field->prefix_len != 0) {
+			/* We do not accept column prefix
+			indexes here */
+
+			break;
+		}
+
+		/* Column names are compared case-insensitively. */
+		if (0 != innobase_strcasecmp(columns[i],
+					     col_name)) {
+			break;
+		}
+
+		if (check_null
+		    && (field->col->prtype & DATA_NOT_NULL)) {
+
+			break;
+		}
+
+		if (types_idx && !cmp_cols_are_equal(
+			    dict_index_get_nth_col(index, i),
+			    dict_index_get_nth_col(types_idx,
+						   i),
+			    check_charsets)) {
+
+			break;
+		}
+	}
+
+	return((i == n_cols) ? true : false);
+}
+
+/*********************************************************************//**
+Update the state of compression failure padding heuristics. This is
+called whenever a compression operation succeeds or fails.
+The caller must be holding info->mutex */
+static
+void
+dict_index_zip_pad_update(
+/*======================*/
+	zip_pad_info_t*	info,	/*!< in/out: info to be updated */
+	ulint	zip_threshold)	/*!< in: zip threshold value */
+{
+	ulint	total;
+	ulint	fail_pct;
+
+	ut_ad(info);
+
+	total = info->success + info->failure;
+
+	ut_ad(total > 0);
+
+	if(zip_threshold == 0) {
+		/* User has just disabled the padding. */
+		return;
+	}
+
+	if (total < ZIP_PAD_ROUND_LEN) {
+		/* We are in middle of a round. Do nothing. */
+		return;
+	}
+
+	/* We are at a 'round' boundary. Reset the values but first
+	calculate fail rate for our heuristic. */
+	fail_pct = (info->failure * 100) / total;
+	info->failure = 0;
+	info->success = 0;
+
+	if (fail_pct > zip_threshold) {
+		/* Compression failures are more than the user defined
+		threshold. Increase the pad size to reduce chances of
+		compression failures. */
+		ut_ad(info->pad % ZIP_PAD_INCR == 0);
+
+		/* Only do increment if it won't increase padding
+		beyond max pad size. */
+		if (info->pad + ZIP_PAD_INCR
+		    < (UNIV_PAGE_SIZE * zip_pad_max) / 100) {
+#ifdef HAVE_ATOMIC_BUILTINS
+			/* Use atomics even though we have the mutex.
+			This is to ensure that we are able to read
+			info->pad atomically where atomics are
+			supported. */
+			os_atomic_increment_ulint(&info->pad, ZIP_PAD_INCR);
+#else /* HAVE_ATOMIC_BUILTINS */
+			info->pad += ZIP_PAD_INCR;
+#endif /* HAVE_ATOMIC_BUILTINS */
+
+			MONITOR_INC(MONITOR_PAD_INCREMENTS);
+		}
+
+		/* A failed round resets the successful-round streak. */
+		info->n_rounds = 0;
+
+	} else {
+		/* Failure rate was OK. Another successful round
+		completed. */
+		++info->n_rounds;
+
+		/* If enough successful rounds are completed with
+		compression failure rate in control, decrease the
+		padding. */
+		if (info->n_rounds >= ZIP_PAD_SUCCESSFUL_ROUND_LIMIT
+		    && info->pad > 0) {
+
+			ut_ad(info->pad % ZIP_PAD_INCR == 0);
+#ifdef HAVE_ATOMIC_BUILTINS
+			/* Use atomics even though we have the mutex.
+			This is to ensure that we are able to read
+			info->pad atomically where atomics are
+			supported. */
+			os_atomic_decrement_ulint(&info->pad, ZIP_PAD_INCR);
+#else /* HAVE_ATOMIC_BUILTINS */
+			info->pad -= ZIP_PAD_INCR;
+#endif /* HAVE_ATOMIC_BUILTINS */
+
+			info->n_rounds = 0;
+
+			MONITOR_INC(MONITOR_PAD_DECREMENTS);
+		}
+	}
+}
+
+/*********************************************************************//**
+This function should be called whenever a page is successfully
+compressed. Updates the compression padding information. */
+UNIV_INTERN
+void
+dict_index_zip_success(
+/*===================*/
+	dict_index_t*	index)	/*!< in/out: index to be updated. */
+{
+	ut_ad(index);
+
+	/* Snapshot the user-configurable threshold once; zero means
+	the padding heuristic is disabled. */
+	ulint zip_threshold = zip_failure_threshold_pct;
+	if (!zip_threshold) {
+		/* Disabled by user. */
+		return;
+	}
+
+	/* The pad-update heuristic requires index->zip_pad.mutex. */
+	os_fast_mutex_lock(&index->zip_pad.mutex);
+	++index->zip_pad.success;
+	dict_index_zip_pad_update(&index->zip_pad, zip_threshold);
+	os_fast_mutex_unlock(&index->zip_pad.mutex);
+}
+
+/*********************************************************************//**
+This function should be called whenever a page compression attempt
+fails. Updates the compression padding information. */
+UNIV_INTERN
+void
+dict_index_zip_failure(
+/*===================*/
+	dict_index_t*	index)	/*!< in/out: index to be updated. */
+{
+	ut_ad(index);
+
+	/* Snapshot the user-configurable threshold once; zero means
+	the padding heuristic is disabled. */
+	ulint zip_threshold = zip_failure_threshold_pct;
+	if (!zip_threshold) {
+		/* Disabled by user. */
+		return;
+	}
+
+	/* The pad-update heuristic requires index->zip_pad.mutex. */
+	os_fast_mutex_lock(&index->zip_pad.mutex);
+	++index->zip_pad.failure;
+	dict_index_zip_pad_update(&index->zip_pad, zip_threshold);
+	os_fast_mutex_unlock(&index->zip_pad.mutex);
+}
+
+
+/*********************************************************************//**
+Return the optimal page size, for which page will likely compress.
+@return page size beyond which page might not compress */
+UNIV_INTERN
+ulint
+dict_index_zip_pad_optimal_page_size(
+/*=================================*/
+	dict_index_t*	index)	/*!< in: index for which page size
+				is requested */
+{
+	ulint	pad;
+	ulint	min_sz;
+	ulint	sz;
+
+	ut_ad(index);
+
+	if (!zip_failure_threshold_pct) {
+		/* Disabled by user. */
+		return(UNIV_PAGE_SIZE);
+	}
+
+	/* We use atomics to read index->zip_pad.pad. Here we use zero
+	as increment as we are not changing the value of the 'pad'. On
+	platforms where atomics are not available we grab the mutex. */
+
+#ifdef HAVE_ATOMIC_BUILTINS
+	pad = os_atomic_increment_ulint(&index->zip_pad.pad, 0);
+#else /* HAVE_ATOMIC_BUILTINS */
+	os_fast_mutex_lock(&index->zip_pad.mutex);
+	pad = index->zip_pad.pad;
+	os_fast_mutex_unlock(&index->zip_pad.mutex);
+#endif /* HAVE_ATOMIC_BUILTINS */
+
+	ut_ad(pad < UNIV_PAGE_SIZE);
+	sz = UNIV_PAGE_SIZE - pad;
+
+	/* Min size allowed by user. */
+	ut_ad(zip_pad_max < 100);
+	min_sz = (UNIV_PAGE_SIZE * (100 - zip_pad_max)) / 100;
+
+	return(ut_max(sz, min_sz));
+}
+
+/*************************************************************//**
+Convert table flag to row format string.
+@return row format name. */
+UNIV_INTERN
+const char*
+dict_tf_to_row_format_string(
+/*=========================*/
+	ulint	table_flag)	/*!< in: table flags
+				(dict_table_t::flags) */
+{
+	switch (dict_tf_get_rec_format(table_flag)) {
+	case REC_FORMAT_REDUNDANT:
+		return("ROW_TYPE_REDUNDANT");
+	case REC_FORMAT_COMPACT:
+		return("ROW_TYPE_COMPACT");
+	case REC_FORMAT_COMPRESSED:
+		return("ROW_TYPE_COMPRESSED");
+	case REC_FORMAT_DYNAMIC:
+		return("ROW_TYPE_DYNAMIC");
+	}
+
+	/* All rec_format_t values are handled above. */
+	ut_error;
+	return(0);
+}
#endif /* !UNIV_HOTBACKUP */
diff --git a/storage/innobase/dict/dict0load.cc b/storage/innobase/dict/dict0load.cc
index 95bc022de8b..d423a3b7304 100644
--- a/storage/innobase/dict/dict0load.cc
+++ b/storage/innobase/dict/dict0load.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -41,18 +41,22 @@ Created 4/24/1996 Heikki Tuuri
#include "rem0cmp.h"
#include "srv0start.h"
#include "srv0srv.h"
+#include "dict0crea.h"
#include "dict0priv.h"
#include "ha_prototypes.h" /* innobase_casedn_str() */
#include "fts0priv.h"
-/** Following are six InnoDB system tables */
+/** Following are the InnoDB system tables. The positions in
+this array are referenced by enum dict_system_table_id. */
static const char* SYSTEM_TABLE_NAME[] = {
"SYS_TABLES",
"SYS_INDEXES",
"SYS_COLUMNS",
"SYS_FIELDS",
"SYS_FOREIGN",
- "SYS_FOREIGN_COLS"
+ "SYS_FOREIGN_COLS",
+ "SYS_TABLESPACES",
+ "SYS_DATAFILES"
};
/* If this flag is TRUE, then we will load the cluster index's (and tables')
@@ -183,7 +187,8 @@ dict_print(void)
os_increment_counter_by_amount(
server_mutex,
- srv_fatal_semaphore_wait_threshold, 7200/*2 hours*/);
+ srv_fatal_semaphore_wait_threshold,
+ SRV_SEMAPHORE_WAIT_EXTENSION);
heap = mem_heap_create(1000);
mutex_enter(&(dict_sys->mutex));
@@ -196,13 +201,11 @@ dict_print(void)
err_msg = static_cast<const char*>(
dict_process_sys_tables_rec_and_mtr_commit(
- heap, rec, &table,
- static_cast<dict_table_info_t>(
- DICT_TABLE_LOAD_FROM_CACHE
- | DICT_TABLE_UPDATE_STATS), &mtr));
+ heap, rec, &table, DICT_TABLE_LOAD_FROM_CACHE,
+ &mtr));
if (!err_msg) {
- dict_table_print_low(table);
+ dict_table_print(table);
} else {
ut_print_timestamp(stderr);
fprintf(stderr, " InnoDB: %s\n", err_msg);
@@ -221,7 +224,8 @@ dict_print(void)
/* Restore the fatal semaphore wait timeout */
os_decrement_counter_by_amount(
server_mutex,
- srv_fatal_semaphore_wait_threshold, 7200/*2 hours*/);
+ srv_fatal_semaphore_wait_threshold,
+ SRV_SEMAPHORE_WAIT_EXTENSION);
}
/********************************************************************//**
@@ -278,8 +282,8 @@ dict_startscan_system(
clust_index = UT_LIST_GET_FIRST(system_table->indexes);
- btr_pcur_open_at_index_side(TRUE, clust_index, BTR_SEARCH_LEAF, pcur,
- TRUE, mtr);
+ btr_pcur_open_at_index_side(true, clust_index, BTR_SEARCH_LEAF, pcur,
+ true, 0, mtr);
rec = dict_getnext_system_low(pcur, mtr);
@@ -307,6 +311,7 @@ dict_getnext_system(
return(rec);
}
+
/********************************************************************//**
This function processes one SYS_TABLES record and populate the dict_table_t
struct for the table. Extracted out of dict_print() to be used by
@@ -362,15 +367,6 @@ dict_process_sys_tables_rec_and_mtr_commit(
return(err_msg);
}
- if ((status & DICT_TABLE_UPDATE_STATS)
- && dict_table_get_first_index(*table)) {
-
- /* Update statistics member fields in *table if
- DICT_TABLE_UPDATE_STATS is set */
- ut_ad(mutex_own(&dict_sys->mutex));
- dict_stats_update(*table, DICT_STATS_FETCH, TRUE);
- }
-
return(NULL);
}
@@ -401,6 +397,7 @@ dict_process_sys_indexes_rec(
return(err_msg);
}
+
/********************************************************************//**
This function parses a SYS_COLUMNS record and populate a dict_column_t
structure with the information from the record.
@@ -423,6 +420,7 @@ dict_process_sys_columns_rec(
return(err_msg);
}
+
/********************************************************************//**
This function parses a SYS_FIELDS record and populates a dict_field_t
structure with the information from the record.
@@ -475,7 +473,7 @@ dict_process_sys_foreign_rec(
const byte* field;
ulint n_fields_and_type;
- if (UNIV_UNLIKELY(rec_get_deleted_flag(rec, 0))) {
+ if (rec_get_deleted_flag(rec, 0)) {
return("delete-marked record in SYS_FOREIGN");
}
@@ -485,7 +483,7 @@ dict_process_sys_foreign_rec(
field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_FOREIGN__ID, &len);
- if (UNIV_UNLIKELY(len < 1 || len == UNIV_SQL_NULL)) {
+ if (len == 0 || len == UNIV_SQL_NULL) {
err_len:
return("incorrect column length in SYS_FOREIGN");
}
@@ -512,7 +510,7 @@ err_len:
field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_FOREIGN__FOR_NAME, &len);
- if (len < 1 || len == UNIV_SQL_NULL) {
+ if (len == 0 || len == UNIV_SQL_NULL) {
goto err_len;
}
foreign->foreign_table_name = mem_heap_strdupl(
@@ -520,7 +518,7 @@ err_len:
field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_FOREIGN__REF_NAME, &len);
- if (len < 1 || len == UNIV_SQL_NULL) {
+ if (len == 0 || len == UNIV_SQL_NULL) {
goto err_len;
}
foreign->referenced_table_name = mem_heap_strdupl(
@@ -568,7 +566,7 @@ dict_process_sys_foreign_col_rec(
field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_FOREIGN_COLS__ID, &len);
- if (len < 1 || len == UNIV_SQL_NULL) {
+ if (len == 0 || len == UNIV_SQL_NULL) {
err_len:
return("incorrect column length in SYS_FOREIGN_COLS");
}
@@ -594,14 +592,14 @@ err_len:
field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_FOREIGN_COLS__FOR_COL_NAME, &len);
- if (len < 1 || len == UNIV_SQL_NULL) {
+ if (len == 0 || len == UNIV_SQL_NULL) {
goto err_len;
}
*for_col_name = mem_heap_strdupl(heap, (char*) field, len);
field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_FOREIGN_COLS__REF_COL_NAME, &len);
- if (len < 1 || len == UNIV_SQL_NULL) {
+ if (len == 0 || len == UNIV_SQL_NULL) {
goto err_len;
}
*ref_col_name = mem_heap_strdupl(heap, (char*) field, len);
@@ -610,6 +608,127 @@ err_len:
}
/********************************************************************//**
+This function parses a SYS_TABLESPACES record, extracts necessary
+information from the record and returns to caller.
+@return error message, or NULL on success */
+UNIV_INTERN
+const char*
+dict_process_sys_tablespaces(
+/*=========================*/
+	mem_heap_t*	heap,	/*!< in/out: heap memory */
+	const rec_t*	rec,	/*!< in: current SYS_TABLESPACES rec */
+	ulint*		space,	/*!< out: space id */
+	const char**	name,	/*!< out: tablespace name */
+	ulint*		flags)	/*!< out: tablespace flags */
+{
+	ulint		len;
+	const byte*	field;
+
+	/* Initialize the output values */
+	*space = ULINT_UNDEFINED;
+	*name = NULL;
+	*flags = ULINT_UNDEFINED;
+
+	if (rec_get_deleted_flag(rec, 0)) {
+		return("delete-marked record in SYS_TABLESPACES");
+	}
+
+	if (rec_get_n_fields_old(rec) != DICT_NUM_FIELDS__SYS_TABLESPACES) {
+		return("wrong number of columns in SYS_TABLESPACES record");
+	}
+
+	field = rec_get_nth_field_old(
+		rec, DICT_FLD__SYS_TABLESPACES__SPACE, &len);
+	if (len != DICT_FLD_LEN_SPACE) {
+err_len:
+		return("incorrect column length in SYS_TABLESPACES");
+	}
+	*space = mach_read_from_4(field);
+
+	/* Skip the system columns DB_TRX_ID and DB_ROLL_PTR;
+	only their lengths are validated here. */
+	rec_get_nth_field_offs_old(
+		rec, DICT_FLD__SYS_TABLESPACES__DB_TRX_ID, &len);
+	if (len != DATA_TRX_ID_LEN && len != UNIV_SQL_NULL) {
+		goto err_len;
+	}
+
+	rec_get_nth_field_offs_old(
+		rec, DICT_FLD__SYS_TABLESPACES__DB_ROLL_PTR, &len);
+	if (len != DATA_ROLL_PTR_LEN && len != UNIV_SQL_NULL) {
+		goto err_len;
+	}
+
+	/* The returned name is duplicated onto the caller's heap. */
+	field = rec_get_nth_field_old(
+		rec, DICT_FLD__SYS_TABLESPACES__NAME, &len);
+	if (len == 0 || len == UNIV_SQL_NULL) {
+		goto err_len;
+	}
+	*name = mem_heap_strdupl(heap, (char*) field, len);
+
+	field = rec_get_nth_field_old(
+		rec, DICT_FLD__SYS_TABLESPACES__FLAGS, &len);
+	if (len != DICT_FLD_LEN_FLAGS) {
+		goto err_len;
+	}
+	*flags = mach_read_from_4(field);
+
+	return(NULL);
+}
+
+/********************************************************************//**
+This function parses a SYS_DATAFILES record, extracts necessary
+information from the record and returns it to the caller.
+Note: unlike dict_process_sys_tablespaces(), the output parameters
+are left untouched on error.
+@return error message, or NULL on success */
+UNIV_INTERN
+const char*
+dict_process_sys_datafiles(
+/*=======================*/
+	mem_heap_t*	heap,	/*!< in/out: heap memory */
+	const rec_t*	rec,	/*!< in: current SYS_DATAFILES rec */
+	ulint*		space,	/*!< out: space id */
+	const char**	path)	/*!< out: datafile paths */
+{
+	ulint		len;
+	const byte*	field;
+
+	if (rec_get_deleted_flag(rec, 0)) {
+		return("delete-marked record in SYS_DATAFILES");
+	}
+
+	if (rec_get_n_fields_old(rec) != DICT_NUM_FIELDS__SYS_DATAFILES) {
+		return("wrong number of columns in SYS_DATAFILES record");
+	}
+
+	field = rec_get_nth_field_old(
+		rec, DICT_FLD__SYS_DATAFILES__SPACE, &len);
+	if (len != DICT_FLD_LEN_SPACE) {
+err_len:
+		return("incorrect column length in SYS_DATAFILES");
+	}
+	*space = mach_read_from_4(field);
+
+	/* Skip the system columns DB_TRX_ID and DB_ROLL_PTR;
+	only their lengths are validated here. */
+	rec_get_nth_field_offs_old(
+		rec, DICT_FLD__SYS_DATAFILES__DB_TRX_ID, &len);
+	if (len != DATA_TRX_ID_LEN && len != UNIV_SQL_NULL) {
+		goto err_len;
+	}
+
+	rec_get_nth_field_offs_old(
+		rec, DICT_FLD__SYS_DATAFILES__DB_ROLL_PTR, &len);
+	if (len != DATA_ROLL_PTR_LEN && len != UNIV_SQL_NULL) {
+		goto err_len;
+	}
+
+	/* The returned path is duplicated onto the caller's heap. */
+	field = rec_get_nth_field_old(
+		rec, DICT_FLD__SYS_DATAFILES__PATH, &len);
+	if (len == 0 || len == UNIV_SQL_NULL) {
+		goto err_len;
+	}
+	*path = mem_heap_strdupl(heap, (char*) field, len);
+
+	return(NULL);
+}
+
+/********************************************************************//**
Determine the flags of a table as stored in SYS_TABLES.TYPE and N_COLS.
@return ULINT_UNDEFINED if error, else a valid dict_table_t::flags. */
static
@@ -629,11 +748,9 @@ dict_sys_tables_get_flags(
ut_a(len == 4);
type = mach_read_from_4(field);
- /* The low order bit of SYS_TABLES.TYPE is always set to 1. If no
- other bits are used, that is defined as SYS_TABLE_TYPE_ANTELOPE.
- But in dict_table_t::flags the low order bit is used to determine
- if the row format is Redundant or Compact when the format is
- Antelope.
+ /* The low order bit of SYS_TABLES.TYPE is always set to 1. But in
+ dict_table_t::flags the low order bit is used to determine if the
+ row format is Redundant or Compact when the format is Antelope.
Read the 4 byte N_COLS field and look at the high order bit. It
should be set for COMPACT and later. It should not be set for
REDUNDANT. */
@@ -645,10 +762,193 @@ dict_sys_tables_get_flags(
/* This validation function also combines the DICT_N_COLS_COMPACT
flag in n_cols into the type field to effectively make it a
dict_table_t::flags. */
- return(dict_sys_tables_type_validate(type, n_cols));
+
+ if (ULINT_UNDEFINED == dict_sys_tables_type_validate(type, n_cols)) {
+ return(ULINT_UNDEFINED);
+ }
+
+ return(dict_sys_tables_type_to_tf(type, n_cols));
}
/********************************************************************//**
+Gets the filepath for a spaceid from SYS_DATAFILES and checks it against
+the contents of a link file. This function is called when there is no
+fil_node_t entry for this space ID so both durable locations on disk
+must be checked and compared.
+We use a temporary heap here for the table lookup, but not for the path
+returned which the caller must free.
+This function can return NULL if the space ID is not found in SYS_DATAFILES,
+then the caller will assume that the ibd file is in the normal datadir.
+@return own: A copy of the first datafile found in SYS_DATAFILES.PATH for
+the given space ID. NULL if space ID is zero or not found. */
+UNIV_INTERN
+char*
+dict_get_first_path(
+/*================*/
+	ulint		space,	/*!< in: space id */
+	const char*	name)	/*!< in: tablespace name */
+{
+	mtr_t		mtr;
+	dict_table_t*	sys_datafiles;
+	dict_index_t*	sys_index;
+	dtuple_t*	tuple;
+	dfield_t*	dfield;
+	byte*		buf;
+	btr_pcur_t	pcur;
+	const rec_t*	rec;
+	const byte*	field;
+	ulint		len;
+	char*		dict_filepath = NULL;
+	mem_heap_t*	heap = mem_heap_create(1024);
+
+	ut_ad(mutex_own(&(dict_sys->mutex)));
+
+	mtr_start(&mtr);
+
+	sys_datafiles = dict_table_get_low("SYS_DATAFILES");
+	sys_index = UT_LIST_GET_FIRST(sys_datafiles->indexes);
+	ut_ad(!dict_table_is_comp(sys_datafiles));
+	ut_ad(name_of_col_is(sys_datafiles, sys_index,
+			     DICT_FLD__SYS_DATAFILES__SPACE, "SPACE"));
+	ut_ad(name_of_col_is(sys_datafiles, sys_index,
+			     DICT_FLD__SYS_DATAFILES__PATH, "PATH"));
+
+	/* Build a one-field search tuple containing the space id. */
+	tuple = dtuple_create(heap, 1);
+	dfield = dtuple_get_nth_field(tuple, DICT_FLD__SYS_DATAFILES__SPACE);
+
+	buf = static_cast<byte*>(mem_heap_alloc(heap, 4));
+	mach_write_to_4(buf, space);
+
+	dfield_set_data(dfield, buf, 4);
+	dict_index_copy_types(tuple, sys_index, 1);
+
+	btr_pcur_open_on_user_rec(sys_index, tuple, PAGE_CUR_GE,
+				  BTR_SEARCH_LEAF, &pcur, &mtr);
+
+	rec = btr_pcur_get_rec(&pcur);
+
+	/* If the file-per-table tablespace was created with
+	an earlier version of InnoDB, then this record is not
+	in SYS_DATAFILES. But a link file still might exist. */
+
+	if (btr_pcur_is_on_user_rec(&pcur)) {
+		/* A record for this space ID was found. */
+		field = rec_get_nth_field_old(
+			rec, DICT_FLD__SYS_DATAFILES__PATH, &len);
+		/* NOTE(review): an SQL-NULL PATH would pass the first
+		assert (len == UNIV_SQL_NULL) but fail the second, since
+		UNIV_SQL_NULL >= OS_FILE_MAX_PATH — confirm PATH is
+		declared NOT NULL. */
+		ut_a(len > 0 || len == UNIV_SQL_NULL);
+		ut_a(len < OS_FILE_MAX_PATH);
+		/* Duplicate with mem_strdupl() so the path survives
+		freeing the local heap; caller owns and must free it. */
+		dict_filepath = mem_strdupl((char*) field, len);
+		ut_a(dict_filepath);
+	}
+
+	btr_pcur_close(&pcur);
+	mtr_commit(&mtr);
+	mem_heap_free(heap);
+
+	return(dict_filepath);
+}
+
+/********************************************************************//**
+Update the record for space_id in SYS_TABLESPACES to this filepath.
+@return DB_SUCCESS if OK, dberr_t if the update failed */
+UNIV_INTERN
+dberr_t
+dict_update_filepath(
+/*=================*/
+	ulint		space_id,	/*!< in: space id */
+	const char*	filepath)	/*!< in: filepath */
+{
+	dberr_t		err = DB_SUCCESS;
+	trx_t*		trx;
+
+#ifdef UNIV_SYNC_DEBUG
+	ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+	ut_ad(mutex_own(&(dict_sys->mutex)));
+
+	/* Run the update in a dedicated background transaction. */
+	trx = trx_allocate_for_background();
+	trx->op_info = "update filepath";
+	trx->dict_operation_lock_mode = RW_X_LATCH;
+	trx_start_for_ddl(trx, TRX_DICT_OP_INDEX);
+
+	pars_info_t*	info = pars_info_create();
+
+	pars_info_add_int4_literal(info, "space", space_id);
+	pars_info_add_str_literal(info, "path", filepath);
+
+	err = que_eval_sql(info,
+			   "PROCEDURE UPDATE_FILEPATH () IS\n"
+			   "BEGIN\n"
+			   "UPDATE SYS_DATAFILES"
+			   " SET PATH = :path\n"
+			   " WHERE SPACE = :space;\n"
+			   "END;\n", FALSE, trx);
+
+	trx_commit_for_mysql(trx);
+	trx->dict_operation_lock_mode = 0;
+	trx_free_for_background(trx);
+
+	if (err == DB_SUCCESS) {
+		/* We just updated SYS_DATAFILES due to the contents in
+		a link file. Make a note that we did this. */
+		ib_logf(IB_LOG_LEVEL_INFO,
+			"The InnoDB data dictionary table SYS_DATAFILES "
+			"for tablespace ID %lu was updated to use file %s.",
+			(ulong) space_id, filepath);
+	} else {
+		ib_logf(IB_LOG_LEVEL_WARN,
+			"Problem updating InnoDB data dictionary table "
+			"SYS_DATAFILES for tablespace ID %lu to file %s.",
+			(ulong) space_id, filepath);
+	}
+
+	return(err);
+}
+
+/********************************************************************//**
+Insert records into SYS_TABLESPACES and SYS_DATAFILES.
+@return DB_SUCCESS if OK, dberr_t if the insert failed */
+UNIV_INTERN
+dberr_t
+dict_insert_tablespace_and_filepath(
+/*================================*/
+	ulint		space,		/*!< in: space id */
+	const char*	name,		/*!< in: tablespace name */
+	const char*	filepath,	/*!< in: filepath */
+	ulint		fsp_flags)	/*!< in: tablespace flags */
+{
+	dberr_t		err = DB_SUCCESS;
+	trx_t*		trx;
+
+#ifdef UNIV_SYNC_DEBUG
+	ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+	ut_ad(mutex_own(&(dict_sys->mutex)));
+	ut_ad(filepath);
+
+	/* Run the inserts in a dedicated background transaction. */
+	trx = trx_allocate_for_background();
+	trx->op_info = "insert tablespace and filepath";
+	trx->dict_operation_lock_mode = RW_X_LATCH;
+	trx_start_for_ddl(trx, TRX_DICT_OP_INDEX);
+
+	/* A record for this space ID was not found in
+	SYS_DATAFILES. Assume the record is also missing in
+	SYS_TABLESPACES. Insert records onto them both. */
+	err = dict_create_add_tablespace_to_dictionary(
+		space, name, fsp_flags, filepath, trx, false);
+
+	trx_commit_for_mysql(trx);
+	trx->dict_operation_lock_mode = 0;
+	trx_free_for_background(trx);
+
+	return(err);
+}
+
+/********************************************************************//**
+This function looks at each table defined in SYS_TABLES. It checks the
+tablespace for any table with a space_id > 0. It looks up the tablespace
+in SYS_DATAFILES to ensure the correct path.
+
In a crash recovery we already have all the tablespace objects created.
This function compares the space id information in the InnoDB data dictionary
to what we already read with fil_load_single_table_tablespaces().
@@ -669,6 +969,7 @@ dict_check_tablespaces_and_store_max_id(
ulint max_space_id;
mtr_t mtr;
+ rw_lock_x_lock(&dict_operation_lock);
mutex_enter(&(dict_sys->mutex));
mtr_start(&mtr);
@@ -682,8 +983,8 @@ dict_check_tablespaces_and_store_max_id(
MLOG_4BYTES, &mtr);
fil_set_max_space_id_if_bigger(max_space_id);
- btr_pcur_open_at_index_side(TRUE, sys_index, BTR_SEARCH_LEAF, &pcur,
- TRUE, &mtr);
+ btr_pcur_open_at_index_side(true, sys_index, BTR_SEARCH_LEAF, &pcur,
+ true, 0, &mtr);
loop:
btr_pcur_move_to_next_user_rec(&pcur, &mtr);
@@ -703,6 +1004,7 @@ loop:
fil_set_max_space_id_if_bigger(max_space_id);
mutex_exit(&(dict_sys->mutex));
+ rw_lock_x_unlock(&dict_operation_lock);
return;
}
@@ -718,8 +1020,14 @@ loop:
field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_TABLES__NAME, &len);
+
name = mem_strdupl((char*) field, len);
+ char table_name[MAX_FULL_NAME_LEN + 1];
+
+ innobase_format_name(
+ table_name, sizeof(table_name), name, FALSE);
+
flags = dict_sys_tables_get_flags(rec);
if (UNIV_UNLIKELY(flags == ULINT_UNDEFINED)) {
/* Read again the 4 bytes from rec. */
@@ -728,13 +1036,9 @@ loop:
ut_ad(len == 4); /* this was checked earlier */
flags = mach_read_from_4(field);
- ut_print_timestamp(stderr);
- fputs(" InnoDB: Error: table ", stderr);
- ut_print_filename(stderr, name);
- fprintf(stderr, "\n"
- "InnoDB: in InnoDB data dictionary"
- " has unknown type %lx.\n",
- (ulong) flags);
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Table '%s' in InnoDB data dictionary"
+ " has unknown type %lx", table_name, flags);
goto loop;
}
@@ -749,43 +1053,84 @@ loop:
mtr_commit(&mtr);
+ /* For tables created with old versions of InnoDB,
+ SYS_TABLES.MIX_LEN may contain garbage. Such tables
+ would always be in ROW_FORMAT=REDUNDANT. Pretend that
+ all such tables are non-temporary. That is, do not
+ suppress error printouts about temporary or discarded
+ tablespaces not being found. */
+
+ field = rec_get_nth_field_old(
+ rec, DICT_FLD__SYS_TABLES__MIX_LEN, &len);
+
+ bool is_temp = false;
+ bool discarded = false;
+ ib_uint32_t flags2 = mach_read_from_4(field);
+
+ /* Check that the tablespace (the .ibd file) really
+ exists; print a warning to the .err log if not.
+ Do not print warnings for temporary tables or for
+ tablespaces that have been discarded. */
+
+ field = rec_get_nth_field_old(
+ rec, DICT_FLD__SYS_TABLES__N_COLS, &len);
+
+ /* MIX_LEN valid only for ROW_FORMAT > REDUNDANT. */
+ if (mach_read_from_4(field) & DICT_N_COLS_COMPACT) {
+
+ is_temp = !!(flags2 & DICT_TF2_TEMPORARY);
+ discarded = !!(flags2 & DICT_TF2_DISCARDED);
+ }
+
if (space_id == 0) {
/* The system tablespace always exists. */
+ ut_ad(!discarded);
} else if (in_crash_recovery) {
- /* Check that the tablespace (the .ibd file) really
- exists; print a warning to the .err log if not.
- Do not print warnings for temporary tables. */
- ibool is_temp;
+ /* All tablespaces should have been found in
+ fil_load_single_table_tablespaces(). */
- field = rec_get_nth_field_old(
- rec, DICT_FLD__SYS_TABLES__N_COLS, &len);
- if (mach_read_from_4(field) & DICT_N_COLS_COMPACT) {
- /* ROW_FORMAT=COMPACT: read the is_temp
- flag from SYS_TABLES.MIX_LEN. */
- field = rec_get_nth_field_old(
- rec, 7/*MIX_LEN*/, &len);
- is_temp = !!(mach_read_from_4(field)
- & DICT_TF2_TEMPORARY);
- } else {
- /* For tables created with old versions
- of InnoDB, SYS_TABLES.MIX_LEN may contain
- garbage. Such tables would always be
- in ROW_FORMAT=REDUNDANT. Pretend that
- all such tables are non-temporary. That is,
- do not suppress error printouts about
- temporary tables not being found. */
- is_temp = FALSE;
+ fil_space_for_table_exists_in_mem(
+ space_id, name, TRUE, !(is_temp || discarded),
+ false, NULL, 0);
+
+ } else if (!discarded) {
+
+ /* It is a normal database startup: create the
+ space object and check that the .ibd file exists.
+ If the table uses a remote tablespace, look for the
+ space_id in SYS_DATAFILES to find the filepath */
+
+ /* Use the remote filepath if known. */
+ char* filepath = NULL;
+ if (DICT_TF_HAS_DATA_DIR(flags)) {
+ filepath = dict_get_first_path(
+ space_id, name);
}
- fil_space_for_table_exists_in_mem(
- space_id, name, TRUE, !is_temp);
- } else {
- /* It is a normal database startup: create the space
- object and check that the .ibd file exists. */
+ /* We set the 2nd param (fix_dict = true)
+ here because we already have an x-lock on
+ dict_operation_lock and dict_sys->mutex. Besides,
+ this is at startup and we are now single threaded.
+ If the filepath is not known, it will need to
+ be discovered. */
+ dberr_t err = fil_open_single_table_tablespace(
+ false, srv_read_only_mode ? false : true,
+ space_id, dict_tf_to_fsp_flags(flags),
+ name, filepath);
+
+ if (err != DB_SUCCESS) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Tablespace open failed for '%s', "
+ "ignored.", table_name);
+ }
- fil_open_single_table_tablespace(
- FALSE, space_id,
- dict_tf_to_fsp_flags(flags), name);
+ if (filepath) {
+ mem_free(filepath);
+ }
+ } else {
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "DISCARD flag set for table '%s', ignored.",
+ table_name);
}
mem_free(name);
@@ -879,7 +1224,7 @@ err_len:
field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_COLUMNS__NAME, &len);
- if (len < 1 || len == UNIV_SQL_NULL) {
+ if (len == 0 || len == UNIV_SQL_NULL) {
goto err_len;
}
@@ -1003,6 +1348,11 @@ dict_load_columns(
err_msg = dict_load_column_low(table, heap, NULL, NULL,
&name, rec);
+ if (err_msg) {
+ fprintf(stderr, "InnoDB: %s\n", err_msg);
+ ut_error;
+ }
+
/* Note: Currently we have one DOC_ID column that is
shared by all FTS indexes on a table. */
if (innobase_strcasecmp(name,
@@ -1037,11 +1387,6 @@ dict_load_columns(
table->fts->doc_col = i;
}
- if (err_msg) {
- fprintf(stderr, "InnoDB: %s\n", err_msg);
- ut_error;
- }
-
btr_pcur_move_to_next_user_rec(&pcur, &mtr);
}
@@ -1154,7 +1499,7 @@ err_len:
field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_FIELDS__COL_NAME, &len);
- if (len < 1 || len == UNIV_SQL_NULL) {
+ if (len == 0 || len == UNIV_SQL_NULL) {
goto err_len;
}
@@ -1194,7 +1539,7 @@ dict_load_fields(
byte* buf;
ulint i;
mtr_t mtr;
- ulint error;
+ dberr_t error;
ut_ad(mutex_own(&(dict_sys->mutex)));
@@ -1394,8 +1739,8 @@ Loads definitions for table indexes. Adds them to the data dictionary
cache.
@return DB_SUCCESS if ok, DB_CORRUPTION if corruption of dictionary
table or DB_UNSUPPORTED if table has unknown index type */
-static
-ulint
+static __attribute__((nonnull))
+dberr_t
dict_load_indexes(
/*==============*/
dict_table_t* table, /*!< in/out: table */
@@ -1412,7 +1757,7 @@ dict_load_indexes(
const rec_t* rec;
byte* buf;
mtr_t mtr;
- ulint error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
ut_ad(mutex_own(&(dict_sys->mutex)));
@@ -1443,6 +1788,21 @@ dict_load_indexes(
if (!btr_pcur_is_on_user_rec(&pcur)) {
+ /* We should allow the table to open even
+ without index when DICT_ERR_IGNORE_CORRUPT is set.
+ DICT_ERR_IGNORE_CORRUPT is currently only set
+ for drop table */
+ if (dict_table_get_first_index(table) == NULL
+ && !(ignore_err & DICT_ERR_IGNORE_CORRUPT)) {
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Cannot load table %s "
+ "because it has no indexes in "
+ "InnoDB internal data dictionary.",
+ table->name);
+ error = DB_CORRUPTION;
+ goto func_exit;
+ }
+
break;
}
@@ -1456,6 +1816,20 @@ dict_load_indexes(
if (err_msg == dict_load_index_id_err) {
/* TABLE_ID mismatch means that we have
run out of index definitions for the table. */
+
+ if (dict_table_get_first_index(table) == NULL
+ && !(ignore_err & DICT_ERR_IGNORE_CORRUPT)) {
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Failed to load the "
+ "clustered index for table %s "
+ "because of the following error: %s. "
+ "Refusing to load the rest of the "
+ "indexes (if any) and the whole table "
+ "altogether.", table->name, err_msg);
+ error = DB_CORRUPTION;
+ goto func_exit;
+ }
+
break;
} else if (err_msg == dict_load_index_del) {
/* Skip delete-marked records. */
@@ -1510,15 +1884,15 @@ dict_load_indexes(
subsequent checks are relevant for the supported types. */
if (index->type & ~(DICT_CLUSTERED | DICT_UNIQUE
| DICT_CORRUPT | DICT_FTS)) {
- fprintf(stderr,
- "InnoDB: Error: unknown type %lu"
- " of index %s of table %s\n",
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Unknown type %lu of index %s of table %s",
(ulong) index->type, index->name, table->name);
error = DB_UNSUPPORTED;
dict_mem_index_free(index);
goto func_exit;
} else if (index->page == FIL_NULL
+ && !table->ibd_file_missing
&& (!(index->type & DICT_FTS))) {
fprintf(stderr,
@@ -1560,7 +1934,7 @@ corrupted:
" is not clustered!\n", stderr);
goto corrupted;
- } else if (table->id < DICT_HDR_FIRST_ID
+ } else if (dict_is_sys_table(table->id)
&& (dict_index_is_clust(index)
|| ((table == dict_sys->sys_tables)
&& !strcmp("ID_IND", index->name)))) {
@@ -1570,8 +1944,10 @@ corrupted:
dict_mem_index_free(index);
} else {
dict_load_fields(index, heap);
- error = dict_index_add_to_cache(table, index,
- index->page, FALSE);
+
+ error = dict_index_add_to_cache(
+ table, index, index->page, FALSE);
+
/* The data dictionary tables should never contain
invalid index definitions. If we ignored this error
and simply did not load this index definition, the
@@ -1629,7 +2005,7 @@ dict_load_table_low(
rec_get_nth_field_offs_old(
rec, DICT_FLD__SYS_TABLES__NAME, &len);
- if (len < 1 || len == UNIV_SQL_NULL) {
+ if (len == 0 || len == UNIV_SQL_NULL) {
err_len:
return("incorrect column length in SYS_TABLES");
}
@@ -1751,6 +2127,77 @@ err_len:
}
/********************************************************************//**
+Using the table->heap, copy the null-terminated filepath into
+table->data_dir_path and replace the 'databasename/tablename.ibd'
+portion with 'tablename'.
+This allows SHOW CREATE TABLE to return the correct DATA DIRECTORY path.
+Make this data directory path only if it has not yet been saved. */
+UNIV_INTERN
+void
+dict_save_data_dir_path(
+/*====================*/
+ dict_table_t* table, /*!< in/out: table */
+ char* filepath) /*!< in: filepath of tablespace */
+{
+ ut_ad(mutex_own(&(dict_sys->mutex)));
+ ut_a(DICT_TF_HAS_DATA_DIR(table->flags));
+
+ ut_a(!table->data_dir_path);
+ ut_a(filepath);
+
+ /* Be sure this filepath is not the default filepath. */
+ char* default_filepath = fil_make_ibd_name(table->name, false);
+ if (strcmp(filepath, default_filepath)) {
+ ulint pathlen = strlen(filepath);
+ ut_a(pathlen < OS_FILE_MAX_PATH);
+ ut_a(0 == strcmp(filepath + pathlen - 4, ".ibd"));
+
+ table->data_dir_path = mem_heap_strdup(table->heap, filepath);
+ os_file_make_data_dir_path(table->data_dir_path);
+ } else {
+ /* This does not change SYS_DATAFILES or SYS_TABLES
+ or FSP_FLAGS on the header page of the tablespace,
+ but it makes dict_table_t consistent */
+ table->flags &= ~DICT_TF_MASK_DATA_DIR;
+ }
+ mem_free(default_filepath);
+}
+
+/*****************************************************************//**
+Make sure the data_file_name is saved in dict_table_t if needed. Try to
+read it from the file dictionary first, then from SYS_DATAFILES. */
+UNIV_INTERN
+void
+dict_get_and_save_data_dir_path(
+/*============================*/
+ dict_table_t* table, /*!< in/out: table */
+ bool dict_mutex_own) /*!< in: true if dict_sys->mutex
+ is owned already */
+{
+ if (DICT_TF_HAS_DATA_DIR(table->flags)
+ && (!table->data_dir_path)) {
+ char* path = fil_space_get_first_path(table->space);
+
+ if (!dict_mutex_own) {
+ dict_mutex_enter_for_mysql();
+ }
+ if (!path) {
+ path = dict_get_first_path(
+ table->space, table->name);
+ }
+
+ if (path) {
+ dict_save_data_dir_path(table, path);
+ mem_free(path);
+ }
+
+ if (!dict_mutex_own) {
+ dict_mutex_exit_for_mysql();
+ }
+ }
+}
+
+/********************************************************************//**
Loads a table definition and also all its index definitions, and also
the cluster definition if the table is a member in a cluster. Also loads
all foreign key constraints where the foreign key is in the table or where
@@ -1770,6 +2217,7 @@ dict_load_table(
/*!< in: error to be ignored when loading
table and its indexes' definition */
{
+ dberr_t err;
dict_table_t* table;
dict_table_t* sys_tables;
btr_pcur_t pcur;
@@ -1780,7 +2228,7 @@ dict_load_table(
const rec_t* rec;
const byte* field;
ulint len;
- ulint err;
+ char* filepath = NULL;
const char* err_msg;
mtr_t mtr;
@@ -1843,39 +2291,71 @@ err_exit:
goto err_exit;
}
+ char table_name[MAX_FULL_NAME_LEN + 1];
+
+ innobase_format_name(table_name, sizeof(table_name), name, FALSE);
+
+ btr_pcur_close(&pcur);
+ mtr_commit(&mtr);
+
if (table->space == 0) {
/* The system tablespace is always available. */
+ } else if (table->flags2 & DICT_TF2_DISCARDED) {
+
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Table '%s' tablespace is set as discarded.",
+ table_name);
+
+ table->ibd_file_missing = TRUE;
+
} else if (!fil_space_for_table_exists_in_mem(
- table->space, name, FALSE, FALSE)) {
+ table->space, name, FALSE, FALSE, true, heap,
+ table->id)) {
- if (table->flags2 & DICT_TF2_TEMPORARY) {
+ if (DICT_TF2_FLAG_IS_SET(table, DICT_TF2_TEMPORARY)) {
/* Do not bother to retry opening temporary tables. */
table->ibd_file_missing = TRUE;
+
} else {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: error: space object of table ");
- ut_print_filename(stderr, name);
- fprintf(stderr, ",\n"
- "InnoDB: space id %lu did not exist in memory."
- " Retrying an open.\n",
- (ulong) table->space);
- /* Try to open the tablespace */
- if (!fil_open_single_table_tablespace(
- TRUE, table->space,
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Failed to find tablespace for table '%s' "
+ "in the cache. Attempting to load the "
+ "tablespace with space id %lu.",
+ table_name, (ulong) table->space);
+
+ /* Use the remote filepath if needed. */
+ if (DICT_TF_HAS_DATA_DIR(table->flags)) {
+ /* This needs to be added to the table
+ from SYS_DATAFILES */
+ dict_get_and_save_data_dir_path(table, true);
+
+ if (table->data_dir_path) {
+ filepath = os_file_make_remote_pathname(
+ table->data_dir_path,
+ table->name, "ibd");
+ }
+ }
+
+ /* Try to open the tablespace. We set the
+ 2nd param (fix_dict = false) here because we
+ do not have an x-lock on dict_operation_lock */
+ err = fil_open_single_table_tablespace(
+ true, false, table->space,
dict_tf_to_fsp_flags(table->flags),
- name)) {
+ name, filepath);
+
+ if (err != DB_SUCCESS) {
/* We failed to find a sensible
tablespace file */
table->ibd_file_missing = TRUE;
}
+ if (filepath) {
+ mem_free(filepath);
+ }
}
}
- btr_pcur_close(&pcur);
- mtr_commit(&mtr);
-
dict_load_columns(table, heap);
if (cached) {
@@ -1886,7 +2366,15 @@ err_exit:
mem_heap_empty(heap);
- err = dict_load_indexes(table, heap, ignore_err);
+ /* If there is no tablespace for the table then we only need to
+ load the index definitions. So that we can IMPORT the tablespace
+ later. */
+ if (table->ibd_file_missing) {
+ err = dict_load_indexes(
+ table, heap, DICT_ERR_IGNORE_ALL);
+ } else {
+ err = dict_load_indexes(table, heap, ignore_err);
+ }
if (err == DB_INDEX_CORRUPT) {
/* Refuse to load the table if the table has a corrupted
@@ -1920,7 +2408,8 @@ err_exit:
of the error condition, since the user may want to dump data from the
clustered index. However we load the foreign key information only if
all indexes were loaded. */
- if (!cached) {
+ if (!cached || table->ibd_file_missing) {
+ /* Don't attempt to load the indexes from disk. */
} else if (err == DB_SUCCESS) {
err = dict_load_foreigns(table->name, TRUE, TRUE);
@@ -1937,11 +2426,15 @@ err_exit:
Otherwise refuse to load the table */
index = dict_table_get_first_index(table);
- if (!srv_force_recovery || !index
+ if (!srv_force_recovery
+ || !index
|| !dict_index_is_clust(index)) {
+
dict_table_remove_from_cache(table);
table = NULL;
- } else if (dict_index_is_corrupted(index)) {
+
+ } else if (dict_index_is_corrupted(index)
+ && !table->ibd_file_missing) {
/* It is possible we force to load a corrupted
clustered index if srv_load_corrupted is set.
@@ -1949,36 +2442,28 @@ err_exit:
table->corrupted = TRUE;
}
}
-#if 0
- if (err != DB_SUCCESS && table != NULL) {
- mutex_enter(&dict_foreign_err_mutex);
-
- ut_print_timestamp(stderr);
-
- fprintf(stderr,
- " InnoDB: Error: could not make a foreign key"
- " definition to match\n"
- "InnoDB: the foreign key table"
- " or the referenced table!\n"
- "InnoDB: The data dictionary of InnoDB is corrupt."
- " You may need to drop\n"
- "InnoDB: and recreate the foreign key table"
- " or the referenced table.\n"
- "InnoDB: Submit a detailed bug report"
- " to http://bugs.mysql.com\n"
- "InnoDB: Latest foreign key error printout:\n%s\n",
- dict_foreign_err_buf);
-
- mutex_exit(&dict_foreign_err_mutex);
- }
-#endif /* 0 */
func_exit:
mem_heap_free(heap);
- ut_ad(!table || ignore_err != DICT_ERR_IGNORE_NONE
+ ut_ad(!table
+ || ignore_err != DICT_ERR_IGNORE_NONE
+ || table->ibd_file_missing
|| !table->corrupted);
+ if (table && table->fts) {
+ if (!(dict_table_has_fts_index(table)
+ || DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS_HAS_DOC_ID)
+ || DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS_ADD_DOC_ID))) {
+ /* the table->fts could be created in dict_load_column
+ when a user defined FTS_DOC_ID is present, but no
+ FTS */
+ fts_free(table);
+ } else {
+ fts_optimize_add_table(table);
+ }
+ }
+
return(table);
}
@@ -2019,6 +2504,7 @@ dict_load_table_on_id(
sys_table_ids = dict_table_get_next_index(
dict_table_get_first_index(sys_tables));
ut_ad(!dict_table_is_comp(sys_tables));
+ ut_ad(!dict_index_is_clust(sys_table_ids));
heap = mem_heap_create(256);
tuple = dtuple_create(heap, 1);
@@ -2099,15 +2585,20 @@ dict_load_sys_table(
}
/********************************************************************//**
-Loads foreign key constraint col names (also for the referenced table). */
+Loads foreign key constraint col names (also for the referenced table).
+Members that must be set (and valid) in foreign:
+foreign->heap
+foreign->n_fields
+foreign->id ('\0'-terminated)
+Members that will be created and set by this function:
+foreign->foreign_col_names[i]
+foreign->referenced_col_names[i]
+(for i=0..foreign->n_fields-1) */
static
void
dict_load_foreign_cols(
/*===================*/
- const char* id, /*!< in: foreign constraint id, not
- necessary '\0'-terminated */
- ulint id_len, /*!< in: id length */
- dict_foreign_t* foreign)/*!< in: foreign constraint object */
+ dict_foreign_t* foreign)/*!< in/out: foreign constraint object */
{
dict_table_t* sys_foreign_cols;
dict_index_t* sys_index;
@@ -2119,9 +2610,12 @@ dict_load_foreign_cols(
ulint len;
ulint i;
mtr_t mtr;
+ size_t id_len;
ut_ad(mutex_own(&(dict_sys->mutex)));
+ id_len = strlen(foreign->id);
+
foreign->foreign_col_names = static_cast<const char**>(
mem_heap_alloc(foreign->heap,
foreign->n_fields * sizeof(void*)));
@@ -2140,7 +2634,7 @@ dict_load_foreign_cols(
tuple = dtuple_create(foreign->heap, 1);
dfield = dtuple_get_nth_field(tuple, 0);
- dfield_set_data(dfield, id, id_len);
+ dfield_set_data(dfield, foreign->id, id_len);
dict_index_copy_types(tuple, sys_index, 1);
btr_pcur_open_on_user_rec(sys_index, tuple, PAGE_CUR_GE,
@@ -2154,8 +2648,42 @@ dict_load_foreign_cols(
field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_FOREIGN_COLS__ID, &len);
- ut_a(len == id_len);
- ut_a(ut_memcmp(id, field, len) == 0);
+
+ if (len != id_len || ut_memcmp(foreign->id, field, len) != 0) {
+ const rec_t* pos;
+ ulint pos_len;
+ const rec_t* for_col_name;
+ ulint for_col_name_len;
+ const rec_t* ref_col_name;
+ ulint ref_col_name_len;
+
+ pos = rec_get_nth_field_old(
+ rec, DICT_FLD__SYS_FOREIGN_COLS__POS,
+ &pos_len);
+
+ for_col_name = rec_get_nth_field_old(
+ rec, DICT_FLD__SYS_FOREIGN_COLS__FOR_COL_NAME,
+ &for_col_name_len);
+
+ ref_col_name = rec_get_nth_field_old(
+ rec, DICT_FLD__SYS_FOREIGN_COLS__REF_COL_NAME,
+ &ref_col_name_len);
+
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Unable to load columns names for foreign "
+ "key '%s' because it was not found in "
+ "InnoDB internal table SYS_FOREIGN_COLS. The "
+ "closest entry we found is: "
+ "(ID='%.*s', POS=%lu, FOR_COL_NAME='%.*s', "
+ "REF_COL_NAME='%.*s')",
+ foreign->id,
+ (int) len, field,
+ mach_read_from_4(pos),
+ (int) for_col_name_len, for_col_name,
+ (int) ref_col_name_len, ref_col_name);
+
+ ut_error;
+ }
field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_FOREIGN_COLS__POS, &len);
@@ -2182,13 +2710,12 @@ dict_load_foreign_cols(
/***********************************************************************//**
Loads a foreign key constraint to the dictionary cache.
@return DB_SUCCESS or error code */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
dict_load_foreign(
/*==============*/
- const char* id, /*!< in: foreign constraint id, not
- necessary '\0'-terminated */
- ulint id_len, /*!< in: id length */
+ const char* id, /*!< in: foreign constraint id, must be
+ '\0'-terminated */
ibool check_charsets,
/*!< in: TRUE=check charset compatibility */
ibool check_recursive)
@@ -2210,9 +2737,12 @@ dict_load_foreign(
mtr_t mtr;
dict_table_t* for_table;
dict_table_t* ref_table;
+ size_t id_len;
ut_ad(mutex_own(&(dict_sys->mutex)));
+ id_len = strlen(id);
+
heap2 = mem_heap_create(1000);
mtr_start(&mtr);
@@ -2238,8 +2768,8 @@ dict_load_foreign(
fprintf(stderr,
"InnoDB: Error: cannot load foreign constraint "
- "%.*s: could not find the relevant record in "
- "SYS_FOREIGN\n", (int) id_len, id);
+ "%s: could not find the relevant record in "
+ "SYS_FOREIGN\n", id);
btr_pcur_close(&pcur);
mtr_commit(&mtr);
@@ -2255,8 +2785,8 @@ dict_load_foreign(
fprintf(stderr,
"InnoDB: Error: cannot load foreign constraint "
- "%.*s: found %.*s instead in SYS_FOREIGN\n",
- (int) id_len, id, (int) len, field);
+ "%s: found %.*s instead in SYS_FOREIGN\n",
+ id, (int) len, field);
btr_pcur_close(&pcur);
mtr_commit(&mtr);
@@ -2301,7 +2831,7 @@ dict_load_foreign(
btr_pcur_close(&pcur);
mtr_commit(&mtr);
- dict_load_foreign_cols(id, id_len, foreign);
+ dict_load_foreign_cols(foreign);
ref_table = dict_table_check_if_in_cache_low(
foreign->referenced_table_name_lookup);
@@ -2371,7 +2901,7 @@ cache already contains all constraints where the other relevant table is
already in the dictionary cache.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
dict_load_foreigns(
/*===============*/
const char* table_name, /*!< in: table name */
@@ -2390,7 +2920,7 @@ dict_load_foreigns(
const rec_t* rec;
const byte* field;
ulint len;
- ulint err;
+ dberr_t err;
mtr_t mtr;
ut_ad(mutex_own(&(dict_sys->mutex)));
@@ -2415,6 +2945,7 @@ dict_load_foreigns(
sec_index = dict_table_get_next_index(
dict_table_get_first_index(sys_foreign));
+ ut_ad(!dict_index_is_clust(sec_index));
start_load:
tuple = dtuple_create_from_mem(tuple_buf, sizeof(tuple_buf), 1);
@@ -2437,7 +2968,6 @@ loop:
/* Now we have the record in the secondary index containing a table
name and a foreign constraint ID */
- rec = btr_pcur_get_rec(&pcur);
field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_FOREIGN_FOR_NAME__NAME, &len);
@@ -2476,14 +3006,21 @@ loop:
field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_FOREIGN_FOR_NAME__ID, &len);
+ /* Copy the string because the page may be modified or evicted
+ after mtr_commit() below. */
+ char fk_id[MAX_TABLE_NAME_LEN + 1];
+
+ ut_a(len <= MAX_TABLE_NAME_LEN);
+ memcpy(fk_id, field, len);
+ fk_id[len] = '\0';
+
btr_pcur_store_position(&pcur, &mtr);
mtr_commit(&mtr);
/* Load the foreign constraint definition to the dictionary cache */
- err = dict_load_foreign((char*) field, len, check_charsets,
- check_recursive);
+ err = dict_load_foreign(fk_id, check_charsets, check_recursive);
if (err != DB_SUCCESS) {
btr_pcur_close(&pcur);
diff --git a/storage/innobase/dict/dict0mem.cc b/storage/innobase/dict/dict0mem.cc
index 28b935d2e58..116a6a6d96a 100644
--- a/storage/innobase/dict/dict0mem.cc
+++ b/storage/innobase/dict/dict0mem.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
-Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2012, Facebook Inc.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -35,8 +36,9 @@ Created 1/8/1996 Heikki Tuuri
#include "dict0dict.h"
#include "fts0priv.h"
#ifndef UNIV_HOTBACKUP
-#include "ha_prototypes.h" /* innobase_casedn_str(),
+# include "ha_prototypes.h" /* innobase_casedn_str(),
innobase_get_lower_case_table_names */
+# include "mysql_com.h" /* NAME_LEN */
# include "lock0lock.h"
#endif /* !UNIV_HOTBACKUP */
#ifdef UNIV_BLOB_DEBUG
@@ -51,6 +53,10 @@ Created 1/8/1996 Heikki Tuuri
UNIV_INTERN mysql_pfs_key_t autoinc_mutex_key;
#endif /* UNIV_PFS_MUTEX */
+/** Prefix for tmp tables, adopted from sql/table.h */
+#define tmp_file_prefix "#sql"
+#define tmp_file_prefix_length 4
+
/**********************************************************************//**
Creates a table memory object.
@return own: table object */
@@ -60,9 +66,7 @@ dict_mem_table_create(
/*==================*/
const char* name, /*!< in: table name */
ulint space, /*!< in: space where the clustered index of
- the table is placed; this parameter is
- ignored if the table is made a member of
- a cluster */
+ the table is placed */
ulint n_cols, /*!< in: number of columns */
ulint flags, /*!< in: table flags */
ulint flags2) /*!< in: table flags2 */
@@ -71,7 +75,7 @@ dict_mem_table_create(
mem_heap_t* heap;
ut_ad(name);
- dict_tf_validate(flags);
+ ut_a(dict_tf_is_valid(flags));
ut_a(!(flags2 & ~DICT_TF2_BIT_MASK));
heap = mem_heap_create(DICT_HEAP_SIZE);
@@ -115,7 +119,6 @@ dict_mem_table_create(
|| DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS_ADD_DOC_ID)) {
table->fts = fts_create(table);
table->fts->cache = fts_cache_create(table);
- fts_optimize_add_table(table);
} else {
table->fts = NULL;
}
@@ -243,6 +246,156 @@ dict_mem_table_add_col(
dict_mem_fill_column_struct(col, i, mtype, prtype, len);
}
+/**********************************************************************//**
+Renames a column of a table in the data dictionary cache. */
+static __attribute__((nonnull))
+void
+dict_mem_table_col_rename_low(
+/*==========================*/
+ dict_table_t* table, /*!< in/out: table */
+ unsigned i, /*!< in: column offset corresponding to s */
+ const char* to, /*!< in: new column name */
+ const char* s) /*!< in: pointer to table->col_names */
+{
+ size_t from_len = strlen(s), to_len = strlen(to);
+
+ ut_ad(i < table->n_def);
+ ut_ad(from_len <= NAME_LEN);
+ ut_ad(to_len <= NAME_LEN);
+
+ if (from_len == to_len) {
+ /* The easy case: simply replace the column name in
+ table->col_names. */
+ strcpy(const_cast<char*>(s), to);
+ } else {
+ /* We need to adjust all affected index->field
+ pointers, as in dict_index_add_col(). First, copy
+ table->col_names. */
+ ulint prefix_len = s - table->col_names;
+
+ for (; i < table->n_def; i++) {
+ s += strlen(s) + 1;
+ }
+
+ ulint full_len = s - table->col_names;
+ char* col_names;
+
+ if (to_len > from_len) {
+ col_names = static_cast<char*>(
+ mem_heap_alloc(
+ table->heap,
+ full_len + to_len - from_len));
+
+ memcpy(col_names, table->col_names, prefix_len);
+ } else {
+ col_names = const_cast<char*>(table->col_names);
+ }
+
+ memcpy(col_names + prefix_len, to, to_len);
+ memmove(col_names + prefix_len + to_len,
+ table->col_names + (prefix_len + from_len),
+ full_len - (prefix_len + from_len));
+
+ /* Replace the field names in every index. */
+ for (dict_index_t* index = dict_table_get_first_index(table);
+ index != NULL;
+ index = dict_table_get_next_index(index)) {
+ ulint n_fields = dict_index_get_n_fields(index);
+
+ for (ulint i = 0; i < n_fields; i++) {
+ dict_field_t* field
+ = dict_index_get_nth_field(
+ index, i);
+ ulint name_ofs
+ = field->name - table->col_names;
+ if (name_ofs <= prefix_len) {
+ field->name = col_names + name_ofs;
+ } else {
+ ut_a(name_ofs < full_len);
+ field->name = col_names
+ + name_ofs + to_len - from_len;
+ }
+ }
+ }
+
+ table->col_names = col_names;
+ }
+
+ /* Replace the field names in every foreign key constraint. */
+ for (dict_foreign_t* foreign = UT_LIST_GET_FIRST(table->foreign_list);
+ foreign != NULL;
+ foreign = UT_LIST_GET_NEXT(foreign_list, foreign)) {
+ for (unsigned f = 0; f < foreign->n_fields; f++) {
+ /* These can point straight to
+ table->col_names, because the foreign key
+ constraints will be freed at the same time
+ when the table object is freed. */
+ foreign->foreign_col_names[f]
+ = dict_index_get_nth_field(
+ foreign->foreign_index, f)->name;
+ }
+ }
+
+ for (dict_foreign_t* foreign = UT_LIST_GET_FIRST(
+ table->referenced_list);
+ foreign != NULL;
+ foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) {
+ for (unsigned f = 0; f < foreign->n_fields; f++) {
+ /* foreign->referenced_col_names[] need to be
+ copies, because the constraint may become
+ orphan when foreign_key_checks=0 and the
+ parent table is dropped. */
+
+ const char* col_name = dict_index_get_nth_field(
+ foreign->referenced_index, f)->name;
+
+ if (strcmp(foreign->referenced_col_names[f],
+ col_name)) {
+ char** rc = const_cast<char**>(
+ foreign->referenced_col_names + f);
+ size_t col_name_len_1 = strlen(col_name) + 1;
+
+ if (col_name_len_1 <= strlen(*rc) + 1) {
+ memcpy(*rc, col_name, col_name_len_1);
+ } else {
+ *rc = static_cast<char*>(
+ mem_heap_dup(
+ foreign->heap,
+ col_name,
+ col_name_len_1));
+ }
+ }
+ }
+ }
+}
+
+/**********************************************************************//**
+Renames a column of a table in the data dictionary cache. */
+UNIV_INTERN
+void
+dict_mem_table_col_rename(
+/*======================*/
+ dict_table_t* table, /*!< in/out: table */
+ unsigned nth_col,/*!< in: column index */
+ const char* from, /*!< in: old column name */
+ const char* to) /*!< in: new column name */
+{
+ const char* s = table->col_names;
+
+ ut_ad(nth_col < table->n_def);
+
+ for (unsigned i = 0; i < nth_col; i++) {
+ size_t len = strlen(s);
+ ut_ad(len > 0);
+ s += len + 1;
+ }
+
+ /* This could fail if the data dictionaries are out of sync.
+ Proceed with the renaming anyway. */
+ ut_ad(!strcmp(from, s));
+
+ dict_mem_table_col_rename_low(table, nth_col, to, s);
+}
/**********************************************************************//**
This function populates a dict_col_t memory structure with
@@ -304,6 +457,8 @@ dict_mem_index_create(
dict_mem_fill_index_struct(index, heap, table_name, index_name,
space, type, n_fields);
+ os_fast_mutex_init(zip_pad_mutex_key, &index->zip_pad.mutex);
+
return(index);
}
@@ -436,5 +591,31 @@ dict_mem_index_free(
}
#endif /* UNIV_BLOB_DEBUG */
+ os_fast_mutex_free(&index->zip_pad.mutex);
+
mem_heap_free(index->heap);
}
+
+/*******************************************************************//**
+Create a temporary tablename.
+@return temporary tablename suitable for InnoDB use */
+UNIV_INTERN
+char*
+dict_mem_create_temporary_tablename(
+/*================================*/
+ mem_heap_t* heap, /*!< in: memory heap */
+ const char* dbtab, /*!< in: database/table name */
+ table_id_t id) /*!< in: InnoDB table id */
+{
+ const char* dbend = strchr(dbtab, '/');
+ ut_ad(dbend);
+ size_t dblen = dbend - dbtab + 1;
+ size_t size = tmp_file_prefix_length + 4 + 9 + 9 + dblen;
+
+ char* name = static_cast<char*>(mem_heap_alloc(heap, size));
+ memcpy(name, dbtab, dblen);
+ ut_snprintf(name + dblen, size - dblen,
+ tmp_file_prefix "-ib" UINT64PF, id);
+ return(name);
+}
+
diff --git a/storage/innobase/dict/dict0stats.cc b/storage/innobase/dict/dict0stats.cc
index eebf6b1ec26..ed10525b07d 100644
--- a/storage/innobase/dict/dict0stats.cc
+++ b/storage/innobase/dict/dict0stats.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2009, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2009, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -29,27 +29,27 @@ Created Jan 06, 2010 Vasil Dimov
#include "btr0btr.h" /* btr_get_size() */
#include "btr0cur.h" /* btr_estimate_number_of_different_key_vals() */
-#include "dict0dict.h" /* dict_table_get_first_index() */
+#include "dict0dict.h" /* dict_table_get_first_index(), dict_fs2utf8() */
#include "dict0mem.h" /* DICT_TABLE_MAGIC_N */
#include "dict0stats.h"
#include "data0type.h" /* dtype_t */
-#include "db0err.h" /* db_err */
+#include "db0err.h" /* dberr_t */
#include "dyn0dyn.h" /* dyn_array* */
+#include "page0page.h" /* page_align() */
#include "pars0pars.h" /* pars_info_create() */
#include "pars0types.h" /* pars_info_t */
#include "que0que.h" /* que_eval_sql() */
#include "rem0cmp.h" /* REC_MAX_N_FIELDS,cmp_rec_rec_with_match() */
-#include "row0sel.h" /* sel_node_struct */
+#include "row0sel.h" /* sel_node_t */
#include "row0types.h" /* sel_node_t */
#include "trx0trx.h" /* trx_create() */
#include "trx0roll.h" /* trx_rollback_to_savepoint() */
#include "ut0rnd.h" /* ut_rnd_interval() */
-
-#include "ha_prototypes.h" /* innobase_strcasecmp() */
+#include "ut0ut.h" /* ut_format_name(), ut_time() */
/* Sampling algorithm description @{
-The algorithm is controlled by one number - srv_stats_persistent_sample_pages,
+The algorithm is controlled by one number - N_SAMPLE_PAGES(index),
let it be A, which is the number of leaf pages to analyze for a given index
for each n-prefix (if the index is on 3 columns, then 3*A leaf pages will be
analyzed).
@@ -124,126 +124,34 @@ where n=1..n_uniq.
#define DEBUG_PRINTF(fmt, ...) /* noop */
#endif /* UNIV_STATS_DEBUG */
-/* number of distinct records on a given level that are required to stop
-descending to lower levels and fetch
-srv_stats_persistent_sample_pages records from that level */
-#define N_DIFF_REQUIRED (srv_stats_persistent_sample_pages * 10)
+/* Gets the number of leaf pages to sample in persistent stats estimation */
+#define N_SAMPLE_PAGES(index) \
+ ((index)->table->stats_sample_pages != 0 ? \
+ (index)->table->stats_sample_pages : \
+ srv_stats_persistent_sample_pages)
-/** Open handles on the stats tables. Currently this is used to increase the
-reference count of the stats tables. */
-typedef struct dict_stats_struct {
- dict_table_t* table_stats; /*!< Handle to open TABLE_STATS_NAME */
- dict_table_t* index_stats; /*!< Handle to open INDEX_STATS_NAME */
-} dict_stats_t;
+/* number of distinct records on a given level that are required to stop
+descending to lower levels and fetch N_SAMPLE_PAGES(index) records
+from that level */
+#define N_DIFF_REQUIRED(index) (N_SAMPLE_PAGES(index) * 10)
/*********************************************************************//**
-Calculates new estimates for table and index statistics. This function
-is relatively quick and is used to calculate transient statistics that
-are not saved on disk.
-This was the only way to calculate statistics before the
-Persistent Statistics feature was introduced.
-dict_stats_update_transient() @{ */
-static
-void
-dict_stats_update_transient(
-/*========================*/
- dict_table_t* table) /*!< in/out: table */
+Checks whether an index should be ignored in stats manipulations:
+* stats fetch
+* stats recalc
+* stats save
+dict_stats_should_ignore_index() @{
+@return true if exists and all tables are ok */
+UNIV_INLINE
+bool
+dict_stats_should_ignore_index(
+/*===========================*/
+ const dict_index_t* index) /*!< in: index */
{
- dict_index_t* index;
- ulint sum_of_index_sizes = 0;
-
- /* Find out the sizes of the indexes and how many different values
- for the key they approximately have */
-
- index = dict_table_get_first_index(table);
-
- if (index == NULL) {
- /* Table definition is corrupt */
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: table %s has no indexes. "
- "Cannot calculate statistics.\n", table->name);
- return;
- }
-
- do {
-
- if (index->type & DICT_FTS) {
- index = dict_table_get_next_index(index);
- continue;
- }
-
- if (UNIV_LIKELY
- (srv_force_recovery < SRV_FORCE_NO_IBUF_MERGE
- || (srv_force_recovery < SRV_FORCE_NO_LOG_REDO
- && dict_index_is_clust(index)))) {
- mtr_t mtr;
- ulint size;
-
- mtr_start(&mtr);
- mtr_s_lock(dict_index_get_lock(index), &mtr);
-
- size = btr_get_size(index, BTR_TOTAL_SIZE, &mtr);
-
- if (size != ULINT_UNDEFINED) {
- index->stat_index_size = size;
-
- size = btr_get_size(
- index, BTR_N_LEAF_PAGES, &mtr);
- }
-
- mtr_commit(&mtr);
-
- switch (size) {
- case ULINT_UNDEFINED:
- goto fake_statistics;
- case 0:
- /* The root node of the tree is a leaf */
- size = 1;
- }
-
- sum_of_index_sizes += index->stat_index_size;
-
- index->stat_n_leaf_pages = size;
-
- btr_estimate_number_of_different_key_vals(index);
- } else {
- /* If we have set a high innodb_force_recovery
- level, do not calculate statistics, as a badly
- corrupted index can cause a crash in it.
- Initialize some bogus index cardinality
- statistics, so that the data can be queried in
- various means, also via secondary indexes. */
- ulint i;
-
-fake_statistics:
- sum_of_index_sizes++;
- index->stat_index_size = index->stat_n_leaf_pages = 1;
-
- for (i = dict_index_get_n_unique(index); i; ) {
- index->stat_n_diff_key_vals[i--] = 1;
- }
-
- memset(index->stat_n_non_null_key_vals, 0,
- (1 + dict_index_get_n_unique(index))
- * sizeof(*index->stat_n_non_null_key_vals));
- }
-
- index = dict_table_get_next_index(index);
- } while (index);
-
- index = dict_table_get_first_index(table);
-
- table->stat_n_rows = index->stat_n_diff_key_vals[
- dict_index_get_n_unique(index)];
-
- table->stat_clustered_index_size = index->stat_index_size;
-
- table->stat_sum_of_other_index_sizes = sum_of_index_sizes
- - index->stat_index_size;
-
- table->stat_modified_counter = 0;
-
- table->stat_initialized = TRUE;
+ return((index->type & DICT_FTS)
+ || dict_index_is_corrupted(index)
+ || index->to_be_dropped
+ || *index->name == TEMP_INDEX_PREFIX);
}
/* @} */
@@ -251,21 +159,21 @@ fake_statistics:
Checks whether the persistent statistics storage exists and that all
tables have the proper structure.
dict_stats_persistent_storage_check() @{
-@return TRUE if exists and all tables are ok */
+@return true if exists and all tables are ok */
static
-ibool
+bool
dict_stats_persistent_storage_check(
/*================================*/
- ibool caller_has_dict_sys_mutex) /*!< in: TRUE if the caller
+ bool caller_has_dict_sys_mutex) /*!< in: true if the caller
owns dict_sys->mutex */
{
/* definition for the table TABLE_STATS_NAME */
dict_col_meta_t table_stats_columns[] = {
{"database_name", DATA_VARMYSQL,
- DATA_NOT_NULL, 192 /* NAME_LEN from mysql_com.h */},
+ DATA_NOT_NULL, 192},
{"table_name", DATA_VARMYSQL,
- DATA_NOT_NULL, 192 /* NAME_LEN from mysql_com.h */},
+ DATA_NOT_NULL, 192},
{"last_update", DATA_INT,
DATA_NOT_NULL | DATA_UNSIGNED, 4},
@@ -282,19 +190,21 @@ dict_stats_persistent_storage_check(
dict_table_schema_t table_stats_schema = {
TABLE_STATS_NAME,
UT_ARR_SIZE(table_stats_columns),
- table_stats_columns
+ table_stats_columns,
+ 0 /* n_foreign */,
+ 0 /* n_referenced */
};
/* definition for the table INDEX_STATS_NAME */
dict_col_meta_t index_stats_columns[] = {
{"database_name", DATA_VARMYSQL,
- DATA_NOT_NULL, 192 /* NAME_LEN from mysql_com.h */},
+ DATA_NOT_NULL, 192},
{"table_name", DATA_VARMYSQL,
- DATA_NOT_NULL, 192 /* NAME_LEN from mysql_com.h */},
+ DATA_NOT_NULL, 192},
{"index_name", DATA_VARMYSQL,
- DATA_NOT_NULL, 192 /* NAME_LEN from mysql_com.h */},
+ DATA_NOT_NULL, 192},
{"last_update", DATA_INT,
DATA_NOT_NULL | DATA_UNSIGNED, 4},
@@ -314,11 +224,13 @@ dict_stats_persistent_storage_check(
dict_table_schema_t index_stats_schema = {
INDEX_STATS_NAME,
UT_ARR_SIZE(index_stats_columns),
- index_stats_columns
+ index_stats_columns,
+ 0 /* n_foreign */,
+ 0 /* n_referenced */
};
char errstr[512];
- enum db_err ret;
+ dberr_t ret;
if (!caller_has_dict_sys_mutex) {
mutex_enter(&(dict_sys->mutex));
@@ -339,24 +251,660 @@ dict_stats_persistent_storage_check(
mutex_exit(&(dict_sys->mutex));
}
- if (ret != DB_SUCCESS && ret != DB_TABLE_NOT_FOUND) {
+ if (ret != DB_SUCCESS) {
+ ut_print_timestamp(stderr);
+ fprintf(stderr, " InnoDB: Error: %s\n", errstr);
+ return(false);
+ }
+ /* else */
+
+ return(true);
+}
+/* @} */
+
+/*********************************************************************//**
+Executes a given SQL statement using the InnoDB internal SQL parser
+in its own transaction and commits it.
+This function will free the pinfo object.
+@return DB_SUCCESS or error code */
+static
+dberr_t
+dict_stats_exec_sql(
+/*================*/
+ pars_info_t* pinfo, /*!< in/out: pinfo to pass to que_eval_sql()
+ must already have any literals bound to it */
+ const char* sql) /*!< in: SQL string to execute */
+{
+ trx_t* trx;
+ dberr_t err;
+
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+ ut_ad(mutex_own(&dict_sys->mutex));
+
+ if (!dict_stats_persistent_storage_check(true)) {
+ pars_info_free(pinfo);
+ return(DB_STATS_DO_NOT_EXIST);
+ }
+
+ trx = trx_allocate_for_background();
+ trx_start_if_not_started(trx);
+
+ err = que_eval_sql(pinfo, sql, FALSE, trx); /* pinfo is freed here */
+
+ if (err == DB_SUCCESS) {
+ trx_commit_for_mysql(trx);
+ } else {
+ trx->op_info = "rollback of internal trx on stats tables";
+ trx->dict_operation_lock_mode = RW_X_LATCH;
+ trx_rollback_to_savepoint(trx, NULL);
+ trx->dict_operation_lock_mode = 0;
+ trx->op_info = "";
+ ut_a(trx->error_state == DB_SUCCESS);
+ }
+
+ trx_free_for_background(trx);
+
+ return(err);
+}
+
+/*********************************************************************//**
+Duplicate a table object and its indexes.
+This function creates a dummy dict_table_t object and initializes the
+following table and index members:
+dict_table_t::id (copied)
+dict_table_t::heap (newly created)
+dict_table_t::name (copied)
+dict_table_t::corrupted (copied)
+dict_table_t::indexes<> (newly created)
+dict_table_t::magic_n
+for each entry in dict_table_t::indexes, the following are initialized:
+(indexes that have DICT_FTS set in index->type are skipped)
+dict_index_t::id (copied)
+dict_index_t::name (copied)
+dict_index_t::table_name (points to the copied table name)
+dict_index_t::table (points to the above semi-initialized object)
+dict_index_t::type (copied)
+dict_index_t::to_be_dropped (copied)
+dict_index_t::online_status (copied)
+dict_index_t::n_uniq (copied)
+dict_index_t::fields[] (newly created, only first n_uniq, only fields[i].name)
+dict_index_t::indexes<> (newly created)
+dict_index_t::stat_n_diff_key_vals[] (only allocated, left uninitialized)
+dict_index_t::stat_n_sample_sizes[] (only allocated, left uninitialized)
+dict_index_t::stat_n_non_null_key_vals[] (only allocated, left uninitialized)
+dict_index_t::magic_n
+The returned object should be freed with dict_stats_table_clone_free()
+when no longer needed.
+@return incomplete table object */
+static
+dict_table_t*
+dict_stats_table_clone_create(
+/*==========================*/
+ const dict_table_t* table) /*!< in: table whose stats to copy */
+{
+ size_t heap_size;
+ dict_index_t* index;
+
+ /* Estimate the size needed for the table and all of its indexes */
+
+ heap_size = 0;
+ heap_size += sizeof(dict_table_t);
+ heap_size += strlen(table->name) + 1;
+
+ for (index = dict_table_get_first_index(table);
+ index != NULL;
+ index = dict_table_get_next_index(index)) {
+
+ if (dict_stats_should_ignore_index(index)) {
+ continue;
+ }
+
+ ut_ad(!dict_index_is_univ(index));
+
+ ulint n_uniq = dict_index_get_n_unique(index);
+
+ heap_size += sizeof(dict_index_t);
+ heap_size += strlen(index->name) + 1;
+ heap_size += n_uniq * sizeof(index->fields[0]);
+ for (ulint i = 0; i < n_uniq; i++) {
+ heap_size += strlen(index->fields[i].name) + 1;
+ }
+ heap_size += n_uniq * sizeof(index->stat_n_diff_key_vals[0]);
+ heap_size += n_uniq * sizeof(index->stat_n_sample_sizes[0]);
+ heap_size += n_uniq * sizeof(index->stat_n_non_null_key_vals[0]);
+ }
+
+ /* Allocate the memory and copy the members */
+
+ mem_heap_t* heap;
+
+ heap = mem_heap_create(heap_size);
+
+ dict_table_t* t;
+
+ t = (dict_table_t*) mem_heap_alloc(heap, sizeof(*t));
+
+ UNIV_MEM_ASSERT_RW_ABORT(&table->id, sizeof(table->id));
+ t->id = table->id;
+
+ t->heap = heap;
+
+ UNIV_MEM_ASSERT_RW_ABORT(table->name, strlen(table->name) + 1);
+ t->name = (char*) mem_heap_strdup(heap, table->name);
+
+ t->corrupted = table->corrupted;
+
+ UT_LIST_INIT(t->indexes);
+
+ for (index = dict_table_get_first_index(table);
+ index != NULL;
+ index = dict_table_get_next_index(index)) {
+
+ if (dict_stats_should_ignore_index(index)) {
+ continue;
+ }
+
+ ut_ad(!dict_index_is_univ(index));
+
+ dict_index_t* idx;
+
+ idx = (dict_index_t*) mem_heap_alloc(heap, sizeof(*idx));
+
+ UNIV_MEM_ASSERT_RW_ABORT(&index->id, sizeof(index->id));
+ idx->id = index->id;
+
+ UNIV_MEM_ASSERT_RW_ABORT(index->name, strlen(index->name) + 1);
+ idx->name = (char*) mem_heap_strdup(heap, index->name);
+
+ idx->table_name = t->name;
+
+ idx->table = t;
+
+ idx->type = index->type;
+
+ idx->to_be_dropped = 0;
+
+ idx->online_status = ONLINE_INDEX_COMPLETE;
+
+ idx->n_uniq = index->n_uniq;
+
+ idx->fields = (dict_field_t*) mem_heap_alloc(
+ heap, idx->n_uniq * sizeof(idx->fields[0]));
+
+ for (ulint i = 0; i < idx->n_uniq; i++) {
+ UNIV_MEM_ASSERT_RW_ABORT(index->fields[i].name, strlen(index->fields[i].name) + 1);
+ idx->fields[i].name = (char*) mem_heap_strdup(
+ heap, index->fields[i].name);
+ }
+
+ /* hook idx into t->indexes */
+ UT_LIST_ADD_LAST(indexes, t->indexes, idx);
+
+ idx->stat_n_diff_key_vals = (ib_uint64_t*) mem_heap_alloc(
+ heap,
+ idx->n_uniq * sizeof(idx->stat_n_diff_key_vals[0]));
+
+ idx->stat_n_sample_sizes = (ib_uint64_t*) mem_heap_alloc(
+ heap,
+ idx->n_uniq * sizeof(idx->stat_n_sample_sizes[0]));
+
+ idx->stat_n_non_null_key_vals = (ib_uint64_t*) mem_heap_alloc(
+ heap,
+ idx->n_uniq * sizeof(idx->stat_n_non_null_key_vals[0]));
+ ut_d(idx->magic_n = DICT_INDEX_MAGIC_N);
+ }
+
+ ut_d(t->magic_n = DICT_TABLE_MAGIC_N);
+
+ return(t);
+}
+
+/*********************************************************************//**
+Free the resources occupied by an object returned by
+dict_stats_table_clone_create().
+dict_stats_table_clone_free() @{ */
+static
+void
+dict_stats_table_clone_free(
+/*========================*/
+ dict_table_t* t) /*!< in: dummy table object to free */
+{
+ mem_heap_free(t->heap);
+}
+/* @} */
+
+/*********************************************************************//**
+Write all zeros (or 1 where it makes sense) into an index
+statistics members. The resulting stats correspond to an empty index.
+The caller must own index's table stats latch in X mode
+(dict_table_stats_lock(table, RW_X_LATCH))
+dict_stats_empty_index() @{ */
+static
+void
+dict_stats_empty_index(
+/*===================*/
+ dict_index_t* index) /*!< in/out: index */
+{
+ ut_ad(!(index->type & DICT_FTS));
+ ut_ad(!dict_index_is_univ(index));
+
+ ulint n_uniq = index->n_uniq;
+
+ for (ulint i = 0; i < n_uniq; i++) {
+ index->stat_n_diff_key_vals[i] = 0;
+ index->stat_n_sample_sizes[i] = 1;
+ index->stat_n_non_null_key_vals[i] = 0;
+ }
+
+ index->stat_index_size = 1;
+ index->stat_n_leaf_pages = 1;
+}
+/* @} */
+
+/*********************************************************************//**
+Write all zeros (or 1 where it makes sense) into a table and its indexes'
+statistics members. The resulting stats correspond to an empty table.
+dict_stats_empty_table() @{ */
+static
+void
+dict_stats_empty_table(
+/*===================*/
+ dict_table_t* table) /*!< in/out: table */
+{
+ /* Zero the stats members */
+
+ dict_table_stats_lock(table, RW_X_LATCH);
+
+ table->stat_n_rows = 0;
+ table->stat_clustered_index_size = 1;
+ /* 1 page for each index, not counting the clustered */
+ table->stat_sum_of_other_index_sizes
+ = UT_LIST_GET_LEN(table->indexes) - 1;
+ table->stat_modified_counter = 0;
+
+ dict_index_t* index;
+
+ for (index = dict_table_get_first_index(table);
+ index != NULL;
+ index = dict_table_get_next_index(index)) {
+
+ if (index->type & DICT_FTS) {
+ continue;
+ }
+
+ ut_ad(!dict_index_is_univ(index));
+
+ dict_stats_empty_index(index);
+ }
+
+ table->stat_initialized = TRUE;
+
+ dict_table_stats_unlock(table, RW_X_LATCH);
+}
+/* @} */
+
+/*********************************************************************//**
+Check whether index's stats are initialized (assert if they are not). */
+static
+void
+dict_stats_assert_initialized_index(
+/*================================*/
+ const dict_index_t* index) /*!< in: index */
+{
+ UNIV_MEM_ASSERT_RW_ABORT(
+ index->stat_n_diff_key_vals,
+ index->n_uniq * sizeof(index->stat_n_diff_key_vals[0]));
+
+ UNIV_MEM_ASSERT_RW_ABORT(
+ index->stat_n_sample_sizes,
+ index->n_uniq * sizeof(index->stat_n_sample_sizes[0]));
+
+ UNIV_MEM_ASSERT_RW_ABORT(
+ index->stat_n_non_null_key_vals,
+ index->n_uniq * sizeof(index->stat_n_non_null_key_vals[0]));
+
+ UNIV_MEM_ASSERT_RW_ABORT(
+ &index->stat_index_size,
+ sizeof(index->stat_index_size));
+
+ UNIV_MEM_ASSERT_RW_ABORT(
+ &index->stat_n_leaf_pages,
+ sizeof(index->stat_n_leaf_pages));
+}
+/*********************************************************************//**
+Check whether table's stats are initialized (assert if they are not). */
+static
+void
+dict_stats_assert_initialized(
+/*==========================*/
+ const dict_table_t* table) /*!< in: table */
+{
+ ut_a(table->stat_initialized);
+
+ UNIV_MEM_ASSERT_RW_ABORT(&table->stats_last_recalc,
+ sizeof(table->stats_last_recalc));
+
+ UNIV_MEM_ASSERT_RW_ABORT(&table->stat_persistent,
+ sizeof(table->stat_persistent));
+
+ UNIV_MEM_ASSERT_RW_ABORT(&table->stats_auto_recalc,
+ sizeof(table->stats_auto_recalc));
+
+ UNIV_MEM_ASSERT_RW_ABORT(&table->stats_sample_pages,
+ sizeof(table->stats_sample_pages));
+
+ UNIV_MEM_ASSERT_RW_ABORT(&table->stat_n_rows,
+ sizeof(table->stat_n_rows));
+
+ UNIV_MEM_ASSERT_RW_ABORT(&table->stat_clustered_index_size,
+ sizeof(table->stat_clustered_index_size));
+
+ UNIV_MEM_ASSERT_RW_ABORT(&table->stat_sum_of_other_index_sizes,
+ sizeof(table->stat_sum_of_other_index_sizes));
+
+ UNIV_MEM_ASSERT_RW_ABORT(&table->stat_modified_counter,
+ sizeof(table->stat_modified_counter));
+
+ UNIV_MEM_ASSERT_RW_ABORT(&table->stats_bg_flag,
+ sizeof(table->stats_bg_flag));
+
+ for (dict_index_t* index = dict_table_get_first_index(table);
+ index != NULL;
+ index = dict_table_get_next_index(index)) {
+
+ if (!dict_stats_should_ignore_index(index)) {
+ dict_stats_assert_initialized_index(index);
+ }
+ }
+}
+
+#define INDEX_EQ(i1, i2) \
+ ((i1) != NULL \
+ && (i2) != NULL \
+ && (i1)->id == (i2)->id \
+ && strcmp((i1)->name, (i2)->name) == 0)
+/*********************************************************************//**
+Copy table and index statistics from one table to another, including index
+stats. Extra indexes in src are ignored and extra indexes in dst are
+initialized to correspond to an empty index. */
+static
+void
+dict_stats_copy(
+/*============*/
+ dict_table_t* dst, /*!< in/out: destination table */
+ const dict_table_t* src) /*!< in: source table */
+{
+ dst->stats_last_recalc = src->stats_last_recalc;
+ dst->stat_n_rows = src->stat_n_rows;
+ dst->stat_clustered_index_size = src->stat_clustered_index_size;
+ dst->stat_sum_of_other_index_sizes = src->stat_sum_of_other_index_sizes;
+ dst->stat_modified_counter = src->stat_modified_counter;
+
+ dict_index_t* dst_idx;
+ dict_index_t* src_idx;
+
+ for (dst_idx = dict_table_get_first_index(dst),
+ src_idx = dict_table_get_first_index(src);
+ dst_idx != NULL;
+ dst_idx = dict_table_get_next_index(dst_idx),
+ (src_idx != NULL
+ && (src_idx = dict_table_get_next_index(src_idx)))) {
+
+ if (dict_stats_should_ignore_index(dst_idx)) {
+ continue;
+ }
+
+ ut_ad(!dict_index_is_univ(dst_idx));
+
+ if (!INDEX_EQ(src_idx, dst_idx)) {
+ for (src_idx = dict_table_get_first_index(src);
+ src_idx != NULL;
+ src_idx = dict_table_get_next_index(src_idx)) {
+
+ if (INDEX_EQ(src_idx, dst_idx)) {
+ break;
+ }
+ }
+ }
+
+ if (!INDEX_EQ(src_idx, dst_idx)) {
+ dict_stats_empty_index(dst_idx);
+ continue;
+ }
+
+ ulint n_copy_el;
+
+ if (dst_idx->n_uniq > src_idx->n_uniq) {
+ n_copy_el = src_idx->n_uniq;
+ /* Since src is smaller some elements in dst
+ will remain untouched by the following memmove(),
+ thus we init all of them here. */
+ dict_stats_empty_index(dst_idx);
+ } else {
+ n_copy_el = dst_idx->n_uniq;
+ }
+
+ memmove(dst_idx->stat_n_diff_key_vals,
+ src_idx->stat_n_diff_key_vals,
+ n_copy_el * sizeof(dst_idx->stat_n_diff_key_vals[0]));
+
+ memmove(dst_idx->stat_n_sample_sizes,
+ src_idx->stat_n_sample_sizes,
+ n_copy_el * sizeof(dst_idx->stat_n_sample_sizes[0]));
+
+ memmove(dst_idx->stat_n_non_null_key_vals,
+ src_idx->stat_n_non_null_key_vals,
+ n_copy_el * sizeof(dst_idx->stat_n_non_null_key_vals[0]));
+
+ dst_idx->stat_index_size = src_idx->stat_index_size;
+
+ dst_idx->stat_n_leaf_pages = src_idx->stat_n_leaf_pages;
+ }
+
+ dst->stat_initialized = TRUE;
+}
+
+/*********************************************************************//**
+Duplicate the stats of a table and its indexes.
+This function creates a dummy dict_table_t object and copies the input
+table's stats into it. The returned table object is not in the dictionary
+cache and cannot be accessed by any other threads. In addition to the
+members copied in dict_stats_table_clone_create() this function initializes
+the following:
+dict_table_t::stat_initialized
+dict_table_t::stat_persistent
+dict_table_t::stat_n_rows
+dict_table_t::stat_clustered_index_size
+dict_table_t::stat_sum_of_other_index_sizes
+dict_table_t::stat_modified_counter
+dict_index_t::stat_n_diff_key_vals[]
+dict_index_t::stat_n_sample_sizes[]
+dict_index_t::stat_n_non_null_key_vals[]
+dict_index_t::stat_index_size
+dict_index_t::stat_n_leaf_pages
+The returned object should be freed with dict_stats_snapshot_free()
+when no longer needed.
+@return incomplete table object */
+static
+dict_table_t*
+dict_stats_snapshot_create(
+/*=======================*/
+ const dict_table_t* table) /*!< in: table whose stats to copy */
+{
+ mutex_enter(&dict_sys->mutex);
+
+ dict_table_stats_lock(table, RW_S_LATCH);
+
+ dict_stats_assert_initialized(table);
+
+ dict_table_t* t;
+
+ t = dict_stats_table_clone_create(table);
+
+ dict_stats_copy(t, table);
+
+ t->stat_persistent = table->stat_persistent;
+ t->stats_auto_recalc = table->stats_auto_recalc;
+ t->stats_sample_pages = table->stats_sample_pages;
+ t->stats_bg_flag = table->stats_bg_flag;
+
+ dict_table_stats_unlock(table, RW_S_LATCH);
+
+ mutex_exit(&dict_sys->mutex);
+
+ return(t);
+}
+
+/*********************************************************************//**
+Free the resources occupied by an object returned by
+dict_stats_snapshot_create().
+dict_stats_snapshot_free() @{ */
+static
+void
+dict_stats_snapshot_free(
+/*=====================*/
+ dict_table_t* t) /*!< in: dummy table object to free */
+{
+ dict_stats_table_clone_free(t);
+}
+/* @} */
+
+/*********************************************************************//**
+Calculates new estimates for index statistics. This function is
+relatively quick and is used to calculate transient statistics that
+are not saved on disk. This was the only way to calculate statistics
+before the Persistent Statistics feature was introduced.
+dict_stats_update_transient_for_index() @{ */
+static
+void
+dict_stats_update_transient_for_index(
+/*==================================*/
+ dict_index_t* index) /*!< in/out: index */
+{
+ if (UNIV_LIKELY
+ (srv_force_recovery < SRV_FORCE_NO_IBUF_MERGE
+ || (srv_force_recovery < SRV_FORCE_NO_LOG_REDO
+ && dict_index_is_clust(index)))) {
+ mtr_t mtr;
+ ulint size;
+ mtr_start(&mtr);
+ mtr_s_lock(dict_index_get_lock(index), &mtr);
+
+ size = btr_get_size(index, BTR_TOTAL_SIZE, &mtr);
+
+ if (size != ULINT_UNDEFINED) {
+ index->stat_index_size = size;
+
+ size = btr_get_size(
+ index, BTR_N_LEAF_PAGES, &mtr);
+ }
+
+ mtr_commit(&mtr);
+
+ switch (size) {
+ case ULINT_UNDEFINED:
+ dict_stats_empty_index(index);
+ return;
+ case 0:
+ /* The root node of the tree is a leaf */
+ size = 1;
+ }
+
+ index->stat_n_leaf_pages = size;
+
+ btr_estimate_number_of_different_key_vals(index);
+ } else {
+ /* If we have set a high innodb_force_recovery
+ level, do not calculate statistics, as a badly
+ corrupted index can cause a crash in it.
+ Initialize some bogus index cardinality
+ statistics, so that the data can be queried in
+ various means, also via secondary indexes. */
+ dict_stats_empty_index(index);
+ }
+}
+/* @} */
+
+/*********************************************************************//**
+Calculates new estimates for table and index statistics. This function
+is relatively quick and is used to calculate transient statistics that
+are not saved on disk.
+This was the only way to calculate statistics before the
+Persistent Statistics feature was introduced.
+dict_stats_update_transient() @{ */
+UNIV_INTERN
+void
+dict_stats_update_transient(
+/*========================*/
+ dict_table_t* table) /*!< in/out: table */
+{
+ dict_index_t* index;
+ ulint sum_of_index_sizes = 0;
+
+ /* Find out the sizes of the indexes and how many different values
+ for the key they approximately have */
+
+ index = dict_table_get_first_index(table);
+
+ if (dict_table_is_discarded(table)) {
+ /* Nothing to do. */
+ dict_stats_empty_table(table);
+ return;
+ } else if (index == NULL) {
+ /* Table definition is corrupt */
+
+ char buf[MAX_FULL_NAME_LEN];
ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: %s\n", errstr);
+ fprintf(stderr, " InnoDB: table %s has no indexes. "
+ "Cannot calculate statistics.\n",
+ ut_format_name(table->name, TRUE, buf, sizeof(buf)));
+ dict_stats_empty_table(table);
+ return;
+ }
+
+ for (; index != NULL; index = dict_table_get_next_index(index)) {
+
+ ut_ad(!dict_index_is_univ(index));
+
+ if (index->type & DICT_FTS) {
+ continue;
+ }
+
+ dict_stats_empty_index(index);
+
+ if (dict_stats_should_ignore_index(index)) {
+ continue;
+ }
+
+ dict_stats_update_transient_for_index(index);
+
+ sum_of_index_sizes += index->stat_index_size;
}
- /* We return silently if some of the tables are not present because
- this code is executed during open table. By design we check if the
- persistent statistics storage is present and whether there are stats
- for the table being opened and if so, then we use them, otherwise we
- silently switch back to using the transient stats. */
- return(ret == DB_SUCCESS);
+ index = dict_table_get_first_index(table);
+
+ table->stat_n_rows = index->stat_n_diff_key_vals[
+ dict_index_get_n_unique(index) - 1];
+
+ table->stat_clustered_index_size = index->stat_index_size;
+
+ table->stat_sum_of_other_index_sizes = sum_of_index_sizes
+ - index->stat_index_size;
+
+ table->stats_last_recalc = ut_time();
+
+ table->stat_modified_counter = 0;
+
+ table->stat_initialized = TRUE;
}
/* @} */
/* @{ Pseudo code about the relation between the following functions
-let N = srv_stats_persistent_sample_pages
+let N = N_SAMPLE_PAGES(index)
dict_stats_analyze_index()
for each n_prefix
@@ -375,14 +923,11 @@ dict_stats_analyze_index()
/*********************************************************************//**
Find the total number and the number of distinct keys on a given level in
an index. Each of the 1..n_uniq prefixes are looked up and the results are
-saved in the array n_diff[]. Notice that n_diff[] must be able to store
-n_uniq+1 numbers because the results are saved in
-n_diff[1] .. n_diff[n_uniq]. The total number of records on the level is
-saved in total_recs.
+saved in the array n_diff[0] .. n_diff[n_uniq - 1]. The total number of
+records on the level is saved in total_recs.
Also, the index of the last record in each group of equal records is saved
-in n_diff_boundaries[1..n_uniq], records indexing starts from the leftmost
-record on the level and continues cross pages boundaries, counting from 0.
-dict_stats_analyze_index_level() @{ */
+in n_diff_boundaries[0..n_uniq - 1], records indexing starts from the leftmost
+record on the level and continues cross pages boundaries, counting from 0. */
static
void
dict_stats_analyze_index_level(
@@ -393,78 +938,87 @@ dict_stats_analyze_index_level(
distinct keys for all prefixes */
ib_uint64_t* total_recs, /*!< out: total number of records */
ib_uint64_t* total_pages, /*!< out: total number of pages */
- dyn_array_t* n_diff_boundaries)/*!< out: boundaries of the groups
+ dyn_array_t* n_diff_boundaries,/*!< out: boundaries of the groups
of distinct keys */
+ mtr_t* mtr) /*!< in/out: mini-transaction */
{
ulint n_uniq;
mem_heap_t* heap;
- dtuple_t* dtuple;
btr_pcur_t pcur;
- mtr_t mtr;
const page_t* page;
const rec_t* rec;
const rec_t* prev_rec;
+ bool prev_rec_is_copied;
byte* prev_rec_buf = NULL;
ulint prev_rec_buf_size = 0;
+ ulint* rec_offsets;
+ ulint* prev_rec_offsets;
ulint i;
DEBUG_PRINTF(" %s(table=%s, index=%s, level=%lu)\n", __func__,
index->table->name, index->name, level);
+ ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index),
+ MTR_MEMO_S_LOCK));
+
n_uniq = dict_index_get_n_unique(index);
- /* elements in the n_diff array are 1..n_uniq (inclusive) */
- memset(n_diff, 0x0, (n_uniq + 1) * sizeof(*n_diff));
+ /* elements in the n_diff array are 0..n_uniq-1 (inclusive) */
+ memset(n_diff, 0x0, n_uniq * sizeof(n_diff[0]));
+
+ /* Allocate space for the offsets header (the allocation size at
+ offsets[0] and the REC_OFFS_HEADER_SIZE bytes), and n_fields + 1,
+ so that this will never be less than the size calculated in
+ rec_get_offsets_func(). */
+ i = (REC_OFFS_HEADER_SIZE + 1 + 1) + index->n_fields;
- heap = mem_heap_create(256);
+ heap = mem_heap_create((2 * sizeof *rec_offsets) * i);
+ rec_offsets = static_cast<ulint*>(
+ mem_heap_alloc(heap, i * sizeof *rec_offsets));
+ prev_rec_offsets = static_cast<ulint*>(
+ mem_heap_alloc(heap, i * sizeof *prev_rec_offsets));
+ rec_offs_set_n_alloc(rec_offsets, i);
+ rec_offs_set_n_alloc(prev_rec_offsets, i);
- /* reset the dynamic arrays n_diff_boundaries[1..n_uniq];
- n_diff_boundaries[0] is ignored to follow the same convention
- as n_diff[] */
+ /* reset the dynamic arrays n_diff_boundaries[0..n_uniq-1] */
if (n_diff_boundaries != NULL) {
- for (i = 1; i <= n_uniq; i++) {
+ for (i = 0; i < n_uniq; i++) {
dyn_array_free(&n_diff_boundaries[i]);
dyn_array_create(&n_diff_boundaries[i]);
}
}
- /* craft a record that is always smaller than the others,
- this way we are sure that the cursor pcur will be positioned
- on the leftmost record on the leftmost page on the desired level */
- dtuple = dtuple_create(heap, dict_index_get_n_unique(index));
- dict_table_copy_types(dtuple, index->table);
- dtuple_set_info_bits(dtuple, REC_INFO_MIN_REC_FLAG);
-
- mtr_start(&mtr);
+ /* Position pcur on the leftmost record on the leftmost page
+ on the desired level. */
- btr_pcur_open_low(index, level, dtuple, PAGE_CUR_LE, BTR_SEARCH_LEAF,
- &pcur, __FILE__, __LINE__, &mtr);
+ btr_pcur_open_at_index_side(
+ true, index, BTR_SEARCH_LEAF | BTR_ALREADY_S_LATCHED,
+ &pcur, true, level, mtr);
+ btr_pcur_move_to_next_on_page(&pcur);
page = btr_pcur_get_page(&pcur);
+ /* The page must not be empty, except when
+ it is the root page (and the whole index is empty). */
+ ut_ad(btr_pcur_is_on_user_rec(&pcur) || page_is_leaf(page));
+ ut_ad(btr_pcur_get_rec(&pcur)
+ == page_rec_get_next_const(page_get_infimum_rec(page)));
+
/* check that we are indeed on the desired level */
- ut_a(btr_page_get_level(page, &mtr) == level);
+ ut_a(btr_page_get_level(page, mtr) == level);
/* there should not be any pages on the left */
- ut_a(btr_page_get_prev(page, &mtr) == FIL_NULL);
+ ut_a(btr_page_get_prev(page, mtr) == FIL_NULL);
/* check whether the first record on the leftmost page is marked
as such, if we are on a non-leaf level */
- ut_a(level == 0
- || (REC_INFO_MIN_REC_FLAG & rec_get_info_bits(
- page_rec_get_next_const(page_get_infimum_rec(page)),
- page_is_comp(page))));
-
- if (btr_pcur_is_before_first_on_page(&pcur)) {
- btr_pcur_move_to_next_on_page(&pcur);
- }
-
- if (btr_pcur_is_after_last_on_page(&pcur)) {
- btr_pcur_move_to_prev_on_page(&pcur);
- }
+ ut_a((level == 0)
+ == !(REC_INFO_MIN_REC_FLAG & rec_get_info_bits(
+ btr_pcur_get_rec(&pcur), page_is_comp(page))));
prev_rec = NULL;
+ prev_rec_is_copied = false;
/* no records by default */
*total_recs = 0;
@@ -476,56 +1030,83 @@ dict_stats_analyze_index_level(
X and the fist on page X+1 */
for (;
btr_pcur_is_on_user_rec(&pcur);
- btr_pcur_move_to_next_user_rec(&pcur, &mtr)) {
+ btr_pcur_move_to_next_user_rec(&pcur, mtr)) {
ulint matched_fields = 0;
ulint matched_bytes = 0;
- ulint offsets_rec_onstack[REC_OFFS_NORMAL_SIZE];
- ulint* offsets_rec;
-
- rec_offs_init(offsets_rec_onstack);
+ bool rec_is_last_on_page;
rec = btr_pcur_get_rec(&pcur);
+ /* If rec and prev_rec are on different pages, then prev_rec
+ must have been copied, because we hold latch only on the page
+ where rec resides. */
+ if (prev_rec != NULL
+ && page_align(rec) != page_align(prev_rec)) {
+
+ ut_a(prev_rec_is_copied);
+ }
+
+ rec_is_last_on_page =
+ page_rec_is_supremum(page_rec_get_next_const(rec));
+
/* increment the pages counter at the end of each page */
- if (page_rec_is_supremum(page_rec_get_next_const(rec))) {
+ if (rec_is_last_on_page) {
(*total_pages)++;
}
- /* skip delete-marked records */
- if (rec_get_deleted_flag(rec, page_is_comp(
- btr_pcur_get_page(&pcur)))) {
+ /* Skip delete-marked records on the leaf level. If we
+ do not skip them, then ANALYZE quickly after DELETE
+ could count them or not (purge may have already wiped
+ them away) which brings non-determinism. We skip only
+ leaf-level delete marks because delete marks on
+ non-leaf level do not make sense. */
+ if (level == 0 &&
+ rec_get_deleted_flag(
+ rec,
+ page_is_comp(btr_pcur_get_page(&pcur)))) {
+
+ if (rec_is_last_on_page
+ && !prev_rec_is_copied
+ && prev_rec != NULL) {
+ /* copy prev_rec */
+
+ prev_rec_offsets = rec_get_offsets(
+ prev_rec, index, prev_rec_offsets,
+ n_uniq, &heap);
+
+ prev_rec = rec_copy_prefix_to_buf(
+ prev_rec, index,
+ rec_offs_n_fields(prev_rec_offsets),
+ &prev_rec_buf, &prev_rec_buf_size);
+
+ prev_rec_is_copied = true;
+ }
continue;
}
- offsets_rec = rec_get_offsets(rec, index, offsets_rec_onstack,
- n_uniq, &heap);
+ rec_offsets = rec_get_offsets(
+ rec, index, rec_offsets, n_uniq, &heap);
(*total_recs)++;
if (prev_rec != NULL) {
-
- ulint offsets_prev_rec_onstack[REC_OFFS_NORMAL_SIZE];
- ulint* offsets_prev_rec;
-
- rec_offs_init(offsets_prev_rec_onstack);
-
- offsets_prev_rec = rec_get_offsets(
- prev_rec, index, offsets_prev_rec_onstack,
+ prev_rec_offsets = rec_get_offsets(
+ prev_rec, index, prev_rec_offsets,
n_uniq, &heap);
cmp_rec_rec_with_match(rec,
prev_rec,
- offsets_rec,
- offsets_prev_rec,
+ rec_offsets,
+ prev_rec_offsets,
index,
FALSE,
&matched_fields,
&matched_bytes);
- for (i = matched_fields + 1; i <= n_uniq; i++) {
+ for (i = matched_fields; i < n_uniq; i++) {
if (n_diff_boundaries != NULL) {
/* push the index of the previous
@@ -553,17 +1134,18 @@ dict_stats_analyze_index_level(
}
/* increment the number of different keys
- for n_prefix=i */
+ for n_prefix=i+1 (e.g. if i=0 then we increment
+ for n_prefix=1 which is stored in n_diff[0]) */
n_diff[i]++;
}
} else {
/* this is the first non-delete marked record */
- for (i = 1; i <= n_uniq; i++) {
+ for (i = 0; i < n_uniq; i++) {
n_diff[i] = 1;
}
}
- if (page_rec_is_supremum(page_rec_get_next_const(rec))) {
+ if (rec_is_last_on_page) {
/* end of a page has been reached */
/* we need to copy the record instead of assigning
@@ -574,8 +1156,9 @@ dict_stats_analyze_index_level(
btr_pcur_move_to_next_user_rec() will release the
latch on the page that prev_rec is on */
prev_rec = rec_copy_prefix_to_buf(
- rec, index, rec_offs_n_fields(offsets_rec),
+ rec, index, rec_offs_n_fields(rec_offsets),
&prev_rec_buf, &prev_rec_buf_size);
+ prev_rec_is_copied = true;
} else {
/* still on the same page, the next call to
@@ -584,12 +1167,14 @@ dict_stats_analyze_index_level(
instead of copying the records like above */
prev_rec = rec;
+ prev_rec_is_copied = false;
}
}
/* if *total_pages is left untouched then the above loop was not
entered at all and there is one page in the whole tree which is
- empty */
+ empty or the loop was entered but this is level 0, contains one page
+ and all records are delete-marked */
if (*total_pages == 0) {
ut_ad(level == 0);
@@ -605,7 +1190,7 @@ dict_stats_analyze_index_level(
/* remember the index of the last record on the level as the
last one from the last group of equal keys; this holds for
all possible prefixes */
- for (i = 1; i <= n_uniq; i++) {
+ for (i = 0; i < n_uniq; i++) {
void* p;
ib_uint64_t idx;
@@ -619,10 +1204,10 @@ dict_stats_analyze_index_level(
}
/* now in n_diff_boundaries[i] there are exactly n_diff[i] integers,
- for i=1..n_uniq */
+ for i=0..n_uniq-1 */
#ifdef UNIV_STATS_DEBUG
- for (i = 1; i <= n_uniq; i++) {
+ for (i = 0; i < n_uniq; i++) {
DEBUG_PRINTF(" %s(): total recs: " UINT64PF
", total pages: " UINT64PF
@@ -654,9 +1239,11 @@ dict_stats_analyze_index_level(
}
#endif /* UNIV_STATS_DEBUG */
- btr_pcur_close(&pcur);
+ /* Release the latch on the last page, because that is not done by
+ btr_pcur_close(). This function works also for non-leaf pages. */
+ btr_leaf_page_release(btr_pcur_get_block(&pcur), BTR_SEARCH_LEAF, mtr);
- mtr_commit(&mtr);
+ btr_pcur_close(&pcur);
if (prev_rec_buf != NULL) {
@@ -665,15 +1252,16 @@ dict_stats_analyze_index_level(
mem_heap_free(heap);
}
-/* @} */
/* aux enum for controlling the behavior of dict_stats_scan_page() @{ */
-typedef enum page_scan_method_enum {
- COUNT_ALL_NON_BORING, /* scan all records on the given page
- and count the number of distinct ones */
+enum page_scan_method_t {
+ COUNT_ALL_NON_BORING_AND_SKIP_DEL_MARKED,/* scan all records on
+ the given page and count the number of
+ distinct ones, also ignore delete marked
+ records */
QUIT_ON_FIRST_NON_BORING/* quit when the first record that differs
from its right neighbor is found */
-} page_scan_method_t;
+};
/* @} */
/*********************************************************************//**
@@ -715,11 +1303,18 @@ dict_stats_scan_page(
Because offsets1,offsets2 should be big enough,
this memory heap should never be used. */
mem_heap_t* heap = NULL;
+ const rec_t* (*get_next)(const rec_t*);
- rec = page_rec_get_next_const(page_get_infimum_rec(page));
+ if (scan_method == COUNT_ALL_NON_BORING_AND_SKIP_DEL_MARKED) {
+ get_next = page_rec_get_next_non_del_marked;
+ } else {
+ get_next = page_rec_get_next_const;
+ }
+
+ rec = get_next(page_get_infimum_rec(page));
if (page_rec_is_supremum(rec)) {
- /* the page is empty */
+ /* the page is empty or contains only delete-marked records */
*n_diff = 0;
*out_rec = NULL;
return(NULL);
@@ -728,7 +1323,7 @@ dict_stats_scan_page(
offsets_rec = rec_get_offsets(rec, index, offsets_rec,
ULINT_UNDEFINED, &heap);
- next_rec = page_rec_get_next_const(rec);
+ next_rec = get_next(rec);
*n_diff = 1;
@@ -777,7 +1372,8 @@ dict_stats_scan_page(
offsets_rec = offsets_next_rec;
offsets_next_rec = offsets_tmp;
}
- next_rec = page_rec_get_next_const(next_rec);
+
+ next_rec = get_next(next_rec);
}
func_exit:
@@ -814,7 +1410,6 @@ dict_stats_analyze_index_below_cur(
ulint* offsets1;
ulint* offsets2;
ulint* offsets_rec;
- ulint root_height;
ib_uint64_t n_diff; /* the result */
ulint size;
@@ -841,8 +1436,6 @@ dict_stats_analyze_index_below_cur(
rec_offs_set_n_alloc(offsets1, size);
rec_offs_set_n_alloc(offsets2, size);
- root_height = btr_page_get_level(btr_root_get(index, mtr), mtr);
-
space = dict_index_get_space(index);
zip_size = dict_table_zip_size(index->table);
@@ -907,14 +1500,7 @@ dict_stats_analyze_index_below_cur(
offsets_rec = dict_stats_scan_page(
&rec, offsets1, offsets2, index, page, n_prefix,
- COUNT_ALL_NON_BORING, &n_diff);
-
- if (root_height > 0) {
-
- /* empty pages are allowed only if the whole B-tree is empty
- and contains a single empty page */
- ut_a(offsets_rec != NULL);
- }
+ COUNT_ALL_NON_BORING_AND_SKIP_DEL_MARKED, &n_diff);
#if 0
DEBUG_PRINTF(" %s(): n_diff below page_no=%lu: " UINT64PF "\n",
@@ -928,42 +1514,40 @@ dict_stats_analyze_index_below_cur(
/* @} */
/*********************************************************************//**
-For a given level in an index select srv_stats_persistent_sample_pages
+For a given level in an index select N_SAMPLE_PAGES(index)
(or less) records from that level and dive below them to the corresponding
leaf pages, then scan those leaf pages and save the sampling results in
-index->stat_n_diff_key_vals[n_prefix] and the number of pages scanned in
-index->stat_n_sample_sizes[n_prefix].
-dict_stats_analyze_index_for_n_prefix() @{ */
+index->stat_n_diff_key_vals[n_prefix - 1] and the number of pages scanned in
+index->stat_n_sample_sizes[n_prefix - 1]. */
static
void
dict_stats_analyze_index_for_n_prefix(
/*==================================*/
- dict_index_t* index, /*!< in/out: index */
- ulint level, /*!< in: level,
- must be >= 1 */
- ib_uint64_t total_recs_on_level, /*!< in: total number of
- records on the given level */
- ulint n_prefix, /*!< in: look at first
- n_prefix columns when
- comparing records */
- ib_uint64_t n_diff_for_this_prefix, /*!< in: number of distinct
- records on the given level,
- when looking at the first
- n_prefix columns */
- dyn_array_t* boundaries) /*!< in: array that contains
- n_diff_for_this_prefix
- integers each of which
- represents the index (on the
- level, counting from
- left/smallest to right/biggest
- from 0) of the last record
- from each group of distinct
- keys */
+ dict_index_t* index, /*!< in/out: index */
+ ulint level, /*!< in: level, must be >= 1 */
+ ib_uint64_t total_recs_on_level,
+ /*!< in: total number of
+ records on the given level */
+ ulint n_prefix, /*!< in: look at first
+ n_prefix columns when
+ comparing records */
+ ib_uint64_t n_diff_for_this_prefix,
+ /*!< in: number of distinct
+ records on the given level,
+ when looking at the first
+ n_prefix columns */
+ dyn_array_t* boundaries, /*!< in: array that contains
+ n_diff_for_this_prefix
+ integers each of which
+ represents the index (on the
+ level, counting from
+ left/smallest to right/biggest
+ from 0) of the last record
+ from each group of distinct
+ keys */
+ mtr_t* mtr) /*!< in/out: mini-transaction */
{
- mem_heap_t* heap;
- dtuple_t* dtuple;
btr_pcur_t pcur;
- mtr_t mtr;
const page_t* page;
ib_uint64_t rec_idx;
ib_uint64_t last_idx_on_level;
@@ -978,51 +1562,45 @@ dict_stats_analyze_index_for_n_prefix(
n_prefix, n_diff_for_this_prefix);
#endif
+ ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index),
+ MTR_MEMO_S_LOCK));
+
/* if some of those is 0 then this means that there is exactly one
page in the B-tree and it is empty and we should have done full scan
and should not be here */
ut_ad(total_recs_on_level > 0);
ut_ad(n_diff_for_this_prefix > 0);
- /* this is configured to be min 1, someone has changed the code */
- ut_ad(srv_stats_persistent_sample_pages > 0);
+ /* this must be at least 1 */
+ ut_ad(N_SAMPLE_PAGES(index) > 0);
- heap = mem_heap_create(256);
+ /* Position pcur on the leftmost record on the leftmost page
+ on the desired level. */
- /* craft a record that is always smaller than the others,
- this way we are sure that the cursor pcur will be positioned
- on the leftmost record on the leftmost page on the desired level */
- dtuple = dtuple_create(heap, dict_index_get_n_unique(index));
- dict_table_copy_types(dtuple, index->table);
- dtuple_set_info_bits(dtuple, REC_INFO_MIN_REC_FLAG);
-
- mtr_start(&mtr);
-
- btr_pcur_open_low(index, level, dtuple, PAGE_CUR_LE, BTR_SEARCH_LEAF,
- &pcur, __FILE__, __LINE__, &mtr);
+ btr_pcur_open_at_index_side(
+ true, index, BTR_SEARCH_LEAF | BTR_ALREADY_S_LATCHED,
+ &pcur, true, level, mtr);
+ btr_pcur_move_to_next_on_page(&pcur);
page = btr_pcur_get_page(&pcur);
+ /* The page must not be empty, except when
+ it is the root page (and the whole index is empty). */
+ ut_ad(btr_pcur_is_on_user_rec(&pcur) || page_is_leaf(page));
+ ut_ad(btr_pcur_get_rec(&pcur)
+ == page_rec_get_next_const(page_get_infimum_rec(page)));
+
/* check that we are indeed on the desired level */
- ut_a(btr_page_get_level(page, &mtr) == level);
+ ut_a(btr_page_get_level(page, mtr) == level);
/* there should not be any pages on the left */
- ut_a(btr_page_get_prev(page, &mtr) == FIL_NULL);
+ ut_a(btr_page_get_prev(page, mtr) == FIL_NULL);
/* check whether the first record on the leftmost page is marked
as such, if we are on a non-leaf level */
- ut_a(level == 0 || REC_INFO_MIN_REC_FLAG
- & rec_get_info_bits(page_rec_get_next_const(
- page_get_infimum_rec(page)),
- page_is_comp(page)));
-
- if (btr_pcur_is_before_first_on_page(&pcur)) {
- btr_pcur_move_to_next_on_page(&pcur);
- }
-
- if (btr_pcur_is_after_last_on_page(&pcur)) {
- btr_pcur_move_to_prev_on_page(&pcur);
- }
+ ut_a((level == 0)
+ == !(REC_INFO_MIN_REC_FLAG & rec_get_info_bits(
+ btr_pcur_get_rec(&pcur), page_is_comp(page))));
last_idx_on_level = *(ib_uint64_t*) dyn_array_get_element(boundaries,
(ulint) ((n_diff_for_this_prefix - 1) * sizeof(ib_uint64_t)));
@@ -1031,7 +1609,7 @@ dict_stats_analyze_index_for_n_prefix(
n_diff_sum_of_all_analyzed_pages = 0;
- n_recs_to_dive_below = ut_min(srv_stats_persistent_sample_pages,
+ n_recs_to_dive_below = ut_min(N_SAMPLE_PAGES(index),
n_diff_for_this_prefix);
for (i = 0; i < n_recs_to_dive_below; i++) {
@@ -1093,7 +1671,7 @@ dict_stats_analyze_index_for_n_prefix(
while (rec_idx < dive_below_idx
&& btr_pcur_is_on_user_rec(&pcur)) {
- btr_pcur_move_to_next_user_rec(&pcur, &mtr);
+ btr_pcur_move_to_next_user_rec(&pcur, mtr);
rec_idx++;
}
@@ -1107,12 +1685,20 @@ dict_stats_analyze_index_for_n_prefix(
break;
}
+ /* it could be that the tree has changed in such a way that
+ the record under dive_below_idx is the supremum record, in
+ this case rec_idx == dive_below_idx and pcur is positioned
+ on the supremum, we do not want to dive below it */
+ if (!btr_pcur_is_on_user_rec(&pcur)) {
+ break;
+ }
+
ut_a(rec_idx == dive_below_idx);
ib_uint64_t n_diff_on_leaf_page;
n_diff_on_leaf_page = dict_stats_analyze_index_below_cur(
- btr_pcur_get_btr_cur(&pcur), n_prefix, &mtr);
+ btr_pcur_get_btr_cur(&pcur), n_prefix, mtr);
/* We adjust n_diff_on_leaf_page here to avoid counting
one record twice - once as the last on some page and once
@@ -1135,12 +1721,13 @@ dict_stats_analyze_index_for_n_prefix(
n_diff_sum_of_all_analyzed_pages += n_diff_on_leaf_page;
}
- if (n_diff_sum_of_all_analyzed_pages == 0) {
- n_diff_sum_of_all_analyzed_pages = 1;
- }
+ /* n_diff_sum_of_all_analyzed_pages can be 0 here if all the leaf
+ pages sampled contained only delete-marked records. In this case
+ we should assign 0 to index->stat_n_diff_key_vals[n_prefix - 1], which
+ the formula below does. */
/* See REF01 for an explanation of the algorithm */
- index->stat_n_diff_key_vals[n_prefix]
+ index->stat_n_diff_key_vals[n_prefix - 1]
= index->stat_n_leaf_pages
* n_diff_for_this_prefix
@@ -1149,31 +1736,25 @@ dict_stats_analyze_index_for_n_prefix(
* n_diff_sum_of_all_analyzed_pages
/ n_recs_to_dive_below;
- index->stat_n_sample_sizes[n_prefix] = n_recs_to_dive_below;
+ index->stat_n_sample_sizes[n_prefix - 1] = n_recs_to_dive_below;
DEBUG_PRINTF(" %s(): n_diff=" UINT64PF " for n_prefix=%lu "
"(%lu"
" * " UINT64PF " / " UINT64PF
" * " UINT64PF " / " UINT64PF ")\n",
- __func__, index->stat_n_diff_key_vals[n_prefix],
+ __func__, index->stat_n_diff_key_vals[n_prefix - 1],
n_prefix,
index->stat_n_leaf_pages,
n_diff_for_this_prefix, total_recs_on_level,
n_diff_sum_of_all_analyzed_pages, n_recs_to_dive_below);
btr_pcur_close(&pcur);
-
- mtr_commit(&mtr);
-
- mem_heap_free(heap);
}
-/* @} */
/*********************************************************************//**
Calculates new statistics for a given index and saves them to the index
members stat_n_diff_key_vals[], stat_n_sample_sizes[], stat_index_size and
-stat_n_leaf_pages. This function could be slow.
-dict_stats_analyze_index() @{ */
+stat_n_leaf_pages. This function could be slow. */
static
void
dict_stats_analyze_index(
@@ -1182,7 +1763,7 @@ dict_stats_analyze_index(
{
ulint root_level;
ulint level;
- ibool level_is_analyzed;
+ bool level_is_analyzed;
ulint n_uniq;
ulint n_prefix;
ib_uint64_t* n_diff_on_level;
@@ -1191,10 +1772,11 @@ dict_stats_analyze_index(
dyn_array_t* n_diff_boundaries;
mtr_t mtr;
ulint size;
- ulint i;
DEBUG_PRINTF(" %s(index=%s)\n", __func__, index->name);
+ dict_stats_empty_index(index);
+
mtr_start(&mtr);
mtr_s_lock(dict_index_get_lock(index), &mtr);
@@ -1206,19 +1788,12 @@ dict_stats_analyze_index(
size = btr_get_size(index, BTR_N_LEAF_PAGES, &mtr);
}
+ /* Release the X locks on the root page taken by btr_get_size() */
+ mtr_commit(&mtr);
+
switch (size) {
case ULINT_UNDEFINED:
- mtr_commit(&mtr);
- /* Fake some statistics. */
- index->stat_index_size = index->stat_n_leaf_pages = 1;
-
- for (i = dict_index_get_n_unique(index); i; ) {
- index->stat_n_diff_key_vals[i--] = 1;
- }
-
- memset(index->stat_n_non_null_key_vals, 0,
- (1 + dict_index_get_n_unique(index))
- * sizeof(*index->stat_n_non_null_key_vals));
+ dict_stats_assert_initialized_index(index);
return;
case 0:
/* The root node of the tree is a leaf */
@@ -1227,23 +1802,25 @@ dict_stats_analyze_index(
index->stat_n_leaf_pages = size;
- root_level = btr_page_get_level(btr_root_get(index, &mtr), &mtr);
+ mtr_start(&mtr);
+
+ mtr_s_lock(dict_index_get_lock(index), &mtr);
- mtr_commit(&mtr);
+ root_level = btr_height_get(index, &mtr);
n_uniq = dict_index_get_n_unique(index);
- /* if the tree has just one level (and one page) or if the user
- has requested to sample too many pages then do full scan */
+ /* If the tree has just one level (and one page) or if the user
+ has requested to sample too many pages then do full scan.
+
+ For each n-column prefix (for n=1..n_uniq) N_SAMPLE_PAGES(index)
+ will be sampled, so in total N_SAMPLE_PAGES(index) * n_uniq leaf
+ pages will be sampled. If that number is bigger than the total
+ number of leaf pages then do full scan of the leaf level instead
+ since it will be faster and will give better results. */
+
if (root_level == 0
- /* for each n-column prefix (for n=1..n_uniq)
- srv_stats_persistent_sample_pages will be sampled, so in total
- srv_stats_persistent_sample_pages * n_uniq leaf pages will be
- sampled. If that number is bigger than the total number of leaf
- pages then do full scan of the leaf level instead since it will
- be faster and will give better results. */
- || srv_stats_persistent_sample_pages * n_uniq
- > index->stat_n_leaf_pages) {
+ || N_SAMPLE_PAGES(index) * n_uniq > index->stat_n_leaf_pages) {
if (root_level == 0) {
DEBUG_PRINTF(" %s(): just one page, "
@@ -1261,27 +1838,28 @@ dict_stats_analyze_index(
index->stat_n_diff_key_vals,
&total_recs,
&total_pages,
- NULL /*boundaries not needed*/);
+ NULL /* boundaries not needed */,
+ &mtr);
- for (i = 1; i <= n_uniq; i++) {
+ for (ulint i = 0; i < n_uniq; i++) {
index->stat_n_sample_sizes[i] = total_pages;
}
+ mtr_commit(&mtr);
+
+ dict_stats_assert_initialized_index(index);
return;
}
- /* else */
/* set to zero */
- n_diff_on_level = (ib_uint64_t*) mem_zalloc((n_uniq + 1)
- * sizeof(ib_uint64_t));
+ n_diff_on_level = reinterpret_cast<ib_uint64_t*>
+ (mem_zalloc(n_uniq * sizeof(ib_uint64_t)));
- n_diff_boundaries = (dyn_array_t*) mem_alloc((n_uniq + 1)
- * sizeof(dyn_array_t));
+ n_diff_boundaries = reinterpret_cast<dyn_array_t*>
+ (mem_alloc(n_uniq * sizeof(dyn_array_t)));
- for (i = 1; i <= n_uniq; i++) {
- /* initialize the dynamic arrays, the first one
- (index=0) is ignored to follow the same indexing
- scheme as n_diff_on_level[] */
+ for (ulint i = 0; i < n_uniq; i++) {
+ /* initialize the dynamic arrays */
dyn_array_create(&n_diff_boundaries[i]);
}
@@ -1299,25 +1877,42 @@ dict_stats_analyze_index(
So if we find that the first level containing D distinct
keys (on n_prefix columns) is L, we continue from L when
searching for D distinct keys on n_prefix-1 columns. */
- level = (long) root_level;
- level_is_analyzed = FALSE;
+ level = root_level;
+ level_is_analyzed = false;
+
for (n_prefix = n_uniq; n_prefix >= 1; n_prefix--) {
DEBUG_PRINTF(" %s(): searching level with >=%llu "
"distinct records, n_prefix=%lu\n",
- __func__, N_DIFF_REQUIRED, n_prefix);
+ __func__, N_DIFF_REQUIRED(index), n_prefix);
+
+ /* Commit the mtr to release the tree S lock to allow
+ other threads to do some work too. */
+ mtr_commit(&mtr);
+ mtr_start(&mtr);
+ mtr_s_lock(dict_index_get_lock(index), &mtr);
+ if (root_level != btr_height_get(index, &mtr)) {
+ /* Just quit if the tree has changed beyond
+ recognition here. The old stats from previous
+ runs will remain in the values that we have
+ not calculated yet. Initially when the index
+ object is created the stats members are given
+ some sensible values so leaving them untouched
+ here even the first time will not cause us to
+ read uninitialized memory later. */
+ break;
+ }
/* check whether we should pick the current level;
we pick level 1 even if it does not have enough
distinct records because we do not want to scan the
leaf level because it may contain too many records */
if (level_is_analyzed
- && (n_diff_on_level[n_prefix] >= N_DIFF_REQUIRED
+ && (n_diff_on_level[n_prefix - 1] >= N_DIFF_REQUIRED(index)
|| level == 1)) {
goto found_level;
}
- /* else */
/* search for a level that contains enough distinct records */
@@ -1325,12 +1920,14 @@ dict_stats_analyze_index(
/* if this does not hold we should be on
"found_level" instead of here */
- ut_ad(n_diff_on_level[n_prefix] < N_DIFF_REQUIRED);
+ ut_ad(n_diff_on_level[n_prefix - 1]
+ < N_DIFF_REQUIRED(index));
level--;
- level_is_analyzed = FALSE;
+ level_is_analyzed = false;
}
+ /* descend into the tree, searching for "good enough" level */
for (;;) {
/* make sure we do not scan the leaf level
@@ -1349,18 +1946,19 @@ dict_stats_analyze_index(
total_recs is left from the previous iteration when
we scanned one level upper or we have not scanned any
levels yet in which case total_recs is 1. */
- if (total_recs > srv_stats_persistent_sample_pages) {
+ if (total_recs > N_SAMPLE_PAGES(index)) {
- /* if the above cond is true then we are not
- at the root level since on the root level
- total_recs == 1 and cannot
- be > srv_stats_persistent_sample_pages */
+ /* if the above cond is true then we are
+ not at the root level since on the root
+ level total_recs == 1 (set before we
+ enter the n-prefix loop) and cannot
+ be > N_SAMPLE_PAGES(index) */
ut_a(level != root_level);
/* step one level back and be satisfied with
whatever it contains */
level++;
- level_is_analyzed = TRUE;
+ level_is_analyzed = true;
break;
}
@@ -1370,27 +1968,28 @@ dict_stats_analyze_index(
n_diff_on_level,
&total_recs,
&total_pages,
- n_diff_boundaries);
+ n_diff_boundaries,
+ &mtr);
- level_is_analyzed = TRUE;
+ level_is_analyzed = true;
- if (n_diff_on_level[n_prefix] >= N_DIFF_REQUIRED
+ if (n_diff_on_level[n_prefix - 1]
+ >= N_DIFF_REQUIRED(index)
|| level == 1) {
/* we found a good level with many distinct
records or we have reached the last level we
could scan */
break;
}
- /* else */
level--;
- level_is_analyzed = FALSE;
+ level_is_analyzed = false;
}
found_level:
DEBUG_PRINTF(" %s(): found level %lu that has " UINT64PF
" distinct records for n_prefix=%lu\n",
- __func__, level, n_diff_on_level[n_prefix],
+ __func__, level, n_diff_on_level[n_prefix - 1],
n_prefix);
/* here we are either on level 1 or the level that we are on
@@ -1406,28 +2005,30 @@ found_level:
dict_stats_analyze_index_for_n_prefix(
index, level, total_recs, n_prefix,
- n_diff_on_level[n_prefix],
- &n_diff_boundaries[n_prefix]);
+ n_diff_on_level[n_prefix - 1],
+ &n_diff_boundaries[n_prefix - 1], &mtr);
}
- for (i = 1; i <= n_uniq; i++) {
+ mtr_commit(&mtr);
+
+ for (ulint i = 0; i < n_uniq; i++) {
dyn_array_free(&n_diff_boundaries[i]);
}
mem_free(n_diff_boundaries);
mem_free(n_diff_on_level);
+
+ dict_stats_assert_initialized_index(index);
}
-/* @} */
/*********************************************************************//**
Calculates new estimates for table and index statistics. This function
is relatively slow and is used to calculate persistent statistics that
will be saved on disk.
-dict_stats_update_persistent() @{
@return DB_SUCCESS or error code */
static
-enum db_err
+dberr_t
dict_stats_update_persistent(
/*=========================*/
dict_table_t* table) /*!< in/out: table */
@@ -1436,21 +2037,30 @@ dict_stats_update_persistent(
DEBUG_PRINTF("%s(table=%s)\n", __func__, table->name);
- /* XXX quit if interrupted, e.g. SIGTERM */
+ dict_table_stats_lock(table, RW_X_LATCH);
/* analyze the clustered index first */
index = dict_table_get_first_index(table);
- if (index == NULL) {
+ if (index == NULL
+ || dict_index_is_corrupted(index)
+ || (index->type | DICT_UNIQUE) != (DICT_CLUSTERED | DICT_UNIQUE)) {
+
/* Table definition is corrupt */
+ dict_table_stats_unlock(table, RW_X_LATCH);
+ dict_stats_empty_table(table);
+
return(DB_CORRUPTION);
}
+ ut_ad(!dict_index_is_univ(index));
+
dict_stats_analyze_index(index);
- table->stat_n_rows
- = index->stat_n_diff_key_vals[dict_index_get_n_unique(index)];
+ ulint n_unique = dict_index_get_n_unique(index);
+
+ table->stat_n_rows = index->stat_n_diff_key_vals[n_unique - 1];
table->stat_clustered_index_size = index->stat_index_size;
@@ -1462,31 +2072,47 @@ dict_stats_update_persistent(
index != NULL;
index = dict_table_get_next_index(index)) {
+ ut_ad(!dict_index_is_univ(index));
+
if (index->type & DICT_FTS) {
continue;
}
- dict_stats_analyze_index(index);
+ dict_stats_empty_index(index);
+
+ if (dict_stats_should_ignore_index(index)) {
+ continue;
+ }
+
+ if (!(table->stats_bg_flag & BG_STAT_SHOULD_QUIT)) {
+ dict_stats_analyze_index(index);
+ }
table->stat_sum_of_other_index_sizes
+= index->stat_index_size;
}
+ table->stats_last_recalc = ut_time();
+
table->stat_modified_counter = 0;
table->stat_initialized = TRUE;
+ dict_stats_assert_initialized(table);
+
+ dict_table_stats_unlock(table, RW_X_LATCH);
+
return(DB_SUCCESS);
}
-/* @} */
+#include "mysql_com.h"
/*********************************************************************//**
Save an individual index's statistic into the persistent statistics
storage.
dict_stats_save_index_stat() @{
@return DB_SUCCESS or error code */
static
-enum db_err
+dberr_t
dict_stats_save_index_stat(
/*=======================*/
dict_index_t* index, /*!< in: index */
@@ -1494,95 +2120,114 @@ dict_stats_save_index_stat(
const char* stat_name, /*!< in: name of the stat */
ib_uint64_t stat_value, /*!< in: value of the stat */
ib_uint64_t* sample_size, /*!< in: n pages sampled or NULL */
- const char* stat_description,/*!< in: description of the stat */
- trx_t* trx, /*!< in/out: transaction to use */
- ibool caller_has_dict_sys_mutex)/*!< in: TRUE if the caller
- owns dict_sys->mutex */
+ const char* stat_description)/*!< in: description of the stat */
{
pars_info_t* pinfo;
- enum db_err ret;
+ dberr_t ret;
+ char db_utf8[MAX_DB_UTF8_LEN];
+ char table_utf8[MAX_TABLE_UTF8_LEN];
- pinfo = pars_info_create();
-
- pars_info_add_literal(pinfo, "database_name", index->table->name,
- dict_get_db_name_len(index->table->name),
- DATA_VARCHAR, 0);
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+ ut_ad(mutex_own(&dict_sys->mutex));
- pars_info_add_str_literal(pinfo, "table_name",
- dict_remove_db_name(index->table->name));
+ dict_fs2utf8(index->table->name, db_utf8, sizeof(db_utf8),
+ table_utf8, sizeof(table_utf8));
+ pinfo = pars_info_create();
+ pars_info_add_str_literal(pinfo, "database_name", db_utf8);
+ pars_info_add_str_literal(pinfo, "table_name", table_utf8);
+ UNIV_MEM_ASSERT_RW_ABORT(index->name, strlen(index->name));
pars_info_add_str_literal(pinfo, "index_name", index->name);
-
+ UNIV_MEM_ASSERT_RW_ABORT(&last_update, 4);
pars_info_add_int4_literal(pinfo, "last_update", last_update);
-
+ UNIV_MEM_ASSERT_RW_ABORT(stat_name, strlen(stat_name));
pars_info_add_str_literal(pinfo, "stat_name", stat_name);
-
+ UNIV_MEM_ASSERT_RW_ABORT(&stat_value, 8);
pars_info_add_ull_literal(pinfo, "stat_value", stat_value);
-
if (sample_size != NULL) {
+ UNIV_MEM_ASSERT_RW_ABORT(sample_size, 8);
pars_info_add_ull_literal(pinfo, "sample_size", *sample_size);
} else {
pars_info_add_literal(pinfo, "sample_size", NULL,
UNIV_SQL_NULL, DATA_FIXBINARY, 0);
}
-
+ UNIV_MEM_ASSERT_RW_ABORT(stat_description, strlen(stat_description));
pars_info_add_str_literal(pinfo, "stat_description",
stat_description);
- ret = que_eval_sql(pinfo,
- "PROCEDURE INDEX_STATS_SAVE () IS\n"
- "dummy CHAR;\n"
- "BEGIN\n"
-
- "SELECT database_name INTO dummy\n"
- "FROM \"" INDEX_STATS_NAME "\"\n"
- "WHERE\n"
- "database_name = :database_name AND\n"
- "table_name = :table_name AND\n"
- "index_name = :index_name AND\n"
- "stat_name = :stat_name\n"
- "FOR UPDATE;\n"
-
- "IF (SQL % NOTFOUND) THEN\n"
- " INSERT INTO \"" INDEX_STATS_NAME "\"\n"
- " VALUES\n"
- " (\n"
- " :database_name,\n"
- " :table_name,\n"
- " :index_name,\n"
- " :last_update,\n"
- " :stat_name,\n"
- " :stat_value,\n"
- " :sample_size,\n"
- " :stat_description\n"
- " );\n"
- "ELSE\n"
- " UPDATE \"" INDEX_STATS_NAME "\" SET\n"
- " last_update = :last_update,\n"
- " stat_value = :stat_value,\n"
- " sample_size = :sample_size,\n"
- " stat_description = :stat_description\n"
- " WHERE\n"
- " database_name = :database_name AND\n"
- " table_name = :table_name AND\n"
- " index_name = :index_name AND\n"
- " stat_name = :stat_name;\n"
- "END IF;\n"
- "END;",
- !caller_has_dict_sys_mutex, trx);
-
- /* pinfo is freed by que_eval_sql() */
+ ret = dict_stats_exec_sql(
+ pinfo,
+ "PROCEDURE INDEX_STATS_SAVE_INSERT () IS\n"
+ "BEGIN\n"
+ "INSERT INTO \"" INDEX_STATS_NAME "\"\n"
+ "VALUES\n"
+ "(\n"
+ ":database_name,\n"
+ ":table_name,\n"
+ ":index_name,\n"
+ ":last_update,\n"
+ ":stat_name,\n"
+ ":stat_value,\n"
+ ":sample_size,\n"
+ ":stat_description\n"
+ ");\n"
+ "END;");
+
+ if (ret == DB_DUPLICATE_KEY) {
+
+ pinfo = pars_info_create();
+ pars_info_add_str_literal(pinfo, "database_name", db_utf8);
+ pars_info_add_str_literal(pinfo, "table_name", table_utf8);
+ UNIV_MEM_ASSERT_RW_ABORT(index->name, strlen(index->name));
+ pars_info_add_str_literal(pinfo, "index_name", index->name);
+ UNIV_MEM_ASSERT_RW_ABORT(&last_update, 4);
+ pars_info_add_int4_literal(pinfo, "last_update", last_update);
+ UNIV_MEM_ASSERT_RW_ABORT(stat_name, strlen(stat_name));
+ pars_info_add_str_literal(pinfo, "stat_name", stat_name);
+ UNIV_MEM_ASSERT_RW_ABORT(&stat_value, 8);
+ pars_info_add_ull_literal(pinfo, "stat_value", stat_value);
+ if (sample_size != NULL) {
+ UNIV_MEM_ASSERT_RW_ABORT(sample_size, 8);
+ pars_info_add_ull_literal(pinfo, "sample_size", *sample_size);
+ } else {
+ pars_info_add_literal(pinfo, "sample_size", NULL,
+ UNIV_SQL_NULL, DATA_FIXBINARY, 0);
+ }
+ UNIV_MEM_ASSERT_RW_ABORT(stat_description, strlen(stat_description));
+ pars_info_add_str_literal(pinfo, "stat_description",
+ stat_description);
+
+ ret = dict_stats_exec_sql(
+ pinfo,
+ "PROCEDURE INDEX_STATS_SAVE_UPDATE () IS\n"
+ "BEGIN\n"
+ "UPDATE \"" INDEX_STATS_NAME "\" SET\n"
+ "last_update = :last_update,\n"
+ "stat_value = :stat_value,\n"
+ "sample_size = :sample_size,\n"
+ "stat_description = :stat_description\n"
+ "WHERE\n"
+ "database_name = :database_name AND\n"
+ "table_name = :table_name AND\n"
+ "index_name = :index_name AND\n"
+ "stat_name = :stat_name;\n"
+ "END;");
+ }
if (ret != DB_SUCCESS) {
+ char buf_table[MAX_FULL_NAME_LEN];
+ char buf_index[MAX_FULL_NAME_LEN];
ut_print_timestamp(stderr);
fprintf(stderr,
- " InnoDB: Error while trying to save index "
- "statistics for table %s, index %s, "
- "stat name %s: %s\n",
- index->table->name, index->name,
+ " InnoDB: Cannot save index statistics for table "
+ "%s, index %s, stat name \"%s\": %s\n",
+ ut_format_name(index->table->name, TRUE,
+ buf_table, sizeof(buf_table)),
+ ut_format_name(index->name, FALSE,
+ buf_index, sizeof(buf_index)),
stat_name, ut_strerr(ret));
-
- trx->error_state = DB_SUCCESS;
}
return(ret);
@@ -1594,196 +2239,165 @@ Save the table's statistics into the persistent statistics storage.
dict_stats_save() @{
@return DB_SUCCESS or error code */
static
-enum db_err
+dberr_t
dict_stats_save(
/*============*/
- dict_table_t* table, /*!< in: table */
- ibool caller_has_dict_sys_mutex)/*!< in: TRUE if the caller
- owns dict_sys->mutex */
+ dict_table_t* table_orig) /*!< in: table */
{
- trx_t* trx;
pars_info_t* pinfo;
- dict_index_t* index;
lint now;
- enum db_err ret;
+ dberr_t ret;
+ dict_table_t* table;
+ char db_utf8[MAX_DB_UTF8_LEN];
+ char table_utf8[MAX_TABLE_UTF8_LEN];
+
+ table = dict_stats_snapshot_create(table_orig);
+
+ dict_fs2utf8(table->name, db_utf8, sizeof(db_utf8),
+ table_utf8, sizeof(table_utf8));
+
+ rw_lock_x_lock(&dict_operation_lock);
+ mutex_enter(&dict_sys->mutex);
/* MySQL's timestamp is 4 byte, so we use
pars_info_add_int4_literal() which takes a lint arg, so "now" is
lint */
now = (lint) ut_time();
- trx = trx_allocate_for_background();
-
- /* Use 'read-uncommitted' so that the SELECTs we execute
- do not get blocked in case some user has locked the rows we
- are SELECTing */
-
- trx->isolation_level = TRX_ISO_READ_UNCOMMITTED;
-
- trx_start_if_not_started(trx);
+#define PREPARE_PINFO_FOR_TABLE_SAVE(p, t, n) \
+ do { \
+ pars_info_add_str_literal((p), "database_name", db_utf8); \
+ pars_info_add_str_literal((p), "table_name", table_utf8); \
+ pars_info_add_int4_literal((p), "last_update", (n)); \
+ pars_info_add_ull_literal((p), "n_rows", (t)->stat_n_rows); \
+ pars_info_add_ull_literal((p), "clustered_index_size", \
+ (t)->stat_clustered_index_size); \
+ pars_info_add_ull_literal((p), "sum_of_other_index_sizes", \
+ (t)->stat_sum_of_other_index_sizes); \
+ } while(false);
pinfo = pars_info_create();
- pars_info_add_literal(pinfo, "database_name", table->name,
- dict_get_db_name_len(table->name),
- DATA_VARCHAR, 0);
-
- pars_info_add_str_literal(pinfo, "table_name",
- dict_remove_db_name(table->name));
-
- pars_info_add_int4_literal(pinfo, "last_update", now);
-
- pars_info_add_ull_literal(pinfo, "n_rows", table->stat_n_rows);
-
- pars_info_add_ull_literal(pinfo, "clustered_index_size",
- table->stat_clustered_index_size);
-
- pars_info_add_ull_literal(pinfo, "sum_of_other_index_sizes",
- table->stat_sum_of_other_index_sizes);
-
- ret = que_eval_sql(pinfo,
- "PROCEDURE TABLE_STATS_SAVE () IS\n"
- "dummy CHAR;\n"
- "BEGIN\n"
-
- "SELECT database_name INTO dummy\n"
- "FROM \"" TABLE_STATS_NAME "\"\n"
- "WHERE\n"
- "database_name = :database_name AND\n"
- "table_name = :table_name\n"
- "FOR UPDATE;\n"
-
- "IF (SQL % NOTFOUND) THEN\n"
- " INSERT INTO \"" TABLE_STATS_NAME "\"\n"
- " VALUES\n"
- " (\n"
- " :database_name,\n"
- " :table_name,\n"
- " :last_update,\n"
- " :n_rows,\n"
- " :clustered_index_size,\n"
- " :sum_of_other_index_sizes\n"
- " );\n"
- "ELSE\n"
- " UPDATE \"" TABLE_STATS_NAME "\" SET\n"
- " last_update = :last_update,\n"
- " n_rows = :n_rows,\n"
- " clustered_index_size = :clustered_index_size,\n"
- " sum_of_other_index_sizes = "
- " :sum_of_other_index_sizes\n"
- " WHERE\n"
- " database_name = :database_name AND\n"
- " table_name = :table_name;\n"
- "END IF;\n"
- "END;",
- !caller_has_dict_sys_mutex, trx);
-
- /* pinfo is freed by que_eval_sql() */
+ PREPARE_PINFO_FOR_TABLE_SAVE(pinfo, table, now);
+
+ ret = dict_stats_exec_sql(
+ pinfo,
+ "PROCEDURE TABLE_STATS_SAVE_INSERT () IS\n"
+ "BEGIN\n"
+ "INSERT INTO \"" TABLE_STATS_NAME "\"\n"
+ "VALUES\n"
+ "(\n"
+ ":database_name,\n"
+ ":table_name,\n"
+ ":last_update,\n"
+ ":n_rows,\n"
+ ":clustered_index_size,\n"
+ ":sum_of_other_index_sizes\n"
+ ");\n"
+ "END;");
+
+ if (ret == DB_DUPLICATE_KEY) {
+ pinfo = pars_info_create();
+
+ PREPARE_PINFO_FOR_TABLE_SAVE(pinfo, table, now);
+
+ ret = dict_stats_exec_sql(
+ pinfo,
+ "PROCEDURE TABLE_STATS_SAVE_UPDATE () IS\n"
+ "BEGIN\n"
+ "UPDATE \"" TABLE_STATS_NAME "\" SET\n"
+ "last_update = :last_update,\n"
+ "n_rows = :n_rows,\n"
+ "clustered_index_size = :clustered_index_size,\n"
+ "sum_of_other_index_sizes = "
+ " :sum_of_other_index_sizes\n"
+ "WHERE\n"
+ "database_name = :database_name AND\n"
+ "table_name = :table_name;\n"
+ "END;");
+ }
if (ret != DB_SUCCESS) {
-
+ char buf[MAX_FULL_NAME_LEN];
ut_print_timestamp(stderr);
fprintf(stderr,
- " InnoDB: Error while trying to save table "
- "statistics for table %s: %s\n",
- table->name, ut_strerr(ret));
-
- goto end_rollback;
+ " InnoDB: Cannot save table statistics for table "
+ "%s: %s\n",
+ ut_format_name(table->name, TRUE, buf, sizeof(buf)),
+ ut_strerr(ret));
+ goto end;
}
+ dict_index_t* index;
+
for (index = dict_table_get_first_index(table);
index != NULL;
index = dict_table_get_next_index(index)) {
- ib_uint64_t stat_n_diff_key_vals[REC_MAX_N_FIELDS];
- ib_uint64_t stat_n_sample_sizes[REC_MAX_N_FIELDS];
- ulint n_uniq;
- ulint i;
+ if (dict_stats_should_ignore_index(index)) {
+ continue;
+ }
+
+ ut_ad(!dict_index_is_univ(index));
ret = dict_stats_save_index_stat(index, now, "size",
index->stat_index_size,
NULL,
"Number of pages "
- "in the index",
- trx,
- caller_has_dict_sys_mutex);
+ "in the index");
if (ret != DB_SUCCESS) {
- goto end_rollback;
+ goto end;
}
ret = dict_stats_save_index_stat(index, now, "n_leaf_pages",
index->stat_n_leaf_pages,
NULL,
"Number of leaf pages "
- "in the index",
- trx,
- caller_has_dict_sys_mutex);
+ "in the index");
if (ret != DB_SUCCESS) {
- goto end_rollback;
+ goto end;
}
- n_uniq = dict_index_get_n_unique(index);
-
- ut_ad(n_uniq + 1 <= UT_ARR_SIZE(stat_n_diff_key_vals));
-
- memcpy(stat_n_diff_key_vals, index->stat_n_diff_key_vals,
- (n_uniq + 1) * sizeof(index->stat_n_diff_key_vals[0]));
-
- ut_ad(n_uniq + 1 <= UT_ARR_SIZE(stat_n_sample_sizes));
-
- memcpy(stat_n_sample_sizes, index->stat_n_sample_sizes,
- (n_uniq + 1) * sizeof(index->stat_n_sample_sizes[0]));
-
- for (i = 1; i <= n_uniq; i++) {
+ for (ulint i = 0; i < index->n_uniq; i++) {
char stat_name[16];
char stat_description[1024];
ulint j;
ut_snprintf(stat_name, sizeof(stat_name),
- "n_diff_pfx%02lu", i);
+ "n_diff_pfx%02lu", i + 1);
/* craft a string that contains the columns names */
ut_snprintf(stat_description,
sizeof(stat_description),
"%s", index->fields[0].name);
- for (j = 2; j <= i; j++) {
+ for (j = 1; j <= i; j++) {
size_t len;
len = strlen(stat_description);
ut_snprintf(stat_description + len,
sizeof(stat_description) - len,
- ",%s", index->fields[j - 1].name);
+ ",%s", index->fields[j].name);
}
ret = dict_stats_save_index_stat(
index, now, stat_name,
- stat_n_diff_key_vals[i],
- &stat_n_sample_sizes[i],
- stat_description, trx,
- caller_has_dict_sys_mutex);
+ index->stat_n_diff_key_vals[i],
+ &index->stat_n_sample_sizes[i],
+ stat_description);
if (ret != DB_SUCCESS) {
- goto end_rollback;
+ goto end;
}
}
}
- trx_commit_for_mysql(trx);
- ret = DB_SUCCESS;
- goto end_free;
-
-end_rollback:
-
- trx->op_info = "rollback of internal transaction on stats tables";
- trx_rollback_to_savepoint(trx, NULL);
- trx->op_info = "";
- ut_a(trx->error_state == DB_SUCCESS);
+end:
+ mutex_exit(&dict_sys->mutex);
+ rw_lock_x_unlock(&dict_operation_lock);
-end_free:
-
- trx_free_for_background(trx);
+ dict_stats_snapshot_free(table);
return(ret);
}
@@ -1875,11 +2489,11 @@ dict_stats_fetch_table_stats_step(
/** Aux struct used to pass a table and a boolean to
dict_stats_fetch_index_stats_step(). */
-typedef struct index_fetch_struct {
+struct index_fetch_t {
dict_table_t* table; /*!< table whose indexes are to be modified */
- ibool stats_were_modified; /*!< will be set to TRUE if at
+ bool stats_were_modified; /*!< will be set to true if at
least one index stats were modified */
-} index_fetch_t;
+};
/*********************************************************************//**
Called for the rows that are selected by
@@ -2036,12 +2650,12 @@ dict_stats_fetch_index_stats_step(
if (stat_name_len == 4 /* strlen("size") */
&& strncasecmp("size", stat_name, stat_name_len) == 0) {
index->stat_index_size = (ulint) stat_value;
- arg->stats_were_modified = TRUE;
+ arg->stats_were_modified = true;
} else if (stat_name_len == 12 /* strlen("n_leaf_pages") */
&& strncasecmp("n_leaf_pages", stat_name, stat_name_len)
== 0) {
index->stat_n_leaf_pages = (ulint) stat_value;
- arg->stats_were_modified = TRUE;
+ arg->stats_were_modified = true;
} else if (stat_name_len > PFX_LEN /* e.g. stat_name=="n_diff_pfx01" */
&& strncasecmp(PFX, stat_name, PFX_LEN) == 0) {
@@ -2057,19 +2671,24 @@ dict_stats_fetch_index_stats_step(
|| num_ptr[0] < '0' || num_ptr[0] > '9'
|| num_ptr[1] < '0' || num_ptr[1] > '9') {
+ char db_utf8[MAX_DB_UTF8_LEN];
+ char table_utf8[MAX_TABLE_UTF8_LEN];
+
+ dict_fs2utf8(table->name, db_utf8, sizeof(db_utf8),
+ table_utf8, sizeof(table_utf8));
+
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Ignoring strange row from "
"%s WHERE "
- "database_name = '%.*s' AND "
+ "database_name = '%s' AND "
"table_name = '%s' AND "
"index_name = '%s' AND "
"stat_name = '%.*s'; because stat_name "
"is malformed\n",
INDEX_STATS_NAME_PRINT,
- (int) dict_get_db_name_len(table->name),
- table->name,
- dict_remove_db_name(table->name),
+ db_utf8,
+ table_utf8,
index->name,
(int) stat_name_len,
stat_name);
@@ -2081,41 +2700,50 @@ dict_stats_fetch_index_stats_step(
note that stat_name does not have a terminating '\0' */
n_pfx = (num_ptr[0] - '0') * 10 + (num_ptr[1] - '0');
- if (n_pfx == 0 || n_pfx > dict_index_get_n_unique(index)) {
+ ulint n_uniq = index->n_uniq;
+
+ if (n_pfx == 0 || n_pfx > n_uniq) {
+
+ char db_utf8[MAX_DB_UTF8_LEN];
+ char table_utf8[MAX_TABLE_UTF8_LEN];
+
+ dict_fs2utf8(table->name, db_utf8, sizeof(db_utf8),
+ table_utf8, sizeof(table_utf8));
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Ignoring strange row from "
"%s WHERE "
- "database_name = '%.*s' AND "
+ "database_name = '%s' AND "
"table_name = '%s' AND "
"index_name = '%s' AND "
"stat_name = '%.*s'; because stat_name is "
"out of range, the index has %lu unique "
"columns\n",
INDEX_STATS_NAME_PRINT,
- (int) dict_get_db_name_len(table->name),
- table->name,
- dict_remove_db_name(table->name),
+ db_utf8,
+ table_utf8,
index->name,
(int) stat_name_len,
stat_name,
- dict_index_get_n_unique(index));
+ n_uniq);
return(TRUE);
}
/* else */
- index->stat_n_diff_key_vals[n_pfx] = stat_value;
+ index->stat_n_diff_key_vals[n_pfx - 1] = stat_value;
if (sample_size != UINT64_UNDEFINED) {
- index->stat_n_sample_sizes[n_pfx] = sample_size;
+ index->stat_n_sample_sizes[n_pfx - 1] = sample_size;
} else {
/* hmm, strange... the user must have UPDATEd the
table manually and SET sample_size = NULL */
- index->stat_n_sample_sizes[n_pfx] = 0;
+ index->stat_n_sample_sizes[n_pfx - 1] = 0;
}
- arg->stats_were_modified = TRUE;
+ index->stat_n_non_null_key_vals[n_pfx - 1] = 0;
+
+ arg->stats_were_modified = true;
} else {
/* silently ignore rows with unknown stat_name, the
user may have developed her own stats */
@@ -2131,19 +2759,25 @@ Read table's statistics from the persistent statistics storage.
dict_stats_fetch_from_ps() @{
@return DB_SUCCESS or error code */
static
-enum db_err
+dberr_t
dict_stats_fetch_from_ps(
/*=====================*/
- dict_table_t* table, /*!< in/out: table */
- ibool caller_has_dict_sys_mutex)/*!< in: TRUE if the caller
- owns dict_sys->mutex */
+ dict_table_t* table) /*!< in/out: table */
{
index_fetch_t index_fetch_arg;
trx_t* trx;
pars_info_t* pinfo;
- enum db_err ret;
+ dberr_t ret;
+ char db_utf8[MAX_DB_UTF8_LEN];
+ char table_utf8[MAX_TABLE_UTF8_LEN];
+
+ ut_ad(!mutex_own(&dict_sys->mutex));
- ut_ad(mutex_own(&dict_sys->mutex) == caller_has_dict_sys_mutex);
+ /* Initialize all stats to dummy values before fetching because if
+ the persistent storage contains incomplete stats (e.g. missing stats
+ for some index) then we would end up with (partially) uninitialized
+ stats. */
+ dict_stats_empty_table(table);
trx = trx_allocate_for_background();
@@ -2155,14 +2789,14 @@ dict_stats_fetch_from_ps(
trx_start_if_not_started(trx);
+ dict_fs2utf8(table->name, db_utf8, sizeof(db_utf8),
+ table_utf8, sizeof(table_utf8));
+
pinfo = pars_info_create();
- pars_info_add_literal(pinfo, "database_name", table->name,
- dict_get_db_name_len(table->name),
- DATA_VARCHAR, 0);
+ pars_info_add_str_literal(pinfo, "database_name", db_utf8);
- pars_info_add_str_literal(pinfo, "table_name",
- dict_remove_db_name(table->name));
+ pars_info_add_str_literal(pinfo, "table_name", table_utf8);
pars_info_bind_function(pinfo,
"fetch_table_stats_step",
@@ -2170,7 +2804,7 @@ dict_stats_fetch_from_ps(
table);
index_fetch_arg.table = table;
- index_fetch_arg.stats_were_modified = FALSE;
+ index_fetch_arg.stats_were_modified = false;
pars_info_bind_function(pinfo,
"fetch_index_stats_step",
dict_stats_fetch_index_stats_step,
@@ -2230,19 +2864,9 @@ dict_stats_fetch_from_ps(
"CLOSE index_stats_cur;\n"
"END;",
- !caller_has_dict_sys_mutex, trx);
-
+ TRUE, trx);
/* pinfo is freed by que_eval_sql() */
- /* XXX If mysql.innodb_index_stats contained less rows than the number
- of indexes in the table, then some of the indexes of the table
- were left uninitialized. Currently this is ignored and those
- indexes are left with uninitialized stats until ANALYZE TABLE is
- run. This condition happens when the user creates a new index
- on a table. We could return DB_STATS_DO_NOT_EXIST from here,
- forcing the usage of transient stats until mysql.innodb_index_stats
- is complete. */
-
trx_commit_for_mysql(trx);
trx_free_for_background(trx);
@@ -2256,32 +2880,67 @@ dict_stats_fetch_from_ps(
/* @} */
/*********************************************************************//**
+Fetches or calculates new estimates for index statistics.
+dict_stats_update_for_index() @{ */
+UNIV_INTERN
+void
+dict_stats_update_for_index(
+/*========================*/
+ dict_index_t* index) /*!< in/out: index */
+{
+ ut_ad(!mutex_own(&dict_sys->mutex));
+
+ if (dict_stats_is_persistent_enabled(index->table)) {
+
+ if (dict_stats_persistent_storage_check(false)) {
+ dict_table_stats_lock(index->table, RW_X_LATCH);
+ dict_stats_analyze_index(index);
+ dict_table_stats_unlock(index->table, RW_X_LATCH);
+ dict_stats_save(index->table);
+ return;
+ }
+ /* else */
+
+ /* Fall back to transient stats since the persistent
+ storage is not present or is corrupted */
+ char buf_table[MAX_FULL_NAME_LEN];
+ char buf_index[MAX_FULL_NAME_LEN];
+ ut_print_timestamp(stderr);
+ fprintf(stderr,
+ " InnoDB: Recalculation of persistent statistics "
+ "requested for table %s index %s but the required "
+ "persistent statistics storage is not present or is "
+ "corrupted. Using transient stats instead.\n",
+ ut_format_name(index->table->name, TRUE,
+ buf_table, sizeof(buf_table)),
+ ut_format_name(index->name, FALSE,
+ buf_index, sizeof(buf_index)));
+ }
+
+ dict_table_stats_lock(index->table, RW_X_LATCH);
+ dict_stats_update_transient_for_index(index);
+ dict_table_stats_unlock(index->table, RW_X_LATCH);
+}
+/* @} */
+
+/*********************************************************************//**
Calculates new estimates for table and index statistics. The statistics
are used in query optimization.
-dict_stats_update() @{
-@return DB_* error code or DB_SUCCESS */
+@return DB_SUCCESS or error code */
UNIV_INTERN
-enum db_err
+dberr_t
dict_stats_update(
/*==============*/
dict_table_t* table, /*!< in/out: table */
- dict_stats_upd_option_t stats_upd_option,
+ dict_stats_upd_option_t stats_upd_option)
/*!< in: whether to (re) calc
the stats or to fetch them from
the persistent statistics
storage */
- ibool caller_has_dict_sys_mutex)
- /*!< in: TRUE if the caller
- owns dict_sys->mutex */
{
- enum db_err ret = DB_ERROR;
+ char buf[MAX_FULL_NAME_LEN];
- /* check whether caller_has_dict_sys_mutex is set correctly;
- note that mutex_own() is not implemented in non-debug code so
- we cannot avoid having this extra param to the current function */
- ut_ad(caller_has_dict_sys_mutex
- ? mutex_own(&dict_sys->mutex)
- : !mutex_own(&dict_sys->mutex));
+ ut_ad(!mutex_own(&dict_sys->mutex));
if (table->ibd_file_missing) {
ut_print_timestamp(stderr);
@@ -2289,83 +2948,61 @@ dict_stats_update(
" InnoDB: cannot calculate statistics for table %s "
"because the .ibd file is missing. For help, please "
"refer to " REFMAN "innodb-troubleshooting.html\n",
- table->name);
-
+ ut_format_name(table->name, TRUE, buf, sizeof(buf)));
+ dict_stats_empty_table(table);
return(DB_TABLESPACE_DELETED);
- }
-
- /* If we have set a high innodb_force_recovery level, do not calculate
- statistics, as a badly corrupted index can cause a crash in it. */
-
- if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) {
-
+ } else if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) {
+ /* If we have set a high innodb_force_recovery level, do
+ not calculate statistics, as a badly corrupted index can
+ cause a crash in it. */
+ dict_stats_empty_table(table);
return(DB_SUCCESS);
}
switch (stats_upd_option) {
case DICT_STATS_RECALC_PERSISTENT:
- case DICT_STATS_RECALC_PERSISTENT_SILENT:
+
+ ut_ad(!srv_read_only_mode);
+
/* Persistent recalculation requested, called from
- ANALYZE TABLE or from TRUNCATE TABLE */
-
- /* FTS auxiliary tables do not need persistent stats */
- if ((ut_strcount(table->name, "FTS") > 0
- && (ut_strcount(table->name, "CONFIG") > 0
- || ut_strcount(table->name, "INDEX") > 0
- || ut_strcount(table->name, "DELETED") > 0
- || ut_strcount(table->name, "DOC_ID") > 0
- || ut_strcount(table->name, "ADDED") > 0))) {
- goto transient;
- }
+ 1) ANALYZE TABLE, or
+ 2) the auto recalculation background thread, or
+ 3) open table if stats do not exist on disk and auto recalc
+ is enabled */
+
+ /* InnoDB internal tables (e.g. SYS_TABLES) cannot have
+ persistent stats enabled */
+ ut_a(strchr(table->name, '/') != NULL);
/* check if the persistent statistics storage exists
before calling the potentially slow function
dict_stats_update_persistent(); that is a
prerequisite for dict_stats_save() succeeding */
- if (dict_stats_persistent_storage_check(
- caller_has_dict_sys_mutex)) {
-
- dict_table_stats_lock(table, RW_X_LATCH);
+ if (dict_stats_persistent_storage_check(false)) {
- ret = dict_stats_update_persistent(table);
+ dberr_t err;
- /* XXX Currently dict_stats_save() would read the
- stats from the table without dict_table_stats_lock()
- which means it could save inconsistent data on the
- disk. This is because we must call
- dict_table_stats_lock() after locking dict_sys->mutex.
- A solution is to copy here the stats to a temporary
- buffer while holding the _stats_lock(), release it,
- and pass that buffer to dict_stats_save(). */
+ err = dict_stats_update_persistent(table);
- dict_table_stats_unlock(table, RW_X_LATCH);
-
- if (ret == DB_SUCCESS) {
- ret = dict_stats_save(
- table,
- caller_has_dict_sys_mutex);
+ if (err != DB_SUCCESS) {
+ return(err);
}
- return(ret);
+ err = dict_stats_save(table);
+
+ return(err);
}
- /* else */
/* Fall back to transient stats since the persistent
storage is not present or is corrupted */
- if (stats_upd_option == DICT_STATS_RECALC_PERSISTENT) {
-
- ut_print_timestamp(stderr);
- /* XXX add link to the doc about storage
- creation */
- fprintf(stderr,
- " InnoDB: Recalculation of persistent "
- "statistics requested but the required "
- "persistent statistics storage is not "
- "present or is corrupted. "
- "Using quick transient stats "
- "instead.\n");
- }
+ ut_print_timestamp(stderr);
+ fprintf(stderr,
+ " InnoDB: Recalculation of persistent statistics "
+ "requested for table %s but the required persistent "
+ "statistics storage is not present or is corrupted. "
+ "Using transient stats instead.\n",
+ ut_format_name(table->name, TRUE, buf, sizeof(buf)));
goto transient;
@@ -2373,265 +3010,317 @@ dict_stats_update(
goto transient;
- case DICT_STATS_FETCH:
- case DICT_STATS_FETCH_ONLY_IF_NOT_IN_MEMORY:
- /* fetch requested, either fetch from persistent statistics
- storage or use the old method */
+ case DICT_STATS_EMPTY_TABLE:
- dict_table_stats_lock(table, RW_X_LATCH);
+ dict_stats_empty_table(table);
- if (stats_upd_option == DICT_STATS_FETCH_ONLY_IF_NOT_IN_MEMORY
- && table->stat_initialized) {
+ /* If table is using persistent stats,
+ then save the stats on disk */
- dict_table_stats_unlock(table, RW_X_LATCH);
- return(DB_SUCCESS);
+ if (dict_stats_is_persistent_enabled(table)) {
+
+ if (dict_stats_persistent_storage_check(false)) {
+
+ return(dict_stats_save(table));
+ }
+
+ return(DB_STATS_DO_NOT_EXIST);
}
- /* else */
- /* Must unlock because otherwise there is a lock order
- violation with dict_sys->mutex below. Declare stats to be
- initialized before unlocking. */
- table->stat_initialized = TRUE;
- dict_table_stats_unlock(table, RW_X_LATCH);
+ return(DB_SUCCESS);
+
+ case DICT_STATS_FETCH_ONLY_IF_NOT_IN_MEMORY:
- if (strchr(table->name, '/') == NULL
- || strcmp(table->name, INDEX_STATS_NAME) == 0
- || strcmp(table->name, TABLE_STATS_NAME) == 0
- || (ut_strcount(table->name, "FTS") > 0
- && (ut_strcount(table->name, "CONFIG") > 0
- || ut_strcount(table->name, "INDEX") > 0
- || ut_strcount(table->name, "DELETED") > 0
- || ut_strcount(table->name, "DOC_ID") > 0
- || ut_strcount(table->name, "ADDED") > 0))) {
- /* Use the quick transient stats method for
- InnoDB internal tables, because we know the
- persistent stats storage does not contain data
- for them */
+ /* fetch requested, either fetch from persistent statistics
+ storage or use the old method */
- goto transient;
+ if (table->stat_initialized) {
+ return(DB_SUCCESS);
}
- /* else */
- if (dict_stats_persistent_storage_check(
- caller_has_dict_sys_mutex)) {
+ /* InnoDB internal tables (e.g. SYS_TABLES) cannot have
+ persistent stats enabled */
+ ut_a(strchr(table->name, '/') != NULL);
- ret = dict_stats_fetch_from_ps(table,
- caller_has_dict_sys_mutex);
+ if (!dict_stats_persistent_storage_check(false)) {
+ /* persistent statistics storage does not exist
+ or is corrupted, calculate the transient stats */
- if (ret == DB_STATS_DO_NOT_EXIST
- || (ret != DB_SUCCESS && stats_upd_option
- == DICT_STATS_FETCH_ONLY_IF_NOT_IN_MEMORY)) {
- /* Stats for this particular table do not
- exist or we have been called from open table
- which needs to initialize the stats,
- calculate the quick transient statistics */
- goto transient;
- }
- /* else */
+ ut_print_timestamp(stderr);
+ fprintf(stderr,
+ " InnoDB: Error: Fetch of persistent "
+ "statistics requested for table %s but the "
+ "required system tables %s and %s are not "
+ "present or have unexpected structure. "
+ "Using transient stats instead.\n",
+ ut_format_name(table->name, TRUE,
+ buf, sizeof(buf)),
+ TABLE_STATS_NAME_PRINT,
+ INDEX_STATS_NAME_PRINT);
- return(ret);
- } else {
- /* persistent statistics storage does not exist,
- calculate the transient stats */
goto transient;
}
- break;
+ dict_table_t* t;
- /* no "default:" in order to produce a compilation warning
- about unhandled enumeration value */
- }
+ ut_ad(!srv_read_only_mode);
-transient:
+ /* Create a dummy table object with the same name and
+ indexes, suitable for fetching the stats into it. */
+ t = dict_stats_table_clone_create(table);
- dict_table_stats_lock(table, RW_X_LATCH);
+ dberr_t err = dict_stats_fetch_from_ps(t);
- dict_stats_update_transient(table);
+ t->stats_last_recalc = table->stats_last_recalc;
+ t->stat_modified_counter = 0;
- dict_table_stats_unlock(table, RW_X_LATCH);
+ switch (err) {
+ case DB_SUCCESS:
- return(DB_SUCCESS);
-}
-/* @} */
+ dict_table_stats_lock(table, RW_X_LATCH);
-/*********************************************************************//**
-Close the stats tables. Should always be called after successful
-dict_stats_open(). It will free the dict_stats handle.
-dict_stats_close() @{ */
-UNIV_INLINE
-void
-dict_stats_close(
-/*=============*/
- dict_stats_t* dict_stats) /*!< in/own: Handle to open
- statistics tables */
-{
- if (dict_stats->table_stats != NULL) {
- dict_table_close(dict_stats->table_stats, FALSE);
- dict_stats->table_stats = NULL;
- }
+ /* Initialize all stats to dummy values before
+ copying because dict_stats_table_clone_create() does
+ skip corrupted indexes so our dummy object 't' may
+ have less indexes than the real object 'table'. */
+ dict_stats_empty_table(table);
- if (dict_stats->index_stats != NULL) {
- dict_table_close(dict_stats->index_stats, FALSE);
- dict_stats->index_stats = NULL;
- }
+ dict_stats_copy(table, t);
- mem_free(dict_stats);
-}
-/* @} */
+ dict_stats_assert_initialized(table);
-/*********************************************************************//**
-Open stats tables to prevent these tables from being DROPped.
-Also check whether they have the correct structure. The caller
-must call dict_stats_close() when he has finished DMLing the tables.
-dict_stats_open() @{
-@return pointer to open tables or NULL on failure */
-UNIV_INLINE
-dict_stats_t*
-dict_stats_open(void)
-/*=================*/
-{
- dict_stats_t* dict_stats;
+ dict_table_stats_unlock(table, RW_X_LATCH);
+
+ dict_stats_table_clone_free(t);
+
+ return(DB_SUCCESS);
+ case DB_STATS_DO_NOT_EXIST:
+
+ dict_stats_table_clone_free(t);
- dict_stats = static_cast<dict_stats_t*>(
- mem_zalloc(sizeof(*dict_stats)));
+ if (dict_stats_auto_recalc_is_enabled(table)) {
+ return(dict_stats_update(
+ table,
+ DICT_STATS_RECALC_PERSISTENT));
+ }
- dict_stats->table_stats = dict_table_open_on_name_no_stats(
- TABLE_STATS_NAME, FALSE, DICT_ERR_IGNORE_NONE);
+ ut_format_name(table->name, TRUE, buf, sizeof(buf));
+ ut_print_timestamp(stderr);
+ fprintf(stderr,
+ " InnoDB: Trying to use table %s which has "
+ "persistent statistics enabled, but auto "
+ "recalculation turned off and the statistics "
+ "do not exist in %s and %s. Please either run "
+ "\"ANALYZE TABLE %s;\" manually or enable the "
+ "auto recalculation with "
+ "\"ALTER TABLE %s STATS_AUTO_RECALC=1;\". "
+ "InnoDB will now use transient statistics for "
+ "%s.\n",
+ buf, TABLE_STATS_NAME, INDEX_STATS_NAME, buf,
+ buf, buf);
- dict_stats->index_stats = dict_table_open_on_name_no_stats(
- INDEX_STATS_NAME, FALSE, DICT_ERR_IGNORE_NONE);
+ goto transient;
+ default:
- /* Check if the tables have the correct structure, if yes then
- after this function we can safely DELETE from them without worrying
- that they may get DROPped or DDLed because the open will have
- increased the reference count. */
+ dict_stats_table_clone_free(t);
- if (dict_stats->table_stats == NULL
- || dict_stats->index_stats == NULL
- || !dict_stats_persistent_storage_check(FALSE)) {
+ ut_print_timestamp(stderr);
+ fprintf(stderr,
+ " InnoDB: Error fetching persistent statistics "
+ "for table %s from %s and %s: %s. "
+ "Using transient stats method instead.\n",
+ ut_format_name(table->name, TRUE, buf,
+ sizeof(buf)),
+ TABLE_STATS_NAME,
+ INDEX_STATS_NAME,
+ ut_strerr(err));
- /* There was an error, close the tables and free the handle. */
- dict_stats_close(dict_stats);
- dict_stats = NULL;
+ goto transient;
+ }
+ /* no "default:" in order to produce a compilation warning
+ about unhandled enumeration value */
}
- return(dict_stats);
+transient:
+
+ dict_table_stats_lock(table, RW_X_LATCH);
+
+ dict_stats_update_transient(table);
+
+ dict_table_stats_unlock(table, RW_X_LATCH);
+
+ return(DB_SUCCESS);
}
-/* @} */
/*********************************************************************//**
Removes the information for a particular index's stats from the persistent
storage if it exists and if there is data stored for this index.
-The transaction is not committed, it must not be committed in this
-function because this is the user trx that is running DROP INDEX.
-The transaction will be committed at the very end when dropping an
-index.
+This function creates its own trx and commits it.
A note from Marko why we cannot edit user and sys_* tables in one trx:
marko: The problem is that ibuf merges should be disabled while we are
rolling back dict transactions.
marko: If ibuf merges are not disabled, we need to scan the *.ibd files.
But we shouldn't open *.ibd files before we have rolled back dict
transactions and opened the SYS_* records for the *.ibd files.
-dict_stats_delete_index_stats() @{
+dict_stats_drop_index() @{
@return DB_SUCCESS or error code */
UNIV_INTERN
-enum db_err
-dict_stats_delete_index_stats(
-/*==========================*/
- dict_index_t* index, /*!< in: index */
- trx_t* trx, /*!< in: transaction to use */
+dberr_t
+dict_stats_drop_index(
+/*==================*/
+ const char* db_and_table,/*!< in: db and table, e.g. 'db/table' */
+ const char* iname, /*!< in: index name */
char* errstr, /*!< out: error message if != DB_SUCCESS
is returned */
ulint errstr_sz)/*!< in: size of the errstr buffer */
{
- char database_name[MAX_DATABASE_NAME_LEN + 1];
- const char* table_name;
+ char db_utf8[MAX_DB_UTF8_LEN];
+ char table_utf8[MAX_TABLE_UTF8_LEN];
pars_info_t* pinfo;
- enum db_err ret;
- dict_stats_t* dict_stats;
- void* mysql_thd = trx->mysql_thd;
+ dberr_t ret;
+
+ ut_ad(!mutex_own(&dict_sys->mutex));
/* skip indexes whose table names do not contain a database name
e.g. if we are dropping an index from SYS_TABLES */
- if (strchr(index->table_name, '/') == NULL) {
-
- return(DB_SUCCESS);
- }
+ if (strchr(db_and_table, '/') == NULL) {
- /* Increment table reference count to prevent the tables from
- being DROPped just before que_eval_sql(). */
- dict_stats = dict_stats_open();
-
- if (dict_stats == NULL) {
- /* stats tables do not exist or have unexpected structure */
return(DB_SUCCESS);
}
- /* the stats tables cannot be DROPped now */
-
- ut_snprintf(database_name, sizeof(database_name), "%.*s",
- (int) dict_get_db_name_len(index->table_name),
- index->table_name);
-
- table_name = dict_remove_db_name(index->table_name);
+ dict_fs2utf8(db_and_table, db_utf8, sizeof(db_utf8),
+ table_utf8, sizeof(table_utf8));
pinfo = pars_info_create();
- pars_info_add_str_literal(pinfo, "database_name", database_name);
+ pars_info_add_str_literal(pinfo, "database_name", db_utf8);
- pars_info_add_str_literal(pinfo, "table_name", table_name);
+ pars_info_add_str_literal(pinfo, "table_name", table_utf8);
- pars_info_add_str_literal(pinfo, "index_name", index->name);
+ pars_info_add_str_literal(pinfo, "index_name", iname);
- /* Force lock wait timeout to be instantaneous because the incoming
- transaction was created via MySQL. */
+ rw_lock_x_lock(&dict_operation_lock);
+ mutex_enter(&dict_sys->mutex);
- mysql_thd = trx->mysql_thd;
- trx->mysql_thd = NULL;
+ ret = dict_stats_exec_sql(
+ pinfo,
+ "PROCEDURE DROP_INDEX_STATS () IS\n"
+ "BEGIN\n"
+ "DELETE FROM \"" INDEX_STATS_NAME "\" WHERE\n"
+ "database_name = :database_name AND\n"
+ "table_name = :table_name AND\n"
+ "index_name = :index_name;\n"
+ "END;\n");
- ret = que_eval_sql(pinfo,
- "PROCEDURE DROP_INDEX_STATS () IS\n"
- "BEGIN\n"
- "DELETE FROM \"" INDEX_STATS_NAME "\" WHERE\n"
- "database_name = :database_name AND\n"
- "table_name = :table_name AND\n"
- "index_name = :index_name;\n"
- "END;\n",
- TRUE,
- trx);
-
- trx->mysql_thd = mysql_thd;
-
- /* pinfo is freed by que_eval_sql() */
+ mutex_exit(&dict_sys->mutex);
+ rw_lock_x_unlock(&dict_operation_lock);
- /* do not to commit here, see the function's comment */
+ if (ret == DB_STATS_DO_NOT_EXIST) {
+ ret = DB_SUCCESS;
+ }
if (ret != DB_SUCCESS) {
-
ut_snprintf(errstr, errstr_sz,
"Unable to delete statistics for index %s "
- "from %s%s. They can be deleted later using "
+ "from %s%s: %s. They can be deleted later using "
"DELETE FROM %s WHERE "
"database_name = '%s' AND "
"table_name = '%s' AND "
"index_name = '%s';",
- index->name,
+ iname,
INDEX_STATS_NAME_PRINT,
(ret == DB_LOCK_WAIT_TIMEOUT
? " because the rows are locked"
: ""),
+ ut_strerr(ret),
INDEX_STATS_NAME_PRINT,
- database_name,
- table_name,
- index->name);
+ db_utf8,
+ table_utf8,
+ iname);
ut_print_timestamp(stderr);
fprintf(stderr, " InnoDB: %s\n", errstr);
-
- trx->error_state = DB_SUCCESS;
}
- dict_stats_close(dict_stats);
+ return(ret);
+}
+/* @} */
+
+/*********************************************************************//**
+Executes
+DELETE FROM mysql.innodb_table_stats
+WHERE database_name = '...' AND table_name = '...';
+Creates its own transaction and commits it.
+dict_stats_delete_from_table_stats() @{
+@return DB_SUCCESS or error code */
+UNIV_INLINE
+dberr_t
+dict_stats_delete_from_table_stats(
+/*===============================*/
+ const char* database_name, /*!< in: database name, e.g. 'db' */
+ const char* table_name) /*!< in: table name, e.g. 'table' */
+{
+ pars_info_t* pinfo;
+ dberr_t ret;
+
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+ ut_ad(mutex_own(&dict_sys->mutex));
+
+ pinfo = pars_info_create();
+
+ pars_info_add_str_literal(pinfo, "database_name", database_name);
+ pars_info_add_str_literal(pinfo, "table_name", table_name);
+
+ ret = dict_stats_exec_sql(
+ pinfo,
+ "PROCEDURE DELETE_FROM_TABLE_STATS () IS\n"
+ "BEGIN\n"
+ "DELETE FROM \"" TABLE_STATS_NAME "\" WHERE\n"
+ "database_name = :database_name AND\n"
+ "table_name = :table_name;\n"
+ "END;\n");
+
+ return(ret);
+}
+/* @} */
+
+/*********************************************************************//**
+Executes
+DELETE FROM mysql.innodb_index_stats
+WHERE database_name = '...' AND table_name = '...';
+Creates its own transaction and commits it.
+dict_stats_delete_from_index_stats() @{
+@return DB_SUCCESS or error code */
+UNIV_INLINE
+dberr_t
+dict_stats_delete_from_index_stats(
+/*===============================*/
+ const char* database_name, /*!< in: database name, e.g. 'db' */
+ const char* table_name) /*!< in: table name, e.g. 'table' */
+{
+ pars_info_t* pinfo;
+ dberr_t ret;
+
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+ ut_ad(mutex_own(&dict_sys->mutex));
+
+ pinfo = pars_info_create();
+
+ pars_info_add_str_literal(pinfo, "database_name", database_name);
+ pars_info_add_str_literal(pinfo, "table_name", table_name);
+
+ ret = dict_stats_exec_sql(
+ pinfo,
+ "PROCEDURE DELETE_FROM_INDEX_STATS () IS\n"
+ "BEGIN\n"
+ "DELETE FROM \"" INDEX_STATS_NAME "\" WHERE\n"
+ "database_name = :database_name AND\n"
+ "table_name = :table_name;\n"
+ "END;\n");
return(ret);
}
@@ -2640,130 +3329,332 @@ dict_stats_delete_index_stats(
/*********************************************************************//**
Removes the statistics for a table and all of its indexes from the
persistent statistics storage if it exists and if there is data stored for
-the table. This function creates its own transaction and commits it.
-dict_stats_delete_table_stats() @{
+the table. This function creates its own transaction and commits it.
+dict_stats_drop_table() @{
@return DB_SUCCESS or error code */
UNIV_INTERN
-enum db_err
-dict_stats_delete_table_stats(
-/*==========================*/
- const char* table_name, /*!< in: table name */
+dberr_t
+dict_stats_drop_table(
+/*==================*/
+ const char* db_and_table, /*!< in: db and table, e.g. 'db/table' */
char* errstr, /*!< out: error message
if != DB_SUCCESS is returned */
ulint errstr_sz) /*!< in: size of errstr buffer */
{
- char database_name[MAX_DATABASE_NAME_LEN + 1];
- const char* table_name_strip; /* without leading db name */
- trx_t* trx;
- pars_info_t* pinfo;
- enum db_err ret = DB_ERROR;
- dict_stats_t* dict_stats;
+ char db_utf8[MAX_DB_UTF8_LEN];
+ char table_utf8[MAX_TABLE_UTF8_LEN];
+ dberr_t ret;
+
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+ ut_ad(mutex_own(&dict_sys->mutex));
/* skip tables that do not contain a database name
e.g. if we are dropping SYS_TABLES */
- if (strchr(table_name, '/') == NULL) {
+ if (strchr(db_and_table, '/') == NULL) {
return(DB_SUCCESS);
}
/* skip innodb_table_stats and innodb_index_stats themselves */
- if (strcmp(table_name, TABLE_STATS_NAME) == 0
- || strcmp(table_name, INDEX_STATS_NAME) == 0) {
+ if (strcmp(db_and_table, TABLE_STATS_NAME) == 0
+ || strcmp(db_and_table, INDEX_STATS_NAME) == 0) {
return(DB_SUCCESS);
}
- /* Create a new private trx */
+ dict_fs2utf8(db_and_table, db_utf8, sizeof(db_utf8),
+ table_utf8, sizeof(table_utf8));
- trx = trx_allocate_for_background();
+ ret = dict_stats_delete_from_table_stats(db_utf8, table_utf8);
- /* Use 'read-uncommitted' so that the SELECTs we execute
- do not get blocked in case some user has locked the rows we
- are SELECTing */
+ if (ret == DB_SUCCESS) {
+ ret = dict_stats_delete_from_index_stats(db_utf8, table_utf8);
+ }
- trx->isolation_level = TRX_ISO_READ_UNCOMMITTED;
+ if (ret == DB_STATS_DO_NOT_EXIST) {
+ ret = DB_SUCCESS;
+ }
- trx_start_if_not_started(trx);
+ if (ret != DB_SUCCESS) {
- /* Increment table reference count to prevent the tables from
- being DROPped just before que_eval_sql(). */
- dict_stats = dict_stats_open();
+ ut_snprintf(errstr, errstr_sz,
+ "Unable to delete statistics for table %s.%s: %s. "
+ "They can be deleted later using "
- if (dict_stats == NULL) {
- /* stats tables do not exist or have unexpected structure */
- ret = DB_SUCCESS;
- goto commit_and_return;
+ "DELETE FROM %s WHERE "
+ "database_name = '%s' AND "
+ "table_name = '%s'; "
+
+ "DELETE FROM %s WHERE "
+ "database_name = '%s' AND "
+ "table_name = '%s';",
+
+ db_utf8, table_utf8,
+ ut_strerr(ret),
+
+ INDEX_STATS_NAME_PRINT,
+ db_utf8, table_utf8,
+
+ TABLE_STATS_NAME_PRINT,
+ db_utf8, table_utf8);
}
- ut_snprintf(database_name, sizeof(database_name), "%.*s",
- (int) dict_get_db_name_len(table_name),
- table_name);
+ return(ret);
+}
+/* @} */
+
+/*********************************************************************//**
+Executes
+UPDATE mysql.innodb_table_stats SET
+database_name = '...', table_name = '...'
+WHERE database_name = '...' AND table_name = '...';
+Creates its own transaction and commits it.
+dict_stats_rename_in_table_stats() @{
+@return DB_SUCCESS or error code */
+UNIV_INLINE
+dberr_t
+dict_stats_rename_in_table_stats(
+/*=============================*/
+ const char* old_dbname_utf8,/*!< in: database name, e.g. 'olddb' */
+ const char* old_tablename_utf8,/*!< in: table name, e.g. 'oldtable' */
+ const char* new_dbname_utf8,/*!< in: database name, e.g. 'newdb' */
+ const char* new_tablename_utf8)/*!< in: table name, e.g. 'newtable' */
+{
+ pars_info_t* pinfo;
+ dberr_t ret;
- table_name_strip = dict_remove_db_name(table_name);
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+ ut_ad(mutex_own(&dict_sys->mutex));
pinfo = pars_info_create();
- pars_info_add_str_literal(pinfo, "database_name", database_name);
+ pars_info_add_str_literal(pinfo, "old_dbname_utf8", old_dbname_utf8);
+ pars_info_add_str_literal(pinfo, "old_tablename_utf8", old_tablename_utf8);
+ pars_info_add_str_literal(pinfo, "new_dbname_utf8", new_dbname_utf8);
+ pars_info_add_str_literal(pinfo, "new_tablename_utf8", new_tablename_utf8);
+
+ ret = dict_stats_exec_sql(
+ pinfo,
+ "PROCEDURE RENAME_IN_TABLE_STATS () IS\n"
+ "BEGIN\n"
+ "UPDATE \"" TABLE_STATS_NAME "\" SET\n"
+ "database_name = :new_dbname_utf8,\n"
+ "table_name = :new_tablename_utf8\n"
+ "WHERE\n"
+ "database_name = :old_dbname_utf8 AND\n"
+ "table_name = :old_tablename_utf8;\n"
+ "END;\n");
- pars_info_add_str_literal(pinfo, "table_name", table_name_strip);
+ return(ret);
+}
+/* @} */
- ret = que_eval_sql(pinfo,
- "PROCEDURE DROP_TABLE_STATS () IS\n"
- "BEGIN\n"
+/*********************************************************************//**
+Executes
+UPDATE mysql.innodb_index_stats SET
+database_name = '...', table_name = '...'
+WHERE database_name = '...' AND table_name = '...';
+Creates its own transaction and commits it.
+dict_stats_rename_in_index_stats() @{
+@return DB_SUCCESS or error code */
+UNIV_INLINE
+dberr_t
+dict_stats_rename_in_index_stats(
+/*=============================*/
+ const char* old_dbname_utf8,/*!< in: database name, e.g. 'olddb' */
+ const char* old_tablename_utf8,/*!< in: table name, e.g. 'oldtable' */
+ const char* new_dbname_utf8,/*!< in: database name, e.g. 'newdb' */
+ const char* new_tablename_utf8)/*!< in: table name, e.g. 'newtable' */
+{
+ pars_info_t* pinfo;
+ dberr_t ret;
- "DELETE FROM \"" INDEX_STATS_NAME "\" WHERE\n"
- "database_name = :database_name AND\n"
- "table_name = :table_name;\n"
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+ ut_ad(mutex_own(&dict_sys->mutex));
- "DELETE FROM \"" TABLE_STATS_NAME "\" WHERE\n"
- "database_name = :database_name AND\n"
- "table_name = :table_name;\n"
+ pinfo = pars_info_create();
- "END;\n",
- TRUE,
- trx);
+ pars_info_add_str_literal(pinfo, "old_dbname_utf8", old_dbname_utf8);
+ pars_info_add_str_literal(pinfo, "old_tablename_utf8", old_tablename_utf8);
+ pars_info_add_str_literal(pinfo, "new_dbname_utf8", new_dbname_utf8);
+ pars_info_add_str_literal(pinfo, "new_tablename_utf8", new_tablename_utf8);
+
+ ret = dict_stats_exec_sql(
+ pinfo,
+ "PROCEDURE RENAME_IN_INDEX_STATS () IS\n"
+ "BEGIN\n"
+ "UPDATE \"" INDEX_STATS_NAME "\" SET\n"
+ "database_name = :new_dbname_utf8,\n"
+ "table_name = :new_tablename_utf8\n"
+ "WHERE\n"
+ "database_name = :old_dbname_utf8 AND\n"
+ "table_name = :old_tablename_utf8;\n"
+ "END;\n");
- /* pinfo is freed by que_eval_sql() */
+ return(ret);
+}
+/* @} */
- if (ret != DB_SUCCESS) {
+/*********************************************************************//**
+Renames a table in InnoDB persistent stats storage.
+This function creates its own transaction and commits it.
+dict_stats_rename_table() @{
+@return DB_SUCCESS or error code */
+UNIV_INTERN
+dberr_t
+dict_stats_rename_table(
+/*====================*/
+ const char* old_name, /*!< in: old name, e.g. 'db/table' */
+ const char* new_name, /*!< in: new name, e.g. 'db/table' */
+ char* errstr, /*!< out: error string if != DB_SUCCESS
+ is returned */
+ size_t errstr_sz) /*!< in: errstr size */
+{
+ char old_db_utf8[MAX_DB_UTF8_LEN];
+ char new_db_utf8[MAX_DB_UTF8_LEN];
+ char old_table_utf8[MAX_TABLE_UTF8_LEN];
+ char new_table_utf8[MAX_TABLE_UTF8_LEN];
+ dberr_t ret;
- ut_snprintf(errstr, errstr_sz,
- "Unable to delete statistics for table %s.%s "
- "from %s or %s%s. "
- "They can be deleted later using "
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(!rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+ ut_ad(!mutex_own(&dict_sys->mutex));
- "DELETE FROM %s WHERE "
- "database_name = '%s' AND "
- "table_name = '%s'; "
+ /* skip innodb_table_stats and innodb_index_stats themselves */
+ if (strcmp(old_name, TABLE_STATS_NAME) == 0
+ || strcmp(old_name, INDEX_STATS_NAME) == 0
+ || strcmp(new_name, TABLE_STATS_NAME) == 0
+ || strcmp(new_name, INDEX_STATS_NAME) == 0) {
- "DELETE FROM %s WHERE "
- "database_name = '%s' AND "
- "table_name = '%s';",
+ return(DB_SUCCESS);
+ }
- database_name, table_name_strip,
- TABLE_STATS_NAME_PRINT, INDEX_STATS_NAME_PRINT,
+ dict_fs2utf8(old_name, old_db_utf8, sizeof(old_db_utf8),
+ old_table_utf8, sizeof(old_table_utf8));
- (ret == DB_LOCK_WAIT_TIMEOUT
- ? " because the rows are locked"
- : ""),
+ dict_fs2utf8(new_name, new_db_utf8, sizeof(new_db_utf8),
+ new_table_utf8, sizeof(new_table_utf8));
- INDEX_STATS_NAME_PRINT,
- database_name, table_name_strip,
+ rw_lock_x_lock(&dict_operation_lock);
+ mutex_enter(&dict_sys->mutex);
+
+ ulint n_attempts = 0;
+ do {
+ n_attempts++;
+
+ ret = dict_stats_rename_in_table_stats(
+ old_db_utf8, old_table_utf8,
+ new_db_utf8, new_table_utf8);
+
+ if (ret == DB_DUPLICATE_KEY) {
+ dict_stats_delete_from_table_stats(
+ new_db_utf8, new_table_utf8);
+ }
+
+ if (ret == DB_STATS_DO_NOT_EXIST) {
+ ret = DB_SUCCESS;
+ }
+
+ if (ret != DB_SUCCESS) {
+ mutex_exit(&dict_sys->mutex);
+ rw_lock_x_unlock(&dict_operation_lock);
+ os_thread_sleep(200000 /* 0.2 sec */);
+ rw_lock_x_lock(&dict_operation_lock);
+ mutex_enter(&dict_sys->mutex);
+ }
+ } while ((ret == DB_DEADLOCK
+ || ret == DB_DUPLICATE_KEY
+ || ret == DB_LOCK_WAIT_TIMEOUT)
+ && n_attempts < 5);
+
+ if (ret != DB_SUCCESS) {
+ ut_snprintf(errstr, errstr_sz,
+ "Unable to rename statistics from "
+ "%s.%s to %s.%s in %s: %s. "
+ "They can be renamed later using "
+
+ "UPDATE %s SET "
+ "database_name = '%s', "
+ "table_name = '%s' "
+ "WHERE "
+ "database_name = '%s' AND "
+ "table_name = '%s';",
+ old_db_utf8, old_table_utf8,
+ new_db_utf8, new_table_utf8,
TABLE_STATS_NAME_PRINT,
- database_name, table_name_strip);
+ ut_strerr(ret),
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: %s\n", errstr);
+ TABLE_STATS_NAME_PRINT,
+ new_db_utf8, new_table_utf8,
+ old_db_utf8, old_table_utf8);
+ mutex_exit(&dict_sys->mutex);
+ rw_lock_x_unlock(&dict_operation_lock);
+ return(ret);
}
+ /* else */
- dict_stats_close(dict_stats);
+ n_attempts = 0;
+ do {
+ n_attempts++;
-commit_and_return:
+ ret = dict_stats_rename_in_index_stats(
+ old_db_utf8, old_table_utf8,
+ new_db_utf8, new_table_utf8);
- trx_commit_for_mysql(trx);
+ if (ret == DB_DUPLICATE_KEY) {
+ dict_stats_delete_from_index_stats(
+ new_db_utf8, new_table_utf8);
+ }
- trx_free_for_background(trx);
+ if (ret == DB_STATS_DO_NOT_EXIST) {
+ ret = DB_SUCCESS;
+ }
+
+ if (ret != DB_SUCCESS) {
+ mutex_exit(&dict_sys->mutex);
+ rw_lock_x_unlock(&dict_operation_lock);
+ os_thread_sleep(200000 /* 0.2 sec */);
+ rw_lock_x_lock(&dict_operation_lock);
+ mutex_enter(&dict_sys->mutex);
+ }
+ } while ((ret == DB_DEADLOCK
+ || ret == DB_DUPLICATE_KEY
+ || ret == DB_LOCK_WAIT_TIMEOUT)
+ && n_attempts < 5);
+
+ mutex_exit(&dict_sys->mutex);
+ rw_lock_x_unlock(&dict_operation_lock);
+
+ if (ret != DB_SUCCESS) {
+ ut_snprintf(errstr, errstr_sz,
+ "Unable to rename statistics from "
+ "%s.%s to %s.%s in %s: %s. "
+ "They can be renamed later using "
+
+ "UPDATE %s SET "
+ "database_name = '%s', "
+ "table_name = '%s' "
+ "WHERE "
+ "database_name = '%s' AND "
+ "table_name = '%s';",
+
+ old_db_utf8, old_table_utf8,
+ new_db_utf8, new_table_utf8,
+ INDEX_STATS_NAME_PRINT,
+ ut_strerr(ret),
+
+ INDEX_STATS_NAME_PRINT,
+ new_db_utf8, new_table_utf8,
+ old_db_utf8, old_table_utf8);
+ }
return(ret);
}
@@ -2933,13 +3824,13 @@ test_dict_stats_save()
dict_table_t table;
dict_index_t index1;
dict_field_t index1_fields[1];
- ib_uint64_t index1_stat_n_diff_key_vals[2];
- ib_uint64_t index1_stat_n_sample_sizes[2];
+ ib_uint64_t index1_stat_n_diff_key_vals[1];
+ ib_uint64_t index1_stat_n_sample_sizes[1];
dict_index_t index2;
dict_field_t index2_fields[4];
- ib_uint64_t index2_stat_n_diff_key_vals[5];
- ib_uint64_t index2_stat_n_sample_sizes[5];
- enum db_err ret;
+ ib_uint64_t index2_stat_n_diff_key_vals[4];
+ ib_uint64_t index2_stat_n_sample_sizes[4];
+ dberr_t ret;
/* craft a dummy dict_table_t */
table.name = (char*) (TEST_DATABASE_NAME "/" TEST_TABLE_NAME);
@@ -2949,16 +3840,11 @@ test_dict_stats_save()
UT_LIST_INIT(table.indexes);
UT_LIST_ADD_LAST(indexes, table.indexes, &index1);
UT_LIST_ADD_LAST(indexes, table.indexes, &index2);
-#ifdef UNIV_DEBUG
- table.magic_n = DICT_TABLE_MAGIC_N;
-#endif /* UNIV_DEBUG */
+ ut_d(table.magic_n = DICT_TABLE_MAGIC_N);
+ ut_d(index1.magic_n = DICT_INDEX_MAGIC_N);
index1.name = TEST_IDX1_NAME;
index1.table = &table;
-#ifdef UNIV_DEBUG
- index1.magic_n = DICT_INDEX_MAGIC_N;
-#endif /* UNIV_DEBUG */
- index1.to_be_dropped = 0;
index1.cached = 1;
index1.n_uniq = 1;
index1.fields = index1_fields;
@@ -2967,17 +3853,12 @@ test_dict_stats_save()
index1.stat_index_size = TEST_IDX1_INDEX_SIZE;
index1.stat_n_leaf_pages = TEST_IDX1_N_LEAF_PAGES;
index1_fields[0].name = TEST_IDX1_COL1_NAME;
- index1_stat_n_diff_key_vals[0] = 1; /* dummy */
- index1_stat_n_diff_key_vals[1] = TEST_IDX1_N_DIFF1;
- index1_stat_n_sample_sizes[0] = 0; /* dummy */
- index1_stat_n_sample_sizes[1] = TEST_IDX1_N_DIFF1_SAMPLE_SIZE;
+ index1_stat_n_diff_key_vals[0] = TEST_IDX1_N_DIFF1;
+ index1_stat_n_sample_sizes[0] = TEST_IDX1_N_DIFF1_SAMPLE_SIZE;
+ ut_d(index2.magic_n = DICT_INDEX_MAGIC_N);
index2.name = TEST_IDX2_NAME;
index2.table = &table;
-#ifdef UNIV_DEBUG
- index2.magic_n = DICT_INDEX_MAGIC_N;
-#endif /* UNIV_DEBUG */
- index2.to_be_dropped = 0;
index2.cached = 1;
index2.n_uniq = 4;
index2.fields = index2_fields;
@@ -2989,18 +3870,16 @@ test_dict_stats_save()
index2_fields[1].name = TEST_IDX2_COL2_NAME;
index2_fields[2].name = TEST_IDX2_COL3_NAME;
index2_fields[3].name = TEST_IDX2_COL4_NAME;
- index2_stat_n_diff_key_vals[0] = 1; /* dummy */
- index2_stat_n_diff_key_vals[1] = TEST_IDX2_N_DIFF1;
- index2_stat_n_diff_key_vals[2] = TEST_IDX2_N_DIFF2;
- index2_stat_n_diff_key_vals[3] = TEST_IDX2_N_DIFF3;
- index2_stat_n_diff_key_vals[4] = TEST_IDX2_N_DIFF4;
- index2_stat_n_sample_sizes[0] = 0; /* dummy */
- index2_stat_n_sample_sizes[1] = TEST_IDX2_N_DIFF1_SAMPLE_SIZE;
- index2_stat_n_sample_sizes[2] = TEST_IDX2_N_DIFF2_SAMPLE_SIZE;
- index2_stat_n_sample_sizes[3] = TEST_IDX2_N_DIFF3_SAMPLE_SIZE;
- index2_stat_n_sample_sizes[4] = TEST_IDX2_N_DIFF4_SAMPLE_SIZE;
-
- ret = dict_stats_save(&table, FALSE);
+ index2_stat_n_diff_key_vals[0] = TEST_IDX2_N_DIFF1;
+ index2_stat_n_diff_key_vals[1] = TEST_IDX2_N_DIFF2;
+ index2_stat_n_diff_key_vals[2] = TEST_IDX2_N_DIFF3;
+ index2_stat_n_diff_key_vals[3] = TEST_IDX2_N_DIFF4;
+ index2_stat_n_sample_sizes[0] = TEST_IDX2_N_DIFF1_SAMPLE_SIZE;
+ index2_stat_n_sample_sizes[1] = TEST_IDX2_N_DIFF2_SAMPLE_SIZE;
+ index2_stat_n_sample_sizes[2] = TEST_IDX2_N_DIFF3_SAMPLE_SIZE;
+ index2_stat_n_sample_sizes[3] = TEST_IDX2_N_DIFF4_SAMPLE_SIZE;
+
+ ret = dict_stats_save(&table);
ut_a(ret == DB_SUCCESS);
@@ -3098,41 +3977,35 @@ test_dict_stats_fetch_from_ps()
{
dict_table_t table;
dict_index_t index1;
- ib_uint64_t index1_stat_n_diff_key_vals[2];
- ib_uint64_t index1_stat_n_sample_sizes[2];
+ ib_uint64_t index1_stat_n_diff_key_vals[1];
+ ib_uint64_t index1_stat_n_sample_sizes[1];
dict_index_t index2;
- ib_uint64_t index2_stat_n_diff_key_vals[5];
- ib_uint64_t index2_stat_n_sample_sizes[5];
- enum db_err ret;
+ ib_uint64_t index2_stat_n_diff_key_vals[4];
+ ib_uint64_t index2_stat_n_sample_sizes[4];
+ dberr_t ret;
/* craft a dummy dict_table_t */
table.name = (char*) (TEST_DATABASE_NAME "/" TEST_TABLE_NAME);
UT_LIST_INIT(table.indexes);
UT_LIST_ADD_LAST(indexes, table.indexes, &index1);
UT_LIST_ADD_LAST(indexes, table.indexes, &index2);
-#ifdef UNIV_DEBUG
- table.magic_n = DICT_TABLE_MAGIC_N;
-#endif /* UNIV_DEBUG */
+ ut_d(table.magic_n = DICT_TABLE_MAGIC_N);
index1.name = TEST_IDX1_NAME;
-#ifdef UNIV_DEBUG
- index1.magic_n = DICT_INDEX_MAGIC_N;
-#endif /* UNIV_DEBUG */
+ ut_d(index1.magic_n = DICT_INDEX_MAGIC_N);
index1.cached = 1;
index1.n_uniq = 1;
index1.stat_n_diff_key_vals = index1_stat_n_diff_key_vals;
index1.stat_n_sample_sizes = index1_stat_n_sample_sizes;
index2.name = TEST_IDX2_NAME;
-#ifdef UNIV_DEBUG
- index2.magic_n = DICT_INDEX_MAGIC_N;
-#endif /* UNIV_DEBUG */
+ ut_d(index2.magic_n = DICT_INDEX_MAGIC_N);
index2.cached = 1;
index2.n_uniq = 4;
index2.stat_n_diff_key_vals = index2_stat_n_diff_key_vals;
index2.stat_n_sample_sizes = index2_stat_n_sample_sizes;
- ret = dict_stats_fetch_from_ps(&table, FALSE);
+ ret = dict_stats_fetch_from_ps(&table);
ut_a(ret == DB_SUCCESS);
@@ -3143,19 +4016,19 @@ test_dict_stats_fetch_from_ps()
ut_a(index1.stat_index_size == TEST_IDX1_INDEX_SIZE);
ut_a(index1.stat_n_leaf_pages == TEST_IDX1_N_LEAF_PAGES);
- ut_a(index1_stat_n_diff_key_vals[1] == TEST_IDX1_N_DIFF1);
- ut_a(index1_stat_n_sample_sizes[1] == TEST_IDX1_N_DIFF1_SAMPLE_SIZE);
+ ut_a(index1_stat_n_diff_key_vals[0] == TEST_IDX1_N_DIFF1);
+ ut_a(index1_stat_n_sample_sizes[0] == TEST_IDX1_N_DIFF1_SAMPLE_SIZE);
ut_a(index2.stat_index_size == TEST_IDX2_INDEX_SIZE);
ut_a(index2.stat_n_leaf_pages == TEST_IDX2_N_LEAF_PAGES);
- ut_a(index2_stat_n_diff_key_vals[1] == TEST_IDX2_N_DIFF1);
- ut_a(index2_stat_n_sample_sizes[1] == TEST_IDX2_N_DIFF1_SAMPLE_SIZE);
- ut_a(index2_stat_n_diff_key_vals[2] == TEST_IDX2_N_DIFF2);
- ut_a(index2_stat_n_sample_sizes[2] == TEST_IDX2_N_DIFF2_SAMPLE_SIZE);
- ut_a(index2_stat_n_diff_key_vals[3] == TEST_IDX2_N_DIFF3);
- ut_a(index2_stat_n_sample_sizes[3] == TEST_IDX2_N_DIFF3_SAMPLE_SIZE);
- ut_a(index2_stat_n_diff_key_vals[4] == TEST_IDX2_N_DIFF4);
- ut_a(index2_stat_n_sample_sizes[4] == TEST_IDX2_N_DIFF4_SAMPLE_SIZE);
+ ut_a(index2_stat_n_diff_key_vals[0] == TEST_IDX2_N_DIFF1);
+ ut_a(index2_stat_n_sample_sizes[0] == TEST_IDX2_N_DIFF1_SAMPLE_SIZE);
+ ut_a(index2_stat_n_diff_key_vals[1] == TEST_IDX2_N_DIFF2);
+ ut_a(index2_stat_n_sample_sizes[1] == TEST_IDX2_N_DIFF2_SAMPLE_SIZE);
+ ut_a(index2_stat_n_diff_key_vals[2] == TEST_IDX2_N_DIFF3);
+ ut_a(index2_stat_n_sample_sizes[2] == TEST_IDX2_N_DIFF3_SAMPLE_SIZE);
+ ut_a(index2_stat_n_diff_key_vals[3] == TEST_IDX2_N_DIFF4);
+ ut_a(index2_stat_n_sample_sizes[3] == TEST_IDX2_N_DIFF4_SAMPLE_SIZE);
printf("OK: fetch successful\n");
}
diff --git a/storage/innobase/dict/dict0stats_bg.cc b/storage/innobase/dict/dict0stats_bg.cc
new file mode 100644
index 00000000000..9a10d995360
--- /dev/null
+++ b/storage/innobase/dict/dict0stats_bg.cc
@@ -0,0 +1,402 @@
+/*****************************************************************************
+
+Copyright (c) 2012, Oracle and/or its affiliates. All Rights Reserved.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
+
+*****************************************************************************/
+
+/**************************************************//**
+@file dict/dict0stats_bg.cc
+Code used for background table and index stats gathering.
+
+Created Apr 25, 2012 Vasil Dimov
+*******************************************************/
+
+#include "row0mysql.h"
+#include "srv0start.h"
+#include "dict0stats.h"
+#include "dict0stats_bg.h"
+
+#include <vector>
+
+/** Minimum time interval between stats recalc for a given table */
+#define MIN_RECALC_INTERVAL 10 /* seconds */
+
+#define SHUTTING_DOWN() (srv_shutdown_state != SRV_SHUTDOWN_NONE)
+
+/** Event to wake up the stats thread */
+UNIV_INTERN os_event_t dict_stats_event = NULL;
+
+/** This mutex protects the "recalc_pool" variable. */
+static ib_mutex_t recalc_pool_mutex;
+#ifdef HAVE_PSI_INTERFACE
+static mysql_pfs_key_t recalc_pool_mutex_key;
+#endif /* HAVE_PSI_INTERFACE */
+
+/** The number of tables that can be added to "recalc_pool" before
+it is enlarged */
+static const ulint RECALC_POOL_INITIAL_SLOTS = 128;
+
+/** The multitude of tables whose stats are to be automatically
+recalculated - an STL vector */
+typedef std::vector<table_id_t> recalc_pool_t;
+static recalc_pool_t recalc_pool;
+
+typedef recalc_pool_t::iterator recalc_pool_iterator_t;
+
+/*****************************************************************//**
+Initialize the recalc pool, called once during thread initialization. */
+static
+void
+dict_stats_recalc_pool_init()
+/*=========================*/
+{
+ ut_ad(!srv_read_only_mode);
+
+ recalc_pool.reserve(RECALC_POOL_INITIAL_SLOTS);
+}
+
+/*****************************************************************//**
+Free the resources occupied by the recalc pool, called once during
+thread de-initialization. */
+static
+void
+dict_stats_recalc_pool_deinit()
+/*===========================*/
+{
+ ut_ad(!srv_read_only_mode);
+
+ recalc_pool.clear();
+ /*
+ recalc_pool may still have its buffer allocated. It will free it when
+ its destructor is called.
+ The problem is, memory leak detector is run before the recalc_pool's
+ destructor is invoked, and will report recalc_pool's buffer as leaked
+ memory. To avoid that, we force recalc_pool to surrender its buffer
+ to empty_pool object, which will free it when leaving this function:
+ */
+ recalc_pool_t empty_pool;
+ recalc_pool.swap(empty_pool);
+}
+
+/*****************************************************************//**
+Add a table to the recalc pool, which is processed by the
+background stats gathering thread. Only the table id is added to the
+list, so the table can be closed after being enqueued and it will be
+opened when needed. If the table does not exist later (has been DROPped),
+then it will be removed from the pool and skipped.
+dict_stats_recalc_pool_add() @{ */
+UNIV_INTERN
+void
+dict_stats_recalc_pool_add(
+/*=======================*/
+ const dict_table_t* table) /*!< in: table to add */
+{
+ ut_ad(!srv_read_only_mode);
+
+ mutex_enter(&recalc_pool_mutex);
+
+ /* quit if already in the list */
+ for (recalc_pool_iterator_t iter = recalc_pool.begin();
+ iter != recalc_pool.end();
+ ++iter) {
+
+ if (*iter == table->id) {
+ mutex_exit(&recalc_pool_mutex);
+ return;
+ }
+ }
+
+ recalc_pool.push_back(table->id);
+
+ mutex_exit(&recalc_pool_mutex);
+
+ os_event_set(dict_stats_event);
+}
+/* @} */
+
+/*****************************************************************//**
+Get a table from the auto recalc pool. The returned table id is removed
+from the pool.
+dict_stats_recalc_pool_get() @{
+@return true if the pool was non-empty and "id" was set, false otherwise */
+static
+bool
+dict_stats_recalc_pool_get(
+/*=======================*/
+ table_id_t* id) /*!< out: table id, or unmodified if list is
+ empty */
+{
+ ut_ad(!srv_read_only_mode);
+
+ mutex_enter(&recalc_pool_mutex);
+
+ if (recalc_pool.empty()) {
+ mutex_exit(&recalc_pool_mutex);
+ return(false);
+ }
+
+ *id = recalc_pool[0];
+
+ recalc_pool.erase(recalc_pool.begin());
+
+ mutex_exit(&recalc_pool_mutex);
+
+ return(true);
+}
+/* @} */
+
+/*****************************************************************//**
+Delete a given table from the auto recalc pool.
+dict_stats_recalc_pool_del() */
+UNIV_INTERN
+void
+dict_stats_recalc_pool_del(
+/*=======================*/
+ const dict_table_t* table) /*!< in: table to remove */
+{
+ ut_ad(!srv_read_only_mode);
+ ut_ad(mutex_own(&dict_sys->mutex));
+
+ mutex_enter(&recalc_pool_mutex);
+
+ ut_ad(table->id > 0);
+
+ for (recalc_pool_iterator_t iter = recalc_pool.begin();
+ iter != recalc_pool.end();
+ ++iter) {
+
+ if (*iter == table->id) {
+ /* erase() invalidates the iterator */
+ recalc_pool.erase(iter);
+ break;
+ }
+ }
+
+ mutex_exit(&recalc_pool_mutex);
+}
+
+/*****************************************************************//**
+Wait until background stats thread has stopped using the specified table(s).
+The caller must have locked the data dictionary using
+row_mysql_lock_data_dictionary() and this function may unlock it temporarily
+and restore the lock before it exits.
+The background stats thread is guaranteed not to start using the specified
+tables after this function returns and before the caller unlocks the data
+dictionary because it sets the BG_STAT_IN_PROGRESS bit in table->stats_bg_flag
+under dict_sys->mutex.
+dict_stats_wait_bg_to_stop_using_tables() @{ */
+UNIV_INTERN
+void
+dict_stats_wait_bg_to_stop_using_tables(
+/*====================================*/
+ dict_table_t* table1, /*!< in/out: table1 */
+ dict_table_t* table2, /*!< in/out: table2, could be NULL */
+ trx_t* trx) /*!< in/out: transaction to use for
+ unlocking/locking the data dict */
+{
+ ut_ad(!srv_read_only_mode);
+
+ while ((table1->stats_bg_flag & BG_STAT_IN_PROGRESS)
+ || (table2 != NULL
+ && (table2->stats_bg_flag & BG_STAT_IN_PROGRESS))) {
+
+ table1->stats_bg_flag |= BG_STAT_SHOULD_QUIT;
+ if (table2 != NULL) {
+ table2->stats_bg_flag |= BG_STAT_SHOULD_QUIT;
+ }
+
+ row_mysql_unlock_data_dictionary(trx);
+ os_thread_sleep(250000);
+ row_mysql_lock_data_dictionary(trx);
+ }
+}
+/* @} */
+
+/*****************************************************************//**
+Initialize global variables needed for the operation of dict_stats_thread()
+Must be called before dict_stats_thread() is started.
+dict_stats_thread_init() @{ */
+UNIV_INTERN
+void
+dict_stats_thread_init()
+/*====================*/
+{
+ ut_a(!srv_read_only_mode);
+
+ dict_stats_event = os_event_create();
+
+ /* The recalc_pool_mutex is acquired from:
+ 1) the background stats gathering thread before any other latch
+ and released without latching anything else in between (thus
+ any level would do here)
+ 2) from row_update_statistics_if_needed()
+ and released without latching anything else in between. We know
+ that dict_sys->mutex (SYNC_DICT) is not acquired when
+ row_update_statistics_if_needed() is called and it may be acquired
+ inside that function (thus a level <=SYNC_DICT would do).
+ 3) from row_drop_table_for_mysql() after dict_sys->mutex (SYNC_DICT)
+ and dict_operation_lock (SYNC_DICT_OPERATION) have been locked
+ (thus a level <SYNC_DICT && <SYNC_DICT_OPERATION would do)
+ So we choose SYNC_STATS_AUTO_RECALC to be about below SYNC_DICT. */
+ mutex_create(recalc_pool_mutex_key, &recalc_pool_mutex,
+ SYNC_STATS_AUTO_RECALC);
+
+ dict_stats_recalc_pool_init();
+}
+/* @} */
+
+/*****************************************************************//**
+Free resources allocated by dict_stats_thread_init(), must be called
+after dict_stats_thread() has exited.
+dict_stats_thread_deinit() @{ */
+UNIV_INTERN
+void
+dict_stats_thread_deinit()
+/*======================*/
+{
+ ut_a(!srv_read_only_mode);
+ ut_ad(!srv_dict_stats_thread_active);
+
+ dict_stats_recalc_pool_deinit();
+
+ mutex_free(&recalc_pool_mutex);
+ memset(&recalc_pool_mutex, 0x0, sizeof(recalc_pool_mutex));
+
+ os_event_free(dict_stats_event);
+ dict_stats_event = NULL;
+}
+/* @} */
+
+/*****************************************************************//**
+Get the first table that has been added for auto recalc and eventually
+update its stats.
+dict_stats_process_entry_from_recalc_pool() @{ */
+static
+void
+dict_stats_process_entry_from_recalc_pool()
+/*=======================================*/
+{
+ table_id_t table_id;
+
+ ut_ad(!srv_read_only_mode);
+
+ /* pop the first table from the auto recalc pool */
+ if (!dict_stats_recalc_pool_get(&table_id)) {
+ /* no tables for auto recalc */
+ return;
+ }
+
+ dict_table_t* table;
+
+ mutex_enter(&dict_sys->mutex);
+
+ table = dict_table_open_on_id(table_id, TRUE, FALSE);
+
+ if (table == NULL) {
+ /* table does not exist, must have been DROPped
+ after its id was enqueued */
+ mutex_exit(&dict_sys->mutex);
+ return;
+ }
+
+ /* Check whether table is corrupted */
+ if (table->corrupted) {
+ dict_table_close(table, TRUE, FALSE);
+ mutex_exit(&dict_sys->mutex);
+ return;
+ }
+
+ table->stats_bg_flag = BG_STAT_IN_PROGRESS;
+
+ mutex_exit(&dict_sys->mutex);
+
+ /* ut_time() could be expensive, the current function
+ is called once every time a table has been changed more than 10% and
+ on a system with lots of small tables, this could become hot. If we
+ find out that this is a problem, then the check below could eventually
+ be replaced with something else, though a time interval is the natural
+ approach. */
+
+ if (ut_difftime(ut_time(), table->stats_last_recalc)
+ < MIN_RECALC_INTERVAL) {
+
+ /* Stats were (re)calculated not long ago. To avoid
+ too frequent stats updates we put back the table on
+ the auto recalc list and do nothing. */
+
+ dict_stats_recalc_pool_add(table);
+
+ } else {
+
+ dict_stats_update(table, DICT_STATS_RECALC_PERSISTENT);
+ }
+
+ mutex_enter(&dict_sys->mutex);
+
+ table->stats_bg_flag = BG_STAT_NONE;
+
+ dict_table_close(table, TRUE, FALSE);
+
+ mutex_exit(&dict_sys->mutex);
+}
+/* @} */
+
+/*****************************************************************//**
+This is the thread for background stats gathering. It pops tables from
+the auto recalc list and processes them, eventually recalculating their
+statistics.
+dict_stats_thread() @{
+@return this function does not return, it calls os_thread_exit() */
+extern "C" UNIV_INTERN
+os_thread_ret_t
+DECLARE_THREAD(dict_stats_thread)(
+/*==============================*/
+ void* arg __attribute__((unused))) /*!< in: a dummy parameter
+ required by os_thread_create */
+{
+ ut_a(!srv_read_only_mode);
+
+ srv_dict_stats_thread_active = TRUE;
+
+ while (!SHUTTING_DOWN()) {
+
+ /* Wake up periodically even if not signaled. This is
+ because we may lose an event - if the below call to
+ dict_stats_process_entry_from_recalc_pool() puts the entry back
+ in the list, the os_event_set() will be lost by the subsequent
+ os_event_reset(). */
+ os_event_wait_time(
+ dict_stats_event, MIN_RECALC_INTERVAL * 1000000);
+
+ if (SHUTTING_DOWN()) {
+ break;
+ }
+
+ dict_stats_process_entry_from_recalc_pool();
+
+ os_event_reset(dict_stats_event);
+ }
+
+ srv_dict_stats_thread_active = FALSE;
+
+ /* We count the number of threads in os_thread_exit(). A created
+ thread should always use that to exit instead of return(). */
+ os_thread_exit(NULL);
+
+ OS_THREAD_DUMMY_RETURN;
+}
+/* @} */
+
+/* vim: set foldmethod=marker foldmarker=@{,@}: */
diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc
index 839199cfd8e..a89875352c6 100644
--- a/storage/innobase/fil/fil0fil.cc
+++ b/storage/innobase/fil/fil0fil.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -25,6 +25,9 @@ Created 10/25/1995 Heikki Tuuri
#include "fil0fil.h"
+#include <debug_sync.h>
+#include <my_dbug.h>
+
#include "mem0mem.h"
#include "hash0hash.h"
#include "os0file.h"
@@ -41,7 +44,7 @@ Created 10/25/1995 Heikki Tuuri
#include "page0page.h"
#include "page0zip.h"
#include "trx0sys.h"
-#include "buf0rea.h"
+#include "row0mysql.h"
#ifndef UNIV_HOTBACKUP
# include "buf0lru.h"
# include "ibuf0ibuf.h"
@@ -138,7 +141,7 @@ UNIV_INTERN mysql_pfs_key_t fil_space_latch_key;
#endif /* UNIV_PFS_RWLOCK */
/** File node of a tablespace or the log data space */
-struct fil_node_struct {
+struct fil_node_t {
fil_space_t* space; /*!< backpointer to the space where this node
belongs */
char* name; /*!< path to the file */
@@ -172,11 +175,11 @@ struct fil_node_struct {
ulint magic_n;/*!< FIL_NODE_MAGIC_N */
};
-/** Value of fil_node_struct::magic_n */
+/** Value of fil_node_t::magic_n */
#define FIL_NODE_MAGIC_N 89389
/** Tablespace or log data space: let us call them by a common name space */
-struct fil_space_struct {
+struct fil_space_t {
char* name; /*!< space name = the path to the first file in
it */
ulint id; /*!< space id */
@@ -215,7 +218,8 @@ struct fil_space_struct {
last incomplete megabytes in data files may be
ignored if space == 0 */
ulint flags; /*!< tablespace flags; see
- fsp_flags_validate(), fsp_flags_get_zip_size() */
+ fsp_flags_is_valid(),
+ fsp_flags_get_zip_size() */
ulint n_reserved_extents;
/*!< number of reserved free extents for
ongoing operations like B-tree page split */
@@ -238,26 +242,23 @@ struct fil_space_struct {
UT_LIST_NODE_T(fil_space_t) unflushed_spaces;
/*!< list of spaces with at least one unflushed
file we have written to */
- ibool is_in_unflushed_spaces; /*!< TRUE if this space is
- currently in unflushed_spaces */
+ bool is_in_unflushed_spaces;
+ /*!< true if this space is currently in
+ unflushed_spaces */
UT_LIST_NODE_T(fil_space_t) space_list;
/*!< list of all spaces */
ulint magic_n;/*!< FIL_SPACE_MAGIC_N */
};
-/** Value of fil_space_struct::magic_n */
+/** Value of fil_space_t::magic_n */
#define FIL_SPACE_MAGIC_N 89472
-/** The tablespace memory cache */
-typedef struct fil_system_struct fil_system_t;
-
/** The tablespace memory cache; also the totality of logs (the log
data space) is stored here; below we talk about tablespaces, but also
the ib_logfiles form a 'space' and it is handled here */
-
-struct fil_system_struct {
+struct fil_system_t {
#ifndef UNIV_HOTBACKUP
- mutex_t mutex; /*!< The mutex protecting the cache */
+ ib_mutex_t mutex; /*!< The mutex protecting the cache */
#endif /* !UNIV_HOTBACKUP */
hash_table_t* spaces; /*!< The hash table of spaces in the
system; they are hashed on the space
@@ -313,7 +314,17 @@ initialized. */
static fil_system_t* fil_system = NULL;
/** Determine if (i) is a user tablespace id or not. */
-# define fil_is_user_tablespace_id(i) ((i) > srv_undo_tablespaces)
+# define fil_is_user_tablespace_id(i) ((i) > srv_undo_tablespaces_open)
+
+/** Determine if user has explicitly disabled fsync(). */
+#ifndef __WIN__
+# define fil_buffering_disabled(s) \
+ ((s)->purpose == FIL_TABLESPACE \
+ && srv_unix_file_flush_method \
+ == SRV_UNIX_O_DIRECT_NO_FSYNC)
+#else /* __WIN__ */
+# define fil_buffering_disabled(s) (0)
+#endif /* __WIN__ */
#ifdef UNIV_DEBUG
/** Try fil_validate() every this many times */
@@ -384,16 +395,6 @@ fil_node_complete_io(
the node as modified if
type == OS_FILE_WRITE */
/*******************************************************************//**
-Checks if a single-table tablespace for a given table name exists in the
-tablespace memory cache.
-@return space id, ULINT_UNDEFINED if not found */
-static
-ulint
-fil_get_space_id_for_table(
-/*=======================*/
- const char* name); /*!< in: table name in the standard
- 'databasename/tablename' format */
-/*******************************************************************//**
Frees a space object from the tablespace memory cache. Closes the files in
the chain but does not delete them. There must not be any pending i/o's or
flushes on the files.
@@ -412,7 +413,7 @@ calculating the byte offset within a space.
@return DB_SUCCESS, or DB_TABLESPACE_DELETED if we are trying to do
i/o on a tablespace which does not exist */
UNIV_INLINE
-ulint
+dberr_t
fil_read(
/*=====*/
ibool sync, /*!< in: TRUE if synchronous aio is desired */
@@ -441,7 +442,7 @@ calculating the byte offset within a space.
@return DB_SUCCESS, or DB_TABLESPACE_DELETED if we are trying to do
i/o on a tablespace which does not exist */
UNIV_INLINE
-ulint
+dberr_t
fil_write(
/*======*/
ibool sync, /*!< in: TRUE if synchronous aio is desired */
@@ -459,6 +460,8 @@ fil_write(
void* message) /*!< in: message for aio handler if non-sync
aio used, else ignored */
{
+ ut_ad(!srv_read_only_mode);
+
return(fil_io(OS_FILE_WRITE, sync, space_id, zip_size, block_offset,
byte_offset, len, buf, message));
}
@@ -592,9 +595,9 @@ fil_space_get_type(
/**********************************************************************//**
Checks if all the file nodes in a space are flushed. The caller must hold
the fil_system mutex.
-@return TRUE if all are flushed */
+@return true if all are flushed */
static
-ibool
+bool
fil_space_is_flushed(
/*=================*/
fil_space_t* space) /*!< in: space */
@@ -608,19 +611,21 @@ fil_space_is_flushed(
while (node) {
if (node->modification_counter > node->flush_counter) {
- return(FALSE);
+ ut_ad(!fil_buffering_disabled(space));
+ return(false);
}
node = UT_LIST_GET_NEXT(chain, node);
}
- return(TRUE);
+ return(true);
}
/*******************************************************************//**
-Appends a new file to the chain of files of a space. File must be closed. */
+Appends a new file to the chain of files of a space. File must be closed.
+@return pointer to the file name, or NULL on error */
UNIV_INTERN
-void
+char*
fil_node_create(
/*============*/
const char* name, /*!< in: file name (file must be closed) */
@@ -663,7 +668,7 @@ fil_node_create(
mutex_exit(&fil_system->mutex);
- return;
+ return(NULL);
}
space->size += size;
@@ -678,6 +683,8 @@ fil_node_create(
}
mutex_exit(&fil_system->mutex);
+
+ return(node->name);
}
/********************************************************************//**
@@ -718,7 +725,7 @@ fil_node_open_file(
OS_FILE_READ_ONLY, &success);
if (!success) {
/* The following call prints an error message */
- os_file_get_last_error(TRUE);
+ os_file_get_last_error(true);
ut_print_timestamp(stderr);
@@ -798,9 +805,9 @@ fil_node_open_file(
!= page_size)) {
fprintf(stderr,
"InnoDB: Error: tablespace file %s"
- " has page size %lx\n"
+ " has page size 0x%lx\n"
"InnoDB: but the data dictionary"
- " expects page size %lx!\n",
+ " expects page size 0x%lx!\n",
node->name, flags,
fsp_flags_get_page_size(space->flags));
@@ -809,9 +816,9 @@ fil_node_open_file(
if (UNIV_UNLIKELY(space->flags != flags)) {
fprintf(stderr,
- "InnoDB: Error: table flags are %lx"
+ "InnoDB: Error: table flags are 0x%lx"
" in the data dictionary\n"
- "InnoDB: but the flags in file %s are %lx!\n",
+ "InnoDB: but the flags in file %s are 0x%lx!\n",
space->flags, node->name, flags);
ut_error;
@@ -971,6 +978,7 @@ fil_try_to_close_file_in_LRU(
", because mod_count %ld != fl_count %ld\n",
(long) node->modification_counter,
(long) node->flush_counter);
+
}
if (node->being_extended) {
@@ -1143,10 +1151,15 @@ fil_node_free(
node->modification_counter = node->flush_counter;
- if (space->is_in_unflushed_spaces
- && fil_space_is_flushed(space)) {
+ if (fil_buffering_disabled(space)) {
+
+ ut_ad(!space->is_in_unflushed_spaces);
+ ut_ad(fil_space_is_flushed(space));
- space->is_in_unflushed_spaces = FALSE;
+ } else if (space->is_in_unflushed_spaces
+ && fil_space_is_flushed(space)) {
+
+ space->is_in_unflushed_spaces = false;
UT_LIST_REMOVE(unflushed_spaces,
system->unflushed_spaces,
@@ -1215,82 +1228,50 @@ fil_space_create(
{
fil_space_t* space;
- fsp_flags_validate(flags);
-
-try_again:
- /*printf(
- "InnoDB: Adding tablespace %lu of name %s, purpose %lu\n", id, name,
- purpose);*/
+ DBUG_EXECUTE_IF("fil_space_create_failure", return(false););
ut_a(fil_system);
- ut_a(name);
+ ut_a(fsp_flags_is_valid(flags));
- mutex_enter(&fil_system->mutex);
+ /* Look for a matching tablespace and if found free it. */
+ do {
+ mutex_enter(&fil_system->mutex);
- space = fil_space_get_by_name(name);
+ space = fil_space_get_by_name(name);
- if (UNIV_LIKELY_NULL(space)) {
- ibool success;
- ulint namesake_id;
+ if (space != 0) {
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Tablespace '%s' exists in the cache "
+ "with id %lu", name, (ulong) id);
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Warning: trying to init to the"
- " tablespace memory cache\n"
- "InnoDB: a tablespace %lu of name ", (ulong) id);
- ut_print_filename(stderr, name);
- fprintf(stderr, ",\n"
- "InnoDB: but a tablespace %lu of the same name\n"
- "InnoDB: already exists in the"
- " tablespace memory cache!\n",
- (ulong) space->id);
+ if (id == 0 || purpose != FIL_TABLESPACE) {
- if (id == 0 || purpose != FIL_TABLESPACE) {
+ mutex_exit(&fil_system->mutex);
- mutex_exit(&fil_system->mutex);
+ return(FALSE);
+ }
- return(FALSE);
- }
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Freeing existing tablespace '%s' entry "
+ "from the cache with id %lu",
+ name, (ulong) id);
- fprintf(stderr,
- "InnoDB: We assume that InnoDB did a crash recovery,"
- " and you had\n"
- "InnoDB: an .ibd file for which the table"
- " did not exist in the\n"
- "InnoDB: InnoDB internal data dictionary in the"
- " ibdata files.\n"
- "InnoDB: We assume that you later removed the"
- " .ibd and .frm files,\n"
- "InnoDB: and are now trying to recreate the table."
- " We now remove the\n"
- "InnoDB: conflicting tablespace object"
- " from the memory cache and try\n"
- "InnoDB: the init again.\n");
-
- namesake_id = space->id;
-
- success = fil_space_free(namesake_id, FALSE);
- ut_a(success);
+ ibool success = fil_space_free(space->id, FALSE);
+ ut_a(success);
- mutex_exit(&fil_system->mutex);
+ mutex_exit(&fil_system->mutex);
+ }
- goto try_again;
- }
+ } while (space != 0);
space = fil_space_get_by_id(id);
- if (UNIV_LIKELY_NULL(space)) {
- fprintf(stderr,
- "InnoDB: Error: trying to add tablespace %lu"
- " of name ", (ulong) id);
- ut_print_filename(stderr, name);
- fprintf(stderr, "\n"
- "InnoDB: to the tablespace memory cache,"
- " but tablespace\n"
- "InnoDB: %lu of name ", (ulong) space->id);
- ut_print_filename(stderr, space->name);
- fputs(" already exists in the tablespace\n"
- "InnoDB: memory cache!\n", stderr);
+ if (space != 0) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Trying to add tablespace '%s' with id %lu "
+ "to the tablespace memory cache, but tablespace '%s' "
+ "with id %lu already exists in the cache!",
+ name, (ulong) id, space->name, (ulong) space->id);
mutex_exit(&fil_system->mutex);
@@ -1306,15 +1287,15 @@ try_again:
space->tablespace_version = fil_system->tablespace_version;
space->mark = FALSE;
- if (UNIV_LIKELY(purpose == FIL_TABLESPACE && !recv_recovery_on)
- && UNIV_UNLIKELY(id > fil_system->max_assigned_id)) {
+ if (purpose == FIL_TABLESPACE && !recv_recovery_on
+ && id > fil_system->max_assigned_id) {
+
if (!fil_system->space_id_reuse_warned) {
fil_system->space_id_reuse_warned = TRUE;
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Warning: allocated tablespace %lu,"
- " old maximum was %lu\n",
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Allocated tablespace %lu, old maximum "
+ "was %lu",
(ulong) id,
(ulong) fil_system->max_assigned_id);
}
@@ -1333,7 +1314,7 @@ try_again:
HASH_INSERT(fil_space_t, name_hash, fil_system->name_hash,
ut_fold_string(name), space);
- space->is_in_unflushed_spaces = FALSE;
+ space->is_in_unflushed_spaces = false;
UT_LIST_ADD_LAST(space_list, fil_system->space_list, space);
@@ -1418,7 +1399,6 @@ fil_space_free(
{
fil_space_t* space;
fil_space_t* fnamespace;
- fil_node_t* fil_node;
ut_ad(mutex_own(&fil_system->mutex));
@@ -1444,7 +1424,9 @@ fil_space_free(
ut_fold_string(space->name), space);
if (space->is_in_unflushed_spaces) {
- space->is_in_unflushed_spaces = FALSE;
+
+ ut_ad(!fil_buffering_disabled(space));
+ space->is_in_unflushed_spaces = false;
UT_LIST_REMOVE(unflushed_spaces, fil_system->unflushed_spaces,
space);
@@ -1455,12 +1437,11 @@ fil_space_free(
ut_a(space->magic_n == FIL_SPACE_MAGIC_N);
ut_a(0 == space->n_pending_flushes);
- fil_node = UT_LIST_GET_FIRST(space->chain);
+ for (fil_node_t* fil_node = UT_LIST_GET_FIRST(space->chain);
+ fil_node != NULL;
+ fil_node = UT_LIST_GET_FIRST(space->chain)) {
- while (fil_node != NULL) {
fil_node_free(fil_node, fil_system, space);
-
- fil_node = UT_LIST_GET_FIRST(space->chain);
}
ut_a(0 == UT_LIST_GET_LEN(space->chain));
@@ -1478,53 +1459,32 @@ fil_space_free(
}
/*******************************************************************//**
-Returns the size of the space in pages. The tablespace must be cached in the
-memory cache.
-@return space size, 0 if space not found */
-UNIV_INTERN
-ulint
-fil_space_get_size(
-/*===============*/
+Returns a pointer to the file_space_t that is in the memory cache
+associated with a space id. The caller must lock fil_system->mutex.
+@return file_space_t pointer, NULL if space not found */
+UNIV_INLINE
+fil_space_t*
+fil_space_get_space(
+/*================*/
ulint id) /*!< in: space id */
{
- fil_node_t* node;
fil_space_t* space;
- ulint size;
+ fil_node_t* node;
ut_ad(fil_system);
- mutex_enter(&fil_system->mutex);
-
space = fil_space_get_by_id(id);
-
if (space == NULL) {
- mutex_exit(&fil_system->mutex);
-
- return(0);
+ return(NULL);
}
if (space->size == 0 && space->purpose == FIL_TABLESPACE) {
ut_a(id != 0);
+ /* The following code must change when InnoDB supports
+ multiple datafiles per tablespace. */
ut_a(1 == UT_LIST_GET_LEN(space->chain));
- mutex_exit(&fil_system->mutex);
-
- /* It is possible that the space gets evicted at this point
- before the fil_mutex_enter_and_prepare_for_io() acquires
- the fil_system->mutex. Check for this after completing the
- call to fil_mutex_enter_and_prepare_for_io(). */
- fil_mutex_enter_and_prepare_for_io(id);
-
- /* We are still holding the fil_system->mutex. Check if
- the space is still in memory cache. */
- space = fil_space_get_by_id(id);
-
- if (space == NULL) {
- mutex_exit(&fil_system->mutex);
- return(0);
- }
-
node = UT_LIST_GET_FIRST(space->chain);
/* It must be a single-table tablespace and we have not opened
@@ -1535,7 +1495,69 @@ fil_space_get_size(
fil_node_complete_io(node, fil_system, OS_FILE_READ);
}
- size = space->size;
+ return(space);
+}
+
+/*******************************************************************//**
+Returns the path from the first fil_node_t found for the space ID sent.
+The caller is responsible for freeing the memory allocated here for the
+value returned.
+@return own: A copy of fil_node_t::path, NULL if space ID is zero
+or not found. */
+UNIV_INTERN
+char*
+fil_space_get_first_path(
+/*=====================*/
+ ulint id) /*!< in: space id */
+{
+ fil_space_t* space;
+ fil_node_t* node;
+ char* path;
+
+ ut_ad(fil_system);
+ ut_a(id);
+
+ fil_mutex_enter_and_prepare_for_io(id);
+
+ space = fil_space_get_space(id);
+
+ if (space == NULL) {
+ mutex_exit(&fil_system->mutex);
+
+ return(NULL);
+ }
+
+ ut_ad(mutex_own(&fil_system->mutex));
+
+ node = UT_LIST_GET_FIRST(space->chain);
+
+ path = mem_strdup(node->name);
+
+ mutex_exit(&fil_system->mutex);
+
+ return(path);
+}
+
+/*******************************************************************//**
+Returns the size of the space in pages. The tablespace must be cached in the
+memory cache.
+@return space size, 0 if space not found */
+UNIV_INTERN
+ulint
+fil_space_get_size(
+/*===============*/
+ ulint id) /*!< in: space id */
+{
+ fil_space_t* space;
+ ulint size;
+
+ ut_ad(fil_system);
+
+ fil_mutex_enter_and_prepare_for_io(id);
+
+ space = fil_space_get_space(id);
+
+ size = space ? space->size : 0;
mutex_exit(&fil_system->mutex);
@@ -1552,19 +1574,18 @@ fil_space_get_flags(
/*================*/
ulint id) /*!< in: space id */
{
- fil_node_t* node;
fil_space_t* space;
ulint flags;
ut_ad(fil_system);
- if (UNIV_UNLIKELY(!id)) {
+ if (!id) {
return(0);
}
- mutex_enter(&fil_system->mutex);
+ fil_mutex_enter_and_prepare_for_io(id);
- space = fil_space_get_by_id(id);
+ space = fil_space_get_space(id);
if (space == NULL) {
mutex_exit(&fil_system->mutex);
@@ -1572,38 +1593,6 @@ fil_space_get_flags(
return(ULINT_UNDEFINED);
}
- if (space->size == 0 && space->purpose == FIL_TABLESPACE) {
- ut_a(id != 0);
-
- ut_a(1 == UT_LIST_GET_LEN(space->chain));
-
- mutex_exit(&fil_system->mutex);
-
- /* It is possible that the space gets evicted at this point
- before the fil_mutex_enter_and_prepare_for_io() acquires
- the fil_system->mutex. Check for this after completing the
- call to fil_mutex_enter_and_prepare_for_io(). */
- fil_mutex_enter_and_prepare_for_io(id);
-
- /* We are still holding the fil_system->mutex. Check if
- the space is still in memory cache. */
- space = fil_space_get_by_id(id);
-
- if (space == NULL) {
- mutex_exit(&fil_system->mutex);
- return(0);
- }
-
- node = UT_LIST_GET_FIRST(space->chain);
-
- /* It must be a single-table tablespace and we have not opened
- the file yet; the following calls will open it and update the
- size fields */
-
- fil_node_prepare_for_io(node, fil_system, space);
- fil_node_complete_io(node, fil_system, OS_FILE_READ);
- }
-
flags = space->flags;
mutex_exit(&fil_system->mutex);
@@ -1778,6 +1767,49 @@ fil_close_all_files(void)
}
/*******************************************************************//**
+Closes the redo log files. There must not be any pending i/o's or not
+flushed modifications in the files. */
+UNIV_INTERN
+void
+fil_close_log_files(
+/*================*/
+ bool free) /*!< in: whether to free the memory object */
+{
+ fil_space_t* space;
+
+ mutex_enter(&fil_system->mutex);
+
+ space = UT_LIST_GET_FIRST(fil_system->space_list);
+
+ while (space != NULL) {
+ fil_node_t* node;
+ fil_space_t* prev_space = space;
+
+ if (space->purpose != FIL_LOG) {
+ space = UT_LIST_GET_NEXT(space_list, space);
+ continue;
+ }
+
+ for (node = UT_LIST_GET_FIRST(space->chain);
+ node != NULL;
+ node = UT_LIST_GET_NEXT(chain, node)) {
+
+ if (node->open) {
+ fil_node_close_file(node, fil_system);
+ }
+ }
+
+ space = UT_LIST_GET_NEXT(space_list, space);
+
+ if (free) {
+ fil_space_free(prev_space->id, FALSE);
+ }
+ }
+
+ mutex_exit(&fil_system->mutex);
+}
+
+/*******************************************************************//**
Sets the max tablespace id counter if the given number is bigger than the
previous value. */
UNIV_INTERN
@@ -1807,8 +1839,8 @@ fil_set_max_space_id_if_bigger(
Writes the flushed lsn and the latest archived log number to the page header
of the first page of a data file of the system tablespace (space 0),
which is uncompressed. */
-static
-ulint
+static __attribute__((warn_unused_result))
+dberr_t
fil_write_lsn_and_arch_no_to_file(
/*==============================*/
ulint space, /*!< in: space to write to */
@@ -1820,19 +1852,23 @@ fil_write_lsn_and_arch_no_to_file(
{
byte* buf1;
byte* buf;
+ dberr_t err;
buf1 = static_cast<byte*>(mem_alloc(2 * UNIV_PAGE_SIZE));
buf = static_cast<byte*>(ut_align(buf1, UNIV_PAGE_SIZE));
- fil_read(TRUE, space, 0, sum_of_sizes, 0, UNIV_PAGE_SIZE, buf, NULL);
+ err = fil_read(TRUE, space, 0, sum_of_sizes, 0,
+ UNIV_PAGE_SIZE, buf, NULL);
+ if (err == DB_SUCCESS) {
+ mach_write_to_8(buf + FIL_PAGE_FILE_FLUSH_LSN, lsn);
- mach_write_to_8(buf + FIL_PAGE_FILE_FLUSH_LSN, lsn);
-
- fil_write(TRUE, space, 0, sum_of_sizes, 0, UNIV_PAGE_SIZE, buf, NULL);
+ err = fil_write(TRUE, space, 0, sum_of_sizes, 0,
+ UNIV_PAGE_SIZE, buf, NULL);
+ }
mem_free(buf1);
- return(DB_SUCCESS);
+ return(err);
}
/****************************************************************//**
@@ -1840,7 +1876,7 @@ Writes the flushed lsn and the latest archived log number to the page
header of the first page of each data file in the system tablespace.
@return DB_SUCCESS or error number */
UNIV_INTERN
-ulint
+dberr_t
fil_write_flushed_lsn_to_data_files(
/*================================*/
lsn_t lsn, /*!< in: lsn to write */
@@ -1848,7 +1884,7 @@ fil_write_flushed_lsn_to_data_files(
{
fil_space_t* space;
fil_node_t* node;
- ulint err;
+ dberr_t err;
mutex_enter(&fil_system->mutex);
@@ -1864,7 +1900,6 @@ fil_write_flushed_lsn_to_data_files(
if (space->purpose == FIL_TABLESPACE
&& !fil_is_user_tablespace_id(space->id)) {
-
ulint sum_of_sizes = 0;
for (node = UT_LIST_GET_FIRST(space->chain);
@@ -1906,6 +1941,7 @@ fil_read_first_page(
parameters below already
contain sensible data */
ulint* flags, /*!< out: tablespace flags */
+ ulint* space_id, /*!< out: tablespace ID */
#ifdef UNIV_LOG_ARCHIVE
ulint* min_arch_log_no, /*!< out: min of archived
log numbers in data files */
@@ -1931,7 +1967,9 @@ fil_read_first_page(
*flags = fsp_header_get_flags(page);
- flushed_lsn = mach_read_from_8(page+ FIL_PAGE_FILE_FLUSH_LSN);
+ *space_id = fsp_header_get_space_id(page);
+
+ flushed_lsn = mach_read_from_8(page + FIL_PAGE_FILE_FLUSH_LSN);
ut_free(buf);
@@ -2136,6 +2174,12 @@ created does not exist, then we create the directory, too.
Note that ibbackup --apply-log sets fil_path_to_mysql_datadir to point to the
datadir that we should use in replaying the file operations.
+
+InnoDB recovery does not replay these fully since it always sets the space id
+to zero. But ibbackup does replay them. TODO: If remote tablespaces are used,
+ibbackup will only create tables in the default directory since MLOG_FILE_CREATE
+and MLOG_FILE_CREATE2 only know the tablename, not the path.
+
@return end of log record, or NULL if the record was not completely
contained between ptr and end_ptr */
UNIV_INTERN
@@ -2231,7 +2275,9 @@ fil_op_log_parse_or_replay(
switch (type) {
case MLOG_FILE_DELETE:
if (fil_tablespace_exists_in_mem(space_id)) {
- ut_a(fil_delete_tablespace(space_id));
+ dberr_t err = fil_delete_tablespace(
+ space_id, BUF_REMOVE_FLUSH_NO_WRITE);
+ ut_a(err == DB_SUCCESS);
}
break;
@@ -2252,10 +2298,10 @@ fil_op_log_parse_or_replay(
if (fil_get_space_id_for_table(new_name)
== ULINT_UNDEFINED) {
- /* We do not care of the old name, that is
- why we pass NULL as the first argument */
+ /* We do not care about the old name, that
+ is why we pass NULL as the first argument. */
if (!fil_rename_tablespace(NULL, space_id,
- new_name)) {
+ new_name, NULL)) {
ut_error;
}
}
@@ -2273,12 +2319,14 @@ fil_op_log_parse_or_replay(
} else if (log_flags & MLOG_FILE_FLAG_TEMP) {
/* Temporary table, do nothing */
} else {
+ const char* path = NULL;
+
/* Create the database directory for name, if it does
not exist yet */
fil_create_directory_for_tablename(name);
if (fil_create_new_single_table_tablespace(
- space_id, name, FALSE, flags,
+ space_id, name, path, flags,
DICT_TF2_USE_TABLESPACE,
FIL_IBD_FILE_INITIAL_SIZE) != DB_SUCCESS) {
ut_error;
@@ -2295,118 +2343,271 @@ fil_op_log_parse_or_replay(
}
/*******************************************************************//**
-Deletes a single-table tablespace. The tablespace must be cached in the
-memory cache.
-@return TRUE if success */
-UNIV_INTERN
-ibool
-fil_delete_tablespace(
-/*==================*/
- ulint id) /*!< in: space id */
+Allocates a file name for the EXPORT/IMPORT config file name. The
+string must be freed by caller with mem_free().
+@return own: file name */
+static
+char*
+fil_make_cfg_name(
+/*==============*/
+ const char* filepath) /*!< in: .ibd file name */
{
- ibool success;
- fil_space_t* space;
- fil_node_t* node;
- ulint count = 0;
- char* path;
+ char* cfg_name;
- ut_a(id != 0);
-stop_new_ops:
- mutex_enter(&fil_system->mutex);
+ /* Create a temporary file path by replacing the .ibd suffix
+ with .cfg. */
- space = fil_space_get_by_id(id);
+ ut_ad(strlen(filepath) > 4);
- if (space != NULL) {
- space->stop_new_ops = TRUE;
+ cfg_name = mem_strdup(filepath);
+ ut_snprintf(cfg_name + strlen(cfg_name) - 3, 4, "cfg");
+ return(cfg_name);
+}
- if (space->n_pending_ops == 0) {
- mutex_exit(&fil_system->mutex);
+/*******************************************************************//**
+Check for change buffer merges.
+@return 0 if no merges else count + 1. */
+static
+ulint
+fil_ibuf_check_pending_ops(
+/*=======================*/
+ fil_space_t* space, /*!< in/out: Tablespace to check */
+ ulint count) /*!< in: number of attempts so far */
+{
+ ut_ad(mutex_own(&fil_system->mutex));
- count = 0;
+ if (space != 0 && space->n_pending_ops != 0) {
- goto try_again;
- } else {
- if (count > 5000) {
- ut_print_timestamp(stderr);
- fputs(" InnoDB: Warning: trying to"
- " delete tablespace ", stderr);
- ut_print_filename(stderr, space->name);
- fprintf(stderr, ",\n"
- "InnoDB: but there are %lu pending"
- " operations (most likely ibuf merges)"
- " on it.\n"
- "InnoDB: Loop %lu.\n",
- (ulong) space->n_pending_ops,
- (ulong) count);
- }
+ if (count > 5000) {
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Trying to close/delete tablespace "
+ "'%s' but there are %lu pending change "
+ "buffer merges on it.",
+ space->name,
+ (ulong) space->n_pending_ops);
+ }
- mutex_exit(&fil_system->mutex);
+ return(count + 1);
+ }
- os_thread_sleep(20000);
- count++;
+ return(0);
+}
+
+/*******************************************************************//**
+Check for pending IO.
+@return 0 if no pending else count + 1. */
+static
+ulint
+fil_check_pending_io(
+/*=================*/
+ fil_space_t* space, /*!< in/out: Tablespace to check */
+ fil_node_t** node, /*!< out: Node in space list */
+ ulint count) /*!< in: number of attempts so far */
+{
+ ut_ad(mutex_own(&fil_system->mutex));
+ ut_a(space->n_pending_ops == 0);
- goto stop_new_ops;
+ /* The following code must change when InnoDB supports
+ multiple datafiles per tablespace. */
+ ut_a(UT_LIST_GET_LEN(space->chain) == 1);
+
+ *node = UT_LIST_GET_FIRST(space->chain);
+
+ if (space->n_pending_flushes > 0 || (*node)->n_pending > 0) {
+
+ ut_a(!(*node)->being_extended);
+
+ if (count > 1000) {
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Trying to close/delete tablespace '%s' "
+ "but there are %lu flushes "
+ " and %lu pending i/o's on it.",
+ space->name,
+ (ulong) space->n_pending_flushes,
+ (ulong) (*node)->n_pending);
}
+
+ return(count + 1);
}
- mutex_exit(&fil_system->mutex);
- count = 0;
+ return(0);
+}
+
+/*******************************************************************//**
+Check pending operations on a tablespace.
+@return DB_SUCCESS or error failure. */
+static
+dberr_t
+fil_check_pending_operations(
+/*=========================*/
+ ulint id, /*!< in: space id */
+ fil_space_t** space, /*!< out: tablespace instance in memory */
+ char** path) /*!< out/own: tablespace path */
+{
+ ulint count = 0;
+
+ ut_a(id != TRX_SYS_SPACE);
+ ut_ad(space);
+
+ *space = 0;
-try_again:
mutex_enter(&fil_system->mutex);
+ fil_space_t* sp = fil_space_get_by_id(id);
+ if (sp) {
+ sp->stop_new_ops = TRUE;
+ }
+ mutex_exit(&fil_system->mutex);
- space = fil_space_get_by_id(id);
+ /* Check for pending change buffer merges. */
- if (space == NULL) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Error: cannot delete tablespace %lu\n"
- "InnoDB: because it is not found in the"
- " tablespace memory cache.\n",
- (ulong) id);
+ do {
+ mutex_enter(&fil_system->mutex);
+
+ sp = fil_space_get_by_id(id);
+
+ count = fil_ibuf_check_pending_ops(sp, count);
mutex_exit(&fil_system->mutex);
- return(FALSE);
- }
+ if (count > 0) {
+ os_thread_sleep(20000);
+ }
- ut_a(space->stop_new_ops);
- ut_a(space->n_pending_ops == 0);
+ } while (count > 0);
- /* TODO: The following code must change when InnoDB supports
- multiple datafiles per tablespace. */
- ut_a(UT_LIST_GET_LEN(space->chain) == 1);
+ /* Check for pending IO. */
- node = UT_LIST_GET_FIRST(space->chain);
+ *path = 0;
- if (space->n_pending_flushes > 0 || node->n_pending > 0
- || node->being_extended) {
- if (count > 1000) {
- ut_print_timestamp(stderr);
- fputs(" InnoDB: Warning: trying to"
- " delete tablespace ", stderr);
- ut_print_filename(stderr, space->name);
- fprintf(stderr, ",\n"
- "InnoDB: but there are %lu flushes"
- " and %lu pending i/o's on it\n"
- "InnoDB: Or it is being extended\n"
- "InnoDB: Loop %lu.\n",
- (ulong) space->n_pending_flushes,
- (ulong) node->n_pending,
- (ulong) count);
+ do {
+ mutex_enter(&fil_system->mutex);
+
+ sp = fil_space_get_by_id(id);
+
+ if (sp == NULL) {
+ mutex_exit(&fil_system->mutex);
+ return(DB_TABLESPACE_NOT_FOUND);
}
+
+ fil_node_t* node;
+
+ count = fil_check_pending_io(sp, &node, count);
+
+ if (count == 0) {
+ *path = mem_strdup(node->name);
+ }
+
mutex_exit(&fil_system->mutex);
- os_thread_sleep(20000);
- count++;
+ if (count > 0) {
+ os_thread_sleep(20000);
+ }
+
+ } while (count > 0);
+
+ ut_ad(sp);
+
+ *space = sp;
+ return(DB_SUCCESS);
+}
+
+/*******************************************************************//**
+Closes a single-table tablespace. The tablespace must be cached in the
+memory cache. Free all pages used by the tablespace.
+@return DB_SUCCESS or error */
+UNIV_INTERN
+dberr_t
+fil_close_tablespace(
+/*=================*/
+ trx_t* trx, /*!< in/out: Transaction covering the close */
+ ulint id) /*!< in: space id */
+{
+ char* path = 0;
+ fil_space_t* space = 0;
+
+ ut_a(id != TRX_SYS_SPACE);
- goto try_again;
+ dberr_t err = fil_check_pending_operations(id, &space, &path);
+
+ if (err != DB_SUCCESS) {
+ return(err);
}
- path = mem_strdup(node->name);
+ ut_a(space);
+ ut_a(path != 0);
+
+ rw_lock_x_lock(&space->latch);
+
+#ifndef UNIV_HOTBACKUP
+ /* Invalidate in the buffer pool all pages belonging to the
+ tablespace. Since we have set space->stop_new_ops = TRUE, readahead
+ or ibuf merge can no longer read more pages of this tablespace to the
+ buffer pool. Thus we can clean the tablespace out of the buffer pool
+ completely and permanently. The flag stop_new_ops also prevents
+ fil_flush() from being applied to this tablespace. */
+
+ buf_LRU_flush_or_remove_pages(id, BUF_REMOVE_FLUSH_WRITE, trx);
+#endif
+ mutex_enter(&fil_system->mutex);
+
+ /* If the free is successful, the X lock will be released before
+ the space memory data structure is freed. */
+
+ if (!fil_space_free(id, TRUE)) {
+ rw_lock_x_unlock(&space->latch);
+ err = DB_TABLESPACE_NOT_FOUND;
+ } else {
+ err = DB_SUCCESS;
+ }
mutex_exit(&fil_system->mutex);
+ /* If it is a delete then also delete any generated files, otherwise
+ when we drop the database the remove directory will fail. */
+
+ char* cfg_name = fil_make_cfg_name(path);
+
+ os_file_delete_if_exists(cfg_name);
+
+ mem_free(path);
+ mem_free(cfg_name);
+
+ return(err);
+}
+
+/*******************************************************************//**
+Deletes a single-table tablespace. The tablespace must be cached in the
+memory cache.
+@return DB_SUCCESS or error */
+UNIV_INTERN
+dberr_t
+fil_delete_tablespace(
+/*==================*/
+ ulint id, /*!< in: space id */
+ buf_remove_t buf_remove) /*!< in: specify the action to take
+ on the tables pages in the buffer
+ pool */
+{
+ char* path = 0;
+ fil_space_t* space = 0;
+
+ ut_a(id != TRX_SYS_SPACE);
+
+ dberr_t err = fil_check_pending_operations(id, &space, &path);
+
+ if (err != DB_SUCCESS) {
+
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Cannot delete tablespace %lu because it is not "
+ "found in the tablespace memory cache.",
+ (ulong) id);
+
+ return(err);
+ }
+
+ ut_a(space);
+ ut_a(path != 0);
+
/* Important: We rely on the data dictionary mutex to ensure
that a race is not possible here. It should serialize the tablespace
drop/free. We acquire an X latch only to avoid a race condition
@@ -2441,9 +2642,22 @@ try_again:
To deal with potential read requests by checking the
::stop_new_ops flag in fil_io() */
- buf_LRU_invalidate_tablespace(id);
-#endif
- /* printf("Deleting tablespace %s id %lu\n", space->name, id); */
+ buf_LRU_flush_or_remove_pages(id, buf_remove, 0);
+
+#endif /* !UNIV_HOTBACKUP */
+
+ /* If it is a delete then also delete any generated files, otherwise
+ when we drop the database the remove directory will fail. */
+ {
+ char* cfg_name = fil_make_cfg_name(path);
+ os_file_delete_if_exists(cfg_name);
+ mem_free(cfg_name);
+ }
+
+ /* Delete the link file pointing to the ibd file we are deleting. */
+ if (FSP_FLAGS_HAS_DATA_DIR(space->flags)) {
+ fil_delete_link_file(space->name);
+ }
mutex_enter(&fil_system->mutex);
@@ -2452,25 +2666,27 @@ try_again:
if (fil_space_get_by_id(id)) {
ut_a(space->n_pending_ops == 0);
ut_a(UT_LIST_GET_LEN(space->chain) == 1);
- node = UT_LIST_GET_FIRST(space->chain);
+ fil_node_t* node = UT_LIST_GET_FIRST(space->chain);
ut_a(node->n_pending == 0);
}
- success = fil_space_free(id, TRUE);
+ if (!fil_space_free(id, TRUE)) {
+ err = DB_TABLESPACE_NOT_FOUND;
+ }
mutex_exit(&fil_system->mutex);
- if (success) {
- success = os_file_delete(path);
-
- if (!success) {
- success = os_file_delete_if_exists(path);
- }
- } else {
+ if (err != DB_SUCCESS) {
rw_lock_x_unlock(&space->latch);
+ } else if (!os_file_delete(path) && !os_file_delete_if_exists(path)) {
+
+ /* Note: This is because we have removed the
+ tablespace instance from the cache. */
+
+ err = DB_IO_ERROR;
}
- if (success) {
+ if (err == DB_SUCCESS) {
#ifndef UNIV_HOTBACKUP
/* Write a log record about the deletion of the .ibd
file, so that ibbackup can replay it in the
@@ -2485,14 +2701,12 @@ try_again:
fil_op_write_log(MLOG_FILE_DELETE, id, 0, 0, path, NULL, &mtr);
mtr_commit(&mtr);
#endif
- mem_free(path);
-
- return(TRUE);
+ err = DB_SUCCESS;
}
mem_free(path);
- return(FALSE);
+ return(err);
}
/*******************************************************************//**
@@ -2524,36 +2738,49 @@ fil_tablespace_is_being_deleted(
/*******************************************************************//**
Discards a single-table tablespace. The tablespace must be cached in the
memory cache. Discarding is like deleting a tablespace, but
-1) we do not drop the table from the data dictionary;
-2) we remove all insert buffer entries for the tablespace immediately; in DROP
-TABLE they are only removed gradually in the background;
-3) when the user does IMPORT TABLESPACE, the tablespace will have the same id
-as it originally had.
-@return TRUE if success */
+
+ 1. We do not drop the table from the data dictionary;
+
+ 2. We remove all insert buffer entries for the tablespace immediately;
+ in DROP TABLE they are only removed gradually in the background;
+
+ 3. Free all the pages in use by the tablespace.
+@return DB_SUCCESS or error */
UNIV_INTERN
-ibool
+dberr_t
fil_discard_tablespace(
/*===================*/
ulint id) /*!< in: space id */
{
- ibool success;
+ dberr_t err;
- success = fil_delete_tablespace(id);
+ switch (err = fil_delete_tablespace(id, BUF_REMOVE_ALL_NO_WRITE)) {
+ case DB_SUCCESS:
+ break;
- if (!success) {
- fprintf(stderr,
- "InnoDB: Warning: cannot delete tablespace %lu"
- " in DISCARD TABLESPACE.\n"
- "InnoDB: But let us remove the"
- " insert buffer entries for this tablespace.\n",
- (ulong) id);
+ case DB_IO_ERROR:
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "While deleting tablespace %lu in DISCARD TABLESPACE."
+ " File rename/delete failed: %s",
+ (ulong) id, ut_strerr(err));
+ break;
+
+ case DB_TABLESPACE_NOT_FOUND:
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Cannot delete tablespace %lu in DISCARD "
+ "TABLESPACE. %s",
+ (ulong) id, ut_strerr(err));
+ break;
+
+ default:
+ ut_error;
}
/* Remove all insert buffer entries for the tablespace */
ibuf_delete_for_discarded_space(id);
- return(success);
+ return(err);
}
#endif /* !UNIV_HOTBACKUP */
@@ -2609,30 +2836,27 @@ fil_rename_tablespace_in_mem(
Allocates a file name for a single-table tablespace. The string must be freed
by caller with mem_free().
@return own: file name */
-static
+UNIV_INTERN
char*
fil_make_ibd_name(
/*==============*/
- const char* name, /*!< in: table name or a dir path of a
- TEMPORARY table */
- ibool is_temp) /*!< in: TRUE if it is a dir path */
+ const char* name, /*!< in: table name or a dir path */
+ bool is_full_path) /*!< in: TRUE if it is a dir path */
{
char* filename;
ulint namelen = strlen(name);
ulint dirlen = strlen(fil_path_to_mysql_datadir);
+ ulint pathlen = dirlen + namelen + sizeof "/.ibd";
- filename = static_cast<char*>(
- mem_alloc(namelen + dirlen + sizeof "/.ibd"));
+ filename = static_cast<char*>(mem_alloc(pathlen));
- if (is_temp) {
+ if (is_full_path) {
memcpy(filename, name, namelen);
memcpy(filename + namelen, ".ibd", sizeof ".ibd");
} else {
- memcpy(filename, fil_path_to_mysql_datadir, dirlen);
- filename[dirlen] = '/';
+ ut_snprintf(filename, pathlen, "%s/%s.ibd",
+ fil_path_to_mysql_datadir, name);
- memcpy(filename + dirlen + 1, name, namelen);
- memcpy(filename + dirlen + namelen + 1, ".ibd", sizeof ".ibd");
}
srv_normalize_path_for_win(filename);
@@ -2641,6 +2865,31 @@ fil_make_ibd_name(
}
/*******************************************************************//**
+Allocates a file name for a tablespace ISL file (InnoDB Symbolic Link).
+The string must be freed by caller with mem_free().
+@return own: file name */
+UNIV_INTERN
+char*
+fil_make_isl_name(
+/*==============*/
+ const char* name) /*!< in: table name */
+{
+ char* filename;
+ ulint namelen = strlen(name);
+ ulint dirlen = strlen(fil_path_to_mysql_datadir);
+ ulint pathlen = dirlen + namelen + sizeof "/.isl";
+
+ filename = static_cast<char*>(mem_alloc(pathlen));
+
+ ut_snprintf(filename, pathlen, "%s/%s.isl",
+ fil_path_to_mysql_datadir, name);
+
+ srv_normalize_path_for_win(filename);
+
+ return(filename);
+}
+
+/*******************************************************************//**
Renames a single-table tablespace. The tablespace must be cached in the
tablespace memory cache.
@return TRUE if success */
@@ -2648,14 +2897,19 @@ UNIV_INTERN
ibool
fil_rename_tablespace(
/*==================*/
- const char* old_name_in, /*!< in: old table name in the standard
- databasename/tablename format of
- InnoDB, or NULL if we do the rename
- based on the space id only */
+ const char* old_name_in, /*!< in: old table name in the
+ standard databasename/tablename
+ format of InnoDB, or NULL if we
+ do the rename based on the space
+ id only */
ulint id, /*!< in: space id */
- const char* new_name) /*!< in: new table name in the standard
- databasename/tablename format
- of InnoDB */
+ const char* new_name, /*!< in: new table name in the
+ standard databasename/tablename
+ format of InnoDB */
+ const char* new_path_in) /*!< in: new full datafile path
+ if the tablespace is remotely
+ located, or NULL if it is located
+ in the normal data directory. */
{
ibool success;
fil_space_t* space;
@@ -2685,14 +2939,14 @@ retry:
space = fil_space_get_by_id(id);
+ DBUG_EXECUTE_IF("fil_rename_tablespace_failure_1", space = NULL; );
+
if (space == NULL) {
- fprintf(stderr,
- "InnoDB: Error: cannot find space id %lu"
- " in the tablespace memory cache\n"
- "InnoDB: though the table ", (ulong) id);
- ut_print_filename(stderr,
- old_name_in ? old_name_in : not_given);
- fputs(" in a rename operation should have that id\n", stderr);
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Cannot find space id %lu in the tablespace "
+ "memory cache, though the table '%s' in a "
+ "rename operation should have that id.",
+ (ulong) id, old_name_in ? old_name_in : not_given);
mutex_exit(&fil_system->mutex);
return(FALSE);
@@ -2711,10 +2965,13 @@ retry:
space->stop_ios = TRUE;
+ /* The following code must change when InnoDB supports
+ multiple datafiles per tablespace. */
ut_a(UT_LIST_GET_LEN(space->chain) == 1);
node = UT_LIST_GET_FIRST(space->chain);
- if (node->n_pending > 0 || node->n_pending_flushes > 0
+ if (node->n_pending > 0
+ || node->n_pending_flushes > 0
|| node->being_extended) {
/* There are pending i/o's or flushes or the file is
currently being extended, sleep for a while and
@@ -2747,24 +3004,31 @@ retry:
if (old_name_in) {
old_name = mem_strdup(old_name_in);
- old_path = fil_make_ibd_name(old_name, FALSE);
-
ut_a(strcmp(space->name, old_name) == 0);
- ut_a(strcmp(node->name, old_path) == 0);
} else {
old_name = mem_strdup(space->name);
- old_path = mem_strdup(node->name);
}
+ old_path = mem_strdup(node->name);
/* Rename the tablespace and the node in the memory cache */
- new_path = fil_make_ibd_name(new_name, FALSE);
+ new_path = new_path_in ? mem_strdup(new_path_in)
+ : fil_make_ibd_name(new_name, false);
+
success = fil_rename_tablespace_in_mem(
space, node, new_name, new_path);
if (success) {
+
+ DBUG_EXECUTE_IF("fil_rename_tablespace_failure_2",
+ goto skip_second_rename; );
+
success = os_file_rename(
innodb_file_data_key, old_path, new_path);
+ DBUG_EXECUTE_IF("fil_rename_tablespace_failure_2",
+skip_second_rename:
+ success = FALSE; );
+
if (!success) {
/* We have to revert the changes we made
to the tablespace memory cache */
@@ -2788,7 +3052,7 @@ retry:
&mtr);
mtr_commit(&mtr);
}
-#endif
+#endif /* !UNIV_HOTBACKUP */
mem_free(new_path);
mem_free(old_path);
@@ -2798,23 +3062,202 @@ retry:
}
/*******************************************************************//**
+Creates a new InnoDB Symbolic Link (ISL) file. It is always created
+under the 'datadir' of MySQL. The datadir is the directory of a
+running mysqld program. We can refer to it by simply using the path '.'.
+@return DB_SUCCESS or error code */
+UNIV_INTERN
+dberr_t
+fil_create_link_file(
+/*=================*/
+ const char* tablename, /*!< in: tablename */
+ const char* filepath) /*!< in: pathname of tablespace */
+{
+ os_file_t file;
+ ibool success;
+ dberr_t err = DB_SUCCESS;
+ char* link_filepath;
+ char* prev_filepath = fil_read_link_file(tablename);
+
+ ut_ad(!srv_read_only_mode);
+
+ if (prev_filepath) {
+ /* Truncate will call this with an existing
+ link file which contains the same filepath. */
+ if (0 == strcmp(prev_filepath, filepath)) {
+ mem_free(prev_filepath);
+ return(DB_SUCCESS);
+ }
+ mem_free(prev_filepath);
+ }
+
+ link_filepath = fil_make_isl_name(tablename);
+
+ file = os_file_create_simple_no_error_handling(
+ innodb_file_data_key, link_filepath,
+ OS_FILE_CREATE, OS_FILE_READ_WRITE, &success);
+
+ if (!success) {
+ /* The following call will print an error message */
+ ulint error = os_file_get_last_error(true);
+
+ ut_print_timestamp(stderr);
+ fputs(" InnoDB: Cannot create file ", stderr);
+ ut_print_filename(stderr, link_filepath);
+ fputs(".\n", stderr);
+
+ if (error == OS_FILE_ALREADY_EXISTS) {
+ fputs("InnoDB: The link file: ", stderr);
+ ut_print_filename(stderr, filepath);
+ fputs(" already exists.\n", stderr);
+ err = DB_TABLESPACE_EXISTS;
+
+ } else if (error == OS_FILE_DISK_FULL) {
+ err = DB_OUT_OF_FILE_SPACE;
+
+ } else {
+ err = DB_ERROR;
+ }
+
+ /* file is not open, no need to close it. */
+ mem_free(link_filepath);
+ return(err);
+ }
+
+ if (!os_file_write(link_filepath, file, filepath, 0,
+ strlen(filepath))) {
+ err = DB_ERROR;
+ }
+
+ /* Close the file, we only need it at startup */
+ os_file_close(file);
+
+ mem_free(link_filepath);
+
+ return(err);
+}
+
+/*******************************************************************//**
+Deletes an InnoDB Symbolic Link (ISL) file. */
+UNIV_INTERN
+void
+fil_delete_link_file(
+/*=================*/
+ const char* tablename) /*!< in: name of table */
+{
+ char* link_filepath = fil_make_isl_name(tablename);
+
+ os_file_delete_if_exists(link_filepath);
+
+ mem_free(link_filepath);
+}
+
+/*******************************************************************//**
+Reads an InnoDB Symbolic Link (ISL) file.
+It is always created under the 'datadir' of MySQL. The name is of the
+form {databasename}/{tablename}. and the isl file is expected to be in a
+'{databasename}' directory called '{tablename}.isl'. The caller must free
+the memory of the null-terminated path returned if it is not null.
+@return own: filepath found in link file, NULL if not found. */
+UNIV_INTERN
+char*
+fil_read_link_file(
+/*===============*/
+ const char* name) /*!< in: tablespace name */
+{
+ char* filepath = NULL;
+ char* link_filepath;
+ FILE* file = NULL;
+
+ /* The .isl file is in the 'normal' tablespace location. */
+ link_filepath = fil_make_isl_name(name);
+
+ file = fopen(link_filepath, "r+b");
+
+ mem_free(link_filepath);
+
+ if (file) {
+ filepath = static_cast<char*>(mem_alloc(OS_FILE_MAX_PATH));
+
+ os_file_read_string(file, filepath, OS_FILE_MAX_PATH);
+ fclose(file);
+
+ if (strlen(filepath)) {
+ /* Trim whitespace from end of filepath */
+ ulint lastch = strlen(filepath) - 1;
+ while (lastch > 4 && filepath[lastch] <= 0x20) {
+ filepath[lastch--] = 0x00;
+ }
+ srv_normalize_path_for_win(filepath);
+ }
+ }
+
+ return(filepath);
+}
+
+/*******************************************************************//**
+Opens a handle to the file linked to in an InnoDB Symbolic Link file.
+@return TRUE if remote linked tablespace file is found and opened. */
+UNIV_INTERN
+ibool
+fil_open_linked_file(
+/*===============*/
+ const char* tablename, /*!< in: database/tablename */
+ char** remote_filepath,/*!< out: remote filepath */
+ os_file_t* remote_file) /*!< out: remote file handle */
+
+{
+ ibool success;
+
+ *remote_filepath = fil_read_link_file(tablename);
+ if (*remote_filepath == NULL) {
+ return(FALSE);
+ }
+
+ /* The filepath provided is different from what was
+ found in the link file. */
+ *remote_file = os_file_create_simple_no_error_handling(
+ innodb_file_data_key, *remote_filepath,
+ OS_FILE_OPEN, OS_FILE_READ_ONLY,
+ &success);
+
+ if (!success) {
+ char* link_filepath = fil_make_isl_name(tablename);
+
+ /* The following call prints an error message */
+ os_file_get_last_error(true);
+
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "A link file was found named '%s' "
+ "but the linked tablespace '%s' "
+ "could not be opened.",
+ link_filepath, *remote_filepath);
+
+ mem_free(link_filepath);
+ mem_free(*remote_filepath);
+ *remote_filepath = NULL;
+ }
+
+ return(success);
+}
+
+/*******************************************************************//**
Creates a new single-table tablespace to a database directory of MySQL.
Database directories are under the 'datadir' of MySQL. The datadir is the
directory of a running mysqld program. We can refer to it by simply the
path '.'. Tables created with CREATE TEMPORARY TABLE we place in the temp
dir of the mysqld server.
+
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fil_create_new_single_table_tablespace(
/*===================================*/
ulint space_id, /*!< in: space id */
const char* tablename, /*!< in: the table name in the usual
databasename/tablename format
- of InnoDB, or a dir path to a temp
- table */
- ibool is_temp, /*!< in: TRUE if a table created with
- CREATE TEMPORARY TABLE */
+ of InnoDB */
+ const char* dir_path, /*!< in: NULL or a dir path */
ulint flags, /*!< in: tablespace flags */
ulint flags2, /*!< in: table flags2 */
ulint size) /*!< in: the initial size of the
@@ -2823,18 +3266,40 @@ fil_create_new_single_table_tablespace(
{
os_file_t file;
ibool ret;
- ulint err;
+ dberr_t err;
byte* buf2;
byte* page;
char* path;
ibool success;
+ /* TRUE if a table is created with CREATE TEMPORARY TABLE */
+ bool is_temp = !!(flags2 & DICT_TF2_TEMPORARY);
+ bool has_data_dir = FSP_FLAGS_HAS_DATA_DIR(flags);
ut_a(space_id > 0);
+ ut_ad(!srv_read_only_mode);
ut_a(space_id < SRV_LOG_SPACE_FIRST_ID);
ut_a(size >= FIL_IBD_FILE_INITIAL_SIZE);
- fsp_flags_validate(flags);
+ ut_a(fsp_flags_is_valid(flags));
- path = fil_make_ibd_name(tablename, is_temp);
+ if (is_temp) {
+ /* Temporary table filepath */
+ ut_ad(dir_path);
+ path = fil_make_ibd_name(dir_path, true);
+ } else if (has_data_dir) {
+ ut_ad(dir_path);
+ path = os_file_make_remote_pathname(dir_path, tablename, "ibd");
+
+ /* Since this tablespace file will be created in a
+ remote directory, let's create the subdirectories
+ in the path, if they are not there already. */
+ success = os_file_create_subdirs_if_needed(path);
+ if (!success) {
+ err = DB_ERROR;
+ goto error_exit_3;
+ }
+ } else {
+ path = fil_make_ibd_name(tablename, false);
+ }
file = os_file_create(
innodb_file_data_key, path,
@@ -2844,58 +3309,44 @@ fil_create_new_single_table_tablespace(
&ret);
if (ret == FALSE) {
- ut_print_timestamp(stderr);
- fputs(" InnoDB: Error creating file ", stderr);
- ut_print_filename(stderr, path);
- fputs(".\n", stderr);
-
/* The following call will print an error message */
-
- err = os_file_get_last_error(TRUE);
-
- if (err == OS_FILE_ALREADY_EXISTS) {
- fputs("InnoDB: The file already exists though"
- " the corresponding table did not\n"
- "InnoDB: exist in the InnoDB data dictionary."
- " Have you moved InnoDB\n"
- "InnoDB: .ibd files around without using the"
- " SQL commands\n"
- "InnoDB: DISCARD TABLESPACE and"
- " IMPORT TABLESPACE, or did\n"
- "InnoDB: mysqld crash in the middle of"
- " CREATE TABLE? You can\n"
- "InnoDB: resolve the problem by"
- " removing the file ", stderr);
- ut_print_filename(stderr, path);
- fputs("\n"
- "InnoDB: under the 'datadir' of MySQL.\n",
- stderr);
-
- mem_free(path);
- return(DB_TABLESPACE_ALREADY_EXISTS);
+ ulint error = os_file_get_last_error(true);
+
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Cannot create file '%s'\n", path);
+
+ if (error == OS_FILE_ALREADY_EXISTS) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "The file '%s' already exists though the "
+ "corresponding table did not exist "
+ "in the InnoDB data dictionary. "
+ "Have you moved InnoDB .ibd files "
+ "around without using the SQL commands "
+ "DISCARD TABLESPACE and IMPORT TABLESPACE, "
+ "or did mysqld crash in the middle of "
+ "CREATE TABLE? "
+ "You can resolve the problem by removing "
+ "the file '%s' under the 'datadir' of MySQL.",
+ path, path);
+
+ err = DB_TABLESPACE_EXISTS;
+ goto error_exit_3;
}
- if (err == OS_FILE_DISK_FULL) {
-
- mem_free(path);
- return(DB_OUT_OF_FILE_SPACE);
+ if (error == OS_FILE_DISK_FULL) {
+ err = DB_OUT_OF_FILE_SPACE;
+ goto error_exit_3;
}
- mem_free(path);
- return(DB_ERROR);
+ err = DB_ERROR;
+ goto error_exit_3;
}
ret = os_file_set_size(path, file, size * UNIV_PAGE_SIZE);
if (!ret) {
err = DB_OUT_OF_FILE_SPACE;
-error_exit:
- os_file_close(file);
-error_exit2:
- os_file_delete(path);
-
- mem_free(path);
- return(err);
+ goto error_exit_2;
}
/* printf("Creating tablespace %s id %lu\n", path, space_id); */
@@ -2944,356 +3395,486 @@ error_exit2:
ut_free(buf2);
if (!ret) {
- fputs("InnoDB: Error: could not write the first page"
- " to tablespace ", stderr);
- ut_print_filename(stderr, path);
- putc('\n', stderr);
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Could not write the first page to tablespace "
+ "'%s'", path);
+
err = DB_ERROR;
- goto error_exit;
+ goto error_exit_2;
}
ret = os_file_flush(file);
if (!ret) {
- fputs("InnoDB: Error: file flush of tablespace ", stderr);
- ut_print_filename(stderr, path);
- fputs(" failed\n", stderr);
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "File flush of tablespace '%s' failed", path);
err = DB_ERROR;
- goto error_exit;
+ goto error_exit_2;
}
- os_file_close(file);
+ if (has_data_dir) {
+ /* Now that the IBD file is created, make the ISL file. */
+ err = fil_create_link_file(tablename, path);
+ if (err != DB_SUCCESS) {
+ goto error_exit_2;
+ }
+ }
success = fil_space_create(tablename, space_id, flags, FIL_TABLESPACE);
-
- if (!success) {
+ if (!success || !fil_node_create(path, size, space_id, FALSE)) {
err = DB_ERROR;
- goto error_exit2;
+ goto error_exit_1;
}
- fil_node_create(path, size, space_id, FALSE);
-
#ifndef UNIV_HOTBACKUP
{
mtr_t mtr;
+ ulint mlog_file_flag = 0;
+
+ if (is_temp) {
+ mlog_file_flag |= MLOG_FILE_FLAG_TEMP;
+ }
mtr_start(&mtr);
fil_op_write_log(flags
? MLOG_FILE_CREATE2
: MLOG_FILE_CREATE,
- space_id,
- is_temp ? MLOG_FILE_FLAG_TEMP : 0,
- flags,
+ space_id, mlog_file_flag, flags,
tablename, NULL, &mtr);
mtr_commit(&mtr);
}
#endif
+ err = DB_SUCCESS;
+
+ /* Error code is set. Cleanup the various variables used.
+ These labels reflect the order in which variables are assigned or
+ actions are done. */
+error_exit_1:
+ if (has_data_dir && err != DB_SUCCESS) {
+ fil_delete_link_file(tablename);
+ }
+error_exit_2:
+ os_file_close(file);
+ if (err != DB_SUCCESS) {
+ os_file_delete(path);
+ }
+error_exit_3:
mem_free(path);
- return(DB_SUCCESS);
+
+ return(err);
}
#ifndef UNIV_HOTBACKUP
/********************************************************************//**
-It is possible, though very improbable, that the lsn's in the tablespace to be
-imported have risen above the current system lsn, if a lengthy purge, ibuf
-merge, or rollback was performed on a backup taken with ibbackup. If that is
-the case, reset page lsn's in the file. We assume that mysqld was shut down
-after it performed these cleanup operations on the .ibd file, so that it at
-the shutdown stamped the latest lsn to the FIL_PAGE_FILE_FLUSH_LSN in the
-first page of the .ibd file, and we can determine whether we need to reset the
-lsn's just by looking at that flush lsn.
-@return TRUE if success */
-UNIV_INTERN
-ibool
-fil_reset_too_high_lsns(
-/*====================*/
- const char* name, /*!< in: table name in the
- databasename/tablename format */
- lsn_t current_lsn) /*!< in: reset lsn's if the lsn stamped
- to FIL_PAGE_FILE_FLUSH_LSN in the
- first page is too high */
+Report information about a bad tablespace. */
+static
+void
+fil_report_bad_tablespace(
+/*======================*/
+ char* filepath, /*!< in: filepath */
+ ulint found_id, /*!< in: found space ID */
+ ulint found_flags, /*!< in: found flags */
+ ulint expected_id, /*!< in: expected space id */
+ ulint expected_flags) /*!< in: expected flags */
{
- os_file_t file;
- char* filepath;
- byte* page;
- byte* buf2;
- lsn_t flush_lsn;
- ulint space_id;
- os_offset_t file_size;
- os_offset_t offset;
- ulint zip_size;
- ibool success;
- page_zip_des_t page_zip;
-
- filepath = fil_make_ibd_name(name, FALSE);
-
- file = os_file_create_simple_no_error_handling(
- innodb_file_data_key, filepath, OS_FILE_OPEN,
- OS_FILE_READ_WRITE, &success);
- if (!success) {
- /* The following call prints an error message */
- os_file_get_last_error(TRUE);
-
- ut_print_timestamp(stderr);
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "In file '%s', tablespace id and flags are %lu and %lu, "
+ "but in the InnoDB data dictionary they are %lu and %lu. "
+ "Have you moved InnoDB .ibd files around without using the "
+ "commands DISCARD TABLESPACE and IMPORT TABLESPACE? "
+ "Please refer to "
+ REFMAN "innodb-troubleshooting-datadict.html "
+ "for how to resolve the issue.",
+ filepath, (ulong) found_id, (ulong) found_flags,
+ (ulong) expected_id, (ulong) expected_flags);
+}
- fputs(" InnoDB: Error: trying to open a table,"
- " but could not\n"
- "InnoDB: open the tablespace file ", stderr);
- ut_print_filename(stderr, filepath);
- fputs("!\n", stderr);
- mem_free(filepath);
+struct fsp_open_info {
+ ibool success; /*!< Has the tablespace been opened? */
+ ibool valid; /*!< Is the tablespace valid? */
+ os_file_t file; /*!< File handle */
+ char* filepath; /*!< File path to open */
+ lsn_t lsn; /*!< Flushed LSN from header page */
+ ulint id; /*!< Space ID */
+ ulint flags; /*!< Tablespace flags */
+#ifdef UNIV_LOG_ARCHIVE
+ ulint arch_log_no; /*!< latest archived log file number */
+#endif /* UNIV_LOG_ARCHIVE */
+};
- return(FALSE);
- }
+/********************************************************************//**
+Tries to open a single-table tablespace and optionally checks that the
+space id in it is correct. If this does not succeed, print an error message
+to the .err log. This function is used to open a tablespace when we start
+mysqld after the dictionary has been booted, and also in IMPORT TABLESPACE.
- /* Read the first page of the tablespace */
+NOTE that we assume this operation is used either at the database startup
+or under the protection of the dictionary mutex, so that two users cannot
+race here. This operation does not leave the file associated with the
+tablespace open, but closes it after we have looked at the space id in it.
- buf2 = static_cast<byte*>(ut_malloc(3 * UNIV_PAGE_SIZE));
- /* Align the memory for file i/o if we might have O_DIRECT set */
- page = static_cast<byte*>(ut_align(buf2, UNIV_PAGE_SIZE));
+If the validate boolean is set, we read the first page of the file and
+check that the space id in the file is what we expect. We assume that
+this function runs much faster if no check is made, since accessing the
+file inode probably is much faster (the OS caches them) than accessing
+the first page of the file. This boolean may be initially FALSE, but if
+a remote tablespace is found it will be changed to true.
- success = os_file_read(file, page, 0, UNIV_PAGE_SIZE);
- if (!success) {
+If the fix_dict boolean is set, then it is safe to use an internal SQL
+statement to update the dictionary tables if they are incorrect.
- goto func_exit;
+@return DB_SUCCESS or error code */
+UNIV_INTERN
+dberr_t
+fil_open_single_table_tablespace(
+/*=============================*/
+ bool validate, /*!< in: Do we validate tablespace? */
+ bool fix_dict, /*!< in: Can we fix the dictionary? */
+ ulint id, /*!< in: space id */
+ ulint flags, /*!< in: tablespace flags */
+ const char* tablename, /*!< in: table name in the
+ databasename/tablename format */
+ const char* path_in) /*!< in: tablespace filepath */
+{
+ dberr_t err = DB_SUCCESS;
+ bool dict_filepath_same_as_default = false;
+ bool link_file_found = false;
+ bool link_file_is_bad = false;
+ fsp_open_info def;
+ fsp_open_info dict;
+ fsp_open_info remote;
+ ulint tablespaces_found = 0;
+ ulint valid_tablespaces_found = 0;
+
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(!fix_dict || rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+ ut_ad(!fix_dict || mutex_own(&(dict_sys->mutex)));
+
+ if (!fsp_flags_is_valid(flags)) {
+ return(DB_CORRUPTION);
+ }
+
+ /* If the tablespace was relocated, we do not
+ compare the DATA_DIR flag */
+ ulint mod_flags = flags & ~FSP_FLAGS_MASK_DATA_DIR;
+
+ memset(&def, 0, sizeof(def));
+ memset(&dict, 0, sizeof(dict));
+ memset(&remote, 0, sizeof(remote));
+
+ /* Discover the correct filepath. We will always look for an ibd
+ in the default location. If it is remote, it should not be here. */
+ def.filepath = fil_make_ibd_name(tablename, false);
+
+ /* The path_in was read from SYS_DATAFILES. */
+ if (path_in) {
+ if (strcmp(def.filepath, path_in)) {
+ dict.filepath = mem_strdup(path_in);
+ /* possibility of multiple files. */
+ validate = true;
+ } else {
+ dict_filepath_same_as_default = true;
+ }
}
- /* We have to read the file flush lsn from the header of the file */
-
- flush_lsn = mach_read_from_8(page + FIL_PAGE_FILE_FLUSH_LSN);
+ link_file_found = fil_open_linked_file(
+ tablename, &remote.filepath, &remote.file);
+ remote.success = link_file_found;
+ if (remote.success) {
+ /* possibility of multiple files. */
+ validate = true;
+ tablespaces_found++;
+
+ /* A link file was found. MySQL does not allow a DATA
+ DIRECTORY to be be the same as the default filepath. */
+ ut_a(strcmp(def.filepath, remote.filepath));
+
+ /* If there was a filepath found in SYS_DATAFILES,
+ we hope it was the same as this remote.filepath found
+ in the ISL file. */
+ if (dict.filepath
+ && (0 == strcmp(dict.filepath, remote.filepath))) {
+ remote.success = FALSE;
+ os_file_close(remote.file);
+ mem_free(remote.filepath);
+ remote.filepath = NULL;
+ tablespaces_found--;
+ }
+ }
- if (current_lsn >= flush_lsn) {
- /* Ok */
- success = TRUE;
+ /* Attempt to open the tablespace at other possible filepaths. */
+ if (dict.filepath) {
+ dict.file = os_file_create_simple_no_error_handling(
+ innodb_file_data_key, dict.filepath, OS_FILE_OPEN,
+ OS_FILE_READ_ONLY, &dict.success);
+ if (dict.success) {
+ /* possibility of multiple files. */
+ validate = true;
+ tablespaces_found++;
+ }
+ }
- goto func_exit;
+ /* Always look for a file at the default location. */
+ ut_a(def.filepath);
+ def.file = os_file_create_simple_no_error_handling(
+ innodb_file_data_key, def.filepath, OS_FILE_OPEN,
+ OS_FILE_READ_ONLY, &def.success);
+ if (def.success) {
+ tablespaces_found++;
}
- space_id = fsp_header_get_space_id(page);
- zip_size = fsp_header_get_zip_size(page);
+ /* We have now checked all possible tablespace locations and
+ have a count of how many we found. If things are normal, we
+ only found 1. */
+ if (!validate && tablespaces_found == 1) {
+ goto skip_validate;
+ }
- page_zip_des_init(&page_zip);
- page_zip_set_size(&page_zip, zip_size);
- if (zip_size) {
- page_zip.data = page + UNIV_PAGE_SIZE;
+ /* Read the first page of the datadir tablespace, if found. */
+ if (def.success) {
+ fil_read_first_page(
+ def.file, FALSE, &def.flags, &def.id,
+#ifdef UNIV_LOG_ARCHIVE
+ &space_arch_log_no, &space_arch_log_no,
+#endif /* UNIV_LOG_ARCHIVE */
+ &def.lsn, &def.lsn);
+
+ /* Validate this single-table-tablespace with SYS_TABLES,
+ but do not compare the DATA_DIR flag, in case the
+ tablespace was relocated. */
+ ulint mod_def_flags = def.flags & ~FSP_FLAGS_MASK_DATA_DIR;
+ if (def.id == id && mod_def_flags == mod_flags) {
+ valid_tablespaces_found++;
+ def.valid = TRUE;
+ } else {
+ /* Do not use this tablespace. */
+ fil_report_bad_tablespace(
+ def.filepath, def.id,
+ def.flags, id, flags);
+ }
}
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Flush lsn in the tablespace file %lu"
- " to be imported\n"
- "InnoDB: is " LSN_PF ", which exceeds current"
- " system lsn " LSN_PF ".\n"
- "InnoDB: We reset the lsn's in the file ",
- (ulong) space_id,
- flush_lsn, current_lsn);
- ut_print_filename(stderr, filepath);
- fputs(".\n", stderr);
-
- ut_a(ut_is_2pow(zip_size));
- ut_a(zip_size <= UNIV_ZIP_SIZE_MAX);
-
- /* Loop through all the pages in the tablespace and reset the lsn and
- the page checksum if necessary */
-
- file_size = os_file_get_size(file);
- ut_a(file_size != (os_offset_t) -1);
+ /* Read the first page of the remote tablespace */
+ if (remote.success) {
+ fil_read_first_page(
+ remote.file, FALSE, &remote.flags, &remote.id,
+#ifdef UNIV_LOG_ARCHIVE
+ &remote.arch_log_no, &remote.arch_log_no,
+#endif /* UNIV_LOG_ARCHIVE */
+ &remote.lsn, &remote.lsn);
+
+ /* Validate this single-table-tablespace with SYS_TABLES,
+ but do not compare the DATA_DIR flag, in case the
+ tablespace was relocated. */
+ ulint mod_remote_flags = remote.flags & ~FSP_FLAGS_MASK_DATA_DIR;
+ if (remote.id == id && mod_remote_flags == mod_flags) {
+ valid_tablespaces_found++;
+ remote.valid = TRUE;
+ } else {
+ /* Do not use this linked tablespace. */
+ fil_report_bad_tablespace(
+ remote.filepath, remote.id,
+ remote.flags, id, flags);
+ link_file_is_bad = true;
+ }
+ }
- for (offset = 0; offset < file_size;
- offset += zip_size ? zip_size : UNIV_PAGE_SIZE) {
- success = os_file_read(file, page, offset,
- zip_size ? zip_size : UNIV_PAGE_SIZE);
- if (!success) {
+ /* Read the first page of the datadir tablespace, if found. */
+ if (dict.success) {
+ fil_read_first_page(
+ dict.file, FALSE, &dict.flags, &dict.id,
+#ifdef UNIV_LOG_ARCHIVE
+ &dict.arch_log_no, &dict.arch_log_no,
+#endif /* UNIV_LOG_ARCHIVE */
+ &dict.lsn, &dict.lsn);
+
+ /* Validate this single-table-tablespace with SYS_TABLES,
+ but do not compare the DATA_DIR flag, in case the
+ tablespace was relocated. */
+ ulint mod_dict_flags = dict.flags & ~FSP_FLAGS_MASK_DATA_DIR;
+ if (dict.id == id && mod_dict_flags == mod_flags) {
+ valid_tablespaces_found++;
+ dict.valid = TRUE;
+ } else {
+ /* Do not use this tablespace. */
+ fil_report_bad_tablespace(
+ dict.filepath, dict.id,
+ dict.flags, id, flags);
+ }
+ }
- goto func_exit;
+ /* Make sense of these three possible locations.
+ First, bail out if no tablespace files were found. */
+ if (valid_tablespaces_found == 0) {
+ /* The following call prints an error message */
+ os_file_get_last_error(true);
+
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Could not find a valid tablespace file for '%s'. "
+ "See " REFMAN "innodb-troubleshooting-datadict.html "
+ "for how to resolve the issue.",
+ tablename);
+
+ err = DB_CORRUPTION;
+
+ goto cleanup_and_exit;
+ }
+
+ /* Do not open any tablespaces if more than one tablespace with
+ the correct space ID and flags were found. */
+ if (tablespaces_found > 1) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "A tablespace for %s has been found in "
+ "multiple places;", tablename);
+ if (def.success) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Default location; %s, LSN=" LSN_PF
+ ", Space ID=%lu, Flags=%lu",
+ def.filepath, def.lsn,
+ (ulong) def.id, (ulong) def.flags);
+ }
+ if (remote.success) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Remote location; %s, LSN=" LSN_PF
+ ", Space ID=%lu, Flags=%lu",
+ remote.filepath, remote.lsn,
+ (ulong) remote.id, (ulong) remote.flags);
}
- if (mach_read_from_8(page + FIL_PAGE_LSN) > current_lsn) {
- /* We have to reset the lsn */
-
- if (zip_size) {
- memcpy(page_zip.data, page, zip_size);
- buf_flush_init_for_writing(
- page, &page_zip, current_lsn);
- success = os_file_write(
- filepath, file, page_zip.data,
- offset, zip_size);
+ if (dict.success) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Dictionary location; %s, LSN=" LSN_PF
+ ", Space ID=%lu, Flags=%lu",
+ dict.filepath, dict.lsn,
+ (ulong) dict.id, (ulong) dict.flags);
+ }
+
+ /* Force-recovery will allow some tablespaces to be
+ skipped by REDO if there was more than one file found.
+ Unlike during the REDO phase of recovery, we now know
+ if the tablespace is valid according to the dictionary,
+ which was not available then. So if we did not force
+ recovery and there is only one good tablespace, ignore
+ any bad tablespaces. */
+ if (valid_tablespaces_found > 1 || srv_force_recovery > 0) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Will not open the tablespace for '%s'",
+ tablename);
+
+ if (def.success != def.valid
+ || dict.success != dict.valid
+ || remote.success != remote.valid) {
+ err = DB_CORRUPTION;
} else {
- buf_flush_init_for_writing(
- page, NULL, current_lsn);
- success = os_file_write(
- filepath, file, page,
- offset, UNIV_PAGE_SIZE);
+ err = DB_ERROR;
}
+ goto cleanup_and_exit;
+ }
- if (!success) {
+ /* There is only one valid tablespace found and we did
+ not use srv_force_recovery during REDO. Use this one
+ tablespace and clean up invalid tablespace pointers */
+ if (def.success && !def.valid) {
+ def.success = false;
+ os_file_close(def.file);
+ tablespaces_found--;
+ }
+ if (dict.success && !dict.valid) {
+ dict.success = false;
+ os_file_close(dict.file);
+ /* Leave dict.filepath so that SYS_DATAFILES
+ can be corrected below. */
+ tablespaces_found--;
+ }
+ if (remote.success && !remote.valid) {
+ remote.success = false;
+ os_file_close(remote.file);
+ mem_free(remote.filepath);
+ remote.filepath = NULL;
+ tablespaces_found--;
+ }
+ }
- goto func_exit;
+ /* At this point, there should be only one filepath. */
+ ut_a(tablespaces_found == 1);
+ ut_a(valid_tablespaces_found == 1);
+
+ /* Only fix the dictionary at startup when there is only one thread.
+ Calls to dict_load_table() can be done while holding other latches. */
+ if (!fix_dict) {
+ goto skip_validate;
+ }
+
+ /* We may need to change what is stored in SYS_DATAFILES or
+ SYS_TABLESPACES or adjust the link file.
+ Since a failure to update SYS_TABLESPACES or SYS_DATAFILES does
+ not prevent opening and using the single_table_tablespace either
+ this time or the next, we do not check the return code or fail
+ to open the tablespace. But dict_update_filepath() will issue a
+ warning to the log. */
+ if (dict.filepath) {
+ if (remote.success) {
+ dict_update_filepath(id, remote.filepath);
+ } else if (def.success) {
+ dict_update_filepath(id, def.filepath);
+ if (link_file_is_bad) {
+ fil_delete_link_file(tablename);
}
+ } else if (!link_file_found || link_file_is_bad) {
+ ut_ad(dict.success);
+ /* Fix the link file if we got our filepath
+ from the dictionary but a link file did not
+ exist or it did not point to a valid file. */
+ fil_delete_link_file(tablename);
+ fil_create_link_file(tablename, dict.filepath);
}
- }
- success = os_file_flush(file);
- if (!success) {
+ } else if (remote.success && dict_filepath_same_as_default) {
+ dict_update_filepath(id, remote.filepath);
- goto func_exit;
+ } else if (remote.success && path_in == NULL) {
+ /* SYS_DATAFILES record for this space ID was not found. */
+ dict_insert_tablespace_and_filepath(
+ id, tablename, remote.filepath, flags);
}
- /* We now update the flush_lsn stamp at the start of the file */
- success = os_file_read(file, page, 0,
- zip_size ? zip_size : UNIV_PAGE_SIZE);
- if (!success) {
+skip_validate:
+ if (err != DB_SUCCESS) {
+ ; // Don't load the tablespace into the cache
+ } else if (!fil_space_create(tablename, id, flags, FIL_TABLESPACE)) {
+ err = DB_ERROR;
+ } else {
+ /* We do not measure the size of the file, that is why
+ we pass the 0 below */
- goto func_exit;
+ if (!fil_node_create(remote.success ? remote.filepath :
+ dict.success ? dict.filepath :
+ def.filepath, 0, id, FALSE)) {
+ err = DB_ERROR;
+ }
}
- mach_write_to_8(page + FIL_PAGE_FILE_FLUSH_LSN, current_lsn);
-
- success = os_file_write(filepath, file, page, 0,
- zip_size ? zip_size : UNIV_PAGE_SIZE);
- if (!success) {
-
- goto func_exit;
+cleanup_and_exit:
+ if (remote.success) {
+ os_file_close(remote.file);
}
- success = os_file_flush(file);
-func_exit:
- os_file_close(file);
- ut_free(buf2);
- mem_free(filepath);
-
- return(success);
-}
-
-/********************************************************************//**
-Tries to open a single-table tablespace and optionally checks the space id is
-right in it. If does not succeed, prints an error message to the .err log. This
-function is used to open a tablespace when we start up mysqld, and also in
-IMPORT TABLESPACE.
-NOTE that we assume this operation is used either at the database startup
-or under the protection of the dictionary mutex, so that two users cannot
-race here. This operation does not leave the file associated with the
-tablespace open, but closes it after we have looked at the space id in it.
-@return TRUE if success */
-UNIV_INTERN
-ibool
-fil_open_single_table_tablespace(
-/*=============================*/
- ibool check_space_id, /*!< in: should we check that the space
- id in the file is right; we assume
- that this function runs much faster
- if no check is made, since accessing
- the file inode probably is much
- faster (the OS caches them) than
- accessing the first page of the file */
- ulint id, /*!< in: space id */
- ulint flags, /*!< in: tablespace flags */
- const char* tablename) /*!< in: table name in the
- databasename/tablename format */
-{
- os_file_t file;
- char* filepath;
- ibool success;
- byte* buf2;
- byte* page;
- ulint space_id;
- ulint space_flags;
-
- filepath = fil_make_ibd_name(tablename, FALSE);
-
- fsp_flags_validate(flags);
-
- file = os_file_create_simple_no_error_handling(
- innodb_file_data_key, filepath, OS_FILE_OPEN,
- OS_FILE_READ_ONLY, &success);
- if (!success) {
- /* The following call prints an error message */
- os_file_get_last_error(TRUE);
-
- ut_print_timestamp(stderr);
-
- fputs(" InnoDB: Error: trying to open a table,"
- " but could not\n"
- "InnoDB: open the tablespace file ", stderr);
- ut_print_filename(stderr, filepath);
- fputs("!\n"
- "InnoDB: Have you moved InnoDB .ibd files around"
- " without using the\n"
- "InnoDB: commands DISCARD TABLESPACE and"
- " IMPORT TABLESPACE?\n"
- "InnoDB: It is also possible that this is"
- " a temporary table #sql...,\n"
- "InnoDB: and MySQL removed the .ibd file for this.\n"
- "InnoDB: Please refer to\n"
- "InnoDB: " REFMAN
- "innodb-troubleshooting-datadict.html\n"
- "InnoDB: for how to resolve the issue.\n", stderr);
-
- mem_free(filepath);
-
- return(FALSE);
+ if (remote.filepath) {
+ mem_free(remote.filepath);
}
-
- if (!check_space_id) {
- space_id = id;
-
- goto skip_check;
+ if (dict.success) {
+ os_file_close(dict.file);
}
-
- /* Read the first page of the tablespace */
-
- buf2 = static_cast<byte*>(ut_malloc(2 * UNIV_PAGE_SIZE));
- /* Align the memory for file i/o if we might have O_DIRECT set */
- page = static_cast<byte*>(ut_align(buf2, UNIV_PAGE_SIZE));
-
- success = os_file_read(file, page, 0, UNIV_PAGE_SIZE);
-
- /* We have to read the tablespace id and flags from the file. */
-
- space_id = fsp_header_get_space_id(page);
- space_flags = fsp_header_get_flags(page);
-
- ut_free(buf2);
-
- if (UNIV_UNLIKELY(space_id != id || space_flags != flags)) {
- ut_print_timestamp(stderr);
-
- fputs(" InnoDB: Error: tablespace id and flags in file ",
- stderr);
- ut_print_filename(stderr, filepath);
- fprintf(stderr, " are %lu and %lu, but in the InnoDB\n"
- "InnoDB: data dictionary they are %lu and %lu.\n"
- "InnoDB: Have you moved InnoDB .ibd files"
- " around without using the\n"
- "InnoDB: commands DISCARD TABLESPACE and"
- " IMPORT TABLESPACE?\n"
- "InnoDB: Please refer to\n"
- "InnoDB: " REFMAN "innodb-troubleshooting-datadict.html\n"
- "InnoDB: for how to resolve the issue.\n",
- (ulong) space_id, (ulong) space_flags,
- (ulong) id, (ulong) flags);
-
- success = FALSE;
-
- goto func_exit;
+ if (dict.filepath) {
+ mem_free(dict.filepath);
}
-
-skip_check:
- success = fil_space_create(tablename, space_id, flags, FIL_TABLESPACE);
-
- if (!success) {
- goto func_exit;
+ if (def.success) {
+ os_file_close(def.file);
}
+ mem_free(def.filepath);
- /* We do not measure the size of the file, that is why we pass the 0
- below */
-
- fil_node_create(filepath, 0, space_id, FALSE);
-func_exit:
- os_file_close(file);
- mem_free(filepath);
-
- return(success);
+ return(err);
}
#endif /* !UNIV_HOTBACKUP */
@@ -3316,13 +3897,64 @@ fil_make_ibbackup_old_name(
memcpy(path, name, len);
memcpy(path + len, suffix, (sizeof suffix) - 1);
- ut_sprintf_timestamp_without_extra_chars(path + len + sizeof suffix);
+ ut_sprintf_timestamp_without_extra_chars(
+ path + len + ((sizeof suffix) - 1));
return(path);
}
#endif /* UNIV_HOTBACKUP */
/********************************************************************//**
Opens an .ibd file and adds the associated single-table tablespace to the
+InnoDB fil0fil.cc data structures.
+Set fsp->success to TRUE if tablespace is valid, FALSE if not. */
+static
+void
+fil_validate_single_table_tablespace(
+/*=================================*/
+ const char* tablename, /*!< in: database/tablename */
+ fsp_open_info* fsp) /*!< in/out: tablespace info */
+{
+ fil_read_first_page(
+ fsp->file, FALSE, &fsp->flags, &fsp->id,
+#ifdef UNIV_LOG_ARCHIVE
+ &fsp->arch_log_no, &fsp->arch_log_no,
+#endif /* UNIV_LOG_ARCHIVE */
+ &fsp->lsn, &fsp->lsn);
+
+ if (fsp->id == ULINT_UNDEFINED || fsp->id == 0) {
+ fprintf(stderr,
+ " InnoDB: Error: Tablespace is not sensible;"
+ " Table: %s Space ID: %lu Filepath: %s\n",
+ tablename, (ulong) fsp->id, fsp->filepath);
+ fsp->success = FALSE;
+ return;
+ }
+
+ mutex_enter(&fil_system->mutex);
+ fil_space_t* space = fil_space_get_by_id(fsp->id);
+ mutex_exit(&fil_system->mutex);
+ if (space != NULL) {
+ char* prev_filepath = fil_space_get_first_path(fsp->id);
+
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Attempted to open a previously opened tablespace. "
+ "Previous tablespace %s uses space ID: %lu at "
+ "filepath: %s. Cannot open tablespace %s which uses "
+ "space ID: %lu at filepath: %s",
+ space->name, (ulong) space->id, prev_filepath,
+ tablename, (ulong) fsp->id, fsp->filepath);
+
+ mem_free(prev_filepath);
+ fsp->success = FALSE;
+ return;
+ }
+
+ fsp->success = TRUE;
+}
+
+
+/********************************************************************//**
+Opens an .ibd file and adds the associated single-table tablespace to the
InnoDB fil0fil.cc data structures. */
static
void
@@ -3330,34 +3962,49 @@ fil_load_single_table_tablespace(
/*=============================*/
const char* dbname, /*!< in: database name */
const char* filename) /*!< in: file name (not a path),
- including the .ibd extension */
+ including the .ibd or .isl extension */
{
- os_file_t file;
- char* filepath;
char* tablename;
- ibool success;
- byte* buf2;
- byte* page;
- ulint space_id;
- ulint flags;
+ ulint tablename_len;
+ ulint dbname_len = strlen(dbname);
+ ulint filename_len = strlen(filename);
+ fsp_open_info def;
+ fsp_open_info remote;
os_offset_t size;
#ifdef UNIV_HOTBACKUP
fil_space_t* space;
#endif
- filepath = static_cast<char*>(
- mem_alloc(
- strlen(dbname)
- + strlen(filename)
- + strlen(fil_path_to_mysql_datadir) + 3));
- sprintf(filepath, "%s/%s/%s", fil_path_to_mysql_datadir, dbname,
- filename);
- srv_normalize_path_for_win(filepath);
+ memset(&def, 0, sizeof(def));
+ memset(&remote, 0, sizeof(remote));
+ /* The caller assured that the extension is ".ibd" or ".isl". */
+ ut_ad(0 == memcmp(filename + filename_len - 4, ".ibd", 4)
+ || 0 == memcmp(filename + filename_len - 4, ".isl", 4));
+
+ /* Build up the tablename in the standard form database/table. */
tablename = static_cast<char*>(
- mem_alloc(strlen(dbname) + strlen(filename) + 2));
+ mem_alloc(dbname_len + filename_len + 2));
sprintf(tablename, "%s/%s", dbname, filename);
- tablename[strlen(tablename) - strlen(".ibd")] = 0;
+ tablename_len = strlen(tablename) - strlen(".ibd");
+ tablename[tablename_len] = '\0';
+
+ /* There may be both .ibd and .isl file in the directory.
+ And it is possible that the .isl file refers to a different
+ .ibd file. If so, we open and compare them the first time
+ one of them is sent to this function. So if this table has
+ already been loaded, there is nothing to do.*/
+ mutex_enter(&fil_system->mutex);
+ if (fil_space_get_by_name(tablename)) {
+ mem_free(tablename);
+ mutex_exit(&fil_system->mutex);
+ return;
+ }
+ mutex_exit(&fil_system->mutex);
+
+ /* Build up the filepath of the .ibd tablespace in the datadir.
+ This must be freed independent of def.success. */
+ def.filepath = fil_make_ibd_name(tablename, false);
#ifdef __WIN__
# ifndef UNIV_HOTBACKUP
@@ -3367,31 +4014,56 @@ fil_load_single_table_tablespace(
file path to lower case, so that we are consistent with InnoDB's
internal data dictionary. */
- dict_casedn_str(filepath);
+ dict_casedn_str(def.filepath);
# endif /* !UNIV_HOTBACKUP */
#endif
- file = os_file_create_simple_no_error_handling(
- innodb_file_data_key, filepath, OS_FILE_OPEN,
- OS_FILE_READ_ONLY, &success);
- if (!success) {
- /* The following call prints an error message */
- os_file_get_last_error(TRUE);
+ /* Check for a link file which locates a remote tablespace. */
+ remote.success = fil_open_linked_file(
+ tablename, &remote.filepath, &remote.file);
+
+ /* Read the first page of the remote tablespace */
+ if (remote.success) {
+ fil_validate_single_table_tablespace(tablename, &remote);
+ if (!remote.success) {
+ os_file_close(remote.file);
+ mem_free(remote.filepath);
+ }
+ }
+
+
+ /* Try to open the tablespace in the datadir. */
+ def.file = os_file_create_simple_no_error_handling(
+ innodb_file_data_key, def.filepath, OS_FILE_OPEN,
+ OS_FILE_READ_ONLY, &def.success);
+
+ /* Read the first page of the remote tablespace */
+ if (def.success) {
+ fil_validate_single_table_tablespace(tablename, &def);
+ if (!def.success) {
+ os_file_close(def.file);
+ }
+ }
+
+ if (!def.success && !remote.success) {
+ /* The following call prints an error message */
+ os_file_get_last_error(true);
+ fprintf(stderr,
+ "InnoDB: Error: could not open single-table"
+ " tablespace file %s\n", def.filepath);
+no_good_file:
fprintf(stderr,
- "InnoDB: Error: could not open single-table tablespace"
- " file\n"
- "InnoDB: %s!\n"
"InnoDB: We do not continue the crash recovery,"
" because the table may become\n"
- "InnoDB: corrupt if we cannot apply the log records"
- " in the InnoDB log to it.\n"
+ "InnoDB: corrupt if we cannot apply the log"
+ " records in the InnoDB log to it.\n"
"InnoDB: To fix the problem and start mysqld:\n"
"InnoDB: 1) If there is a permission problem"
" in the file and mysqld cannot\n"
"InnoDB: open the file, you should"
" modify the permissions.\n"
- "InnoDB: 2) If the table is not needed, or you can"
- " restore it from a backup,\n"
+ "InnoDB: 2) If the table is not needed, or you"
+ " can restore it from a backup,\n"
"InnoDB: then you can remove the .ibd file,"
" and InnoDB will do a normal\n"
"InnoDB: crash recovery and ignore that table.\n"
@@ -3400,123 +4072,84 @@ fil_load_single_table_tablespace(
"InnoDB: the .ibd file, you can set"
" innodb_force_recovery > 0 in my.cnf\n"
"InnoDB: and force InnoDB to continue crash"
- " recovery here.\n", filepath);
-
+ " recovery here.\n");
+will_not_choose:
mem_free(tablename);
- mem_free(filepath);
-
- if (srv_force_recovery > 0) {
- fprintf(stderr,
- "InnoDB: innodb_force_recovery"
- " was set to %lu. Continuing crash recovery\n"
- "InnoDB: even though we cannot access"
- " the .ibd file of this table.\n",
- srv_force_recovery);
- return;
+ if (remote.success) {
+ mem_free(remote.filepath);
}
-
- exit(1);
- }
-
- size = os_file_get_size(file);
-
- if (UNIV_UNLIKELY(size == (os_offset_t) -1)) {
- /* The following call prints an error message */
- os_file_get_last_error(TRUE);
-
- fprintf(stderr,
- "InnoDB: Error: could not measure the size"
- " of single-table tablespace file\n"
- "InnoDB: %s!\n"
- "InnoDB: We do not continue crash recovery,"
- " because the table will become\n"
- "InnoDB: corrupt if we cannot apply the log records"
- " in the InnoDB log to it.\n"
- "InnoDB: To fix the problem and start mysqld:\n"
- "InnoDB: 1) If there is a permission problem"
- " in the file and mysqld cannot\n"
- "InnoDB: access the file, you should"
- " modify the permissions.\n"
- "InnoDB: 2) If the table is not needed,"
- " or you can restore it from a backup,\n"
- "InnoDB: then you can remove the .ibd file,"
- " and InnoDB will do a normal\n"
- "InnoDB: crash recovery and ignore that table.\n"
- "InnoDB: 3) If the file system or the disk is broken,"
- " and you cannot remove\n"
- "InnoDB: the .ibd file, you can set"
- " innodb_force_recovery > 0 in my.cnf\n"
- "InnoDB: and force InnoDB to continue"
- " crash recovery here.\n", filepath);
-
- os_file_close(file);
- mem_free(tablename);
- mem_free(filepath);
+ mem_free(def.filepath);
if (srv_force_recovery > 0) {
- fprintf(stderr,
- "InnoDB: innodb_force_recovery"
- " was set to %lu. Continuing crash recovery\n"
- "InnoDB: even though we cannot access"
- " the .ibd file of this table.\n",
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "innodb_force_recovery was set to %lu. "
+ "Continuing crash recovery even though we "
+ "cannot access the .ibd file of this table.",
srv_force_recovery);
return;
}
+ /* If debug code, cause a core dump and call stack. For
+ release builds just exit and rely on the messages above. */
+ ut_ad(0);
exit(1);
}
- /* TODO: What to do in other cases where we cannot access an .ibd
- file during a crash recovery? */
+ if (def.success && remote.success) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Tablespaces for %s have been found in two places;\n"
+ "Location 1: SpaceID: %lu LSN: %lu File: %s\n"
+ "Location 2: SpaceID: %lu LSN: %lu File: %s\n"
+ "You must delete one of them.",
+ tablename, (ulong) def.id, (ulong) def.lsn,
+ def.filepath, (ulong) remote.id, (ulong) remote.lsn,
+ remote.filepath);
- /* Every .ibd file is created >= 4 pages in size. Smaller files
- cannot be ok. */
+ def.success = FALSE;
+ os_file_close(def.file);
+ os_file_close(remote.file);
+ goto will_not_choose;
+ }
-#ifndef UNIV_HOTBACKUP
- if (size < FIL_IBD_FILE_INITIAL_SIZE * UNIV_PAGE_SIZE) {
- fprintf(stderr,
- "InnoDB: Error: the size of single-table"
- " tablespace file %s\n"
- "InnoDB: is only " UINT64PF
- ", should be at least %lu!\n",
- filepath,
- size, (ulong) (4 * UNIV_PAGE_SIZE));
- os_file_close(file);
- mem_free(tablename);
- mem_free(filepath);
+ /* At this point, only one tablespace is open */
+ ut_a(def.success == !remote.success);
- return;
- }
-#endif
- /* Read the first page of the tablespace if the size is big enough */
+ fsp_open_info* fsp = def.success ? &def : &remote;
- buf2 = static_cast<byte*>(ut_malloc(2 * UNIV_PAGE_SIZE));
- /* Align the memory for file i/o if we might have O_DIRECT set */
- page = static_cast<byte*>(ut_align(buf2, UNIV_PAGE_SIZE));
+ /* Get and test the file size. */
+ size = os_file_get_size(fsp->file);
- if (size >= FIL_IBD_FILE_INITIAL_SIZE * UNIV_PAGE_SIZE) {
- success = os_file_read(file, page, 0, UNIV_PAGE_SIZE);
+ if (size == (os_offset_t) -1) {
+ /* The following call prints an error message */
+ os_file_get_last_error(true);
- /* We have to read the tablespace id from the file */
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "could not measure the size of single-table "
+ "tablespace file %s", fsp->filepath);
- space_id = fsp_header_get_space_id(page);
- flags = fsp_header_get_flags(page);
- } else {
- space_id = ULINT_UNDEFINED;
- flags = 0;
+ os_file_close(fsp->file);
+ goto no_good_file;
}
+ /* Every .ibd file is created >= 4 pages in size. Smaller files
+ cannot be ok. */
+ ulong minimum_size = FIL_IBD_FILE_INITIAL_SIZE * UNIV_PAGE_SIZE;
+ if (size < minimum_size) {
#ifndef UNIV_HOTBACKUP
- if (space_id == ULINT_UNDEFINED || space_id == 0) {
- fprintf(stderr,
- "InnoDB: Error: tablespace id %lu in file %s"
- " is not sensible\n",
- (ulong) space_id,
- filepath);
- goto func_exit;
- }
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "The size of single-table tablespace file %s "
+ "is only " UINT64PF ", should be at least %lu!",
+ fsp->filepath, size, minimum_size);
+ os_file_close(fsp->file);
+ goto no_good_file;
#else
- if (space_id == ULINT_UNDEFINED || space_id == 0) {
+ fsp->id = ULINT_UNDEFINED;
+ fsp->flags = 0;
+#endif /* !UNIV_HOTBACKUP */
+ }
+
+#ifdef UNIV_HOTBACKUP
+ if (fsp->id == ULINT_UNDEFINED || fsp->id == 0) {
char* new_path;
fprintf(stderr,
@@ -3528,18 +4161,19 @@ fil_load_single_table_tablespace(
" is not sensible.\n"
"InnoDB: This can happen in an ibbackup run,"
" and is not dangerous.\n",
- filepath, space_id, filepath, size);
- os_file_close(file);
+ fsp->filepath, fsp->id, fsp->filepath, size);
+ os_file_close(fsp->file);
- new_path = fil_make_ibbackup_old_name(filepath);
- ut_a(os_file_rename(innodb_file_data_key, filepath, new_path));
+ new_path = fil_make_ibbackup_old_name(fsp->filepath);
+
+	bool success = os_file_rename(
+	innodb_file_data_key, fsp->filepath, new_path);
+
+ ut_a(success);
- ut_free(buf2);
- mem_free(tablename);
- mem_free(filepath);
mem_free(new_path);
- return;
+ goto func_exit_after_close;
}
/* A backup may contain the same space several times, if the space got
@@ -3551,7 +4185,7 @@ fil_load_single_table_tablespace(
mutex_enter(&fil_system->mutex);
- space = fil_space_get_by_id(space_id);
+ space = fil_space_get_by_id(fsp->id);
if (space) {
char* new_path;
@@ -3563,52 +4197,64 @@ fil_load_single_table_tablespace(
"InnoDB: was scanned earlier. This can happen"
" if you have renamed tables\n"
"InnoDB: during an ibbackup run.\n",
- filepath, space_id, filepath,
+ fsp->filepath, fsp->id, fsp->filepath,
space->name);
- os_file_close(file);
+ os_file_close(fsp->file);
- new_path = fil_make_ibbackup_old_name(filepath);
+ new_path = fil_make_ibbackup_old_name(fsp->filepath);
mutex_exit(&fil_system->mutex);
- ut_a(os_file_rename(innodb_file_data_key, filepath, new_path));
+ bool success = os_file_rename(
+ innodb_file_data_key, fsp->filepath, new_path);
+
+ ut_a(success);
- ut_free(buf2);
- mem_free(tablename);
- mem_free(filepath);
mem_free(new_path);
- return;
+ goto func_exit_after_close;
}
mutex_exit(&fil_system->mutex);
-#endif
- success = fil_space_create(tablename, space_id, flags, FIL_TABLESPACE);
-
- if (!success) {
+#endif /* UNIV_HOTBACKUP */
+ ibool file_space_create_success = fil_space_create(
+ tablename, fsp->id, fsp->flags, FIL_TABLESPACE);
+ if (!file_space_create_success) {
if (srv_force_recovery > 0) {
fprintf(stderr,
- "InnoDB: innodb_force_recovery"
- " was set to %lu. Continuing crash recovery\n"
- "InnoDB: even though the tablespace creation"
- " of this table failed.\n",
+ "InnoDB: innodb_force_recovery was set"
+ " to %lu. Continuing crash recovery\n"
+ "InnoDB: even though the tablespace"
+ " creation of this table failed.\n",
srv_force_recovery);
goto func_exit;
}
- exit(1);
+ /* Exit here with a core dump, stack, etc. */
+ ut_a(file_space_create_success);
}
/* We do not use the size information we have about the file, because
the rounding formula for extents and pages is somewhat complex; we
let fil_node_open() do that task. */
- fil_node_create(filepath, 0, space_id, FALSE);
+ if (!fil_node_create(fsp->filepath, 0, fsp->id, FALSE)) {
+ ut_error;
+ }
+
func_exit:
- os_file_close(file);
- ut_free(buf2);
+ os_file_close(fsp->file);
+
+#ifdef UNIV_HOTBACKUP
+func_exit_after_close:
+#else
+ ut_ad(!mutex_own(&fil_system->mutex));
+#endif
mem_free(tablename);
- mem_free(filepath);
+ if (remote.success) {
+ mem_free(remote.filepath);
+ }
+ mem_free(def.filepath);
}
/***********************************************************************//**
@@ -3621,29 +4267,25 @@ static
int
fil_file_readdir_next_file(
/*=======================*/
- ulint* err, /*!< out: this is set to DB_ERROR if an error
+ dberr_t* err, /*!< out: this is set to DB_ERROR if an error
was encountered, otherwise not changed */
const char* dirname,/*!< in: directory name or path */
os_file_dir_t dir, /*!< in: directory stream */
- os_file_stat_t* info) /*!< in/out: buffer where the info is returned */
+ os_file_stat_t* info) /*!< in/out: buffer where the
+ info is returned */
{
- ulint i;
- int ret;
-
- for (i = 0; i < 100; i++) {
- ret = os_file_readdir_next_file(dirname, dir, info);
+ for (ulint i = 0; i < 100; i++) {
+ int ret = os_file_readdir_next_file(dirname, dir, info);
if (ret != -1) {
return(ret);
}
- fprintf(stderr,
- "InnoDB: Error: os_file_readdir_next_file()"
- " returned -1 in\n"
- "InnoDB: directory %s\n"
- "InnoDB: Crash recovery may have failed"
- " for some .ibd files!\n", dirname);
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "os_file_readdir_next_file() returned -1 in "
+ "directory %s, crash recovery may have failed "
+ "for some .ibd files!", dirname);
*err = DB_ERROR;
}
@@ -3660,7 +4302,7 @@ in the doublewrite buffer, also to know where to apply log records where the
space id is != 0.
@return DB_SUCCESS or error number */
UNIV_INTERN
-ulint
+dberr_t
fil_load_single_table_tablespaces(void)
/*===================================*/
{
@@ -3671,7 +4313,7 @@ fil_load_single_table_tablespaces(void)
os_file_dir_t dbdir;
os_file_stat_t dbinfo;
os_file_stat_t fileinfo;
- ulint err = DB_SUCCESS;
+ dberr_t err = DB_SUCCESS;
/* The datadir of MySQL is always the default directory of mysqld */
@@ -3720,7 +4362,6 @@ fil_load_single_table_tablespaces(void)
dbdir = os_file_opendir(dbpath, FALSE);
if (dbdir != NULL) {
- /* printf("Opened dir %s\n", dbinfo.name); */
/* We found a database directory; loop through it,
looking for possible .ibd files in it */
@@ -3728,8 +4369,6 @@ fil_load_single_table_tablespaces(void)
ret = fil_file_readdir_next_file(&err, dbpath, dbdir,
&fileinfo);
while (ret == 0) {
- /* printf(
- " Looking at file %s\n", fileinfo.name); */
if (fileinfo.type == OS_FILE_TYPE_DIR) {
@@ -3738,11 +4377,14 @@ fil_load_single_table_tablespaces(void)
/* We found a symlink or a file */
if (strlen(fileinfo.name) > 4
- && 0 == strcmp(fileinfo.name
+ && (0 == strcmp(fileinfo.name
+ + strlen(fileinfo.name) - 4,
+ ".ibd")
+ || 0 == strcmp(fileinfo.name
+	    + strlen(fileinfo.name) - 4,
- ".ibd")) {
- /* The name ends in .ibd; try opening
- the file */
+ ".isl"))) {
+ /* The name ends in .ibd or .isl;
+ try opening the file */
fil_load_single_table_tablespace(
dbinfo.name, fileinfo.name);
}
@@ -3842,6 +4484,29 @@ fil_tablespace_exists_in_mem(
}
/*******************************************************************//**
+Report that a tablespace for a table was not found. */
+static
+void
+fil_report_missing_tablespace(
+/*===========================*/
+ const char* name, /*!< in: table name */
+ ulint space_id) /*!< in: table's space id */
+{
+ char index_name[MAX_FULL_NAME_LEN + 1];
+
+ innobase_format_name(index_name, sizeof(index_name), name, TRUE);
+
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Table %s in the InnoDB data dictionary has tablespace id %lu, "
+ "but tablespace with that id or name does not exist. Have "
+ "you deleted or moved .ibd files? This may also be a table "
+ "created with CREATE TEMPORARY TABLE whose .ibd and .frm "
+ "files MySQL automatically removed, but the table still "
+ "exists in the InnoDB internal data dictionary.",
+ name, space_id);
+}
+
+/*******************************************************************//**
Returns TRUE if a matching tablespace exists in the InnoDB tablespace memory
cache. Note that if we have not done a crash recovery at the database startup,
there may be many tablespaces which are not yet in the memory cache.
@@ -3851,19 +4516,25 @@ ibool
fil_space_for_table_exists_in_mem(
/*==============================*/
ulint id, /*!< in: space id */
- const char* name, /*!< in: table name in the standard
- 'databasename/tablename' format */
+ const char* name, /*!< in: table name used in
+ fil_space_create(). Either the
+ standard 'dbname/tablename' format
+ or table->dir_path_of_temp_table */
ibool mark_space, /*!< in: in crash recovery, at database
startup we mark all spaces which have
an associated table in the InnoDB
data dictionary, so that
we can print a warning about orphaned
tablespaces */
- ibool print_error_if_does_not_exist)
+ ibool print_error_if_does_not_exist,
/*!< in: print detailed error
information to the .err log if a
matching tablespace is not found from
memory */
+ bool adjust_space, /*!< in: whether to adjust space id
+ when find table space mismatch */
+ mem_heap_t* heap, /*!< in: heap memory */
+ table_id_t table_id) /*!< in: table id */
{
fil_space_t* fnamespace;
fil_space_t* space;
@@ -3892,6 +4563,47 @@ fil_space_for_table_exists_in_mem(
return(TRUE);
}
+ /* Info from "fnamespace" comes from the ibd file itself, it can
+ be different from data obtained from System tables since it is
+ not transactional. If adjust_space is set, and the mismatching
+ space are between a user table and its temp table, we shall
+ adjust the ibd file name according to system table info */
+ if (adjust_space
+ && space != NULL
+ && row_is_mysql_tmp_table_name(space->name)
+ && !row_is_mysql_tmp_table_name(name)) {
+
+ mutex_exit(&fil_system->mutex);
+
+ DBUG_EXECUTE_IF("ib_crash_before_adjust_fil_space",
+ DBUG_SUICIDE(););
+
+ if (fnamespace) {
+ char* tmp_name;
+
+ tmp_name = dict_mem_create_temporary_tablename(
+ heap, name, table_id);
+
+ fil_rename_tablespace(fnamespace->name, fnamespace->id,
+ tmp_name, NULL);
+ }
+
+ DBUG_EXECUTE_IF("ib_crash_after_adjust_one_fil_space",
+ DBUG_SUICIDE(););
+
+ fil_rename_tablespace(space->name, id, name, NULL);
+
+ DBUG_EXECUTE_IF("ib_crash_after_adjust_fil_space",
+ DBUG_SUICIDE(););
+
+ mutex_enter(&fil_system->mutex);
+ fnamespace = fil_space_get_by_name(name);
+ ut_ad(space == fnamespace);
+ mutex_exit(&fil_system->mutex);
+
+ return(TRUE);
+ }
+
if (!print_error_if_does_not_exist) {
mutex_exit(&fil_system->mutex);
@@ -3901,22 +4613,9 @@ fil_space_for_table_exists_in_mem(
if (space == NULL) {
if (fnamespace == NULL) {
- ut_print_timestamp(stderr);
- fputs(" InnoDB: Error: table ", stderr);
- ut_print_filename(stderr, name);
- fprintf(stderr, "\n"
- "InnoDB: in InnoDB data dictionary"
- " has tablespace id %lu,\n"
- "InnoDB: but tablespace with that id"
- " or name does not exist. Have\n"
- "InnoDB: you deleted or moved .ibd files?\n"
- "InnoDB: This may also be a table created with"
- " CREATE TEMPORARY TABLE\n"
- "InnoDB: whose .ibd and .frm files"
- " MySQL automatically removed, but the\n"
- "InnoDB: table still exists in the"
- " InnoDB internal data dictionary.\n",
- (ulong) id);
+ if (print_error_if_does_not_exist) {
+ fil_report_missing_tablespace(name, id);
+ }
} else {
ut_print_timestamp(stderr);
fputs(" InnoDB: Error: table ", stderr);
@@ -3975,7 +4674,7 @@ error_exit:
Checks if a single-table tablespace for a given table name exists in the
tablespace memory cache.
@return space id, ULINT_UNDEFINED if not found */
-static
+UNIV_INTERN
ulint
fil_get_space_id_for_table(
/*=======================*/
@@ -4030,6 +4729,8 @@ fil_extend_space_to_desired_size(
ulint pages_added;
ibool success;
+ ut_ad(!srv_read_only_mode);
+
retry:
pages_added = 0;
success = TRUE;
@@ -4081,20 +4782,6 @@ retry:
start_page_no = space->size;
file_start_page_no = space->size - node->size;
-#ifdef HAVE_POSIX_FALLOCATE
- if (srv_use_posix_fallocate) {
- success = os_file_set_size(node->name, node->handle,
- size_after_extend * page_size);
- mutex_enter(&fil_system->mutex);
- if (success) {
- node->size += (size_after_extend - start_page_no);
- space->size += (size_after_extend - start_page_no);
- os_has_said_disk_full = FALSE;
- }
- goto complete_io;
- }
-#endif
-
/* Extend at most 64 pages at a time */
buf_size = ut_min(64, size_after_extend - start_page_no) * page_size;
buf2 = static_cast<byte*>(mem_alloc(buf_size + page_size));
@@ -4118,7 +4805,7 @@ retry:
node->name, node->handle, buf,
offset, page_size * n_pages,
NULL, NULL);
-#endif
+#endif /* UNIV_HOTBACKUP */
if (success) {
os_has_said_disk_full = FALSE;
} else {
@@ -4150,10 +4837,6 @@ retry:
node->size += pages_added;
node->being_extended = FALSE;
-#ifdef HAVE_POSIX_FALLOCATE
-complete_io:
-#endif
-
fil_node_complete_io(node, fil_system, OS_FILE_WRITE);
*actual_size = space->size;
@@ -4195,7 +4878,7 @@ fil_extend_tablespaces_to_stored_len(void)
byte* buf;
ulint actual_size;
ulint size_in_header;
- ulint error;
+ dberr_t error;
ibool success;
buf = mem_alloc(UNIV_PAGE_SIZE);
@@ -4229,7 +4912,7 @@ fil_extend_tablespaces_to_stored_len(void)
"InnoDB: Check that you have free disk space"
" and retry!\n",
space->name, size_in_header, actual_size);
- exit(1);
+ ut_a(success);
}
mutex_enter(&fil_system->mutex);
@@ -4399,12 +5082,21 @@ fil_node_complete_io(
node->n_pending--;
if (type == OS_FILE_WRITE) {
+ ut_ad(!srv_read_only_mode);
system->modification_counter++;
node->modification_counter = system->modification_counter;
- if (!node->space->is_in_unflushed_spaces) {
+ if (fil_buffering_disabled(node->space)) {
+
+ /* We don't need to keep track of unflushed
+ changes as user has explicitly disabled
+ buffering. */
+ ut_ad(!node->space->is_in_unflushed_spaces);
+ node->flush_counter = node->modification_counter;
- node->space->is_in_unflushed_spaces = TRUE;
+ } else if (!node->space->is_in_unflushed_spaces) {
+
+ node->space->is_in_unflushed_spaces = true;
UT_LIST_ADD_FIRST(unflushed_spaces,
system->unflushed_spaces,
node->space);
@@ -4451,7 +5143,7 @@ Reads or writes data. This operation is asynchronous (aio).
@return DB_SUCCESS, or DB_TABLESPACE_DELETED if we are trying to do
i/o on a tablespace which does not exist */
UNIV_INTERN
-ulint
+dberr_t
fil_io(
/*===*/
ulint type, /*!< in: OS_FILE_READ or OS_FILE_WRITE,
@@ -4514,9 +5206,11 @@ fil_io(
#ifndef UNIV_HOTBACKUP
# ifndef UNIV_LOG_DEBUG
/* ibuf bitmap pages must be read in the sync aio mode: */
- ut_ad(recv_no_ibuf_operations || (type == OS_FILE_WRITE)
+ ut_ad(recv_no_ibuf_operations
+ || type == OS_FILE_WRITE
|| !ibuf_bitmap_page(zip_size, block_offset)
- || sync || is_log);
+ || sync
+ || is_log);
# endif /* UNIV_LOG_DEBUG */
if (sync) {
mode = OS_AIO_SYNC;
@@ -4535,9 +5229,10 @@ fil_io(
#endif /* !UNIV_HOTBACKUP */
if (type == OS_FILE_READ) {
- srv_data_read+= len;
+ srv_stats.data_read.add(len);
} else if (type == OS_FILE_WRITE) {
- srv_data_written+= len;
+ ut_ad(!srv_read_only_mode);
+ srv_stats.data_written.add(len);
}
/* Reserve the fil_system mutex and make sure that we can open at
@@ -4549,48 +5244,43 @@ fil_io(
/* If we are deleting a tablespace we don't allow any read
operations on that. However, we do allow write operations. */
- if (!space || (type == OS_FILE_READ && space->stop_new_ops)) {
+ if (space == 0 || (type == OS_FILE_READ && space->stop_new_ops)) {
mutex_exit(&fil_system->mutex);
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Error: trying to do i/o"
- " to a tablespace which does not exist.\n"
- "InnoDB: i/o type %lu, space id %lu,"
- " page no. %lu, i/o length %lu bytes\n",
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Trying to do i/o to a tablespace which does "
+ "not exist. i/o type %lu, space id %lu, "
+ "page no. %lu, i/o length %lu bytes",
(ulong) type, (ulong) space_id, (ulong) block_offset,
(ulong) len);
return(DB_TABLESPACE_DELETED);
}
- ut_ad((mode != OS_AIO_IBUF) || (space->purpose == FIL_TABLESPACE));
+ ut_ad(mode != OS_AIO_IBUF || space->purpose == FIL_TABLESPACE);
node = UT_LIST_GET_FIRST(space->chain);
for (;;) {
- if (UNIV_UNLIKELY(node == NULL)) {
+ if (node == NULL) {
if (ignore_nonexistent_pages) {
mutex_exit(&fil_system->mutex);
return(DB_ERROR);
}
- /* else */
fil_report_invalid_page_access(
block_offset, space_id, space->name,
byte_offset, len, type);
ut_error;
- }
- if (fil_is_user_tablespace_id(space->id) && node->size == 0) {
+ } else if (fil_is_user_tablespace_id(space->id)
+ && node->size == 0) {
+
/* We do not know the size of a single-table tablespace
before we open the file */
-
break;
- }
-
- if (node->size > block_offset) {
+ } else if (node->size > block_offset) {
/* Found! */
break;
} else {
@@ -4652,6 +5342,7 @@ fil_io(
if (type == OS_FILE_READ) {
ret = os_file_read(node->handle, buf, offset, len);
} else {
+ ut_ad(!srv_read_only_mode);
ret = os_file_write(node->name, node->handle, buf,
offset, len);
}
@@ -4659,7 +5350,7 @@ fil_io(
/* Queue the aio request */
ret = os_aio(type, mode | wake_later, node->name, node->handle, buf,
offset, len, node, message);
-#endif
+#endif /* UNIV_HOTBACKUP */
ut_a(ret);
if (mode == OS_AIO_SYNC) {
@@ -4701,24 +5392,24 @@ fil_aio_wait(
if (srv_use_native_aio) {
srv_set_io_thread_op_info(segment, "native aio handle");
#ifdef WIN_ASYNC_IO
- ret = os_aio_windows_handle(segment, 0, &fil_node,
- &message, &type);
+ ret = os_aio_windows_handle(
+ segment, 0, &fil_node, &message, &type);
#elif defined(LINUX_NATIVE_AIO)
- ret = os_aio_linux_handle(segment, &fil_node,
- &message, &type);
+ ret = os_aio_linux_handle(
+ segment, &fil_node, &message, &type);
#else
ut_error;
ret = 0; /* Eliminate compiler warning */
-#endif
+#endif /* WIN_ASYNC_IO */
} else {
srv_set_io_thread_op_info(segment, "simulated aio handle");
- ret = os_aio_simulated_handle(segment, &fil_node,
- &message, &type);
+ ret = os_aio_simulated_handle(
+ segment, &fil_node, &message, &type);
}
ut_a(ret);
- if (UNIV_UNLIKELY(fil_node == NULL)) {
+ if (fil_node == NULL) {
ut_ad(srv_shutdown_state == SRV_SHUTDOWN_EXIT_THREADS);
return;
}
@@ -4774,6 +5465,28 @@ fil_flush(
return;
}
+ if (fil_buffering_disabled(space)) {
+
+ /* No need to flush. User has explicitly disabled
+ buffering. */
+ ut_ad(!space->is_in_unflushed_spaces);
+ ut_ad(fil_space_is_flushed(space));
+ ut_ad(space->n_pending_flushes == 0);
+
+#ifdef UNIV_DEBUG
+ for (node = UT_LIST_GET_FIRST(space->chain);
+ node != NULL;
+ node = UT_LIST_GET_NEXT(chain, node)) {
+ ut_ad(node->modification_counter
+ == node->flush_counter);
+ ut_ad(node->n_pending_flushes == 0);
+ }
+#endif /* UNIV_DEBUG */
+
+ mutex_exit(&fil_system->mutex);
+ return;
+ }
+
space->n_pending_flushes++; /*!< prevent dropping of the space while
we are flushing */
node = UT_LIST_GET_FIRST(space->chain);
@@ -4797,7 +5510,7 @@ fil_flush(
goto skip_flush;
}
-#endif
+#endif /* __WIN__ */
retry:
if (node->n_pending_flushes > 0) {
/* We want to avoid calling os_file_flush() on
@@ -4840,7 +5553,7 @@ skip_flush:
if (space->is_in_unflushed_spaces
&& fil_space_is_flushed(space)) {
- space->is_in_unflushed_spaces = FALSE;
+ space->is_in_unflushed_spaces = false;
UT_LIST_REMOVE(
unflushed_spaces,
@@ -5078,6 +5791,379 @@ fil_close(void)
fil_system = NULL;
}
+/********************************************************************//**
+Initializes a buffer control block when the buf_pool is created. */
+static
+void
+fil_buf_block_init(
+/*===============*/
+ buf_block_t* block, /*!< in: pointer to control block */
+ byte* frame) /*!< in: pointer to buffer frame */
+{
+ UNIV_MEM_DESC(frame, UNIV_PAGE_SIZE);
+
+ block->frame = frame;
+
+ block->page.io_fix = BUF_IO_NONE;
+ /* There are assertions that check for this. */
+ block->page.buf_fix_count = 1;
+ block->page.state = BUF_BLOCK_READY_FOR_USE;
+
+ page_zip_des_init(&block->page.zip);
+}
+
/** Iterator state for scanning all pages of a tablespace file
sequentially (used by fil_tablespace_iterate()/fil_iterate()). */
struct fil_iterator_t {
	os_file_t	file;			/*!< File handle */
	const char*	filepath;		/*!< File path name */
	os_offset_t	start;			/*!< From where to start */
	os_offset_t	end;			/*!< Where to stop */
	os_offset_t	file_size;		/*!< File size in bytes */
	ulint		page_size;		/*!< Page size */
	ulint		n_io_buffers;		/*!< Number of pages to use
						for IO */
	byte*		io_buffer;		/*!< Buffer to use for IO */
};
+
/********************************************************************//**
TODO: This can be made parallel trivially by chunking up the file and creating
a callback per thread. Main benefit will be to use multiple CPUs for
checksums and compressed tables. We have to do compressed tables block by
block right now. Secondly we need to decompress/compress and copy too much
of data. These are CPU intensive.

Iterate over all the pages in the tablespace, reading up to
iter.n_io_buffers pages per batch, invoking the callback on each page,
and writing the batch back only if the callback changed at least one
page in it.
@param iter - Tablespace iterator
@param block - block to use for IO
@param callback - Callback to inspect and update page contents
@retval DB_SUCCESS or error code */
static
dberr_t
fil_iterate(
/*========*/
	const fil_iterator_t&	iter,
	buf_block_t*		block,
	PageCallback&		callback)
{
	os_offset_t		offset;
	ulint			page_no = 0;
	ulint			space_id = callback.get_space_id();
	ulint			n_bytes = iter.n_io_buffers * iter.page_size;

	ut_ad(!srv_read_only_mode);

	/* TODO: For compressed tables we do a lot of useless
	copying for non-index pages. Unfortunately, it is
	required by buf_zip_decompress() */

	for (offset = iter.start; offset < iter.end; offset += n_bytes) {

		byte*		io_buffer = iter.io_buffer;

		block->frame = io_buffer;

		if (callback.get_zip_size() > 0) {
			/* For compressed tablespaces the caller sized the
			batch to a single page; set up the zip descriptor
			so that block->page.zip.data points past the
			uncompressed frame within io_buffer. */
			page_zip_des_init(&block->page.zip);
			page_zip_set_size(&block->page.zip, iter.page_size);
			block->page.zip.data = block->frame + UNIV_PAGE_SIZE;
			ut_d(block->page.zip.m_external = true);
			ut_ad(iter.page_size == callback.get_zip_size());

			/* Zip IO is done in the compressed page buffer. */
			io_buffer = block->page.zip.data;
		} else {
			io_buffer = iter.io_buffer;
		}

		/* We have to read the exact number of bytes. Otherwise the
		InnoDB IO functions croak on failed reads. */

		n_bytes = static_cast<ulint>(
			ut_min(static_cast<os_offset_t>(n_bytes),
			       iter.end - offset));

		ut_ad(n_bytes > 0);
		ut_ad(!(n_bytes % iter.page_size));

		if (!os_file_read(iter.file, io_buffer, offset,
				  (ulint) n_bytes)) {

			ib_logf(IB_LOG_LEVEL_ERROR, "os_file_read() failed");

			return(DB_IO_ERROR);
		}

		bool		updated = false;
		os_offset_t	page_off = offset;
		ulint		n_pages_read = (ulint) n_bytes / iter.page_size;

		for (ulint i = 0; i < n_pages_read; ++i) {

			buf_block_set_file_page(block, space_id, page_no++);

			dberr_t	err;

			if ((err = callback(page_off, block)) != DB_SUCCESS) {

				return(err);

			} else if (!updated) {
				/* The callback signals modification by
				leaving the block in FILE_PAGE state. */
				updated = buf_block_get_state(block)
					== BUF_BLOCK_FILE_PAGE;
			}

			/* Reset the block state for the next page; the
			intermediate NOT_USED state keeps the state
			transition assertions happy. */
			buf_block_set_state(block, BUF_BLOCK_NOT_USED);
			buf_block_set_state(block, BUF_BLOCK_READY_FOR_USE);

			page_off += iter.page_size;
			block->frame += iter.page_size;
		}

		/* A page was updated in the set, write back to disk. */
		if (updated
		    && !os_file_write(
				iter.filepath, iter.file, io_buffer,
				offset, (ulint) n_bytes)) {

			ib_logf(IB_LOG_LEVEL_ERROR, "os_file_write() failed");

			return(DB_IO_ERROR);
		}
	}

	return(DB_SUCCESS);
}
+
/********************************************************************//**
Iterate over all the pages in the tablespace. Opens the .ibd file of
the table, reads the first page to let the callback determine the page
and zip size, then drives fil_iterate() over the whole file and flushes
it to disk on success.
@param table - the table definition in the server
@param n_io_buffers - number of blocks to read and write together
@param callback - functor that will do the page updates
@return DB_SUCCESS or error code */
UNIV_INTERN
dberr_t
fil_tablespace_iterate(
/*===================*/
	dict_table_t*	table,
	ulint		n_io_buffers,
	PageCallback&	callback)
{
	dberr_t		err;
	os_file_t	file;
	char*		filepath;

	ut_a(n_io_buffers > 0);
	ut_ad(!srv_read_only_mode);

	DBUG_EXECUTE_IF("ib_import_trigger_corruption_1",
			return(DB_CORRUPTION););

	/* Build the .ibd path, honouring a DATA DIRECTORY clause if the
	table uses one. */
	if (DICT_TF_HAS_DATA_DIR(table->flags)) {
		dict_get_and_save_data_dir_path(table, false);
		ut_a(table->data_dir_path);

		filepath = os_file_make_remote_pathname(
			table->data_dir_path, table->name, "ibd");
	} else {
		filepath = fil_make_ibd_name(table->name, false);
	}

	{
		ibool	success;

		file = os_file_create_simple_no_error_handling(
			innodb_file_data_key, filepath,
			OS_FILE_OPEN, OS_FILE_READ_WRITE, &success);

		/* Fault injection: occasionally simulate an open failure. */
		DBUG_EXECUTE_IF("fil_tablespace_iterate_failure",
		{
			static bool once;

			if (!once || ut_rnd_interval(0, 10) == 5) {
				once = true;
				success = FALSE;
				os_file_close(file);
			}
		});

		if (!success) {
			/* The following call prints an error message */
			os_file_get_last_error(true);

			ib_logf(IB_LOG_LEVEL_ERROR,
				"Trying to import a tablespace, but could not "
				"open the tablespace file %s", filepath);

			mem_free(filepath);

			return(DB_TABLESPACE_NOT_FOUND);

		} else {
			err = DB_SUCCESS;
		}
	}

	callback.set_file(filepath, file);

	os_offset_t	file_size = os_file_get_size(file);
	ut_a(file_size != (os_offset_t) -1);

	/* The block we will use for every physical page */
	buf_block_t	block;

	memset(&block, 0x0, sizeof(block));

	/* Allocate a page to read in the tablespace header, so that we
	can determine the page size and zip_size (if it is compressed).
	We allocate an extra page in case it is a compressed table. One
	page is to ensure alignment. */

	void*	page_ptr = mem_alloc(3 * UNIV_PAGE_SIZE);
	byte*	page = static_cast<byte*>(ut_align(page_ptr, UNIV_PAGE_SIZE));

	fil_buf_block_init(&block, page);

	/* Read the first page and determine the page and zip size. */

	if (!os_file_read(file, page, 0, UNIV_PAGE_SIZE)) {

		err = DB_IO_ERROR;

	} else if ((err = callback.init(file_size, &block)) == DB_SUCCESS) {
		fil_iterator_t	iter;

		iter.file = file;
		iter.start = 0;
		iter.end = file_size;
		iter.filepath = filepath;
		iter.file_size = file_size;
		iter.n_io_buffers = n_io_buffers;
		iter.page_size = callback.get_page_size();

		/* Compressed pages can't be optimised for block IO for now.
		We do the IMPORT page by page. */

		if (callback.get_zip_size() > 0) {
			iter.n_io_buffers = 1;
			ut_a(iter.page_size == callback.get_zip_size());
		}

		/** Add an extra page for compressed page scratch area. */

		void*	io_buffer = mem_alloc(
			(2 + iter.n_io_buffers) * UNIV_PAGE_SIZE);

		iter.io_buffer = static_cast<byte*>(
			ut_align(io_buffer, UNIV_PAGE_SIZE));

		err = fil_iterate(iter, &block, callback);

		mem_free(io_buffer);
	}

	if (err == DB_SUCCESS) {

		ib_logf(IB_LOG_LEVEL_INFO, "Sync to disk");

		if (!os_file_flush(file)) {
			ib_logf(IB_LOG_LEVEL_INFO, "os_file_flush() failed!");
			err = DB_IO_ERROR;
		} else {
			ib_logf(IB_LOG_LEVEL_INFO, "Sync to disk - done!");
		}
	}

	os_file_close(file);

	mem_free(page_ptr);
	mem_free(filepath);

	return(err);
}
+
/**
Set the tablespace compressed table size from the FSP header of the
first page; 0 means the tablespace is uncompressed.
@return DB_SUCCESS if it is valid or DB_CORRUPTION if not */
dberr_t
PageCallback::set_zip_size(const buf_frame_t* page) UNIV_NOTHROW
{
	m_zip_size = fsp_header_get_zip_size(page);

	/* A valid zip size is 0 or a power of two no larger than
	UNIV_ZIP_SIZE_MAX. */
	if (!ut_is_2pow(m_zip_size) || m_zip_size > UNIV_ZIP_SIZE_MAX) {
		return(DB_CORRUPTION);
	}

	return(DB_SUCCESS);
}
+
+/********************************************************************//**
+Delete the tablespace file and any related files like .cfg.
+This should not be called for temporary tables. */
+UNIV_INTERN
+void
+fil_delete_file(
+/*============*/
+ const char* ibd_name) /*!< in: filepath of the ibd
+ tablespace */
+{
+ /* Force a delete of any stale .ibd files that are lying around. */
+
+ ib_logf(IB_LOG_LEVEL_INFO, "Deleting %s", ibd_name);
+
+ os_file_delete_if_exists(ibd_name);
+
+ char* cfg_name = fil_make_cfg_name(ibd_name);
+
+ os_file_delete_if_exists(cfg_name);
+
+ mem_free(cfg_name);
+}
+
+/**
+Iterate over all the spaces in the space list and fetch the
+tablespace names. It will return a copy of the name that must be
+freed by the caller using: delete[].
+@return DB_SUCCESS if all OK. */
+UNIV_INTERN
+dberr_t
+fil_get_space_names(
+/*================*/
+ space_name_list_t& space_name_list)
+ /*!< in/out: List to append to */
+{
+ fil_space_t* space;
+ dberr_t err = DB_SUCCESS;
+
+ mutex_enter(&fil_system->mutex);
+
+ for (space = UT_LIST_GET_FIRST(fil_system->space_list);
+ space != NULL;
+ space = UT_LIST_GET_NEXT(space_list, space)) {
+
+ if (space->purpose == FIL_TABLESPACE) {
+ ulint len;
+ char* name;
+
+ len = strlen(space->name);
+ name = new(std::nothrow) char[len + 1];
+
+ if (name == 0) {
+ /* Caller to free elements allocated so far. */
+ err = DB_OUT_OF_MEMORY;
+ break;
+ }
+
+ memcpy(name, space->name, len);
+ name[len] = 0;
+
+ space_name_list.push_back(name);
+ }
+ }
+
+ mutex_exit(&fil_system->mutex);
+
+ return(err);
+}
+
/****************************************************************//**
Generate redo logs for swapping two .ibd files */
UNIV_INTERN
diff --git a/storage/innobase/fsp/fsp0fsp.cc b/storage/innobase/fsp/fsp0fsp.cc
index 398dd24afed..dc843a89fb9 100644
--- a/storage/innobase/fsp/fsp0fsp.cc
+++ b/storage/innobase/fsp/fsp0fsp.cc
@@ -93,15 +93,13 @@ fseg_n_reserved_pages_low(
/********************************************************************//**
Marks a page used. The page must reside within the extents of the given
segment. */
-static
+static __attribute__((nonnull))
void
fseg_mark_page_used(
/*================*/
fseg_inode_t* seg_inode,/*!< in: segment inode */
- ulint space, /*!< in: space id */
- ulint zip_size,/*!< in: compressed page size in bytes
- or 0 for uncompressed pages */
ulint page, /*!< in: page offset */
+ xdes_t* descr, /*!< in: extent descriptor */
mtr_t* mtr); /*!< in/out: mini-transaction */
/**********************************************************************//**
Returns the first extent descriptor for a segment. We think of the extent
@@ -214,30 +212,18 @@ Gets a descriptor bit of a page.
@return TRUE if free */
UNIV_INLINE
ibool
-xdes_get_bit(
-/*=========*/
+xdes_mtr_get_bit(
+/*=============*/
const xdes_t* descr, /*!< in: descriptor */
ulint bit, /*!< in: XDES_FREE_BIT or XDES_CLEAN_BIT */
ulint offset, /*!< in: page offset within extent:
0 ... FSP_EXTENT_SIZE - 1 */
- mtr_t* mtr) /*!< in/out: mini-transaction */
+ mtr_t* mtr) /*!< in: mini-transaction */
{
- ulint index;
- ulint byte_index;
- ulint bit_index;
-
+ ut_ad(mtr->state == MTR_ACTIVE);
ut_ad(mtr_memo_contains_page(mtr, descr, MTR_MEMO_PAGE_X_FIX));
- ut_ad((bit == XDES_FREE_BIT) || (bit == XDES_CLEAN_BIT));
- ut_ad(offset < FSP_EXTENT_SIZE);
- index = bit + XDES_BITS_PER_PAGE * offset;
-
- byte_index = index / 8;
- bit_index = index % 8;
-
- return(ut_bit_get_nth(mtr_read_ulint(descr + XDES_BITMAP + byte_index,
- MLOG_1BYTE, mtr),
- bit_index));
+ return(xdes_get_bit(descr, bit, offset));
}
/**********************************************************************//**
@@ -287,7 +273,8 @@ xdes_find_bit(
xdes_t* descr, /*!< in: descriptor */
ulint bit, /*!< in: XDES_FREE_BIT or XDES_CLEAN_BIT */
ibool val, /*!< in: desired bit value */
- ulint hint, /*!< in: hint of which bit position would be desirable */
+ ulint hint, /*!< in: hint of which bit position would
+ be desirable */
mtr_t* mtr) /*!< in/out: mini-transaction */
{
ulint i;
@@ -297,14 +284,14 @@ xdes_find_bit(
ut_ad(hint < FSP_EXTENT_SIZE);
ut_ad(mtr_memo_contains_page(mtr, descr, MTR_MEMO_PAGE_X_FIX));
for (i = hint; i < FSP_EXTENT_SIZE; i++) {
- if (val == xdes_get_bit(descr, bit, i, mtr)) {
+ if (val == xdes_mtr_get_bit(descr, bit, i, mtr)) {
return(i);
}
}
for (i = 0; i < hint; i++) {
- if (val == xdes_get_bit(descr, bit, i, mtr)) {
+ if (val == xdes_mtr_get_bit(descr, bit, i, mtr)) {
return(i);
}
@@ -324,7 +311,8 @@ xdes_find_bit_downward(
xdes_t* descr, /*!< in: descriptor */
ulint bit, /*!< in: XDES_FREE_BIT or XDES_CLEAN_BIT */
ibool val, /*!< in: desired bit value */
- ulint hint, /*!< in: hint of which bit position would be desirable */
+ ulint hint, /*!< in: hint of which bit position would
+ be desirable */
mtr_t* mtr) /*!< in/out: mini-transaction */
{
ulint i;
@@ -334,14 +322,14 @@ xdes_find_bit_downward(
ut_ad(hint < FSP_EXTENT_SIZE);
ut_ad(mtr_memo_contains_page(mtr, descr, MTR_MEMO_PAGE_X_FIX));
for (i = hint + 1; i > 0; i--) {
- if (val == xdes_get_bit(descr, bit, i - 1, mtr)) {
+ if (val == xdes_mtr_get_bit(descr, bit, i - 1, mtr)) {
return(i - 1);
}
}
for (i = FSP_EXTENT_SIZE - 1; i > hint; i--) {
- if (val == xdes_get_bit(descr, bit, i, mtr)) {
+ if (val == xdes_mtr_get_bit(descr, bit, i, mtr)) {
return(i);
}
@@ -360,13 +348,12 @@ xdes_get_n_used(
const xdes_t* descr, /*!< in: descriptor */
mtr_t* mtr) /*!< in/out: mini-transaction */
{
- ulint i;
ulint count = 0;
ut_ad(descr && mtr);
ut_ad(mtr_memo_contains_page(mtr, descr, MTR_MEMO_PAGE_X_FIX));
- for (i = 0; i < FSP_EXTENT_SIZE; i++) {
- if (FALSE == xdes_get_bit(descr, XDES_FREE_BIT, i, mtr)) {
+ for (ulint i = 0; i < FSP_EXTENT_SIZE; ++i) {
+ if (FALSE == xdes_mtr_get_bit(descr, XDES_FREE_BIT, i, mtr)) {
count++;
}
}
@@ -471,76 +458,11 @@ xdes_init(
}
/********************************************************************//**
-Calculates the page where the descriptor of a page resides.
-@return descriptor page offset */
-UNIV_INLINE
-ulint
-xdes_calc_descriptor_page(
-/*======================*/
- ulint zip_size, /*!< in: compressed page size in bytes;
- 0 for uncompressed pages */
- ulint offset) /*!< in: page offset */
-{
-#ifndef DOXYGEN /* Doxygen gets confused of these */
-# if UNIV_PAGE_SIZE_MAX <= XDES_ARR_OFFSET \
- + (UNIV_PAGE_SIZE_MAX / FSP_EXTENT_SIZE_MAX) \
- * XDES_SIZE_MAX
-# error
-# endif
-# if UNIV_ZIP_SIZE_MIN <= XDES_ARR_OFFSET \
- + (UNIV_ZIP_SIZE_MIN / FSP_EXTENT_SIZE_MIN) \
- * XDES_SIZE_MIN
-# error
-# endif
-#endif /* !DOXYGEN */
-
- ut_ad(UNIV_PAGE_SIZE > XDES_ARR_OFFSET
- + (UNIV_PAGE_SIZE / FSP_EXTENT_SIZE)
- * XDES_SIZE);
- ut_ad(UNIV_ZIP_SIZE_MIN > XDES_ARR_OFFSET
- + (UNIV_ZIP_SIZE_MIN / FSP_EXTENT_SIZE)
- * XDES_SIZE);
-
- ut_ad(ut_is_2pow(zip_size));
-
- if (!zip_size) {
- return(ut_2pow_round(offset, UNIV_PAGE_SIZE));
- } else {
- ut_ad(zip_size > XDES_ARR_OFFSET
- + (zip_size / FSP_EXTENT_SIZE) * XDES_SIZE);
- return(ut_2pow_round(offset, zip_size));
- }
-}
-
-/********************************************************************//**
-Calculates the descriptor index within a descriptor page.
-@return descriptor index */
-UNIV_INLINE
-ulint
-xdes_calc_descriptor_index(
-/*=======================*/
- ulint zip_size, /*!< in: compressed page size in bytes;
- 0 for uncompressed pages */
- ulint offset) /*!< in: page offset */
-{
- ut_ad(ut_is_2pow(zip_size));
-
- if (!zip_size) {
- return(ut_2pow_remainder(offset, UNIV_PAGE_SIZE)
- / FSP_EXTENT_SIZE);
- } else {
- return(ut_2pow_remainder(offset, zip_size) / FSP_EXTENT_SIZE);
- }
-}
-
-/********************************************************************//**
Gets pointer to a the extent descriptor of a page. The page where the extent
-descriptor resides is x-locked. If the page offset is equal to the free limit
-of the space, adds new extents from above the free limit to the space free
-list, if not free limit == space size. This adding is necessary to make the
-descriptor defined, as they are uninitialized above the free limit.
+descriptor resides is x-locked. This function no longer extends the data
+file.
@return pointer to the extent descriptor, NULL if the page does not
-exist in the space or if the offset exceeds the free limit */
+exist in the space or if the offset is >= the free limit */
UNIV_INLINE __attribute__((nonnull, warn_unused_result))
xdes_t*
xdes_get_descriptor_with_space_hdr(
@@ -570,19 +492,10 @@ xdes_get_descriptor_with_space_hdr(
zip_size = fsp_flags_get_zip_size(
mach_read_from_4(sp_header + FSP_SPACE_FLAGS));
- /* If offset is >= size or > limit, return NULL */
-
- if ((offset >= size) || (offset > limit)) {
-
+ if ((offset >= size) || (offset >= limit)) {
return(NULL);
}
- /* If offset is == limit, fill free list of the space. */
-
- if (offset == limit) {
- fsp_fill_free_list(FALSE, space, sp_header, mtr);
- }
-
descr_page_no = xdes_calc_descriptor_page(zip_size, offset);
if (descr_page_no == 0) {
@@ -668,7 +581,7 @@ UNIV_INLINE
ulint
xdes_get_offset(
/*============*/
- xdes_t* descr) /*!< in: extent descriptor */
+ const xdes_t* descr) /*!< in: extent descriptor */
{
ut_ad(descr);
@@ -784,7 +697,7 @@ fsp_header_init_fields(
ulint space_id, /*!< in: space id */
ulint flags) /*!< in: tablespace flags (FSP_SPACE_FLAGS) */
{
- fsp_flags_validate(flags);
+ ut_a(fsp_flags_is_valid(flags));
mach_write_to_4(FSP_HEADER_OFFSET + FSP_SPACE_ID + page,
space_id);
@@ -872,11 +785,13 @@ fsp_header_get_space_id(
id = mach_read_from_4(page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID);
+ DBUG_EXECUTE_IF("fsp_header_get_space_id_failure",
+ id = ULINT_UNDEFINED;);
+
if (id != fsp_id) {
- fprintf(stderr,
- "InnoDB: Error: space id in fsp header %lu,"
- " but in the page header %lu\n",
- (ulong) fsp_id, (ulong) id);
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Space id in fsp header %lu,but in the page header "
+ "%lu", fsp_id, id);
return(ULINT_UNDEFINED);
}
@@ -1348,7 +1263,7 @@ fsp_alloc_from_free_frag(
ulint frag_n_used;
ut_ad(xdes_get_state(descr, mtr) == XDES_FREE_FRAG);
- ut_a(xdes_get_bit(descr, XDES_FREE_BIT, bit, mtr));
+ ut_a(xdes_mtr_get_bit(descr, XDES_FREE_BIT, bit, mtr));
xdes_set_bit(descr, XDES_FREE_BIT, bit, FALSE, mtr);
/* Update the FRAG_N_USED field */
@@ -1583,7 +1498,9 @@ fsp_free_page(
ut_error;
}
- if (xdes_get_bit(descr, XDES_FREE_BIT, page % FSP_EXTENT_SIZE, mtr)) {
+ if (xdes_mtr_get_bit(descr, XDES_FREE_BIT,
+ page % FSP_EXTENT_SIZE, mtr)) {
+
fprintf(stderr,
"InnoDB: Error: File space extent descriptor"
" of page %lu says it is free\n"
@@ -1728,16 +1645,15 @@ fsp_seg_inode_page_find_free(
ulint zip_size,/*!< in: compressed page size, or 0 */
mtr_t* mtr) /*!< in/out: mini-transaction */
{
- fseg_inode_t* inode;
-
for (; i < FSP_SEG_INODES_PER_PAGE(zip_size); i++) {
+ fseg_inode_t* inode;
+
inode = fsp_seg_inode_page_get_nth_inode(
page, i, zip_size, mtr);
if (!mach_read_from_8(inode + FSEG_ID)) {
/* This is unused */
-
return(i);
}
@@ -1763,11 +1679,11 @@ fsp_alloc_seg_inode_page(
page_t* page;
ulint space;
ulint zip_size;
- ulint i;
ut_ad(page_offset(space_header) == FSP_HEADER_OFFSET);
space = page_get_space_id(page_align(space_header));
+
zip_size = fsp_flags_get_zip_size(
mach_read_from_4(FSP_SPACE_FLAGS + space_header));
@@ -1788,16 +1704,18 @@ fsp_alloc_seg_inode_page(
mlog_write_ulint(page + FIL_PAGE_TYPE, FIL_PAGE_INODE,
MLOG_2BYTES, mtr);
- for (i = 0; i < FSP_SEG_INODES_PER_PAGE(zip_size); i++) {
+ for (ulint i = 0; i < FSP_SEG_INODES_PER_PAGE(zip_size); i++) {
- inode = fsp_seg_inode_page_get_nth_inode(page, i,
- zip_size, mtr);
+ inode = fsp_seg_inode_page_get_nth_inode(
+ page, i, zip_size, mtr);
mlog_write_ull(inode + FSEG_ID, 0, mtr);
}
- flst_add_last(space_header + FSP_SEG_INODES_FREE,
- page + FSEG_INODE_PAGE_NODE, mtr);
+ flst_add_last(
+ space_header + FSP_SEG_INODES_FREE,
+ page + FSEG_INODE_PAGE_NODE, mtr);
+
return(TRUE);
}
@@ -2486,8 +2404,8 @@ fseg_alloc_free_page_low(
/*-------------------------------------------------------------*/
if ((xdes_get_state(descr, mtr) == XDES_FSEG)
&& mach_read_from_8(descr + XDES_ID) == seg_id
- && (xdes_get_bit(descr, XDES_FREE_BIT,
- hint % FSP_EXTENT_SIZE, mtr) == TRUE)) {
+ && (xdes_mtr_get_bit(descr, XDES_FREE_BIT,
+ hint % FSP_EXTENT_SIZE, mtr) == TRUE)) {
take_hinted_page:
/* 1. We can take the hinted page
=================================*/
@@ -2652,10 +2570,12 @@ got_hinted_page:
ut_ad(xdes_get_descriptor(space, zip_size, ret_page, mtr)
== ret_descr);
- ut_ad(xdes_get_bit(ret_descr, XDES_FREE_BIT,
- ret_page % FSP_EXTENT_SIZE, mtr) == TRUE);
- fseg_mark_page_used(seg_inode, space, zip_size, ret_page, mtr);
+ ut_ad(xdes_mtr_get_bit(
+ ret_descr, XDES_FREE_BIT,
+ ret_page % FSP_EXTENT_SIZE, mtr));
+
+ fseg_mark_page_used(seg_inode, ret_page, ret_descr, mtr);
}
return(fsp_page_create(
@@ -3053,27 +2973,21 @@ fsp_get_available_space_in_free_extents(
/********************************************************************//**
Marks a page used. The page must reside within the extents of the given
segment. */
-static
+static __attribute__((nonnull))
void
fseg_mark_page_used(
/*================*/
fseg_inode_t* seg_inode,/*!< in: segment inode */
- ulint space, /*!< in: space id */
- ulint zip_size,/*!< in: compressed page size in bytes
- or 0 for uncompressed pages */
ulint page, /*!< in: page offset */
+ xdes_t* descr, /*!< in: extent descriptor */
mtr_t* mtr) /*!< in/out: mini-transaction */
{
- xdes_t* descr;
ulint not_full_n_used;
- ut_ad(seg_inode && mtr);
ut_ad(!((page_offset(seg_inode) - FSEG_ARR_OFFSET) % FSEG_INODE_SIZE));
ut_ad(mach_read_from_4(seg_inode + FSEG_MAGIC_N)
== FSEG_MAGIC_N_VALUE);
- descr = xdes_get_descriptor(space, zip_size, page, mtr);
-
ut_ad(mtr_read_ulint(seg_inode + FSEG_ID, MLOG_4BYTES, mtr)
== mtr_read_ulint(descr + XDES_ID, MLOG_4BYTES, mtr));
@@ -3086,8 +3000,9 @@ fseg_mark_page_used(
descr + XDES_FLST_NODE, mtr);
}
- ut_ad(xdes_get_bit(descr, XDES_FREE_BIT, page % FSP_EXTENT_SIZE, mtr)
- == TRUE);
+ ut_ad(xdes_mtr_get_bit(
+ descr, XDES_FREE_BIT, page % FSP_EXTENT_SIZE, mtr));
+
/* We mark the page as used */
xdes_set_bit(descr, XDES_FREE_BIT, page % FSP_EXTENT_SIZE, FALSE, mtr);
@@ -3142,8 +3057,8 @@ fseg_free_page_low(
descr = xdes_get_descriptor(space, zip_size, page, mtr);
- ut_a(descr);
- if (xdes_get_bit(descr, XDES_FREE_BIT, page % FSP_EXTENT_SIZE, mtr)) {
+ if (xdes_mtr_get_bit(descr, XDES_FREE_BIT,
+ page % FSP_EXTENT_SIZE, mtr)) {
fputs("InnoDB: Dump of the tablespace extent descriptor: ",
stderr);
ut_print_buf(stderr, descr, 40);
@@ -3278,6 +3193,49 @@ fseg_free_page(
}
/**********************************************************************//**
Checks if a single page of a segment is free.
@return true if free */
UNIV_INTERN
bool
fseg_page_is_free(
/*==============*/
	fseg_header_t*	seg_header,	/*!< in: segment header */
	ulint		space,		/*!< in: space id */
	ulint		page)		/*!< in: page offset */
{
	mtr_t		mtr;
	ibool		is_free;
	ulint		flags;
	rw_lock_t*	latch;
	xdes_t*		descr;
	ulint		zip_size;
	fseg_inode_t*	seg_inode;

	latch = fil_space_get_latch(space, &flags);
	zip_size = dict_tf_get_zip_size(flags);

	mtr_start(&mtr);
	/* X-latch the tablespace so that neither the segment inode nor
	the extent descriptor can change while we read the bit. */
	mtr_x_lock(latch, &mtr);

	seg_inode = fseg_inode_get(seg_header, space, zip_size, &mtr);

	ut_a(seg_inode);
	ut_ad(mach_read_from_4(seg_inode + FSEG_MAGIC_N)
	      == FSEG_MAGIC_N_VALUE);
	ut_ad(!((page_offset(seg_inode) - FSEG_ARR_OFFSET) % FSEG_INODE_SIZE));

	descr = xdes_get_descriptor(space, zip_size, page, &mtr);
	ut_a(descr);

	/* XDES_FREE_BIT is set in the extent descriptor bitmap when the
	page is free within its extent. */
	is_free = xdes_mtr_get_bit(
		descr, XDES_FREE_BIT, page % FSP_EXTENT_SIZE, &mtr);

	mtr_commit(&mtr);

	return(is_free);
}
+
+/**********************************************************************//**
Frees an extent of a segment to the space free list. */
static
void
@@ -3308,7 +3266,7 @@ fseg_free_extent(
first_page_in_extent = page - (page % FSP_EXTENT_SIZE);
for (i = 0; i < FSP_EXTENT_SIZE; i++) {
- if (FALSE == xdes_get_bit(descr, XDES_FREE_BIT, i, mtr)) {
+ if (!xdes_mtr_get_bit(descr, XDES_FREE_BIT, i, mtr)) {
/* Drop search system page hash index if the page is
found in the pool and is hashed */
@@ -3388,9 +3346,9 @@ fseg_free_step(
/* Check that the header resides on a page which has not been
freed yet */
- ut_a(descr);
- ut_a(xdes_get_bit(descr, XDES_FREE_BIT,
- header_page % FSP_EXTENT_SIZE, mtr) == FALSE);
+ ut_a(xdes_mtr_get_bit(descr, XDES_FREE_BIT,
+ header_page % FSP_EXTENT_SIZE, mtr) == FALSE);
+
inode = fseg_inode_try_get(header, space, zip_size, mtr);
if (UNIV_UNLIKELY(inode == NULL)) {
diff --git a/storage/innobase/fts/fts0ast.cc b/storage/innobase/fts/fts0ast.cc
index c01c43a021f..972f5acf461 100644
--- a/storage/innobase/fts/fts0ast.cc
+++ b/storage/innobase/fts/fts0ast.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2007, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2007, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -98,9 +98,21 @@ fts_ast_create_node_text(
void* arg, /*!< in: ast state instance */
const char* ptr) /*!< in: ast text string */
{
+ ulint len = strlen(ptr);
+ fts_ast_node_t* node = NULL;
+
+ ut_ad(len >= 2);
+
+ if (len == 2) {
+ ut_ad(ptr[0] == '\"');
+ ut_ad(ptr[1] == '\"');
+ return(NULL);
+ }
+
+ node = fts_ast_node_create();
+
/*!< We ignore the actual quotes "" */
- ulint len = strlen(ptr) - 2;
- fts_ast_node_t* node = fts_ast_node_create();
+ len -= 2;
node->type = FTS_AST_TEXT;
node->text.ptr = static_cast<byte*>(ut_malloc(len + 1));
@@ -381,34 +393,100 @@ fts_ast_node_print(
}
/******************************************************************//**
-Traverse the AST - in-order traversal.
+Traverse the AST - in-order traversal, except for the FTS_IGNORE
+nodes, which will be ignored in the first pass of each level, and
+visited in a second pass after all other nodes in the same level are visited.
@return DB_SUCCESS if all went well */
UNIV_INTERN
-ulint
+dberr_t
fts_ast_visit(
/*==========*/
fts_ast_oper_t oper, /*!< in: current operator */
fts_ast_node_t* node, /*!< in: current root node */
fts_ast_callback visitor, /*!< in: callback function */
- void* arg) /*!< in: arg for callback */
+ void* arg, /*!< in: arg for callback */
+ bool* has_ignore) /*!< out: true, if the operator
+ was ignored during processing,
+ currently we only ignore
+ FTS_IGNORE operator */
{
- ulint error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
+ fts_ast_node_t* oper_node = NULL;
+ fts_ast_node_t* start_node;
+ bool revisit = false;
+ bool will_be_ignored = false;
+
+ start_node = node->list.head;
ut_a(node->type == FTS_AST_LIST
|| node->type == FTS_AST_SUBEXP_LIST);
+ /* In the first pass of the tree, at the leaf level of the
+ tree, FTS_IGNORE operation will be ignored. It will be
+ repeated at the level above the leaf level */
for (node = node->list.head;
- node && error == DB_SUCCESS;
+ node && (error == DB_SUCCESS);
node = node->next) {
if (node->type == FTS_AST_LIST) {
- error = fts_ast_visit(oper, node, visitor, arg);
+ error = fts_ast_visit(oper, node, visitor,
+ arg, &will_be_ignored);
+
+ /* If will_be_ignored is set to true, then
+ we encountered and ignored a FTS_IGNORE operator,
+ and a second pass is needed to process FTS_IGNORE
+ operator */
+ if (will_be_ignored) {
+ revisit = true;
+ }
} else if (node->type == FTS_AST_SUBEXP_LIST) {
error = fts_ast_visit_sub_exp(node, visitor, arg);
} else if (node->type == FTS_AST_OPER) {
oper = node->oper;
+ oper_node = node;
} else {
- visitor(oper, node, arg);
+ if (node->visited) {
+ continue;
+ }
+
+ ut_a(oper == FTS_NONE || !oper_node
+ || oper_node->oper == oper);
+
+ if (oper == FTS_IGNORE) {
+ *has_ignore = true;
+ /* Change the operator to FTS_IGNORE_SKIP,
+ so that it is processed in the second pass */
+ oper_node->oper = FTS_IGNORE_SKIP;
+ continue;
+ }
+
+ if (oper == FTS_IGNORE_SKIP) {
+ /* This must be the second pass, now we process
+ the FTS_IGNORE operator */
+ visitor(FTS_IGNORE, node, arg);
+ } else {
+ visitor(oper, node, arg);
+ }
+
+ node->visited = true;
+ }
+ }
+
+ /* Second pass to process the skipped FTS_IGNORE operation.
+ It is only performed at the level above leaf level */
+ if (revisit) {
+ for (node = start_node;
+ node && error == DB_SUCCESS;
+ node = node->next) {
+
+ if (node->type == FTS_AST_LIST) {
+ /* In this pass, it will process all those
+ operators ignored in the first pass, and those
+ whose operators are set to FTS_IGNORE_SKIP */
+ error = fts_ast_visit(
+ oper, node, visitor, arg,
+ &will_be_ignored);
+ }
}
}
diff --git a/storage/innobase/fts/fts0blex.cc b/storage/innobase/fts/fts0blex.cc
index b3350010db0..1abd737ec06 100644
--- a/storage/innobase/fts/fts0blex.cc
+++ b/storage/innobase/fts/fts0blex.cc
@@ -35,7 +35,7 @@
#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
/* C99 says to define __STDC_LIMIT_MACROS before including stdint.h,
- * if you want the limit (max/min) macros for int types.
+ * if you want the limit (max/min) macros for int types.
*/
#ifndef __STDC_LIMIT_MACROS
#define __STDC_LIMIT_MACROS 1
@@ -247,7 +247,7 @@ struct yy_buffer_state
int yy_bs_lineno; /**< The line count. */
int yy_bs_column; /**< The column count. */
-
+
/* Whether to try to fill the input buffer when we reach the
* end of it.
*/
@@ -305,9 +305,9 @@ YY_BUFFER_STATE fts0b_scan_buffer (char *base,yy_size_t size ,yyscan_t yyscanner
YY_BUFFER_STATE fts0b_scan_string (yyconst char *yy_str ,yyscan_t yyscanner );
YY_BUFFER_STATE fts0b_scan_bytes (yyconst char *bytes,int len ,yyscan_t yyscanner );
-void *fts0balloc (yy_size_t , yyscan_t yyscanner __attribute__((unused)) );
-void *fts0brealloc (void *,yy_size_t , yyscan_t yyscanner __attribute__((unused)) );
-void fts0bfree (void * , yyscan_t yyscanner __attribute__((unused)) );
+void *fts0balloc (yy_size_t , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) );
+void *fts0brealloc (void *,yy_size_t , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) );
+void fts0bfree (void * , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) );
#define yy_new_buffer fts0b_create_buffer
@@ -347,7 +347,7 @@ typedef int yy_state_type;
static yy_state_type yy_get_previous_state (yyscan_t yyscanner );
static yy_state_type yy_try_NUL_trans (yy_state_type current_state ,yyscan_t yyscanner);
static int yy_get_next_buffer (yyscan_t yyscanner );
-static void yy_fatal_error (yyconst char msg[] , yyscan_t yyscanner __attribute__((unused)) );
+static void yy_fatal_error (yyconst char msg[] , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) );
/* Done after the current pattern has been matched and before the
* corresponding action - sets up yytext.
@@ -368,10 +368,10 @@ struct yy_trans_info
flex_int32_t yy_verify;
flex_int32_t yy_nxt;
};
-static yyconst flex_int16_t yy_accept[18] =
+static yyconst flex_int16_t yy_accept[19] =
{ 0,
- 4, 4, 8, 4, 1, 6, 1, 7, 2, 3,
- 4, 1, 1, 0, 5, 3, 0
+ 4, 4, 8, 4, 1, 6, 1, 7, 7, 2,
+ 3, 4, 1, 1, 0, 5, 3, 0
} ;
static yyconst flex_int32_t yy_ec[256] =
@@ -379,17 +379,17 @@ static yyconst flex_int32_t yy_ec[256] =
1, 1, 1, 1, 1, 1, 1, 1, 2, 3,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 4, 1, 5, 1, 1, 1, 1, 1, 6,
- 6, 6, 6, 1, 6, 1, 1, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 1, 1, 6,
- 1, 6, 1, 6, 1, 1, 1, 1, 1, 1,
+ 1, 4, 1, 5, 1, 1, 6, 1, 1, 7,
+ 7, 7, 7, 1, 7, 1, 1, 8, 8, 8,
+ 8, 8, 8, 8, 8, 8, 8, 1, 1, 7,
+ 1, 7, 1, 7, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 6, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 7, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
@@ -406,35 +406,39 @@ static yyconst flex_int32_t yy_ec[256] =
1, 1, 1, 1, 1
} ;
-static yyconst flex_int32_t yy_meta[8] =
+static yyconst flex_int32_t yy_meta[9] =
{ 0,
- 1, 2, 3, 4, 5, 5, 1
+ 1, 2, 3, 4, 5, 5, 5, 1
} ;
-static yyconst flex_int16_t yy_base[21] =
+static yyconst flex_int16_t yy_base[22] =
{ 0,
- 0, 0, 21, 0, 6, 22, 0, 13, 22, 7,
- 0, 0, 0, 4, 22, 0, 22, 10, 11, 15
+ 0, 0, 22, 0, 7, 23, 0, 14, 23, 23,
+ 7, 0, 0, 0, 5, 23, 0, 23, 11, 12,
+ 16
} ;
-static yyconst flex_int16_t yy_def[21] =
+static yyconst flex_int16_t yy_def[22] =
{ 0,
- 17, 1, 17, 18, 18, 17, 19, 20, 17, 18,
- 18, 5, 19, 20, 17, 10, 0, 17, 17, 17
+ 18, 1, 18, 19, 19, 18, 20, 21, 18, 18,
+ 19, 19, 5, 20, 21, 18, 11, 0, 18, 18,
+ 18
} ;
-static yyconst flex_int16_t yy_nxt[30] =
+static yyconst flex_int16_t yy_nxt[32] =
{ 0,
- 4, 5, 6, 7, 8, 9, 10, 12, 15, 13,
- 11, 11, 13, 16, 13, 14, 14, 15, 14, 14,
- 17, 3, 17, 17, 17, 17, 17, 17, 17
+ 4, 5, 6, 7, 8, 9, 10, 11, 13, 16,
+ 14, 12, 12, 14, 17, 14, 15, 15, 16, 15,
+ 15, 18, 3, 18, 18, 18, 18, 18, 18, 18,
+ 18
} ;
-static yyconst flex_int16_t yy_chk[30] =
+static yyconst flex_int16_t yy_chk[32] =
{ 0,
- 1, 1, 1, 1, 1, 1, 1, 5, 14, 5,
- 18, 18, 19, 10, 19, 20, 20, 8, 20, 20,
- 3, 17, 17, 17, 17, 17, 17, 17, 17
+ 1, 1, 1, 1, 1, 1, 1, 1, 5, 15,
+ 5, 19, 19, 20, 11, 20, 21, 21, 8, 21,
+ 21, 3, 18, 18, 18, 18, 18, 18, 18, 18,
+ 18
} ;
/* The intent behind this definition is that it'll catch
@@ -477,7 +481,7 @@ this program; if not, write to the Free Software Foundation, Inc.,
#define YY_DECL int fts_blexer(YYSTYPE* val, yyscan_t yyscanner)
#define YY_NO_INPUT 1
-#line 480 "fts0blex.cc"
+#line 484 "fts0blex.cc"
#define INITIAL 0
@@ -575,11 +579,11 @@ extern int fts0bwrap (yyscan_t yyscanner );
#endif
#ifndef yytext_ptr
-static void yy_flex_strncpy (char *,yyconst char *,int , yyscan_t yyscanner __attribute__((unused)));
+static void yy_flex_strncpy (char *,yyconst char *,int , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)));
#endif
#ifdef YY_NEED_STRLEN
-static int yy_flex_strlen (yyconst char * , yyscan_t yyscanner __attribute__((unused)));
+static int yy_flex_strlen (yyconst char * , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)));
#endif
#ifndef YY_NO_INPUT
@@ -699,12 +703,12 @@ YY_DECL
register yy_state_type yy_current_state;
register char *yy_cp, *yy_bp;
register int yy_act;
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
#line 43 "fts0blex.l"
-#line 707 "fts0blex.cc"
+#line 711 "fts0blex.cc"
if ( !yyg->yy_init )
{
@@ -757,13 +761,13 @@ yy_match:
while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
{
yy_current_state = (int) yy_def[yy_current_state];
- if ( yy_current_state >= 18 )
+ if ( yy_current_state >= 19 )
yy_c = yy_meta[(unsigned int) yy_c];
}
yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
++yy_cp;
}
- while ( yy_current_state != 17 );
+ while ( yy_current_state != 18 );
yy_cp = yyg->yy_last_accepting_cpos;
yy_current_state = yyg->yy_last_accepting_state;
@@ -835,7 +839,7 @@ YY_RULE_SETUP
#line 73 "fts0blex.l"
ECHO;
YY_BREAK
-#line 838 "fts0blex.cc"
+#line 842 "fts0blex.cc"
case YY_STATE_EOF(INITIAL):
yyterminate();
@@ -978,7 +982,7 @@ case YY_STATE_EOF(INITIAL):
*/
static int yy_get_next_buffer (yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
register char *dest = YY_CURRENT_BUFFER_LVALUE->yy_ch_buf;
register char *source = yyg->yytext_ptr;
register int number_to_move, i;
@@ -1044,9 +1048,9 @@ static int yy_get_next_buffer (yyscan_t yyscanner)
else
b->yy_buf_size *= 2;
- b->yy_ch_buf = (char*)
+ b->yy_ch_buf = (char *)
/* Include room in for 2 EOB chars. */
- fts0brealloc((void*) b->yy_ch_buf,b->yy_buf_size + 2 ,yyscanner );
+ fts0brealloc((void *) b->yy_ch_buf,b->yy_buf_size + 2 ,yyscanner );
}
else
/* Can't grow it, we don't own it. */
@@ -1095,7 +1099,7 @@ static int yy_get_next_buffer (yyscan_t yyscanner)
if ((yy_size_t) (yyg->yy_n_chars + number_to_move) > YY_CURRENT_BUFFER_LVALUE->yy_buf_size) {
/* Extend the array by 50%, plus the number we really need. */
yy_size_t new_size = yyg->yy_n_chars + number_to_move + (yyg->yy_n_chars >> 1);
- YY_CURRENT_BUFFER_LVALUE->yy_ch_buf = (char*) fts0brealloc((void*) YY_CURRENT_BUFFER_LVALUE->yy_ch_buf,new_size ,yyscanner );
+ YY_CURRENT_BUFFER_LVALUE->yy_ch_buf = (char *) fts0brealloc((void *) YY_CURRENT_BUFFER_LVALUE->yy_ch_buf,new_size ,yyscanner );
if ( ! YY_CURRENT_BUFFER_LVALUE->yy_ch_buf )
YY_FATAL_ERROR( "out of dynamic memory in yy_get_next_buffer()" );
}
@@ -1115,7 +1119,7 @@ static int yy_get_next_buffer (yyscan_t yyscanner)
{
register yy_state_type yy_current_state;
register char *yy_cp;
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
yy_current_state = yyg->yy_start;
@@ -1130,7 +1134,7 @@ static int yy_get_next_buffer (yyscan_t yyscanner)
while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
{
yy_current_state = (int) yy_def[yy_current_state];
- if ( yy_current_state >= 18 )
+ if ( yy_current_state >= 19 )
yy_c = yy_meta[(unsigned int) yy_c];
}
yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
@@ -1147,7 +1151,7 @@ static int yy_get_next_buffer (yyscan_t yyscanner)
static yy_state_type yy_try_NUL_trans (yy_state_type yy_current_state , yyscan_t yyscanner)
{
register int yy_is_jam;
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner; /* This var may be unused depending upon options. */
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; /* This var may be unused depending upon options. */
register char *yy_cp = yyg->yy_c_buf_p;
register YY_CHAR yy_c = 1;
@@ -1159,11 +1163,11 @@ static int yy_get_next_buffer (yyscan_t yyscanner)
while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
{
yy_current_state = (int) yy_def[yy_current_state];
- if ( yy_current_state >= 18 )
+ if ( yy_current_state >= 19 )
yy_c = yy_meta[(unsigned int) yy_c];
}
yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
- yy_is_jam = (yy_current_state == 17);
+ yy_is_jam = (yy_current_state == 18);
return yy_is_jam ? 0 : yy_current_state;
}
@@ -1177,7 +1181,7 @@ static int yy_get_next_buffer (yyscan_t yyscanner)
{
int c;
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
*yyg->yy_c_buf_p = yyg->yy_hold_char;
@@ -1235,7 +1239,7 @@ static int yy_get_next_buffer (yyscan_t yyscanner)
}
}
- c = *(unsigned char*) yyg->yy_c_buf_p; /* cast for 8-bit char's */
+ c = *(unsigned char *) yyg->yy_c_buf_p; /* cast for 8-bit char's */
*yyg->yy_c_buf_p = '\0'; /* preserve yytext */
yyg->yy_hold_char = *++yyg->yy_c_buf_p;
@@ -1250,7 +1254,7 @@ static int yy_get_next_buffer (yyscan_t yyscanner)
*/
void fts0brestart (FILE * input_file , yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
if ( ! YY_CURRENT_BUFFER ){
fts0bensure_buffer_stack (yyscanner);
@@ -1268,7 +1272,7 @@ static int yy_get_next_buffer (yyscan_t yyscanner)
*/
void fts0b_switch_to_buffer (YY_BUFFER_STATE new_buffer , yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
/* TODO. We should be able to replace this entire function body
* with
@@ -1300,7 +1304,7 @@ static int yy_get_next_buffer (yyscan_t yyscanner)
static void fts0b_load_buffer_state (yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
yyg->yy_n_chars = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;
yyg->yytext_ptr = yyg->yy_c_buf_p = YY_CURRENT_BUFFER_LVALUE->yy_buf_pos;
yyin = YY_CURRENT_BUFFER_LVALUE->yy_input_file;
@@ -1316,7 +1320,7 @@ static void fts0b_load_buffer_state (yyscan_t yyscanner)
YY_BUFFER_STATE fts0b_create_buffer (FILE * file, int size , yyscan_t yyscanner)
{
YY_BUFFER_STATE b;
-
+
b = (YY_BUFFER_STATE) fts0balloc(sizeof( struct yy_buffer_state ) ,yyscanner );
if ( ! b )
YY_FATAL_ERROR( "out of dynamic memory in fts0b_create_buffer()" );
@@ -1326,7 +1330,7 @@ static void fts0b_load_buffer_state (yyscan_t yyscanner)
/* yy_ch_buf has to be 2 characters longer than the size given because
* we need to put in 2 end-of-buffer characters.
*/
- b->yy_ch_buf = (char*) fts0balloc(b->yy_buf_size + 2 ,yyscanner );
+ b->yy_ch_buf = (char *) fts0balloc(b->yy_buf_size + 2 ,yyscanner );
if ( ! b->yy_ch_buf )
YY_FATAL_ERROR( "out of dynamic memory in fts0b_create_buffer()" );
@@ -1343,7 +1347,7 @@ static void fts0b_load_buffer_state (yyscan_t yyscanner)
*/
void fts0b_delete_buffer (YY_BUFFER_STATE b , yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
if ( ! b )
return;
@@ -1352,9 +1356,9 @@ static void fts0b_load_buffer_state (yyscan_t yyscanner)
YY_CURRENT_BUFFER_LVALUE = (YY_BUFFER_STATE) 0;
if ( b->yy_is_our_buffer )
- fts0bfree((void*) b->yy_ch_buf ,yyscanner );
+ fts0bfree((void *) b->yy_ch_buf ,yyscanner );
- fts0bfree((void*) b ,yyscanner );
+ fts0bfree((void *) b ,yyscanner );
}
/* Initializes or reinitializes a buffer.
@@ -1365,7 +1369,7 @@ static void fts0b_load_buffer_state (yyscan_t yyscanner)
{
int oerrno = errno;
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
fts0b_flush_buffer(b ,yyscanner);
@@ -1382,7 +1386,7 @@ static void fts0b_load_buffer_state (yyscan_t yyscanner)
}
b->yy_is_interactive = 0;
-
+
errno = oerrno;
}
@@ -1392,7 +1396,7 @@ static void fts0b_load_buffer_state (yyscan_t yyscanner)
*/
void fts0b_flush_buffer (YY_BUFFER_STATE b , yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
if ( ! b )
return;
@@ -1422,7 +1426,7 @@ static void fts0b_load_buffer_state (yyscan_t yyscanner)
*/
void fts0bpush_buffer_state (YY_BUFFER_STATE new_buffer , yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
if (new_buffer == NULL)
return;
@@ -1453,7 +1457,7 @@ void fts0bpush_buffer_state (YY_BUFFER_STATE new_buffer , yyscan_t yyscanner)
*/
void fts0bpop_buffer_state (yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
if (!YY_CURRENT_BUFFER)
return;
@@ -1474,7 +1478,7 @@ void fts0bpop_buffer_state (yyscan_t yyscanner)
static void fts0bensure_buffer_stack (yyscan_t yyscanner)
{
int num_to_alloc;
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
if (!yyg->yy_buffer_stack) {
@@ -1483,14 +1487,14 @@ static void fts0bensure_buffer_stack (yyscan_t yyscanner)
* immediate realloc on the next call.
*/
num_to_alloc = 1;
- yyg->yy_buffer_stack = (struct yy_buffer_state**) fts0balloc
+ yyg->yy_buffer_stack = (struct yy_buffer_state**)fts0balloc
(num_to_alloc * sizeof(struct yy_buffer_state*)
, yyscanner);
if ( ! yyg->yy_buffer_stack )
YY_FATAL_ERROR( "out of dynamic memory in fts0bensure_buffer_stack()" );
-
+
memset(yyg->yy_buffer_stack, 0, num_to_alloc * sizeof(struct yy_buffer_state*));
-
+
yyg->yy_buffer_stack_max = num_to_alloc;
yyg->yy_buffer_stack_top = 0;
return;
@@ -1502,7 +1506,7 @@ static void fts0bensure_buffer_stack (yyscan_t yyscanner)
int grow_size = 8 /* arbitrary grow size */;
num_to_alloc = yyg->yy_buffer_stack_max + grow_size;
- yyg->yy_buffer_stack = (struct yy_buffer_state**) fts0brealloc
+ yyg->yy_buffer_stack = (struct yy_buffer_state**)fts0brealloc
(yyg->yy_buffer_stack,
num_to_alloc * sizeof(struct yy_buffer_state*)
, yyscanner);
@@ -1519,12 +1523,12 @@ static void fts0bensure_buffer_stack (yyscan_t yyscanner)
* @param base the character buffer
* @param size the size in bytes of the character buffer
* @param yyscanner The scanner object.
- * @return the newly allocated buffer state object.
+ * @return the newly allocated buffer state object.
*/
YY_BUFFER_STATE fts0b_scan_buffer (char * base, yy_size_t size , yyscan_t yyscanner)
{
YY_BUFFER_STATE b;
-
+
if ( size < 2 ||
base[size-2] != YY_END_OF_BUFFER_CHAR ||
base[size-1] != YY_END_OF_BUFFER_CHAR )
@@ -1560,7 +1564,7 @@ YY_BUFFER_STATE fts0b_scan_buffer (char * base, yy_size_t size , yyscan_t yysc
*/
YY_BUFFER_STATE fts0b_scan_string (yyconst char * yystr , yyscan_t yyscanner)
{
-
+
return fts0b_scan_bytes(yystr,strlen(yystr) ,yyscanner);
}
@@ -1577,10 +1581,10 @@ YY_BUFFER_STATE fts0b_scan_bytes (yyconst char * yybytes, int _yybytes_len , y
char *buf;
yy_size_t n;
int i;
-
+
/* Get memory for full buffer, including space for trailing EOB's. */
n = _yybytes_len + 2;
- buf = (char*) fts0balloc(n ,yyscanner );
+ buf = (char *) fts0balloc(n ,yyscanner );
if ( ! buf )
YY_FATAL_ERROR( "out of dynamic memory in fts0b_scan_bytes()" );
@@ -1605,7 +1609,7 @@ YY_BUFFER_STATE fts0b_scan_bytes (yyconst char * yybytes, int _yybytes_len , y
#define YY_EXIT_FAILURE 2
#endif
-static void yy_fatal_error (yyconst char* msg , yyscan_t yyscanner __attribute__((unused)))
+static void yy_fatal_error (yyconst char* msg , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)))
{
(void) fprintf( stderr, "%s\n", msg );
exit( YY_EXIT_FAILURE );
@@ -1635,7 +1639,7 @@ static void yy_fatal_error (yyconst char* msg , yyscan_t yyscanner __attribute_
*/
YY_EXTRA_TYPE fts0bget_extra (yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
return yyextra;
}
@@ -1644,11 +1648,11 @@ YY_EXTRA_TYPE fts0bget_extra (yyscan_t yyscanner)
*/
int fts0bget_lineno (yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
-
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
if (! YY_CURRENT_BUFFER)
return 0;
-
+
return yylineno;
}
@@ -1657,11 +1661,11 @@ int fts0bget_lineno (yyscan_t yyscanner)
*/
int fts0bget_column (yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
-
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
if (! YY_CURRENT_BUFFER)
return 0;
-
+
return yycolumn;
}
@@ -1670,7 +1674,7 @@ int fts0bget_column (yyscan_t yyscanner)
*/
FILE *fts0bget_in (yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
return yyin;
}
@@ -1679,7 +1683,7 @@ FILE *fts0bget_in (yyscan_t yyscanner)
*/
FILE *fts0bget_out (yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
return yyout;
}
@@ -1688,7 +1692,7 @@ FILE *fts0bget_out (yyscan_t yyscanner)
*/
int fts0bget_leng (yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
return yyleng;
}
@@ -1698,7 +1702,7 @@ int fts0bget_leng (yyscan_t yyscanner)
char *fts0bget_text (yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
return yytext;
}
@@ -1708,7 +1712,7 @@ char *fts0bget_text (yyscan_t yyscanner)
*/
void fts0bset_extra (YY_EXTRA_TYPE user_defined , yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
yyextra = user_defined ;
}
@@ -1718,12 +1722,12 @@ void fts0bset_extra (YY_EXTRA_TYPE user_defined , yyscan_t yyscanner)
*/
void fts0bset_lineno (int line_number , yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
/* lineno is only valid if an input buffer exists. */
if (! YY_CURRENT_BUFFER )
- yy_fatal_error( "fts0bset_lineno called with no buffer" , yyscanner);
-
+ yy_fatal_error( "fts0bset_lineno called with no buffer" , yyscanner);
+
yylineno = line_number;
}
@@ -1733,12 +1737,12 @@ void fts0bset_lineno (int line_number , yyscan_t yyscanner)
*/
void fts0bset_column (int column_no , yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
/* column is only valid if an input buffer exists. */
if (! YY_CURRENT_BUFFER )
- yy_fatal_error( "fts0bset_column called with no buffer" , yyscanner);
-
+ yy_fatal_error( "fts0bset_column called with no buffer" , yyscanner);
+
yycolumn = column_no;
}
@@ -1750,25 +1754,25 @@ void fts0bset_column (int column_no , yyscan_t yyscanner)
*/
void fts0bset_in (FILE * in_str , yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
yyin = in_str ;
}
void fts0bset_out (FILE * out_str , yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
yyout = out_str ;
}
int fts0bget_debug (yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
return yy_flex_debug;
}
void fts0bset_debug (int bdebug , yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
yy_flex_debug = bdebug ;
}
@@ -1821,26 +1825,26 @@ int fts0blex_init_extra(YY_EXTRA_TYPE yy_user_defined,yyscan_t* ptr_yy_globals )
errno = EINVAL;
return 1;
}
-
+
*ptr_yy_globals = (yyscan_t) fts0balloc ( sizeof( struct yyguts_t ), &dummy_yyguts );
-
+
if (*ptr_yy_globals == NULL){
errno = ENOMEM;
return 1;
}
-
+
/* By setting to 0xAA, we expose bugs in
yy_init_globals. Leave at 0x00 for releases. */
memset(*ptr_yy_globals,0x00,sizeof(struct yyguts_t));
-
+
fts0bset_extra (yy_user_defined, *ptr_yy_globals);
-
+
return yy_init_globals ( *ptr_yy_globals );
}
static int yy_init_globals (yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
/* Initialization is the same as for the non-reentrant scanner.
* This function is called from fts0blex_destroy(), so don't allocate here.
*/
@@ -1848,7 +1852,7 @@ static int yy_init_globals (yyscan_t yyscanner)
yyg->yy_buffer_stack = 0;
yyg->yy_buffer_stack_top = 0;
yyg->yy_buffer_stack_max = 0;
- yyg->yy_c_buf_p = (char*) 0;
+ yyg->yy_c_buf_p = (char *) 0;
yyg->yy_init = 0;
yyg->yy_start = 0;
@@ -1861,8 +1865,8 @@ static int yy_init_globals (yyscan_t yyscanner)
yyin = stdin;
yyout = stdout;
#else
- yyin = (FILE*) 0;
- yyout = (FILE*) 0;
+ yyin = (FILE *) 0;
+ yyout = (FILE *) 0;
#endif
/* For future reference: Set errno on error, since we are called by
@@ -1874,7 +1878,7 @@ static int yy_init_globals (yyscan_t yyscanner)
/* fts0blex_destroy is for both reentrant and non-reentrant scanners. */
int fts0blex_destroy (yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
/* Pop the buffer stack, destroying each element. */
while(YY_CURRENT_BUFFER){
@@ -1906,7 +1910,7 @@ int fts0blex_destroy (yyscan_t yyscanner)
*/
#ifndef yytext_ptr
-static void yy_flex_strncpy (char* s1, yyconst char * s2, int n , yyscan_t yyscanner __attribute__((unused)))
+static void yy_flex_strncpy (char* s1, yyconst char * s2, int n , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)))
{
register int i;
for ( i = 0; i < n; ++i )
@@ -1915,7 +1919,7 @@ static void yy_flex_strncpy (char* s1, yyconst char * s2, int n , yyscan_t yysc
#endif
#ifdef YY_NEED_STRLEN
-static int yy_flex_strlen (yyconst char * s , yyscan_t yyscanner __attribute__((unused)))
+static int yy_flex_strlen (yyconst char * s , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)))
{
register int n;
for ( n = 0; s[n]; ++n )
@@ -1925,26 +1929,26 @@ static int yy_flex_strlen (yyconst char * s , yyscan_t yyscanner __attribute__(
}
#endif
-void *fts0balloc (yy_size_t size , yyscan_t yyscanner __attribute__((unused)))
+void *fts0balloc (yy_size_t size , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)))
{
- return (void*) malloc( size );
+ return (void *) malloc( size );
}
-void *fts0brealloc (void * ptr, yy_size_t size , yyscan_t yyscanner __attribute__((unused)))
+void *fts0brealloc (void * ptr, yy_size_t size , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)))
{
- /* The cast to (char*) in the following accommodates both
+ /* The cast to (char *) in the following accommodates both
* implementations that use char* generic pointers, and those
* that use void* generic pointers. It works with the latter
* because both ANSI C and C++ allow castless assignment from
* any pointer type to void*, and deal with argument conversions
* as though doing an assignment.
*/
- return (void*) realloc( (char*) ptr, size );
+ return (void *) realloc( (char *) ptr, size );
}
-void fts0bfree (void * ptr , yyscan_t yyscanner __attribute__((unused)))
+void fts0bfree (void * ptr , yyscan_t yyscanner __attribute__((unused)) __attribute__((unused)) __attribute__((unused)) __attribute__((unused)))
{
- free( (char*) ptr ); /* see fts0brealloc() for (char*) cast */
+ free( (char *) ptr ); /* see fts0brealloc() for (char *) cast */
}
#define YYTABLES_NAME "yytables"
diff --git a/storage/innobase/fts/fts0blex.l b/storage/innobase/fts/fts0blex.l
index b84b0cea294..6193f0df187 100644
--- a/storage/innobase/fts/fts0blex.l
+++ b/storage/innobase/fts/fts0blex.l
@@ -56,7 +56,7 @@ this program; if not, write to the Free Software Foundation, Inc.,
return(FTS_NUMB);
}
-[^" \n*()+\-<>~@]* {
+[^" \n*()+\-<>~@%]* {
val->token = strdup(fts0bget_text(yyscanner));
return(FTS_TERM);
diff --git a/storage/innobase/fts/fts0config.cc b/storage/innobase/fts/fts0config.cc
index 3f849ef183c..9cac680101c 100644
--- a/storage/innobase/fts/fts0config.cc
+++ b/storage/innobase/fts/fts0config.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2007, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2007, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -70,7 +70,7 @@ Get value from the config table. The caller must ensure that enough
space is allocated for value to hold the column contents.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_config_get_value(
/*=================*/
trx_t* trx, /*!< transaction */
@@ -83,7 +83,7 @@ fts_config_get_value(
{
pars_info_t* info;
que_t* graph;
- ulint error;
+ dberr_t error;
ulint name_len = strlen(name);
info = pars_info_create();
@@ -162,7 +162,7 @@ must ensure that enough space is allocated for value to hold the
column contents.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_config_get_index_value(
/*=======================*/
trx_t* trx, /*!< transaction */
@@ -173,7 +173,7 @@ fts_config_get_index_value(
config table */
{
char* name;
- ulint error;
+ dberr_t error;
fts_table_t fts_table;
FTS_INIT_FTS_TABLE(&fts_table, "CONFIG", FTS_COMMON_TABLE,
@@ -193,7 +193,7 @@ fts_config_get_index_value(
Set the value in the config table for name.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_config_set_value(
/*=================*/
trx_t* trx, /*!< transaction */
@@ -206,7 +206,7 @@ fts_config_set_value(
{
pars_info_t* info;
que_t* graph;
- ulint error;
+ dberr_t error;
undo_no_t undo_no;
undo_no_t n_rows_updated;
ulint name_len = strlen(name);
@@ -262,7 +262,7 @@ fts_config_set_value(
Set the value specific to an FTS index in the config table.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_config_set_index_value(
/*=======================*/
trx_t* trx, /*!< transaction */
@@ -273,7 +273,7 @@ fts_config_set_index_value(
config table */
{
char* name;
- ulint error;
+ dberr_t error;
fts_table_t fts_table;
FTS_INIT_FTS_TABLE(&fts_table, "CONFIG", FTS_COMMON_TABLE,
@@ -293,7 +293,7 @@ fts_config_set_index_value(
Get an ulint value from the config table.
@return DB_SUCCESS if all OK else error code */
UNIV_INTERN
-ulint
+dberr_t
fts_config_get_index_ulint(
/*=======================*/
trx_t* trx, /*!< in: transaction */
@@ -301,7 +301,7 @@ fts_config_get_index_ulint(
const char* name, /*!< in: param name */
ulint* int_value) /*!< out: value */
{
- ulint error;
+ dberr_t error;
fts_string_t value;
/* We set the length of value to the max bytes it can hold. This
@@ -314,8 +314,8 @@ fts_config_get_index_ulint(
if (UNIV_UNLIKELY(error != DB_SUCCESS)) {
ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Error: (%lu) reading `%s'\n",
- error, name);
+ fprintf(stderr, " InnoDB: Error: (%s) reading `%s'\n",
+ ut_strerr(error), name);
} else {
*int_value = strtoul((char*) value.f_str, NULL, 10);
}
@@ -329,7 +329,7 @@ fts_config_get_index_ulint(
Set an ulint value in the config table.
@return DB_SUCCESS if all OK else error code */
UNIV_INTERN
-ulint
+dberr_t
fts_config_set_index_ulint(
/*=======================*/
trx_t* trx, /*!< in: transaction */
@@ -337,7 +337,7 @@ fts_config_set_index_ulint(
const char* name, /*!< in: param name */
ulint int_value) /*!< in: value */
{
- ulint error;
+ dberr_t error;
fts_string_t value;
/* We set the length of value to the max bytes it can hold. This
@@ -356,8 +356,8 @@ fts_config_set_index_ulint(
if (UNIV_UNLIKELY(error != DB_SUCCESS)) {
ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Error: (%lu) writing `%s'\n",
- error, name);
+ fprintf(stderr, " InnoDB: Error: (%s) writing `%s'\n",
+ ut_strerr(error), name);
}
ut_free(value.f_str);
@@ -369,7 +369,7 @@ fts_config_set_index_ulint(
Get an ulint value from the config table.
@return DB_SUCCESS if all OK else error code */
UNIV_INTERN
-ulint
+dberr_t
fts_config_get_ulint(
/*=================*/
trx_t* trx, /*!< in: transaction */
@@ -378,7 +378,7 @@ fts_config_get_ulint(
const char* name, /*!< in: param name */
ulint* int_value) /*!< out: value */
{
- ulint error;
+ dberr_t error;
fts_string_t value;
/* We set the length of value to the max bytes it can hold. This
@@ -391,8 +391,8 @@ fts_config_get_ulint(
if (UNIV_UNLIKELY(error != DB_SUCCESS)) {
ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Error: (%lu) reading `%s'\n",
- error, name);
+ fprintf(stderr, " InnoDB: Error: (%s) reading `%s'\n",
+ ut_strerr(error), name);
} else {
*int_value = strtoul((char*) value.f_str, NULL, 10);
}
@@ -406,7 +406,7 @@ fts_config_get_ulint(
Set an ulint value in the config table.
@return DB_SUCCESS if all OK else error code */
UNIV_INTERN
-ulint
+dberr_t
fts_config_set_ulint(
/*=================*/
trx_t* trx, /*!< in: transaction */
@@ -415,7 +415,7 @@ fts_config_set_ulint(
const char* name, /*!< in: param name */
ulint int_value) /*!< in: value */
{
- ulint error;
+ dberr_t error;
fts_string_t value;
/* We set the length of value to the max bytes it can hold. This
@@ -434,8 +434,8 @@ fts_config_set_ulint(
if (UNIV_UNLIKELY(error != DB_SUCCESS)) {
ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Error: (%lu) writing `%s'\n",
- error, name);
+ fprintf(stderr, " InnoDB: Error: (%s) writing `%s'\n",
+ ut_strerr(error), name);
}
ut_free(value.f_str);
@@ -447,7 +447,7 @@ fts_config_set_ulint(
Increment the value in the config table for column name.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_config_increment_value(
/*=======================*/
trx_t* trx, /*!< transaction */
@@ -458,7 +458,7 @@ fts_config_increment_value(
ulint delta) /*!< in: increment by this
much */
{
- ulint error;
+ dberr_t error;
fts_string_t value;
que_t* graph = NULL;
ulint name_len = strlen(name);
@@ -520,8 +520,8 @@ fts_config_increment_value(
ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Error: (%lu) "
- "while incrementing %s.\n", error, name);
+ fprintf(stderr, " InnoDB: Error: (%s) "
+ "while incrementing %s.\n", ut_strerr(error), name);
}
ut_free(value.f_str);
@@ -533,7 +533,7 @@ fts_config_increment_value(
Increment the per index value in the config table for column name.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_config_increment_index_value(
/*=============================*/
trx_t* trx, /*!< transaction */
@@ -544,7 +544,7 @@ fts_config_increment_index_value(
much */
{
char* name;
- ulint error;
+ dberr_t error;
fts_table_t fts_table;
FTS_INIT_FTS_TABLE(&fts_table, "CONFIG", FTS_COMMON_TABLE,
diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc
index 7845ba2cb7e..a81d3043e9c 100644
--- a/storage/innobase/fts/fts0fts.cc
+++ b/storage/innobase/fts/fts0fts.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2011, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2011, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -36,12 +36,8 @@ Full Text Search interface
#include "dict0priv.h"
#include "dict0stats.h"
#include "btr0pcur.h"
-#include "row0row.h"
-#include "ha_prototypes.h"
-#ifdef UNIV_NONINL
-#include "fts0priv.ic"
-#endif
+#include "ha_prototypes.h"
#define FTS_MAX_ID_LEN 32
@@ -63,9 +59,6 @@ UNIV_INTERN ulong fts_min_token_size;
ib_time_t elapsed_time = 0;
ulint n_nodes = 0;
-typedef struct fts_schema_struct fts_schema_t;
-typedef struct fts_sys_table_struct fts_sys_table_t;
-
/** Error condition reported by fts_utf8_decode() */
const ulint UTF8_ERROR = 0xFFFFFFFF;
@@ -142,7 +135,7 @@ const char *fts_default_stopword[] =
};
/** For storing table info when checking for orphaned tables. */
-struct fts_sys_table_struct {
+struct fts_aux_table_t {
table_id_t id; /*!< Table id */
table_id_t parent_id; /*!< Parent table id */
table_id_t index_id; /*!< Table FT index id */
@@ -246,7 +239,7 @@ static const char* fts_config_table_insert_values_sql =
FTS_OPTIMIZE_LIMIT_IN_SECS "', '180');\n"
""
"INSERT INTO %s VALUES ('"
- FTS_SYNCED_DOC_ID "', '1');\n"
+ FTS_SYNCED_DOC_ID "', '0');\n"
""
"INSERT INTO %s VALUES ('"
FTS_TOTAL_DELETED_COUNT "', '0');\n"
@@ -257,12 +250,13 @@ static const char* fts_config_table_insert_values_sql =
/****************************************************************//**
Run SYNC on the table, i.e., write out data from the cache to the
FTS auxiliary INDEX table and clear the cache at the end.
-@return DB_SUCCESS if all OK */
+@return DB_SUCCESS if all OK */
static
-ulint
+dberr_t
fts_sync(
/*=====*/
- fts_sync_t* sync); /*!< in: sync state */
+ fts_sync_t* sync) /*!< in: sync state */
+ __attribute__((nonnull));
/****************************************************************//**
Release all resources help by the words rb tree e.g., the node ilist. */
@@ -270,7 +264,8 @@ static
void
fts_words_free(
/*===========*/
- ib_rbt_t* words); /*!< in: rb tree of words */
+ ib_rbt_t* words) /*!< in: rb tree of words */
+ __attribute__((nonnull));
#ifdef FTS_CACHE_SIZE_DEBUG
/****************************************************************//**
Read the max cache size parameter from the config table. */
@@ -294,19 +289,35 @@ fts_add_doc_by_id(
doc_id_t doc_id, /*!< in: doc id */
ib_vector_t* fts_indexes __attribute__((unused)));
/*!< in: affected fts indexes */
+#ifdef FTS_DOC_STATS_DEBUG
/****************************************************************//**
Check whether a particular word (term) exists in the FTS index.
@return DB_SUCCESS if all went fine */
static
-ulint
+dberr_t
fts_is_word_in_index(
/*=================*/
trx_t* trx, /*!< in: FTS query state */
que_t** graph, /*!< out: Query graph */
fts_table_t* fts_table, /*!< in: table instance */
const fts_string_t* word, /*!< in: the word to check */
- ibool* found); /*!< out: TRUE if exists */
+ ibool* found) /*!< out: TRUE if exists */
+ __attribute__((nonnull, warn_unused_result));
+#endif /* FTS_DOC_STATS_DEBUG */
+/******************************************************************//**
+Update the last document id. This function could create a new
+transaction to update the last document id.
+@return DB_SUCCESS if OK */
+static
+dberr_t
+fts_update_sync_doc_id(
+/*===================*/
+ const dict_table_t* table, /*!< in: table */
+ const char* table_name, /*!< in: table name, or NULL */
+ doc_id_t doc_id, /*!< in: last document id */
+ trx_t* trx) /*!< in: update trx, or NULL */
+ __attribute__((nonnull(1)));
/********************************************************************
Check if we should stop. */
UNIV_INLINE
@@ -443,7 +454,7 @@ fts_load_user_stopword(
{
pars_info_t* info;
que_t* graph;
- ulint error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
ibool ret = TRUE;
trx_t* trx;
ibool has_lock = fts->fts_status & TABLE_DICT_LOCKED;
@@ -507,9 +518,9 @@ fts_load_user_stopword(
trx->error_state = DB_SUCCESS;
} else {
- fprintf(stderr, " InnoDB: Error: %lu "
+ fprintf(stderr, " InnoDB: Error '%s' "
"while reading user stopword table.\n",
- error);
+ ut_strerr(error));
ret = FALSE;
break;
}
@@ -542,7 +553,7 @@ fts_index_cache_init(
index_cache->words = rbt_create_arg_cmp(
sizeof(fts_tokenizer_word_t), innobase_fts_text_cmp,
- index_cache->charset);
+ (void*) index_cache->charset);
ut_a(index_cache->doc_stats == NULL);
@@ -670,7 +681,7 @@ fts_add_index(
ib_vector_push(fts->indexes, &index);
- index_cache = (fts_index_cache_t*) fts_find_index_cache(cache, index);
+ index_cache = fts_find_index_cache(cache, index);
if (!index_cache) {
/* Add new index cache structure */
@@ -805,7 +816,7 @@ fts_check_cached_index(
Drop auxiliary tables related to an FTS index
@return DB_SUCCESS or error number */
UNIV_INTERN
-ulint
+dberr_t
fts_drop_index(
/*===========*/
dict_table_t* table, /*!< in: Table where indexes are dropped */
@@ -813,7 +824,7 @@ fts_drop_index(
trx_t* trx) /*!< in: Transaction for the drop */
{
ib_vector_t* indexes = table->fts->indexes;
- ulint err = DB_SUCCESS;
+ dberr_t err = DB_SUCCESS;
ut_a(indexes);
@@ -821,6 +832,8 @@ fts_drop_index(
&& (index == static_cast<dict_index_t*>(
ib_vector_getp(table->fts->indexes, 0))))
|| ib_vector_is_empty(indexes)) {
+ doc_id_t current_doc_id;
+ doc_id_t first_doc_id;
/* If we are dropping the only FTS index of the table,
remove it from optimize thread */
@@ -844,17 +857,20 @@ fts_drop_index(
return(err);
}
+ current_doc_id = table->fts->cache->next_doc_id;
+ first_doc_id = table->fts->cache->first_doc_id;
fts_cache_clear(table->fts->cache, TRUE);
fts_cache_destroy(table->fts->cache);
table->fts->cache = fts_cache_create(table);
+ table->fts->cache->next_doc_id = current_doc_id;
+ table->fts->cache->first_doc_id = first_doc_id;
} else {
fts_cache_t* cache = table->fts->cache;
fts_index_cache_t* index_cache;
rw_lock_x_lock(&cache->init_lock);
- index_cache = (fts_index_cache_t*) fts_find_index_cache(
- cache, index);
+ index_cache = fts_find_index_cache(cache, index);
if (index_cache->words) {
fts_words_free(index_cache->words);
@@ -1215,7 +1231,7 @@ fts_tokenizer_word_get(
if (rbt_search(cache->stopword_info.cached_stopword,
&parent, text) == 0) {
- return NULL;
+ return(NULL);
}
/* Check if we found a match, if not then add word to tree. */
@@ -1445,38 +1461,40 @@ fts_cache_add_doc(
/****************************************************************//**
Drops a table. If the table can't be found we return a SUCCESS code.
@return DB_SUCCESS or error code */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_drop_table(
/*===========*/
trx_t* trx, /*!< in: transaction */
const char* table_name) /*!< in: table to drop */
{
- ulint error = DB_SUCCESS;
+ dict_table_t* table;
+ dberr_t error = DB_SUCCESS;
- /* Check that the table exists in our data dictionary. */
- if (dict_table_get_low(table_name)) {
+ /* Check that the table exists in our data dictionary.
+ Similar to regular drop table case, we will open table with
+ DICT_ERR_IGNORE_INDEX_ROOT and DICT_ERR_IGNORE_CORRUPT option */
+ table = dict_table_open_on_name(
+ table_name, TRUE, FALSE,
+ static_cast<dict_err_ignore_t>(
+ DICT_ERR_IGNORE_INDEX_ROOT | DICT_ERR_IGNORE_CORRUPT));
-#ifdef FTS_INTERNAL_DIAG_PRINT
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Dropping %s\n", table_name);
-#endif
+ if (table != 0) {
- error = row_drop_table_for_mysql(table_name, trx, TRUE);
+ dict_table_close(table, TRUE, FALSE);
+
+ /* Pass nonatomic=false (dont allow data dict unlock),
+ because the transaction may hold locks on SYS_* tables from
+ previous calls to fts_drop_table(). */
+ error = row_drop_table_for_mysql(table_name, trx, true, false);
- /* We only return the status of the last error. */
if (error != DB_SUCCESS) {
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Error: (%lu) dropping "
- "FTS index table %s\n", error, table_name);
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Unable to drop FTS index aux table %s: %s",
+ table_name, ut_strerr(error));
}
} else {
- ut_print_timestamp(stderr);
-
- /* FIXME: Should provide appropriate error return code
- rather than printing message indiscriminately. */
- fprintf(stderr, " InnoDB: %s not found.\n",
- table_name);
+ error = DB_FAIL;
}
return(error);
@@ -1487,8 +1505,8 @@ Drops the common ancillary tables needed for supporting an FTS index
on the given table. row_mysql_lock_data_dictionary must have been called
before this.
@return DB_SUCCESS or error code */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_drop_common_tables(
/*===================*/
trx_t* trx, /*!< in: transaction */
@@ -1496,10 +1514,10 @@ fts_drop_common_tables(
index */
{
ulint i;
- ulint error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
for (i = 0; fts_common_tables[i] != NULL; ++i) {
- ulint err;
+ dberr_t err;
char* table_name;
fts_table->suffix = fts_common_tables[i];
@@ -1509,7 +1527,7 @@ fts_drop_common_tables(
err = fts_drop_table(trx, table_name);
/* We only return the status of the last error. */
- if (err != DB_SUCCESS) {
+ if (err != DB_SUCCESS && err != DB_FAIL) {
error = err;
}
@@ -1520,11 +1538,11 @@ fts_drop_common_tables(
}
/****************************************************************//**
-Since we do a horizontal split on the index table, we need to drop the
+Since we do a horizontal split on the index table, we need to drop
all the split tables.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_drop_index_split_tables(
/*========================*/
trx_t* trx, /*!< in: transaction */
@@ -1533,12 +1551,12 @@ fts_drop_index_split_tables(
{
ulint i;
fts_table_t fts_table;
- ulint error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
FTS_INIT_INDEX_TABLE(&fts_table, NULL, FTS_INDEX_TABLE, index);
for (i = 0; fts_index_selector[i].value; ++i) {
- ulint err;
+ dberr_t err;
char* table_name;
fts_table.suffix = fts_get_suffix(i);
@@ -1548,7 +1566,7 @@ fts_drop_index_split_tables(
err = fts_drop_table(trx, table_name);
/* We only return the status of the last error. */
- if (err != DB_SUCCESS) {
+ if (err != DB_SUCCESS && err != DB_FAIL) {
error = err;
}
@@ -1562,23 +1580,21 @@ fts_drop_index_split_tables(
Drops FTS auxiliary tables for an FTS index
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_drop_index_tables(
/*==================*/
trx_t* trx, /*!< in: transaction */
dict_index_t* index) /*!< in: Index to drop */
{
- ulint err;
- ulint error = DB_SUCCESS;
fts_table_t fts_table;
- ulint j;
+ dberr_t error = DB_SUCCESS;
static const char* index_tables[] = {
"DOC_ID",
NULL
};
- err = fts_drop_index_split_tables(trx, index);
+ dberr_t err = fts_drop_index_split_tables(trx, index);
/* We only return the status of the last error. */
if (err != DB_SUCCESS) {
@@ -1587,18 +1603,17 @@ fts_drop_index_tables(
FTS_INIT_INDEX_TABLE(&fts_table, NULL, FTS_INDEX_TABLE, index);
- for (j = 0; index_tables[j] != NULL; ++j) {
- ulint err;
+ for (ulint i = 0; index_tables[i] != NULL; ++i) {
char* table_name;
- fts_table.suffix = index_tables[j];
+ fts_table.suffix = index_tables[i];
table_name = fts_get_table_name(&fts_table);
err = fts_drop_table(trx, table_name);
/* We only return the status of the last error. */
- if (err != DB_SUCCESS) {
+ if (err != DB_SUCCESS && err != DB_FAIL) {
error = err;
}
@@ -1613,18 +1628,20 @@ Drops FTS ancillary tables needed for supporting an FTS index
on the given table. row_mysql_lock_data_dictionary must have been called
before this.
@return DB_SUCCESS or error code */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_drop_all_index_tables(
/*======================*/
trx_t* trx, /*!< in: transaction */
fts_t* fts) /*!< in: fts instance */
{
- ulint i;
- ulint error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
- for (i = 0; i < ib_vector_size(fts->indexes); ++i) {
- ulint err;
+ for (ulint i = 0;
+ fts->indexes != 0 && i < ib_vector_size(fts->indexes);
+ ++i) {
+
+ dberr_t err;
dict_index_t* index;
index = static_cast<dict_index_t*>(
@@ -1646,17 +1663,19 @@ given table. row_mysql_lock_data_dictionary must have been called before
this.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_drop_tables(
/*============*/
trx_t* trx, /*!< in: transaction */
dict_table_t* table) /*!< in: table has the FTS index */
{
- ulint error;
+ dberr_t error;
fts_table_t fts_table;
FTS_INIT_FTS_TABLE(&fts_table, NULL, FTS_COMMON_TABLE, table);
+ /* TODO: This is not atomic and can cause problems during recovery. */
+
error = fts_drop_common_tables(trx, &fts_table);
if (error == DB_SUCCESS) {
@@ -1692,20 +1711,20 @@ on the given table. row_mysql_lock_data_dictionary must have been called
before this.
@return DB_SUCCESS if succeed */
UNIV_INTERN
-ulint
+dberr_t
fts_create_common_tables(
/*=====================*/
- trx_t* trx, /*!< in: transaction */
- const dict_table_t* table, /*!< in: table with FTS index */
- const char* name, /*!< in: table name normalized.*/
- ibool skip_doc_id_index) /*!< in: Skip index on doc id */
-
+ trx_t* trx, /*!< in: transaction */
+ const dict_table_t* table, /*!< in: table with FTS index */
+ const char* name, /*!< in: table name normalized.*/
+ bool skip_doc_id_index)/*!< in: Skip index on doc id */
{
char* sql;
- ulint error;
+ dberr_t error;
que_t* graph;
fts_table_t fts_table;
mem_heap_t* heap = mem_heap_create(1024);
+ pars_info_t* info;
FTS_INIT_FTS_TABLE(&fts_table, NULL, FTS_COMMON_TABLE, table);
@@ -1744,17 +1763,23 @@ fts_create_common_tables(
goto func_exit;
}
+ info = pars_info_create();
+
+ pars_info_bind_id(info, TRUE, "table_name", name);
+ pars_info_bind_id(info, TRUE, "index_name", FTS_DOC_ID_INDEX_NAME);
+ pars_info_bind_id(info, TRUE, "doc_id_col_name", FTS_DOC_ID_COL_NAME);
+
/* Create the FTS DOC_ID index on the hidden column. Currently this
is common for any FT index created on the table. */
graph = fts_parse_sql_no_dict_lock(
NULL,
- NULL,
+ info,
mem_heap_printf(
heap,
"BEGIN\n"
""
- "CREATE UNIQUE INDEX %s ON %s(%s);\n",
- FTS_DOC_ID_INDEX_NAME, name, FTS_DOC_ID_COL_NAME));
+ "CREATE UNIQUE INDEX $index_name ON $table_name("
+ "$doc_id_col_name);\n"));
error = fts_eval_sql(trx, graph);
que_graph_free(graph);
@@ -1794,7 +1819,7 @@ fts_create_one_index_table(
dict_field_t* field;
dict_table_t* new_table = NULL;
char* table_name = fts_get_table_name(fts_table);
- ulint error;
+ dberr_t error;
CHARSET_INFO* charset;
ut_ad(index->type & DICT_FTS);
@@ -1828,14 +1853,14 @@ fts_create_one_index_table(
dict_mem_table_add_col(new_table, heap, "ilist", DATA_BLOB,
4130048, 0);
- error = row_create_table_for_mysql(new_table, trx);
+ error = row_create_table_for_mysql(new_table, trx, true);
if (error != DB_SUCCESS) {
- trx->error_state = static_cast<db_err>(error);
+ trx->error_state = error;
dict_mem_table_free(new_table);
new_table = NULL;
- fprintf(stderr, " InnoDB: Warning: Fail to create FTS "
- " index table %s \n", table_name);
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Fail to create FTS index table %s", table_name);
}
mem_free(table_name);
@@ -1848,7 +1873,7 @@ Wrapper function of fts_create_index_tables_low(), create auxiliary
tables for an FTS index
@return: DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_create_index_tables_low(
/*========================*/
trx_t* trx, /*!< in: transaction */
@@ -1862,7 +1887,7 @@ fts_create_index_tables_low(
char* sql;
que_t* graph;
fts_table_t fts_table;
- ulint error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
mem_heap_t* heap = mem_heap_create(1024);
fts_table.type = FTS_INDEX_TABLE;
@@ -1874,6 +1899,7 @@ fts_create_index_tables_low(
/* Create the FTS auxiliary tables that are specific
to an FTS index. */
sql = fts_prepare_sql(&fts_table, fts_create_index_tables_sql);
+
graph = fts_parse_sql_no_dict_lock(NULL, NULL, sql);
mem_free(sql);
@@ -1903,9 +1929,7 @@ fts_create_index_tables_low(
que_graph_free(graph);
}
- if (error == DB_SUCCESS) {
- error = fts_sql_commit(trx);
- } else {
+ if (error != DB_SUCCESS) {
/* We have special error handling here */
trx->error_state = DB_SUCCESS;
@@ -1928,18 +1952,25 @@ FTS index on the given table. row_mysql_lock_data_dictionary must have
been called before this.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_create_index_tables(
/*====================*/
trx_t* trx, /*!< in: transaction */
const dict_index_t* index) /*!< in: the index instance */
{
+ dberr_t err;
dict_table_t* table;
table = dict_table_get_low(index->table_name);
ut_a(table != NULL);
- return(fts_create_index_tables_low(trx, index, table->name, table->id));
+ err = fts_create_index_tables_low(trx, index, table->name, table->id);
+
+ if (err == DB_SUCCESS) {
+ trx_commit(trx);
+ }
+
+ return(err);
}
#if 0
/******************************************************************//**
@@ -1953,22 +1984,22 @@ fts_get_state_str(
{
switch (state) {
case FTS_INSERT:
- return "INSERT";
+ return("INSERT");
case FTS_MODIFY:
- return "MODIFY";
+ return("MODIFY");
case FTS_DELETE:
- return "DELETE";
+ return("DELETE");
case FTS_NOTHING:
- return "NOTHING";
+ return("NOTHING");
case FTS_INVALID:
- return "INVALID";
+ return("INVALID");
default:
- return "UNKNOWN";
+ return("UNKNOWN");
}
}
#endif
@@ -2321,7 +2352,7 @@ fts_get_max_cache_size(
trx_t* trx, /*!< in: transaction */
fts_table_t* fts_table) /*!< in: table instance */
{
- ulint error;
+ dberr_t error;
fts_string_t value;
ulint cache_size_in_mb;
@@ -2381,32 +2412,19 @@ fts_get_max_cache_size(
}
#endif
-/*********************************************************************//**
-Get the total number of documents in the FTS.
-@return estimated number of rows in the table */
-UNIV_INTERN
-ulint
-fts_get_total_document_count(
-/*=========================*/
- dict_table_t* table) /*!< in: table instance */
-{
- ut_ad(table->stat_initialized);
-
- return((ulint) table->stat_n_rows);
-}
-
+#ifdef FTS_DOC_STATS_DEBUG
/*********************************************************************//**
Get the total number of words in the FTS for a particular FTS index.
@return DB_SUCCESS if all OK else error code */
UNIV_INTERN
-ulint
+dberr_t
fts_get_total_word_count(
/*=====================*/
trx_t* trx, /*!< in: transaction */
dict_index_t* index, /*!< in: for this index */
ulint* total) /* out: total words */
{
- ulint error;
+ dberr_t error;
fts_string_t value;
*total = 0;
@@ -2426,14 +2444,15 @@ fts_get_total_word_count(
*total = strtoul((char*) value.f_str, NULL, 10);
} else {
ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Error: (%lu) reading total words "
- "value from config table\n", error);
+ fprintf(stderr, " InnoDB: Error: (%s) reading total words "
+ "value from config table\n", ut_strerr(error));
}
ut_free(value.f_str);
return(error);
}
+#endif /* FTS_DOC_STATS_DEBUG */
/*********************************************************************//**
Update the next and last Doc ID in the CONFIG table to be the input
@@ -2443,8 +2462,9 @@ UNIV_INTERN
void
fts_update_next_doc_id(
/*===================*/
+ trx_t* trx, /*!< in/out: transaction */
const dict_table_t* table, /*!< in: table */
- const char* table_name, /*!< in: table name */
+ const char* table_name, /*!< in: table name, or NULL */
doc_id_t doc_id) /*!< in: DOC ID to set */
{
table->fts->cache->synced_doc_id = doc_id;
@@ -2453,7 +2473,7 @@ fts_update_next_doc_id(
table->fts->cache->first_doc_id = table->fts->cache->next_doc_id;
fts_update_sync_doc_id(
- table, table_name, table->fts->cache->synced_doc_id, NULL);
+ table, table_name, table->fts->cache->synced_doc_id, trx);
}
@@ -2461,7 +2481,7 @@ fts_update_next_doc_id(
Get the next available document id.
@return DB_SUCCESS if OK */
UNIV_INTERN
-ulint
+dberr_t
fts_get_next_doc_id(
/*================*/
const dict_table_t* table, /*!< in: table */
@@ -2494,8 +2514,8 @@ fts_get_next_doc_id(
This function fetch the Doc ID from CONFIG table, and compare with
the Doc ID supplied. And store the larger one to the CONFIG table.
@return DB_SUCCESS if OK */
-UNIV_INTERN
-ulint
+static __attribute__((nonnull))
+dberr_t
fts_cmp_set_sync_doc_id(
/*====================*/
const dict_table_t* table, /*!< in: table */
@@ -2509,7 +2529,7 @@ fts_cmp_set_sync_doc_id(
{
trx_t* trx;
pars_info_t* info;
- ulint error;
+ dberr_t error;
fts_table_t fts_table;
que_t* graph = NULL;
fts_cache_t* cache = table->fts->cache;
@@ -2559,8 +2579,6 @@ retry:
goto func_exit;
}
- ut_a(*doc_id > 0);
-
if (read_only) {
goto func_exit;
}
@@ -2594,8 +2612,8 @@ func_exit:
*doc_id = 0;
ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Error: (%lu) "
- "while getting next doc id.\n", error);
+ fprintf(stderr, " InnoDB: Error: (%s) "
+ "while getting next doc id.\n", ut_strerr(error));
fts_sql_rollback(trx);
@@ -2614,23 +2632,23 @@ func_exit:
Update the last document id. This function could create a new
transaction to update the last document id.
@return DB_SUCCESS if OK */
-UNIV_INTERN
-ulint
+static
+dberr_t
fts_update_sync_doc_id(
/*===================*/
const dict_table_t* table, /*!< in: table */
- const char* table_name, /*!< in: table name */
+ const char* table_name, /*!< in: table name, or NULL */
doc_id_t doc_id, /*!< in: last document id */
- trx_t* trx) /*!< in: update trx */
+ trx_t* trx) /*!< in: update trx, or NULL */
{
byte id[FTS_MAX_ID_LEN];
pars_info_t* info;
fts_table_t fts_table;
ulint id_len;
que_t* graph = NULL;
- ulint error;
+ dberr_t error;
ibool local_trx = FALSE;
- fts_cache_t* cache = table->fts->cache;;
+ fts_cache_t* cache = table->fts->cache;
fts_table.suffix = "CONFIG";
fts_table.table_id = table->id;
@@ -2651,8 +2669,7 @@ fts_update_sync_doc_id(
info = pars_info_create();
- // FIXME: Get rid of snprintf
- id_len = snprintf(
+ id_len = ut_snprintf(
(char*) id, sizeof(id), FTS_DOC_ID_FORMAT, doc_id + 1);
pars_info_bind_varchar_literal(info, "doc_id", id, id_len);
@@ -2672,9 +2689,10 @@ fts_update_sync_doc_id(
fts_sql_commit(trx);
cache->synced_doc_id = doc_id;
} else {
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Error: (%lu) "
- "while updating last doc id.\n", error);
+
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "(%s) while updating last doc id.",
+ ut_strerr(error));
fts_sql_rollback(trx);
}
@@ -2725,15 +2743,15 @@ fts_doc_ids_free(
/*********************************************************************//**
Do commit-phase steps necessary for the insertion of a new row.
@return DB_SUCCESS or error code */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_add(
/*====*/
fts_trx_table_t*ftt, /*!< in: FTS trx table */
fts_trx_row_t* row) /*!< in: row */
{
dict_table_t* table = ftt->table;
- ulint error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
doc_id_t doc_id = row->doc_id;
ut_a(row->state == FTS_INSERT || row->state == FTS_MODIFY);
@@ -2757,8 +2775,8 @@ fts_add(
/*********************************************************************//**
Do commit-phase steps necessary for the deletion of a row.
@return DB_SUCCESS or error code */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_delete(
/*=======*/
fts_trx_table_t*ftt, /*!< in: FTS trx table */
@@ -2766,7 +2784,7 @@ fts_delete(
{
que_t* graph;
fts_table_t fts_table;
- ulint error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
doc_id_t write_doc_id;
dict_table_t* table = ftt->table;
doc_id_t doc_id = row->doc_id;
@@ -2848,14 +2866,14 @@ fts_delete(
/*********************************************************************//**
Do commit-phase steps necessary for the modification of a row.
@return DB_SUCCESS or error code */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_modify(
/*=======*/
fts_trx_table_t* ftt, /*!< in: FTS trx table */
fts_trx_row_t* row) /*!< in: row */
{
- ulint error;
+ dberr_t error;
ut_a(row->state == FTS_MODIFY);
@@ -2872,7 +2890,7 @@ fts_modify(
Create a new document id.
@return DB_SUCCESS if all went well else error */
UNIV_INTERN
-ulint
+dberr_t
fts_create_doc_id(
/*==============*/
dict_table_t* table, /*!< in: row is of this table. */
@@ -2882,7 +2900,7 @@ fts_create_doc_id(
mem_heap_t* heap) /*!< in: heap */
{
doc_id_t doc_id;
- ulint error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
ut_a(table->fts->doc_col != ULINT_UNDEFINED);
@@ -2919,15 +2937,15 @@ fts_create_doc_id(
The given transaction is about to be committed; do whatever is necessary
from the FTS system's POV.
@return DB_SUCCESS or error code */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_commit_table(
/*=============*/
fts_trx_table_t* ftt) /*!< in: FTS table to commit*/
{
const ib_rbt_node_t* node;
ib_rbt_t* rows;
- ulint error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
fts_cache_t* cache = ftt->table->fts->cache;
trx_t* trx = trx_allocate_for_background();
@@ -2979,13 +2997,13 @@ The given transaction is about to be committed; do whatever is necessary
from the FTS system's POV.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_commit(
/*=======*/
trx_t* trx) /*!< in: transaction */
{
const ib_rbt_node_t* node;
- ulint error;
+ dberr_t error;
ib_rbt_t* tables;
fts_savepoint_t* savepoint;
@@ -3008,10 +3026,9 @@ fts_commit(
}
/*********************************************************************//**
-Create a new empty document.
-@return new document */
+Initialize a document. */
UNIV_INTERN
-fts_doc_t*
+void
fts_doc_init(
/*=========*/
fts_doc_t* doc) /*!< in: doc to initialize */
@@ -3021,8 +3038,6 @@ fts_doc_init(
memset(doc, 0, sizeof(*doc));
doc->self_heap = ib_heap_allocator_create(heap);
-
- return(doc);
}
/*********************************************************************//**
@@ -3075,7 +3090,7 @@ fts_fetch_row_id(
/*********************************************************************//**
Callback function for fetch that stores the text of an FTS document,
converting each column to UTF-16.
-@return: always returns FALSE */
+@return always FALSE */
UNIV_INTERN
ibool
fts_query_expansion_fetch_doc(
@@ -3467,13 +3482,15 @@ fts_get_max_doc_id(
dfield = dict_index_get_nth_field(index, 0);
+#if 0 /* This can fail when renaming a column to FTS_DOC_ID_COL_NAME. */
ut_ad(innobase_strcasecmp(FTS_DOC_ID_COL_NAME, dfield->name) == 0);
+#endif
mtr_start(&mtr);
/* fetch the largest indexes value */
btr_pcur_open_at_index_side(
- FALSE, index, BTR_SEARCH_LEAF, &pcur, TRUE, &mtr);
+ false, index, BTR_SEARCH_LEAF, &pcur, true, 0, &mtr);
if (page_get_n_recs(btr_pcur_get_page(&pcur)) > 0) {
const rec_t* rec = NULL;
@@ -3516,13 +3533,14 @@ func_exit:
Fetch document with the given document id.
@return DB_SUCCESS if OK else error */
UNIV_INTERN
-ulint
+dberr_t
fts_doc_fetch_by_doc_id(
/*====================*/
fts_get_doc_t* get_doc, /*!< in: state */
doc_id_t doc_id, /*!< in: id of document to
fetch */
- dict_index_t* index_to_use, /*!< in: caller supplied FTS index */
+ dict_index_t* index_to_use, /*!< in: caller supplied FTS index,
+ or NULL */
ulint option, /*!< in: search option, if it is
greater than doc_id or equal */
fts_sql_callback
@@ -3530,7 +3548,7 @@ fts_doc_fetch_by_doc_id(
void* arg) /*!< in: callback arg */
{
pars_info_t* info;
- ulint error;
+ dberr_t error;
const char* select_str;
doc_id_t write_doc_id;
dict_index_t* index;
@@ -3555,6 +3573,7 @@ fts_doc_fetch_by_doc_id(
pars_info_bind_function(info, "my_func", callback, arg);
select_str = fts_get_select_columns_str(index, info, info->heap);
+ pars_info_bind_id(info, TRUE, "table_name", index->table_name);
if (!get_doc || !get_doc->get_document_graph) {
if (option == FTS_FETCH_DOC_BY_ID_EQUAL) {
@@ -3564,7 +3583,7 @@ fts_doc_fetch_by_doc_id(
mem_heap_printf(info->heap,
"DECLARE FUNCTION my_func;\n"
"DECLARE CURSOR c IS"
- " SELECT %s FROM %s"
+ " SELECT %s FROM $table_name"
" WHERE %s = :doc_id;\n"
"BEGIN\n"
""
@@ -3576,20 +3595,32 @@ fts_doc_fetch_by_doc_id(
" END IF;\n"
"END LOOP;\n"
"CLOSE c;",
- select_str, index->table_name,
- FTS_DOC_ID_COL_NAME));
+ select_str, FTS_DOC_ID_COL_NAME));
} else {
ut_ad(option == FTS_FETCH_DOC_BY_ID_LARGE);
+ /* This is used for crash recovery of table with
+ hidden DOC ID or FTS indexes. We will scan the table
+ to re-processing user table rows whose DOC ID or
+ FTS indexed documents have not been sync-ed to disc
+ during recent crash.
+ In the case that all fulltext indexes are dropped
+ for a table, we will keep the "hidden" FTS_DOC_ID
+ column, and this scan is to retreive the largest
+ DOC ID being used in the table to determine the
+ appropriate next DOC ID.
+ In the case of there exists fulltext index(es), this
+ operation will re-tokenize any docs that have not
+ been sync-ed to the disk, and re-prime the FTS
+ cached */
graph = fts_parse_sql(
NULL,
info,
mem_heap_printf(info->heap,
"DECLARE FUNCTION my_func;\n"
"DECLARE CURSOR c IS"
- " SELECT %s, %s FROM %s"
- " WHERE %s > :doc_id"
- " ORDER BY %s;\n"
+ " SELECT %s, %s FROM $table_name"
+ " WHERE %s > :doc_id;\n"
"BEGIN\n"
""
"OPEN c;\n"
@@ -3601,9 +3632,7 @@ fts_doc_fetch_by_doc_id(
"END LOOP;\n"
"CLOSE c;",
FTS_DOC_ID_COL_NAME,
- select_str, index->table_name,
- FTS_DOC_ID_COL_NAME,
- FTS_DOC_ID_COL_NAME));
+ select_str, FTS_DOC_ID_COL_NAME));
}
if (get_doc) {
get_doc->get_document_graph = graph;
@@ -3633,7 +3662,7 @@ fts_doc_fetch_by_doc_id(
Write out a single word's data as new entry/entries in the INDEX table.
@return DB_SUCCESS if all OK. */
UNIV_INTERN
-ulint
+dberr_t
fts_write_node(
/*===========*/
trx_t* trx, /*!< in: transaction */
@@ -3643,7 +3672,7 @@ fts_write_node(
fts_node_t* node) /*!< in: node columns */
{
pars_info_t* info;
- ulint error;
+ dberr_t error;
ib_uint32_t doc_count;
ib_time_t start_time;
doc_id_t last_doc_id;
@@ -3698,8 +3727,8 @@ fts_write_node(
/*********************************************************************//**
Add rows to the DELETED_CACHE table.
@return DB_SUCCESS if all went well else error code*/
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_sync_add_deleted_cache(
/*=======================*/
fts_sync_t* sync, /*!< in: sync state */
@@ -3710,7 +3739,7 @@ fts_sync_add_deleted_cache(
que_t* graph;
fts_table_t fts_table;
doc_id_t dummy = 0;
- ulint error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
ulint n_elems = ib_vector_size(doc_ids);
ut_a(ib_vector_size(doc_ids) > 0);
@@ -3748,9 +3777,10 @@ fts_sync_add_deleted_cache(
}
/*********************************************************************//**
-Write the words and ilist to disk.*/
-static
-ulint
+Write the words and ilist to disk.
+@return DB_SUCCESS if all went well else error code */
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_sync_write_words(
/*=================*/
trx_t* trx, /*!< in: transaction */
@@ -3761,10 +3791,12 @@ fts_sync_write_words(
ulint n_nodes = 0;
ulint n_words = 0;
const ib_rbt_node_t* rbt_node;
- ulint n_new_words = 0;
- ulint error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
ibool print_error = FALSE;
+#ifdef FTS_DOC_STATS_DEBUG
dict_table_t* table = index_cache->index->table;
+ ulint n_new_words = 0;
+#endif /* FTS_DOC_STATS_DEBUG */
FTS_INIT_INDEX_TABLE(
&fts_table, NULL, FTS_INDEX_TABLE, index_cache->index);
@@ -3789,9 +3821,10 @@ fts_sync_write_words(
fts_table.suffix = fts_get_suffix(selected);
+#ifdef FTS_DOC_STATS_DEBUG
/* Check if the word exists in the FTS index and if not
then we need to increment the total word count stats. */
- if (error == DB_SUCCESS) {
+ if (error == DB_SUCCESS && fts_enable_diag_print) {
ibool found = FALSE;
error = fts_is_word_in_index(
@@ -3805,6 +3838,7 @@ fts_sync_write_words(
++n_new_words;
}
}
+#endif /* FTS_DOC_STATS_DEBUG */
n_nodes += ib_vector_size(word->nodes);
@@ -3829,9 +3863,9 @@ fts_sync_write_words(
if (error != DB_SUCCESS && !print_error) {
ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Error (%lu) writing "
+ fprintf(stderr, " InnoDB: Error (%s) writing "
"word node to FTS auxiliary index "
- "table.\n", error);
+ "table.\n", ut_strerr(error));
print_error = TRUE;
}
@@ -3840,19 +3874,23 @@ fts_sync_write_words(
ut_free(rbt_remove_node(index_cache->words, rbt_node));
}
- if (error == DB_SUCCESS && n_new_words > 0) {
+#ifdef FTS_DOC_STATS_DEBUG
+ if (error == DB_SUCCESS && n_new_words > 0 && fts_enable_diag_print) {
fts_table_t fts_table;
FTS_INIT_FTS_TABLE(&fts_table, NULL, FTS_COMMON_TABLE, table);
/* Increment the total number of words in the FTS index */
- fts_config_increment_index_value(
+ error = fts_config_increment_index_value(
trx, index_cache->index, FTS_TOTAL_WORD_COUNT,
n_new_words);
}
+#endif /* FTS_DOC_STATS_DEBUG */
- printf("Avg number of nodes: %lf\n",
- (double) n_nodes / (double) (n_words > 1 ? n_words : 1));
+ if (fts_enable_diag_print) {
+ printf("Avg number of nodes: %lf\n",
+ (double) n_nodes / (double) (n_words > 1 ? n_words : 1));
+ }
return(error);
}
@@ -3861,8 +3899,8 @@ fts_sync_write_words(
/*********************************************************************//**
Write a single documents statistics to disk.
@return DB_SUCCESS if all went well else error code */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_sync_write_doc_stat(
/*====================*/
trx_t* trx, /*!< in: transaction */
@@ -3872,7 +3910,7 @@ fts_sync_write_doc_stat(
{
pars_info_t* info;
doc_id_t doc_id;
- ulint error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
ib_uint32_t word_count;
if (*graph) {
@@ -3918,9 +3956,9 @@ fts_sync_write_doc_stat(
trx->error_state = DB_SUCCESS;
} else {
- fprintf(stderr, " InnoDB: Error: %lu "
+ fprintf(stderr, " InnoDB: Error: (%s) "
"while writing to FTS doc_id.\n",
- error);
+ ut_strerr(error));
break; /* Exit the loop. */
}
@@ -3940,7 +3978,7 @@ fts_sync_write_doc_stats(
trx_t* trx, /*!< in: transaction */
const fts_index_cache_t*index_cache) /*!< in: index cache */
{
- ulint error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
que_t* graph = NULL;
fts_doc_stats_t* doc_stat;
@@ -3973,7 +4011,6 @@ fts_sync_write_doc_stats(
return(error);
}
-#endif /* FTS_DOC_STATS_DEBUG */
/*********************************************************************//**
Callback to check the existince of a word.
@@ -4007,13 +4044,12 @@ fts_lookup_word(
}
/*********************************************************************//**
-Check whether a particular word (term) exists in the FTS index. */
+Check whether a particular word (term) exists in the FTS index.
+@return DB_SUCCESS if all went well else error code */
static
-ulint
+dberr_t
fts_is_word_in_index(
/*=================*/
- /* out: DB_SUCCESS if all went
- well else error code */
trx_t* trx, /*!< in: FTS query state */
que_t** graph, /* out: Query graph */
fts_table_t* fts_table, /*!< in: table instance */
@@ -4022,7 +4058,7 @@ fts_is_word_in_index(
ibool* found) /* out: TRUE if exists */
{
pars_info_t* info;
- ulint error;
+ dberr_t error;
trx->op_info = "looking up word in FTS index";
@@ -4073,8 +4109,9 @@ fts_is_word_in_index(
trx->error_state = DB_SUCCESS;
} else {
- fprintf(stderr, " InnoDB: Error: %lu "
- "while reading FTS index.\n", error);
+ fprintf(stderr, " InnoDB: Error: (%s) "
+ "while reading FTS index.\n",
+ ut_strerr(error));
break; /* Exit the loop. */
}
@@ -4083,6 +4120,7 @@ fts_is_word_in_index(
return(error);
}
+#endif /* FTS_DOC_STATS_DEBUG */
/*********************************************************************//**
Begin Sync, create transaction, acquire locks, etc. */
@@ -4101,29 +4139,36 @@ fts_sync_begin(
sync->trx = trx_allocate_for_background();
- ut_print_timestamp(stderr);
- fprintf(stderr, " SYNC deleted count: %ld size: %lu bytes\n",
- ib_vector_size(cache->deleted_doc_ids), cache->total_size);
+ if (fts_enable_diag_print) {
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "FTS SYNC for table %s, deleted count: %ld size: "
+ "%lu bytes",
+ sync->table->name,
+ ib_vector_size(cache->deleted_doc_ids),
+ cache->total_size);
+ }
}
/*********************************************************************//**
Run SYNC on the table, i.e., write out data from the index specific
-cache to the FTS aux INDEX table and FTS aux doc id stats table. */
-static
-ulint
+cache to the FTS aux INDEX table and FTS aux doc id stats table.
+@return DB_SUCCESS if all OK */
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_sync_index(
/*===========*/
- /* out: DB_SUCCESS if all OK */
fts_sync_t* sync, /*!< in: sync state */
fts_index_cache_t* index_cache) /*!< in: index cache */
{
trx_t* trx = sync->trx;
- ulint error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
trx->op_info = "doing SYNC index";
- ut_print_timestamp(stderr);
- fprintf(stderr, " SYNC words: %ld\n", rbt_size(index_cache->words));
+ if (fts_enable_diag_print) {
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "SYNC words: %ld", rbt_size(index_cache->words));
+ }
ut_ad(rbt_validate(index_cache->words));
@@ -4146,13 +4191,13 @@ fts_sync_index(
/*********************************************************************//**
Commit the SYNC, change state of processed doc ids etc.
@return DB_SUCCESS if all OK */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_sync_commit(
/*============*/
fts_sync_t* sync) /*!< in: sync state */
{
- ulint error;
+ dberr_t error;
trx_t* trx = sync->trx;
fts_cache_t* cache = sync->table->fts->cache;
doc_id_t last_doc_id;
@@ -4191,13 +4236,18 @@ fts_sync_commit(
fts_sql_rollback(trx);
ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Error: (%lu) during SYNC.\n", error);
+ fprintf(stderr, " InnoDB: Error: (%s) during SYNC.\n",
+ ut_strerr(error));
}
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: SYNC time : %lusecs: elapsed %lf ins/sec\n",
- (ulong) (ut_time() - sync->start_time),
- (double) n_nodes/ (double) elapsed_time);
+ if (fts_enable_diag_print && elapsed_time) {
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "SYNC for table %s: SYNC time : %lu secs: "
+ "elapsed %lf ins/sec",
+ sync->table->name,
+ (ulong) (ut_time() - sync->start_time),
+ (double) n_nodes/ (double) elapsed_time);
+ }
trx_free_for_background(trx);
@@ -4226,13 +4276,13 @@ Run SYNC on the table, i.e., write out data from the cache to the
FTS auxiliary INDEX table and clear the cache at the end.
@return DB_SUCCESS if all OK */
static
-ulint
+dberr_t
fts_sync(
/*=====*/
fts_sync_t* sync) /*!< in: sync state */
{
ulint i;
- ulint error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
fts_cache_t* cache = sync->table->fts->cache;
rw_lock_x_lock(&cache->lock);
@@ -4275,34 +4325,28 @@ fts_sync(
/****************************************************************//**
Run SYNC on the table, i.e., write out data from the cache to the
-FTS auxiliary INDEX table and clear the cache at the end.
-@return DB_SUCCESS if all OK */
+FTS auxiliary INDEX table and clear the cache at the end. */
UNIV_INTERN
-ulint
+void
fts_sync_table(
/*===========*/
dict_table_t* table) /*!< in: table */
{
- ulint error = DB_SUCCESS;
-
ut_ad(table->fts);
if (table->fts->cache) {
fts_sync(table->fts->cache->sync);
}
-
- return(error);
}
/********************************************************************
Process next token from document starting at the given position, i.e., add
-the token's start position to the token's list of positions. */
+the token's start position to the token's list of positions.
+@return number of characters handled in this call */
static
ulint
fts_process_token(
/*==============*/
- /* out: number of characters
- handled in this call */
fts_doc_t* doc, /* in/out: document to
tokenize */
fts_doc_t* result, /* out: if provided, save
@@ -4406,7 +4450,7 @@ fts_tokenize_document(
ut_a(doc->charset);
doc->tokens = rbt_create_arg_cmp(
- sizeof(fts_token_t), innobase_fts_text_cmp, doc->charset);
+ sizeof(fts_token_t), innobase_fts_text_cmp, (void*) doc->charset);
for (ulint i = 0; i < doc->text.f_len; i += inc) {
inc = fts_process_token(doc, result, i, 0);
@@ -4473,6 +4517,7 @@ fts_get_docs_create(
memset(get_doc, 0x0, sizeof(*get_doc));
get_doc->index_cache = fts_get_index_cache(cache, *index);
+ get_doc->cache = cache;
/* Must find the index cache. */
ut_a(get_doc->index_cache != NULL);
@@ -4520,11 +4565,14 @@ fts_init_doc_id(
rw_lock_x_lock(&table->fts->cache->lock);
+ /* Return if the table is already initialized for DOC ID */
if (table->fts->cache->first_doc_id != FTS_NULL_DOC_ID) {
rw_lock_x_unlock(&table->fts->cache->lock);
return(0);
}
+ DEBUG_SYNC_C("fts_initialize_doc_id");
+
/* Then compare this value with the ID value stored in the CONFIG
table. The larger one will be our new initial Doc ID */
fts_cmp_set_sync_doc_id(table, 0, FALSE, &max_doc_id);
@@ -4591,7 +4639,7 @@ fts_get_rows_count(
trx_t* trx;
pars_info_t* info;
que_t* graph;
- ulint error;
+ dberr_t error;
ulint count = 0;
trx = trx_allocate_for_background();
@@ -4639,9 +4687,9 @@ fts_get_rows_count(
trx->error_state = DB_SUCCESS;
} else {
- fprintf(stderr, " InnoDB: Error: %lu "
+ fprintf(stderr, " InnoDB: Error: (%s) "
"while reading FTS table.\n",
- error);
+ ut_strerr(error));
break; /* Exit the loop. */
}
@@ -4678,7 +4726,7 @@ fts_update_max_cache_size(
trx_free_for_background(trx);
}
-#endif
+#endif /* FTS_CACHE_SIZE_DEBUG */
/*********************************************************************//**
Free the modified rows of a table. */
@@ -4861,13 +4909,13 @@ fts_get_doc_id_from_rec(
col_no = dict_col_get_clust_pos(
&table->cols[table->fts->doc_col], clust_index);
+ ut_ad(col_no != ULINT_UNDEFINED);
- /* We have no choice but to cast rec here :-( */
- data = rec_get_nth_field((rec_t*) rec, offsets, col_no, &len);
+ data = rec_get_nth_field(rec, offsets, col_no, &len);
ut_a(len == 8);
- ut_a(len == sizeof(doc_id));
- doc_id = (doc_id_t) mach_read_from_8(data);
+ ut_ad(8 == sizeof(doc_id));
+ doc_id = static_cast<doc_id_t>(mach_read_from_8(data));
return(doc_id);
}
@@ -4876,7 +4924,7 @@ fts_get_doc_id_from_rec(
Search the index specific cache for a particular FTS index.
@return the index specific cache else NULL */
UNIV_INTERN
-const fts_index_cache_t*
+fts_index_cache_t*
fts_find_index_cache(
/*=================*/
const fts_cache_t* cache, /*!< in: cache to search */
@@ -4884,7 +4932,8 @@ fts_find_index_cache(
{
/* We cast away the const because our internal function, takes
non-const cache arg and returns a non-const pointer. */
- return(fts_get_index_cache((fts_cache_t*) cache, index));
+ return(static_cast<fts_index_cache_t*>(
+ fts_get_index_cache((fts_cache_t*) cache, index)));
}
/*********************************************************************//**
@@ -4960,7 +5009,7 @@ fts_cache_append_deleted_doc_ids(
{
ulint i;
- mutex_enter((mutex_t*) &cache->deleted_lock);
+ mutex_enter((ib_mutex_t*) &cache->deleted_lock);
for (i = 0; i < ib_vector_size(cache->deleted_doc_ids); ++i) {
fts_update_t* update;
@@ -4971,7 +5020,7 @@ fts_cache_append_deleted_doc_ids(
ib_vector_push(vector, &update->doc_id);
}
- mutex_exit((mutex_t*) &cache->deleted_lock);
+ mutex_exit((ib_mutex_t*) &cache->deleted_lock);
}
/*********************************************************************//**
@@ -5043,11 +5092,11 @@ UNIV_INTERN
void
fts_add_doc_id_column(
/*==================*/
- dict_table_t* table) /*!< in/out: Table with FTS index */
+ dict_table_t* table, /*!< in/out: Table with FTS index */
+ mem_heap_t* heap) /*!< in: temporary memory heap, or NULL */
{
dict_mem_table_add_col(
- table,
- table->heap,
+ table, heap,
FTS_DOC_ID_COL_NAME,
DATA_INT,
dtype_form_prtype(
@@ -5069,7 +5118,7 @@ fts_update_doc_id(
doc_id_t* next_doc_id) /*!< in/out: buffer for writing */
{
doc_id_t doc_id;
- ulint error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
if (*next_doc_id) {
doc_id = *next_doc_id;
@@ -5236,13 +5285,12 @@ fts_savepoint_copy(
ftt_dst = fts_trx_table_clone(*ftt_src);
- rbt_insert(dst->tables, &ftt_dst->table->id, &ftt_dst);
+ rbt_insert(dst->tables, &ftt_dst, &ftt_dst);
}
}
/*********************************************************************//**
-Take a FTS savepoint.
-@return DB_SUCCESS or error code */
+Take a FTS savepoint. */
UNIV_INTERN
void
fts_savepoint_take(
@@ -5312,7 +5360,6 @@ fts_savepoint_release(
const char* name) /*!< in: savepoint name */
{
ulint i;
- fts_savepoint_t* prev;
ib_vector_t* savepoints;
ulint top_of_stack = 0;
@@ -5322,9 +5369,6 @@ fts_savepoint_release(
ut_a(ib_vector_size(savepoints) > 0);
- prev = static_cast<fts_savepoint_t*>(
- ib_vector_get(savepoints, top_of_stack));
-
/* Skip the implied savepoint (first element). */
for (i = 1; i < ib_vector_size(savepoints); ++i) {
fts_savepoint_t* savepoint;
@@ -5338,17 +5382,6 @@ fts_savepoint_release(
we have to skip deleted/released entries. */
if (savepoint->name != NULL
&& strcmp(name, savepoint->name) == 0) {
-
- fts_savepoint_t* last;
- fts_savepoint_t temp;
-
- last = static_cast<fts_savepoint_t*>(
- ib_vector_last(savepoints));
-
- /* Swap the entries. */
- memcpy(&temp, last, sizeof(temp));
- memcpy(last, prev, sizeof(*last));
- memcpy(prev, &temp, sizeof(*prev));
break;
/* Track the previous savepoint instance that will
@@ -5357,8 +5390,6 @@ fts_savepoint_release(
/* We need to delete all entries
greater than this element. */
top_of_stack = i;
-
- prev = savepoint;
}
}
@@ -5395,8 +5426,7 @@ fts_savepoint_release(
}
/**********************************************************************//**
-Refresh last statement savepoint.
-@return DB_SUCCESS or error code */
+Refresh last statement savepoint. */
UNIV_INTERN
void
fts_savepoint_laststmt_refresh(
@@ -5588,7 +5618,7 @@ static
ibool
fts_is_aux_table_name(
/*==================*/
- fts_sys_table_t*table, /*!< out: table info */
+ fts_aux_table_t*table, /*!< out: table info */
const char* name, /*!< in: table name */
ulint len) /*!< in: length of table name */
{
@@ -5614,7 +5644,6 @@ fts_is_aux_table_name(
if (ptr != NULL && len > 20 && strncmp(ptr, "FTS_", 4) == 0) {
ulint i;
-
/* Skip the prefix. */
ptr += 4;
len -= 4;
@@ -5689,7 +5718,7 @@ fts_read_tables(
void* user_arg) /*!< in: pointer to ib_vector_t */
{
int i;
- fts_sys_table_t*table;
+ fts_aux_table_t*table;
mem_heap_t* heap;
ibool done = FALSE;
ib_vector_t* tables = static_cast<ib_vector_t*>(user_arg);
@@ -5701,7 +5730,7 @@ fts_read_tables(
/* We will use this heap for allocating strings. */
heap = static_cast<mem_heap_t*>(tables->allocator->arg);
- table = static_cast<fts_sys_table_t*>(ib_vector_push(tables, NULL));
+ table = static_cast<fts_aux_table_t*>(ib_vector_push(tables, NULL));
memset(table, 0x0, sizeof(*table));
@@ -5726,9 +5755,9 @@ fts_read_tables(
}
table->name = static_cast<char*>(
- mem_heap_dup(heap, data, len + 1));
- table->name[len] = '\0';
- printf("Found [%.*s]\n", (int) len, table->name);
+ mem_heap_alloc(heap, len + 1));
+ memcpy(table->name, data, len);
+ table->name[len] = 0;
break;
case 1: /* ID */
@@ -5749,41 +5778,41 @@ fts_read_tables(
Check and drop all orphaned FTS auxiliary tables, those that don't have
a parent table or FTS index defined on them.
@return DB_SUCCESS or error code */
-static
-ulint
+static __attribute__((nonnull))
+void
fts_check_and_drop_orphaned_tables(
/*===============================*/
trx_t* trx, /*!< in: transaction */
ib_vector_t* tables) /*!< in: tables to check */
{
- ulint i;
- ulint error = DB_SUCCESS;
-
- for (i = 0; i < ib_vector_size(tables); ++i) {
+ for (ulint i = 0; i < ib_vector_size(tables); ++i) {
dict_table_t* table;
- fts_sys_table_t* sys_table;
- ibool drop = FALSE;
+ fts_aux_table_t* aux_table;
+ bool drop = false;
- sys_table = static_cast<fts_sys_table_t*>(
+ aux_table = static_cast<fts_aux_table_t*>(
ib_vector_get(tables, i));
- table = dict_table_open_on_id(sys_table->parent_id, FALSE);
+ table = dict_table_open_on_id(
+ aux_table->parent_id, TRUE, FALSE);
if (table == NULL || table->fts == NULL) {
- drop = TRUE;
+ drop = true;
- } else if (sys_table->index_id != 0) {
- ulint j;
+ } else if (aux_table->index_id != 0) {
index_id_t id;
- fts_t* fts;
+ fts_t* fts;
- drop = TRUE;
+ drop = true;
fts = table->fts;
- id = sys_table->index_id;
+ id = aux_table->index_id;
/* Search for the FT index in the table's list. */
- for (j = 0; j < ib_vector_size(fts->indexes); ++j) {
+ for (ulint j = 0;
+ j < ib_vector_size(fts->indexes);
+ ++j) {
+
const dict_index_t* index;
index = static_cast<const dict_index_t*>(
@@ -5791,28 +5820,36 @@ fts_check_and_drop_orphaned_tables(
if (index->id == id) {
- drop = FALSE;
+ drop = false;
break;
}
}
}
if (table) {
- dict_table_close(table, FALSE);
+ dict_table_close(table, TRUE, FALSE);
}
if (drop) {
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Warning: Parent table of "
- "FT auxiliary table %s not found.\n",
- sys_table->name);
- /* We ignore drop errors. */
- fts_drop_table(trx, sys_table->name);
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Parent table of FTS auxiliary table %s not "
+ "found.", aux_table->name);
+
+ dberr_t err = fts_drop_table(trx, aux_table->name);
+
+ if (err == DB_FAIL) {
+ char* path;
+
+ path = fil_make_ibd_name(
+ aux_table->name, false);
+
+ os_file_delete_if_exists(path);
+
+ mem_free(path);
+ }
}
}
-
- return(error);
}
/**********************************************************************//**
@@ -5823,19 +5860,62 @@ void
fts_drop_orphaned_tables(void)
/*==========================*/
{
- trx_t* trx;
- pars_info_t* info;
- mem_heap_t* heap;
- que_t* graph;
- ib_vector_t* tables;
- ib_alloc_t* heap_alloc;
- ulint error = DB_SUCCESS;
+ trx_t* trx;
+ pars_info_t* info;
+ mem_heap_t* heap;
+ que_t* graph;
+ ib_vector_t* tables;
+ ib_alloc_t* heap_alloc;
+ space_name_list_t space_name_list;
+ dberr_t error = DB_SUCCESS;
+
+ /* Note: We have to free the memory after we are done with the list. */
+ error = fil_get_space_names(space_name_list);
+
+ if (error == DB_OUT_OF_MEMORY) {
+ ib_logf(IB_LOG_LEVEL_ERROR, "Out of memory");
+ ut_error;
+ }
heap = mem_heap_create(1024);
heap_alloc = ib_heap_allocator_create(heap);
/* We store the table ids of all the FTS indexes that were found. */
- tables = ib_vector_create(heap_alloc, sizeof(fts_sys_table_t), 128);
+ tables = ib_vector_create(heap_alloc, sizeof(fts_aux_table_t), 128);
+
+ /* Get the list of all known .ibd files and check for orphaned
+ FTS auxiliary files in that list. We need to remove them because
+ users can't map them back to table names and this will create
+ unnecessary clutter. */
+
+ for (space_name_list_t::iterator it = space_name_list.begin();
+ it != space_name_list.end();
+ ++it) {
+
+ fts_aux_table_t* fts_aux_table;
+
+ fts_aux_table = static_cast<fts_aux_table_t*>(
+ ib_vector_push(tables, NULL));
+
+ memset(fts_aux_table, 0x0, sizeof(*fts_aux_table));
+
+ if (!fts_is_aux_table_name(fts_aux_table, *it, strlen(*it))) {
+ ib_vector_pop(tables);
+ } else {
+ ulint len = strlen(*it);
+
+ fts_aux_table->id = fil_get_space_id_for_table(*it);
+
+ /* We got this list from fil0fil.cc. The tablespace
+ with this name must exist. */
+ ut_a(fts_aux_table->id != ULINT_UNDEFINED);
+
+ fts_aux_table->name = static_cast<char*>(
+ mem_heap_dup(heap, *it, len + 1));
+
+ fts_aux_table->name[len] = 0;
+ }
+ }
trx = trx_allocate_for_background();
trx->op_info = "dropping orphaned FTS tables";
@@ -5867,10 +5947,7 @@ fts_drop_orphaned_tables(void)
error = fts_eval_sql(trx, graph);
if (error == DB_SUCCESS) {
- error = fts_check_and_drop_orphaned_tables(trx, tables);
- }
-
- if (error == DB_SUCCESS) {
+ fts_check_and_drop_orphaned_tables(trx, tables);
fts_sql_commit(trx);
break; /* Exit the loop. */
} else {
@@ -5881,15 +5958,15 @@ fts_drop_orphaned_tables(void)
ut_print_timestamp(stderr);
if (error == DB_LOCK_WAIT_TIMEOUT) {
- fprintf(stderr, " InnoDB: Warning: lock wait "
- "timeout reading SYS_TABLES. "
- "Retrying!\n");
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "lock wait timeout reading SYS_TABLES. "
+ "Retrying!");
trx->error_state = DB_SUCCESS;
} else {
- fprintf(stderr, " InnoDB: Error: %lu "
- "while reading SYS_TABLES.\n",
- error);
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "(%s) while reading SYS_TABLES.",
+ ut_strerr(error));
break; /* Exit the loop. */
}
@@ -5905,6 +5982,14 @@ fts_drop_orphaned_tables(void)
if (heap != NULL) {
mem_heap_free(heap);
}
+
+ /** Free the memory allocated to store the .ibd names. */
+ for (space_name_list_t::iterator it = space_name_list.begin();
+ it != space_name_list.end();
+ ++it) {
+
+ delete[] *it;
+ }
}
/**********************************************************************//**
@@ -5986,7 +6071,7 @@ fts_load_stopword(
{
fts_table_t fts_table;
fts_string_t str;
- ulint error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
ulint use_stopword;
fts_cache_t* cache;
const char* stopword_to_use = NULL;
@@ -6086,6 +6171,43 @@ cleanup:
/**********************************************************************//**
Callback function when we initialize the FTS at the start up
+time. It recovers the maximum Doc IDs presented in the current table.
+@return: always returns TRUE */
+static
+ibool
+fts_init_get_doc_id(
+/*================*/
+ void* row, /*!< in: sel_node_t* */
+ void* user_arg) /*!< in: fts cache */
+{
+ doc_id_t doc_id = FTS_NULL_DOC_ID;
+ sel_node_t* node = static_cast<sel_node_t*>(row);
+ que_node_t* exp = node->select_list;
+ fts_cache_t* cache = static_cast<fts_cache_t*>(user_arg);
+
+ ut_ad(ib_vector_is_empty(cache->get_docs));
+
+ /* Copy each indexed column content into doc->text.f_str */
+ if (exp) {
+ dfield_t* dfield = que_node_get_val(exp);
+ dtype_t* type = dfield_get_type(dfield);
+ void* data = dfield_get_data(dfield);
+
+ ut_a(dtype_get_mtype(type) == DATA_INT);
+
+ doc_id = static_cast<doc_id_t>(mach_read_from_8(
+ static_cast<const byte*>(data)));
+
+ if (doc_id >= cache->next_doc_id) {
+ cache->next_doc_id = doc_id + 1;
+ }
+ }
+
+ return(TRUE);
+}
+
+/**********************************************************************//**
+Callback function when we initialize the FTS at the start up
time. It recovers Doc IDs that have not sync-ed to the auxiliary
table, and require to bring them back into FTS index.
@return: always returns TRUE */
@@ -6100,22 +6222,16 @@ fts_init_recover_doc(
fts_doc_t doc;
ulint doc_len = 0;
ulint field_no = 0;
- ibool has_fts = TRUE;
- fts_get_doc_t* get_doc = NULL;
+ fts_get_doc_t* get_doc = static_cast<fts_get_doc_t*>(user_arg);
doc_id_t doc_id = FTS_NULL_DOC_ID;
sel_node_t* node = static_cast<sel_node_t*>(row);
que_node_t* exp = node->select_list;
- fts_cache_t* cache = static_cast<fts_cache_t*>(user_arg);
+ fts_cache_t* cache = get_doc->cache;
- if (ib_vector_is_empty(cache->get_docs)) {
- has_fts = FALSE;
- } else {
- get_doc = static_cast<fts_get_doc_t*>(
- ib_vector_get(cache->get_docs, 0));
+ fts_doc_init(&doc);
+ doc.found = TRUE;
- fts_doc_init(&doc);
- doc.found = TRUE;
- }
+ ut_ad(cache);
/* Copy each indexed column content into doc->text.f_str */
while (exp) {
@@ -6131,18 +6247,11 @@ fts_init_recover_doc(
doc_id = static_cast<doc_id_t>(mach_read_from_8(
static_cast<const byte*>(data)));
- /* Just need to fetch the Doc ID */
- if (!has_fts) {
- goto func_exit;
- }
-
field_no++;
exp = que_node_get_next(exp);
continue;
}
- ut_a(has_fts);
-
if (len == UNIV_SQL_NULL) {
exp = que_node_get_next(exp);
continue;
@@ -6196,7 +6305,6 @@ fts_init_recover_doc(
cache->added++;
-func_exit:
if (doc_id >= cache->next_doc_id) {
cache->next_doc_id = doc_id + 1;
}
@@ -6223,6 +6331,9 @@ fts_init_index(
fts_get_doc_t* get_doc = NULL;
ibool has_fts = TRUE;
fts_cache_t* cache = table->fts->cache;
+ bool need_init = false;
+
+ ut_ad(!mutex_own(&dict_sys->mutex));
/* First check cache->get_docs is initialized */
if (!has_cache_lock) {
@@ -6239,6 +6350,8 @@ fts_init_index(
goto func_exit;
}
+ need_init = true;
+
start_doc = cache->synced_doc_id;
if (!start_doc) {
@@ -6250,28 +6363,32 @@ fts_init_index(
dropped, and we re-initialize the Doc ID system for subsequent
insertion */
if (ib_vector_is_empty(cache->get_docs)) {
- index = dict_table_get_first_index(table);
+ index = dict_table_get_index_on_name(table, FTS_DOC_ID_INDEX_NAME);
+
+ ut_a(index);
+
has_fts = FALSE;
+ fts_doc_fetch_by_doc_id(NULL, start_doc, index,
+ FTS_FETCH_DOC_BY_ID_LARGE,
+ fts_init_get_doc_id, cache);
} else {
- /* We only have one FTS index per table */
- get_doc = static_cast<fts_get_doc_t*>(
- ib_vector_get(cache->get_docs, 0));
+ for (ulint i = 0; i < ib_vector_size(cache->get_docs); ++i) {
+ get_doc = static_cast<fts_get_doc_t*>(
+ ib_vector_get(cache->get_docs, i));
- index = get_doc->index_cache->index;
- }
+ index = get_doc->index_cache->index;
- fts_doc_fetch_by_doc_id(NULL, start_doc, index,
- FTS_FETCH_DOC_BY_ID_LARGE,
- fts_init_recover_doc, cache);
+ fts_doc_fetch_by_doc_id(NULL, start_doc, index,
+ FTS_FETCH_DOC_BY_ID_LARGE,
+ fts_init_recover_doc, get_doc);
+ }
+ }
if (has_fts) {
if (table->fts->cache->stopword_info.status
& STOPWORD_NOT_INIT) {
fts_load_stopword(table, NULL, NULL, NULL, TRUE, TRUE);
}
-
- /* Register the table with the optimize thread. */
- fts_optimize_add_table(table);
}
table->fts->fts_status |= ADDED_TABLE_SYNCED;
@@ -6283,5 +6400,12 @@ func_exit:
rw_lock_x_unlock(&cache->lock);
}
+ if (need_init) {
+ mutex_enter(&dict_sys->mutex);
+ /* Register the table with the optimize thread. */
+ fts_optimize_add_table(table);
+ mutex_exit(&dict_sys->mutex);
+ }
+
return(TRUE);
}
diff --git a/storage/innobase/fts/fts0opt.cc b/storage/innobase/fts/fts0opt.cc
index 92e040d2715..9abeeccac91 100644
--- a/storage/innobase/fts/fts0opt.cc
+++ b/storage/innobase/fts/fts0opt.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2007, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2007, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -39,26 +39,29 @@ Completed 2011/7/10 Sunny and Jimmy Yang
#include "fts0vlc.ic"
#endif
-/* The FTS optimize thread's work queue. */
+/** The FTS optimize thread's work queue. */
static ib_wqueue_t* fts_optimize_wq;
-/* The number of document ids to delete in one statement. */
+/** The number of document ids to delete in one statement. */
static const ulint FTS_MAX_DELETE_DOC_IDS = 1000;
-/* Time to wait for a message. */
+/** Time to wait for a message. */
static const ulint FTS_QUEUE_WAIT_IN_USECS = 5000000;
-/* Default optimize interval in secs. */
+/** Default optimize interval in secs. */
static const ulint FTS_OPTIMIZE_INTERVAL_IN_SECS = 300;
+/** Server is shutting down, so does we exiting the optimize thread */
+static bool fts_opt_start_shutdown = false;
+
#if 0
-/* Check each table in round robin to see whether they'd
+/** Check each table in round robin to see whether they'd
need to be "optimized" */
static ulint fts_optimize_sync_iterator = 0;
#endif
/** State of a table within the optimization sub system. */
-enum fts_state_enum {
+enum fts_state_t {
FTS_STATE_LOADED,
FTS_STATE_RUNNING,
FTS_STATE_SUSPENDED,
@@ -67,7 +70,7 @@ enum fts_state_enum {
};
/** FTS optimize thread message types. */
-enum fts_msg_type_enum {
+enum fts_msg_type_t {
FTS_MSG_START, /*!< Start optimizing thread */
FTS_MSG_PAUSE, /*!< Pause optimizing thread */
@@ -83,21 +86,9 @@ enum fts_msg_type_enum {
threads work queue */
};
-typedef enum fts_state_enum fts_state_t;
-typedef struct fts_zip_struct fts_zip_t;
-typedef struct fts_msg_struct fts_msg_t;
-typedef struct fts_slot_struct fts_slot_t;
-typedef struct fts_encode_struct fts_encode_t;
-typedef enum fts_msg_type_enum fts_msg_type_t;
-typedef struct fts_msg_del_struct fts_msg_del_t;
-typedef struct fts_msg_stop_struct fts_msg_stop_t;
-typedef struct fts_optimize_struct fts_optimize_t;
-typedef struct fts_msg_optimize_struct fts_msg_optimize_t;
-typedef struct fts_optimize_graph_struct fts_optimize_graph_t;
-
/** Compressed list of words that have been read from FTS INDEX
that needs to be optimized. */
-struct fts_zip_struct {
+struct fts_zip_t {
ulint status; /*!< Status of (un)/zip operation */
ulint n_words; /*!< Number of words compressed */
@@ -128,7 +119,7 @@ struct fts_zip_struct {
};
/** Prepared statemets used during optimize */
-struct fts_optimize_graph_struct {
+struct fts_optimize_graph_t {
/*!< Delete a word from FTS INDEX */
que_t* delete_nodes_graph;
/*!< Insert a word into FTS INDEX */
@@ -140,7 +131,7 @@ struct fts_optimize_graph_struct {
};
/** Used by fts_optimize() to store state. */
-struct fts_optimize_struct {
+struct fts_optimize_t {
trx_t* trx; /*!< The transaction used for all SQL */
ib_alloc_t* self_heap; /*!< Heap to use for allocations */
@@ -183,14 +174,14 @@ struct fts_optimize_struct {
};
/** Used by the optimize, to keep state during compacting nodes. */
-struct fts_encode_struct {
+struct fts_encode_t {
doc_id_t src_last_doc_id;/*!< Last doc id read from src node */
byte* src_ilist_ptr; /*!< Current ptr within src ilist */
};
/** We use this information to determine when to start the optimize
cycle for a table. */
-struct fts_slot_struct {
+struct fts_slot_t {
dict_table_t* table; /*!< Table to optimize */
fts_state_t state; /*!< State of this slot */
@@ -210,7 +201,7 @@ struct fts_slot_struct {
};
/** A table remove message for the FTS optimize thread. */
-struct fts_msg_del_struct {
+struct fts_msg_del_t {
dict_table_t* table; /*!< The table to remove */
os_event_t event; /*!< Event to synchronize acknowledgement
@@ -219,12 +210,12 @@ struct fts_msg_del_struct {
};
/** Stop the optimize thread. */
-struct fts_msg_optimize_struct {
+struct fts_msg_optimize_t {
dict_table_t* table; /*!< Table to optimize */
};
/** The FTS optimize message work queue message type. */
-struct fts_msg_struct {
+struct fts_msg_t {
fts_msg_type_t type; /*!< Message type */
void* ptr; /*!< The message contents */
@@ -466,9 +457,9 @@ fts_optimize_index_fetch_node(
/**********************************************************************//**
Read the rows from the FTS inde.
-@return vector of rows fetched */
+@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_index_fetch_nodes(
/*==================*/
trx_t* trx, /*!< in: transaction */
@@ -479,7 +470,7 @@ fts_index_fetch_nodes(
fts_fetch_t* fetch) /*!< in: fetch callback.*/
{
pars_info_t* info;
- ulint error;
+ dberr_t error;
trx->op_info = "fetching FTS index nodes";
@@ -543,8 +534,9 @@ fts_index_fetch_nodes(
trx->error_state = DB_SUCCESS;
} else {
- fprintf(stderr, " InnoDB: Error: %lu "
- "while reading FTS index.\n", error);
+ fprintf(stderr, " InnoDB: Error: (%s) "
+ "while reading FTS index.\n",
+ ut_strerr(error));
break; /* Exit the loop. */
}
@@ -781,8 +773,8 @@ fts_zip_deflate_end(
Read the words from the FTS INDEX.
@return DB_SUCCESS if all OK, DB_TABLE_NOT_FOUND if no more indexes
to search else error code */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_index_fetch_words(
/*==================*/
fts_optimize_t* optim, /*!< in: optimize scratch pad */
@@ -794,7 +786,7 @@ fts_index_fetch_words(
que_t* graph;
ulint selected;
fts_zip_t* zip = NULL;
- ulint error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
mem_heap_t* heap = static_cast<mem_heap_t*>(optim->self_heap->arg);
ibool inited = FALSE;
@@ -849,13 +841,14 @@ fts_index_fetch_words(
zip = optim->zip;
for(;;) {
+ int err;
- if (!inited && ((error = deflateInit(zip->zp, 9))
+ if (!inited && ((err = deflateInit(zip->zp, 9))
!= Z_OK)) {
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Error: ZLib deflateInit() "
- "failed: %lu\n", error);
+ "failed: %d\n", err);
error = DB_ERROR;
break;
@@ -885,9 +878,9 @@ fts_index_fetch_words(
optim->trx->error_state = DB_SUCCESS;
} else {
- fprintf(stderr, " InnoDB: Error: %lu "
+ fprintf(stderr, " InnoDB: Error: (%s) "
"while reading document.\n",
- error);
+ ut_strerr(error));
break; /* Exit the loop. */
}
@@ -962,14 +955,14 @@ fts_fetch_doc_ids(
Read the rows from a FTS common auxiliary table.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_table_fetch_doc_ids(
/*====================*/
trx_t* trx, /*!< in: transaction */
fts_table_t* fts_table, /*!< in: table */
fts_doc_ids_t* doc_ids) /*!< in: For collecting doc ids */
{
- ulint error;
+ dberr_t error;
que_t* graph;
pars_info_t* info = pars_info_create();
ibool alloc_bk_trx = FALSE;
@@ -1114,8 +1107,8 @@ fts_optimize_lookup(
/**********************************************************************//**
Encode the word pos list into the node
@return DB_SUCCESS or error code*/
-static
-ulint
+static __attribute__((nonnull))
+dberr_t
fts_optimize_encode_node(
/*=====================*/
fts_node_t* node, /*!< in: node to fill*/
@@ -1126,7 +1119,7 @@ fts_optimize_encode_node(
ulint enc_len;
ulint pos_enc_len;
doc_id_t doc_id_delta;
- ulint error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
byte* src = enc->src_ilist_ptr;
if (node->first_doc_id == 0) {
@@ -1202,8 +1195,8 @@ fts_optimize_encode_node(
/**********************************************************************//**
Optimize the data contained in a node.
@return DB_SUCCESS or error code*/
-static
-ulint
+static __attribute__((nonnull))
+dberr_t
fts_optimize_node(
/*==============*/
ib_vector_t* del_vec, /*!< in: vector of doc ids to delete*/
@@ -1213,7 +1206,7 @@ fts_optimize_node(
fts_encode_t* enc) /*!< in: encoding state */
{
ulint copied;
- ulint error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
doc_id_t doc_id = enc->src_last_doc_id;
if (!enc->src_ilist_ptr) {
@@ -1299,8 +1292,8 @@ test_again:
/**********************************************************************//**
Determine the starting pos within the deleted doc id vector for a word.
-@return DB_SUCCESS or error code */
-static
+@return delete position */
+static __attribute__((nonnull, warn_unused_result))
int
fts_optimize_deleted_pos(
/*=====================*/
@@ -1428,8 +1421,8 @@ fts_optimize_word(
/**********************************************************************//**
Update the FTS index table. This is a delete followed by an insert.
@return DB_SUCCESS or error code */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_optimize_write_word(
/*====================*/
trx_t* trx, /*!< in: transaction */
@@ -1441,7 +1434,7 @@ fts_optimize_write_word(
pars_info_t* info;
que_t* graph;
ulint selected;
- ulint error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
char* table_name = fts_get_table_name(fts_table);
info = pars_info_create();
@@ -1470,8 +1463,9 @@ fts_optimize_write_word(
if (error != DB_SUCCESS) {
ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Error: (%lu) during optimize, "
- "when deleting a word from the FTS index.\n", error);
+ fprintf(stderr, " InnoDB: Error: (%s) during optimize, "
+ "when deleting a word from the FTS index.\n",
+ ut_strerr(error));
}
fts_que_graph_free(graph);
@@ -1491,9 +1485,10 @@ fts_optimize_write_word(
if (error != DB_SUCCESS) {
ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Error: (%lu) "
+ fprintf(stderr, " InnoDB: Error: (%s) "
"during optimize, while adding a "
- "word to the FTS index.\n", error);
+ "word to the FTS index.\n",
+ ut_strerr(error));
}
}
@@ -1529,8 +1524,8 @@ fts_word_free(
/**********************************************************************//**
Optimize the word ilist and rewrite data to the FTS index.
@return status one of RESTART, EXIT, ERROR */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_optimize_compact(
/*=================*/
fts_optimize_t* optim, /*!< in: optimize state data */
@@ -1538,7 +1533,7 @@ fts_optimize_compact(
ib_time_t start_time) /*!< in: optimize start time */
{
ulint i;
- ulint error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
ulint size = ib_vector_size(optim->words);
for (i = 0; i < size && error == DB_SUCCESS && !optim->done; ++i) {
@@ -1622,77 +1617,63 @@ fts_optimize_create(
/**********************************************************************//**
Get optimize start time of an FTS index.
@return DB_SUCCESS if all OK else error code */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_optimize_get_index_start_time(
/*==============================*/
trx_t* trx, /*!< in: transaction */
dict_index_t* index, /*!< in: FTS index */
ib_time_t* start_time) /*!< out: time in secs */
{
- ulint error;
-
- error = fts_config_get_index_ulint(
- trx, index, FTS_OPTIMIZE_START_TIME, (ulint*) start_time);
-
- return(error);
+ return(fts_config_get_index_ulint(
+ trx, index, FTS_OPTIMIZE_START_TIME,
+ (ulint*) start_time));
}
/**********************************************************************//**
Set the optimize start time of an FTS index.
@return DB_SUCCESS if all OK else error code */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_optimize_set_index_start_time(
/*==============================*/
trx_t* trx, /*!< in: transaction */
dict_index_t* index, /*!< in: FTS index */
ib_time_t start_time) /*!< in: start time */
{
- ulint error;
-
- error = fts_config_set_index_ulint(
- trx, index, FTS_OPTIMIZE_START_TIME, (ulint) start_time);
-
- return(error);
+ return(fts_config_set_index_ulint(
+ trx, index, FTS_OPTIMIZE_START_TIME,
+ (ulint) start_time));
}
/**********************************************************************//**
Get optimize end time of an FTS index.
@return DB_SUCCESS if all OK else error code */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_optimize_get_index_end_time(
/*============================*/
trx_t* trx, /*!< in: transaction */
dict_index_t* index, /*!< in: FTS index */
ib_time_t* end_time) /*!< out: time in secs */
{
- ulint error;
-
- error = fts_config_get_index_ulint(
- trx, index, FTS_OPTIMIZE_END_TIME, (ulint*) end_time);
-
- return(error);
+ return(fts_config_get_index_ulint(
+ trx, index, FTS_OPTIMIZE_END_TIME, (ulint*) end_time));
}
/**********************************************************************//**
Set the optimize end time of an FTS index.
@return DB_SUCCESS if all OK else error code */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_optimize_set_index_end_time(
/*============================*/
trx_t* trx, /*!< in: transaction */
dict_index_t* index, /*!< in: FTS index */
ib_time_t end_time) /*!< in: end time */
{
- ulint error;
-
- error = fts_config_set_index_ulint(
- trx, index, FTS_OPTIMIZE_END_TIME, (ulint) end_time);
-
- return(error);
+ return(fts_config_set_index_ulint(
+ trx, index, FTS_OPTIMIZE_END_TIME, (ulint) end_time));
}
#endif
@@ -1798,7 +1779,7 @@ fts_optimize_words(
fprintf(stderr, "%.*s\n", (int) word->f_len, word->f_str);
while(!optim->done) {
- ulint error;
+ dberr_t error;
trx_t* trx = optim->trx;
ulint selected;
@@ -1901,15 +1882,15 @@ fts_optimize_set_next_word(
Optimize is complete. Set the completion time, and reset the optimize
start string for this FTS index to "".
@return DB_SUCCESS if all OK */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_optimize_index_completed(
/*=========================*/
fts_optimize_t* optim, /*!< in: optimize instance */
dict_index_t* index) /*!< in: table with one FTS index */
{
fts_string_t word;
- ulint error;
+ dberr_t error;
byte buf[sizeof(ulint)];
#ifdef FTS_OPTIMIZE_DEBUG
ib_time_t end_time = ut_time();
@@ -1929,8 +1910,8 @@ fts_optimize_index_completed(
if (error != DB_SUCCESS) {
- fprintf(stderr, "InnoDB: Error: (%lu) while "
- "updating last optimized word!\n", error);
+ fprintf(stderr, "InnoDB: Error: (%s) while "
+ "updating last optimized word!\n", ut_strerr(error));
}
return(error);
@@ -1941,15 +1922,15 @@ fts_optimize_index_completed(
Read the list of words from the FTS auxiliary index that will be
optimized in this pass.
@return DB_SUCCESS if all OK */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_optimize_index_read_words(
/*==========================*/
fts_optimize_t* optim, /*!< in: optimize instance */
dict_index_t* index, /*!< in: table with one FTS index */
fts_string_t* word) /*!< in: buffer to use */
{
- ulint error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
if (optim->del_list_regenerated) {
word->f_len = 0;
@@ -1998,15 +1979,15 @@ fts_optimize_index_read_words(
Run OPTIMIZE on the given FTS index. Note: this can take a very long
time (hours).
@return DB_SUCCESS if all OK */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_optimize_index(
/*===============*/
fts_optimize_t* optim, /*!< in: optimize instance */
dict_index_t* index) /*!< in: table with one FTS index */
{
fts_string_t word;
- ulint error;
+ dberr_t error;
byte str[FTS_MAX_WORD_LEN + 1];
/* Set the current index that we have to optimize. */
@@ -2069,8 +2050,8 @@ fts_optimize_index(
/**********************************************************************//**
Delete the document ids in the delete, and delete cache tables.
@return DB_SUCCESS if all OK */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_optimize_purge_deleted_doc_ids(
/*===============================*/
fts_optimize_t* optim) /*!< in: optimize instance */
@@ -2081,7 +2062,7 @@ fts_optimize_purge_deleted_doc_ids(
fts_update_t* update;
char* sql_str;
doc_id_t write_doc_id;
- ulint error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
info = pars_info_create();
@@ -2138,13 +2119,13 @@ fts_optimize_purge_deleted_doc_ids(
/**********************************************************************//**
Delete the document ids in the pending delete, and delete tables.
@return DB_SUCCESS if all OK */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_optimize_purge_deleted_doc_id_snapshot(
/*=======================================*/
fts_optimize_t* optim) /*!< in: optimize instance */
{
- ulint error;
+ dberr_t error;
que_t* graph;
char* sql_str;
@@ -2188,13 +2169,13 @@ Copy the deleted doc ids that will be purged during this optimize run
to the being deleted FTS auxiliary tables. The transaction is committed
upon successfull copy and rolled back on DB_DUPLICATE_KEY error.
@return DB_SUCCESS if all OK */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_optimize_create_deleted_doc_id_snapshot(
/*========================================*/
fts_optimize_t* optim) /*!< in: optimize instance */
{
- ulint error;
+ dberr_t error;
que_t* graph;
char* sql_str;
@@ -2226,13 +2207,13 @@ fts_optimize_create_deleted_doc_id_snapshot(
Read in the document ids that are to be purged during optimize. The
transaction is committed upon successfully read.
@return DB_SUCCESS if all OK */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_optimize_read_deleted_doc_id_snapshot(
/*======================================*/
fts_optimize_t* optim) /*!< in: optimize instance */
{
- ulint error;
+ dberr_t error;
optim->fts_common_table.suffix = "BEING_DELETED";
@@ -2263,14 +2244,14 @@ Optimze all the FTS indexes, skipping those that have already been
optimized, since the FTS auxiliary indexes are not guaranteed to be
of the same cardinality.
@return DB_SUCCESS if all OK */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_optimize_indexes(
/*=================*/
fts_optimize_t* optim) /*!< in: optimize instance */
{
ulint i;
- ulint error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
fts_t* fts = optim->table->fts;
/* Optimize the FTS indexes. */
@@ -2333,13 +2314,13 @@ fts_optimize_indexes(
/*********************************************************************//**
Cleanup the snapshot tables and the master deleted table.
@return DB_SUCCESS if all OK */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_optimize_purge_snapshot(
/*========================*/
fts_optimize_t* optim) /*!< in: optimize instance */
{
- ulint error;
+ dberr_t error;
/* Delete the doc ids from the master deleted tables, that were
in the snapshot that was taken at the start of optimize. */
@@ -2362,13 +2343,13 @@ fts_optimize_purge_snapshot(
/*********************************************************************//**
Reset the start time to 0 so that a new optimize can be started.
@return DB_SUCCESS if all OK */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_optimize_reset_start_time(
/*==========================*/
fts_optimize_t* optim) /*!< in: optimize instance */
{
- ulint error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
#ifdef FTS_OPTIMIZE_DEBUG
fts_t* fts = optim->table->fts;
@@ -2401,13 +2382,13 @@ fts_optimize_reset_start_time(
/*********************************************************************//**
Run OPTIMIZE on the given table by a background thread.
@return DB_SUCCESS if all OK */
-static
-ulint
+static __attribute__((nonnull))
+dberr_t
fts_optimize_table_bk(
/*==================*/
fts_slot_t* slot) /*!< in: table to optimiza */
{
- ulint error;
+ dberr_t error;
dict_table_t* table = slot->table;
fts_t* fts = table->fts;
@@ -2440,12 +2421,12 @@ fts_optimize_table_bk(
Run OPTIMIZE on the given table.
@return DB_SUCCESS if all OK */
UNIV_INTERN
-ulint
+dberr_t
fts_optimize_table(
/*===============*/
dict_table_t* table) /*!< in: table to optimiza */
{
- ulint error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
fts_optimize_t* optim = NULL;
fts_t* fts = table->fts;
@@ -2567,6 +2548,11 @@ fts_optimize_add_table(
return;
}
+ /* Make sure table with FTS index cannot be evicted */
+ if (table->can_be_evicted) {
+ dict_table_move_from_lru_to_non_lru(table);
+ }
+
msg = fts_optimize_create_msg(FTS_MSG_ADD_TABLE, table);
ib_wqueue_add(fts_optimize_wq, msg, msg->heap);
@@ -2602,18 +2588,26 @@ fts_optimize_remove_table(
dict_table_t* table) /*!< in: table to remove */
{
fts_msg_t* msg;
- os_event_t event;
- fts_msg_del_t* remove;
+ os_event_t event;
+ fts_msg_del_t* remove;
/* if the optimize system not yet initialized, return */
if (!fts_optimize_wq) {
return;
}
+ /* FTS optimizer thread is already exited */
+ if (fts_opt_start_shutdown) {
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Try to remove table %s after FTS optimize"
+ " thread exiting.", table->name);
+ return;
+ }
+
msg = fts_optimize_create_msg(FTS_MSG_DEL_TABLE, NULL);
/* We will wait on this event until signalled by the consumer. */
- event = os_event_create(table->name);
+ event = os_event_create();
remove = static_cast<fts_msg_del_t*>(
mem_heap_alloc(msg->heap, sizeof(*remove)));
@@ -2889,6 +2883,8 @@ fts_optimize_thread(
ulint n_optimize = 0;
ib_wqueue_t* wq = (ib_wqueue_t*) arg;
+ ut_ad(!srv_read_only_mode);
+
heap = mem_heap_create(sizeof(dict_table_t*) * 64);
heap_alloc = ib_heap_allocator_create(heap);
@@ -3010,10 +3006,10 @@ fts_optimize_thread(
ib_vector_get(tables, i));
if (slot->state != FTS_STATE_EMPTY) {
- dict_table_t* table;
+ dict_table_t* table = NULL;
- table = dict_table_open_on_name_no_stats(
- slot->table->name, FALSE,
+ table = dict_table_open_on_name(
+ slot->table->name, FALSE, FALSE,
DICT_ERR_IGNORE_INDEX_ROOT);
if (table) {
@@ -3022,8 +3018,11 @@ fts_optimize_thread(
fts_sync_table(table);
}
- fts_free(table);
- dict_table_close(table, FALSE);
+ if (table->fts) {
+ fts_free(table);
+ }
+
+ dict_table_close(table, FALSE, FALSE);
}
}
}
@@ -3031,10 +3030,7 @@ fts_optimize_thread(
ib_vector_free(tables);
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: FTS optimize thread exiting.\n");
-
- ib_wqueue_free(wq);
+ ib_logf(IB_LOG_LEVEL_INFO, "FTS optimize thread exiting.");
os_event_set(exit_event);
@@ -3052,6 +3048,8 @@ void
fts_optimize_init(void)
/*===================*/
{
+ ut_ad(!srv_read_only_mode);
+
/* For now we only support one optimize thread. */
ut_a(fts_optimize_wq == NULL);
@@ -3074,18 +3072,30 @@ fts_optimize_is_init(void)
/**********************************************************************//**
Signal the optimize thread to prepare for shutdown. */
-
+UNIV_INTERN
void
fts_optimize_start_shutdown(void)
/*=============================*/
{
+ ut_ad(!srv_read_only_mode);
+
fts_msg_t* msg;
os_event_t event;
+ /* If there is an ongoing activity on dictionary, such as
+ srv_master_evict_from_table_cache(), wait for it */
+ dict_mutex_enter_for_mysql();
+
+ /* Tells FTS optimizer system that we are exiting from
+ optimizer thread, message send their after will not be
+ processed */
+ fts_opt_start_shutdown = true;
+ dict_mutex_exit_for_mysql();
+
/* We tell the OPTIMIZE thread to switch to state done, we
can't delete the work queue here because the add thread needs
deregister the FTS tables. */
- event = os_event_create(NULL);
+ event = os_event_create();
msg = fts_optimize_create_msg(FTS_MSG_STOP, NULL);
msg->ptr = event;
@@ -3094,15 +3104,20 @@ fts_optimize_start_shutdown(void)
os_event_wait(event);
os_event_free(event);
+
+ ib_wqueue_free(fts_optimize_wq);
+
}
/**********************************************************************//**
Reset the work queue. */
-
+UNIV_INTERN
void
fts_optimize_end(void)
/*==================*/
{
+ ut_ad(!srv_read_only_mode);
+
// FIXME: Potential race condition here: We should wait for
// the optimize thread to confirm shutdown.
fts_optimize_wq = NULL;
diff --git a/storage/innobase/fts/fts0pars.cc b/storage/innobase/fts/fts0pars.cc
index 4fdfff5ca42..dd2984b1beb 100644
--- a/storage/innobase/fts/fts0pars.cc
+++ b/storage/innobase/fts/fts0pars.cc
@@ -105,7 +105,7 @@ extern int ftserror(const char* p);
typedef int (*fts_scanner_alt)(YYSTYPE* val, yyscan_t yyscanner);
typedef int (*fts_scanner)();
-struct fts_lexer_struct {
+struct fts_lexer_t {
fts_scanner scanner;
void* yyscanner;
};
diff --git a/storage/innobase/fts/fts0que.cc b/storage/innobase/fts/fts0que.cc
index 58b429a8406..5c757b4f176 100644
--- a/storage/innobase/fts/fts0que.cc
+++ b/storage/innobase/fts/fts0que.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2007, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2007, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -24,6 +24,7 @@ Created 2007/03/27 Sunny Bains
Completed 2011/7/10 Sunny and Jimmy Yang
*******************************************************/
+#include "dict0dict.h" /* dict_table_get_n_rows() */
#include "ut0rbt.h"
#include "row0sel.h"
#include "fts0fts.h"
@@ -57,15 +58,10 @@ static const double FTS_NORMALIZE_COEFF = 0.0115F;
/* For parsing the search phrase */
static const char* FTS_PHRASE_DELIMITER = "\t ";
-typedef struct fts_match_struct fts_match_t;
-typedef struct fts_query_struct fts_query_t;
-typedef struct fts_phrase_struct fts_phrase_t;
-typedef struct fts_select_struct fts_select_t;
-typedef struct fts_doc_freq_struct fts_doc_freq_t;
-typedef struct fts_word_freq_struct fts_word_freq_t;
+struct fts_word_freq_t;
/** State of an FTS query. */
-struct fts_query_struct {
+struct fts_query_t {
mem_heap_t* heap; /*!< Heap to use for allocations */
trx_t* trx; /*!< The query transaction */
@@ -126,11 +122,11 @@ struct fts_query_struct {
position info for each matched word
in the word list */
- ulint total_docs; /*!< The total number of documents */
+ ib_uint64_t total_docs; /*!< The total number of documents */
ulint total_words; /*!< The total number of words */
- ulint error; /*!< Error code if any, that is
+ dberr_t error; /*!< Error code if any, that is
encountered during query processing */
ib_rbt_t* word_freqs; /*!< RB tree of word frequencies per
@@ -144,7 +140,7 @@ struct fts_query_struct {
/** For phrase matching, first we collect the documents and the positions
then we match. */
-struct fts_match_struct {
+struct fts_match_t {
doc_id_t doc_id; /*!< Document id */
ulint start; /*!< Start the phrase match from
@@ -158,7 +154,7 @@ struct fts_match_struct {
/** For matching tokens in a phrase search. We use this data structure in
the callback that determines whether a document should be accepted or
rejected for a phrase search. */
-struct fts_select_struct {
+struct fts_select_t {
doc_id_t doc_id; /*!< The document id to match */
ulint min_pos; /*!< For found to be TRUE at least
@@ -173,8 +169,23 @@ struct fts_select_struct {
the FTS index */
};
+/** structure defines a set of ranges for original documents, each of which
+has a minimum position and maximum position. Text in such range should
+contain all words in the proximity search. We will need to count the
+words in such range to make sure it is less than the specified distance
+of the proximity search */
+struct fts_proximity_t {
+ ulint n_pos; /*!< number of position set, defines
+ a range (min to max) containing all
+ matching words */
+ ulint* min_pos; /*!< the minimum position (in bytes)
+ of the range */
+ ulint* max_pos; /*!< the maximum position (in bytes)
+ of the range */
+};
+
/** The match positions and tokesn to match */
-struct fts_phrase_struct {
+struct fts_phrase_t {
ibool found; /*!< Match result */
const fts_match_t*
@@ -188,23 +199,26 @@ struct fts_phrase_struct {
CHARSET_INFO* charset; /*!< Phrase match charset */
mem_heap_t* heap; /*!< Heap for word processing */
ulint zip_size; /*!< row zip size */
+ fts_proximity_t*proximity_pos; /*!< position info for proximity
+ search verification. Records the min
+ and max position of words matched */
};
/** For storing the frequncy of a word/term in a document */
-struct fts_doc_freq_struct {
+struct fts_doc_freq_t {
doc_id_t doc_id; /*!< Document id */
ulint freq; /*!< Frequency of a word in a document */
};
/** To determine the word frequency per document. */
-struct fts_word_freq_struct {
+struct fts_word_freq_t {
byte* word; /*!< Word for which we need the freq,
it's allocated on the query heap */
ib_rbt_t* doc_freqs; /*!< RB Tree for storing per document
word frequencies. The elements are
of type fts_doc_freq_t */
- ulint doc_count; /*!< Total number of documents that
+ ib_uint64_t doc_count; /*!< Total number of documents that
contain this word */
double idf; /*!< Inverse document frequency */
};
@@ -257,37 +271,46 @@ search arguments to search the document again, thus "expand"
the search result set.
@return DB_SUCCESS if success, otherwise the error code */
static
-ulint
+dberr_t
fts_expand_query(
/*=============*/
dict_index_t* index, /*!< in: FTS index to search */
- fts_query_t* query); /*!< in: query result, to be freed
+ fts_query_t* query) /*!< in: query result, to be freed
by the client */
+ __attribute__((nonnull, warn_unused_result));
/*************************************************************//**
This function finds documents that contain all words in a
phrase or proximity search. And if proximity search, verify
-the words are close to each other enough, as in specified distance.
+the words are close enough to each other, as in specified distance.
This function is called for phrase and proximity search.
@return TRUE if documents are found, FALSE if otherwise */
static
ibool
-fts_check_phrase_proximity(
-/*=======================*/
- fts_query_t* query, /*!< in: query instance */
+fts_phrase_or_proximity_search(
+/*===========================*/
+ fts_query_t* query, /*!< in/out: query instance
+ query->doc_ids might be instantiated
+ with qualified doc IDs */
ib_vector_t* tokens); /*!< in: Tokens contain words */
/*************************************************************//**
-This function check the words in result document are close to each
-other enough (within proximity rnage). This is used for proximity search.
-@return TRUE if words are close to each other, FALSE if otherwise */
+This function checks whether words in result documents are close to
+each other (within proximity range as specified by "distance").
+If "distance" is MAX_ULINT, then it will find all combinations of
+positions of matching words and store min and max positions
+in the "qualified_pos" for later verification.
+@return true if words are close to each other, false if otherwise */
static
-ulint
-fts_proximity_check_position(
-/*=========================*/
- fts_match_t** match, /*!< in: query instance */
- ulint num_match, /*!< in: number of matching
- items */
- ulint distance); /*!< in: distance value
- for proximity search */
+bool
+fts_proximity_get_positions(
+/*========================*/
+ fts_match_t** match, /*!< in: query instance */
+ ulint num_match, /*!< in: number of matching
+ items */
+ ulint distance, /*!< in: distance value
+ for proximity search */
+ fts_proximity_t* qualified_pos); /*!< out: the position info
+ records ranges containing
+ all matching words. */
#if 0
/********************************************************************
Get the total number of words in a documents. */
@@ -954,8 +977,8 @@ cont_search:
/*****************************************************************//**
Set difference.
@return DB_SUCCESS if all went well */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_query_difference(
/*=================*/
fts_query_t* query, /*!< in: query instance */
@@ -993,15 +1016,21 @@ fts_query_difference(
ut_a(index_cache != NULL);
/* Search the cache for a matching word first. */
- nodes = fts_cache_find_word(index_cache, token);
+ if (query->cur_node->term.wildcard
+ && query->flags != FTS_PROXIMITY
+ && query->flags != FTS_PHRASE) {
+ fts_cache_find_wildcard(query, index_cache, token);
+ } else {
+ nodes = fts_cache_find_word(index_cache, token);
- for (i = 0; nodes && i < ib_vector_size(nodes); ++i) {
- const fts_node_t* node;
+ for (i = 0; nodes && i < ib_vector_size(nodes); ++i) {
+ const fts_node_t* node;
- node = static_cast<const fts_node_t*>(
- ib_vector_get_const(nodes, i));
+ node = static_cast<const fts_node_t*>(
+ ib_vector_get_const(nodes, i));
- fts_query_check_node(query, token, node);
+ fts_query_check_node(query, token, node);
+ }
}
rw_lock_x_unlock(&cache->lock);
@@ -1026,8 +1055,8 @@ fts_query_difference(
/*****************************************************************//**
Intersect the token doc ids with the current set.
@return DB_SUCCESS if all went well */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_query_intersect(
/*================*/
fts_query_t* query, /*!< in: query instance */
@@ -1216,8 +1245,8 @@ fts_query_cache(
/*****************************************************************//**
Set union.
@return DB_SUCCESS if all went well */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_query_union(
/*============*/
fts_query_t* query, /*!< in: query instance */
@@ -1248,13 +1277,7 @@ fts_query_union(
/* Single '%' would confuse parser in pars_like_rebind(). In addition,
our wildcard search only supports prefix search */
- if (*token->f_str == '%') {
- if (token->f_len == 1) {
- return(query->error);
- }
- token->f_str++;
- token->f_len--;
- }
+ ut_ad(*token->f_str != '%');
fts_query_cache(query, token);
@@ -1485,6 +1508,67 @@ fts_query_match_phrase_terms(
}
/*****************************************************************//**
+Callback function to count the number of words in position ranges,
+and see whether the word count is in specified "phrase->distance"
+@return true if the number of characters is less than the "distance" */
+static
+bool
+fts_proximity_is_word_in_range(
+/*===========================*/
+ const fts_phrase_t*
+ phrase, /*!< in: phrase with the search info */
+ byte* start, /*!< in: text to search */
+ ulint total_len) /*!< in: length of text */
+{
+ fts_proximity_t* proximity_pos = phrase->proximity_pos;
+
+ /* Search each matched position pair (with min and max positions)
+ and count the number of words in the range */
+ for (ulint i = 0; i < proximity_pos->n_pos; i++) {
+ ulint cur_pos = proximity_pos->min_pos[i];
+ ulint n_word = 0;
+
+ ut_ad(proximity_pos->max_pos[i] <= total_len);
+
+ /* Walk through words in the range and count them */
+ while (cur_pos <= proximity_pos->max_pos[i]) {
+ ulint len;
+ fts_string_t str;
+ ulint offset = 0;
+
+ len = innobase_mysql_fts_get_token(
+ phrase->charset,
+ start + cur_pos,
+ start + total_len, &str, &offset);
+
+ if (len == 0) {
+ break;
+ }
+
+ /* Advances position with "len" bytes */
+ cur_pos += len;
+
+ /* Record the number of words */
+ if (str.f_n_char > 0) {
+ n_word++;
+ }
+
+ if (n_word > phrase->distance) {
+ break;
+ }
+ }
+
+ /* Check if the number of words is less than specified
+ "distance" */
+ if (n_word && n_word <= phrase->distance) {
+ return(true);
+ }
+ }
+
+ return(false);
+}
+
+/*****************************************************************//**
Callback function to fetch and search the document.
@return TRUE if matched else FALSE */
static
@@ -1594,31 +1678,77 @@ fts_query_fetch_document(
sel_node_t* node = static_cast<sel_node_t*>(row);
fts_phrase_t* phrase = static_cast<fts_phrase_t*>(user_arg);
ulint prev_len = 0;
+ ulint total_len = 0;
+ byte* document_text = NULL;
exp = node->select_list;
phrase->found = FALSE;
+ /* For proximity search, we will need to get the whole document
+ from all fields, so first count the total length of the document
+ from all the fields */
+ if (phrase->proximity_pos) {
+ while (exp) {
+ ulint field_len;
+ dfield_t* dfield = que_node_get_val(exp);
+ byte* data = static_cast<byte*>(
+ dfield_get_data(dfield));
+
+ if (dfield_is_ext(dfield)) {
+ ulint local_len = dfield_get_len(dfield);
+
+ local_len -= BTR_EXTERN_FIELD_REF_SIZE;
+
+ field_len = mach_read_from_4(
+ data + local_len + BTR_EXTERN_LEN + 4);
+ } else {
+ field_len = dfield_get_len(dfield);
+ }
+
+ if (field_len != UNIV_SQL_NULL) {
+ total_len += field_len + 1;
+ }
+
+ exp = que_node_get_next(exp);
+ }
+
+ document_text = static_cast<byte*>(mem_heap_zalloc(
+ phrase->heap, total_len));
+
+ if (!document_text) {
+ return(FALSE);
+ }
+ }
+
+ exp = node->select_list;
+
while (exp) {
dfield_t* dfield = que_node_get_val(exp);
- void* data = NULL;
+ byte* data = static_cast<byte*>(
+ dfield_get_data(dfield));
ulint cur_len;
if (dfield_is_ext(dfield)) {
data = btr_copy_externally_stored_field(
- &cur_len, static_cast<const byte*>(data),
- phrase->zip_size,
+ &cur_len, data, phrase->zip_size,
dfield_get_len(dfield), phrase->heap);
} else {
- data = dfield_get_data(dfield);
cur_len = dfield_get_len(dfield);
}
if (cur_len != UNIV_SQL_NULL && cur_len != 0) {
- phrase->found =
- fts_query_match_phrase(
- phrase, static_cast<byte*>(data),
- cur_len, prev_len, phrase->heap);
+ if (phrase->proximity_pos) {
+ memcpy(document_text + prev_len, data, cur_len);
+ } else {
+ /* For phrase search */
+ phrase->found =
+ fts_query_match_phrase(
+ phrase,
+ static_cast<byte*>(data),
+ cur_len, prev_len,
+ phrase->heap);
+ }
}
if (phrase->found) {
@@ -1633,6 +1763,13 @@ fts_query_fetch_document(
exp = que_node_get_next(exp);
}
+ if (phrase->proximity_pos) {
+ ut_ad(prev_len <= total_len);
+
+ phrase->found = fts_proximity_is_word_in_range(
+ phrase, document_text, total_len);
+ }
+
return(phrase->found);
}
@@ -1689,13 +1826,12 @@ fts_query_select(
/********************************************************************
Read the rows from the FTS index, that match word and where the
-doc id is between first and last doc id. */
-static
-ulint
+doc id is between first and last doc id.
+@return DB_SUCCESS if all went well else error code */
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_query_find_term(
/*================*/
- /*!< out: DB_SUCCESS if all went well
- else error code */
fts_query_t* query, /*!< in: FTS query state */
que_t** graph, /*!< in: prepared statement */
const fts_string_t* word, /*!< in: the word to fetch */
@@ -1705,7 +1841,7 @@ fts_query_find_term(
ibool* found) /*!< out: TRUE if found else FALSE */
{
pars_info_t* info;
- ulint error;
+ dberr_t error;
fts_select_t select;
doc_id_t match_doc_id;
trx_t* trx = query->trx;
@@ -1830,19 +1966,18 @@ fts_query_sum(
}
/********************************************************************
-Calculate the total documents that contain a particular word (term). */
-static
-ulint
+Calculate the total documents that contain a particular word (term).
+@return DB_SUCCESS if all went well else error code */
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_query_total_docs_containing_term(
/*=================================*/
- /*!< out: DB_SUCCESS if all went well
- else error code */
fts_query_t* query, /*!< in: FTS query state */
const fts_string_t* word, /*!< in: the word to check */
ulint* total) /*!< out: documents containing word */
{
pars_info_t* info;
- ulint error;
+ dberr_t error;
que_t* graph;
ulint selected;
trx_t* trx = query->trx;
@@ -1910,19 +2045,18 @@ fts_query_total_docs_containing_term(
}
/********************************************************************
-Get the total number of words in a documents. */
-static
-ulint
+Get the total number of words in a documents.
+@return DB_SUCCESS if all went well else error code */
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_query_terms_in_document(
/*========================*/
- /*!< out: DB_SUCCESS if all went well
- else error code */
fts_query_t* query, /*!< in: FTS query state */
doc_id_t doc_id, /*!< in: the word to check */
ulint* total) /*!< out: total words in document */
{
pars_info_t* info;
- ulint error;
+ dberr_t error;
que_t* graph;
doc_id_t read_doc_id;
trx_t* trx = query->trx;
@@ -1993,9 +2127,9 @@ fts_query_terms_in_document(
/*****************************************************************//**
Retrieve the document and match the phrase tokens.
-@return TRUE if matches else FALSE */
-static
-ulint
+@return DB_SUCCESS or error code */
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_query_match_document(
/*=====================*/
ib_vector_t* tokens, /*!< in: phrase tokens */
@@ -2004,7 +2138,7 @@ fts_query_match_document(
ulint distance, /*!< in: proximity distance */
ibool* found) /*!< out: TRUE if phrase found */
{
- ulint error;
+ dberr_t error;
fts_phrase_t phrase;
memset(&phrase, 0x0, sizeof(phrase));
@@ -2025,8 +2159,8 @@ fts_query_match_document(
if (error != DB_SUCCESS) {
ut_print_timestamp(stderr);
- fprintf(stderr, "InnoDB: Error: (%lu) matching document.\n",
- error);
+ fprintf(stderr, "InnoDB: Error: (%s) matching document.\n",
+ ut_strerr(error));
} else {
*found = phrase.found;
}
@@ -2037,11 +2171,66 @@ fts_query_match_document(
}
/*****************************************************************//**
+This function fetches the original documents and count the
+words in between matching words to see that is in specified distance
+@return DB_SUCCESS if all OK */
+static __attribute__((nonnull, warn_unused_result))
+bool
+fts_query_is_in_proximity_range(
+/*============================*/
+ const fts_query_t* query, /*!< in: query instance */
+ fts_match_t** match, /*!< in: query instance */
+ fts_proximity_t* qualified_pos) /*!< in: position info for
+ qualified ranges */
+{
+ fts_get_doc_t get_doc;
+ fts_cache_t* cache = query->index->table->fts->cache;
+ dberr_t err;
+ fts_phrase_t phrase;
+
+ memset(&get_doc, 0x0, sizeof(get_doc));
+ memset(&phrase, 0x0, sizeof(phrase));
+
+ rw_lock_x_lock(&cache->lock);
+ get_doc.index_cache = fts_find_index_cache(cache, query->index);
+ rw_lock_x_unlock(&cache->lock);
+ ut_a(get_doc.index_cache != NULL);
+
+ phrase.distance = query->distance;
+ phrase.charset = get_doc.index_cache->charset;
+ phrase.zip_size = dict_table_zip_size(
+ get_doc.index_cache->index->table);
+ phrase.heap = mem_heap_create(512);
+ phrase.proximity_pos = qualified_pos;
+ phrase.found = FALSE;
+
+ err = fts_doc_fetch_by_doc_id(
+ &get_doc, match[0]->doc_id, NULL, FTS_FETCH_DOC_BY_ID_EQUAL,
+ fts_query_fetch_document, &phrase);
+
+ if (err != DB_SUCCESS) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Error: (%s) in verification phase of proximity "
+ "search", ut_strerr(err));
+ }
+
+ /* Free the prepared statement. */
+ if (get_doc.get_document_graph) {
+ fts_que_graph_free(get_doc.get_document_graph);
+ get_doc.get_document_graph = NULL;
+ }
+
+ mem_heap_free(phrase.heap);
+
+ return(err == DB_SUCCESS && phrase.found);
+}
+
+/*****************************************************************//**
Iterate over the matched document ids and search the for the
actual phrase in the text.
@return DB_SUCCESS if all OK */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_query_search_phrase(
/*====================*/
fts_query_t* query, /*!< in: query instance */
@@ -2050,8 +2239,6 @@ fts_query_search_phrase(
ulint i;
fts_get_doc_t get_doc;
ulint n_matched;
- // FIXME: Debug code
- ulint searched = 0;
fts_cache_t* cache = query->index->table->fts->cache;
n_matched = ib_vector_size(query->matched);
@@ -2061,9 +2248,7 @@ fts_query_search_phrase(
rw_lock_x_lock(&cache->lock);
- // FIXME: We shouldn't have to cast here.
- get_doc.index_cache = (fts_index_cache_t*)
- fts_find_index_cache(cache, query->index);
+ get_doc.index_cache = fts_find_index_cache(cache, query->index);
/* Must find the index cache */
ut_a(get_doc.index_cache != NULL);
@@ -2089,9 +2274,6 @@ fts_query_search_phrase(
an earlier pass. */
if (match->doc_id != 0) {
- // FIXME: Debug code
- ++searched;
-
query->error = fts_query_match_document(
tokens, &get_doc,
match, query->distance, &found);
@@ -2119,18 +2301,14 @@ fts_query_search_phrase(
get_doc.get_document_graph = NULL;
}
- // FIXME: Debug code
- ut_print_timestamp(stderr);
- printf(" End: %lu, %lu\n", searched, ib_vector_size(query->matched));
-
return(query->error);
}
/*****************************************************************//**
Text/Phrase search.
-@return count of doc ids added */
-static
-ulint
+@return DB_SUCCESS or error code */
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_query_phrase_search(
/*====================*/
fts_query_t* query, /*!< in: query instance */
@@ -2290,7 +2468,7 @@ fts_query_phrase_search(
/* If we are doing proximity search, verify the distance
between all words, and check they are in specified distance. */
if (query->flags & FTS_PROXIMITY) {
- fts_check_phrase_proximity(query, tokens);
+ fts_phrase_or_proximity_search(query, tokens);
} else {
ibool matched;
@@ -2301,7 +2479,7 @@ fts_query_phrase_search(
and then doing a search through the text. Isolated
testing shows this also helps in mitigating disruption
of the buffer cache. */
- matched = fts_check_phrase_proximity(query, tokens);
+ matched = fts_phrase_or_proximity_search(query, tokens);
query->matched = query->match_array[0];
/* Read the actual text in and search for the phrase. */
@@ -2329,8 +2507,8 @@ func_exit:
/*****************************************************************//**
Find the word and evaluate.
@return DB_SUCCESS if all went well */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_query_execute(
/*==============*/
fts_query_t* query, /*!< in: query instance */
@@ -2477,13 +2655,12 @@ fts_query_visitor(
/*****************************************************************//**
Process (nested) sub-expression, create a new result set to store the
sub-expression result by processing nodes under current sub-expression
-list. Merge the sub-expression result with that of parent expression list. */
-
-ulint
+list. Merge the sub-expression result with that of parent expression list.
+@return DB_SUCCESS if all went well */
+UNIV_INTERN
+dberr_t
fts_ast_visit_sub_exp(
/*==================*/
- /*!< out: DB_SUCCESS if all
- went well */
fts_ast_node_t* node, /*!< in,out: current root node */
fts_ast_callback visitor, /*!< in: callback function */
void* arg) /*!< in,out: arg for callback */
@@ -2492,8 +2669,9 @@ fts_ast_visit_sub_exp(
fts_query_t* query = static_cast<fts_query_t*>(arg);
ib_rbt_t* parent_doc_ids;
ib_rbt_t* subexpr_doc_ids;
- ulint error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
ibool inited = query->inited;
+ bool will_be_ignored = false;
ut_a(node->type == FTS_AST_SUBEXP_LIST);
@@ -2521,7 +2699,8 @@ fts_ast_visit_sub_exp(
/* Process nodes in current sub-expression and store its
result set in query->doc_ids we created above. */
- error = fts_ast_visit(FTS_NONE, node->next, visitor, arg);
+ error = fts_ast_visit(FTS_NONE, node->next, visitor,
+ arg, &will_be_ignored);
/* Reinstate parent node state and prepare for merge. */
query->inited = inited;
@@ -2757,6 +2936,8 @@ fts_query_read_node(
ut_a(query->cur_node->type == FTS_AST_TERM ||
query->cur_node->type == FTS_AST_TEXT);
+ memset(&node, 0, sizeof(node));
+
/* Need to consider the wildcard search case, the word frequency
is created on the search string not the actual word. So we need
to assign the frequency on search string behalf. */
@@ -2879,8 +3060,8 @@ fts_query_calculate_idf(
/*====================*/
fts_query_t* query) /*!< in: Query state */
{
- const ib_rbt_node_t* node;
- double total_docs = query->total_docs;
+ const ib_rbt_node_t* node;
+ ib_uint64_t total_docs = query->total_docs;
/* We need to free any instances of fts_doc_freq_t that we
may have allocated. */
@@ -2893,7 +3074,7 @@ fts_query_calculate_idf(
word_freq = rbt_value(fts_word_freq_t, node);
if (word_freq->doc_count > 0) {
- if (total_docs == (double) word_freq->doc_count) {
+ if (total_docs == word_freq->doc_count) {
/* QP assume ranking > 0 if we find
a match. Since Log10(1) = 0, we cannot
make IDF a zero value if do find a
@@ -2907,10 +3088,13 @@ fts_query_calculate_idf(
}
}
- fprintf(stderr,"'%s' -> %lu/%lu %6.5lf\n",
- word_freq->word,
- query->total_docs, word_freq->doc_count,
- word_freq->idf);
+ if (fts_enable_diag_print) {
+ fprintf(stderr,"'%s' -> " UINT64PF "/" UINT64PF
+ " %6.5lf\n",
+ word_freq->word,
+ query->total_docs, word_freq->doc_count,
+ word_freq->idf);
+ }
}
}
@@ -3017,7 +3201,7 @@ fts_retrieve_ranking(
ranking = rbt_value(fts_ranking_t, parent.last);
- return (ranking->rank);
+ return(ranking->rank);
}
return(0);
@@ -3184,7 +3368,7 @@ fts_query_parse(
FTS Query entry point.
@return DB_SUCCESS if successful otherwise error code */
UNIV_INTERN
-ulint
+dberr_t
fts_query(
/*======*/
trx_t* trx, /*!< in: transaction */
@@ -3196,7 +3380,7 @@ fts_query(
fts_result_t** result) /*!< in/out: result doc ids */
{
fts_query_t query;
- ulint error;
+ dberr_t error = DB_SUCCESS;
byte* lc_query_str;
ulint lc_query_str_len;
ulint result_len;
@@ -3204,6 +3388,7 @@ fts_query(
trx_t* query_trx;
CHARSET_INFO* charset;
ulint start_time_ms;
+ bool will_be_ignored = false;
boolean_mode = flags & FTS_BOOL;
@@ -3237,20 +3422,24 @@ fts_query(
/* Setup the RB tree that will be used to collect per term
statistics. */
query.word_freqs = rbt_create_arg_cmp(
- sizeof(fts_word_freq_t), innobase_fts_string_cmp, charset);
+ sizeof(fts_word_freq_t), innobase_fts_string_cmp,
+ (void*) charset);
- query.total_docs = fts_get_total_document_count(index->table);
+ query.total_docs = dict_table_get_n_rows(index->table);
- error = fts_get_total_word_count(trx, query.index, &query.total_words);
+#ifdef FTS_DOC_STATS_DEBUG
+ if (ft_enable_diag_print) {
+ error = fts_get_total_word_count(
+ trx, query.index, &query.total_words);
- if (error != DB_SUCCESS) {
- goto func_exit;
- }
+ if (error != DB_SUCCESS) {
+ goto func_exit;
+ }
-#ifdef FTS_INTERNAL_DIAG_PRINT
- fprintf(stderr, "Total docs: %lu Total words: %lu\n",
- query.total_docs, query.total_words);
-#endif
+ fprintf(stderr, "Total docs: " UINT64PF " Total words: %lu\n",
+ query.total_docs, query.total_words);
+ }
+#endif /* FTS_DOC_STATS_DEBUG */
query.fts_common_table.suffix = "DELETED";
@@ -3299,13 +3488,14 @@ fts_query(
sizeof(fts_ranking_t), fts_ranking_doc_id_cmp);
/* Parse the input query string. */
- if (fts_query_parse(&query, lc_query_str, query_len)) {
+ if (fts_query_parse(&query, lc_query_str, result_len)) {
fts_ast_node_t* ast = query.root;
/* Traverse the Abstract Syntax Tree (AST) and execute
the query. */
query.error = fts_ast_visit(
- FTS_NONE, ast, fts_query_visitor, &query);
+ FTS_NONE, ast, fts_query_visitor,
+ &query, &will_be_ignored);
/* If query expansion is requested, extend the search
with first search pass result */
@@ -3453,8 +3643,8 @@ words in documents found in the first search pass will be used as
search arguments to search the document again, thus "expand"
the search result set.
@return DB_SUCCESS if success, otherwise the error code */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
fts_expand_query(
/*=============*/
dict_index_t* index, /*!< in: FTS index to search */
@@ -3463,7 +3653,7 @@ fts_expand_query(
const ib_rbt_node_t* node;
const ib_rbt_node_t* token_node;
fts_doc_t result_doc;
- ulint error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
const fts_index_cache_t*index_cache;
/* If no doc is found in first search pass, return */
@@ -3482,7 +3672,7 @@ fts_expand_query(
result_doc.tokens = rbt_create_arg_cmp(
sizeof(fts_token_t), innobase_fts_text_cmp,
- index_cache->charset);
+ (void*) index_cache->charset);
result_doc.charset = index_cache->charset;
@@ -3557,14 +3747,16 @@ fts_expand_query(
/*************************************************************//**
This function finds documents that contain all words in a
phrase or proximity search. And if proximity search, verify
-the words are close to each other enough, as in specified distance.
+the words are close enough to each other, as in specified distance.
This function is called for phrase and proximity search.
@return TRUE if documents are found, FALSE if otherwise */
static
ibool
-fts_check_phrase_proximity(
-/*=======================*/
- fts_query_t* query, /*!< in: query instance */
+fts_phrase_or_proximity_search(
+/*===========================*/
+ fts_query_t* query, /*!< in/out: query instance.
+ query->doc_ids might be instantiated
+ with qualified doc IDs */
ib_vector_t* tokens) /*!< in: Tokens contain words */
{
ulint n_matched;
@@ -3581,8 +3773,13 @@ fts_check_phrase_proximity(
walk through the list and find common documents that
contain all the matching words. */
for (i = 0; i < n_matched; i++) {
- ulint j;
- ulint k = 0;
+ ulint j;
+ ulint k = 0;
+ fts_proximity_t qualified_pos;
+ ulint qualified_pos_buf[MAX_PROXIMITY_ITEM * 2];
+
+ qualified_pos.min_pos = &qualified_pos_buf[0];
+ qualified_pos.max_pos = &qualified_pos_buf[MAX_PROXIMITY_ITEM];
match[0] = static_cast<fts_match_t*>(
ib_vector_get(query->match_array[0], i));
@@ -3647,24 +3844,31 @@ fts_check_phrase_proximity(
/* For this matching doc, we need to further
verify whether the words in the doc are close
- to each other, and with in distance specified
+ to each other, and within the distance specified
in the proximity search */
if (query->flags & FTS_PHRASE) {
matched = TRUE;
- } else if (fts_proximity_check_position(
- match, num_token, query->distance)) {
- ulint z;
- /* If so, mark we find a matching doc */
- fts_query_process_doc_id(query, match[0]->doc_id, 0);
+ } else if (fts_proximity_get_positions(
+ match, num_token, ULINT_MAX, &qualified_pos)) {
+
+ /* Fetch the original documents and count the
+ words in between matching words to see that is in
+ specified distance */
+ if (fts_query_is_in_proximity_range(
+ query, match, &qualified_pos)) {
+ /* If so, mark we find a matching doc */
+ fts_query_process_doc_id(
+ query, match[0]->doc_id, 0);
- matched = TRUE;
- for (z = 0; z < num_token; z++) {
- fts_string_t* token;
- token = static_cast<fts_string_t*>(
- ib_vector_get(tokens, z));
- fts_query_add_word_to_document(
- query, match[0]->doc_id,
- token->f_str);
+ matched = TRUE;
+ for (ulint z = 0; z < num_token; z++) {
+ fts_string_t* token;
+ token = static_cast<fts_string_t*>(
+ ib_vector_get(tokens, z));
+ fts_query_add_word_to_document(
+ query, match[0]->doc_id,
+ token->f_str);
+ }
}
}
@@ -3678,24 +3882,32 @@ func_exit:
}
/*************************************************************//**
-This function check the words in result document are close to each
-other (within proximity range). This is used for proximity search.
-@return TRUE if words are close to each other, FALSE if otherwise */
+This function checks whether words in result documents are close to
+each other (within proximity range as specified by "distance").
+If "distance" is MAX_ULINT, then it will find all combinations of
+positions of matching words and store min and max positions
+in the "qualified_pos" for later verification.
+@return true if words are close to each other, false if otherwise */
static
-ulint
-fts_proximity_check_position(
-/*=========================*/
- fts_match_t** match, /*!< in: query instance */
- ulint num_match, /*!< in: number of matching
- items */
- ulint distance) /*!< in: distance value
- for proximity search */
+bool
+fts_proximity_get_positions(
+/*========================*/
+ fts_match_t** match, /*!< in: query instance */
+ ulint num_match, /*!< in: number of matching
+ items */
+ ulint distance, /*!< in: distance value
+ for proximity search */
+ fts_proximity_t* qualified_pos) /*!< out: the position info
+ records ranges containing
+ all matching words. */
{
ulint i;
ulint idx[MAX_PROXIMITY_ITEM];
ulint num_pos[MAX_PROXIMITY_ITEM];
ulint min_idx;
+ qualified_pos->n_pos = 0;
+
ut_a(num_match < MAX_PROXIMITY_ITEM);
/* Each word could appear multiple times in a doc. So
@@ -3747,14 +3959,21 @@ fts_proximity_check_position(
find a good match */
if (max_pos - min_pos <= distance
&& (i >= num_match || position[i] != ULINT_UNDEFINED)) {
- return(TRUE);
- } else {
- /* Otherwise, move to the next position is the
- list for the word with the smallest position */
- idx[min_idx]++;
+ /* The charset has variable character
+ length encoding, record the min_pos and
+ max_pos, we will need to verify the actual
+ number of characters */
+ qualified_pos->min_pos[qualified_pos->n_pos] = min_pos;
+ qualified_pos->max_pos[qualified_pos->n_pos] = max_pos;
+ qualified_pos->n_pos++;
}
+
+ /* Otherwise, move to the next position is the
+ list for the word with the smallest position */
+ idx[min_idx]++;
}
- /* Failed to find all words within the range for the doc */
- return(FALSE);
+ ut_ad(qualified_pos->n_pos <= MAX_PROXIMITY_ITEM);
+
+ return(qualified_pos->n_pos != 0);
}
diff --git a/storage/innobase/fts/fts0sql.cc b/storage/innobase/fts/fts0sql.cc
index 8e60a5f1132..03c19d93af6 100644
--- a/storage/innobase/fts/fts0sql.cc
+++ b/storage/innobase/fts/fts0sql.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2007, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2007, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -262,7 +262,7 @@ fts_parse_sql_no_dict_lock(
Evaluate an SQL query graph.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_eval_sql(
/*=========*/
trx_t* trx, /*!< in: transaction */
@@ -327,16 +327,16 @@ fts_get_select_columns_str(
Commit a transaction.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_sql_commit(
/*===========*/
trx_t* trx) /*!< in: transaction */
{
- ulint error;
+ dberr_t error;
error = trx_commit_for_mysql(trx);
- /* Commit above returns 0 on success, it should always succeed */
+ /* Commit should always succeed */
ut_a(error == DB_SUCCESS);
return(DB_SUCCESS);
@@ -346,7 +346,7 @@ fts_sql_commit(
Rollback a transaction.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_sql_rollback(
/*=============*/
trx_t* trx) /*!< in: transaction */
diff --git a/storage/innobase/fts/fts0tlex.cc b/storage/innobase/fts/fts0tlex.cc
index 69b859716d5..44434c4ea25 100644
--- a/storage/innobase/fts/fts0tlex.cc
+++ b/storage/innobase/fts/fts0tlex.cc
@@ -35,7 +35,7 @@
#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
/* C99 says to define __STDC_LIMIT_MACROS before including stdint.h,
- * if you want the limit (max/min) macros for int types.
+ * if you want the limit (max/min) macros for int types.
*/
#ifndef __STDC_LIMIT_MACROS
#define __STDC_LIMIT_MACROS 1
@@ -247,7 +247,7 @@ struct yy_buffer_state
int yy_bs_lineno; /**< The line count. */
int yy_bs_column; /**< The column count. */
-
+
/* Whether to try to fill the input buffer when we reach the
* end of it.
*/
@@ -368,10 +368,10 @@ struct yy_trans_info
flex_int32_t yy_verify;
flex_int32_t yy_nxt;
};
-static yyconst flex_int16_t yy_accept[16] =
+static yyconst flex_int16_t yy_accept[17] =
{ 0,
- 4, 4, 7, 4, 1, 5, 1, 6, 2, 4,
- 1, 1, 0, 3, 0
+ 4, 4, 7, 4, 1, 5, 1, 6, 6, 2,
+ 4, 1, 1, 0, 3, 0
} ;
static yyconst flex_int32_t yy_ec[256] =
@@ -379,8 +379,8 @@ static yyconst flex_int32_t yy_ec[256] =
1, 1, 1, 1, 1, 1, 1, 1, 2, 3,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 4, 1, 5, 1, 1, 1, 1, 1, 1,
- 1, 6, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 4, 1, 5, 1, 1, 6, 1, 1, 1,
+ 1, 7, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
@@ -406,35 +406,35 @@ static yyconst flex_int32_t yy_ec[256] =
1, 1, 1, 1, 1
} ;
-static yyconst flex_int32_t yy_meta[7] =
+static yyconst flex_int32_t yy_meta[8] =
{ 0,
- 1, 2, 3, 4, 5, 1
+ 1, 2, 3, 4, 5, 5, 1
} ;
-static yyconst flex_int16_t yy_base[19] =
+static yyconst flex_int16_t yy_base[20] =
{ 0,
- 0, 0, 17, 0, 5, 20, 0, 8, 0, 0,
- 0, 0, 3, 20, 20, 9, 10, 14
+ 0, 0, 18, 0, 6, 21, 0, 9, 21, 0,
+ 0, 0, 0, 4, 21, 21, 10, 11, 15
} ;
-static yyconst flex_int16_t yy_def[19] =
+static yyconst flex_int16_t yy_def[20] =
{ 0,
- 15, 1, 15, 16, 16, 15, 17, 18, 16, 16,
- 5, 17, 18, 15, 0, 15, 15, 15
+ 16, 1, 16, 17, 17, 16, 18, 19, 16, 17,
+ 17, 5, 18, 19, 16, 0, 16, 16, 16
} ;
-static yyconst flex_int16_t yy_nxt[27] =
+static yyconst flex_int16_t yy_nxt[29] =
{ 0,
- 4, 5, 6, 7, 8, 9, 11, 14, 12, 10,
- 10, 12, 14, 12, 13, 13, 15, 13, 13, 3,
- 15, 15, 15, 15, 15, 15
+ 4, 5, 6, 7, 8, 9, 10, 12, 15, 13,
+ 11, 11, 13, 15, 13, 14, 14, 16, 14, 14,
+ 3, 16, 16, 16, 16, 16, 16, 16
} ;
-static yyconst flex_int16_t yy_chk[27] =
+static yyconst flex_int16_t yy_chk[29] =
{ 0,
- 1, 1, 1, 1, 1, 1, 5, 13, 5, 16,
- 16, 17, 8, 17, 18, 18, 3, 18, 18, 15,
- 15, 15, 15, 15, 15, 15
+ 1, 1, 1, 1, 1, 1, 1, 5, 14, 5,
+ 17, 17, 18, 8, 18, 19, 19, 3, 19, 19,
+ 16, 16, 16, 16, 16, 16, 16, 16
} ;
/* The intent behind this definition is that it'll catch
@@ -699,7 +699,7 @@ YY_DECL
register yy_state_type yy_current_state;
register char *yy_cp, *yy_bp;
register int yy_act;
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
#line 44 "fts0tlex.l"
@@ -757,13 +757,13 @@ yy_match:
while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
{
yy_current_state = (int) yy_def[yy_current_state];
- if ( yy_current_state >= 16 )
+ if ( yy_current_state >= 17 )
yy_c = yy_meta[(unsigned int) yy_c];
}
yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
++yy_cp;
}
- while ( yy_current_state != 15 );
+ while ( yy_current_state != 16 );
yy_cp = yyg->yy_last_accepting_cpos;
yy_current_state = yyg->yy_last_accepting_state;
@@ -969,7 +969,7 @@ case YY_STATE_EOF(INITIAL):
*/
static int yy_get_next_buffer (yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
register char *dest = YY_CURRENT_BUFFER_LVALUE->yy_ch_buf;
register char *source = yyg->yytext_ptr;
register int number_to_move, i;
@@ -1035,9 +1035,9 @@ static int yy_get_next_buffer (yyscan_t yyscanner)
else
b->yy_buf_size *= 2;
- b->yy_ch_buf = (char*)
+ b->yy_ch_buf = (char *)
/* Include room in for 2 EOB chars. */
- fts0trealloc((void*) b->yy_ch_buf,b->yy_buf_size + 2 ,yyscanner );
+ fts0trealloc((void *) b->yy_ch_buf,b->yy_buf_size + 2 ,yyscanner );
}
else
/* Can't grow it, we don't own it. */
@@ -1086,7 +1086,7 @@ static int yy_get_next_buffer (yyscan_t yyscanner)
if ((yy_size_t) (yyg->yy_n_chars + number_to_move) > YY_CURRENT_BUFFER_LVALUE->yy_buf_size) {
/* Extend the array by 50%, plus the number we really need. */
yy_size_t new_size = yyg->yy_n_chars + number_to_move + (yyg->yy_n_chars >> 1);
- YY_CURRENT_BUFFER_LVALUE->yy_ch_buf = (char*) fts0trealloc((void*) YY_CURRENT_BUFFER_LVALUE->yy_ch_buf,new_size ,yyscanner );
+ YY_CURRENT_BUFFER_LVALUE->yy_ch_buf = (char *) fts0trealloc((void *) YY_CURRENT_BUFFER_LVALUE->yy_ch_buf,new_size ,yyscanner );
if ( ! YY_CURRENT_BUFFER_LVALUE->yy_ch_buf )
YY_FATAL_ERROR( "out of dynamic memory in yy_get_next_buffer()" );
}
@@ -1106,7 +1106,7 @@ static int yy_get_next_buffer (yyscan_t yyscanner)
{
register yy_state_type yy_current_state;
register char *yy_cp;
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
yy_current_state = yyg->yy_start;
@@ -1121,7 +1121,7 @@ static int yy_get_next_buffer (yyscan_t yyscanner)
while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
{
yy_current_state = (int) yy_def[yy_current_state];
- if ( yy_current_state >= 16 )
+ if ( yy_current_state >= 17 )
yy_c = yy_meta[(unsigned int) yy_c];
}
yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
@@ -1138,7 +1138,7 @@ static int yy_get_next_buffer (yyscan_t yyscanner)
static yy_state_type yy_try_NUL_trans (yy_state_type yy_current_state , yyscan_t yyscanner)
{
register int yy_is_jam;
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner; /* This var may be unused depending upon options. */
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; /* This var may be unused depending upon options. */
register char *yy_cp = yyg->yy_c_buf_p;
register YY_CHAR yy_c = 1;
@@ -1150,11 +1150,11 @@ static int yy_get_next_buffer (yyscan_t yyscanner)
while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
{
yy_current_state = (int) yy_def[yy_current_state];
- if ( yy_current_state >= 16 )
+ if ( yy_current_state >= 17 )
yy_c = yy_meta[(unsigned int) yy_c];
}
yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
- yy_is_jam = (yy_current_state == 15);
+ yy_is_jam = (yy_current_state == 16);
return yy_is_jam ? 0 : yy_current_state;
}
@@ -1168,7 +1168,7 @@ static int yy_get_next_buffer (yyscan_t yyscanner)
{
int c;
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
*yyg->yy_c_buf_p = yyg->yy_hold_char;
@@ -1226,7 +1226,7 @@ static int yy_get_next_buffer (yyscan_t yyscanner)
}
}
- c = *(unsigned char*) yyg->yy_c_buf_p; /* cast for 8-bit char's */
+ c = *(unsigned char *) yyg->yy_c_buf_p; /* cast for 8-bit char's */
*yyg->yy_c_buf_p = '\0'; /* preserve yytext */
yyg->yy_hold_char = *++yyg->yy_c_buf_p;
@@ -1241,7 +1241,7 @@ static int yy_get_next_buffer (yyscan_t yyscanner)
*/
void fts0trestart (FILE * input_file , yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
if ( ! YY_CURRENT_BUFFER ){
fts0tensure_buffer_stack (yyscanner);
@@ -1259,7 +1259,7 @@ static int yy_get_next_buffer (yyscan_t yyscanner)
*/
void fts0t_switch_to_buffer (YY_BUFFER_STATE new_buffer , yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
/* TODO. We should be able to replace this entire function body
* with
@@ -1291,7 +1291,7 @@ static int yy_get_next_buffer (yyscan_t yyscanner)
static void fts0t_load_buffer_state (yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
yyg->yy_n_chars = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;
yyg->yytext_ptr = yyg->yy_c_buf_p = YY_CURRENT_BUFFER_LVALUE->yy_buf_pos;
yyin = YY_CURRENT_BUFFER_LVALUE->yy_input_file;
@@ -1317,7 +1317,7 @@ static void fts0t_load_buffer_state (yyscan_t yyscanner)
/* yy_ch_buf has to be 2 characters longer than the size given because
* we need to put in 2 end-of-buffer characters.
*/
- b->yy_ch_buf = (char*) fts0talloc(b->yy_buf_size + 2 ,yyscanner );
+ b->yy_ch_buf = (char *) fts0talloc(b->yy_buf_size + 2 ,yyscanner );
if ( ! b->yy_ch_buf )
YY_FATAL_ERROR( "out of dynamic memory in fts0t_create_buffer()" );
@@ -1334,7 +1334,7 @@ static void fts0t_load_buffer_state (yyscan_t yyscanner)
*/
void fts0t_delete_buffer (YY_BUFFER_STATE b , yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
if ( ! b )
return;
@@ -1343,9 +1343,9 @@ static void fts0t_load_buffer_state (yyscan_t yyscanner)
YY_CURRENT_BUFFER_LVALUE = (YY_BUFFER_STATE) 0;
if ( b->yy_is_our_buffer )
- fts0tfree((void*) b->yy_ch_buf ,yyscanner );
+ fts0tfree((void *) b->yy_ch_buf ,yyscanner );
- fts0tfree((void*) b ,yyscanner );
+ fts0tfree((void *) b ,yyscanner );
}
/* Initializes or reinitializes a buffer.
@@ -1356,7 +1356,7 @@ static void fts0t_load_buffer_state (yyscan_t yyscanner)
{
int oerrno = errno;
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
fts0t_flush_buffer(b ,yyscanner);
@@ -1383,7 +1383,7 @@ static void fts0t_load_buffer_state (yyscan_t yyscanner)
*/
void fts0t_flush_buffer (YY_BUFFER_STATE b , yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
if ( ! b )
return;
@@ -1413,7 +1413,7 @@ static void fts0t_load_buffer_state (yyscan_t yyscanner)
*/
void fts0tpush_buffer_state (YY_BUFFER_STATE new_buffer , yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
if (new_buffer == NULL)
return;
@@ -1444,7 +1444,7 @@ void fts0tpush_buffer_state (YY_BUFFER_STATE new_buffer , yyscan_t yyscanner)
*/
void fts0tpop_buffer_state (yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
if (!YY_CURRENT_BUFFER)
return;
@@ -1465,7 +1465,7 @@ void fts0tpop_buffer_state (yyscan_t yyscanner)
static void fts0tensure_buffer_stack (yyscan_t yyscanner)
{
int num_to_alloc;
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
if (!yyg->yy_buffer_stack) {
@@ -1474,7 +1474,7 @@ static void fts0tensure_buffer_stack (yyscan_t yyscanner)
* immediate realloc on the next call.
*/
num_to_alloc = 1;
- yyg->yy_buffer_stack = (struct yy_buffer_state**) fts0talloc
+ yyg->yy_buffer_stack = (struct yy_buffer_state**)fts0talloc
(num_to_alloc * sizeof(struct yy_buffer_state*)
, yyscanner);
if ( ! yyg->yy_buffer_stack )
@@ -1493,7 +1493,7 @@ static void fts0tensure_buffer_stack (yyscan_t yyscanner)
int grow_size = 8 /* arbitrary grow size */;
num_to_alloc = yyg->yy_buffer_stack_max + grow_size;
- yyg->yy_buffer_stack = (struct yy_buffer_state**) fts0trealloc
+ yyg->yy_buffer_stack = (struct yy_buffer_state**)fts0trealloc
(yyg->yy_buffer_stack,
num_to_alloc * sizeof(struct yy_buffer_state*)
, yyscanner);
@@ -1510,7 +1510,7 @@ static void fts0tensure_buffer_stack (yyscan_t yyscanner)
* @param base the character buffer
* @param size the size in bytes of the character buffer
* @param yyscanner The scanner object.
- * @return the newly allocated buffer state object.
+ * @return the newly allocated buffer state object.
*/
YY_BUFFER_STATE fts0t_scan_buffer (char * base, yy_size_t size , yyscan_t yyscanner)
{
@@ -1571,7 +1571,7 @@ YY_BUFFER_STATE fts0t_scan_bytes (yyconst char * yybytes, int _yybytes_len , y
/* Get memory for full buffer, including space for trailing EOB's. */
n = _yybytes_len + 2;
- buf = (char*) fts0talloc(n ,yyscanner );
+ buf = (char *) fts0talloc(n ,yyscanner );
if ( ! buf )
YY_FATAL_ERROR( "out of dynamic memory in fts0t_scan_bytes()" );
@@ -1626,7 +1626,7 @@ static void yy_fatal_error (yyconst char* msg , yyscan_t yyscanner __attribute_
*/
YY_EXTRA_TYPE fts0tget_extra (yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
return yyextra;
}
@@ -1635,7 +1635,7 @@ YY_EXTRA_TYPE fts0tget_extra (yyscan_t yyscanner)
*/
int fts0tget_lineno (yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
if (! YY_CURRENT_BUFFER)
return 0;
@@ -1648,7 +1648,7 @@ int fts0tget_lineno (yyscan_t yyscanner)
*/
int fts0tget_column (yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
if (! YY_CURRENT_BUFFER)
return 0;
@@ -1661,7 +1661,7 @@ int fts0tget_column (yyscan_t yyscanner)
*/
FILE *fts0tget_in (yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
return yyin;
}
@@ -1670,7 +1670,7 @@ FILE *fts0tget_in (yyscan_t yyscanner)
*/
FILE *fts0tget_out (yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
return yyout;
}
@@ -1679,7 +1679,7 @@ FILE *fts0tget_out (yyscan_t yyscanner)
*/
int fts0tget_leng (yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
return yyleng;
}
@@ -1689,7 +1689,7 @@ int fts0tget_leng (yyscan_t yyscanner)
char *fts0tget_text (yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
return yytext;
}
@@ -1699,7 +1699,7 @@ char *fts0tget_text (yyscan_t yyscanner)
*/
void fts0tset_extra (YY_EXTRA_TYPE user_defined , yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
yyextra = user_defined ;
}
@@ -1709,11 +1709,11 @@ void fts0tset_extra (YY_EXTRA_TYPE user_defined , yyscan_t yyscanner)
*/
void fts0tset_lineno (int line_number , yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
/* lineno is only valid if an input buffer exists. */
if (! YY_CURRENT_BUFFER )
- yy_fatal_error( "fts0tset_lineno called with no buffer" , yyscanner);
+ yy_fatal_error( "fts0tset_lineno called with no buffer" , yyscanner);
yylineno = line_number;
}
@@ -1724,11 +1724,11 @@ void fts0tset_lineno (int line_number , yyscan_t yyscanner)
*/
void fts0tset_column (int column_no , yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
/* column is only valid if an input buffer exists. */
if (! YY_CURRENT_BUFFER )
- yy_fatal_error( "fts0tset_column called with no buffer" , yyscanner);
+ yy_fatal_error( "fts0tset_column called with no buffer" , yyscanner);
yycolumn = column_no;
}
@@ -1741,25 +1741,25 @@ void fts0tset_column (int column_no , yyscan_t yyscanner)
*/
void fts0tset_in (FILE * in_str , yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
yyin = in_str ;
}
void fts0tset_out (FILE * out_str , yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
yyout = out_str ;
}
int fts0tget_debug (yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
return yy_flex_debug;
}
void fts0tset_debug (int bdebug , yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
yy_flex_debug = bdebug ;
}
@@ -1819,19 +1819,19 @@ int fts0tlex_init_extra(YY_EXTRA_TYPE yy_user_defined,yyscan_t* ptr_yy_globals )
errno = ENOMEM;
return 1;
}
-
+
/* By setting to 0xAA, we expose bugs in
yy_init_globals. Leave at 0x00 for releases. */
memset(*ptr_yy_globals,0x00,sizeof(struct yyguts_t));
fts0tset_extra (yy_user_defined, *ptr_yy_globals);
-
+
return yy_init_globals ( *ptr_yy_globals );
}
static int yy_init_globals (yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
/* Initialization is the same as for the non-reentrant scanner.
* This function is called from fts0tlex_destroy(), so don't allocate here.
*/
@@ -1839,7 +1839,7 @@ static int yy_init_globals (yyscan_t yyscanner)
yyg->yy_buffer_stack = 0;
yyg->yy_buffer_stack_top = 0;
yyg->yy_buffer_stack_max = 0;
- yyg->yy_c_buf_p = (char*) 0;
+ yyg->yy_c_buf_p = (char *) 0;
yyg->yy_init = 0;
yyg->yy_start = 0;
@@ -1852,8 +1852,8 @@ static int yy_init_globals (yyscan_t yyscanner)
yyin = stdin;
yyout = stdout;
#else
- yyin = (FILE*) 0;
- yyout = (FILE*) 0;
+ yyin = (FILE *) 0;
+ yyout = (FILE *) 0;
#endif
/* For future reference: Set errno on error, since we are called by
@@ -1865,7 +1865,7 @@ static int yy_init_globals (yyscan_t yyscanner)
/* fts0tlex_destroy is for both reentrant and non-reentrant scanners. */
int fts0tlex_destroy (yyscan_t yyscanner)
{
- struct yyguts_t * yyg = (struct yyguts_t*) yyscanner;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
/* Pop the buffer stack, destroying each element. */
while(YY_CURRENT_BUFFER){
@@ -1918,24 +1918,24 @@ static int yy_flex_strlen (yyconst char * s , yyscan_t yyscanner __attribute__(
void *fts0talloc (yy_size_t size , yyscan_t yyscanner __attribute__((unused)))
{
- return (void*) malloc( size );
+ return (void *) malloc( size );
}
void *fts0trealloc (void * ptr, yy_size_t size , yyscan_t yyscanner __attribute__((unused)))
{
- /* The cast to (char*) in the following accommodates both
+ /* The cast to (char *) in the following accommodates both
* implementations that use char* generic pointers, and those
* that use void* generic pointers. It works with the latter
* because both ANSI C and C++ allow castless assignment from
* any pointer type to void*, and deal with argument conversions
* as though doing an assignment.
*/
- return (void*) realloc( (char*) ptr, size );
+ return (void *) realloc( (char *) ptr, size );
}
void fts0tfree (void * ptr , yyscan_t yyscanner __attribute__((unused)))
{
- free( (char*) ptr ); /* see fts0trealloc() for (char*) cast */
+ free( (char *) ptr ); /* see fts0trealloc() for (char *) cast */
}
#define YYTABLES_NAME "yytables"
diff --git a/storage/innobase/fts/fts0tlex.l b/storage/innobase/fts/fts0tlex.l
index 8b04a9fecf1..8c42678ac7a 100644
--- a/storage/innobase/fts/fts0tlex.l
+++ b/storage/innobase/fts/fts0tlex.l
@@ -57,7 +57,7 @@ this program; if not, write to the Free Software Foundation, Inc.,
return(FTS_TEXT);
}
-[^" \n]* {
+[^" \n\%]* {
val->token = strdup(fts0tget_text(yyscanner));
return(FTS_TERM);
diff --git a/storage/innobase/ha/ha0ha.cc b/storage/innobase/ha/ha0ha.cc
index b58dc486cfa..ae1eb55982a 100644
--- a/storage/innobase/ha/ha0ha.cc
+++ b/storage/innobase/ha/ha0ha.cc
@@ -32,9 +32,7 @@ Created 8/22/1994 Heikki Tuuri
#ifdef UNIV_DEBUG
# include "buf0buf.h"
#endif /* UNIV_DEBUG */
-#ifndef UNIV_HOTBACKUP
# include "btr0sea.h"
-#endif /* !UNIV_HOTBACKUP */
#include "page0page.h"
/*************************************************************//**
@@ -79,7 +77,6 @@ ha_create_func(
return(table);
}
-#ifndef UNIV_HOTBACKUP
if (type == MEM_HEAP_FOR_PAGE_HASH) {
/* We create a hash table protected by rw_locks for
buf_pool->page_hash. */
@@ -97,7 +94,6 @@ ha_create_func(
table->heaps[i] = mem_heap_create_typed(4096, type);
ut_a(table->heaps[i]);
}
-#endif /* !UNIV_HOTBACKUP */
return(table);
}
@@ -120,7 +116,6 @@ ha_clear(
|| rw_lock_own(&btr_search_latch, RW_LOCK_EXCLUSIVE));
#endif /* UNIV_SYNC_DEBUG */
-#ifndef UNIV_HOTBACKUP
/* Free the memory heaps. */
n = table->n_sync_obj;
@@ -151,7 +146,6 @@ ha_clear(
table->n_sync_obj = 0;
table->type = HASH_TABLE_SYNC_NONE;
-#endif /* !UNIV_HOTBACKUP */
/* Clear the hash table. */
n = hash_get_n_cells(table);
@@ -179,7 +173,7 @@ ha_insert_for_fold_func(
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
buf_block_t* block, /*!< in: buffer block containing the data */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
- const rec_t* data) /*!< in: data, must not be NULL */
+ const rec_t* data) /*!< in: data, must not be NULL */
{
hash_cell_t* cell;
ha_node_t* node;
@@ -215,7 +209,7 @@ ha_insert_for_fold_func(
prev_node->block = block;
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
- prev_node->data = (rec_t*) data;
+ prev_node->data = data;
return(TRUE);
}
@@ -237,7 +231,7 @@ ha_insert_for_fold_func(
return(FALSE);
}
- ha_node_set_data(node, block, (rec_t*) data);
+ ha_node_set_data(node, block, data);
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
if (table->adaptive) {
@@ -304,11 +298,11 @@ ha_search_and_update_if_found_func(
/*===============================*/
hash_table_t* table, /*!< in/out: hash table */
ulint fold, /*!< in: folded value of the searched data */
- rec_t* data, /*!< in: pointer to the data */
+ const rec_t* data, /*!< in: pointer to the data */
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
buf_block_t* new_block,/*!< in: block containing new_data */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
- rec_t* new_data)/*!< in: new pointer to the data */
+ const rec_t* new_data)/*!< in: new pointer to the data */
{
ha_node_t* node;
diff --git a/storage/innobase/ha/hash0hash.cc b/storage/innobase/ha/hash0hash.cc
index 99128a676d5..174b6bcb57e 100644
--- a/storage/innobase/ha/hash0hash.cc
+++ b/storage/innobase/ha/hash0hash.cc
@@ -106,14 +106,14 @@ void
hash_mutex_exit_all_but(
/*====================*/
hash_table_t* table, /*!< in: hash table */
- mutex_t* keep_mutex) /*!< in: mutex to keep */
+ ib_mutex_t* keep_mutex) /*!< in: mutex to keep */
{
ulint i;
ut_ad(table->type == HASH_TABLE_SYNC_MUTEX);
for (i = 0; i < table->n_sync_obj; i++) {
- mutex_t* mutex = table->sync_obj.mutexes + i;
+ ib_mutex_t* mutex = table->sync_obj.mutexes + i;
if (UNIV_LIKELY(keep_mutex != mutex)) {
mutex_exit(mutex);
}
@@ -373,8 +373,8 @@ hash_create_sync_obj_func(
switch (type) {
case HASH_TABLE_SYNC_MUTEX:
- table->sync_obj.mutexes = static_cast<mutex_t*>(
- mem_alloc(n_sync_obj * sizeof(mutex_t)));
+ table->sync_obj.mutexes = static_cast<ib_mutex_t*>(
+ mem_alloc(n_sync_obj * sizeof(ib_mutex_t)));
for (i = 0; i < n_sync_obj; i++) {
mutex_create(hash_table_mutex_key,
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index a6fdaf36d32..0410f091ccb 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -3,6 +3,7 @@
Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
Copyright (c) 2008, 2009 Google Inc.
Copyright (c) 2009, Percona Inc.
+Copyright (c) 2012, Facebook Inc.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -36,8 +37,10 @@ this program; if not, write to the Free Software Foundation, Inc.,
#include <sql_acl.h> // PROCESS_ACL
#include <debug_sync.h> // DEBUG_SYNC
+#include <my_base.h> // HA_OPTION_*
#include <mysys_err.h>
#include <innodb_priv.h>
+
#ifdef _WIN32
#include <io.h>
#endif
@@ -57,8 +60,10 @@ this program; if not, write to the Free Software Foundation, Inc.,
#include "srv0srv.h"
#include "trx0roll.h"
#include "trx0trx.h"
+
#include "trx0sys.h"
#include "mtr0mtr.h"
+#include "rem0types.h"
#include "row0ins.h"
#include "row0mysql.h"
#include "row0sel.h"
@@ -75,14 +80,24 @@ this program; if not, write to the Free Software Foundation, Inc.,
#include "row0merge.h"
#include "dict0boot.h"
#include "dict0stats.h"
+#include "dict0stats_bg.h"
#include "ha_prototypes.h"
#include "ut0mem.h"
#include "ibuf0ibuf.h"
#include "dict0dict.h"
#include "srv0mon.h"
+#include "api0api.h"
+#include "api0misc.h"
#include "pars0pars.h"
#include "fts0fts.h"
#include "fts0types.h"
+#include "row0import.h"
+#include "row0quiesce.h"
+#ifdef UNIV_DEBUG
+#include "trx0purge.h"
+#endif /* UNIV_DEBUG */
+#include "fts0priv.h"
+#include "page0zip.h"
#include "ha_innodb.h"
#include "i_s.h"
@@ -112,11 +127,9 @@ static const long AUTOINC_NEW_STYLE_LOCKING = 1;
static const long AUTOINC_NO_LOCKING = 2;
static long innobase_mirrored_log_groups;
-static long innobase_log_files_in_group;
static long innobase_log_buffer_size;
static long innobase_additional_mem_pool_size;
static long innobase_file_io_threads;
-static long innobase_force_recovery;
static long innobase_open_files;
static long innobase_autoinc_lock_mode;
static ulong innobase_commit_concurrency = 0;
@@ -134,12 +147,13 @@ static uint innobase_old_blocks_pct;
of the buffer pool. */
static uint innobase_change_buffer_max_size = CHANGE_BUFFER_DEFAULT_SIZE;
+static ulong innobase_compression_level = DEFAULT_COMPRESSION_LEVEL;
+
/* The default values for the following char* start-up parameters
are determined in innobase_init below: */
static char* innobase_data_home_dir = NULL;
static char* innobase_data_file_path = NULL;
-static char* innobase_log_group_home_dir = NULL;
static char* innobase_file_format_name = NULL;
static char* innobase_change_buffering = NULL;
static char* innobase_enable_monitor_counter = NULL;
@@ -178,7 +192,6 @@ static my_bool innobase_stats_on_metadata = TRUE;
static my_bool innobase_large_prefix = FALSE;
static my_bool innodb_optimize_fulltext_only = FALSE;
-
static char* internal_innobase_data_file_path = NULL;
static char* innodb_version_str = (char*) INNODB_VERSION_STR;
@@ -252,6 +265,11 @@ const struct _ft_vft ft_vft_result = {NULL,
innobase_fts_retrieve_ranking,
NULL};
+const struct _ft_vft_ext ft_vft_ext_result = {innobase_fts_get_version,
+ innobase_fts_flags,
+ innobase_fts_retrieve_docid,
+ innobase_fts_count_matches};
+
#ifdef HAVE_PSI_INTERFACE
/* Keys to register pthread mutexes/cond in the current file with
performance schema */
@@ -264,8 +282,7 @@ static mysql_pfs_key_t pending_checkpoint_mutex_key;
static PSI_mutex_info all_pthread_mutexes[] = {
{&commit_threads_m_key, "commit_threads_m", 0},
{&commit_cond_mutex_key, "commit_cond_mutex", 0},
- {&innobase_share_mutex_key, "innobase_share_mutex", 0},
- {&pending_checkpoint_mutex_key, "pending_checkpoint_mutex", 0}
+ {&innobase_share_mutex_key, "innobase_share_mutex", 0}
};
static PSI_cond_info all_innodb_conds[] = {
@@ -308,8 +325,10 @@ static PSI_mutex_info all_innodb_mutexes[] = {
# endif /* UNIV_MEM_DEBUG */
{&mem_pool_mutex_key, "mem_pool_mutex", 0},
{&mutex_list_mutex_key, "mutex_list_mutex", 0},
+ {&page_zip_stat_per_index_mutex_key, "page_zip_stat_per_index_mutex", 0},
{&purge_sys_bh_mutex_key, "purge_sys_bh_mutex", 0},
{&recv_sys_mutex_key, "recv_sys_mutex", 0},
+ {&recv_writer_mutex_key, "recv_writer_mutex", 0},
{&rseg_mutex_key, "rseg_mutex", 0},
# ifdef UNIV_SYNC_DEBUG
{&rw_lock_debug_mutex_key, "rw_lock_debug_mutex", 0},
@@ -338,8 +357,12 @@ static PSI_mutex_info all_innodb_mutexes[] = {
#ifndef HAVE_ATOMIC_BUILTINS
{&srv_conc_mutex_key, "srv_conc_mutex", 0},
#endif /* !HAVE_ATOMIC_BUILTINS */
+#ifndef HAVE_ATOMIC_BUILTINS_64
+ {&monitor_mutex_key, "monitor_mutex", 0},
+#endif /* !HAVE_ATOMIC_BUILTINS_64 */
{&ut_list_mutex_key, "ut_list_mutex", 0},
{&trx_sys_mutex_key, "trx_sys_mutex", 0},
+ {&zip_pad_mutex_key, "zip_pad_mutex", 0},
};
# endif /* UNIV_PFS_MUTEX */
@@ -366,6 +389,7 @@ static PSI_rwlock_info all_innodb_rwlocks[] = {
{&trx_i_s_cache_lock_key, "trx_i_s_cache_lock", 0},
{&trx_purge_latch_key, "trx_purge_latch", 0},
{&index_tree_rw_lock_key, "index_tree_rw_lock", 0},
+ {&index_online_log_key, "index_online_log", 0},
{&dict_table_stats_latch_key, "dict_table_stats", 0},
{&hash_table_rw_lock_key, "hash table locks", 0}
};
@@ -383,7 +407,8 @@ static PSI_thread_info all_innodb_threads[] = {
{&srv_monitor_thread_key, "srv_monitor_thread", 0},
{&srv_master_thread_key, "srv_master_thread", 0},
{&srv_purge_thread_key, "srv_purge_thread", 0},
- {&buf_page_cleaner_thread_key, "page_cleaner_thread", 0}
+ {&buf_page_cleaner_thread_key, "page_cleaner_thread", 0},
+ {&recv_writer_thread_key, "recovery writer thread", 0}
};
# endif /* UNIV_PFS_THREAD */
@@ -398,6 +423,70 @@ static PSI_file_info all_innodb_files[] = {
# endif /* UNIV_PFS_IO */
#endif /* HAVE_PSI_INTERFACE */
+/** Always normalize table name to lower case on Windows */
+#ifdef __WIN__
+#define normalize_table_name(norm_name, name) \
+ normalize_table_name_low(norm_name, name, TRUE)
+#else
+#define normalize_table_name(norm_name, name) \
+ normalize_table_name_low(norm_name, name, FALSE)
+#endif /* __WIN__ */
+
+/** Set up InnoDB API callback function array */
+ib_cb_t innodb_api_cb[] = {
+ (ib_cb_t) ib_cursor_open_table,
+ (ib_cb_t) ib_cursor_read_row,
+ (ib_cb_t) ib_cursor_insert_row,
+ (ib_cb_t) ib_cursor_delete_row,
+ (ib_cb_t) ib_cursor_update_row,
+ (ib_cb_t) ib_cursor_moveto,
+ (ib_cb_t) ib_cursor_first,
+ (ib_cb_t) ib_cursor_next,
+ (ib_cb_t) ib_cursor_last,
+ (ib_cb_t) ib_cursor_set_match_mode,
+ (ib_cb_t) ib_sec_search_tuple_create,
+ (ib_cb_t) ib_clust_read_tuple_create,
+ (ib_cb_t) ib_tuple_delete,
+ (ib_cb_t) ib_tuple_copy,
+ (ib_cb_t) ib_tuple_read_u32,
+ (ib_cb_t) ib_tuple_write_u32,
+ (ib_cb_t) ib_tuple_read_u64,
+ (ib_cb_t) ib_tuple_write_u64,
+ (ib_cb_t) ib_tuple_read_i32,
+ (ib_cb_t) ib_tuple_write_i32,
+ (ib_cb_t) ib_tuple_read_i64,
+ (ib_cb_t) ib_tuple_write_i64,
+ (ib_cb_t) ib_tuple_get_n_cols,
+ (ib_cb_t) ib_col_set_value,
+ (ib_cb_t) ib_col_get_value,
+ (ib_cb_t) ib_col_get_meta,
+ (ib_cb_t) ib_trx_begin,
+ (ib_cb_t) ib_trx_commit,
+ (ib_cb_t) ib_trx_rollback,
+ (ib_cb_t) ib_trx_start,
+ (ib_cb_t) ib_trx_release,
+ (ib_cb_t) ib_trx_state,
+ (ib_cb_t) ib_cursor_lock,
+ (ib_cb_t) ib_cursor_close,
+ (ib_cb_t) ib_cursor_new_trx,
+ (ib_cb_t) ib_cursor_reset,
+ (ib_cb_t) ib_open_table_by_name,
+ (ib_cb_t) ib_col_get_name,
+ (ib_cb_t) ib_table_truncate,
+ (ib_cb_t) ib_cursor_open_index_using_name,
+ (ib_cb_t) ib_close_thd,
+ (ib_cb_t) ib_cfg_get_cfg,
+ (ib_cb_t) ib_cursor_set_cluster_access,
+ (ib_cb_t) ib_cursor_commit_trx,
+ (ib_cb_t) ib_cfg_trx_level,
+ (ib_cb_t) ib_tuple_get_n_user_cols,
+ (ib_cb_t) ib_cursor_set_lock_mode,
+ (ib_cb_t) ib_cursor_clear_trx,
+ (ib_cb_t) ib_get_idx_field_name,
+ (ib_cb_t) ib_trx_get_start_time,
+ (ib_cb_t) ib_cfg_bk_commit_interval
+};
+
/*************************************************************//**
Check whether valid argument given to innodb_ft_*_stopword_table.
This function is registered as a callback with MySQL.
@@ -412,24 +501,10 @@ innodb_stopword_table_validate(
void* save, /*!< out: immediate result
for update function */
struct st_mysql_value* value); /*!< in: incoming string */
-/****************************************************************//**
-Update the session variable innodb_session_stopword_table
-with the "saved" stopword table name value. This function
-is registered as a callback with MySQL. */
-static
-void
-innodb_session_stopword_update(
-/*===========================*/
- THD* thd, /*!< in: thread handle */
- struct st_mysql_sys_var* var, /*!< in: pointer to
- system variable */
- void* var_ptr,/*!< out: where the
- formal string goes */
- const void* save); /*!< in: immediate result
- from check function */
-/** "GEN_CLUST_INDEX" is the name reserved for Innodb default
-system primary index. */
-static const char innobase_index_reserve_name[]= "GEN_CLUST_INDEX";
+
+/** "GEN_CLUST_INDEX" is the name reserved for InnoDB default
+system clustered index when there is no primary key. */
+const char innobase_index_reserve_name[] = "GEN_CLUST_INDEX";
static const char innobase_hton_name[]= "InnoDB";
@@ -452,19 +527,14 @@ static MYSQL_THDVAR_BOOL(ft_enable_stopword, PLUGIN_VAR_OPCMDARG,
NULL, NULL,
/* default */ TRUE);
-static MYSQL_THDVAR_BOOL(analyze_is_persistent, PLUGIN_VAR_OPCMDARG,
- "ANALYZE TABLE in InnoDB uses a more precise (and slow) sampling "
- "algorithm and saves the results persistently.",
- /* check_func */ NULL, /* update_func */ NULL,
- /* default */ FALSE);
-
static MYSQL_THDVAR_ULONG(lock_wait_timeout, PLUGIN_VAR_RQCMDARG,
"Timeout in seconds an InnoDB transaction may wait for a lock before being rolled back. Values above 100000000 disable the timeout.",
NULL, NULL, 50, 1, 1024 * 1024 * 1024, 0);
-static MYSQL_THDVAR_STR(ft_user_stopword_table, PLUGIN_VAR_OPCMDARG,
+static MYSQL_THDVAR_STR(ft_user_stopword_table,
+ PLUGIN_VAR_OPCMDARG|PLUGIN_VAR_MEMALLOC,
"User supplied stopword table name, effective in the session level.",
- innodb_stopword_table_validate, innodb_session_stopword_update, NULL);
+ innodb_stopword_table_validate, NULL, NULL);
static SHOW_VAR innodb_status_variables[]= {
{"buffer_pool_dump_status",
@@ -575,9 +645,9 @@ static SHOW_VAR innodb_status_variables[]= {
(char*) &export_vars.innodb_available_undo_logs, SHOW_LONG},
#ifdef UNIV_DEBUG
{"purge_trx_id_age",
- (char*) &export_vars.innodb_purge_trx_id_age, SHOW_LONG},
+ (char*) &export_vars.innodb_purge_trx_id_age, SHOW_LONG},
{"purge_view_trx_id_age",
- (char*) &export_vars.innodb_purge_view_trx_id_age, SHOW_LONG},
+ (char*) &export_vars.innodb_purge_view_trx_id_age, SHOW_LONG},
#endif /* UNIV_DEBUG */
{NullS, NullS, SHOW_LONG}
};
@@ -610,18 +680,8 @@ innobase_close_connection(
THD* thd); /*!< in: MySQL thread handle for
which to close the connection */
-static
-void
-innobase_commit_ordered(
-/*======================*/
- handlerton *hton, /*!< in/out: Innodb handlerton */
- THD* thd, /*!< in: MySQL thread handle */
- bool all); /*!< in: TRUE - commit transaction
- FALSE - the current SQL statement
- ended */
-static
-void
-innobase_kill_query(handlerton *hton, THD* thd, enum thd_kill_levels level);
+static void innobase_kill_query(handlerton *hton, THD* thd, enum thd_kill_levels level);
+static void innobase_commit_ordered(handlerton *hton, THD* thd, bool all);
/*****************************************************************//**
Commits a transaction in an InnoDB database or marks an SQL statement
@@ -696,14 +756,7 @@ innobase_release_savepoint(
savepoint should be released */
void* savepoint); /*!< in: savepoint data */
-/*****************************************************************//**
-Handle a commit checkpoint request from server layer.
-We simply flush the redo log immediately and do the notify call.*/
-static
-void
-innobase_checkpoint_request(
- handlerton *hton,
- void *cookie);
+static void innobase_checkpoint_request(handlerton *hton, void *cookie);
/************************************************************************//**
Function for constructing an InnoDB table handler instance. */
@@ -757,13 +810,6 @@ int
innobase_file_format_validate_and_set(
/*==================================*/
const char* format_max); /*!< in: parameter value */
-/****************************************************************//**
-Return alter table flags supported in an InnoDB database. */
-static
-uint
-innobase_alter_table_flags(
-/*=======================*/
- uint flags);
/*******************************************************************//**
This function is used to prepare an X/Open XA distributed transaction.
@@ -937,6 +983,21 @@ innodb_enable_monitor_at_startup(
/*=============================*/
char* str); /*!< in: monitor counter enable list */
+/*********************************************************************
+Normalizes a table name string. A normalized name consists of the
+database name catenated to '/' and table name. An example:
+test/mytable. On Windows normalization puts both the database name and the
+table name always to lower case if "set_lower_case" is set to TRUE. */
+static
+void
+normalize_table_name_low(
+/*=====================*/
+ char* norm_name, /* out: normalized name as a
+ null-terminated string */
+ const char* name, /* in: table name string */
+ ibool set_lower_case); /* in: TRUE if we want to set
+ name to lower case */
+
/*************************************************************//**
Check for a valid value of innobase_commit_concurrency.
@return 0 for valid innodb_commit_concurrency */
@@ -979,7 +1040,7 @@ innobase_create_handler(
TABLE_SHARE* table,
MEM_ROOT* mem_root)
{
- return new (mem_root) ha_innobase(hton, table);
+ return(new (mem_root) ha_innobase(hton, table));
}
/* General functions */
@@ -1020,9 +1081,22 @@ UNIV_INTERN
ibool
thd_is_replication_slave_thread(
/*============================*/
- void* thd) /*!< in: thread handle (THD*) */
+ THD* thd) /*!< in: thread handle */
{
- return((ibool) thd_slave_thread((THD*) thd));
+ return((ibool) thd_slave_thread(thd));
+}
+
+/******************************************************************//**
+Gets information on the durability property requested by thread.
+Used when writing either a prepare or commit record to the log
+buffer. @return the durability property. */
+UNIV_INTERN
+enum durability_properties
+thd_requested_durability(
+/*=====================*/
+ const THD* thd) /*!< in: thread handle */
+{
+ return(thd_get_durability_property(thd));
}
/******************************************************************//**
@@ -1032,10 +1106,9 @@ UNIV_INTERN
ibool
thd_trx_is_read_only(
/*=================*/
- void* thd) /*!< in: thread handle (THD*) */
+ THD* thd) /*!< in: thread handle */
{
- /* Waiting on WL#6046 to complete. */
- return(FALSE);
+ return(thd != 0 && thd_tx_is_read_only(thd));
}
/******************************************************************//**
@@ -1046,11 +1119,11 @@ UNIV_INTERN
ibool
thd_trx_is_auto_commit(
/*===================*/
- void* thd) /*!< in: thread handle (THD*) can be NULL */
+ THD* thd) /*!< in: thread handle, can be NULL */
{
return(thd != NULL
&& !thd_test_options(
- static_cast<THD*>(thd),
+ thd,
OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)
&& thd_is_select(thd));
}
@@ -1126,6 +1199,17 @@ innobase_srv_conc_force_exit_innodb(
}
/******************************************************************//**
+Returns the NUL terminated value of glob_hostname.
+@return pointer to glob_hostname. */
+UNIV_INTERN
+const char*
+server_get_hostname()
+/*=================*/
+{
+ return(glob_hostname);
+}
+
+/******************************************************************//**
Returns true if the transaction this thread is processing has edited
non-transactional tables. Used by the deadlock detector when deciding
which transaction to rollback in case of a deadlock - we try to avoid
@@ -1135,9 +1219,9 @@ UNIV_INTERN
ibool
thd_has_edited_nontrans_tables(
/*===========================*/
- void* thd) /*!< in: thread handle (THD*) */
+ THD* thd) /*!< in: thread handle */
{
- return((ibool) thd_non_transactional_update((THD*) thd));
+ return((ibool) thd_non_transactional_update(thd));
}
/******************************************************************//**
@@ -1147,9 +1231,9 @@ UNIV_INTERN
ibool
thd_is_select(
/*==========*/
- const void* thd) /*!< in: thread handle (THD*) */
+ const THD* thd) /*!< in: thread handle */
{
- return(thd_sql_command((const THD*) thd) == SQLCOM_SELECT);
+ return(thd_sql_command(thd) == SQLCOM_SELECT);
}
/******************************************************************//**
@@ -1160,10 +1244,10 @@ UNIV_INTERN
ibool
thd_supports_xa(
/*============*/
- void* thd) /*!< in: thread handle (THD*), or NULL to query
+ THD* thd) /*!< in: thread handle, or NULL to query
the global innodb_supports_xa */
{
- return(THDVAR((THD*) thd, support_xa));
+ return(THDVAR(thd, support_xa));
}
/******************************************************************//**
@@ -1173,12 +1257,12 @@ UNIV_INTERN
ulong
thd_lock_wait_timeout(
/*==================*/
- void* thd) /*!< in: thread handle (THD*), or NULL to query
+ THD* thd) /*!< in: thread handle, or NULL to query
the global innodb_lock_wait_timeout */
{
/* According to <mysql/plugin.h>, passing thd == NULL
returns the global value of the session variable. */
- return(THDVAR((THD*) thd, lock_wait_timeout));
+ return(THDVAR(thd, lock_wait_timeout));
}
/******************************************************************//**
@@ -1187,17 +1271,18 @@ UNIV_INTERN
void
thd_set_lock_wait_time(
/*===================*/
- void* thd, /*!< in: thread handle (THD*) */
+ THD* thd, /*!< in/out: thread handle */
ulint value) /*!< in: time waited for the lock */
{
if (thd) {
- thd_storage_lock_wait((THD*) thd, value);
+ thd_storage_lock_wait(thd, value);
}
}
/********************************************************************//**
Obtain the InnoDB transaction of a MySQL thread.
@return reference to transaction pointer */
+__attribute__((warn_unused_result, nonnull))
static inline
trx_t*&
thd_to_trx(
@@ -1257,11 +1342,11 @@ Converts an InnoDB error code to a MySQL error code and also tells to MySQL
about a possible transaction rollback inside InnoDB caused by a lock wait
timeout or a deadlock.
@return MySQL error code */
-UNIV_INTERN
+static
int
convert_error_code_to_mysql(
/*========================*/
- int error, /*!< in: InnoDB error code */
+ dberr_t error, /*!< in: InnoDB error code */
ulint flags, /*!< in: InnoDB table flags, or 0 */
THD* thd) /*!< in: user thread handle or NULL */
{
@@ -1299,7 +1384,7 @@ convert_error_code_to_mysql(
return(HA_ERR_FOUND_DUPP_KEY);
case DB_READ_ONLY:
- return(HA_ERR_READ_ONLY_TRANSACTION);
+ return(HA_ERR_TABLE_READONLY);
case DB_FOREIGN_DUPLICATE_KEY:
return(HA_ERR_FOREIGN_DUPLICATE_KEY);
@@ -1356,12 +1441,19 @@ convert_error_code_to_mysql(
case DB_OUT_OF_FILE_SPACE:
return(HA_ERR_RECORD_FILE_FULL);
+ case DB_TABLE_IN_FK_CHECK:
+ return(HA_ERR_TABLE_IN_FK_CHECK);
+
case DB_TABLE_IS_BEING_USED:
return(HA_ERR_WRONG_COMMAND);
+ case DB_TABLESPACE_DELETED:
case DB_TABLE_NOT_FOUND:
return(HA_ERR_NO_SUCH_TABLE);
+ case DB_TABLESPACE_NOT_FOUND:
+ return(HA_ERR_NO_SUCH_TABLE);
+
case DB_TOO_BIG_RECORD: {
/* If prefix is true then a 768-byte prefix is stored
locally for BLOB fields. Refer to dict_table_get_format() */
@@ -1377,7 +1469,7 @@ convert_error_code_to_mysql(
"or ROW_FORMAT=COMPRESSED ": "",
prefix ? DICT_MAX_FIXED_COL_LEN : 0);
return(HA_ERR_TO_BIG_ROW);
- }
+ }
case DB_TOO_BIG_INDEX_COL:
my_error(ER_INDEX_COLUMN_TOO_LONG, MYF(0),
@@ -1398,21 +1490,11 @@ convert_error_code_to_mysql(
return(HA_ERR_LOCK_TABLE_FULL);
- case DB_PRIMARY_KEY_IS_NULL:
- return(ER_PRIMARY_CANT_HAVE_NULL);
-
case DB_FTS_INVALID_DOCID:
return(HA_FTS_INVALID_DOCID);
case DB_TOO_MANY_CONCURRENT_TRXS:
- /* New error code HA_ERR_TOO_MANY_CONCURRENT_TRXS is only
- available in 5.1.38 and later, but the plugin should still
- work with previous versions of MySQL. */
-#ifdef HA_ERR_TOO_MANY_CONCURRENT_TRXS
return(HA_ERR_TOO_MANY_CONCURRENT_TRXS);
-#else /* HA_ERR_TOO_MANY_CONCURRENT_TRXS */
- return(HA_ERR_RECORD_FILE_FULL);
-#endif /* HA_ERR_TOO_MANY_CONCURRENT_TRXS */
case DB_UNSUPPORTED:
return(HA_ERR_UNSUPPORTED);
case DB_INDEX_CORRUPT:
@@ -1421,6 +1503,8 @@ convert_error_code_to_mysql(
return(HA_ERR_UNDO_REC_TOO_BIG);
case DB_OUT_OF_MEMORY:
return(HA_ERR_OUT_OF_MEM);
+ case DB_TABLESPACE_EXISTS:
+ return(HA_ERR_TABLESPACE_EXISTS);
}
}
@@ -1431,18 +1515,30 @@ void
innobase_mysql_print_thd(
/*=====================*/
FILE* f, /*!< in: output stream */
- void* thd, /*!< in: pointer to a MySQL THD object */
+ THD* thd, /*!< in: MySQL THD object */
uint max_query_len) /*!< in: max query length to print, or 0 to
use the default max length */
{
char buffer[1024];
- fputs(thd_security_context((THD*) thd, buffer, sizeof buffer,
+ fputs(thd_security_context(thd, buffer, sizeof buffer,
max_query_len), f);
putc('\n', f);
}
/******************************************************************//**
+Get the error message format string.
+@return the format string or 0 if not found. */
+UNIV_INTERN
+const char*
+innobase_get_err_msg(
+/*=================*/
+ int error_code) /*!< in: MySQL error code */
+{
+ return(my_get_err_msg(error_code));
+}
+
+/******************************************************************//**
Get the variable length bounds of the given character set. */
UNIV_INTERN
void
@@ -1498,7 +1594,7 @@ innobase_convert_from_table_id(
{
uint errors;
- strconvert(cs, from, &my_charset_filename, to, (uint) len, &errors);
+ strconvert(cs, from, FN_REFLEN, &my_charset_filename, to, (uint) len, &errors);
}
/******************************************************************//**
@@ -1514,7 +1610,7 @@ innobase_convert_from_id(
{
uint errors;
- strconvert(cs, from, system_charset_info, to, (uint) len, &errors);
+ strconvert(cs, from, FN_REFLEN, system_charset_info, to, (uint) len, &errors);
}
/******************************************************************//**
@@ -1586,9 +1682,9 @@ UNIV_INTERN
struct charset_info_st*
innobase_get_charset(
/*=================*/
- void* mysql_thd) /*!< in: MySQL thread handle */
+ THD* mysql_thd) /*!< in: MySQL thread handle */
{
- return(thd_charset((THD*) mysql_thd));
+ return(thd_charset(mysql_thd));
}
/**********************************************************************//**
@@ -1598,12 +1694,12 @@ UNIV_INTERN
const char*
innobase_get_stmt(
/*==============*/
- void* mysql_thd, /*!< in: MySQL thread handle */
+ THD* thd, /*!< in: MySQL thread handle */
size_t* length) /*!< out: length of the SQL statement */
{
LEX_STRING* stmt;
- stmt = thd_query_string((THD*) mysql_thd);
+ stmt = thd_query_string(thd);
*length = stmt->length;
return(stmt->str);
}
@@ -1644,14 +1740,9 @@ innobase_mysql_tmpfile(void)
/*========================*/
{
int fd2 = -1;
- File fd;
-
- DBUG_EXECUTE_IF(
- "innobase_tmpfile_creation_failure",
- return(-1);
- );
+ File fd = mysql_tmpfile("ib");
- fd = mysql_tmpfile("ib");
+ DBUG_EXECUTE_IF("innobase_tmpfile_creation_failure", return(-1););
if (fd >= 0) {
/* Copy the file descriptor, so that the additional resources
@@ -1773,11 +1864,11 @@ values we want to reserve for multi-value inserts e.g.,
INSERT INTO T VALUES(), (), ();
-innobase_next_autoinc() will be called with increment set to
-n * 3 where autoinc_lock_mode != TRADITIONAL because we want
-to reserve 3 values for the multi-value INSERT above.
+innobase_next_autoinc() will be called with increment set to 3 where
+autoinc_lock_mode != TRADITIONAL because we want to reserve 3 values for
+the multi-value INSERT above.
@return the next value */
-static
+UNIV_INTERN
ulonglong
innobase_next_autoinc(
/*==================*/
@@ -1814,6 +1905,7 @@ innobase_next_autoinc(
in reality a negative value.The visual studio compilers converts
large double values automatically into unsigned long long datatype
maximum value */
+
if (block >= max_value
|| offset > max_value
|| current >= max_value
@@ -1983,7 +2075,7 @@ trx_deregister_from_2pc(
trx_t* trx) /* in: transaction */
{
trx->is_registered = 0;
- trx->active_commit_ordered = 0;
+ trx->active_commit_ordered = 0;
}
/*********************************************************************//**
@@ -2010,6 +2102,78 @@ trx_is_started(
}
/*********************************************************************//**
+Copy table flags from MySQL's HA_CREATE_INFO into an InnoDB table object.
+Those flags are stored in .frm file and end up in the MySQL table object,
+but are frequently used inside InnoDB so we keep their copies into the
+InnoDB table object. */
+UNIV_INTERN
+void
+innobase_copy_frm_flags_from_create_info(
+/*=====================================*/
+ dict_table_t* innodb_table, /*!< in/out: InnoDB table */
+ HA_CREATE_INFO* create_info) /*!< in: create info */
+{
+ ibool ps_on;
+ ibool ps_off;
+
+ if (dict_table_is_temporary(innodb_table) || srv_read_only_mode) {
+ /* Temp tables do not use persistent stats. */
+ ps_on = FALSE;
+ ps_off = TRUE;
+ } else {
+ ps_on = create_info->table_options
+ & HA_OPTION_STATS_PERSISTENT;
+ ps_off = create_info->table_options
+ & HA_OPTION_NO_STATS_PERSISTENT;
+ }
+
+ dict_stats_set_persistent(innodb_table, ps_on, ps_off);
+
+ dict_stats_auto_recalc_set(
+ innodb_table,
+ create_info->stats_auto_recalc == HA_STATS_AUTO_RECALC_ON,
+ create_info->stats_auto_recalc == HA_STATS_AUTO_RECALC_OFF);
+
+ innodb_table->stats_sample_pages = create_info->stats_sample_pages;
+}
+
+/*********************************************************************//**
+Copy table flags from MySQL's TABLE_SHARE into an InnoDB table object.
+Those flags are stored in .frm file and end up in the MySQL table object,
+but are frequently used inside InnoDB so we keep their copies into the
+InnoDB table object. */
+UNIV_INTERN
+void
+innobase_copy_frm_flags_from_table_share(
+/*=====================================*/
+ dict_table_t* innodb_table, /*!< in/out: InnoDB table */
+ TABLE_SHARE* table_share) /*!< in: table share */
+{
+ ibool ps_on;
+ ibool ps_off;
+
+ if (dict_table_is_temporary(innodb_table) || srv_read_only_mode) {
+ /* Temp tables do not use persistent stats */
+ ps_on = FALSE;
+ ps_off = TRUE;
+ } else {
+ ps_on = table_share->db_create_options
+ & HA_OPTION_STATS_PERSISTENT;
+ ps_off = table_share->db_create_options
+ & HA_OPTION_NO_STATS_PERSISTENT;
+ }
+
+ dict_stats_set_persistent(innodb_table, ps_on, ps_off);
+
+ dict_stats_auto_recalc_set(
+ innodb_table,
+ table_share->stats_auto_recalc == HA_STATS_AUTO_RECALC_ON,
+ table_share->stats_auto_recalc == HA_STATS_AUTO_RECALC_OFF);
+
+ innodb_table->stats_sample_pages = table_share->stats_sample_pages;
+}
+
+/*********************************************************************//**
Construct ha_innobase handler. */
UNIV_INTERN
ha_innobase::ha_innobase(
@@ -2018,14 +2182,15 @@ ha_innobase::ha_innobase(
TABLE_SHARE* table_arg)
:handler(hton, table_arg),
int_table_flags(HA_REC_NOT_IN_SEQ |
- HA_NULL_IN_KEY | HA_CAN_VIRTUAL_COLUMNS |
+ HA_NULL_IN_KEY |
HA_CAN_INDEX_BLOBS |
HA_CAN_SQL_HANDLER |
HA_PRIMARY_KEY_REQUIRED_FOR_POSITION |
HA_PRIMARY_KEY_IN_READ_INDEX |
HA_BINLOG_ROW_CAPABLE |
HA_CAN_GEOMETRY | HA_PARTIAL_COLUMN_READ |
- HA_TABLE_SCAN_ON_INDEX | HA_CAN_FULLTEXT),
+ HA_TABLE_SCAN_ON_INDEX | HA_CAN_FULLTEXT |
+ HA_CAN_FULLTEXT_EXT | HA_CAN_EXPORT),
start_of_scan(0),
num_write_row(0)
{}
@@ -2050,6 +2215,9 @@ ha_innobase::update_thd(
{
trx_t* trx;
+ /* The table should have been opened in ha_innobase::open(). */
+ DBUG_ASSERT(prebuilt->table->n_ref_count > 0);
+
trx = check_trx_exists(thd);
if (prebuilt->trx != trx) {
@@ -2137,7 +2305,9 @@ invalidation to the transaction commit.
2) To store or retrieve a value from the query cache of an InnoDB table TBL,
any query must first ask InnoDB's permission. We must pass the thd as a
parameter because InnoDB will look at the trx id, if any, associated with
-that thd.
+that thd. Also the full_name which is used as key to search for the table
+object. The full_name is a string containing the normalized path to the
+table in the canonical format.
3) Use of the query cache for InnoDB tables is now allowed also when
AUTOCOMMIT==0 or we are inside BEGIN ... COMMIT. Thus transactions no longer
@@ -2172,11 +2342,9 @@ innobase_query_caching_of_table_permitted(
THD* thd, /*!< in: thd of the user who is trying to
store a result to the query cache or
retrieve it */
- char* full_name, /*!< in: concatenation of database name,
- the null character NUL, and the table
- name */
- uint full_name_len, /*!< in: length of the full name, i.e.
- len(dbname) + len(tablename) + 1 */
+ char* full_name, /*!< in: normalized path to the table */
+ uint full_name_len, /*!< in: length of the normalized path
+ to the table */
ulonglong *unused) /*!< unused for this engine */
{
ibool is_autocommit;
@@ -2236,16 +2404,7 @@ innobase_query_caching_of_table_permitted(
}
/* Normalize the table name to InnoDB format */
-
- memcpy(norm_name, full_name, full_name_len);
-
- norm_name[strlen(norm_name)] = '/'; /* InnoDB uses '/' as the
- separator between db and
- table */
- norm_name[full_name_len] = '\0';
-#ifdef __WIN__
- innobase_casedn_str(norm_name);
-#endif
+ normalize_table_name(norm_name, full_name);
innobase_register_trx(innodb_hton_ptr, thd, trx);
@@ -2283,7 +2442,7 @@ innobase_invalidate_query_cache(
/* Argument TRUE below means we are using transactions */
#ifdef HAVE_QUERY_CACHE
- mysql_query_cache_invalidate4((THD*) trx->mysql_thd,
+ mysql_query_cache_invalidate4(trx->mysql_thd,
full_name,
(uint32) full_name_len,
TRUE);
@@ -2302,7 +2461,7 @@ innobase_convert_identifier(
ulint buflen, /*!< in: length of buf, in bytes */
const char* id, /*!< in: identifier to convert */
ulint idlen, /*!< in: length of id, in bytes */
- void* thd, /*!< in: MySQL connection thread, or NULL */
+ THD* thd, /*!< in: MySQL connection thread, or NULL */
ibool file_id)/*!< in: TRUE=id is a table or database name;
FALSE=id is an UTF-8 string */
{
@@ -2325,7 +2484,7 @@ innobase_convert_identifier(
nz[idlen] = 0;
s = nz2;
- idlen = explain_filename((THD*) thd, nz, nz2, sizeof nz2,
+ idlen = explain_filename(thd, nz, nz2, sizeof nz2,
EXPLAIN_PARTITIONS_AS_COMMENT);
goto no_quote;
}
@@ -2334,7 +2493,7 @@ innobase_convert_identifier(
if (UNIV_UNLIKELY(!thd)) {
q = '"';
} else {
- q = get_quote_char_for_identifier((THD*) thd, s, (int) idlen);
+ q = get_quote_char_for_identifier(thd, s, (int) idlen);
}
if (q == EOF) {
@@ -2390,7 +2549,7 @@ innobase_convert_name(
ulint buflen, /*!< in: length of buf, in bytes */
const char* id, /*!< in: identifier to convert */
ulint idlen, /*!< in: length of id, in bytes */
- void* thd, /*!< in: MySQL connection thread, or NULL */
+ THD* thd, /*!< in: MySQL connection thread, or NULL */
ibool table_id)/*!< in: TRUE=id is a table or database name;
FALSE=id is an index name */
{
@@ -2432,14 +2591,13 @@ no_db_name:
}
return(s);
-
}
/*****************************************************************//**
A wrapper function of innobase_convert_name(), convert a table or
index name to the MySQL system_charset_info (UTF-8) and quote it if needed.
@return pointer to the end of buf */
-static inline
+UNIV_INTERN
void
innobase_format_name(
/*==================*/
@@ -2465,9 +2623,9 @@ UNIV_INTERN
ibool
trx_is_interrupted(
/*===============*/
- trx_t* trx) /*!< in: transaction */
+ const trx_t* trx) /*!< in: transaction */
{
- return(trx && trx->mysql_thd && thd_kill_level((THD*) trx->mysql_thd));
+ return(trx && trx->mysql_thd && thd_kill_level(trx->mysql_thd));
}
/**********************************************************************//**
@@ -2479,8 +2637,20 @@ trx_is_strict(
/*==========*/
trx_t* trx) /*!< in: transaction */
{
- return(trx && trx->mysql_thd
- && THDVAR((THD*) trx->mysql_thd, strict_mode));
+ return(trx && trx->mysql_thd && THDVAR(trx->mysql_thd, strict_mode));
+}
+
+/**********************************************************************//**
+Determines if the current MySQL thread is running in strict mode.
+If thd==NULL, THDVAR returns the global value of innodb-strict-mode.
+@return TRUE if strict */
+UNIV_INLINE
+ibool
+thd_is_strict(
+/*==========*/
+ THD* thd) /*!< in: MySQL thread descriptor */
+{
+ return(THDVAR(thd, strict_mode));
}
/**************************************************************//**
@@ -2496,6 +2666,7 @@ ha_innobase::reset_template(void)
prebuilt->keep_other_fields_on_keyread = 0;
prebuilt->read_just_key = 0;
+ prebuilt->in_fts_query = 0;
/* Reset index condition pushdown state. */
if (prebuilt->idx_cond) {
prebuilt->idx_cond = NULL;
@@ -2598,14 +2769,14 @@ innobase_init(
innobase_hton->savepoint_rollback = innobase_rollback_to_savepoint;
innobase_hton->savepoint_release = innobase_release_savepoint;
innobase_hton->prepare_ordered= NULL;
- innobase_hton->commit_ordered= innobase_commit_ordered;
+ innobase_hton->commit_ordered= innobase_commit_ordered;
innobase_hton->commit = innobase_commit;
innobase_hton->rollback = innobase_rollback;
innobase_hton->prepare = innobase_xa_prepare;
innobase_hton->recover = innobase_xa_recover;
innobase_hton->commit_by_xid = innobase_commit_by_xid;
innobase_hton->rollback_by_xid = innobase_rollback_by_xid;
- innobase_hton->commit_checkpoint_request=innobase_checkpoint_request;
+ innobase_hton->commit_checkpoint_request=innobase_checkpoint_request;
innobase_hton->create_cursor_read_view = innobase_create_cursor_view;
innobase_hton->set_cursor_read_view = innobase_set_cursor_view;
innobase_hton->close_cursor_read_view = innobase_close_cursor_view;
@@ -2622,9 +2793,7 @@ innobase_init(
innobase_hton->release_temporary_latches =
innobase_release_temporary_latches;
-
- innobase_hton->alter_table_flags = innobase_alter_table_flags;
- innobase_hton->kill_query = innobase_kill_query;
+ innobase_hton->kill_query = innobase_kill_query;
if (srv_file_per_table)
innobase_hton->tablefile_extensions = ha_innobase_exts;
@@ -2694,12 +2863,12 @@ innobase_init(
srv_data_home = (innobase_data_home_dir ? innobase_data_home_dir :
default_path);
- /* Set default InnoDB data file size to 10 MB and let it be
+ /* Set default InnoDB data file size to 12 MB and let it be
auto-extending. Thus users can use InnoDB in >= 4.0 without having
to specify any startup options. */
if (!innobase_data_file_path) {
- innobase_data_file_path = (char*) "ibdata1:10M:autoextend";
+ innobase_data_file_path = (char*) "ibdata1:12M:autoextend";
}
/* Since InnoDB edits the argument in the next call, we make another
@@ -2723,8 +2892,8 @@ mem_free_and_error:
/* The default dir for log files is the datadir of MySQL */
- if (!innobase_log_group_home_dir) {
- innobase_log_group_home_dir = default_path;
+ if (!srv_log_group_home_dir) {
+ srv_log_group_home_dir = default_path;
}
#ifdef UNIV_LOG_ARCHIVE
@@ -2737,12 +2906,12 @@ mem_free_and_error:
srv_arch_dir = innobase_log_arch_dir;
#endif /* UNIG_LOG_ARCHIVE */
- ret = (bool)
- srv_parse_log_group_home_dirs(innobase_log_group_home_dir);
+ srv_normalize_path_for_win(srv_log_group_home_dir);
- if (ret == FALSE || innobase_mirrored_log_groups != 1) {
- sql_print_error("syntax error in innodb_log_group_home_dir, or a "
- "wrong number of mirrored log groups");
+ if (strchr(srv_log_group_home_dir, ';')
+ || innobase_mirrored_log_groups != 1) {
+ sql_print_error("syntax error in innodb_log_group_home_dir, "
+ "or a wrong number of mirrored log groups");
goto mem_free_and_error;
}
@@ -2834,12 +3003,52 @@ innobase_change_buffering_inited_ok:
innobase_change_buffering = (char*)
innobase_change_buffering_values[ibuf_use];
+ /* Check that interdependent parameters have sane values. */
+ if (srv_max_buf_pool_modified_pct < srv_max_dirty_pages_pct_lwm) {
+ sql_print_warning("InnoDB: innodb_max_dirty_pages_pct_lwm"
+ " cannot be set higher than"
+ " innodb_max_dirty_pages_pct.\n"
+ "InnoDB: Setting"
+ " innodb_max_dirty_pages_pct_lwm to %lu\n",
+ srv_max_buf_pool_modified_pct);
+
+ srv_max_dirty_pages_pct_lwm = srv_max_buf_pool_modified_pct;
+ }
+
+ if (srv_max_io_capacity == SRV_MAX_IO_CAPACITY_DUMMY_DEFAULT) {
+
+ if (srv_io_capacity >= SRV_MAX_IO_CAPACITY_LIMIT / 2) {
+ /* Avoid overflow. */
+ srv_max_io_capacity = SRV_MAX_IO_CAPACITY_LIMIT;
+ } else {
+ /* The user has not set the value. We should
+ set it based on innodb_io_capacity. */
+ srv_max_io_capacity =
+ ut_max(2 * srv_io_capacity, 2000);
+ }
+
+ } else if (srv_max_io_capacity < srv_io_capacity) {
+ sql_print_warning("InnoDB: innodb_io_capacity"
+ " cannot be set higher than"
+ " innodb_io_capacity_max.\n"
+ "InnoDB: Setting"
+ " innodb_io_capacity to %lu\n",
+ srv_max_io_capacity);
+
+ srv_io_capacity = srv_max_io_capacity;
+ }
+
+ if (!is_filename_allowed(srv_buf_dump_filename,
+ strlen(srv_buf_dump_filename), FALSE)) {
+ sql_print_error("InnoDB: innodb_buffer_pool_filename"
+ " cannot have colon (:) in the file name.");
+ goto mem_free_and_error;
+ }
+
/* --------------------------------------------------*/
srv_file_flush_method_str = innobase_file_flush_method;
- srv_n_log_groups = (ulint) innobase_mirrored_log_groups;
- srv_n_log_files = (ulint) innobase_log_files_in_group;
srv_log_file_size = (ib_uint64_t) innobase_log_file_size;
#ifdef UNIV_LOG_ARCHIVE
@@ -2865,6 +3074,18 @@ innobase_change_buffering_inited_ok:
srv_log_buffer_size = (ulint) innobase_log_buffer_size;
+ if (innobase_buffer_pool_instances == 0) {
+ innobase_buffer_pool_instances = 8;
+
+#if defined(__WIN__) && !defined(_WIN64)
+ if (innobase_buffer_pool_size > 1331 * 1024 * 1024) {
+ innobase_buffer_pool_instances
+ = ut_min(MAX_BUFFER_POOLS,
+ (long) (innobase_buffer_pool_size
+ / (128 * 1024 * 1024)));
+ }
+#endif /* defined(__WIN__) && !defined(_WIN64) */
+ }
srv_buf_pool_size = (ulint) innobase_buffer_pool_size;
srv_buf_pool_instances = (ulint) innobase_buffer_pool_instances;
@@ -2897,9 +3118,10 @@ innobase_change_buffering_inited_ok:
srv_n_read_io_threads = (ulint) innobase_read_io_threads;
srv_n_write_io_threads = (ulint) innobase_write_io_threads;
- srv_force_recovery = (ulint) innobase_force_recovery;
-
srv_use_doublewrite_buf = (ibool) innobase_use_doublewrite;
+
+ page_compression_level = (ulint) innobase_compression_level;
+
if (!innobase_use_checksums) {
ut_print_timestamp(stderr);
fprintf(stderr,
@@ -2930,6 +3152,12 @@ innobase_change_buffering_inited_ok:
"level instead, see " REFMAN "set-transaction.html.\n");
}
+ if (innobase_open_files < 10) {
+ innobase_open_files = 300;
+ if (srv_file_per_table && table_cache_size > 300) {
+ innobase_open_files = table_cache_size;
+ }
+ }
srv_max_n_open_files = (ulint) innobase_open_files;
srv_innodb_status = (ibool) innobase_create_status_file;
@@ -3029,7 +3257,7 @@ innobase_change_buffering_inited_ok:
/* Since we in this module access directly the fields of a trx
struct, and due to different headers and flags it might happen that
- mutex_t has a different size in this module and in InnoDB
+ ib_mutex_t has a different size in this module and in InnoDB
modules, we check at run time that the size is the same in
these compilation modules. */
@@ -3144,28 +3372,13 @@ innobase_flush_logs(
DBUG_ENTER("innobase_flush_logs");
DBUG_ASSERT(hton == innodb_hton_ptr);
- log_buffer_flush_to_disk();
+ if (!srv_read_only_mode) {
+ log_buffer_flush_to_disk();
+ }
DBUG_RETURN(result);
}
-/****************************************************************//**
-Return alter table flags supported in an InnoDB database. */
-static
-uint
-innobase_alter_table_flags(
-/*=======================*/
- uint flags)
-{
- return(HA_INPLACE_ADD_INDEX_NO_READ_WRITE
- | HA_INPLACE_ADD_INDEX_NO_WRITE
- | HA_INPLACE_DROP_INDEX_NO_READ_WRITE
- | HA_INPLACE_ADD_UNIQUE_INDEX_NO_READ_WRITE
- | HA_INPLACE_ADD_UNIQUE_INDEX_NO_WRITE
- | HA_INPLACE_DROP_UNIQUE_INDEX_NO_READ_WRITE
- | HA_INPLACE_ADD_PK_INDEX_NO_READ_WRITE);
-}
-
/*****************************************************************//**
Commits a transaction in an InnoDB database. */
static
@@ -3380,9 +3593,6 @@ innobase_commit(
innobase_commit_ordered_2(trx, thd);
}
- /* We were instructed to commit the whole transaction, or
- this is an SQL statement end and autocommit is on */
-
/* We did the first part already in innobase_commit_ordered(),
Now finish by doing a write + flush of logs. */
trx_commit_complete_for_mysql(trx);
@@ -3432,7 +3642,7 @@ innobase_rollback(
transaction FALSE - rollback the current
statement only */
{
- int error = 0;
+ dberr_t error;
trx_t* trx;
DBUG_ENTER("innobase_rollback");
@@ -3481,7 +3691,7 @@ innobase_rollback_trx(
/*==================*/
trx_t* trx) /*!< in: transaction */
{
- int error = 0;
+ dberr_t error = DB_SUCCESS;
DBUG_ENTER("innobase_rollback_trx");
DBUG_PRINT("trans", ("aborting transaction"));
@@ -3580,6 +3790,7 @@ innobase_checkpoint_request(
Log code calls this whenever log has been written and/or flushed up
to a new position. We use this to notify upper layer of a new commit
checkpoint when necessary.*/
+extern "C" UNIV_INTERN
void
innobase_mysql_log_notify(
/*===============*/
@@ -3662,7 +3873,7 @@ innobase_rollback_to_savepoint(
void* savepoint) /*!< in: savepoint data */
{
ib_int64_t mysql_binlog_cache_pos;
- int error = 0;
+ dberr_t error;
trx_t* trx;
char name[64];
@@ -3683,7 +3894,7 @@ innobase_rollback_to_savepoint(
longlong2str((ulint) savepoint, name, 36);
- error = (int) trx_rollback_to_savepoint_for_mysql(
+ error = trx_rollback_to_savepoint_for_mysql(
trx, name, &mysql_binlog_cache_pos);
if (error == DB_SUCCESS && trx->fts_trx != NULL) {
@@ -3707,7 +3918,7 @@ innobase_release_savepoint(
savepoint should be released */
void* savepoint) /*!< in: savepoint data */
{
- int error = 0;
+ dberr_t error;
trx_t* trx;
char name[64];
@@ -3720,7 +3931,7 @@ innobase_release_savepoint(
longlong2str((ulint) savepoint, name, 36);
- error = (int) trx_release_savepoint_for_mysql(trx, name);
+ error = trx_release_savepoint_for_mysql(trx, name);
if (error == DB_SUCCESS && trx->fts_trx != NULL) {
fts_savepoint_release(trx, name);
@@ -3740,7 +3951,7 @@ innobase_savepoint(
THD* thd, /*!< in: handle to the MySQL thread */
void* savepoint) /*!< in: savepoint data */
{
- int error = 0;
+ dberr_t error;
trx_t* trx;
DBUG_ENTER("innobase_savepoint");
@@ -3767,7 +3978,7 @@ innobase_savepoint(
char name[64];
longlong2str((ulint) savepoint,name,36);
- error = (int) trx_savepoint_for_mysql(trx, name, (ib_int64_t)0);
+ error = trx_savepoint_for_mysql(trx, name, (ib_int64_t)0);
if (error == DB_SUCCESS && trx->fts_trx != NULL) {
fts_savepoint_take(trx, name);
@@ -3818,6 +4029,27 @@ innobase_close_connection(
}
/*****************************************************************//**
+Frees a possible InnoDB trx object associated with the current THD.
+@return 0 or error number */
+UNIV_INTERN
+int
+innobase_close_thd(
+/*===============*/
+ THD* thd) /*!< in: handle to the MySQL thread of the user
+ whose resources should be free'd */
+{
+ trx_t* trx = thd_to_trx(thd);
+
+ if (!trx) {
+ return(0);
+ }
+
+ return(innobase_close_connection(innodb_hton_ptr, thd));
+}
+
+UNIV_INTERN void lock_cancel_waiting_and_release(lock_t* lock);
+
+/*****************************************************************//**
Cancel any pending lock request associated with the current THD. */
static
void
@@ -3832,10 +4064,17 @@ innobase_kill_query(
DBUG_ASSERT(hton == innodb_hton_ptr);
trx = thd_to_trx(thd);
- /* Cancel a pending lock request. */
- if (trx) {
- lock_trx_handle_wait(trx);
- }
+
+ if (trx)
+ {
+ /* Cancel a pending lock request. */
+ lock_mutex_enter();
+ trx_mutex_enter(trx);
+ if (trx->lock.wait_lock)
+ lock_cancel_waiting_and_release(trx->lock.wait_lock);
+ trx_mutex_exit(trx);
+ lock_mutex_exit();
+ }
DBUG_VOID_RETURN;
}
@@ -3944,9 +4183,9 @@ ha_innobase::index_flags(
uint,
bool) const
{
- ulong extra_flag= 0;
- if (key == table_share->primary_key)
- extra_flag= HA_CLUSTERED_INDEX;
+ ulong extra_flag= 0;
+ if (table && key == table->s->primary_key)
+ extra_flag= HA_CLUSTERED_INDEX;
return((table_share->key_info[key].algorithm == HA_KEY_ALG_FULLTEXT)
? 0
: (HA_READ_NEXT | HA_READ_PREV | HA_READ_ORDER
@@ -4028,19 +4267,10 @@ ha_innobase::primary_key_is_clustered()
return(true);
}
-/** Always normalize table name to lower case on Windows */
-#ifdef __WIN__
-#define normalize_table_name(norm_name, name) \
- normalize_table_name_low(norm_name, name, TRUE)
-#else
-#define normalize_table_name(norm_name, name) \
- normalize_table_name_low(norm_name, name, FALSE)
-#endif /* __WIN__ */
-
/*****************************************************************//**
Normalizes a table name string. A normalized name consists of the
-database name catenated to '/' and table name. An example:
-test/mytable. On Windows normalization puts both the database name and the
+database name catenated to '/' and table name. Example: test/mytable.
+On Windows normalization puts both the database name and the
table name always to lower case if "set_lower_case" is set to TRUE. */
static
void
@@ -4053,9 +4283,11 @@ normalize_table_name_low(
to lower case */
{
char* name_ptr;
+ ulint name_len;
char* db_ptr;
ulint db_len;
char* ptr;
+ ulint norm_len;
/* Scan name from the end */
@@ -4067,6 +4299,7 @@ normalize_table_name_low(
}
name_ptr = ptr + 1;
+ name_len = strlen(name_ptr);
/* skip any number of path separators */
while (ptr >= name && (*ptr == '\\' || *ptr == '/')) {
@@ -4085,11 +4318,15 @@ normalize_table_name_low(
db_ptr = ptr + 1;
+ norm_len = db_len + name_len + sizeof "/";
+ ut_a(norm_len < FN_REFLEN - 1);
+
memcpy(norm_name, db_ptr, db_len);
norm_name[db_len] = '/';
- memcpy(norm_name + db_len + 1, name_ptr, strlen(name_ptr) + 1);
+ /* Copy the name and null-byte. */
+ memcpy(norm_name + db_len + 1, name_ptr, name_len + 1);
if (set_lower_case) {
innobase_casedn_str(norm_name);
@@ -4104,7 +4341,7 @@ void
test_normalize_table_name_low()
/*===========================*/
{
- char norm_name[128];
+ char norm_name[FN_REFLEN];
const char* test_data[][2] = {
/* input, expected result */
{"./mysqltest/t1", "mysqltest/t1"},
@@ -4160,12 +4397,84 @@ test_normalize_table_name_low()
}
}
}
+
+/*********************************************************************
+Test ut_format_name(). */
+static
+void
+test_ut_format_name()
+/*=================*/
+{
+ char buf[NAME_LEN * 3];
+
+ struct {
+ const char* name;
+ ibool is_table;
+ ulint buf_size;
+ const char* expected;
+ } test_data[] = {
+ {"test/t1", TRUE, sizeof(buf), "\"test\".\"t1\""},
+ {"test/t1", TRUE, 12, "\"test\".\"t1\""},
+ {"test/t1", TRUE, 11, "\"test\".\"t1"},
+ {"test/t1", TRUE, 10, "\"test\".\"t"},
+ {"test/t1", TRUE, 9, "\"test\".\""},
+ {"test/t1", TRUE, 8, "\"test\"."},
+ {"test/t1", TRUE, 7, "\"test\""},
+ {"test/t1", TRUE, 6, "\"test"},
+ {"test/t1", TRUE, 5, "\"tes"},
+ {"test/t1", TRUE, 4, "\"te"},
+ {"test/t1", TRUE, 3, "\"t"},
+ {"test/t1", TRUE, 2, "\""},
+ {"test/t1", TRUE, 1, ""},
+ {"test/t1", TRUE, 0, "BUF_NOT_CHANGED"},
+ {"table", TRUE, sizeof(buf), "\"table\""},
+ {"ta'le", TRUE, sizeof(buf), "\"ta'le\""},
+ {"ta\"le", TRUE, sizeof(buf), "\"ta\"\"le\""},
+ {"ta`le", TRUE, sizeof(buf), "\"ta`le\""},
+ {"index", FALSE, sizeof(buf), "\"index\""},
+ {"ind/ex", FALSE, sizeof(buf), "\"ind/ex\""},
+ };
+
+ for (size_t i = 0; i < UT_ARR_SIZE(test_data); i++) {
+
+ memcpy(buf, "BUF_NOT_CHANGED", strlen("BUF_NOT_CHANGED") + 1);
+
+ char* ret;
+
+ ret = ut_format_name(test_data[i].name,
+ test_data[i].is_table,
+ buf,
+ test_data[i].buf_size);
+
+ ut_a(ret == buf);
+
+ if (strcmp(buf, test_data[i].expected) == 0) {
+ fprintf(stderr,
+ "ut_format_name(%s, %s, buf, %lu), "
+ "expected %s, OK\n",
+ test_data[i].name,
+ test_data[i].is_table ? "TRUE" : "FALSE",
+ test_data[i].buf_size,
+ test_data[i].expected);
+ } else {
+ fprintf(stderr,
+ "ut_format_name(%s, %s, buf, %lu), "
+ "expected %s, ERROR: got %s\n",
+ test_data[i].name,
+ test_data[i].is_table ? "TRUE" : "FALSE",
+ test_data[i].buf_size,
+ test_data[i].expected,
+ buf);
+ ut_error;
+ }
+ }
+}
#endif /* !DBUG_OFF */
/********************************************************************//**
Get the upper limit of the MySQL integral and floating-point type.
@return maximum allowed value for the field */
-static
+UNIV_INTERN
ulonglong
innobase_get_int_col_max_value(
/*===========================*/
@@ -4245,12 +4554,13 @@ innobase_match_index_columns(
DBUG_ENTER("innobase_match_index_columns");
/* Check whether user defined index column count matches */
- if (key_info->key_parts != index_info->n_user_defined_cols) {
+ if (key_info->user_defined_key_parts !=
+ index_info->n_user_defined_cols) {
DBUG_RETURN(FALSE);
}
key_part = key_info->key_part;
- key_end = key_part + key_info->key_parts;
+ key_end = key_part + key_info->user_defined_key_parts;
innodb_idx_fld = index_info->fields;
innodb_idx_fld_end = index_info->fields + index_info->n_fields;
@@ -4509,6 +4819,7 @@ ha_innobase::innobase_initialize_autoinc()
auto_inc = innobase_next_autoinc(
read_auto_inc, 1, 1, 0, col_max_value);
+
break;
}
case DB_RECORD_NOT_FOUND:
@@ -4558,12 +4869,12 @@ ha_innobase::open(
uint test_if_locked) /*!< in: not used */
{
dict_table_t* ib_table;
- char norm_name[1000];
+ char norm_name[FN_REFLEN];
THD* thd;
ulint retries = 0;
char* is_part = NULL;
ibool par_case_name_set = FALSE;
- char par_case_name[MAX_FULL_NAME_LEN + 1];
+ char par_case_name[FN_REFLEN];
DBUG_ENTER("ha_innobase::open");
@@ -4605,7 +4916,31 @@ ha_innobase::open(
retry:
/* Get pointer to a table object in InnoDB dictionary cache */
- ib_table = dict_table_open_on_name(norm_name, FALSE);
+ ib_table = dict_table_open_on_name(norm_name, FALSE, TRUE,
+ DICT_ERR_IGNORE_NONE);
+
+ if (ib_table
+ && ((!DICT_TF2_FLAG_IS_SET(ib_table, DICT_TF2_FTS_HAS_DOC_ID)
+ && table->s->fields != dict_table_get_n_user_cols(ib_table))
+ || (DICT_TF2_FLAG_IS_SET(ib_table, DICT_TF2_FTS_HAS_DOC_ID)
+ && (table->s->fields
+ != dict_table_get_n_user_cols(ib_table) - 1)))) {
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "table %s contains %lu user defined columns "
+ "in InnoDB, but %lu columns in MySQL. Please "
+ "check INFORMATION_SCHEMA.INNODB_SYS_COLUMNS and "
+ REFMAN "innodb-troubleshooting.html "
+ "for how to resolve it",
+ norm_name, (ulong) dict_table_get_n_user_cols(ib_table),
+ (ulong) table->s->fields);
+
+ /* Mark this table as corrupted, so the drop table
+ or force recovery can still use it, but not others. */
+ ib_table->corrupted = true;
+ dict_table_close(ib_table, FALSE, FALSE);
+ ib_table = NULL;
+ is_part = NULL;
+ }
if (NULL == ib_table) {
if (is_part && retries < 10) {
@@ -4619,13 +4954,13 @@ retry:
1) If boot against an installation from Windows
platform, then its partition table name could
- be all be in lower case in system tables. So we
- will need to check lower case name when load table.
+ be in lower case in system tables. So we will
+ need to check lower case name when load table.
- 2) If we boot an installation from other case
+ 2) If we boot an installation from other case
sensitive platform in Windows, we might need to
- check the existence of table name without lowering
- case them in the system table. */
+ check the existence of table name without lower
+ case in the system table. */
if (innobase_get_lower_case_table_names() == 1) {
if (!par_case_name_set) {
@@ -4633,9 +4968,7 @@ retry:
/* Check for the table using lower
case name, including the partition
separator "P" */
- memcpy(par_case_name, norm_name,
- strlen(norm_name));
- par_case_name[strlen(norm_name)] = 0;
+ strcpy(par_case_name, norm_name);
innobase_casedn_str(par_case_name);
#else
/* On Windows platfrom, check
@@ -4649,7 +4982,8 @@ retry:
}
ib_table = dict_table_open_on_name(
- par_case_name, FALSE);
+ par_case_name, FALSE, TRUE,
+ DICT_ERR_IGNORE_NONE);
}
if (!ib_table) {
@@ -4687,21 +5021,13 @@ retry:
retries);
}
- sql_print_error("Cannot find or open table %s from\n"
- "the internal data dictionary of InnoDB "
- "though the .frm file for the\n"
- "table exists. Maybe you have deleted and "
- "recreated InnoDB data\n"
- "files but have forgotten to delete the "
- "corresponding .frm files\n"
- "of InnoDB tables, or you have moved .frm "
- "files to another database?\n"
- "or, the table contains indexes that this "
- "version of the engine\n"
- "doesn't support.\n"
- "See " REFMAN "innodb-troubleshooting.html\n"
- "how you can resolve the problem.\n",
- norm_name);
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Cannot open table %s from the internal data "
+ "dictionary of InnoDB though the .frm file "
+ "for the table exists. See "
+ REFMAN "innodb-troubleshooting.html for how "
+ "you can resolve the problem.", norm_name);
+
free_share(share);
my_errno = ENOENT;
@@ -4710,21 +5036,47 @@ retry:
table_opened:
+ innobase_copy_frm_flags_from_table_share(ib_table, table->s);
+
+ dict_stats_init(ib_table);
+
MONITOR_INC(MONITOR_TABLE_OPEN);
- if (ib_table->ibd_file_missing && !thd_tablespace_op(thd)) {
- sql_print_error("MySQL is trying to open a table handle but "
- "the .ibd file for\ntable %s does not exist.\n"
- "Have you deleted the .ibd file from the "
- "database directory under\nthe MySQL datadir, "
- "or have you used DISCARD TABLESPACE?\n"
- "See " REFMAN "innodb-troubleshooting.html\n"
- "how you can resolve the problem.\n",
- norm_name);
+ bool no_tablespace;
+
+ if (dict_table_is_discarded(ib_table)) {
+
+ ib_senderrf(thd,
+ IB_LOG_LEVEL_WARN, ER_TABLESPACE_DISCARDED,
+ table->s->table_name.str);
+
+ /* Allow an open because a proper DISCARD should have set
+ all the flags and index root page numbers to FIL_NULL that
+ should prevent any DML from running but it should allow DDL
+ operations. */
+
+ no_tablespace = false;
+
+ } else if (ib_table->ibd_file_missing) {
+
+ ib_senderrf(
+ thd, IB_LOG_LEVEL_WARN,
+ ER_TABLESPACE_MISSING, norm_name);
+
+ /* This means we have no idea what happened to the tablespace
+ file, best to play it safe. */
+
+ no_tablespace = true;
+ } else {
+ no_tablespace = false;
+ }
+
+ if (!thd_tablespace_op(thd) && no_tablespace) {
free_share(share);
my_errno = ENOENT;
- dict_table_close(ib_table, FALSE);
+ dict_table_close(ib_table, FALSE, FALSE);
+
DBUG_RETURN(HA_ERR_NO_SUCH_TABLE);
}
@@ -4872,7 +5224,9 @@ table_opened:
}
/* Only if the table has an AUTOINC column. */
- if (prebuilt->table != NULL && table->found_next_number_field != NULL) {
+ if (prebuilt->table != NULL
+ && !prebuilt->table->ibd_file_missing
+ && table->found_next_number_field != NULL) {
dict_table_autoinc_lock(prebuilt->table);
/* Since a table can already be "open" in InnoDB's internal
@@ -4893,6 +5247,31 @@ table_opened:
}
UNIV_INTERN
+handler*
+ha_innobase::clone(
+/*===============*/
+ const char* name, /*!< in: table name */
+ MEM_ROOT* mem_root) /*!< in: memory context */
+{
+ ha_innobase* new_handler;
+
+ DBUG_ENTER("ha_innobase::clone");
+
+ new_handler = static_cast<ha_innobase*>(handler::clone(name,
+ mem_root));
+ if (new_handler) {
+ DBUG_ASSERT(new_handler->prebuilt != NULL);
+ DBUG_ASSERT(new_handler->user_thd == user_thd);
+ DBUG_ASSERT(new_handler->prebuilt->trx == prebuilt->trx);
+
+ new_handler->prebuilt->select_lock_type
+ = prebuilt->select_lock_type;
+ }
+
+ DBUG_RETURN(new_handler);
+}
+
+UNIV_INTERN
uint
ha_innobase::max_supported_key_part_length() const
/*==============================================*/
@@ -4957,36 +5336,6 @@ get_field_offset(
return((uint) (field->ptr - table->record[0]));
}
-/**************************************************************//**
-Checks if a field in a record is SQL NULL. Uses the record format
-information in table to track the null bit in record.
-@return 1 if NULL, 0 otherwise */
-static inline
-uint
-field_in_record_is_null(
-/*====================*/
- TABLE* table, /*!< in: MySQL table object */
- Field* field, /*!< in: MySQL field object */
- char* record) /*!< in: a row in MySQL format */
-{
- int null_offset;
-
- if (!field->null_ptr) {
-
- return(0);
- }
-
- null_offset = (uint) ((char*) field->null_ptr
- - (char*) table->record[0]);
-
- if (record[null_offset] & field->null_bit) {
-
- return(1);
- }
-
- return(0);
-}
-
/*************************************************************//**
InnoDB uses this function to compare two data fields for which the data type
is such that we must use MySQL code to compare them. NOTE that the prototype
@@ -5446,6 +5795,7 @@ get_innobase_type_from_mysql_type(
case HA_KEYTYPE_END:
ut_error;
}
+
return(0);
}
@@ -5475,7 +5825,7 @@ innobase_read_from_2_little_endian(
/*===============================*/
const uchar* buf) /*!< in: from where to read */
{
- return (uint) ((ulint)(buf[0]) + 256 * ((ulint)(buf[1])));
+ return((uint) ((ulint)(buf[0]) + 256 * ((ulint)(buf[1]))));
}
/*******************************************************************//**
@@ -5493,7 +5843,8 @@ ha_innobase::store_key_val_for_row(
{
KEY* key_info = table->key_info + keynr;
KEY_PART_INFO* key_part = key_info->key_part;
- KEY_PART_INFO* end = key_part + key_info->key_parts;
+ KEY_PART_INFO* end =
+ key_part + key_info->user_defined_key_parts;
char* buff_start = buff;
enum_field_types mysql_type;
Field* field;
@@ -5869,10 +6220,9 @@ build_template_field(
templ->rec_field_no = dict_index_get_nth_col_pos(index, i);
}
- if (field->null_ptr) {
+ if (field->real_maybe_null()) {
templ->mysql_null_byte_offset =
- (ulint) ((char*) field->null_ptr
- - (char*) table->record[0]);
+ field->null_offset();
templ->mysql_null_bit_mask = (ulint) field->null_bit;
} else {
@@ -5974,6 +6324,10 @@ ha_innobase::build_template(
prebuilt->need_to_access_clustered = (index == clust_index);
+ /* Either prebuilt->index should be a secondary index, or it
+ should be the clustered index. */
+ ut_ad(dict_index_is_clust(index) == (index == clust_index));
+
/* Below we check column by column if we need to access
the clustered index. */
@@ -6190,11 +6544,13 @@ min value of the autoinc interval. Once that is fixed we can get rid of
the special lock handling.
@return DB_SUCCESS if all OK else error code */
UNIV_INTERN
-ulint
+dberr_t
ha_innobase::innobase_lock_autoinc(void)
/*====================================*/
{
- ulint error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
+
+ ut_ad(!srv_read_only_mode);
switch (innobase_autoinc_lock_mode) {
case AUTOINC_NO_LOCKING:
@@ -6239,19 +6595,19 @@ ha_innobase::innobase_lock_autoinc(void)
ut_error;
}
- return(ulong(error));
+ return(error);
}
/********************************************************************//**
Reset the autoinc value in the table.
@return DB_SUCCESS if all went well else error code */
UNIV_INTERN
-ulint
+dberr_t
ha_innobase::innobase_reset_autoinc(
/*================================*/
ulonglong autoinc) /*!< in: value to store */
{
- ulint error;
+ dberr_t error;
error = innobase_lock_autoinc();
@@ -6262,7 +6618,7 @@ ha_innobase::innobase_reset_autoinc(
dict_table_autoinc_unlock(prebuilt->table);
}
- return(ulong(error));
+ return(error);
}
/********************************************************************//**
@@ -6270,12 +6626,12 @@ Store the autoinc value in the table. The autoinc value is only set if
it's greater than the existing autoinc value in the table.
@return DB_SUCCESS if all went well else error code */
UNIV_INTERN
-ulint
+dberr_t
ha_innobase::innobase_set_max_autoinc(
/*==================================*/
ulonglong auto_inc) /*!< in: value to store */
{
- ulint error;
+ dberr_t error;
error = innobase_lock_autoinc();
@@ -6286,7 +6642,7 @@ ha_innobase::innobase_set_max_autoinc(
dict_table_autoinc_unlock(prebuilt->table);
}
- return(ulong(error));
+ return(error);
}
/********************************************************************//**
@@ -6299,7 +6655,7 @@ ha_innobase::write_row(
/*===================*/
uchar* record) /*!< in: a row in MySQL format */
{
- ulint error = 0;
+ dberr_t error;
int error_result= 0;
ibool auto_inc_used= FALSE;
ulint sql_command;
@@ -6307,7 +6663,10 @@ ha_innobase::write_row(
DBUG_ENTER("ha_innobase::write_row");
- if (prebuilt->trx != trx) {
+ if (srv_read_only_mode) {
+ ib_senderrf(ha_thd(), IB_LOG_LEVEL_WARN, ER_READ_ONLY_MODE);
+ DBUG_RETURN(HA_ERR_TABLE_READONLY);
+ } else if (prebuilt->trx != trx) {
sql_print_error("The transaction object for the table handle "
"is at %p, but for the current thread it is at "
"%p",
@@ -6325,6 +6684,8 @@ ha_innobase::write_row(
++trx->will_lock;
}
+ ha_statistic_increment(&SSV::ha_write_count);
+
sql_command = thd_sql_command(user_thd);
if ((sql_command == SQLCOM_ALTER_TABLE
@@ -6404,7 +6765,7 @@ no_commit:
innobase_get_auto_increment(). */
prebuilt->autoinc_error = DB_SUCCESS;
- if ((error = update_auto_increment())) {
+ if ((error_result = update_auto_increment())) {
/* We don't want to mask autoinc overflow errors. */
/* Handle the case where the AUTOINC sub-system
@@ -6415,15 +6776,11 @@ no_commit:
my_error(ER_AUTOINC_READ_FAILED, MYF(0));
goto func_exit;
} else if (prebuilt->autoinc_error != DB_SUCCESS) {
- error = (int) prebuilt->autoinc_error;
+ error = prebuilt->autoinc_error;
goto report_error;
}
- /* MySQL errors are passed straight back. except for
- HA_ERR_AUTO_INC_READ_FAILED. This can only happen
- for values out of range.
- */
- error_result = (int) error;
+ /* MySQL errors are passed straight back. */
goto func_exit;
}
@@ -6442,10 +6799,10 @@ no_commit:
innobase_srv_conc_enter_innodb(prebuilt->trx);
error = row_insert_for_mysql((byte*) record, prebuilt);
+ DEBUG_SYNC(user_thd, "ib_after_row_insert");
/* Handle duplicate key errors */
if (auto_inc_used) {
- ulint err;
ulonglong auto_inc;
ulonglong col_max_value;
@@ -6507,6 +6864,7 @@ set_max_autoinc:
ulonglong offset;
ulonglong increment;
+ dberr_t err;
offset = prebuilt->autoinc_offset;
increment = prebuilt->autoinc_increment;
@@ -6525,13 +6883,22 @@ set_max_autoinc:
}
}
break;
+ default:
+ break;
}
}
innobase_srv_conc_exit_innodb(prebuilt->trx);
report_error:
- error_result = convert_error_code_to_mysql((int) error,
+ if (error == DB_TABLESPACE_DELETED) {
+ ib_senderrf(
+ trx->mysql_thd, IB_LOG_LEVEL_ERROR,
+ ER_TABLESPACE_DISCARDED,
+ table->s->table_name.str);
+ }
+
+ error_result = convert_error_code_to_mysql(error,
prebuilt->table->flags,
user_thd);
@@ -6548,9 +6915,9 @@ func_exit:
/**********************************************************************//**
Checks which fields have changed in a row and stores information
of them to an update vector.
-@return error number or 0 */
+@return DB_SUCCESS or error code */
static
-int
+dberr_t
calc_row_difference(
/*================*/
upd_t* uvect, /*!< in/out: update vector */
@@ -6580,12 +6947,13 @@ calc_row_difference(
dfield_t dfield;
dict_index_t* clust_index;
uint i;
- ulint error = DB_SUCCESS;
ibool changes_fts_column = FALSE;
ibool changes_fts_doc_col = FALSE;
trx_t* trx = thd_to_trx(thd);
doc_id_t doc_id = FTS_NULL_DOC_ID;
+ ut_ad(!srv_read_only_mode);
+
n_fields = table->s->fields;
clust_index = dict_table_get_first_index(prebuilt->table);
@@ -6657,14 +7025,12 @@ calc_row_difference(
}
- if (field->null_ptr) {
- if (field_in_record_is_null(table, field,
- (char*) old_row)) {
+ if (field->real_maybe_null()) {
+ if (field->is_null_in_record(old_row)) {
o_len = UNIV_SQL_NULL;
}
- if (field_in_record_is_null(table, field,
- (char*) new_row)) {
+ if (field->is_null_in_record(new_row)) {
n_len = UNIV_SQL_NULL;
}
}
@@ -6801,13 +7167,7 @@ calc_row_difference(
fts_update_doc_id(
innodb_table, ufield, &trx->fts_next_doc_id);
- if (error == DB_SUCCESS) {
- ++n_changed;
- } else {
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Error (%lu) while updating "
- "doc id in calc_row_difference().\n", error);
- }
+ ++n_changed;
} else {
/* We have a Doc ID column, but none of FTS indexed
columns are touched, nor the Doc ID column, so set
@@ -6821,7 +7181,7 @@ calc_row_difference(
ut_a(buf <= (byte*) original_upd_buff + buff_len);
- return(error);
+ return(DB_SUCCESS);
}
/**********************************************************************//**
@@ -6840,14 +7200,17 @@ ha_innobase::update_row(
uchar* new_row) /*!< in: new row in MySQL format */
{
upd_t* uvect;
- int error = 0;
+ dberr_t error;
trx_t* trx = thd_to_trx(user_thd);
DBUG_ENTER("ha_innobase::update_row");
ut_a(prebuilt->trx == trx);
- if (!trx_is_started(trx)) {
+ if (srv_read_only_mode) {
+ ib_senderrf(ha_thd(), IB_LOG_LEVEL_WARN, ER_READ_ONLY_MODE);
+ DBUG_RETURN(HA_ERR_TABLE_READONLY);
+ } else if (!trx_is_started(trx)) {
++trx->will_lock;
}
@@ -6868,6 +7231,8 @@ ha_innobase::update_row(
}
}
+ ha_statistic_increment(&SSV::ha_update_count);
+
if (prebuilt->upd_node) {
uvect = prebuilt->upd_node->update;
} else {
@@ -6935,18 +7300,18 @@ ha_innobase::update_row(
innobase_srv_conc_exit_innodb(trx);
func_exit:
- error = convert_error_code_to_mysql(error,
+ int err = convert_error_code_to_mysql(error,
prebuilt->table->flags, user_thd);
/* If success and no columns were updated. */
- if (error == 0 && uvect->n_fields == 0) {
+ if (err == 0 && uvect->n_fields == 0) {
/* This is the same as success, but instructs
MySQL that the row is not really updated and it
should not increase the count of updated rows.
This is fix for http://bugs.mysql.com/29157 */
- error = HA_ERR_RECORD_IS_THE_SAME;
- } else if (error == HA_FTS_INVALID_DOCID) {
+ err = HA_ERR_RECORD_IS_THE_SAME;
+ } else if (err == HA_FTS_INVALID_DOCID) {
my_error(HA_FTS_INVALID_DOCID, MYF(0));
}
@@ -6955,7 +7320,7 @@ func_exit:
innobase_active_small();
- DBUG_RETURN(error);
+ DBUG_RETURN(err);
}
/**********************************************************************//**
@@ -6967,17 +7332,22 @@ ha_innobase::delete_row(
/*====================*/
const uchar* record) /*!< in: a row in MySQL format */
{
- int error = 0;
+ dberr_t error;
trx_t* trx = thd_to_trx(user_thd);
DBUG_ENTER("ha_innobase::delete_row");
ut_a(prebuilt->trx == trx);
- if (!trx_is_started(trx)) {
+ if (srv_read_only_mode) {
+ ib_senderrf(ha_thd(), IB_LOG_LEVEL_WARN, ER_READ_ONLY_MODE);
+ DBUG_RETURN(HA_ERR_TABLE_READONLY);
+ } else if (!trx_is_started(trx)) {
++trx->will_lock;
}
+ ha_statistic_increment(&SSV::ha_delete_count);
+
if (!prebuilt->upd_node) {
row_get_prebuilt_update_vector(prebuilt);
}
@@ -6992,15 +7362,13 @@ ha_innobase::delete_row(
innobase_srv_conc_exit_innodb(trx);
- error = convert_error_code_to_mysql(
- error, prebuilt->table->flags, user_thd);
-
/* Tell the InnoDB server that there might be work for
utility threads: */
innobase_active_small();
- DBUG_RETURN(error);
+ DBUG_RETURN(convert_error_code_to_mysql(
+ error, prebuilt->table->flags, user_thd));
}
/**********************************************************************//**
@@ -7233,21 +7601,19 @@ ha_innobase::index_read(
dict_index_t* index;
ulint match_mode = 0;
int error;
- ulint ret;
+ dberr_t ret;
DBUG_ENTER("index_read");
DEBUG_SYNC_C("ha_innobase_index_read_begin");
ut_a(prebuilt->trx == thd_to_trx(user_thd));
+ ut_ad(key_len != 0 || find_flag != HA_READ_KEY_EXACT);
+
+ ha_statistic_increment(&SSV::ha_read_key_count);
index = prebuilt->index;
if (UNIV_UNLIKELY(index == NULL) || dict_index_is_corrupted(index)) {
- DBUG_PRINT("error", ("index: %p index_corrupt: %d data_corrupt: %d",
- index,
- index ? test(index->type & DICT_CORRUPT) : 0,
- (index && index->table ?
- test(index->table->corrupted) : 0)));
prebuilt->index_usable = FALSE;
DBUG_RETURN(HA_ERR_CRASHED);
}
@@ -7320,6 +7686,7 @@ ha_innobase::index_read(
case DB_SUCCESS:
error = 0;
table->status = 0;
+ srv_stats.n_rows_read.add((size_t) prebuilt->trx->id, 1);
break;
case DB_RECORD_NOT_FOUND:
error = HA_ERR_KEY_NOT_FOUND;
@@ -7329,10 +7696,30 @@ ha_innobase::index_read(
error = HA_ERR_KEY_NOT_FOUND;
table->status = STATUS_NOT_FOUND;
break;
+ case DB_TABLESPACE_DELETED:
+
+ ib_senderrf(
+ prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR,
+ ER_TABLESPACE_DISCARDED,
+ table->s->table_name.str);
+
+ table->status = STATUS_NOT_FOUND;
+ error = HA_ERR_NO_SUCH_TABLE;
+ break;
+ case DB_TABLESPACE_NOT_FOUND:
+
+ ib_senderrf(
+ prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR,
+ ER_TABLESPACE_MISSING,
+ table->s->table_name.str);
+
+ table->status = STATUS_NOT_FOUND;
+ error = HA_ERR_NO_SUCH_TABLE;
+ break;
default:
- error = convert_error_code_to_mysql((int) ret,
- prebuilt->table->flags,
- user_thd);
+ error = convert_error_code_to_mysql(
+ ret, prebuilt->table->flags, user_thd);
+
table->status = STATUS_NOT_FOUND;
break;
}
@@ -7534,8 +7921,8 @@ ha_innobase::general_fetch(
uint match_mode) /*!< in: 0, ROW_SEL_EXACT, or
ROW_SEL_EXACT_PREFIX */
{
- ulint ret;
- int error = 0;
+ dberr_t ret;
+ int error;
DBUG_ENTER("general_fetch");
@@ -7552,6 +7939,7 @@ ha_innobase::general_fetch(
case DB_SUCCESS:
error = 0;
table->status = 0;
+ srv_stats.n_rows_read.add((size_t) prebuilt->trx->id, 1);
break;
case DB_RECORD_NOT_FOUND:
error = HA_ERR_END_OF_FILE;
@@ -7561,9 +7949,30 @@ ha_innobase::general_fetch(
error = HA_ERR_END_OF_FILE;
table->status = STATUS_NOT_FOUND;
break;
+ case DB_TABLESPACE_DELETED:
+
+ ib_senderrf(
+ prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR,
+ ER_TABLESPACE_DISCARDED,
+ table->s->table_name.str);
+
+ table->status = STATUS_NOT_FOUND;
+ error = HA_ERR_NO_SUCH_TABLE;
+ break;
+ case DB_TABLESPACE_NOT_FOUND:
+
+ ib_senderrf(
+ prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR,
+ ER_TABLESPACE_MISSING,
+ table->s->table_name.str);
+
+ table->status = STATUS_NOT_FOUND;
+ error = HA_ERR_NO_SUCH_TABLE;
+ break;
default:
error = convert_error_code_to_mysql(
- (int) ret, prebuilt->table->flags, user_thd);
+ ret, prebuilt->table->flags, user_thd);
+
table->status = STATUS_NOT_FOUND;
break;
}
@@ -7582,6 +7991,8 @@ ha_innobase::index_next(
uchar* buf) /*!< in/out: buffer for next row in MySQL
format */
{
+ ha_statistic_increment(&SSV::ha_read_next_count);
+
return(general_fetch(buf, ROW_SEL_NEXT, 0));
}
@@ -7596,6 +8007,8 @@ ha_innobase::index_next_same(
const uchar* key, /*!< in: key value */
uint keylen) /*!< in: key value length */
{
+ ha_statistic_increment(&SSV::ha_read_next_count);
+
return(general_fetch(buf, ROW_SEL_NEXT, last_match_mode));
}
@@ -7609,6 +8022,8 @@ ha_innobase::index_prev(
/*====================*/
uchar* buf) /*!< in/out: buffer for previous row in MySQL format */
{
+ ha_statistic_increment(&SSV::ha_read_prev_count);
+
return(general_fetch(buf, ROW_SEL_PREV, 0));
}
@@ -7625,6 +8040,7 @@ ha_innobase::index_first(
int error;
DBUG_ENTER("index_first");
+ ha_statistic_increment(&SSV::ha_read_first_count);
error = index_read(buf, NULL, 0, HA_READ_AFTER_KEY);
@@ -7650,6 +8066,7 @@ ha_innobase::index_last(
int error;
DBUG_ENTER("index_last");
+ ha_statistic_increment(&SSV::ha_read_last_count);
error = index_read(buf, NULL, 0, HA_READ_BEFORE_KEY);
@@ -7719,6 +8136,7 @@ ha_innobase::rnd_next(
int error;
DBUG_ENTER("rnd_next");
+ ha_statistic_increment(&SSV::ha_read_rnd_next_count);
if (start_of_scan) {
error = index_first(buf);
@@ -7752,6 +8170,8 @@ ha_innobase::rnd_pos(
DBUG_ENTER("rnd_pos");
DBUG_DUMP("key", pos, ref_length);
+ ha_statistic_increment(&SSV::ha_read_rnd_count);
+
ut_a(prebuilt->trx == thd_to_trx(ha_thd()));
/* Note that we assume the length of the row reference is fixed
@@ -7776,8 +8196,6 @@ ha_innobase::ft_init()
{
DBUG_ENTER("ft_init");
- fprintf(stderr, "ft_init()\n");
-
trx_t* trx = check_trx_exists(ha_thd());
/* FTS queries are not treated as autocommit non-locking selects.
@@ -7816,15 +8234,15 @@ ha_innobase::ft_init_ext(
ulint buf_tmp_used;
uint num_errors;
- fprintf(stderr, "ft_init_ext()\n");
-
- fprintf(stderr, "keynr=%u, '%.*s'\n",
- keynr, (int) key->length(), (byte*) key->ptr());
+ if (fts_enable_diag_print) {
+ fprintf(stderr, "keynr=%u, '%.*s'\n",
+ keynr, (int) key->length(), (byte*) key->ptr());
- if (flags & FT_BOOL) {
- fprintf(stderr, "BOOL search\n");
- } else {
- fprintf(stderr, "NL search\n");
+ if (flags & FT_BOOL) {
+ fprintf(stderr, "BOOL search\n");
+ } else {
+ fprintf(stderr, "NL search\n");
+ }
}
/* FIXME: utf32 and utf16 are not compatible with some
@@ -7871,7 +8289,7 @@ ha_innobase::ft_init_ext(
if (!index || index->type != DICT_FTS) {
my_error(ER_TABLE_HAS_NO_FT, MYF(0));
- return NULL;
+ return(NULL);
}
if (!(table->fts->fts_status & ADDED_TABLE_SYNCED)) {
@@ -7882,25 +8300,69 @@ ha_innobase::ft_init_ext(
error = fts_query(trx, index, flags, query, query_len, &result);
- prebuilt->result = result;
-
// FIXME: Proper error handling and diagnostic
if (error != DB_SUCCESS) {
fprintf(stderr, "Error processing query\n");
} else {
- /* Must return an instance of a result even if it's empty */
- ut_a(prebuilt->result);
-
/* Allocate FTS handler, and instantiate it before return */
fts_hdl = (NEW_FT_INFO*) my_malloc(sizeof(NEW_FT_INFO),
MYF(0));
fts_hdl->please = (struct _ft_vft*)(&ft_vft_result);
+ fts_hdl->could_you = (struct _ft_vft_ext*)(&ft_vft_ext_result);
fts_hdl->ft_prebuilt = prebuilt;
fts_hdl->ft_result = result;
+
+ /* FIXME: Re-evaluate the condition when Bug 14469540
+ is resolved */
+ prebuilt->in_fts_query = true;
}
- return ((FT_INFO*) fts_hdl);
+ return((FT_INFO*) fts_hdl);
+}
+
+/*****************************************************************//**
+Set up search tuple for a query through FTS_DOC_ID_INDEX on
+supplied Doc ID. This is used by MySQL to retrieve the documents
+once the search result (Doc IDs) is available */
+static
+void
+innobase_fts_create_doc_id_key(
+/*===========================*/
+ dtuple_t* tuple, /* in/out: prebuilt->search_tuple */
+ const dict_index_t*
+ index, /* in: index (FTS_DOC_ID_INDEX) */
+ doc_id_t* doc_id) /* in/out: doc id to search, value
+ could be changed to storage format
+ used for search. */
+{
+ doc_id_t temp_doc_id;
+ dfield_t* dfield = dtuple_get_nth_field(tuple, 0);
+
+ ut_a(dict_index_get_n_unique(index) == 1);
+
+ dtuple_set_n_fields(tuple, index->n_fields);
+ dict_index_copy_types(tuple, index, index->n_fields);
+
+#ifdef UNIV_DEBUG
+ /* The unique Doc ID field should be an eight-bytes integer */
+ dict_field_t* field = dict_index_get_nth_field(index, 0);
+ ut_a(field->col->mtype == DATA_INT);
+ ut_ad(sizeof(*doc_id) == field->fixed_len);
+ ut_ad(innobase_strcasecmp(index->name, FTS_DOC_ID_INDEX_NAME) == 0);
+#endif /* UNIV_DEBUG */
+
+ /* Convert to storage byte order */
+ mach_write_to_8(reinterpret_cast<byte*>(&temp_doc_id), *doc_id);
+ *doc_id = temp_doc_id;
+ dfield_set_data(dfield, doc_id, sizeof(*doc_id));
+
+ dtuple_set_n_fields_cmp(tuple, 1);
+
+ for (ulint i = 1; i < index->n_fields; i++) {
+ dfield = dtuple_get_nth_field(tuple, i);
+ dfield_set_null(dfield);
+ }
}
/**********************************************************************//**
@@ -7947,6 +8409,14 @@ next_record:
if (result->current != NULL) {
dict_index_t* index;
dtuple_t* tuple = prebuilt->search_tuple;
+ doc_id_t search_doc_id;
+
+ /* If we only need information from result we can return
+ without fetching the table row */
+ if (ft_prebuilt->read_just_key) {
+ table->status= 0;
+ return(0);
+ }
index = dict_table_get_index_on_name(
prebuilt->table, FTS_DOC_ID_INDEX_NAME);
@@ -7960,48 +8430,74 @@ next_record:
fts_ranking_t* ranking = rbt_value(
fts_ranking_t, result->current);
- /* We pass a pointer to the doc_id because we need to
- convert it to storage byte order. */
- row_create_key(tuple, index, &ranking->doc_id);
+ search_doc_id = ranking->doc_id;
+
+ /* We pass a pointer of search_doc_id because it will be
+ converted to storage byte order used in the search
+ tuple. */
+ innobase_fts_create_doc_id_key(tuple, index, &search_doc_id);
innobase_srv_conc_enter_innodb(prebuilt->trx);
- ulint ret = row_search_for_mysql(
+ dberr_t ret = row_search_for_mysql(
(byte*) buf, PAGE_CUR_GE, prebuilt, ROW_SEL_EXACT, 0);
innobase_srv_conc_exit_innodb(prebuilt->trx);
-
- if (ret == DB_SUCCESS) {
+ switch (ret) {
+ case DB_SUCCESS:
error = 0;
table->status = 0;
-
- } else if (ret == DB_RECORD_NOT_FOUND) {
-
+ break;
+ case DB_RECORD_NOT_FOUND:
result->current = const_cast<ib_rbt_node_t*>(
rbt_next(result->rankings_by_rank,
result->current));
if (!result->current) {
- error = HA_ERR_KEY_NOT_FOUND;
+ /* exhaust the result set, should return
+ HA_ERR_END_OF_FILE just like
+ ha_innobase::general_fetch() and/or
+ ha_innobase::index_first() etc. */
+ error = HA_ERR_END_OF_FILE;
table->status = STATUS_NOT_FOUND;
} else {
goto next_record;
}
+ break;
+ case DB_END_OF_INDEX:
+ error = HA_ERR_END_OF_FILE;
+ table->status = STATUS_NOT_FOUND;
+ break;
+ case DB_TABLESPACE_DELETED:
- } else if (ret == DB_END_OF_INDEX) {
+ ib_senderrf(
+ prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR,
+ ER_TABLESPACE_DISCARDED,
+ table->s->table_name.str);
- error = HA_ERR_KEY_NOT_FOUND;
table->status = STATUS_NOT_FOUND;
- } else {
+ error = HA_ERR_NO_SUCH_TABLE;
+ break;
+ case DB_TABLESPACE_NOT_FOUND:
+
+ ib_senderrf(
+ prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR,
+ ER_TABLESPACE_MISSING,
+ table->s->table_name.str);
+ table->status = STATUS_NOT_FOUND;
+ error = HA_ERR_NO_SUCH_TABLE;
+ break;
+ default:
error = convert_error_code_to_mysql(
- (int) ret, 0, user_thd);
+ ret, 0, user_thd);
table->status = STATUS_NOT_FOUND;
+ break;
}
- return (error);
+ return(error);
}
return(HA_ERR_END_OF_FILE);
@@ -8015,11 +8511,6 @@ ha_innobase::ft_end()
{
fprintf(stderr, "ft_end()\n");
- if (prebuilt->result != NULL) {
- fts_query_free_result(prebuilt->result);
- prebuilt->result = NULL;
- }
-
rnd_end();
}
@@ -8073,23 +8564,21 @@ See http://bugs.mysql.com/32710 for expl. why we choose PROCESS. */
/*****************************************************************//**
Check whether there exist a column named as "FTS_DOC_ID", which is
reserved for InnoDB FTS Doc ID
-@return TRUE if there exist a "FTS_DOC_ID" column */
+@return true if there exist a "FTS_DOC_ID" column */
static
-ibool
+bool
create_table_check_doc_id_col(
/*==========================*/
trx_t* trx, /*!< in: InnoDB transaction handle */
- TABLE* form, /*!< in: information on table
+ const TABLE* form, /*!< in: information on table
columns and indexes */
ulint* doc_id_col) /*!< out: Doc ID column number if
- there exist a FTS_DOC_ID column, ULINT_UNDEFINED if column is of the
+ there exist a FTS_DOC_ID column,
+ ULINT_UNDEFINED if column is of the
wrong type/name/size */
{
- ibool find_doc_id = FALSE;
- ulint i;
-
- for (i = 0; i < form->s->fields; i++) {
- Field* field;
+ for (ulint i = 0; i < form->s->fields; i++) {
+ const Field* field;
ulint col_type;
ulint col_len;
ulint unsigned_type;
@@ -8104,21 +8593,19 @@ create_table_check_doc_id_col(
if (innobase_strcasecmp(field->field_name,
FTS_DOC_ID_COL_NAME) == 0) {
- find_doc_id = TRUE;
-
/* Note the name is case sensitive due to
our internal query parser */
if (col_type == DATA_INT
- && !field->null_ptr
+ && !field->real_maybe_null()
&& col_len == sizeof(doc_id_t)
&& (strcmp(field->field_name,
FTS_DOC_ID_COL_NAME) == 0)) {
*doc_id_col = i;
} else {
push_warning_printf(
- (THD*) trx->mysql_thd,
+ trx->mysql_thd,
Sql_condition::WARN_LEVEL_WARN,
- HA_WRONG_CREATE_OPTION,
+ ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: FTS_DOC_ID column must be "
"of BIGINT NOT NULL type, and named "
"in all capitalized characters");
@@ -8127,38 +8614,39 @@ create_table_check_doc_id_col(
*doc_id_col = ULINT_UNDEFINED;
}
- break;
+ return(true);
}
}
- return(find_doc_id);
+ return(false);
}
/*****************************************************************//**
Creates a table definition to an InnoDB database. */
-static
+static __attribute__((nonnull, warn_unused_result))
int
create_table_def(
/*=============*/
trx_t* trx, /*!< in: InnoDB transaction handle */
- TABLE* form, /*!< in: information on table
+ const TABLE* form, /*!< in: information on table
columns and indexes */
const char* table_name, /*!< in: table name */
- const char* path_of_temp_table,/*!< in: if this is a table explicitly
+ const char* temp_path, /*!< in: if this is a table explicitly
created by the user with the
TEMPORARY keyword, then this
parameter is the dir path where the
table should be placed if we create
an .ibd file for it (no .ibd extension
- in the path, though); otherwise this
- is NULL */
+ in the path, though). Otherwise this
+ is a zero length-string */
+ const char* remote_path, /*!< in: Remote path or zero length-string */
ulint flags, /*!< in: table flags */
ulint flags2) /*!< in: table flags2 */
{
- Field* field;
+ THD* thd = trx->mysql_thd;
dict_table_t* table;
ulint n_cols;
- int error;
+ dberr_t err;
ulint col_type;
ulint col_len;
ulint nulls_allowed;
@@ -8169,17 +8657,18 @@ create_table_def(
ulint i;
ulint doc_id_col = 0;
ibool has_doc_id_col = FALSE;
+ mem_heap_t* heap;
DBUG_ENTER("create_table_def");
DBUG_PRINT("enter", ("table_name: %s", table_name));
- ut_a(trx->mysql_thd != NULL);
+ DBUG_ASSERT(thd != NULL);
/* MySQL does the name length check. But we do additional check
on the name length here */
if (strlen(table_name) > MAX_FULL_NAME_LEN) {
push_warning_printf(
- (THD*) trx->mysql_thd, Sql_condition::WARN_LEVEL_WARN,
+ thd, Sql_condition::WARN_LEVEL_WARN,
ER_TABLE_NAME,
"InnoDB: Table Name or Database Name is too long");
@@ -8191,7 +8680,7 @@ create_table_def(
if (strcmp(strchr(table_name, '/') + 1,
"innodb_table_monitor") == 0) {
push_warning(
- (THD*) trx->mysql_thd, Sql_condition::WARN_LEVEL_WARN,
+ thd, Sql_condition::WARN_LEVEL_WARN,
HA_ERR_WRONG_COMMAND,
DEPRECATED_MSG_INNODB_TABLE_MONITOR);
}
@@ -8205,7 +8694,7 @@ create_table_def(
if (doc_id_col == ULINT_UNDEFINED) {
trx_commit_for_mysql(trx);
- error = DB_ERROR;
+ err = DB_ERROR;
goto error_ret;
} else {
has_doc_id_col = TRUE;
@@ -8233,42 +8722,41 @@ create_table_def(
flags, flags2);
}
- if (path_of_temp_table) {
+ if (flags2 & DICT_TF2_TEMPORARY) {
+ ut_a(strlen(temp_path));
table->dir_path_of_temp_table =
- mem_heap_strdup(table->heap, path_of_temp_table);
+ mem_heap_strdup(table->heap, temp_path);
}
+ if (DICT_TF_HAS_DATA_DIR(flags)) {
+ ut_a(strlen(remote_path));
+ table->data_dir_path = mem_heap_strdup(table->heap, remote_path);
+ } else {
+ table->data_dir_path = NULL;
+ }
+ heap = mem_heap_create(1000);
+
for (i = 0; i < n_cols; i++) {
- field = form->field[i];
+ Field* field = form->field[i];
col_type = get_innobase_type_from_mysql_type(&unsigned_type,
field);
if (!col_type) {
push_warning_printf(
- (THD*) trx->mysql_thd,
- Sql_condition::WARN_LEVEL_WARN,
+ thd, Sql_condition::WARN_LEVEL_WARN,
ER_CANT_CREATE_TABLE,
"Error creating table '%s' with "
"column '%s'. Please check its "
"column type and try to re-create "
"the table with an appropriate "
"column type.",
- table->name, (char*) field->field_name);
+ table->name, field->field_name);
goto err_col;
}
- if (field->null_ptr) {
- nulls_allowed = 0;
- } else {
- nulls_allowed = DATA_NOT_NULL;
- }
-
- if (field->binary()) {
- binary_type = DATA_BINARY_TYPE;
- } else {
- binary_type = 0;
- }
+ nulls_allowed = field->real_maybe_null() ? 0 : DATA_NOT_NULL;
+ binary_type = field->binary() ? DATA_BINARY_TYPE : 0;
charset_no = 0;
@@ -8280,13 +8768,13 @@ create_table_def(
/* in data0type.h we assume that the
number fits in one byte in prtype */
push_warning_printf(
- (THD*) trx->mysql_thd,
- Sql_condition::WARN_LEVEL_WARN,
+ thd, Sql_condition::WARN_LEVEL_WARN,
ER_CANT_CREATE_TABLE,
"In InnoDB, charset-collation codes"
" must be below 256."
" Unsupported code %lu.",
(ulong) charset_no);
+ mem_heap_free(heap);
DBUG_RETURN(ER_CANT_CREATE_TABLE);
}
}
@@ -8318,14 +8806,15 @@ create_table_def(
field->field_name);
err_col:
dict_mem_table_free(table);
+ mem_heap_free(heap);
trx_commit_for_mysql(trx);
- error = DB_ERROR;
+ err = DB_ERROR;
goto error_ret;
}
- dict_mem_table_add_col(table, table->heap,
- (char*) field->field_name,
+ dict_mem_table_add_col(table, heap,
+ field->field_name,
col_type,
dtype_form_prtype(
(ulint) field->type()
@@ -8337,25 +8826,33 @@ err_col:
/* Add the FTS doc_id hidden column. */
if (flags2 & DICT_TF2_FTS && !has_doc_id_col) {
- fts_add_doc_id_column(table);
+ fts_add_doc_id_column(table, heap);
}
- error = row_create_table_for_mysql(table, trx);
+ err = row_create_table_for_mysql(table, trx, false);
- if (error == DB_DUPLICATE_KEY) {
- char buf[100];
+ mem_heap_free(heap);
+
+ if (err == DB_DUPLICATE_KEY || err == DB_TABLESPACE_EXISTS) {
+ char display_name[FN_REFLEN];
char* buf_end = innobase_convert_identifier(
- buf, sizeof buf - 1, table_name, strlen(table_name),
- trx->mysql_thd, TRUE);
+ display_name, sizeof(display_name) - 1,
+ table_name, strlen(table_name),
+ thd, TRUE);
*buf_end = '\0';
- my_error(ER_TABLE_EXISTS_ERROR, MYF(0), buf);
+
+ my_error(err == DB_DUPLICATE_KEY
+ ? ER_TABLE_EXISTS_ERROR
+ : ER_TABLESPACE_EXISTS, MYF(0), display_name);
}
-error_ret:
- error = convert_error_code_to_mysql(error, flags, NULL);
+ if (err == DB_SUCCESS && (flags2 & DICT_TF2_FTS)) {
+ fts_optimize_add_table(table);
+ }
- DBUG_RETURN(error);
+error_ret:
+ DBUG_RETURN(convert_error_code_to_mysql(err, flags, thd));
}
/*****************************************************************//**
@@ -8365,108 +8862,113 @@ int
create_index(
/*=========*/
trx_t* trx, /*!< in: InnoDB transaction handle */
- TABLE* form, /*!< in: information on table
+ const TABLE* form, /*!< in: information on table
columns and indexes */
ulint flags, /*!< in: InnoDB table flags */
const char* table_name, /*!< in: table name */
uint key_num) /*!< in: index number */
{
- Field* field;
dict_index_t* index;
int error;
- ulint n_fields;
- KEY* key;
- KEY_PART_INFO* key_part;
+ const KEY* key;
ulint ind_type;
- ulint col_type;
- ulint prefix_len = 0;
- ulint is_unsigned;
- ulint i;
- ulint j;
- ulint* field_lengths = NULL;
+ ulint* field_lengths;
DBUG_ENTER("create_index");
key = form->key_info + key_num;
- n_fields = key->key_parts;
-
/* Assert that "GEN_CLUST_INDEX" cannot be used as non-primary index */
ut_a(innobase_strcasecmp(key->name, innobase_index_reserve_name) != 0);
- ind_type = 0;
-
if (key->flags & HA_FULLTEXT) {
- ind_type = DICT_FTS;
- } else {
- if (key_num == form->s->primary_key) {
- ind_type = ind_type | DICT_CLUSTERED;
+ index = dict_mem_index_create(table_name, key->name, 0,
+ DICT_FTS,
+ key->user_defined_key_parts);
+
+ for (ulint i = 0; i < key->user_defined_key_parts; i++) {
+ KEY_PART_INFO* key_part = key->key_part + i;
+ dict_mem_index_add_field(
+ index, key_part->field->field_name, 0);
}
- if (key->flags & HA_NOSAME ) {
- ind_type = ind_type | DICT_UNIQUE;
- }
- }
+ DBUG_RETURN(convert_error_code_to_mysql(
+ row_create_index_for_mysql(
+ index, trx, NULL),
+ flags, NULL));
- /* We pass 0 as the space id, and determine at a lower level the space
- id where to store the table */
+ }
- index = dict_mem_index_create(table_name, key->name, 0,
- ind_type, n_fields);
+ ind_type = 0;
- if (ind_type != DICT_FTS) {
- field_lengths = (ulint*) my_malloc(
- sizeof(ulint) * n_fields, MYF(MY_FAE));
+ if (key_num == form->s->primary_key) {
+ ind_type |= DICT_CLUSTERED;
+ }
- ut_ad(!(index->type & DICT_FTS));
+ if (key->flags & HA_NOSAME) {
+ ind_type |= DICT_UNIQUE;
}
- for (i = 0; i < n_fields; i++) {
- key_part = key->key_part + i;
+ field_lengths = (ulint*) my_malloc(
+ key->user_defined_key_parts * sizeof *
+ field_lengths, MYF(MY_FAE));
- if (ind_type != DICT_FTS) {
+ /* We pass 0 as the space id, and determine at a lower level the space
+ id where to store the table */
- /* (The flag HA_PART_KEY_SEG denotes in MySQL a
- column prefix field in an index: we only store a
- specified number of first bytes of the column to
- the index field.) The flag does not seem to be
- properly set by MySQL. Let us fall back on testing
- the length of the key part versus the column. */
+ index = dict_mem_index_create(table_name, key->name, 0,
+ ind_type, key->user_defined_key_parts);
- field = NULL;
+ for (ulint i = 0; i < key->user_defined_key_parts; i++) {
+ KEY_PART_INFO* key_part = key->key_part + i;
+ ulint prefix_len;
+ ulint col_type;
+ ulint is_unsigned;
- for (j = 0; j < form->s->fields; j++) {
- field = form->field[j];
+ /* (The flag HA_PART_KEY_SEG denotes in MySQL a
+ column prefix field in an index: we only store a
+ specified number of first bytes of the column to
+ the index field.) The flag does not seem to be
+ properly set by MySQL. Let us fall back on testing
+ the length of the key part versus the column. */
- if (0 == innobase_strcasecmp(
- field->field_name,
- key_part->field->field_name)) {
- /* Found the corresponding column */
+ Field* field = NULL;
- break;
- }
- }
+ for (ulint j = 0; j < form->s->fields; j++) {
- ut_a(j < form->s->fields);
+ field = form->field[j];
- col_type = get_innobase_type_from_mysql_type(
- &is_unsigned, key_part->field);
+ if (0 == innobase_strcasecmp(
+ field->field_name,
+ key_part->field->field_name)) {
+ /* Found the corresponding column */
- if (DATA_BLOB == col_type
- || (key_part->length < field->pack_length()
- && field->type() != MYSQL_TYPE_VARCHAR)
- || (field->type() == MYSQL_TYPE_VARCHAR
- && key_part->length < field->pack_length()
- - ((Field_varstring*) field)->length_bytes)) {
+ goto found;
+ }
+ }
+ ut_error;
+found:
+ col_type = get_innobase_type_from_mysql_type(
+ &is_unsigned, key_part->field);
+
+ if (DATA_BLOB == col_type
+ || (key_part->length < field->pack_length()
+ && field->type() != MYSQL_TYPE_VARCHAR)
+ || (field->type() == MYSQL_TYPE_VARCHAR
+ && key_part->length < field->pack_length()
+ - ((Field_varstring*) field)->length_bytes)) {
+
+ switch (col_type) {
+ default:
prefix_len = key_part->length;
-
- if (col_type == DATA_INT
- || col_type == DATA_FLOAT
- || col_type == DATA_DOUBLE
- || col_type == DATA_DECIMAL) {
- sql_print_error(
+ break;
+ case DATA_INT:
+ case DATA_FLOAT:
+ case DATA_DOUBLE:
+ case DATA_DECIMAL:
+ sql_print_error(
"MySQL is trying to create a column "
"prefix index field, on an "
"inappropriate data type. Table "
@@ -8474,17 +8976,16 @@ create_index(
table_name,
key_part->field->field_name);
- prefix_len = 0;
- }
- } else {
prefix_len = 0;
}
-
- field_lengths[i] = key_part->length;
+ } else {
+ prefix_len = 0;
}
- dict_mem_index_add_field(index,
- (char*) key_part->field->field_name, prefix_len);
+ field_lengths[i] = key_part->length;
+
+ dict_mem_index_add_field(
+ index, key_part->field->field_name, prefix_len);
}
ut_ad(key->flags & HA_FULLTEXT || !(index->type & DICT_FTS));
@@ -8492,9 +8993,10 @@ create_index(
/* Even though we've defined max_supported_key_part_length, we
still do our own checking using field_lengths to be absolutely
sure we don't create too long indexes. */
- error = row_create_index_for_mysql(index, trx, field_lengths);
- error = convert_error_code_to_mysql(error, flags, NULL);
+ error = convert_error_code_to_mysql(
+ row_create_index_for_mysql(index, trx, field_lengths),
+ flags, NULL);
my_free(field_lengths);
@@ -8513,7 +9015,7 @@ create_clustered_index_when_no_primary(
const char* table_name) /*!< in: table name */
{
dict_index_t* index;
- int error;
+ dberr_t error;
/* We pass 0 as the space id, and determine at a lower level the space
id where to store the table */
@@ -8523,9 +9025,7 @@ create_clustered_index_when_no_primary(
error = row_create_index_for_mysql(index, trx, NULL);
- error = convert_error_code_to_mysql(error, flags, NULL);
-
- return(error);
+ return(convert_error_code_to_mysql(error, flags, NULL));
}
/*****************************************************************//**
@@ -8562,11 +9062,11 @@ get_row_format_name(
if (!use_tablespace) { \
push_warning_printf( \
thd, Sql_condition::WARN_LEVEL_WARN, \
- HA_WRONG_CREATE_OPTION, \
+ ER_ILLEGAL_HA_CREATE_OPTION, \
"InnoDB: ROW_FORMAT=%s requires" \
" innodb_file_per_table.", \
get_row_format_name(row_format)); \
- ret = FALSE; \
+ ret = "ROW_FORMAT"; \
}
/** If file-format is Antelope, issue warning and set ret false */
@@ -8574,11 +9074,11 @@ get_row_format_name(
if (srv_file_format < UNIV_FORMAT_B) { \
push_warning_printf( \
thd, Sql_condition::WARN_LEVEL_WARN, \
- HA_WRONG_CREATE_OPTION, \
+ ER_ILLEGAL_HA_CREATE_OPTION, \
"InnoDB: ROW_FORMAT=%s requires" \
" innodb_file_format > Antelope.", \
get_row_format_name(row_format)); \
- ret = FALSE; \
+ ret = "ROW_FORMAT"; \
}
@@ -8587,11 +9087,11 @@ Validates the create options. We may build on this function
in future. For now, it checks two specifiers:
KEY_BLOCK_SIZE and ROW_FORMAT
If innodb_strict_mode is not set then this function is a no-op
-@return TRUE if valid. */
-static
-ibool
-create_options_are_valid(
-/*=====================*/
+@return NULL if valid, string if not. */
+UNIV_INTERN
+const char*
+create_options_are_invalid(
+/*=======================*/
THD* thd, /*!< in: connection thread. */
TABLE* form, /*!< in: information on table
columns and indexes */
@@ -8599,14 +9099,14 @@ create_options_are_valid(
bool use_tablespace) /*!< in: srv_file_per_table */
{
ibool kbs_specified = FALSE;
- ibool ret = TRUE;
+ const char* ret = NULL;
enum row_type row_format = form->s->row_type;
ut_ad(thd != NULL);
/* If innodb_strict_mode is not set don't do any validation. */
if (!(THDVAR(thd, strict_mode))) {
- return(TRUE);
+ return(NULL);
}
ut_ad(form != NULL);
@@ -8626,18 +9126,18 @@ create_options_are_valid(
if (!use_tablespace) {
push_warning(
thd, Sql_condition::WARN_LEVEL_WARN,
- HA_WRONG_CREATE_OPTION,
+ ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: KEY_BLOCK_SIZE requires"
" innodb_file_per_table.");
- ret = FALSE;
+ ret = "KEY_BLOCK_SIZE";
}
if (srv_file_format < UNIV_FORMAT_B) {
push_warning(
thd, Sql_condition::WARN_LEVEL_WARN,
- HA_WRONG_CREATE_OPTION,
+ ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: KEY_BLOCK_SIZE requires"
" innodb_file_format > Antelope.");
- ret = FALSE;
+ ret = "KEY_BLOCK_SIZE";
}
/* The maximum KEY_BLOCK_SIZE (KBS) is 16. But if
@@ -8649,22 +9149,22 @@ create_options_are_valid(
if (create_info->key_block_size > kbs_max) {
push_warning_printf(
thd, Sql_condition::WARN_LEVEL_WARN,
- HA_WRONG_CREATE_OPTION,
+ ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: KEY_BLOCK_SIZE=%ld"
" cannot be larger than %ld.",
create_info->key_block_size,
kbs_max);
- ret = FALSE;
+ ret = "KEY_BLOCK_SIZE";
}
break;
default:
push_warning_printf(
thd, Sql_condition::WARN_LEVEL_WARN,
- HA_WRONG_CREATE_OPTION,
+ ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: invalid KEY_BLOCK_SIZE = %lu."
" Valid values are [1, 2, 4, 8, 16]",
create_info->key_block_size);
- ret = FALSE;
+ ret = "KEY_BLOCK_SIZE";
break;
}
}
@@ -8685,11 +9185,11 @@ create_options_are_valid(
if (kbs_specified) {
push_warning_printf(
thd, Sql_condition::WARN_LEVEL_WARN,
- HA_WRONG_CREATE_OPTION,
+ ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: cannot specify ROW_FORMAT = %s"
" with KEY_BLOCK_SIZE.",
get_row_format_name(row_format));
- ret = FALSE;
+ ret = "KEY_BLOCK_SIZE";
}
break;
case ROW_TYPE_DEFAULT:
@@ -8699,12 +9199,42 @@ create_options_are_valid(
case ROW_TYPE_NOT_USED:
push_warning(
thd, Sql_condition::WARN_LEVEL_WARN,
- HA_WRONG_CREATE_OPTION, \
+ ER_ILLEGAL_HA_CREATE_OPTION, \
"InnoDB: invalid ROW_FORMAT specifier.");
- ret = FALSE;
+ ret = "ROW_TYPE";
break;
}
+ /* Use DATA DIRECTORY only with file-per-table. */
+ if (create_info->data_file_name && !use_tablespace) {
+ push_warning(
+ thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_ILLEGAL_HA_CREATE_OPTION,
+ "InnoDB: DATA DIRECTORY requires"
+ " innodb_file_per_table.");
+ ret = "DATA DIRECTORY";
+ }
+
+ /* Do not use DATA DIRECTORY with TEMPORARY TABLE. */
+ if (create_info->data_file_name
+ && create_info->options & HA_LEX_CREATE_TMP_TABLE) {
+ push_warning(
+ thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_ILLEGAL_HA_CREATE_OPTION,
+ "InnoDB: DATA DIRECTORY cannot be used"
+ " for TEMPORARY tables.");
+ ret = "DATA DIRECTORY";
+ }
+
+ /* Do not allow INDEX_DIRECTORY */
+ if (create_info->index_file_name) {
+ push_warning_printf(
+ thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_ILLEGAL_HA_CREATE_OPTION,
+ "InnoDB: INDEX DIRECTORY is not supported");
+ ret = "INDEX DIRECTORY";
+ }
+
return(ret);
}
@@ -8720,11 +9250,18 @@ ha_innobase::update_create_info(
ha_innobase::info(HA_STATUS_AUTO);
create_info->auto_increment_value = stats.auto_increment_value;
}
+
+ /* Update the DATA DIRECTORY name from SYS_DATAFILES. */
+ dict_get_and_save_data_dir_path(prebuilt->table, false);
+
+ if (prebuilt->table->data_dir_path) {
+ create_info->data_file_name = prebuilt->table->data_dir_path;
+ }
}
/*****************************************************************//**
Initialize the table FTS stopword list
-@TRUE if succeed */
+@return TRUE if success */
UNIV_INTERN
ibool
innobase_fts_load_stopword(
@@ -8733,68 +9270,38 @@ innobase_fts_load_stopword(
trx_t* trx, /*!< in: transaction */
THD* thd) /*!< in: current thread */
{
- return (fts_load_stopword(table, trx,
- fts_server_stopword_table,
- THDVAR(thd, ft_user_stopword_table),
- THDVAR(thd, ft_enable_stopword), FALSE));
+ return(fts_load_stopword(table, trx,
+ fts_server_stopword_table,
+ THDVAR(thd, ft_user_stopword_table),
+ THDVAR(thd, ft_enable_stopword), FALSE));
}
+
/*****************************************************************//**
-Creates a new table to an InnoDB database.
-@return error number */
+Parses the table name into normal name and either temp path or remote path
+if needed.
+@return 0 if successful, otherwise, error number */
UNIV_INTERN
int
-ha_innobase::create(
-/*================*/
- const char* name, /*!< in: table name */
- TABLE* form, /*!< in: information on table
- columns and indexes */
- HA_CREATE_INFO* create_info) /*!< in: more information of the
+ha_innobase::parse_table_name(
+/*==========================*/
+ const char* name, /*!< in/out: table name provided*/
+ HA_CREATE_INFO* create_info, /*!< in: more information of the
created table, contains also the
create statement string */
+ ulint flags, /*!< in: flags*/
+ ulint flags2, /*!< in: flags2*/
+ char* norm_name, /*!< out: normalized table name */
+ char* temp_path, /*!< out: absolute path of table */
+ char* remote_path) /*!< out: remote path of table */
{
- int error;
- trx_t* parent_trx;
- trx_t* trx;
- int primary_key_no;
- uint i;
- char name2[FN_REFLEN];
- char norm_name[FN_REFLEN];
THD* thd = ha_thd();
- ib_int64_t auto_inc_value;
- ulint fts_indexes = 0;
- ibool zip_allowed = TRUE;
- enum row_type row_format;
- rec_format_t innodb_row_format = REC_FORMAT_COMPACT;
-
- /* Cache the global variable "srv_file_per_table" to a local
- variable before using it. Note that "srv_file_per_table"
- is not under dict_sys mutex protection, and could be changed
- while creating the table. So we read the current value here
- and make all further decisions based on this. */
- bool use_tablespace = srv_file_per_table;
-
- /* Zip Shift Size - log2 - 9 of compressed page size,
- zero for uncompressed */
- ulint zip_ssize = 0;
- ulint flags = 0;
- ulint flags2 = 0;
- dict_table_t* innobase_table = NULL;
-
- /* Cache the value of innodb_file_format, in case it is
- modified by another thread while the table is being created. */
- const ulint file_format_allowed = srv_file_format;
- const char* stmt;
- size_t stmt_len;
-
- DBUG_ENTER("ha_innobase::create");
-
- DBUG_ASSERT(thd != NULL);
- DBUG_ASSERT(create_info != NULL);
+ bool use_tablespace = flags2 & DICT_TF2_USE_TABLESPACE;
+ DBUG_ENTER("ha_innobase::parse_table_name");
#ifdef __WIN__
/* Names passed in from server are in two formats:
1. <database_name>/<table_name>: for normal table creation
- 2. full path: for temp table creation, or sym link
+ 2. full path: for temp table creation, or DATA DIRECTORY.
When srv_file_per_table is on and mysqld_embedded is off,
check for full path pattern, i.e.
@@ -8805,7 +9312,7 @@ ha_innobase::create(
if (use_tablespace
&& !mysqld_embedded
- && (!create_info->options & HA_LEX_CREATE_TMP_TABLE)) {
+ && !(create_info->options & HA_LEX_CREATE_TMP_TABLE)) {
if ((name[1] == ':')
|| (name[0] == '\\' && name[1] == '\\')) {
@@ -8815,26 +9322,113 @@ ha_innobase::create(
}
#endif
- if (form->s->fields > 1000) {
- /* The limit probably should be REC_MAX_N_FIELDS - 3 = 1020,
- but we play safe here */
+ normalize_table_name(norm_name, name);
+ temp_path[0] = '\0';
+ remote_path[0] = '\0';
+
+ /* A full path is used for TEMPORARY TABLE and DATA DIRECTORY.
+ In the case of;
+ CREATE TEMPORARY TABLE ... DATA DIRECTORY={path} ... ;
+ We ignore the DATA DIRECTORY. */
+ if (create_info->options & HA_LEX_CREATE_TMP_TABLE) {
+ strncpy(temp_path, name, FN_REFLEN - 1);
+ }
+
+ if (create_info->data_file_name) {
+ bool ignore = false;
+
+ /* Use DATA DIRECTORY only with file-per-table. */
+ if (!use_tablespace) {
+ push_warning(
+ thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_ILLEGAL_HA_CREATE_OPTION,
+ "InnoDB: DATA DIRECTORY requires"
+ " innodb_file_per_table.");
+ ignore = true;
+ }
+
+ /* Do not use DATA DIRECTORY with TEMPORARY TABLE. */
+ if (create_info->options & HA_LEX_CREATE_TMP_TABLE) {
+ push_warning(
+ thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_ILLEGAL_HA_CREATE_OPTION,
+ "InnoDB: DATA DIRECTORY cannot be"
+ " used for TEMPORARY tables.");
+ ignore = true;
+ }
- DBUG_RETURN(HA_ERR_TO_BIG_ROW);
+ if (ignore) {
+ push_warning_printf(
+ thd, Sql_condition::WARN_LEVEL_WARN,
+ WARN_OPTION_IGNORED,
+ ER_DEFAULT(WARN_OPTION_IGNORED),
+ "DATA DIRECTORY");
+ } else {
+ strncpy(remote_path, create_info->data_file_name,
+ FN_REFLEN - 1);
+ }
+ }
+
+ if (create_info->index_file_name) {
+ push_warning_printf(
+ thd, Sql_condition::WARN_LEVEL_WARN,
+ WARN_OPTION_IGNORED,
+ ER_DEFAULT(WARN_OPTION_IGNORED),
+ "INDEX DIRECTORY");
}
+ DBUG_RETURN(0);
+}
+
+/*****************************************************************//**
+Determines InnoDB table flags.
+@retval true if successful, false if error */
+UNIV_INTERN
+bool
+innobase_table_flags(
+/*=================*/
+ const TABLE* form, /*!< in: table */
+ const HA_CREATE_INFO* create_info, /*!< in: information
+ on table columns and indexes */
+ THD* thd, /*!< in: connection */
+ bool use_tablespace, /*!< in: whether to create
+ outside system tablespace */
+ ulint* flags, /*!< out: DICT_TF flags */
+ ulint* flags2) /*!< out: DICT_TF2 flags */
+{
+ DBUG_ENTER("innobase_table_flags");
+
+ const char* fts_doc_id_index_bad = NULL;
+ bool zip_allowed = true;
+ ulint zip_ssize = 0;
+ enum row_type row_format;
+ rec_format_t innodb_row_format = REC_FORMAT_COMPACT;
+ bool use_data_dir;
+
+ /* Cache the value of innodb_file_format, in case it is
+ modified by another thread while the table is being created. */
+ const ulint file_format_allowed = srv_file_format;
+
+ *flags = 0;
+ *flags2 = 0;
+
/* Check if there are any FTS indexes defined on this table. */
- for (i = 0; i < form->s->keys; i++) {
- KEY* key = form->key_info + i;
+ for (uint i = 0; i < form->s->keys; i++) {
+ const KEY* key = &form->key_info[i];
if (key->flags & HA_FULLTEXT) {
- ++fts_indexes;
+ *flags2 |= DICT_TF2_FTS;
/* We don't support FTS indexes in temporary
tables. */
if (create_info->options & HA_LEX_CREATE_TMP_TABLE) {
my_error(ER_INNODB_NO_FT_TEMP_TABLE, MYF(0));
- DBUG_RETURN(-1);
+ DBUG_RETURN(false);
+ }
+
+ if (fts_doc_id_index_bad) {
+ goto index_bad;
}
}
@@ -8847,41 +9441,15 @@ ha_innobase::create(
|| strcmp(key->name, FTS_DOC_ID_INDEX_NAME)
|| strcmp(key->key_part[0].field->field_name,
FTS_DOC_ID_COL_NAME)) {
- push_warning_printf(thd,
- Sql_condition::WARN_LEVEL_WARN,
- ER_WRONG_NAME_FOR_INDEX,
- " InnoDB: Index name %s is reserved"
- " for the unique index on"
- " FTS_DOC_ID column for FTS"
- " document ID indexing"
- " on table %s. Please check"
- " the index definition to"
- " make sure it is of correct"
- " type\n",
- FTS_DOC_ID_INDEX_NAME,
- name);
- my_error(ER_WRONG_NAME_FOR_INDEX, MYF(0),
- FTS_DOC_ID_INDEX_NAME);
- DBUG_RETURN(-1);
+ fts_doc_id_index_bad = key->name;
}
- }
-
- strcpy(name2, name);
-
- normalize_table_name(norm_name, name2);
-
- /* Create the table definition in InnoDB */
-
- flags = 0;
-
- if (fts_indexes > 0) {
- flags2 = DICT_TF2_FTS;
- }
- /* Validate create options if innodb_strict_mode is set. */
- if (!create_options_are_valid(
- thd, form, create_info, use_tablespace)) {
- DBUG_RETURN(HA_WRONG_CREATE_OPTION);
+ if (fts_doc_id_index_bad && (*flags2 & DICT_TF2_FTS)) {
+index_bad:
+ my_error(ER_INNODB_FT_WRONG_DOCID_INDEX, MYF(0),
+ fts_doc_id_index_bad);
+ DBUG_RETURN(false);
+ }
}
if (create_info->key_block_size) {
@@ -8905,7 +9473,7 @@ ha_innobase::create(
if (!use_tablespace) {
push_warning(
thd, Sql_condition::WARN_LEVEL_WARN,
- HA_WRONG_CREATE_OPTION,
+ ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: KEY_BLOCK_SIZE requires"
" innodb_file_per_table.");
zip_allowed = FALSE;
@@ -8914,7 +9482,7 @@ ha_innobase::create(
if (file_format_allowed < UNIV_FORMAT_B) {
push_warning(
thd, Sql_condition::WARN_LEVEL_WARN,
- HA_WRONG_CREATE_OPTION,
+ ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: KEY_BLOCK_SIZE requires"
" innodb_file_format > Antelope.");
zip_allowed = FALSE;
@@ -8925,7 +9493,7 @@ ha_innobase::create(
PAGE_ZIP_SSIZE_MAX)) {
push_warning_printf(
thd, Sql_condition::WARN_LEVEL_WARN,
- HA_WRONG_CREATE_OPTION,
+ ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: ignoring KEY_BLOCK_SIZE=%lu.",
create_info->key_block_size);
}
@@ -8947,7 +9515,7 @@ ha_innobase::create(
with ALTER TABLE anyway. */
push_warning_printf(
thd, Sql_condition::WARN_LEVEL_WARN,
- HA_WRONG_CREATE_OPTION,
+ ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: ignoring KEY_BLOCK_SIZE=%lu"
" unless ROW_FORMAT=COMPRESSED.",
create_info->key_block_size);
@@ -8975,14 +9543,14 @@ ha_innobase::create(
if (!use_tablespace) {
push_warning_printf(
thd, Sql_condition::WARN_LEVEL_WARN,
- HA_WRONG_CREATE_OPTION,
+ ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: ROW_FORMAT=%s requires"
" innodb_file_per_table.",
get_row_format_name(row_format));
} else if (file_format_allowed == UNIV_FORMAT_A) {
push_warning_printf(
thd, Sql_condition::WARN_LEVEL_WARN,
- HA_WRONG_CREATE_OPTION,
+ ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: ROW_FORMAT=%s requires"
" innodb_file_format > Antelope.",
get_row_format_name(row_format));
@@ -8999,7 +9567,7 @@ ha_innobase::create(
case ROW_TYPE_PAGE:
push_warning(
thd, Sql_condition::WARN_LEVEL_WARN,
- HA_WRONG_CREATE_OPTION,
+ ER_ILLEGAL_HA_CREATE_OPTION,
"InnoDB: assuming ROW_FORMAT=COMPACT.");
case ROW_TYPE_DEFAULT:
/* If we fell through, set row format to Compact. */
@@ -9012,12 +9580,100 @@ ha_innobase::create(
if (!zip_allowed) {
zip_ssize = 0;
}
- dict_tf_set(&flags, innodb_row_format, zip_ssize);
+
+ use_data_dir = use_tablespace
+ && ((create_info->data_file_name != NULL)
+ && !(create_info->options & HA_LEX_CREATE_TMP_TABLE));
+
+ dict_tf_set(flags, innodb_row_format, zip_ssize, use_data_dir);
+
+ if (create_info->options & HA_LEX_CREATE_TMP_TABLE) {
+ *flags2 |= DICT_TF2_TEMPORARY;
+ }
+
+ if (use_tablespace) {
+ *flags2 |= DICT_TF2_USE_TABLESPACE;
+ }
+
+ DBUG_RETURN(true);
+}
+
+/*****************************************************************//**
+Creates a new table to an InnoDB database.
+@return error number */
+UNIV_INTERN
+int
+ha_innobase::create(
+/*================*/
+ const char* name, /*!< in: table name */
+ TABLE* form, /*!< in: information on table
+ columns and indexes */
+ HA_CREATE_INFO* create_info) /*!< in: more information of the
+ created table, contains also the
+ create statement string */
+{
+ int error;
+ trx_t* parent_trx;
+ trx_t* trx;
+ int primary_key_no;
+ uint i;
+ char norm_name[FN_REFLEN]; /* {database}/{tablename} */
+ char temp_path[FN_REFLEN]; /* absolute path of temp frm */
+ char remote_path[FN_REFLEN]; /* absolute path of table */
+ THD* thd = ha_thd();
+ ib_int64_t auto_inc_value;
+
+ /* Cache the global variable "srv_file_per_table" to a local
+ variable before using it. Note that "srv_file_per_table"
+ is not under dict_sys mutex protection, and could be changed
+ while creating the table. So we read the current value here
+ and make all further decisions based on this. */
+ bool use_tablespace = srv_file_per_table;
+
+ /* Zip Shift Size - log2 - 9 of compressed page size,
+ zero for uncompressed */
+ ulint flags;
+ ulint flags2;
+ dict_table_t* innobase_table = NULL;
+
+ const char* stmt;
+ size_t stmt_len;
+
+ DBUG_ENTER("ha_innobase::create");
+
+ DBUG_ASSERT(thd != NULL);
+ DBUG_ASSERT(create_info != NULL);
+
+ if (form->s->fields > REC_MAX_N_USER_FIELDS) {
+ DBUG_RETURN(HA_ERR_TOO_MANY_FIELDS);
+ } else if (srv_read_only_mode) {
+ DBUG_RETURN(HA_ERR_TABLE_READONLY);
+ }
+
+ /* Create the table definition in InnoDB */
+
+ /* Validate create options if innodb_strict_mode is set. */
+ if (create_options_are_invalid(
+ thd, form, create_info, use_tablespace)) {
+ DBUG_RETURN(HA_WRONG_CREATE_OPTION);
+ }
+
+ if (!innobase_table_flags(form, create_info,
+ thd, use_tablespace,
+ &flags, &flags2)) {
+ DBUG_RETURN(-1);
+ }
+
+ error = parse_table_name(name, create_info, flags, flags2,
+ norm_name, temp_path, remote_path);
+ if (error) {
+ DBUG_RETURN(error);
+ }
/* Look for a primary key */
primary_key_no = (form->s->primary_key != MAX_KEY ?
- (int) form->s->primary_key :
- -1);
+ (int) form->s->primary_key :
+ -1);
/* Our function innobase_get_mysql_key_number_for_index assumes
the primary key is always number 0, if it exists */
@@ -9034,14 +9690,6 @@ ha_innobase::create(
DBUG_RETURN(HA_ERR_GENERIC);
}
- if (create_info->options & HA_LEX_CREATE_TMP_TABLE) {
- flags2 |= DICT_TF2_TEMPORARY;
- }
-
- if (use_tablespace) {
- flags2 |= DICT_TF2_USE_TABLESPACE;
- }
-
/* Get the transaction associated with the current thd, or create one
if not yet created */
@@ -9060,10 +9708,8 @@ ha_innobase::create(
row_mysql_lock_data_dictionary(trx);
- error = create_table_def(trx, form, norm_name,
- create_info->options & HA_LEX_CREATE_TMP_TABLE ? name2 : NULL,
- flags, flags2);
-
+ error = create_table_def(trx, form, norm_name, temp_path,
+ remote_path, flags, flags2);
if (error) {
goto cleanup;
}
@@ -9093,20 +9739,20 @@ ha_innobase::create(
/* Create the ancillary tables that are common to all FTS indexes on
this table. */
- if (fts_indexes > 0) {
- ulint ret = 0;
+ if (flags2 & DICT_TF2_FTS) {
+ enum fts_doc_id_index_enum ret;
- innobase_table = dict_table_open_on_name_no_stats(
- norm_name, TRUE, DICT_ERR_IGNORE_NONE);
+ innobase_table = dict_table_open_on_name(
+ norm_name, TRUE, FALSE, DICT_ERR_IGNORE_NONE);
ut_a(innobase_table);
- /* Check whether there alreadys exist FTS_DOC_ID_INDEX */
+ /* Check whether there already exists FTS_DOC_ID_INDEX */
ret = innobase_fts_check_doc_id_index_in_def(
form->s->keys, form->s->key_info);
- /* Raise error if FTS_DOC_ID_INDEX is of wrong format */
- if (ret == FTS_INCORRECT_DOC_ID_INDEX) {
+ switch (ret) {
+ case FTS_INCORRECT_DOC_ID_INDEX:
push_warning_printf(thd,
Sql_condition::WARN_LEVEL_WARN,
ER_WRONG_NAME_FOR_INDEX,
@@ -9125,20 +9771,23 @@ ha_innobase::create(
fts_free(innobase_table);
}
- dict_table_close(innobase_table, TRUE);
+ dict_table_close(innobase_table, TRUE, FALSE);
my_error(ER_WRONG_NAME_FOR_INDEX, MYF(0),
FTS_DOC_ID_INDEX_NAME);
error = -1;
goto cleanup;
+ case FTS_EXIST_DOC_ID_INDEX:
+ case FTS_NOT_EXIST_DOC_ID_INDEX:
+ break;
}
- error = fts_create_common_tables(
+ dberr_t err = fts_create_common_tables(
trx, innobase_table, norm_name,
(ret == FTS_EXIST_DOC_ID_INDEX));
- error = convert_error_code_to_mysql(error, 0, NULL);
+ error = convert_error_code_to_mysql(err, 0, NULL);
- dict_table_close(innobase_table, TRUE);
+ dict_table_close(innobase_table, TRUE, FALSE);
if (error) {
goto cleanup;
@@ -9159,11 +9808,11 @@ ha_innobase::create(
stmt = innobase_get_stmt(thd, &stmt_len);
if (stmt) {
- error = row_table_add_foreign_constraints(
+ dberr_t err = row_table_add_foreign_constraints(
trx, stmt, stmt_len, norm_name,
create_info->options & HA_LEX_CREATE_TMP_TABLE);
- switch (error) {
+ switch (err) {
case DB_PARENT_NO_INDEX:
push_warning_printf(
@@ -9184,9 +9833,11 @@ ha_innobase::create(
" table where referencing columns appear"
" as the first columns.\n", norm_name);
break;
+ default:
+ break;
}
- error = convert_error_code_to_mysql(error, flags, NULL);
+ error = convert_error_code_to_mysql(err, flags, NULL);
if (error) {
goto cleanup;
@@ -9194,7 +9845,7 @@ ha_innobase::create(
}
/* Cache all the FTS indexes on this table in the FTS specific
structure. They are used for FTS indexed column update handling. */
- if (fts_indexes > 0) {
+ if (flags2 & DICT_TF2_FTS) {
fts_t* fts = innobase_table->fts;
ut_a(fts != NULL);
@@ -9212,10 +9863,15 @@ ha_innobase::create(
log_buffer_flush_to_disk();
- innobase_table = dict_table_open_on_name(norm_name, FALSE);
+ innobase_table = dict_table_open_on_name(
+ norm_name, FALSE, FALSE, DICT_ERR_IGNORE_NONE);
DBUG_ASSERT(innobase_table != 0);
+ innobase_copy_frm_flags_from_create_info(innobase_table, create_info);
+
+ dict_stats_update(innobase_table, DICT_STATS_EMPTY_TABLE);
+
if (innobase_table) {
/* We update the highest file format in the system table
space, if this table has higher file format setting. */
@@ -9226,9 +9882,9 @@ ha_innobase::create(
}
/* Load server stopword into FTS cache */
- if (fts_indexes > 0) {
+ if (flags2 & DICT_TF2_FTS) {
if (!innobase_fts_load_stopword(innobase_table, NULL, thd)) {
- dict_table_close(innobase_table, FALSE);
+ dict_table_close(innobase_table, FALSE, FALSE);
srv_active_wake_master_thread();
trx_free_for_mysql(trx);
DBUG_RETURN(-1);
@@ -9265,7 +9921,7 @@ ha_innobase::create(
dict_table_autoinc_unlock(innobase_table);
}
- dict_table_close(innobase_table, FALSE);
+ dict_table_close(innobase_table, FALSE, FALSE);
/* Tell the InnoDB server that there might be work for
utility threads: */
@@ -9277,7 +9933,7 @@ ha_innobase::create(
DBUG_RETURN(0);
cleanup:
- innobase_commit_low(trx);
+ trx_rollback_for_mysql(trx);
row_mysql_unlock_data_dictionary(trx);
@@ -9295,9 +9951,8 @@ ha_innobase::discard_or_import_tablespace(
/*======================================*/
my_bool discard) /*!< in: TRUE if discard, else import */
{
+ dberr_t err;
dict_table_t* dict_table;
- trx_t* trx;
- int err;
DBUG_ENTER("ha_innobase::discard_or_import_tablespace");
@@ -9305,18 +9960,85 @@ ha_innobase::discard_or_import_tablespace(
ut_a(prebuilt->trx->magic_n == TRX_MAGIC_N);
ut_a(prebuilt->trx == thd_to_trx(ha_thd()));
+ if (srv_read_only_mode) {
+ DBUG_RETURN(HA_ERR_TABLE_READONLY);
+ }
+
dict_table = prebuilt->table;
- trx = prebuilt->trx;
- if (discard) {
- err = row_discard_tablespace_for_mysql(dict_table->name, trx);
+ if (dict_table->space == TRX_SYS_SPACE) {
+
+ ib_senderrf(
+ prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR,
+ ER_TABLE_IN_SYSTEM_TABLESPACE,
+ table->s->table_name.str);
+
+ DBUG_RETURN(HA_ERR_TABLE_NEEDS_UPGRADE);
+ }
+
+ trx_start_if_not_started(prebuilt->trx);
+
+ /* In case MySQL calls this in the middle of a SELECT query, release
+ possible adaptive hash latch to avoid deadlocks of threads. */
+ trx_search_latch_release_if_reserved(prebuilt->trx);
+
+ /* Obtain an exclusive lock on the table. */
+ err = row_mysql_lock_table(
+ prebuilt->trx, dict_table, LOCK_X,
+ discard ? "setting table lock for DISCARD TABLESPACE"
+ : "setting table lock for IMPORT TABLESPACE");
+
+ if (err != DB_SUCCESS) {
+ /* unable to lock the table: do nothing */
+ } else if (discard) {
+
+ /* Discarding an already discarded tablespace should be an
+ idempotent operation. Also, if the .ibd file is missing the
+ user may want to set the DISCARD flag in order to IMPORT
+ a new tablespace. */
+
+ if (dict_table->ibd_file_missing) {
+ ib_senderrf(
+ prebuilt->trx->mysql_thd,
+ IB_LOG_LEVEL_WARN, ER_TABLESPACE_MISSING,
+ table->s->table_name.str);
+ }
+
+ err = row_discard_tablespace_for_mysql(
+ dict_table->name, prebuilt->trx);
+
+ } else if (!dict_table->ibd_file_missing) {
+ /* Commit the transaction in order to
+ release the table lock. */
+ trx_commit_for_mysql(prebuilt->trx);
+
+ ib_senderrf(
+ prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR,
+ ER_TABLESPACE_EXISTS, table->s->table_name.str);
+
+ DBUG_RETURN(HA_ERR_TABLE_EXIST);
} else {
- err = row_import_tablespace_for_mysql(dict_table->name, trx);
+ err = row_import_for_mysql(dict_table, prebuilt);
+
+ if (err == DB_SUCCESS) {
+
+ if (table->found_next_number_field) {
+ dict_table_autoinc_lock(dict_table);
+ innobase_initialize_autoinc();
+ dict_table_autoinc_unlock(dict_table);
+ }
+
+ info(HA_STATUS_TIME
+ | HA_STATUS_CONST
+ | HA_STATUS_VARIABLE
+ | HA_STATUS_AUTO);
+ }
}
- err = convert_error_code_to_mysql(err, dict_table->flags, NULL);
+ /* Commit the transaction in order to release the table lock. */
+ trx_commit_for_mysql(prebuilt->trx);
- DBUG_RETURN(err);
+ DBUG_RETURN(convert_error_code_to_mysql(err, dict_table->flags, NULL));
}
/*****************************************************************//**
@@ -9327,10 +10049,15 @@ int
ha_innobase::truncate()
/*===================*/
{
+ dberr_t err;
int error;
DBUG_ENTER("ha_innobase::truncate");
+ if (srv_read_only_mode) {
+ DBUG_RETURN(HA_ERR_TABLE_READONLY);
+ }
+
/* Get the transaction associated with the current thd, or create one
if not yet created, and update prebuilt->trx */
@@ -9341,11 +10068,28 @@ ha_innobase::truncate()
}
/* Truncate the table in InnoDB */
- error = row_truncate_table_for_mysql(prebuilt->table, prebuilt->trx);
+ err = row_truncate_table_for_mysql(prebuilt->table, prebuilt->trx);
- error = convert_error_code_to_mysql(error, prebuilt->table->flags,
- NULL);
+ switch (err) {
+ case DB_TABLESPACE_DELETED:
+ case DB_TABLESPACE_NOT_FOUND:
+ ib_senderrf(
+ prebuilt->trx->mysql_thd, IB_LOG_LEVEL_ERROR,
+ (err == DB_TABLESPACE_DELETED ?
+ ER_TABLESPACE_DISCARDED : ER_TABLESPACE_MISSING),
+ table->s->table_name.str);
+ table->status = STATUS_NOT_FOUND;
+ error = HA_ERR_NO_SUCH_TABLE;
+ break;
+
+ default:
+ error = convert_error_code_to_mysql(
+ err, prebuilt->table->flags,
+ prebuilt->trx->mysql_thd);
+ table->status = STATUS_NOT_FOUND;
+ break;
+ }
DBUG_RETURN(error);
}
@@ -9363,12 +10107,11 @@ ha_innobase::delete_table(
const char* name) /*!< in: table name */
{
ulint name_len;
- int error;
+ dberr_t err;
trx_t* parent_trx;
trx_t* trx;
- THD *thd = ha_thd();
- char norm_name[1000];
- char errstr[1024];
+ THD* thd = ha_thd();
+ char norm_name[FN_REFLEN];
DBUG_ENTER("ha_innobase::delete_table");
@@ -9376,29 +10119,21 @@ ha_innobase::delete_table(
"test_normalize_table_name_low",
test_normalize_table_name_low();
);
+ DBUG_EXECUTE_IF(
+ "test_ut_format_name",
+ test_ut_format_name();
+ );
/* Strangely, MySQL passes the table name without the '.frm'
extension, in contrast to ::create */
normalize_table_name(norm_name, name);
- if (IS_MAGIC_TABLE_AND_USER_DENIED_ACCESS(norm_name, thd)) {
+ if (srv_read_only_mode) {
+ DBUG_RETURN(HA_ERR_TABLE_READONLY);
+ } else if (IS_MAGIC_TABLE_AND_USER_DENIED_ACCESS(norm_name, thd)) {
DBUG_RETURN(HA_ERR_GENERIC);
}
- /* Remove stats for this table and all of its indexes from the
- persistent storage if it exists and if there are stats for this
- table in there. This function creates its own trx and commits
- it. */
- error = dict_stats_delete_table_stats(norm_name,
- errstr, sizeof(errstr));
- if (error != DB_SUCCESS) {
- push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
- ER_LOCK_WAIT_TIMEOUT, errstr);
- }
-
- /* Get the transaction associated with the current thd, or create one
- if not yet created */
-
parent_trx = check_trx_exists(thd);
/* In case MySQL calls this in the middle of a SELECT query, release
@@ -9419,14 +10154,14 @@ ha_innobase::delete_table(
/* We are doing a DDL operation. */
++trx->will_lock;
+ trx->ddl = true;
/* Drop the table in InnoDB */
- error = row_drop_table_for_mysql(norm_name, trx,
- thd_sql_command(thd)
- == SQLCOM_DROP_DB);
+ err = row_drop_table_for_mysql(
+ norm_name, trx, thd_sql_command(thd) == SQLCOM_DROP_DB);
- if (error == DB_TABLE_NOT_FOUND
+ if (err == DB_TABLE_NOT_FOUND
&& innobase_get_lower_case_table_names() == 1) {
char* is_part = NULL;
#ifdef __WIN__
@@ -9436,25 +10171,25 @@ ha_innobase::delete_table(
#endif /* __WIN__ */
if (is_part) {
- char par_case_name[MAX_FULL_NAME_LEN + 1];
+ char par_case_name[FN_REFLEN];
#ifndef __WIN__
/* Check for the table using lower
case name, including the partition
separator "P" */
- memcpy(par_case_name, norm_name, strlen(norm_name));
- par_case_name[strlen(norm_name)] = 0;
+ strcpy(par_case_name, norm_name);
innobase_casedn_str(par_case_name);
#else
/* On Windows platfrom, check
whether there exists table name in
system table whose name is
not being normalized to lower case */
- normalize_table_name_low(par_case_name, name, FALSE);
+ normalize_table_name_low(
+ par_case_name, name, FALSE);
#endif
- error = row_drop_table_for_mysql(par_case_name, trx,
- thd_sql_command(thd)
- == SQLCOM_DROP_DB);
+ err = row_drop_table_for_mysql(
+ par_case_name, trx,
+ thd_sql_command(thd) == SQLCOM_DROP_DB);
}
}
@@ -9473,9 +10208,7 @@ ha_innobase::delete_table(
trx_free_for_mysql(trx);
- error = convert_error_code_to_mysql(error, 0, NULL);
-
- DBUG_RETURN(error);
+ DBUG_RETURN(convert_error_code_to_mysql(err, 0, NULL));
}
/*****************************************************************//**
@@ -9501,6 +10234,10 @@ innobase_drop_database(
DBUG_ASSERT(hton == innodb_hton_ptr);
+ if (srv_read_only_mode) {
+ return;
+ }
+
/* In the Windows plugin, thd = current_thd is always NULL */
if (thd) {
trx_t* parent_trx = check_trx_exists(thd);
@@ -9556,36 +10293,36 @@ innobase_drop_database(
innobase_commit_low(trx);
trx_free_for_mysql(trx);
}
+
/*********************************************************************//**
Renames an InnoDB table.
-@return 0 or error code */
-static
-int
+@return DB_SUCCESS or error code */
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
innobase_rename_table(
/*==================*/
trx_t* trx, /*!< in: transaction */
const char* from, /*!< in: old name of the table */
- const char* to, /*!< in: new name of the table */
- ibool lock_and_commit)
- /*!< in: TRUE=lock data dictionary and commit */
+ const char* to) /*!< in: new name of the table */
{
- int error;
- char* norm_to;
- char* norm_from;
+ dberr_t error;
+ char norm_to[FN_REFLEN];
+ char norm_from[FN_REFLEN];
- // Magic number 64 arbitrary
- norm_to = (char*) my_malloc(strlen(to) + 64, MYF(0));
- norm_from = (char*) my_malloc(strlen(from) + 64, MYF(0));
+ DBUG_ENTER("innobase_rename_table");
+ DBUG_ASSERT(trx_get_dict_operation(trx) == TRX_DICT_OP_INDEX);
+
+ ut_ad(!srv_read_only_mode);
normalize_table_name(norm_to, to);
normalize_table_name(norm_from, from);
+ DEBUG_SYNC_C("innodb_rename_table_ready");
+
/* Serialize data dictionary operations with dictionary mutex:
no deadlocks can occur then in these operations */
- if (lock_and_commit) {
- row_mysql_lock_data_dictionary(trx);
- }
+ row_mysql_lock_data_dictionary(trx);
/* Transaction must be flagged as a locking transaction or it hasn't
been started yet. */
@@ -9593,7 +10330,7 @@ innobase_rename_table(
ut_a(trx->will_lock > 0);
error = row_rename_table_for_mysql(
- norm_from, norm_to, trx, lock_and_commit);
+ norm_from, norm_to, trx, TRUE);
if (error != DB_SUCCESS) {
if (error == DB_TABLE_NOT_FOUND
@@ -9606,39 +10343,36 @@ innobase_rename_table(
#endif /* __WIN__ */
if (is_part) {
- char par_case_name[MAX_FULL_NAME_LEN + 1];
-
+ char par_case_name[FN_REFLEN];
#ifndef __WIN__
/* Check for the table using lower
case name, including the partition
separator "P" */
- memcpy(par_case_name, norm_from,
- strlen(norm_from));
- par_case_name[strlen(norm_from)] = 0;
+ strcpy(par_case_name, norm_from);
innobase_casedn_str(par_case_name);
#else
/* On Windows platfrom, check
whether there exists table name in
system table whose name is
not being normalized to lower case */
- normalize_table_name_low(par_case_name,
- from, FALSE);
+ normalize_table_name_low(
+ par_case_name, from, FALSE);
#endif
error = row_rename_table_for_mysql(
- par_case_name, norm_to, trx,
- lock_and_commit);
-
+ par_case_name, norm_to, trx, TRUE);
}
}
if (error != DB_SUCCESS) {
- FILE* ef = dict_foreign_err_file;
-
- fputs("InnoDB: Renaming table ", ef);
- ut_print_name(ef, trx, TRUE, norm_from);
- fputs(" to ", ef);
- ut_print_name(ef, trx, TRUE, norm_to);
- fputs(" failed!\n", ef);
+ if (!srv_read_only_mode) {
+ FILE* ef = dict_foreign_err_file;
+
+ fputs("InnoDB: Renaming table ", ef);
+ ut_print_name(ef, trx, TRUE, norm_from);
+ fputs(" to ", ef);
+ ut_print_name(ef, trx, TRUE, norm_to);
+ fputs(" failed!\n", ef);
+ }
} else {
#ifndef __WIN__
sql_print_warning("Rename partition table %s "
@@ -9659,20 +10393,15 @@ innobase_rename_table(
}
}
- if (lock_and_commit) {
- row_mysql_unlock_data_dictionary(trx);
-
- /* Flush the log to reduce probability that the .frm
- files and the InnoDB data dictionary get out-of-sync
- if the user runs with innodb_flush_log_at_trx_commit = 0 */
+ row_mysql_unlock_data_dictionary(trx);
- log_buffer_flush_to_disk();
- }
+ /* Flush the log to reduce probability that the .frm
+ files and the InnoDB data dictionary get out-of-sync
+ if the user runs with innodb_flush_log_at_trx_commit = 0 */
- my_free(norm_to);
- my_free(norm_from);
+ log_buffer_flush_to_disk();
- return(error);
+ DBUG_RETURN(error);
}
/*********************************************************************//**
@@ -9686,12 +10415,17 @@ ha_innobase::rename_table(
const char* to) /*!< in: new name of the table */
{
trx_t* trx;
- int error;
+ dberr_t error;
trx_t* parent_trx;
THD* thd = ha_thd();
DBUG_ENTER("ha_innobase::rename_table");
+ if (srv_read_only_mode) {
+ ib_senderrf(thd, IB_LOG_LEVEL_WARN, ER_READ_ONLY_MODE);
+ DBUG_RETURN(HA_ERR_TABLE_READONLY);
+ }
+
/* Get the transaction associated with the current thd, or create one
if not yet created */
@@ -9704,15 +10438,11 @@ ha_innobase::rename_table(
trx = innobase_trx_allocate(thd);
- /* Either the transaction is already flagged as a locking transaction
- or it hasn't been started yet. */
-
- ut_a(!trx_is_started(trx) || trx->will_lock > 0);
-
/* We are doing a DDL operation. */
++trx->will_lock;
+ trx_set_dict_operation(trx, TRX_DICT_OP_INDEX);
- error = innobase_rename_table(trx, from, to, TRUE);
+ error = innobase_rename_table(trx, from, to);
DEBUG_SYNC(thd, "after_innobase_rename_table");
@@ -9724,6 +10454,27 @@ ha_innobase::rename_table(
innobase_commit_low(trx);
trx_free_for_mysql(trx);
+ if (error == DB_SUCCESS) {
+ char norm_from[MAX_FULL_NAME_LEN];
+ char norm_to[MAX_FULL_NAME_LEN];
+ char errstr[512];
+ dberr_t ret;
+
+ normalize_table_name(norm_from, from);
+ normalize_table_name(norm_to, to);
+
+ ret = dict_stats_rename_table(norm_from, norm_to,
+ errstr, sizeof(errstr));
+
+ if (ret != DB_SUCCESS) {
+ ut_print_timestamp(stderr);
+ fprintf(stderr, " InnoDB: %s\n", errstr);
+
+ push_warning(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_LOCK_WAIT_TIMEOUT, errstr);
+ }
+ }
+
/* Add a special case to handle the Duplicated Key error
and return DB_ERROR instead.
This is to avoid a possible SIGSEGV error from mysql error
@@ -9736,15 +10487,13 @@ ha_innobase::rename_table(
the dup key error here is due to an existing table whose name
is the one we are trying to rename to) and return the generic
error code. */
- if (error == (int) DB_DUPLICATE_KEY) {
+ if (error == DB_DUPLICATE_KEY) {
my_error(ER_TABLE_EXISTS_ERROR, MYF(0), to);
error = DB_ERROR;
}
- error = convert_error_code_to_mysql(error, 0, NULL);
-
- DBUG_RETURN(error);
+ DBUG_RETURN(convert_error_code_to_mysql(error, 0, NULL));
}
/*********************************************************************//**
@@ -9803,7 +10552,7 @@ ha_innobase::records_in_range(
goto func_exit;
}
- key_parts= key->key_parts;
+ key_parts= key->ext_key_parts;
if ((min_key && min_key->keypart_map>=(key_part_map) (1<<key_parts)) ||
(max_key && max_key->keypart_map>=(key_part_map) (1<<key_parts)))
key_parts= key->ext_key_parts;
@@ -9811,11 +10560,11 @@ ha_innobase::records_in_range(
heap = mem_heap_create(2 * (key_parts * sizeof(dfield_t)
+ sizeof(dtuple_t)));
- range_start = dtuple_create(heap, key_parts);
- dict_index_copy_types(range_start, index, key_parts);
+ range_start = dtuple_create(heap, key_parts);
+ dict_index_copy_types(range_start, index, key_parts);
- range_end = dtuple_create(heap, key_parts);
- dict_index_copy_types(range_end, index, key_parts);
+ range_end = dtuple_create(heap, key_parts);
+ dict_index_copy_types(range_end, index, key_parts);
row_sel_convert_mysql_key_to_innobase(
range_start,
@@ -9884,10 +10633,10 @@ ha_rows
ha_innobase::estimate_rows_upper_bound()
/*====================================*/
{
- dict_index_t* index;
- ulonglong estimate;
- ulonglong local_data_file_length;
- ulint stat_n_leaf_pages;
+ const dict_index_t* index;
+ ulonglong estimate;
+ ulonglong local_data_file_length;
+ ulint stat_n_leaf_pages;
DBUG_ENTER("estimate_rows_upper_bound");
@@ -9897,8 +10646,7 @@ ha_innobase::estimate_rows_upper_bound()
update_thd(ha_thd());
- prebuilt->trx->op_info = (char*)
- "calculating upper bound for table rows";
+ prebuilt->trx->op_info = "calculating upper bound for table rows";
/* In case MySQL calls this in the middle of a SELECT query, release
possible adaptive hash latch to avoid deadlocks of threads */
@@ -9914,16 +10662,15 @@ ha_innobase::estimate_rows_upper_bound()
local_data_file_length =
((ulonglong) stat_n_leaf_pages) * UNIV_PAGE_SIZE;
-
/* Calculate a minimum length for a clustered index record and from
that an upper bound for the number of rows. Since we only calculate
new statistics in row0mysql.cc when a table has grown by a threshold
factor, we must add a safety factor 2 in front of the formula below. */
- estimate = 2 * local_data_file_length /
- dict_index_calc_min_rec_len(index);
+ estimate = 2 * local_data_file_length
+ / dict_index_calc_min_rec_len(index);
- prebuilt->trx->op_info = (char*)"";
+ prebuilt->trx->op_info = "";
DBUG_RETURN((ha_rows) estimate);
}
@@ -9943,7 +10690,32 @@ ha_innobase::scan_time()
as a random disk read, that is, we do not divide the following
by 10, which would be physically realistic. */
- return((double) (prebuilt->table->stat_clustered_index_size));
+ /* The locking below is disabled for performance reasons. Without
+ it we could end up returning uninitialized value to the caller,
+ which in the worst case could make some query plan go bogus or
+ issue a Valgrind warning. */
+#if 0
+ /* avoid potential lock order violation with dict_table_stats_lock()
+ below */
+ update_thd(ha_thd());
+ trx_search_latch_release_if_reserved(prebuilt->trx);
+#endif
+
+ ulint stat_clustered_index_size;
+
+#if 0
+ dict_table_stats_lock(prebuilt->table, RW_S_LATCH);
+#endif
+
+ ut_a(prebuilt->table->stat_initialized);
+
+ stat_clustered_index_size = prebuilt->table->stat_clustered_index_size;
+
+#if 0
+ dict_table_stats_unlock(prebuilt->table, RW_S_LATCH);
+#endif
+
+ return((double) stat_clustered_index_size);
}
/******************************************************************//**
@@ -9979,6 +10751,16 @@ ha_innobase::read_time(
return(ranges + (double) rows / (double) total_rows * time_for_scan);
}
+/******************************************************************//**
+Return the size of the InnoDB memory buffer. */
+UNIV_INTERN
+longlong
+ha_innobase::get_memory_buffer_size() const
+/*=======================================*/
+{
+ return(innobase_buffer_pool_size);
+}
+
/*********************************************************************//**
Calculates the key number used inside MySQL for an Innobase index. We will
first check the "index translation table" for a match of the index to get
@@ -10004,9 +10786,6 @@ innobase_get_mysql_key_number_for_index(
unsigned int i;
ut_a(index);
- /*
- ut_ad(strcmp(index->table->name, ib_table->name) == 0);
- */
/* If index does not belong to the table object of share structure
(ib_table comes from the share structure) search the index->table
@@ -10037,12 +10816,9 @@ innobase_get_mysql_key_number_for_index(
}
}
- /* If index_count in translation table is set to 0, it
- is possible we are in the process of rebuilding table,
- do not spit error in this case */
- if (share->idx_trans_tbl.index_count) {
- /* Print an error message if we cannot find the index
- ** in the "index translation table". */
+ /* Print an error message if we cannot find the index
+ in the "index translation table". */
+ if (*index->name != TEMP_INDEX_PREFIX) {
sql_print_error("Cannot find index %s in InnoDB index "
"translation table.", index->name);
}
@@ -10066,10 +10842,16 @@ innobase_get_mysql_key_number_for_index(
ind != NULL;
ind = dict_table_get_next_index(ind)) {
if (index == ind) {
- sql_print_error("Find index %s in InnoDB index list "
+ /* Temp index is internal to InnoDB, that is
+ not present in the MySQL index list, so no
+ need to print such mismatch warning. */
+ if (*(index->name) != TEMP_INDEX_PREFIX) {
+ sql_print_warning(
+ "Find index %s in InnoDB index list "
"but not its MySQL index number "
"It could be an InnoDB internal index.",
index->name);
+ }
return(-1);
}
}
@@ -10093,45 +10875,49 @@ innodb_rec_per_key(
ha_rows records) /*!< in: estimated total records */
{
ha_rows rec_per_key;
+ ib_uint64_t n_diff;
+
+ ut_a(index->table->stat_initialized);
ut_ad(i < dict_index_get_n_unique(index));
- /* Note the stat_n_diff_key_vals[] stores the diff value with
- n-prefix indexing, so it is always stat_n_diff_key_vals[i + 1] */
- if (index->stat_n_diff_key_vals[i + 1] == 0) {
+ n_diff = index->stat_n_diff_key_vals[i];
+
+ if (n_diff == 0) {
rec_per_key = records;
} else if (srv_innodb_stats_method == SRV_STATS_NULLS_IGNORED) {
- ib_uint64_t num_null;
+ ib_uint64_t n_null;
+ ib_uint64_t n_non_null;
+
+ n_non_null = index->stat_n_non_null_key_vals[i];
/* In theory, index->stat_n_non_null_key_vals[i]
should always be less than the number of records.
Since this is statistics value, the value could
have slight discrepancy. But we will make sure
the number of null values is not a negative number. */
- if (records < index->stat_n_non_null_key_vals[i]) {
- num_null = 0;
+ if (records < n_non_null) {
+ n_null = 0;
} else {
- num_null = records - index->stat_n_non_null_key_vals[i];
+ n_null = records - n_non_null;
}
/* If the number of NULL values is the same as or
large than that of the distinct values, we could
consider that the table consists mostly of NULL value.
Set rec_per_key to 1. */
- if (index->stat_n_diff_key_vals[i + 1] <= num_null) {
+ if (n_diff <= n_null) {
rec_per_key = 1;
} else {
/* Need to exclude rows with NULL values from
rec_per_key calculation */
- rec_per_key = (ha_rows)(
- (records - num_null)
- / (index->stat_n_diff_key_vals[i + 1]
- - num_null));
+ rec_per_key = (ha_rows)
+ ((records - n_null) / (n_diff - n_null));
}
} else {
- rec_per_key = (ha_rows)
- (records / index->stat_n_diff_key_vals[i + 1]);
+ DEBUG_SYNC_C("after_checking_for_0");
+ rec_per_key = (ha_rows) (records / n_diff);
}
return(rec_per_key);
@@ -10145,17 +10931,12 @@ UNIV_INTERN
int
ha_innobase::info_low(
/*==================*/
- uint flag, /*!< in: what information MySQL
- requests */
- dict_stats_upd_option_t stats_upd_option)
- /*!< in: whether to (re) calc
- the stats or to fetch them from
- the persistent storage */
+ uint flag, /*!< in: what information is requested */
+ bool is_analyze)
{
dict_table_t* ib_table;
- dict_index_t* index;
ha_rows rec_per_key;
- ib_int64_t n_rows;
+ ib_uint64_t n_rows;
char path[FN_REFLEN];
os_file_stat_t stat_info;
@@ -10179,37 +10960,52 @@ ha_innobase::info_low(
trx_search_latch_release_if_reserved(prebuilt->trx);
ib_table = prebuilt->table;
+ DBUG_ASSERT(ib_table->n_ref_count > 0);
if (flag & HA_STATUS_TIME) {
- if (stats_upd_option != DICT_STATS_FETCH
- || innobase_stats_on_metadata) {
- /* In sql_show we call with this flag: update
- then statistics so that they are up-to-date */
- enum db_err ret;
+ if (is_analyze || innobase_stats_on_metadata) {
+
+ dict_stats_upd_option_t opt;
+ dberr_t ret;
prebuilt->trx->op_info = "updating table statistics";
+ if (dict_stats_is_persistent_enabled(ib_table)) {
+
+ ut_ad(!srv_read_only_mode);
+
+ if (is_analyze) {
+ opt = DICT_STATS_RECALC_PERSISTENT;
+ } else {
+ /* This is e.g. 'SHOW INDEXES', fetch
+ the persistent stats from disk. */
+ opt = DICT_STATS_FETCH_ONLY_IF_NOT_IN_MEMORY;
+ }
+ } else {
+ opt = DICT_STATS_RECALC_TRANSIENT;
+ }
+
ut_ad(!mutex_own(&dict_sys->mutex));
- ret = dict_stats_update(ib_table, stats_upd_option,
- FALSE);
+ ret = dict_stats_update(ib_table, opt);
if (ret != DB_SUCCESS) {
prebuilt->trx->op_info = "";
DBUG_RETURN(HA_ERR_GENERIC);
}
- prebuilt->trx->op_info = "returning various info to MySQL";
+ prebuilt->trx->op_info =
+ "returning various info to MySQL";
}
my_snprintf(path, sizeof(path), "%s/%s%s",
- mysql_data_home, ib_table->name, reg_ext);
+ mysql_data_home, ib_table->name, reg_ext);
unpack_filename(path,path);
/* Note that we do not know the access time of the table,
nor the CHECK TABLE time, nor the UPDATE or INSERT time. */
- if (os_file_get_status(path,&stat_info)) {
+ if (os_file_get_status(path, &stat_info, false) == DB_SUCCESS) {
stats.create_time = (ulong) stat_info.ctime;
}
}
@@ -10217,13 +11013,28 @@ ha_innobase::info_low(
if (flag & HA_STATUS_VARIABLE) {
ulint page_size;
+ ulint stat_clustered_index_size;
+ ulint stat_sum_of_other_index_sizes;
+
+ if (!(flag & HA_STATUS_NO_LOCK)) {
+ dict_table_stats_lock(ib_table, RW_S_LATCH);
+ }
+
+ ut_a(ib_table->stat_initialized);
n_rows = ib_table->stat_n_rows;
- /* Because we do not protect stat_n_rows by any mutex in a
- delete, it is theoretically possible that the value can be
- smaller than zero! TODO: fix this race.
+ stat_clustered_index_size
+ = ib_table->stat_clustered_index_size;
+
+ stat_sum_of_other_index_sizes
+ = ib_table->stat_sum_of_other_index_sizes;
+
+ if (!(flag & HA_STATUS_NO_LOCK)) {
+ dict_table_stats_unlock(ib_table, RW_S_LATCH);
+ }
+ /*
The MySQL optimizer seems to assume in a left join that n_rows
is an accurate estimate if it is zero. Of course, it is not,
since we do not have any locks on the rows yet at this phase.
@@ -10233,10 +11044,6 @@ ha_innobase::info_low(
set. That way SHOW TABLE STATUS will show the best estimate,
while the optimizer never sees the table empty. */
- if (n_rows < 0) {
- n_rows = 0;
- }
-
if (n_rows == 0 && !(flag & HA_STATUS_TIME)) {
n_rows++;
}
@@ -10266,10 +11073,10 @@ ha_innobase::info_low(
stats.records = (ha_rows) n_rows;
stats.deleted = 0;
stats.data_file_length
- = ((ulonglong) ib_table->stat_clustered_index_size)
+ = ((ulonglong) stat_clustered_index_size)
* page_size;
- stats.index_file_length =
- ((ulonglong) ib_table->stat_sum_of_other_index_sizes)
+ stats.index_file_length
+ = ((ulonglong) stat_sum_of_other_index_sizes)
* page_size;
/* Since fsp_get_available_space_in_free_extents() is
@@ -10309,8 +11116,8 @@ ha_innobase::info_low(
"space for table %s but its "
"tablespace has been discarded or "
"the .ibd file is missing. Setting "
- "the free space to zero. "
- "(Errcode: %M)",
+ "the free space to zero. "
+ "(errno: %M)",
ib_table->name, errno);
stats.delete_length = 0;
@@ -10320,7 +11127,7 @@ ha_innobase::info_low(
}
stats.check_time = 0;
- stats.mrr_length_per_rec = ref_length + sizeof(void*);
+ stats.mrr_length_per_rec= ref_length + 8; // 8 = max(sizeof(void *));
if (stats.records == 0) {
stats.mean_rec_length = 0;
@@ -10336,12 +11143,40 @@ ha_innobase::info_low(
matches up. If prebuilt->clust_index_was_generated
holds, InnoDB defines GEN_CLUST_INDEX internally */
ulint num_innodb_index = UT_LIST_GET_LEN(ib_table->indexes)
- - prebuilt->clust_index_was_generated;
+ - prebuilt->clust_index_was_generated;
+ if (table->s->keys < num_innodb_index) {
+ /* If there are too many indexes defined
+ inside InnoDB, ignore those that are being
+ created, because MySQL will only consider
+ the fully built indexes here. */
+
+ for (const dict_index_t* index
+ = UT_LIST_GET_FIRST(ib_table->indexes);
+ index != NULL;
+ index = UT_LIST_GET_NEXT(indexes, index)) {
+
+ /* First, online index creation is
+ completed inside InnoDB, and then
+ MySQL attempts to upgrade the
+ meta-data lock so that it can rebuild
+ the .frm file. If we get here in that
+ time frame, dict_index_is_online_ddl()
+ would not hold and the index would
+ still not be included in TABLE_SHARE. */
+ if (*index->name == TEMP_INDEX_PREFIX) {
+ num_innodb_index--;
+ }
+ }
+
+ if (table->s->keys < num_innodb_index
+ && innobase_fts_check_doc_id_index(
+ ib_table, NULL, NULL)
+ == FTS_EXIST_DOC_ID_INDEX) {
+ num_innodb_index--;
+ }
+ }
- if (table->s->keys != num_innodb_index
- && (innobase_fts_check_doc_id_index(ib_table, NULL)
- == FTS_EXIST_DOC_ID_INDEX
- && table->s->keys != (num_innodb_index - 1))) {
+ if (table->s->keys != num_innodb_index) {
sql_print_error("InnoDB: Table %s contains %lu "
"indexes inside InnoDB, which "
"is different from the number of "
@@ -10350,6 +11185,12 @@ ha_innobase::info_low(
table->s->keys);
}
+ if (!(flag & HA_STATUS_NO_LOCK)) {
+ dict_table_stats_lock(ib_table, RW_S_LATCH);
+ }
+
+ ut_a(ib_table->stat_initialized);
+
for (i = 0; i < table->s->keys; i++) {
ulong j;
rec_per_key = 1;
@@ -10358,7 +11199,7 @@ ha_innobase::info_low(
The identity of index (match up index name with
that of table->key_info[i]) is already verified in
innobase_get_index(). */
- index = innobase_get_index(i);
+ dict_index_t* index = innobase_get_index(i);
if (index == NULL) {
sql_print_error("Table %s contains fewer "
@@ -10373,7 +11214,7 @@ ha_innobase::info_low(
break;
}
- for (j = 0; j < table->key_info[i].key_parts; j++) {
+ for (j = 0; j < table->key_info[i].ext_key_parts; j++) {
if (table->key_info[i].flags & HA_FULLTEXT) {
/* The whole concept has no validity
@@ -10420,15 +11261,17 @@ ha_innobase::info_low(
KEY *key_info= table->key_info+i;
key_part_map ext_key_part_map=
- key_info->ext_key_part_map;
+ key_info->ext_key_part_map;
- if (key_info->key_parts != key_info->ext_key_parts) {
+ if (key_info->user_defined_key_parts !=
+ key_info->ext_key_parts)
+ {
KEY *pk_key_info= key_info+
table->s->primary_key;
- uint k = key_info->key_parts;
+ uint k = key_info->user_defined_key_parts;
ha_rows k_rec_per_key = rec_per_key;
- uint pk_parts = pk_key_info->key_parts;
+ uint pk_parts = pk_key_info->user_defined_key_parts;
index= innobase_get_index(
table->s->primary_key);
@@ -10463,6 +11306,10 @@ ha_innobase::info_low(
}
}
}
+
+ if (!(flag & HA_STATUS_NO_LOCK)) {
+ dict_table_stats_unlock(ib_table, RW_S_LATCH);
+ }
}
if (srv_force_recovery >= SRV_FORCE_NO_IBUF_MERGE) {
@@ -10485,7 +11332,7 @@ ha_innobase::info_low(
errkey = (unsigned int) (
(prebuilt->trx->error_key_num
== ULINT_UNDEFINED)
- ? -1
+ ? ~0
: prebuilt->trx->error_key_num);
}
}
@@ -10508,9 +11355,9 @@ UNIV_INTERN
int
ha_innobase::info(
/*==============*/
- uint flag) /*!< in: what information MySQL requests */
+ uint flag) /*!< in: what information is requested */
{
- return(info_low(flag, DICT_STATS_FETCH));
+ return(this->info_low(flag, false /* not ANALYZE */));
}
/**********************************************************************//**
@@ -10524,19 +11371,13 @@ ha_innobase::analyze(
THD* thd, /*!< in: connection thread handle */
HA_CHECK_OPT* check_opt) /*!< in: currently ignored */
{
- dict_stats_upd_option_t upd_option;
- int ret;
-
- if (THDVAR(thd, analyze_is_persistent)) {
- upd_option = DICT_STATS_RECALC_PERSISTENT;
- } else {
- upd_option = DICT_STATS_RECALC_TRANSIENT;
- }
+ int ret;
- /* Simply call ::info_low() with all the flags
+ /* Simply call this->info_low() with all the flags
and request recalculation of the statistics */
- ret = info_low(HA_STATUS_TIME | HA_STATUS_CONST | HA_STATUS_VARIABLE,
- upd_option);
+ ret = this->info_low(
+ HA_STATUS_TIME | HA_STATUS_CONST | HA_STATUS_VARIABLE,
+ true /* this is ANALYZE */);
if (ret != 0) {
return(HA_ADMIN_FAILED);
@@ -10609,19 +11450,23 @@ ha_innobase::check(
build_template(true);
}
- if (prebuilt->table->ibd_file_missing) {
- sql_print_error("InnoDB: Error:\n"
- "InnoDB: MySQL is trying to use a table handle"
- " but the .ibd file for\n"
- "InnoDB: table %s does not exist.\n"
- "InnoDB: Have you deleted the .ibd file"
- " from the database directory under\n"
- "InnoDB: the MySQL datadir, or have you"
- " used DISCARD TABLESPACE?\n"
- "InnoDB: Please refer to\n"
- "InnoDB: " REFMAN "innodb-troubleshooting.html\n"
- "InnoDB: how you can resolve the problem.\n",
- prebuilt->table->name);
+ if (dict_table_is_discarded(prebuilt->table)) {
+
+ ib_senderrf(
+ thd,
+ IB_LOG_LEVEL_ERROR,
+ ER_TABLESPACE_DISCARDED,
+ table->s->table_name.str);
+
+ DBUG_RETURN(HA_ADMIN_CORRUPT);
+
+ } else if (prebuilt->table->ibd_file_missing) {
+
+ ib_senderrf(
+ thd, IB_LOG_LEVEL_ERROR,
+ ER_TABLESPACE_MISSING,
+ table->s->table_name.str);
+
DBUG_RETURN(HA_ADMIN_CORRUPT);
}
@@ -10647,27 +11492,23 @@ ha_innobase::check(
/* Enlarge the fatal lock wait timeout during CHECK TABLE. */
os_increment_counter_by_amount(
server_mutex,
- srv_fatal_semaphore_wait_threshold, 7200/*2 hours*/);
+ srv_fatal_semaphore_wait_threshold,
+ SRV_SEMAPHORE_WAIT_EXTENSION);
for (index = dict_table_get_first_index(prebuilt->table);
index != NULL;
index = dict_table_get_next_index(index)) {
char index_name[MAX_FULL_NAME_LEN + 1];
-#if 0
- fputs("Validating index ", stderr);
- ut_print_name(stderr, trx, FALSE, index->name);
- putc('\n', stderr);
-#endif
- /* If this is an index being created, break */
+ /* If this is an index being created or dropped, break */
if (*index->name == TEMP_INDEX_PREFIX) {
break;
- } else if (!btr_validate_index(index, prebuilt->trx)) {
+ } else if (!btr_validate_index(index, prebuilt->trx)) {
is_ok = FALSE;
innobase_format_name(
index_name, sizeof index_name,
- prebuilt->index->name, TRUE);
+ index->name, TRUE);
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
ER_NOT_KEYFILE,
@@ -10731,9 +11572,8 @@ ha_innobase::check(
" index %s is corrupted.",
index_name);
is_ok = FALSE;
- row_mysql_lock_data_dictionary(prebuilt->trx);
- dict_set_corrupted(index);
- row_mysql_unlock_data_dictionary(prebuilt->trx);
+ dict_set_corrupted(
+ index, prebuilt->trx, "CHECK TABLE");
}
if (thd_kill_level(user_thd)) {
@@ -10768,9 +11608,8 @@ ha_innobase::check(
index = dict_table_get_first_index(prebuilt->table);
if (!dict_index_is_corrupted(index)) {
- mutex_enter(&dict_sys->mutex);
- dict_set_corrupted(index);
- mutex_exit(&dict_sys->mutex);
+ dict_set_corrupted(
+ index, prebuilt->trx, "CHECK TABLE");
}
prebuilt->table->corrupted = TRUE;
}
@@ -10791,7 +11630,8 @@ ha_innobase::check(
/* Restore the fatal lock wait timeout after CHECK TABLE. */
os_decrement_counter_by_amount(
server_mutex,
- srv_fatal_semaphore_wait_threshold, 7200/*2 hours*/);
+ srv_fatal_semaphore_wait_threshold,
+ SRV_SEMAPHORE_WAIT_EXTENSION);
prebuilt->trx->op_info = "";
if (thd_kill_level(user_thd)) {
@@ -10836,40 +11676,47 @@ ha_innobase::update_table_comment(
/* output the data to a temporary file */
- mutex_enter(&srv_dict_tmpfile_mutex);
- rewind(srv_dict_tmpfile);
+ if (!srv_read_only_mode) {
- fprintf(srv_dict_tmpfile, "InnoDB free: %llu kB",
- fsp_get_available_space_in_free_extents(
- prebuilt->table->space));
+ mutex_enter(&srv_dict_tmpfile_mutex);
- dict_print_info_on_foreign_keys(FALSE, srv_dict_tmpfile,
- prebuilt->trx, prebuilt->table);
- flen = ftell(srv_dict_tmpfile);
- if (flen < 0) {
- flen = 0;
- } else if (length + flen + 3 > 64000) {
- flen = 64000 - 3 - length;
- }
+ rewind(srv_dict_tmpfile);
- /* allocate buffer for the full string, and
- read the contents of the temporary file */
+ fprintf(srv_dict_tmpfile, "InnoDB free: %llu kB",
+ fsp_get_available_space_in_free_extents(
+ prebuilt->table->space));
- str = (char*) my_malloc(length + flen + 3, MYF(0));
+ dict_print_info_on_foreign_keys(
+ FALSE, srv_dict_tmpfile, prebuilt->trx,
+ prebuilt->table);
- if (str) {
- char* pos = str + length;
- if (length) {
- memcpy(str, comment, length);
- *pos++ = ';';
- *pos++ = ' ';
+ flen = ftell(srv_dict_tmpfile);
+
+ if (flen < 0) {
+ flen = 0;
+ } else if (length + flen + 3 > 64000) {
+ flen = 64000 - 3 - length;
}
- rewind(srv_dict_tmpfile);
- flen = (uint) fread(pos, 1, flen, srv_dict_tmpfile);
- pos[flen] = 0;
- }
- mutex_exit(&srv_dict_tmpfile_mutex);
+ /* allocate buffer for the full string, and
+ read the contents of the temporary file */
+
+ str = (char*) my_malloc(length + flen + 3, MYF(0));
+
+ if (str) {
+ char* pos = str + length;
+ if (length) {
+ memcpy(str, comment, length);
+ *pos++ = ';';
+ *pos++ = ' ';
+ }
+ rewind(srv_dict_tmpfile);
+ flen = (uint) fread(pos, 1, flen, srv_dict_tmpfile);
+ pos[flen] = 0;
+ }
+
+ mutex_exit(&srv_dict_tmpfile_mutex);
+ }
prebuilt->trx->op_info = (char*)"";
@@ -10886,8 +11733,8 @@ char*
ha_innobase::get_foreign_key_create_info(void)
/*==========================================*/
{
- char* str = 0;
long flen;
+ char* str = 0;
ut_a(prebuilt != NULL);
@@ -10905,31 +11752,36 @@ ha_innobase::get_foreign_key_create_info(void)
trx_search_latch_release_if_reserved(prebuilt->trx);
- mutex_enter(&srv_dict_tmpfile_mutex);
- rewind(srv_dict_tmpfile);
+ if (!srv_read_only_mode) {
+ mutex_enter(&srv_dict_tmpfile_mutex);
+ rewind(srv_dict_tmpfile);
- /* output the data to a temporary file */
- dict_print_info_on_foreign_keys(TRUE, srv_dict_tmpfile,
- prebuilt->trx, prebuilt->table);
- prebuilt->trx->op_info = (char*)"";
+ /* Output the data to a temporary file */
+ dict_print_info_on_foreign_keys(
+ TRUE, srv_dict_tmpfile, prebuilt->trx,
+ prebuilt->table);
- flen = ftell(srv_dict_tmpfile);
- if (flen < 0) {
- flen = 0;
- }
+ prebuilt->trx->op_info = (char*)"";
- /* allocate buffer for the string, and
- read the contents of the temporary file */
+ flen = ftell(srv_dict_tmpfile);
- str = (char*) my_malloc(flen + 1, MYF(0));
+ if (flen < 0) {
+ flen = 0;
+ }
- if (str) {
- rewind(srv_dict_tmpfile);
- flen = (uint) fread(str, 1, flen, srv_dict_tmpfile);
- str[flen] = 0;
- }
+ /* Allocate buffer for the string, and
+ read the contents of the temporary file */
+
+ str = (char*) my_malloc(flen + 1, MYF(0));
- mutex_exit(&srv_dict_tmpfile_mutex);
+ if (str) {
+ rewind(srv_dict_tmpfile);
+ flen = (uint) fread(str, 1, flen, srv_dict_tmpfile);
+ str[flen] = 0;
+ }
+
+ mutex_exit(&srv_dict_tmpfile_mutex);
+ }
return(str);
}
@@ -11143,17 +11995,16 @@ ha_innobase::can_switch_engines(void)
bool can_switch;
DBUG_ENTER("ha_innobase::can_switch_engines");
-
- ut_a(prebuilt->trx == thd_to_trx(ha_thd()));
+ update_thd();
prebuilt->trx->op_info =
"determining if there are foreign key constraints";
- row_mysql_lock_data_dictionary(prebuilt->trx);
+ row_mysql_freeze_data_dictionary(prebuilt->trx);
can_switch = !UT_LIST_GET_FIRST(prebuilt->table->referenced_list)
&& !UT_LIST_GET_FIRST(prebuilt->table->foreign_list);
- row_mysql_unlock_data_dictionary(prebuilt->trx);
+ row_mysql_unfreeze_data_dictionary(prebuilt->trx);
prebuilt->trx->op_info = "";
DBUG_RETURN(can_switch);
@@ -11202,50 +12053,52 @@ ha_innobase::extra(
enum ha_extra_function operation)
/*!< in: HA_EXTRA_FLUSH or some other flag */
{
+ check_trx_exists(ha_thd());
+
/* Warning: since it is not sure that MySQL calls external_lock
before calling this function, the trx field in prebuilt can be
obsolete! */
switch (operation) {
- case HA_EXTRA_FLUSH:
- if (prebuilt->blob_heap) {
- row_mysql_prebuilt_free_blob_heap(prebuilt);
- }
- break;
- case HA_EXTRA_RESET_STATE:
- reset_template();
- thd_to_trx(ha_thd())->duplicates = 0;
- break;
- case HA_EXTRA_NO_KEYREAD:
- prebuilt->read_just_key = 0;
- break;
- case HA_EXTRA_KEYREAD:
- prebuilt->read_just_key = 1;
- break;
- case HA_EXTRA_KEYREAD_PRESERVE_FIELDS:
- prebuilt->keep_other_fields_on_keyread = 1;
- break;
+ case HA_EXTRA_FLUSH:
+ if (prebuilt->blob_heap) {
+ row_mysql_prebuilt_free_blob_heap(prebuilt);
+ }
+ break;
+ case HA_EXTRA_RESET_STATE:
+ reset_template();
+ thd_to_trx(ha_thd())->duplicates = 0;
+ break;
+ case HA_EXTRA_NO_KEYREAD:
+ prebuilt->read_just_key = 0;
+ break;
+ case HA_EXTRA_KEYREAD:
+ prebuilt->read_just_key = 1;
+ break;
+ case HA_EXTRA_KEYREAD_PRESERVE_FIELDS:
+ prebuilt->keep_other_fields_on_keyread = 1;
+ break;
- /* IMPORTANT: prebuilt->trx can be obsolete in
- this method, because it is not sure that MySQL
- calls external_lock before this method with the
- parameters below. We must not invoke update_thd()
- either, because the calling threads may change.
- CAREFUL HERE, OR MEMORY CORRUPTION MAY OCCUR! */
- case HA_EXTRA_INSERT_WITH_UPDATE:
- thd_to_trx(ha_thd())->duplicates |= TRX_DUP_IGNORE;
- break;
- case HA_EXTRA_NO_IGNORE_DUP_KEY:
- thd_to_trx(ha_thd())->duplicates &= ~TRX_DUP_IGNORE;
- break;
- case HA_EXTRA_WRITE_CAN_REPLACE:
- thd_to_trx(ha_thd())->duplicates |= TRX_DUP_REPLACE;
- break;
- case HA_EXTRA_WRITE_CANNOT_REPLACE:
- thd_to_trx(ha_thd())->duplicates &= ~TRX_DUP_REPLACE;
- break;
- default:/* Do nothing */
- ;
+ /* IMPORTANT: prebuilt->trx can be obsolete in
+ this method, because it is not sure that MySQL
+ calls external_lock before this method with the
+ parameters below. We must not invoke update_thd()
+ either, because the calling threads may change.
+ CAREFUL HERE, OR MEMORY CORRUPTION MAY OCCUR! */
+ case HA_EXTRA_INSERT_WITH_UPDATE:
+ thd_to_trx(ha_thd())->duplicates |= TRX_DUP_IGNORE;
+ break;
+ case HA_EXTRA_NO_IGNORE_DUP_KEY:
+ thd_to_trx(ha_thd())->duplicates &= ~TRX_DUP_IGNORE;
+ break;
+ case HA_EXTRA_WRITE_CAN_REPLACE:
+ thd_to_trx(ha_thd())->duplicates |= TRX_DUP_REPLACE;
+ break;
+ case HA_EXTRA_WRITE_CANNOT_REPLACE:
+ thd_to_trx(ha_thd())->duplicates &= ~TRX_DUP_REPLACE;
+ break;
+ default:/* Do nothing */
+ ;
}
return(0);
@@ -11354,14 +12207,6 @@ ha_innobase::start_stmt(
++trx->will_lock;
}
- if (prebuilt->result) {
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Warning: FTS result set not NULL\n");
-
- fts_query_free_result(prebuilt->result);
- prebuilt->result = NULL;
- }
-
return(0);
}
@@ -11434,6 +12279,24 @@ ha_innobase::external_lock(
}
}
+ /* Check for UPDATEs in read-only mode. */
+ if (srv_read_only_mode
+ && (thd_sql_command(thd) == SQLCOM_UPDATE
+ || thd_sql_command(thd) == SQLCOM_INSERT
+ || thd_sql_command(thd) == SQLCOM_REPLACE
+ || thd_sql_command(thd) == SQLCOM_DROP_TABLE
+ || thd_sql_command(thd) == SQLCOM_ALTER_TABLE
+ || thd_sql_command(thd) == SQLCOM_OPTIMIZE
+ || thd_sql_command(thd) == SQLCOM_CREATE_TABLE
+ || thd_sql_command(thd) == SQLCOM_CREATE_INDEX
+ || thd_sql_command(thd) == SQLCOM_DROP_INDEX
+ || thd_sql_command(thd) == SQLCOM_DELETE)) {
+
+ ib_senderrf(thd, IB_LOG_LEVEL_WARN, ER_READ_ONLY_MODE);
+
+ DBUG_RETURN(HA_ERR_TABLE_READONLY);
+ }
+
trx = prebuilt->trx;
prebuilt->sql_stat_start = TRUE;
@@ -11441,6 +12304,41 @@ ha_innobase::external_lock(
reset_template();
+ switch (prebuilt->table->quiesce) {
+ case QUIESCE_START:
+ /* Check for FLUSH TABLE t WITH READ LOCK; */
+ if (!srv_read_only_mode
+ && thd_sql_command(thd) == SQLCOM_FLUSH
+ && lock_type == F_RDLCK) {
+
+ row_quiesce_table_start(prebuilt->table, trx);
+
+ /* Use the transaction instance to track UNLOCK
+ TABLES. It can be done via START TRANSACTION; too
+ implicitly. */
+
+ ++trx->flush_tables;
+ }
+ break;
+
+ case QUIESCE_COMPLETE:
+ /* Check for UNLOCK TABLES; implicit or explicit
+ or trx interruption. */
+ if (trx->flush_tables > 0
+ && (lock_type == F_UNLCK || trx_is_interrupted(trx))) {
+
+ row_quiesce_table_complete(prebuilt->table, trx);
+
+ ut_a(trx->flush_tables > 0);
+ --trx->flush_tables;
+ }
+
+ break;
+
+ case QUIESCE_NONE:
+ break;
+ }
+
if (lock_type == F_WRLCK) {
/* If this is a SELECT, then it is in UPDATE TABLE ...
@@ -11491,13 +12389,13 @@ ha_innobase::external_lock(
&& thd_test_options(thd, OPTION_NOT_AUTOCOMMIT)
&& thd_in_lock_tables(thd)) {
- ulint error = row_lock_table_for_mysql(
+ dberr_t error = row_lock_table_for_mysql(
prebuilt, NULL, 0);
if (error != DB_SUCCESS) {
- error = convert_error_code_to_mysql(
- (int) error, 0, thd);
- DBUG_RETURN((int) error);
+ DBUG_RETURN(
+ convert_error_code_to_mysql(
+ error, 0, thd));
}
}
@@ -11587,19 +12485,23 @@ ha_innobase::transactional_table_lock(
update_thd(thd);
- if (prebuilt->table->ibd_file_missing && !thd_tablespace_op(thd)) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: MySQL is trying to use a table handle"
- " but the .ibd file for\n"
- "InnoDB: table %s does not exist.\n"
- "InnoDB: Have you deleted the .ibd file"
- " from the database directory under\n"
- "InnoDB: the MySQL datadir?"
- "InnoDB: See " REFMAN
- "innodb-troubleshooting.html\n"
- "InnoDB: how you can resolve the problem.\n",
- prebuilt->table->name);
+ if (!thd_tablespace_op(thd)) {
+
+ if (dict_table_is_discarded(prebuilt->table)) {
+
+ ib_senderrf(
+ thd, IB_LOG_LEVEL_ERROR,
+ ER_TABLESPACE_DISCARDED,
+ table->s->table_name.str);
+
+ } else if (prebuilt->table->ibd_file_missing) {
+
+ ib_senderrf(
+ thd, IB_LOG_LEVEL_ERROR,
+ ER_TABLESPACE_MISSING,
+ table->s->table_name.str);
+ }
+
DBUG_RETURN(HA_ERR_CRASHED);
}
@@ -11617,11 +12519,12 @@ ha_innobase::transactional_table_lock(
prebuilt->select_lock_type = LOCK_S;
prebuilt->stored_select_lock_type = LOCK_S;
} else {
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB error:\n"
-"MySQL is trying to set transactional table lock with corrupted lock type\n"
-"to table %s, lock type %d does not exist.\n",
- prebuilt->table->name, lock_type);
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "MySQL is trying to set transactional table lock "
+ "with corrupted lock type to table %s, lock type "
+ "%d does not exist.",
+ table->s->table_name.str, lock_type);
+
DBUG_RETURN(HA_ERR_CRASHED);
}
@@ -11630,14 +12533,14 @@ ha_innobase::transactional_table_lock(
innobase_register_trx(ht, thd, trx);
if (THDVAR(thd, table_locks) && thd_in_lock_tables(thd)) {
- ulint error = DB_SUCCESS;
+ dberr_t error;
error = row_lock_table_for_mysql(prebuilt, NULL, 0);
if (error != DB_SUCCESS) {
- error = convert_error_code_to_mysql(
- (int) error, prebuilt->table->flags, thd);
- DBUG_RETURN((int) error);
+ DBUG_RETURN(
+ convert_error_code_to_mysql(
+ error, prebuilt->table->flags, thd));
}
if (thd_test_options(
@@ -11688,6 +12591,13 @@ innodb_show_status(
DBUG_ENTER("innodb_show_status");
DBUG_ASSERT(hton == innodb_hton_ptr);
+ /* We don't create the temp files or associated
+ mutexes in read-only-mode */
+
+ if (srv_read_only_mode) {
+ DBUG_RETURN(0);
+ }
+
trx = check_trx_exists(thd);
trx_search_latch_release_if_reserved(trx);
@@ -11777,11 +12687,11 @@ innodb_mutex_show_status(
{
char buf1[IO_SIZE];
char buf2[IO_SIZE];
- mutex_t* mutex;
+ ib_mutex_t* mutex;
rw_lock_t* lock;
ulint block_mutex_oswait_count = 0;
ulint block_lock_oswait_count = 0;
- mutex_t* block_mutex = NULL;
+ ib_mutex_t* block_mutex = NULL;
rw_lock_t* block_lock = NULL;
#ifdef UNIV_DEBUG
ulint rw_lock_count= 0;
@@ -11813,41 +12723,7 @@ innodb_mutex_show_status(
block_mutex_oswait_count += mutex->count_os_wait;
continue;
}
-#ifdef UNIV_DEBUG
- if (mutex->mutex_type != 1) {
- if (mutex->count_using > 0) {
- buf1len= my_snprintf(buf1, sizeof(buf1),
- "%s:%s",
- mutex->cmutex_name,
- innobase_basename(mutex->cfile_name));
- buf2len= my_snprintf(buf2, sizeof(buf2),
- "count=%lu, spin_waits=%lu,"
- " spin_rounds=%lu, "
- "os_waits=%lu, os_yields=%lu,"
- " os_wait_times=%lu",
- mutex->count_using,
- mutex->count_spin_loop,
- mutex->count_spin_rounds,
- mutex->count_os_wait,
- mutex->count_os_yield,
- (ulong) (mutex->lspent_time/1000));
-
- if (stat_print(thd, innobase_hton_name,
- hton_name_len, buf1, buf1len,
- buf2, buf2len)) {
- mutex_exit(&mutex_list_mutex);
- DBUG_RETURN(1);
- }
- }
- } else {
- rw_lock_count += mutex->count_using;
- rw_lock_count_spin_loop += mutex->count_spin_loop;
- rw_lock_count_spin_rounds += mutex->count_spin_rounds;
- rw_lock_count_os_wait += mutex->count_os_wait;
- rw_lock_count_os_yield += mutex->count_os_yield;
- rw_lock_wait_time += mutex->lspent_time;
- }
-#else /* UNIV_DEBUG */
+
buf1len= (uint) my_snprintf(buf1, sizeof(buf1), "%s:%lu",
innobase_basename(mutex->cfile_name),
(ulong) mutex->cline);
@@ -11860,7 +12736,6 @@ innodb_mutex_show_status(
mutex_exit(&mutex_list_mutex);
DBUG_RETURN(1);
}
-#endif /* UNIV_DEBUG */
}
if (block_mutex) {
@@ -12133,12 +13008,52 @@ ha_innobase::store_lock(
const bool in_lock_tables = thd_in_lock_tables(thd);
const uint sql_command = thd_sql_command(thd);
- if (sql_command == SQLCOM_DROP_TABLE) {
+ if (srv_read_only_mode
+ && (sql_command == SQLCOM_UPDATE
+ || sql_command == SQLCOM_INSERT
+ || sql_command == SQLCOM_REPLACE
+ || sql_command == SQLCOM_DROP_TABLE
+ || sql_command == SQLCOM_ALTER_TABLE
+ || sql_command == SQLCOM_OPTIMIZE
+ || sql_command == SQLCOM_CREATE_TABLE
+ || sql_command == SQLCOM_CREATE_INDEX
+ || sql_command == SQLCOM_DROP_INDEX
+ || sql_command == SQLCOM_DELETE)) {
+
+ ib_senderrf(trx->mysql_thd,
+ IB_LOG_LEVEL_WARN, ER_READ_ONLY_MODE);
+
+ } else if (sql_command == SQLCOM_FLUSH
+ && lock_type == TL_READ_NO_INSERT) {
+
+ /* Check for FLUSH TABLES ... WITH READ LOCK */
+
+ /* Note: This call can fail, but there is no way to return
+ the error to the caller. We simply ignore it for now here
+ and push the error code to the caller where the error is
+ detected in the function. */
+
+ dberr_t err = row_quiesce_set_state(
+ prebuilt->table, QUIESCE_START, trx);
+
+ ut_a(err == DB_SUCCESS || err == DB_UNSUPPORTED);
+
+ if (trx->isolation_level == TRX_ISO_SERIALIZABLE) {
+ prebuilt->select_lock_type = LOCK_S;
+ prebuilt->stored_select_lock_type = LOCK_S;
+ } else {
+ prebuilt->select_lock_type = LOCK_NONE;
+ prebuilt->stored_select_lock_type = LOCK_NONE;
+ }
+
+ /* Check for DROP TABLE */
+ } else if (sql_command == SQLCOM_DROP_TABLE) {
/* MySQL calls this function in DROP TABLE though this table
handle may belong to another thd that is running a query. Let
us in that case skip any changes to the prebuilt struct. */
+ /* Check for LOCK TABLE t1,...,tn WITH SHARED LOCKS */
} else if ((lock_type == TL_READ && in_lock_tables)
|| (lock_type == TL_READ_HIGH_PRIORITY && in_lock_tables)
|| lock_type == TL_READ_WITH_SHARED_LOCKS
@@ -12164,18 +13079,18 @@ ha_innobase::store_lock(
unexpected if an obsolete consistent read view would be
used. */
- ulint isolation_level;
+ /* Use consistent read for checksum table */
- isolation_level = trx->isolation_level;
-
- if ((srv_locks_unsafe_for_binlog
- || isolation_level <= TRX_ISO_READ_COMMITTED)
- && isolation_level != TRX_ISO_SERIALIZABLE
- && (lock_type == TL_READ || lock_type == TL_READ_NO_INSERT)
- && (sql_command == SQLCOM_INSERT_SELECT
- || sql_command == SQLCOM_REPLACE_SELECT
- || sql_command == SQLCOM_UPDATE
- || sql_command == SQLCOM_CREATE_TABLE)) {
+ if (sql_command == SQLCOM_CHECKSUM
+ || ((srv_locks_unsafe_for_binlog
+ || trx->isolation_level <= TRX_ISO_READ_COMMITTED)
+ && trx->isolation_level != TRX_ISO_SERIALIZABLE
+ && (lock_type == TL_READ
+ || lock_type == TL_READ_NO_INSERT)
+ && (sql_command == SQLCOM_INSERT_SELECT
+ || sql_command == SQLCOM_REPLACE_SELECT
+ || sql_command == SQLCOM_UPDATE
+ || sql_command == SQLCOM_CREATE_TABLE))) {
/* If we either have innobase_locks_unsafe_for_binlog
option set or this session is using READ COMMITTED
@@ -12189,11 +13104,6 @@ ha_innobase::store_lock(
prebuilt->select_lock_type = LOCK_NONE;
prebuilt->stored_select_lock_type = LOCK_NONE;
- } else if (sql_command == SQLCOM_CHECKSUM) {
- /* Use consistent read for checksum table */
-
- prebuilt->select_lock_type = LOCK_NONE;
- prebuilt->stored_select_lock_type = LOCK_NONE;
} else {
prebuilt->select_lock_type = LOCK_S;
prebuilt->stored_select_lock_type = LOCK_S;
@@ -12293,7 +13203,7 @@ the AUTOINC value. If SUCCESS then the table AUTOINC mutex will be locked
on return and all relevant locks acquired.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
ha_innobase::innobase_get_autoinc(
/*==============================*/
ulonglong* value) /*!< out: autoinc value */
@@ -12350,12 +13260,7 @@ ha_innobase::innobase_peek_autoinc(void)
}
/*********************************************************************//**
-This function initializes the auto-inc counter if it has not been
-initialized yet. This function does not change the value of the auto-inc
-counter if it already has been initialized. Returns the value of the
-auto-inc counter in *first_value, and ULONGLONG_MAX in *nb_reserved_values (as
-we have a table-level lock). offset, increment, nb_desired_values are ignored.
-*first_value is set to -1 if error (deadlock or lock wait timeout) */
+Returns the value of the auto-inc counter in *first_value and ~0 on failure. */
UNIV_INTERN
void
ha_innobase::get_auto_increment(
@@ -12370,7 +13275,7 @@ ha_innobase::get_auto_increment(
values */
{
trx_t* trx;
- ulint error;
+ dberr_t error;
ulonglong autoinc = 0;
/* Prepare prebuilt->trx in the table handle */
@@ -12484,18 +13389,15 @@ ha_innobase::reset_auto_increment(
{
DBUG_ENTER("ha_innobase::reset_auto_increment");
- int error;
+ dberr_t error;
update_thd(ha_thd());
error = row_lock_table_autoinc_for_mysql(prebuilt);
if (error != DB_SUCCESS) {
- error = convert_error_code_to_mysql(error,
- prebuilt->table->flags,
- user_thd);
-
- DBUG_RETURN(error);
+ DBUG_RETURN(convert_error_code_to_mysql(
+ error, prebuilt->table->flags, user_thd));
}
/* The next value can never be 0. */
@@ -12564,7 +13466,7 @@ ha_innobase::get_foreign_dup_key(
/* else */
/* copy table name (and convert from filename-safe encoding to
- system_charset_info, e.g. "foo_@0J@00b6" -> "foo_ö") */
+ system_charset_info) */
char* p;
p = strchr(err_index->table->name, '/');
/* strip ".../" prefix if any */
@@ -12617,7 +13519,7 @@ ha_innobase::cmp_ref(
key_part = table->key_info[table->s->primary_key].key_part;
key_part_end = key_part
- + table->key_info[table->s->primary_key].key_parts;
+ + table->key_info[table->s->primary_key].user_defined_key_parts;
for (; key_part != key_part_end; ++key_part) {
field = key_part->field;
@@ -12662,11 +13564,10 @@ my_bool
ha_innobase::register_query_cache_table(
/*====================================*/
THD* thd, /*!< in: user thread handle */
- char* table_key, /*!< in: concatenation of database name,
- the null character NUL,
- and the table name */
- uint key_length, /*!< in: length of the full name, i.e.
- len(dbname) + len(tablename) + 1 */
+ char* table_key, /*!< in: normalized path to the
+ table */
+ uint key_length, /*!< in: length of the normalized
+ path to the table */
qc_engine_callback*
call_back, /*!< out: pointer to function for
checking if query caching
@@ -12788,8 +13689,8 @@ innobase_xa_prepare(
false - the current SQL statement
ended */
{
- int error = 0;
- trx_t* trx = check_trx_exists(thd);
+ int error = 0;
+ trx_t* trx = check_trx_exists(thd);
DBUG_ASSERT(hton == innodb_hton_ptr);
@@ -12982,124 +13883,6 @@ innobase_set_cursor_view(
}
/*******************************************************************//**
-If col_name is not NULL, check whether the named column is being
-renamed in the table. If col_name is not provided, check
-whether any one of columns in the table is being renamed.
-@return true if the column is being renamed */
-static
-bool
-check_column_being_renamed(
-/*=======================*/
- const TABLE* table, /*!< in: MySQL table */
- const char* col_name) /*!< in: name of the column */
-{
- uint k;
- Field* field;
-
- for (k = 0; k < table->s->fields; k++) {
- field = table->field[k];
-
- if (field->flags & FIELD_IS_RENAMED) {
-
- /* If col_name is not provided, return
- if the field is marked as being renamed. */
- if (!col_name) {
- return(true);
- }
-
- /* If col_name is provided, return only
- if names match */
- if (innobase_strcasecmp(field->field_name,
- col_name) == 0) {
- return(true);
- }
- }
- }
-
- return(false);
-}
-
-/*******************************************************************//**
-Check whether any of the given columns is being renamed in the table.
-@return true if any of col_names is being renamed in table */
-static
-bool
-column_is_being_renamed(
-/*====================*/
- TABLE* table, /*!< in: MySQL table */
- uint n_cols, /*!< in: number of columns */
- const char** col_names) /*!< in: names of the columns */
-{
- uint j;
-
- for (j = 0; j < n_cols; j++) {
- if (check_column_being_renamed(table, col_names[j])) {
- return(true);
- }
- }
-
- return(false);
-}
-
-/*******************************************************************//**
-Check whether a column in table "table" is being renamed and if this column
-is part of a foreign key, either part of another table, referencing this
-table or part of this table, referencing another table.
-@return true if a column that participates in a foreign key definition
-is being renamed */
-static
-bool
-foreign_key_column_is_being_renamed(
-/*================================*/
- row_prebuilt_t* prebuilt, /* in: InnoDB prebuilt struct */
- TABLE* table) /* in: MySQL table */
-{
- dict_foreign_t* foreign;
-
- /* check whether there are foreign keys at all */
- if (UT_LIST_GET_LEN(prebuilt->table->foreign_list) == 0
- && UT_LIST_GET_LEN(prebuilt->table->referenced_list) == 0) {
- /* no foreign keys involved with prebuilt->table */
-
- return(false);
- }
-
- row_mysql_lock_data_dictionary(prebuilt->trx);
-
- /* Check whether any column in the foreign key constraints which refer
- to this table is being renamed. */
- for (foreign = UT_LIST_GET_FIRST(prebuilt->table->referenced_list);
- foreign != NULL;
- foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) {
-
- if (column_is_being_renamed(table, foreign->n_fields,
- foreign->referenced_col_names)) {
-
- row_mysql_unlock_data_dictionary(prebuilt->trx);
- return(true);
- }
- }
-
- /* Check whether any column in the foreign key constraints in the
- table is being renamed. */
- for (foreign = UT_LIST_GET_FIRST(prebuilt->table->foreign_list);
- foreign != NULL;
- foreign = UT_LIST_GET_NEXT(foreign_list, foreign)) {
-
- if (column_is_being_renamed(table, foreign->n_fields,
- foreign->foreign_col_names)) {
-
- row_mysql_unlock_data_dictionary(prebuilt->trx);
- return(true);
- }
- }
-
- row_mysql_unlock_data_dictionary(prebuilt->trx);
-
- return(false);
-}
-
-/*******************************************************************//**
*/
UNIV_INTERN
bool
@@ -13108,6 +13891,8 @@ ha_innobase::check_if_incompatible_data(
HA_CREATE_INFO* info,
uint table_changes)
{
+ innobase_copy_frm_flags_from_create_info(prebuilt->table, info);
+
if (table_changes != IS_EQUAL_YES) {
return(COMPATIBLE_DATA_NO);
@@ -13120,25 +13905,8 @@ ha_innobase::check_if_incompatible_data(
return(COMPATIBLE_DATA_NO);
}
- /* For column rename operation, MySQL does not supply enough
- information (new column name etc.) for InnoDB to make appropriate
- system metadata change. To avoid system metadata inconsistency,
- currently we can just request a table rebuild/copy by returning
- COMPATIBLE_DATA_NO */
- if (check_column_being_renamed(table, NULL)) {
- return(COMPATIBLE_DATA_NO);
- }
-
- /* Check if a column participating in a foreign key is being renamed.
- There is no mechanism for updating InnoDB foreign key definitions. */
- if (foreign_key_column_is_being_renamed(prebuilt, table)) {
-
- return(COMPATIBLE_DATA_NO);
- }
-
/* Check that row format didn't change */
if ((info->used_fields & HA_CREATE_USED_ROW_FORMAT)
- && info->row_type != ROW_TYPE_DEFAULT
&& info->row_type != get_row_type()) {
return(COMPATIBLE_DATA_NO);
@@ -13152,6 +13920,135 @@ ha_innobase::check_if_incompatible_data(
return(COMPATIBLE_DATA_YES);
}
+/****************************************************************//**
+Update the system variable innodb_io_capacity_max using the "saved"
+value. This function is registered as a callback with MySQL. */
+static
+void
+innodb_io_capacity_max_update(
+/*===========================*/
+ THD* thd, /*!< in: thread handle */
+ struct st_mysql_sys_var* var, /*!< in: pointer to
+ system variable */
+ void* var_ptr,/*!< out: where the
+ formal string goes */
+ const void* save) /*!< in: immediate result
+ from check function */
+{
+ ulong in_val = *static_cast<const ulong*>(save);
+ if (in_val < srv_io_capacity) {
+ in_val = srv_io_capacity;
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_WRONG_ARGUMENTS,
+ "innodb_io_capacity_max cannot be"
+ " set lower than innodb_io_capacity.");
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_WRONG_ARGUMENTS,
+ "Setting innodb_io_capacity_max to %lu",
+ srv_io_capacity);
+ }
+
+ srv_max_io_capacity = in_val;
+}
+
+/****************************************************************//**
+Update the system variable innodb_io_capacity using the "saved"
+value. This function is registered as a callback with MySQL. */
+static
+void
+innodb_io_capacity_update(
+/*======================*/
+ THD* thd, /*!< in: thread handle */
+ struct st_mysql_sys_var* var, /*!< in: pointer to
+ system variable */
+ void* var_ptr,/*!< out: where the
+ formal string goes */
+ const void* save) /*!< in: immediate result
+ from check function */
+{
+ ulong in_val = *static_cast<const ulong*>(save);
+ if (in_val > srv_max_io_capacity) {
+ in_val = srv_max_io_capacity;
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_WRONG_ARGUMENTS,
+ "innodb_io_capacity cannot be set"
+ " higher than innodb_io_capacity_max.");
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_WRONG_ARGUMENTS,
+ "Setting innodb_io_capacity to %lu",
+ srv_max_io_capacity);
+ }
+
+ srv_io_capacity = in_val;
+}
+
+/****************************************************************//**
+Update the system variable innodb_max_dirty_pages_pct using the "saved"
+value. This function is registered as a callback with MySQL. */
+static
+void
+innodb_max_dirty_pages_pct_update(
+/*==============================*/
+ THD* thd, /*!< in: thread handle */
+ struct st_mysql_sys_var* var, /*!< in: pointer to
+ system variable */
+ void* var_ptr,/*!< out: where the
+ formal string goes */
+ const void* save) /*!< in: immediate result
+ from check function */
+{
+ ulong in_val = *static_cast<const ulong*>(save);
+ if (in_val < srv_max_dirty_pages_pct_lwm) {
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_WRONG_ARGUMENTS,
+ "innodb_max_dirty_pages_pct cannot be"
+ " set lower than"
+ " innodb_max_dirty_pages_pct_lwm.");
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_WRONG_ARGUMENTS,
+ "Lowering"
+ " innodb_max_dirty_page_pct_lwm to %lu",
+ in_val);
+
+ srv_max_dirty_pages_pct_lwm = in_val;
+ }
+
+ srv_max_buf_pool_modified_pct = in_val;
+}
+
+/****************************************************************//**
+Update the system variable innodb_max_dirty_pages_pct_lwm using the
+"saved" value. This function is registered as a callback with MySQL. */
+static
+void
+innodb_max_dirty_pages_pct_lwm_update(
+/*==================================*/
+ THD* thd, /*!< in: thread handle */
+ struct st_mysql_sys_var* var, /*!< in: pointer to
+ system variable */
+ void* var_ptr,/*!< out: where the
+ formal string goes */
+ const void* save) /*!< in: immediate result
+ from check function */
+{
+ ulong in_val = *static_cast<const ulong*>(save);
+ if (in_val > srv_max_buf_pool_modified_pct) {
+ in_val = srv_max_buf_pool_modified_pct;
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_WRONG_ARGUMENTS,
+ "innodb_max_dirty_pages_pct_lwm"
+ " cannot be set higher than"
+ " innodb_max_dirty_pages_pct.");
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_WRONG_ARGUMENTS,
+ "Setting innodb_max_dirty_page_pct_lwm"
+ " to %lu",
+ in_val);
+ }
+
+ srv_max_dirty_pages_pct_lwm = in_val;
+}
+
/************************************************************//**
Validate the file format name and return its corresponding id.
@return valid file format id */
@@ -13517,8 +14414,8 @@ innodb_internal_table_validate(
return(0);
}
- user_table = dict_table_open_on_name_no_stats(
- table_name, FALSE, DICT_ERR_IGNORE_NONE);
+ user_table = dict_table_open_on_name(
+ table_name, FALSE, TRUE, DICT_ERR_IGNORE_NONE);
if (user_table) {
if (dict_table_has_fts_index(user_table)) {
@@ -13526,7 +14423,7 @@ innodb_internal_table_validate(
ret = 0;
}
- dict_table_close(user_table, FALSE);
+ dict_table_close(user_table, FALSE, TRUE);
}
return(ret);
@@ -13571,13 +14468,12 @@ innodb_internal_table_update(
}
/****************************************************************//**
-Update the session variable innodb_session_stopword_table
-with the "saved" stopword table name value. This function
-is registered as a callback with MySQL. */
+Update the system variable innodb_adaptive_hash_index using the "saved"
+value. This function is registered as a callback with MySQL. */
static
void
-innodb_session_stopword_update(
-/*===========================*/
+innodb_adaptive_hash_index_update(
+/*==============================*/
THD* thd, /*!< in: thread handle */
struct st_mysql_sys_var* var, /*!< in: pointer to
system variable */
@@ -13586,32 +14482,20 @@ innodb_session_stopword_update(
const void* save) /*!< in: immediate result
from check function */
{
- const char* stopword_table_name;
- char* old;
-
- ut_a(save != NULL);
- ut_a(var_ptr != NULL);
-
- stopword_table_name = *static_cast<const char*const*>(save);
- old = *(char**) var_ptr;
-
- if (stopword_table_name) {
- *(char**) var_ptr = my_strdup(stopword_table_name, MYF(0));
+ if (*(my_bool*) save) {
+ btr_search_enable();
} else {
- *(char**) var_ptr = NULL;
- }
-
- if (old) {
- my_free(old);
+ btr_search_disable();
}
}
+
/****************************************************************//**
-Update the system variable innodb_adaptive_hash_index using the "saved"
+Update the system variable innodb_cmp_per_index using the "saved"
value. This function is registered as a callback with MySQL. */
static
void
-innodb_adaptive_hash_index_update(
-/*==============================*/
+innodb_cmp_per_index_update(
+/*========================*/
THD* thd, /*!< in: thread handle */
struct st_mysql_sys_var* var, /*!< in: pointer to
system variable */
@@ -13620,11 +14504,13 @@ innodb_adaptive_hash_index_update(
const void* save) /*!< in: immediate result
from check function */
{
- if (*(my_bool*) save) {
- btr_search_enable();
- } else {
- btr_search_disable();
+ /* Reset the stats whenever we enable the table
+ INFORMATION_SCHEMA.innodb_cmp_per_index. */
+ if (!srv_cmp_per_index_enabled && *(my_bool*) save) {
+ page_zip_reset_stat_per_index();
}
+
+ srv_cmp_per_index_enabled = !!(*(my_bool*) save);
}
/****************************************************************//**
@@ -14196,6 +15082,53 @@ exit:
return;
}
+#ifdef __WIN__
+/*************************************************************//**
+Validate if passed-in "value" is a valid value for
+innodb_buffer_pool_filename. On Windows, file names with colon (:)
+are not allowed.
+
+@return 0 for valid name */
+static
+int
+innodb_srv_buf_dump_filename_validate(
+/*==================================*/
+	THD*	thd,	/*!< in: thread handle */
+	struct st_mysql_sys_var* var,	/*!< in: pointer to system
+					variable */
+	void*	save,	/*!< out: immediate result
+			for update function */
+	struct st_mysql_value* value)	/*!< in: incoming string */
+{
+	const char*	buf_name;
+	char		buff[OS_FILE_MAX_PATH];
+	int		len= sizeof(buff);
+
+	ut_a(save != NULL);
+	ut_a(value != NULL);
+
+	/* val_str() may copy the value into buff; on return len has
+	been updated to the length of the returned string. */
+	buf_name = value->val_str(value, buff, &len);
+
+	if (buf_name) {
+		/* FALSE: a colon is not acceptable anywhere in the
+		name (no drive-letter exception). */
+		if (is_filename_allowed(buf_name, len, FALSE)){
+			*static_cast<const char**>(save) = buf_name;
+			return(0);
+		} else {
+			push_warning_printf(thd,
+				Sql_condition::WARN_LEVEL_WARN,
+				ER_WRONG_ARGUMENTS,
+				"InnoDB: innodb_buffer_pool_filename "
+				"cannot have colon (:) in the file name.");
+
+		}
+	}
+
+	/* Non-zero return makes the server reject the SET statement. */
+	return(1);
+}
+#else /* __WIN__ */
+# define innodb_srv_buf_dump_filename_validate NULL
+#endif /* __WIN__ */
+
/****************************************************************//**
Update the system variable innodb_monitor_enable and enable
specified monitor counter.
@@ -14273,6 +15206,29 @@ innodb_reset_all_monitor_update(
}
/****************************************************************//**
+Update the system variable innodb_compression_level using the "saved"
+value. This function is registered as a callback with MySQL. */
+static
+void
+innodb_compression_level_update(
+/*============================*/
+	THD*	thd,	/*!< in: thread handle */
+	struct st_mysql_sys_var* var,	/*!< in: pointer to
+					system variable */
+	void*	var_ptr,/*!< out: where the
+			formal string goes */
+	const void*	save)	/*!< in: immediate result
+				from check function */
+{
+	/* We have this call back just to avoid confusion between
+	ulong and ulint datatypes. */
+	innobase_compression_level =
+		(*static_cast<const ulong*>(save));
+	/* Cast to plain ulint: const-qualifying the target of a
+	value cast is meaningless (the qualifier is ignored) and
+	triggers compiler warnings. */
+	page_compression_level =
+		(static_cast<ulint>(innobase_compression_level));
+}
+
+/****************************************************************//**
Parse and enable InnoDB monitor counters during server startup.
User can list the monitor counters/groups to be enable by specifying
"loose-innodb_monitor_enable=monitor_name1;monitor_name2..."
@@ -14390,6 +15346,12 @@ innobase_fts_retrieve_ranking(
ft_prebuilt = ((NEW_FT_INFO*) fts_hdl)->ft_prebuilt;
+ if (ft_prebuilt->read_just_key) {
+ fts_ranking_t* ranking =
+ rbt_value(fts_ranking_t, result->current);
+ return(ranking->rank);
+ }
+
/* Retrieve the ranking value for doc_id with value of
prebuilt->fts_doc_id */
return(fts_retrieve_ranking(result, ft_prebuilt->fts_doc_id));
@@ -14404,20 +15366,16 @@ innobase_fts_close_ranking(
FT_INFO * fts_hdl)
{
fts_result_t* result;
- row_prebuilt_t* ft_prebuilt;
- ft_prebuilt = ((NEW_FT_INFO*) fts_hdl)->ft_prebuilt;
+ ((NEW_FT_INFO*) fts_hdl)->ft_prebuilt->in_fts_query = false;
result = ((NEW_FT_INFO*) fts_hdl)->ft_result;
fts_query_free_result(result);
- if (result == ft_prebuilt->result) {
- ft_prebuilt->result = NULL;
- }
-
my_free((uchar*) fts_hdl);
+
return;
}
@@ -14441,7 +15399,120 @@ innobase_fts_find_ranking(
/* Retrieve the ranking value for doc_id with value of
prebuilt->fts_doc_id */
- return fts_retrieve_ranking(result, ft_prebuilt->fts_doc_id);
+ return(fts_retrieve_ranking(result, ft_prebuilt->fts_doc_id));
+}
+
+#ifdef UNIV_DEBUG
+static my_bool innodb_purge_run_now = TRUE;
+static my_bool innodb_purge_stop_now = TRUE;
+
+/****************************************************************//**
+Set the purge state to RUN. If purge is disabled then it
+is a no-op. This function is registered as a callback with MySQL. */
+static
+void
+purge_run_now_set(
+/*==============*/
+	THD*	thd	/*!< in: thread handle */
+			__attribute__((unused)),
+	struct st_mysql_sys_var* var	/*!< in: pointer to system
+					variable */
+			__attribute__((unused)),
+	void*	var_ptr	/*!< out: where the formal
+			string goes */
+			__attribute__((unused)),
+	const void*	save)	/*!< in: immediate result from
+				check function */
+{
+	/* Act only on SET ... = ON; setting the variable to OFF is
+	ignored. Purge cannot be resumed while globally disabled. */
+	if (*(my_bool*) save && trx_purge_state() != PURGE_STATE_DISABLED) {
+		trx_purge_run();
+	}
+}
+
+/****************************************************************//**
+Set the purge state to STOP. If purge is disabled then it
+is a no-op. This function is registered as a callback with MySQL. */
+static
+void
+purge_stop_now_set(
+/*===============*/
+	THD*	thd	/*!< in: thread handle */
+			__attribute__((unused)),
+	struct st_mysql_sys_var* var	/*!< in: pointer to system
+					variable */
+			__attribute__((unused)),
+	void*	var_ptr	/*!< out: where the formal
+			string goes */
+			__attribute__((unused)),
+	const void*	save)	/*!< in: immediate result from
+				check function */
+{
+	/* Act only on SET ... = ON; setting the variable to OFF is
+	ignored. Purge cannot be stopped while globally disabled. */
+	if (*(my_bool*) save && trx_purge_state() != PURGE_STATE_DISABLED) {
+		trx_purge_stop();
+	}
+}
+#endif /* UNIV_DEBUG */
+
+/***********************************************************************
+@return version of the extended FTS API */
+uint
+innobase_fts_get_version()
+/*======================*/
+{
+	/* Currently this doesn't make much sense as returning
+	HA_CAN_FULLTEXT_EXT automatically means this version is supported.
+	This is supposed to ease future extensions. */
+	return(2);
+}
+
+/***********************************************************************
+@return Which part of the extended FTS API is supported */
+ulonglong
+innobase_fts_flags()
+/*================*/
+{
+	/* Bitmask of supported extended-API capabilities: results
+	come back ordered and each row carries its document id. */
+	return(FTS_ORDERED_RESULT | FTS_DOCID_IN_RESULT);
+}
+
+
+/***********************************************************************
+Find and Retrieve the FTS doc_id for the current result row
+@return the document ID */
+ulonglong
+innobase_fts_retrieve_docid(
+/*========================*/
+	FT_INFO_EXT * fts_hdl)	/*!< in: FTS handler */
+{
+	row_prebuilt_t* ft_prebuilt;
+	fts_result_t*	result;
+
+	ft_prebuilt = ((NEW_FT_INFO *)fts_hdl)->ft_prebuilt;
+	result = ((NEW_FT_INFO *)fts_hdl)->ft_result;
+
+	/* When only the key is being read, take the doc id straight
+	from the current node of the result rb-tree instead of the
+	prebuilt struct. */
+	if (ft_prebuilt->read_just_key) {
+		fts_ranking_t*	ranking =
+			rbt_value(fts_ranking_t, result->current);
+		return(ranking->doc_id);
+	}
+
+	return(ft_prebuilt->fts_doc_id);
+}
+
+/***********************************************************************
+Find and retrieve the size of the current result
+@return number of matching rows */
+ulonglong
+innobase_fts_count_matches(
+/*=======================*/
+	FT_INFO_EXT*	fts_hdl)	/*!< in: FTS handler */
+{
+	NEW_FT_INFO*	handle = (NEW_FT_INFO *) fts_hdl;
+
+	/* rankings_by_id is NULL when the query produced no ranked
+	result set; report zero matches in that case. */
+	if (handle->ft_result->rankings_by_id != 0) {
+		return rbt_size(handle->ft_result->rankings_by_id);
+	} else {
+		return(0);
+	}
}
/* These variables are never read by InnoDB or changed. They are a kind of
@@ -14478,7 +15549,7 @@ buffer_pool_dump_now(
const void* save) /*!< in: immediate result from
check function */
{
- if (*(my_bool*) save) {
+ if (*(my_bool*) save && !srv_read_only_mode) {
buf_dump_start();
}
}
@@ -14599,7 +15670,26 @@ static MYSQL_SYSVAR_BOOL(use_fallocate, innobase_use_fallocate,
static MYSQL_SYSVAR_ULONG(io_capacity, srv_io_capacity,
PLUGIN_VAR_RQCMDARG,
"Number of IOPs the server can do. Tunes the background IO rate",
- NULL, NULL, 200, 100, ~0UL, 0);
+ NULL, innodb_io_capacity_update, 200, 100, ~0UL, 0);
+
+static MYSQL_SYSVAR_ULONG(io_capacity_max, srv_max_io_capacity,
+ PLUGIN_VAR_RQCMDARG,
+ "Limit to which innodb_io_capacity can be inflated.",
+ NULL, innodb_io_capacity_max_update,
+ SRV_MAX_IO_CAPACITY_DUMMY_DEFAULT, 100,
+ SRV_MAX_IO_CAPACITY_LIMIT, 0);
+
+#ifdef UNIV_DEBUG
+static MYSQL_SYSVAR_BOOL(purge_run_now, innodb_purge_run_now,
+ PLUGIN_VAR_OPCMDARG,
+ "Set purge state to RUN",
+ NULL, purge_run_now_set, FALSE);
+
+static MYSQL_SYSVAR_BOOL(purge_stop_now, innodb_purge_stop_now,
+ PLUGIN_VAR_OPCMDARG,
+ "Set purge state to STOP",
+ NULL, purge_stop_now_set, FALSE);
+#endif /* UNIV_DEBUG */
static MYSQL_SYSVAR_ULONG(purge_batch_size, srv_purge_batch_size,
PLUGIN_VAR_OPCMDARG,
@@ -14611,7 +15701,7 @@ static MYSQL_SYSVAR_ULONG(purge_batch_size, srv_purge_batch_size,
static MYSQL_SYSVAR_ULONG(purge_threads, srv_n_purge_threads,
PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_READONLY,
- "Purge threads can be from 0 to 32. Default is 0.",
+ "Purge threads can be from 1 to 32. Default is 1.",
NULL, NULL,
1, /* Default setting */
1, /* Minimum value */
@@ -14634,7 +15724,7 @@ static MYSQL_SYSVAR_ULONG(fast_shutdown, innobase_fast_shutdown,
static MYSQL_SYSVAR_BOOL(file_per_table, srv_file_per_table,
PLUGIN_VAR_NOCMDARG,
"Stores each InnoDB table to an .ibd file in the database dir.",
- NULL, NULL, FALSE);
+ NULL, NULL, TRUE);
static MYSQL_SYSVAR_STR(file_format, innobase_file_format_name,
PLUGIN_VAR_RQCMDARG,
@@ -14670,6 +15760,11 @@ static MYSQL_SYSVAR_STR(ft_server_stopword_table, innobase_server_stopword_table
innodb_stopword_table_update,
NULL);
+static MYSQL_SYSVAR_UINT(flush_log_at_timeout, srv_flush_log_at_timeout,
+ PLUGIN_VAR_OPCMDARG,
+ "Write and flush logs every (n) second.",
+ NULL, NULL, 1, 0, 2700, 0);
+
static MYSQL_SYSVAR_ULONG(flush_log_at_trx_commit, srv_flush_log_at_trx_commit,
PLUGIN_VAR_OPCMDARG,
"Controls the durability/speed trade-off for commits."
@@ -14715,20 +15810,38 @@ static MYSQL_SYSVAR_BOOL(log_archive, innobase_log_archive,
"Set to 1 if you want to have logs archived.", NULL, NULL, FALSE);
#endif /* UNIV_LOG_ARCHIVE */
-static MYSQL_SYSVAR_STR(log_group_home_dir, innobase_log_group_home_dir,
+static MYSQL_SYSVAR_STR(log_group_home_dir, srv_log_group_home_dir,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
"Path to InnoDB log files.", NULL, NULL, NULL);
static MYSQL_SYSVAR_ULONG(max_dirty_pages_pct, srv_max_buf_pool_modified_pct,
PLUGIN_VAR_RQCMDARG,
"Percentage of dirty pages allowed in bufferpool.",
- NULL, NULL, 75, 0, 99, 0);
+ NULL, innodb_max_dirty_pages_pct_update, 75, 0, 99, 0);
+
+static MYSQL_SYSVAR_ULONG(max_dirty_pages_pct_lwm,
+ srv_max_dirty_pages_pct_lwm,
+ PLUGIN_VAR_RQCMDARG,
+ "Percentage of dirty pages at which flushing kicks in.",
+ NULL, innodb_max_dirty_pages_pct_lwm_update, 0, 0, 99, 0);
+
+static MYSQL_SYSVAR_ULONG(adaptive_flushing_lwm,
+ srv_adaptive_flushing_lwm,
+ PLUGIN_VAR_RQCMDARG,
+ "Percentage of log capacity below which no adaptive flushing happens.",
+ NULL, NULL, 10, 0, 70, 0);
static MYSQL_SYSVAR_BOOL(adaptive_flushing, srv_adaptive_flushing,
PLUGIN_VAR_NOCMDARG,
"Attempt flushing dirty pages to avoid IO bursts at checkpoints.",
NULL, NULL, TRUE);
+static MYSQL_SYSVAR_ULONG(flushing_avg_loops,
+ srv_flushing_avg_loops,
+ PLUGIN_VAR_RQCMDARG,
+ "Number of iterations over which the background flushing is averaged.",
+ NULL, NULL, 30, 1, 1000, 0);
+
static MYSQL_SYSVAR_ULONG(max_purge_lag, srv_max_purge_lag,
PLUGIN_VAR_RQCMDARG,
"Desired maximum length of the purge queue (0 = no limit)",
@@ -14737,11 +15850,11 @@ static MYSQL_SYSVAR_ULONG(max_purge_lag, srv_max_purge_lag,
static MYSQL_SYSVAR_ULONG(max_purge_lag_delay, srv_max_purge_lag_delay,
PLUGIN_VAR_RQCMDARG,
"Maximum delay of user threads in micro-seconds",
- NULL, NULL,
+ NULL, NULL,
0L, /* Default seting */
0L, /* Minimum value */
10000000UL, 0); /* Maximum value */
-
+
static MYSQL_SYSVAR_BOOL(rollback_on_timeout, innobase_rollback_on_timeout,
PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_READONLY,
"Roll back the complete transaction on lock wait timeout, for 4.x compatibility (disabled by default)",
@@ -14754,8 +15867,9 @@ static MYSQL_SYSVAR_BOOL(status_file, innobase_create_status_file,
static MYSQL_SYSVAR_BOOL(stats_on_metadata, innobase_stats_on_metadata,
PLUGIN_VAR_OPCMDARG,
- "Enable statistics gathering for metadata commands such as SHOW TABLE STATUS (on by default)",
- NULL, NULL, TRUE);
+ "Enable statistics gathering for metadata commands such as "
+ "SHOW TABLE STATUS for tables that use transient statistics (off by default)",
+ NULL, NULL, FALSE);
static MYSQL_SYSVAR_ULONGLONG(stats_sample_pages, srv_stats_transient_sample_pages,
PLUGIN_VAR_RQCMDARG,
@@ -14769,6 +15883,20 @@ static MYSQL_SYSVAR_ULONGLONG(stats_transient_sample_pages,
"statistics (if persistent statistics are not used, default 8)",
NULL, NULL, 8, 1, ~0ULL, 0);
+static MYSQL_SYSVAR_BOOL(stats_persistent, srv_stats_persistent,
+ PLUGIN_VAR_OPCMDARG,
+ "InnoDB persistent statistics enabled for all tables unless overridden "
+ "at table level",
+ NULL, NULL, TRUE);
+
+static MYSQL_SYSVAR_BOOL(stats_auto_recalc, srv_stats_auto_recalc,
+ PLUGIN_VAR_OPCMDARG,
+ "InnoDB automatic recalculation of persistent statistics enabled for all "
+ "tables unless overridden at table level (automatic recalculation is only "
+ "done when InnoDB decides that the table has changed too much and needs a "
+ "new statistics)",
+ NULL, NULL, TRUE);
+
static MYSQL_SYSVAR_ULONGLONG(stats_persistent_sample_pages,
srv_stats_persistent_sample_pages,
PLUGIN_VAR_RQCMDARG,
@@ -14788,6 +15916,13 @@ static MYSQL_SYSVAR_ULONG(replication_delay, srv_replication_delay,
"innodb_thread_concurrency is reached (0 by default)",
NULL, NULL, 0, 0, ~0UL, 0);
+static MYSQL_SYSVAR_ULONG(compression_level, innobase_compression_level,
+ PLUGIN_VAR_RQCMDARG,
+ "Compression level used for compressed row format. 0 is no compression"
+ ", 1 is fastest, 9 is best compression and default is 6.",
+ NULL, innodb_compression_level_update,
+ DEFAULT_COMPRESSION_LEVEL, 0, 9, 0);
+
static MYSQL_SYSVAR_LONG(additional_mem_pool_size, innobase_additional_mem_pool_size,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
"DEPRECATED. This option may be removed in future releases, "
@@ -14799,7 +15934,7 @@ static MYSQL_SYSVAR_LONG(additional_mem_pool_size, innobase_additional_mem_pool_
static MYSQL_SYSVAR_ULONG(autoextend_increment, srv_auto_extend_increment,
PLUGIN_VAR_RQCMDARG,
"Data file autoextend increment in megabytes",
- NULL, NULL, 8L, 1L, 1000L, 0);
+ NULL, NULL, 64L, 1L, 1000L, 0);
static MYSQL_SYSVAR_LONGLONG(buffer_pool_size, innobase_buffer_pool_size,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
@@ -14821,12 +15956,12 @@ static MYSQL_SYSVAR_ULONG(doublewrite_batch_size, srv_doublewrite_batch_size,
static MYSQL_SYSVAR_LONG(buffer_pool_instances, innobase_buffer_pool_instances,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
"Number of buffer pool instances, set to higher value on high-end machines to increase scalability",
- NULL, NULL, 1L, 1L, MAX_BUFFER_POOLS, 1L);
+ NULL, NULL, 0L, 0L, MAX_BUFFER_POOLS, 1L);
static MYSQL_SYSVAR_STR(buffer_pool_filename, srv_buf_dump_filename,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_MEMALLOC,
"Filename to/from which to dump/load the InnoDB buffer pool",
- NULL, NULL, SRV_BUF_DUMP_FILENAME_DEFAULT);
+ innodb_srv_buf_dump_filename_validate, NULL, SRV_BUF_DUMP_FILENAME_DEFAULT);
static MYSQL_SYSVAR_BOOL(buffer_pool_dump_now, innodb_buffer_pool_dump_now,
PLUGIN_VAR_RQCMDARG,
@@ -14859,10 +15994,13 @@ static MYSQL_SYSVAR_ULONG(lru_scan_depth, srv_LRU_scan_depth,
"How deep to scan LRU to keep it clean",
NULL, NULL, 1024, 100, ~0UL, 0);
-static MYSQL_SYSVAR_BOOL(flush_neighbors, srv_flush_neighbors,
- PLUGIN_VAR_NOCMDARG,
- "Flush neighbors from buffer pool when flushing a block.",
- NULL, NULL, TRUE);
+static MYSQL_SYSVAR_ULONG(flush_neighbors, srv_flush_neighbors,
+ PLUGIN_VAR_OPCMDARG,
+ "Set to 0 (don't flush neighbors from buffer pool),"
+ " 1 (flush contiguous neighbors from buffer pool)"
+ " or 2 (flush neighbors from buffer pool),"
+ " when flushing a block",
+ NULL, NULL, 1, 0, 2, 0);
static MYSQL_SYSVAR_ULONG(commit_concurrency, innobase_commit_concurrency,
PLUGIN_VAR_RQCMDARG,
@@ -14872,7 +16010,7 @@ static MYSQL_SYSVAR_ULONG(commit_concurrency, innobase_commit_concurrency,
static MYSQL_SYSVAR_ULONG(concurrency_tickets, srv_n_free_tickets_to_enter,
PLUGIN_VAR_RQCMDARG,
"Number of times a thread is allowed to enter InnoDB within the same SQL query after it has once got the ticket",
- NULL, NULL, 500L, 1L, ~0UL, 0);
+ NULL, NULL, 5000L, 1L, ~0UL, 0);
static MYSQL_SYSVAR_LONG(file_io_threads, innobase_file_io_threads,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY | PLUGIN_VAR_NOSYSVAR,
@@ -14882,7 +16020,7 @@ static MYSQL_SYSVAR_LONG(file_io_threads, innobase_file_io_threads,
static MYSQL_SYSVAR_BOOL(ft_enable_diag_print, fts_enable_diag_print,
PLUGIN_VAR_OPCMDARG,
"Whether to enable additional FTS diagnostic printout ",
- NULL, NULL, TRUE);
+ NULL, NULL, FALSE);
static MYSQL_SYSVAR_BOOL(disable_sort_file_cache, srv_disable_sort_file_cache,
PLUGIN_VAR_OPCMDARG,
@@ -14898,7 +16036,7 @@ static MYSQL_SYSVAR_STR(ft_aux_table, fts_internal_tbl_name,
static MYSQL_SYSVAR_ULONG(ft_cache_size, fts_max_cache_size,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
"InnoDB Fulltext search cache size in bytes",
- NULL, NULL, 32000000, 1600000, 80000000, 0);
+ NULL, NULL, 8000000, 1600000, 80000000, 0);
static MYSQL_SYSVAR_ULONG(ft_min_token_size, fts_min_token_size,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
@@ -14924,7 +16062,12 @@ static MYSQL_SYSVAR_ULONG(ft_sort_pll_degree, fts_sort_pll_degree,
static MYSQL_SYSVAR_ULONG(sort_buffer_size, srv_sort_buf_size,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
"Memory buffer size for index creation",
- NULL, NULL, 1048576, 524288, 64<<20, 0);
+ NULL, NULL, 1048576, 65536, 64<<20, 0);
+
+static MYSQL_SYSVAR_ULONGLONG(online_alter_log_max_size, srv_online_max_size,
+ PLUGIN_VAR_RQCMDARG,
+ "Maximum modification log file size for online index creation",
+ NULL, NULL, 128<<20, 65536, ~0ULL, 0);
static MYSQL_SYSVAR_BOOL(optimize_fulltext_only, innodb_optimize_fulltext_only,
PLUGIN_VAR_NOCMDARG,
@@ -14941,11 +16084,18 @@ static MYSQL_SYSVAR_ULONG(write_io_threads, innobase_write_io_threads,
"Number of background write I/O threads in InnoDB.",
NULL, NULL, 4, 1, 64, 0);
-static MYSQL_SYSVAR_LONG(force_recovery, innobase_force_recovery,
+static MYSQL_SYSVAR_ULONG(force_recovery, srv_force_recovery,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
"Helps to save your data in case the disk image of the database becomes corrupt.",
NULL, NULL, 0, 0, 6, 0);
+#ifndef DBUG_OFF
+static MYSQL_SYSVAR_ULONG(force_recovery_crash, srv_force_recovery_crash,
+ PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+ "Kills the server during crash recovery.",
+ NULL, NULL, 0, 0, 10, 0);
+#endif /* !DBUG_OFF */
+
static MYSQL_SYSVAR_ULONG(page_size, srv_page_size,
PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_READONLY,
"Page size to use for all InnoDB tablespaces.",
@@ -14960,12 +16110,12 @@ static MYSQL_SYSVAR_LONG(log_buffer_size, innobase_log_buffer_size,
static MYSQL_SYSVAR_LONGLONG(log_file_size, innobase_log_file_size,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
"Size of each log file in a log group.",
- NULL, NULL, 5*1024*1024L, 1*1024*1024L, LONGLONG_MAX, 1024*1024L);
+ NULL, NULL, 48*1024*1024L, 1*1024*1024L, LONGLONG_MAX, 1024*1024L);
-static MYSQL_SYSVAR_LONG(log_files_in_group, innobase_log_files_in_group,
+static MYSQL_SYSVAR_ULONG(log_files_in_group, srv_n_log_files,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
- "Number of log files in the log group. InnoDB writes to the files in a circular fashion. Value 3 is recommended here.",
- NULL, NULL, 2, 2, 100, 0);
+ "Number of log files in the log group. InnoDB writes to the files in a circular fashion.",
+ NULL, NULL, 2, 2, SRV_N_LOG_FILES_MAX, 0);
static MYSQL_SYSVAR_LONG(mirrored_log_groups, innobase_mirrored_log_groups,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
@@ -14981,13 +16131,13 @@ static MYSQL_SYSVAR_UINT(old_blocks_time, buf_LRU_old_threshold_ms,
PLUGIN_VAR_RQCMDARG,
"Move blocks to the 'new' end of the buffer pool if the first access"
" was at least this many milliseconds ago."
- " The timeout is disabled if 0 (the default).",
- NULL, NULL, 0, 0, UINT_MAX32, 0);
+ " The timeout is disabled if 0.",
+ NULL, NULL, 1000, 0, UINT_MAX32, 0);
static MYSQL_SYSVAR_LONG(open_files, innobase_open_files,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
"How many files at the maximum InnoDB keeps open at the same time.",
- NULL, NULL, 300L, 10L, LONG_MAX, 0);
+ NULL, NULL, 0L, 0L, LONG_MAX, 0);
static MYSQL_SYSVAR_ULONG(sync_spin_loops, srv_n_spin_wait_rounds,
PLUGIN_VAR_RQCMDARG,
@@ -15087,6 +16237,37 @@ static MYSQL_SYSVAR_BOOL(use_native_aio, srv_use_native_aio,
"Use native AIO if supported on this platform.",
NULL, NULL, TRUE);
+static MYSQL_SYSVAR_BOOL(api_enable_binlog, ib_binlog_enabled,
+ PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY,
+ "Enable binlog for applications direct access InnoDB through InnoDB APIs",
+ NULL, NULL, FALSE);
+
+static MYSQL_SYSVAR_BOOL(api_enable_mdl, ib_mdl_enabled,
+ PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY,
+ "Enable MDL for applications direct access InnoDB through InnoDB APIs",
+ NULL, NULL, FALSE);
+
+static MYSQL_SYSVAR_BOOL(api_disable_rowlock, ib_disable_row_lock,
+ PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_READONLY,
+ "Disable row lock when direct access InnoDB through InnoDB APIs",
+ NULL, NULL, FALSE);
+
+static MYSQL_SYSVAR_ULONG(api_trx_level, ib_trx_level_setting,
+ PLUGIN_VAR_OPCMDARG,
+ "InnoDB API transaction isolation level",
+ NULL, NULL,
+ 0, /* Default setting */
+ 0, /* Minimum value */
+ 3, 0); /* Maximum value */
+
+static MYSQL_SYSVAR_ULONG(api_bk_commit_interval, ib_bk_commit_interval,
+ PLUGIN_VAR_OPCMDARG,
+ "Background commit interval in seconds",
+ NULL, NULL,
+ 5, /* Default setting */
+ 1, /* Minimum value */
+ 1024 * 1024 * 1024, 0); /* Maximum value */
+
static MYSQL_SYSVAR_STR(change_buffering, innobase_change_buffering,
PLUGIN_VAR_RQCMDARG,
"Buffer changes to reduce random access: "
@@ -15114,6 +16295,12 @@ static MYSQL_SYSVAR_UINT(change_buffering_debug, ibuf_debug,
PLUGIN_VAR_RQCMDARG,
"Debug flags for InnoDB change buffering (0=none, 2=crash at merge)",
NULL, NULL, 0, 0, 2, 0);
+
+static MYSQL_SYSVAR_BOOL(disable_background_merge,
+ srv_ibuf_disable_background_merge,
+ PLUGIN_VAR_NOCMDARG | PLUGIN_VAR_RQCMDARG,
+ "Disable change buffering merges by the master thread",
+ NULL, NULL, FALSE);
#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
static MYSQL_SYSVAR_BOOL(random_read_ahead, srv_random_read_ahead,
@@ -15156,6 +16343,30 @@ static MYSQL_SYSVAR_BOOL(print_all_deadlocks, srv_print_all_deadlocks,
"Print all deadlocks to MySQL error log (off by default)",
NULL, NULL, FALSE);
+static MYSQL_SYSVAR_ULONG(compression_failure_threshold_pct,
+ zip_failure_threshold_pct, PLUGIN_VAR_OPCMDARG,
+ "If the compression failure rate of a table is greater than this number"
+ " more padding is added to the pages to reduce the failures. A value of"
+ " zero implies no padding",
+ NULL, NULL, 5, 0, 100, 0);
+
+static MYSQL_SYSVAR_ULONG(compression_pad_pct_max,
+ zip_pad_max, PLUGIN_VAR_OPCMDARG,
+ "Percentage of empty space on a data page that can be reserved"
+ " to make the page compressible.",
+ NULL, NULL, 50, 0, 75, 0);
+
+static MYSQL_SYSVAR_BOOL(read_only, srv_read_only_mode,
+ PLUGIN_VAR_OPCMDARG | PLUGIN_VAR_READONLY,
+ "Start InnoDB in read only mode (off by default)",
+ NULL, NULL, FALSE);
+
+static MYSQL_SYSVAR_BOOL(cmp_per_index_enabled, srv_cmp_per_index_enabled,
+ PLUGIN_VAR_OPCMDARG,
+ "Enable INFORMATION_SCHEMA.innodb_cmp_per_index, "
+ "may have negative impact on performance (off by default)",
+ NULL, innodb_cmp_per_index_update, FALSE);
+
#ifdef UNIV_DEBUG
static MYSQL_SYSVAR_UINT(trx_rseg_n_slots_debug, trx_rseg_n_slots_debug,
PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_NOCMDOPT,
@@ -15177,6 +16388,8 @@ static MYSQL_SYSVAR_BOOL(trx_purge_view_update_only_debug,
static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(additional_mem_pool_size),
+ MYSQL_SYSVAR(api_trx_level),
+ MYSQL_SYSVAR(api_bk_commit_interval),
MYSQL_SYSVAR(autoextend_increment),
MYSQL_SYSVAR(buffer_pool_size),
MYSQL_SYSVAR(buffer_pool_instances),
@@ -15192,11 +16405,15 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(checksums),
MYSQL_SYSVAR(commit_concurrency),
MYSQL_SYSVAR(concurrency_tickets),
+ MYSQL_SYSVAR(compression_level),
MYSQL_SYSVAR(data_file_path),
MYSQL_SYSVAR(data_home_dir),
MYSQL_SYSVAR(doublewrite),
MYSQL_SYSVAR(use_atomic_writes),
MYSQL_SYSVAR(use_fallocate),
+ MYSQL_SYSVAR(api_enable_binlog),
+ MYSQL_SYSVAR(api_enable_mdl),
+ MYSQL_SYSVAR(api_disable_rowlock),
MYSQL_SYSVAR(fast_shutdown),
MYSQL_SYSVAR(file_io_threads),
MYSQL_SYSVAR(read_io_threads),
@@ -15205,9 +16422,13 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(file_format),
MYSQL_SYSVAR(file_format_check),
MYSQL_SYSVAR(file_format_max),
+ MYSQL_SYSVAR(flush_log_at_timeout),
MYSQL_SYSVAR(flush_log_at_trx_commit),
MYSQL_SYSVAR(flush_method),
MYSQL_SYSVAR(force_recovery),
+#ifndef DBUG_OFF
+ MYSQL_SYSVAR(force_recovery_crash),
+#endif /* !DBUG_OFF */
MYSQL_SYSVAR(ft_cache_size),
MYSQL_SYSVAR(ft_enable_stopword),
MYSQL_SYSVAR(ft_max_token_size),
@@ -15228,7 +16449,10 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(log_files_in_group),
MYSQL_SYSVAR(log_group_home_dir),
MYSQL_SYSVAR(max_dirty_pages_pct),
+ MYSQL_SYSVAR(max_dirty_pages_pct_lwm),
+ MYSQL_SYSVAR(adaptive_flushing_lwm),
MYSQL_SYSVAR(adaptive_flushing),
+ MYSQL_SYSVAR(flushing_avg_loops),
MYSQL_SYSVAR(max_purge_lag),
MYSQL_SYSVAR(max_purge_lag_delay),
MYSQL_SYSVAR(mirrored_log_groups),
@@ -15245,7 +16469,9 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(stats_on_metadata),
MYSQL_SYSVAR(stats_sample_pages),
MYSQL_SYSVAR(stats_transient_sample_pages),
+ MYSQL_SYSVAR(stats_persistent),
MYSQL_SYSVAR(stats_persistent_sample_pages),
+ MYSQL_SYSVAR(stats_auto_recalc),
MYSQL_SYSVAR(adaptive_hash_index),
MYSQL_SYSVAR(stats_method),
MYSQL_SYSVAR(replication_delay),
@@ -15253,7 +16479,7 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(strict_mode),
MYSQL_SYSVAR(support_xa),
MYSQL_SYSVAR(sort_buffer_size),
- MYSQL_SYSVAR(analyze_is_persistent),
+ MYSQL_SYSVAR(online_alter_log_max_size),
MYSQL_SYSVAR(sync_spin_loops),
MYSQL_SYSVAR(spin_wait_delay),
MYSQL_SYSVAR(table_locks),
@@ -15270,26 +16496,36 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
MYSQL_SYSVAR(change_buffer_max_size),
#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
MYSQL_SYSVAR(change_buffering_debug),
+ MYSQL_SYSVAR(disable_background_merge),
#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
MYSQL_SYSVAR(random_read_ahead),
MYSQL_SYSVAR(read_ahead_threshold),
+ MYSQL_SYSVAR(read_only),
MYSQL_SYSVAR(io_capacity),
+ MYSQL_SYSVAR(io_capacity_max),
MYSQL_SYSVAR(monitor_enable),
MYSQL_SYSVAR(monitor_disable),
MYSQL_SYSVAR(monitor_reset),
MYSQL_SYSVAR(monitor_reset_all),
MYSQL_SYSVAR(purge_threads),
MYSQL_SYSVAR(purge_batch_size),
+#ifdef UNIV_DEBUG
+ MYSQL_SYSVAR(purge_run_now),
+ MYSQL_SYSVAR(purge_stop_now),
+#endif /* UNIV_DEBUG */
#if defined UNIV_DEBUG || defined UNIV_PERF_DEBUG
MYSQL_SYSVAR(page_hash_locks),
MYSQL_SYSVAR(doublewrite_batch_size),
#endif /* defined UNIV_DEBUG || defined UNIV_PERF_DEBUG */
MYSQL_SYSVAR(print_all_deadlocks),
+ MYSQL_SYSVAR(cmp_per_index_enabled),
MYSQL_SYSVAR(undo_logs),
MYSQL_SYSVAR(rollback_segments),
MYSQL_SYSVAR(undo_directory),
MYSQL_SYSVAR(undo_tablespaces),
MYSQL_SYSVAR(sync_array_size),
+ MYSQL_SYSVAR(compression_failure_threshold_pct),
+ MYSQL_SYSVAR(compression_pad_pct_max),
#ifdef UNIV_DEBUG
MYSQL_SYSVAR(trx_rseg_n_slots_debug),
MYSQL_SYSVAR(limit_optimistic_insert_debug),
@@ -15321,6 +16557,8 @@ i_s_innodb_cmp,
i_s_innodb_cmp_reset,
i_s_innodb_cmpmem,
i_s_innodb_cmpmem_reset,
+i_s_innodb_cmp_per_index,
+i_s_innodb_cmp_per_index_reset,
i_s_innodb_buffer_page,
i_s_innodb_buffer_page_lru,
i_s_innodb_buffer_stats,
@@ -15338,7 +16576,9 @@ i_s_innodb_sys_indexes,
i_s_innodb_sys_columns,
i_s_innodb_sys_fields,
i_s_innodb_sys_foreign,
-i_s_innodb_sys_foreign_cols
+i_s_innodb_sys_foreign_cols,
+i_s_innodb_sys_tablespaces,
+i_s_innodb_sys_datafiles
maria_declare_plugin_end;
@@ -15377,7 +16617,7 @@ innobase_undo_logs_init_default_max()
#ifdef UNIV_COMPILE_TEST_FUNCS
-typedef struct innobase_convert_name_test_struct {
+struct innobase_convert_name_test_t {
char* buf;
ulint buflen;
const char* id;
@@ -15386,7 +16626,7 @@ typedef struct innobase_convert_name_test_struct {
ibool file_id;
const char* expected;
-} innobase_convert_name_test_t;
+};
void
test_innobase_convert_name()
@@ -15505,62 +16745,52 @@ test_innobase_convert_name()
* Multi Range Read interface, DS-MRR calls
*/
-int
-ha_innobase::multi_range_read_init(
- RANGE_SEQ_IF* seq,
- void* seq_init_param,
- uint n_ranges,
- uint mode,
- HANDLER_BUFFER* buf)
+int ha_innobase::multi_range_read_init(RANGE_SEQ_IF *seq, void *seq_init_param,
+ uint n_ranges, uint mode,
+ HANDLER_BUFFER *buf)
{
- return(ds_mrr.dsmrr_init(this, seq, seq_init_param,
- n_ranges, mode, buf));
+ return ds_mrr.dsmrr_init(this, seq, seq_init_param, n_ranges, mode, buf);
}
-int
-ha_innobase::multi_range_read_next(
- range_id_t *range_info)
+int ha_innobase::multi_range_read_next(range_id_t *range_info)
{
- return(ds_mrr.dsmrr_next(range_info));
+ return ds_mrr.dsmrr_next(range_info);
}
-ha_rows
-ha_innobase::multi_range_read_info_const(
- uint keyno,
- RANGE_SEQ_IF* seq,
- void* seq_init_param,
- uint n_ranges,
- uint* bufsz,
- uint* flags,
- Cost_estimate* cost)
+ha_rows ha_innobase::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
+ void *seq_init_param,
+ uint n_ranges, uint *bufsz,
+ uint *flags,
+ Cost_estimate *cost)
{
- /* See comments in ha_myisam::multi_range_read_info_const */
- ds_mrr.init(this, table);
- return(ds_mrr.dsmrr_info_const(keyno, seq, seq_init_param,
- n_ranges, bufsz, flags, cost));
+ /* See comments in ha_myisam::multi_range_read_info_const */
+ ds_mrr.init(this, table);
+
+ if (prebuilt->select_lock_type != LOCK_NONE)
+ *flags |= HA_MRR_USE_DEFAULT_IMPL;
+
+ ha_rows res= ds_mrr.dsmrr_info_const(keyno, seq, seq_init_param, n_ranges,
+ bufsz, flags, cost);
+ return res;
}
-ha_rows
-ha_innobase::multi_range_read_info(
- uint keyno,
- uint n_ranges,
- uint keys,
- uint key_parts,
- uint* bufsz,
- uint* flags,
- Cost_estimate* cost)
+ha_rows ha_innobase::multi_range_read_info(uint keyno, uint n_ranges,
+ uint keys, uint key_parts,
+ uint *bufsz, uint *flags,
+ Cost_estimate *cost)
{
- ds_mrr.init(this, table);
- return(ds_mrr.dsmrr_info(keyno, n_ranges, keys, key_parts, bufsz,
- flags, cost));
+ ds_mrr.init(this, table);
+ ha_rows res= ds_mrr.dsmrr_info(keyno, n_ranges, keys, key_parts, bufsz,
+ flags, cost);
+ return res;
}
-int ha_innobase::multi_range_read_explain_info(uint mrr_mode, char *str, size_t size)
+int ha_innobase::multi_range_read_explain_info(uint mrr_mode, char *str,
+ size_t size)
{
return ds_mrr.dsmrr_explain_info(mrr_mode, str, size);
}
-
/**
* Index Condition Pushdown interface implementation
*/
@@ -15574,7 +16804,7 @@ innobase_index_cond(
/*================*/
void* file) /*!< in/out: pointer to ha_innobase */
{
- return handler_index_cond_check(file);
+ return handler_index_cond_check(file);
}
/** Attempt to push down an index condition.
@@ -15599,3 +16829,181 @@ ha_innobase::idx_cond_push(
DBUG_RETURN(NULL);
}
+/******************************************************************//**
+Use this when the args are passed to the format string from
+errmsg-utf8.txt directly as is.
+
+Push a warning message to the client, it is a wrapper around:
+
+void push_warning_printf(
+ THD *thd, Sql_condition::enum_warning_level level,
+ uint code, const char *format, ...);
+*/
+UNIV_INTERN
+void
+ib_senderrf(
+/*========*/
+ THD* thd, /*!< in/out: session */
+ ib_log_level_t level, /*!< in: warning level */
+ ib_uint32_t code, /*!< MySQL error code */
+ ...) /*!< Args */
+{
+ char* str;
+ va_list args;
+ const char* format = innobase_get_err_msg(code);
+
+ /* If the caller wants to push a message to the client then
+ the caller must pass a valid session handle. */
+
+ ut_a(thd != 0);
+
+ /* The error code must exist in the errmsg-utf8.txt file. */
+ ut_a(format != 0);
+
+ va_start(args, code);
+
+#ifdef __WIN__
+ int size = _vscprintf(format, args) + 1;
+ str = static_cast<char*>(malloc(size));
+ str[size - 1] = 0x0;
+ vsnprintf(str, size, format, args);
+#elif HAVE_VASPRINTF
+ (void) vasprintf(&str, format, args);
+#else
+ /* Use a fixed length string. */
+ str = static_cast<char*>(malloc(BUFSIZ));
+ my_vsnprintf(str, BUFSIZ, format, args);
+#endif /* __WIN__ */
+
+ Sql_condition::enum_warning_level l;
+
+ l = Sql_condition::WARN_LEVEL_NOTE;
+
+ switch(level) {
+ case IB_LOG_LEVEL_INFO:
+ break;
+ case IB_LOG_LEVEL_WARN:
+ l = Sql_condition::WARN_LEVEL_WARN;
+ break;
+ case IB_LOG_LEVEL_ERROR:
+ /* We can't use push_warning_printf(), it is a hard error. */
+ my_printf_error(code, "%s", MYF(0), str);
+ break;
+ case IB_LOG_LEVEL_FATAL:
+ l = Sql_condition::WARN_LEVEL_END;
+ break;
+ }
+
+ if (level != IB_LOG_LEVEL_ERROR) {
+ push_warning_printf(thd, l, code, "InnoDB: %s", str);
+ }
+
+ va_end(args);
+ free(str);
+
+ if (level == IB_LOG_LEVEL_FATAL) {
+ ut_error;
+ }
+}
+
+/******************************************************************//**
+Use this when the args are first converted to a formatted string and then
+passed to the format string from errmsg-utf8.txt. The error message format
+must be: "Some string ... %s".
+
+Push a warning message to the client, it is a wrapper around:
+
+void push_warning_printf(
+ THD *thd, Sql_condition::enum_warning_level level,
+ uint code, const char *format, ...);
+*/
+UNIV_INTERN
+void
+ib_errf(
+/*====*/
+ THD* thd, /*!< in/out: session */
+ ib_log_level_t level, /*!< in: warning level */
+ ib_uint32_t code, /*!< MySQL error code */
+ const char* format, /*!< printf format */
+ ...) /*!< Args */
+{
+ char* str;
+ va_list args;
+
+ /* If the caller wants to push a message to the client then
+ the caller must pass a valid session handle. */
+
+ ut_a(thd != 0);
+ ut_a(format != 0);
+
+ va_start(args, format);
+
+#ifdef __WIN__
+ int size = _vscprintf(format, args) + 1;
+ str = static_cast<char*>(malloc(size));
+ str[size - 1] = 0x0;
+ vsnprintf(str, size, format, args);
+#elif HAVE_VASPRINTF
+ (void) vasprintf(&str, format, args);
+#else
+ /* Use a fixed length string. */
+ str = static_cast<char*>(malloc(BUFSIZ));
+ my_vsnprintf(str, BUFSIZ, format, args);
+#endif /* __WIN__ */
+
+ ib_senderrf(thd, level, code, str);
+
+ va_end(args);
+ free(str);
+}
+
+/******************************************************************//**
+Write a message to the MySQL log, prefixed with "InnoDB: " */
+UNIV_INTERN
+void
+ib_logf(
+/*====*/
+ ib_log_level_t level, /*!< in: warning level */
+ const char* format, /*!< printf format */
+ ...) /*!< Args */
+{
+ char* str;
+ va_list args;
+
+ va_start(args, format);
+
+#ifdef __WIN__
+ int size = _vscprintf(format, args) + 1;
+ str = static_cast<char*>(malloc(size));
+ str[size - 1] = 0x0;
+ vsnprintf(str, size, format, args);
+#elif HAVE_VASPRINTF
+ (void) vasprintf(&str, format, args);
+#else
+ /* Use a fixed length string. */
+ str = static_cast<char*>(malloc(BUFSIZ));
+ my_vsnprintf(str, BUFSIZ, format, args);
+#endif /* __WIN__ */
+
+ switch(level) {
+ case IB_LOG_LEVEL_INFO:
+ sql_print_information("InnoDB: %s", str);
+ break;
+ case IB_LOG_LEVEL_WARN:
+ sql_print_warning("InnoDB: %s", str);
+ break;
+ case IB_LOG_LEVEL_ERROR:
+ sql_print_error("InnoDB: %s", str);
+ break;
+ case IB_LOG_LEVEL_FATAL:
+ sql_print_error("InnoDB: %s", str);
+ break;
+ }
+
+ va_end(args);
+ free(str);
+
+ if (level == IB_LOG_LEVEL_FATAL) {
+ ut_error;
+ }
+}
diff --git a/storage/innobase/handler/ha_innodb.h b/storage/innobase/handler/ha_innodb.h
index e56a1ec52e3..ece9f7cf58a 100644
--- a/storage/innobase/handler/ha_innodb.h
+++ b/storage/innobase/handler/ha_innodb.h
@@ -27,14 +27,14 @@ this program; if not, write to the Free Software Foundation, Inc.,
/* Structure defines translation table between mysql index and innodb
index structures */
-typedef struct innodb_idx_translate_struct {
+struct innodb_idx_translate_t {
ulint index_count; /*!< number of valid index entries
in the index_mapping array */
ulint array_size; /*!< array size of index_mapping */
dict_index_t** index_mapping; /*!< index pointer array directly
maps to index in Innodb from MySQL
array index */
-} innodb_idx_translate_t;
+};
/** InnoDB table share */
@@ -53,15 +53,8 @@ typedef struct st_innobase_share {
} INNOBASE_SHARE;
-/** InnoDB B-tree index */
-struct dict_index_struct;
-/** Prebuilt structures in an Innobase table handle used within MySQL */
-struct row_prebuilt_struct;
-
-/** InnoDB B-tree index */
-typedef struct dict_index_struct dict_index_t;
-/** Prebuilt structures in an Innobase table handle used within MySQL */
-typedef struct row_prebuilt_struct row_prebuilt_t;
+/** Prebuilt structures in an InnoDB table handle used within MySQL */
+struct row_prebuilt_t;
/** The class defining a handle to an Innodb table */
class ha_innobase: public handler
@@ -101,15 +94,13 @@ class ha_innobase: public handler
void update_thd();
int change_active_index(uint keynr);
int general_fetch(uchar* buf, uint direction, uint match_mode);
- ulint innobase_lock_autoinc();
+ dberr_t innobase_lock_autoinc();
ulonglong innobase_peek_autoinc();
- ulint innobase_set_max_autoinc(ulonglong auto_inc);
- ulint innobase_reset_autoinc(ulonglong auto_inc);
- ulint innobase_get_autoinc(ulonglong* value);
- ulint innobase_update_autoinc(ulonglong auto_inc);
+ dberr_t innobase_set_max_autoinc(ulonglong auto_inc);
+ dberr_t innobase_reset_autoinc(ulonglong auto_inc);
+ dberr_t innobase_get_autoinc(ulonglong* value);
void innobase_initialize_autoinc();
dict_index_t* innobase_get_index(uint keynr);
- int info_low(uint flag, dict_stats_upd_option_t stats_upd_option);
/* Init values for the class: */
public:
@@ -132,9 +123,11 @@ class ha_innobase: public handler
const key_map* keys_to_use_for_scanning();
int open(const char *name, int mode, uint test_if_locked);
+ handler* clone(const char *name, MEM_ROOT *mem_root);
int close(void);
double scan_time();
double read_time(uint index, uint ranges, ha_rows rows);
+ longlong get_memory_buffer_size() const;
int write_row(uchar * buf);
int update_row(const uchar * old_data, uchar * new_data);
@@ -182,6 +175,13 @@ class ha_innobase: public handler
ha_rows estimate_rows_upper_bound();
void update_create_info(HA_CREATE_INFO* create_info);
+ int parse_table_name(const char*name,
+ HA_CREATE_INFO* create_info,
+ ulint flags,
+ ulint flags2,
+ char* norm_name,
+ char* temp_path,
+ char* remote_path);
int create(const char *name, register TABLE *form,
HA_CREATE_INFO *create_info);
int truncate();
@@ -219,13 +219,76 @@ class ha_innobase: public handler
static ulonglong get_mysql_bin_log_pos();
bool primary_key_is_clustered();
int cmp_ref(const uchar *ref1, const uchar *ref2);
- /** Fast index creation (smart ALTER TABLE) @see handler0alter.cc @{ */
- int add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys,
- handler_add_index **add);
- int final_add_index(handler_add_index *add, bool commit);
- int prepare_drop_index(TABLE *table_arg, uint *key_num,
- uint num_of_keys);
- int final_drop_index(TABLE *table_arg);
+ /** On-line ALTER TABLE interface @see handler0alter.cc @{ */
+
+ /** Check if InnoDB supports a particular alter table in-place
+ @param altered_table TABLE object for new version of table.
+ @param ha_alter_info Structure describing changes to be done
+ by ALTER TABLE and holding data used during in-place alter.
+
+ @retval HA_ALTER_INPLACE_NOT_SUPPORTED Not supported
+ @retval HA_ALTER_INPLACE_NO_LOCK Supported
+ @retval HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE
+ Supported, but requires lock
+ during main phase and exclusive
+ lock during prepare phase.
+ @retval HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE
+ Supported, prepare phase
+ requires exclusive lock.
+ */
+ enum_alter_inplace_result check_if_supported_inplace_alter(
+ TABLE* altered_table,
+ Alter_inplace_info* ha_alter_info);
+ /** Allows InnoDB to update internal structures with concurrent
+ writes blocked (provided that check_if_supported_inplace_alter()
+ did not return HA_ALTER_INPLACE_NO_LOCK).
+ This will be invoked before inplace_alter_table().
+
+ @param altered_table TABLE object for new version of table.
+ @param ha_alter_info Structure describing changes to be done
+ by ALTER TABLE and holding data used during in-place alter.
+
+ @retval true Failure
+ @retval false Success
+ */
+ bool prepare_inplace_alter_table(
+ TABLE* altered_table,
+ Alter_inplace_info* ha_alter_info);
+
+ /** Alter the table structure in-place with operations
+ specified using HA_ALTER_FLAGS and Alter_inplace_information.
+ The level of concurrency allowed during this operation depends
+ on the return value from check_if_supported_inplace_alter().
+
+ @param altered_table TABLE object for new version of table.
+ @param ha_alter_info Structure describing changes to be done
+ by ALTER TABLE and holding data used during in-place alter.
+
+ @retval true Failure
+ @retval false Success
+ */
+ bool inplace_alter_table(
+ TABLE* altered_table,
+ Alter_inplace_info* ha_alter_info);
+
+ /** Commit or rollback the changes made during
+ prepare_inplace_alter_table() and inplace_alter_table() inside
+ the storage engine. Note that the allowed level of concurrency
+ during this operation will be the same as for
+ inplace_alter_table() and thus might be higher than during
+ prepare_inplace_alter_table(). (E.g concurrent writes were
+ blocked during prepare, but might not be during commit).
+ @param altered_table TABLE object for new version of table.
+ @param ha_alter_info Structure describing changes to be done
+ by ALTER TABLE and holding data used during in-place alter.
+ @param commit true => Commit, false => Rollback.
+ @retval true Failure
+ @retval false Success
+ */
+ bool commit_inplace_alter_table(
+ TABLE* altered_table,
+ Alter_inplace_info* ha_alter_info,
+ bool commit);
/** @} */
bool check_if_incompatible_data(HA_CREATE_INFO *info,
uint table_changes);
@@ -241,6 +304,8 @@ private:
@see build_template() */
inline void reset_template();
+ int info_low(uint, bool);
+
public:
/** @name Multi Range Read interface @{ */
/** Initialize multi range read @see DsMrr_impl::dsmrr_init
@@ -283,15 +348,12 @@ public:
* @param flags
* @param cost
*/
- ha_rows multi_range_read_info(uint keyno,
- uint n_ranges, uint keys,
- uint key_parts,
- uint* bufsz, uint* mrr_mode,
+ ha_rows multi_range_read_info(uint keyno, uint n_ranges, uint keys,
+ uint key_parts, uint* bufsz, uint* flags,
Cost_estimate* cost);
- int multi_range_read_explain_info(uint mrr_mode,
- char *str, size_t size);
-
+ int multi_range_read_explain_info(uint mrr_mode, char *str,
+ size_t size);
/** Attempt to push down an index condition.
* @param[in] keyno MySQL key number
* @param[in] idx_cond Index condition to be checked
@@ -364,6 +426,27 @@ bool thd_binlog_filter_ok(const MYSQL_THD thd);
*/
bool thd_sqlcom_can_generate_row_events(const MYSQL_THD thd);
+/**
+ Gets information on the durability property requested by
+ a thread.
+ @param thd Thread handle
+ @return a durability property.
+*/
+enum durability_properties thd_get_durability_property(const MYSQL_THD thd);
+
+/** Get the auto_increment_offset auto_increment_increment.
+@param thd Thread object
+@param off auto_increment_offset
+@param inc auto_increment_increment */
+void thd_get_autoinc(const MYSQL_THD thd, ulong* off, ulong* inc)
+__attribute__((nonnull));
+
+/** Is strict sql_mode set.
+@param thd Thread object
+@return True if sql_mode has strict mode (all or trans), false otherwise.
+*/
+bool thd_is_strict_mode(const MYSQL_THD thd)
+__attribute__((nonnull));
} /* extern "C" */
/** Get the file name and position of the MySQL binlog corresponding to the
@@ -371,7 +454,7 @@ bool thd_sqlcom_can_generate_row_events(const MYSQL_THD thd);
*/
extern void mysql_bin_log_commit_pos(THD *thd, ulonglong *out_pos, const char **out_file);
-typedef struct trx_struct trx_t;
+struct trx_t;
extern const struct _ft_vft ft_vft_result;
@@ -379,23 +462,11 @@ extern const struct _ft_vft ft_vft_result;
typedef struct new_ft_info
{
struct _ft_vft *please;
+ struct _ft_vft_ext *could_you;
row_prebuilt_t* ft_prebuilt;
fts_result_t* ft_result;
} NEW_FT_INFO;
-/********************************************************************//**
-@file handler/ha_innodb.h
-Converts an InnoDB error code to a MySQL error code and also tells to MySQL
-about a possible transaction rollback inside InnoDB caused by a lock wait
-timeout or a deadlock.
-@return MySQL error code */
-int
-convert_error_code_to_mysql(
-/*========================*/
- int error, /*!< in: InnoDB error code */
- ulint flags, /*!< in: InnoDB table flags, or 0 */
- MYSQL_THD thd); /*!< in: user thread handle or NULL */
-
/*********************************************************************//**
Allocates an InnoDB transaction for a MySQL handler object.
@return InnoDB transaction handle */
@@ -410,13 +481,50 @@ system default primary index name 'GEN_CLUST_INDEX'. If a name
matches, this function pushes an warning message to the client,
and returns true.
@return true if the index name matches the reserved name */
+UNIV_INTERN
bool
innobase_index_name_is_reserved(
/*============================*/
THD* thd, /*!< in/out: MySQL connection */
const KEY* key_info, /*!< in: Indexes to be created */
- ulint num_of_keys); /*!< in: Number of indexes to
+ ulint num_of_keys) /*!< in: Number of indexes to
be created. */
+ __attribute__((nonnull, warn_unused_result));
+
+/*****************************************************************//**
+Determines InnoDB table flags.
+@retval true if successful, false if error */
+UNIV_INTERN
+bool
+innobase_table_flags(
+/*=================*/
+ const TABLE* form, /*!< in: table */
+ const HA_CREATE_INFO* create_info, /*!< in: information
+ on table columns and indexes */
+ THD* thd, /*!< in: connection */
+ bool use_tablespace, /*!< in: whether to create
+ outside system tablespace */
+ ulint* flags, /*!< out: DICT_TF flags */
+ ulint* flags2) /*!< out: DICT_TF2 flags */
+ __attribute__((nonnull, warn_unused_result));
+
+/*****************************************************************//**
+Validates the create options. We may build on this function
+in future. For now, it checks two specifiers:
+KEY_BLOCK_SIZE and ROW_FORMAT
+If innodb_strict_mode is not set then this function is a no-op
+@return NULL if valid, string if not. */
+UNIV_INTERN
+const char*
+create_options_are_invalid(
+/*=======================*/
+ THD* thd, /*!< in: connection thread. */
+ TABLE* form, /*!< in: information on table
+ columns and indexes */
+ HA_CREATE_INFO* create_info, /*!< in: create info. */
+ bool use_tablespace) /*!< in: srv_file_per_table */
+ __attribute__((nonnull, warn_unused_result));
+
/*********************************************************************//**
Retrieve the FTS Relevance Ranking result for doc with doc_id
of prebuilt->fts_doc_id
@@ -434,7 +542,7 @@ of prebuilt->fts_doc_id
UNIV_INTERN
float
innobase_fts_find_ranking(
-/*==========================*/
+/*======================*/
FT_INFO* fts_hdl, /*!< in: FTS handler */
uchar* record, /*!< in: Unused */
uint len); /*!< in: Unused */
@@ -443,24 +551,20 @@ Free the memory for the FTS handler */
UNIV_INTERN
void
innobase_fts_close_ranking(
-/*==========================*/
- FT_INFO* fts_hdl); /*!< in: FTS handler */
-/*********************************************************************//**
-Free the memory for the FTS handler */
-void
-innobase_fts_close_ranking(
-/*==========================*/
- FT_INFO* fts_hdl); /*!< in: FTS handler */
+/*=======================*/
+ FT_INFO* fts_hdl) /*!< in: FTS handler */
+ __attribute__((nonnull));
/*****************************************************************//**
Initialize the table FTS stopword list
-@return TRUE is succeed */
+@return TRUE if success */
UNIV_INTERN
ibool
innobase_fts_load_stopword(
/*=======================*/
dict_table_t* table, /*!< in: Table has the FTS */
trx_t* trx, /*!< in: transaction */
- THD* thd); /*!< in: current thread */
+ THD* thd) /*!< in: current thread */
+ __attribute__((nonnull(1,3), warn_unused_result));
/** Some defines for innobase_fts_check_doc_id_index() return value */
enum fts_doc_id_index_enum {
@@ -472,15 +576,17 @@ enum fts_doc_id_index_enum {
/*******************************************************************//**
Check whether the table has a unique index with FTS_DOC_ID_INDEX_NAME
on the Doc ID column.
-@return FTS_EXIST_DOC_ID_INDEX if there exists the FTS_DOC_ID index,
-FTS_INCORRECT_DOC_ID_INDEX if the FTS_DOC_ID index is of wrong format */
+@return the status of the FTS_DOC_ID index */
UNIV_INTERN
enum fts_doc_id_index_enum
innobase_fts_check_doc_id_index(
/*============================*/
- dict_table_t* table, /*!< in: table definition */
- ulint* fts_doc_col_no);/*!< out: The column number for
- Doc ID */
+ const dict_table_t* table, /*!< in: table definition */
+ const TABLE* altered_table, /*!< in: MySQL table
+ that is being altered */
+ ulint* fts_doc_col_no) /*!< out: The column number for
+ Doc ID */
+ __attribute__((warn_unused_result));
/*******************************************************************//**
Check whether the table has a unique index with FTS_DOC_ID_INDEX_NAME
@@ -492,4 +598,59 @@ enum fts_doc_id_index_enum
innobase_fts_check_doc_id_index_in_def(
/*===================================*/
ulint n_key, /*!< in: Number of keys */
- KEY* key_info); /*!< in: Key definition */
+ const KEY* key_info) /*!< in: Key definitions */
+ __attribute__((nonnull, warn_unused_result));
+
+/***********************************************************************
+@return version of the extended FTS API */
+uint
+innobase_fts_get_version();
+
+/***********************************************************************
+@return Which part of the extended FTS API is supported */
+ulonglong
+innobase_fts_flags();
+
+/***********************************************************************
+Find and Retrieve the FTS doc_id for the current result row
+@return the document ID */
+ulonglong
+innobase_fts_retrieve_docid(
+/*============================*/
+ FT_INFO_EXT* fts_hdl); /*!< in: FTS handler */
+
+/***********************************************************************
+Find and retrieve the size of the current result
+@return number of matching rows */
+ulonglong
+innobase_fts_count_matches(
+/*============================*/
+ FT_INFO_EXT* fts_hdl); /*!< in: FTS handler */
+
+/** "GEN_CLUST_INDEX" is the name reserved for InnoDB default
+system clustered index when there is no primary key. */
+extern const char innobase_index_reserve_name[];
+
+/*********************************************************************//**
+Copy table flags from MySQL's HA_CREATE_INFO into an InnoDB table object.
+Those flags are stored in .frm file and end up in the MySQL table object,
+but are frequently used inside InnoDB so we keep their copies into the
+InnoDB table object. */
+UNIV_INTERN
+void
+innobase_copy_frm_flags_from_create_info(
+/*=====================================*/
+ dict_table_t* innodb_table, /*!< in/out: InnoDB table */
+ HA_CREATE_INFO* create_info); /*!< in: create info */
+
+/*********************************************************************//**
+Copy table flags from MySQL's TABLE_SHARE into an InnoDB table object.
+Those flags are stored in .frm file and end up in the MySQL table object,
+but are frequently used inside InnoDB so we keep their copies into the
+InnoDB table object. */
+UNIV_INTERN
+void
+innobase_copy_frm_flags_from_table_share(
+/*=====================================*/
+ dict_table_t* innodb_table, /*!< in/out: InnoDB table */
+ TABLE_SHARE* table_share); /*!< in: table share */
diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc
index e1a10ade9ad..31d2972e32e 100644
--- a/storage/innobase/handler/handler0alter.cc
+++ b/storage/innobase/handler/handler0alter.cc
@@ -23,11 +23,20 @@ Smart ALTER TABLE
#include <unireg.h>
#include <mysqld_error.h>
-#include <sql_lex.h> // SQLCOM_CREATE_INDEX
+#include <log.h>
+#include <debug_sync.h>
#include <innodb_priv.h>
+#include <sql_alter.h>
+#include <sql_class.h>
+#include "dict0crea.h"
+#include "dict0dict.h"
+#include "dict0priv.h"
#include "dict0stats.h"
+#include "dict0stats_bg.h"
#include "log0log.h"
+#include "rem0types.h"
+#include "row0log.h"
#include "row0merge.h"
#include "srv0srv.h"
#include "trx0trx.h"
@@ -36,9 +45,995 @@ Smart ALTER TABLE
#include "handler0alter.h"
#include "srv0mon.h"
#include "fts0priv.h"
+#include "pars0pars.h"
#include "ha_innodb.h"
+/** Operations for creating an index in place */
+static const Alter_inplace_info::HA_ALTER_FLAGS INNOBASE_ONLINE_CREATE
+ = Alter_inplace_info::ADD_INDEX
+ | Alter_inplace_info::ADD_UNIQUE_INDEX;
+
+/** Operations for rebuilding a table in place */
+static const Alter_inplace_info::HA_ALTER_FLAGS INNOBASE_INPLACE_REBUILD
+ = Alter_inplace_info::ADD_PK_INDEX
+ | Alter_inplace_info::DROP_PK_INDEX
+ | Alter_inplace_info::CHANGE_CREATE_OPTION
+ | Alter_inplace_info::ALTER_COLUMN_NULLABLE
+ | Alter_inplace_info::ALTER_COLUMN_NOT_NULLABLE
+ | Alter_inplace_info::ALTER_COLUMN_ORDER
+ | Alter_inplace_info::DROP_COLUMN
+ | Alter_inplace_info::ADD_COLUMN
+ /*
+ | Alter_inplace_info::ALTER_COLUMN_TYPE
+ | Alter_inplace_info::ALTER_COLUMN_EQUAL_PACK_LENGTH
+ */
+ ;
+
+/** Operations for creating indexes or rebuilding a table */
+static const Alter_inplace_info::HA_ALTER_FLAGS INNOBASE_INPLACE_CREATE
+ = INNOBASE_ONLINE_CREATE | INNOBASE_INPLACE_REBUILD;
+
+/** Operations for altering a table that InnoDB does not care about */
+static const Alter_inplace_info::HA_ALTER_FLAGS INNOBASE_INPLACE_IGNORE
+ = Alter_inplace_info::ALTER_COLUMN_DEFAULT
+ | Alter_inplace_info::ALTER_COLUMN_COLUMN_FORMAT
+ | Alter_inplace_info::ALTER_COLUMN_STORAGE_TYPE
+ | Alter_inplace_info::ALTER_RENAME;
+
+/** Operations that InnoDB can perform online */
+static const Alter_inplace_info::HA_ALTER_FLAGS INNOBASE_ONLINE_OPERATIONS
+ = INNOBASE_INPLACE_IGNORE
+ | INNOBASE_ONLINE_CREATE
+ | Alter_inplace_info::DROP_INDEX
+ | Alter_inplace_info::DROP_UNIQUE_INDEX
+ | Alter_inplace_info::DROP_FOREIGN_KEY
+ | Alter_inplace_info::ALTER_COLUMN_NAME
+ | Alter_inplace_info::ADD_FOREIGN_KEY;
+
+/* Report an InnoDB error to the client by invoking my_error(). */
+static UNIV_COLD __attribute__((nonnull))
+void
+my_error_innodb(
+/*============*/
+ dberr_t error, /*!< in: InnoDB error code */
+ const char* table, /*!< in: table name */
+ ulint flags) /*!< in: table flags */
+{
+ switch (error) {
+ case DB_MISSING_HISTORY:
+ my_error(ER_TABLE_DEF_CHANGED, MYF(0));
+ break;
+ case DB_RECORD_NOT_FOUND:
+ my_error(ER_KEY_NOT_FOUND, MYF(0), table);
+ break;
+ case DB_DEADLOCK:
+ my_error(ER_LOCK_DEADLOCK, MYF(0));
+ break;
+ case DB_LOCK_WAIT_TIMEOUT:
+ my_error(ER_LOCK_WAIT_TIMEOUT, MYF(0));
+ break;
+ case DB_INTERRUPTED:
+ my_error(ER_QUERY_INTERRUPTED, MYF(0));
+ break;
+ case DB_OUT_OF_MEMORY:
+ my_error(ER_OUT_OF_RESOURCES, MYF(0));
+ break;
+ case DB_OUT_OF_FILE_SPACE:
+ my_error(ER_RECORD_FILE_FULL, MYF(0), table);
+ break;
+ case DB_TOO_BIG_INDEX_COL:
+ my_error(ER_INDEX_COLUMN_TOO_LONG, MYF(0),
+ DICT_MAX_FIELD_LEN_BY_FORMAT_FLAG(flags));
+ break;
+ case DB_TOO_MANY_CONCURRENT_TRXS:
+ my_error(ER_TOO_MANY_CONCURRENT_TRXS, MYF(0));
+ break;
+ case DB_LOCK_TABLE_FULL:
+ my_error(ER_LOCK_TABLE_FULL, MYF(0));
+ break;
+ case DB_UNDO_RECORD_TOO_BIG:
+ my_error(ER_UNDO_RECORD_TOO_BIG, MYF(0));
+ break;
+ case DB_CORRUPTION:
+ my_error(ER_NOT_KEYFILE, MYF(0), table);
+ break;
+ case DB_TOO_BIG_RECORD:
+ my_error(ER_TOO_BIG_ROWSIZE, MYF(0),
+ page_get_free_space_of_empty(
+ flags & DICT_TF_COMPACT) / 2);
+ break;
+ case DB_INVALID_NULL:
+ /* TODO: report the row, as we do for DB_DUPLICATE_KEY */
+ my_error(ER_INVALID_USE_OF_NULL, MYF(0));
+ break;
+#ifdef UNIV_DEBUG
+ case DB_SUCCESS:
+ case DB_DUPLICATE_KEY:
+ case DB_TABLESPACE_EXISTS:
+ case DB_ONLINE_LOG_TOO_BIG:
+ /* These codes should not be passed here. */
+ ut_error;
+#endif /* UNIV_DEBUG */
+ default:
+ my_error(ER_GET_ERRNO, MYF(0), error);
+ break;
+ }
+}
+
+/** Determine if fulltext indexes exist in a given table.
+@param table_share MySQL table
+@return whether fulltext indexes exist on the table */
+static
+bool
+innobase_fulltext_exist(
+/*====================*/
+ const TABLE_SHARE* table_share)
+{
+ for (uint i = 0; i < table_share->keys; i++) {
+ if (table_share->key_info[i].flags & HA_FULLTEXT) {
+ return(true);
+ }
+ }
+
+ return(false);
+}
+
+/*******************************************************************//**
+Determine if ALTER TABLE needs to rebuild the table.
+@param ha_alter_info the DDL operation
+@return whether it is necessary to rebuild the table */
+static __attribute__((nonnull, warn_unused_result))
+bool
+innobase_need_rebuild(
+/*==================*/
+ const Alter_inplace_info* ha_alter_info)
+{
+ if (ha_alter_info->handler_flags
+ == Alter_inplace_info::CHANGE_CREATE_OPTION
+ && !(ha_alter_info->create_info->used_fields
+ & (HA_CREATE_USED_ROW_FORMAT
+ | HA_CREATE_USED_KEY_BLOCK_SIZE))) {
+ /* Any other CHANGE_CREATE_OPTION than changing
+ ROW_FORMAT or KEY_BLOCK_SIZE is ignored. */
+ return(false);
+ }
+
+ return(!!(ha_alter_info->handler_flags & INNOBASE_INPLACE_REBUILD));
+}
+
+/** Check if InnoDB supports a particular alter table in-place
+@param altered_table TABLE object for new version of table.
+@param ha_alter_info Structure describing changes to be done
+by ALTER TABLE and holding data used during in-place alter.
+
+@retval HA_ALTER_INPLACE_NOT_SUPPORTED Not supported
+@retval HA_ALTER_INPLACE_NO_LOCK Supported
+@retval HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE Supported, but requires
+lock during main phase and exclusive lock during prepare phase.
+@retval HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE Supported, prepare phase
+requires exclusive lock (any transactions that have accessed the table
+must commit or roll back first, and no transactions can access the table
+while prepare_inplace_alter_table() is executing)
+*/
+UNIV_INTERN
+enum_alter_inplace_result
+ha_innobase::check_if_supported_inplace_alter(
+/*==========================================*/
+ TABLE* altered_table,
+ Alter_inplace_info* ha_alter_info)
+{
+ DBUG_ENTER("check_if_supported_inplace_alter");
+
+ if (srv_read_only_mode) {
+ ha_alter_info->unsupported_reason =
+ innobase_get_err_msg(ER_READ_ONLY_MODE);
+ DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
+ } else if (srv_created_new_raw || srv_force_recovery) {
+ ha_alter_info->unsupported_reason =
+ innobase_get_err_msg(ER_READ_ONLY_MODE);
+ DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
+ }
+
+ if (altered_table->s->fields > REC_MAX_N_USER_FIELDS) {
+ /* Deny the inplace ALTER TABLE. MySQL will try to
+ re-create the table and ha_innobase::create() will
+ return an error too. This is how we effectively
+ deny adding too many columns to a table. */
+ ha_alter_info->unsupported_reason =
+ innobase_get_err_msg(ER_TOO_MANY_FIELDS);
+ DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
+ }
+
+ update_thd();
+ trx_search_latch_release_if_reserved(prebuilt->trx);
+
+ if (ha_alter_info->handler_flags
+ & ~(INNOBASE_ONLINE_OPERATIONS | INNOBASE_INPLACE_REBUILD)) {
+ if (ha_alter_info->handler_flags
+ & (Alter_inplace_info::ALTER_COLUMN_EQUAL_PACK_LENGTH
+ | Alter_inplace_info::ALTER_COLUMN_TYPE))
+ ha_alter_info->unsupported_reason = innobase_get_err_msg(
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_COLUMN_TYPE);
+ DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
+ }
+
+ /* Only support online add foreign key constraint when
+ check_foreigns is turned off */
+ if ((ha_alter_info->handler_flags
+ & Alter_inplace_info::ADD_FOREIGN_KEY)
+ && prebuilt->trx->check_foreigns) {
+ ha_alter_info->unsupported_reason = innobase_get_err_msg(
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_CHECK);
+ DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
+ }
+
+ if (!(ha_alter_info->handler_flags & ~INNOBASE_INPLACE_IGNORE)) {
+ DBUG_RETURN(HA_ALTER_INPLACE_NO_LOCK);
+ }
+
+ /* Only support NULL -> NOT NULL change if strict table sql_mode
+ is set. Fall back to COPY for conversion if not strict tables.
+ In-Place will fail with an error when trying to convert
+ NULL to a NOT NULL value. */
+ if ((ha_alter_info->handler_flags
+ & Alter_inplace_info::ALTER_COLUMN_NOT_NULLABLE)
+ && !thd_is_strict_mode(user_thd)) {
+ ha_alter_info->unsupported_reason = innobase_get_err_msg(
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOT_NULL);
+ DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
+ }
+
+ /* InnoDB cannot IGNORE when creating unique indexes. IGNORE
+ should silently delete some duplicate rows. Our inplace_alter
+ code will not delete anything from existing indexes. */
+ if (ha_alter_info->ignore
+ && (ha_alter_info->handler_flags
+ & (Alter_inplace_info::ADD_PK_INDEX
+ | Alter_inplace_info::ADD_UNIQUE_INDEX))) {
+ ha_alter_info->unsupported_reason = innobase_get_err_msg(
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_IGNORE);
+ DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
+ }
+
+ /* DROP PRIMARY KEY is only allowed in combination with ADD
+ PRIMARY KEY. */
+ if ((ha_alter_info->handler_flags
+ & (Alter_inplace_info::ADD_PK_INDEX
+ | Alter_inplace_info::DROP_PK_INDEX))
+ == Alter_inplace_info::DROP_PK_INDEX) {
+ ha_alter_info->unsupported_reason = innobase_get_err_msg(
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOPK);
+ DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
+ }
+
+ /* ADD FOREIGN KEY does not currently work properly in combination
+ with renaming columns. (Bug#14105491) */
+ if ((ha_alter_info->handler_flags
+ & (Alter_inplace_info::ADD_FOREIGN_KEY
+ | Alter_inplace_info::ALTER_COLUMN_NAME))
+ == (Alter_inplace_info::ADD_FOREIGN_KEY
+ | Alter_inplace_info::ALTER_COLUMN_NAME)) {
+ ha_alter_info->unsupported_reason = innobase_get_err_msg(
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_RENAME);
+ DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
+ }
+
+ /* DROP FOREIGN KEY may not currently work properly in combination
+ with other operations. (Work-around for 5.6.10 only.) */
+ if ((ha_alter_info->handler_flags
+ & Alter_inplace_info::DROP_FOREIGN_KEY)
+ && (ha_alter_info->handler_flags
+ & (Alter_inplace_info::DROP_FOREIGN_KEY
+ | INNOBASE_INPLACE_REBUILD))
+ != Alter_inplace_info::DROP_FOREIGN_KEY) {
+ DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
+ }
+
+ /* If a column change from NOT NULL to NULL,
+ and there's a implict pk on this column. the
+ table should be rebuild. The change should
+ only go through the "Copy" method.*/
+ if ((ha_alter_info->handler_flags
+ & Alter_inplace_info::ALTER_COLUMN_NULLABLE)) {
+ uint primary_key = altered_table->s->primary_key;
+
+ /* See if MYSQL table has no pk but we do.*/
+ if (UNIV_UNLIKELY(primary_key >= MAX_KEY)
+ && !row_table_got_default_clust_index(prebuilt->table)) {
+ ha_alter_info->unsupported_reason = innobase_get_err_msg(
+ ER_PRIMARY_CANT_HAVE_NULL);
+ DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
+ }
+ }
+
+ /* We should be able to do the operation in-place.
+ See if we can do it online (LOCK=NONE). */
+ bool online = true;
+
+ List_iterator_fast<Create_field> cf_it(
+ ha_alter_info->alter_info->create_list);
+
+ /* Fix the key parts. */
+ for (KEY* new_key = ha_alter_info->key_info_buffer;
+ new_key < ha_alter_info->key_info_buffer
+ + ha_alter_info->key_count;
+ new_key++) {
+ for (KEY_PART_INFO* key_part = new_key->key_part;
+ key_part < new_key->key_part + new_key->user_defined_key_parts;
+ key_part++) {
+ const Create_field* new_field;
+
+ DBUG_ASSERT(key_part->fieldnr
+ < altered_table->s->fields);
+
+ cf_it.rewind();
+ for (uint fieldnr = 0; (new_field = cf_it++);
+ fieldnr++) {
+ if (fieldnr == key_part->fieldnr) {
+ break;
+ }
+ }
+
+ DBUG_ASSERT(new_field);
+
+ key_part->field = altered_table->field[
+ key_part->fieldnr];
+ /* In some special cases InnoDB emits "false"
+ duplicate key errors with NULL key values. Let
+ us play safe and ensure that we can correctly
+ print key values even in such cases .*/
+ key_part->null_offset = key_part->field->null_offset();
+ key_part->null_bit = key_part->field->null_bit;
+
+ if (new_field->field) {
+ /* This is an existing column. */
+ continue;
+ }
+
+ /* This is an added column. */
+ DBUG_ASSERT(ha_alter_info->handler_flags
+ & Alter_inplace_info::ADD_COLUMN);
+
+ /* We cannot replace a hidden FTS_DOC_ID
+ with a user-visible FTS_DOC_ID. */
+ if (prebuilt->table->fts
+ && innobase_fulltext_exist(altered_table->s)
+ && !my_strcasecmp(
+ system_charset_info,
+ key_part->field->field_name,
+ FTS_DOC_ID_COL_NAME)) {
+ ha_alter_info->unsupported_reason = innobase_get_err_msg(
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_HIDDEN_FTS);
+ DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
+ }
+
+ DBUG_ASSERT((MTYP_TYPENR(key_part->field->unireg_check)
+ == Field::NEXT_NUMBER)
+ == !!(key_part->field->flags
+ & AUTO_INCREMENT_FLAG));
+
+ if (key_part->field->flags & AUTO_INCREMENT_FLAG) {
+ /* We cannot assign an AUTO_INCREMENT
+ column values during online ALTER. */
+ DBUG_ASSERT(key_part->field == altered_table
+ -> found_next_number_field);
+ ha_alter_info->unsupported_reason = innobase_get_err_msg(
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_AUTOINC);
+ online = false;
+ }
+ }
+ }
+
+ DBUG_ASSERT(!prebuilt->table->fts || prebuilt->table->fts->doc_col
+ <= table->s->fields);
+ DBUG_ASSERT(!prebuilt->table->fts || prebuilt->table->fts->doc_col
+ < dict_table_get_n_user_cols(prebuilt->table));
+
+ if (prebuilt->table->fts
+ && innobase_fulltext_exist(altered_table->s)) {
+ /* FULLTEXT indexes are supposed to remain. */
+ /* Disallow DROP INDEX FTS_DOC_ID_INDEX */
+
+ for (uint i = 0; i < ha_alter_info->index_drop_count; i++) {
+ if (!my_strcasecmp(
+ system_charset_info,
+ ha_alter_info->index_drop_buffer[i]->name,
+ FTS_DOC_ID_INDEX_NAME)) {
+ ha_alter_info->unsupported_reason = innobase_get_err_msg(
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_CHANGE_FTS);
+ DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
+ }
+ }
+
+ /* InnoDB can have a hidden FTS_DOC_ID_INDEX on a
+ visible FTS_DOC_ID column as well. Prevent dropping or
+ renaming the FTS_DOC_ID. */
+
+ for (Field** fp = table->field; *fp; fp++) {
+ if (!((*fp)->flags
+ & (FIELD_IS_RENAMED | FIELD_IS_DROPPED))) {
+ continue;
+ }
+
+ if (!my_strcasecmp(
+ system_charset_info,
+ (*fp)->field_name,
+ FTS_DOC_ID_COL_NAME)) {
+ ha_alter_info->unsupported_reason = innobase_get_err_msg(
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_CHANGE_FTS);
+ DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
+ }
+ }
+ }
+
+ prebuilt->trx->will_lock++;
+
+ if (!online) {
+ /* We already determined that only a non-locking
+ operation is possible. */
+ } else if (((ha_alter_info->handler_flags
+ & Alter_inplace_info::ADD_PK_INDEX)
+ || innobase_need_rebuild(ha_alter_info))
+ && (innobase_fulltext_exist(altered_table->s)
+ || (prebuilt->table->flags2
+ & DICT_TF2_FTS_HAS_DOC_ID))) {
+ /* Refuse to rebuild the table online, if
+ fulltext indexes are to survive the rebuild,
+ or if the table contains a hidden FTS_DOC_ID column. */
+ online = false;
+ /* If the table already contains fulltext indexes,
+ refuse to rebuild the table natively altogether. */
+ if (prebuilt->table->fts) {
+ ha_alter_info->unsupported_reason = innobase_get_err_msg(
+ ER_INNODB_FT_LIMIT);
+ DBUG_RETURN(HA_ALTER_INPLACE_NOT_SUPPORTED);
+ }
+ ha_alter_info->unsupported_reason = innobase_get_err_msg(
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FTS);
+ } else if ((ha_alter_info->handler_flags
+ & Alter_inplace_info::ADD_INDEX)) {
+ /* Building a full-text index requires a lock.
+ We could do without a lock if the table already contains
+ an FTS_DOC_ID column, but in that case we would have
+ to apply the modification log to the full-text indexes. */
+
+ for (uint i = 0; i < ha_alter_info->index_add_count; i++) {
+ const KEY* key =
+ &ha_alter_info->key_info_buffer[
+ ha_alter_info->index_add_buffer[i]];
+ if (key->flags & HA_FULLTEXT) {
+ DBUG_ASSERT(!(key->flags & HA_KEYFLAG_MASK
+ & ~(HA_FULLTEXT
+ | HA_PACK_KEY
+ | HA_GENERATED_KEY
+ | HA_BINARY_PACK_KEY)));
+ ha_alter_info->unsupported_reason = innobase_get_err_msg(
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FTS);
+ online = false;
+ break;
+ }
+ }
+ }
+
+ DBUG_RETURN(online
+ ? HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE
+ : HA_ALTER_INPLACE_SHARED_LOCK_AFTER_PREPARE);
+}
+
+/*************************************************************//**
+Initialize the dict_foreign_t structure with supplied info
+@return true if added, false if duplicate foreign->id */
+static __attribute__((nonnull(1,3,5,7)))
+bool
+innobase_init_foreign(
+/*==================*/
+	dict_foreign_t*	foreign,		/*!< in/out: structure to
+						initialize */
+	char*		constraint_name,	/*!< in/out: constraint name if
+						exists */
+	dict_table_t*	table,			/*!< in: foreign table */
+	dict_index_t*	index,			/*!< in: foreign key index */
+	const char**	column_names,		/*!< in: foreign key column
+						names */
+	ulint		num_field,		/*!< in: number of columns */
+	const char*	referenced_table_name,	/*!< in: referenced table
+						name */
+	dict_table_t*	referenced_table,	/*!< in: referenced table */
+	dict_index_t*	referenced_index,	/*!< in: referenced index */
+	const char**	referenced_column_names,/*!< in: referenced column
+						names */
+	ulint		referenced_num_field)	/*!< in: number of referenced
+						columns */
+{
+	if (constraint_name) {
+		ulint	db_len;
+
+		/* Catenate 'databasename/' to the constraint name specified
+		by the user: we conceive the constraint as belonging to the
+		same MySQL 'database' as the table itself. We store the name
+		to foreign->id. */
+
+		db_len = dict_get_db_name_len(table->name);
+
+		foreign->id = static_cast<char*>(mem_heap_alloc(
+			foreign->heap, db_len + strlen(constraint_name) + 2));
+
+		ut_memcpy(foreign->id, table->name, db_len);
+		foreign->id[db_len] = '/';
+		strcpy(foreign->id + db_len + 1, constraint_name);
+	}
+
+	ut_ad(mutex_own(&dict_sys->mutex));
+
+	/* Check if any existing foreign key has the same id. When no
+	constraint name was supplied, foreign->id is still NULL at this
+	point (presumably a name is generated later in the DDL path --
+	confirm); ut_strcmp() must not be invoked on a NULL pointer, so
+	skip the scan in that case. */
+
+	if (foreign->id != NULL) {
+		for (const dict_foreign_t*	existing_foreign
+			= UT_LIST_GET_FIRST(table->foreign_list);
+		     existing_foreign != 0;
+		     existing_foreign = UT_LIST_GET_NEXT(
+			     foreign_list, existing_foreign)) {
+
+			if (ut_strcmp(existing_foreign->id,
+				      foreign->id) == 0) {
+				return(false);
+			}
+		}
+	}
+
+	foreign->foreign_table = table;
+	foreign->foreign_table_name = mem_heap_strdup(
+		foreign->heap, table->name);
+	dict_mem_foreign_table_name_lookup_set(foreign, TRUE);
+
+	foreign->foreign_index = index;
+	foreign->n_fields = (unsigned int) num_field;
+
+	foreign->foreign_col_names = static_cast<const char**>(
+		mem_heap_alloc(foreign->heap, num_field * sizeof(void*)));
+
+	for (ulint i = 0; i < num_field; i++) {
+		foreign->foreign_col_names[i] = mem_heap_strdup(
+			foreign->heap, column_names[i]);
+	}
+
+	foreign->referenced_index = referenced_index;
+	foreign->referenced_table = referenced_table;
+
+	foreign->referenced_table_name = mem_heap_strdup(
+		foreign->heap, referenced_table_name);
+	dict_mem_referenced_table_name_lookup_set(foreign, TRUE);
+
+	foreign->referenced_col_names = static_cast<const char**>(
+		mem_heap_alloc(foreign->heap,
+			       referenced_num_field * sizeof(void*)));
+
+	/* Iterate over referenced_num_field, not foreign->n_fields:
+	the array above was sized with referenced_num_field, and using
+	the child-side count would read referenced_column_names[] and
+	write the new array out of bounds if the two counts ever
+	disagree. */
+	for (ulint i = 0; i < referenced_num_field; i++) {
+		foreign->referenced_col_names[i]
+			= mem_heap_strdup(foreign->heap,
+					  referenced_column_names[i]);
+	}
+
+	return(true);
+}
+
+/*************************************************************//**
+Check whether the foreign key options are legitimate: ON DELETE/UPDATE
+SET NULL is only allowed when every referencing column is nullable.
+@return true if the options are acceptable */
+static __attribute__((nonnull, warn_unused_result))
+bool
+innobase_check_fk_option(
+/*=====================*/
+	dict_foreign_t*	foreign)	/*!< in: InnoDB Foreign key */
+{
+	if (foreign->type & (DICT_FOREIGN_ON_UPDATE_SET_NULL
+			     | DICT_FOREIGN_ON_DELETE_SET_NULL)
+	    && foreign->foreign_index) {
+
+		/* Inspect every column of the referencing (child)
+		index. */
+		for (ulint j = 0; j < foreign->n_fields; j++) {
+			if ((dict_index_get_nth_col(
+				     foreign->foreign_index, j)->prtype)
+			    & DATA_NOT_NULL) {
+
+				/* It is not sensible to define
+				SET NULL if the column is not
+				allowed to be NULL! */
+				return(false);
+			}
+		}
+	}
+
+	return(true);
+}
+
+/*************************************************************//**
+Set foreign key options (ON DELETE / ON UPDATE actions) on the InnoDB
+dict_foreign_t from the parsed MySQL Foreign_key object.
+@return true if successfully set, i.e. the resulting options pass
+innobase_check_fk_option() */
+static __attribute__((nonnull, warn_unused_result))
+bool
+innobase_set_foreign_key_option(
+/*============================*/
+	dict_foreign_t*	foreign,	/*!< in: InnoDB Foreign key */
+	Foreign_key*	fk_key)		/*!< in: Foreign key info from
+					MySQL */
+{
+	/* The caller must hand us a freshly created dict_foreign_t
+	whose type bits are still clear; both switches below OR into
+	foreign->type. */
+	ut_ad(!foreign->type);
+
+	/* NO_ACTION, RESTRICT and the (unspecified) DEFAULT all map to
+	"no action" in InnoDB. */
+	switch (fk_key->delete_opt) {
+	case Foreign_key::FK_OPTION_NO_ACTION:
+	case Foreign_key::FK_OPTION_RESTRICT:
+	case Foreign_key::FK_OPTION_DEFAULT:
+		foreign->type = DICT_FOREIGN_ON_DELETE_NO_ACTION;
+		break;
+	case Foreign_key::FK_OPTION_CASCADE:
+		foreign->type = DICT_FOREIGN_ON_DELETE_CASCADE;
+		break;
+	case Foreign_key::FK_OPTION_SET_NULL:
+		foreign->type = DICT_FOREIGN_ON_DELETE_SET_NULL;
+		break;
+	}
+
+	switch (fk_key->update_opt) {
+	case Foreign_key::FK_OPTION_NO_ACTION:
+	case Foreign_key::FK_OPTION_RESTRICT:
+	case Foreign_key::FK_OPTION_DEFAULT:
+		foreign->type |= DICT_FOREIGN_ON_UPDATE_NO_ACTION;
+		break;
+	case Foreign_key::FK_OPTION_CASCADE:
+		foreign->type |= DICT_FOREIGN_ON_UPDATE_CASCADE;
+		break;
+	case Foreign_key::FK_OPTION_SET_NULL:
+		foreign->type |= DICT_FOREIGN_ON_UPDATE_SET_NULL;
+		break;
+	}
+
+	/* Reject SET NULL on NOT NULL child columns. */
+	return(innobase_check_fk_option(foreign));
+}
+
+/*******************************************************************//**
+Check if a foreign key constraint can make use of an index
+that is being created.
+@return usable index, or NULL if none found */
+static __attribute__((nonnull, warn_unused_result))
+const KEY*
+innobase_find_equiv_index(
+/*======================*/
+	const char*const*	col_names,
+					/*!< in: column names */
+	uint			n_cols,	/*!< in: number of columns */
+	const KEY*		keys,	/*!< in: index information */
+	const uint*		add,	/*!< in: indexes being created */
+	uint			n_add)	/*!< in: number of indexes to create */
+{
+	for (uint i = 0; i < n_add; i++) {
+		const KEY*	key = &keys[add[i]];
+
+		if (key->user_defined_key_parts < n_cols) {
+			/* Too few key parts; the no_match label is the
+			shared "try the next candidate index" exit for
+			the inner loop's gotos. */
+no_match:
+			continue;
+		}
+
+		/* The candidate must cover the FK columns as its first
+		n_cols key parts, in order, full-column (no prefix). */
+		for (uint j = 0; j < n_cols; j++) {
+			const KEY_PART_INFO&	key_part = key->key_part[j];
+			uint32			col_len
+				= key_part.field->pack_length();
+
+			/* The MySQL pack length contains 1 or 2 bytes
+			length field for a true VARCHAR. */
+
+			if (key_part.field->type() == MYSQL_TYPE_VARCHAR) {
+				col_len -= static_cast<const Field_varstring*>(
+					key_part.field)->length_bytes;
+			}
+
+			if (key_part.length < col_len) {
+
+				/* Column prefix indexes cannot be
+				used for FOREIGN KEY constraints. */
+				goto no_match;
+			}
+
+			if (innobase_strcasecmp(col_names[j],
+						key_part.field->field_name)) {
+				/* Name mismatch */
+				goto no_match;
+			}
+		}
+
+		return(key);
+	}
+
+	return(NULL);
+}
+
+/*************************************************************//**
+Find an index whose first fields are the columns in the array
+in the same order and is not marked for deletion
+@return matching index, NULL if not found */
+static
+dict_index_t*
+innobase_find_fk_index(
+/*===================*/
+	Alter_inplace_info*	ha_alter_info,
+				/*!< in: alter table info */
+	dict_table_t*	table,	/*!< in: table */
+	const char**	columns,/*!< in: array of column names */
+	ulint		n_cols)	/*!< in: number of columns */
+
+{
+	dict_index_t*	index;
+	dict_index_t*	found_index = NULL;
+
+	index = dict_table_get_first_index(table);
+
+	while (index != NULL) {
+		if (index->type & DICT_FTS) {
+			/* A fulltext index cannot back a FOREIGN KEY. */
+			goto next_rec;
+		} else if (dict_foreign_qualify_index(
+			table, columns, n_cols, index, NULL, TRUE, FALSE)) {
+			/* Check if this index is in the drop list;
+			an index being dropped by this ALTER cannot be
+			used for the new constraint. */
+			/* NOTE(review): index is always non-NULL in
+			this branch, so this check is redundant. */
+			if (index) {
+				KEY**	drop_key;
+
+				drop_key = ha_alter_info->index_drop_buffer;
+
+				for (uint i = 0;
+				     i < ha_alter_info->index_drop_count;
+				     i++) {
+					if (innobase_strcasecmp(
+						    drop_key[i]->name,
+						    index->name) == 0) {
+						goto next_rec;
+					}
+				}
+			}
+
+			found_index = index;
+			break;
+		}
+
+next_rec:
+		index = dict_table_get_next_index(index);
+	}
+
+	return(found_index);
+}
+
+/*************************************************************//**
+Create InnoDB foreign key structure from MySQL alter_info
+@retval true if successful
+@retval false on error (will call my_error()) */
+static
+bool
+innobase_get_foreign_key_info(
+/*==========================*/
+	Alter_inplace_info*
+			ha_alter_info,	/*!< in: alter table info */
+	const TABLE_SHARE*
+			table_share,	/*!< in: the TABLE_SHARE */
+	dict_table_t*	table,		/*!< in: table */
+	dict_foreign_t**add_fk,		/*!< out: foreign constraint added */
+	ulint*		n_add_fk,	/*!< out: number of foreign
+					constraints added */
+	mem_heap_t*	heap,		/*!< in: memory heap */
+	const trx_t*	trx)		/*!< in: user transaction */
+{
+	Key*		key;
+	Foreign_key*	fk_key;
+	ulint		i = 0;		/* running column counter; reused
+					first for the child columns and
+					then for the parent columns */
+	dict_table_t*	referenced_table = NULL;
+	char*		referenced_table_name = NULL;
+	ulint		num_fk = 0;
+	Alter_info*	alter_info = ha_alter_info->alter_info;
+
+	*n_add_fk = 0;
+
+	List_iterator<Key> key_iterator(alter_info->key_list);
+
+	while ((key=key_iterator++)) {
+		if (key->type == Key::FOREIGN_KEY) {
+			const char*	column_names[MAX_NUM_FK_COLUMNS];
+			dict_index_t*	index = NULL;
+			const char*	referenced_column_names[MAX_NUM_FK_COLUMNS];
+			dict_index_t*	referenced_index = NULL;
+			ulint		num_col = 0;
+			ulint		referenced_num_col = 0;
+			bool		correct_option;
+			char*		db_namep = NULL;
+			char*		tbl_namep = NULL;
+			ulint		db_name_len = 0;
+			ulint		tbl_name_len = 0;
+#ifdef __WIN__
+			/* On Windows, identifiers are compared in a
+			case-insensitive manner; lower-cased copies of
+			the referenced db/table names are made below. */
+			char		db_name[MAX_DATABASE_NAME_LEN];
+			char		tbl_name[MAX_TABLE_NAME_LEN];
+#endif
+
+			fk_key= static_cast<Foreign_key*>(key);
+
+			if (fk_key->columns.elements > 0) {
+				Key_part_spec* column;
+				List_iterator<Key_part_spec> key_part_iterator(
+					fk_key->columns);
+
+				/* Get all the foreign key column info for the
+				current table */
+				/* NOTE(review): the bound on i is only a
+				debug assertion, and it is checked after
+				the store into column_names[i] -- confirm
+				that the parser caps FK columns at
+				MAX_NUM_FK_COLUMNS in release builds. */
+				while ((column = key_part_iterator++)) {
+					column_names[i] =
+						column->field_name.str;
+					ut_ad(i < MAX_NUM_FK_COLUMNS);
+					i++;
+				}
+
+				index = innobase_find_fk_index(
+					ha_alter_info, table, column_names, i);
+
+				/* MySQL would add a index in the creation
+				list if no such index for foreign table,
+				so we have to use DBUG_EXECUTE_IF to simulate
+				the scenario */
+				DBUG_EXECUTE_IF("innodb_test_no_foreign_idx",
+						index = NULL;);
+
+				/* Check whether there exist such
+				index in the index create clause */
+				if (!index && !innobase_find_equiv_index(
+					    column_names, i,
+					    ha_alter_info->key_info_buffer,
+					    ha_alter_info->index_add_buffer,
+					    ha_alter_info->index_add_count)) {
+					my_error(
+						ER_FK_NO_INDEX_CHILD,
+						MYF(0),
+						fk_key->name.str,
+						table_share->table_name.str);
+					goto err_exit;
+				}
+
+				num_col = i;
+			}
+
+			add_fk[num_fk] = dict_mem_foreign_create();
+
+#ifndef __WIN__
+			tbl_namep = fk_key->ref_table.str;
+			tbl_name_len = fk_key->ref_table.length;
+			db_namep = fk_key->ref_db.str;
+			db_name_len = fk_key->ref_db.length;
+#else
+			ut_ad(fk_key->ref_table.str);
+
+			memcpy(tbl_name, fk_key->ref_table.str,
+			       fk_key->ref_table.length);
+			tbl_name[fk_key->ref_table.length] = 0;
+			innobase_casedn_str(tbl_name);
+			tbl_name_len = strlen(tbl_name);
+			tbl_namep = &tbl_name[0];
+
+			if (fk_key->ref_db.str != NULL) {
+				memcpy(db_name, fk_key->ref_db.str,
+				       fk_key->ref_db.length);
+				db_name[fk_key->ref_db.length] = 0;
+				innobase_casedn_str(db_name);
+				db_name_len = strlen(db_name);
+				db_namep = &db_name[0];
+			}
+#endif
+			/* The data dictionary cache is consulted and
+			modified below; every error path out of this
+			region releases the mutex first. */
+			mutex_enter(&dict_sys->mutex);
+
+			referenced_table_name = dict_get_referenced_table(
+				table->name,
+				db_namep,
+				db_name_len,
+				tbl_namep,
+				tbl_name_len,
+				&referenced_table,
+				add_fk[num_fk]->heap);
+
+			/* Test the case when referenced_table failed to
+			open, if trx->check_foreigns is not set, we should
+			still be able to add the foreign key */
+			DBUG_EXECUTE_IF("innodb_test_open_ref_fail",
+					referenced_table = NULL;);
+
+			if (!referenced_table && trx->check_foreigns) {
+				mutex_exit(&dict_sys->mutex);
+				my_error(ER_FK_CANNOT_OPEN_PARENT,
+					 MYF(0), tbl_namep);
+
+				goto err_exit;
+			}
+
+			/* Restart the column counter for the
+			referenced (parent) side. */
+			i = 0;
+
+			if (fk_key->ref_columns.elements > 0) {
+				Key_part_spec* column;
+				List_iterator<Key_part_spec> key_part_iterator(
+					fk_key->ref_columns);
+
+				while ((column = key_part_iterator++)) {
+					referenced_column_names[i] =
+						column->field_name.str;
+					ut_ad(i < MAX_NUM_FK_COLUMNS);
+					i++;
+				}
+
+				if (referenced_table) {
+					referenced_index =
+						dict_foreign_find_index(
+							referenced_table,
+							referenced_column_names,
+							i, NULL,
+							TRUE, FALSE);
+
+					DBUG_EXECUTE_IF(
+						"innodb_test_no_reference_idx",
+						referenced_index = NULL;);
+
+					/* Check whether there exist such
+					index in the index create clause */
+					if (!referenced_index) {
+						mutex_exit(&dict_sys->mutex);
+						my_error(
+							ER_FK_NO_INDEX_PARENT,
+							MYF(0),
+							fk_key->name.str,
+							tbl_namep);
+						goto err_exit;
+					}
+				} else {
+					/* Parent not opened: only legal
+					when FK checks are disabled. */
+					ut_a(!trx->check_foreigns);
+				}
+
+				referenced_num_col = i;
+			}
+
+			if (!innobase_init_foreign(
+				    add_fk[num_fk], fk_key->name.str,
+				    table, index, column_names,
+				    num_col, referenced_table_name,
+				    referenced_table, referenced_index,
+				    referenced_column_names, referenced_num_col)) {
+				mutex_exit(&dict_sys->mutex);
+				my_error(
+					ER_FK_DUP_NAME,
+					MYF(0),
+					add_fk[num_fk]->id);
+				goto err_exit;
+			}
+
+			mutex_exit(&dict_sys->mutex);
+
+			correct_option = innobase_set_foreign_key_option(
+				add_fk[num_fk], fk_key);
+
+			DBUG_EXECUTE_IF("innodb_test_wrong_fk_option",
+					correct_option = false;);
+
+			if (!correct_option) {
+				my_error(ER_FK_INCORRECT_OPTION,
+					 MYF(0),
+					 table_share->table_name.str,
+					 add_fk[num_fk]->id);
+				goto err_exit;
+			}
+
+			num_fk++;
+			i = 0;
+		}
+
+	}
+
+	*n_add_fk = num_fk;
+
+	return(true);
+err_exit:
+	/* Free every constraint created so far, including the one
+	under construction (hence i <= num_fk). NOTE(review): this
+	assumes the caller zero-initialized add_fk[], so that slots not
+	yet assigned by dict_mem_foreign_create() are NULL -- confirm
+	at the call site. */
+	for (i = 0; i <= num_fk; i++) {
+		if (add_fk[i]) {
+			dict_foreign_free(add_fk[i]);
+		}
+	}
+
+	return(false);
+}
+
/*************************************************************//**
Copies an InnoDB column to a MySQL field. This function is
adapted from row_sel_field_store_in_mysql_format(). */
@@ -91,10 +1086,9 @@ innobase_col_to_mysql(
break;
case DATA_BLOB:
- /* Store a pointer to the BLOB buffer to dest: the BLOB was
- already copied to the buffer in row_sel_store_mysql_rec */
-
- row_mysql_store_blob_ref(dest, flen, data, len);
+ /* Skip MySQL BLOBs when reporting an erroneous row
+ during index creation or table rebuild. */
+ field->set_null();
break;
#ifdef UNIV_DEBUG
@@ -135,20 +1129,19 @@ UNIV_INTERN
void
innobase_rec_to_mysql(
/*==================*/
- TABLE* table, /*!< in/out: MySQL table */
- const rec_t* rec, /*!< in: record */
- const dict_index_t* index, /*!< in: index */
- const ulint* offsets) /*!< in: rec_get_offsets(
- rec, index, ...) */
+ struct TABLE* table, /*!< in/out: MySQL table */
+ const rec_t* rec, /*!< in: record */
+ const dict_index_t* index, /*!< in: index */
+ const ulint* offsets)/*!< in: rec_get_offsets(
+ rec, index, ...) */
{
uint n_fields = table->s->fields;
- uint i;
ut_ad(n_fields == dict_table_get_n_user_cols(index->table)
- || (DICT_TF2_FLAG_IS_SET(index->table, DICT_TF2_FTS_HAS_DOC_ID)
- && n_fields + 1 == dict_table_get_n_user_cols(index->table)));
+ - !!(DICT_TF2_FLAG_IS_SET(index->table,
+ DICT_TF2_FTS_HAS_DOC_ID)));
- for (i = 0; i < n_fields; i++) {
+ for (uint i = 0; i < n_fields; i++) {
Field* field = table->field[i];
ulint ipos;
ulint ilen;
@@ -158,7 +1151,8 @@ innobase_rec_to_mysql(
ipos = dict_index_get_nth_col_or_prefix_pos(index, i, TRUE);
- if (UNIV_UNLIKELY(ipos == ULINT_UNDEFINED)) {
+ if (ipos == ULINT_UNDEFINED
+ || rec_offs_nth_extern(offsets, ipos)) {
null_field:
field->set_null();
continue;
@@ -182,6 +1176,85 @@ null_field:
}
/*************************************************************//**
+Copies an InnoDB index entry to table->record[0]. Columns that are
+absent from the index entry, stored externally (off-page), or SQL
+NULL are set to NULL in the MySQL record. */
+UNIV_INTERN
+void
+innobase_fields_to_mysql(
+/*=====================*/
+	struct TABLE*		table,	/*!< in/out: MySQL table */
+	const dict_index_t*	index,	/*!< in: InnoDB index */
+	const dfield_t*		fields)	/*!< in: InnoDB index fields */
+{
+	uint	n_fields	= table->s->fields;
+
+	/* The MySQL column count excludes any internally generated
+	(hidden) FTS_DOC_ID column. */
+	ut_ad(n_fields == dict_table_get_n_user_cols(index->table)
+	      - !!(DICT_TF2_FLAG_IS_SET(index->table,
+				       DICT_TF2_FTS_HAS_DOC_ID)));
+
+	for (uint i = 0; i < n_fields; i++) {
+		Field*	field	= table->field[i];
+		ulint	ipos;
+
+		field->reset();
+
+		/* Position of column i (or a prefix of it) within the
+		index entry; ULINT_UNDEFINED if not in this index. */
+		ipos = dict_index_get_nth_col_or_prefix_pos(index, i, TRUE);
+
+		if (ipos == ULINT_UNDEFINED
+		    || dfield_is_ext(&fields[ipos])
+		    || dfield_is_null(&fields[ipos])) {
+
+			field->set_null();
+		} else {
+			field->set_notnull();
+
+			const dfield_t*	df	= &fields[ipos];
+
+			innobase_col_to_mysql(
+				dict_field_get_col(
+					dict_index_get_nth_field(index, ipos)),
+				static_cast<const uchar*>(dfield_get_data(df)),
+				dfield_get_len(df), field);
+		}
+	}
+}
+
+/*************************************************************//**
+Copies an InnoDB row to table->record[0]. Externally stored (off-page)
+or SQL NULL columns are set to NULL in the MySQL record. */
+UNIV_INTERN
+void
+innobase_row_to_mysql(
+/*==================*/
+	struct TABLE*		table,	/*!< in/out: MySQL table */
+	const dict_table_t*	itab,	/*!< in: InnoDB table */
+	const dtuple_t*		row)	/*!< in: InnoDB row */
+{
+	uint	n_fields = table->s->fields;
+
+	/* The InnoDB row may contain an extra FTS_DOC_ID column at the end.
+	The MySQL field count also excludes the InnoDB system columns. */
+	ut_ad(row->n_fields == dict_table_get_n_cols(itab));
+	ut_ad(n_fields == row->n_fields - DATA_N_SYS_COLS
+	      - !!(DICT_TF2_FLAG_IS_SET(itab, DICT_TF2_FTS_HAS_DOC_ID)));
+
+	for (uint i = 0; i < n_fields; i++) {
+		Field*		field	= table->field[i];
+		const dfield_t*	df	= dtuple_get_nth_field(row, i);
+
+		field->reset();
+
+		if (dfield_is_ext(df) || dfield_is_null(df)) {
+			field->set_null();
+		} else {
+			field->set_notnull();
+
+			innobase_col_to_mysql(
+				dict_table_get_nth_col(itab, i),
+				static_cast<const uchar*>(dfield_get_data(df)),
+				dfield_get_len(df), field);
+		}
+	}
+}
+
+/*************************************************************//**
Resets table->record[0]. */
UNIV_INTERN
void
@@ -197,66 +1270,29 @@ innobase_rec_reset(
}
}
-/******************************************************************//**
-Removes the filename encoding of a database and table name. */
-static
-void
-innobase_convert_tablename(
-/*=======================*/
- char* s) /*!< in: identifier; out: decoded identifier */
-{
- uint errors;
-
- char* slash = strchr(s, '/');
-
- if (slash) {
- char* t;
- /* Temporarily replace the '/' with NUL. */
- *slash = 0;
- /* Convert the database name. */
- strconvert(&my_charset_filename, s, system_charset_info,
- s, slash - s + 1, &errors);
-
- t = s + strlen(s);
- ut_ad(slash >= t);
- /* Append a '.' after the database name. */
- *t++ = '.';
- slash++;
- /* Convert the table name. */
- strconvert(&my_charset_filename, slash, system_charset_info,
- t, slash - t + strlen(slash), &errors);
- } else {
- strconvert(&my_charset_filename, s,
- system_charset_info, s, strlen(s), &errors);
- }
-}
-
/*******************************************************************//**
This function checks that index keys are sensible.
@return 0 or error number */
-static
+static __attribute__((nonnull, warn_unused_result))
int
innobase_check_index_keys(
/*======================*/
- const KEY* key_info, /*!< in: Indexes to be
- created */
- ulint num_of_keys, /*!< in: Number of
- indexes to be created */
- const dict_table_t* table) /*!< in: Existing indexes */
+ const Alter_inplace_info* info,
+ /*!< in: indexes to be created or dropped */
+ const dict_table_t* innodb_table)
+ /*!< in: Existing indexes */
{
- ulint key_num;
-
- ut_ad(key_info);
- ut_ad(num_of_keys);
-
- for (key_num = 0; key_num < num_of_keys; key_num++) {
- const KEY& key = key_info[key_num];
+ for (uint key_num = 0; key_num < info->index_add_count;
+ key_num++) {
+ const KEY& key = info->key_info_buffer[
+ info->index_add_buffer[key_num]];
/* Check that the same index name does not appear
twice in indexes to be created. */
for (ulint i = 0; i < key_num; i++) {
- const KEY& key2 = key_info[i];
+ const KEY& key2 = info->key_info_buffer[
+ info->index_add_buffer[i]];
if (0 == strcmp(key.name, key2.name)) {
my_error(ER_WRONG_NAME_FOR_INDEX, MYF(0),
@@ -268,23 +1304,36 @@ innobase_check_index_keys(
/* Check that the same index name does not already exist. */
- for (const dict_index_t* index
- = dict_table_get_first_index(table);
- index; index = dict_table_get_next_index(index)) {
+ const dict_index_t* index;
- if (0 == strcmp(key.name, index->name)) {
- my_error(ER_WRONG_NAME_FOR_INDEX, MYF(0),
- key.name);
+ for (index = dict_table_get_first_index(innodb_table);
+ index; index = dict_table_get_next_index(index)) {
- return(ER_WRONG_NAME_FOR_INDEX);
+ if (!strcmp(key.name, index->name)) {
+ break;
}
}
- /* Check that MySQL does not try to create a column
- prefix index field on an inappropriate data type and
- that the same column does not appear twice in the index. */
+ if (index) {
+ /* If a key by the same name is being created and
+ dropped, the name clash is OK. */
+ for (uint i = 0; i < info->index_drop_count;
+ i++) {
+ const KEY* drop_key
+ = info->index_drop_buffer[i];
- for (ulint i = 0; i < key.key_parts; i++) {
+ if (0 == strcmp(key.name, drop_key->name)) {
+ goto name_ok;
+ }
+ }
+
+ my_error(ER_WRONG_NAME_FOR_INDEX, MYF(0), key.name);
+
+ return(ER_WRONG_NAME_FOR_INDEX);
+ }
+
+name_ok:
+ for (ulint i = 0; i < key.user_defined_key_parts; i++) {
const KEY_PART_INFO& key_part1
= key.key_part[i];
const Field* field
@@ -299,6 +1348,10 @@ innobase_check_index_keys(
case DATA_FLOAT:
case DATA_DOUBLE:
case DATA_DECIMAL:
+ /* Check that MySQL does not try to
+ create a column prefix index field on
+ an inappropriate data type. */
+
if (field->type() == MYSQL_TYPE_VARCHAR) {
if (key_part1.length
>= field->pack_length()
@@ -318,17 +1371,19 @@ innobase_check_index_keys(
return(ER_WRONG_KEY_COLUMN);
}
+ /* Check that the same column does not appear
+ twice in the index. */
+
for (ulint j = 0; j < i; j++) {
const KEY_PART_INFO& key_part2
= key.key_part[j];
- if (strcmp(key_part1.field->field_name,
- key_part2.field->field_name)) {
+ if (key_part1.fieldnr != key_part2.fieldnr) {
continue;
}
my_error(ER_WRONG_KEY_COLUMN, MYF(0), "InnoDB",
- key_part1.field->field_name);
+ field->field_name);
return(ER_WRONG_KEY_COLUMN);
}
}
@@ -339,16 +1394,19 @@ innobase_check_index_keys(
/*******************************************************************//**
Create index field definition for key part */
-static
+static __attribute__((nonnull(2,3)))
void
innobase_create_index_field_def(
/*============================*/
- KEY_PART_INFO* key_part, /*!< in: MySQL key definition */
- mem_heap_t* heap, /*!< in: memory heap */
- merge_index_field_t* index_field) /*!< out: index field
+ const TABLE* altered_table, /*!< in: MySQL table that is
+ being altered, or NULL
+ if a new clustered index is
+ not being created */
+ const KEY_PART_INFO* key_part, /*!< in: MySQL key definition */
+ index_field_t* index_field) /*!< out: index field
definition for key_part */
{
- Field* field;
+ const Field* field;
ibool is_unsigned;
ulint col_type;
@@ -357,9 +1415,13 @@ innobase_create_index_field_def(
ut_ad(key_part);
ut_ad(index_field);
- field = key_part->field;
+ field = altered_table
+ ? altered_table->field[key_part->fieldnr]
+ : key_part->field;
ut_a(field);
+ index_field->col_no = key_part->fieldnr;
+
col_type = get_innobase_type_from_mysql_type(&is_unsigned, field);
if (DATA_BLOB == col_type
@@ -374,44 +1436,48 @@ innobase_create_index_field_def(
index_field->prefix_len = 0;
}
- index_field->field_name = mem_heap_strdup(heap, field->field_name);
-
DBUG_VOID_RETURN;
}
/*******************************************************************//**
Create index definition for key */
-static
+static __attribute__((nonnull))
void
innobase_create_index_def(
/*======================*/
- KEY* key, /*!< in: key definition */
- bool new_primary, /*!< in: TRUE=generating
- a new primary key
+ const TABLE* altered_table, /*!< in: MySQL table that is
+ being altered */
+ const KEY* keys, /*!< in: key definitions */
+ ulint key_number, /*!< in: MySQL key number */
+ bool new_clustered, /*!< in: true if generating
+ a new clustered index
on the table */
- bool key_primary, /*!< in: TRUE if this key
- is a primary key */
- merge_index_def_t* index, /*!< out: index definition */
+ bool key_clustered, /*!< in: true if this is
+ the new clustered index */
+ index_def_t* index, /*!< out: index definition */
mem_heap_t* heap) /*!< in: heap where memory
is allocated */
{
- ulint i;
- ulint len;
- ulint n_fields = key->key_parts;
- char* index_name;
+ const KEY* key = &keys[key_number];
+ ulint i;
+ ulint len;
+ ulint n_fields = key->user_defined_key_parts;
+ char* index_name;
DBUG_ENTER("innobase_create_index_def");
+ DBUG_ASSERT(!key_clustered || new_clustered);
- index->fields = (merge_index_field_t*) mem_heap_alloc(
- heap, n_fields * sizeof *index->fields);
+ index->fields = static_cast<index_field_t*>(
+ mem_heap_alloc(heap, n_fields * sizeof *index->fields));
index->ind_type = 0;
+ index->key_number = key_number;
index->n_fields = n_fields;
len = strlen(key->name) + 1;
- index->name = index_name = (char*) mem_heap_alloc(heap,
- len + !new_primary);
+ index->name = index_name = static_cast<char*>(
+ mem_heap_alloc(heap, len + !new_clustered));
- if (UNIV_LIKELY(!new_primary)) {
+ if (!new_clustered) {
*index_name++ = TEMP_INDEX_PREFIX;
}
@@ -421,144 +1487,155 @@ innobase_create_index_def(
index->ind_type |= DICT_UNIQUE;
}
- if (key->flags & HA_FULLTEXT) {
+ if (key_clustered) {
+ DBUG_ASSERT(!(key->flags & HA_FULLTEXT));
+ index->ind_type |= DICT_CLUSTERED;
+ } else if (key->flags & HA_FULLTEXT) {
+ DBUG_ASSERT(!(key->flags & HA_KEYFLAG_MASK
+ & ~(HA_FULLTEXT
+ | HA_PACK_KEY
+ | HA_BINARY_PACK_KEY)));
+ DBUG_ASSERT(!(key->flags & HA_NOSAME));
+ DBUG_ASSERT(!index->ind_type);
index->ind_type |= DICT_FTS;
}
- if (key_primary) {
- index->ind_type |= DICT_CLUSTERED;
+ if (!new_clustered) {
+ altered_table = NULL;
}
for (i = 0; i < n_fields; i++) {
- innobase_create_index_field_def(&key->key_part[i], heap,
- &index->fields[i]);
+ innobase_create_index_field_def(
+ altered_table, &key->key_part[i], &index->fields[i]);
}
DBUG_VOID_RETURN;
}
/*******************************************************************//**
-Copy index field definition */
+Check whether the table has the FTS_DOC_ID column
+@return whether there exists an FTS_DOC_ID column */
static
-void
-innobase_copy_index_field_def(
+bool
+innobase_fts_check_doc_id_col(
/*==========================*/
- const dict_field_t* field, /*!< in: definition to copy */
- merge_index_field_t* index_field) /*!< out: copied definition */
+ const dict_table_t* table, /*!< in: InnoDB table with
+ fulltext index */
+ const TABLE* altered_table,
+ /*!< in: MySQL table with
+ fulltext index */
+ ulint* fts_doc_col_no)
+ /*!< out: The column number for
+ Doc ID, or ULINT_UNDEFINED
+ if it is of wrong type */
{
- DBUG_ENTER("innobase_copy_index_field_def");
- DBUG_ASSERT(field != NULL);
- DBUG_ASSERT(index_field != NULL);
-
- index_field->field_name = field->name;
- index_field->prefix_len = field->prefix_len;
-
- DBUG_VOID_RETURN;
-}
-
-/*******************************************************************//**
-Copy index definition for the index */
-static
-void
-innobase_copy_index_def(
-/*====================*/
- const dict_index_t* index, /*!< in: index definition to copy */
- merge_index_def_t* new_index,/*!< out: Index definition */
- mem_heap_t* heap) /*!< in: heap where allocated */
-{
- ulint n_fields;
- ulint i;
-
- DBUG_ENTER("innobase_copy_index_def");
+ *fts_doc_col_no = ULINT_UNDEFINED;
- /* Note that we take only those fields that user defined to be
- in the index. In the internal representation more colums were
- added and those colums are not copied .*/
+ const uint n_cols = altered_table->s->fields;
+ uint i;
- n_fields = index->n_user_defined_cols;
+ for (i = 0; i < n_cols; i++) {
+ const Field* field = altered_table->s->field[i];
- new_index->fields = (merge_index_field_t*) mem_heap_alloc(
- heap, n_fields * sizeof *new_index->fields);
+ if (my_strcasecmp(system_charset_info,
+ field->field_name, FTS_DOC_ID_COL_NAME)) {
+ continue;
+ }
- /* When adding a PRIMARY KEY, we may convert a previous
- clustered index to a secondary index (UNIQUE NOT NULL). */
- new_index->ind_type = index->type & ~DICT_CLUSTERED;
- new_index->n_fields = n_fields;
- new_index->name = index->name;
+ if (strcmp(field->field_name, FTS_DOC_ID_COL_NAME)) {
+ my_error(ER_WRONG_COLUMN_NAME, MYF(0),
+ field->field_name);
+ } else if (field->type() != MYSQL_TYPE_LONGLONG
+ || field->pack_length() != 8
+ || field->real_maybe_null()
+ || !(field->flags & UNSIGNED_FLAG)) {
+ my_error(ER_INNODB_FT_WRONG_DOCID_COLUMN, MYF(0),
+ field->field_name);
+ } else {
+ *fts_doc_col_no = i;
+ }
- for (i = 0; i < n_fields; i++) {
- innobase_copy_index_field_def(&index->fields[i],
- &new_index->fields[i]);
+ return(true);
}
- DBUG_VOID_RETURN;
-}
-
-/*******************************************************************//**
-Check whether the table has the FTS_DOC_ID column
-@return TRUE if there exists the FTS_DOC_ID column, if TRUE but fts_doc_col_no
- equal to ULINT_UNDEFINED then that means the column exists but is not
- of the right type. */
-static
-ibool
-innobase_fts_check_doc_id_col(
-/*==========================*/
- dict_table_t* table, /*!< in: table with FTS index */
- ulint* fts_doc_col_no) /*!< out: The column number for
- Doc ID */
-{
- *fts_doc_col_no = ULINT_UNDEFINED;
+ if (!table) {
+ return(false);
+ }
- for (ulint i = 0; i + DATA_N_SYS_COLS < (ulint) table->n_cols; i++) {
+ for (; i + DATA_N_SYS_COLS < (uint) table->n_cols; i++) {
const char* name = dict_table_get_col_name(table, i);
if (strcmp(name, FTS_DOC_ID_COL_NAME) == 0) {
+#ifdef UNIV_DEBUG
const dict_col_t* col;
col = dict_table_get_nth_col(table, i);
- if (col->mtype != DATA_INT || col->len != 8) {
- fprintf(stderr,
- " InnoDB: %s column in table %s"
- " must be of the BIGINT datatype\n",
- FTS_DOC_ID_COL_NAME, table->name);
- } else if (!(col->prtype & DATA_NOT_NULL)) {
- fprintf(stderr,
- " InnoDB: %s column in table %s"
- " must be NOT NULL\n",
- FTS_DOC_ID_COL_NAME, table->name);
-
- } else if (!(col->prtype & DATA_UNSIGNED)) {
- fprintf(stderr,
- " InnoDB: %s column in table %s"
- " must be UNSIGNED\n",
- FTS_DOC_ID_COL_NAME, table->name);
- } else {
- *fts_doc_col_no = i;
- }
-
- return(TRUE);
+ /* Because the FTS_DOC_ID does not exist in
+ the MySQL data dictionary, this must be the
+ internally created FTS_DOC_ID column. */
+ ut_ad(col->mtype == DATA_INT);
+ ut_ad(col->len == 8);
+ ut_ad(col->prtype & DATA_NOT_NULL);
+ ut_ad(col->prtype & DATA_UNSIGNED);
+#endif /* UNIV_DEBUG */
+ *fts_doc_col_no = i;
+ return(true);
}
}
- return(FALSE);
+ return(false);
}
/*******************************************************************//**
Check whether the table has a unique index with FTS_DOC_ID_INDEX_NAME
on the Doc ID column.
-@return FTS_EXIST_DOC_ID_INDEX if there exists the FTS_DOC_ID index,
-FTS_INCORRECT_DOC_ID_INDEX if the FTS_DOC_ID index is of wrong format */
+@return the status of the FTS_DOC_ID index */
UNIV_INTERN
enum fts_doc_id_index_enum
innobase_fts_check_doc_id_index(
/*============================*/
- dict_table_t* table, /*!< in: table definition */
- ulint* fts_doc_col_no) /*!< out: The column number for
- Doc ID */
+ const dict_table_t* table, /*!< in: table definition */
+ const TABLE* altered_table, /*!< in: MySQL table
+ that is being altered */
+ ulint* fts_doc_col_no) /*!< out: The column number for
+ Doc ID, or ULINT_UNDEFINED
+ if it is being created in
+ ha_alter_info */
{
- dict_index_t* index;
- dict_field_t* field;
+ const dict_index_t* index;
+ const dict_field_t* field;
+
+ if (altered_table) {
+ /* Check if a unique index with the name of
+ FTS_DOC_ID_INDEX_NAME is being created. */
+
+ for (uint i = 0; i < altered_table->s->keys; i++) {
+ const KEY& key = altered_table->s->key_info[i];
+
+ if (innobase_strcasecmp(
+ key.name, FTS_DOC_ID_INDEX_NAME)) {
+ continue;
+ }
+
+ if ((key.flags & HA_NOSAME)
+ && key.user_defined_key_parts == 1
+ && !strcmp(key.name, FTS_DOC_ID_INDEX_NAME)
+ && !strcmp(key.key_part[0].field->field_name,
+ FTS_DOC_ID_COL_NAME)) {
+ if (fts_doc_col_no) {
+ *fts_doc_col_no = ULINT_UNDEFINED;
+ }
+ return(FTS_EXIST_DOC_ID_INDEX);
+ } else {
+ return(FTS_INCORRECT_DOC_ID_INDEX);
+ }
+ }
+ }
+
+ if (!table) {
+ return(FTS_NOT_EXIST_DOC_ID_INDEX);
+ }
for (index = dict_table_get_first_index(table);
index; index = dict_table_get_next_index(index)) {
@@ -570,6 +1647,7 @@ innobase_fts_check_doc_id_index(
}
if (!dict_index_is_unique(index)
+ || dict_index_get_n_unique(index) > 1
|| strcmp(index->name, FTS_DOC_ID_INDEX_NAME)) {
return(FTS_INCORRECT_DOC_ID_INDEX);
}
@@ -590,9 +1668,9 @@ innobase_fts_check_doc_id_index(
} else {
return(FTS_INCORRECT_DOC_ID_INDEX);
}
-
}
+
/* Not found */
return(FTS_NOT_EXIST_DOC_ID_INDEX);
}
@@ -606,12 +1684,12 @@ enum fts_doc_id_index_enum
innobase_fts_check_doc_id_index_in_def(
/*===================================*/
ulint n_key, /*!< in: Number of keys */
- KEY * key_info) /*!< in: Key definition */
+ const KEY* key_info) /*!< in: Key definition */
{
/* Check whether there is a "FTS_DOC_ID_INDEX" in the to be built index
list */
for (ulint j = 0; j < n_key; j++) {
- KEY* key = &key_info[j];
+ const KEY* key = &key_info[j];
if (innobase_strcasecmp(key->name, FTS_DOC_ID_INDEX_NAME)) {
continue;
@@ -620,14 +1698,15 @@ innobase_fts_check_doc_id_index_in_def(
/* Do a check on FTS DOC ID_INDEX, it must be unique,
named as "FTS_DOC_ID_INDEX" and on column "FTS_DOC_ID" */
if (!(key->flags & HA_NOSAME)
+ || key->user_defined_key_parts != 1
|| strcmp(key->name, FTS_DOC_ID_INDEX_NAME)
|| strcmp(key->key_part[0].field->field_name,
- FTS_DOC_ID_COL_NAME)) {
+ FTS_DOC_ID_COL_NAME)) {
return(FTS_INCORRECT_DOC_ID_INDEX);
- }
+ }
return(FTS_EXIST_DOC_ID_INDEX);
- }
+ }
return(FTS_NOT_EXIST_DOC_ID_INDEX);
}
@@ -637,8 +1716,7 @@ Create an index table where indexes are ordered as follows:
IF a new primary key is defined for the table THEN
1) New primary key
- 2) Original secondary indexes
- 3) New secondary indexes
+ 2) The remaining keys in key_info
ELSE
@@ -646,626 +1724,1272 @@ ELSE
ENDIF
-
-@return key definitions or NULL */
-static
-merge_index_def_t*
-innobase_create_key_def(
-/*====================*/
- trx_t* trx, /*!< in: trx */
- dict_table_t* table, /*!< in: table definition */
- mem_heap_t* heap, /*!< in: heap where space for key
- definitions are allocated */
- KEY* key_info, /*!< in: Indexes to be created */
- ulint& n_keys, /*!< in/out: Number of indexes to
- be created */
- ulint* num_fts_index, /*!< out: Number of FTS indexes */
- ibool* add_fts_doc_id, /*!< out: Whether we need to add
- new DOC ID column for FTS index */
- ibool* add_fts_doc_id_idx)/*!< out: Whether we need to add
- new index on DOC ID column */
+@return key definitions */
+static __attribute__((nonnull, warn_unused_result, malloc))
+index_def_t*
+innobase_create_key_defs(
+/*=====================*/
+ mem_heap_t* heap,
+ /*!< in/out: memory heap where space for key
+ definitions are allocated */
+ const Alter_inplace_info* ha_alter_info,
+ /*!< in: alter operation */
+ const TABLE* altered_table,
+ /*!< in: MySQL table that is being altered */
+ ulint& n_add,
+ /*!< in/out: number of indexes to be created */
+ ulint& n_fts_add,
+ /*!< out: number of FTS indexes to be created */
+ bool got_default_clust,
+ /*!< in: whether the table lacks a primary key */
+ ulint& fts_doc_id_col,
+ /*!< in: The column number for Doc ID */
+ bool& add_fts_doc_id,
+ /*!< in: whether we need to add new DOC ID
+ column for FTS index */
+ bool& add_fts_doc_idx)
+ /*!< in: whether we need to add new DOC ID
+ index for FTS index */
{
- ulint i = 0;
- merge_index_def_t* indexdef;
- merge_index_def_t* indexdefs;
+ index_def_t* indexdef;
+ index_def_t* indexdefs;
bool new_primary;
+ const uint*const add
+ = ha_alter_info->index_add_buffer;
+ const KEY*const key_info
+ = ha_alter_info->key_info_buffer;
- DBUG_ENTER("innobase_create_key_def");
-
- indexdef = indexdefs = (merge_index_def_t*)
- mem_heap_alloc(heap, sizeof *indexdef
- * (n_keys + UT_LIST_GET_LEN(table->indexes)));
-
- *add_fts_doc_id = FALSE;
- *add_fts_doc_id_idx = FALSE;
+ DBUG_ENTER("innobase_create_key_defs");
+ DBUG_ASSERT(!add_fts_doc_id || add_fts_doc_idx);
+ DBUG_ASSERT(ha_alter_info->index_add_count == n_add);
/* If there is a primary key, it is always the first index
- defined for the table. */
+ defined for the innodb_table. */
- new_primary = !my_strcasecmp(system_charset_info,
- key_info->name, "PRIMARY");
+ new_primary = n_add > 0
+ && !my_strcasecmp(system_charset_info,
+ key_info[*add].name, "PRIMARY");
+ n_fts_add = 0;
/* If there is a UNIQUE INDEX consisting entirely of NOT NULL
columns and if the index does not contain column prefix(es)
(only prefix/part of the column is indexed), MySQL will treat the
index as a PRIMARY KEY unless the table already has one. */
- if (!new_primary && (key_info->flags & HA_NOSAME)
- && (!(key_info->flags & HA_KEY_HAS_PART_KEY_SEG))
- && row_table_got_default_clust_index(table)) {
- uint key_part = key_info->key_parts;
+ if (n_add > 0 && !new_primary && got_default_clust
+ && (key_info[*add].flags & HA_NOSAME)
+ && !(key_info[*add].flags & HA_KEY_HAS_PART_KEY_SEG)) {
+ uint key_part = key_info[*add].user_defined_key_parts;
- new_primary = TRUE;
+ new_primary = true;
while (key_part--) {
- if (key_info->key_part[key_part].key_type
- & FIELDFLAG_MAYBE_NULL) {
- new_primary = FALSE;
+ const uint maybe_null
+ = key_info[*add].key_part[key_part].key_type
+ & FIELDFLAG_MAYBE_NULL;
+ DBUG_ASSERT(!maybe_null
+ == !key_info[*add].key_part[key_part].
+ field->real_maybe_null());
+
+ if (maybe_null) {
+ new_primary = false;
break;
}
}
}
- /* Check whether any indexes in the create list are Full
- Text Indexes*/
- for (ulint j = 0; j < n_keys; j++) {
- if (key_info[j].flags & HA_FULLTEXT) {
- (*num_fts_index)++;
- }
- }
-
- /* Check whether there is a "FTS_DOC_ID_INDEX" in the to be built index
- list */
- if (innobase_fts_check_doc_id_index_in_def(n_keys, key_info)
- == FTS_INCORRECT_DOC_ID_INDEX) {
- push_warning_printf((THD*) trx->mysql_thd,
- Sql_condition::WARN_LEVEL_WARN,
- ER_WRONG_NAME_FOR_INDEX,
- " InnoDB: Index name %s is reserved"
- " for the unique index on"
- " FTS_DOC_ID column for FTS"
- " document ID indexing"
- " on table %s. Please check"
- " the index definition to"
- " make sure it is of correct"
- " type\n",
- FTS_DOC_ID_INDEX_NAME,
- table->name);
- DBUG_RETURN(NULL);
- }
-
- /* If we are to build an FTS index, check whether the table
- already has a DOC ID column, if not, we will need to add a
- Doc ID hidden column and rebuild the primary index */
- if (*num_fts_index) {
- enum fts_doc_id_index_enum ret;
- ibool exists;
- ulint doc_col_no;
- ulint fts_doc_col_no;
-
- exists = innobase_fts_check_doc_id_col(table, &fts_doc_col_no);
-
- if (exists) {
-
- if (fts_doc_col_no == ULINT_UNDEFINED) {
+ const bool rebuild = new_primary || add_fts_doc_id
+ || innobase_need_rebuild(ha_alter_info);
+ /* Reserve one more space if new_primary is true, and we might
+ need to add the FTS_DOC_ID_INDEX */
+ indexdef = indexdefs = static_cast<index_def_t*>(
+ mem_heap_alloc(
+ heap, sizeof *indexdef
+ * (ha_alter_info->key_count
+ + rebuild
+ + got_default_clust)));
- push_warning_printf(
- (THD*) trx->mysql_thd,
- Sql_condition::WARN_LEVEL_WARN,
- ER_WRONG_COLUMN_NAME,
- " InnoDB: There exists a column %s "
- "in table %s, but it is the wrong "
- "type. Create of FTS index failed.\n",
- FTS_DOC_ID_COL_NAME, table->name);
-
- DBUG_RETURN(NULL);
-
- } else if (!table->fts) {
- table->fts = fts_create(table);
- }
-
- table->fts->doc_col = fts_doc_col_no;
+ if (rebuild) {
+ ulint primary_key_number;
+ if (new_primary) {
+ DBUG_ASSERT(n_add > 0);
+ primary_key_number = *add;
+ } else if (got_default_clust) {
+ /* Create the GEN_CLUST_INDEX */
+ index_def_t* index = indexdef++;
+
+ index->fields = NULL;
+ index->n_fields = 0;
+ index->ind_type = DICT_CLUSTERED;
+ index->name = mem_heap_strdup(
+ heap, innobase_index_reserve_name);
+ index->key_number = ~0;
+ primary_key_number = ULINT_UNDEFINED;
+ goto created_clustered;
} else {
- *add_fts_doc_id = TRUE;
- *add_fts_doc_id_idx = TRUE;
-
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Rebuild table %s to add "
- "DOC_ID column\n", table->name);
+ primary_key_number = 0;
}
- ret = innobase_fts_check_doc_id_index(table, &doc_col_no);
+ /* Create the PRIMARY key index definition */
+ innobase_create_index_def(
+ altered_table, key_info, primary_key_number,
+ TRUE, TRUE, indexdef++, heap);
- switch (ret) {
- case FTS_NOT_EXIST_DOC_ID_INDEX:
- *add_fts_doc_id_idx = TRUE;
- break;
- case FTS_INCORRECT_DOC_ID_INDEX:
+created_clustered:
+ n_add = 1;
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Index %s is used for FTS"
- " Doc ID indexing on table %s, it is"
- " now on the wrong column or of"
- " wrong format. Please drop it.\n",
- FTS_DOC_ID_INDEX_NAME, table->name);
- DBUG_RETURN(NULL);
+ for (ulint i = 0; i < ha_alter_info->key_count; i++) {
+ if (i == primary_key_number) {
+ continue;
+ }
+ /* Copy the index definitions. */
+ innobase_create_index_def(
+ altered_table, key_info, i, TRUE, FALSE,
+ indexdef, heap);
- default:
- ut_ad(ret == FTS_EXIST_DOC_ID_INDEX);
+ if (indexdef->ind_type & DICT_FTS) {
+ n_fts_add++;
+ }
- ut_ad(doc_col_no == fts_doc_col_no);
+ indexdef++;
+ n_add++;
}
- }
-
- /* If DICT_TF2_FTS_ADD_DOC_ID is set, we will need to rebuild
- the table to add the unique Doc ID column for FTS index. And
- thus the primary index would required to be rebuilt. Copy all
- the index definitions */
- if (new_primary || *add_fts_doc_id) {
- const dict_index_t* index;
- if (new_primary) {
- /* Create the PRIMARY key index definition */
- innobase_create_index_def(&key_info[i++],
- TRUE, TRUE,
- indexdef++, heap);
- }
+ if (n_fts_add > 0) {
+ if (!add_fts_doc_id
+ && !innobase_fts_check_doc_id_col(
+ NULL, altered_table,
+ &fts_doc_id_col)) {
+ fts_doc_id_col = altered_table->s->fields;
+ add_fts_doc_id = true;
+ }
- row_mysql_lock_data_dictionary(trx);
+ if (!add_fts_doc_idx) {
+ fts_doc_id_index_enum ret;
+ ulint doc_col_no;
- index = dict_table_get_first_index(table);
+ ret = innobase_fts_check_doc_id_index(
+ NULL, altered_table, &doc_col_no);
- /* Copy the index definitions of the old table. Skip
- the old clustered index if it is a generated clustered
- index or a PRIMARY KEY. If the clustered index is a
- UNIQUE INDEX, it must be converted to a secondary index. */
+ /* This should have been checked before */
+ ut_ad(ret != FTS_INCORRECT_DOC_ID_INDEX);
- if (new_primary
- && (dict_index_get_nth_col(index, 0)->mtype
- == DATA_SYS
- || !my_strcasecmp(system_charset_info,
- index->name, "PRIMARY"))) {
- index = dict_table_get_next_index(index);
+ if (ret == FTS_NOT_EXIST_DOC_ID_INDEX) {
+ add_fts_doc_idx = true;
+ } else {
+ ut_ad(ret == FTS_EXIST_DOC_ID_INDEX);
+ ut_ad(doc_col_no == ULINT_UNDEFINED
+ || doc_col_no == fts_doc_id_col);
+ }
+ }
}
+ } else {
+ /* Create definitions for added secondary indexes. */
- while (index) {
- innobase_copy_index_def(index, indexdef++, heap);
+ for (ulint i = 0; i < n_add; i++) {
+ innobase_create_index_def(
+ altered_table, key_info, add[i], FALSE, FALSE,
+ indexdef, heap);
- if (new_primary && index->type & DICT_FTS) {
- (*num_fts_index)++;
+ if (indexdef->ind_type & DICT_FTS) {
+ n_fts_add++;
}
- index = dict_table_get_next_index(index);
+ indexdef++;
}
+ }
- /* The primary index would be rebuilt if a FTS Doc ID
- column is to be added, and the primary index definition
- is just copied from old table and stored in indexdefs[0] */
- if (*add_fts_doc_id) {
- indexdefs[0].ind_type |= DICT_CLUSTERED;
- DICT_TF2_FLAG_SET(table, DICT_TF2_FTS_ADD_DOC_ID);
- }
+ DBUG_ASSERT(indexdefs + n_add == indexdef);
- row_mysql_unlock_data_dictionary(trx);
- }
+ if (add_fts_doc_idx) {
+ index_def_t* index = indexdef++;
- /* Create definitions for added secondary indexes. */
+ index->fields = static_cast<index_field_t*>(
+ mem_heap_alloc(heap, sizeof *index->fields));
+ index->n_fields = 1;
+ index->fields->col_no = fts_doc_id_col;
+ index->fields->prefix_len = 0;
+ index->ind_type = DICT_UNIQUE;
- while (i < n_keys) {
- innobase_create_index_def(&key_info[i++], new_primary, FALSE,
- indexdef++, heap);
- }
+ if (rebuild) {
+ index->name = mem_heap_strdup(
+ heap, FTS_DOC_ID_INDEX_NAME);
+ ut_ad(!add_fts_doc_id
+ || fts_doc_id_col == altered_table->s->fields);
+ } else {
+ char* index_name;
+ index->name = index_name = static_cast<char*>(
+ mem_heap_alloc(
+ heap,
+ 1 + sizeof FTS_DOC_ID_INDEX_NAME));
+ *index_name++ = TEMP_INDEX_PREFIX;
+ memcpy(index_name, FTS_DOC_ID_INDEX_NAME,
+ sizeof FTS_DOC_ID_INDEX_NAME);
+ }
- n_keys = indexdef - indexdefs;
+ /* TODO: assign a real MySQL key number for this */
+ index->key_number = ULINT_UNDEFINED;
+ n_add++;
+ }
+ DBUG_ASSERT(indexdef > indexdefs);
+ DBUG_ASSERT((ulint) (indexdef - indexdefs)
+ <= ha_alter_info->key_count
+ + add_fts_doc_idx + got_default_clust);
+ DBUG_ASSERT(ha_alter_info->index_add_count <= n_add);
DBUG_RETURN(indexdefs);
}
/*******************************************************************//**
Check each index column size, make sure they do not exceed the max limit
-@return HA_ERR_INDEX_COL_TOO_LONG if index column size exceeds limit */
-static
-int
+@return true if index column size exceeds limit */
+static __attribute__((nonnull, warn_unused_result))
+bool
innobase_check_column_length(
/*=========================*/
- const dict_table_t*table, /*!< in: table definition */
+ ulint max_col_len, /*!< in: maximum column length */
const KEY* key_info) /*!< in: Indexes to be created */
{
- ulint max_col_len = DICT_MAX_FIELD_LEN_BY_FORMAT(table);
-
- for (ulint key_part = 0; key_part < key_info->key_parts; key_part++) {
+ for (ulint key_part = 0; key_part < key_info->user_defined_key_parts; key_part++) {
if (key_info->key_part[key_part].length > max_col_len) {
- my_error(ER_INDEX_COLUMN_TOO_LONG, MYF(0), max_col_len);
- return(HA_ERR_INDEX_COL_TOO_LONG);
+ return(true);
}
}
- return(0);
+ return(false);
}
-/*******************************************************************//**
-Create a temporary tablename using query id, thread id, and id
-@return temporary tablename */
-static
-char*
-innobase_create_temporary_tablename(
-/*================================*/
- mem_heap_t* heap, /*!< in: memory heap */
- char id, /*!< in: identifier [0-9a-zA-Z] */
- const char* table_name) /*!< in: table name */
+struct ha_innobase_inplace_ctx : public inplace_alter_handler_ctx
{
- char* name;
- ulint len;
- static const char suffix[] = "@0023 "; /* "# " */
+ /** Dummy query graph */
+ que_thr_t* thr;
+ /** InnoDB indexes being created */
+ dict_index_t** add;
+ /** MySQL key numbers for the InnoDB indexes that are being created */
+ const ulint* add_key_numbers;
+ /** number of InnoDB indexes being created */
+ const ulint num_to_add;
+ /** InnoDB indexes being dropped */
+ dict_index_t** drop;
+ /** number of InnoDB indexes being dropped */
+ const ulint num_to_drop;
+ /** InnoDB foreign key constraints being dropped */
+ dict_foreign_t** drop_fk;
+ /** number of InnoDB foreign key constraints being dropped */
+ const ulint num_to_drop_fk;
+ /** InnoDB foreign key constraints being added */
+ dict_foreign_t** add_fk;
+ /** number of InnoDB foreign key constraints being dropped */
+ const ulint num_to_add_fk;
+ /** whether to create the indexes online */
+ bool online;
+ /** memory heap */
+ mem_heap_t* heap;
+ /** dictionary transaction */
+ trx_t* trx;
+ /** table where the indexes are being created or dropped */
+ dict_table_t* indexed_table;
+ /** mapping of old column numbers to new ones, or NULL */
+ const ulint* col_map;
+ /** added AUTO_INCREMENT column position, or ULINT_UNDEFINED */
+ const ulint add_autoinc;
+ /** default values of ADD COLUMN, or NULL */
+ const dtuple_t* add_cols;
+ /** autoinc sequence to use */
+ ib_sequence_t sequence;
+
+ ha_innobase_inplace_ctx(trx_t* user_trx,
+ dict_index_t** add_arg,
+ const ulint* add_key_numbers_arg,
+ ulint num_to_add_arg,
+ dict_index_t** drop_arg,
+ ulint num_to_drop_arg,
+ dict_foreign_t** drop_fk_arg,
+ ulint num_to_drop_fk_arg,
+ dict_foreign_t** add_fk_arg,
+ ulint num_to_add_fk_arg,
+ bool online_arg,
+ mem_heap_t* heap_arg,
+ trx_t* trx_arg,
+ dict_table_t* indexed_table_arg,
+ const ulint* col_map_arg,
+ ulint add_autoinc_arg,
+ ulonglong autoinc_col_min_value_arg,
+ ulonglong autoinc_col_max_value_arg,
+ const dtuple_t* add_cols_arg) :
+ inplace_alter_handler_ctx(),
+ add (add_arg), add_key_numbers (add_key_numbers_arg),
+ num_to_add (num_to_add_arg),
+ drop (drop_arg), num_to_drop (num_to_drop_arg),
+ drop_fk (drop_fk_arg), num_to_drop_fk (num_to_drop_fk_arg),
+ add_fk (add_fk_arg), num_to_add_fk (num_to_add_fk_arg),
+ online (online_arg), heap (heap_arg), trx (trx_arg),
+ indexed_table (indexed_table_arg),
+ col_map (col_map_arg), add_autoinc (add_autoinc_arg),
+ add_cols (add_cols_arg),
+ sequence(user_trx ? user_trx->mysql_thd : 0,
+ autoinc_col_min_value_arg, autoinc_col_max_value_arg)
+ {
+#ifdef UNIV_DEBUG
+ for (ulint i = 0; i < num_to_add; i++) {
+ ut_ad(!add[i]->to_be_dropped);
+ }
+ for (ulint i = 0; i < num_to_drop; i++) {
+ ut_ad(drop[i]->to_be_dropped);
+ }
+#endif /* UNIV_DEBUG */
- len = strlen(table_name);
+ thr = pars_complete_graph_for_exec(NULL, user_trx, heap);
+ }
- name = (char*) mem_heap_alloc(heap, len + sizeof suffix);
- memcpy(name, table_name, len);
- memcpy(name + len, suffix, sizeof suffix);
- name[len + (sizeof suffix - 2)] = id;
+ ~ha_innobase_inplace_ctx()
+ {
+ mem_heap_free(heap);
+ }
- return(name);
-}
+private:
+ // Disable copying
+ ha_innobase_inplace_ctx(const ha_innobase_inplace_ctx&);
+ ha_innobase_inplace_ctx& operator=(const ha_innobase_inplace_ctx&);
+};
-class ha_innobase_add_index : public handler_add_index
+/********************************************************************//**
+Drop any indexes that we were not able to free previously due to
+open table handles. */
+static
+void
+online_retry_drop_indexes_low(
+/*==========================*/
+ dict_table_t* table, /*!< in/out: table */
+ trx_t* trx) /*!< in/out: transaction */
{
-public:
- /** table where the indexes are being created */
- dict_table_t* indexed_table;
- ha_innobase_add_index(TABLE* table, KEY* key_info, uint num_of_keys,
- dict_table_t* indexed_table_arg) :
- handler_add_index(table, key_info, num_of_keys),
- indexed_table (indexed_table_arg) {}
- ~ha_innobase_add_index() {}
-};
+ ut_ad(mutex_own(&dict_sys->mutex));
+ ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH);
+ ut_ad(trx_get_dict_operation(trx) == TRX_DICT_OP_INDEX);
+
+ /* We can have table->n_ref_count > 1, because other threads
+ may have prebuilt->table pointing to the table. However, these
+ other threads should be between statements, waiting for the
+ next statement to execute, or for a meta-data lock. */
+ ut_ad(table->n_ref_count >= 1);
+
+ if (table->drop_aborted) {
+ row_merge_drop_indexes(trx, table, TRUE);
+ }
+}
-/*******************************************************************//**
-This is to create FTS_DOC_ID_INDEX definition on the newly added Doc ID for
-the FTS indexes table
-@return dict_index_t for the FTS_DOC_ID_INDEX */
-dict_index_t*
-innobase_create_fts_doc_id_idx(
-/*===========================*/
- dict_table_t* indexed_table, /*!< in: Table where indexes are
- created */
- trx_t* trx, /*!< in: Transaction */
- mem_heap_t* heap) /*!< Heap for index definitions */
+/********************************************************************//**
+Drop any indexes that we were not able to free previously due to
+open table handles. */
+static __attribute__((nonnull))
+void
+online_retry_drop_indexes(
+/*======================*/
+ dict_table_t* table, /*!< in/out: table */
+ THD* user_thd) /*!< in/out: MySQL connection */
{
- dict_index_t* index;
- merge_index_def_t fts_index_def;
- char* index_name;
-
- /* Create the temp index name for FTS_DOC_ID_INDEX */
- fts_index_def.name = index_name = (char*) mem_heap_alloc(
- heap, FTS_DOC_ID_INDEX_NAME_LEN + 2);
- *index_name++ = TEMP_INDEX_PREFIX;
- memcpy(index_name, FTS_DOC_ID_INDEX_NAME,
- FTS_DOC_ID_INDEX_NAME_LEN);
- index_name[FTS_DOC_ID_INDEX_NAME_LEN] = 0;
-
- /* Only the Doc ID will be indexed */
- fts_index_def.n_fields = 1;
- fts_index_def.ind_type = DICT_UNIQUE;
- fts_index_def.fields = (merge_index_field_t*) mem_heap_alloc(
- heap, sizeof *fts_index_def.fields);
- fts_index_def.fields[0].prefix_len = 0;
- fts_index_def.fields[0].field_name = mem_heap_strdup(
- heap, FTS_DOC_ID_COL_NAME);
-
- index = row_merge_create_index(trx, indexed_table, &fts_index_def);
- return(index);
+ if (table->drop_aborted) {
+ trx_t* trx = innobase_trx_allocate(user_thd);
+
+ trx_start_for_ddl(trx, TRX_DICT_OP_INDEX);
+
+ row_mysql_lock_data_dictionary(trx);
+ online_retry_drop_indexes_low(table, trx);
+ trx_commit_for_mysql(trx);
+ row_mysql_unlock_data_dictionary(trx);
+ trx_free_for_mysql(trx);
+ }
+
+#ifdef UNIV_DEBUG
+ mutex_enter(&dict_sys->mutex);
+ dict_table_check_for_dup_indexes(table, CHECK_ALL_COMPLETE);
+ mutex_exit(&dict_sys->mutex);
+ ut_a(!table->drop_aborted);
+#endif /* UNIV_DEBUG */
}
-/*******************************************************************//**
-Clean up on ha_innobase::add_index error. */
-static
+/********************************************************************//**
+Commit a dictionary transaction and drop any indexes that we were not
+able to free previously due to open table handles. */
+static __attribute__((nonnull))
void
-innobase_add_index_cleanup(
-/*=======================*/
- row_prebuilt_t* prebuilt, /*!< in/out: prebuilt */
- trx_t* trx, /*!< in/out: transaction */
- dict_table_t* table) /*!< in/out: table on which
- the indexes were going to be
- created */
+online_retry_drop_indexes_with_trx(
+/*===============================*/
+ dict_table_t* table, /*!< in/out: table */
+ trx_t* trx) /*!< in/out: transaction */
{
- trx_rollback_to_savepoint(trx, NULL);
+ ut_ad(trx_state_eq(trx, TRX_STATE_NOT_STARTED));
+ ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH);
- ut_a(trx != prebuilt->trx);
+ /* Now that the dictionary is being locked, check if we can
+ drop any incompletely created indexes that may have been left
+ behind in rollback_inplace_alter_table() earlier. */
+ if (table->drop_aborted) {
- trx_free_for_mysql(trx);
+ trx->table_id = 0;
- trx_commit_for_mysql(prebuilt->trx);
+ trx_start_for_ddl(trx, TRX_DICT_OP_INDEX);
- if (table != NULL) {
+ online_retry_drop_indexes_low(table, trx);
+ trx_commit_for_mysql(trx);
+ }
+}
- rw_lock_x_lock(&dict_operation_lock);
+/** Determines if InnoDB is dropping a foreign key constraint.
+@param foreign the constraint
+@param drop_fk constraints being dropped
+@param n_drop_fk number of constraints that are being dropped
+@return whether the constraint is being dropped */
+inline __attribute__((pure, nonnull, warn_unused_result))
+bool
+innobase_dropping_foreign(
+/*======================*/
+ const dict_foreign_t* foreign,
+ dict_foreign_t** drop_fk,
+ ulint n_drop_fk)
+{
+ while (n_drop_fk--) {
+ if (*drop_fk++ == foreign) {
+ return(true);
+ }
+ }
- dict_mutex_enter_for_mysql();
+ return(false);
+}
- /* Note: This check excludes the system tables. However, we
- should be safe because users cannot add indexes to system
- tables. */
+/** Determines if an InnoDB FOREIGN KEY constraint depends on a
+column that is being dropped or modified to NOT NULL.
+@param user_table InnoDB table as it is before the ALTER operation
+@param col_name Name of the column being altered
+@param drop_fk constraints being dropped
+@param n_drop_fk number of constraints that are being dropped
+@param drop true=drop column, false=set NOT NULL
+@retval true Not allowed (will call my_error())
+@retval false Allowed
+*/
+static __attribute__((pure, nonnull, warn_unused_result))
+bool
+innobase_check_foreigns_low(
+/*========================*/
+ const dict_table_t* user_table,
+ dict_foreign_t** drop_fk,
+ ulint n_drop_fk,
+ const char* col_name,
+ bool drop)
+{
+ ut_ad(mutex_own(&dict_sys->mutex));
+
+ /* Check if any FOREIGN KEY constraints are defined on this
+ column. */
+ for (const dict_foreign_t* foreign = UT_LIST_GET_FIRST(
+ user_table->foreign_list);
+ foreign;
+ foreign = UT_LIST_GET_NEXT(foreign_list, foreign)) {
+ if (!drop && !(foreign->type
+ & (DICT_FOREIGN_ON_DELETE_SET_NULL
+ | DICT_FOREIGN_ON_UPDATE_SET_NULL))) {
+ continue;
+ }
- if (UT_LIST_GET_LEN(table->foreign_list) == 0
- && UT_LIST_GET_LEN(table->referenced_list) == 0
- && !table->can_be_evicted) {
+ if (innobase_dropping_foreign(foreign, drop_fk, n_drop_fk)) {
+ continue;
+ }
- dict_table_move_from_non_lru_to_lru(table);
+ for (unsigned f = 0; f < foreign->n_fields; f++) {
+ if (!strcmp(foreign->foreign_col_names[f],
+ col_name)) {
+ my_error(drop
+ ? ER_FK_COLUMN_CANNOT_DROP
+ : ER_FK_COLUMN_NOT_NULL, MYF(0),
+ col_name, foreign->id);
+ return(true);
+ }
}
+ }
- dict_table_close(table, TRUE);
+ if (!drop) {
+ /* SET NULL clauses on foreign key constraints of
+ child tables affect the child tables, not the parent table.
+ The column can be NOT NULL in the parent table. */
+ return(false);
+ }
- dict_mutex_exit_for_mysql();
+ /* Check if any FOREIGN KEY constraints in other tables are
+ referring to the column that is being dropped. */
+ for (const dict_foreign_t* foreign = UT_LIST_GET_FIRST(
+ user_table->referenced_list);
+ foreign;
+ foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) {
+ if (innobase_dropping_foreign(foreign, drop_fk, n_drop_fk)) {
+ continue;
+ }
- rw_lock_x_unlock(&dict_operation_lock);
+ for (unsigned f = 0; f < foreign->n_fields; f++) {
+ char display_name[FN_REFLEN];
+
+ if (strcmp(foreign->referenced_col_names[f],
+ col_name)) {
+ continue;
+ }
+
+ char* buf_end = innobase_convert_name(
+ display_name, (sizeof display_name) - 1,
+ foreign->foreign_table_name,
+ strlen(foreign->foreign_table_name),
+ NULL, TRUE);
+ *buf_end = '\0';
+ my_error(ER_FK_COLUMN_CANNOT_DROP_CHILD,
+ MYF(0), col_name, foreign->id,
+ display_name);
+
+ return(true);
+ }
}
+
+ return(false);
}
-/*******************************************************************//**
-Create indexes.
-@return 0 or error number */
-UNIV_INTERN
-int
-ha_innobase::add_index(
-/*===================*/
- TABLE* in_table, /*!< in: Table where indexes
- are created */
- KEY* key_info, /*!< in: Indexes
- to be created */
- uint num_of_keys, /*!< in: Number of indexes
- to be created */
- handler_add_index** add) /*!< out: context */
+/** Determines if an InnoDB FOREIGN KEY constraint depends on a
+column that is being dropped or modified to NOT NULL.
+@param ha_alter_info Data used during in-place alter
+@param altered_table MySQL table that is being altered
+@param old_table MySQL table as it is before the ALTER operation
+@param user_table InnoDB table as it is before the ALTER operation
+@param drop_fk constraints being dropped
+@param n_drop_fk number of constraints that are being dropped
+@retval true Not allowed (will call my_error())
+@retval false Allowed
+*/
+static __attribute__((pure, nonnull, warn_unused_result))
+bool
+innobase_check_foreigns(
+/*====================*/
+ Alter_inplace_info* ha_alter_info,
+ const TABLE* altered_table,
+ const TABLE* old_table,
+ const dict_table_t* user_table,
+ dict_foreign_t** drop_fk,
+ ulint n_drop_fk)
{
- dict_index_t** index = NULL; /*!< Index to be created */
- dict_index_t* fts_index = NULL;/*!< FTS Index to be created */
- dict_table_t* indexed_table; /*!< Table where indexes are created */
- merge_index_def_t* index_defs; /*!< Index definitions */
- mem_heap_t* heap = NULL; /*!< Heap for index definitions */
- trx_t* trx; /*!< Transaction */
- ulint num_of_idx;
- ulint num_created = 0;
- ibool dict_locked = FALSE;
- ulint new_primary = 0;
- int error;
- ulint num_fts_index = 0;
- ulint num_idx_create = 0;
- ibool fts_add_doc_id = FALSE;
- ibool fts_add_doc_idx = FALSE;
+ List_iterator_fast<Create_field> cf_it(
+ ha_alter_info->alter_info->create_list);
+
+ for (Field** fp = old_table->field; *fp; fp++) {
+ cf_it.rewind();
+ const Create_field* new_field;
- DBUG_ENTER("ha_innobase::add_index");
- ut_a(table);
- ut_a(key_info);
- ut_a(num_of_keys);
+ ut_ad(!(*fp)->real_maybe_null()
+ == !!((*fp)->flags & NOT_NULL_FLAG));
- *add = NULL;
+ while ((new_field = cf_it++)) {
+ if (new_field->field == *fp) {
+ break;
+ }
+ }
- if (srv_created_new_raw || srv_force_recovery) {
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
+ if (!new_field || (new_field->flags & NOT_NULL_FLAG)) {
+ if (innobase_check_foreigns_low(
+ user_table, drop_fk, n_drop_fk,
+ (*fp)->field_name, !new_field)) {
+ return(true);
+ }
+ }
}
- update_thd();
+ return(false);
+}
- /* In case MySQL calls this in the middle of a SELECT query, release
- possible adaptive hash latch to avoid deadlocks of threads. */
- trx_search_latch_release_if_reserved(prebuilt->trx);
+/** Convert a default value for ADD COLUMN.
- /* Check if the index name is reserved. */
- if (innobase_index_name_is_reserved(user_thd, key_info, num_of_keys)) {
- DBUG_RETURN(-1);
+@param heap Memory heap where allocated
+@param dfield InnoDB data field to copy to
+@param field MySQL value for the column
+@param comp nonzero if in compact format */
+static __attribute__((nonnull))
+void
+innobase_build_col_map_add(
+/*=======================*/
+ mem_heap_t* heap,
+ dfield_t* dfield,
+ const Field* field,
+ ulint comp)
+{
+ if (field->is_real_null()) {
+ dfield_set_null(dfield);
+ return;
}
- indexed_table = dict_table_open_on_name(prebuilt->table->name, FALSE);
+ ulint size = field->pack_length();
- if (UNIV_UNLIKELY(!indexed_table)) {
- DBUG_RETURN(HA_ERR_NO_SUCH_TABLE);
- }
+ byte* buf = static_cast<byte*>(mem_heap_alloc(heap, size));
- ut_a(indexed_table == prebuilt->table);
+ row_mysql_store_col_in_innobase_format(
+ dfield, buf, TRUE, field->ptr, size, comp);
+}
- if (indexed_table->tablespace_discarded) {
- DBUG_RETURN(-1);
+/** Construct the translation table for reordering, dropping or
+adding columns.
+
+@param ha_alter_info Data used during in-place alter
+@param altered_table MySQL table that is being altered
+@param table MySQL table as it is before the ALTER operation
+@param new_table InnoDB table corresponding to MySQL altered_table
+@param old_table InnoDB table corresponding to MYSQL table
+@param add_cols Default values for ADD COLUMN, or NULL if no ADD COLUMN
+@param heap Memory heap where allocated
+@return array of integers, mapping column numbers in the table
+to column numbers in altered_table */
+static __attribute__((nonnull(1,2,3,4,5,7), warn_unused_result))
+const ulint*
+innobase_build_col_map(
+/*===================*/
+ Alter_inplace_info* ha_alter_info,
+ const TABLE* altered_table,
+ const TABLE* table,
+ const dict_table_t* new_table,
+ const dict_table_t* old_table,
+ dtuple_t* add_cols,
+ mem_heap_t* heap)
+{
+ DBUG_ENTER("innobase_build_col_map");
+ DBUG_ASSERT(altered_table != table);
+ DBUG_ASSERT(new_table != old_table);
+ DBUG_ASSERT(dict_table_get_n_cols(new_table)
+ >= altered_table->s->fields + DATA_N_SYS_COLS);
+ DBUG_ASSERT(dict_table_get_n_cols(old_table)
+ >= table->s->fields + DATA_N_SYS_COLS);
+ DBUG_ASSERT(!!add_cols == !!(ha_alter_info->handler_flags
+ & Alter_inplace_info::ADD_COLUMN));
+ DBUG_ASSERT(!add_cols || dtuple_get_n_fields(add_cols)
+ == dict_table_get_n_cols(new_table));
+
+ ulint* col_map = static_cast<ulint*>(
+ mem_heap_alloc(heap, old_table->n_cols * sizeof *col_map));
+
+ List_iterator_fast<Create_field> cf_it(
+ ha_alter_info->alter_info->create_list);
+ uint i = 0;
+
+ /* Any dropped columns will map to ULINT_UNDEFINED. */
+ for (uint old_i = 0; old_i + DATA_N_SYS_COLS < old_table->n_cols;
+ old_i++) {
+ col_map[old_i] = ULINT_UNDEFINED;
}
- /* Check that index keys are sensible */
- error = innobase_check_index_keys(key_info, num_of_keys, prebuilt->table);
+ while (const Create_field* new_field = cf_it++) {
+ for (uint old_i = 0; table->field[old_i]; old_i++) {
+ const Field* field = table->field[old_i];
+ if (new_field->field == field) {
+ col_map[old_i] = i;
+ goto found_col;
+ }
+ }
- if (UNIV_UNLIKELY(error)) {
- dict_table_close(prebuilt->table, FALSE);
- DBUG_RETURN(error);
+ innobase_build_col_map_add(
+ heap, dtuple_get_nth_field(add_cols, i),
+ altered_table->s->field[i],
+ dict_table_is_comp(new_table));
+found_col:
+ i++;
}
- /* Check each index's column length to make sure they do not
- exceed limit */
- for (ulint i = 0; i < num_of_keys; i++) {
- if (key_info[i].flags & HA_FULLTEXT) {
- continue;
+ DBUG_ASSERT(i == altered_table->s->fields);
+
+ i = table->s->fields;
+
+ /* Add the InnoDB hidden FTS_DOC_ID column, if any. */
+ if (i + DATA_N_SYS_COLS < old_table->n_cols) {
+ /* There should be exactly one extra field,
+ the FTS_DOC_ID. */
+ DBUG_ASSERT(DICT_TF2_FLAG_IS_SET(old_table,
+ DICT_TF2_FTS_HAS_DOC_ID));
+ DBUG_ASSERT(i + DATA_N_SYS_COLS + 1 == old_table->n_cols);
+ DBUG_ASSERT(!strcmp(dict_table_get_col_name(
+ old_table, table->s->fields),
+ FTS_DOC_ID_COL_NAME));
+ if (altered_table->s->fields + DATA_N_SYS_COLS
+ < new_table->n_cols) {
+ DBUG_ASSERT(DICT_TF2_FLAG_IS_SET(
+ new_table,
+ DICT_TF2_FTS_HAS_DOC_ID));
+ DBUG_ASSERT(altered_table->s->fields
+ + DATA_N_SYS_COLS + 1
+ == new_table->n_cols);
+ col_map[i] = altered_table->s->fields;
+ } else {
+ DBUG_ASSERT(!DICT_TF2_FLAG_IS_SET(
+ new_table,
+ DICT_TF2_FTS_HAS_DOC_ID));
+ col_map[i] = ULINT_UNDEFINED;
}
- error = innobase_check_column_length(prebuilt->table,
- &key_info[i]);
+ i++;
+ } else {
+ DBUG_ASSERT(!DICT_TF2_FLAG_IS_SET(
+ old_table,
+ DICT_TF2_FTS_HAS_DOC_ID));
+ }
+
+ for (; i < old_table->n_cols; i++) {
+ col_map[i] = i + new_table->n_cols - old_table->n_cols;
+ }
+
+ DBUG_RETURN(col_map);
+}
+
+/** Drop newly create FTS index related auxiliary table during
+FIC create index process, before fts_add_index is called
+@param table table that was being rebuilt online
+@param trx transaction
+@return DB_SUCCESS if successful, otherwise last error code
+*/
+static
+dberr_t
+innobase_drop_fts_index_table(
+/*==========================*/
+ dict_table_t* table,
+ trx_t* trx)
+{
+ dberr_t ret_err = DB_SUCCESS;
- if (error) {
- dict_table_close(prebuilt->table, FALSE);
- DBUG_RETURN(error);
+ for (dict_index_t* index = dict_table_get_first_index(table);
+ index != NULL;
+ index = dict_table_get_next_index(index)) {
+ if (index->type & DICT_FTS) {
+ dberr_t err;
+
+ err = fts_drop_index_tables(trx, index);
+
+ if (err != DB_SUCCESS) {
+ ret_err = err;
+ }
}
}
- heap = mem_heap_create(1024);
- trx_start_if_not_started(prebuilt->trx);
+ return(ret_err);
+}
+
+/** Update internal structures with concurrent writes blocked,
+while preparing ALTER TABLE.
+
+@param ha_alter_info Data used during in-place alter
+@param altered_table MySQL table that is being altered
+@param old_table MySQL table as it is before the ALTER operation
+@param user_table InnoDB table that is being altered
+@param user_trx User transaction, for locking the table
+@param table_name Table name in MySQL
+@param flags Table and tablespace flags
+@param flags2 Additional table flags
+@param heap Memory heap, or NULL
+@param drop_index Indexes to be dropped, or NULL
+@param n_drop_index Number of indexes to drop
+@param drop_foreign Foreign key constraints to be dropped, or NULL
+@param n_drop_foreign Number of foreign key constraints to drop
+@param fts_doc_id_col The column number of FTS_DOC_ID
+@param add_autoinc_col The number of an added AUTO_INCREMENT column,
+ or ULINT_UNDEFINED if none was added
+@param add_fts_doc_id Flag: add column FTS_DOC_ID?
+@param add_fts_doc_id_idx Flag: add index (FTS_DOC_ID)?
+
+@retval true Failure
+@retval false Success
+*/
+static __attribute__((warn_unused_result, nonnull(1,2,3,4)))
+bool
+prepare_inplace_alter_table_dict(
+/*=============================*/
+ Alter_inplace_info* ha_alter_info,
+ const TABLE* altered_table,
+ const TABLE* old_table,
+ dict_table_t* user_table,
+ trx_t* user_trx,
+ const char* table_name,
+ ulint flags,
+ ulint flags2,
+ mem_heap_t* heap,
+ dict_index_t** drop_index,
+ ulint n_drop_index,
+ dict_foreign_t** drop_foreign,
+ ulint n_drop_foreign,
+ dict_foreign_t** add_foreign,
+ ulint n_add_foreign,
+ ulint fts_doc_id_col,
+ ulint add_autoinc_col,
+ ulonglong autoinc_col_max_value,
+ bool add_fts_doc_id,
+ bool add_fts_doc_id_idx)
+{
+ trx_t* trx;
+ bool dict_locked = false;
+ dict_index_t** add_index; /* indexes to be created */
+ ulint* add_key_nums; /* MySQL key numbers */
+ ulint n_add_index;
+ index_def_t* index_defs; /* index definitions */
+ dict_index_t* fts_index = NULL;
+ dict_table_t* indexed_table = user_table;
+ ulint new_clustered = 0;
+ dberr_t error;
+ THD* user_thd = user_trx->mysql_thd;
+ const ulint* col_map = NULL;
+ dtuple_t* add_cols = NULL;
+ ulint num_fts_index;
+
+ DBUG_ENTER("prepare_inplace_alter_table_dict");
+ DBUG_ASSERT((add_autoinc_col != ULINT_UNDEFINED)
+ == (autoinc_col_max_value > 0));
+ DBUG_ASSERT(!n_drop_index == !drop_index);
+ DBUG_ASSERT(!n_drop_foreign == !drop_foreign);
+ DBUG_ASSERT(!add_fts_doc_id || add_fts_doc_id_idx);
+ DBUG_ASSERT(!add_fts_doc_id_idx
+ || innobase_fulltext_exist(altered_table->s));
+
+ trx_start_if_not_started_xa(user_trx);
/* Create a background transaction for the operations on
the data dictionary tables. */
trx = innobase_trx_allocate(user_thd);
- trx_start_if_not_started(trx);
-
- /* We don't want this table to be evicted from the cache while we
- are building an index on it. Another issue is that while we are
- building the index this table could be referred to in a foreign
- key relationship. In innobase_add_index_cleanup() we check for
- that condition before moving it back to the LRU list. */
- row_mysql_lock_data_dictionary(trx);
+ trx_start_for_ddl(trx, TRX_DICT_OP_INDEX);
- if (prebuilt->table->can_be_evicted) {
- dict_table_move_from_lru_to_non_lru(prebuilt->table);
+ if (!heap) {
+ heap = mem_heap_create(1024);
}
- row_mysql_unlock_data_dictionary(trx);
-
/* Create table containing all indexes to be built in this
- alter table add index so that they are in the correct order
+ ALTER TABLE ADD INDEX so that they are in the correct order
in the table. */
- num_of_idx = num_of_keys;
+ n_add_index = ha_alter_info->index_add_count;
- index_defs = innobase_create_key_def(
- trx, prebuilt->table, heap, key_info, num_of_idx,
- &num_fts_index, &fts_add_doc_id, &fts_add_doc_idx);
+ index_defs = innobase_create_key_defs(
+ heap, ha_alter_info, altered_table, n_add_index,
+ num_fts_index, row_table_got_default_clust_index(indexed_table),
+ fts_doc_id_col, add_fts_doc_id, add_fts_doc_id_idx);
- if (!index_defs) {
- error = DB_UNSUPPORTED;
- goto error_handling;
- }
+ new_clustered = DICT_CLUSTERED & index_defs[0].ind_type;
+
+ const bool locked =
+ !ha_alter_info->online
+ || add_autoinc_col != ULINT_UNDEFINED
+ || num_fts_index > 0
+ || (innobase_need_rebuild(ha_alter_info)
+ && innobase_fulltext_exist(altered_table->s));
- /* Currently, support create one single FULLTEXT index in parallel at
- a time */
if (num_fts_index > 1) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Only support create ONE Fulltext index"
- " at a time\n");
- error = DB_UNSUPPORTED;
- goto error_handling;
+ my_error(ER_INNODB_FT_LIMIT, MYF(0));
+ goto error_handled;
}
- new_primary = DICT_CLUSTERED & index_defs[0].ind_type;
+ if (locked && ha_alter_info->online) {
+ /* This should have been blocked in
+ check_if_supported_inplace_alter(). */
+ ut_ad(0);
+ my_error(ER_NOT_SUPPORTED_YET, MYF(0),
+ thd_query_string(user_thd)->str);
+ goto error_handled;
+ }
- /* If a new FTS Doc ID column is to be added, there will be
- one additional index to be built on the Doc ID column itself. */
- num_idx_create = (fts_add_doc_idx) ? num_of_idx + 1 : num_of_idx;
+ /* The primary index would be rebuilt if a FTS Doc ID
+ column is to be added, and the primary index definition
+ is just copied from old table and stored in indexdefs[0] */
+ DBUG_ASSERT(!add_fts_doc_id || new_clustered);
+ DBUG_ASSERT(!!new_clustered ==
+ (innobase_need_rebuild(ha_alter_info)
+ || add_fts_doc_id));
/* Allocate memory for dictionary index definitions */
- index = (dict_index_t**) mem_heap_alloc(
- heap, num_idx_create * sizeof *index);
- /* Flag this transaction as a dictionary operation, so that
- the data dictionary will be locked in crash recovery. */
- trx_set_dict_operation(trx, TRX_DICT_OP_INDEX);
+ add_index = (dict_index_t**) mem_heap_alloc(
+ heap, n_add_index * sizeof *add_index);
+ add_key_nums = (ulint*) mem_heap_alloc(
+ heap, n_add_index * sizeof *add_key_nums);
+
+ /* This transaction should be dictionary operation, so that
+ the data dictionary will be locked during crash recovery. */
+
+ ut_ad(trx->dict_operation == TRX_DICT_OP_INDEX);
/* Acquire a lock on the table before creating any indexes. */
- error = row_merge_lock_table(prebuilt->trx, prebuilt->table,
- new_primary ? LOCK_X : LOCK_S);
- if (UNIV_UNLIKELY(error != DB_SUCCESS)) {
+ if (locked) {
+ error = row_merge_lock_table(
+ user_trx, indexed_table, LOCK_S);
+
+ if (error != DB_SUCCESS) {
- goto error_handling;
+ goto error_handling;
+ }
+ } else {
+ error = DB_SUCCESS;
}
/* Latch the InnoDB data dictionary exclusively so that no deadlocks
or lock waits can happen in it during an index create operation. */
row_mysql_lock_data_dictionary(trx);
- dict_locked = TRUE;
+ dict_locked = true;
+
+ /* Wait for background stats processing to stop using the table that
+ we are going to alter. We know bg stats will not start using it again
+ until we are holding the data dict locked and we are holding it here
+ at least until checking ut_ad(user_table->n_ref_count == 1) below.
+ XXX what may happen if bg stats opens the table after we
+ have unlocked data dictionary below? */
+ dict_stats_wait_bg_to_stop_using_tables(user_table, NULL, trx);
- ut_d(dict_table_check_for_dup_indexes(prebuilt->table, TRUE));
+ online_retry_drop_indexes_low(indexed_table, trx);
- /* If a new primary key is defined for the table we need
+ ut_d(dict_table_check_for_dup_indexes(
+ indexed_table, CHECK_ABORTED_OK));
+
+ /* If a new clustered index is defined for the table we need
to drop the original table and rebuild all indexes. */
- if (UNIV_UNLIKELY(new_primary)) {
- /* This transaction should be the only one
- operating on the table. The table get above
- would have incremented the ref count to 2. */
- ut_a(prebuilt->table->n_ref_count == 2);
+ if (new_clustered) {
+ char* new_table_name = dict_mem_create_temporary_tablename(
+ heap, indexed_table->name, indexed_table->id);
+ ulint n_cols;
+
+ if (innobase_check_foreigns(
+ ha_alter_info, altered_table, old_table,
+ user_table, drop_foreign, n_drop_foreign)) {
+ goto new_clustered_failed;
+ }
- char* new_table_name = innobase_create_temporary_tablename(
- heap, '1', prebuilt->table->name);
+ n_cols = altered_table->s->fields;
+
+ if (add_fts_doc_id) {
+ n_cols++;
+ DBUG_ASSERT(flags2 & DICT_TF2_FTS);
+ DBUG_ASSERT(add_fts_doc_id_idx);
+ flags2 |= DICT_TF2_FTS_ADD_DOC_ID
+ | DICT_TF2_FTS_HAS_DOC_ID
+ | DICT_TF2_FTS;
+ }
- /* Clone the table. */
+ DBUG_ASSERT(!add_fts_doc_id_idx || (flags2 & DICT_TF2_FTS));
+
+ /* Create the table. */
trx_set_dict_operation(trx, TRX_DICT_OP_TABLE);
- indexed_table = row_merge_create_temporary_table(
- new_table_name, index_defs, prebuilt->table, trx);
- if (!indexed_table) {
+ if (dict_table_get_low(new_table_name)) {
+ my_error(ER_TABLE_EXISTS_ERROR, MYF(0),
+ new_table_name);
+ goto new_clustered_failed;
+ }
- switch (trx->error_state) {
- case DB_TABLESPACE_ALREADY_EXISTS:
- case DB_DUPLICATE_KEY:
- innobase_convert_tablename(new_table_name);
- my_error(HA_ERR_TABLE_EXIST, MYF(0),
- new_table_name);
- error = HA_ERR_TABLE_EXIST;
- break;
- default:
- error = convert_error_code_to_mysql(
- trx->error_state,
- prebuilt->table->flags,
- user_thd);
+ /* The initial space id 0 may be overridden later. */
+ indexed_table = dict_mem_table_create(
+ new_table_name, 0, n_cols, flags, flags2);
+
+ if (DICT_TF_HAS_DATA_DIR(flags)) {
+ indexed_table->data_dir_path =
+ mem_heap_strdup(indexed_table->heap,
+ user_table->data_dir_path);
+ }
+
+ for (uint i = 0; i < altered_table->s->fields; i++) {
+ const Field* field = altered_table->field[i];
+ ulint is_unsigned;
+ ulint field_type
+ = (ulint) field->type();
+ ulint col_type
+ = get_innobase_type_from_mysql_type(
+ &is_unsigned, field);
+ ulint charset_no;
+ ulint col_len;
+
+ /* we assume in dtype_form_prtype() that this
+ fits in two bytes */
+ ut_a(field_type <= MAX_CHAR_COLL_NUM);
+
+ if (!field->real_maybe_null()) {
+ field_type |= DATA_NOT_NULL;
+ }
+
+ if (field->binary()) {
+ field_type |= DATA_BINARY_TYPE;
+ }
+
+ if (is_unsigned) {
+ field_type |= DATA_UNSIGNED;
+ }
+
+ if (dtype_is_string_type(col_type)) {
+ charset_no = (ulint) field->charset()->number;
+
+ if (charset_no > MAX_CHAR_COLL_NUM) {
+ dict_mem_table_free(indexed_table);
+ my_error(ER_WRONG_KEY_COLUMN, MYF(0),
+ field->field_name);
+ goto new_clustered_failed;
+ }
+ } else {
+ charset_no = 0;
}
- ut_d(dict_table_check_for_dup_indexes(prebuilt->table,
- TRUE));
- row_mysql_unlock_data_dictionary(trx);
- mem_heap_free(heap);
+ col_len = field->pack_length();
+
+ /* The MySQL pack length contains 1 or 2 bytes
+ length field for a true VARCHAR. Let us
+ subtract that, so that the InnoDB column
+ length in the InnoDB data dictionary is the
+ real maximum byte length of the actual data. */
- innobase_add_index_cleanup(
- prebuilt, trx, prebuilt->table);
+ if (field->type() == MYSQL_TYPE_VARCHAR) {
+ uint32 length_bytes
+ = static_cast<const Field_varstring*>(
+ field)->length_bytes;
- DBUG_RETURN(error);
+ col_len -= length_bytes;
+
+ if (length_bytes == 2) {
+ field_type |= DATA_LONG_TRUE_VARCHAR;
+ }
+ }
+
+ if (dict_col_name_is_reserved(field->field_name)) {
+ dict_mem_table_free(indexed_table);
+ my_error(ER_WRONG_COLUMN_NAME, MYF(0),
+ field->field_name);
+ goto new_clustered_failed;
+ }
+
+ dict_mem_table_add_col(
+ indexed_table, heap,
+ field->field_name,
+ col_type,
+ dtype_form_prtype(field_type, charset_no),
+ col_len);
}
- trx->table_id = indexed_table->id;
+ if (add_fts_doc_id) {
+ fts_add_doc_id_column(indexed_table, heap);
+ indexed_table->fts->doc_col = fts_doc_id_col;
+ ut_ad(fts_doc_id_col == altered_table->s->fields);
+ } else if (indexed_table->fts) {
+ indexed_table->fts->doc_col = fts_doc_id_col;
+ }
+
+ error = row_create_table_for_mysql(indexed_table, trx, false);
+
+ switch (error) {
+ dict_table_t* temp_table;
+ case DB_SUCCESS:
+ /* We need to bump up the table ref count and
+ before we can use it we need to open the
+ table. The new_table must be in the data
+ dictionary cache, because we are still holding
+ the dict_sys->mutex. */
+ ut_ad(mutex_own(&dict_sys->mutex));
+ temp_table = dict_table_open_on_name(
+ indexed_table->name, TRUE, FALSE,
+ DICT_ERR_IGNORE_NONE);
+ ut_a(indexed_table == temp_table);
+ /* n_ref_count must be 1, because purge cannot
+ be executing on this very table as we are
+ holding dict_operation_lock X-latch. */
+ DBUG_ASSERT(indexed_table->n_ref_count == 1);
+ break;
+ case DB_TABLESPACE_EXISTS:
+ my_error(ER_TABLESPACE_EXISTS, MYF(0),
+ new_table_name);
+ goto new_clustered_failed;
+ case DB_DUPLICATE_KEY:
+ my_error(HA_ERR_TABLE_EXIST, MYF(0),
+ altered_table->s->table_name.str);
+ goto new_clustered_failed;
+ default:
+ my_error_innodb(error, table_name, flags);
+ new_clustered_failed:
+ DBUG_ASSERT(trx != user_trx);
+ trx_rollback_to_savepoint(trx, NULL);
+
+ ut_ad(user_table->n_ref_count == 1);
+
+ online_retry_drop_indexes_with_trx(user_table, trx);
+
+ goto err_exit;
+ }
+
+ if (ha_alter_info->handler_flags
+ & Alter_inplace_info::ADD_COLUMN) {
+
+ add_cols = dtuple_create(
+ heap, dict_table_get_n_cols(indexed_table));
+
+ dict_table_copy_types(add_cols, indexed_table);
+ }
+
+ col_map = innobase_build_col_map(
+ ha_alter_info, altered_table, old_table,
+ indexed_table, user_table,
+ add_cols, heap);
+ } else {
+ DBUG_ASSERT(!innobase_need_rebuild(ha_alter_info));
+
+ if (!indexed_table->fts
+ && innobase_fulltext_exist(altered_table->s)) {
+ indexed_table->fts = fts_create(indexed_table);
+ indexed_table->fts->doc_col = fts_doc_id_col;
+ }
}
+ /* Assign table_id, so that no table id of
+ fts_create_index_tables() will be written to the undo logs. */
+ DBUG_ASSERT(indexed_table->id != 0);
+ trx->table_id = indexed_table->id;
+
/* Create the indexes in SYS_INDEXES and load into dictionary. */
- for (num_created = 0; num_created < num_of_idx; num_created++) {
+ for (ulint num_created = 0; num_created < n_add_index; num_created++) {
- index[num_created] = row_merge_create_index(
+ add_index[num_created] = row_merge_create_index(
trx, indexed_table, &index_defs[num_created]);
- if (!index[num_created]) {
+ add_key_nums[num_created] = index_defs[num_created].key_number;
+
+ if (!add_index[num_created]) {
error = trx->error_state;
+ DBUG_ASSERT(error != DB_SUCCESS);
goto error_handling;
}
- if (index[num_created]->type & DICT_FTS) {
- fts_index = index[num_created];
- fts_create_index_tables(trx, fts_index);
+ if (add_index[num_created]->type & DICT_FTS) {
+ DBUG_ASSERT(num_fts_index);
+ DBUG_ASSERT(!fts_index);
+ DBUG_ASSERT(add_index[num_created]->type == DICT_FTS);
+ fts_index = add_index[num_created];
+ }
+ /* If only online ALTER TABLE operations have been
+ requested, allocate a modification log. If the table
+ will be locked anyway, the modification
+ log is unnecessary. When rebuilding the table
+ (new_clustered), we will allocate the log for the
+ clustered index of the old table, later. */
+ if (new_clustered
+ || locked
+ || user_table->ibd_file_missing
+ || dict_table_is_discarded(user_table)) {
+ /* No need to allocate a modification log. */
+ ut_ad(!add_index[num_created]->online_log);
+ } else if (add_index[num_created]->type & DICT_FTS) {
+ /* Fulltext indexes are not covered
+ by a modification log. */
+ } else {
+ DBUG_EXECUTE_IF("innodb_OOM_prepare_inplace_alter",
+ error = DB_OUT_OF_MEMORY;
+ goto error_handling;);
+ rw_lock_x_lock(&add_index[num_created]->lock);
+ bool ok = row_log_allocate(add_index[num_created],
+ NULL, true, NULL, NULL);
+ rw_lock_x_unlock(&add_index[num_created]->lock);
+
+ if (!ok) {
+ error = DB_OUT_OF_MEMORY;
+ goto error_handling;
+ }
}
}
- /* create FTS_DOC_ID_INDEX on the Doc ID column on the table */
- if (fts_add_doc_idx) {
- index[num_of_idx] = innobase_create_fts_doc_id_idx(
- indexed_table, trx, heap);
- /* FTS_DOC_ID_INDEX is internal defined new index */
- num_of_idx++;
- num_created++;
+ ut_ad(new_clustered == (indexed_table != user_table));
+
+ DBUG_EXECUTE_IF("innodb_OOM_prepare_inplace_alter",
+ error = DB_OUT_OF_MEMORY;
+ goto error_handling;);
+
+ if (new_clustered && !locked) {
+ /* Allocate a log for online table rebuild. */
+ dict_index_t* clust_index = dict_table_get_first_index(
+ user_table);
+
+ rw_lock_x_lock(&clust_index->lock);
+ bool ok = row_log_allocate(
+ clust_index, indexed_table,
+ !(ha_alter_info->handler_flags
+ & Alter_inplace_info::ADD_PK_INDEX),
+ add_cols, col_map);
+ rw_lock_x_unlock(&clust_index->lock);
+
+ if (!ok) {
+ error = DB_OUT_OF_MEMORY;
+ goto error_handling;
+ }
+
+ /* Assign a consistent read view for
+ row_merge_read_clustered_index(). */
+ trx_assign_read_view(user_trx);
}
- if (num_fts_index) {
+ if (fts_index) {
+ /* Ensure that the dictionary operation mode will
+ not change while creating the auxiliary tables. */
+ trx_dict_op_t op = trx_get_dict_operation(trx);
+
+#ifdef UNIV_DEBUG
+ switch (op) {
+ case TRX_DICT_OP_NONE:
+ break;
+ case TRX_DICT_OP_TABLE:
+ case TRX_DICT_OP_INDEX:
+ goto op_ok;
+ }
+ ut_error;
+op_ok:
+#endif /* UNIV_DEBUG */
+ ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH);
+ ut_ad(mutex_own(&dict_sys->mutex));
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+
DICT_TF2_FLAG_SET(indexed_table, DICT_TF2_FTS);
+ /* This function will commit the transaction and reset
+ the trx_t::dict_operation flag on success. */
+
+ error = fts_create_index_tables(trx, fts_index);
+
+ DBUG_EXECUTE_IF("innodb_test_fail_after_fts_index_table",
+ error = DB_LOCK_WAIT_TIMEOUT;
+ goto error_handling;);
+
+ if (error != DB_SUCCESS) {
+ goto error_handling;
+ }
+
+ trx_start_for_ddl(trx, op);
+
if (!indexed_table->fts
|| ib_vector_size(indexed_table->fts->indexes) == 0) {
- fts_create_common_tables(trx, indexed_table,
- prebuilt->table->name, TRUE);
+ error = fts_create_common_tables(
+ trx, indexed_table, user_table->name, TRUE);
+
+ DBUG_EXECUTE_IF("innodb_test_fail_after_fts_common_table",
+ error = DB_LOCK_WAIT_TIMEOUT;
+ goto error_handling;);
+
+ if (error != DB_SUCCESS) {
+ goto error_handling;
+ }
indexed_table->fts->fts_status |= TABLE_DICT_LOCKED;
- innobase_fts_load_stopword(
- indexed_table, trx, ha_thd());
+
+ error = innobase_fts_load_stopword(
+ indexed_table, trx, user_thd)
+ ? DB_SUCCESS : DB_ERROR;
indexed_table->fts->fts_status &= ~TABLE_DICT_LOCKED;
- }
- if (new_primary && prebuilt->table->fts) {
- indexed_table->fts->doc_col = prebuilt->table->fts->doc_col;
+ if (error != DB_SUCCESS) {
+ goto error_handling;
+ }
}
+
+ ut_ad(trx_get_dict_operation(trx) == op);
}
- ut_ad(error == DB_SUCCESS);
+ DBUG_ASSERT(error == DB_SUCCESS);
/* Commit the data dictionary transaction in order to release
the table locks on the system tables. This means that if
@@ -1276,633 +3000,2212 @@ ha_innobase::add_index(
trx_commit_for_mysql(trx);
row_mysql_unlock_data_dictionary(trx);
- dict_locked = FALSE;
+ dict_locked = false;
ut_a(trx->lock.n_active_thrs == 0);
- if (UNIV_UNLIKELY(new_primary)) {
- /* A primary key is to be built. Acquire an exclusive
- table lock also on the table that is being created. */
- ut_ad(indexed_table != prebuilt->table);
-
- error = row_merge_lock_table(prebuilt->trx, indexed_table,
- LOCK_X);
-
- if (UNIV_UNLIKELY(error != DB_SUCCESS)) {
+error_handling:
+ /* After an error, remove all those index definitions from the
+ dictionary which were defined. */
- goto error_handling;
- }
+ switch (error) {
+ case DB_SUCCESS:
+ ut_a(!dict_locked);
+
+ ut_d(mutex_enter(&dict_sys->mutex));
+ ut_d(dict_table_check_for_dup_indexes(
+ user_table, CHECK_PARTIAL_OK));
+ ut_d(mutex_exit(&dict_sys->mutex));
+ ha_alter_info->handler_ctx = new ha_innobase_inplace_ctx(
+ user_trx, add_index, add_key_nums, n_add_index,
+ drop_index, n_drop_index,
+ drop_foreign, n_drop_foreign,
+ add_foreign, n_add_foreign,
+ !locked, heap, trx, indexed_table, col_map,
+ add_autoinc_col,
+ ha_alter_info->create_info->auto_increment_value,
+ autoinc_col_max_value,
+ add_cols);
+ DBUG_RETURN(false);
+ case DB_TABLESPACE_EXISTS:
+ my_error(ER_TABLESPACE_EXISTS, MYF(0), "(unknown)");
+ break;
+ case DB_DUPLICATE_KEY:
+ my_error(ER_DUP_KEY, MYF(0), "SYS_INDEXES");
+ break;
+ default:
+ my_error_innodb(error, table_name, user_table->flags);
}
- /* Read the clustered index of the table and build indexes
- based on this information using temporary files and merge sort. */
- error = row_merge_build_indexes(prebuilt->trx,
- prebuilt->table, indexed_table,
- index, num_of_idx, table);
+error_handled:
-error_handling:
-
- /* After an error, remove all those index definitions from the
- dictionary which were defined. */
+ user_trx->error_info = NULL;
+ trx->error_state = DB_SUCCESS;
if (!dict_locked) {
row_mysql_lock_data_dictionary(trx);
- dict_locked = TRUE;
}
- switch (error) {
- case DB_SUCCESS:
- ut_d(dict_table_check_for_dup_indexes(prebuilt->table, TRUE));
+ if (new_clustered) {
+ if (indexed_table != user_table) {
- *add = new ha_innobase_add_index(
- table, key_info, num_of_keys, indexed_table);
+ if (DICT_TF2_FLAG_IS_SET(indexed_table, DICT_TF2_FTS)) {
+ innobase_drop_fts_index_table(
+ indexed_table, trx);
+ }
- dict_table_close(prebuilt->table, dict_locked);
- break;
+ dict_table_close(indexed_table, TRUE, FALSE);
- case DB_TOO_BIG_RECORD:
- my_error(HA_ERR_TO_BIG_ROW, MYF(0));
- goto error_exit;
- case DB_PRIMARY_KEY_IS_NULL:
- my_error(ER_PRIMARY_CANT_HAVE_NULL, MYF(0));
- /* fall through */
- case DB_DUPLICATE_KEY:
- if (fts_add_doc_idx
- && prebuilt->trx->error_key_num == num_of_idx - 1) {
- prebuilt->trx->error_key_num = ULINT_UNDEFINED;
- }
-error_exit:
- prebuilt->trx->error_info = NULL;
- /* fall through */
- default:
- dict_table_close(prebuilt->table, dict_locked);
+#ifdef UNIV_DDL_DEBUG
+ /* Nobody should have initialized the stats of the
+ newly created table yet. When this is the case, we
+ know that it has not been added for background stats
+ gathering. */
+ ut_a(!indexed_table->stat_initialized);
+#endif /* UNIV_DDL_DEBUG */
- trx->error_state = DB_SUCCESS;
+ row_merge_drop_table(trx, indexed_table);
- if (new_primary) {
- if (indexed_table != prebuilt->table) {
- dict_table_close(indexed_table, dict_locked);
- row_merge_drop_table(trx, indexed_table);
+ /* Free the log for online table rebuild, if
+ one was allocated. */
+
+ dict_index_t* clust_index = dict_table_get_first_index(
+ user_table);
+
+ rw_lock_x_lock(&clust_index->lock);
+
+ if (clust_index->online_log) {
+ ut_ad(!locked);
+ row_log_abort_sec(clust_index);
+ clust_index->online_status
+ = ONLINE_INDEX_COMPLETE;
}
- } else {
- row_merge_drop_indexes(trx, indexed_table,
- index, num_created);
+
+ rw_lock_x_unlock(&clust_index->lock);
}
+
+ trx_commit_for_mysql(trx);
+ /* n_ref_count must be 1, because purge cannot
+ be executing on this very table as we are
+ holding dict_operation_lock X-latch. */
+ DBUG_ASSERT(user_table->n_ref_count == 1 || !locked);
+
+ online_retry_drop_indexes_with_trx(user_table, trx);
+ } else {
+ ut_ad(indexed_table == user_table);
+ row_merge_drop_indexes(trx, user_table, TRUE);
+ trx_commit_for_mysql(trx);
+ }
+
+ ut_d(dict_table_check_for_dup_indexes(user_table, CHECK_ALL_COMPLETE));
+ ut_ad(!user_table->drop_aborted);
+
+err_exit:
+ /* Clear the to_be_dropped flag in the data dictionary cache. */
+ for (ulint i = 0; i < n_drop_index; i++) {
+ DBUG_ASSERT(*drop_index[i]->name != TEMP_INDEX_PREFIX);
+ DBUG_ASSERT(drop_index[i]->to_be_dropped);
+ drop_index[i]->to_be_dropped = 0;
}
- ut_ad(!new_primary || prebuilt->table->n_ref_count == 1);
- trx_commit_for_mysql(trx);
- ut_ad(dict_locked);
row_mysql_unlock_data_dictionary(trx);
+
trx_free_for_mysql(trx);
mem_heap_free(heap);
- if (prebuilt->trx) {
- trx_commit_for_mysql(prebuilt->trx);
- }
+ trx_commit_for_mysql(user_trx);
/* There might be work for utility threads.*/
srv_active_wake_master_thread();
- DBUG_RETURN(convert_error_code_to_mysql(error, prebuilt->table->flags,
- user_thd));
+ DBUG_RETURN(true);
}
-/*******************************************************************//**
-Finalize or undo add_index().
-@return 0 or error number */
+/* Check whether an index is needed for the foreign key constraint.
+If so, if it is dropped, is there an equivalent index can play its role.
+@return true if the index is needed and can't be dropped */
+static __attribute__((warn_unused_result))
+bool
+innobase_check_foreign_key_index(
+/*=============================*/
+ Alter_inplace_info* ha_alter_info, /*!< in: Structure describing
+ changes to be done by ALTER
+ TABLE */
+ dict_index_t* index, /*!< in: index to check */
+ dict_table_t* indexed_table, /*!< in: table that owns the
+ foreign keys */
+ trx_t* trx, /*!< in/out: transaction */
+ dict_foreign_t** drop_fk, /*!< in: Foreign key constraints
+ to drop */
+ ulint n_drop_fk) /*!< in: Number of foreign keys
+ to drop */
+{
+ dict_foreign_t* foreign;
+
+ ut_ad(!index->to_be_dropped);
+
+ /* Check if the index is referenced. */
+ foreign = dict_table_get_referenced_constraint(indexed_table, index);
+
+ ut_ad(!foreign || indexed_table
+ == foreign->referenced_table);
+
+ if (foreign
+ && !dict_foreign_find_index(
+ indexed_table,
+ foreign->referenced_col_names,
+ foreign->n_fields, index,
+ /*check_charsets=*/TRUE,
+ /*check_null=*/FALSE)
+ && !innobase_find_equiv_index(
+ foreign->referenced_col_names,
+ foreign->n_fields,
+ ha_alter_info->key_info_buffer,
+ ha_alter_info->index_add_buffer,
+ ha_alter_info->index_add_count)
+ ) {
+ trx->error_info = index;
+ return(true);
+ }
+
+ /* Check if this index references some
+ other table */
+ foreign = dict_table_get_foreign_constraint(
+ indexed_table, index);
+
+ ut_ad(!foreign || indexed_table
+ == foreign->foreign_table);
+
+ if (foreign
+ && !innobase_dropping_foreign(
+ foreign, drop_fk, n_drop_fk)
+ && !dict_foreign_find_index(
+ indexed_table,
+ foreign->foreign_col_names,
+ foreign->n_fields, index,
+ /*check_charsets=*/TRUE,
+ /*check_null=*/FALSE)
+ && !innobase_find_equiv_index(
+ foreign->foreign_col_names,
+ foreign->n_fields,
+ ha_alter_info->key_info_buffer,
+ ha_alter_info->index_add_buffer,
+ ha_alter_info->index_add_count)
+ ) {
+ trx->error_info = index;
+ return(true);
+ }
+
+ return(false);
+}
+
+/** Allows InnoDB to update internal structures with concurrent
+writes blocked (provided that check_if_supported_inplace_alter()
+did not return HA_ALTER_INPLACE_NO_LOCK).
+This will be invoked before inplace_alter_table().
+
+@param altered_table TABLE object for new version of table.
+@param ha_alter_info Structure describing changes to be done
+by ALTER TABLE and holding data used during in-place alter.
+
+@retval true Failure
+@retval false Success
+*/
UNIV_INTERN
-int
-ha_innobase::final_add_index(
-/*=========================*/
- handler_add_index* add_arg,/*!< in: context from add_index() */
- bool commit) /*!< in: true=commit, false=rollback */
+bool
+ha_innobase::prepare_inplace_alter_table(
+/*=====================================*/
+ TABLE* altered_table,
+ Alter_inplace_info* ha_alter_info)
{
- ha_innobase_add_index* add;
- trx_t* trx;
- int err = 0;
+ dict_index_t** drop_index; /*!< Index to be dropped */
+ ulint n_drop_index; /*!< Number of indexes to drop */
+ dict_foreign_t**drop_fk; /*!< Foreign key constraints to drop */
+ ulint n_drop_fk; /*!< Number of foreign keys to drop */
+	dict_foreign_t**add_fk = NULL;	/*!< Foreign key constraints to add */
+	ulint		n_add_fk;	/*!< Number of foreign keys to add */
+ dict_table_t* indexed_table; /*!< Table where indexes are created */
+ mem_heap_t* heap;
+ int error;
+ ulint flags;
+ ulint flags2;
+ ulint max_col_len;
+ ulint add_autoinc_col_no = ULINT_UNDEFINED;
+ ulonglong autoinc_col_max_value = 0;
+ ulint fts_doc_col_no = ULINT_UNDEFINED;
+ bool add_fts_doc_id = false;
+ bool add_fts_doc_id_idx = false;
+
+ DBUG_ENTER("prepare_inplace_alter_table");
+ DBUG_ASSERT(!ha_alter_info->handler_ctx);
+ DBUG_ASSERT(ha_alter_info->create_info);
+
+ if (srv_read_only_mode) {
+ DBUG_RETURN(false);
+ }
- DBUG_ENTER("ha_innobase::final_add_index");
+ MONITOR_ATOMIC_INC(MONITOR_PENDING_ALTER_TABLE);
- ut_ad(add_arg);
- add = static_cast<class ha_innobase_add_index*>(add_arg);
+#ifdef UNIV_DEBUG
+ for (dict_index_t* index = dict_table_get_first_index(prebuilt->table);
+ index;
+ index = dict_table_get_next_index(index)) {
+ ut_ad(!index->to_be_dropped);
+ }
+#endif /* UNIV_DEBUG */
- /* Create a background transaction for the operations on
- the data dictionary tables. */
- trx = innobase_trx_allocate(user_thd);
- trx_start_if_not_started(trx);
+ ut_d(mutex_enter(&dict_sys->mutex));
+ ut_d(dict_table_check_for_dup_indexes(
+ prebuilt->table, CHECK_ABORTED_OK));
+ ut_d(mutex_exit(&dict_sys->mutex));
- /* Flag this transaction as a dictionary operation, so that
- the data dictionary will be locked in crash recovery. */
- trx_set_dict_operation(trx, TRX_DICT_OP_INDEX);
+ if (!(ha_alter_info->handler_flags & ~INNOBASE_INPLACE_IGNORE)) {
+ /* Nothing to do */
+ goto func_exit;
+ }
- /* Latch the InnoDB data dictionary exclusively so that no deadlocks
- or lock waits can happen in it during an index create operation. */
- row_mysql_lock_data_dictionary(trx);
+ if (ha_alter_info->handler_flags
+ == Alter_inplace_info::CHANGE_CREATE_OPTION
+ && !innobase_need_rebuild(ha_alter_info)) {
+ goto func_exit;
+ }
- if (add->indexed_table != prebuilt->table) {
- ulint error;
+ if (ha_alter_info->handler_flags
+ & Alter_inplace_info::CHANGE_CREATE_OPTION) {
+ if (const char* invalid_opt = create_options_are_invalid(
+ user_thd, altered_table,
+ ha_alter_info->create_info,
+ prebuilt->table->space != 0)) {
+ my_error(ER_ILLEGAL_HA_CREATE_OPTION, MYF(0),
+ table_type(), invalid_opt);
+ goto err_exit_no_heap;
+ }
+ }
- /* We copied the table (new_primary). */
- if (commit) {
- mem_heap_t* heap;
- char* tmp_name;
+ /* Check if any index name is reserved. */
+ if (innobase_index_name_is_reserved(
+ user_thd,
+ ha_alter_info->key_info_buffer,
+ ha_alter_info->key_count)) {
+err_exit_no_heap:
+ DBUG_ASSERT(prebuilt->trx->dict_operation_lock_mode == 0);
+ if (ha_alter_info->handler_flags & ~INNOBASE_INPLACE_IGNORE) {
+ online_retry_drop_indexes(prebuilt->table, user_thd);
+ }
+ DBUG_RETURN(true);
+ }
- heap = mem_heap_create(1024);
+ indexed_table = prebuilt->table;
- /* A new primary key was defined for the table
- and there was no error at this point. We can
- now rename the old table as a temporary table,
- rename the new temporary table as the old
- table and drop the old table. */
- tmp_name = innobase_create_temporary_tablename(
- heap, '2', prebuilt->table->name);
+ /* Check that index keys are sensible */
+ error = innobase_check_index_keys(ha_alter_info, indexed_table);
- error = row_merge_rename_tables(
- prebuilt->table, add->indexed_table,
- tmp_name, trx);
+ if (error) {
+ goto err_exit_no_heap;
+ }
- ut_a(prebuilt->table->n_ref_count == 1);
+ /* Prohibit renaming a column to something that the table
+ already contains. */
+ if (ha_alter_info->handler_flags
+ & Alter_inplace_info::ALTER_COLUMN_NAME) {
+ List_iterator_fast<Create_field> cf_it(
+ ha_alter_info->alter_info->create_list);
- switch (error) {
- case DB_TABLESPACE_ALREADY_EXISTS:
- case DB_DUPLICATE_KEY:
- ut_a(add->indexed_table->n_ref_count == 0);
- innobase_convert_tablename(tmp_name);
- my_error(HA_ERR_TABLE_EXIST, MYF(0), tmp_name);
- err = HA_ERR_TABLE_EXIST;
- break;
- default:
- err = convert_error_code_to_mysql(
- error, prebuilt->table->flags,
- user_thd);
- break;
+ for (Field** fp = table->field; *fp; fp++) {
+ if (!((*fp)->flags & FIELD_IS_RENAMED)) {
+ continue;
}
- mem_heap_free(heap);
+ const char* name = 0;
+
+ cf_it.rewind();
+ while (Create_field* cf = cf_it++) {
+ if (cf->field == *fp) {
+ name = cf->field_name;
+ goto check_if_ok_to_rename;
+ }
+ }
+
+ ut_error;
+check_if_ok_to_rename:
+ /* Prohibit renaming a column from FTS_DOC_ID
+ if full-text indexes exist. */
+ if (!my_strcasecmp(system_charset_info,
+ (*fp)->field_name,
+ FTS_DOC_ID_COL_NAME)
+ && innobase_fulltext_exist(altered_table->s)) {
+ my_error(ER_INNODB_FT_WRONG_DOCID_COLUMN,
+ MYF(0), name);
+ goto err_exit_no_heap;
+ }
+
+ /* Prohibit renaming a column to an internal column. */
+ const char* s = prebuilt->table->col_names;
+ unsigned j;
+ /* Skip user columns.
+ MySQL should have checked these already.
+ We want to allow renaming of c1 to c2, c2 to c1. */
+ for (j = 0; j < table->s->fields; j++) {
+ s += strlen(s) + 1;
+ }
+
+ for (; j < prebuilt->table->n_def; j++) {
+ if (!my_strcasecmp(
+ system_charset_info, name, s)) {
+ my_error(ER_WRONG_COLUMN_NAME, MYF(0),
+ s);
+ goto err_exit_no_heap;
+ }
+
+ s += strlen(s) + 1;
+ }
}
+ }
- if (!commit || err) {
- dict_table_close(add->indexed_table, TRUE);
- error = row_merge_drop_table(trx, add->indexed_table);
- trx_commit_for_mysql(prebuilt->trx);
- } else {
- dict_table_t* old_table = prebuilt->table;
- trx_commit_for_mysql(prebuilt->trx);
- row_prebuilt_free(prebuilt, TRUE);
- error = row_merge_drop_table(trx, old_table);
- prebuilt = row_create_prebuilt(add->indexed_table,
- 0 /* XXX Do we know the mysql_row_len here?
- Before the addition of this parameter to
- row_create_prebuilt() the mysql_row_len
- member was left 0 (from zalloc) in the
- prebuilt object. */);
+ if (!innobase_table_flags(altered_table,
+ ha_alter_info->create_info,
+ user_thd,
+ srv_file_per_table
+ || indexed_table->space != 0,
+ &flags, &flags2)) {
+ goto err_exit_no_heap;
+ }
+
+ max_col_len = DICT_MAX_FIELD_LEN_BY_FORMAT_FLAG(flags);
+
+ /* Check each index's column length to make sure they do not
+ exceed limit */
+ for (ulint i = 0; i < ha_alter_info->index_add_count; i++) {
+ const KEY* key = &ha_alter_info->key_info_buffer[
+ ha_alter_info->index_add_buffer[i]];
+
+ if (key->flags & HA_FULLTEXT) {
+ /* The column length does not matter for
+ fulltext search indexes. But, UNIQUE
+ fulltext indexes are not supported. */
+ DBUG_ASSERT(!(key->flags & HA_NOSAME));
+ DBUG_ASSERT(!(key->flags & HA_KEYFLAG_MASK
+ & ~(HA_FULLTEXT
+ | HA_PACK_KEY
+ | HA_BINARY_PACK_KEY)));
+ continue;
}
- err = convert_error_code_to_mysql(
- error, prebuilt->table->flags, user_thd);
+ if (innobase_check_column_length(max_col_len, key)) {
+ my_error(ER_INDEX_COLUMN_TOO_LONG, MYF(0),
+ max_col_len);
+ goto err_exit_no_heap;
+ }
}
- if (add->indexed_table == prebuilt->table
- || DICT_TF2_FLAG_IS_SET(prebuilt->table, DICT_TF2_FTS_ADD_DOC_ID)) {
- /* We created secondary indexes (!new_primary) or create full
- text index and added a new Doc ID column, we will need to
- rename the secondary index on the Doc ID column to its
- official index name.. */
+ /* Check existing index definitions for too-long column
+ prefixes as well, in case max_col_len shrunk. */
+ for (const dict_index_t* index
+ = dict_table_get_first_index(indexed_table);
+ index;
+ index = dict_table_get_next_index(index)) {
+ if (index->type & DICT_FTS) {
+ DBUG_ASSERT(index->type == DICT_FTS
+ || (index->type & DICT_CORRUPT));
+ continue;
+ }
- if (commit) {
- err = convert_error_code_to_mysql(
- row_merge_rename_indexes(trx, prebuilt->table),
- prebuilt->table->flags, user_thd);
+ for (ulint i = 0; i < dict_index_get_n_fields(index); i++) {
+ const dict_field_t* field
+ = dict_index_get_nth_field(index, i);
+ if (field->prefix_len > max_col_len) {
+ my_error(ER_INDEX_COLUMN_TOO_LONG, MYF(0),
+ max_col_len);
+ goto err_exit_no_heap;
+ }
}
+ }
+
+ n_drop_index = 0;
+ n_drop_fk = 0;
+
+ if (ha_alter_info->handler_flags
+ & Alter_inplace_info::DROP_FOREIGN_KEY) {
+ DBUG_ASSERT(ha_alter_info->alter_info->drop_list.elements > 0);
- if (!commit || err) {
- dict_index_t* index;
- dict_index_t* next_index;
+ heap = mem_heap_create(1024);
- for (index = dict_table_get_first_index(
- prebuilt->table);
- index; index = next_index) {
+ drop_fk = static_cast<dict_foreign_t**>(
+ mem_heap_alloc(
+ heap,
+ ha_alter_info->alter_info->drop_list.elements
+ * sizeof(dict_foreign_t*)));
- next_index = dict_table_get_next_index(index);
+ List_iterator<Alter_drop> drop_it(
+ ha_alter_info->alter_info->drop_list);
- if (*index->name == TEMP_INDEX_PREFIX) {
- row_merge_drop_index(
- index, prebuilt->table, trx);
+ while (Alter_drop* drop = drop_it++) {
+ if (drop->type != Alter_drop::FOREIGN_KEY) {
+ continue;
+ }
+
+ for (dict_foreign_t* foreign = UT_LIST_GET_FIRST(
+ prebuilt->table->foreign_list);
+ foreign != NULL;
+ foreign = UT_LIST_GET_NEXT(
+ foreign_list, foreign)) {
+ const char* fid = strchr(foreign->id, '/');
+
+ DBUG_ASSERT(fid);
+ /* If no database/ prefix was present in
+ the FOREIGN KEY constraint name, compare
+ to the full constraint name. */
+ fid = fid ? fid + 1 : foreign->id;
+
+ if (!my_strcasecmp(system_charset_info,
+ fid, drop->name)) {
+ drop_fk[n_drop_fk++] = foreign;
+ goto found_fk;
}
}
+
+ my_error(ER_CANT_DROP_FIELD_OR_KEY, MYF(0),
+ drop->name);
+ goto err_exit;
+found_fk:
+ continue;
}
- DICT_TF2_FLAG_UNSET(prebuilt->table, DICT_TF2_FTS_ADD_DOC_ID);
+ DBUG_ASSERT(n_drop_fk > 0);
+ DBUG_ASSERT(n_drop_fk
+ == ha_alter_info->alter_info->drop_list.elements);
+ } else {
+ drop_fk = NULL;
+ heap = NULL;
}
- /* If index is successfully built, we will need to rebuild index
- translation table. Set valid index entry count in the translation
- table to zero. */
- if (err == 0 && commit) {
- ibool new_primary;
- dict_index_t* index;
- dict_index_t* next_index;
- ibool new_fts = FALSE;
- dict_index_t* primary;
+ if (ha_alter_info->index_drop_count) {
+ dict_index_t* drop_primary = NULL;
- new_primary = !my_strcasecmp(
- system_charset_info, add->key_info[0].name, "PRIMARY");
-
- primary = dict_table_get_first_index(add->indexed_table);
-
- if (!new_primary) {
- new_primary = !my_strcasecmp(
- system_charset_info, add->key_info[0].name,
- primary->name);
+ DBUG_ASSERT(ha_alter_info->handler_flags
+ & (Alter_inplace_info::DROP_INDEX
+ | Alter_inplace_info::DROP_UNIQUE_INDEX
+ | Alter_inplace_info::DROP_PK_INDEX));
+ /* Check which indexes to drop. */
+ if (!heap) {
+ heap = mem_heap_create(1024);
+ }
+ drop_index = static_cast<dict_index_t**>(
+ mem_heap_alloc(
+ heap, (ha_alter_info->index_drop_count + 1)
+ * sizeof *drop_index));
+
+ for (uint i = 0; i < ha_alter_info->index_drop_count; i++) {
+ const KEY* key
+ = ha_alter_info->index_drop_buffer[i];
+ dict_index_t* index
+ = dict_table_get_index_on_name_and_min_id(
+ indexed_table, key->name);
+
+ if (!index) {
+ push_warning_printf(
+ user_thd,
+ Sql_condition::WARN_LEVEL_WARN,
+ HA_ERR_WRONG_INDEX,
+ "InnoDB could not find key "
+ "with name %s", key->name);
+ } else {
+ ut_ad(!index->to_be_dropped);
+ if (!dict_index_is_clust(index)) {
+ drop_index[n_drop_index++] = index;
+ } else {
+ drop_primary = index;
+ }
+ }
}
- share->idx_trans_tbl.index_count = 0;
+ /* If all FULLTEXT indexes were removed, drop an
+ internal FTS_DOC_ID_INDEX as well, unless it exists in
+ the table. */
+
+ if (innobase_fulltext_exist(table->s)
+ && !innobase_fulltext_exist(altered_table->s)
+ && !DICT_TF2_FLAG_IS_SET(
+ indexed_table, DICT_TF2_FTS_HAS_DOC_ID)) {
+ dict_index_t* fts_doc_index
+ = dict_table_get_index_on_name(
+ indexed_table, FTS_DOC_ID_INDEX_NAME);
+
+ // Add some fault tolerance for non-debug builds.
+ if (fts_doc_index == NULL) {
+ goto check_if_can_drop_indexes;
+ }
- if (new_primary) {
- for (index = primary; index; index = next_index) {
+ DBUG_ASSERT(!fts_doc_index->to_be_dropped);
+
+ for (uint i = 0; i < table->s->keys; i++) {
+ if (!my_strcasecmp(
+ system_charset_info,
+ FTS_DOC_ID_INDEX_NAME,
+ table->s->key_info[i].name)) {
+ /* The index exists in the MySQL
+ data dictionary. Do not drop it,
+ even though it is no longer needed
+ by InnoDB fulltext search. */
+ goto check_if_can_drop_indexes;
+ }
+ }
- next_index = dict_table_get_next_index(index);
+ drop_index[n_drop_index++] = fts_doc_index;
+ }
- if (index->type & DICT_FTS) {
- fts_add_index(index,
- add->indexed_table);
- new_fts = TRUE;
+check_if_can_drop_indexes:
+ /* Check if the indexes can be dropped. */
+
+ /* Prevent a race condition between DROP INDEX and
+ CREATE TABLE adding FOREIGN KEY constraints. */
+ row_mysql_lock_data_dictionary(prebuilt->trx);
+
+ if (prebuilt->trx->check_foreigns) {
+ for (uint i = 0; i < n_drop_index; i++) {
+ dict_index_t* index = drop_index[i];
+
+ if (innobase_check_foreign_key_index(
+ ha_alter_info, index, indexed_table,
+ prebuilt->trx, drop_fk, n_drop_fk)) {
+ row_mysql_unlock_data_dictionary(
+ prebuilt->trx);
+ prebuilt->trx->error_info = index;
+ print_error(HA_ERR_DROP_INDEX_FK,
+ MYF(0));
+ goto err_exit;
}
}
+
+ /* If a primary index is dropped, need to check
+ any depending foreign constraints get affected */
+ if (drop_primary
+ && innobase_check_foreign_key_index(
+ ha_alter_info, drop_primary, indexed_table,
+ prebuilt->trx, drop_fk, n_drop_fk)) {
+ row_mysql_unlock_data_dictionary(prebuilt->trx);
+ print_error(HA_ERR_DROP_INDEX_FK, MYF(0));
+ goto err_exit;
+ }
+ }
+
+ if (!n_drop_index) {
+ drop_index = NULL;
} else {
- ulint i;
- for (i = 0; i < add->num_of_keys; i++) {
- if (add->key_info[i].flags & HA_FULLTEXT) {
- dict_index_t* fts_index;
-
- fts_index =
- dict_table_get_index_on_name(
- prebuilt->table,
- add->key_info[i].name);
-
- ut_ad(fts_index);
- fts_add_index(fts_index,
- prebuilt->table);
- new_fts = TRUE;
+ /* Flag all indexes that are to be dropped. */
+ for (ulint i = 0; i < n_drop_index; i++) {
+ ut_ad(!drop_index[i]->to_be_dropped);
+ drop_index[i]->to_be_dropped = 1;
+ }
+ }
+
+ row_mysql_unlock_data_dictionary(prebuilt->trx);
+ } else {
+ drop_index = NULL;
+ }
+
+ n_add_fk = 0;
+
+ if (ha_alter_info->handler_flags
+ & Alter_inplace_info::ADD_FOREIGN_KEY) {
+ ut_ad(!prebuilt->trx->check_foreigns);
+
+ if (!heap) {
+ heap = mem_heap_create(1024);
+ }
+
+ add_fk = static_cast<dict_foreign_t**>(
+ mem_heap_zalloc(
+ heap,
+ ha_alter_info->alter_info->key_list.elements
+ * sizeof(dict_foreign_t*)));
+
+ if (!innobase_get_foreign_key_info(
+ ha_alter_info, table_share, prebuilt->table,
+ add_fk, &n_add_fk, heap, prebuilt->trx)) {
+err_exit:
+ if (n_drop_index) {
+ row_mysql_lock_data_dictionary(prebuilt->trx);
+
+ /* Clear the to_be_dropped flags, which might
+ have been set at this point. */
+ for (ulint i = 0; i < n_drop_index; i++) {
+ DBUG_ASSERT(*drop_index[i]->name
+ != TEMP_INDEX_PREFIX);
+ drop_index[i]->to_be_dropped = 0;
}
+
+ row_mysql_unlock_data_dictionary(prebuilt->trx);
}
+
+ if (heap) {
+ mem_heap_free(heap);
+ }
+ goto err_exit_no_heap;
}
+ }
- if (new_fts) {
- fts_optimize_add_table(prebuilt->table);
+ if (!(ha_alter_info->handler_flags & INNOBASE_INPLACE_CREATE)) {
+ if (heap) {
+ ha_alter_info->handler_ctx
+ = new ha_innobase_inplace_ctx(
+ prebuilt->trx, 0, 0, 0,
+ drop_index, n_drop_index,
+ drop_fk, n_drop_fk,
+ add_fk, n_add_fk,
+ ha_alter_info->online,
+ heap, 0, indexed_table, 0,
+ ULINT_UNDEFINED, 0, 0, 0);
}
+
+func_exit:
+ DBUG_ASSERT(prebuilt->trx->dict_operation_lock_mode == 0);
+ if (ha_alter_info->handler_flags & ~INNOBASE_INPLACE_IGNORE) {
+ online_retry_drop_indexes(prebuilt->table, user_thd);
+ }
+ DBUG_RETURN(false);
}
- trx_commit_for_mysql(trx);
- if (prebuilt->trx) {
- trx_commit_for_mysql(prebuilt->trx);
+ /* If we are to build a full-text search index, check whether
+ the table already has a DOC ID column. If not, we will need to
+ add a Doc ID hidden column and rebuild the primary index */
+ if (innobase_fulltext_exist(altered_table->s)) {
+ ulint doc_col_no;
+
+ if (!innobase_fts_check_doc_id_col(
+ prebuilt->table, altered_table, &fts_doc_col_no)) {
+ fts_doc_col_no = altered_table->s->fields;
+ add_fts_doc_id = true;
+ add_fts_doc_id_idx = true;
+
+ push_warning_printf(
+ user_thd,
+ Sql_condition::WARN_LEVEL_WARN,
+ HA_ERR_WRONG_INDEX,
+ "InnoDB rebuilding table to add column "
+ FTS_DOC_ID_COL_NAME);
+ } else if (fts_doc_col_no == ULINT_UNDEFINED) {
+ goto err_exit;
+ }
+
+ switch (innobase_fts_check_doc_id_index(
+ prebuilt->table, altered_table, &doc_col_no)) {
+ case FTS_NOT_EXIST_DOC_ID_INDEX:
+ add_fts_doc_id_idx = true;
+ break;
+ case FTS_INCORRECT_DOC_ID_INDEX:
+ my_error(ER_INNODB_FT_WRONG_DOCID_INDEX, MYF(0),
+ FTS_DOC_ID_INDEX_NAME);
+ goto err_exit;
+ case FTS_EXIST_DOC_ID_INDEX:
+ DBUG_ASSERT(doc_col_no == fts_doc_col_no
+ || doc_col_no == ULINT_UNDEFINED
+ || (ha_alter_info->handler_flags
+ & (Alter_inplace_info::ALTER_COLUMN_ORDER
+ | Alter_inplace_info::DROP_COLUMN
+ | Alter_inplace_info::ADD_COLUMN)));
+ }
}
- ut_d(dict_table_check_for_dup_indexes(prebuilt->table, TRUE));
+ /* See if an AUTO_INCREMENT column was added. */
+ uint i = 0;
+ List_iterator_fast<Create_field> cf_it(
+ ha_alter_info->alter_info->create_list);
+ while (const Create_field* new_field = cf_it++) {
+ const Field* field;
- ut_a(fts_check_cached_index(prebuilt->table));
+ DBUG_ASSERT(i < altered_table->s->fields);
- row_mysql_unlock_data_dictionary(trx);
+ for (uint old_i = 0; table->field[old_i]; old_i++) {
+ if (new_field->field == table->field[old_i]) {
+ goto found_col;
+ }
+ }
- trx_free_for_mysql(trx);
+ /* This is an added column. */
+ DBUG_ASSERT(!new_field->field);
+ DBUG_ASSERT(ha_alter_info->handler_flags
+ & Alter_inplace_info::ADD_COLUMN);
- /* There might be work for utility threads.*/
- srv_active_wake_master_thread();
+ field = altered_table->field[i];
- delete add;
- DBUG_RETURN(err);
+ DBUG_ASSERT((MTYP_TYPENR(field->unireg_check)
+ == Field::NEXT_NUMBER)
+ == !!(field->flags & AUTO_INCREMENT_FLAG));
+
+ if (field->flags & AUTO_INCREMENT_FLAG) {
+ if (add_autoinc_col_no != ULINT_UNDEFINED) {
+ /* This should have been blocked earlier. */
+ ut_ad(0);
+ my_error(ER_WRONG_AUTO_KEY, MYF(0));
+ goto err_exit;
+ }
+ add_autoinc_col_no = i;
+
+ autoinc_col_max_value = innobase_get_int_col_max_value(
+ field);
+ }
+found_col:
+ i++;
+ }
+
+ DBUG_ASSERT(user_thd == prebuilt->trx->mysql_thd);
+ DBUG_RETURN(prepare_inplace_alter_table_dict(
+ ha_alter_info, altered_table, table,
+ prebuilt->table, prebuilt->trx,
+ table_share->table_name.str,
+ flags, flags2,
+ heap, drop_index, n_drop_index,
+ drop_fk, n_drop_fk, add_fk, n_add_fk,
+ fts_doc_col_no, add_autoinc_col_no,
+ autoinc_col_max_value, add_fts_doc_id,
+ add_fts_doc_id_idx));
}
-/*******************************************************************//**
-Prepare to drop some indexes of a table.
-@return 0 or error number */
+
+/** Alter the table structure in-place with operations
+specified using Alter_inplace_info.
+The level of concurrency allowed during this operation depends
+on the return value from check_if_supported_inplace_alter().
+
+@param altered_table TABLE object for new version of table.
+@param ha_alter_info Structure describing changes to be done
+by ALTER TABLE and holding data used during in-place alter.
+
+@retval true Failure
+@retval false Success
+*/
UNIV_INTERN
-int
-ha_innobase::prepare_drop_index(
-/*============================*/
- TABLE* in_table, /*!< in: Table where indexes are dropped */
- uint* key_num, /*!< in: Key nums to be dropped */
- uint num_of_keys) /*!< in: Number of keys to be dropped */
+bool
+ha_innobase::inplace_alter_table(
+/*=============================*/
+ TABLE* altered_table,
+ Alter_inplace_info* ha_alter_info)
{
- trx_t* trx;
- int err = 0;
- uint n_key;
+ dberr_t error;
- DBUG_ENTER("ha_innobase::prepare_drop_index");
- ut_ad(table);
- ut_ad(key_num);
- ut_ad(num_of_keys);
- if (srv_created_new_raw || srv_force_recovery) {
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
+ DBUG_ENTER("inplace_alter_table");
+
+ if (srv_read_only_mode) {
+ DBUG_RETURN(false);
}
- update_thd();
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(!rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
+ ut_ad(!rw_lock_own(&dict_operation_lock, RW_LOCK_SHARED));
+#endif /* UNIV_SYNC_DEBUG */
- trx_search_latch_release_if_reserved(prebuilt->trx);
- trx = prebuilt->trx;
+ DEBUG_SYNC(user_thd, "innodb_inplace_alter_table_enter");
- /* Test and mark all the indexes to be dropped */
+ if (!(ha_alter_info->handler_flags & INNOBASE_INPLACE_CREATE)) {
+ok_exit:
+ DEBUG_SYNC(user_thd, "innodb_after_inplace_alter_table");
+ DBUG_RETURN(false);
+ }
- row_mysql_lock_data_dictionary(trx);
- ut_d(dict_table_check_for_dup_indexes(prebuilt->table, TRUE));
+ if (ha_alter_info->handler_flags
+ == Alter_inplace_info::CHANGE_CREATE_OPTION
+ && !innobase_need_rebuild(ha_alter_info)) {
+ goto ok_exit;
+ }
- /* Check that none of the indexes have previously been flagged
- for deletion. */
- {
- const dict_index_t* index
- = dict_table_get_first_index(prebuilt->table);
- do {
- ut_a(!index->to_be_dropped);
- index = dict_table_get_next_index(index);
- } while (index);
+ ha_innobase_inplace_ctx* ctx
+ = static_cast<ha_innobase_inplace_ctx*>
+ (ha_alter_info->handler_ctx);
+
+ DBUG_ASSERT(ctx);
+ DBUG_ASSERT(ctx->trx);
+
+ if (prebuilt->table->ibd_file_missing
+ || dict_table_is_discarded(prebuilt->table)) {
+ goto all_done;
+ }
+
+ /* Read the clustered index of the table and build
+ indexes based on this information using temporary
+ files and merge sort. */
+ DBUG_EXECUTE_IF("innodb_OOM_inplace_alter",
+ error = DB_OUT_OF_MEMORY; goto oom;);
+ error = row_merge_build_indexes(
+ prebuilt->trx,
+ prebuilt->table, ctx->indexed_table,
+ ctx->online,
+ ctx->add, ctx->add_key_numbers, ctx->num_to_add,
+ altered_table, ctx->add_cols, ctx->col_map,
+ ctx->add_autoinc, ctx->sequence);
+#ifndef DBUG_OFF
+oom:
+#endif /* !DBUG_OFF */
+ if (error == DB_SUCCESS && ctx->online
+ && ctx->indexed_table != prebuilt->table) {
+ DEBUG_SYNC_C("row_log_table_apply1_before");
+ error = row_log_table_apply(
+ ctx->thr, prebuilt->table, altered_table);
}
- for (n_key = 0; n_key < num_of_keys; n_key++) {
- const KEY* key;
- dict_index_t* index;
+ DEBUG_SYNC_C("inplace_after_index_build");
- key = table->key_info + key_num[n_key];
- index = dict_table_get_index_on_name_and_min_id(
- prebuilt->table, key->name);
+ DBUG_EXECUTE_IF("create_index_fail",
+ error = DB_DUPLICATE_KEY;);
- if (!index) {
- sql_print_error("InnoDB could not find key n:o %u "
- "with name %s for table %s",
- key_num[n_key],
- key ? key->name : "NULL",
- prebuilt->table->name);
+ /* After an error, remove all those index definitions
+ from the dictionary which were defined. */
- err = HA_ERR_KEY_NOT_FOUND;
- goto func_exit;
+ switch (error) {
+ KEY* dup_key;
+ all_done:
+ case DB_SUCCESS:
+ ut_d(mutex_enter(&dict_sys->mutex));
+ ut_d(dict_table_check_for_dup_indexes(
+ prebuilt->table, CHECK_PARTIAL_OK));
+ ut_d(mutex_exit(&dict_sys->mutex));
+ /* prebuilt->table->n_ref_count can be anything here,
+ given that we hold at most a shared lock on the table. */
+ goto ok_exit;
+ case DB_DUPLICATE_KEY:
+ if (prebuilt->trx->error_key_num == ULINT_UNDEFINED
+ || ha_alter_info->key_count == 0) {
+ /* This should be the hidden index on
+ FTS_DOC_ID, or there is no PRIMARY KEY in the
+ table. Either way, we should be seeing and
+ reporting a bogus duplicate key error. */
+ dup_key = NULL;
+ } else {
+ DBUG_ASSERT(prebuilt->trx->error_key_num
+ < ha_alter_info->key_count);
+ dup_key = &ha_alter_info->key_info_buffer[
+ prebuilt->trx->error_key_num];
}
+ print_keydup_error(altered_table, dup_key, MYF(0));
+ break;
+ case DB_ONLINE_LOG_TOO_BIG:
+ DBUG_ASSERT(ctx->online);
+ my_error(ER_INNODB_ONLINE_LOG_TOO_BIG, MYF(0),
+ (prebuilt->trx->error_key_num == ULINT_UNDEFINED)
+ ? FTS_DOC_ID_INDEX_NAME
+ : ha_alter_info->key_info_buffer[
+ prebuilt->trx->error_key_num].name);
+ break;
+ case DB_INDEX_CORRUPT:
+ my_error(ER_INDEX_CORRUPT, MYF(0),
+ (prebuilt->trx->error_key_num == ULINT_UNDEFINED)
+ ? FTS_DOC_ID_INDEX_NAME
+ : ha_alter_info->key_info_buffer[
+ prebuilt->trx->error_key_num].name);
+ break;
+ default:
+ my_error_innodb(error,
+ table_share->table_name.str,
+ prebuilt->table->flags);
+ }
- /* Refuse to drop the clustered index. It would be
- better to automatically generate a clustered index,
- but mysql_alter_table() will call this method only
- after ha_innobase::add_index(). */
+ /* prebuilt->table->n_ref_count can be anything here, given
+ that we hold at most a shared lock on the table. */
+ prebuilt->trx->error_info = NULL;
+ ctx->trx->error_state = DB_SUCCESS;
- if (dict_index_is_clust(index)) {
- my_error(ER_REQUIRES_PRIMARY_KEY, MYF(0));
- err = -1;
- goto func_exit;
- }
+ DBUG_RETURN(true);
+}
- rw_lock_x_lock(dict_index_get_lock(index));
- index->to_be_dropped = TRUE;
- rw_lock_x_unlock(dict_index_get_lock(index));
+/** Free the modification log for online table rebuild.
+@param table table that was being rebuilt online */
+static
+void
+innobase_online_rebuild_log_free(
+/*=============================*/
+ dict_table_t* table)
+{
+ dict_index_t* clust_index = dict_table_get_first_index(table);
+
+ ut_ad(mutex_own(&dict_sys->mutex));
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+
+ rw_lock_x_lock(&clust_index->lock);
+
+ if (clust_index->online_log) {
+ ut_ad(dict_index_get_online_status(clust_index)
+ == ONLINE_INDEX_CREATION);
+ clust_index->online_status = ONLINE_INDEX_COMPLETE;
+ row_log_free(clust_index->online_log);
+ DEBUG_SYNC_C("innodb_online_rebuild_log_free_aborted");
+ }
+
+ DBUG_ASSERT(dict_index_get_online_status(clust_index)
+ == ONLINE_INDEX_COMPLETE);
+ rw_lock_x_unlock(&clust_index->lock);
+}
+
+/** Rollback a secondary index creation, drop the indexes with
+temporary index prefix
+@param prebuilt the prebuilt struct
+@param table_share the TABLE_SHARE
+@param trx the transaction
+*/
+static
+void
+innobase_rollback_sec_index(
+/*========================*/
+ row_prebuilt_t* prebuilt,
+ const TABLE_SHARE* table_share,
+ trx_t* trx)
+{
+ row_merge_drop_indexes(trx, prebuilt->table, FALSE);
+
+ /* Free the table->fts only if there is no FTS_DOC_ID
+ in the table */
+ if (prebuilt->table->fts
+ && !DICT_TF2_FLAG_IS_SET(prebuilt->table,
+ DICT_TF2_FTS_HAS_DOC_ID)
+ && !innobase_fulltext_exist(table_share)) {
+ fts_free(prebuilt->table);
}
+}
- /* If FOREIGN_KEY_CHECKS = 1 you may not drop an index defined
- for a foreign key constraint because InnoDB requires that both
- tables contain indexes for the constraint. Such index can
- be dropped only if FOREIGN_KEY_CHECKS is set to 0.
- Note that CREATE INDEX id ON table does a CREATE INDEX and
- DROP INDEX, and we can ignore here foreign keys because a
- new index for the foreign key has already been created.
+/** Roll back the changes made during prepare_inplace_alter_table()
+and inplace_alter_table() inside the storage engine. Note that the
+allowed level of concurrency during this operation will be the same as
+for inplace_alter_table() and thus might be higher than during
+prepare_inplace_alter_table(). (E.g. concurrent writes were blocked
+during prepare, but might not be during commit).
+
+@param ha_alter_info Data used during in-place alter.
+@param table_share the TABLE_SHARE
+@param prebuilt the prebuilt struct
+@retval true Failure
+@retval false Success
+*/
+inline
+bool
+rollback_inplace_alter_table(
+/*=========================*/
+ Alter_inplace_info* ha_alter_info,
+ const TABLE_SHARE* table_share,
+ row_prebuilt_t* prebuilt)
+{
+ bool fail = false;
- We check for the foreign key constraints after marking the
- candidate indexes for deletion, because when we check for an
- equivalent foreign index we don't want to select an index that
- is later deleted. */
+ ha_innobase_inplace_ctx* ctx
+ = static_cast<ha_innobase_inplace_ctx*>
+ (ha_alter_info->handler_ctx);
- if (trx->check_foreigns
- && thd_sql_command(user_thd) != SQLCOM_CREATE_INDEX) {
- dict_index_t* index;
+ DBUG_ENTER("rollback_inplace_alter_table");
- for (index = dict_table_get_first_index(prebuilt->table);
- index;
- index = dict_table_get_next_index(index)) {
- dict_foreign_t* foreign;
+ if (!ctx || !ctx->trx) {
+ /* If we have not started a transaction yet,
+ (almost) nothing has been or needs to be done. */
+ goto func_exit;
+ }
- if (!index->to_be_dropped) {
+ row_mysql_lock_data_dictionary(ctx->trx);
- continue;
+ if (prebuilt->table != ctx->indexed_table) {
+ dberr_t err;
+ ulint flags = ctx->indexed_table->flags;
+
+ /* DML threads can access ctx->indexed_table via the
+ online rebuild log. Free it first. */
+ innobase_online_rebuild_log_free(prebuilt->table);
+
+ /* Since the FTS index specific auxiliary tables has
+ not yet registered with "table->fts" by fts_add_index(),
+ we will need explicitly delete them here */
+ if (DICT_TF2_FLAG_IS_SET(ctx->indexed_table, DICT_TF2_FTS)) {
+
+ err = innobase_drop_fts_index_table(
+ ctx->indexed_table, ctx->trx);
+
+ if (err != DB_SUCCESS) {
+ my_error_innodb(
+ err, table_share->table_name.str,
+ flags);
+ fail = true;
}
+ }
- /* Check if the index is referenced. */
- foreign = dict_table_get_referenced_constraint(
- prebuilt->table, index);
+ /* Drop the table. */
+ dict_table_close(ctx->indexed_table, TRUE, FALSE);
- if (foreign) {
-index_needed:
- trx_set_detailed_error(
- trx,
- "Index needed in foreign key "
- "constraint");
+#ifdef UNIV_DDL_DEBUG
+ /* Nobody should have initialized the stats of the
+ newly created table yet. When this is the case, we
+ know that it has not been added for background stats
+ gathering. */
+ ut_a(!ctx->indexed_table->stat_initialized);
+#endif /* UNIV_DDL_DEBUG */
- trx->error_info = index;
+ err = row_merge_drop_table(ctx->trx, ctx->indexed_table);
- err = HA_ERR_DROP_INDEX_FK;
- break;
- } else {
- /* Check if this index references some
- other table */
- foreign = dict_table_get_foreign_constraint(
- prebuilt->table, index);
+ switch (err) {
+ case DB_SUCCESS:
+ break;
+ default:
+ my_error_innodb(err, table_share->table_name.str,
+ flags);
+ fail = true;
+ }
+ } else {
+ DBUG_ASSERT(!(ha_alter_info->handler_flags
+ & Alter_inplace_info::ADD_PK_INDEX));
- if (foreign) {
- ut_a(foreign->foreign_index == index);
+ trx_start_for_ddl(ctx->trx, TRX_DICT_OP_INDEX);
- /* Search for an equivalent index that
- the foreign key constraint could use
- if this index were to be deleted. */
- if (!dict_foreign_find_equiv_index(
- foreign)) {
+ innobase_rollback_sec_index(prebuilt, table_share, ctx->trx);
+ }
- goto index_needed;
- }
- }
+ trx_commit_for_mysql(ctx->trx);
+ row_mysql_unlock_data_dictionary(ctx->trx);
+ trx_free_for_mysql(ctx->trx);
+
+
+func_exit:
+#ifndef DBUG_OFF
+ dict_index_t* clust_index = dict_table_get_first_index(
+ prebuilt->table);
+ DBUG_ASSERT(!clust_index->online_log);
+ DBUG_ASSERT(dict_index_get_online_status(clust_index)
+ == ONLINE_INDEX_COMPLETE);
+#endif /* !DBUG_OFF */
+
+ if (ctx) {
+ if (ctx->num_to_add_fk) {
+ for (ulint i = 0; i < ctx->num_to_add_fk; i++) {
+ dict_foreign_free(ctx->add_fk[i]);
}
}
- } else if (thd_sql_command(user_thd) == SQLCOM_CREATE_INDEX) {
- /* This is a drop of a foreign key constraint index that
- was created by MySQL when the constraint was added. MySQL
- does this when the user creates an index explicitly which
- can be used in place of the automatically generated index. */
- dict_index_t* index;
+ if (ctx->num_to_drop) {
+ row_mysql_lock_data_dictionary(prebuilt->trx);
+
+ /* Clear the to_be_dropped flags
+ in the data dictionary cache.
+ The flags may already have been cleared,
+ in case an error was detected in
+ commit_inplace_alter_table(). */
+ for (ulint i = 0; i < ctx->num_to_drop; i++) {
+ dict_index_t* index = ctx->drop[i];
+ DBUG_ASSERT(*index->name != TEMP_INDEX_PREFIX);
+
+ index->to_be_dropped = 0;
+ }
+
+ row_mysql_unlock_data_dictionary(prebuilt->trx);
+ }
+ }
+
+ trx_commit_for_mysql(prebuilt->trx);
+ srv_active_wake_master_thread();
+ MONITOR_ATOMIC_DEC(MONITOR_PENDING_ALTER_TABLE);
+ DBUG_RETURN(fail);
+}
+
+/** Drop a FOREIGN KEY constraint.
+@param table_share the TABLE_SHARE
+@param trx data dictionary transaction
+@param foreign the foreign key constraint, will be freed
+@retval true Failure
+@retval false Success */
+static __attribute__((nonnull, warn_unused_result))
+bool
+innobase_drop_foreign(
+/*==================*/
+ const TABLE_SHARE* table_share,
+ trx_t* trx,
+ dict_foreign_t* foreign)
+{
+ DBUG_ENTER("innobase_drop_foreign");
+
+ DBUG_ASSERT(trx_get_dict_operation(trx) == TRX_DICT_OP_INDEX);
+ ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH);
+ ut_ad(mutex_own(&dict_sys->mutex));
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+
+ /* Drop the constraint from the data dictionary. */
+ static const char sql[] =
+ "PROCEDURE DROP_FOREIGN_PROC () IS\n"
+ "BEGIN\n"
+ "DELETE FROM SYS_FOREIGN WHERE ID=:id;\n"
+ "DELETE FROM SYS_FOREIGN_COLS WHERE ID=:id;\n"
+ "END;\n";
+
+ dberr_t error;
+ pars_info_t* info;
+
+ info = pars_info_create();
+ pars_info_add_str_literal(info, "id", foreign->id);
+
+ trx->op_info = "dropping foreign key constraint from dictionary";
+ error = que_eval_sql(info, sql, FALSE, trx);
+ trx->op_info = "";
+
+ DBUG_EXECUTE_IF("ib_drop_foreign_error",
+ error = DB_OUT_OF_FILE_SPACE;);
+
+ if (error != DB_SUCCESS) {
+ my_error_innodb(error, table_share->table_name.str, 0);
+ trx->error_state = DB_SUCCESS;
+ DBUG_RETURN(true);
+ }
+
+ /* Drop the foreign key constraint from the data dictionary cache. */
+ dict_foreign_remove_from_cache(foreign);
+ DBUG_RETURN(false);
+}
+
+/** Rename a column.
+@param table_share the TABLE_SHARE
+@param prebuilt the prebuilt struct
+@param trx data dictionary transaction
+@param nth_col 0-based index of the column
+@param from old column name
+@param to new column name
+@param new_clustered whether the table has been rebuilt
+@retval true Failure
+@retval false Success */
+static __attribute__((nonnull, warn_unused_result))
+bool
+innobase_rename_column(
+/*===================*/
+ const TABLE_SHARE* table_share,
+ row_prebuilt_t* prebuilt,
+ trx_t* trx,
+ ulint nth_col,
+ const char* from,
+ const char* to,
+ bool new_clustered)
+{
+ pars_info_t* info;
+ dberr_t error;
+
+ DBUG_ENTER("innobase_rename_column");
+
+ DBUG_ASSERT(trx_get_dict_operation(trx)
+ == new_clustered ? TRX_DICT_OP_TABLE : TRX_DICT_OP_INDEX);
+ ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH);
+ ut_ad(mutex_own(&dict_sys->mutex));
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+
+ if (new_clustered) {
+ goto rename_foreign;
+ }
+
+ info = pars_info_create();
- for (index = dict_table_get_first_index(prebuilt->table);
- index;
- index = dict_table_get_next_index(index)) {
- dict_foreign_t* foreign;
+ pars_info_add_ull_literal(info, "tableid", prebuilt->table->id);
+ pars_info_add_int4_literal(info, "nth", nth_col);
+ pars_info_add_str_literal(info, "old", from);
+ pars_info_add_str_literal(info, "new", to);
- if (!index->to_be_dropped) {
+ trx->op_info = "renaming column in SYS_COLUMNS";
+
+ error = que_eval_sql(
+ info,
+ "PROCEDURE RENAME_SYS_COLUMNS_PROC () IS\n"
+ "BEGIN\n"
+ "UPDATE SYS_COLUMNS SET NAME=:new\n"
+ "WHERE TABLE_ID=:tableid AND NAME=:old\n"
+ "AND POS=:nth;\n"
+ "END;\n",
+ FALSE, trx);
+
+ DBUG_EXECUTE_IF("ib_rename_column_error",
+ error = DB_OUT_OF_FILE_SPACE;);
+
+ if (error != DB_SUCCESS) {
+err_exit:
+ my_error_innodb(error, table_share->table_name.str, 0);
+ trx->error_state = DB_SUCCESS;
+ trx->op_info = "";
+ DBUG_RETURN(true);
+ }
+ trx->op_info = "renaming column in SYS_FIELDS";
+
+ for (dict_index_t* index = dict_table_get_first_index(prebuilt->table);
+ index != NULL;
+ index = dict_table_get_next_index(index)) {
+
+ for (ulint i = 0; i < dict_index_get_n_fields(index); i++) {
+ if (strcmp(dict_index_get_nth_field(index, i)->name,
+ from)) {
continue;
}
- /* Check if this index references some other table */
- foreign = dict_table_get_foreign_constraint(
- prebuilt->table, index);
+ info = pars_info_create();
- if (foreign == NULL) {
+ pars_info_add_ull_literal(info, "indexid", index->id);
+ pars_info_add_int4_literal(info, "nth", i);
+ pars_info_add_str_literal(info, "old", from);
+ pars_info_add_str_literal(info, "new", to);
- continue;
+ error = que_eval_sql(
+ info,
+ "PROCEDURE RENAME_SYS_FIELDS_PROC () IS\n"
+ "BEGIN\n"
+
+ "UPDATE SYS_FIELDS SET COL_NAME=:new\n"
+ "WHERE INDEX_ID=:indexid AND COL_NAME=:old\n"
+ "AND POS=:nth;\n"
+
+ /* Try again, in case there is a prefix_len
+ encoded in SYS_FIELDS.POS */
+
+ "UPDATE SYS_FIELDS SET COL_NAME=:new\n"
+ "WHERE INDEX_ID=:indexid AND COL_NAME=:old\n"
+ "AND POS>=65536*:nth AND POS<65536*(:nth+1);\n"
+
+ "END;\n",
+ FALSE, trx);
+
+ if (error != DB_SUCCESS) {
+ goto err_exit;
}
+ }
+ }
- ut_a(foreign->foreign_index == index);
+rename_foreign:
+ trx->op_info = "renaming column in SYS_FOREIGN_COLS";
- /* Search for an equivalent index that the
- foreign key constraint could use if this index
- were to be deleted. */
+ for (dict_foreign_t* foreign = UT_LIST_GET_FIRST(
+ prebuilt->table->foreign_list);
+ foreign != NULL;
+ foreign = UT_LIST_GET_NEXT(foreign_list, foreign)) {
+ for (unsigned i = 0; i < foreign->n_fields; i++) {
+ if (strcmp(foreign->foreign_col_names[i], from)) {
+ continue;
+ }
- if (!dict_foreign_find_equiv_index(foreign)) {
- trx_set_detailed_error(
- trx,
- "Index needed in foreign key "
- "constraint");
+ info = pars_info_create();
+
+ pars_info_add_str_literal(info, "id", foreign->id);
+ pars_info_add_int4_literal(info, "nth", i);
+ pars_info_add_str_literal(info, "old", from);
+ pars_info_add_str_literal(info, "new", to);
+
+ error = que_eval_sql(
+ info,
+ "PROCEDURE RENAME_SYS_FOREIGN_F_PROC () IS\n"
+ "BEGIN\n"
+ "UPDATE SYS_FOREIGN_COLS\n"
+ "SET FOR_COL_NAME=:new\n"
+ "WHERE ID=:id AND POS=:nth\n"
+ "AND FOR_COL_NAME=:old;\n"
+ "END;\n",
+ FALSE, trx);
+
+ if (error != DB_SUCCESS) {
+ goto err_exit;
+ }
+ }
+ }
- trx->error_info = foreign->foreign_index;
+ for (dict_foreign_t* foreign = UT_LIST_GET_FIRST(
+ prebuilt->table->referenced_list);
+ foreign != NULL;
+ foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) {
+ for (unsigned i = 0; i < foreign->n_fields; i++) {
+ if (strcmp(foreign->referenced_col_names[i], from)) {
+ continue;
+ }
- err = HA_ERR_DROP_INDEX_FK;
- break;
+ info = pars_info_create();
+
+ pars_info_add_str_literal(info, "id", foreign->id);
+ pars_info_add_int4_literal(info, "nth", i);
+ pars_info_add_str_literal(info, "old", from);
+ pars_info_add_str_literal(info, "new", to);
+
+ error = que_eval_sql(
+ info,
+ "PROCEDURE RENAME_SYS_FOREIGN_R_PROC () IS\n"
+ "BEGIN\n"
+ "UPDATE SYS_FOREIGN_COLS\n"
+ "SET REF_COL_NAME=:new\n"
+ "WHERE ID=:id AND POS=:nth\n"
+ "AND REF_COL_NAME=:old;\n"
+ "END;\n",
+ FALSE, trx);
+
+ if (error != DB_SUCCESS) {
+ goto err_exit;
}
}
}
-func_exit:
- if (err) {
- /* Undo our changes since there was some sort of error. */
- dict_index_t* index
- = dict_table_get_first_index(prebuilt->table);
+ trx->op_info = "";
+ if (!new_clustered) {
+ /* Rename the column in the data dictionary cache. */
+ dict_mem_table_col_rename(prebuilt->table, nth_col, from, to);
+ }
+ DBUG_RETURN(false);
+}
+
+/** Rename columns.
+@param ha_alter_info Data used during in-place alter.
+@param new_clustered whether the table has been rebuilt
+@param table the TABLE
+@param table_share the TABLE_SHARE
+@param prebuilt the prebuilt struct
+@param trx data dictionary transaction
+@retval true Failure
+@retval false Success */
+static __attribute__((nonnull, warn_unused_result))
+bool
+innobase_rename_columns(
+/*====================*/
+ Alter_inplace_info* ha_alter_info,
+ bool new_clustered,
+ const TABLE* table,
+ const TABLE_SHARE* table_share,
+ row_prebuilt_t* prebuilt,
+ trx_t* trx)
+{
+ List_iterator_fast<Create_field> cf_it(
+ ha_alter_info->alter_info->create_list);
+ uint i = 0;
+
+ for (Field** fp = table->field; *fp; fp++, i++) {
+ if (!((*fp)->flags & FIELD_IS_RENAMED)) {
+ continue;
+ }
+
+ cf_it.rewind();
+ while (Create_field* cf = cf_it++) {
+ if (cf->field == *fp) {
+ if (innobase_rename_column(
+ table_share,
+ prebuilt, trx, i,
+ cf->field->field_name,
+ cf->field_name, new_clustered)) {
+ return(true);
+ }
+ goto processed_field;
+ }
+ }
- do {
- rw_lock_x_lock(dict_index_get_lock(index));
- index->to_be_dropped = FALSE;
- rw_lock_x_unlock(dict_index_get_lock(index));
- index = dict_table_get_next_index(index);
- } while (index);
+ ut_error;
+processed_field:
+ continue;
}
- ut_d(dict_table_check_for_dup_indexes(prebuilt->table, TRUE));
- row_mysql_unlock_data_dictionary(trx);
+ return(false);
+}
- DBUG_RETURN(err);
+/** Undo the in-memory addition of foreign key on table->foreign_list
+and table->referenced_list.
+@param ctx saved alter table context
+@param table the foreign table */
+static __attribute__((nonnull))
+void
+innobase_undo_add_fk(
+/*=================*/
+ ha_innobase_inplace_ctx* ctx,
+ dict_table_t* fk_table)
+{
+ for (ulint i = 0; i < ctx->num_to_add_fk; i++) {
+ UT_LIST_REMOVE(
+ foreign_list,
+ fk_table->foreign_list,
+ ctx->add_fk[i]);
+
+ if (ctx->add_fk[i]->referenced_table) {
+ UT_LIST_REMOVE(
+ referenced_list,
+ ctx->add_fk[i]->referenced_table
+ ->referenced_list,
+ ctx->add_fk[i]);
+ }
+ }
}
-/*******************************************************************//**
-Drop the indexes that were passed to a successful prepare_drop_index().
-@return 0 or error number */
+/** Commit or rollback the changes made during
+prepare_inplace_alter_table() and inplace_alter_table() inside
+the storage engine. Note that the allowed level of concurrency
+during this operation will be the same as for
+inplace_alter_table() and thus might be higher than during
+prepare_inplace_alter_table(). (E.g concurrent writes were
+blocked during prepare, but might not be during commit).
+@param altered_table TABLE object for new version of table.
+@param ha_alter_info Structure describing changes to be done
+by ALTER TABLE and holding data used during in-place alter.
+@param commit true => Commit, false => Rollback.
+@retval true Failure
+@retval false Success
+*/
UNIV_INTERN
-int
-ha_innobase::final_drop_index(
-/*==========================*/
- TABLE* iin_table) /*!< in: Table where indexes
- are dropped */
+bool
+ha_innobase::commit_inplace_alter_table(
+/*====================================*/
+ TABLE* altered_table,
+ Alter_inplace_info* ha_alter_info,
+ bool commit)
{
- dict_index_t* index; /*!< Index to be dropped */
- trx_t* trx; /*!< Transaction */
- int err;
-
- DBUG_ENTER("ha_innobase::final_drop_index");
- ut_ad(table);
+ ha_innobase_inplace_ctx* ctx
+ = static_cast<ha_innobase_inplace_ctx*>
+ (ha_alter_info->handler_ctx);
+ trx_t* trx;
+ trx_t* fk_trx = NULL;
+ int err = 0;
+ bool new_clustered;
+ dict_table_t* fk_table = NULL;
+ ulonglong max_autoinc;
+
+ ut_ad(!srv_read_only_mode);
+
+ DBUG_ENTER("commit_inplace_alter_table");
+
+ DEBUG_SYNC_C("innodb_commit_inplace_alter_table_enter");
+
+ DEBUG_SYNC_C("innodb_commit_inplace_alter_table_wait");
+
+ if (!commit) {
+ /* A rollback is being requested. So far we may at
+ most have created some indexes. If any indexes were to
+ be dropped, they would actually be dropped in this
+ method if commit=true. */
+ DBUG_RETURN(rollback_inplace_alter_table(
+ ha_alter_info, table_share, prebuilt));
+ }
- if (srv_created_new_raw || srv_force_recovery) {
- DBUG_RETURN(HA_ERR_WRONG_COMMAND);
+ if (!altered_table->found_next_number_field) {
+ /* There is no AUTO_INCREMENT column in the table
+ after the ALTER operation. */
+ max_autoinc = 0;
+ } else if (ctx && ctx->add_autoinc != ULINT_UNDEFINED) {
+ /* An AUTO_INCREMENT column was added. Get the last
+ value from the sequence, which may be based on a
+ supplied AUTO_INCREMENT value. */
+ max_autoinc = ctx->sequence.last();
+ } else if ((ha_alter_info->handler_flags
+ & Alter_inplace_info::CHANGE_CREATE_OPTION)
+ && (ha_alter_info->create_info->used_fields
+ & HA_CREATE_USED_AUTO)) {
+ /* An AUTO_INCREMENT value was supplied, but the table
+ was not rebuilt. Get the user-supplied value. */
+ max_autoinc = ha_alter_info->create_info->auto_increment_value;
+ } else {
+ /* An AUTO_INCREMENT value was not specified.
+ Read the old counter value from the table. */
+ ut_ad(table->found_next_number_field);
+ dict_table_autoinc_lock(prebuilt->table);
+ max_autoinc = dict_table_autoinc_read(prebuilt->table);
+ dict_table_autoinc_unlock(prebuilt->table);
}
- update_thd();
+ if (!(ha_alter_info->handler_flags & ~INNOBASE_INPLACE_IGNORE)) {
+ DBUG_ASSERT(!ctx);
+ /* We may want to update table attributes. */
+ goto func_exit;
+ }
- trx_search_latch_release_if_reserved(prebuilt->trx);
trx_start_if_not_started_xa(prebuilt->trx);
- /* Create a background transaction for the operations on
- the data dictionary tables. */
- trx = innobase_trx_allocate(user_thd);
- trx_start_if_not_started_xa(trx);
-
- /* Flag this transaction as a dictionary operation, so that
- the data dictionary will be locked in crash recovery. */
- trx_set_dict_operation(trx, TRX_DICT_OP_INDEX);
-
- /* Lock the table exclusively, to ensure that no active
- transaction depends on an index that is being dropped. */
- err = convert_error_code_to_mysql(
- row_merge_lock_table(prebuilt->trx, prebuilt->table, LOCK_X),
- prebuilt->table->flags, user_thd);
-
- /* Delete corresponding rows from the stats table.
- Marko advises not to edit both user tables and SYS_* tables in one
- trx, thus we use prebuilt->trx instead of trx. Because of this the
- drop from SYS_* and from the stats table cannot happen in one
- transaction and eventually if a crash occurs below, between
- trx_commit_for_mysql(trx); which drops the indexes from SYS_* and
- trx_commit_for_mysql(prebuilt->trx);
- then an orphaned rows will be left in the stats table. */
- for (index = dict_table_get_first_index(prebuilt->table);
- index != NULL;
- index = dict_table_get_next_index(index)) {
+ {
+ /* Exclusively lock the table, to ensure that no other
+ transaction is holding locks on the table while we
+ change the table definition. The MySQL meta-data lock
+ should normally guarantee that no conflicting locks
+ exist. However, FOREIGN KEY constraints checks and any
+ transactions collected during crash recovery could be
+ holding InnoDB locks only, not MySQL locks. */
+ dberr_t error = row_merge_lock_table(
+ prebuilt->trx, prebuilt->table, LOCK_X);
+
+ if (error != DB_SUCCESS) {
+ my_error_innodb(error, table_share->table_name.str, 0);
+ DBUG_RETURN(true);
+ }
- if (index->to_be_dropped) {
+ DEBUG_SYNC(user_thd, "innodb_alter_commit_after_lock_table");
+ }
- enum db_err ret;
- char errstr[1024];
+ if (ctx) {
+ if (ctx->indexed_table != prebuilt->table) {
+ for (dict_index_t* index = dict_table_get_first_index(
+ ctx->indexed_table);
+ index;
+ index = dict_table_get_next_index(index)) {
+ DBUG_ASSERT(dict_index_get_online_status(index)
+ == ONLINE_INDEX_COMPLETE);
+ DBUG_ASSERT(*index->name != TEMP_INDEX_PREFIX);
+ if (dict_index_is_corrupted(index)) {
+ my_error(ER_INDEX_CORRUPT, MYF(0),
+ index->name);
+ DBUG_RETURN(true);
+ }
+ }
+ } else {
+ for (ulint i = 0; i < ctx->num_to_add; i++) {
+ dict_index_t* index = ctx->add[i];
+ DBUG_ASSERT(dict_index_get_online_status(index)
+ == ONLINE_INDEX_COMPLETE);
+ DBUG_ASSERT(*index->name == TEMP_INDEX_PREFIX);
+ if (dict_index_is_corrupted(index)) {
+ /* Report a duplicate key
+ error for the index that was
+ flagged corrupted, most likely
+ because a duplicate value was
+ inserted (directly or by
+ rollback) after
+ ha_innobase::inplace_alter_table()
+ completed. */
+ my_error(ER_DUP_UNKNOWN_IN_INDEX,
+ MYF(0), index->name + 1);
+ DBUG_RETURN(true);
+ }
+ }
+ }
+ }
- ret = dict_stats_delete_index_stats(
- index, prebuilt->trx,
- errstr, sizeof(errstr));
+ if (!ctx || !ctx->trx) {
+ /* Create a background transaction for the operations on
+ the data dictionary tables. */
+ trx = innobase_trx_allocate(user_thd);
- if (ret != DB_SUCCESS) {
- push_warning(user_thd,
- Sql_condition::WARN_LEVEL_WARN,
- ER_LOCK_WAIT_TIMEOUT,
- errstr);
- }
+ trx_start_for_ddl(trx, TRX_DICT_OP_INDEX);
+
+ new_clustered = false;
+ } else {
+ trx_dict_op_t op;
+
+ trx = ctx->trx;
+
+ new_clustered = ctx->indexed_table != prebuilt->table;
+
+ op = (new_clustered) ? TRX_DICT_OP_TABLE : TRX_DICT_OP_INDEX;
+
+ trx_start_for_ddl(trx, op);
+ }
+
+ if (new_clustered) {
+ if (prebuilt->table->fts) {
+ ut_ad(!prebuilt->table->fts->add_wq);
+ fts_optimize_remove_table(prebuilt->table);
+ }
+
+ if (ctx->indexed_table->fts) {
+ ut_ad(!ctx->indexed_table->fts->add_wq);
+ fts_optimize_remove_table(ctx->indexed_table);
}
}
+ /* Latch the InnoDB data dictionary exclusively so that no deadlocks
+ or lock waits can happen in it during the data dictionary operation. */
row_mysql_lock_data_dictionary(trx);
- ut_d(dict_table_check_for_dup_indexes(prebuilt->table, TRUE));
- if (UNIV_UNLIKELY(err)) {
+ /* Wait for background stats processing to stop using the
+ indexes that we are going to drop (if any). */
+ if (ctx) {
+ dict_stats_wait_bg_to_stop_using_tables(
+ prebuilt->table, ctx->indexed_table, trx);
+ }
- /* Unmark the indexes to be dropped. */
- for (index = dict_table_get_first_index(prebuilt->table);
- index; index = dict_table_get_next_index(index)) {
+ /* Final phase of add foreign key processing */
+ if (ctx && ctx->num_to_add_fk > 0) {
+ ulint highest_id_so_far;
+ dberr_t error;
+
+ /* If it runs concurrently with create index or table
+ rebuild, we will need a separate trx to do the system
+ table change, since in the case of failure to rebuild/create
+ index, it will need to commit the trx that drops the newly
+ created table/index, while for FK, it needs to rollback
+ the metadata change */
+ if (new_clustered || ctx->num_to_add) {
+ fk_trx = innobase_trx_allocate(user_thd);
- rw_lock_x_lock(dict_index_get_lock(index));
- index->to_be_dropped = FALSE;
- rw_lock_x_unlock(dict_index_get_lock(index));
+ trx_start_for_ddl(fk_trx, TRX_DICT_OP_INDEX);
+
+ fk_trx->dict_operation_lock_mode =
+ trx->dict_operation_lock_mode;
+ } else {
+ fk_trx = trx;
}
- goto func_exit;
+ ut_ad(ha_alter_info->handler_flags
+ & Alter_inplace_info::ADD_FOREIGN_KEY);
+
+ highest_id_so_far = dict_table_get_highest_foreign_id(
+ prebuilt->table);
+
+ highest_id_so_far++;
+
+ fk_table = ctx->indexed_table;
+
+ for (ulint i = 0; i < ctx->num_to_add_fk; i++) {
+
+ /* Get the new dict_table_t */
+ if (new_clustered) {
+ ctx->add_fk[i]->foreign_table
+ = fk_table;
+ }
+
+ /* Add Foreign Key info to in-memory metadata */
+ UT_LIST_ADD_LAST(foreign_list,
+ fk_table->foreign_list,
+ ctx->add_fk[i]);
+
+ if (ctx->add_fk[i]->referenced_table) {
+ UT_LIST_ADD_LAST(
+ referenced_list,
+ ctx->add_fk[i]->referenced_table->referenced_list,
+ ctx->add_fk[i]);
+ }
+
+ if (!ctx->add_fk[i]->foreign_index) {
+ ctx->add_fk[i]->foreign_index
+ = dict_foreign_find_index(
+ fk_table,
+ ctx->add_fk[i]->foreign_col_names,
+ ctx->add_fk[i]->n_fields, NULL,
+ TRUE, FALSE);
+
+ ut_ad(ctx->add_fk[i]->foreign_index);
+
+ if (!innobase_check_fk_option(
+ ctx->add_fk[i])) {
+ my_error(ER_FK_INCORRECT_OPTION,
+ MYF(0),
+ table_share->table_name.str,
+ ctx->add_fk[i]->id);
+ goto undo_add_fk;
+ }
+ }
+
+ /* System table change */
+ error = dict_create_add_foreign_to_dictionary(
+ &highest_id_so_far, prebuilt->table,
+ ctx->add_fk[i], fk_trx);
+
+ DBUG_EXECUTE_IF(
+ "innodb_test_cannot_add_fk_system",
+ error = DB_ERROR;);
+
+ if (error != DB_SUCCESS) {
+ my_error(ER_FK_FAIL_ADD_SYSTEM, MYF(0),
+ ctx->add_fk[i]->id);
+ goto undo_add_fk;
+ }
+ }
+
+ /* Make sure the tables are moved to non-lru side of
+ dictionary list */
+ error = dict_load_foreigns(prebuilt->table->name, FALSE, TRUE);
+
+ if (error != DB_SUCCESS) {
+ my_error(ER_CANNOT_ADD_FOREIGN, MYF(0));
+
+undo_add_fk:
+ err = -1;
+
+ if (new_clustered) {
+ goto drop_new_clustered;
+ } else if (ctx->num_to_add > 0) {
+ ut_ad(trx != fk_trx);
+
+ innobase_rollback_sec_index(
+ prebuilt, table_share, trx);
+ innobase_undo_add_fk(ctx, fk_table);
+ trx_rollback_for_mysql(fk_trx);
+
+ goto trx_commit;
+ } else {
+ goto trx_rollback;
+ }
+ }
+ }
+
+ if (new_clustered) {
+ dberr_t error;
+ char* tmp_name;
+
+ /* Clear the to_be_dropped flag in the data dictionary. */
+ for (ulint i = 0; i < ctx->num_to_drop; i++) {
+ dict_index_t* index = ctx->drop[i];
+ DBUG_ASSERT(*index->name != TEMP_INDEX_PREFIX);
+ DBUG_ASSERT(index->to_be_dropped);
+ index->to_be_dropped = 0;
+ }
+
+ /* We copied the table. Any indexes that were
+ requested to be dropped were not created in the copy
+ of the table. Apply any last bit of the rebuild log
+ and then rename the tables. */
+
+ if (ctx->online) {
+ DEBUG_SYNC_C("row_log_table_apply2_before");
+ error = row_log_table_apply(
+ ctx->thr, prebuilt->table, altered_table);
+
+ switch (error) {
+ KEY* dup_key;
+ case DB_SUCCESS:
+ break;
+ case DB_DUPLICATE_KEY:
+ if (prebuilt->trx->error_key_num
+ == ULINT_UNDEFINED) {
+ /* This should be the hidden index on
+ FTS_DOC_ID. */
+ dup_key = NULL;
+ } else {
+ DBUG_ASSERT(
+ prebuilt->trx->error_key_num
+ < ha_alter_info->key_count);
+ dup_key = &ha_alter_info
+ ->key_info_buffer[
+ prebuilt->trx
+ ->error_key_num];
+ }
+ print_keydup_error(altered_table, dup_key, MYF(0));
+ break;
+ case DB_ONLINE_LOG_TOO_BIG:
+ my_error(ER_INNODB_ONLINE_LOG_TOO_BIG, MYF(0),
+ ha_alter_info->key_info_buffer[0]
+ .name);
+ break;
+ case DB_INDEX_CORRUPT:
+ my_error(ER_INDEX_CORRUPT, MYF(0),
+ (prebuilt->trx->error_key_num
+ == ULINT_UNDEFINED)
+ ? FTS_DOC_ID_INDEX_NAME
+ : ha_alter_info->key_info_buffer[
+ prebuilt->trx->error_key_num]
+ .name);
+ break;
+ default:
+ my_error_innodb(error,
+ table_share->table_name.str,
+ prebuilt->table->flags);
+ }
+
+ if (error != DB_SUCCESS) {
+ err = -1;
+ goto drop_new_clustered;
+ }
+ }
+
+ if ((ha_alter_info->handler_flags
+ & Alter_inplace_info::ALTER_COLUMN_NAME)
+ && innobase_rename_columns(ha_alter_info, true, table,
+ table_share, prebuilt, trx)) {
+ err = -1;
+ goto drop_new_clustered;
+ }
+
+ /* A new clustered index was defined for the table
+ and there was no error at this point. We can
+ now rename the old table as a temporary table,
+ rename the new temporary table as the old
+ table and drop the old table. */
+ tmp_name = dict_mem_create_temporary_tablename(
+ ctx->heap, ctx->indexed_table->name,
+ ctx->indexed_table->id);
+
+ /* Rename table will reload and refresh the in-memory
+ foreign key constraint metadata. This is a rename operation
+ in preparing for dropping the old table. Set the table
+ to_be_dropped bit here, so to make sure DML foreign key
+ constraint check does not use the stale dict_foreign_t.
+ This is done because WL#6049 (FK MDL) has not been
+ implemented yet */
+ prebuilt->table->to_be_dropped = true;
+
+ DBUG_EXECUTE_IF("ib_ddl_crash_before_rename",
+ DBUG_SUICIDE(););
+
+ /* The new table must inherit the flag from the
+ "parent" table. */
+ if (dict_table_is_discarded(prebuilt->table)) {
+ ctx->indexed_table->ibd_file_missing = true;
+ ctx->indexed_table->flags2 |= DICT_TF2_DISCARDED;
+ }
+
+ error = row_merge_rename_tables(
+ prebuilt->table, ctx->indexed_table,
+ tmp_name, trx);
+
+ DBUG_EXECUTE_IF("ib_ddl_crash_after_rename",
+ DBUG_SUICIDE(););
+
+ /* n_ref_count must be 1, because purge cannot
+ be executing on this very table as we are
+ holding dict_operation_lock X-latch. */
+ ut_a(prebuilt->table->n_ref_count == 1);
+
+ switch (error) {
+ dict_table_t* old_table;
+ case DB_SUCCESS:
+ old_table = prebuilt->table;
+
+ DBUG_EXECUTE_IF("ib_ddl_crash_before_commit",
+ DBUG_SUICIDE(););
+
+ trx_commit_for_mysql(prebuilt->trx);
+
+ DBUG_EXECUTE_IF("ib_ddl_crash_after_commit",
+ DBUG_SUICIDE(););
+
+ if (fk_trx) {
+ ut_ad(fk_trx != trx);
+ trx_commit_for_mysql(fk_trx);
+ }
+
+ row_prebuilt_free(prebuilt, TRUE);
+ error = row_merge_drop_table(trx, old_table);
+ prebuilt = row_create_prebuilt(
+ ctx->indexed_table, table->s->reclength);
+ err = 0;
+ break;
+ case DB_TABLESPACE_EXISTS:
+ ut_a(ctx->indexed_table->n_ref_count == 1);
+ my_error(ER_TABLESPACE_EXISTS, MYF(0), tmp_name);
+ err = HA_ERR_TABLESPACE_EXISTS;
+ goto drop_new_clustered;
+ case DB_DUPLICATE_KEY:
+ ut_a(ctx->indexed_table->n_ref_count == 1);
+ my_error(ER_TABLE_EXISTS_ERROR, MYF(0), tmp_name);
+ err = HA_ERR_TABLE_EXIST;
+ goto drop_new_clustered;
+ default:
+ my_error_innodb(error,
+ table_share->table_name.str,
+ prebuilt->table->flags);
+ err = -1;
+
+drop_new_clustered:
+ /* Reset the to_be_dropped bit for the old table,
+ since we are aborting the operation and dropping
+ the new table due to some error conditions */
+ prebuilt->table->to_be_dropped = false;
+
+ /* Need to drop the added foreign key first */
+ if (fk_trx) {
+ ut_ad(fk_trx != trx);
+ innobase_undo_add_fk(ctx, fk_table);
+ trx_rollback_for_mysql(fk_trx);
+ }
+
+ dict_table_close(ctx->indexed_table, TRUE, FALSE);
+
+#ifdef UNIV_DDL_DEBUG
+ /* Nobody should have initialized the stats of the
+ newly created table yet. When this is the case, we
+ know that it has not been added for background stats
+ gathering. */
+ ut_a(!ctx->indexed_table->stat_initialized);
+#endif /* UNIV_DDL_DEBUG */
+
+ row_merge_drop_table(trx, ctx->indexed_table);
+ ctx->indexed_table = NULL;
+ goto trx_commit;
+ }
+ } else if (ctx) {
+ dberr_t error;
+
+ /* We altered the table in place. */
+ /* Lose the TEMP_INDEX_PREFIX. */
+ for (ulint i = 0; i < ctx->num_to_add; i++) {
+ dict_index_t* index = ctx->add[i];
+ DBUG_ASSERT(dict_index_get_online_status(index)
+ == ONLINE_INDEX_COMPLETE);
+ DBUG_ASSERT(*index->name
+ == TEMP_INDEX_PREFIX);
+ index->name++;
+ error = row_merge_rename_index_to_add(
+ trx, prebuilt->table->id,
+ index->id);
+ if (error != DB_SUCCESS) {
+ sql_print_error(
+ "InnoDB: rename index to add: %lu\n",
+ (ulong) error);
+ DBUG_ASSERT(0);
+ }
+ }
+
+ /* Drop any indexes that were requested to be dropped.
+ Rename them to TEMP_INDEX_PREFIX in the data
+ dictionary first. We do not bother to rename
+ index->name in the dictionary cache, because the index
+ is about to be freed after row_merge_drop_indexes_dict(). */
+
+ for (ulint i = 0; i < ctx->num_to_drop; i++) {
+ dict_index_t* index = ctx->drop[i];
+ DBUG_ASSERT(*index->name != TEMP_INDEX_PREFIX);
+ DBUG_ASSERT(index->table == prebuilt->table);
+ DBUG_ASSERT(index->to_be_dropped);
+
+ error = row_merge_rename_index_to_drop(
+ trx, index->table->id, index->id);
+ if (error != DB_SUCCESS) {
+ sql_print_error(
+ "InnoDB: rename index to drop: %lu\n",
+ (ulong) error);
+ DBUG_ASSERT(0);
+ }
+ }
+ }
+
+ if (err == 0
+ && (ha_alter_info->handler_flags
+ & Alter_inplace_info::DROP_FOREIGN_KEY)) {
+ DBUG_ASSERT(ctx->num_to_drop_fk > 0);
+ DBUG_ASSERT(ctx->num_to_drop_fk
+ == ha_alter_info->alter_info->drop_list.elements);
+ for (ulint i = 0; i < ctx->num_to_drop_fk; i++) {
+ DBUG_ASSERT(prebuilt->table
+ == ctx->drop_fk[i]->foreign_table);
+
+ if (innobase_drop_foreign(
+ table_share, trx, ctx->drop_fk[i])) {
+ err = -1;
+ }
+ }
+ }
+
+ if (err == 0 && !new_clustered
+ && (ha_alter_info->handler_flags
+ & Alter_inplace_info::ALTER_COLUMN_NAME)
+ && innobase_rename_columns(ha_alter_info, false, table,
+ table_share, prebuilt, trx)) {
+ err = -1;
}
- /* Drop indexes marked to be dropped */
+ if (err == 0) {
+ if (fk_trx && fk_trx != trx) {
+ /* This needs to be placed before "trx_commit" marker,
+ since anyone called "goto trx_commit" has committed
+ or rolled back fk_trx before jumping here */
+ trx_commit_for_mysql(fk_trx);
+ }
+trx_commit:
+ trx_commit_for_mysql(trx);
+ } else {
+trx_rollback:
+ /* undo the addition of foreign key */
+ if (fk_trx) {
+ innobase_undo_add_fk(ctx, fk_table);
- index = dict_table_get_first_index(prebuilt->table);
+ if (fk_trx != trx) {
+ trx_rollback_for_mysql(fk_trx);
+ }
+ }
- while (index) {
- dict_index_t* next_index;
+ trx_rollback_for_mysql(trx);
+
+ /* If there are newly added secondary indexes, above
+ rollback will revert the rename operation and put the
+ new indexes with the temp index prefix, we can drop
+ them here */
+ if (ctx && !new_clustered) {
+ ulint i;
+
+ /* Need to drop the in-memory dict_index_t first
+ to avoid dict_table_check_for_dup_indexes()
+ assertion in row_merge_drop_indexes() in the case
+ of add and drop the same index */
+ for (i = 0; i < ctx->num_to_add; i++) {
+ dict_index_t* index = ctx->add[i];
+ dict_index_remove_from_cache(
+ prebuilt->table, index);
+ }
- next_index = dict_table_get_next_index(index);
+ if (ctx->num_to_add) {
+ trx_start_for_ddl(trx, TRX_DICT_OP_INDEX);
+ row_merge_drop_indexes(trx, prebuilt->table,
+ FALSE);
+ trx_commit_for_mysql(trx);
+ }
- if (index->to_be_dropped) {
- row_merge_drop_index(index, prebuilt->table, trx);
+ for (i = 0; i < ctx->num_to_drop; i++) {
+ dict_index_t* index = ctx->drop[i];
+ index->to_be_dropped = false;
+ }
}
+ }
- index = next_index;
+ /* Flush the log to reduce probability that the .frm files and
+ the InnoDB data dictionary get out-of-sync if the user runs
+ with innodb_flush_log_at_trx_commit = 0 */
+
+ log_buffer_flush_to_disk();
+
+ if (new_clustered) {
+ innobase_online_rebuild_log_free(prebuilt->table);
}
- /* Check that all flagged indexes were dropped. */
- for (index = dict_table_get_first_index(prebuilt->table);
- index; index = dict_table_get_next_index(index)) {
- ut_a(!index->to_be_dropped);
+ if (err == 0 && ctx) {
+ /* The changes were successfully performed. */
+ bool add_fts = false;
+
+ /* Rebuild the index translation table.
+ This should only be needed when !new_clustered. */
+ share->idx_trans_tbl.index_count = 0;
+
+ /* Publish the created fulltext index, if any.
+ Note that a fulltext index can be created without
+ creating the clustered index, if there already exists
+ a suitable FTS_DOC_ID column. If not, one will be
+ created, implying new_clustered */
+ for (ulint i = 0; i < ctx->num_to_add; i++) {
+ dict_index_t* index = ctx->add[i];
+
+ if (index->type & DICT_FTS) {
+ DBUG_ASSERT(index->type == DICT_FTS);
+ fts_add_index(index, prebuilt->table);
+ add_fts = true;
+ }
+ }
+
+ if (!new_clustered && ha_alter_info->index_drop_count) {
+
+ /* Really drop the indexes that were dropped.
+ The transaction had to be committed first
+ (after renaming the indexes), so that in the
+ event of a crash, crash recovery will drop the
+ indexes, because it drops all indexes whose
+ names start with TEMP_INDEX_PREFIX. Once we
+ have started dropping an index tree, there is
+ no way to roll it back. */
+
+ trx_start_for_ddl(trx, TRX_DICT_OP_INDEX);
+
+ for (ulint i = 0; i < ctx->num_to_drop; i++) {
+ dict_index_t* index = ctx->drop[i];
+ DBUG_ASSERT(*index->name != TEMP_INDEX_PREFIX);
+ DBUG_ASSERT(index->table == prebuilt->table);
+ DBUG_ASSERT(index->to_be_dropped);
+
+ /* Replace the indexes in foreign key
+ constraints if needed. */
+
+ dict_foreign_replace_index(
+ prebuilt->table, index, prebuilt->trx);
+
+ /* Mark the index dropped
+ in the data dictionary cache. */
+ rw_lock_x_lock(dict_index_get_lock(index));
+ index->page = FIL_NULL;
+ rw_lock_x_unlock(dict_index_get_lock(index));
+ }
+
+ row_merge_drop_indexes_dict(trx, prebuilt->table->id);
+
+ for (ulint i = 0; i < ctx->num_to_drop; i++) {
+ dict_index_t* index = ctx->drop[i];
+ DBUG_ASSERT(*index->name != TEMP_INDEX_PREFIX);
+ DBUG_ASSERT(index->table == prebuilt->table);
+
+ if (index->type & DICT_FTS) {
+ DBUG_ASSERT(index->type == DICT_FTS
+ || (index->type
+ & DICT_CORRUPT));
+ DBUG_ASSERT(prebuilt->table->fts);
+ fts_drop_index(
+ prebuilt->table, index, trx);
+ }
+
+ dict_index_remove_from_cache(
+ prebuilt->table, index);
+ }
+
+ trx_commit_for_mysql(trx);
+ }
+
+ ut_d(dict_table_check_for_dup_indexes(
+ prebuilt->table, CHECK_ALL_COMPLETE));
+ DBUG_ASSERT(new_clustered == !prebuilt->trx);
+
+ if (add_fts) {
+ fts_optimize_add_table(prebuilt->table);
+ }
}
- /* We will need to rebuild index translation table. Set
- valid index entry count in the translation table to zero */
- share->idx_trans_tbl.index_count = 0;
+ if (!prebuilt->trx) {
+ /* We created a new clustered index and committed the
+ user transaction already, so that we were able to
+ drop the old table. */
+ update_thd();
+ prebuilt->trx->will_lock++;
-func_exit:
- ut_d(dict_table_check_for_dup_indexes(prebuilt->table, TRUE));
+ DBUG_EXECUTE_IF("ib_ddl_crash_after_user_trx_commit",
+ DBUG_SUICIDE(););
+
+ trx_start_if_not_started_xa(prebuilt->trx);
+ }
+ ut_d(dict_table_check_for_dup_indexes(
+ prebuilt->table, CHECK_ABORTED_OK));
ut_a(fts_check_cached_index(prebuilt->table));
+ row_mysql_unlock_data_dictionary(trx);
+ if (fk_trx && fk_trx != trx) {
+ fk_trx->dict_operation_lock_mode = 0;
+ trx_free_for_mysql(fk_trx);
+ }
+ trx_free_for_mysql(trx);
+
+ if (ctx && trx == ctx->trx) {
+ ctx->trx = NULL;
+ }
+
+ if (err == 0) {
+ /* Delete corresponding rows from the stats table. We update
+ the statistics in a separate transaction from trx, because
+ lock waits are not allowed in a data dictionary transaction.
+ (Lock waits are possible on the statistics table, because it
+ is directly accessible by users, not covered by the
+ dict_operation_lock.)
+
+ Because the data dictionary changes were already committed,
+ orphaned rows may be left in the statistics table if the
+ system crashes. */
+
+ for (uint i = 0; i < ha_alter_info->index_drop_count; i++) {
+ const KEY* key
+ = ha_alter_info->index_drop_buffer[i];
+ dberr_t ret;
+ char errstr[1024];
+
+ ret = dict_stats_drop_index(
+ prebuilt->table->name, key->name,
+ errstr, sizeof(errstr));
+
+ if (ret != DB_SUCCESS) {
+ push_warning(user_thd,
+ Sql_condition::WARN_LEVEL_WARN,
+ ER_LOCK_WAIT_TIMEOUT,
+ errstr);
+ }
+ }
+
+ if (ctx && !dict_table_is_discarded(prebuilt->table)) {
+ bool stats_init_called = false;
+
+ for (uint i = 0; i < ctx->num_to_add; i++) {
+ dict_index_t* index = ctx->add[i];
+
+ if (!(index->type & DICT_FTS)) {
+
+ if (!stats_init_called) {
+ innobase_copy_frm_flags_from_table_share(
+ index->table,
+ altered_table->s);
+
+ dict_stats_init(index->table);
+
+ stats_init_called = true;
+ }
+
+ dict_stats_update_for_index(index);
+ }
+ }
+ }
+ }
- trx_commit_for_mysql(trx);
trx_commit_for_mysql(prebuilt->trx);
- row_mysql_unlock_data_dictionary(trx);
/* Flush the log to reduce probability that the .frm files and
the InnoDB data dictionary get out-of-sync if the user runs
@@ -1910,12 +5213,106 @@ func_exit:
log_buffer_flush_to_disk();
- trx_free_for_mysql(trx);
-
/* Tell the InnoDB server that there might be work for
utility threads: */
srv_active_wake_master_thread();
- DBUG_RETURN(err);
+func_exit:
+
+ if (err == 0 && altered_table->found_next_number_field != 0) {
+ dict_table_autoinc_lock(prebuilt->table);
+ dict_table_autoinc_initialize(prebuilt->table, max_autoinc);
+ dict_table_autoinc_unlock(prebuilt->table);
+ }
+
+#ifndef DBUG_OFF
+ dict_index_t* clust_index = dict_table_get_first_index(
+ prebuilt->table);
+ DBUG_ASSERT(!clust_index->online_log);
+ DBUG_ASSERT(dict_index_get_online_status(clust_index)
+ == ONLINE_INDEX_COMPLETE);
+#endif /* !DBUG_OFF */
+
+#ifdef UNIV_DEBUG
+ for (dict_index_t* index = dict_table_get_first_index(
+ prebuilt->table);
+ index;
+ index = dict_table_get_next_index(index)) {
+ ut_ad(!index->to_be_dropped);
+ }
+#endif /* UNIV_DEBUG */
+
+ if (err == 0) {
+ MONITOR_ATOMIC_DEC(MONITOR_PENDING_ALTER_TABLE);
+
+#ifdef UNIV_DDL_DEBUG
+ /* Invoke CHECK TABLE atomically after a successful
+ ALTER TABLE. */
+ TABLE* old_table = table;
+ table = altered_table;
+ ut_a(check(user_thd, 0) == HA_ADMIN_OK);
+ table = old_table;
+#endif /* UNIV_DDL_DEBUG */
+ }
+
+ DBUG_RETURN(err != 0);
+}
+
+/**
+@param thd - the session
+@param start_value - the lower bound
+@param max_value - the upper bound (inclusive) */
+ib_sequence_t::ib_sequence_t(
+ THD* thd,
+ ulonglong start_value,
+ ulonglong max_value)
+ :
+ m_max_value(max_value),
+ m_increment(0),
+ m_offset(0),
+ m_next_value(start_value),
+ m_eof(false)
+{
+ if (thd != 0 && m_max_value > 0) {
+
+ thd_get_autoinc(thd, &m_offset, &m_increment);
+
+ if (m_increment > 1 || m_offset > 1) {
+
+ /* If there is an offset or increment specified
+ then we need to work out the exact next value. */
+
+ m_next_value = innobase_next_autoinc(
+ start_value, 1,
+ m_increment, m_offset, m_max_value);
+
+ } else if (start_value == 0) {
+ /* The next value can never be 0. */
+ m_next_value = 1;
+ }
+ } else {
+ m_eof = true;
+ }
+}
+
+/**
+Postfix increment
+@return the next value to insert */
+ulonglong
+ib_sequence_t::operator++(int) UNIV_NOTHROW
+{
+ ulonglong current = m_next_value;
+
+ ut_ad(!m_eof);
+ ut_ad(m_max_value > 0);
+
+ m_next_value = innobase_next_autoinc(
+ current, 1, m_increment, m_offset, m_max_value);
+
+ if (m_next_value == m_max_value && current == m_next_value) {
+ m_eof = true;
+ }
+
+ return(current);
}
diff --git a/storage/innobase/handler/i_s.cc b/storage/innobase/handler/i_s.cc
index 882f5040a38..25c0793b445 100644
--- a/storage/innobase/handler/i_s.cc
+++ b/storage/innobase/handler/i_s.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2007, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2007, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -39,6 +39,7 @@ Created July 18, 2007 Vasil Dimov
#include "btr0types.h"
#include "buf0buddy.h" /* for i_s_cmpmem */
#include "buf0buf.h" /* for buf_pool */
+#include "dict0dict.h" /* for dict_table_stats_lock() */
#include "dict0load.h" /* for file sys_tables related info. */
#include "dict0mem.h"
#include "dict0types.h"
@@ -57,14 +58,12 @@ Created July 18, 2007 Vasil Dimov
/** structure associates a name string with a file page type and/or buffer
page state. */
-struct buffer_page_desc_str_struct{
+struct buf_page_desc_t{
const char* type_str; /*!< String explain the page
type/state */
ulint type_value; /*!< Page type or page state */
};
-typedef struct buffer_page_desc_str_struct buf_page_desc_str_t;
-
/** Any states greater than FIL_PAGE_TYPE_LAST would be treated as unknown. */
#define I_S_PAGE_TYPE_UNKNOWN (FIL_PAGE_TYPE_LAST + 1)
@@ -73,7 +72,7 @@ in i_s_page_type[] array */
#define I_S_PAGE_TYPE_INDEX 1
/** Name string for File Page Types */
-static buf_page_desc_str_t i_s_page_type[] = {
+static buf_page_desc_t i_s_page_type[] = {
{"ALLOCATED", FIL_PAGE_TYPE_ALLOCATED},
{"INDEX", FIL_PAGE_INDEX},
{"UNDO_LOG", FIL_PAGE_UNDO_LOG},
@@ -98,7 +97,7 @@ static buf_page_desc_str_t i_s_page_type[] = {
/** This structure defines information we will fetch from pages
currently cached in the buffer pool. It will be used to populate
table INFORMATION_SCHEMA.INNODB_BUFFER_PAGE */
-struct buffer_page_info_struct{
+struct buf_page_info_t{
ulint block_id; /*!< Buffer Pool block ID */
unsigned space_id:32; /*!< Tablespace ID */
unsigned page_num:32; /*!< Page number/offset */
@@ -131,8 +130,6 @@ struct buffer_page_info_struct{
index_id_t index_id; /*!< Index ID if a index page */
};
-typedef struct buffer_page_info_struct buf_page_info_t;
-
/** maximum number of buffer page info we would cache. */
#define MAX_BUF_INFO_CACHED 10000
@@ -282,6 +279,43 @@ field_store_string(
}
/*******************************************************************//**
+Store the name of an index in a MYSQL_TYPE_VARCHAR field.
+Handles the names of incomplete secondary indexes.
+@return 0 on success */
+static
+int
+field_store_index_name(
+/*===================*/
+ Field* field, /*!< in/out: target field for
+ storage */
+ const char* index_name) /*!< in: NUL-terminated utf-8
+ index name, possibly starting with
+ TEMP_INDEX_PREFIX */
+{
+ int ret;
+
+ ut_ad(index_name != NULL);
+ ut_ad(field->real_type() == MYSQL_TYPE_VARCHAR);
+
+ /* Since TEMP_INDEX_PREFIX is not a valid UTF8, we need to convert
+ it to something else. */
+ if (index_name[0] == TEMP_INDEX_PREFIX) {
+ char buf[NAME_LEN + 1];
+ buf[0] = '?';
+ memcpy(buf + 1, index_name + 1, strlen(index_name));
+ ret = field->store(buf, strlen(buf),
+ system_charset_info);
+ } else {
+ ret = field->store(index_name, strlen(index_name),
+ system_charset_info);
+ }
+
+ field->set_notnull();
+
+ return(ret);
+}
+
+/*******************************************************************//**
Auxiliary function to store ulint value in MYSQL_TYPE_LONGLONG field.
If the value is ULINT_UNDEFINED then the field it set to NULL.
@return 0 on success */
@@ -923,16 +957,9 @@ fill_innodb_locks_from_cache(
/* lock_index */
if (row->lock_index != NULL) {
-
- bufend = innobase_convert_name(buf, sizeof(buf),
- row->lock_index,
- strlen(row->lock_index),
- thd, FALSE);
- OK(fields[IDX_LOCK_INDEX]->store(buf, bufend - buf,
- system_charset_info));
- fields[IDX_LOCK_INDEX]->set_notnull();
+ OK(field_store_index_name(fields[IDX_LOCK_INDEX],
+ row->lock_index));
} else {
-
fields[IDX_LOCK_INDEX]->set_null();
}
@@ -1594,6 +1621,356 @@ UNIV_INTERN struct st_maria_plugin i_s_innodb_cmp_reset =
STRUCT_FLD(maturity, MariaDB_PLUGIN_MATURITY_STABLE),
};
+/* Fields of the dynamic tables
+information_schema.innodb_cmp_per_index and
+information_schema.innodb_cmp_per_index_reset. */
+static ST_FIELD_INFO i_s_cmp_per_index_fields_info[] =
+{
+#define IDX_DATABASE_NAME 0
+ {STRUCT_FLD(field_name, "database_name"),
+ STRUCT_FLD(field_length, 192),
+ STRUCT_FLD(field_type, MYSQL_TYPE_STRING),
+ STRUCT_FLD(value, 0),
+ STRUCT_FLD(field_flags, 0),
+ STRUCT_FLD(old_name, ""),
+ STRUCT_FLD(open_method, SKIP_OPEN_TABLE)},
+
+#define IDX_TABLE_NAME 1
+ {STRUCT_FLD(field_name, "table_name"),
+ STRUCT_FLD(field_length, 192),
+ STRUCT_FLD(field_type, MYSQL_TYPE_STRING),
+ STRUCT_FLD(value, 0),
+ STRUCT_FLD(field_flags, 0),
+ STRUCT_FLD(old_name, ""),
+ STRUCT_FLD(open_method, SKIP_OPEN_TABLE)},
+
+#define IDX_INDEX_NAME 2
+ {STRUCT_FLD(field_name, "index_name"),
+ STRUCT_FLD(field_length, 192),
+ STRUCT_FLD(field_type, MYSQL_TYPE_STRING),
+ STRUCT_FLD(value, 0),
+ STRUCT_FLD(field_flags, 0),
+ STRUCT_FLD(old_name, ""),
+ STRUCT_FLD(open_method, SKIP_OPEN_TABLE)},
+
+#define IDX_COMPRESS_OPS 3
+ {STRUCT_FLD(field_name, "compress_ops"),
+ STRUCT_FLD(field_length, MY_INT32_NUM_DECIMAL_DIGITS),
+ STRUCT_FLD(field_type, MYSQL_TYPE_LONG),
+ STRUCT_FLD(value, 0),
+ STRUCT_FLD(field_flags, 0),
+ STRUCT_FLD(old_name, ""),
+ STRUCT_FLD(open_method, SKIP_OPEN_TABLE)},
+
+#define IDX_COMPRESS_OPS_OK 4
+ {STRUCT_FLD(field_name, "compress_ops_ok"),
+ STRUCT_FLD(field_length, MY_INT32_NUM_DECIMAL_DIGITS),
+ STRUCT_FLD(field_type, MYSQL_TYPE_LONG),
+ STRUCT_FLD(value, 0),
+ STRUCT_FLD(field_flags, 0),
+ STRUCT_FLD(old_name, ""),
+ STRUCT_FLD(open_method, SKIP_OPEN_TABLE)},
+
+#define IDX_COMPRESS_TIME 5
+ {STRUCT_FLD(field_name, "compress_time"),
+ STRUCT_FLD(field_length, MY_INT32_NUM_DECIMAL_DIGITS),
+ STRUCT_FLD(field_type, MYSQL_TYPE_LONG),
+ STRUCT_FLD(value, 0),
+ STRUCT_FLD(field_flags, 0),
+ STRUCT_FLD(old_name, ""),
+ STRUCT_FLD(open_method, SKIP_OPEN_TABLE)},
+
+#define IDX_UNCOMPRESS_OPS 6
+ {STRUCT_FLD(field_name, "uncompress_ops"),
+ STRUCT_FLD(field_length, MY_INT32_NUM_DECIMAL_DIGITS),
+ STRUCT_FLD(field_type, MYSQL_TYPE_LONG),
+ STRUCT_FLD(value, 0),
+ STRUCT_FLD(field_flags, 0),
+ STRUCT_FLD(old_name, ""),
+ STRUCT_FLD(open_method, SKIP_OPEN_TABLE)},
+
+#define IDX_UNCOMPRESS_TIME 7
+ {STRUCT_FLD(field_name, "uncompress_time"),
+ STRUCT_FLD(field_length, MY_INT32_NUM_DECIMAL_DIGITS),
+ STRUCT_FLD(field_type, MYSQL_TYPE_LONG),
+ STRUCT_FLD(value, 0),
+ STRUCT_FLD(field_flags, 0),
+ STRUCT_FLD(old_name, ""),
+ STRUCT_FLD(open_method, SKIP_OPEN_TABLE)},
+
+ END_OF_ST_FIELD_INFO
+};
+
+/*******************************************************************//**
+Fill the dynamic table
+information_schema.innodb_cmp_per_index or
+information_schema.innodb_cmp_per_index_reset.
+@return 0 on success, 1 on failure */
+static
+int
+i_s_cmp_per_index_fill_low(
+/*=======================*/
+ THD* thd, /*!< in: thread */
+ TABLE_LIST* tables, /*!< in/out: tables to fill */
+ Item* , /*!< in: condition (ignored) */
+ ibool reset) /*!< in: TRUE=reset cumulated counts */
+{
+ TABLE* table = tables->table;
+ Field** fields = table->field;
+ int status = 0;
+
+ DBUG_ENTER("i_s_cmp_per_index_fill_low");
+
+ /* deny access to non-superusers */
+ if (check_global_access(thd, PROCESS_ACL)) {
+
+ DBUG_RETURN(0);
+ }
+
+ RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name);
+
+ /* Create a snapshot of the stats so we do not bump into lock
+ order violations with dict_sys->mutex below. */
+ mutex_enter(&page_zip_stat_per_index_mutex);
+ page_zip_stat_per_index_t snap (page_zip_stat_per_index);
+ mutex_exit(&page_zip_stat_per_index_mutex);
+
+ mutex_enter(&dict_sys->mutex);
+
+ page_zip_stat_per_index_t::iterator iter;
+ ulint i;
+
+ for (iter = snap.begin(), i = 0; iter != snap.end(); iter++, i++) {
+
+ char name[192];
+ dict_index_t* index = dict_index_find_on_id_low(iter->first);
+
+ if (index != NULL) {
+ char db_utf8[MAX_DB_UTF8_LEN];
+ char table_utf8[MAX_TABLE_UTF8_LEN];
+
+ dict_fs2utf8(index->table_name,
+ db_utf8, sizeof(db_utf8),
+ table_utf8, sizeof(table_utf8));
+
+ field_store_string(fields[IDX_DATABASE_NAME], db_utf8);
+ field_store_string(fields[IDX_TABLE_NAME], table_utf8);
+ field_store_index_name(fields[IDX_INDEX_NAME],
+ index->name);
+ } else {
+ /* index not found */
+ ut_snprintf(name, sizeof(name),
+ "index_id:" IB_ID_FMT, iter->first);
+ field_store_string(fields[IDX_DATABASE_NAME],
+ "unknown");
+ field_store_string(fields[IDX_TABLE_NAME],
+ "unknown");
+ field_store_string(fields[IDX_INDEX_NAME],
+ name);
+ }
+
+ fields[IDX_COMPRESS_OPS]->store(
+ iter->second.compressed);
+
+ fields[IDX_COMPRESS_OPS_OK]->store(
+ iter->second.compressed_ok);
+
+ fields[IDX_COMPRESS_TIME]->store(
+ (long) (iter->second.compressed_usec / 1000000));
+
+ fields[IDX_UNCOMPRESS_OPS]->store(
+ iter->second.decompressed);
+
+ fields[IDX_UNCOMPRESS_TIME]->store(
+ (long) (iter->second.decompressed_usec / 1000000));
+
+ if (schema_table_store_record(thd, table)) {
+ status = 1;
+ break;
+ }
+
+ /* Release and reacquire the dict mutex to allow other
+ threads to proceed. This could eventually result in the
+ contents of INFORMATION_SCHEMA.innodb_cmp_per_index being
+ inconsistent, but it is an acceptable compromise. */
+ if (i % 1000 == 0) {
+ mutex_exit(&dict_sys->mutex);
+ mutex_enter(&dict_sys->mutex);
+ }
+ }
+
+ mutex_exit(&dict_sys->mutex);
+
+ if (reset) {
+ page_zip_reset_stat_per_index();
+ }
+
+ DBUG_RETURN(status);
+}
+
+/*******************************************************************//**
+Fill the dynamic table information_schema.innodb_cmp_per_index.
+@return 0 on success, 1 on failure */
+static
+int
+i_s_cmp_per_index_fill(
+/*===================*/
+ THD* thd, /*!< in: thread */
+ TABLE_LIST* tables, /*!< in/out: tables to fill */
+ Item* cond) /*!< in: condition (ignored) */
+{
+ return(i_s_cmp_per_index_fill_low(thd, tables, cond, FALSE));
+}
+
+/*******************************************************************//**
+Fill the dynamic table information_schema.innodb_cmp_per_index_reset.
+@return 0 on success, 1 on failure */
+static
+int
+i_s_cmp_per_index_reset_fill(
+/*=========================*/
+ THD* thd, /*!< in: thread */
+ TABLE_LIST* tables, /*!< in/out: tables to fill */
+ Item* cond) /*!< in: condition (ignored) */
+{
+ return(i_s_cmp_per_index_fill_low(thd, tables, cond, TRUE));
+}
+
+/*******************************************************************//**
+Bind the dynamic table information_schema.innodb_cmp_per_index.
+@return 0 on success */
+static
+int
+i_s_cmp_per_index_init(
+/*===================*/
+ void* p) /*!< in/out: table schema object */
+{
+ DBUG_ENTER("i_s_cmp_init");
+ ST_SCHEMA_TABLE* schema = (ST_SCHEMA_TABLE*) p;
+
+ schema->fields_info = i_s_cmp_per_index_fields_info;
+ schema->fill_table = i_s_cmp_per_index_fill;
+
+ DBUG_RETURN(0);
+}
+
+/*******************************************************************//**
+Bind the dynamic table information_schema.innodb_cmp_per_index_reset.
+@return 0 on success */
+static
+int
+i_s_cmp_per_index_reset_init(
+/*=========================*/
+ void* p) /*!< in/out: table schema object */
+{
+ DBUG_ENTER("i_s_cmp_reset_init");
+ ST_SCHEMA_TABLE* schema = (ST_SCHEMA_TABLE*) p;
+
+ schema->fields_info = i_s_cmp_per_index_fields_info;
+ schema->fill_table = i_s_cmp_per_index_reset_fill;
+
+ DBUG_RETURN(0);
+}
+
+UNIV_INTERN struct st_maria_plugin i_s_innodb_cmp_per_index =
+{
+ /* the plugin type (a MYSQL_XXX_PLUGIN value) */
+ /* int */
+ STRUCT_FLD(type, MYSQL_INFORMATION_SCHEMA_PLUGIN),
+
+ /* pointer to type-specific plugin descriptor */
+ /* void* */
+ STRUCT_FLD(info, &i_s_info),
+
+ /* plugin name */
+ /* const char* */
+ STRUCT_FLD(name, "INNODB_CMP_PER_INDEX"),
+
+ /* plugin author (for SHOW PLUGINS) */
+ /* const char* */
+ STRUCT_FLD(author, plugin_author),
+
+ /* general descriptive text (for SHOW PLUGINS) */
+ /* const char* */
+ STRUCT_FLD(descr, "Statistics for the InnoDB compression (per index)"),
+
+ /* the plugin license (PLUGIN_LICENSE_XXX) */
+ /* int */
+ STRUCT_FLD(license, PLUGIN_LICENSE_GPL),
+
+ /* the function to invoke when plugin is loaded */
+ /* int (*)(void*); */
+ STRUCT_FLD(init, i_s_cmp_per_index_init),
+
+ /* the function to invoke when plugin is unloaded */
+ /* int (*)(void*); */
+ STRUCT_FLD(deinit, i_s_common_deinit),
+
+ /* plugin version (for SHOW PLUGINS) */
+ /* unsigned int */
+ STRUCT_FLD(version, INNODB_VERSION_SHORT),
+
+ /* struct st_mysql_show_var* */
+ STRUCT_FLD(status_vars, NULL),
+
+ /* struct st_mysql_sys_var** */
+ STRUCT_FLD(system_vars, NULL),
+
+ /* Maria extension */
+ STRUCT_FLD(version_info, INNODB_VERSION_STR),
+ STRUCT_FLD(maturity, MariaDB_PLUGIN_MATURITY_STABLE),
+};
+
+UNIV_INTERN struct st_maria_plugin i_s_innodb_cmp_per_index_reset =
+{
+ /* the plugin type (a MYSQL_XXX_PLUGIN value) */
+ /* int */
+ STRUCT_FLD(type, MYSQL_INFORMATION_SCHEMA_PLUGIN),
+
+ /* pointer to type-specific plugin descriptor */
+ /* void* */
+ STRUCT_FLD(info, &i_s_info),
+
+ /* plugin name */
+ /* const char* */
+ STRUCT_FLD(name, "INNODB_CMP_PER_INDEX_RESET"),
+
+ /* plugin author (for SHOW PLUGINS) */
+ /* const char* */
+ STRUCT_FLD(author, plugin_author),
+
+ /* general descriptive text (for SHOW PLUGINS) */
+ /* const char* */
+ STRUCT_FLD(descr, "Statistics for the InnoDB compression (per index);"
+ " reset cumulated counts"),
+
+ /* the plugin license (PLUGIN_LICENSE_XXX) */
+ /* int */
+ STRUCT_FLD(license, PLUGIN_LICENSE_GPL),
+
+ /* the function to invoke when plugin is loaded */
+ /* int (*)(void*); */
+ STRUCT_FLD(init, i_s_cmp_per_index_reset_init),
+
+ /* the function to invoke when plugin is unloaded */
+ /* int (*)(void*); */
+ STRUCT_FLD(deinit, i_s_common_deinit),
+
+ /* plugin version (for SHOW PLUGINS) */
+ /* unsigned int */
+ STRUCT_FLD(version, INNODB_VERSION_SHORT),
+
+ /* struct st_mysql_show_var* */
+ STRUCT_FLD(status_vars, NULL),
+
+ /* struct st_mysql_sys_var** */
+ STRUCT_FLD(system_vars, NULL),
+
+ /* Maria extension */
+ STRUCT_FLD(version_info, INNODB_VERSION_STR),
+ STRUCT_FLD(maturity, MariaDB_PLUGIN_MATURITY_STABLE),
+};
+
/* Fields of the dynamic table information_schema.innodb_cmpmem. */
static ST_FIELD_INFO i_s_cmpmem_fields_info[] =
{
@@ -1695,8 +2072,8 @@ i_s_cmpmem_fill_low(
table->field[3]->store(UNIV_LIKELY(x < BUF_BUDDY_SIZES)
? UT_LIST_GET_LEN(buf_pool->zip_free[x])
: 0);
- table->field[4]->store((longlong)
- buddy_stat->relocated, true);
+ table->field[4]->store(
+ (longlong) buddy_stat->relocated, true);
table->field[5]->store(
(ulong) (buddy_stat->relocated_usec / 1000000));
@@ -1935,7 +2312,7 @@ static ST_FIELD_INFO innodb_metrics_fields_info[] =
#define METRIC_AVG_VALUE_START 5
{STRUCT_FLD(field_name, "AVG_COUNT"),
- STRUCT_FLD(field_length, 0),
+ STRUCT_FLD(field_length, MAX_FLOAT_STR_LENGTH),
STRUCT_FLD(field_type, MYSQL_TYPE_FLOAT),
STRUCT_FLD(value, 0),
STRUCT_FLD(field_flags, MY_I_S_MAYBE_NULL),
@@ -1971,7 +2348,7 @@ static ST_FIELD_INFO innodb_metrics_fields_info[] =
#define METRIC_AVG_VALUE_RESET 9
{STRUCT_FLD(field_name, "AVG_COUNT_RESET"),
- STRUCT_FLD(field_length, 0),
+ STRUCT_FLD(field_length, MAX_FLOAT_STR_LENGTH),
STRUCT_FLD(field_type, MYSQL_TYPE_FLOAT),
STRUCT_FLD(value, 0),
STRUCT_FLD(field_flags, MY_I_S_MAYBE_NULL),
@@ -2481,7 +2858,7 @@ UNIV_INTERN struct st_maria_plugin i_s_innodb_ft_default_stopword =
/* pointer to type-specific plugin descriptor */
/* void* */
- STRUCT_FLD(info, &i_s_info),
+ STRUCT_FLD(info, &i_s_stopword_fields_info),
/* plugin name */
/* const char* */
@@ -2571,8 +2948,8 @@ i_s_fts_deleted_generic_fill(
deleted = fts_doc_ids_create();
- user_table = dict_table_open_on_name_no_stats(
- fts_internal_tbl_name, FALSE, DICT_ERR_IGNORE_NONE);
+ user_table = dict_table_open_on_name(
+ fts_internal_tbl_name, FALSE, FALSE, DICT_ERR_IGNORE_NONE);
if (!user_table) {
DBUG_RETURN(0);
@@ -2603,7 +2980,7 @@ i_s_fts_deleted_generic_fill(
fts_doc_ids_free(deleted);
- dict_table_close(user_table, FALSE);
+ dict_table_close(user_table, FALSE, FALSE);
DBUG_RETURN(0);
}
@@ -2650,7 +3027,7 @@ UNIV_INTERN struct st_maria_plugin i_s_innodb_ft_deleted =
/* pointer to type-specific plugin descriptor */
/* void* */
- STRUCT_FLD(info, &i_s_info),
+ STRUCT_FLD(info, &i_s_fts_doc_fields_info),
/* plugin name */
/* const char* */
@@ -2733,7 +3110,7 @@ UNIV_INTERN struct st_maria_plugin i_s_innodb_ft_being_deleted =
/* pointer to type-specific plugin descriptor */
/* void* */
- STRUCT_FLD(info, &i_s_info),
+ STRUCT_FLD(info, &i_s_fts_doc_fields_info),
/* plugin name */
/* const char* */
@@ -2803,8 +3180,8 @@ i_s_fts_inserted_fill(
DBUG_RETURN(0);
}
- user_table = dict_table_open_on_name_no_stats(
- fts_internal_tbl_name, FALSE, DICT_ERR_IGNORE_NONE);
+ user_table = dict_table_open_on_name(
+ fts_internal_tbl_name, FALSE, FALSE, DICT_ERR_IGNORE_NONE);
if (!user_table) {
DBUG_RETURN(0);
@@ -2835,7 +3212,7 @@ i_s_fts_inserted_fill(
fts_doc_ids_free(inserted);
- dict_table_close(user_table, FALSE);
+ dict_table_close(user_table, FALSE, FALSE);
DBUG_RETURN(0);
}
@@ -2866,7 +3243,7 @@ UNIV_INTERN struct st_maria_plugin i_s_innodb_ft_inserted =
/* pointer to type-specific plugin descriptor */
/* void* */
- STRUCT_FLD(info, &i_s_info),
+ STRUCT_FLD(info, &i_s_fts_doc_fields_info),
/* plugin name */
/* const char* */
@@ -3078,8 +3455,8 @@ i_s_fts_index_cache_fill(
DBUG_RETURN(0);
}
- user_table = dict_table_open_on_name_no_stats(
- fts_internal_tbl_name, FALSE, DICT_ERR_IGNORE_NONE);
+ user_table = dict_table_open_on_name(
+ fts_internal_tbl_name, FALSE, FALSE, DICT_ERR_IGNORE_NONE);
if (!user_table) {
DBUG_RETURN(0);
@@ -3098,7 +3475,7 @@ i_s_fts_index_cache_fill(
i_s_fts_index_cache_fill_one_index(index_cache, thd, tables);
}
- dict_table_close(user_table, FALSE);
+ dict_table_close(user_table, FALSE, FALSE);
DBUG_RETURN(0);
}
@@ -3129,7 +3506,7 @@ UNIV_INTERN struct st_maria_plugin i_s_innodb_ft_index_cache =
/* pointer to type-specific plugin descriptor */
/* void* */
- STRUCT_FLD(info, &i_s_info),
+ STRUCT_FLD(info, &i_s_fts_index_fields_info),
/* plugin name */
/* const char* */
@@ -3276,6 +3653,7 @@ i_s_fts_index_table_fill_one_index(
ulint num_row_fill;
DBUG_ENTER("i_s_fts_index_cache_fill_one_index");
+ DBUG_ASSERT(!dict_index_is_online_ddl(index));
heap = mem_heap_create(1024);
@@ -3384,8 +3762,8 @@ i_s_fts_index_table_fill(
DBUG_RETURN(0);
}
- user_table = dict_table_open_on_name_no_stats(
- fts_internal_tbl_name, FALSE, DICT_ERR_IGNORE_NONE);
+ user_table = dict_table_open_on_name(
+ fts_internal_tbl_name, FALSE, FALSE, DICT_ERR_IGNORE_NONE);
if (!user_table) {
DBUG_RETURN(0);
@@ -3398,7 +3776,7 @@ i_s_fts_index_table_fill(
}
}
- dict_table_close(user_table, FALSE);
+ dict_table_close(user_table, FALSE, FALSE);
DBUG_RETURN(0);
}
@@ -3429,7 +3807,7 @@ UNIV_INTERN struct st_maria_plugin i_s_innodb_ft_index_table =
/* pointer to type-specific plugin descriptor */
/* void* */
- STRUCT_FLD(info, &i_s_info),
+ STRUCT_FLD(info, &i_s_fts_index_fields_info),
/* plugin name */
/* const char* */
@@ -3541,8 +3919,8 @@ i_s_fts_config_fill(
fields = table->field;
- user_table = dict_table_open_on_name_no_stats(
- fts_internal_tbl_name, FALSE, DICT_ERR_IGNORE_NONE);
+ user_table = dict_table_open_on_name(
+ fts_internal_tbl_name, FALSE, FALSE, DICT_ERR_IGNORE_NONE);
if (!user_table) {
DBUG_RETURN(0);
@@ -3556,6 +3934,7 @@ i_s_fts_config_fill(
if (!ib_vector_is_empty(user_table->fts->indexes)) {
index = (dict_index_t*) ib_vector_getp_const(
user_table->fts->indexes, 0);
+ DBUG_ASSERT(!dict_index_is_online_ddl(index));
}
while (fts_config_key[i]) {
@@ -3567,10 +3946,10 @@ i_s_fts_config_fill(
value.f_str = str;
- if (strcmp(fts_config_key[i], FTS_TOTAL_WORD_COUNT) == 0
- && index) {
+ if (index
+ && strcmp(fts_config_key[i], FTS_TOTAL_WORD_COUNT) == 0) {
key_name = fts_config_create_index_param_name(
- fts_config_key[i], index);
+ fts_config_key[i], index);
allocated = TRUE;
} else {
key_name = (char*) fts_config_key[i];
@@ -3597,7 +3976,7 @@ i_s_fts_config_fill(
trx_free_for_background(trx);
- dict_table_close(user_table, FALSE);
+ dict_table_close(user_table, FALSE, FALSE);
DBUG_RETURN(0);
}
@@ -3628,7 +4007,7 @@ UNIV_INTERN struct st_maria_plugin i_s_innodb_ft_config =
/* pointer to type-specific plugin descriptor */
/* void* */
- STRUCT_FLD(info, &i_s_info),
+ STRUCT_FLD(info, &i_s_fts_config_fields_info),
/* plugin name */
/* const char* */
@@ -3782,7 +4161,7 @@ static ST_FIELD_INFO i_s_innodb_buffer_stats_fields_info[] =
#define IDX_BUF_STATS_PAGE_YOUNG_RATE 12
{STRUCT_FLD(field_name, "PAGES_MADE_YOUNG_RATE"),
- STRUCT_FLD(field_length, 0),
+ STRUCT_FLD(field_length, MAX_FLOAT_STR_LENGTH),
STRUCT_FLD(field_type, MYSQL_TYPE_FLOAT),
STRUCT_FLD(value, 0),
STRUCT_FLD(field_flags, 0),
@@ -3791,7 +4170,7 @@ static ST_FIELD_INFO i_s_innodb_buffer_stats_fields_info[] =
#define IDX_BUF_STATS_PAGE_NOT_YOUNG_RATE 13
{STRUCT_FLD(field_name, "PAGES_MADE_NOT_YOUNG_RATE"),
- STRUCT_FLD(field_length, 0),
+ STRUCT_FLD(field_length, MAX_FLOAT_STR_LENGTH),
STRUCT_FLD(field_type, MYSQL_TYPE_FLOAT),
STRUCT_FLD(value, 0),
STRUCT_FLD(field_flags, 0),
@@ -3827,7 +4206,7 @@ static ST_FIELD_INFO i_s_innodb_buffer_stats_fields_info[] =
#define IDX_BUF_STATS_PAGE_READ_RATE 17
{STRUCT_FLD(field_name, "PAGES_READ_RATE"),
- STRUCT_FLD(field_length, 0),
+ STRUCT_FLD(field_length, MAX_FLOAT_STR_LENGTH),
STRUCT_FLD(field_type, MYSQL_TYPE_FLOAT),
STRUCT_FLD(value, 0),
STRUCT_FLD(field_flags, 0),
@@ -3836,7 +4215,7 @@ static ST_FIELD_INFO i_s_innodb_buffer_stats_fields_info[] =
#define IDX_BUF_STATS_PAGE_CREATE_RATE 18
{STRUCT_FLD(field_name, "PAGES_CREATE_RATE"),
- STRUCT_FLD(field_length, 0),
+ STRUCT_FLD(field_length, MAX_FLOAT_STR_LENGTH),
STRUCT_FLD(field_type, MYSQL_TYPE_FLOAT),
STRUCT_FLD(value, 0),
STRUCT_FLD(field_flags, 0),
@@ -3845,7 +4224,7 @@ static ST_FIELD_INFO i_s_innodb_buffer_stats_fields_info[] =
#define IDX_BUF_STATS_PAGE_WRITTEN_RATE 19
{STRUCT_FLD(field_name, "PAGES_WRITTEN_RATE"),
- STRUCT_FLD(field_length, 0),
+ STRUCT_FLD(field_length, MAX_FLOAT_STR_LENGTH),
STRUCT_FLD(field_type, MYSQL_TYPE_FLOAT),
STRUCT_FLD(value, 0),
STRUCT_FLD(field_flags, 0),
@@ -3908,7 +4287,7 @@ static ST_FIELD_INFO i_s_innodb_buffer_stats_fields_info[] =
#define IDX_BUF_STATS_READ_AHEAD_RATE 26
{STRUCT_FLD(field_name, "READ_AHEAD_RATE"),
- STRUCT_FLD(field_length, 0),
+ STRUCT_FLD(field_length, MAX_FLOAT_STR_LENGTH),
STRUCT_FLD(field_type, MYSQL_TYPE_FLOAT),
STRUCT_FLD(value, 0),
STRUCT_FLD(field_flags, 0),
@@ -3917,7 +4296,7 @@ static ST_FIELD_INFO i_s_innodb_buffer_stats_fields_info[] =
#define IDX_BUF_STATS_READ_AHEAD_EVICT_RATE 27
{STRUCT_FLD(field_name, "READ_AHEAD_EVICTED_RATE"),
- STRUCT_FLD(field_length, 0),
+ STRUCT_FLD(field_length, MAX_FLOAT_STR_LENGTH),
STRUCT_FLD(field_type, MYSQL_TYPE_FLOAT),
STRUCT_FLD(value, 0),
STRUCT_FLD(field_flags, 0),
@@ -4023,11 +4402,13 @@ i_s_innodb_stats_fill(
OK(fields[IDX_BUF_STATS_PAGE_WRITTEN]->store(info->n_pages_written));
+ OK(fields[IDX_BUF_STATS_GET]->store(info->n_page_gets));
+
OK(fields[IDX_BUF_STATS_PAGE_READ_RATE]->store(info->pages_read_rate));
- OK(fields[IDX_BUF_STATS_PAGE_CREATED]->store(info->pages_created_rate));
+ OK(fields[IDX_BUF_STATS_PAGE_CREATE_RATE]->store(info->pages_created_rate));
- OK(fields[IDX_BUF_STATS_PAGE_WRITTEN]->store(info->pages_written_rate));
+ OK(fields[IDX_BUF_STATS_PAGE_WRITTEN_RATE]->store(info->pages_written_rate));
if (info->n_page_get_delta) {
OK(fields[IDX_BUF_STATS_HIT_RATE]->store(
@@ -4384,9 +4765,8 @@ i_s_innodb_buffer_page_fill(
TABLE_LIST* tables, /*!< in/out: tables to fill */
const buf_page_info_t* info_array, /*!< in: array cached page
info */
- ulint num_page, /*!< in: number of page info
- cached */
- mem_heap_t* heap) /*!< in: temp heap memory */
+ ulint num_page) /*!< in: number of page info
+ cached */
{
TABLE* table;
Field** fields;
@@ -4400,15 +4780,13 @@ i_s_innodb_buffer_page_fill(
/* Iterate through the cached array and fill the I_S table rows */
for (ulint i = 0; i < num_page; i++) {
const buf_page_info_t* page_info;
- const char* table_name;
- const char* index_name;
+ char table_name[MAX_FULL_NAME_LEN + 1];
+ const char* table_name_end = NULL;
const char* state_str;
enum buf_page_state state;
page_info = info_array + i;
- table_name = NULL;
- index_name = NULL;
state_str = NULL;
OK(fields[IDX_BUFFER_POOL_ID]->store(page_info->pool_id));
@@ -4446,6 +4824,10 @@ i_s_innodb_buffer_page_fill(
OK(fields[IDX_BUFFER_PAGE_ACCESS_TIME]->store(
page_info->access_time));
+ fields[IDX_BUFFER_PAGE_TABLE_NAME]->set_null();
+
+ fields[IDX_BUFFER_PAGE_INDEX_NAME]->set_null();
+
/* If this is an index page, fetch the index name
and table name */
if (page_info->page_type == I_S_PAGE_TYPE_INDEX) {
@@ -4455,32 +4837,28 @@ i_s_innodb_buffer_page_fill(
index = dict_index_get_if_in_cache_low(
page_info->index_id);
- /* Copy the index/table name under mutex. We
- do not want to hold the InnoDB mutex while
- filling the IS table */
if (index) {
- const char* name_ptr = index->name;
-
- if (name_ptr[0] == TEMP_INDEX_PREFIX) {
- name_ptr++;
- }
-
- index_name = mem_heap_strdup(heap, name_ptr);
-
- table_name = mem_heap_strdup(heap,
- index->table_name);
+ table_name_end = innobase_convert_name(
+ table_name, sizeof(table_name),
+ index->table_name,
+ strlen(index->table_name),
+ thd, TRUE);
+
+ OK(fields[IDX_BUFFER_PAGE_TABLE_NAME]->store(
+ table_name,
+ table_name_end - table_name,
+ system_charset_info));
+ fields[IDX_BUFFER_PAGE_TABLE_NAME]->set_notnull();
+
+ OK(field_store_index_name(
+ fields[IDX_BUFFER_PAGE_INDEX_NAME],
+ index->name));
}
mutex_exit(&dict_sys->mutex);
}
- OK(field_store_string(
- fields[IDX_BUFFER_PAGE_TABLE_NAME], table_name));
-
- OK(field_store_string(
- fields[IDX_BUFFER_PAGE_INDEX_NAME], index_name));
-
OK(fields[IDX_BUFFER_PAGE_NUM_RECS]->store(
page_info->num_recs));
@@ -4593,7 +4971,7 @@ i_s_innodb_set_page_type(
/* Encountered an unknown page type */
page_info->page_type = I_S_PAGE_TYPE_UNKNOWN;
} else {
- /* Make sure we get the righ index into the
+ /* Make sure we get the right index into the
i_s_page_type[] array */
ut_a(page_type == i_s_page_type[page_type].type_value);
@@ -4751,7 +5129,7 @@ i_s_innodb_fill_buffer_pool(
just collected from the buffer chunk scan */
status = i_s_innodb_buffer_page_fill(
thd, tables, info_buffer,
- num_page, heap);
+ num_page);
/* If something goes wrong, break and return */
if (status) {
@@ -5094,13 +5472,11 @@ i_s_innodb_buf_page_lru_fill(
/* Iterate through the cached array and fill the I_S table rows */
for (ulint i = 0; i < num_page; i++) {
const buf_page_info_t* page_info;
- const char* table_name;
- const char* index_name;
+ char table_name[MAX_FULL_NAME_LEN + 1];
+ const char* table_name_end = NULL;
const char* state_str;
enum buf_page_state state;
- table_name = NULL;
- index_name = NULL;
state_str = NULL;
page_info = info_array + i;
@@ -5140,6 +5516,10 @@ i_s_innodb_buf_page_lru_fill(
OK(fields[IDX_BUF_LRU_PAGE_ACCESS_TIME]->store(
page_info->access_time));
+ fields[IDX_BUF_LRU_PAGE_TABLE_NAME]->set_null();
+
+ fields[IDX_BUF_LRU_PAGE_INDEX_NAME]->set_null();
+
/* If this is an index page, fetch the index name
and table name */
if (page_info->page_type == I_S_PAGE_TYPE_INDEX) {
@@ -5149,30 +5529,28 @@ i_s_innodb_buf_page_lru_fill(
index = dict_index_get_if_in_cache_low(
page_info->index_id);
- /* Copy the index/table name under mutex. We
- do not want to hold the InnoDB mutex while
- filling the IS table */
if (index) {
- const char* name_ptr = index->name;
-
- if (name_ptr[0] == TEMP_INDEX_PREFIX) {
- name_ptr++;
- }
-
- index_name = mem_heap_strdup(heap, name_ptr);
- table_name = mem_heap_strdup(heap,
- index->table_name);
+ table_name_end = innobase_convert_name(
+ table_name, sizeof(table_name),
+ index->table_name,
+ strlen(index->table_name),
+ thd, TRUE);
+
+ OK(fields[IDX_BUF_LRU_PAGE_TABLE_NAME]->store(
+ table_name,
+ table_name_end - table_name,
+ system_charset_info));
+ fields[IDX_BUF_LRU_PAGE_TABLE_NAME]->set_notnull();
+
+ OK(field_store_index_name(
+ fields[IDX_BUF_LRU_PAGE_INDEX_NAME],
+ index->name));
}
mutex_exit(&dict_sys->mutex);
}
- OK(field_store_string(
- fields[IDX_BUF_LRU_PAGE_TABLE_NAME], table_name));
-
- OK(field_store_string(
- fields[IDX_BUF_LRU_PAGE_INDEX_NAME], index_name));
OK(fields[IDX_BUF_LRU_PAGE_NUM_RECS]->store(
page_info->num_recs));
@@ -5437,10 +5815,11 @@ i_s_common_deinit(
DBUG_RETURN(0);
}
+/** SYS_TABLES ***************************************************/
/* Fields of the dynamic table INFORMATION_SCHEMA.SYS_TABLES */
static ST_FIELD_INFO innodb_sys_tables_fields_info[] =
{
-#define SYS_TABLE_ID 0
+#define SYS_TABLES_ID 0
{STRUCT_FLD(field_name, "TABLE_ID"),
STRUCT_FLD(field_length, MY_INT64_NUM_DECIMAL_DIGITS),
STRUCT_FLD(field_type, MYSQL_TYPE_LONGLONG),
@@ -5449,7 +5828,7 @@ static ST_FIELD_INFO innodb_sys_tables_fields_info[] =
STRUCT_FLD(old_name, ""),
STRUCT_FLD(open_method, SKIP_OPEN_TABLE)},
-#define SYS_TABLE_NAME 1
+#define SYS_TABLES_NAME 1
{STRUCT_FLD(field_name, "NAME"),
STRUCT_FLD(field_length, MAX_FULL_NAME_LEN + 1),
STRUCT_FLD(field_type, MYSQL_TYPE_STRING),
@@ -5458,7 +5837,7 @@ static ST_FIELD_INFO innodb_sys_tables_fields_info[] =
STRUCT_FLD(old_name, ""),
STRUCT_FLD(open_method, SKIP_OPEN_TABLE)},
-#define SYS_TABLE_FLAG 2
+#define SYS_TABLES_FLAG 2
{STRUCT_FLD(field_name, "FLAG"),
STRUCT_FLD(field_length, MY_INT32_NUM_DECIMAL_DIGITS),
STRUCT_FLD(field_type, MYSQL_TYPE_LONG),
@@ -5467,7 +5846,7 @@ static ST_FIELD_INFO innodb_sys_tables_fields_info[] =
STRUCT_FLD(old_name, ""),
STRUCT_FLD(open_method, SKIP_OPEN_TABLE)},
-#define SYS_TABLE_NUM_COLUMN 3
+#define SYS_TABLES_NUM_COLUMN 3
{STRUCT_FLD(field_name, "N_COLS"),
STRUCT_FLD(field_length, MY_INT32_NUM_DECIMAL_DIGITS),
STRUCT_FLD(field_type, MYSQL_TYPE_LONG),
@@ -5476,7 +5855,7 @@ static ST_FIELD_INFO innodb_sys_tables_fields_info[] =
STRUCT_FLD(old_name, ""),
STRUCT_FLD(open_method, SKIP_OPEN_TABLE)},
-#define SYS_TABLE_SPACE 4
+#define SYS_TABLES_SPACE 4
{STRUCT_FLD(field_name, "SPACE"),
STRUCT_FLD(field_length, MY_INT32_NUM_DECIMAL_DIGITS),
STRUCT_FLD(field_type, MYSQL_TYPE_LONG),
@@ -5485,6 +5864,33 @@ static ST_FIELD_INFO innodb_sys_tables_fields_info[] =
STRUCT_FLD(old_name, ""),
STRUCT_FLD(open_method, SKIP_OPEN_TABLE)},
+#define SYS_TABLES_FILE_FORMAT 5
+ {STRUCT_FLD(field_name, "FILE_FORMAT"),
+ STRUCT_FLD(field_length, 10),
+ STRUCT_FLD(field_type, MYSQL_TYPE_STRING),
+ STRUCT_FLD(value, 0),
+ STRUCT_FLD(field_flags, MY_I_S_MAYBE_NULL),
+ STRUCT_FLD(old_name, ""),
+ STRUCT_FLD(open_method, SKIP_OPEN_TABLE)},
+
+#define SYS_TABLES_ROW_FORMAT 6
+ {STRUCT_FLD(field_name, "ROW_FORMAT"),
+ STRUCT_FLD(field_length, 12),
+ STRUCT_FLD(field_type, MYSQL_TYPE_STRING),
+ STRUCT_FLD(value, 0),
+ STRUCT_FLD(field_flags, MY_I_S_MAYBE_NULL),
+ STRUCT_FLD(old_name, ""),
+ STRUCT_FLD(open_method, SKIP_OPEN_TABLE)},
+
+#define SYS_TABLES_ZIP_PAGE_SIZE 7
+ {STRUCT_FLD(field_name, "ZIP_PAGE_SIZE"),
+ STRUCT_FLD(field_length, MY_INT32_NUM_DECIMAL_DIGITS),
+ STRUCT_FLD(field_type, MYSQL_TYPE_LONG),
+ STRUCT_FLD(value, 0),
+ STRUCT_FLD(field_flags, MY_I_S_UNSIGNED),
+ STRUCT_FLD(old_name, ""),
+ STRUCT_FLD(open_method, SKIP_OPEN_TABLE)},
+
END_OF_ST_FIELD_INFO
};
@@ -5501,20 +5907,42 @@ i_s_dict_fill_sys_tables(
TABLE* table_to_fill) /*!< in/out: fill this table */
{
Field** fields;
+ ulint compact = DICT_TF_GET_COMPACT(table->flags);
+ ulint atomic_blobs = DICT_TF_HAS_ATOMIC_BLOBS(table->flags);
+ ulint zip_size = dict_tf_get_zip_size(table->flags);
+ const char* file_format;
+ const char* row_format;
+
+ file_format = trx_sys_file_format_id_to_name(atomic_blobs);
+ if (!compact) {
+ row_format = "Redundant";
+ } else if (!atomic_blobs) {
+ row_format = "Compact";
+	} else if (DICT_TF_GET_ZIP_SSIZE(table->flags)) {
+ row_format = "Compressed";
+ } else {
+ row_format = "Dynamic";
+ }
DBUG_ENTER("i_s_dict_fill_sys_tables");
fields = table_to_fill->field;
- OK(fields[SYS_TABLE_ID]->store(longlong(table->id), TRUE));
+ OK(fields[SYS_TABLES_ID]->store(longlong(table->id), TRUE));
+
+ OK(field_store_string(fields[SYS_TABLES_NAME], table->name));
+
+ OK(fields[SYS_TABLES_FLAG]->store(table->flags));
+
+ OK(fields[SYS_TABLES_NUM_COLUMN]->store(table->n_cols));
- OK(field_store_string(fields[SYS_TABLE_NAME], table->name));
+ OK(fields[SYS_TABLES_SPACE]->store(table->space));
- OK(fields[SYS_TABLE_FLAG]->store(table->flags));
+ OK(field_store_string(fields[SYS_TABLES_FILE_FORMAT], file_format));
- OK(fields[SYS_TABLE_NUM_COLUMN]->store(table->n_cols));
+ OK(field_store_string(fields[SYS_TABLES_ROW_FORMAT], row_format));
- OK(fields[SYS_TABLE_SPACE]->store(table->space));
+ OK(fields[SYS_TABLES_ZIP_PAGE_SIZE]->store(zip_size));
OK(schema_table_store_record(thd, table_to_fill));
@@ -5663,6 +6091,7 @@ UNIV_INTERN struct st_maria_plugin i_s_innodb_sys_tables =
STRUCT_FLD(maturity, MariaDB_PLUGIN_MATURITY_STABLE),
};
+/** SYS_TABLESTATS ***********************************************/
/* Fields of the dynamic table INFORMATION_SCHEMA.SYS_TABLESTATS */
static ST_FIELD_INFO innodb_sys_tablestats_fields_info[] =
{
@@ -5772,24 +6201,37 @@ i_s_dict_fill_sys_tablestats(
OK(field_store_string(fields[SYS_TABLESTATS_NAME], table->name));
+ dict_table_stats_lock(table, RW_S_LATCH);
+
if (table->stat_initialized) {
OK(field_store_string(fields[SYS_TABLESTATS_INIT],
"Initialized"));
+
+ OK(fields[SYS_TABLESTATS_NROW]->store(table->stat_n_rows,
+ TRUE));
+
+ OK(fields[SYS_TABLESTATS_CLUST_SIZE]->store(
+ table->stat_clustered_index_size));
+
+ OK(fields[SYS_TABLESTATS_INDEX_SIZE]->store(
+ table->stat_sum_of_other_index_sizes));
+
+ OK(fields[SYS_TABLESTATS_MODIFIED]->store(
+ (ulint) table->stat_modified_counter));
} else {
OK(field_store_string(fields[SYS_TABLESTATS_INIT],
"Uninitialized"));
- }
- OK(fields[SYS_TABLESTATS_NROW]->store(table->stat_n_rows, TRUE));
+ OK(fields[SYS_TABLESTATS_NROW]->store(0, TRUE));
+
+ OK(fields[SYS_TABLESTATS_CLUST_SIZE]->store(0));
- OK(fields[SYS_TABLESTATS_CLUST_SIZE]->store(
- table->stat_clustered_index_size));
+ OK(fields[SYS_TABLESTATS_INDEX_SIZE]->store(0));
- OK(fields[SYS_TABLESTATS_INDEX_SIZE]->store(
- table->stat_sum_of_other_index_sizes));
+ OK(fields[SYS_TABLESTATS_MODIFIED]->store(0));
+ }
- OK(fields[SYS_TABLESTATS_MODIFIED]->store(
- table->stat_modified_counter));
+ dict_table_stats_unlock(table, RW_S_LATCH);
OK(fields[SYS_TABLESTATS_AUTONINC]->store(table->autoinc, TRUE));
@@ -5938,6 +6380,7 @@ UNIV_INTERN struct st_maria_plugin i_s_innodb_sys_tablestats =
STRUCT_FLD(maturity, MariaDB_PLUGIN_MATURITY_STABLE),
};
+/** SYS_INDEXES **************************************************/
/* Fields of the dynamic table INFORMATION_SCHEMA.SYS_INDEXES */
static ST_FIELD_INFO innodb_sysindex_fields_info[] =
{
@@ -6022,17 +6465,12 @@ i_s_dict_fill_sys_indexes(
TABLE* table_to_fill) /*!< in/out: fill this table */
{
Field** fields;
- const char* name_ptr = index->name;
DBUG_ENTER("i_s_dict_fill_sys_indexes");
fields = table_to_fill->field;
- if (name_ptr[0] == TEMP_INDEX_PREFIX) {
- name_ptr++;
- }
-
- OK(field_store_string(fields[SYS_INDEX_NAME], name_ptr));
+ OK(field_store_index_name(fields[SYS_INDEX_NAME], index->name));
OK(fields[SYS_INDEX_ID]->store(longlong(index->id), TRUE));
@@ -6193,7 +6631,8 @@ UNIV_INTERN struct st_maria_plugin i_s_innodb_sys_indexes =
STRUCT_FLD(maturity, MariaDB_PLUGIN_MATURITY_STABLE),
};
-/* Fields of the dynamic table INFORMATION_SCHEMA.SYS_COLUMNS */
+/** SYS_COLUMNS **************************************************/
+/* Fields of the dynamic table INFORMATION_SCHEMA.INNODB_SYS_COLUMNS */
static ST_FIELD_INFO innodb_sys_columns_fields_info[] =
{
#define SYS_COLUMN_TABLE_ID 0
@@ -6427,7 +6866,9 @@ UNIV_INTERN struct st_maria_plugin i_s_innodb_sys_columns =
STRUCT_FLD(version_info, INNODB_VERSION_STR),
STRUCT_FLD(maturity, MariaDB_PLUGIN_MATURITY_STABLE),
};
-/* Fields of the dynamic table INFORMATION_SCHEMA.innodb_sys_fields */
+
+/** SYS_FIELDS ***************************************************/
+/* Fields of the dynamic table INFORMATION_SCHEMA.INNODB_SYS_FIELDS */
static ST_FIELD_INFO innodb_sys_fields_fields_info[] =
{
#define SYS_FIELD_INDEX_ID 0
@@ -6635,7 +7076,8 @@ UNIV_INTERN struct st_maria_plugin i_s_innodb_sys_fields =
STRUCT_FLD(maturity, MariaDB_PLUGIN_MATURITY_STABLE),
};
-/* Fields of the dynamic table INFORMATION_SCHEMA.innodb_sys_foreign */
+/** SYS_FOREIGN ********************************************/
+/* Fields of the dynamic table INFORMATION_SCHEMA.INNODB_SYS_FOREIGN */
static ST_FIELD_INFO innodb_sys_foreign_fields_info[] =
{
#define SYS_FOREIGN_ID 0
@@ -6720,6 +7162,7 @@ i_s_dict_fill_sys_foreign(
DBUG_RETURN(0);
}
+
/*******************************************************************//**
Function to populate INFORMATION_SCHEMA.innodb_sys_foreign table. Loop
through each record in SYS_FOREIGN, and extract the foreign key
@@ -6786,6 +7229,7 @@ i_s_sys_foreign_fill_table(
DBUG_RETURN(0);
}
+
/*******************************************************************//**
Bind the dynamic table INFORMATION_SCHEMA.innodb_sys_foreign
@return 0 on success */
@@ -6855,7 +7299,9 @@ UNIV_INTERN struct st_maria_plugin i_s_innodb_sys_foreign =
STRUCT_FLD(version_info, INNODB_VERSION_STR),
STRUCT_FLD(maturity, MariaDB_PLUGIN_MATURITY_STABLE),
};
-/* Fields of the dynamic table INFORMATION_SCHEMA.innodb_sys_foreign_cols */
+
+/** SYS_FOREIGN_COLS ********************************************/
+/* Fields of the dynamic table INFORMATION_SCHEMA.INNODB_SYS_FOREIGN_COLS */
static ST_FIELD_INFO innodb_sys_foreign_cols_fields_info[] =
{
#define SYS_FOREIGN_COL_ID 0
@@ -7070,3 +7516,453 @@ UNIV_INTERN struct st_maria_plugin i_s_innodb_sys_foreign_cols =
STRUCT_FLD(maturity, MariaDB_PLUGIN_MATURITY_STABLE),
};
+/** SYS_TABLESPACES ********************************************/
+/* Fields of the dynamic table INFORMATION_SCHEMA.INNODB_SYS_TABLESPACES */
+static ST_FIELD_INFO innodb_sys_tablespaces_fields_info[] =
+{
+#define SYS_TABLESPACES_SPACE 0
+ {STRUCT_FLD(field_name, "SPACE"),
+ STRUCT_FLD(field_length, MY_INT32_NUM_DECIMAL_DIGITS),
+ STRUCT_FLD(field_type, MYSQL_TYPE_LONG),
+ STRUCT_FLD(value, 0),
+ STRUCT_FLD(field_flags, MY_I_S_UNSIGNED),
+ STRUCT_FLD(old_name, ""),
+ STRUCT_FLD(open_method, SKIP_OPEN_TABLE)},
+
+#define SYS_TABLESPACES_NAME 1
+ {STRUCT_FLD(field_name, "NAME"),
+ STRUCT_FLD(field_length, MAX_FULL_NAME_LEN + 1),
+ STRUCT_FLD(field_type, MYSQL_TYPE_STRING),
+ STRUCT_FLD(value, 0),
+ STRUCT_FLD(field_flags, 0),
+ STRUCT_FLD(old_name, ""),
+ STRUCT_FLD(open_method, SKIP_OPEN_TABLE)},
+
+#define SYS_TABLESPACES_FLAGS 2
+ {STRUCT_FLD(field_name, "FLAG"),
+ STRUCT_FLD(field_length, MY_INT32_NUM_DECIMAL_DIGITS),
+ STRUCT_FLD(field_type, MYSQL_TYPE_LONG),
+ STRUCT_FLD(value, 0),
+ STRUCT_FLD(field_flags, MY_I_S_UNSIGNED),
+ STRUCT_FLD(old_name, ""),
+ STRUCT_FLD(open_method, SKIP_OPEN_TABLE)},
+
+#define SYS_TABLESPACES_FILE_FORMAT 3
+ {STRUCT_FLD(field_name, "FILE_FORMAT"),
+ STRUCT_FLD(field_length, 10),
+ STRUCT_FLD(field_type, MYSQL_TYPE_STRING),
+ STRUCT_FLD(value, 0),
+ STRUCT_FLD(field_flags, MY_I_S_MAYBE_NULL),
+ STRUCT_FLD(old_name, ""),
+ STRUCT_FLD(open_method, SKIP_OPEN_TABLE)},
+
+#define SYS_TABLESPACES_ROW_FORMAT 4
+ {STRUCT_FLD(field_name, "ROW_FORMAT"),
+ STRUCT_FLD(field_length, 22),
+ STRUCT_FLD(field_type, MYSQL_TYPE_STRING),
+ STRUCT_FLD(value, 0),
+ STRUCT_FLD(field_flags, MY_I_S_MAYBE_NULL),
+ STRUCT_FLD(old_name, ""),
+ STRUCT_FLD(open_method, SKIP_OPEN_TABLE)},
+
+#define SYS_TABLESPACES_PAGE_SIZE 5
+ {STRUCT_FLD(field_name, "PAGE_SIZE"),
+ STRUCT_FLD(field_length, MY_INT32_NUM_DECIMAL_DIGITS),
+ STRUCT_FLD(field_type, MYSQL_TYPE_LONG),
+ STRUCT_FLD(value, 0),
+ STRUCT_FLD(field_flags, MY_I_S_UNSIGNED),
+ STRUCT_FLD(old_name, ""),
+ STRUCT_FLD(open_method, SKIP_OPEN_TABLE)},
+
+#define SYS_TABLESPACES_ZIP_PAGE_SIZE 6
+ {STRUCT_FLD(field_name, "ZIP_PAGE_SIZE"),
+ STRUCT_FLD(field_length, MY_INT32_NUM_DECIMAL_DIGITS),
+ STRUCT_FLD(field_type, MYSQL_TYPE_LONG),
+ STRUCT_FLD(value, 0),
+ STRUCT_FLD(field_flags, MY_I_S_UNSIGNED),
+ STRUCT_FLD(old_name, ""),
+ STRUCT_FLD(open_method, SKIP_OPEN_TABLE)},
+
+ END_OF_ST_FIELD_INFO
+
+};
+
+/**********************************************************************//**
+Function to fill INFORMATION_SCHEMA.INNODB_SYS_TABLESPACES with information
+collected by scanning SYS_TABLESPACES table.
+@return 0 on success */
+static
+int
+i_s_dict_fill_sys_tablespaces(
+/*==========================*/
+ THD* thd, /*!< in: thread */
+ ulint space, /*!< in: space ID */
+ const char* name, /*!< in: tablespace name */
+ ulint flags, /*!< in: tablespace flags */
+ TABLE* table_to_fill) /*!< in/out: fill this table */
+{
+ Field** fields;
+ ulint atomic_blobs = FSP_FLAGS_HAS_ATOMIC_BLOBS(flags);
+	ulint		page_size = fsp_flags_get_page_size(flags);
+ ulint zip_size = fsp_flags_get_zip_size(flags);
+ const char* file_format;
+ const char* row_format;
+
+ DBUG_ENTER("i_s_dict_fill_sys_tablespaces");
+
+ file_format = trx_sys_file_format_id_to_name(atomic_blobs);
+ if (!atomic_blobs) {
+ row_format = "Compact or Redundant";
+	} else if (DICT_TF_GET_ZIP_SSIZE(flags)) {
+ row_format = "Compressed";
+ } else {
+ row_format = "Dynamic";
+ }
+
+ fields = table_to_fill->field;
+
+ OK(fields[SYS_TABLESPACES_SPACE]->store(space));
+
+ OK(field_store_string(fields[SYS_TABLESPACES_NAME], name));
+
+ OK(fields[SYS_TABLESPACES_FLAGS]->store(flags));
+
+ OK(field_store_string(fields[SYS_TABLESPACES_FILE_FORMAT],
+ file_format));
+
+ OK(field_store_string(fields[SYS_TABLESPACES_ROW_FORMAT],
+ row_format));
+
+ OK(fields[SYS_TABLESPACES_PAGE_SIZE]->store(page_size));
+
+ OK(fields[SYS_TABLESPACES_ZIP_PAGE_SIZE]->store(zip_size));
+
+ OK(schema_table_store_record(thd, table_to_fill));
+
+ DBUG_RETURN(0);
+}
+/*******************************************************************//**
+Function to populate INFORMATION_SCHEMA.INNODB_SYS_TABLESPACES table.
+Loop through each record in SYS_TABLESPACES, and extract the column
+information and fill the INFORMATION_SCHEMA.INNODB_SYS_TABLESPACES table.
+@return 0 on success */
+static
+int
+i_s_sys_tablespaces_fill_table(
+/*===========================*/
+ THD* thd, /*!< in: thread */
+ TABLE_LIST* tables, /*!< in/out: tables to fill */
+ Item* ) /*!< in: condition (not used) */
+{
+ btr_pcur_t pcur;
+ const rec_t* rec;
+ mem_heap_t* heap;
+ mtr_t mtr;
+
+ DBUG_ENTER("i_s_sys_tablespaces_fill_table");
+
+ /* deny access to user without PROCESS_ACL privilege */
+ if (check_global_access(thd, PROCESS_ACL)) {
+ DBUG_RETURN(0);
+ }
+
+ heap = mem_heap_create(1000);
+ mutex_enter(&dict_sys->mutex);
+ mtr_start(&mtr);
+
+ rec = dict_startscan_system(&pcur, &mtr, SYS_TABLESPACES);
+
+ while (rec) {
+ const char* err_msg;
+ ulint space;
+ const char* name;
+ ulint flags;
+
+ /* Extract necessary information from a SYS_TABLESPACES row */
+ err_msg = dict_process_sys_tablespaces(
+ heap, rec, &space, &name, &flags);
+
+ mtr_commit(&mtr);
+ mutex_exit(&dict_sys->mutex);
+
+ if (!err_msg) {
+ i_s_dict_fill_sys_tablespaces(
+ thd, space, name, flags,
+ tables->table);
+ } else {
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_CANT_FIND_SYSTEM_REC, "%s",
+ err_msg);
+ }
+
+ mem_heap_empty(heap);
+
+ /* Get the next record */
+ mutex_enter(&dict_sys->mutex);
+ mtr_start(&mtr);
+ rec = dict_getnext_system(&pcur, &mtr);
+ }
+
+ mtr_commit(&mtr);
+ mutex_exit(&dict_sys->mutex);
+ mem_heap_free(heap);
+
+ DBUG_RETURN(0);
+}
+/*******************************************************************//**
+Bind the dynamic table INFORMATION_SCHEMA.INNODB_SYS_TABLESPACES
+@return 0 on success */
+static
+int
+innodb_sys_tablespaces_init(
+/*========================*/
+ void* p) /*!< in/out: table schema object */
+{
+ ST_SCHEMA_TABLE* schema;
+
+ DBUG_ENTER("innodb_sys_tablespaces_init");
+
+ schema = (ST_SCHEMA_TABLE*) p;
+
+ schema->fields_info = innodb_sys_tablespaces_fields_info;
+ schema->fill_table = i_s_sys_tablespaces_fill_table;
+
+ DBUG_RETURN(0);
+}
+
+UNIV_INTERN struct st_maria_plugin i_s_innodb_sys_tablespaces =
+{
+ /* the plugin type (a MYSQL_XXX_PLUGIN value) */
+ /* int */
+ STRUCT_FLD(type, MYSQL_INFORMATION_SCHEMA_PLUGIN),
+
+ /* pointer to type-specific plugin descriptor */
+ /* void* */
+ STRUCT_FLD(info, &i_s_info),
+
+ /* plugin name */
+ /* const char* */
+ STRUCT_FLD(name, "INNODB_SYS_TABLESPACES"),
+
+ /* plugin author (for SHOW PLUGINS) */
+ /* const char* */
+ STRUCT_FLD(author, plugin_author),
+
+ /* general descriptive text (for SHOW PLUGINS) */
+ /* const char* */
+ STRUCT_FLD(descr, "InnoDB SYS_TABLESPACES"),
+
+ /* the plugin license (PLUGIN_LICENSE_XXX) */
+ /* int */
+ STRUCT_FLD(license, PLUGIN_LICENSE_GPL),
+
+ /* the function to invoke when plugin is loaded */
+ /* int (*)(void*); */
+ STRUCT_FLD(init, innodb_sys_tablespaces_init),
+
+ /* the function to invoke when plugin is unloaded */
+ /* int (*)(void*); */
+ STRUCT_FLD(deinit, i_s_common_deinit),
+
+ /* plugin version (for SHOW PLUGINS) */
+ /* unsigned int */
+ STRUCT_FLD(version, INNODB_VERSION_SHORT),
+
+ /* struct st_mysql_show_var* */
+ STRUCT_FLD(status_vars, NULL),
+
+ /* struct st_mysql_sys_var** */
+ STRUCT_FLD(system_vars, NULL),
+
+ /* Maria extension */
+ STRUCT_FLD(version_info, INNODB_VERSION_STR),
+ STRUCT_FLD(maturity, MariaDB_PLUGIN_MATURITY_STABLE),
+};
+
+/** SYS_DATAFILES ************************************************/
+/* Fields of the dynamic table INFORMATION_SCHEMA.INNODB_SYS_DATAFILES */
+static ST_FIELD_INFO innodb_sys_datafiles_fields_info[] =
+{
+#define SYS_DATAFILES_SPACE 0
+ {STRUCT_FLD(field_name, "SPACE"),
+ STRUCT_FLD(field_length, MY_INT32_NUM_DECIMAL_DIGITS),
+ STRUCT_FLD(field_type, MYSQL_TYPE_LONG),
+ STRUCT_FLD(value, 0),
+ STRUCT_FLD(field_flags, MY_I_S_UNSIGNED),
+ STRUCT_FLD(old_name, ""),
+ STRUCT_FLD(open_method, SKIP_OPEN_TABLE)},
+
+#define SYS_DATAFILES_PATH 1
+ {STRUCT_FLD(field_name, "PATH"),
+ STRUCT_FLD(field_length, OS_FILE_MAX_PATH),
+ STRUCT_FLD(field_type, MYSQL_TYPE_STRING),
+ STRUCT_FLD(value, 0),
+ STRUCT_FLD(field_flags, 0),
+ STRUCT_FLD(old_name, ""),
+ STRUCT_FLD(open_method, SKIP_OPEN_TABLE)},
+
+ END_OF_ST_FIELD_INFO
+};
+
+/**********************************************************************//**
+Function to fill INFORMATION_SCHEMA.INNODB_SYS_DATAFILES with information
+collected by scanning SYS_DATAFILES table.
+@return 0 on success */
+static
+int
+i_s_dict_fill_sys_datafiles(
+/*========================*/
+ THD* thd, /*!< in: thread */
+ ulint space, /*!< in: space ID */
+ const char* path, /*!< in: absolute path */
+ TABLE* table_to_fill) /*!< in/out: fill this table */
+{
+ Field** fields;
+
+ DBUG_ENTER("i_s_dict_fill_sys_datafiles");
+
+ fields = table_to_fill->field;
+
+ OK(field_store_ulint(fields[SYS_DATAFILES_SPACE], space));
+
+ OK(field_store_string(fields[SYS_DATAFILES_PATH], path));
+
+ OK(schema_table_store_record(thd, table_to_fill));
+
+ DBUG_RETURN(0);
+}
+/*******************************************************************//**
+Function to populate INFORMATION_SCHEMA.INNODB_SYS_DATAFILES table.
+Loop through each record in SYS_DATAFILES, and extract the column
+information and fill the INFORMATION_SCHEMA.INNODB_SYS_DATAFILES table.
+@return 0 on success */
+static
+int
+i_s_sys_datafiles_fill_table(
+/*=========================*/
+ THD* thd, /*!< in: thread */
+ TABLE_LIST* tables, /*!< in/out: tables to fill */
+ Item* ) /*!< in: condition (not used) */
+{
+ btr_pcur_t pcur;
+ const rec_t* rec;
+ mem_heap_t* heap;
+ mtr_t mtr;
+
+ DBUG_ENTER("i_s_sys_datafiles_fill_table");
+
+ /* deny access to user without PROCESS_ACL privilege */
+ if (check_global_access(thd, PROCESS_ACL)) {
+ DBUG_RETURN(0);
+ }
+
+ heap = mem_heap_create(1000);
+ mutex_enter(&dict_sys->mutex);
+ mtr_start(&mtr);
+
+ rec = dict_startscan_system(&pcur, &mtr, SYS_DATAFILES);
+
+ while (rec) {
+ const char* err_msg;
+ ulint space;
+ const char* path;
+
+ /* Extract necessary information from a SYS_DATAFILES row */
+ err_msg = dict_process_sys_datafiles(
+ heap, rec, &space, &path);
+
+ mtr_commit(&mtr);
+ mutex_exit(&dict_sys->mutex);
+
+ if (!err_msg) {
+ i_s_dict_fill_sys_datafiles(
+ thd, space, path, tables->table);
+ } else {
+ push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+ ER_CANT_FIND_SYSTEM_REC, "%s",
+ err_msg);
+ }
+
+ mem_heap_empty(heap);
+
+ /* Get the next record */
+ mutex_enter(&dict_sys->mutex);
+ mtr_start(&mtr);
+ rec = dict_getnext_system(&pcur, &mtr);
+ }
+
+ mtr_commit(&mtr);
+ mutex_exit(&dict_sys->mutex);
+ mem_heap_free(heap);
+
+ DBUG_RETURN(0);
+}
+/*******************************************************************//**
+Bind the dynamic table INFORMATION_SCHEMA.INNODB_SYS_DATAFILES
+@return 0 on success */
+static
+int
+innodb_sys_datafiles_init(
+/*======================*/
+ void* p) /*!< in/out: table schema object */
+{
+ ST_SCHEMA_TABLE* schema;
+
+ DBUG_ENTER("innodb_sys_datafiles_init");
+
+ schema = (ST_SCHEMA_TABLE*) p;
+
+ schema->fields_info = innodb_sys_datafiles_fields_info;
+ schema->fill_table = i_s_sys_datafiles_fill_table;
+
+ DBUG_RETURN(0);
+}
+
+UNIV_INTERN struct st_maria_plugin i_s_innodb_sys_datafiles =
+{
+ /* the plugin type (a MYSQL_XXX_PLUGIN value) */
+ /* int */
+ STRUCT_FLD(type, MYSQL_INFORMATION_SCHEMA_PLUGIN),
+
+ /* pointer to type-specific plugin descriptor */
+ /* void* */
+ STRUCT_FLD(info, &i_s_info),
+
+ /* plugin name */
+ /* const char* */
+ STRUCT_FLD(name, "INNODB_SYS_DATAFILES"),
+
+ /* plugin author (for SHOW PLUGINS) */
+ /* const char* */
+ STRUCT_FLD(author, plugin_author),
+
+ /* general descriptive text (for SHOW PLUGINS) */
+ /* const char* */
+ STRUCT_FLD(descr, "InnoDB SYS_DATAFILES"),
+
+ /* the plugin license (PLUGIN_LICENSE_XXX) */
+ /* int */
+ STRUCT_FLD(license, PLUGIN_LICENSE_GPL),
+
+ /* the function to invoke when plugin is loaded */
+ /* int (*)(void*); */
+ STRUCT_FLD(init, innodb_sys_datafiles_init),
+
+ /* the function to invoke when plugin is unloaded */
+ /* int (*)(void*); */
+ STRUCT_FLD(deinit, i_s_common_deinit),
+
+ /* plugin version (for SHOW PLUGINS) */
+ /* unsigned int */
+ STRUCT_FLD(version, INNODB_VERSION_SHORT),
+
+ /* struct st_mysql_show_var* */
+ STRUCT_FLD(status_vars, NULL),
+
+ /* struct st_mysql_sys_var** */
+ STRUCT_FLD(system_vars, NULL),
+
+ /* Maria extension */
+ STRUCT_FLD(version_info, INNODB_VERSION_STR),
+ STRUCT_FLD(maturity, MariaDB_PLUGIN_MATURITY_STABLE),
+};
diff --git a/storage/innobase/handler/i_s.h b/storage/innobase/handler/i_s.h
index 7fc7b091795..05f6fd8ecd2 100644
--- a/storage/innobase/handler/i_s.h
+++ b/storage/innobase/handler/i_s.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2007, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2007, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -33,6 +33,8 @@ extern struct st_maria_plugin i_s_innodb_locks;
extern struct st_maria_plugin i_s_innodb_lock_waits;
extern struct st_maria_plugin i_s_innodb_cmp;
extern struct st_maria_plugin i_s_innodb_cmp_reset;
+extern struct st_maria_plugin i_s_innodb_cmp_per_index;
+extern struct st_maria_plugin i_s_innodb_cmp_per_index_reset;
extern struct st_maria_plugin i_s_innodb_cmpmem;
extern struct st_maria_plugin i_s_innodb_cmpmem_reset;
extern struct st_maria_plugin i_s_innodb_metrics;
@@ -53,5 +55,7 @@ extern struct st_maria_plugin i_s_innodb_sys_columns;
extern struct st_maria_plugin i_s_innodb_sys_fields;
extern struct st_maria_plugin i_s_innodb_sys_foreign;
extern struct st_maria_plugin i_s_innodb_sys_foreign_cols;
+extern struct st_maria_plugin i_s_innodb_sys_tablespaces;
+extern struct st_maria_plugin i_s_innodb_sys_datafiles;
#endif /* i_s_h */
diff --git a/storage/innobase/ibuf/ibuf0ibuf.cc b/storage/innobase/ibuf/ibuf0ibuf.cc
index cd9de39f3c6..168da732bc0 100644
--- a/storage/innobase/ibuf/ibuf0ibuf.cc
+++ b/storage/innobase/ibuf/ibuf0ibuf.cc
@@ -25,6 +25,10 @@ Created 7/19/1997 Heikki Tuuri
#include "ibuf0ibuf.h"
+#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
+UNIV_INTERN my_bool srv_ibuf_disable_background_merge;
+#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
+
/** Number of bits describing a single page */
#define IBUF_BITS_PER_PAGE 4
#if IBUF_BITS_PER_PAGE % 2
@@ -56,6 +60,7 @@ Created 7/19/1997 Heikki Tuuri
#include "log0recv.h"
#include "que0que.h"
#include "srv0start.h" /* srv_shutdown_state */
+#include "ha_prototypes.h"
/* STRUCTURE OF AN INSERT BUFFER RECORD
@@ -284,16 +289,16 @@ type, counter, and some flags. */
/** The mutex used to block pessimistic inserts to ibuf trees */
-static mutex_t ibuf_pessimistic_insert_mutex;
+static ib_mutex_t ibuf_pessimistic_insert_mutex;
/** The mutex protecting the insert buffer structs */
-static mutex_t ibuf_mutex;
+static ib_mutex_t ibuf_mutex;
/** The mutex protecting the insert buffer bitmaps */
-static mutex_t ibuf_bitmap_mutex;
+static ib_mutex_t ibuf_bitmap_mutex;
/** The area in pages from which contract looks for page numbers for merge */
-#define IBUF_MERGE_AREA 8
+#define IBUF_MERGE_AREA 8UL
/** Inside the merge area, pages which have at most 1 per this number less
buffered entries compared to maximum volume that can buffered for a single
@@ -507,7 +512,7 @@ ibuf_init_at_db_start(void)
dict_index_t* index;
ulint n_used;
page_t* header_page;
- ulint error;
+ dberr_t error;
ibuf = static_cast<ibuf_t*>(mem_zalloc(sizeof(ibuf_t)));
@@ -2485,6 +2490,73 @@ ibuf_get_merge_page_nos_func(
return(sum_volumes);
}
+/*******************************************************************//**
+Get the matching records for space id.
+@return current rec or NULL */
+static __attribute__((nonnull, warn_unused_result))
+const rec_t*
+ibuf_get_user_rec(
+/*===============*/
+ btr_pcur_t* pcur, /*!< in: the current cursor */
+ mtr_t* mtr) /*!< in: mini transaction */
+{
+ do {
+ const rec_t* rec = btr_pcur_get_rec(pcur);
+
+ if (page_rec_is_user_rec(rec)) {
+ return(rec);
+ }
+ } while (btr_pcur_move_to_next(pcur, mtr));
+
+ return(NULL);
+}
+
+/*********************************************************************//**
+Reads page numbers for a space id from an ibuf tree.
+@return a lower limit for the combined volume of records which will be
+merged */
+static __attribute__((nonnull, warn_unused_result))
+ulint
+ibuf_get_merge_pages(
+/*=================*/
+ btr_pcur_t* pcur, /*!< in/out: cursor */
+ ulint space, /*!< in: space for which to merge */
+ ulint limit, /*!< in: max page numbers to read */
+ ulint* pages, /*!< out: pages read */
+ ulint* spaces, /*!< out: spaces read */
+ ib_int64_t* versions,/*!< out: space versions read */
+ ulint* n_pages,/*!< out: number of pages read */
+ mtr_t* mtr) /*!< in: mini transaction */
+{
+ const rec_t* rec;
+ ulint volume = 0;
+ ib_int64_t version = fil_space_get_version(space);
+
+ ut_a(space != ULINT_UNDEFINED);
+
+ *n_pages = 0;
+
+ while ((rec = ibuf_get_user_rec(pcur, mtr)) != 0
+ && ibuf_rec_get_space(mtr, rec) == space
+ && *n_pages < limit) {
+
+ ulint page_no = ibuf_rec_get_page_no(mtr, rec);
+
+ if (*n_pages == 0 || pages[*n_pages - 1] != page_no) {
+ spaces[*n_pages] = space;
+ pages[*n_pages] = page_no;
+ versions[*n_pages] = version;
+ ++*n_pages;
+ }
+
+ volume += ibuf_rec_get_volume(mtr, rec);
+
+ btr_pcur_move_to_next(pcur, mtr);
+ }
+
+ return(volume);
+}
+
/*********************************************************************//**
Contracts insert buffer trees by reading pages to the buffer pool.
@return a lower limit for the combined size in bytes of entries which
@@ -2492,32 +2564,22 @@ will be merged from ibuf trees to the pages read, 0 if ibuf is
empty */
static
ulint
-ibuf_contract_ext(
-/*==============*/
- ulint* n_pages,/*!< out: number of pages to which merged */
- ibool sync) /*!< in: TRUE if the caller wants to wait for the
- issued read with the highest tablespace address
- to complete */
+ibuf_merge_pages(
+/*=============*/
+ ulint* n_pages, /*!< out: number of pages to which merged */
+ bool sync) /*!< in: TRUE if the caller wants to wait for
+ the issued read with the highest tablespace
+ address to complete */
{
+ mtr_t mtr;
btr_pcur_t pcur;
+ ulint sum_sizes;
ulint page_nos[IBUF_MAX_N_PAGES_MERGED];
ulint space_ids[IBUF_MAX_N_PAGES_MERGED];
ib_int64_t space_versions[IBUF_MAX_N_PAGES_MERGED];
- ulint sum_sizes;
- mtr_t mtr;
*n_pages = 0;
- /* We perform a dirty read of ibuf->empty, without latching
- the insert buffer root page. We trust this dirty read except
- when a slow shutdown is being executed. During a slow
- shutdown, the insert buffer merge must be completed. */
-
- if (UNIV_UNLIKELY(ibuf->empty)
- && UNIV_LIKELY(!srv_shutdown_state)) {
- return(0);
- }
-
ibuf_mtr_start(&mtr);
/* Open a cursor to a randomly chosen leaf of the tree, at a random
@@ -2554,18 +2616,159 @@ ibuf_contract_ext(
ibuf_mtr_commit(&mtr);
btr_pcur_close(&pcur);
- buf_read_ibuf_merge_pages(sync, space_ids, space_versions, page_nos,
- *n_pages);
+ buf_read_ibuf_merge_pages(
+ sync, space_ids, space_versions, page_nos, *n_pages);
return(sum_sizes + 1);
}
/*********************************************************************//**
+Get the table instance from the table id.
+@return table instance */
+static __attribute__((warn_unused_result))
+dict_table_t*
+ibuf_get_table(
+/*===========*/
+ table_id_t table_id) /*!< in: valid table id */
+{
+ rw_lock_s_lock_func(&dict_operation_lock, 0, __FILE__, __LINE__);
+
+ dict_table_t* table = dict_table_open_on_id(table_id, FALSE, FALSE);
+
+ rw_lock_s_unlock_gen(&dict_operation_lock, 0);
+
+ return(table);
+}
+
+/*********************************************************************//**
Contracts insert buffer trees by reading pages to the buffer pool.
@return a lower limit for the combined size in bytes of entries which
will be merged from ibuf trees to the pages read, 0 if ibuf is
empty */
-UNIV_INTERN
+static
+ulint
+ibuf_merge_space(
+/*=============*/
+ ulint space, /*!< in: tablespace id to merge */
+ ulint* n_pages)/*!< out: number of pages to which merged */
+{
+ mtr_t mtr;
+ btr_pcur_t pcur;
+ mem_heap_t* heap = mem_heap_create(512);
+ dtuple_t* tuple = ibuf_search_tuple_build(space, 0, heap);
+
+ ibuf_mtr_start(&mtr);
+
+ /* Position the cursor on the first matching record. */
+
+ btr_pcur_open(
+ ibuf->index, tuple, PAGE_CUR_GE, BTR_SEARCH_LEAF, &pcur,
+ &mtr);
+
+ mem_heap_free(heap);
+
+ ut_ad(page_validate(btr_pcur_get_page(&pcur), ibuf->index));
+
+ ulint sum_sizes = 0;
+ ulint pages[IBUF_MAX_N_PAGES_MERGED];
+ ulint spaces[IBUF_MAX_N_PAGES_MERGED];
+ ib_int64_t versions[IBUF_MAX_N_PAGES_MERGED];
+
+ if (page_get_n_recs(btr_pcur_get_page(&pcur)) == 0) {
+ /* If a B-tree page is empty, it must be the root page
+ and the whole B-tree must be empty. InnoDB does not
+ allow empty B-tree pages other than the root. */
+ ut_ad(ibuf->empty);
+ ut_ad(page_get_space_id(btr_pcur_get_page(&pcur))
+ == IBUF_SPACE_ID);
+ ut_ad(page_get_page_no(btr_pcur_get_page(&pcur))
+ == FSP_IBUF_TREE_ROOT_PAGE_NO);
+
+ } else {
+
+ sum_sizes = ibuf_get_merge_pages(
+ &pcur, space, IBUF_MAX_N_PAGES_MERGED,
+ &pages[0], &spaces[0], &versions[0], n_pages,
+ &mtr);
+
+ ++sum_sizes;
+ }
+
+ ibuf_mtr_commit(&mtr);
+
+ btr_pcur_close(&pcur);
+
+ if (sum_sizes > 0) {
+
+ ut_a(*n_pages > 0 || sum_sizes == 1);
+
+#ifdef UNIV_DEBUG
+ ut_ad(*n_pages <= UT_ARR_SIZE(pages));
+
+ for (ulint i = 0; i < *n_pages; ++i) {
+ ut_ad(spaces[i] == space);
+ ut_ad(i == 0 || versions[i] == versions[i - 1]);
+ }
+#endif /* UNIV_DEBUG */
+
+ buf_read_ibuf_merge_pages(
+ TRUE, spaces, versions, pages, *n_pages);
+ }
+
+ return(sum_sizes);
+}
+
+/*********************************************************************//**
+Contracts insert buffer trees by reading pages to the buffer pool.
+@return a lower limit for the combined size in bytes of entries which
+will be merged from ibuf trees to the pages read, 0 if ibuf is
+empty */
+static __attribute__((nonnull, warn_unused_result))
+ulint
+ibuf_merge(
+/*=======*/
+ table_id_t table_id, /*!< in: if merge should be
+ done only for a specific
+ table, for all tables this
+ should be 0 */
+ ulint* n_pages, /*!< out: number of pages to
+ which merged */
+ bool sync) /*!< in: TRUE if the caller
+ wants to wait for the issued
+ read with the highest
+ tablespace address to complete */
+{
+ dict_table_t* table;
+
+ *n_pages = 0;
+
+ /* We perform a dirty read of ibuf->empty, without latching
+ the insert buffer root page. We trust this dirty read except
+ when a slow shutdown is being executed. During a slow
+ shutdown, the insert buffer merge must be completed. */
+
+ if (ibuf->empty && !srv_shutdown_state) {
+ return(0);
+ } else if (table_id == 0) {
+ return(ibuf_merge_pages(n_pages, sync));
+ } else if ((table = ibuf_get_table(table_id)) == 0) {
+ /* Table has been dropped. */
+ return(0);
+ }
+
+ ulint volume = ibuf_merge_space(table->space, n_pages);
+
+ dict_table_close(table, FALSE, FALSE);
+
+ return(volume);
+}
+
+/*********************************************************************//**
+Contracts insert buffer trees by reading pages to the buffer pool.
+@return a lower limit for the combined size in bytes of entries which
+will be merged from ibuf trees to the pages read, 0 if ibuf is
+empty */
+static
ulint
ibuf_contract(
/*==========*/
@@ -2575,7 +2778,7 @@ ibuf_contract(
{
ulint n_pages;
- return(ibuf_contract_ext(&n_pages, sync));
+ return(ibuf_merge(0, &n_pages, sync));
}
/*********************************************************************//**
@@ -2587,17 +2790,26 @@ UNIV_INTERN
ulint
ibuf_contract_in_background(
/*========================*/
- ibool full) /*!< in: TRUE if the caller wants to do a full
- contract based on PCT_IO(100). If FALSE then
- the size of contract batch is determined based
- on the current size of the ibuf tree. */
+ table_id_t table_id, /*!< in: if merge should be done only
+ for a specific table, for all tables
+ this should be 0 */
+ ibool full) /*!< in: TRUE if the caller wants to
+ do a full contract based on PCT_IO(100).
+ If FALSE then the size of contract
+ batch is determined based on the
+ current size of the ibuf tree. */
{
ulint sum_bytes = 0;
ulint sum_pages = 0;
- ulint n_bytes;
ulint n_pag2;
ulint n_pages;
+#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
+ if (srv_ibuf_disable_background_merge && table_id == 0) {
+ return(0);
+ }
+#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
+
if (full) {
/* Caller has requested a full batch */
n_pages = PCT_IO(100);
@@ -2620,7 +2832,9 @@ ibuf_contract_in_background(
}
while (sum_pages < n_pages) {
- n_bytes = ibuf_contract_ext(&n_pag2, FALSE);
+ ulint n_bytes;
+
+ n_bytes = ibuf_merge(table_id, &n_pag2, FALSE);
if (n_bytes == 0) {
return(sum_bytes);
@@ -3061,7 +3275,7 @@ ibuf_update_max_tablespace_id(void)
ibuf_mtr_start(&mtr);
btr_pcur_open_at_index_side(
- FALSE, ibuf->index, BTR_SEARCH_LEAF, &pcur, TRUE, &mtr);
+ false, ibuf->index, BTR_SEARCH_LEAF, &pcur, true, 0, &mtr);
ut_ad(page_validate(btr_pcur_get_page(&pcur), ibuf->index));
@@ -3223,8 +3437,8 @@ ibuf_get_entry_counter_func(
Buffer an operation in the insert/delete buffer, instead of doing it
directly to the disk page, if this is possible.
@return DB_SUCCESS, DB_STRONG_FAIL or other error */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
ibuf_insert_low(
/*============*/
ulint mode, /*!< in: BTR_MODIFY_PREV or BTR_MODIFY_TREE */
@@ -3246,7 +3460,9 @@ ibuf_insert_low(
btr_pcur_t pcur;
btr_cur_t* cursor;
dtuple_t* ibuf_entry;
+ mem_heap_t* offsets_heap = NULL;
mem_heap_t* heap;
+ ulint* offsets = NULL;
ulint buffered;
lint min_n_recs;
rec_t* ins_rec;
@@ -3254,7 +3470,7 @@ ibuf_insert_low(
page_t* bitmap_page;
buf_block_t* block;
page_t* root;
- ulint err;
+ dberr_t err;
ibool do_merge;
ulint space_ids[IBUF_MAX_N_PAGES_MERGED];
ib_int64_t space_versions[IBUF_MAX_N_PAGES_MERGED];
@@ -3294,7 +3510,7 @@ ibuf_insert_low(
return(DB_STRONG_FAIL);
}
- heap = mem_heap_create(512);
+ heap = mem_heap_create(1024);
/* Build the entry which contains the space id and the page number
as the first fields and the type information for other fields, and
@@ -3464,9 +3680,11 @@ fail_exit:
cursor = btr_pcur_get_btr_cur(&pcur);
if (mode == BTR_MODIFY_PREV) {
- err = btr_cur_optimistic_insert(BTR_NO_LOCKING_FLAG, cursor,
- ibuf_entry, &ins_rec,
- &dummy_big_rec, 0, thr, &mtr);
+ err = btr_cur_optimistic_insert(
+ BTR_NO_LOCKING_FLAG,
+ cursor, &offsets, &offsets_heap,
+ ibuf_entry, &ins_rec,
+ &dummy_big_rec, 0, thr, &mtr);
block = btr_cur_get_block(cursor);
ut_ad(buf_block_get_space(block) == IBUF_SPACE_ID);
@@ -3493,13 +3711,15 @@ fail_exit:
err = btr_cur_optimistic_insert(
BTR_NO_LOCKING_FLAG | BTR_NO_UNDO_LOG_FLAG,
- cursor, ibuf_entry, &ins_rec,
+ cursor, &offsets, &offsets_heap,
+ ibuf_entry, &ins_rec,
&dummy_big_rec, 0, thr, &mtr);
if (err == DB_FAIL) {
err = btr_cur_pessimistic_insert(
BTR_NO_LOCKING_FLAG | BTR_NO_UNDO_LOG_FLAG,
- cursor, ibuf_entry, &ins_rec,
+ cursor, &offsets, &offsets_heap,
+ ibuf_entry, &ins_rec,
&dummy_big_rec, 0, thr, &mtr);
}
@@ -3512,6 +3732,10 @@ fail_exit:
ut_ad(buf_block_get_space(block) == IBUF_SPACE_ID);
}
+ if (offsets_heap) {
+ mem_heap_free(offsets_heap);
+ }
+
if (err == DB_SUCCESS && op != IBUF_OP_DELETE) {
/* Update the page max trx id field */
page_update_max_trx_id(block, NULL,
@@ -3568,7 +3792,7 @@ ibuf_insert(
ulint page_no,/*!< in: page number where to insert */
que_thr_t* thr) /*!< in: query thread */
{
- ulint err;
+ dberr_t err;
ulint entry_size;
ibool no_counter;
/* Read the settable global variable ibuf_use only once in
@@ -3699,7 +3923,7 @@ skip_watch:
/********************************************************************//**
During merge, inserts to an index page a secondary index entry extracted
from the insert buffer. */
-static
+static __attribute__((nonnull))
void
ibuf_insert_to_index_page_low(
/*==========================*/
@@ -3707,6 +3931,8 @@ ibuf_insert_to_index_page_low(
buf_block_t* block, /*!< in/out: index page where the buffered
entry should be placed */
dict_index_t* index, /*!< in: record descriptor */
+ ulint** offsets,/*!< out: offsets on *rec */
+ mem_heap_t* heap, /*!< in/out: memory heap */
mtr_t* mtr, /*!< in/out: mtr */
page_cur_t* page_cur)/*!< in/out: cursor positioned on the record
after which to insert the buffered entry */
@@ -3718,8 +3944,8 @@ ibuf_insert_to_index_page_low(
const page_t* bitmap_page;
ulint old_bits;
- if (UNIV_LIKELY
- (page_cur_tuple_insert(page_cur, entry, index, 0, mtr) != NULL)) {
+ if (page_cur_tuple_insert(
+ page_cur, entry, index, offsets, &heap, 0, mtr) != NULL) {
return;
}
@@ -3730,8 +3956,8 @@ ibuf_insert_to_index_page_low(
/* This time the record must fit */
- if (UNIV_LIKELY
- (page_cur_tuple_insert(page_cur, entry, index, 0, mtr) != NULL)) {
+ if (page_cur_tuple_insert(page_cur, entry, index,
+ offsets, &heap, 0, mtr) != NULL) {
return;
}
@@ -3785,6 +4011,8 @@ ibuf_insert_to_index_page(
ulint low_match;
page_t* page = buf_block_get_frame(block);
rec_t* rec;
+ ulint* offsets;
+ mem_heap_t* heap;
ut_ad(ibuf_inside(mtr));
ut_ad(dtuple_check_typed(entry));
@@ -3835,10 +4063,14 @@ dump:
low_match = page_cur_search(block, index, entry,
PAGE_CUR_LE, &page_cur);
+ heap = mem_heap_create(
+ sizeof(upd_t)
+ + REC_OFFS_HEADER_SIZE * sizeof(*offsets)
+ + dtuple_get_n_fields(entry)
+ * (sizeof(upd_field_t) + sizeof *offsets));
+
if (UNIV_UNLIKELY(low_match == dtuple_get_n_fields(entry))) {
- mem_heap_t* heap;
upd_t* update;
- ulint* offsets;
page_zip_des_t* page_zip;
rec = page_cur_get_rec(&page_cur);
@@ -3847,12 +4079,10 @@ dump:
row_ins_sec_index_entry_by_modify(BTR_MODIFY_LEAF). */
ut_ad(rec_get_deleted_flag(rec, page_is_comp(page)));
- heap = mem_heap_create(1024);
-
offsets = rec_get_offsets(rec, index, NULL, ULINT_UNDEFINED,
&heap);
update = row_upd_build_sec_rec_difference_binary(
- index, entry, rec, NULL, heap);
+ rec, index, offsets, entry, heap);
page_zip = buf_block_get_page_zip(block);
@@ -3862,9 +4092,7 @@ dump:
Bug #56680 was fixed. */
btr_cur_set_deleted_flag_for_ibuf(
rec, page_zip, FALSE, mtr);
-updated_in_place:
- mem_heap_free(heap);
- return;
+ goto updated_in_place;
}
/* Copy the info bits. Clear the delete-mark. */
@@ -3908,15 +4136,20 @@ updated_in_place:
lock_rec_store_on_page_infimum(block, rec);
page_cur_delete_rec(&page_cur, index, offsets, mtr);
page_cur_move_to_prev(&page_cur);
- mem_heap_free(heap);
- ibuf_insert_to_index_page_low(entry, block, index, mtr,
+ ibuf_insert_to_index_page_low(entry, block, index,
+ &offsets, heap, mtr,
&page_cur);
lock_rec_restore_from_page_infimum(block, rec, block);
} else {
- ibuf_insert_to_index_page_low(entry, block, index, mtr,
+ offsets = NULL;
+ ibuf_insert_to_index_page_low(entry, block, index,
+ &offsets, heap, mtr,
&page_cur);
}
+
+updated_in_place:
+ mem_heap_free(heap);
}
/****************************************************************//**
@@ -3950,7 +4183,7 @@ ibuf_set_del_mark(
/* Delete mark the old index record. According to a
comment in row_upd_sec_index_entry(), it can already
have been delete marked if a lock wait occurred in
- row_ins_index_entry() in a previous invocation of
+ row_ins_sec_index_entry() in a previous invocation of
row_upd_sec_index_entry(). */
if (UNIV_LIKELY
@@ -4128,7 +4361,7 @@ ibuf_restore_pos(
ibuf_btr_pcur_commit_specify_mtr(pcur, mtr);
fputs("InnoDB: Validating insert buffer tree:\n", stderr);
- if (!btr_validate_index(ibuf->index, NULL)) {
+ if (!btr_validate_index(ibuf->index, 0)) {
ut_error;
}
@@ -4160,7 +4393,7 @@ ibuf_delete_rec(
{
ibool success;
page_t* root;
- ulint err;
+ dberr_t err;
ut_ad(ibuf_inside(mtr));
ut_ad(page_rec_is_user_rec(btr_pcur_get_rec(pcur)));
@@ -4183,7 +4416,8 @@ ibuf_delete_rec(
}
#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
- success = btr_cur_optimistic_delete(btr_pcur_get_btr_cur(pcur), mtr);
+ success = btr_cur_optimistic_delete(btr_pcur_get_btr_cur(pcur),
+ 0, mtr);
if (success) {
if (UNIV_UNLIKELY(!page_get_n_recs(btr_pcur_get_page(pcur)))) {
@@ -4241,7 +4475,7 @@ ibuf_delete_rec(
root = ibuf_tree_root_get(mtr);
- btr_cur_pessimistic_delete(&err, TRUE, btr_pcur_get_btr_cur(pcur),
+ btr_cur_pessimistic_delete(&err, TRUE, btr_pcur_get_btr_cur(pcur), 0,
RB_NONE, mtr);
ut_a(err == DB_SUCCESS);
@@ -4829,4 +5063,109 @@ ibuf_print(
mutex_exit(&ibuf_mutex);
}
+
+/******************************************************************//**
+Checks the insert buffer bitmaps on IMPORT TABLESPACE.
+@return DB_SUCCESS or error code */
+UNIV_INTERN
+dberr_t
+ibuf_check_bitmap_on_import(
+/*========================*/
+ const trx_t* trx, /*!< in: transaction */
+ ulint space_id) /*!< in: tablespace identifier */
+{
+ ulint zip_size;
+ ulint page_size;
+ ulint size;
+ ulint page_no;
+
+ ut_ad(space_id);
+ ut_ad(trx->mysql_thd);
+
+ zip_size = fil_space_get_zip_size(space_id);
+
+ if (zip_size == ULINT_UNDEFINED) {
+ return(DB_TABLE_NOT_FOUND);
+ }
+
+ size = fil_space_get_size(space_id);
+
+ if (size == 0) {
+ return(DB_TABLE_NOT_FOUND);
+ }
+
+ mutex_enter(&ibuf_mutex);
+
+ page_size = zip_size ? zip_size : UNIV_PAGE_SIZE;
+
+ for (page_no = 0; page_no < size; page_no += page_size) {
+ mtr_t mtr;
+ page_t* bitmap_page;
+ ulint i;
+
+ if (trx_is_interrupted(trx)) {
+ mutex_exit(&ibuf_mutex);
+ return(DB_INTERRUPTED);
+ }
+
+ mtr_start(&mtr);
+
+ mtr_set_log_mode(&mtr, MTR_LOG_NO_REDO);
+
+ ibuf_enter(&mtr);
+
+ bitmap_page = ibuf_bitmap_get_map_page(
+ space_id, page_no, zip_size, &mtr);
+
+ for (i = FSP_IBUF_BITMAP_OFFSET + 1; i < page_size; i++) {
+ const ulint offset = page_no + i;
+
+ if (ibuf_bitmap_page_get_bits(
+ bitmap_page, offset, zip_size,
+ IBUF_BITMAP_IBUF, &mtr)) {
+
+ mutex_exit(&ibuf_mutex);
+ ibuf_exit(&mtr);
+ mtr_commit(&mtr);
+
+ ib_errf(trx->mysql_thd,
+ IB_LOG_LEVEL_ERROR,
+ ER_INNODB_INDEX_CORRUPT,
+ "Space %u page %u"
+ " is wrongly flagged to belong to the"
+ " insert buffer",
+ (unsigned) space_id,
+ (unsigned) offset);
+
+ return(DB_CORRUPTION);
+ }
+
+ if (ibuf_bitmap_page_get_bits(
+ bitmap_page, offset, zip_size,
+ IBUF_BITMAP_BUFFERED, &mtr)) {
+
+ ib_errf(trx->mysql_thd,
+ IB_LOG_LEVEL_WARN,
+ ER_INNODB_INDEX_CORRUPT,
+ "Buffered changes"
+ " for space %u page %u are lost",
+ (unsigned) space_id,
+ (unsigned) offset);
+
+ /* Tolerate this error, so that
+ slightly corrupted tables can be
+ imported and dumped. Clear the bit. */
+ ibuf_bitmap_page_set_bits(
+ bitmap_page, offset, zip_size,
+ IBUF_BITMAP_BUFFERED, FALSE, &mtr);
+ }
+ }
+
+ ibuf_exit(&mtr);
+ mtr_commit(&mtr);
+ }
+
+ mutex_exit(&ibuf_mutex);
+ return(DB_SUCCESS);
+}
#endif /* !UNIV_HOTBACKUP */
diff --git a/storage/innobase/include/api0api.h b/storage/innobase/include/api0api.h
new file mode 100644
index 00000000000..5b7bfdbdde5
--- /dev/null
+++ b/storage/innobase/include/api0api.h
@@ -0,0 +1,1282 @@
+/*****************************************************************************
+
+Copyright (c) 2012, Oracle and/or its affiliates. All Rights Reserved.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
+
+*****************************************************************************/
+
+/**************************************************//**
+@file include/api0api.h
+InnoDB Native API
+
+2008-08-01 Created by Sunny Bains.
+3/20/2011 Jimmy Yang extracted from Embedded InnoDB
+*******************************************************/
+
+#ifndef api0api_h
+#define api0api_h
+
+#include "db0err.h"
+#include <stdio.h>
+
+#ifdef _MSC_VER
+#define strncasecmp _strnicmp
+#define strcasecmp _stricmp
+#endif
+
+#if defined(__GNUC__) && (__GNUC__ > 2) && ! defined(__INTEL_COMPILER)
+#define UNIV_NO_IGNORE __attribute__ ((warn_unused_result))
+#else
+#define UNIV_NO_IGNORE
+#endif /* __GNUC__ && __GNUC__ > 2 && !__INTEL_COMPILER */
+
+/* See comment about ib_bool_t as to why the two macros are unsigned long. */
+/** The boolean value of "true" used internally within InnoDB */
+#define IB_TRUE 0x1UL
+/** The boolean value of "false" used internally within InnoDB */
+#define IB_FALSE 0x0UL
+
+/* Basic types used by the InnoDB API. */
+/** All InnoDB error codes are represented by ib_err_t */
+typedef enum dberr_t ib_err_t;
+/** Representation of a byte within InnoDB */
+typedef unsigned char ib_byte_t;
+/** Representation of an unsigned long int within InnoDB */
+typedef unsigned long int ib_ulint_t;
+
+/* We assume C99 support except when using VisualStudio. */
+#if !defined(_MSC_VER)
+#include <stdint.h>
+#endif /* _MSC_VER */
+
+/* Integer types used by the API. Microsoft VS defines its own types
+and we use the Microsoft types when building with Visual Studio. */
+#if defined(_MSC_VER)
+/** A signed 8 bit integral type. */
+typedef __int8 ib_i8_t;
+#else
+/** A signed 8 bit integral type. */
+typedef int8_t ib_i8_t;
+#endif
+
+#if defined(_MSC_VER)
+/** An unsigned 8 bit integral type. */
+typedef unsigned __int8 ib_u8_t;
+#else
+/** An unsigned 8 bit integral type. */
+typedef uint8_t ib_u8_t;
+#endif
+
+#if defined(_MSC_VER)
+/** A signed 16 bit integral type. */
+typedef __int16 ib_i16_t;
+#else
+/** A signed 16 bit integral type. */
+typedef int16_t ib_i16_t;
+#endif
+
+#if defined(_MSC_VER)
+/** An unsigned 16 bit integral type. */
+typedef unsigned __int16 ib_u16_t;
+#else
+/** An unsigned 16 bit integral type. */
+typedef uint16_t ib_u16_t;
+#endif
+
+#if defined(_MSC_VER)
+/** A signed 32 bit integral type. */
+typedef __int32 ib_i32_t;
+#else
+/** A signed 32 bit integral type. */
+typedef int32_t ib_i32_t;
+#endif
+
+#if defined(_MSC_VER)
+/** An unsigned 32 bit integral type. */
+typedef unsigned __int32 ib_u32_t;
+#else
+/** An unsigned 32 bit integral type. */
+typedef uint32_t ib_u32_t;
+#endif
+
+#if defined(_MSC_VER)
+/** A signed 64 bit integral type. */
+typedef __int64 ib_i64_t;
+#else
+/** A signed 64 bit integral type. */
+typedef int64_t ib_i64_t;
+#endif
+
+#if defined(_MSC_VER)
+/** An unsigned 64 bit integral type. */
+typedef unsigned __int64 ib_u64_t;
+#else
+/** An unsigned 64 bit integral type. */
+typedef uint64_t ib_u64_t;
+#endif
+
+typedef void* ib_opaque_t;
+typedef ib_opaque_t ib_charset_t;
+typedef ib_ulint_t ib_bool_t;
+typedef ib_u64_t ib_id_u64_t;
+
+/** @enum ib_cfg_type_t Possible types for a configuration variable. */
+typedef enum {
+ IB_CFG_IBOOL, /*!< The configuration parameter is
+ of type ibool */
+
+ /* XXX Can we avoid having different types for ulint and ulong?
+ - On Win64 "unsigned long" is 32 bits
+ - ulong is always defined as "unsigned long"
+ - On Win64 ulint is defined as 64 bit integer
+ => On Win64 ulint != ulong.
+ If we typecast all ulong and ulint variables to the smaller type
+ ulong, then we will cut the range of the ulint variables.
+ This is not a problem for most ulint variables because their max
+ allowed values do not exceed 2^32-1 (e.g. log_groups is ulint
+ but its max allowed value is 10). BUT buffer_pool_size and
+ log_file_size allow up to 2^64-1. */
+
+ IB_CFG_ULINT, /*!< The configuration parameter is
+ of type ulint */
+
+ IB_CFG_ULONG, /*!< The configuration parameter is
+ of type ulong */
+
+ IB_CFG_TEXT, /*!< The configuration parameter is
+ of type char* */
+
+ IB_CFG_CB /*!< The configuration parameter is
+ a callback parameter */
+} ib_cfg_type_t;
+
+/** @enum ib_col_type_t column types that are supported. */
+typedef enum {
+ IB_VARCHAR = 1, /*!< Character varying length. The
+ column is not padded. */
+
+ IB_CHAR = 2, /*!< Fixed length character string. The
+ column is padded to the right. */
+
+ IB_BINARY = 3, /*!< Fixed length binary, similar to
+ IB_CHAR but the column is not padded
+ to the right. */
+
+ IB_VARBINARY = 4, /*!< Variable length binary */
+
+ IB_BLOB = 5, /*!< Binary large object, or
+ a TEXT type */
+
+ IB_INT = 6, /*!< Integer: can be any size
+ from 1 - 8 bytes. If the size is
+ 1, 2, 4 and 8 bytes then you can use
+ the typed read and write functions. For
+ other sizes you will need to use the
+ ib_col_get_value() function and do the
+ conversion yourself. */
+
+ IB_SYS = 8, /*!< System column, this column can
+ be one of DATA_TRX_ID, DATA_ROLL_PTR
+ or DATA_ROW_ID. */
+
+ IB_FLOAT = 9, /*!< C (float) floating point value. */
+
+	IB_DOUBLE = 10,			/*!< C (double) floating point value. */
+
+ IB_DECIMAL = 11, /*!< Decimal stored as an ASCII
+ string */
+
+ IB_VARCHAR_ANYCHARSET = 12, /*!< Any charset, varying length */
+
+ IB_CHAR_ANYCHARSET = 13 /*!< Any charset, fixed length */
+
+} ib_col_type_t;
+
+/** @enum ib_tbl_fmt_t InnoDB table format types */
+typedef enum {
+ IB_TBL_REDUNDANT, /*!< Redundant row format, the column
+ type and length is stored in the row.*/
+
+ IB_TBL_COMPACT, /*!< Compact row format, the column
+ type is not stored in the row. The
+ length is stored in the row but the
+ storage format uses a compact format
+ to store the length of the column data
+ and record data storage format also
+ uses less storage. */
+
+ IB_TBL_DYNAMIC, /*!< Compact row format. BLOB prefixes
+ are not stored in the clustered index */
+
+ IB_TBL_COMPRESSED /*!< Similar to dynamic format but
+ with pages compressed */
+} ib_tbl_fmt_t;
+
+/** @enum ib_col_attr_t InnoDB column attributes */
+typedef enum {
+ IB_COL_NONE = 0, /*!< No special attributes. */
+
+ IB_COL_NOT_NULL = 1, /*!< Column data can't be NULL. */
+
+ IB_COL_UNSIGNED = 2, /*!< Column is IB_INT and unsigned. */
+
+ IB_COL_NOT_USED = 4, /*!< Future use, reserved. */
+
+ IB_COL_CUSTOM1 = 8, /*!< Custom precision type, this is
+ a bit that is ignored by InnoDB and so
+ can be set and queried by users. */
+
+ IB_COL_CUSTOM2 = 16, /*!< Custom precision type, this is
+ a bit that is ignored by InnoDB and so
+ can be set and queried by users. */
+
+ IB_COL_CUSTOM3 = 32 /*!< Custom precision type, this is
+ a bit that is ignored by InnoDB and so
+ can be set and queried by users. */
+} ib_col_attr_t;
+
+/* Note: must match lock0types.h */
+/** @enum ib_lck_mode_t InnoDB lock modes. */
+typedef enum {
+ IB_LOCK_IS = 0, /*!< Intention shared, an intention
+ lock should be used to lock tables */
+
+ IB_LOCK_IX, /*!< Intention exclusive, an intention
+ lock should be used to lock tables */
+
+ IB_LOCK_S, /*!< Shared locks should be used to
+ lock rows */
+
+ IB_LOCK_X, /*!< Exclusive locks should be used to
+ lock rows*/
+
+ IB_LOCK_TABLE_X, /*!< exclusive table lock */
+
+ IB_LOCK_NONE, /*!< This is used internally to note
+ consistent read */
+
+ IB_LOCK_NUM = IB_LOCK_NONE /*!< number of lock modes */
+} ib_lck_mode_t;
+
+typedef enum {
+ IB_CLUSTERED = 1, /*!< clustered index */
+ IB_UNIQUE = 2 /*!< unique index */
+} ib_index_type_t;
+
+/** @enum ib_srch_mode_t InnoDB cursor search modes for ib_cursor_moveto().
+Note: Values must match those found in page0cur.h */
+typedef enum {
+ IB_CUR_G = 1, /*!< If search key is not found then
+ position the cursor on the row that
+ is greater than the search key */
+
+ IB_CUR_GE = 2, /*!< If the search key not found then
+ position the cursor on the row that
+ is greater than or equal to the search
+ key */
+
+ IB_CUR_L = 3, /*!< If search key is not found then
+ position the cursor on the row that
+ is less than the search key */
+
+ IB_CUR_LE = 4 /*!< If search key is not found then
+ position the cursor on the row that
+ is less than or equal to the search
+ key */
+} ib_srch_mode_t;
+
+/** @enum ib_match_mode_t Various match modes used by ib_cursor_moveto() */
+typedef enum {
+ IB_CLOSEST_MATCH, /*!< Closest match possible */
+
+ IB_EXACT_MATCH, /*!< Search using a complete key
+ value */
+
+ IB_EXACT_PREFIX /*!< Search using a key prefix which
+ must match to rows: the prefix may
+ contain an incomplete field (the
+ last field in prefix may be just
+ a prefix of a fixed length column) */
+} ib_match_mode_t;
+
+/** @struct ib_col_meta_t InnoDB column meta data. */
+typedef struct {
+ ib_col_type_t type; /*!< Type of the column */
+
+ ib_col_attr_t attr; /*!< Column attributes */
+
+ ib_u32_t type_len; /*!< Length of type */
+
+ ib_u16_t client_type; /*!< 16 bits of data relevant only to
+ the client. InnoDB doesn't care */
+
+ ib_charset_t* charset; /*!< Column charset */
+} ib_col_meta_t;
+
+/* Note: Must be in sync with trx0trx.h */
+/** @enum ib_trx_state_t The transaction state can be queried using the
+ib_trx_state() function. The InnoDB deadlock monitor can roll back a
+transaction and users should be prepared for this, especially where there
+is high contention. The way to determine the state of the transaction is to
+query its state and check. */
+typedef enum {
+ IB_TRX_NOT_STARTED, /*!< Has not started yet, the
+					transaction has not been started yet.*/
+
+ IB_TRX_ACTIVE, /*!< The transaction is currently
+ active and needs to be either
+ committed or rolled back. */
+
+ IB_TRX_COMMITTED_IN_MEMORY, /*!< Not committed to disk yet */
+
+ IB_TRX_PREPARED /*!< Support for 2PC/XA */
+} ib_trx_state_t;
+
+/* Note: Must be in sync with trx0trx.h */
+/** @enum ib_trx_level_t Transaction isolation levels */
+typedef enum {
+ IB_TRX_READ_UNCOMMITTED = 0, /*!< Dirty read: non-locking SELECTs are
+ performed so that we do not look at a
+ possible earlier version of a record;
+ thus they are not 'consistent' reads
+ under this isolation level; otherwise
+ like level 2 */
+
+ IB_TRX_READ_COMMITTED = 1, /*!< Somewhat Oracle-like isolation,
+ except that in range UPDATE and DELETE
+ we must block phantom rows with
+ next-key locks; SELECT ... FOR UPDATE
+ and ... LOCK IN SHARE MODE only lock
+ the index records, NOT the gaps before
+ them, and thus allow free inserting;
+ each consistent read reads its own
+ snapshot */
+
+ IB_TRX_REPEATABLE_READ = 2, /*!< All consistent reads in the same
+ trx read the same snapshot; full
+ next-key locking used in locking reads
+ to block insertions into gaps */
+
+ IB_TRX_SERIALIZABLE = 3 /*!< All plain SELECTs are converted to
+ LOCK IN SHARE MODE reads */
+} ib_trx_level_t;
+
+/** Generical InnoDB callback prototype. */
+typedef void (*ib_cb_t)(void);
+
+#define IB_CFG_BINLOG_ENABLED 0x1
+#define IB_CFG_MDL_ENABLED 0x2
+#define IB_CFG_DISABLE_ROWLOCK 0x4
+
+/** The first argument to the InnoDB message logging function. By default
+it's set to stderr. You should treat ib_msg_stream_t as a void*, since
+it will probably change in the future. */
+typedef FILE* ib_msg_stream_t;
+
+/** All log messages are written to this function. It should have the same
+behavior as fprintf(3). */
+typedef int (*ib_msg_log_t)(ib_msg_stream_t, const char*, ...);
+
+/* Note: This is to make it easy for API users to have type
+checking for arguments to our functions. Making it ib_opaque_t
+by itself will result in pointer decay resulting in subverting
+of the compiler's type checking. */
+
+/** InnoDB tuple handle. This handle can refer to either a cluster index
+tuple or a secondary index tuple. There are two types of tuples for each
+type of index, making a total of four types of tuple handles. There
+is a tuple for reading the entire row contents and another for searching
+on the index key. */
+typedef struct ib_tuple_t* ib_tpl_t;
+
+/** InnoDB transaction handle, all database operations need to be covered
+by transactions. This handle represents a transaction. The handle can be
+created with ib_trx_begin(), you commit your changes with ib_trx_commit()
+and undo your changes using ib_trx_rollback(). If the InnoDB deadlock
+monitor rolls back the transaction then you need to free the transaction
+using the function ib_trx_release(). You can query the state of an InnoDB
+transaction by calling ib_trx_state(). */
+typedef struct trx_t* ib_trx_t;
+
+/** InnoDB cursor handle */
+typedef struct ib_cursor_t* ib_crsr_t;
+
+/*************************************************************//**
+This function is used to compare two data fields for which the data type
+is such that we must use the client code to compare them.
+
+@param col_meta column meta data
+@param p1 key
+@param	p1_len		key length
+@param p2 second key
+@param p2_len second key length
+@return 1, 0, -1, if a is greater, equal, less than b, respectively */
+
+typedef int (*ib_client_cmp_t)(
+ const ib_col_meta_t* col_meta,
+ const ib_byte_t* p1,
+ ib_ulint_t p1_len,
+ const ib_byte_t* p2,
+ ib_ulint_t p2_len);
+
+/* This should be the same as univ.i */
+/** Represents SQL_NULL length */
+#define IB_SQL_NULL 0xFFFFFFFF
+/** The number of system columns in a row. */
+#define IB_N_SYS_COLS 3
+
+/** The maximum length of a text column. */
+#define MAX_TEXT_LEN 4096
+
+/* MySQL uses 3 byte UTF-8 encoding. */
+/** The maximum length of a column name in a table schema. */
+#define IB_MAX_COL_NAME_LEN (64 * 3)
+
+/** The maximum length of a table name (plus database name). */
+#define IB_MAX_TABLE_NAME_LEN (64 * 3) * 2
+
+/*****************************************************************//**
+Start a transaction that's been rolled back. This special function
+exists for the case when InnoDB's deadlock detector has rolled back
+a transaction. While the transaction has been rolled back the handle
+is still valid and can be reused by calling this function. If you
+don't want to reuse the transaction handle then you can free the handle
+by calling ib_trx_release().
+@return innobase txn handle */
+
+ib_err_t
+ib_trx_start(
+/*=========*/
+ ib_trx_t ib_trx, /*!< in: transaction to restart */
+ ib_trx_level_t ib_trx_level, /*!< in: trx isolation level */
+ void* thd); /*!< in: THD */
+
+/*****************************************************************//**
+Begin a transaction. This will allocate a new transaction handle and
+put the transaction in the active state.
+@return innobase txn handle */
+
+ib_trx_t
+ib_trx_begin(
+/*=========*/
+ ib_trx_level_t ib_trx_level); /*!< in: trx isolation level */
+
+/*****************************************************************//**
+Query the transaction's state. This function can be used to check for
+the state of the transaction in case it has been rolled back by the
+InnoDB deadlock detector. Note that when a transaction is selected as
+a victim for rollback, InnoDB will always return an appropriate error
+code indicating this. @see DB_DEADLOCK, @see DB_LOCK_TABLE_FULL and
+@see DB_LOCK_WAIT_TIMEOUT
+@return transaction state */
+
+ib_trx_state_t
+ib_trx_state(
+/*=========*/
+ ib_trx_t ib_trx); /*!< in: trx handle */
+
+/*****************************************************************//**
+Release the resources of the transaction. If the transaction was
+selected as a victim by InnoDB and rolled back then use this function
+to free the transaction handle.
+@return DB_SUCCESS or err code */
+
+ib_err_t
+ib_trx_release(
+/*===========*/
+ ib_trx_t ib_trx); /*!< in: trx handle */
+
+/*****************************************************************//**
+Commit a transaction. This function will release the schema latches too.
+It will also free the transaction handle.
+@return DB_SUCCESS or err code */
+
+ib_err_t
+ib_trx_commit(
+/*==========*/
+ ib_trx_t ib_trx); /*!< in: trx handle */
+
+/*****************************************************************//**
+Rollback a transaction. This function will release the schema latches too.
+It will also free the transaction handle.
+@return DB_SUCCESS or err code */
+
+ib_err_t
+ib_trx_rollback(
+/*============*/
+ ib_trx_t ib_trx); /*!< in: trx handle */
+
+/*****************************************************************//**
+Open an InnoDB table and return a cursor handle to it.
+@return DB_SUCCESS or err code */
+
+ib_err_t
+ib_cursor_open_table_using_id(
+/*==========================*/
+ ib_id_u64_t table_id, /*!< in: table id of table to open */
+ ib_trx_t ib_trx, /*!< in: Current transaction handle
+ can be NULL */
+ ib_crsr_t* ib_crsr); /*!< out,own: InnoDB cursor */
+
+/*****************************************************************//**
+Open an InnoDB index and return a cursor handle to it.
+@return DB_SUCCESS or err code */
+
+ib_err_t
+ib_cursor_open_index_using_id(
+/*==========================*/
+ ib_id_u64_t index_id, /*!< in: index id of index to open */
+ ib_trx_t ib_trx, /*!< in: Current transaction handle
+ can be NULL */
+ ib_crsr_t* ib_crsr); /*!< out: InnoDB cursor */
+
+/*****************************************************************//**
+Open an InnoDB secondary index cursor and return a cursor handle to it.
+@return DB_SUCCESS or err code */
+
+ib_err_t
+ib_cursor_open_index_using_name(
+/*============================*/
+ ib_crsr_t ib_open_crsr, /*!< in: open/active cursor */
+ const char* index_name, /*!< in: secondary index name */
+ ib_crsr_t* ib_crsr, /*!< out,own: InnoDB index cursor */
+ int* idx_type, /*!< out: index is cluster index */
+ ib_id_u64_t* idx_id); /*!< out: index id */
+
+/*****************************************************************//**
+Open an InnoDB table by name and return a cursor handle to it.
+@return DB_SUCCESS or err code */
+
+ib_err_t
+ib_cursor_open_table(
+/*=================*/
+ const char* name, /*!< in: table name */
+ ib_trx_t ib_trx, /*!< in: Current transaction handle
+ can be NULL */
+ ib_crsr_t* ib_crsr); /*!< out,own: InnoDB cursor */
+
+/*****************************************************************//**
+Reset the cursor.
+@return DB_SUCCESS or err code */
+
+ib_err_t
+ib_cursor_reset(
+/*============*/
+ ib_crsr_t ib_crsr); /*!< in/out: InnoDB cursor */
+
+
+/*****************************************************************//**
+set a cursor trx to NULL*/
+
+void
+ib_cursor_clear_trx(
+/*================*/
+ ib_crsr_t ib_crsr); /*!< in/out: InnoDB cursor */
+
+/*****************************************************************//**
+Close an InnoDB table and free the cursor.
+@return DB_SUCCESS or err code */
+
+ib_err_t
+ib_cursor_close(
+/*============*/
+ ib_crsr_t ib_crsr); /*!< in/out: InnoDB cursor */
+
+/*****************************************************************//**
+Close the table, decrement n_ref_count count.
+@return DB_SUCCESS or err code */
+
+ib_err_t
+ib_cursor_close_table(
+/*==================*/
+ ib_crsr_t ib_crsr); /*!< in/out: InnoDB cursor */
+
+/*****************************************************************//**
+update the cursor with new transactions and also reset the cursor
+@return DB_SUCCESS or err code */
+
+ib_err_t
+ib_cursor_new_trx(
+/*==============*/
+ ib_crsr_t ib_crsr, /*!< in/out: InnoDB cursor */
+ ib_trx_t ib_trx); /*!< in: transaction */
+
+/*****************************************************************//**
+Commit the transaction in a cursor
+@return DB_SUCCESS or err code */
+
+ib_err_t
+ib_cursor_commit_trx(
+/*=================*/
+ ib_crsr_t ib_crsr, /*!< in/out: InnoDB cursor */
+ ib_trx_t ib_trx); /*!< in: transaction */
+
+/********************************************************************//**
+Open a table using the table name, if found then increment table ref count.
+@return table instance if found */
+
+void*
+ib_open_table_by_name(
+/*==================*/
+ const char* name); /*!< in: table name to lookup */
+
+/*****************************************************************//**
+Insert a row to a table.
+@return DB_SUCCESS or err code */
+
+ib_err_t
+ib_cursor_insert_row(
+/*=================*/
+ ib_crsr_t ib_crsr, /*!< in/out: InnoDB cursor instance */
+ const ib_tpl_t ib_tpl); /*!< in: tuple to insert */
+
+/*****************************************************************//**
+Update a row in a table.
+@return DB_SUCCESS or err code */
+
+ib_err_t
+ib_cursor_update_row(
+/*=================*/
+ ib_crsr_t ib_crsr, /*!< in: InnoDB cursor instance */
+ const ib_tpl_t ib_old_tpl, /*!< in: Old tuple in table */
+ const ib_tpl_t ib_new_tpl); /*!< in: New tuple to update */
+
+/*****************************************************************//**
+Delete a row in a table.
+@return DB_SUCCESS or err code */
+
+ib_err_t
+ib_cursor_delete_row(
+/*=================*/
+ ib_crsr_t ib_crsr); /*!< in: cursor instance */
+
+/*****************************************************************//**
+Read current row.
+@return DB_SUCCESS or err code */
+
+ib_err_t
+ib_cursor_read_row(
+/*===============*/
+ ib_crsr_t ib_crsr, /*!< in: InnoDB cursor instance */
+ ib_tpl_t ib_tpl); /*!< out: read cols into this tuple */
+
+/*****************************************************************//**
+Move cursor to the first record in the table.
+@return DB_SUCCESS or err code */
+
+ib_err_t
+ib_cursor_first(
+/*============*/
+ ib_crsr_t ib_crsr); /*!< in: InnoDB cursor instance */
+
+/*****************************************************************//**
+Move cursor to the last record in the table.
+@return DB_SUCCESS or err code */
+
+ib_err_t
+ib_cursor_last(
+/*===========*/
+ ib_crsr_t ib_crsr); /*!< in: InnoDB cursor instance */
+
+/*****************************************************************//**
+Move cursor to the next record in the table.
+@return DB_SUCCESS or err code */
+
+ib_err_t
+ib_cursor_next(
+/*===========*/
+ ib_crsr_t ib_crsr); /*!< in: InnoDB cursor instance */
+
+/*****************************************************************//**
+Search for key.
+@return DB_SUCCESS or err code */
+
+ib_err_t
+ib_cursor_moveto(
+/*=============*/
+ ib_crsr_t ib_crsr, /*!< in: InnoDB cursor instance */
+ ib_tpl_t ib_tpl, /*!< in: Key to search for */
+ ib_srch_mode_t ib_srch_mode); /*!< in: search mode */
+
+/*****************************************************************//**
+Set the match mode for ib_cursor_move(). */
+
+void
+ib_cursor_set_match_mode(
+/*=====================*/
+ ib_crsr_t ib_crsr, /*!< in: Cursor instance */
+ ib_match_mode_t match_mode); /*!< in: ib_cursor_moveto match mode */
+
+/*****************************************************************//**
+Set a column of the tuple. Make a copy using the tuple's heap.
+@return DB_SUCCESS or error code */
+
+ib_err_t
+ib_col_set_value(
+/*=============*/
+ ib_tpl_t ib_tpl, /*!< in: tuple instance */
+ ib_ulint_t col_no, /*!< in: column index in tuple */
+ const void* src, /*!< in: data value */
+ ib_ulint_t len); /*!< in: data value len */
+
+/*****************************************************************//**
+Get the size of the data available in the column the tuple.
+@return bytes avail or IB_SQL_NULL */
+
+ib_ulint_t
+ib_col_get_len(
+/*===========*/
+ ib_tpl_t ib_tpl, /*!< in: tuple instance */
+ ib_ulint_t i); /*!< in: column index in tuple */
+
+/*****************************************************************//**
+Copy a column value from the tuple.
+@return bytes copied or IB_SQL_NULL */
+
+ib_ulint_t
+ib_col_copy_value(
+/*==============*/
+ ib_tpl_t ib_tpl, /*!< in: tuple instance */
+ ib_ulint_t i, /*!< in: column index in tuple */
+ void* dst, /*!< out: copied data value */
+ ib_ulint_t len); /*!< in: max data value len to copy */
+
+/*************************************************************//**
+Read a signed int 8 bit column from an InnoDB tuple.
+@return DB_SUCCESS or error */
+
+ib_err_t
+ib_tuple_read_i8(
+/*=============*/
+ ib_tpl_t ib_tpl, /*!< in: InnoDB tuple */
+ ib_ulint_t i, /*!< in: column number */
+ ib_i8_t* ival); /*!< out: integer value */
+
+/*************************************************************//**
+Read an unsigned int 8 bit column from an InnoDB tuple.
+@return DB_SUCCESS or error */
+
+ib_err_t
+ib_tuple_read_u8(
+/*=============*/
+ ib_tpl_t ib_tpl, /*!< in: InnoDB tuple */
+ ib_ulint_t i, /*!< in: column number */
+ ib_u8_t* ival); /*!< out: integer value */
+
+/*************************************************************//**
+Read a signed int 16 bit column from an InnoDB tuple.
+@return DB_SUCCESS or error */
+
+ib_err_t
+ib_tuple_read_i16(
+/*==============*/
+ ib_tpl_t ib_tpl, /*!< in: InnoDB tuple */
+ ib_ulint_t i, /*!< in: column number */
+ ib_i16_t* ival); /*!< out: integer value */
+
+/*************************************************************//**
+Read an unsigned int 16 bit column from an InnoDB tuple.
+@return DB_SUCCESS or error */
+
+ib_err_t
+ib_tuple_read_u16(
+/*==============*/
+ ib_tpl_t ib_tpl, /*!< in: InnoDB tuple */
+ ib_ulint_t i, /*!< in: column number */
+ ib_u16_t* ival); /*!< out: integer value */
+
+/*************************************************************//**
+Read a signed int 32 bit column from an InnoDB tuple.
+@return DB_SUCCESS or error */
+
+ib_err_t
+ib_tuple_read_i32(
+/*==============*/
+ ib_tpl_t ib_tpl, /*!< in: InnoDB tuple */
+ ib_ulint_t i, /*!< in: column number */
+ ib_i32_t* ival); /*!< out: integer value */
+
+/*************************************************************//**
+Read an unsigned int 32 bit column from an InnoDB tuple.
+@return DB_SUCCESS or error */
+
+ib_err_t
+ib_tuple_read_u32(
+/*==============*/
+ ib_tpl_t ib_tpl, /*!< in: InnoDB tuple */
+ ib_ulint_t i, /*!< in: column number */
+ ib_u32_t* ival); /*!< out: integer value */
+
+/*************************************************************//**
+Read a signed int 64 bit column from an InnoDB tuple.
+@return DB_SUCCESS or error */
+
+ib_err_t
+ib_tuple_read_i64(
+/*==============*/
+ ib_tpl_t ib_tpl, /*!< in: InnoDB tuple */
+ ib_ulint_t i, /*!< in: column number */
+ ib_i64_t* ival); /*!< out: integer value */
+
+/*************************************************************//**
+Read an unsigned int 64 bit column from an InnoDB tuple.
+@return DB_SUCCESS or error */
+
+ib_err_t
+ib_tuple_read_u64(
+/*==============*/
+ ib_tpl_t ib_tpl, /*!< in: InnoDB tuple */
+ ib_ulint_t i, /*!< in: column number */
+ ib_u64_t* ival); /*!< out: integer value */
+
+/*****************************************************************//**
+Get a column value pointer from the tuple.
+@return NULL or pointer to buffer */
+
+const void*
+ib_col_get_value(
+/*=============*/
+ ib_tpl_t ib_tpl, /*!< in: InnoDB tuple */
+ ib_ulint_t i); /*!< in: column number */
+
+/*****************************************************************//**
+Get a column type, length and attributes from the tuple.
+@return len of column data */
+
+ib_ulint_t
+ib_col_get_meta(
+/*============*/
+ ib_tpl_t ib_tpl, /*!< in: InnoDB tuple */
+ ib_ulint_t i, /*!< in: column number */
+ ib_col_meta_t* ib_col_meta); /*!< out: column meta data */
+
+/*****************************************************************//**
+"Clear" or reset an InnoDB tuple. We free the heap and recreate the tuple.
+@return new tuple, or NULL */
+
+ib_tpl_t
+ib_tuple_clear(
+/*============*/
+ ib_tpl_t ib_tpl); /*!< in: InnoDB tuple */
+
+/*****************************************************************//**
+Create a new cluster key search tuple and copy the contents of the
+secondary index key tuple columns that refer to the cluster index record
+to the cluster key. It does a deep copy of the column data.
+@return DB_SUCCESS or error code */
+
+ib_err_t
+ib_tuple_get_cluster_key(
+/*=====================*/
+ ib_crsr_t ib_crsr, /*!< in: secondary index cursor */
+ ib_tpl_t* ib_dst_tpl, /*!< out,own: destination tuple */
+ const ib_tpl_t ib_src_tpl); /*!< in: source tuple */
+
+/*****************************************************************//**
+Copy the contents of source tuple to destination tuple. The tuples
+must be of the same type and belong to the same table/index.
+@return DB_SUCCESS or error code */
+
+ib_err_t
+ib_tuple_copy(
+/*==========*/
+ ib_tpl_t ib_dst_tpl, /*!< in: destination tuple */
+ const ib_tpl_t ib_src_tpl); /*!< in: source tuple */
+
+/*****************************************************************//**
+Create an InnoDB tuple used for index/table search.
+@return tuple for current index */
+
+ib_tpl_t
+ib_sec_search_tuple_create(
+/*=======================*/
+ ib_crsr_t ib_crsr); /*!< in: Cursor instance */
+
+/*****************************************************************//**
+Create an InnoDB tuple used for index/table search.
+@return tuple for current index */
+
+ib_tpl_t
+ib_sec_read_tuple_create(
+/*=====================*/
+ ib_crsr_t ib_crsr); /*!< in: Cursor instance */
+
+/*****************************************************************//**
+Create an InnoDB tuple used for table key operations.
+@return tuple for current table */
+
+ib_tpl_t
+ib_clust_search_tuple_create(
+/*=========================*/
+ ib_crsr_t ib_crsr); /*!< in: Cursor instance */
+
+/*****************************************************************//**
+Create an InnoDB tuple for table row operations.
+@return tuple for current table */
+
+ib_tpl_t
+ib_clust_read_tuple_create(
+/*=======================*/
+ ib_crsr_t ib_crsr); /*!< in: Cursor instance */
+
+/*****************************************************************//**
+Return the number of user columns in the tuple definition.
+@return number of user columns */
+
+ib_ulint_t
+ib_tuple_get_n_user_cols(
+/*=====================*/
+ const ib_tpl_t ib_tpl); /*!< in: Tuple for current table */
+
+/*****************************************************************//**
+Return the number of columns in the tuple definition.
+@return number of columns */
+
+ib_ulint_t
+ib_tuple_get_n_cols(
+/*================*/
+ const ib_tpl_t ib_tpl); /*!< in: Tuple for current table */
+
+/*****************************************************************//**
+Destroy an InnoDB tuple. */
+
+void
+ib_tuple_delete(
+/*============*/
+ ib_tpl_t ib_tpl); /*!< in,own: Tuple instance to delete */
+
+/*****************************************************************//**
+Truncate a table. The cursor handle will be closed and set to NULL
+on success.
+@return DB_SUCCESS or error code */
+
+ib_err_t
+ib_cursor_truncate(
+/*===============*/
+ ib_crsr_t* ib_crsr, /*!< in/out: cursor for table
+ to truncate */
+ ib_id_u64_t* table_id); /*!< out: new table id */
+
+/*****************************************************************//**
+Get a table id.
+@return DB_SUCCESS if found */
+
+ib_err_t
+ib_table_get_id(
+/*============*/
+ const char* table_name, /*!< in: table to find */
+ ib_id_u64_t* table_id); /*!< out: table id if found */
+
+/*****************************************************************//**
+Get an index id.
+@return DB_SUCCESS if found */
+
+ib_err_t
+ib_index_get_id(
+/*============*/
+ const char* table_name, /*!< in: find index for this table */
+ const char* index_name, /*!< in: index to find */
+ ib_id_u64_t* index_id); /*!< out: index id if found */
+
+/*****************************************************************//**
+Check if cursor is positioned.
+@return IB_TRUE if positioned */
+
+ib_bool_t
+ib_cursor_is_positioned(
+/*====================*/
+ const ib_crsr_t ib_crsr); /*!< in: InnoDB cursor instance */
+
+/*****************************************************************//**
+Checks if the data dictionary is latched in exclusive mode by a
+user transaction.
+@return TRUE if exclusive latch */
+
+ib_bool_t
+ib_schema_lock_is_exclusive(
+/*========================*/
+ const ib_trx_t ib_trx); /*!< in: transaction */
+
+/*****************************************************************//**
+Lock an InnoDB cursor/table.
+@return DB_SUCCESS or error code */
+
+ib_err_t
+ib_cursor_lock(
+/*===========*/
+ ib_crsr_t ib_crsr, /*!< in/out: InnoDB cursor */
+ ib_lck_mode_t ib_lck_mode); /*!< in: InnoDB lock mode */
+
+/*****************************************************************//**
+Set the Lock an InnoDB table using the table id.
+@return DB_SUCCESS or error code */
+
+ib_err_t
+ib_table_lock(
+/*===========*/
+ ib_trx_t ib_trx, /*!< in/out: transaction */
+ ib_id_u64_t table_id, /*!< in: table id */
+ ib_lck_mode_t ib_lck_mode); /*!< in: InnoDB lock mode */
+
+/*****************************************************************//**
+Set the Lock mode of the cursor.
+@return DB_SUCCESS or error code */
+
+ib_err_t
+ib_cursor_set_lock_mode(
+/*====================*/
+ ib_crsr_t ib_crsr, /*!< in/out: InnoDB cursor */
+ ib_lck_mode_t ib_lck_mode); /*!< in: InnoDB lock mode */
+
+/*****************************************************************//**
+Set need to access clustered index record flag. */
+
+void
+ib_cursor_set_cluster_access(
+/*=========================*/
+ ib_crsr_t ib_crsr); /*!< in/out: InnoDB cursor */
+
+/*****************************************************************//**
+Write an integer value to a column. Integers are stored in big-endian
+format and will need to be converted from the host format.
+@return DB_SUCESS or error */
+
+ib_err_t
+ib_tuple_write_i8(
+/*==============*/
+ ib_tpl_t ib_tpl, /*!< in/out: tuple to write to */
+ int col_no, /*!< in: column number */
+ ib_i8_t val); /*!< in: value to write */
+
+/*****************************************************************//**
+Write an integer value to a column. Integers are stored in big-endian
+format and will need to be converted from the host format.
+@return DB_SUCESS or error */
+
+ib_err_t
+ib_tuple_write_i16(
+/*=================*/
+ ib_tpl_t ib_tpl, /*!< in/out: tuple to write to */
+ int col_no, /*!< in: column number */
+ ib_i16_t val); /*!< in: value to write */
+
+/*****************************************************************//**
+Write an integer value to a column. Integers are stored in big-endian
+format and will need to be converted from the host format.
+@return DB_SUCESS or error */
+
+ib_err_t
+ib_tuple_write_i32(
+/*===============*/
+ ib_tpl_t ib_tpl, /*!< in/out: tuple to write to */
+ int col_no, /*!< in: column number */
+ ib_i32_t val); /*!< in: value to write */
+
+/*****************************************************************//**
+Write an integer value to a column. Integers are stored in big-endian
+format and will need to be converted from the host format.
+@return DB_SUCESS or error */
+
+ib_err_t
+ib_tuple_write_i64(
+/*===============*/
+ ib_tpl_t ib_tpl, /*!< in/out: tuple to write to */
+ int col_no, /*!< in: column number */
+ ib_i64_t val); /*!< in: value to write */
+
+/*****************************************************************//**
+Write an integer value to a column. Integers are stored in big-endian
+format and will need to be converted from the host format.
+@return DB_SUCESS or error */
+
+ib_err_t
+ib_tuple_write_u8(
+/*==============*/
+ ib_tpl_t ib_tpl, /*!< in/out: tuple to write to */
+ int col_no, /*!< in: column number */
+ ib_u8_t val); /*!< in: value to write */
+
+/*****************************************************************//**
+Write an integer value to a column. Integers are stored in big-endian
+format and will need to be converted from the host format.
+@return DB_SUCESS or error */
+
+ib_err_t
+ib_tuple_write_u16(
+/*===============*/
+ ib_tpl_t ib_tpl, /*!< in/out: tuple to write to */
+ int col_no, /*!< in: column number */
+ ib_u16_t val); /*!< in: value to write */
+
+/*****************************************************************//**
+Write an integer value to a column. Integers are stored in big-endian
+format and will need to be converted from the host format.
+@return DB_SUCESS or error */
+
+ib_err_t
+ib_tuple_write_u32(
+/*=================*/
+ ib_tpl_t ib_tpl, /*!< in/out: tuple to write to */
+ int col_no, /*!< in: column number */
+ ib_u32_t val); /*!< in: value to write */
+
+/*****************************************************************//**
+Write an integer value to a column. Integers are stored in big-endian
+format and will need to be converted from the host format.
+@return DB_SUCESS or error */
+
+ib_err_t
+ib_tuple_write_u64(
+/*===============*/
+ ib_tpl_t ib_tpl, /*!< in/out: tuple to write to */
+ int col_no, /*!< in: column number */
+ ib_u64_t val); /*!< in: value to write */
+
+/*****************************************************************//**
+Inform the cursor that it's the start of an SQL statement. */
+
+void
+ib_cursor_stmt_begin(
+/*=================*/
+ ib_crsr_t ib_crsr); /*!< in: cursor */
+
+/*****************************************************************//**
+Write a double value to a column.
+@return DB_SUCCESS or error */
+
+ib_err_t
+ib_tuple_write_double(
+/*==================*/
+ ib_tpl_t ib_tpl, /*!< in: InnoDB tuple */
+ int col_no, /*!< in: column number */
+ double val); /*!< in: value to write */
+
+/*************************************************************//**
+Read a double column value from an InnoDB tuple.
+@return DB_SUCCESS or error */
+
+ib_err_t
+ib_tuple_read_double(
+/*=================*/
+ ib_tpl_t ib_tpl, /*!< in: InnoDB tuple */
+ ib_ulint_t col_no, /*!< in: column number */
+ double* dval); /*!< out: double value */
+
+/*****************************************************************//**
+Write a float value to a column.
+@return DB_SUCCESS or error */
+
+ib_err_t
+ib_tuple_write_float(
+/*=================*/
+ ib_tpl_t ib_tpl, /*!< in/out: tuple to write to */
+ int col_no, /*!< in: column number */
+ float val); /*!< in: value to write */
+
+/*************************************************************//**
+Read a float value from an InnoDB tuple.
+@return DB_SUCCESS or error */
+
+ib_err_t
+ib_tuple_read_float(
+/*================*/
+ ib_tpl_t ib_tpl, /*!< in: InnoDB tuple */
+ ib_ulint_t col_no, /*!< in: column number */
+ float* fval); /*!< out: float value */
+
+/*****************************************************************//**
+Get a column type, length and attributes from the tuple.
+@return len of column data */
+
+const char*
+ib_col_get_name(
+/*============*/
+ ib_crsr_t ib_crsr, /*!< in: InnoDB cursor instance */
+ ib_ulint_t i); /*!< in: column index in tuple */
+
+/*****************************************************************//**
+Get an index field name from the cursor.
+@return name of the field */
+
+const char*
+ib_get_idx_field_name(
+/*==================*/
+ ib_crsr_t ib_crsr, /*!< in: InnoDB cursor instance */
+ ib_ulint_t i); /*!< in: column index in tuple */
+
+/*****************************************************************//**
+Truncate a table.
+@return DB_SUCCESS or error code */
+
+ib_err_t
+ib_table_truncate(
+/*==============*/
+ const char* table_name, /*!< in: table name */
+ ib_id_u64_t* table_id); /*!< out: new table id */
+
+/*****************************************************************//**
+Frees a possible InnoDB trx object associated with the current THD.
+@return DB_SUCCESS or error number */
+
+ib_err_t
+ib_close_thd(
+/*=========*/
+ void* thd); /*!< in: handle to the MySQL
+ thread of the user whose resources
+ should be free'd */
+
+/*****************************************************************//**
+Get generic configure status
+@return configure status*/
+
+int
+ib_cfg_get_cfg();
+/*============*/
+
+/*****************************************************************//**
+Check whether the table name conforms to our requirements. Currently
+we only do a simple check for the presence of a '/'.
+@return DB_SUCCESS or err code */
+
+ib_err_t
+ib_table_name_check(
+/*================*/
+ const char* name); /*!< in: table name to check */
+
+/*****************************************************************//**
+Return isolation configuration set by "innodb_api_trx_level"
+@return trx isolation level*/
+
+ib_trx_state_t
+ib_cfg_trx_level();
+/*==============*/
+
+/*****************************************************************//**
+Return configure value for background commit interval (in seconds)
+@return background commit interval (in seconds) */
+
+ib_ulint_t
+ib_cfg_bk_commit_interval();
+/*=======================*/
+
+/*****************************************************************//**
+Get a trx start time.
+@return trx start_time */
+
+ib_u64_t
+ib_trx_get_start_time(
+/*==================*/
+ ib_trx_t ib_trx); /*!< in: transaction */
+
+#endif /* api0api_h */
diff --git a/storage/innobase/include/api0misc.h b/storage/innobase/include/api0misc.h
new file mode 100644
index 00000000000..fcd748390d1
--- /dev/null
+++ b/storage/innobase/include/api0misc.h
@@ -0,0 +1,78 @@
+/*****************************************************************************
+
+Copyright (c) 2008, 2012, Oracle and/or its affiliates. All Rights Reserved.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
+
+*****************************************************************************/
+
+/**************************************************//**
+@file include/api0misc.h
+InnoDB Native API
+
+3/20/2011 Jimmy Yang extracted from Embedded InnoDB
+2008 Created by Sunny Bains
+*******************************************************/
+
+#ifndef api0misc_h
+#define api0misc_h
+
+#include "univ.i"
+#include "os0file.h"
+#include "que0que.h"
+#include "trx0trx.h"
+
+/** Whether binlog is enabled for applications using InnoDB APIs */
+extern my_bool ib_binlog_enabled;
+
+/** Whether MySQL MDL is enabled for applications using InnoDB APIs */
+extern my_bool ib_mdl_enabled;
+
+/** Whether InnoDB row lock is disabled for applications using InnoDB APIs */
+extern my_bool ib_disable_row_lock;
+
+/** configure value for transaction isolation level */
+extern ulong ib_trx_level_setting;
+
+/** configure value for background commit interval (in seconds) */
+extern ulong ib_bk_commit_interval;
+
+/********************************************************************
+Handles user errors and lock waits detected by the database engine.
+@return TRUE if it was a lock wait and we should continue running
+the query thread */
+UNIV_INTERN
+ibool
+ib_handle_errors(
+/*=============*/
+ dberr_t* new_err, /*!< out: possible new error
+ encountered in lock wait, or if
+ no new error, the value of
+ trx->error_state at the entry of this
+ function */
+ trx_t* trx, /*!< in: transaction */
+ que_thr_t* thr, /*!< in: query thread */
+ trx_savept_t* savept); /*!< in: savepoint or NULL */
+
+/*************************************************************************
+Sets a lock on a table.
+@return error code or DB_SUCCESS */
+UNIV_INTERN
+dberr_t
+ib_trx_lock_table_with_retry(
+/*=========================*/
+ trx_t* trx, /*!< in/out: transaction */
+ dict_table_t* table, /*!< in: table to lock */
+ enum lock_mode mode); /*!< in: lock mode */
+
+#endif /* api0misc_h */
diff --git a/storage/innobase/include/btr0btr.h b/storage/innobase/include/btr0btr.h
index 5592995d4b2..b99b0c0cd7b 100644
--- a/storage/innobase/include/btr0btr.h
+++ b/storage/innobase/include/btr0btr.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1994, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2012, Facebook Inc.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -92,6 +93,17 @@ insert/delete buffer when the record is not in the buffer pool. */
buffer when the record is not in the buffer pool. */
#define BTR_DELETE 8192
+/** In the case of BTR_SEARCH_LEAF or BTR_MODIFY_LEAF, the caller is
+already holding an S latch on the index tree */
+#define BTR_ALREADY_S_LATCHED 16384
+
+#define BTR_LATCH_MODE_WITHOUT_FLAGS(latch_mode) \
+ ((latch_mode) & ~(BTR_INSERT \
+ | BTR_DELETE_MARK \
+ | BTR_DELETE \
+ | BTR_ESTIMATE \
+ | BTR_IGNORE_SEC_UNIQUE \
+ | BTR_ALREADY_S_LATCHED))
#endif /* UNIV_HOTBACKUP */
/**************************************************************//**
@@ -118,7 +130,7 @@ btr_corruption_report(
#ifdef UNIV_BLOB_DEBUG
# include "ut0rbt.h"
/** An index->blobs entry for keeping track of off-page column references */
-struct btr_blob_dbg_struct
+struct btr_blob_dbg_t
{
unsigned blob_page_no:32; /*!< first BLOB page number */
unsigned ref_page_no:32; /*!< referring page number */
@@ -207,8 +219,32 @@ UNIV_INTERN
page_t*
btr_root_get(
/*=========*/
+ const dict_index_t* index, /*!< in: index tree */
+ mtr_t* mtr) /*!< in: mtr */
+ __attribute__((nonnull));
+
+/**************************************************************//**
+Checks and adjusts the root node of a tree during IMPORT TABLESPACE.
+@return error code, or DB_SUCCESS */
+UNIV_INTERN
+dberr_t
+btr_root_adjust_on_import(
+/*======================*/
+ const dict_index_t* index) /*!< in: index tree */
+ __attribute__((nonnull, warn_unused_result));
+
+/**************************************************************//**
+Gets the height of the B-tree (the level of the root, when the leaf
+level is assumed to be 0). The caller must hold an S or X latch on
+the index.
+@return tree height (level of the root) */
+UNIV_INTERN
+ulint
+btr_height_get(
+/*===========*/
dict_index_t* index, /*!< in: index tree */
- mtr_t* mtr); /*!< in: mtr */
+ mtr_t* mtr) /*!< in/out: mini-transaction */
+ __attribute__((nonnull, warn_unused_result));
/**************************************************************//**
Gets a buffer page and declares its latching order level. */
UNIV_INLINE
@@ -269,7 +305,8 @@ UNIV_INLINE
index_id_t
btr_page_get_index_id(
/*==================*/
- const page_t* page); /*!< in: index page */
+ const page_t* page) /*!< in: index page */
+ __attribute__((nonnull, pure, warn_unused_result));
#ifndef UNIV_HOTBACKUP
/********************************************************//**
Gets the node level field in an index page.
@@ -278,16 +315,9 @@ UNIV_INLINE
ulint
btr_page_get_level_low(
/*===================*/
- const page_t* page); /*!< in: index page */
-/********************************************************//**
-Gets the node level field in an index page.
-@return level, leaf level == 0 */
-UNIV_INLINE
-ulint
-btr_page_get_level(
-/*===============*/
- const page_t* page, /*!< in: index page */
- mtr_t* mtr); /*!< in: mini-transaction handle */
+ const page_t* page) /*!< in: index page */
+ __attribute__((nonnull, pure, warn_unused_result));
+#define btr_page_get_level(page, mtr) btr_page_get_level_low(page)
/********************************************************//**
Gets the next index page number.
@return next page number */
@@ -296,7 +326,8 @@ ulint
btr_page_get_next(
/*==============*/
const page_t* page, /*!< in: index page */
- mtr_t* mtr); /*!< in: mini-transaction handle */
+ mtr_t* mtr) /*!< in: mini-transaction handle */
+ __attribute__((nonnull, warn_unused_result));
/********************************************************//**
Gets the previous index page number.
@return prev page number */
@@ -305,7 +336,8 @@ ulint
btr_page_get_prev(
/*==============*/
const page_t* page, /*!< in: index page */
- mtr_t* mtr); /*!< in: mini-transaction handle */
+ mtr_t* mtr) /*!< in: mini-transaction handle */
+ __attribute__((nonnull, warn_unused_result));
/*************************************************************//**
Gets pointer to the previous user record in the tree. It is assumed
that the caller has appropriate latches on the page and its neighbor.
@@ -315,8 +347,9 @@ rec_t*
btr_get_prev_user_rec(
/*==================*/
rec_t* rec, /*!< in: record on leaf level */
- mtr_t* mtr); /*!< in: mtr holding a latch on the page, and if
+ mtr_t* mtr) /*!< in: mtr holding a latch on the page, and if
needed, also to the previous page */
+ __attribute__((nonnull, warn_unused_result));
/*************************************************************//**
Gets pointer to the next user record in the tree. It is assumed
that the caller has appropriate latches on the page and its neighbor.
@@ -326,8 +359,9 @@ rec_t*
btr_get_next_user_rec(
/*==================*/
rec_t* rec, /*!< in: record on leaf level */
- mtr_t* mtr); /*!< in: mtr holding a latch on the page, and if
+ mtr_t* mtr) /*!< in: mtr holding a latch on the page, and if
needed, also to the next page */
+ __attribute__((nonnull, warn_unused_result));
/**************************************************************//**
Releases the latch on a leaf page and bufferunfixes it. */
UNIV_INLINE
@@ -337,7 +371,8 @@ btr_leaf_page_release(
buf_block_t* block, /*!< in: buffer block */
ulint latch_mode, /*!< in: BTR_SEARCH_LEAF or
BTR_MODIFY_LEAF */
- mtr_t* mtr); /*!< in: mtr */
+ mtr_t* mtr) /*!< in: mtr */
+ __attribute__((nonnull));
/**************************************************************//**
Gets the child node file address in a node pointer.
NOTE: the offsets array must contain all offsets for the record since
@@ -350,7 +385,8 @@ ulint
btr_node_ptr_get_child_page_no(
/*===========================*/
const rec_t* rec, /*!< in: node pointer record */
- const ulint* offsets);/*!< in: array returned by rec_get_offsets() */
+ const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
+ __attribute__((nonnull, pure, warn_unused_result));
/************************************************************//**
Creates the root node for a new index tree.
@return page number of the created root, FIL_NULL if did not succeed */
@@ -364,7 +400,8 @@ btr_create(
or 0 for uncompressed pages */
index_id_t index_id,/*!< in: index id */
dict_index_t* index, /*!< in: index */
- mtr_t* mtr); /*!< in: mini-transaction handle */
+ mtr_t* mtr) /*!< in: mini-transaction handle */
+ __attribute__((nonnull));
/************************************************************//**
Frees a B-tree except the root page, which MUST be freed after this
by calling btr_free_root. */
@@ -386,7 +423,8 @@ btr_free_root(
ulint zip_size, /*!< in: compressed page size in bytes
or 0 for uncompressed pages */
ulint root_page_no, /*!< in: root page number */
- mtr_t* mtr); /*!< in/out: mini-transaction */
+ mtr_t* mtr) /*!< in/out: mini-transaction */
+ __attribute__((nonnull));
/*************************************************************//**
Makes tree one level higher by splitting the root, and inserts
the tuple. It is assumed that mtr contains an x-latch on the tree.
@@ -398,13 +436,18 @@ UNIV_INTERN
rec_t*
btr_root_raise_and_insert(
/*======================*/
+ ulint flags, /*!< in: undo logging and locking flags */
btr_cur_t* cursor, /*!< in: cursor at which to insert: must be
on the root page; when the function returns,
the cursor is positioned on the predecessor
of the inserted record */
+ ulint** offsets,/*!< out: offsets on inserted record */
+ mem_heap_t** heap, /*!< in/out: pointer to memory heap
+ that can be emptied, or NULL */
const dtuple_t* tuple, /*!< in: tuple to insert */
ulint n_ext, /*!< in: number of externally stored columns */
- mtr_t* mtr); /*!< in: mtr */
+ mtr_t* mtr) /*!< in: mtr */
+ __attribute__((nonnull, warn_unused_result));
/*************************************************************//**
Reorganizes an index page.
IMPORTANT: if btr_page_reorganize() is invoked on a compressed leaf
@@ -418,7 +461,8 @@ btr_page_reorganize(
/*================*/
buf_block_t* block, /*!< in: page to be reorganized */
dict_index_t* index, /*!< in: record descriptor */
- mtr_t* mtr); /*!< in: mtr */
+ mtr_t* mtr) /*!< in: mtr */
+ __attribute__((nonnull));
/*************************************************************//**
Decides if the page should be split at the convergence point of
inserts converging to left.
@@ -428,9 +472,10 @@ ibool
btr_page_get_split_rec_to_left(
/*===========================*/
btr_cur_t* cursor, /*!< in: cursor at which to insert */
- rec_t** split_rec);/*!< out: if split recommended,
+ rec_t** split_rec)/*!< out: if split recommended,
the first record on upper half page,
or NULL if tuple should be first */
+ __attribute__((nonnull, warn_unused_result));
/*************************************************************//**
Decides if the page should be split at the convergence point of
inserts converging to right.
@@ -440,9 +485,10 @@ ibool
btr_page_get_split_rec_to_right(
/*============================*/
btr_cur_t* cursor, /*!< in: cursor at which to insert */
- rec_t** split_rec);/*!< out: if split recommended,
+ rec_t** split_rec)/*!< out: if split recommended,
the first record on upper half page,
or NULL if tuple should be first */
+ __attribute__((nonnull, warn_unused_result));
/*************************************************************//**
Splits an index page to halves and inserts the tuple. It is assumed
that mtr holds an x-latch to the index tree. NOTE: the tree x-latch is
@@ -456,12 +502,17 @@ UNIV_INTERN
rec_t*
btr_page_split_and_insert(
/*======================*/
+ ulint flags, /*!< in: undo logging and locking flags */
btr_cur_t* cursor, /*!< in: cursor at which to insert; when the
function returns, the cursor is positioned
on the predecessor of the inserted record */
+ ulint** offsets,/*!< out: offsets on inserted record */
+ mem_heap_t** heap, /*!< in/out: pointer to memory heap
+ that can be emptied, or NULL */
const dtuple_t* tuple, /*!< in: tuple to insert */
ulint n_ext, /*!< in: number of externally stored columns */
- mtr_t* mtr); /*!< in: mtr */
+ mtr_t* mtr) /*!< in: mtr */
+ __attribute__((nonnull, warn_unused_result));
/*******************************************************//**
Inserts a data tuple to a tree on a non-leaf level. It is assumed
that mtr holds an x-latch on the tree. */
@@ -469,14 +520,16 @@ UNIV_INTERN
void
btr_insert_on_non_leaf_level_func(
/*==============================*/
+ ulint flags, /*!< in: undo logging and locking flags */
dict_index_t* index, /*!< in: index */
ulint level, /*!< in: level, must be > 0 */
dtuple_t* tuple, /*!< in: the record to be inserted */
const char* file, /*!< in: file name */
ulint line, /*!< in: line where called */
- mtr_t* mtr); /*!< in: mtr */
-# define btr_insert_on_non_leaf_level(i,l,t,m) \
- btr_insert_on_non_leaf_level_func(i,l,t,__FILE__,__LINE__,m)
+ mtr_t* mtr) /*!< in: mtr */
+ __attribute__((nonnull));
+# define btr_insert_on_non_leaf_level(f,i,l,t,m) \
+ btr_insert_on_non_leaf_level_func(f,i,l,t,__FILE__,__LINE__,m)
#endif /* !UNIV_HOTBACKUP */
/****************************************************************//**
Sets a record as the predefined minimum record. */
@@ -485,7 +538,8 @@ void
btr_set_min_rec_mark(
/*=================*/
rec_t* rec, /*!< in/out: record */
- mtr_t* mtr); /*!< in: mtr */
+ mtr_t* mtr) /*!< in: mtr */
+ __attribute__((nonnull));
#ifndef UNIV_HOTBACKUP
/*************************************************************//**
Deletes on the upper level the node pointer to a page. */
@@ -495,7 +549,8 @@ btr_node_ptr_delete(
/*================*/
dict_index_t* index, /*!< in: index tree */
buf_block_t* block, /*!< in: page whose node pointer is deleted */
- mtr_t* mtr); /*!< in: mtr */
+ mtr_t* mtr) /*!< in: mtr */
+ __attribute__((nonnull));
#ifdef UNIV_DEBUG
/************************************************************//**
Checks that the node pointer to a page is appropriate.
@@ -506,7 +561,8 @@ btr_check_node_ptr(
/*===============*/
dict_index_t* index, /*!< in: index tree */
buf_block_t* block, /*!< in: index page */
- mtr_t* mtr); /*!< in: mtr */
+ mtr_t* mtr) /*!< in: mtr */
+ __attribute__((nonnull, warn_unused_result));
#endif /* UNIV_DEBUG */
/*************************************************************//**
Tries to merge the page first to the left immediate brother if such a
@@ -540,7 +596,8 @@ btr_discard_page(
/*=============*/
btr_cur_t* cursor, /*!< in: cursor on the page to discard: not on
the root page */
- mtr_t* mtr); /*!< in: mtr */
+ mtr_t* mtr) /*!< in: mtr */
+ __attribute__((nonnull));
#endif /* !UNIV_HOTBACKUP */
/****************************************************************//**
Parses the redo log record for setting an index record as the predefined
@@ -554,7 +611,8 @@ btr_parse_set_min_rec_mark(
byte* end_ptr,/*!< in: buffer end */
ulint comp, /*!< in: nonzero=compact page format */
page_t* page, /*!< in: page or NULL */
- mtr_t* mtr); /*!< in: mtr or NULL */
+ mtr_t* mtr) /*!< in: mtr or NULL */
+ __attribute__((nonnull(1,2), warn_unused_result));
/***********************************************************//**
Parses a redo log record of reorganizing a page.
@return end of log record or NULL */
@@ -565,8 +623,10 @@ btr_parse_page_reorganize(
byte* ptr, /*!< in: buffer */
byte* end_ptr,/*!< in: buffer end */
dict_index_t* index, /*!< in: record descriptor */
+ bool compressed,/*!< in: true if compressed page */
buf_block_t* block, /*!< in: page to be reorganized, or NULL */
- mtr_t* mtr); /*!< in: mtr or NULL */
+ mtr_t* mtr) /*!< in: mtr or NULL */
+ __attribute__((nonnull(1,2,3), warn_unused_result));
#ifndef UNIV_HOTBACKUP
/**************************************************************//**
Gets the number of pages in a B-tree.
@@ -612,7 +672,8 @@ btr_page_free(
/*==========*/
dict_index_t* index, /*!< in: index tree */
buf_block_t* block, /*!< in: block to be freed, x-latched */
- mtr_t* mtr); /*!< in: mtr */
+ mtr_t* mtr) /*!< in: mtr */
+ __attribute__((nonnull));
/**************************************************************//**
Frees a file page used in an index tree. Can be used also to BLOB
external storage pages, because the page level 0 can be given as an
@@ -624,7 +685,8 @@ btr_page_free_low(
dict_index_t* index, /*!< in: index tree */
buf_block_t* block, /*!< in: block to be freed, x-latched */
ulint level, /*!< in: page level */
- mtr_t* mtr); /*!< in: mtr */
+ mtr_t* mtr) /*!< in: mtr */
+ __attribute__((nonnull));
#ifdef UNIV_BTR_PRINT
/*************************************************************//**
Prints size info of a B-tree. */
@@ -632,7 +694,8 @@ UNIV_INTERN
void
btr_print_size(
/*===========*/
- dict_index_t* index); /*!< in: index tree */
+ dict_index_t* index) /*!< in: index tree */
+ __attribute__((nonnull));
/**************************************************************//**
Prints directories and other info of all nodes in the index. */
UNIV_INTERN
@@ -640,8 +703,9 @@ void
btr_print_index(
/*============*/
dict_index_t* index, /*!< in: index */
- ulint width); /*!< in: print this many entries from start
+ ulint width) /*!< in: print this many entries from start
and end */
+ __attribute__((nonnull));
#endif /* UNIV_BTR_PRINT */
/************************************************************//**
Checks the size and number of fields in a record based on the definition of
@@ -653,18 +717,20 @@ btr_index_rec_validate(
/*===================*/
const rec_t* rec, /*!< in: index record */
const dict_index_t* index, /*!< in: index */
- ibool dump_on_error); /*!< in: TRUE if the function
+ ibool dump_on_error) /*!< in: TRUE if the function
should print hex dump of record
and page on error */
+ __attribute__((nonnull, warn_unused_result));
/**************************************************************//**
Checks the consistency of an index tree.
@return TRUE if ok */
UNIV_INTERN
-ibool
+bool
btr_validate_index(
/*===============*/
- dict_index_t* index, /*!< in: index */
- trx_t* trx); /*!< in: transaction or NULL */
+ dict_index_t* index, /*!< in: index */
+ const trx_t* trx) /*!< in: transaction or 0 */
+ __attribute__((nonnull(1), warn_unused_result));
#define BTR_N_LEAF_PAGES 1
#define BTR_TOTAL_SIZE 2
diff --git a/storage/innobase/include/btr0btr.ic b/storage/innobase/include/btr0btr.ic
index 6f7a66b12ac..00f50b5dcaf 100644
--- a/storage/innobase/include/btr0btr.ic
+++ b/storage/innobase/include/btr0btr.ic
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -126,22 +126,6 @@ btr_page_get_level_low(
}
/********************************************************//**
-Gets the node level field in an index page.
-@return level, leaf level == 0 */
-UNIV_INLINE
-ulint
-btr_page_get_level(
-/*===============*/
- const page_t* page, /*!< in: index page */
- mtr_t* mtr __attribute__((unused)))
- /*!< in: mini-transaction handle */
-{
- ut_ad(page && mtr);
-
- return(btr_page_get_level_low(page));
-}
-
-/********************************************************//**
Sets the node level field in an index page. */
UNIV_INLINE
void
@@ -278,6 +262,7 @@ btr_node_ptr_get_child_page_no(
" in a node ptr record at offset %lu\n",
(ulong) page_offset(rec));
buf_page_print(page_align(rec), 0, 0);
+ ut_ad(0);
}
return(page_no);
diff --git a/storage/innobase/include/btr0cur.h b/storage/innobase/include/btr0cur.h
index 80c62185fb0..edba1d1d77f 100644
--- a/storage/innobase/include/btr0cur.h
+++ b/storage/innobase/include/btr0cur.h
@@ -31,14 +31,22 @@ Created 10/16/1994 Heikki Tuuri
#include "page0cur.h"
#include "btr0types.h"
-/* Mode flags for btr_cur operations; these can be ORed */
-#define BTR_NO_UNDO_LOG_FLAG 1 /* do no undo logging */
-#define BTR_NO_LOCKING_FLAG 2 /* do no record lock checking */
-#define BTR_KEEP_SYS_FLAG 4 /* sys fields will be found from the
- update vector or inserted entry */
-#define BTR_KEEP_POS_FLAG 8 /* btr_cur_pessimistic_update()
- must keep cursor position when
- moving columns to big_rec */
+/** Mode flags for btr_cur operations; these can be ORed */
+enum {
+ /** do no undo logging */
+ BTR_NO_UNDO_LOG_FLAG = 1,
+ /** do no record lock checking */
+ BTR_NO_LOCKING_FLAG = 2,
+ /** sys fields will be found in the update vector or inserted
+ entry */
+ BTR_KEEP_SYS_FLAG = 4,
+ /** btr_cur_pessimistic_update() must keep cursor position
+ when moving columns to big_rec */
+ BTR_KEEP_POS_FLAG = 8,
+ /** the caller is creating the index or wants to bypass the
+ index->info.online creation log */
+ BTR_CREATE_FLAG = 16
+};
#ifndef UNIV_HOTBACKUP
#include "que0types.h"
@@ -164,16 +172,19 @@ UNIV_INTERN
void
btr_cur_open_at_index_side_func(
/*============================*/
- ibool from_left, /*!< in: TRUE if open to the low end,
- FALSE if to the high end */
+ bool from_left, /*!< in: true if open to the low end,
+ false if to the high end */
dict_index_t* index, /*!< in: index */
ulint latch_mode, /*!< in: latch mode */
- btr_cur_t* cursor, /*!< in: cursor */
+ btr_cur_t* cursor, /*!< in/out: cursor */
+ ulint level, /*!< in: level to search for
+ (0=leaf) */
const char* file, /*!< in: file name */
ulint line, /*!< in: line where called */
- mtr_t* mtr); /*!< in: mtr */
-#define btr_cur_open_at_index_side(f,i,l,c,m) \
- btr_cur_open_at_index_side_func(f,i,l,c,__FILE__,__LINE__,m)
+ mtr_t* mtr) /*!< in/out: mini-transaction */
+ __attribute__((nonnull));
+#define btr_cur_open_at_index_side(f,i,l,c,lv,m) \
+ btr_cur_open_at_index_side_func(f,i,l,c,lv,__FILE__,__LINE__,m)
/**********************************************************************//**
Positions a cursor at a randomly chosen position within a B-tree. */
UNIV_INTERN
@@ -196,7 +207,7 @@ one record on the page, the insert will always succeed; this is to
prevent trying to split a page with just one record.
@return DB_SUCCESS, DB_WAIT_LOCK, DB_FAIL, or error number */
UNIV_INTERN
-ulint
+dberr_t
btr_cur_optimistic_insert(
/*======================*/
ulint flags, /*!< in: undo logging and locking flags: if not
@@ -204,6 +215,8 @@ btr_cur_optimistic_insert(
specified */
btr_cur_t* cursor, /*!< in: cursor on page after which to insert;
cursor stays valid */
+ ulint** offsets,/*!< out: offsets on *rec */
+ mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */
dtuple_t* entry, /*!< in/out: entry to insert */
rec_t** rec, /*!< out: pointer to inserted record if
succeed */
@@ -212,11 +225,12 @@ btr_cur_optimistic_insert(
NULL */
ulint n_ext, /*!< in: number of externally stored columns */
que_thr_t* thr, /*!< in: query thread or NULL */
- mtr_t* mtr); /*!< in: mtr; if this function returns
+ mtr_t* mtr) /*!< in: mtr; if this function returns
DB_SUCCESS on a leaf page of a secondary
index in a compressed tablespace, the
mtr must be committed before latching
any further pages */
+ __attribute__((nonnull(2,3,4,5,6,7,10), warn_unused_result));
/*************************************************************//**
Performs an insert on a page of an index tree. It is assumed that mtr
holds an x-latch on the tree and on the cursor page. If the insert is
@@ -224,7 +238,7 @@ made on the leaf level, to avoid deadlocks, mtr must also own x-latches
to brothers of page, if those brothers exist.
@return DB_SUCCESS or error number */
UNIV_INTERN
-ulint
+dberr_t
btr_cur_pessimistic_insert(
/*=======================*/
ulint flags, /*!< in: undo logging and locking flags: if not
@@ -235,6 +249,9 @@ btr_cur_pessimistic_insert(
insertion will certainly succeed */
btr_cur_t* cursor, /*!< in: cursor after which to insert;
cursor stays valid */
+ ulint** offsets,/*!< out: offsets on *rec */
+ mem_heap_t** heap, /*!< in/out: pointer to memory heap
+ that can be emptied, or NULL */
dtuple_t* entry, /*!< in/out: entry to insert */
rec_t** rec, /*!< out: pointer to inserted record if
succeed */
@@ -243,7 +260,8 @@ btr_cur_pessimistic_insert(
NULL */
ulint n_ext, /*!< in: number of externally stored columns */
que_thr_t* thr, /*!< in: query thread or NULL */
- mtr_t* mtr); /*!< in: mtr */
+ mtr_t* mtr) /*!< in: mtr */
+ __attribute__((nonnull(2,3,4,5,6,7,10), warn_unused_result));
/*************************************************************//**
See if there is enough place in the page modification log to log
an update-in-place.
@@ -264,19 +282,23 @@ btr_cur_update_alloc_zip(
Updates a record when the update causes no size changes in its fields.
@return DB_SUCCESS or error number */
UNIV_INTERN
-ulint
+dberr_t
btr_cur_update_in_place(
/*====================*/
ulint flags, /*!< in: undo logging and locking flags */
btr_cur_t* cursor, /*!< in: cursor on the record to update;
cursor stays valid and positioned on the
same record */
+ const ulint* offsets,/*!< in: offsets on cursor->page_cur.rec */
const upd_t* update, /*!< in: update vector */
ulint cmpl_info,/*!< in: compiler info on secondary index
updates */
- que_thr_t* thr, /*!< in: query thread */
- mtr_t* mtr); /*!< in: mtr; must be committed before
+ que_thr_t* thr, /*!< in: query thread, or NULL if
+ appropriate flags are set */
+ trx_id_t trx_id, /*!< in: transaction id */
+ mtr_t* mtr) /*!< in: mtr; must be committed before
latching any further pages */
+ __attribute__((warn_unused_result, nonnull(2,3,4,8)));
/*************************************************************//**
Tries to update a record on a page in an index tree. It is assumed that mtr
holds an x-latch on the page. The operation does not succeed if there is too
@@ -286,20 +308,25 @@ so that tree compression is recommended.
DB_UNDERFLOW if the page would become too empty, or DB_ZIP_OVERFLOW if
there is not enough space left on the compressed page */
UNIV_INTERN
-ulint
+dberr_t
btr_cur_optimistic_update(
/*======================*/
ulint flags, /*!< in: undo logging and locking flags */
btr_cur_t* cursor, /*!< in: cursor on the record to update;
cursor stays valid and positioned on the
same record */
+ ulint** offsets,/*!< out: offsets on cursor->page_cur.rec */
+ mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */
const upd_t* update, /*!< in: update vector; this must also
contain trx id and roll ptr fields */
ulint cmpl_info,/*!< in: compiler info on secondary index
updates */
- que_thr_t* thr, /*!< in: query thread */
- mtr_t* mtr); /*!< in: mtr; must be committed before
+ que_thr_t* thr, /*!< in: query thread, or NULL if
+ appropriate flags are set */
+ trx_id_t trx_id, /*!< in: transaction id */
+ mtr_t* mtr) /*!< in: mtr; must be committed before
latching any further pages */
+ __attribute__((warn_unused_result, nonnull(2,3,4,5,9)));
/*************************************************************//**
Performs an update of a record on a page of a tree. It is assumed
that mtr holds an x-latch on the tree and on the cursor page. If the
@@ -307,7 +334,7 @@ update is made on the leaf level, to avoid deadlocks, mtr must also
own x-latches to brothers of page, if those brothers exist.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
btr_cur_pessimistic_update(
/*=======================*/
ulint flags, /*!< in: undo logging, locking, and rollback
@@ -315,7 +342,13 @@ btr_cur_pessimistic_update(
btr_cur_t* cursor, /*!< in/out: cursor on the record to update;
cursor may become invalid if *big_rec == NULL
|| !(flags & BTR_KEEP_POS_FLAG) */
- mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */
+ ulint** offsets,/*!< out: offsets on cursor->page_cur.rec */
+ mem_heap_t** offsets_heap,
+ /*!< in/out: pointer to memory heap
+ that can be emptied, or NULL */
+ mem_heap_t* entry_heap,
+ /*!< in/out: memory heap for allocating
+ big_rec and the index tuple */
big_rec_t** big_rec,/*!< out: big rec vector whose fields have to
be stored externally by the caller, or NULL */
const upd_t* update, /*!< in: update vector; this is allowed also
@@ -323,9 +356,12 @@ btr_cur_pessimistic_update(
the values in update vector have no effect */
ulint cmpl_info,/*!< in: compiler info on secondary index
updates */
- que_thr_t* thr, /*!< in: query thread */
- mtr_t* mtr); /*!< in: mtr; must be committed before
+ que_thr_t* thr, /*!< in: query thread, or NULL if
+ appropriate flags are set */
+ trx_id_t trx_id, /*!< in: transaction id */
+ mtr_t* mtr) /*!< in: mtr; must be committed before
latching any further pages */
+ __attribute__((warn_unused_result, nonnull(2,3,4,5,6,7,11)));
/***********************************************************//**
Marks a clustered index record deleted. Writes an undo log record to
undo log on this delete marking. Writes in the trx id field the id
@@ -333,15 +369,13 @@ of the deleting transaction, and in the roll ptr field pointer to the
undo log record created.
@return DB_SUCCESS, DB_LOCK_WAIT, or error number */
UNIV_INTERN
-ulint
+dberr_t
btr_cur_del_mark_set_clust_rec(
/*===========================*/
- ulint flags, /*!< in: undo logging and locking flags */
buf_block_t* block, /*!< in/out: buffer block of the record */
rec_t* rec, /*!< in/out: record */
dict_index_t* index, /*!< in: clustered index of the record */
const ulint* offsets,/*!< in: rec_get_offsets(rec) */
- ibool val, /*!< in: value to set */
que_thr_t* thr, /*!< in: query thread */
mtr_t* mtr) /*!< in: mtr */
__attribute__((nonnull));
@@ -349,7 +383,7 @@ btr_cur_del_mark_set_clust_rec(
Sets a secondary index record delete mark to TRUE or FALSE.
@return DB_SUCCESS, DB_LOCK_WAIT, or error number */
UNIV_INTERN
-ulint
+dberr_t
btr_cur_del_mark_set_sec_rec(
/*=========================*/
ulint flags, /*!< in: locking flag */
@@ -382,16 +416,27 @@ but no latch on the whole tree.
@return TRUE if success, i.e., the page did not become too empty */
UNIV_INTERN
ibool
-btr_cur_optimistic_delete(
-/*======================*/
+btr_cur_optimistic_delete_func(
+/*===========================*/
btr_cur_t* cursor, /*!< in: cursor on the record to delete;
cursor stays valid: if deletion succeeds,
on function exit it points to the successor
of the deleted record */
- mtr_t* mtr); /*!< in: mtr; if this function returns
+# ifdef UNIV_DEBUG
+ ulint flags, /*!< in: BTR_CREATE_FLAG or 0 */
+# endif /* UNIV_DEBUG */
+ mtr_t* mtr) /*!< in: mtr; if this function returns
TRUE on a leaf page of a secondary
index, the mtr must be committed
before latching any further pages */
+ __attribute__((nonnull, warn_unused_result));
+# ifdef UNIV_DEBUG
+# define btr_cur_optimistic_delete(cursor, flags, mtr) \
+ btr_cur_optimistic_delete_func(cursor, flags, mtr)
+# else /* UNIV_DEBUG */
+# define btr_cur_optimistic_delete(cursor, flags, mtr) \
+ btr_cur_optimistic_delete_func(cursor, mtr)
+# endif /* UNIV_DEBUG */
/*************************************************************//**
Removes the record on which the tree cursor is positioned. Tries
to compress the page if its fillfactor drops below a threshold
@@ -404,7 +449,7 @@ UNIV_INTERN
ibool
btr_cur_pessimistic_delete(
/*=======================*/
- ulint* err, /*!< out: DB_SUCCESS or DB_OUT_OF_FILE_SPACE;
+ dberr_t* err, /*!< out: DB_SUCCESS or DB_OUT_OF_FILE_SPACE;
the latter may occur because we may have
to update node pointers on upper levels,
and in the case of variable length keys
@@ -417,8 +462,10 @@ btr_cur_pessimistic_delete(
if compression does not occur, the cursor
stays valid: it points to successor of
deleted record on function exit */
+ ulint flags, /*!< in: BTR_CREATE_FLAG or 0 */
enum trx_rb_ctx rb_ctx, /*!< in: rollback context */
- mtr_t* mtr); /*!< in: mtr */
+ mtr_t* mtr) /*!< in: mtr */
+ __attribute__((nonnull));
#endif /* !UNIV_HOTBACKUP */
/***********************************************************//**
Parses a redo log record of updating a record in-place.
@@ -472,9 +519,10 @@ btr_estimate_n_rows_in_range(
ulint mode2); /*!< in: search mode for range end */
/*******************************************************************//**
Estimates the number of different key values in a given index, for
-each n-column prefix of the index where n <= dict_index_get_n_unique(index).
-The estimates are stored in the array index->stat_n_diff_key_vals[] and
-the number of pages that were sampled is saved in index->stat_n_sample_sizes[].
+each n-column prefix of the index where 1 <= n <= dict_index_get_n_unique(index).
+The estimates are stored in the array index->stat_n_diff_key_vals[] (indexed
+0..n_uniq-1) and the number of pages that were sampled is saved in
+index->stat_n_sample_sizes[].
If innodb_stats_method is nulls_ignored, we also record the number of
non-null values for each prefix and stored the estimates in
array index->stat_n_non_null_key_vals. */
@@ -528,7 +576,7 @@ The fields are stored on pages allocated from leaf node
file segment of the index tree.
@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */
UNIV_INTERN
-enum db_err
+dberr_t
btr_store_big_rec_extern_fields(
/*============================*/
dict_index_t* index, /*!< in: index of rec; the index tree
@@ -662,8 +710,7 @@ limit, merging it to a neighbor is tried */
/** A slot in the path array. We store here info on a search path down the
tree. Each slot contains data on a single level of the tree. */
-typedef struct btr_path_struct btr_path_t;
-struct btr_path_struct{
+struct btr_path_t{
ulint nth_rec; /*!< index of the record
where the page cursor stopped on
this level (index in alphabetical
@@ -700,7 +747,7 @@ enum btr_cur_method {
/** The tree cursor: the definition appears here only for the compiler
to know struct size! */
-struct btr_cur_struct {
+struct btr_cur_t {
dict_index_t* index; /*!< index where positioned */
page_cur_t page_cur; /*!< page cursor */
purge_node_t* purge_node; /*!< purge node, for BTR_DELETE */
@@ -737,7 +784,7 @@ struct btr_cur_struct {
for comparison to the adjacent user
record if that record is on a
different leaf page! (See the note in
- row_ins_duplicate_key.) */
+ row_ins_duplicate_error_in_clust.) */
ulint up_bytes; /*!< number of matched bytes to the
right at the time cursor positioned;
only used internally in searches: not
diff --git a/storage/innobase/include/btr0pcur.h b/storage/innobase/include/btr0pcur.h
index a8eaac4690b..973fae382ab 100644
--- a/storage/innobase/include/btr0pcur.h
+++ b/storage/innobase/include/btr0pcur.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -146,13 +146,16 @@ UNIV_INLINE
void
btr_pcur_open_at_index_side(
/*========================*/
- ibool from_left, /*!< in: TRUE if open to the low end,
- FALSE if to the high end */
+ bool from_left, /*!< in: true if open to the low end,
+ false if to the high end */
dict_index_t* index, /*!< in: index */
ulint latch_mode, /*!< in: latch mode */
- btr_pcur_t* pcur, /*!< in: cursor */
- ibool do_init, /*!< in: TRUE if should be initialized */
- mtr_t* mtr); /*!< in: mtr */
+ btr_pcur_t* pcur, /*!< in/out: cursor */
+ bool init_pcur, /*!< in: whether to initialize pcur */
+ ulint level, /*!< in: level to search for
+ (0=leaf) */
+ mtr_t* mtr) /*!< in/out: mini-transaction */
+ __attribute__((nonnull));
/**************************************************************//**
Gets the up_match value for a pcur after a search.
@return number of matched fields at the cursor or to the right if
@@ -209,8 +212,17 @@ btr_pcur_open_at_rnd_pos_func(
#define btr_pcur_open_at_rnd_pos(i,l,c,m) \
btr_pcur_open_at_rnd_pos_func(i,l,c,__FILE__,__LINE__,m)
/**************************************************************//**
-Frees the possible old_rec_buf buffer of a persistent cursor and sets the
-latch mode of the persistent cursor to BTR_NO_LATCHES. */
+Frees the possible memory heap of a persistent cursor and sets the latch
+mode of the persistent cursor to BTR_NO_LATCHES.
+WARNING: this function does not release the latch on the page where the
+cursor is currently positioned. The latch is acquired by the
+"move to next/previous" family of functions. Since recursive shared locks
+are not allowed, you must take care (if using the cursor in S-mode) to
+manually release the latch by either calling
+btr_leaf_page_release(btr_pcur_get_block(&pcur), pcur.latch_mode, mtr)
+or by committing the mini-transaction right after btr_pcur_close().
+A subsequent attempt to crawl the same page in the same mtr would cause
+an assertion failure. */
UNIV_INLINE
void
btr_pcur_close(
@@ -452,14 +464,14 @@ btr_pcur_move_to_prev_on_page(
/* The persistent B-tree cursor structure. This is used mainly for SQL
selects, updates, and deletes. */
-struct btr_pcur_struct{
+struct btr_pcur_t{
btr_cur_t btr_cur; /*!< a B-tree cursor */
ulint latch_mode; /*!< see TODO note below!
BTR_SEARCH_LEAF, BTR_MODIFY_LEAF,
BTR_MODIFY_TREE, or BTR_NO_LATCHES,
depending on the latching state of
the page and tree where the cursor is
- positioned; the last value means that
+ positioned; BTR_NO_LATCHES means that
the cursor is not currently positioned:
we say then that the cursor is
detached; it can be restored to
diff --git a/storage/innobase/include/btr0pcur.ic b/storage/innobase/include/btr0pcur.ic
index a27033c4a7c..79afd7c322e 100644
--- a/storage/innobase/include/btr0pcur.ic
+++ b/storage/innobase/include/btr0pcur.ic
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -429,7 +429,7 @@ btr_pcur_open_low(
btr_pcur_init(cursor);
- cursor->latch_mode = latch_mode;
+ cursor->latch_mode = BTR_LATCH_MODE_WITHOUT_FLAGS(latch_mode);
cursor->search_mode = mode;
/* Search with the tree cursor */
@@ -496,28 +496,26 @@ UNIV_INLINE
void
btr_pcur_open_at_index_side(
/*========================*/
- ibool from_left, /*!< in: TRUE if open to the low end,
- FALSE if to the high end */
+ bool from_left, /*!< in: true if open to the low end,
+ false if to the high end */
dict_index_t* index, /*!< in: index */
ulint latch_mode, /*!< in: latch mode */
- btr_pcur_t* pcur, /*!< in: cursor */
- ibool do_init, /*!< in: TRUE if should be initialized */
- mtr_t* mtr) /*!< in: mtr */
+ btr_pcur_t* pcur, /*!< in/out: cursor */
+ bool init_pcur, /*!< in: whether to initialize pcur */
+ ulint level, /*!< in: level to search for
+ (0=leaf) */
+ mtr_t* mtr) /*!< in/out: mini-transaction */
{
- pcur->latch_mode = latch_mode;
+ pcur->latch_mode = BTR_LATCH_MODE_WITHOUT_FLAGS(latch_mode);
- if (from_left) {
- pcur->search_mode = PAGE_CUR_G;
- } else {
- pcur->search_mode = PAGE_CUR_L;
- }
+ pcur->search_mode = from_left ? PAGE_CUR_G : PAGE_CUR_L;
- if (do_init) {
+ if (init_pcur) {
btr_pcur_init(pcur);
}
btr_cur_open_at_index_side(from_left, index, latch_mode,
- btr_pcur_get_btr_cur(pcur), mtr);
+ btr_pcur_get_btr_cur(pcur), level, mtr);
pcur->pos_state = BTR_PCUR_IS_POSITIONED;
pcur->old_stored = BTR_PCUR_OLD_NOT_STORED;
@@ -556,7 +554,16 @@ btr_pcur_open_at_rnd_pos_func(
/**************************************************************//**
Frees the possible memory heap of a persistent cursor and sets the latch
-mode of the persistent cursor to BTR_NO_LATCHES. */
+mode of the persistent cursor to BTR_NO_LATCHES.
+WARNING: this function does not release the latch on the page where the
+cursor is currently positioned. The latch is acquired by the
+"move to next/previous" family of functions. Since recursive shared locks
+are not allowed, you must take care (if using the cursor in S-mode) to
+manually release the latch by either calling
+btr_leaf_page_release(btr_pcur_get_block(&pcur), pcur.latch_mode, mtr)
+or by committing the mini-transaction right after btr_pcur_close().
+A subsequent attempt to crawl the same page in the same mtr would cause
+an assertion failure. */
UNIV_INLINE
void
btr_pcur_close(
diff --git a/storage/innobase/include/btr0sea.h b/storage/innobase/include/btr0sea.h
index 5316c3efd39..fea117d0aaf 100644
--- a/storage/innobase/include/btr0sea.h
+++ b/storage/innobase/include/btr0sea.h
@@ -68,7 +68,8 @@ UNIV_INLINE
btr_search_t*
btr_search_get_info(
/*================*/
- dict_index_t* index); /*!< in: index */
+ dict_index_t* index) /*!< in: index */
+ __attribute__((nonnull));
/*****************************************************************//**
Creates and initializes a search info struct.
@return own: search info struct */
@@ -193,7 +194,7 @@ btr_search_validate(void);
#endif /* defined UNIV_AHI_DEBUG || defined UNIV_DEBUG */
/** The search info struct in an index */
-struct btr_search_struct{
+struct btr_search_t{
ulint ref_count; /*!< Number of blocks in this index tree
that have search index built
i.e. block->index points to this index.
@@ -242,16 +243,13 @@ struct btr_search_struct{
#endif /* UNIV_SEARCH_PERF_STAT */
#ifdef UNIV_DEBUG
ulint magic_n; /*!< magic number @see BTR_SEARCH_MAGIC_N */
-/** value of btr_search_struct::magic_n, used in assertions */
+/** value of btr_search_t::magic_n, used in assertions */
# define BTR_SEARCH_MAGIC_N 1112765
#endif /* UNIV_DEBUG */
};
/** The hash index system */
-typedef struct btr_search_sys_struct btr_search_sys_t;
-
-/** The hash index system */
-struct btr_search_sys_struct{
+struct btr_search_sys_t{
hash_table_t* hash_index; /*!< the adaptive hash index,
mapping dtuple_fold values
to rec_t pointers on index pages */
diff --git a/storage/innobase/include/btr0sea.ic b/storage/innobase/include/btr0sea.ic
index 49ba0fd3f0b..0bd869be136 100644
--- a/storage/innobase/include/btr0sea.ic
+++ b/storage/innobase/include/btr0sea.ic
@@ -45,8 +45,6 @@ btr_search_get_info(
/*================*/
dict_index_t* index) /*!< in: index */
{
- ut_ad(index);
-
return(index->search_info);
}
diff --git a/storage/innobase/include/btr0types.h b/storage/innobase/include/btr0types.h
index 09f97b3cabd..c1a4531f861 100644
--- a/storage/innobase/include/btr0types.h
+++ b/storage/innobase/include/btr0types.h
@@ -33,11 +33,11 @@ Created 2/17/1996 Heikki Tuuri
#include "sync0rw.h"
/** Persistent cursor */
-typedef struct btr_pcur_struct btr_pcur_t;
+struct btr_pcur_t;
/** B-tree cursor */
-typedef struct btr_cur_struct btr_cur_t;
+struct btr_cur_t;
/** B-tree search information for the adaptive hash index */
-typedef struct btr_search_struct btr_search_t;
+struct btr_search_t;
#ifndef UNIV_HOTBACKUP
@@ -68,7 +68,7 @@ extern char btr_search_enabled;
#ifdef UNIV_BLOB_DEBUG
# include "buf0types.h"
/** An index->blobs entry for keeping track of off-page column references */
-typedef struct btr_blob_dbg_struct btr_blob_dbg_t;
+struct btr_blob_dbg_t;
/** Insert to index->blobs a reference to an off-page column.
@param index the index tree
diff --git a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h
index d56f1790ae4..74a6e203808 100644
--- a/storage/innobase/include/buf0buf.h
+++ b/storage/innobase/include/buf0buf.h
@@ -89,8 +89,6 @@ extern ibool buf_debug_prints;/*!< If this is set TRUE, the program
prints info whenever read or flush
occurs */
#endif /* UNIV_DEBUG */
-extern ulint srv_buf_pool_write_requests; /*!< variable to count write request
- issued */
extern ulint srv_buf_pool_instances;
extern ulint srv_buf_pool_curr_size;
#else /* !UNIV_HOTBACKUP */
@@ -102,7 +100,7 @@ extern buf_block_t* back_block2; /*!< second block, for page reorganize */
#define BUF_NO_CHECKSUM_MAGIC 0xDEADBEEFUL
/** @brief States of a control block
-@see buf_page_struct
+@see buf_page_t
The enumeration values must be 0..7. */
enum buf_page_state {
@@ -132,7 +130,7 @@ enum buf_page_state {
/** This structure defines information we will fetch from each buffer pool. It
will be used to print table IO stats */
-struct buf_pool_info_struct{
+struct buf_pool_info_t{
/* General buffer pool info */
ulint pool_unique_id; /*!< Buffer Pool ID */
ulint pool_size; /*!< Buffer Pool size in pages */
@@ -203,17 +201,13 @@ struct buf_pool_info_struct{
interval */
};
-typedef struct buf_pool_info_struct buf_pool_info_t;
-
/** The occupied bytes of lists in all buffer pools */
-struct buf_pools_list_size_struct {
+struct buf_pools_list_size_t {
ulint LRU_bytes; /*!< LRU size in bytes */
ulint unzip_LRU_bytes; /*!< unzip_LRU size in bytes */
ulint flush_list_bytes; /*!< flush_list size in bytes */
};
-typedef struct buf_pools_list_size_struct buf_pools_list_size_t;
-
#ifndef UNIV_HOTBACKUP
/********************************************************************//**
Acquire mutex on all buffer pool instances */
@@ -231,9 +225,9 @@ buf_pool_mutex_exit_all(void);
/********************************************************************//**
Creates the buffer pool.
-@return own: buf_pool object, NULL if not enough memory or error */
+@return DB_SUCCESS if success, DB_ERROR if not enough memory or error */
UNIV_INTERN
-ulint
+dberr_t
buf_pool_init(
/*=========*/
ulint size, /*!< in: Size of the total pool in bytes */
@@ -638,9 +632,12 @@ UNIV_INTERN
ibool
buf_page_is_corrupted(
/*==================*/
+ bool check_lsn, /*!< in: true if we need to check the
+ and complain about the LSN */
const byte* read_buf, /*!< in: a database page */
- ulint zip_size); /*!< in: size of compressed page;
+ ulint zip_size) /*!< in: size of compressed page;
0 for uncompressed pages */
+ __attribute__((nonnull, warn_unused_result));
#ifndef UNIV_HOTBACKUP
/**********************************************************************//**
Gets the space id, page offset, and byte offset within page of a
@@ -890,7 +887,7 @@ buf_page_belongs_to_unzip_LRU(
Gets the mutex of a block.
@return pointer to mutex protecting bpage */
UNIV_INLINE
-mutex_t*
+ib_mutex_t*
buf_page_get_mutex(
/*===============*/
const buf_page_t* bpage) /*!< in: pointer to control block */
@@ -1160,7 +1157,7 @@ UNIV_INTERN
buf_page_t*
buf_page_init_for_read(
/*===================*/
- ulint* err, /*!< out: DB_SUCCESS or DB_TABLESPACE_DELETED */
+ dberr_t* err, /*!< out: DB_SUCCESS or DB_TABLESPACE_DELETED */
ulint mode, /*!< in: BUF_READ_IBUF_PAGES_ONLY, ... */
ulint space, /*!< in: space id */
ulint zip_size,/*!< in: compressed page size, or 0 */
@@ -1172,9 +1169,9 @@ buf_page_init_for_read(
/********************************************************************//**
Completes an asynchronous read or write request of a file page to or from
the buffer pool.
-@return TRUE if successful */
+@return true if successful */
UNIV_INTERN
-ibool
+bool
buf_page_io_complete(
/*=================*/
buf_page_t* bpage); /*!< in: pointer to the block in question */
@@ -1401,6 +1398,16 @@ buf_get_nth_chunk_block(
ulint n, /*!< in: nth chunk in the buffer pool */
ulint* chunk_size); /*!< in: chunk size */
+/********************************************************************//**
+Calculate the checksum of a page from compressed table and update the page. */
+UNIV_INTERN
+void
+buf_flush_update_zip_checksum(
+/*==========================*/
+ buf_frame_t* page, /*!< in/out: Page to update */
+ ulint zip_size, /*!< in: Compressed page size */
+ lsn_t lsn); /*!< in: Lsn to stamp on the page */
+
#endif /* !UNIV_HOTBACKUP */
/** The common buffer control block structure
@@ -1409,10 +1416,10 @@ for compressed and uncompressed frames */
/** Number of bits used for buffer page states. */
#define BUF_PAGE_STATE_BITS 3
-struct buf_page_struct{
+struct buf_page_t{
/** @name General fields
None of these bit-fields must be modified without holding
- buf_page_get_mutex() [buf_block_struct::mutex or
+ buf_page_get_mutex() [buf_block_t::mutex or
buf_pool->zip_mutex], since they can be stored in the same
machine word. Some of these fields are additionally protected
by buf_pool->mutex. */
@@ -1543,7 +1550,7 @@ struct buf_page_struct{
/* @} */
/** @name LRU replacement algorithm fields
These fields are protected by buf_pool->mutex only (not
- buf_pool->zip_mutex or buf_block_struct::mutex). */
+ buf_pool->zip_mutex or buf_block_t::mutex). */
/* @{ */
UT_LIST_NODE_T(buf_page_t) LRU;
@@ -1573,14 +1580,14 @@ struct buf_page_struct{
/*!< this is set to TRUE when
fsp frees a page in buffer pool;
protected by buf_pool->zip_mutex
- or buf_block_struct::mutex. */
+ or buf_block_t::mutex. */
# endif /* UNIV_DEBUG_FILE_ACCESSES || UNIV_DEBUG */
#endif /* !UNIV_HOTBACKUP */
};
/** The buffer control block structure */
-struct buf_block_struct{
+struct buf_block_t{
/** @name General fields */
/* @{ */
@@ -1604,7 +1611,7 @@ struct buf_block_struct{
decompressed LRU list;
used in debugging */
#endif /* UNIV_DEBUG */
- mutex_t mutex; /*!< mutex protecting this block:
+ ib_mutex_t mutex; /*!< mutex protecting this block:
state (also protected by the buffer
pool mutex), io_fix, buf_fix_count,
and accessed; we introduce this new
@@ -1663,8 +1670,8 @@ struct buf_block_struct{
/** @name Hash search fields
These 5 fields may only be modified when we have
an x-latch on btr_search_latch AND
- - we are holding an s-latch or x-latch on buf_block_struct::lock or
- - we know that buf_block_struct::buf_fix_count == 0.
+ - we are holding an s-latch or x-latch on buf_block_t::lock or
+ - we know that buf_block_t::buf_fix_count == 0.
An exception to this is when we init or create a page
in the buffer pool in buf0buf.cc.
@@ -1723,7 +1730,7 @@ Compute the hash fold value for blocks in buf_pool->zip_hash. */
/* @} */
/** @brief The buffer pool statistics structure. */
-struct buf_pool_stat_struct{
+struct buf_pool_stat_t{
ulint n_page_gets; /*!< number of page gets performed;
also successful searches through
the adaptive hash index are
@@ -1752,7 +1759,7 @@ struct buf_pool_stat_struct{
};
/** Statistics of buddy blocks of a given size. */
-struct buf_buddy_stat_struct {
+struct buf_buddy_stat_t {
/** Number of blocks allocated from the buddy system. */
ulint used;
/** Number of blocks relocated by the buddy system. */
@@ -1766,13 +1773,13 @@ struct buf_buddy_stat_struct {
NOTE! The definition appears here only for other modules of this
directory (buf) to see it. Do not use from outside! */
-struct buf_pool_struct{
+struct buf_pool_t{
/** @name General fields */
/* @{ */
- mutex_t mutex; /*!< Buffer pool mutex of this
+ ib_mutex_t mutex; /*!< Buffer pool mutex of this
instance */
- mutex_t zip_mutex; /*!< Zip mutex of this buffer
+ ib_mutex_t zip_mutex; /*!< Zip mutex of this buffer
pool instance, protects compressed
only pages (of type buf_page_t, not
buf_block_t */
@@ -1826,7 +1833,7 @@ struct buf_pool_struct{
/* @{ */
- mutex_t flush_list_mutex;/*!< mutex protecting the
+ ib_mutex_t flush_list_mutex;/*!< mutex protecting the
flush list access. This mutex
protects flush_list, flush_rbt
and bpage::list pointers when
diff --git a/storage/innobase/include/buf0buf.ic b/storage/innobase/include/buf0buf.ic
index d0a6df4eb40..b310efdf451 100644
--- a/storage/innobase/include/buf0buf.ic
+++ b/storage/innobase/include/buf0buf.ic
@@ -31,13 +31,13 @@ Created 11/5/1995 Heikki Tuuri
*******************************************************/
#include "mtr0mtr.h"
+#ifndef UNIV_HOTBACKUP
#include "buf0flu.h"
#include "buf0lru.h"
#include "buf0rea.h"
-#ifndef UNIV_HOTBACKUP
/** A chunk of buffers. The buffer pool is allocated in chunks. */
-struct buf_chunk_struct{
+struct buf_chunk_t{
ulint mem_size; /*!< allocated size of the chunk */
ulint size; /*!< size of frames[] and blocks[] */
void* mem; /*!< pointer to the memory area which
@@ -339,7 +339,7 @@ buf_page_belongs_to_unzip_LRU(
Gets the mutex of a block.
@return pointer to mutex protecting bpage */
UNIV_INLINE
-mutex_t*
+ib_mutex_t*
buf_page_get_mutex(
/*===============*/
const buf_page_t* bpage) /*!< in: pointer to control block */
@@ -419,6 +419,8 @@ buf_page_get_io_fix(
/*================*/
const buf_page_t* bpage) /*!< in: pointer to the control block */
{
+ ut_ad(bpage != NULL);
+
enum buf_io_fix io_fix = (enum buf_io_fix) bpage->io_fix;
#ifdef UNIV_DEBUG
switch (io_fix) {
@@ -942,7 +944,7 @@ buf_page_get_newest_modification(
page frame */
{
lsn_t lsn;
- mutex_t* block_mutex = buf_page_get_mutex(bpage);
+ ib_mutex_t* block_mutex = buf_page_get_mutex(bpage);
mutex_enter(block_mutex);
diff --git a/storage/innobase/include/buf0dblwr.h b/storage/innobase/include/buf0dblwr.h
index fcc56d91405..357ba697f6a 100644
--- a/storage/innobase/include/buf0dblwr.h
+++ b/storage/innobase/include/buf0dblwr.h
@@ -29,7 +29,6 @@ Created 2011/12/19 Inaam Rana
#include "univ.i"
#include "ut0byte.h"
#include "log0log.h"
-#include "buf0types.h"
#ifndef UNIV_HOTBACKUP
@@ -113,8 +112,8 @@ buf_dblwr_write_single_page(
buf_page_t* bpage); /*!< in: buffer block to write */
/** Doublewrite control struct */
-struct buf_dblwr_struct{
- mutex_t mutex; /*!< mutex protecting the first_free field and
+struct buf_dblwr_t{
+ ib_mutex_t mutex; /*!< mutex protecting the first_free field and
write_buf */
ulint block1; /*!< the page number of the first
doublewrite block (64 pages) */
diff --git a/storage/innobase/include/buf0flu.h b/storage/innobase/include/buf0flu.h
index faf577f718b..94f4e6dedd1 100644
--- a/storage/innobase/include/buf0flu.h
+++ b/storage/innobase/include/buf0flu.h
@@ -95,23 +95,27 @@ void
buf_flush_sync_datafiles(void);
/*==========================*/
/*******************************************************************//**
-This utility flushes dirty blocks from the end of the flush_list of
+This utility flushes dirty blocks from the end of the flush list of
all buffer pool instances.
NOTE: The calling thread is not allowed to own any latches on pages!
-@return number of blocks for which the write request was queued;
-ULINT_UNDEFINED if there was a flush of the same type already running */
+@return true if a batch was queued successfully for each buffer pool
+instance. false if another batch of same type was already running in
+at least one of the buffer pool instance */
UNIV_INTERN
-ulint
+bool
buf_flush_list(
-/*============*/
+/*===========*/
ulint min_n, /*!< in: wished minimum mumber of blocks
flushed (it is not guaranteed that the
actual number is that big, though) */
- lsn_t lsn_limit); /*!< in the case BUF_FLUSH_LIST all
+ lsn_t lsn_limit, /*!< in the case BUF_FLUSH_LIST all
blocks whose oldest_modification is
smaller than this should be flushed
(if their number does not exceed
min_n), otherwise ignored */
+ ulint* n_processed); /*!< out: the number of pages
+ which were processed is passed
+ back to caller. Ignored if NULL */
/******************************************************************//**
This function picks up a single dirty page from the tail of the LRU
list, flushes it, removes it from page_hash and LRU list and puts
@@ -176,31 +180,6 @@ buf_flush_ready_for_replace(
/*========================*/
buf_page_t* bpage); /*!< in: buffer control block, must be
buf_page_in_file(bpage) and in the LRU list */
-
-/** @brief Statistics for selecting flush rate based on redo log
-generation speed.
-
-These statistics are generated for heuristics used in estimating the
-rate at which we should flush the dirty blocks to avoid bursty IO
-activity. Note that the rate of flushing not only depends on how many
-dirty pages we have in the buffer pool but it is also a fucntion of
-how much redo the workload is generating and at what rate. */
-
-struct buf_flush_stat_struct
-{
- lsn_t redo; /**< amount of redo generated. */
- ulint n_flushed; /**< number of pages flushed. */
-};
-
-/** Statistics for selecting flush rate of dirty pages. */
-typedef struct buf_flush_stat_struct buf_flush_stat_t;
-/*********************************************************************
-Update the historical stats that we are collecting for flush rate
-heuristics at the end of each interval. */
-UNIV_INTERN
-void
-buf_flush_stat_update(void);
-/*=======================*/
/******************************************************************//**
page_cleaner thread tasked with flushing dirty pages from the buffer
pools. As of now we'll have only one instance of this thread.
@@ -211,6 +190,23 @@ DECLARE_THREAD(buf_flush_page_cleaner_thread)(
/*==========================================*/
void* arg); /*!< in: a dummy parameter required by
os_thread_create */
+/*********************************************************************//**
+Clears up tail of the LRU lists:
+* Put replaceable pages at the tail of LRU to the free list
+* Flush dirty pages at the tail of LRU to the disk
+The depth to which we scan each buffer pool is controlled by dynamic
+config parameter innodb_LRU_scan_depth.
+@return total pages flushed */
+UNIV_INTERN
+ulint
+buf_flush_LRU_tail(void);
+/*====================*/
+/*********************************************************************//**
+Wait for any possible LRU flushes that are in progress to end. */
+UNIV_INTERN
+void
+buf_flush_wait_LRU_batch_end(void);
+/*==============================*/
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/******************************************************************//**
@@ -238,6 +234,44 @@ UNIV_INTERN
void
buf_flush_free_flush_rbt(void);
/*==========================*/
+
+/********************************************************************//**
+Writes a flushable page asynchronously from the buffer pool to a file.
+NOTE: in simulated aio we must call
+os_aio_simulated_wake_handler_threads after we have posted a batch of
+writes! NOTE: buf_pool->mutex and buf_page_get_mutex(bpage) must be
+held upon entering this function, and they will be released by this
+function. */
+UNIV_INTERN
+void
+buf_flush_page(
+/*===========*/
+ buf_pool_t* buf_pool, /*!< in: buffer pool instance */
+ buf_page_t* bpage, /*!< in: buffer control block */
+ buf_flush flush_type) /*!< in: type of flush */
+ __attribute__((nonnull));
+
+#ifdef UNIV_DEBUG
+/******************************************************************//**
+Check if there are any dirty pages that belong to a space id in the flush
+list in a particular buffer pool.
+@return number of dirty pages present in a single buffer pool */
+UNIV_INTERN
+ulint
+buf_pool_get_dirty_pages_count(
+/*===========================*/
+ buf_pool_t* buf_pool, /*!< in: buffer pool */
+ ulint id); /*!< in: space id to check */
+/******************************************************************//**
+Check if there are any dirty pages that belong to a space id in the flush list.
+@return count of dirty pages present in all the buffer pools */
+UNIV_INTERN
+ulint
+buf_flush_get_dirty_pages_count(
+/*============================*/
+ ulint id); /*!< in: space id to check */
+#endif /* UNIV_DEBUG */
+
#endif /* !UNIV_HOTBACKUP */
#ifndef UNIV_NONINL
diff --git a/storage/innobase/include/buf0flu.ic b/storage/innobase/include/buf0flu.ic
index 68a76c0b637..a763cd115fe 100644
--- a/storage/innobase/include/buf0flu.ic
+++ b/storage/innobase/include/buf0flu.ic
@@ -26,6 +26,7 @@ Created 11/5/1995 Heikki Tuuri
#ifndef UNIV_HOTBACKUP
#include "buf0buf.h"
#include "mtr0mtr.h"
+#include "srv0srv.h"
/********************************************************************//**
Inserts a modified block into the flush list. */
@@ -61,7 +62,7 @@ buf_flush_note_modification(
{
buf_pool_t* buf_pool = buf_pool_from_block(block);
- ut_ad(block);
+ ut_ad(!srv_read_only_mode);
ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
ut_ad(block->page.buf_fix_count > 0);
#ifdef UNIV_SYNC_DEBUG
@@ -91,7 +92,7 @@ buf_flush_note_modification(
mutex_exit(&block->mutex);
- ++srv_buf_pool_write_requests;
+ srv_stats.buf_pool_write_requests.inc();
}
/********************************************************************//**
@@ -108,7 +109,7 @@ buf_flush_recv_note_modification(
{
buf_pool_t* buf_pool = buf_pool_from_block(block);
- ut_ad(block);
+ ut_ad(!srv_read_only_mode);
ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
ut_ad(block->page.buf_fix_count > 0);
#ifdef UNIV_SYNC_DEBUG
diff --git a/storage/innobase/include/buf0lru.h b/storage/innobase/include/buf0lru.h
index 74c5525c2e5..f7a69e1c9e4 100644
--- a/storage/innobase/include/buf0lru.h
+++ b/storage/innobase/include/buf0lru.h
@@ -31,6 +31,9 @@ Created 11/5/1995 Heikki Tuuri
#include "ut0byte.h"
#include "buf0types.h"
+// Forward declaration
+struct trx_t;
+
/******************************************************************//**
Returns TRUE if less than 25 % of the buffer pool is available. This can be
used in heuristics to prevent huge transactions eating up the whole buffer
@@ -49,15 +52,19 @@ These are low-level functions
#define BUF_LRU_OLD_MIN_LEN 512 /* 8 megabytes of 16k pages */
/******************************************************************//**
-Invalidates all pages belonging to a given tablespace when we are deleting
-the data file(s) of that tablespace. A PROBLEM: if readahead is being started,
-what guarantees that it will not try to read in pages after this operation has
-completed? */
+Flushes all dirty pages or removes all pages belonging
+to a given tablespace. A PROBLEM: if readahead is being started, what
+guarantees that it will not try to read in pages after this operation
+has completed? */
UNIV_INTERN
void
-buf_LRU_invalidate_tablespace(
+buf_LRU_flush_or_remove_pages(
/*==========================*/
- ulint id); /*!< in: space id */
+ ulint id, /*!< in: space id */
+ buf_remove_t buf_remove, /*!< in: remove or flush strategy */
+ const trx_t* trx); /*!< to check if the operation must
+ be interrupted */
+
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/********************************************************************//**
Insert a compressed block into buf_pool->zip_clean in the LRU order. */
@@ -273,15 +280,12 @@ extern uint buf_LRU_old_threshold_ms;
These statistics are not 'of' LRU but 'for' LRU. We keep count of I/O
and page_zip_decompress() operations. Based on the statistics we decide
if we want to evict from buf_pool->unzip_LRU or buf_pool->LRU. */
-struct buf_LRU_stat_struct
+struct buf_LRU_stat_t
{
ulint io; /**< Counter of buffer pool I/O operations. */
ulint unzip; /**< Counter of page_zip_decompress operations. */
};
-/** Statistics for selecting the LRU list for eviction. */
-typedef struct buf_LRU_stat_struct buf_LRU_stat_t;
-
/** Current operation counters. Not protected by any mutex.
Cleared by buf_LRU_stat_update(). */
extern buf_LRU_stat_t buf_LRU_stat_cur;
diff --git a/storage/innobase/include/buf0types.h b/storage/innobase/include/buf0types.h
index ba54a8aeeea..5ed210d3b90 100644
--- a/storage/innobase/include/buf0types.h
+++ b/storage/innobase/include/buf0types.h
@@ -27,19 +27,19 @@ Created 11/17/1995 Heikki Tuuri
#define buf0types_h
/** Buffer page (uncompressed or compressed) */
-typedef struct buf_page_struct buf_page_t;
+struct buf_page_t;
/** Buffer block for which an uncompressed page exists */
-typedef struct buf_block_struct buf_block_t;
+struct buf_block_t;
/** Buffer pool chunk comprising buf_block_t */
-typedef struct buf_chunk_struct buf_chunk_t;
+struct buf_chunk_t;
/** Buffer pool comprising buf_chunk_t */
-typedef struct buf_pool_struct buf_pool_t;
+struct buf_pool_t;
/** Buffer pool statistics struct */
-typedef struct buf_pool_stat_struct buf_pool_stat_t;
+struct buf_pool_stat_t;
/** Buffer pool buddy statistics struct */
-typedef struct buf_buddy_stat_struct buf_buddy_stat_t;
+struct buf_buddy_stat_t;
/** Doublewrite memory struct */
-typedef struct buf_dblwr_struct buf_dblwr_t;
+struct buf_dblwr_t;
/** A buffer frame. @see page_t */
typedef byte buf_frame_t;
@@ -54,6 +54,17 @@ enum buf_flush {
BUF_FLUSH_N_TYPES /*!< index of last element + 1 */
};
+/** Algorithm to remove the pages for a tablespace from the buffer pool.
+See buf_LRU_flush_or_remove_pages(). */
+enum buf_remove_t {
+ BUF_REMOVE_ALL_NO_WRITE, /*!< Remove all pages from the buffer
+ pool, don't write or sync to disk */
+ BUF_REMOVE_FLUSH_NO_WRITE, /*!< Remove only, from the flush list,
+ don't write or sync to disk */
+ BUF_REMOVE_FLUSH_WRITE /*!< Flush dirty pages to disk only
+ don't remove from the buffer pool */
+};
+
/** Flags for io_fix types */
enum buf_io_fix {
BUF_IO_NONE = 0, /**< no pending I/O */
@@ -66,7 +77,7 @@ enum buf_io_fix {
/** Alternatives for srv_checksum_algorithm, which can be changed by
setting innodb_checksum_algorithm */
-enum srv_checksum_algorithm_enum {
+enum srv_checksum_algorithm_t {
SRV_CHECKSUM_ALGORITHM_CRC32, /*!< Write crc32, allow crc32,
innodb or none when reading */
SRV_CHECKSUM_ALGORITHM_STRICT_CRC32, /*!< Write crc32, allow crc32
@@ -81,8 +92,6 @@ enum srv_checksum_algorithm_enum {
when reading */
};
-typedef enum srv_checksum_algorithm_enum srv_checksum_algorithm_t;
-
/** Parameters of binary buddy system for compressed pages (buf0buddy.h) */
/* @{ */
/** Zip shift value for the smallest page size */
diff --git a/storage/innobase/include/data0data.h b/storage/innobase/include/data0data.h
index 37364e891f5..a548c7b89b3 100644
--- a/storage/innobase/include/data0data.h
+++ b/storage/innobase/include/data0data.h
@@ -35,7 +35,7 @@ Created 5/30/1994 Heikki Tuuri
/** Storage for overflow data in a big record, that is, a clustered
index record which needs external storage of data fields */
-typedef struct big_rec_struct big_rec_t;
+struct big_rec_t;
#ifdef UNIV_DEBUG
/*********************************************************************//**
@@ -45,7 +45,8 @@ UNIV_INLINE
dtype_t*
dfield_get_type(
/*============*/
- const dfield_t* field); /*!< in: SQL data field */
+ const dfield_t* field) /*!< in: SQL data field */
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Gets pointer to the data in a field.
@return pointer to data */
@@ -53,7 +54,8 @@ UNIV_INLINE
void*
dfield_get_data(
/*============*/
- const dfield_t* field); /*!< in: field */
+ const dfield_t* field) /*!< in: field */
+ __attribute__((nonnull, warn_unused_result));
#else /* UNIV_DEBUG */
# define dfield_get_type(field) (&(field)->type)
# define dfield_get_data(field) ((field)->data)
@@ -65,7 +67,8 @@ void
dfield_set_type(
/*============*/
dfield_t* field, /*!< in: SQL data field */
- dtype_t* type); /*!< in: pointer to data type struct */
+ const dtype_t* type) /*!< in: pointer to data type struct */
+ __attribute__((nonnull));
/*********************************************************************//**
Gets length of field data.
@return length of data; UNIV_SQL_NULL if SQL null data */
@@ -73,7 +76,8 @@ UNIV_INLINE
ulint
dfield_get_len(
/*===========*/
- const dfield_t* field); /*!< in: field */
+ const dfield_t* field) /*!< in: field */
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Sets length in a field. */
UNIV_INLINE
@@ -81,7 +85,8 @@ void
dfield_set_len(
/*===========*/
dfield_t* field, /*!< in: field */
- ulint len); /*!< in: length or UNIV_SQL_NULL */
+ ulint len) /*!< in: length or UNIV_SQL_NULL */
+ __attribute__((nonnull));
/*********************************************************************//**
Determines if a field is SQL NULL
@return nonzero if SQL null data */
@@ -89,7 +94,8 @@ UNIV_INLINE
ulint
dfield_is_null(
/*===========*/
- const dfield_t* field); /*!< in: field */
+ const dfield_t* field) /*!< in: field */
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Determines if a field is externally stored
@return nonzero if externally stored */
@@ -97,14 +103,16 @@ UNIV_INLINE
ulint
dfield_is_ext(
/*==========*/
- const dfield_t* field); /*!< in: field */
+ const dfield_t* field) /*!< in: field */
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Sets the "external storage" flag */
UNIV_INLINE
void
dfield_set_ext(
/*===========*/
- dfield_t* field); /*!< in/out: field */
+ dfield_t* field) /*!< in/out: field */
+ __attribute__((nonnull));
/*********************************************************************//**
Sets pointer to the data and length in a field. */
UNIV_INLINE
@@ -113,14 +121,16 @@ dfield_set_data(
/*============*/
dfield_t* field, /*!< in: field */
const void* data, /*!< in: data */
- ulint len); /*!< in: length or UNIV_SQL_NULL */
+ ulint len) /*!< in: length or UNIV_SQL_NULL */
+ __attribute__((nonnull(1)));
/*********************************************************************//**
Sets a data field to SQL NULL. */
UNIV_INLINE
void
dfield_set_null(
/*============*/
- dfield_t* field); /*!< in/out: field */
+ dfield_t* field) /*!< in/out: field */
+ __attribute__((nonnull));
/**********************************************************************//**
Writes an SQL null field full of zeros. */
UNIV_INLINE
@@ -128,7 +138,8 @@ void
data_write_sql_null(
/*================*/
byte* data, /*!< in: pointer to a buffer of size len */
- ulint len); /*!< in: SQL null size in bytes */
+ ulint len) /*!< in: SQL null size in bytes */
+ __attribute__((nonnull));
/*********************************************************************//**
Copies the data and len fields. */
UNIV_INLINE
@@ -136,7 +147,8 @@ void
dfield_copy_data(
/*=============*/
dfield_t* field1, /*!< out: field to copy to */
- const dfield_t* field2);/*!< in: field to copy from */
+ const dfield_t* field2) /*!< in: field to copy from */
+ __attribute__((nonnull));
/*********************************************************************//**
Copies a data field to another. */
UNIV_INLINE
@@ -144,7 +156,8 @@ void
dfield_copy(
/*========*/
dfield_t* field1, /*!< out: field to copy to */
- const dfield_t* field2);/*!< in: field to copy from */
+ const dfield_t* field2) /*!< in: field to copy from */
+ __attribute__((nonnull));
/*********************************************************************//**
Copies the data pointed to by a data field. */
UNIV_INLINE
@@ -152,7 +165,8 @@ void
dfield_dup(
/*=======*/
dfield_t* field, /*!< in/out: data field */
- mem_heap_t* heap); /*!< in: memory heap where allocated */
+ mem_heap_t* heap) /*!< in: memory heap where allocated */
+ __attribute__((nonnull));
#ifndef UNIV_HOTBACKUP
/*********************************************************************//**
Tests if two data fields are equal.
@@ -187,7 +201,8 @@ UNIV_INLINE
ulint
dtuple_get_n_fields(
/*================*/
- const dtuple_t* tuple); /*!< in: tuple */
+ const dtuple_t* tuple) /*!< in: tuple */
+ __attribute__((nonnull, warn_unused_result));
#ifdef UNIV_DEBUG
/*********************************************************************//**
Gets nth field of a tuple.
@@ -208,7 +223,8 @@ UNIV_INLINE
ulint
dtuple_get_info_bits(
/*=================*/
- const dtuple_t* tuple); /*!< in: tuple */
+ const dtuple_t* tuple) /*!< in: tuple */
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Sets info bits in a data tuple. */
UNIV_INLINE
@@ -216,7 +232,8 @@ void
dtuple_set_info_bits(
/*=================*/
dtuple_t* tuple, /*!< in: tuple */
- ulint info_bits); /*!< in: info bits */
+ ulint info_bits) /*!< in: info bits */
+ __attribute__((nonnull));
/*********************************************************************//**
Gets number of fields used in record comparisons.
@return number of fields used in comparisons in rem0cmp.* */
@@ -224,7 +241,8 @@ UNIV_INLINE
ulint
dtuple_get_n_fields_cmp(
/*====================*/
- const dtuple_t* tuple); /*!< in: tuple */
+ const dtuple_t* tuple) /*!< in: tuple */
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Gets number of fields used in record comparisons. */
UNIV_INLINE
@@ -232,8 +250,9 @@ void
dtuple_set_n_fields_cmp(
/*====================*/
dtuple_t* tuple, /*!< in: tuple */
- ulint n_fields_cmp); /*!< in: number of fields used in
+ ulint n_fields_cmp) /*!< in: number of fields used in
comparisons in rem0cmp.* */
+ __attribute__((nonnull));
/* Estimate the number of bytes that are going to be allocated when
creating a new dtuple_t object */
@@ -252,7 +271,8 @@ dtuple_create_from_mem(
/*===================*/
void* buf, /*!< in, out: buffer to use */
ulint buf_size, /*!< in: buffer size */
- ulint n_fields); /*!< in: number of fields */
+ ulint n_fields) /*!< in: number of fields */
+ __attribute__((nonnull, warn_unused_result));
/**********************************************************//**
Creates a data tuple to a memory heap. The default value for number
@@ -265,19 +285,8 @@ dtuple_create(
mem_heap_t* heap, /*!< in: memory heap where the tuple
is created, DTUPLE_EST_ALLOC(n_fields)
bytes will be allocated from this heap */
- ulint n_fields); /*!< in: number of fields */
-
-/**********************************************************//**
-Wrap data fields in a tuple. The default value for number
-of fields used in record comparisons for this tuple is n_fields.
-@return data tuple */
-UNIV_INLINE
-const dtuple_t*
-dtuple_from_fields(
-/*===============*/
- dtuple_t* tuple, /*!< in: storage for data tuple */
- const dfield_t* fields, /*!< in: fields */
- ulint n_fields); /*!< in: number of fields */
+ ulint n_fields)/*!< in: number of fields */
+ __attribute__((nonnull, malloc));
/*********************************************************************//**
Sets number of fields used in a tuple. Normally this is set in
@@ -287,7 +296,8 @@ void
dtuple_set_n_fields(
/*================*/
dtuple_t* tuple, /*!< in: tuple */
- ulint n_fields); /*!< in: number of fields */
+ ulint n_fields) /*!< in: number of fields */
+ __attribute__((nonnull));
/*********************************************************************//**
Copies a data tuple to another. This is a shallow copy; if a deep copy
is desired, dfield_dup() will have to be invoked on each field.
@@ -297,8 +307,9 @@ dtuple_t*
dtuple_copy(
/*========*/
const dtuple_t* tuple, /*!< in: tuple to copy from */
- mem_heap_t* heap); /*!< in: memory heap
+ mem_heap_t* heap) /*!< in: memory heap
where the tuple is created */
+ __attribute__((nonnull, malloc));
/**********************************************************//**
The following function returns the sum of data lengths of a tuple. The space
occupied by the field structs or the tuple struct is not counted.
@@ -308,7 +319,8 @@ ulint
dtuple_get_data_size(
/*=================*/
const dtuple_t* tuple, /*!< in: typed data tuple */
- ulint comp); /*!< in: nonzero=ROW_FORMAT=COMPACT */
+ ulint comp) /*!< in: nonzero=ROW_FORMAT=COMPACT */
+ __attribute__((nonnull));
/*********************************************************************//**
Computes the number of externally stored fields in a data tuple.
@return number of fields */
@@ -316,7 +328,8 @@ UNIV_INLINE
ulint
dtuple_get_n_ext(
/*=============*/
- const dtuple_t* tuple); /*!< in: tuple */
+ const dtuple_t* tuple) /*!< in: tuple */
+ __attribute__((nonnull));
/************************************************************//**
Compare two data tuples, respecting the collation of character fields.
@return 1, 0 , -1 if tuple1 is greater, equal, less, respectively,
@@ -326,7 +339,8 @@ int
dtuple_coll_cmp(
/*============*/
const dtuple_t* tuple1, /*!< in: tuple 1 */
- const dtuple_t* tuple2);/*!< in: tuple 2 */
+ const dtuple_t* tuple2) /*!< in: tuple 2 */
+ __attribute__((nonnull, warn_unused_result));
/************************************************************//**
Folds a prefix given as the number of fields of a tuple.
@return the folded value */
@@ -339,7 +353,7 @@ dtuple_fold(
ulint n_bytes,/*!< in: number of bytes to fold in an
incomplete last field */
index_id_t tree_id)/*!< in: index tree id */
- __attribute__((pure));
+ __attribute__((nonnull, pure, warn_unused_result));
/*******************************************************************//**
Sets types of fields binary in a tuple. */
UNIV_INLINE
@@ -347,7 +361,8 @@ void
dtuple_set_types_binary(
/*====================*/
dtuple_t* tuple, /*!< in: data tuple */
- ulint n); /*!< in: number of fields to set */
+ ulint n) /*!< in: number of fields to set */
+ __attribute__((nonnull));
/**********************************************************************//**
Checks if a dtuple contains an SQL null value.
@return TRUE if some field is SQL null */
@@ -355,7 +370,8 @@ UNIV_INLINE
ibool
dtuple_contains_null(
/*=================*/
- const dtuple_t* tuple); /*!< in: dtuple */
+ const dtuple_t* tuple) /*!< in: dtuple */
+ __attribute__((nonnull, warn_unused_result));
/**********************************************************//**
Checks that a data field is typed. Asserts an error if not.
@return TRUE if ok */
@@ -363,7 +379,8 @@ UNIV_INTERN
ibool
dfield_check_typed(
/*===============*/
- const dfield_t* field); /*!< in: data field */
+ const dfield_t* field) /*!< in: data field */
+ __attribute__((nonnull, warn_unused_result));
/**********************************************************//**
Checks that a data tuple is typed. Asserts an error if not.
@return TRUE if ok */
@@ -371,7 +388,8 @@ UNIV_INTERN
ibool
dtuple_check_typed(
/*===============*/
- const dtuple_t* tuple); /*!< in: tuple */
+ const dtuple_t* tuple) /*!< in: tuple */
+ __attribute__((nonnull, warn_unused_result));
/**********************************************************//**
Checks that a data tuple is typed.
@return TRUE if ok */
@@ -379,7 +397,8 @@ UNIV_INTERN
ibool
dtuple_check_typed_no_assert(
/*=========================*/
- const dtuple_t* tuple); /*!< in: tuple */
+ const dtuple_t* tuple) /*!< in: tuple */
+ __attribute__((nonnull, warn_unused_result));
#ifdef UNIV_DEBUG
/**********************************************************//**
Validates the consistency of a tuple which must be complete, i.e,
@@ -389,7 +408,8 @@ UNIV_INTERN
ibool
dtuple_validate(
/*============*/
- const dtuple_t* tuple); /*!< in: tuple */
+ const dtuple_t* tuple) /*!< in: tuple */
+ __attribute__((nonnull, warn_unused_result));
#endif /* UNIV_DEBUG */
/*************************************************************//**
Pretty prints a dfield value according to its data type. */
@@ -397,7 +417,8 @@ UNIV_INTERN
void
dfield_print(
/*=========*/
- const dfield_t* dfield);/*!< in: dfield */
+ const dfield_t* dfield) /*!< in: dfield */
+ __attribute__((nonnull));
/*************************************************************//**
Pretty prints a dfield value according to its data type. Also the hex string
is printed if a string contains non-printable characters. */
@@ -405,7 +426,8 @@ UNIV_INTERN
void
dfield_print_also_hex(
/*==================*/
- const dfield_t* dfield); /*!< in: dfield */
+ const dfield_t* dfield) /*!< in: dfield */
+ __attribute__((nonnull));
/**********************************************************//**
The following function prints the contents of a tuple. */
UNIV_INTERN
@@ -413,7 +435,8 @@ void
dtuple_print(
/*=========*/
FILE* f, /*!< in: output stream */
- const dtuple_t* tuple); /*!< in: tuple */
+ const dtuple_t* tuple) /*!< in: tuple */
+ __attribute__((nonnull));
/**************************************************************//**
Moves parts of long fields in entry to the big record vector so that
the size of tuple drops below the maximum record size allowed in the
@@ -428,8 +451,9 @@ dtuple_convert_big_rec(
/*===================*/
dict_index_t* index, /*!< in: index */
dtuple_t* entry, /*!< in/out: index entry */
- ulint* n_ext); /*!< in/out: number of
+ ulint* n_ext) /*!< in/out: number of
externally stored columns */
+ __attribute__((nonnull, malloc, warn_unused_result));
/**************************************************************//**
Puts back to entry the data stored in vector. Note that to ensure the
fields in entry can accommodate the data, vector must have been created
@@ -440,21 +464,23 @@ dtuple_convert_back_big_rec(
/*========================*/
dict_index_t* index, /*!< in: index */
dtuple_t* entry, /*!< in: entry whose data was put to vector */
- big_rec_t* vector);/*!< in, own: big rec vector; it is
+ big_rec_t* vector) /*!< in, own: big rec vector; it is
freed in this function */
+ __attribute__((nonnull));
/**************************************************************//**
Frees the memory in a big rec vector. */
UNIV_INLINE
void
dtuple_big_rec_free(
/*================*/
- big_rec_t* vector); /*!< in, own: big rec vector; it is
+ big_rec_t* vector) /*!< in, own: big rec vector; it is
freed in this function */
+ __attribute__((nonnull));
/*######################################################################*/
/** Structure for an SQL data field */
-struct dfield_struct{
+struct dfield_t{
void* data; /*!< pointer to data */
unsigned ext:1; /*!< TRUE=externally stored, FALSE=local */
unsigned len:32; /*!< data length; UNIV_SQL_NULL if SQL null */
@@ -462,7 +488,7 @@ struct dfield_struct{
};
/** Structure for an SQL data tuple of fields (logical record) */
-struct dtuple_struct {
+struct dtuple_t {
ulint info_bits; /*!< info bits of an index record:
the default is 0; this field is used
if an index record is built from
@@ -482,15 +508,13 @@ struct dtuple_struct {
#ifdef UNIV_DEBUG
ulint magic_n; /*!< magic number, used in
debug assertions */
-/** Value of dtuple_struct::magic_n */
+/** Value of dtuple_t::magic_n */
# define DATA_TUPLE_MAGIC_N 65478679
#endif /* UNIV_DEBUG */
};
/** A slot for a field in a big rec vector */
-typedef struct big_rec_field_struct big_rec_field_t;
-/** A slot for a field in a big rec vector */
-struct big_rec_field_struct {
+struct big_rec_field_t {
ulint field_no; /*!< field number in record */
ulint len; /*!< stored data length, in bytes */
const void* data; /*!< stored data */
@@ -498,7 +522,7 @@ struct big_rec_field_struct {
/** Storage format for overflow data in a big record, that is, a
clustered index record which needs external storage of data fields */
-struct big_rec_struct {
+struct big_rec_t {
mem_heap_t* heap; /*!< memory heap from which
allocated */
ulint n_fields; /*!< number of stored fields */
diff --git a/storage/innobase/include/data0data.ic b/storage/innobase/include/data0data.ic
index da50e91e98d..6937d55d211 100644
--- a/storage/innobase/include/data0data.ic
+++ b/storage/innobase/include/data0data.ic
@@ -54,7 +54,7 @@ void
dfield_set_type(
/*============*/
dfield_t* field, /*!< in: SQL data field */
- dtype_t* type) /*!< in: pointer to data type struct */
+ const dtype_t* type) /*!< in: pointer to data type struct */
{
ut_ad(field && type);
@@ -407,6 +407,8 @@ dtuple_create_from_mem(
}
}
#endif
+ UNIV_MEM_ASSERT_W(tuple->fields, n_fields * sizeof *tuple->fields);
+ UNIV_MEM_INVALID(tuple->fields, n_fields * sizeof *tuple->fields);
return(tuple);
}
@@ -434,30 +436,6 @@ dtuple_create(
tuple = dtuple_create_from_mem(buf, buf_size, n_fields);
-#ifdef UNIV_DEBUG
- UNIV_MEM_INVALID(tuple->fields, n_fields * sizeof *tuple->fields);
-#endif
-
- return(tuple);
-}
-
-/**********************************************************//**
-Wrap data fields in a tuple. The default value for number
-of fields used in record comparisons for this tuple is n_fields.
-@return data tuple */
-UNIV_INLINE
-const dtuple_t*
-dtuple_from_fields(
-/*===============*/
- dtuple_t* tuple, /*!< in: storage for data tuple */
- const dfield_t* fields, /*!< in: fields */
- ulint n_fields) /*!< in: number of fields */
-{
- tuple->info_bits = 0;
- tuple->n_fields = tuple->n_fields_cmp = n_fields;
- tuple->fields = (dfield_t*) fields;
- ut_d(tuple->magic_n = DATA_TUPLE_MAGIC_N);
-
return(tuple);
}
diff --git a/storage/innobase/include/data0type.h b/storage/innobase/include/data0type.h
index c7fcf316f24..111664b0b52 100644
--- a/storage/innobase/include/data0type.h
+++ b/storage/innobase/include/data0type.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2009, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -33,20 +33,20 @@ extern ulint data_mysql_default_charset_coll;
#define DATA_MYSQL_BINARY_CHARSET_COLL 63
/* SQL data type struct */
-typedef struct dtype_struct dtype_t;
+struct dtype_t;
/* SQL Like operator comparison types */
-enum ib_like_enum {
+enum ib_like_t {
IB_LIKE_EXACT, /* e.g. STRING */
IB_LIKE_PREFIX, /* e.g., STRING% */
IB_LIKE_SUFFIX, /* e.g., %STRING */
IB_LIKE_SUBSTR, /* e.g., %STRING% */
IB_LIKE_REGEXP /* Future */
};
-typedef enum ib_like_enum ib_like_t;
/*-------------------------------------------*/
/* The 'MAIN TYPE' of a column */
+#define DATA_MISSING 0 /* missing column */
#define DATA_VARCHAR 1 /* character varying of the
latin1_swedish_ci charset-collation; note
that the MySQL format for this, DATA_BINARY,
@@ -508,7 +508,7 @@ dtype_read_for_order_and_null_size()
dtype_new_read_for_order_and_null_size()
sym_tab_add_null_lit() */
-struct dtype_struct{
+struct dtype_t{
unsigned prtype:32; /*!< precise type; MySQL data
type, charset code, flags to
indicate nullability,
diff --git a/storage/innobase/include/data0types.h b/storage/innobase/include/data0types.h
index 7d599ef2c8d..bd2bb577611 100644
--- a/storage/innobase/include/data0types.h
+++ b/storage/innobase/include/data0types.h
@@ -27,10 +27,10 @@ Created 9/21/2000 Heikki Tuuri
#define data0types_h
/* SQL data field struct */
-typedef struct dfield_struct dfield_t;
+struct dfield_t;
/* SQL data tuple struct */
-typedef struct dtuple_struct dtuple_t;
+struct dtuple_t;
#endif
diff --git a/storage/innobase/include/db0err.h b/storage/innobase/include/db0err.h
index 1a3499b09e0..12e9f543e94 100644
--- a/storage/innobase/include/db0err.h
+++ b/storage/innobase/include/db0err.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -27,7 +27,7 @@ Created 5/24/1996 Heikki Tuuri
#define db0err_h
-enum db_err {
+enum dberr_t {
DB_SUCCESS_LOCKED_REC = 9, /*!< like DB_SUCCESS, but a new
explicit record lock was created */
DB_SUCCESS = 10,
@@ -68,11 +68,14 @@ enum db_err {
from a table failed */
DB_NO_SAVEPOINT, /*!< no savepoint exists with the given
name */
- DB_TABLESPACE_ALREADY_EXISTS, /*!< we cannot create a new single-table
+ DB_TABLESPACE_EXISTS, /*!< we cannot create a new single-table
tablespace because a file of the same
name already exists */
- DB_TABLESPACE_DELETED, /*!< tablespace does not exist or is
+ DB_TABLESPACE_DELETED, /*!< tablespace was deleted or is
being dropped right now */
+ DB_TABLESPACE_NOT_FOUND, /*!< Attempt to delete a tablespace
+ instance that was not found in the
+ tablespace hash table */
DB_LOCK_TABLE_FULL, /*!< lock structs have exhausted the
buffer pool (for big transactions,
InnoDB stores the lock structs in the
@@ -90,8 +93,8 @@ enum db_err {
work with e.g., FT indexes created by
a later version of the engine. */
- DB_PRIMARY_KEY_IS_NULL, /*!< a column in the PRIMARY KEY
- was found to be NULL */
+ DB_INVALID_NULL, /*!< a NOT NULL column was found to
+ be NULL during table rebuild */
DB_STATS_DO_NOT_EXIST, /*!< an operation that requires the
persistent storage, used for recording
@@ -115,6 +118,12 @@ enum db_err {
DB_READ_ONLY, /*!< Update operation attempted in
a read-only transaction */
DB_FTS_INVALID_DOCID, /* FTS Doc ID cannot be zero */
+ DB_TABLE_IN_FK_CHECK, /* table is being used in foreign
+ key check */
+ DB_ONLINE_LOG_TOO_BIG, /*!< Modification log grew too big
+ during online index creation */
+
+ DB_IO_ERROR, /*!< Generic IO error */
/* The following are partial failure codes */
DB_FAIL = 1000,
@@ -123,7 +132,23 @@ enum db_err {
DB_STRONG_FAIL,
DB_ZIP_OVERFLOW,
DB_RECORD_NOT_FOUND = 1500,
- DB_END_OF_INDEX
+ DB_END_OF_INDEX,
+ DB_DICT_CHANGED, /*!< Some part of table dictionary has
+ changed. Such as index dropped or
+ foreign key dropped */
+
+
+ /* The following are API only error codes. */
+ DB_DATA_MISMATCH = 2000, /*!< Column update or read failed
+ because the types mismatch */
+
+ DB_SCHEMA_NOT_LOCKED, /*!< If an API function expects the
+ schema to be locked in exclusive mode
+ and if it's not then that API function
+ will return this error code */
+
+ DB_NOT_FOUND /*!< Generic error code for "Not found"
+ type of errors */
};
#endif
diff --git a/storage/innobase/include/dict0boot.h b/storage/innobase/include/dict0boot.h
index 364aa746638..a994c9d8ff1 100644
--- a/storage/innobase/include/dict0boot.h
+++ b/storage/innobase/include/dict0boot.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2010, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -58,6 +58,13 @@ dict_hdr_get_new_id(
ulint* space_id); /*!< out: space id
(not assigned if NULL) */
/**********************************************************************//**
+Writes the current value of the row id counter to the dictionary header file
+page. */
+UNIV_INTERN
+void
+dict_hdr_flush_row_id(void);
+/*=======================*/
+/**********************************************************************//**
Returns a new row id.
@return the new id */
UNIV_INLINE
@@ -82,18 +89,32 @@ dict_sys_write_row_id(
row_id_t row_id);/*!< in: row id */
/*****************************************************************//**
Initializes the data dictionary memory structures when the database is
-started. This function is also called when the data dictionary is created. */
+started. This function is also called when the data dictionary is created.
+@return DB_SUCCESS or error code. */
UNIV_INTERN
-void
-dict_boot(void);
+dberr_t
+dict_boot(void)
/*===========*/
+ __attribute__((warn_unused_result));
+
/*****************************************************************//**
-Creates and initializes the data dictionary at the database creation. */
+Creates and initializes the data dictionary at the server bootstrap.
+@return DB_SUCCESS or error code. */
UNIV_INTERN
-void
-dict_create(void);
+dberr_t
+dict_create(void)
/*=============*/
+ __attribute__((warn_unused_result));
+/*********************************************************************//**
+Check if a table id belongs to system table.
+@return true if the table id belongs to a system table. */
+UNIV_INLINE
+bool
+dict_is_sys_table(
+/*==============*/
+ table_id_t id) /*!< in: table id to check */
+ __attribute__((warn_unused_result));
/* Space id and page no where the dictionary header resides */
#define DICT_HDR_SPACE 0 /* the SYSTEM tablespace */
@@ -273,6 +294,41 @@ enum dict_fld_sys_foreign_cols_enum {
DICT_FLD__SYS_FOREIGN_COLS__REF_COL_NAME = 5,
DICT_NUM_FIELDS__SYS_FOREIGN_COLS = 6
};
+/* The columns in SYS_TABLESPACES */
+enum dict_col_sys_tablespaces_enum {
+ DICT_COL__SYS_TABLESPACES__SPACE = 0,
+ DICT_COL__SYS_TABLESPACES__NAME = 1,
+ DICT_COL__SYS_TABLESPACES__FLAGS = 2,
+ DICT_NUM_COLS__SYS_TABLESPACES = 3
+};
+/* The field numbers in the SYS_TABLESPACES clustered index */
+enum dict_fld_sys_tablespaces_enum {
+ DICT_FLD__SYS_TABLESPACES__SPACE = 0,
+ DICT_FLD__SYS_TABLESPACES__DB_TRX_ID = 1,
+ DICT_FLD__SYS_TABLESPACES__DB_ROLL_PTR = 2,
+ DICT_FLD__SYS_TABLESPACES__NAME = 3,
+ DICT_FLD__SYS_TABLESPACES__FLAGS = 4,
+ DICT_NUM_FIELDS__SYS_TABLESPACES = 5
+};
+/* The columns in SYS_DATAFILES */
+enum dict_col_sys_datafiles_enum {
+ DICT_COL__SYS_DATAFILES__SPACE = 0,
+ DICT_COL__SYS_DATAFILES__PATH = 1,
+ DICT_NUM_COLS__SYS_DATAFILES = 2
+};
+/* The field numbers in the SYS_DATAFILES clustered index */
+enum dict_fld_sys_datafiles_enum {
+ DICT_FLD__SYS_DATAFILES__SPACE = 0,
+ DICT_FLD__SYS_DATAFILES__DB_TRX_ID = 1,
+ DICT_FLD__SYS_DATAFILES__DB_ROLL_PTR = 2,
+ DICT_FLD__SYS_DATAFILES__PATH = 3,
+ DICT_NUM_FIELDS__SYS_DATAFILES = 4
+};
+
+/* A number of the columns above occur in multiple tables. These are the
+length of those fields. */
+#define DICT_FLD_LEN_SPACE 4
+#define DICT_FLD_LEN_FLAGS 4
/* When a row id which is zero modulo this number (which must be a power of
two) is assigned, the field DICT_HDR_ROW_ID on the dictionary header page is
diff --git a/storage/innobase/include/dict0boot.ic b/storage/innobase/include/dict0boot.ic
index 0f660ab7555..2b156a4f672 100644
--- a/storage/innobase/include/dict0boot.ic
+++ b/storage/innobase/include/dict0boot.ic
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2009, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -24,15 +24,6 @@ Created 4/18/1996 Heikki Tuuri
*******************************************************/
/**********************************************************************//**
-Writes the current value of the row id counter to the dictionary header file
-page. */
-UNIV_INTERN
-void
-dict_hdr_flush_row_id(void);
-/*=======================*/
-
-
-/**********************************************************************//**
Returns a new row id.
@return the new id */
UNIV_INLINE
@@ -90,4 +81,16 @@ dict_sys_write_row_id(
mach_write_to_6(field, row_id);
}
+/*********************************************************************//**
+Check if a table id belongs to system table.
+@return true if the table id belongs to a system table. */
+UNIV_INLINE
+bool
+dict_is_sys_table(
+/*==============*/
+ table_id_t id) /*!< in: table id to check */
+{
+ return(id < DICT_HDR_FIRST_ID);
+}
+
diff --git a/storage/innobase/include/dict0crea.h b/storage/innobase/include/dict0crea.h
index 68fc9ba195a..217da0142ee 100644
--- a/storage/innobase/include/dict0crea.h
+++ b/storage/innobase/include/dict0crea.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2009, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -42,7 +42,9 @@ tab_create_graph_create(
/*====================*/
dict_table_t* table, /*!< in: table to create, built as a memory data
structure */
- mem_heap_t* heap); /*!< in: heap where created */
+ mem_heap_t* heap, /*!< in: heap where created */
+ bool commit);/*!< in: true if the commit node should be
+ added to the query graph */
/*********************************************************************//**
Creates an index create graph.
@return own: index create node */
@@ -52,7 +54,9 @@ ind_create_graph_create(
/*====================*/
dict_index_t* index, /*!< in: index to create, built as a memory data
structure */
- mem_heap_t* heap); /*!< in: heap where created */
+ mem_heap_t* heap, /*!< in: heap where created */
+ bool commit);/*!< in: true if the commit node should be
+ added to the query graph */
/***********************************************************//**
Creates a table. This is a high-level function used in SQL execution graphs.
@return query thread to run next or NULL */
@@ -99,11 +103,11 @@ dict_drop_index_tree(
mtr_t* mtr); /*!< in: mtr having the latch on the record page */
/****************************************************************//**
Creates the foreign key constraints system tables inside InnoDB
-at database creation or database start if they are not found or are
+at server bootstrap or server start if they are not found or are
not of the right form.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
dict_create_or_check_foreign_constraint_tables(void);
/*================================================*/
/********************************************************************//**
@@ -115,7 +119,7 @@ given locally for this table, that is, the number is not global, as in the
old format constraints < 4.0.18 it used to be.
@return error code or DB_SUCCESS */
UNIV_INTERN
-ulint
+dberr_t
dict_create_add_foreigns_to_dictionary(
/*===================================*/
ulint start_id,/*!< in: if we are actually doing ALTER TABLE
@@ -127,11 +131,56 @@ dict_create_add_foreigns_to_dictionary(
so far has no constraints for which the name
was generated here */
dict_table_t* table, /*!< in: table */
- trx_t* trx); /*!< in: transaction */
+ trx_t* trx) /*!< in: transaction */
+ __attribute__((nonnull, warn_unused_result));
+/****************************************************************//**
+Creates the tablespaces and datafiles system tables inside InnoDB
+at server bootstrap or server start if they are not found or are
+not of the right form.
+@return DB_SUCCESS or error code */
+UNIV_INTERN
+dberr_t
+dict_create_or_check_sys_tablespace(void);
+/*=====================================*/
+/********************************************************************//**
+Add a single tablespace definition to the data dictionary tables in the
+database.
+@return error code or DB_SUCCESS */
+UNIV_INTERN
+dberr_t
+dict_create_add_tablespace_to_dictionary(
+/*=====================================*/
+ ulint space, /*!< in: tablespace id */
+ const char* name, /*!< in: tablespace name */
+ ulint flags, /*!< in: tablespace flags */
+ const char* path, /*!< in: tablespace path */
+ trx_t* trx, /*!< in: transaction */
+ bool commit); /*!< in: if true then commit the
+ transaction */
+/********************************************************************//**
+Table create node structure */
-/* Table create node structure */
+/********************************************************************//**
+Add a single foreign key definition to the data dictionary tables in the
+database. We also generate names to constraints that were not named by the
+user. A generated constraint has a name of the format
+databasename/tablename_ibfk_NUMBER, where the numbers start from 1, and
+are given locally for this table, that is, the number is not global, as in
+the old format constraints < 4.0.18 it used to be.
+@return error code or DB_SUCCESS */
+UNIV_INTERN
+dberr_t
+dict_create_add_foreign_to_dictionary(
+/*==================================*/
+ ulint* id_nr, /*!< in/out: number to use in id generation;
+ incremented if used */
+ dict_table_t* table, /*!< in: table */
+ dict_foreign_t* foreign,/*!< in: foreign */
+ trx_t* trx) /*!< in/out: dictionary transaction */
+ __attribute__((nonnull, warn_unused_result));
-struct tab_node_struct{
+/* Table create node structure */
+struct tab_node_t{
que_common_t common; /*!< node type: QUE_NODE_TABLE_CREATE */
dict_table_t* table; /*!< table to create, built as a memory data
structure with dict_mem_... functions */
@@ -160,7 +209,7 @@ struct tab_node_struct{
/* Index create node struct */
-struct ind_node_struct{
+struct ind_node_t{
que_common_t common; /*!< node type: QUE_NODE_INDEX_CREATE */
dict_index_t* index; /*!< index to create, built as a memory data
structure with dict_mem_... functions */
diff --git a/storage/innobase/include/dict0dict.h b/storage/innobase/include/dict0dict.h
index 68008f95c2f..af0a5b31cc4 100644
--- a/storage/innobase/include/dict0dict.h
+++ b/storage/innobase/include/dict0dict.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2012, Facebook Inc.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -40,6 +41,7 @@ Created 1/8/1996 Heikki Tuuri
#include "ut0rnd.h"
#include "ut0byte.h"
#include "trx0types.h"
+#include "row0types.h"
#ifndef UNIV_HOTBACKUP
# include "sync0sync.h"
@@ -50,7 +52,8 @@ UNIV_INTERN
void
dict_casedn_str(
/*============*/
- char* a); /*!< in/out: string to put in lower case */
+ char* a) /*!< in/out: string to put in lower case */
+ __attribute__((nonnull));
/********************************************************************//**
Get the database name length in a table name.
@return database name length */
@@ -58,17 +61,53 @@ UNIV_INTERN
ulint
dict_get_db_name_len(
/*=================*/
- const char* name); /*!< in: table name in the form
+ const char* name) /*!< in: table name in the form
dbname '/' tablename */
+ __attribute__((nonnull, warn_unused_result));
+/*********************************************************************//**
+Open a table from its database and table name, this is currently used by
+foreign constraint parser to get the referenced table.
+@return complete table name with database and table name, allocated from
+heap memory passed in */
+UNIV_INTERN
+char*
+dict_get_referenced_table(
+/*======================*/
+ const char* name, /*!< in: foreign key table name */
+ const char* database_name, /*!< in: table db name */
+ ulint database_name_len,/*!< in: db name length */
+ const char* table_name, /*!< in: table name */
+ ulint table_name_len, /*!< in: table name length */
+ dict_table_t** table, /*!< out: table object or NULL */
+ mem_heap_t* heap); /*!< in: heap memory */
+/*********************************************************************//**
+Frees a foreign key struct. */
+UNIV_INTERN
+void
+dict_foreign_free(
+/*==============*/
+ dict_foreign_t* foreign); /*!< in, own: foreign key struct */
+/*********************************************************************//**
+Finds the highest [number] for foreign key constraints of the table. Looks
+only at the >= 4.0.18-format id's, which are of the form
+databasename/tablename_ibfk_[number].
+@return highest number, 0 if table has no new format foreign key constraints */
+UNIV_INTERN
+ulint
+dict_table_get_highest_foreign_id(
+/*==============================*/
+ dict_table_t* table); /*!< in: table in the dictionary
+ memory cache */
/********************************************************************//**
Return the end of table name where we have removed dbname and '/'.
@return table name */
-
+UNIV_INTERN
const char*
dict_remove_db_name(
/*================*/
- const char* name); /*!< in: table name in the form
+ const char* name) /*!< in: table name in the form
dbname '/' tablename */
+ __attribute__((nonnull, warn_unused_result));
/**********************************************************************//**
Returns a table object based on table id.
@return table, NULL if does not exist */
@@ -77,7 +116,11 @@ dict_table_t*
dict_table_open_on_id(
/*==================*/
table_id_t table_id, /*!< in: table id */
- ibool dict_locked); /*!< in: TRUE=data dictionary locked */
+ ibool dict_locked, /*!< in: TRUE=data dictionary locked */
+ ibool try_drop) /*!< in: TRUE=try to drop any orphan
+ indexes after an aborted online
+ index creation */
+ __attribute__((warn_unused_result));
/********************************************************************//**
Decrements the count of open handles to a table. */
UNIV_INTERN
@@ -85,7 +128,11 @@ void
dict_table_close(
/*=============*/
dict_table_t* table, /*!< in/out: table */
- ibool dict_locked); /*!< in: TRUE=data dictionary locked */
+ ibool dict_locked, /*!< in: TRUE=data dictionary locked */
+ ibool try_drop) /*!< in: TRUE=try to drop any orphan
+ indexes after an aborted online
+ index creation */
+ __attribute__((nonnull));
/**********************************************************************//**
Inits the data dictionary module. */
UNIV_INTERN
@@ -109,7 +156,8 @@ UNIV_INLINE
ulint
dict_col_get_mbminlen(
/*==================*/
- const dict_col_t* col); /*!< in: column */
+ const dict_col_t* col) /*!< in: column */
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Gets the maximum number of bytes per character.
@return maximum multi-byte char size, in bytes */
@@ -117,7 +165,8 @@ UNIV_INLINE
ulint
dict_col_get_mbmaxlen(
/*==================*/
- const dict_col_t* col); /*!< in: column */
+ const dict_col_t* col) /*!< in: column */
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Sets the minimum and maximum number of bytes per character. */
UNIV_INLINE
@@ -127,8 +176,9 @@ dict_col_set_mbminmaxlen(
dict_col_t* col, /*!< in/out: column */
ulint mbminlen, /*!< in: minimum multi-byte
character size, in bytes */
- ulint mbmaxlen); /*!< in: minimum multi-byte
+ ulint mbmaxlen) /*!< in: minimum multi-byte
character size, in bytes */
+ __attribute__((nonnull));
/*********************************************************************//**
Gets the column data type. */
UNIV_INLINE
@@ -136,7 +186,8 @@ void
dict_col_copy_type(
/*===============*/
const dict_col_t* col, /*!< in: column */
- dtype_t* type); /*!< out: data type */
+ dtype_t* type) /*!< out: data type */
+ __attribute__((nonnull));
/**********************************************************************//**
Determine bytes of column prefix to be stored in the undo log. Please
note if the table format is UNIV_FORMAT_A (< UNIV_FORMAT_B), no prefix
@@ -147,9 +198,9 @@ ulint
dict_max_field_len_store_undo(
/*==========================*/
dict_table_t* table, /*!< in: table */
- const dict_col_t* col); /*!< in: column which index prefix
+ const dict_col_t* col) /*!< in: column which index prefix
is based on */
-
+ __attribute__((nonnull, warn_unused_result));
#endif /* !UNIV_HOTBACKUP */
#ifdef UNIV_DEBUG
/*********************************************************************//**
@@ -160,7 +211,8 @@ ibool
dict_col_type_assert_equal(
/*=======================*/
const dict_col_t* col, /*!< in: column */
- const dtype_t* type); /*!< in: data type */
+ const dtype_t* type) /*!< in: data type */
+ __attribute__((nonnull, warn_unused_result));
#endif /* UNIV_DEBUG */
#ifndef UNIV_HOTBACKUP
/***********************************************************************//**
@@ -170,7 +222,8 @@ UNIV_INLINE
ulint
dict_col_get_min_size(
/*==================*/
- const dict_col_t* col); /*!< in: column */
+ const dict_col_t* col) /*!< in: column */
+ __attribute__((nonnull, warn_unused_result));
/***********************************************************************//**
Returns the maximum size of the column.
@return maximum size */
@@ -178,7 +231,8 @@ UNIV_INLINE
ulint
dict_col_get_max_size(
/*==================*/
- const dict_col_t* col); /*!< in: column */
+ const dict_col_t* col) /*!< in: column */
+ __attribute__((nonnull, warn_unused_result));
/***********************************************************************//**
Returns the size of a fixed size column, 0 if not a fixed size column.
@return fixed size, or 0 */
@@ -187,7 +241,8 @@ ulint
dict_col_get_fixed_size(
/*====================*/
const dict_col_t* col, /*!< in: column */
- ulint comp); /*!< in: nonzero=ROW_FORMAT=COMPACT */
+ ulint comp) /*!< in: nonzero=ROW_FORMAT=COMPACT */
+ __attribute__((nonnull, warn_unused_result));
/***********************************************************************//**
Returns the ROW_FORMAT=REDUNDANT stored SQL NULL size of a column.
For fixed length types it is the fixed length of the type, otherwise 0.
@@ -197,8 +252,8 @@ ulint
dict_col_get_sql_null_size(
/*=======================*/
const dict_col_t* col, /*!< in: column */
- ulint comp); /*!< in: nonzero=ROW_FORMAT=COMPACT */
-
+ ulint comp) /*!< in: nonzero=ROW_FORMAT=COMPACT */
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Gets the column number.
@return col->ind, table column position (starting from 0) */
@@ -206,7 +261,8 @@ UNIV_INLINE
ulint
dict_col_get_no(
/*============*/
- const dict_col_t* col); /*!< in: column */
+ const dict_col_t* col) /*!< in: column */
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Gets the column position in the clustered index. */
UNIV_INLINE
@@ -214,7 +270,8 @@ ulint
dict_col_get_clust_pos(
/*===================*/
const dict_col_t* col, /*!< in: table column */
- const dict_index_t* clust_index); /*!< in: clustered index */
+ const dict_index_t* clust_index) /*!< in: clustered index */
+ __attribute__((nonnull, warn_unused_result));
/****************************************************************//**
If the given column name is reserved for InnoDB system columns, return
TRUE.
@@ -223,14 +280,16 @@ UNIV_INTERN
ibool
dict_col_name_is_reserved(
/*======================*/
- const char* name); /*!< in: column name */
+ const char* name) /*!< in: column name */
+ __attribute__((nonnull, warn_unused_result));
/********************************************************************//**
Acquire the autoinc lock. */
UNIV_INTERN
void
dict_table_autoinc_lock(
/*====================*/
- dict_table_t* table); /*!< in/out: table */
+ dict_table_t* table) /*!< in/out: table */
+ __attribute__((nonnull));
/********************************************************************//**
Unconditionally set the autoinc counter. */
UNIV_INTERN
@@ -238,7 +297,8 @@ void
dict_table_autoinc_initialize(
/*==========================*/
dict_table_t* table, /*!< in/out: table */
- ib_uint64_t value); /*!< in: next value to assign to a row */
+ ib_uint64_t value) /*!< in: next value to assign to a row */
+ __attribute__((nonnull));
/********************************************************************//**
Reads the next autoinc value (== autoinc counter value), 0 if not yet
initialized.
@@ -247,7 +307,8 @@ UNIV_INTERN
ib_uint64_t
dict_table_autoinc_read(
/*====================*/
- const dict_table_t* table); /*!< in: table */
+ const dict_table_t* table) /*!< in: table */
+ __attribute__((nonnull, warn_unused_result));
/********************************************************************//**
Updates the autoinc counter if the value supplied is greater than the
current value. */
@@ -257,14 +318,16 @@ dict_table_autoinc_update_if_greater(
/*=================================*/
dict_table_t* table, /*!< in/out: table */
- ib_uint64_t value); /*!< in: value which was assigned to a row */
+ ib_uint64_t value) /*!< in: value which was assigned to a row */
+ __attribute__((nonnull));
/********************************************************************//**
Release the autoinc lock. */
UNIV_INTERN
void
dict_table_autoinc_unlock(
/*======================*/
- dict_table_t* table); /*!< in/out: table */
+ dict_table_t* table) /*!< in/out: table */
+ __attribute__((nonnull));
#endif /* !UNIV_HOTBACKUP */
/**********************************************************************//**
Adds system columns to a table object. */
@@ -273,7 +336,8 @@ void
dict_table_add_system_columns(
/*==========================*/
dict_table_t* table, /*!< in/out: table */
- mem_heap_t* heap); /*!< in: temporary heap */
+ mem_heap_t* heap) /*!< in: temporary heap */
+ __attribute__((nonnull));
#ifndef UNIV_HOTBACKUP
/**********************************************************************//**
Adds a table object to the dictionary cache. */
@@ -283,26 +347,30 @@ dict_table_add_to_cache(
/*====================*/
dict_table_t* table, /*!< in: table */
ibool can_be_evicted, /*!< in: TRUE if can be evicted*/
- mem_heap_t* heap); /*!< in: temporary heap */
+ mem_heap_t* heap) /*!< in: temporary heap */
+ __attribute__((nonnull));
/**********************************************************************//**
Removes a table object from the dictionary cache. */
UNIV_INTERN
void
dict_table_remove_from_cache(
/*=========================*/
- dict_table_t* table); /*!< in, own: table */
+ dict_table_t* table) /*!< in, own: table */
+ __attribute__((nonnull));
/**********************************************************************//**
Renames a table object.
@return TRUE if success */
UNIV_INTERN
-ibool
+dberr_t
dict_table_rename_in_cache(
/*=======================*/
dict_table_t* table, /*!< in/out: table */
const char* new_name, /*!< in: new name */
- ibool rename_also_foreigns);/*!< in: in ALTER TABLE we want
+ ibool rename_also_foreigns)
+ /*!< in: in ALTER TABLE we want
to preserve the original table name
in constraints which reference it */
+ __attribute__((nonnull, warn_unused_result));
/**********************************************************************//**
Removes an index from the dictionary cache. */
UNIV_INTERN
@@ -310,7 +378,8 @@ void
dict_index_remove_from_cache(
/*=========================*/
dict_table_t* table, /*!< in/out: table */
- dict_index_t* index); /*!< in, own: index */
+ dict_index_t* index) /*!< in, own: index */
+ __attribute__((nonnull));
/**********************************************************************//**
Change the id of a table object in the dictionary cache. This is used in
DISCARD TABLESPACE. */
@@ -319,7 +388,16 @@ void
dict_table_change_id_in_cache(
/*==========================*/
dict_table_t* table, /*!< in/out: table object already in cache */
- table_id_t new_id);/*!< in: new id to set */
+ table_id_t new_id) /*!< in: new id to set */
+ __attribute__((nonnull));
+/**********************************************************************//**
+Removes a foreign constraint struct from the dictionary cache. */
+UNIV_INTERN
+void
+dict_foreign_remove_from_cache(
+/*===========================*/
+ dict_foreign_t* foreign) /*!< in, own: foreign constraint */
+ __attribute__((nonnull));
/**********************************************************************//**
Adds a foreign key constraint object to the dictionary cache. May free
the object if there already is an object with the same identifier in.
@@ -327,12 +405,13 @@ At least one of foreign table or referenced table must already be in
the dictionary cache!
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
dict_foreign_add_to_cache(
/*======================*/
dict_foreign_t* foreign, /*!< in, own: foreign key constraint */
- ibool check_charsets);/*!< in: TRUE=check charset
+ ibool check_charsets) /*!< in: TRUE=check charset
compatibility */
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Check if the index is referenced by a foreign key, if TRUE return the
matching instance NULL otherwise.
@@ -343,7 +422,8 @@ dict_foreign_t*
dict_table_get_referenced_constraint(
/*=================================*/
dict_table_t* table, /*!< in: InnoDB table */
- dict_index_t* index); /*!< in: InnoDB index */
+ dict_index_t* index) /*!< in: InnoDB index */
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Checks if a table is referenced by foreign keys.
@return TRUE if table is referenced by a foreign key */
@@ -351,17 +431,19 @@ UNIV_INTERN
ibool
dict_table_is_referenced_by_foreign_key(
/*====================================*/
- const dict_table_t* table); /*!< in: InnoDB table */
+ const dict_table_t* table) /*!< in: InnoDB table */
+ __attribute__((nonnull, warn_unused_result));
/**********************************************************************//**
-Replace the index in the foreign key list that matches this index's
-definition with an equivalent index. */
+Replace the index passed in with another equivalent index in the
+foreign key lists of the table. */
UNIV_INTERN
void
-dict_table_replace_index_in_foreign_list(
-/*=====================================*/
- dict_table_t* table, /*!< in/out: table */
- dict_index_t* index, /*!< in: index to be replaced */
- const trx_t* trx); /*!< in: transaction handle */
+dict_foreign_replace_index(
+/*=======================*/
+ dict_table_t* table, /*!< in/out: table */
+ const dict_index_t* index, /*!< in: index to be replaced */
+ const trx_t* trx) /*!< in: transaction handle */
+ __attribute__((nonnull));
/**********************************************************************//**
Determines whether a string starts with the specified keyword.
@return TRUE if str starts with keyword */
@@ -369,9 +451,10 @@ UNIV_INTERN
ibool
dict_str_starts_with_keyword(
/*=========================*/
- void* mysql_thd, /*!< in: MySQL thread handle */
+ THD* thd, /*!< in: MySQL thread handle */
const char* str, /*!< in: string to scan for keyword */
- const char* keyword); /*!< in: keyword to look for */
+ const char* keyword) /*!< in: keyword to look for */
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Checks if a index is defined for a foreign key constraint. Index is a part
of a foreign key constraint if the index is referenced by foreign key
@@ -383,7 +466,8 @@ dict_foreign_t*
dict_table_get_foreign_constraint(
/*==============================*/
dict_table_t* table, /*!< in: InnoDB table */
- dict_index_t* index); /*!< in: InnoDB index */
+ dict_index_t* index) /*!< in: InnoDB index */
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Scans a table create SQL string and adds to the data dictionary
the foreign key constraints declared in the string. This function
@@ -393,7 +477,7 @@ bot participating tables. The indexes are allowed to contain more
fields than mentioned in the constraint.
@return error code or DB_SUCCESS */
UNIV_INTERN
-ulint
+dberr_t
dict_create_foreign_constraints(
/*============================*/
trx_t* trx, /*!< in: transaction */
@@ -409,15 +493,16 @@ dict_create_foreign_constraints(
const char* name, /*!< in: table full name in the
normalized form
database_name/table_name */
- ibool reject_fks); /*!< in: if TRUE, fail with error
+ ibool reject_fks) /*!< in: if TRUE, fail with error
code DB_CANNOT_ADD_CONSTRAINT if
any foreign keys are found. */
+ __attribute__((nonnull, warn_unused_result));
/**********************************************************************//**
Parses the CONSTRAINT id's to be dropped in an ALTER TABLE statement.
@return DB_SUCCESS or DB_CANNOT_DROP_CONSTRAINT if syntax error or the
constraint id does not match */
UNIV_INTERN
-ulint
+dberr_t
dict_foreign_parse_drop_constraints(
/*================================*/
mem_heap_t* heap, /*!< in: heap from which we can
@@ -426,8 +511,9 @@ dict_foreign_parse_drop_constraints(
dict_table_t* table, /*!< in: table */
ulint* n, /*!< out: number of constraints
to drop */
- const char*** constraints_to_drop); /*!< out: id's of the
+ const char*** constraints_to_drop) /*!< out: id's of the
constraints to drop */
+ __attribute__((nonnull, warn_unused_result));
/**********************************************************************//**
Returns a table object and increments its open handle count.
NOTE! This is a high-level function to be used mainly from outside the
@@ -439,43 +525,40 @@ dict_table_t*
dict_table_open_on_name(
/*====================*/
const char* table_name, /*!< in: table name */
- ibool dict_locked); /*!< in: TRUE=data dictionary locked */
-
-/**********************************************************************//**
-Returns a table object and increment its open handle count. Table
-statistics will not be updated if they are not initialized.
-Call this function when dropping a table.
-@return table, NULL if does not exist */
-UNIV_INTERN
-dict_table_t*
-dict_table_open_on_name_no_stats(
-/*=============================*/
- const char* table_name, /*!< in: table name */
ibool dict_locked, /*!< in: TRUE=data dictionary locked */
+ ibool try_drop, /*!< in: TRUE=try to drop any orphan
+ indexes after an aborted online
+ index creation */
dict_err_ignore_t
- ignore_err); /*!< in: error to be ignored when
+ ignore_err) /*!< in: error to be ignored when
loading the table */
-/**********************************************************************//**
-Find an index that is equivalent to the one passed in and is not marked
-for deletion.
-@return index equivalent to foreign->foreign_index, or NULL */
-UNIV_INTERN
-dict_index_t*
-dict_foreign_find_equiv_index(
-/*==========================*/
- dict_foreign_t* foreign);/*!< in: foreign key */
-/**********************************************************************//**
-Returns an index object by matching on the name and column names and
-if more than one index matches return the index with the max id
+ __attribute__((nonnull, warn_unused_result));
+
+/*********************************************************************//**
+Tries to find an index whose first fields are the columns in the array,
+in the same order and is not marked for deletion and is not the same
+as types_idx.
@return matching index, NULL if not found */
UNIV_INTERN
dict_index_t*
-dict_table_get_index_by_max_id(
-/*===========================*/
- dict_table_t* table, /*!< in: table */
- const char* name, /*!< in: the index name to find */
- const char** columns,/*!< in: array of column names */
- ulint n_cols);/*!< in: number of columns */
+dict_foreign_find_index(
+/*====================*/
+ const dict_table_t* table, /*!< in: table */
+ const char** columns,/*!< in: array of column names */
+ ulint n_cols, /*!< in: number of columns */
+ const dict_index_t* types_idx,
+ /*!< in: NULL or an index
+ whose types the column types
+ must match */
+ ibool check_charsets,
+ /*!< in: whether to check
+ charsets. only has an effect
+ if types_idx != NULL */
+ ulint check_null)
+ /*!< in: nonzero if none of
+ the columns must be declared
+ NOT NULL */
+ __attribute__((nonnull(1,2), warn_unused_result));
/**********************************************************************//**
Returns a column's name.
@return column name. NOTE: not guaranteed to stay valid if table is
@@ -485,29 +568,16 @@ const char*
dict_table_get_col_name(
/*====================*/
const dict_table_t* table, /*!< in: table */
- ulint col_nr);/*!< in: column number */
-
+ ulint col_nr) /*!< in: column number */
+ __attribute__((nonnull, warn_unused_result));
/**********************************************************************//**
-Prints a table definition. */
+Prints the table data. */
UNIV_INTERN
void
dict_table_print(
/*=============*/
- dict_table_t* table); /*!< in: table */
-/**********************************************************************//**
-Prints a table data. */
-UNIV_INTERN
-void
-dict_table_print_low(
-/*=================*/
- dict_table_t* table); /*!< in: table */
-/**********************************************************************//**
-Prints a table data when we know the table name. */
-UNIV_INTERN
-void
-dict_table_print_by_name(
-/*=====================*/
- const char* name); /*!< in: table name */
+ dict_table_t* table) /*!< in: table */
+ __attribute__((nonnull));
/**********************************************************************//**
Outputs info on foreign keys of a table. */
UNIV_INTERN
@@ -520,7 +590,8 @@ dict_print_info_on_foreign_keys(
of SHOW TABLE STATUS */
FILE* file, /*!< in: file where to print */
trx_t* trx, /*!< in: transaction */
- dict_table_t* table); /*!< in: table */
+ dict_table_t* table) /*!< in: table */
+ __attribute__((nonnull));
/**********************************************************************//**
Outputs info on a foreign key of a table in a format suitable for
CREATE TABLE. */
@@ -531,7 +602,8 @@ dict_print_info_on_foreign_key_in_create_format(
FILE* file, /*!< in: file where to print */
trx_t* trx, /*!< in: transaction */
dict_foreign_t* foreign, /*!< in: foreign key constraint */
- ibool add_newline); /*!< in: whether to add a newline */
+ ibool add_newline) /*!< in: whether to add a newline */
+ __attribute__((nonnull(1,3)));
/********************************************************************//**
Displays the names of the index and the table. */
UNIV_INTERN
@@ -539,8 +611,35 @@ void
dict_index_name_print(
/*==================*/
FILE* file, /*!< in: output stream */
- trx_t* trx, /*!< in: transaction */
- const dict_index_t* index); /*!< in: index to print */
+ const trx_t* trx, /*!< in: transaction */
+ const dict_index_t* index) /*!< in: index to print */
+ __attribute__((nonnull(1,3)));
+/*********************************************************************//**
+Checks if the index qualifies as a foreign key index for the columns in
+the array, i.e. its first fields are the columns in the same order, it
+is not marked for deletion and is not the same as types_idx.
+@return true if the index qualifies, otherwise false */
+UNIV_INTERN
+bool
+dict_foreign_qualify_index(
+/*====================*/
+ const dict_table_t* table, /*!< in: table */
+ const char** columns,/*!< in: array of column names */
+ ulint n_cols, /*!< in: number of columns */
+ const dict_index_t* index, /*!< in: index to check */
+ const dict_index_t* types_idx,
+ /*!< in: NULL or an index
+ whose types the column types
+ must match */
+ ibool check_charsets,
+ /*!< in: whether to check
+ charsets. only has an effect
+ if types_idx != NULL */
+ ulint check_null)
+ /*!< in: nonzero if none of
+ the columns must be declared
+ NOT NULL */
+ __attribute__((nonnull(1,2), warn_unused_result));
#ifdef UNIV_DEBUG
/********************************************************************//**
Gets the first index on the table (the clustered index).
@@ -549,7 +648,17 @@ UNIV_INLINE
dict_index_t*
dict_table_get_first_index(
/*=======================*/
- const dict_table_t* table); /*!< in: table */
+ const dict_table_t* table) /*!< in: table */
+ __attribute__((nonnull, warn_unused_result));
+/********************************************************************//**
+Gets the last index on the table.
+@return index, NULL if none exists */
+UNIV_INLINE
+dict_index_t*
+dict_table_get_last_index(
+/*=======================*/
+ const dict_table_t* table) /*!< in: table */
+ __attribute__((nonnull, warn_unused_result));
/********************************************************************//**
Gets the next index on the table.
@return index, NULL if none left */
@@ -557,9 +666,11 @@ UNIV_INLINE
dict_index_t*
dict_table_get_next_index(
/*======================*/
- const dict_index_t* index); /*!< in: index */
+ const dict_index_t* index) /*!< in: index */
+ __attribute__((nonnull, warn_unused_result));
#else /* UNIV_DEBUG */
# define dict_table_get_first_index(table) UT_LIST_GET_FIRST((table)->indexes)
+# define dict_table_get_last_index(table) UT_LIST_GET_LAST((table)->indexes)
# define dict_table_get_next_index(index) UT_LIST_GET_NEXT(indexes, index)
#endif /* UNIV_DEBUG */
#endif /* !UNIV_HOTBACKUP */
@@ -605,15 +716,6 @@ dict_index_is_ibuf(
const dict_index_t* index) /*!< in: index */
__attribute__((nonnull, pure, warn_unused_result));
/********************************************************************//**
-Check whether the index is an universal index tree.
-@return nonzero for universal tree, zero for other indexes */
-UNIV_INLINE
-ulint
-dict_index_is_univ(
-/*===============*/
- const dict_index_t* index) /*!< in: index */
- __attribute__((nonnull, pure, warn_unused_result));
-/********************************************************************//**
Check whether the index is a secondary index or the insert buffer tree.
@return nonzero for insert buffer, zero for other indexes */
UNIV_INLINE
@@ -626,13 +728,14 @@ dict_index_is_sec_or_ibuf(
/************************************************************************
Gets the all the FTS indexes for the table. NOTE: must not be called for
tables which do not have an FTS-index. */
-
+UNIV_INTERN
ulint
dict_table_get_all_fts_indexes(
/*===========================*/
/* out: number of indexes collected */
dict_table_t* table, /* in: table */
- ib_vector_t* indexes);/* out: vector for collecting FTS indexes */
+ ib_vector_t* indexes)/* out: vector for collecting FTS indexes */
+ __attribute__((nonnull));
/********************************************************************//**
Gets the number of user-defined columns in a table in the dictionary
cache.
@@ -662,6 +765,35 @@ dict_table_get_n_cols(
/*==================*/
const dict_table_t* table) /*!< in: table */
__attribute__((nonnull, pure, warn_unused_result));
+/********************************************************************//**
+Gets the approximately estimated number of rows in the table.
+@return estimated number of rows */
+UNIV_INLINE
+ib_uint64_t
+dict_table_get_n_rows(
+/*==================*/
+ const dict_table_t* table) /*!< in: table */
+ __attribute__((nonnull, warn_unused_result));
+/********************************************************************//**
+Increment the number of rows in the table by one.
+Notice that this operation is not protected by any latch, the number is
+approximate. */
+UNIV_INLINE
+void
+dict_table_n_rows_inc(
+/*==================*/
+ dict_table_t* table) /*!< in/out: table */
+ __attribute__((nonnull));
+/********************************************************************//**
+Decrement the number of rows in the table by one.
+Notice that this operation is not protected by any latch, the number is
+approximate. */
+UNIV_INLINE
+void
+dict_table_n_rows_dec(
+/*==================*/
+ dict_table_t* table) /*!< in/out: table */
+ __attribute__((nonnull));
#ifdef UNIV_DEBUG
/********************************************************************//**
Gets the nth column of a table.
@@ -671,7 +803,8 @@ dict_col_t*
dict_table_get_nth_col(
/*===================*/
const dict_table_t* table, /*!< in: table */
- ulint pos); /*!< in: position of column */
+ ulint pos) /*!< in: position of column */
+ __attribute__((nonnull, warn_unused_result));
/********************************************************************//**
Gets the given system column of a table.
@return pointer to column object */
@@ -680,7 +813,8 @@ dict_col_t*
dict_table_get_sys_col(
/*===================*/
const dict_table_t* table, /*!< in: table */
- ulint sys); /*!< in: DATA_ROW_ID, ... */
+ ulint sys) /*!< in: DATA_ROW_ID, ... */
+ __attribute__((nonnull, warn_unused_result));
#else /* UNIV_DEBUG */
#define dict_table_get_nth_col(table, pos) \
((table)->cols + (pos))
@@ -695,7 +829,8 @@ ulint
dict_table_get_sys_col_no(
/*======================*/
const dict_table_t* table, /*!< in: table */
- ulint sys); /*!< in: DATA_ROW_ID, ... */
+ ulint sys) /*!< in: DATA_ROW_ID, ... */
+ __attribute__((nonnull, warn_unused_result));
#ifndef UNIV_HOTBACKUP
/********************************************************************//**
Returns the minimum data size of an index record.
@@ -704,7 +839,8 @@ UNIV_INLINE
ulint
dict_index_get_min_size(
/*====================*/
- const dict_index_t* index); /*!< in: index */
+ const dict_index_t* index) /*!< in: index */
+ __attribute__((nonnull, warn_unused_result));
#endif /* !UNIV_HOTBACKUP */
/********************************************************************//**
Check whether the table uses the compact page format.
@@ -713,7 +849,8 @@ UNIV_INLINE
ibool
dict_table_is_comp(
/*===============*/
- const dict_table_t* table); /*!< in: table */
+ const dict_table_t* table) /*!< in: table */
+ __attribute__((nonnull, warn_unused_result));
/********************************************************************//**
Determine the file format of a table.
@return file format version */
@@ -721,7 +858,8 @@ UNIV_INLINE
ulint
dict_table_get_format(
/*==================*/
- const dict_table_t* table); /*!< in: table */
+ const dict_table_t* table) /*!< in: table */
+ __attribute__((nonnull, warn_unused_result));
/********************************************************************//**
Determine the file format from a dict_table_t::flags.
@return file format version */
@@ -729,7 +867,8 @@ UNIV_INLINE
ulint
dict_tf_get_format(
/*===============*/
- ulint flags); /*!< in: dict_table_t::flags */
+ ulint flags) /*!< in: dict_table_t::flags */
+ __attribute__((warn_unused_result));
/********************************************************************//**
Set the various values in a dict_table_t::flags pointer. */
UNIV_INLINE
@@ -738,7 +877,9 @@ dict_tf_set(
/*========*/
ulint* flags, /*!< in/out: table */
rec_format_t format, /*!< in: file format */
- ulint zip_ssize); /*!< in: zip shift size */
+ ulint zip_ssize, /*!< in: zip shift size */
+ bool remote_path) /*!< in: table uses DATA DIRECTORY */
+ __attribute__((nonnull));
/********************************************************************//**
Convert a 32 bit integer table flags to the 32 bit integer that is
written into the tablespace header at the offset FSP_SPACE_FLAGS and is
@@ -756,13 +897,6 @@ dict_tf_to_fsp_flags(
/*=================*/
ulint flags) /*!< in: dict_table_t::flags */
__attribute__((const));
-/********************************************************************/
-UNIV_INLINE
-ulint
-dict_tf_to_sys_tables_type(
-/*=======================*/
- ulint flags) /*!< in: dict_table_t::flags */
- __attribute__((const));
/********************************************************************//**
Extract the compressed page size from table flags.
@return compressed page size, or 0 if not compressed */
@@ -779,7 +913,8 @@ UNIV_INLINE
ulint
dict_table_zip_size(
/*================*/
- const dict_table_t* table); /*!< in: table */
+ const dict_table_t* table) /*!< in: table */
+ __attribute__((nonnull, warn_unused_result));
#ifndef UNIV_HOTBACKUP
/*********************************************************************//**
Obtain exclusive locks on all index trees of the table. This is to prevent
@@ -789,15 +924,16 @@ UNIV_INLINE
void
dict_table_x_lock_indexes(
/*======================*/
- dict_table_t* table); /*!< in: table */
+ dict_table_t* table) /*!< in: table */
+ __attribute__((nonnull));
/*********************************************************************//**
Release the exclusive locks on all index tree. */
UNIV_INLINE
void
dict_table_x_unlock_indexes(
/*========================*/
- dict_table_t* table); /*!< in: table */
-#endif /* !UNIV_HOTBACKUP */
+ dict_table_t* table) /*!< in: table */
+ __attribute__((nonnull));
/********************************************************************//**
Checks if a column is in the ordering columns of the clustered index of a
table. Column prefixes are treated like whole columns.
@@ -807,7 +943,8 @@ ibool
dict_table_col_in_clustered_key(
/*============================*/
const dict_table_t* table, /*!< in: table */
- ulint n); /*!< in: column number */
+ ulint n) /*!< in: column number */
+ __attribute__((nonnull, warn_unused_result));
/*******************************************************************//**
Check if the table has an FTS index.
@return TRUE if table has an FTS index */
@@ -815,36 +952,8 @@ UNIV_INLINE
ibool
dict_table_has_fts_index(
/*=====================*/
- dict_table_t* table); /*!< in: table */
-/*******************************************************************//**
-Validate and return the table flags.
-@return Same as input after validating it as dict_table_t::flags.
-If there is an error, trigger assertion failure. */
-UNIV_INLINE
-ulint
-dict_tf_validate(
-/*=============*/
- ulint flags); /*!< in: table flags */
-/********************************************************************//**
-Validate a SYS_TABLES TYPE field and return it.
-@return Same as input after validating it as a SYS_TABLES TYPE field.
-If there is an error, return ULINT_UNDEFINED. */
-UNIV_INLINE
-ulint
-dict_sys_tables_type_validate(
-/*==========================*/
- ulint type, /*!< in: SYS_TABLES.TYPE */
- ulint n_cols); /*!< in: SYS_TABLES.N_COLS */
-/********************************************************************//**
-Determine the file format from dict_table_t::flags
-The low order bit will be zero for REDUNDANT and 1 for COMPACT. For any
-other row_format, file_format is > 0 and DICT_TF_COMPACT will also be set.
-@return file format version */
-UNIV_INLINE
-rec_format_t
-dict_tf_get_rec_format(
-/*===================*/
- ulint flags); /*!< in: dict_table_t::flags */
+ dict_table_t* table) /*!< in: table */
+ __attribute__((nonnull, warn_unused_result));
/*******************************************************************//**
Copies types of columns contained in table to tuple and sets all
fields of the tuple to the SQL NULL value. This function should
@@ -854,18 +963,20 @@ void
dict_table_copy_types(
/*==================*/
dtuple_t* tuple, /*!< in/out: data tuple */
- const dict_table_t* table); /*!< in: table */
+ const dict_table_t* table) /*!< in: table */
+ __attribute__((nonnull));
/********************************************************************
Wait until all the background threads of the given table have exited, i.e.,
bg_threads == 0. Note: bg_threads_mutex must be reserved when
calling this. */
-
+UNIV_INTERN
void
dict_table_wait_for_bg_threads_to_exit(
/*===================================*/
dict_table_t* table, /* in: table */
- ulint delay); /* in: time in microseconds to wait between
+ ulint delay) /* in: time in microseconds to wait between
checks of bg_threads. */
+ __attribute__((nonnull));
/**********************************************************************//**
Looks for an index with the given id. NOTE that we do not reserve
the dictionary mutex: this function is for emergency purposes like
@@ -875,7 +986,8 @@ UNIV_INTERN
dict_index_t*
dict_index_find_on_id_low(
/*======================*/
- index_id_t id); /*!< in: index id */
+ index_id_t id) /*!< in: index id */
+ __attribute__((warn_unused_result));
/**********************************************************************//**
Make room in the table cache by evicting an unused table. The unused table
should not be part of FK relationship and currently not used in any user
@@ -891,16 +1003,17 @@ dict_make_room_in_cache(
Adds an index to the dictionary cache.
@return DB_SUCCESS, DB_TOO_BIG_RECORD, or DB_CORRUPTION */
UNIV_INTERN
-ulint
+dberr_t
dict_index_add_to_cache(
/*====================*/
dict_table_t* table, /*!< in: table on which the index is */
dict_index_t* index, /*!< in, own: index; NOTE! The index memory
object is freed in this function! */
ulint page_no,/*!< in: root page number of the index */
- ibool strict);/*!< in: TRUE=refuse to create the index
+ ibool strict) /*!< in: TRUE=refuse to create the index
if records could be too big to fit in
an B-tree page */
+ __attribute__((nonnull, warn_unused_result));
/**********************************************************************//**
Removes an index from the dictionary cache. */
UNIV_INTERN
@@ -908,8 +1021,9 @@ void
dict_index_remove_from_cache(
/*=========================*/
dict_table_t* table, /*!< in/out: table */
- dict_index_t* index); /*!< in, own: index */
-
+ dict_index_t* index) /*!< in, own: index */
+ __attribute__((nonnull));
+#endif /* !UNIV_HOTBACKUP */
/********************************************************************//**
Gets the number of fields in the internal representation of an index,
including fields added by the dictionary system.
@@ -918,9 +1032,10 @@ UNIV_INLINE
ulint
dict_index_get_n_fields(
/*====================*/
- const dict_index_t* index); /*!< in: an internal
+ const dict_index_t* index) /*!< in: an internal
representation of index (in
the dictionary cache) */
+ __attribute__((nonnull, warn_unused_result));
/********************************************************************//**
Gets the number of fields in the internal representation of an index
that uniquely determine the position of an index entry in the index, if
@@ -931,8 +1046,9 @@ UNIV_INLINE
ulint
dict_index_get_n_unique(
/*====================*/
- const dict_index_t* index); /*!< in: an internal representation
+ const dict_index_t* index) /*!< in: an internal representation
of index (in the dictionary cache) */
+ __attribute__((nonnull, warn_unused_result));
/********************************************************************//**
Gets the number of fields in the internal representation of an index
which uniquely determine the position of an index entry in the index, if
@@ -942,8 +1058,9 @@ UNIV_INLINE
ulint
dict_index_get_n_unique_in_tree(
/*============================*/
- const dict_index_t* index); /*!< in: an internal representation
+ const dict_index_t* index) /*!< in: an internal representation
of index (in the dictionary cache) */
+ __attribute__((nonnull, warn_unused_result));
/********************************************************************//**
Gets the number of user-defined ordering fields in the index. In the internal
representation we add the row id to the ordering fields to make all indexes
@@ -954,8 +1071,9 @@ UNIV_INLINE
ulint
dict_index_get_n_ordering_defined_by_user(
/*======================================*/
- const dict_index_t* index); /*!< in: an internal representation
+ const dict_index_t* index) /*!< in: an internal representation
of index (in the dictionary cache) */
+ __attribute__((nonnull, warn_unused_result));
#ifdef UNIV_DEBUG
/********************************************************************//**
Gets the nth field of an index.
@@ -965,7 +1083,8 @@ dict_field_t*
dict_index_get_nth_field(
/*=====================*/
const dict_index_t* index, /*!< in: index */
- ulint pos); /*!< in: position of field */
+ ulint pos) /*!< in: position of field */
+ __attribute__((nonnull, warn_unused_result));
#else /* UNIV_DEBUG */
# define dict_index_get_nth_field(index, pos) ((index)->fields + (pos))
#endif /* UNIV_DEBUG */
@@ -977,7 +1096,8 @@ const dict_col_t*
dict_index_get_nth_col(
/*===================*/
const dict_index_t* index, /*!< in: index */
- ulint pos); /*!< in: position of the field */
+ ulint pos) /*!< in: position of the field */
+ __attribute__((nonnull, warn_unused_result));
/********************************************************************//**
Gets the column number of the nth field in an index.
@return column number */
@@ -986,7 +1106,8 @@ ulint
dict_index_get_nth_col_no(
/*======================*/
const dict_index_t* index, /*!< in: index */
- ulint pos); /*!< in: position of the field */
+ ulint pos) /*!< in: position of the field */
+ __attribute__((nonnull, warn_unused_result));
/********************************************************************//**
Looks for column n in an index.
@return position in internal representation of the index;
@@ -996,7 +1117,8 @@ ulint
dict_index_get_nth_col_pos(
/*=======================*/
const dict_index_t* index, /*!< in: index */
- ulint n); /*!< in: column number */
+ ulint n) /*!< in: column number */
+ __attribute__((nonnull, warn_unused_result));
/********************************************************************//**
Looks for column n in an index.
@return position in internal representation of the index;
@@ -1007,8 +1129,9 @@ dict_index_get_nth_col_or_prefix_pos(
/*=================================*/
const dict_index_t* index, /*!< in: index */
ulint n, /*!< in: column number */
- ibool inc_prefix); /*!< in: TRUE=consider
+ ibool inc_prefix) /*!< in: TRUE=consider
column prefixes too */
+ __attribute__((nonnull, warn_unused_result));
/********************************************************************//**
Returns TRUE if the index contains a column or a prefix of that column.
@return TRUE if contains the column or its prefix */
@@ -1017,7 +1140,8 @@ ibool
dict_index_contains_col_or_prefix(
/*==============================*/
const dict_index_t* index, /*!< in: index */
- ulint n); /*!< in: column number */
+ ulint n) /*!< in: column number */
+ __attribute__((nonnull, warn_unused_result));
/********************************************************************//**
Looks for a matching field in an index. The column has to be the same. The
column in index must be complete, or must contain a prefix longer than the
@@ -1031,7 +1155,8 @@ dict_index_get_nth_field_pos(
/*=========================*/
const dict_index_t* index, /*!< in: index from which to search */
const dict_index_t* index2, /*!< in: index */
- ulint n); /*!< in: field number in index2 */
+ ulint n) /*!< in: field number in index2 */
+ __attribute__((nonnull, warn_unused_result));
/********************************************************************//**
Looks for column n position in the clustered index.
@return position in internal representation of the clustered index */
@@ -1040,7 +1165,8 @@ ulint
dict_table_get_nth_col_pos(
/*=======================*/
const dict_table_t* table, /*!< in: table */
- ulint n); /*!< in: column number */
+ ulint n) /*!< in: column number */
+ __attribute__((nonnull, warn_unused_result));
/********************************************************************//**
Returns the position of a system column in an index.
@return position, ULINT_UNDEFINED if not contained */
@@ -1049,7 +1175,8 @@ ulint
dict_index_get_sys_col_pos(
/*=======================*/
const dict_index_t* index, /*!< in: index */
- ulint type); /*!< in: DATA_ROW_ID, ... */
+ ulint type) /*!< in: DATA_ROW_ID, ... */
+ __attribute__((nonnull, warn_unused_result));
/*******************************************************************//**
Adds a column to index. */
UNIV_INTERN
@@ -1059,7 +1186,8 @@ dict_index_add_col(
dict_index_t* index, /*!< in/out: index */
const dict_table_t* table, /*!< in: table */
dict_col_t* col, /*!< in: column */
- ulint prefix_len); /*!< in: column prefix length */
+ ulint prefix_len) /*!< in: column prefix length */
+ __attribute__((nonnull));
#ifndef UNIV_HOTBACKUP
/*******************************************************************//**
Copies types of fields contained in index to tuple. */
@@ -1069,8 +1197,9 @@ dict_index_copy_types(
/*==================*/
dtuple_t* tuple, /*!< in/out: data tuple */
const dict_index_t* index, /*!< in: index */
- ulint n_fields); /*!< in: number of
+ ulint n_fields) /*!< in: number of
field types to copy */
+ __attribute__((nonnull));
#endif /* !UNIV_HOTBACKUP */
/*********************************************************************//**
Gets the field column.
@@ -1079,7 +1208,8 @@ UNIV_INLINE
const dict_col_t*
dict_field_get_col(
/*===============*/
- const dict_field_t* field); /*!< in: index field */
+ const dict_field_t* field) /*!< in: index field */
+ __attribute__((nonnull, warn_unused_result));
#ifndef UNIV_HOTBACKUP
/**********************************************************************//**
Returns an index object if it is found in the dictionary cache.
@@ -1089,7 +1219,8 @@ UNIV_INTERN
dict_index_t*
dict_index_get_if_in_cache_low(
/*===========================*/
- index_id_t index_id); /*!< in: index id */
+ index_id_t index_id) /*!< in: index id */
+ __attribute__((warn_unused_result));
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/**********************************************************************//**
Returns an index object if it is found in the dictionary cache.
@@ -1098,7 +1229,8 @@ UNIV_INTERN
dict_index_t*
dict_index_get_if_in_cache(
/*=======================*/
- index_id_t index_id); /*!< in: index id */
+ index_id_t index_id) /*!< in: index id */
+ __attribute__((warn_unused_result));
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
#ifdef UNIV_DEBUG
/**********************************************************************//**
@@ -1110,7 +1242,17 @@ ibool
dict_index_check_search_tuple(
/*==========================*/
const dict_index_t* index, /*!< in: index tree */
- const dtuple_t* tuple); /*!< in: tuple used in a search */
+ const dtuple_t* tuple) /*!< in: tuple used in a search */
+ __attribute__((nonnull, warn_unused_result));
+/** Whether and when to allow temporary index names */
+enum check_name {
+ /** Require all indexes to be complete. */
+ CHECK_ALL_COMPLETE,
+ /** Allow aborted online index creation. */
+ CHECK_ABORTED_OK,
+ /** Allow partial indexes to exist. */
+ CHECK_PARTIAL_OK
+};
/**********************************************************************//**
Check for duplicate index entries in a table [using the index name] */
UNIV_INTERN
@@ -1119,8 +1261,9 @@ dict_table_check_for_dup_indexes(
/*=============================*/
const dict_table_t* table, /*!< in: Check for dup indexes
in this table */
- ibool tmp_ok);/*!< in: TRUE=allow temporary
- index names */
+ enum check_name check) /*!< in: whether and when to allow
+ temporary index names */
+ __attribute__((nonnull));
#endif /* UNIV_DEBUG */
/**********************************************************************//**
Builds a node pointer out of a physical record and a page number.
@@ -1136,8 +1279,9 @@ dict_index_build_node_ptr(
pointer */
mem_heap_t* heap, /*!< in: memory heap where pointer
created */
- ulint level); /*!< in: level of rec in tree:
+ ulint level) /*!< in: level of rec in tree:
0 means leaf level */
+ __attribute__((nonnull, warn_unused_result));
/**********************************************************************//**
Copies an initial segment of a physical record, long enough to specify an
index entry uniquely.
@@ -1152,7 +1296,8 @@ dict_index_copy_rec_order_prefix(
ulint* n_fields,/*!< out: number of fields copied */
byte** buf, /*!< in/out: memory buffer for the
copied prefix, or NULL */
- ulint* buf_size);/*!< in/out: buffer size */
+ ulint* buf_size)/*!< in/out: buffer size */
+ __attribute__((nonnull, warn_unused_result));
/**********************************************************************//**
Builds a typed data tuple out of a physical record.
@return own: data tuple */
@@ -1163,7 +1308,8 @@ dict_index_build_data_tuple(
dict_index_t* index, /*!< in: index */
rec_t* rec, /*!< in: record for which to build data tuple */
ulint n_fields,/*!< in: number of data fields */
- mem_heap_t* heap); /*!< in: memory heap where tuple created */
+ mem_heap_t* heap) /*!< in: memory heap where tuple created */
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Gets the space id of the root of the index tree.
@return space id */
@@ -1171,7 +1317,8 @@ UNIV_INLINE
ulint
dict_index_get_space(
/*=================*/
- const dict_index_t* index); /*!< in: index */
+ const dict_index_t* index) /*!< in: index */
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Sets the space id of the root of the index tree. */
UNIV_INLINE
@@ -1179,7 +1326,8 @@ void
dict_index_set_space(
/*=================*/
dict_index_t* index, /*!< in/out: index */
- ulint space); /*!< in: space id */
+ ulint space) /*!< in: space id */
+ __attribute__((nonnull));
/*********************************************************************//**
Gets the page number of the root of the index tree.
@return page number */
@@ -1187,7 +1335,8 @@ UNIV_INLINE
ulint
dict_index_get_page(
/*================*/
- const dict_index_t* tree); /*!< in: index */
+ const dict_index_t* tree) /*!< in: index */
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Gets the read-write lock of the index tree.
@return read-write lock */
@@ -1195,7 +1344,8 @@ UNIV_INLINE
rw_lock_t*
dict_index_get_lock(
/*================*/
- dict_index_t* index); /*!< in: index */
+ dict_index_t* index) /*!< in: index */
+ __attribute__((nonnull, warn_unused_result));
/********************************************************************//**
Returns free space reserved for future updates of records. This is
relevant only in the case of many consecutive inserts, as updates
@@ -1205,13 +1355,48 @@ UNIV_INLINE
ulint
dict_index_get_space_reserve(void);
/*==============================*/
+
+/* Online index creation @{ */
+/********************************************************************//**
+Gets the status of online index creation.
+@return the status */
+UNIV_INLINE
+enum online_index_status
+dict_index_get_online_status(
+/*=========================*/
+ const dict_index_t* index) /*!< in: secondary index */
+ __attribute__((nonnull, warn_unused_result));
+/********************************************************************//**
+Sets the status of online index creation. */
+UNIV_INLINE
+void
+dict_index_set_online_status(
+/*=========================*/
+ dict_index_t* index, /*!< in/out: index */
+ enum online_index_status status) /*!< in: status */
+ __attribute__((nonnull));
+/********************************************************************//**
+Determines if a secondary index is being or has been created online,
+or if the table is being rebuilt online, allowing concurrent modifications
+to the table.
+@retval true if the index is being or has been built online, or
+if this is a clustered index and the table is being or has been rebuilt online
+@retval false if the index has been created or the table has been
+rebuilt completely */
+UNIV_INLINE
+bool
+dict_index_is_online_ddl(
+/*=====================*/
+ const dict_index_t* index) /*!< in: index */
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Calculates the minimum record length in an index. */
UNIV_INTERN
ulint
dict_index_calc_min_rec_len(
/*========================*/
- const dict_index_t* index); /*!< in: index */
+ const dict_index_t* index) /*!< in: index */
+ __attribute__((nonnull, warn_unused_result));
/********************************************************************//**
Reserves the dictionary system mutex for MySQL. */
UNIV_INTERN
@@ -1233,8 +1418,9 @@ void
dict_table_stats_lock(
/*==================*/
const dict_table_t* table, /*!< in: table */
- ulint latch_mode); /*!< in: RW_S_LATCH or
+ ulint latch_mode) /*!< in: RW_S_LATCH or
RW_X_LATCH */
+ __attribute__((nonnull));
/**********************************************************************//**
Unlock the latch that has been locked by dict_table_stats_lock() */
UNIV_INTERN
@@ -1242,8 +1428,9 @@ void
dict_table_stats_unlock(
/*====================*/
const dict_table_t* table, /*!< in: table */
- ulint latch_mode); /*!< in: RW_S_LATCH or
+ ulint latch_mode) /*!< in: RW_S_LATCH or
RW_X_LATCH */
+ __attribute__((nonnull));
/********************************************************************//**
Checks if the database name in two table names is the same.
@return TRUE if same db name */
@@ -1253,8 +1440,9 @@ dict_tables_have_same_db(
/*=====================*/
const char* name1, /*!< in: table name in the form
dbname '/' tablename */
- const char* name2); /*!< in: table name in the form
+ const char* name2) /*!< in: table name in the form
dbname '/' tablename */
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Removes an index from the cache */
UNIV_INTERN
@@ -1262,7 +1450,8 @@ void
dict_index_remove_from_cache(
/*=========================*/
dict_table_t* table, /*!< in/out: table */
- dict_index_t* index); /*!< in, own: index */
+ dict_index_t* index) /*!< in, own: index */
+ __attribute__((nonnull));
/**********************************************************************//**
Get index by name
@return index, NULL if does not exist */
@@ -1271,7 +1460,8 @@ dict_index_t*
dict_table_get_index_on_name(
/*=========================*/
dict_table_t* table, /*!< in: table */
- const char* name); /*!< in: name of the index to find */
+ const char* name) /*!< in: name of the index to find */
+ __attribute__((nonnull, warn_unused_result));
/**********************************************************************//**
In case there is more than one index with the same name return the index
with the min(id).
@@ -1281,7 +1471,8 @@ dict_index_t*
dict_table_get_index_on_name_and_min_id(
/*====================================*/
dict_table_t* table, /*!< in: table */
- const char* name); /*!< in: name of the index to find */
+ const char* name) /*!< in: name of the index to find */
+ __attribute__((nonnull, warn_unused_result));
/***************************************************************
Check whether a column exists in an FTS index. */
UNIV_INLINE
@@ -1291,32 +1482,42 @@ dict_table_is_fts_column(
/* out: ULINT_UNDEFINED if no match else
the offset within the vector */
ib_vector_t* indexes,/* in: vector containing only FTS indexes */
- ulint col_no);/* in: col number to search for */
+ ulint col_no) /* in: col number to search for */
+ __attribute__((nonnull, warn_unused_result));
/**********************************************************************//**
Move a table to the non LRU end of the LRU list. */
UNIV_INTERN
void
dict_table_move_from_lru_to_non_lru(
/*================================*/
- dict_table_t* table); /*!< in: table to move from LRU to non-LRU */
+ dict_table_t* table) /*!< in: table to move from LRU to non-LRU */
+ __attribute__((nonnull));
/**********************************************************************//**
Move a table to the LRU list from the non-LRU list. */
UNIV_INTERN
void
dict_table_move_from_non_lru_to_lru(
/*================================*/
- dict_table_t* table); /*!< in: table to move from non-LRU to LRU */
+ dict_table_t* table) /*!< in: table to move from non-LRU to LRU */
+ __attribute__((nonnull));
/**********************************************************************//**
Move to the most recently used segment of the LRU list. */
UNIV_INTERN
void
dict_move_to_mru(
/*=============*/
- dict_table_t* table); /*!< in: table to move to MRU */
+ dict_table_t* table) /*!< in: table to move to MRU */
+ __attribute__((nonnull));
+
+/** Maximum number of columns in a foreign key constraint. Please Note MySQL
+has a much lower limit on the number of columns allowed in a foreign key
+constraint */
+#define MAX_NUM_FK_COLUMNS 500
+
/* Buffers for storing detailed information about the latest foreign key
and unique key errors */
extern FILE* dict_foreign_err_file;
-extern mutex_t dict_foreign_err_mutex; /* mutex protecting the buffers */
+extern ib_mutex_t dict_foreign_err_mutex; /* mutex protecting the buffers */
/** the dictionary system */
extern dict_sys_t* dict_sys;
@@ -1324,8 +1525,8 @@ extern dict_sys_t* dict_sys;
extern rw_lock_t dict_operation_lock;
/* Dictionary system struct */
-struct dict_sys_struct{
- mutex_t mutex; /*!< mutex protecting the data
+struct dict_sys_t{
+ ib_mutex_t mutex; /*!< mutex protecting the data
dictionary; protects also the
disk-based dictionary system tables;
this mutex serializes CREATE TABLE
@@ -1376,7 +1577,7 @@ dict_ind_init(void);
/* This struct is used to specify the name and type that a column must
have when checking a table's schema. */
-struct dict_col_meta_struct {
+struct dict_col_meta_t {
const char* name; /* column name */
ulint mtype; /* required column main type */
ulint prtype_mask; /* required column precise type mask;
@@ -1385,12 +1586,11 @@ struct dict_col_meta_struct {
in the column's prtype */
ulint len; /* required column length */
};
-typedef struct dict_col_meta_struct dict_col_meta_t;
/* This struct is used for checking whether a given table exists and
whether it has a predefined schema (number of columns and columns names
and types) */
-struct dict_table_schema_struct {
+struct dict_table_schema_t {
const char* table_name; /* the name of the table whose
structure we are checking */
ulint n_cols; /* the number of columns the
@@ -1398,8 +1598,15 @@ struct dict_table_schema_struct {
dict_col_meta_t* columns; /* metadata for the columns;
this array has n_cols
elements */
+ ulint n_foreign; /* number of foreign keys this
+ table has, pointing to other
+ tables (where this table is
+ FK child) */
+ ulint n_referenced; /* number of foreign keys other
+ tables have, pointing to this
+ table (where this table is
+ parent) */
};
-typedef struct dict_table_schema_struct dict_table_schema_t;
/* @} */
/*********************************************************************//**
@@ -1410,7 +1617,7 @@ The caller must own the dictionary mutex.
dict_table_schema_check() @{
@return DB_SUCCESS if the table exists and contains the necessary columns */
UNIV_INTERN
-enum db_err
+dberr_t
dict_table_schema_check(
/*====================*/
dict_table_schema_t* req_schema, /*!< in/out: required table
@@ -1419,9 +1626,27 @@ dict_table_schema_check(
message if != DB_SUCCESS and
!= DB_TABLE_NOT_FOUND is
returned */
- size_t errstr_sz); /*!< in: errstr size */
+ size_t errstr_sz) /*!< in: errstr size */
+ __attribute__((nonnull, warn_unused_result));
/* @} */
+/*********************************************************************//**
+Converts a database and table name from filesystem encoding
+(e.g. d@i1b/a@q1b@1Kc, same format as used in dict_table_t::name) in two
+strings in UTF8 encoding (e.g. dцb and aюbØc). The output buffers must be
+at least MAX_DB_UTF8_LEN and MAX_TABLE_UTF8_LEN bytes. */
+UNIV_INTERN
+void
+dict_fs2utf8(
+/*=========*/
+ const char* db_and_table, /*!< in: database and table names,
+ e.g. d@i1b/a@q1b@1Kc */
+ char* db_utf8, /*!< out: database name, e.g. dцb */
+ size_t db_utf8_size, /*!< in: dbname_utf8 size */
+ char* table_utf8, /*!< out: table name, e.g. aюbØc */
+ size_t table_utf8_size)/*!< in: table_utf8 size */
+ __attribute__((nonnull));
+
/**********************************************************************//**
Closes the data dictionary module. */
UNIV_INTERN
@@ -1437,7 +1662,7 @@ ulint
dict_table_is_corrupted(
/*====================*/
const dict_table_t* table) /*!< in: table */
- __attribute__((nonnull, pure, warn_unused_result));
+ __attribute__((nonnull, warn_unused_result));
/**********************************************************************//**
Check whether the index is corrupted.
@@ -1447,7 +1672,7 @@ ulint
dict_index_is_corrupted(
/*====================*/
const dict_index_t* index) /*!< in: index */
- __attribute__((nonnull, pure, warn_unused_result));
+ __attribute__((nonnull, warn_unused_result));
#endif /* !UNIV_HOTBACKUP */
/**********************************************************************//**
@@ -1457,7 +1682,9 @@ UNIV_INTERN
void
dict_set_corrupted(
/*===============*/
- dict_index_t* index) /*!< in/out: index */
+ dict_index_t* index, /*!< in/out: index */
+ trx_t* trx, /*!< in/out: transaction */
+ const char* ctx) /*!< in: context */
UNIV_COLD __attribute__((nonnull));
/**********************************************************************//**
@@ -1469,7 +1696,8 @@ void
dict_set_corrupted_index_cache_only(
/*================================*/
dict_index_t* index, /*!< in/out: index */
- dict_table_t* table); /*!< in/out: table */
+ dict_table_t* table) /*!< in/out: table */
+ __attribute__((nonnull));
/**********************************************************************//**
Flags a table with specified space_id corrupted in the table dictionary
@@ -1481,6 +1709,76 @@ dict_set_corrupted_by_space(
/*========================*/
ulint space_id); /*!< in: space ID */
+/********************************************************************//**
+Validate the table flags.
+@return true if valid. */
+UNIV_INLINE
+bool
+dict_tf_is_valid(
+/*=============*/
+ ulint flags) /*!< in: table flags */
+ __attribute__((warn_unused_result));
+
+/********************************************************************//**
+Check if the tablespace for the table has been discarded.
+@return true if the tablespace has been discarded. */
+UNIV_INLINE
+bool
+dict_table_is_discarded(
+/*====================*/
+ const dict_table_t* table) /*!< in: table to check */
+ __attribute__((nonnull, pure, warn_unused_result));
+
+/********************************************************************//**
+Check if it is a temporary table.
+@return true if temporary table flag is set. */
+UNIV_INLINE
+bool
+dict_table_is_temporary(
+/*====================*/
+ const dict_table_t* table) /*!< in: table to check */
+ __attribute__((nonnull, pure, warn_unused_result));
+
+#ifndef UNIV_HOTBACKUP
+/*********************************************************************//**
+This function should be called whenever a page is successfully
+compressed. Updates the compression padding information. */
+UNIV_INTERN
+void
+dict_index_zip_success(
+/*===================*/
+ dict_index_t* index) /*!< in/out: index to be updated. */
+ __attribute__((nonnull));
+/*********************************************************************//**
+This function should be called whenever a page compression attempt
+fails. Updates the compression padding information. */
+UNIV_INTERN
+void
+dict_index_zip_failure(
+/*===================*/
+ dict_index_t* index) /*!< in/out: index to be updated. */
+ __attribute__((nonnull));
+/*********************************************************************//**
+Return the optimal page size, for which page will likely compress.
+@return page size beyond which page may not compress*/
+UNIV_INTERN
+ulint
+dict_index_zip_pad_optimal_page_size(
+/*=================================*/
+ dict_index_t* index) /*!< in: index for which page size
+ is requested */
+ __attribute__((nonnull, warn_unused_result));
+/*************************************************************//**
+Convert table flag to row format string.
+@return row format name */
+UNIV_INTERN
+const char*
+dict_tf_to_row_format_string(
+/*=========================*/
+ ulint table_flag); /*!< in: row format setting */
+
+#endif /* !UNIV_HOTBACKUP */
+
#ifndef UNIV_NONINL
#include "dict0dict.ic"
#endif
diff --git a/storage/innobase/include/dict0dict.ic b/storage/innobase/include/dict0dict.ic
index f6585ea8205..83953c9325a 100644
--- a/storage/innobase/include/dict0dict.ic
+++ b/storage/innobase/include/dict0dict.ic
@@ -29,6 +29,7 @@ Created 1/8/1996 Heikki Tuuri
#include "rem0types.h"
#include "fsp0fsp.h"
#include "srv0srv.h"
+#include "sync0rw.h" /* RW_S_LATCH */
/*********************************************************************//**
Gets the minimum number of bytes per character.
@@ -223,6 +224,22 @@ dict_table_get_first_index(
}
/********************************************************************//**
+Gets the last index on the table.
+@return index, NULL if none exists */
+UNIV_INLINE
+dict_index_t*
+dict_table_get_last_index(
+/*=======================*/
+ const dict_table_t* table) /*!< in: table */
+{
+ ut_ad(table);
+ ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
+
+ return(UT_LIST_GET_LAST((const_cast<dict_table_t*>(table))
+ ->indexes));
+}
+
+/********************************************************************//**
Gets the next index on the table.
@return index, NULL if none left */
UNIV_INLINE
@@ -365,6 +382,56 @@ dict_table_get_n_cols(
return(table->n_cols);
}
+/********************************************************************//**
+Gets the approximately estimated number of rows in the table.
+@return estimated number of rows */
+UNIV_INLINE
+ib_uint64_t
+dict_table_get_n_rows(
+/*==================*/
+ const dict_table_t* table) /*!< in: table */
+{
+ ut_ad(table->stat_initialized);
+
+ return(table->stat_n_rows);
+}
+
+/********************************************************************//**
+Increment the number of rows in the table by one.
+Notice that this operation is not protected by any latch, the number is
+approximate. */
+UNIV_INLINE
+void
+dict_table_n_rows_inc(
+/*==================*/
+ dict_table_t* table) /*!< in/out: table */
+{
+ if (table->stat_initialized) {
+ ib_uint64_t n_rows = table->stat_n_rows;
+ if (n_rows < 0xFFFFFFFFFFFFFFFFULL) {
+ table->stat_n_rows = n_rows + 1;
+ }
+ }
+}
+
+/********************************************************************//**
+Decrement the number of rows in the table by one.
+Notice that this operation is not protected by any latch, the number is
+approximate. */
+UNIV_INLINE
+void
+dict_table_n_rows_dec(
+/*==================*/
+ dict_table_t* table) /*!< in/out: table */
+{
+ if (table->stat_initialized) {
+ ib_uint64_t n_rows = table->stat_n_rows;
+ if (n_rows > 0) {
+ table->stat_n_rows = n_rows - 1;
+ }
+ }
+}
+
#ifdef UNIV_DEBUG
/********************************************************************//**
Gets the nth column of a table.
@@ -458,12 +525,11 @@ dict_table_has_fts_index(
}
/********************************************************************//**
-Validate and return the table flags.
-@return Same as input after validating it as dict_table_t::flags.
-If there is an error, trigger assertion failure. */
+Validate the table flags.
+@return true if valid. */
UNIV_INLINE
-ulint
-dict_tf_validate(
+bool
+dict_tf_is_valid(
/*=============*/
ulint flags) /*!< in: table flags */
{
@@ -473,31 +539,43 @@ dict_tf_validate(
ulint unused = DICT_TF_GET_UNUSED(flags);
/* Make sure there are no bits that we do not know about. */
- ut_a(unused == 0);
+ if (unused != 0) {
- if (atomic_blobs) {
+ return(false);
+
+ } else if (atomic_blobs) {
/* Barracuda row formats COMPRESSED and DYNAMIC build on
the page structure introduced for the COMPACT row format
by allowing keys in secondary indexes to be made from
data stored off-page in the clustered index. */
- ut_a(compact);
- } else {
+
+ if (!compact) {
+ return(false);
+ }
+
+ } else if (zip_ssize) {
+
/* Antelope does not support COMPRESSED row format. */
- ut_a(!zip_ssize);
+ return(false);
}
if (zip_ssize) {
+
/* COMPRESSED row format must have compact and atomic_blobs
- bits set. */
- ut_a(compact);
- ut_a(atomic_blobs);
+ bits set and validate the number is within allowed range. */
- /* Validate the number is within allowed range. */
- ut_a(zip_ssize <= PAGE_ZIP_SSIZE_MAX);
+ if (!compact
+ || !atomic_blobs
+ || zip_ssize > PAGE_ZIP_SSIZE_MAX) {
+
+ return(false);
+ }
}
- /* Return the flags sent if we did not crash. */
- return(flags);
+ /* CREATE TABLE ... DATA DIRECTORY is supported for any row format,
+ so the DATA_DIR flag is compatible with all other table flags. */
+
+ return(true);
}
/********************************************************************//**
@@ -517,9 +595,7 @@ dict_sys_tables_type_validate(
ulint atomic_blobs = DICT_TF_HAS_ATOMIC_BLOBS(type);
ulint unused = DICT_TF_GET_UNUSED(type);
- /* If the format is UNIV_FORMAT_A, table->flags == 0, but
- SYS_TABLES.TYPE == 1, which is defined as SYS_TABLE_TYPE_ANTELOPE.
- The low order bit of SYS_TABLES.TYPE is always set to 1.
+ /* The low order bit of SYS_TABLES.TYPE is always set to 1.
If the format is UNIV_FORMAT_B or higher, this field is the same
as dict_table_t::flags. Zero is not allowed here. */
if (!low_order_bit) {
@@ -527,12 +603,9 @@ dict_sys_tables_type_validate(
}
if (redundant) {
- /* This is Redundant row format, only the first bit
- should be set in SYS_TABLES.TYPE */
- if (type != SYS_TABLE_TYPE_ANTELOPE) {
+ if (zip_ssize || atomic_blobs) {
return(ULINT_UNDEFINED);
}
- return(DICT_TF_REDUNDANT);
}
/* Make sure there are no bits that we do not know about. */
@@ -569,6 +642,11 @@ dict_sys_tables_type_validate(
}
}
+ /* There is nothing to validate for the data_dir field.
+ CREATE TABLE ... DATA DIRECTORY is supported for any row
+ format, so the DATA_DIR flag is compatible with any other
+ table flags. However, it is not used with TEMPORARY tables.*/
+
/* Return the validated SYS_TABLES.TYPE. */
return(type);
}
@@ -584,7 +662,7 @@ dict_tf_get_rec_format(
/*===================*/
ulint flags) /*!< in: dict_table_t::flags */
{
- dict_tf_validate(flags);
+ ut_a(dict_tf_is_valid(flags));
if (!DICT_TF_GET_COMPACT(flags)) {
return(REC_FORMAT_REDUNDANT);
@@ -640,7 +718,8 @@ dict_tf_set(
/*========*/
ulint* flags, /*!< in/out: table flags */
rec_format_t format, /*!< in: file format */
- ulint zip_ssize) /*!< in: zip shift size */
+ ulint zip_ssize, /*!< in: zip shift size */
+ bool use_data_dir) /*!< in: table uses DATA DIRECTORY */
{
switch (format) {
case REC_FORMAT_REDUNDANT:
@@ -662,6 +741,10 @@ dict_tf_set(
ut_ad(zip_ssize == 0);
break;
}
+
+ if (use_data_dir) {
+ *flags |= (1 << DICT_TF_POS_DATA_DIR);
+ }
}
/********************************************************************//**
@@ -679,15 +762,61 @@ UNIV_INLINE
ulint
dict_tf_to_fsp_flags(
/*=================*/
- ulint flags) /*!< in: dict_table_t::flags */
+ ulint table_flags) /*!< in: dict_table_t::flags */
{
+ ulint fsp_flags;
+
+ DBUG_EXECUTE_IF("dict_tf_to_fsp_flags_failure",
+ return(ULINT_UNDEFINED););
+
/* Adjust bit zero. */
- flags = (flags == DICT_TF_COMPACT) ? 0 : flags;
+ fsp_flags = DICT_TF_HAS_ATOMIC_BLOBS(table_flags) ? 1 : 0;
+
+ /* ZIP_SSIZE and ATOMIC_BLOBS are at the same position. */
+ fsp_flags |= table_flags & DICT_TF_MASK_ZIP_SSIZE;
+ fsp_flags |= table_flags & DICT_TF_MASK_ATOMIC_BLOBS;
/* In addition, tablespace flags also contain the page size. */
- flags = fsp_flags_set_page_size(flags, UNIV_PAGE_SIZE);
+ fsp_flags |= fsp_flags_set_page_size(fsp_flags, UNIV_PAGE_SIZE);
+
+ /* The DATA_DIR flag is in a different position in fsp_flags */
+ fsp_flags |= DICT_TF_HAS_DATA_DIR(table_flags)
+ ? FSP_FLAGS_MASK_DATA_DIR : 0;
- return(fsp_flags_validate(flags));
+ ut_a(fsp_flags_is_valid(fsp_flags));
+
+ return(fsp_flags);
+}
+
+/********************************************************************//**
+Convert a 32 bit integer from SYS_TABLES.TYPE to dict_table_t::flags
+The following chart shows the translation of the low order bit.
+Other bits are the same.
+========================= Low order bit ==========================
+ | REDUNDANT | COMPACT | COMPRESSED and DYNAMIC
+SYS_TABLES.TYPE | 1 | 1 | 1
+dict_table_t::flags | 0 | 1 | 1
+==================================================================
+@return ulint containing SYS_TABLES.TYPE */
+UNIV_INLINE
+ulint
+dict_sys_tables_type_to_tf(
+/*=======================*/
+ ulint type, /*!< in: SYS_TABLES.TYPE field */
+ ulint n_cols) /*!< in: SYS_TABLES.N_COLS field */
+{
+ ulint flags;
+ ulint redundant = !(n_cols & DICT_N_COLS_COMPACT);
+
+ /* Adjust bit zero. */
+ flags = redundant ? 0 : 1;
+
+ /* ZIP_SSIZE, ATOMIC_BLOBS & DATA_DIR are the same. */
+ flags |= type & (DICT_TF_MASK_ZIP_SSIZE
+ | DICT_TF_MASK_ATOMIC_BLOBS
+ | DICT_TF_MASK_DATA_DIR);
+
+ return(flags);
}
/********************************************************************//**
@@ -706,13 +835,19 @@ dict_tf_to_sys_tables_type(
/*=======================*/
ulint flags) /*!< in: dict_table_t::flags */
{
- if (!DICT_TF_HAS_ATOMIC_BLOBS(flags)) {
- ut_a(flags == DICT_TF_REDUNDANT
- || flags == DICT_TF_COMPACT);
- return(SYS_TABLE_TYPE_ANTELOPE);
- }
+ ulint type;
+
+ ut_a(dict_tf_is_valid(flags));
+
+ /* Adjust bit zero. It is always 1 in SYS_TABLES.TYPE */
+ type = 1;
+
+ /* ZIP_SSIZE, ATOMIC_BLOBS & DATA_DIR are the same. */
+ type |= flags & (DICT_TF_MASK_ZIP_SSIZE
+ | DICT_TF_MASK_ATOMIC_BLOBS
+ | DICT_TF_MASK_DATA_DIR);
- return(dict_tf_validate(flags));
+ return(type);
}
/********************************************************************//**
@@ -1064,6 +1199,103 @@ dict_index_get_space_reserve(void)
return(UNIV_PAGE_SIZE / 16);
}
+/********************************************************************//**
+Gets the status of online index creation.
+@return the status */
+UNIV_INLINE
+enum online_index_status
+dict_index_get_online_status(
+/*=========================*/
+ const dict_index_t* index) /*!< in: secondary index */
+{
+ enum online_index_status status;
+
+ status = (enum online_index_status) index->online_status;
+
+ /* Without the index->lock protection, the online
+ status can change from ONLINE_INDEX_CREATION to
+ ONLINE_INDEX_COMPLETE (or ONLINE_INDEX_ABORTED) in
+ row_log_apply() once log application is done. So to make
+ sure the status is ONLINE_INDEX_CREATION or ONLINE_INDEX_COMPLETE
+ you should always do the recheck after acquiring index->lock */
+
+#ifdef UNIV_DEBUG
+ switch (status) {
+ case ONLINE_INDEX_COMPLETE:
+ case ONLINE_INDEX_CREATION:
+ case ONLINE_INDEX_ABORTED:
+ case ONLINE_INDEX_ABORTED_DROPPED:
+ return(status);
+ }
+ ut_error;
+#endif /* UNIV_DEBUG */
+ return(status);
+}
+
+/********************************************************************//**
+Sets the status of online index creation. */
+UNIV_INLINE
+void
+dict_index_set_online_status(
+/*=========================*/
+ dict_index_t* index, /*!< in/out: index */
+ enum online_index_status status) /*!< in: status */
+{
+ ut_ad(!(index->type & DICT_FTS));
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(rw_lock_own(dict_index_get_lock(index), RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+#ifdef UNIV_DEBUG
+ switch (dict_index_get_online_status(index)) {
+ case ONLINE_INDEX_COMPLETE:
+ case ONLINE_INDEX_CREATION:
+ break;
+ case ONLINE_INDEX_ABORTED:
+ ut_ad(status == ONLINE_INDEX_ABORTED_DROPPED);
+ break;
+ case ONLINE_INDEX_ABORTED_DROPPED:
+ ut_error;
+ }
+#endif /* UNIV_DEBUG */
+
+ index->online_status = status;
+ ut_ad(dict_index_get_online_status(index) == status);
+}
+
+/********************************************************************//**
+Determines if a secondary index is being or has been created online,
+or if the table is being rebuilt online, allowing concurrent modifications
+to the table.
+@retval true if the index is being or has been built online, or
+if this is a clustered index and the table is being or has been rebuilt online
+@retval false if the index has been created or the table has been
+rebuilt completely */
+UNIV_INLINE
+bool
+dict_index_is_online_ddl(
+/*=====================*/
+ const dict_index_t* index) /*!< in: index */
+{
+#ifdef UNIV_DEBUG
+ if (dict_index_is_clust(index)) {
+ switch (dict_index_get_online_status(index)) {
+ case ONLINE_INDEX_CREATION:
+ return(true);
+ case ONLINE_INDEX_COMPLETE:
+ return(false);
+ case ONLINE_INDEX_ABORTED:
+ case ONLINE_INDEX_ABORTED_DROPPED:
+ break;
+ }
+ ut_ad(0);
+ return(false);
+ }
+#endif /* UNIV_DEBUG */
+
+ return(UNIV_UNLIKELY(dict_index_get_online_status(index)
+ != ONLINE_INDEX_COMPLETE));
+}
+
/**********************************************************************//**
Check whether a column exists in an FTS index.
@return ULINT_UNDEFINED if no match else the offset within the vector */
@@ -1147,4 +1379,28 @@ dict_index_is_corrupted(
|| (index->table && index->table->corrupted));
}
+/********************************************************************//**
+Check if the tablespace for the table has been discarded.
+@return true if the tablespace has been discarded. */
+UNIV_INLINE
+bool
+dict_table_is_discarded(
+/*====================*/
+ const dict_table_t* table) /*!< in: table to check */
+{
+ return(DICT_TF2_FLAG_IS_SET(table, DICT_TF2_DISCARDED));
+}
+
+/********************************************************************//**
+Check if it is a temporary table.
+@return true if temporary table flag is set. */
+UNIV_INLINE
+bool
+dict_table_is_temporary(
+/*====================*/
+ const dict_table_t* table) /*!< in: table to check */
+{
+ return(DICT_TF2_FLAG_IS_SET(table, DICT_TF2_TEMPORARY));
+}
+
#endif /* !UNIV_HOTBACKUP */
diff --git a/storage/innobase/include/dict0load.h b/storage/innobase/include/dict0load.h
index 13b9a121c1c..5991d58a686 100644
--- a/storage/innobase/include/dict0load.h
+++ b/storage/innobase/include/dict0load.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2009, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -29,38 +29,35 @@ Created 4/24/1996 Heikki Tuuri
#include "univ.i"
#include "dict0types.h"
+#include "trx0types.h"
#include "ut0byte.h"
#include "mem0mem.h"
#include "btr0types.h"
-/** enum that defines all 6 system table IDs */
-enum dict_system_table_id {
+/** enum that defines all system table IDs. @see SYSTEM_TABLE_NAME[] */
+enum dict_system_id_t {
SYS_TABLES = 0,
SYS_INDEXES,
SYS_COLUMNS,
SYS_FIELDS,
SYS_FOREIGN,
SYS_FOREIGN_COLS,
+ SYS_TABLESPACES,
+ SYS_DATAFILES,
/* This must be last item. Defines the number of system tables. */
SYS_NUM_SYSTEM_TABLES
};
-typedef enum dict_system_table_id dict_system_id_t;
-
/** Status bit for dict_process_sys_tables_rec_and_mtr_commit() */
-enum dict_table_info {
+enum dict_table_info_t {
DICT_TABLE_LOAD_FROM_RECORD = 0,/*!< Directly populate a dict_table_t
structure with information from
a SYS_TABLES record */
- DICT_TABLE_LOAD_FROM_CACHE = 1, /*!< Check first whether dict_table_t
+ DICT_TABLE_LOAD_FROM_CACHE = 1 /*!< Check first whether dict_table_t
is in the cache, if so, return it */
- DICT_TABLE_UPDATE_STATS = 2 /*!< whether to update statistics
- when loading SYS_TABLES information. */
};
-typedef enum dict_table_info dict_table_info_t;
-
/********************************************************************//**
In a crash recovery we already have all the tablespace objects created.
This function compares the space id information in the InnoDB data dictionary
@@ -157,6 +154,27 @@ dict_load_field_low(
for temporary storage */
const rec_t* rec); /*!< in: SYS_FIELDS record */
/********************************************************************//**
+Using the table->heap, copy the null-terminated filepath into
+table->data_dir_path and put a null byte before the extension.
+This allows SHOW CREATE TABLE to return the correct DATA DIRECTORY path.
+Save this data directory path only if it has not yet been saved. */
+UNIV_INTERN
+void
+dict_save_data_dir_path(
+/*====================*/
+ dict_table_t* table, /*!< in/out: table */
+ char* filepath); /*!< in: filepath of tablespace */
+/*****************************************************************//**
+Make sure the data_file_name is saved in dict_table_t if needed. Try to
+read it from the file dictionary first, then from SYS_DATAFILES. */
+UNIV_INTERN
+void
+dict_get_and_save_data_dir_path(
+/*============================*/
+ dict_table_t* table, /*!< in/out: table */
+ bool dict_mutex_own); /*!< in: true if dict_sys->mutex
+ is owned already */
+/********************************************************************//**
Loads a table definition and also all its index definitions, and also
the cluster definition if the table is a member in a cluster. Also loads
all foreign key constraints where the foreign key is in the table or where
@@ -199,14 +217,15 @@ cache already contains all constraints where the other relevant table is
already in the dictionary cache.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
dict_load_foreigns(
/*===============*/
const char* table_name, /*!< in: table name */
ibool check_recursive,/*!< in: Whether to check recursive
load of tables chained by FK */
- ibool check_charsets);/*!< in: TRUE=check charsets
+ ibool check_charsets) /*!< in: TRUE=check charsets
compatibility */
+ __attribute__((nonnull, warn_unused_result));
/********************************************************************//**
Prints to the standard output information on all tables found in the data
dictionary system table. */
@@ -324,6 +343,66 @@ dict_process_sys_foreign_col_rec(
const char** ref_col_name, /*!< out: referenced column name
in referenced table */
ulint* pos); /*!< out: column position */
+/********************************************************************//**
+This function parses a SYS_TABLESPACES record, extracts necessary
+information from the record and returns to caller.
+@return error message, or NULL on success */
+UNIV_INTERN
+const char*
+dict_process_sys_tablespaces(
+/*=========================*/
+ mem_heap_t* heap, /*!< in/out: heap memory */
+ const rec_t* rec, /*!< in: current SYS_TABLESPACES rec */
+ ulint* space, /*!< out: space id */
+ const char** name, /*!< out: tablespace name */
+ ulint* flags); /*!< out: tablespace flags */
+/********************************************************************//**
+This function parses a SYS_DATAFILES record, extracts necessary
+information from the record and returns to caller.
+@return error message, or NULL on success */
+UNIV_INTERN
+const char*
+dict_process_sys_datafiles(
+/*=======================*/
+ mem_heap_t* heap, /*!< in/out: heap memory */
+ const rec_t* rec, /*!< in: current SYS_DATAFILES rec */
+ ulint* space, /*!< out: space id */
+ const char** path); /*!< out: datafile path */
+/********************************************************************//**
+Get the filepath for a spaceid from SYS_DATAFILES. This function provides
+a temporary heap which is used for the table lookup, but not for the path.
+The caller must free the memory for the path returned. This function can
+return NULL if the space ID is not found in SYS_DATAFILES, then the caller
+will assume that the ibd file is in the normal datadir.
+@return own: A copy of the first datafile found in SYS_DATAFILES.PATH for
+the given space ID. NULL if space ID is zero or not found. */
+UNIV_INTERN
+char*
+dict_get_first_path(
+/*================*/
+ ulint space, /*!< in: space id */
+ const char* name); /*!< in: tablespace name */
+/********************************************************************//**
+Update the record for space_id in SYS_TABLESPACES to this filepath.
+@return DB_SUCCESS if OK, dberr_t if the insert failed */
+UNIV_INTERN
+dberr_t
+dict_update_filepath(
+/*=================*/
+ ulint space_id, /*!< in: space id */
+ const char* filepath); /*!< in: filepath */
+/********************************************************************//**
+Insert records into SYS_TABLESPACES and SYS_DATAFILES.
+@return DB_SUCCESS if OK, dberr_t if the insert failed */
+UNIV_INTERN
+dberr_t
+dict_insert_tablespace_and_filepath(
+/*================================*/
+ ulint space, /*!< in: space id */
+ const char* name, /*!< in: tablespace name */
+ const char* filepath, /*!< in: filepath */
+ ulint fsp_flags); /*!< in: tablespace flags */
+
#ifndef UNIV_NONINL
#include "dict0load.ic"
#endif
diff --git a/storage/innobase/include/dict0mem.h b/storage/innobase/include/dict0mem.h
index ea7e996dfa8..671f67eb1f8 100644
--- a/storage/innobase/include/dict0mem.h
+++ b/storage/innobase/include/dict0mem.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2012, Facebook Inc.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -30,6 +31,7 @@ Created 1/8/1996 Heikki Tuuri
#include "dict0types.h"
#include "data0type.h"
#include "mem0mem.h"
+#include "row0types.h"
#include "rem0types.h"
#include "btr0types.h"
#ifndef UNIV_HOTBACKUP
@@ -46,7 +48,7 @@ Created 1/8/1996 Heikki Tuuri
#include "fts0fts.h"
/* Forward declaration. */
-typedef struct ib_rbt_struct ib_rbt_t;
+struct ib_rbt_t;
/** Type flags of an index: OR'ing of the flags is allowed to define a
combination of types */
@@ -93,12 +95,9 @@ and SYS_TABLES.TYPE. Similar flags found in fil_space_t and FSP_SPACE_FLAGS
are described in fsp0fsp.h. */
/* @{ */
-/** SYS_TABLES.TYPE can be equal to 1 which means that the Row format
-is one of two Antelope row formats, Redundant or Compact. */
-#define SYS_TABLE_TYPE_ANTELOPE 1
-/** dict_table_t::flags can be equal to 0 if the row format = Redundant */
+/** dict_table_t::flags bit 0 is equal to 0 if the row format = Redundant */
#define DICT_TF_REDUNDANT 0 /*!< Redundant row format. */
-/** dict_table_t::flags can be equal to 1 if the row format = Compact */
+/** dict_table_t::flags bit 0 is equal to 1 if the row format = Compact */
#define DICT_TF_COMPACT 1 /*!< Compact row format. */
/** This bitmask is used in SYS_TABLES.N_COLS to set and test whether
Barracuda row formats store the whole blob or text field off-page atomically.
Secondary indexes are created from this external data using row_ext_t
to cache the BLOB prefixes. */
#define DICT_TF_WIDTH_ATOMIC_BLOBS 1
+/** If a table is created with the MYSQL option DATA DIRECTORY and
+innodb-file-per-table, an older engine will not be able to find that table.
+This flag prevents older engines from attempting to open the table and
+allows InnoDB to update_create_info() accordingly. */
+#define DICT_TF_WIDTH_DATA_DIR 1
+
/** Width of all the currently known table flags */
#define DICT_TF_BITS (DICT_TF_WIDTH_COMPACT \
+ DICT_TF_WIDTH_ZIP_SSIZE \
- + DICT_TF_WIDTH_ATOMIC_BLOBS)
+ + DICT_TF_WIDTH_ATOMIC_BLOBS \
+ + DICT_TF_WIDTH_DATA_DIR)
/** A mask of all the known/used bits in table flags */
#define DICT_TF_BIT_MASK (~(~0 << DICT_TF_BITS))
@@ -131,9 +137,12 @@ to cache the BLOB prefixes. */
/** Zero relative shift position of the ATOMIC_BLOBS field */
#define DICT_TF_POS_ATOMIC_BLOBS (DICT_TF_POS_ZIP_SSIZE \
+ DICT_TF_WIDTH_ZIP_SSIZE)
-/** Zero relative shift position of the start of the UNUSED bits */
-#define DICT_TF_POS_UNUSED (DICT_TF_POS_ATOMIC_BLOBS \
+/** Zero relative shift position of the DATA_DIR field */
+#define DICT_TF_POS_DATA_DIR (DICT_TF_POS_ATOMIC_BLOBS \
+ DICT_TF_WIDTH_ATOMIC_BLOBS)
+/** Zero relative shift position of the start of the UNUSED bits */
+#define DICT_TF_POS_UNUSED (DICT_TF_POS_DATA_DIR \
+ + DICT_TF_WIDTH_DATA_DIR)
/** Bit mask of the COMPACT field */
#define DICT_TF_MASK_COMPACT \
@@ -147,6 +156,10 @@ to cache the BLOB prefixes. */
#define DICT_TF_MASK_ATOMIC_BLOBS \
((~(~0 << DICT_TF_WIDTH_ATOMIC_BLOBS)) \
<< DICT_TF_POS_ATOMIC_BLOBS)
+/** Bit mask of the DATA_DIR field */
+#define DICT_TF_MASK_DATA_DIR \
+ ((~(~0 << DICT_TF_WIDTH_DATA_DIR)) \
+ << DICT_TF_POS_DATA_DIR)
/** Return the value of the COMPACT field */
#define DICT_TF_GET_COMPACT(flags) \
@@ -160,6 +173,10 @@ to cache the BLOB prefixes. */
#define DICT_TF_HAS_ATOMIC_BLOBS(flags) \
((flags & DICT_TF_MASK_ATOMIC_BLOBS) \
>> DICT_TF_POS_ATOMIC_BLOBS)
+/** Return the value of the ATOMIC_BLOBS field */
+#define DICT_TF_HAS_DATA_DIR(flags) \
+ ((flags & DICT_TF_MASK_DATA_DIR) \
+ >> DICT_TF_POS_DATA_DIR)
/** Return the contents of the UNUSED bits */
#define DICT_TF_GET_UNUSED(flags) \
(flags >> DICT_TF_POS_UNUSED)
@@ -174,7 +191,7 @@ ROW_FORMAT=REDUNDANT. InnoDB engines do not check these flags
for unknown bits in order to protect backward incompatibility. */
/* @{ */
/** Total number of bits in table->flags2. */
-#define DICT_TF2_BITS 5
+#define DICT_TF2_BITS 6
#define DICT_TF2_BIT_MASK ~(~0 << DICT_TF2_BITS)
/** TEMPORARY; TRUE for tables from CREATE TEMPORARY TABLE. */
@@ -189,6 +206,9 @@ This is a transient bit for index build */
/** This bit is used during table creation to indicate that it will
use its own tablespace instead of the system tablespace. */
#define DICT_TF2_USE_TABLESPACE 16
+
+/** Set when we discard/detach the tablespace */
+#define DICT_TF2_DISCARDED 32
/* @} */
#define DICT_TF2_FLAG_SET(table, flag) \
@@ -225,9 +245,7 @@ dict_mem_table_create(
/*==================*/
const char* name, /*!< in: table name */
ulint space, /*!< in: space where the clustered index
- of the table is placed; this parameter
- is ignored if the table is made
- a member of a cluster */
+ of the table is placed */
ulint n_cols, /*!< in: number of columns */
ulint flags, /*!< in: table flags */
ulint flags2); /*!< in: table flags2 */
@@ -249,7 +267,19 @@ dict_mem_table_add_col(
const char* name, /*!< in: column name, or NULL */
ulint mtype, /*!< in: main datatype */
ulint prtype, /*!< in: precise type */
- ulint len); /*!< in: precision */
+ ulint len) /*!< in: precision */
+ __attribute__((nonnull(1)));
+/**********************************************************************//**
+Renames a column of a table in the data dictionary cache. */
+UNIV_INTERN
+void
+dict_mem_table_col_rename(
+/*======================*/
+ dict_table_t* table, /*!< in/out: table */
+ unsigned nth_col,/*!< in: column index */
+ const char* from, /*!< in: old column name */
+ const char* to) /*!< in: new column name */
+ __attribute__((nonnull));
/**********************************************************************//**
This function populates a dict_col_t memory structure with
supplied information. */
@@ -347,8 +377,19 @@ dict_mem_referenced_table_name_lookup_set(
dict_foreign_t* foreign, /*!< in/out: foreign struct */
ibool do_alloc); /*!< in: is an alloc needed */
+/*******************************************************************//**
+Create a temporary tablename.
+@return temporary tablename suitable for InnoDB use */
+UNIV_INTERN __attribute__((nonnull, warn_unused_result))
+char*
+dict_mem_create_temporary_tablename(
+/*================================*/
+ mem_heap_t* heap, /*!< in: memory heap */
+ const char* dbtab, /*!< in: database/table name */
+ table_id_t id); /*!< in: InnoDB table id */
+
/** Data structure for a column in a table */
-struct dict_col_struct{
+struct dict_col_t{
/*----------------------*/
/** The following are copied from dtype_t,
so that all bit-fields can be packed tightly. */
@@ -424,7 +465,7 @@ be REC_VERSION_56_MAX_INDEX_COL_LEN (3072) bytes */
#define DICT_MAX_FIXED_COL_LEN DICT_ANTELOPE_MAX_INDEX_COL_LEN
/** Data structure for a field in an index */
-struct dict_field_struct{
+struct dict_field_t{
dict_col_t* col; /*!< pointer to the table column */
const char* name; /*!< name of the column */
unsigned prefix_len:12; /*!< 0 or the length of the column
@@ -440,9 +481,61 @@ struct dict_field_struct{
DICT_ANTELOPE_MAX_INDEX_COL_LEN */
};
+/**********************************************************************//**
+PADDING HEURISTIC BASED ON LINEAR INCREASE OF PADDING TO AVOID
+COMPRESSION FAILURES
+(Note: this is relevant only for compressed indexes)
+GOAL: Avoid compression failures by maintaining information about the
+compressibility of data. If data is not very compressible then leave
+some extra space 'padding' in the uncompressed page making it more
+likely that compression of less than fully packed uncompressed page will
+succeed.
+
+This padding heuristic works by increasing the pad linearly until the
+desired failure rate is reached. A "round" is a fixed number of
+compression operations.
+After each round, the compression failure rate for that round is
+computed. If the failure rate is too high, then padding is incremented
+by a fixed value, otherwise it's left intact.
+If the compression failure is lower than the desired rate for a fixed
+number of consecutive rounds, then the padding is decreased by a fixed
+value. This is done to prevent overshooting the padding value,
+and to accommodate the possible change in data compressibility. */
+
+/** Number of zip ops in one round. */
+#define ZIP_PAD_ROUND_LEN (128)
+
+/** Number of successful rounds after which the padding is decreased */
+#define ZIP_PAD_SUCCESSFUL_ROUND_LIMIT (5)
+
+/** Amount by which padding is increased. */
+#define ZIP_PAD_INCR (128)
+
+/** Percentage of compression failures that are allowed in a single
+round */
+extern ulong zip_failure_threshold_pct;
+
+/** Maximum percentage of a page that can be allowed as a pad to avoid
+compression failures */
+extern ulong zip_pad_max;
+
+/** Data structure to hold information about how much space in
+an uncompressed page should be left as padding to avoid compression
+failures. This estimate is based on a self-adapting heuristic. */
+struct zip_pad_info_t {
+ os_fast_mutex_t mutex; /*!< mutex protecting the info */
+ ulint pad; /*!< number of bytes used as pad */
+ ulint success;/*!< successful compression ops during
+ current round */
+ ulint failure;/*!< failed compression ops during
+ current round */
+ ulint n_rounds;/*!< number of currently successful
+ rounds */
+};
+
/** Data structure for an index. Most fields will be
initialized to 0, NULL or FALSE in dict_mem_index_create(). */
-struct dict_index_struct{
+struct dict_index_t{
index_id_t id; /*!< id of the index */
mem_heap_t* heap; /*!< memory heap */
const char* name; /*!< index name */
@@ -478,24 +571,35 @@ struct dict_index_struct{
unsigned cached:1;/*!< TRUE if the index object is in the
dictionary cache */
unsigned to_be_dropped:1;
- /*!< TRUE if this index is marked to be
- dropped in ha_innobase::prepare_drop_index(),
- otherwise FALSE. Protected by
- dict_sys->mutex, dict_operation_lock and
- index->lock.*/
+ /*!< TRUE if the index is to be dropped;
+ protected by dict_operation_lock */
+ unsigned online_status:2;
+ /*!< enum online_index_status.
+ Transitions from ONLINE_INDEX_COMPLETE (to
+ ONLINE_INDEX_CREATION) are protected
+ by dict_operation_lock and
+ dict_sys->mutex. Other changes are
+ protected by index->lock. */
dict_field_t* fields; /*!< array of field descriptions */
#ifndef UNIV_HOTBACKUP
UT_LIST_NODE_T(dict_index_t)
indexes;/*!< list of indexes of the table */
- btr_search_t* search_info; /*!< info used in optimistic searches */
+ btr_search_t* search_info;
+ /*!< info used in optimistic searches */
+ row_log_t* online_log;
+ /*!< the log of modifications
+ during online index creation;
+ valid when online_status is
+ ONLINE_INDEX_CREATION */
/*----------------------*/
/** Statistics for query optimization */
/* @{ */
ib_uint64_t* stat_n_diff_key_vals;
/*!< approximate number of different
key values for this index, for each
- n-column prefix where n <=
- dict_get_n_unique(index); we
+ n-column prefix where 1 <= n <=
+ dict_get_n_unique(index) (the array is
+ indexed from 0 to n_uniq-1); we
periodically calculate new
estimates */
ib_uint64_t* stat_n_sample_sizes;
@@ -506,7 +610,8 @@ struct dict_index_struct{
ib_uint64_t* stat_n_non_null_key_vals;
/* approximate number of non-null key values
for this index, for each column where
- n < dict_get_n_unique(index); This
+ 1 <= n <= dict_get_n_unique(index) (the array
+ is indexed from 0 to n_uniq-1); This
is used when innodb_stats_method is
"nulls_ignored". */
ulint stat_index_size;
@@ -521,9 +626,11 @@ struct dict_index_struct{
trx_id_t trx_id; /*!< id of the transaction that created this
index, or 0 if the index existed
when InnoDB was started up */
+ zip_pad_info_t zip_pad;/*!< Information about state of
+ compression failures and successes */
#endif /* !UNIV_HOTBACKUP */
#ifdef UNIV_BLOB_DEBUG
- mutex_t blobs_mutex;
+ ib_mutex_t blobs_mutex;
/*!< mutex protecting blobs */
ib_rbt_t* blobs; /*!< map of (page_no,heap_no,field_no)
to first_blob_page_no; protected by
@@ -531,15 +638,35 @@ struct dict_index_struct{
#endif /* UNIV_BLOB_DEBUG */
#ifdef UNIV_DEBUG
ulint magic_n;/*!< magic number */
-/** Value of dict_index_struct::magic_n */
+/** Value of dict_index_t::magic_n */
# define DICT_INDEX_MAGIC_N 76789786
#endif
};
+/** The status of online index creation */
+enum online_index_status {
+ /** the index is complete and ready for access */
+ ONLINE_INDEX_COMPLETE = 0,
+ /** the index is being created, online
+ (allowing concurrent modifications) */
+ ONLINE_INDEX_CREATION,
+ /** secondary index creation was aborted and the index
+ should be dropped as soon as index->table->n_ref_count reaches 0,
+ or online table rebuild was aborted and the clustered index
+ of the original table should soon be restored to
+ ONLINE_INDEX_COMPLETE */
+ ONLINE_INDEX_ABORTED,
+ /** the online index creation was aborted, the index was
+ dropped from the data dictionary and the tablespace, and it
+ should be dropped from the data dictionary cache as soon as
+ index->table->n_ref_count reaches 0. */
+ ONLINE_INDEX_ABORTED_DROPPED
+};
+
/** Data structure for a foreign key constraint; an example:
FOREIGN KEY (A, B) REFERENCES TABLE2 (C, D). Most fields will be
initialized to 0, NULL or FALSE in dict_mem_foreign_create(). */
-struct dict_foreign_struct{
+struct dict_foreign_t{
mem_heap_t* heap; /*!< this object is allocated from
this memory heap */
char* id; /*!< id of the constraint as a
@@ -592,7 +719,7 @@ a foreign key constraint is enforced, therefore RESTRICT just means no flag */
/** Data structure for a database table. Most fields will be
initialized to 0, NULL or FALSE in dict_mem_table_create(). */
-struct dict_table_struct{
+struct dict_table_t{
table_id_t id; /*!< id of the table */
mem_heap_t* heap; /*!< memory heap */
char* name; /*!< table name */
@@ -602,6 +729,8 @@ struct dict_table_struct{
innodb_file_per_table is defined in my.cnf;
in Unix this is usually /tmp/..., in Windows
temp\... */
+ char* data_dir_path; /*!< NULL or the directory path
+ specified by DATA DIRECTORY */
unsigned space:32;
/*!< space where the clustered index of the
table is placed */
@@ -612,13 +741,16 @@ struct dict_table_struct{
tablespace and the .ibd file is missing; then
we must return in ha_innodb.cc an error if the
user tries to query such an orphaned table */
- unsigned tablespace_discarded:1;
- /*!< this flag is set TRUE when the user
- calls DISCARD TABLESPACE on this
- table, and reset to FALSE in IMPORT
- TABLESPACE */
unsigned cached:1;/*!< TRUE if the table object has been added
to the dictionary cache */
+ unsigned to_be_dropped:1;
+ /*!< TRUE if the table is to be dropped, but
+ not yet actually dropped (could in the bk
+ drop list); It is turned on at the beginning
+ of row_drop_table_for_mysql() and turned off
+ just before we start to update system tables
+ for the drop. It is protected by
+ dict_operation_lock */
unsigned n_def:10;/*!< number of columns defined so far */
unsigned n_cols:10;/*!< number of columns */
unsigned can_be_evicted:1;
@@ -626,6 +758,10 @@ struct dict_table_struct{
or a table that has no FK relationships */
unsigned corrupted:1;
/*!< TRUE if table is corrupted */
+ unsigned drop_aborted:1;
+ /*!< TRUE if some indexes should be dropped
+ after ONLINE_INDEX_ABORTED
+ or ONLINE_INDEX_ABORTED_DROPPED */
dict_col_t* cols; /*!< array of column descriptions */
const char* col_names;
/*!< Column names packed in a character string
@@ -659,6 +795,12 @@ struct dict_table_struct{
on the table: we cannot drop the table while
there are foreign key checks running on
it! */
+ trx_id_t def_trx_id;
+ /*!< transaction id that last touched
+ the table definition, either when
+ loading the definition or CREATE
+ TABLE, or ALTER TABLE (prepare,
+ commit, and rollback phases) */
trx_id_t query_cache_inv_trx_id;
/*!< transactions whose trx id is
smaller than this number are not
@@ -691,7 +833,55 @@ struct dict_table_struct{
unsigned stat_initialized:1; /*!< TRUE if statistics have
been calculated the first time
after database startup or table creation */
- ib_int64_t stat_n_rows;
+ ib_time_t stats_last_recalc;
+ /*!< Timestamp of last recalc of the stats */
+ ib_uint32_t stat_persistent;
+ /*!< The two bits below are set in the
+ ::stat_persistent member and have the following
+ meaning:
+ 1. _ON=0, _OFF=0, no explicit persistent stats
+ setting for this table, the value of the global
+ srv_stats_persistent is used to determine
+ whether the table has persistent stats enabled
+ or not
+ 2. _ON=0, _OFF=1, persistent stats are
+ explicitly disabled for this table, regardless
+ of the value of the global srv_stats_persistent
+ 3. _ON=1, _OFF=0, persistent stats are
+ explicitly enabled for this table, regardless
+ of the value of the global srv_stats_persistent
+ 4. _ON=1, _OFF=1, not allowed, we assert if
+ this ever happens. */
+#define DICT_STATS_PERSISTENT_ON (1 << 1)
+#define DICT_STATS_PERSISTENT_OFF (1 << 2)
+ ib_uint32_t stats_auto_recalc;
+ /*!< The two bits below are set in the
+ ::stats_auto_recalc member and have
+ the following meaning:
+ 1. _ON=0, _OFF=0, no explicit auto recalc
+ setting for this table, the value of the global
+ srv_stats_persistent_auto_recalc is used to
+ determine whether the table has auto recalc
+ enabled or not
+ 2. _ON=0, _OFF=1, auto recalc is explicitly
+ disabled for this table, regardless of the
+ value of the global
+ srv_stats_persistent_auto_recalc
+ 3. _ON=1, _OFF=0, auto recalc is explicitly
+ enabled for this table, regardless of the
+ value of the global
+ srv_stats_persistent_auto_recalc
+ 4. _ON=1, _OFF=1, not allowed, we assert if
+ this ever happens. */
+#define DICT_STATS_AUTO_RECALC_ON (1 << 1)
+#define DICT_STATS_AUTO_RECALC_OFF (1 << 2)
+ ulint stats_sample_pages;
+ /*!< the number of pages to sample for this
+ table during persistent stats estimation;
+ if this is 0, then the value of the global
+ srv_stats_persistent_sample_pages will be
+ used instead. */
+ ib_uint64_t stat_n_rows;
/*!< approximate number of rows in the table;
we periodically calculate new estimates */
ulint stat_clustered_index_size;
@@ -699,19 +889,34 @@ struct dict_table_struct{
database pages */
ulint stat_sum_of_other_index_sizes;
/*!< other indexes in database pages */
- ulint stat_modified_counter;
+ ib_uint64_t stat_modified_counter;
/*!< when a row is inserted, updated,
or deleted,
we add 1 to this number; we calculate new
estimates for the stat_... values for the
- table and the indexes at an interval of 2 GB
- or when about 1 / 16 of table has been
- modified; also when the estimate operation is
+ table and the indexes when about 1 / 16 of
+ table has been modified;
+ also when the estimate operation is
called for MySQL SHOW TABLE STATUS; the
counter is reset to zero at statistics
calculation; this counter is not protected by
any latch, because this is only used for
heuristics */
+#define BG_STAT_NONE 0
+#define BG_STAT_IN_PROGRESS (1 << 0)
+ /*!< BG_STAT_IN_PROGRESS is set in
+ stats_bg_flag when the background
+ stats code is working on this table. The DROP
+ TABLE code waits for this to be cleared
+ before proceeding. */
+#define BG_STAT_SHOULD_QUIT (1 << 1)
+ /*!< BG_STAT_SHOULD_QUIT is set in
+ stats_bg_flag when DROP TABLE starts
+ waiting on BG_STAT_IN_PROGRESS to be cleared,
+ the background stats thread will detect this
+ and will eventually quit sooner */
+ byte stats_bg_flag;
+ /*!< see BG_STAT_* above */
/* @} */
/*----------------------*/
/**!< The following fields are used by the
@@ -737,7 +942,7 @@ struct dict_table_struct{
space from the lock heap of the trx:
otherwise the lock heap would grow rapidly
if we do a large insert from a select */
- mutex_t autoinc_mutex;
+ ib_mutex_t autoinc_mutex;
/*!< mutex protecting the autoincrement
counter */
ib_uint64_t autoinc;/*!< autoinc counter value to give to the
@@ -758,6 +963,14 @@ struct dict_table_struct{
fts_t* fts; /* FTS specific state variables */
/* @} */
/*----------------------*/
+
+ ib_quiesce_t quiesce;/*!< Quiescing states, protected by the
+ dict_index_t::lock. ie. we can only change
+ the state if we acquire all the latches
+ (dict_index_t::lock) in X mode of this table's
+ indexes. */
+
+ /*----------------------*/
ulint n_rec_locks;
/*!< Count of the number of record locks on
this table. We use this to determine whether
@@ -776,7 +989,7 @@ struct dict_table_struct{
#ifdef UNIV_DEBUG
ulint magic_n;/*!< magic number */
-/** Value of dict_table_struct::magic_n */
+/** Value of dict_table_t::magic_n */
# define DICT_TABLE_MAGIC_N 76333786
#endif /* UNIV_DEBUG */
};
diff --git a/storage/innobase/include/dict0stats.h b/storage/innobase/include/dict0stats.h
index 879e67a0918..186f90e3694 100644
--- a/storage/innobase/include/dict0stats.h
+++ b/storage/innobase/include/dict0stats.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2009, 2010, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2009, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -32,61 +32,128 @@ Created Jan 06, 2010 Vasil Dimov
#include "dict0types.h"
#include "trx0types.h"
-enum dict_stats_upd_option {
+enum dict_stats_upd_option_t {
DICT_STATS_RECALC_PERSISTENT,/* (re) calculate the
statistics using a precise and slow
algo and save them to the persistent
storage, if the persistent storage is
not present then emit a warning and
fall back to transient stats */
- DICT_STATS_RECALC_PERSISTENT_SILENT,/* same as
- DICT_STATS_RECALC_PERSISTENT
- but do not emit a warning */
DICT_STATS_RECALC_TRANSIENT,/* (re) calculate the statistics
using an imprecise quick algo
without saving the results
persistently */
- DICT_STATS_FETCH, /* fetch the statistics from the
- persistent storage */
- DICT_STATS_FETCH_ONLY_IF_NOT_IN_MEMORY /* only fetch the stats
+ DICT_STATS_EMPTY_TABLE, /* Write all zeros (or 1 where it makes sense)
+ into a table and its indexes' statistics
+ members. The resulting stats correspond to an
+ empty table. If the table is using persistent
+ statistics, then they are saved on disk. */
+ DICT_STATS_FETCH_ONLY_IF_NOT_IN_MEMORY /* fetch the stats
from the persistent storage if the in-memory
structures have not been initialized yet,
otherwise do nothing */
};
-typedef enum dict_stats_upd_option dict_stats_upd_option_t;
+/*********************************************************************//**
+Calculates new estimates for table and index statistics. This function
+is relatively quick and is used to calculate transient statistics that
+are not saved on disk.
+This was the only way to calculate statistics before the
+Persistent Statistics feature was introduced. */
+UNIV_INTERN
+void
+dict_stats_update_transient(
+/*========================*/
+ dict_table_t* table); /*!< in/out: table */
+
+/*********************************************************************//**
+Set the persistent statistics flag for a given table. This is set only
+in the in-memory table object and is not saved on disk. It will be read
+from the .frm file upon first open from MySQL after a server restart. */
+UNIV_INLINE
+void
+dict_stats_set_persistent(
+/*======================*/
+ dict_table_t* table, /*!< in/out: table */
+ ibool ps_on, /*!< in: persistent stats explicitly enabled */
+ ibool ps_off) /*!< in: persistent stats explicitly disabled */
+ __attribute__((nonnull));
+
+/*********************************************************************//**
+Check whether persistent statistics is enabled for a given table.
+@return TRUE if enabled, FALSE otherwise */
+UNIV_INLINE
+ibool
+dict_stats_is_persistent_enabled(
+/*=============================*/
+ const dict_table_t* table) /*!< in: table */
+ __attribute__((nonnull, warn_unused_result));
+
+/*********************************************************************//**
+Set the auto recalc flag for a given table (only honored for a persistent
+stats enabled table). The flag is set only in the in-memory table object
+and is not saved in InnoDB files. It will be read from the .frm file upon
+first open from MySQL after a server restart. */
+UNIV_INLINE
+void
+dict_stats_auto_recalc_set(
+/*=======================*/
+ dict_table_t* table, /*!< in/out: table */
+ ibool auto_recalc_on, /*!< in: explicitly enabled */
+ ibool auto_recalc_off); /*!< in: explicitly disabled */
+
+/*********************************************************************//**
+Check whether auto recalc is enabled for a given table.
+@return TRUE if enabled, FALSE otherwise */
+UNIV_INLINE
+ibool
+dict_stats_auto_recalc_is_enabled(
+/*==============================*/
+ const dict_table_t* table); /*!< in: table */
+
+/*********************************************************************//**
+Initialize table's stats for the first time when opening a table. */
+UNIV_INLINE
+void
+dict_stats_init(
+/*============*/
+ dict_table_t* table); /*!< in/out: table */
+
+/*********************************************************************//**
+Deinitialize table's stats after the last close of the table. This is
+used to detect "FLUSH TABLE" and refresh the stats upon next open. */
+UNIV_INLINE
+void
+dict_stats_deinit(
+/*==============*/
+ dict_table_t* table) /*!< in/out: table */
+ __attribute__((nonnull));
/*********************************************************************//**
Calculates new estimates for table and index statistics. The statistics
are used in query optimization.
@return DB_* error code or DB_SUCCESS */
UNIV_INTERN
-enum db_err
+dberr_t
dict_stats_update(
/*==============*/
dict_table_t* table, /*!< in/out: table */
- dict_stats_upd_option_t stats_upd_option,
+ dict_stats_upd_option_t stats_upd_option);
/*!< in: whether to (re) calc
the stats or to fetch them from
the persistent storage */
- ibool caller_has_dict_sys_mutex);
- /*!< in: TRUE if the caller
- owns dict_sys->mutex */
/*********************************************************************//**
Removes the information for a particular index's stats from the persistent
storage if it exists and if there is data stored for this index.
-The transaction is not committed, it must not be committed in this
-function because this is the user trx that is running DROP INDEX.
-The transaction will be committed at the very end when dropping an
-index.
+This function creates its own trx and commits it.
@return DB_SUCCESS or error code */
UNIV_INTERN
-enum db_err
-dict_stats_delete_index_stats(
-/*==========================*/
- dict_index_t* index, /*!< in: index */
- trx_t* trx, /*!< in: transaction to use */
+dberr_t
+dict_stats_drop_index(
+/*==================*/
+ const char* tname, /*!< in: table name */
+ const char* iname, /*!< in: index name */
char* errstr, /*!< out: error message if != DB_SUCCESS
is returned */
ulint errstr_sz);/*!< in: size of the errstr buffer */
@@ -97,12 +164,39 @@ persistent storage if it exists and if there is data stored for the table.
This function creates its own transaction and commits it.
@return DB_SUCCESS or error code */
UNIV_INTERN
-enum db_err
-dict_stats_delete_table_stats(
-/*==========================*/
+dberr_t
+dict_stats_drop_table(
+/*==================*/
const char* table_name, /*!< in: table name */
char* errstr, /*!< out: error message
if != DB_SUCCESS is returned */
ulint errstr_sz); /*!< in: size of errstr buffer */
+/*********************************************************************//**
+Fetches or calculates new estimates for index statistics. */
+UNIV_INTERN
+void
+dict_stats_update_for_index(
+/*========================*/
+ dict_index_t* index) /*!< in/out: index */
+ __attribute__((nonnull));
+
+/*********************************************************************//**
+Renames a table in InnoDB persistent stats storage.
+This function creates its own transaction and commits it.
+@return DB_SUCCESS or error code */
+UNIV_INTERN
+dberr_t
+dict_stats_rename_table(
+/*====================*/
+ const char* old_name, /*!< in: old table name */
+ const char* new_name, /*!< in: new table name */
+ char* errstr, /*!< out: error string if != DB_SUCCESS
+ is returned */
+ size_t errstr_sz); /*!< in: errstr size */
+
+#ifndef UNIV_NONINL
+#include "dict0stats.ic"
+#endif
+
#endif /* dict0stats_h */
diff --git a/storage/innobase/include/dict0stats.ic b/storage/innobase/include/dict0stats.ic
new file mode 100644
index 00000000000..04763f174d0
--- /dev/null
+++ b/storage/innobase/include/dict0stats.ic
@@ -0,0 +1,250 @@
+/*****************************************************************************
+
+Copyright (c) 2012, Oracle and/or its affiliates. All Rights Reserved.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
+
+*****************************************************************************/
+
+/**************************************************//**
+@file include/dict0stats.ic
+Code used for calculating and manipulating table statistics.
+
+Created Jan 23, 2012 Vasil Dimov
+*******************************************************/
+
+#include "univ.i"
+#include "dict0dict.h" /* dict_table_stats_lock() */
+#include "dict0types.h" /* dict_table_t */
+#include "srv0srv.h" /* srv_stats_persistent, srv_stats_auto_recalc */
+
+/*********************************************************************//**
+Set the persistent statistics flag for a given table. This is set only
+in the in-memory table object and is not saved on disk. It will be read
+from the .frm file upon first open from MySQL after a server restart.
+dict_stats_set_persistent() @{ */
+UNIV_INLINE
+void
+dict_stats_set_persistent(
+/*======================*/
+ dict_table_t* table, /*!< in/out: table */
+ ibool ps_on, /*!< in: persistent stats explicitly enabled */
+ ibool ps_off) /*!< in: persistent stats explicitly disabled */
+{
+ /* Not allowed to have both flags set, but a CREATE or ALTER
+ statement that contains "STATS_PERSISTENT=0 STATS_PERSISTENT=1" would
+ end up having both set. In this case we clear the OFF flag. */
+ if (ps_on && ps_off) {
+ ps_off = FALSE;
+ }
+
+ ib_uint32_t stat_persistent = 0;
+
+ if (ps_on) {
+ stat_persistent |= DICT_STATS_PERSISTENT_ON;
+ }
+
+ if (ps_off) {
+ stat_persistent |= DICT_STATS_PERSISTENT_OFF;
+ }
+
+ /* we rely on this assignment to be atomic */
+ table->stat_persistent = stat_persistent;
+}
+/* @} */
+
+/*********************************************************************//**
+Check whether persistent statistics is enabled for a given table.
+dict_stats_is_persistent_enabled() @{
+@return TRUE if enabled, FALSE otherwise */
+UNIV_INLINE
+ibool
+dict_stats_is_persistent_enabled(
+/*=============================*/
+ const dict_table_t* table) /*!< in: table */
+{
+ /* Because of the nature of this check (non-locking) it is possible
+ that a table becomes:
+ * PS-disabled immediately after this function has returned TRUE or
+ * PS-enabled immediately after this function has returned FALSE.
+ This means that it is possible that we do:
+ + dict_stats_update(DICT_STATS_RECALC_PERSISTENT) on a table that has
+ just been PS-disabled or
+ + dict_stats_update(DICT_STATS_RECALC_TRANSIENT) on a table that has
+ just been PS-enabled.
+ This is acceptable. Avoiding this would mean that we would have to
+ protect the ::stat_persistent with dict_table_stats_lock() like the
+ other ::stat_ members which would be too big performance penalty,
+ especially when this function is called from
+ row_update_statistics_if_needed(). */
+
+ /* we rely on this read to be atomic */
+ ib_uint32_t stat_persistent = table->stat_persistent;
+
+ if (stat_persistent & DICT_STATS_PERSISTENT_ON) {
+ ut_ad(!(stat_persistent & DICT_STATS_PERSISTENT_OFF));
+ return(TRUE);
+ } else if (stat_persistent & DICT_STATS_PERSISTENT_OFF) {
+ return(FALSE);
+ } else {
+ return(srv_stats_persistent);
+ }
+}
+/* @} */
+
+/*********************************************************************//**
+Set the auto recalc flag for a given table (only honored for a persistent
+stats enabled table). The flag is set only in the in-memory table object
+and is not saved in InnoDB files. It will be read from the .frm file upon
+first open from MySQL after a server restart.
+dict_stats_auto_recalc_set() @{ */
+UNIV_INLINE
+void
+dict_stats_auto_recalc_set(
+/*=======================*/
+ dict_table_t* table, /*!< in/out: table */
+ ibool auto_recalc_on, /*!< in: explicitly enabled */
+ ibool auto_recalc_off) /*!< in: explicitly disabled */
+{
+ ut_ad(!auto_recalc_on || !auto_recalc_off);
+
+ ib_uint32_t stats_auto_recalc = 0;
+
+ if (auto_recalc_on) {
+ stats_auto_recalc |= DICT_STATS_AUTO_RECALC_ON;
+ }
+
+ if (auto_recalc_off) {
+ stats_auto_recalc |= DICT_STATS_AUTO_RECALC_OFF;
+ }
+
+ /* we rely on this assignment to be atomic */
+ table->stats_auto_recalc = stats_auto_recalc;
+}
+/* @} */
+
+/*********************************************************************//**
+Check whether auto recalc is enabled for a given table.
+dict_stats_auto_recalc_is_enabled() @{
+@return TRUE if enabled, FALSE otherwise */
+UNIV_INLINE
+ibool
+dict_stats_auto_recalc_is_enabled(
+/*==============================*/
+ const dict_table_t* table) /*!< in: table */
+{
+ /* we rely on this read to be atomic */
+ ib_uint32_t stats_auto_recalc = table->stats_auto_recalc;
+
+ if (stats_auto_recalc & DICT_STATS_AUTO_RECALC_ON) {
+ ut_ad(!(stats_auto_recalc & DICT_STATS_AUTO_RECALC_OFF));
+ return(TRUE);
+ } else if (stats_auto_recalc & DICT_STATS_AUTO_RECALC_OFF) {
+ return(FALSE);
+ } else {
+ return(srv_stats_auto_recalc);
+ }
+}
+/* @} */
+
+/*********************************************************************//**
+Initialize table's stats for the first time when opening a table.
+dict_stats_init() @{ */
+UNIV_INLINE
+void
+dict_stats_init(
+/*============*/
+ dict_table_t* table) /*!< in/out: table */
+{
+ ut_ad(!mutex_own(&dict_sys->mutex));
+
+ if (table->stat_initialized) {
+ return;
+ }
+
+ dict_stats_upd_option_t opt;
+
+ if (dict_stats_is_persistent_enabled(table)) {
+ opt = DICT_STATS_FETCH_ONLY_IF_NOT_IN_MEMORY;
+ } else {
+ opt = DICT_STATS_RECALC_TRANSIENT;
+ }
+
+ dict_stats_update(table, opt);
+}
+/* @} */
+
+/*********************************************************************//**
+Deinitialize table's stats after the last close of the table. This is
+used to detect "FLUSH TABLE" and refresh the stats upon next open.
+dict_stats_deinit() @{ */
+UNIV_INLINE
+void
+dict_stats_deinit(
+/*==============*/
+ dict_table_t* table) /*!< in/out: table */
+{
+ ut_ad(mutex_own(&dict_sys->mutex));
+
+ ut_a(table->n_ref_count == 0);
+
+ dict_table_stats_lock(table, RW_X_LATCH);
+
+ if (!table->stat_initialized) {
+ dict_table_stats_unlock(table, RW_X_LATCH);
+ return;
+ }
+
+ table->stat_initialized = FALSE;
+
+#ifdef UNIV_DEBUG_VALGRIND
+ UNIV_MEM_INVALID(&table->stat_n_rows,
+ sizeof(table->stat_n_rows));
+ UNIV_MEM_INVALID(&table->stat_clustered_index_size,
+ sizeof(table->stat_clustered_index_size));
+ UNIV_MEM_INVALID(&table->stat_sum_of_other_index_sizes,
+ sizeof(table->stat_sum_of_other_index_sizes));
+ UNIV_MEM_INVALID(&table->stat_modified_counter,
+ sizeof(table->stat_modified_counter));
+
+ dict_index_t* index;
+
+ for (index = dict_table_get_first_index(table);
+ index != NULL;
+ index = dict_table_get_next_index(index)) {
+
+ ulint n_uniq = dict_index_get_n_unique(index);
+
+ UNIV_MEM_INVALID(
+ index->stat_n_diff_key_vals,
+ n_uniq * sizeof(index->stat_n_diff_key_vals[0]));
+ UNIV_MEM_INVALID(
+ index->stat_n_sample_sizes,
+ n_uniq * sizeof(index->stat_n_sample_sizes[0]));
+ UNIV_MEM_INVALID(
+ index->stat_n_non_null_key_vals,
+ n_uniq * sizeof(index->stat_n_non_null_key_vals[0]));
+ UNIV_MEM_INVALID(
+ &index->stat_index_size,
+ sizeof(index->stat_index_size));
+ UNIV_MEM_INVALID(
+ &index->stat_n_leaf_pages,
+ sizeof(index->stat_n_leaf_pages));
+ }
+#endif /* UNIV_DEBUG_VALGRIND */
+
+ dict_table_stats_unlock(table, RW_X_LATCH);
+}
+/* @} */
+
+/* vim: set foldmethod=marker foldmarker=@{,@}: */
diff --git a/storage/innobase/include/dict0stats_bg.h b/storage/innobase/include/dict0stats_bg.h
new file mode 100644
index 00000000000..dd85088c7ba
--- /dev/null
+++ b/storage/innobase/include/dict0stats_bg.h
@@ -0,0 +1,116 @@
+/*****************************************************************************
+
+Copyright (c) 2012, Oracle and/or its affiliates. All Rights Reserved.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
+
+*****************************************************************************/
+
+/**************************************************//**
+@file include/dict0stats_bg.h
+Code used for background table and index stats gathering.
+
+Created Apr 26, 2012 Vasil Dimov
+*******************************************************/
+
+#ifndef dict0stats_bg_h
+#define dict0stats_bg_h
+
+#include "univ.i"
+
+#include "dict0types.h" /* dict_table_t, table_id_t */
+#include "os0sync.h" /* os_event_t */
+#include "os0thread.h" /* DECLARE_THREAD */
+
+/** Event to wake up the stats thread */
+extern os_event_t dict_stats_event;
+
+/*****************************************************************//**
+Add a table to the recalc pool, which is processed by the
+background stats gathering thread. Only the table id is added to the
+list, so the table can be closed after being enqueued and it will be
+opened when needed. If the table does not exist later (has been DROPped),
+then it will be removed from the pool and skipped.
+dict_stats_recalc_pool_add() @{ */
+UNIV_INTERN
+void
+dict_stats_recalc_pool_add(
+/*=======================*/
+ const dict_table_t* table); /*!< in: table to add */
+/* @} */
+
+/*****************************************************************//**
+Delete a given table from the auto recalc pool.
+dict_stats_recalc_pool_del() @{ */
+UNIV_INTERN
+void
+dict_stats_recalc_pool_del(
+/*=======================*/
+ const dict_table_t* table); /*!< in: table to remove */
+/* @} */
+
+/*****************************************************************//**
+Wait until background stats thread has stopped using the specified table(s).
+The caller must have locked the data dictionary using
+row_mysql_lock_data_dictionary() and this function may unlock it temporarily
+and restore the lock before it exits.
+The background stats thread is guaranteed not to start using the specified
+tables after this function returns and before the caller unlocks the data
+dictionary because it sets the BG_STAT_IN_PROGRESS bit in table->stats_bg_flag
+under dict_sys->mutex.
+dict_stats_wait_bg_to_stop_using_tables() @{ */
+UNIV_INTERN
+void
+dict_stats_wait_bg_to_stop_using_tables(
+/*====================================*/
+ dict_table_t* table1, /*!< in/out: table1 */
+ dict_table_t* table2, /*!< in/out: table2, could be NULL */
+ trx_t* trx); /*!< in/out: transaction to use for
+ unlocking/locking the data dict */
+/* @} */
+
+/*****************************************************************//**
+Initialize global variables needed for the operation of dict_stats_thread().
+Must be called before dict_stats_thread() is started.
+dict_stats_thread_init() @{ */
+UNIV_INTERN
+void
+dict_stats_thread_init();
+/*====================*/
+/* @} */
+
+/*****************************************************************//**
+Free resources allocated by dict_stats_thread_init(), must be called
+after dict_stats_thread() has exited.
+dict_stats_thread_deinit() @{ */
+UNIV_INTERN
+void
+dict_stats_thread_deinit();
+/*======================*/
+/* @} */
+
+/*****************************************************************//**
+This is the thread for background stats gathering. It pops tables from
+the auto recalc list and processes them, eventually recalculating their
+statistics.
+dict_stats_thread() @{
+@return this function does not return, it calls os_thread_exit() */
+extern "C" UNIV_INTERN
+os_thread_ret_t
+DECLARE_THREAD(dict_stats_thread)(
+/*==============================*/
+ void* arg); /*!< in: a dummy parameter
+ required by os_thread_create */
+/* @} */
+
+#endif /* dict0stats_bg_h */
diff --git a/storage/innobase/include/dict0types.h b/storage/innobase/include/dict0types.h
index cd2863582c1..b7f7c2d9df9 100644
--- a/storage/innobase/include/dict0types.h
+++ b/storage/innobase/include/dict0types.h
@@ -26,15 +26,15 @@ Created 1/8/1996 Heikki Tuuri
#ifndef dict0types_h
#define dict0types_h
-typedef struct dict_sys_struct dict_sys_t;
-typedef struct dict_col_struct dict_col_t;
-typedef struct dict_field_struct dict_field_t;
-typedef struct dict_index_struct dict_index_t;
-typedef struct dict_table_struct dict_table_t;
-typedef struct dict_foreign_struct dict_foreign_t;
+struct dict_sys_t;
+struct dict_col_t;
+struct dict_field_t;
+struct dict_index_t;
+struct dict_table_t;
+struct dict_foreign_t;
-typedef struct ind_node_struct ind_node_t;
-typedef struct tab_node_struct tab_node_t;
+struct ind_node_t;
+struct tab_node_t;
/* Space id and page no where the dictionary header resides */
#define DICT_HDR_SPACE 0 /* the SYSTEM tablespace */
@@ -52,7 +52,7 @@ the table and index will be marked as "corrupted", and caller will
be responsible to deal with corrupted table or index.
Note: please define the IGNORE_ERR_* as bits, so their value can
be or-ed together */
-enum dict_err_ignore {
+enum dict_err_ignore_t {
DICT_ERR_IGNORE_NONE = 0, /*!< no error to ignore */
DICT_ERR_IGNORE_INDEX_ROOT = 1, /*!< ignore error if index root
page is FIL_NULL or incorrect value */
@@ -60,6 +60,11 @@ enum dict_err_ignore {
DICT_ERR_IGNORE_ALL = 0xFFFF /*!< ignore all errors */
};
-typedef enum dict_err_ignore dict_err_ignore_t;
+/** Quiescing states for flushing tables to disk. */
+enum ib_quiesce_t {
+ QUIESCE_NONE,
+ QUIESCE_START, /*!< Initialise, prepare to start */
+ QUIESCE_COMPLETE /*!< All done */
+};
#endif
diff --git a/storage/innobase/include/dyn0dyn.h b/storage/innobase/include/dyn0dyn.h
index 5e69cb13122..ffb4f270d0e 100644
--- a/storage/innobase/include/dyn0dyn.h
+++ b/storage/innobase/include/dyn0dyn.h
@@ -31,10 +31,9 @@ Created 2/5/1996 Heikki Tuuri
#include "mem0mem.h"
/** A block in a dynamically allocated array */
-typedef struct dyn_block_struct dyn_block_t;
+struct dyn_block_t;
/** Dynamically allocated array */
-typedef dyn_block_t dyn_array_t;
-
+typedef dyn_block_t dyn_array_t;
/** This is the initial 'payload' size of a dynamic array;
this must be > MLOG_BUF_MARGIN + 30! */
@@ -159,7 +158,7 @@ dyn_push_string(
/** @brief A block in a dynamically allocated array.
NOTE! Do not access the fields of the struct directly: the definition
appears here only for the compiler to know its size! */
-struct dyn_block_struct{
+struct dyn_block_t{
mem_heap_t* heap; /*!< in the first block this is != NULL
if dynamic allocation has been needed */
ulint used; /*!< number of data bytes used in this block;
diff --git a/storage/innobase/include/dyn0dyn.ic b/storage/innobase/include/dyn0dyn.ic
index b86697d6865..39254e632a8 100644
--- a/storage/innobase/include/dyn0dyn.ic
+++ b/storage/innobase/include/dyn0dyn.ic
@@ -23,9 +23,9 @@ The dynamically allocated array
Created 2/5/1996 Heikki Tuuri
*******************************************************/
-/** Value of dyn_block_struct::magic_n */
+/** Value of dyn_block_t::magic_n */
#define DYN_BLOCK_MAGIC_N 375767
-/** Flag for dyn_block_struct::used that indicates a full block */
+/** Flag for dyn_block_t::used that indicates a full block */
#define DYN_BLOCK_FULL_FLAG 0x1000000UL
/************************************************************//**
diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h
index 4bd9f9fa51f..56fda8b39b1 100644
--- a/storage/innobase/include/fil0fil.h
+++ b/storage/innobase/include/fil0fil.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2010, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -39,6 +39,14 @@ Created 10/25/1995 Heikki Tuuri
#include "log0log.h"
#endif /* !UNIV_HOTBACKUP */
+#include <list>
+
+// Forward declaration
+struct trx_t;
+struct fil_space_t;
+
+typedef std::list<const char*> space_name_list_t;
+
/** When mysqld is run, the default directory "." is the mysqld datadir,
but in the MySQL Embedded Server Library and ibbackup it is not the default
directory, and we must set the base file path explicitly */
@@ -61,12 +69,8 @@ typedef byte fil_faddr_t; /*!< 'type' definition in C: an address
#define FIL_ADDR_SIZE 6 /* address size is 6 bytes */
-/** A struct for storing a space address FIL_ADDR, when it is used
-in C program data structures. */
-
-typedef struct fil_addr_struct fil_addr_t;
/** File space address */
-struct fil_addr_struct{
+struct fil_addr_t{
ulint page; /*!< page number within a space */
ulint boffset; /*!< byte offset within the page */
};
@@ -200,17 +204,19 @@ fil_space_get_type(
ulint id); /*!< in: space id */
#endif /* !UNIV_HOTBACKUP */
/*******************************************************************//**
-Appends a new file to the chain of files of a space. File must be closed. */
+Appends a new file to the chain of files of a space. File must be closed.
+@return pointer to the file name, or NULL on error */
UNIV_INTERN
-void
+char*
fil_node_create(
/*============*/
const char* name, /*!< in: file name (file must be closed) */
ulint size, /*!< in: file size in database blocks, rounded
downwards to an integer */
ulint id, /*!< in: space id where to append */
- ibool is_raw);/*!< in: TRUE if a raw device or
+ ibool is_raw) /*!< in: TRUE if a raw device or
a raw disk partition */
+ __attribute__((nonnull, warn_unused_result));
#ifdef UNIV_LOG_ARCHIVE
/****************************************************************//**
Drops files from the start of a file space, so that its size is cut by
@@ -248,6 +254,16 @@ fil_assign_new_space_id(
/*====================*/
ulint* space_id); /*!< in/out: space id */
/*******************************************************************//**
+Returns the path from the first fil_node_t found for the space ID sent.
+The caller is responsible for freeing the memory allocated here for the
+value returned.
+@return a copy of fil_node_t::path, NULL if space is zero or not found. */
+UNIV_INTERN
+char*
+fil_space_get_first_path(
+/*=====================*/
+ ulint id); /*!< in: space id */
+/*******************************************************************//**
Returns the size of the space in pages. The tablespace must be cached in the
memory cache.
@return space size, 0 if space not found */
@@ -316,6 +332,14 @@ void
fil_close_all_files(void);
/*=====================*/
/*******************************************************************//**
+Closes the redo log files. There must not be any pending i/o's or not
+flushed modifications in the files. */
+UNIV_INTERN
+void
+fil_close_log_files(
+/*================*/
+ bool free); /*!< in: whether to free the memory object */
+/*******************************************************************//**
Sets the max tablespace id counter if the given number is bigger than the
previous value. */
UNIV_INTERN
@@ -329,7 +353,7 @@ Writes the flushed lsn and the latest archived log number to the page
header of the first page of each data file in the system tablespace.
@return DB_SUCCESS or error number */
UNIV_INTERN
-ulint
+dberr_t
fil_write_flushed_lsn_to_data_files(
/*================================*/
lsn_t lsn, /*!< in: lsn to write */
@@ -346,6 +370,7 @@ fil_read_first_page(
parameters below already
contain sensible data */
ulint* flags, /*!< out: tablespace flags */
+ ulint* space_id, /*!< out: tablespace ID */
#ifdef UNIV_LOG_ARCHIVE
ulint* min_arch_log_no, /*!< out: min of archived
log numbers in data files */
@@ -405,25 +430,44 @@ Deletes a single-table tablespace. The tablespace must be cached in the
memory cache.
@return TRUE if success */
UNIV_INTERN
-ibool
+dberr_t
fil_delete_tablespace(
/*==================*/
+ ulint id, /*!< in: space id */
+ buf_remove_t buf_remove); /*!< in: specify the action to take
+ on the tables pages in the buffer
+ pool */
+/*******************************************************************//**
+Closes a single-table tablespace. The tablespace must be cached in the
+memory cache. Free all pages used by the tablespace.
+@return DB_SUCCESS or error */
+UNIV_INTERN
+dberr_t
+fil_close_tablespace(
+/*=================*/
+ trx_t* trx, /*!< in/out: Transaction covering the close */
ulint id); /*!< in: space id */
#ifndef UNIV_HOTBACKUP
/*******************************************************************//**
Discards a single-table tablespace. The tablespace must be cached in the
memory cache. Discarding is like deleting a tablespace, but
-1) we do not drop the table from the data dictionary;
-2) we remove all insert buffer entries for the tablespace immediately; in DROP
-TABLE they are only removed gradually in the background;
-3) when the user does IMPORT TABLESPACE, the tablespace will have the same id
-as it originally had.
-@return TRUE if success */
+
+ 1. We do not drop the table from the data dictionary;
+
+ 2. We remove all insert buffer entries for the tablespace immediately;
+ in DROP TABLE they are only removed gradually in the background;
+
+ 3. When the user does IMPORT TABLESPACE, the tablespace will have the
+ same id as it originally had.
+
+ 4. Free all the pages in use by the tablespace if rename=TRUE.
+@return DB_SUCCESS or error */
UNIV_INTERN
-ibool
+dberr_t
fil_discard_tablespace(
/*===================*/
- ulint id); /*!< in: space id */
+ ulint id) /*!< in: space id */
+ __attribute__((warn_unused_result));
#endif /* !UNIV_HOTBACKUP */
/*******************************************************************//**
Renames a single-table tablespace. The tablespace must be cached in the
@@ -433,16 +477,70 @@ UNIV_INTERN
ibool
fil_rename_tablespace(
/*==================*/
- const char* old_name_in, /*!< in: old table name in the standard
- databasename/tablename format of
- InnoDB, or NULL if we do the rename
- based on the space id only */
+ const char* old_name_in, /*!< in: old table name in the
+ standard databasename/tablename
+ format of InnoDB, or NULL if we
+ do the rename based on the space
+ id only */
ulint id, /*!< in: space id */
- const char* new_name); /*!< in: new table name in the standard
- databasename/tablename format
- of InnoDB */
+ const char* new_name, /*!< in: new table name in the
+ standard databasename/tablename
+ format of InnoDB */
+ const char* new_path); /*!< in: new full datafile path
+ if the tablespace is remotely
+ located, or NULL if it is located
+ in the normal data directory. */
/*******************************************************************//**
+Allocates a file name for a single-table tablespace. The string must be freed
+by caller with mem_free().
+@return own: file name */
+UNIV_INTERN
+char*
+fil_make_ibd_name(
+/*==============*/
+ const char* name, /*!< in: table name or a dir path */
+ bool is_full_path); /*!< in: TRUE if it is a dir path */
+/*******************************************************************//**
+Allocates a file name for a tablespace ISL file (InnoDB Symbolic Link).
+The string must be freed by caller with mem_free().
+@return own: file name */
+UNIV_INTERN
+char*
+fil_make_isl_name(
+/*==============*/
+ const char* name); /*!< in: table name */
+/*******************************************************************//**
+Creates a new InnoDB Symbolic Link (ISL) file. It is always created
+under the 'datadir' of MySQL. The datadir is the directory of a
+running mysqld program. We can refer to it by simply using the path '.'.
+@return DB_SUCCESS or error code */
+UNIV_INTERN
+dberr_t
+fil_create_link_file(
+/*=================*/
+ const char* tablename, /*!< in: tablename */
+ const char* filepath); /*!< in: pathname of tablespace */
+/*******************************************************************//**
+Deletes an InnoDB Symbolic Link (ISL) file. */
+UNIV_INTERN
+void
+fil_delete_link_file(
+/*==================*/
+ const char* tablename); /*!< in: name of table */
+/*******************************************************************//**
+Reads an InnoDB Symbolic Link (ISL) file.
+It is always created under the 'datadir' of MySQL. The name is of the
+form {databasename}/{tablename}. and the isl file is expected to be in a
+'{databasename}' directory called '{tablename}.isl'. The caller must free
+the memory of the null-terminated path returned if it is not null.
+@return own: filepath found in link file, NULL if not found. */
+UNIV_INTERN
+char*
+fil_read_link_file(
+/*===============*/
+ const char* name); /*!< in: tablespace name */
+/*******************************************************************//**
Creates a new single-table tablespace to a database directory of MySQL.
Database directories are under the 'datadir' of MySQL. The datadir is the
directory of a running mysqld program. We can refer to it by simply the
@@ -450,21 +548,20 @@ path '.'. Tables created with CREATE TEMPORARY TABLE we place in the temp
dir of the mysqld server.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fil_create_new_single_table_tablespace(
/*===================================*/
ulint space_id, /*!< in: space id */
const char* tablename, /*!< in: the table name in the usual
databasename/tablename format
- of InnoDB, or a dir path to a temp
- table */
- ibool is_temp, /*!< in: TRUE if a table created with
- CREATE TEMPORARY TABLE */
+ of InnoDB */
+ const char* dir_path, /*!< in: NULL or a dir path */
ulint flags, /*!< in: tablespace flags */
ulint flags2, /*!< in: table flags2 */
- ulint size); /*!< in: the initial size of the
+ ulint size) /*!< in: the initial size of the
tablespace file in pages,
must be >= FIL_IBD_FILE_INITIAL_SIZE */
+ __attribute__((nonnull, warn_unused_result));
#ifndef UNIV_HOTBACKUP
/********************************************************************//**
Tries to open a single-table tablespace and optionally checks the space id is
@@ -475,41 +572,31 @@ NOTE that we assume this operation is used either at the database startup
or under the protection of the dictionary mutex, so that two users cannot
race here. This operation does not leave the file associated with the
tablespace open, but closes it after we have looked at the space id in it.
-@return TRUE if success */
+
+If the validate boolean is set, we read the first page of the file and
+check that the space id in the file is what we expect. We assume that
+this function runs much faster if no check is made, since accessing the
+file inode probably is much faster (the OS caches them) than accessing
+the first page of the file. This boolean may be initially FALSE, but if
+a remote tablespace is found it will be changed to true.
+
+If the fix_dict boolean is set, then it is safe to use an internal SQL
+statement to update the dictionary tables if they are incorrect.
+
+@return DB_SUCCESS or error code */
UNIV_INTERN
-ibool
+dberr_t
fil_open_single_table_tablespace(
/*=============================*/
- ibool check_space_id, /*!< in: should we check that the space
- id in the file is right; we assume
- that this function runs much faster
- if no check is made, since accessing
- the file inode probably is much
- faster (the OS caches them) than
- accessing the first page of the file */
+ bool validate, /*!< in: Do we validate tablespace? */
+ bool fix_dict, /*!< in: Can we fix the dictionary? */
ulint id, /*!< in: space id */
ulint flags, /*!< in: tablespace flags */
- const char* name); /*!< in: table name in the
+ const char* tablename, /*!< in: table name in the
databasename/tablename format */
-/********************************************************************//**
-It is possible, though very improbable, that the lsn's in the tablespace to be
-imported have risen above the current system lsn, if a lengthy purge, ibuf
-merge, or rollback was performed on a backup taken with ibbackup. If that is
-the case, reset page lsn's in the file. We assume that mysqld was shut down
-after it performed these cleanup operations on the .ibd file, so that it at
-the shutdown stamped the latest lsn to the FIL_PAGE_FILE_FLUSH_LSN in the
-first page of the .ibd file, and we can determine whether we need to reset the
-lsn's just by looking at that flush lsn.
-@return TRUE if success */
-UNIV_INTERN
-ibool
-fil_reset_too_high_lsns(
-/*====================*/
- const char* name, /*!< in: table name in the
- databasename/tablename format */
- lsn_t current_lsn); /*!< in: reset lsn's if the lsn stamped
- to FIL_PAGE_FILE_FLUSH_LSN in the
- first page is too high */
+ const char* filepath) /*!< in: tablespace filepath */
+ __attribute__((nonnull(5), warn_unused_result));
+
#endif /* !UNIV_HOTBACKUP */
/********************************************************************//**
At the server startup, if we need crash recovery, scans the database
@@ -520,7 +607,7 @@ in the doublewrite buffer, also to know where to apply log records where the
space id is != 0.
@return DB_SUCCESS or error number */
UNIV_INTERN
-ulint
+dberr_t
fil_load_single_table_tablespaces(void);
/*===================================*/
/*******************************************************************//**
@@ -562,11 +649,15 @@ fil_space_for_table_exists_in_mem(
data dictionary, so that
we can print a warning about orphaned
tablespaces */
- ibool print_error_if_does_not_exist);
+ ibool print_error_if_does_not_exist,
/*!< in: print detailed error
information to the .err log if a
matching tablespace is not found from
memory */
+ bool adjust_space, /*!< in: whether to adjust space id
+ when find table space mismatch */
+ mem_heap_t* heap, /*!< in: heap memory */
+ table_id_t table_id); /*!< in: table id */
#else /* !UNIV_HOTBACKUP */
/********************************************************************//**
Extends all tablespaces to the size stored in the space header. During the
@@ -625,7 +716,7 @@ Reads or writes data. This operation is asynchronous (aio).
@return DB_SUCCESS, or DB_TABLESPACE_DELETED if we are trying to do
i/o on a tablespace which does not exist */
UNIV_INTERN
-ulint
+dberr_t
fil_io(
/*===*/
ulint type, /*!< in: OS_FILE_READ or OS_FILE_WRITE,
@@ -651,8 +742,9 @@ fil_io(
void* buf, /*!< in/out: buffer where to store read data
or from where to write; in aio this must be
appropriately aligned */
- void* message); /*!< in: message for aio handler if non-sync
+ void* message) /*!< in: message for aio handler if non-sync
aio used, else ignored */
+ __attribute__((nonnull(8)));
/**********************************************************************//**
Waits for an aio operation to complete. This function is used to write the
handler for completed requests. The aio array of pending requests is divided
@@ -739,6 +831,140 @@ fil_tablespace_is_being_deleted(
/*============================*/
ulint id); /*!< in: space id */
+/********************************************************************//**
+Delete the tablespace file and any related files like .cfg.
+This should not be called for temporary tables. */
+UNIV_INTERN
+void
+fil_delete_file(
+/*============*/
+ const char* path); /*!< in: filepath of the ibd tablespace */
+
+/** Callback functor. */
+struct PageCallback {
+
+ /**
+ Default constructor */
+ PageCallback()
+ :
+ m_zip_size(),
+ m_page_size(),
+ m_filepath() UNIV_NOTHROW {}
+
+ virtual ~PageCallback() UNIV_NOTHROW {}
+
+ /**
+ Called for page 0 in the tablespace file at the start.
+ @param file_size - size of the file in bytes
+ @param block - contents of the first page in the tablespace file
+ @retval DB_SUCCESS or error code.*/
+ virtual dberr_t init(
+ os_offset_t file_size,
+ const buf_block_t* block) UNIV_NOTHROW = 0;
+
+ /**
+ Called for every page in the tablespace. If the page was not
+ updated then its state must be set to BUF_PAGE_NOT_USED. For
+ compressed tables the page descriptor memory will be at offset:
+ block->frame + UNIV_PAGE_SIZE;
+ @param offset - physical offset within the file
+ @param block - block read from file, note it is not from the buffer pool
+ @retval DB_SUCCESS or error code. */
+ virtual dberr_t operator()(
+ os_offset_t offset,
+ buf_block_t* block) UNIV_NOTHROW = 0;
+
+ /**
+ Set the name of the physical file and the file handle that is used
+ to open it for the file that is being iterated over.
+ @param filename - then physical name of the tablespace file.
+ @param file - OS file handle */
+ void set_file(const char* filename, os_file_t file) UNIV_NOTHROW
+ {
+ m_file = file;
+ m_filepath = filename;
+ }
+
+ /**
+ @return the space id of the tablespace */
+ virtual ulint get_space_id() const UNIV_NOTHROW = 0;
+
+ /** The compressed page size
+ @return the compressed page size */
+ ulint get_zip_size() const
+ {
+ return(m_zip_size);
+ }
+
+ /**
+ Set the tablespace compressed table size.
+ @return DB_SUCCESS if it is valie or DB_CORRUPTION if not */
+ dberr_t set_zip_size(const buf_frame_t* page) UNIV_NOTHROW;
+
+ /** The compressed page size
+ @return the compressed page size */
+ ulint get_page_size() const
+ {
+ return(m_page_size);
+ }
+
+ /** Compressed table page size */
+ ulint m_zip_size;
+
+ /** The tablespace page size. */
+ ulint m_page_size;
+
+ /** File handle to the tablespace */
+ os_file_t m_file;
+
+ /** Physical file path. */
+ const char* m_filepath;
+
+protected:
+ // Disable copying
+ PageCallback(const PageCallback&);
+ PageCallback& operator=(const PageCallback&);
+};
+
+/********************************************************************//**
+Iterate over all the pages in the tablespace.
+@param table - the table definiton in the server
+@param n_io_buffers - number of blocks to read and write together
+@param callback - functor that will do the page updates
+@return DB_SUCCESS or error code */
+UNIV_INTERN
+dberr_t
+fil_tablespace_iterate(
+/*===================*/
+ dict_table_t* table,
+ ulint n_io_buffers,
+ PageCallback& callback)
+ __attribute__((nonnull, warn_unused_result));
+
+/*******************************************************************//**
+Checks if a single-table tablespace for a given table name exists in the
+tablespace memory cache.
+@return space id, ULINT_UNDEFINED if not found */
+UNIV_INTERN
+ulint
+fil_get_space_id_for_table(
+/*=======================*/
+ const char* name); /*!< in: table name in the standard
+ 'databasename/tablename' format */
+
+/**
+Iterate over all the spaces in the space list and fetch the
+tablespace names. It will return a copy of the name that must be
+freed by the caller using: delete[].
+@return DB_SUCCESS if all OK. */
+UNIV_INTERN
+dberr_t
+fil_get_space_names(
+/*================*/
+ space_name_list_t& space_name_list)
+ /*!< in/out: Vector for collecting the names. */
+ __attribute__((warn_unused_result));
+
/****************************************************************//**
Generate redo logs for swapping two .ibd files */
UNIV_INTERN
@@ -754,8 +980,5 @@ fil_mtr_rename_log(
const char* tmp_name); /*!< in: temp table name used while
swapping */
-typedef struct fil_space_struct fil_space_t;
-
#endif /* !UNIV_INNOCHECKSUM */
-
-#endif
+#endif /* fil0fil_h */
diff --git a/storage/innobase/include/fsp0fsp.h b/storage/innobase/include/fsp0fsp.h
index 994783c2db9..a587ccc9f20 100644
--- a/storage/innobase/include/fsp0fsp.h
+++ b/storage/innobase/include/fsp0fsp.h
@@ -50,11 +50,15 @@ to the two Barracuda row formats COMPRESSED and DYNAMIC. */
#define FSP_FLAGS_WIDTH_ATOMIC_BLOBS 1
/** Number of flag bits used to indicate the tablespace page size */
#define FSP_FLAGS_WIDTH_PAGE_SSIZE 4
+/** Width of the DATA_DIR flag. This flag indicates that the tablespace
+is found in a remote location, not the default data directory. */
+#define FSP_FLAGS_WIDTH_DATA_DIR 1
/** Width of all the currently known tablespace flags */
#define FSP_FLAGS_WIDTH (FSP_FLAGS_WIDTH_POST_ANTELOPE \
+ FSP_FLAGS_WIDTH_ZIP_SSIZE \
+ FSP_FLAGS_WIDTH_ATOMIC_BLOBS \
- + FSP_FLAGS_WIDTH_PAGE_SSIZE)
+ + FSP_FLAGS_WIDTH_PAGE_SSIZE \
+ + FSP_FLAGS_WIDTH_DATA_DIR)
/** A mask of all the known/used bits in tablespace flags */
#define FSP_FLAGS_MASK (~(~0 << FSP_FLAGS_WIDTH))
@@ -71,8 +75,11 @@ to the two Barracuda row formats COMPRESSED and DYNAMIC. */
#define FSP_FLAGS_POS_PAGE_SSIZE (FSP_FLAGS_POS_ATOMIC_BLOBS \
+ FSP_FLAGS_WIDTH_ATOMIC_BLOBS)
/** Zero relative shift position of the start of the UNUSED bits */
-#define FSP_FLAGS_POS_UNUSED (FSP_FLAGS_POS_PAGE_SSIZE \
+#define FSP_FLAGS_POS_DATA_DIR (FSP_FLAGS_POS_PAGE_SSIZE \
+ FSP_FLAGS_WIDTH_PAGE_SSIZE)
+/** Zero relative shift position of the start of the UNUSED bits */
+#define FSP_FLAGS_POS_UNUSED (FSP_FLAGS_POS_DATA_DIR \
+ + FSP_FLAGS_WIDTH_DATA_DIR)
/** Bit mask of the POST_ANTELOPE field */
#define FSP_FLAGS_MASK_POST_ANTELOPE \
@@ -90,6 +97,10 @@ to the two Barracuda row formats COMPRESSED and DYNAMIC. */
#define FSP_FLAGS_MASK_PAGE_SSIZE \
((~(~0 << FSP_FLAGS_WIDTH_PAGE_SSIZE)) \
<< FSP_FLAGS_POS_PAGE_SSIZE)
+/** Bit mask of the DATA_DIR field */
+#define FSP_FLAGS_MASK_DATA_DIR \
+ ((~(~0 << FSP_FLAGS_WIDTH_DATA_DIR)) \
+ << FSP_FLAGS_POS_DATA_DIR)
/** Return the value of the POST_ANTELOPE field */
#define FSP_FLAGS_GET_POST_ANTELOPE(flags) \
@@ -107,6 +118,10 @@ to the two Barracuda row formats COMPRESSED and DYNAMIC. */
#define FSP_FLAGS_GET_PAGE_SSIZE(flags) \
((flags & FSP_FLAGS_MASK_PAGE_SSIZE) \
>> FSP_FLAGS_POS_PAGE_SSIZE)
+/** Return the value of the DATA_DIR field */
+#define FSP_FLAGS_HAS_DATA_DIR(flags) \
+ ((flags & FSP_FLAGS_MASK_DATA_DIR) \
+ >> FSP_FLAGS_POS_DATA_DIR)
/** Return the contents of the UNUSED bits */
#define FSP_FLAGS_GET_UNUSED(flags) \
(flags >> FSP_FLAGS_POS_UNUSED)
@@ -555,6 +570,17 @@ fseg_free_page(
ulint page, /*!< in: page offset */
mtr_t* mtr); /*!< in/out: mini-transaction */
/**********************************************************************//**
+Checks if a single page of a segment is free.
+@return true if free */
+UNIV_INTERN
+bool
+fseg_page_is_free(
+/*==============*/
+ fseg_header_t* seg_header, /*!< in: segment header */
+ ulint space, /*!< in: space id */
+ ulint page) /*!< in: page offset */
+ __attribute__((nonnull, warn_unused_result));
+/**********************************************************************//**
Frees part of a segment. This function can be used to free a segment
by repeatedly calling this function in different mini-transactions.
Doing the freeing in a single mini-transaction might result in
@@ -643,12 +669,13 @@ tablespace header at offset FSP_SPACE_FLAGS. They should be 0 for
ROW_FORMAT=COMPACT and ROW_FORMAT=REDUNDANT. The newer row formats,
COMPRESSED and DYNAMIC, use a file format > Antelope so they should
have a file format number plus the DICT_TF_COMPACT bit set.
-@return ulint containing the validated tablespace flags. */
+@return true if check ok */
UNIV_INLINE
-ulint
-fsp_flags_validate(
+bool
+fsp_flags_is_valid(
/*===============*/
- ulint flags); /*!< in: tablespace flags */
+ ulint flags) /*!< in: tablespace flags */
+ __attribute__((warn_unused_result, const));
/********************************************************************//**
Determine if the tablespace is compressed from dict_table_t::flags.
@return TRUE if compressed, FALSE if not compressed */
@@ -658,6 +685,40 @@ fsp_flags_is_compressed(
/*====================*/
ulint flags); /*!< in: tablespace flags */
+/********************************************************************//**
+Calculates the descriptor index within a descriptor page.
+@return descriptor index */
+UNIV_INLINE
+ulint
+xdes_calc_descriptor_index(
+/*=======================*/
+ ulint zip_size, /*!< in: compressed page size in bytes;
+ 0 for uncompressed pages */
+ ulint offset); /*!< in: page offset */
+
+/**********************************************************************//**
+Gets a descriptor bit of a page.
+@return TRUE if free */
+UNIV_INLINE
+ibool
+xdes_get_bit(
+/*=========*/
+ const xdes_t* descr, /*!< in: descriptor */
+ ulint bit, /*!< in: XDES_FREE_BIT or XDES_CLEAN_BIT */
+ ulint offset);/*!< in: page offset within extent:
+ 0 ... FSP_EXTENT_SIZE - 1 */
+
+/********************************************************************//**
+Calculates the page where the descriptor of a page resides.
+@return descriptor page offset */
+UNIV_INLINE
+ulint
+xdes_calc_descriptor_page(
+/*======================*/
+ ulint zip_size, /*!< in: compressed page size in bytes;
+ 0 for uncompressed pages */
+ ulint offset); /*!< in: page offset */
+
#endif /* !UNIV_INNOCHECKSUM */
/********************************************************************//**
@@ -669,7 +730,7 @@ UNIV_INLINE
ulint
fsp_flags_get_zip_size(
/*====================*/
- ulint flags); /*!< in: tablespace flags */
+ ulint flags); /*!< in: tablespace flags */
/********************************************************************//**
Extract the page size from tablespace flags.
@return page size of the tablespace in bytes */
@@ -677,16 +738,7 @@ UNIV_INLINE
ulint
fsp_flags_get_page_size(
/*====================*/
- ulint flags); /*!< in: tablespace flags */
-
-/********************************************************************//**
-Set page size */
-UNIV_INLINE
-ulint
-fsp_flags_set_page_size(
-/*====================*/
- ulint flags, /*!< in: tablespace flags */
- ulint page_size); /*!< in: page size in bytes */
+ ulint flags); /*!< in: tablespace flags */
#ifndef UNIV_NONINL
#include "fsp0fsp.ic"
diff --git a/storage/innobase/include/fsp0fsp.ic b/storage/innobase/include/fsp0fsp.ic
index 498f9000888..0d81e817cc9 100644
--- a/storage/innobase/include/fsp0fsp.ic
+++ b/storage/innobase/include/fsp0fsp.ic
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -51,11 +51,10 @@ tablespace header at offset FSP_SPACE_FLAGS. They should be 0 for
ROW_FORMAT=COMPACT and ROW_FORMAT=REDUNDANT. The newer row formats,
COMPRESSED and DYNAMIC, use a file format > Antelope so they should
have a file format number plus the DICT_TF_COMPACT bit set.
-@return Same as input after validating it as FSP_SPACE_FLAGS.
-If there is an error, trigger assertion failure. */
+@return true if check ok */
UNIV_INLINE
-ulint
-fsp_flags_validate(
+bool
+fsp_flags_is_valid(
/*===============*/
ulint flags) /*!< in: tablespace flags */
{
@@ -65,16 +64,20 @@ fsp_flags_validate(
ulint page_ssize = FSP_FLAGS_GET_PAGE_SSIZE(flags);
ulint unused = FSP_FLAGS_GET_UNUSED(flags);
- /* Make sure there are no bits that we do not know about. */
- ut_a(unused == 0);
+ DBUG_EXECUTE_IF("fsp_flags_is_valid_failure", return(false););
/* fsp_flags is zero unless atomic_blobs is set. */
- ut_a(flags != 1);
- if (post_antelope) {
+ /* Make sure there are no bits that we do not know about. */
+ if (unused != 0 || flags == 1) {
+ return(false);
+ } else if (post_antelope) {
/* The Antelope row formats REDUNDANT and COMPACT did
not use tablespace flags, so this flag and the entire
4-byte field is zero for Antelope row formats. */
- ut_a(atomic_blobs);
+
+ if (!atomic_blobs) {
+ return(false);
+ }
}
if (!atomic_blobs) {
@@ -82,27 +85,33 @@ fsp_flags_validate(
the page structure introduced for the COMPACT row format
by allowing long fields to be broken into prefix and
externally stored parts. */
- ut_a(!post_antelope);
- ut_a(zip_ssize == 0);
- } else {
- ut_a(post_antelope);
- /* Validate the zip shift size is within allowed range. */
- ut_a(zip_ssize <= PAGE_ZIP_SSIZE_MAX);
- }
+ if (post_antelope || zip_ssize != 0) {
+ return(false);
+ }
+
+ } else if (!post_antelope || zip_ssize > PAGE_ZIP_SSIZE_MAX) {
+ return(false);
+ } else if (page_ssize > UNIV_PAGE_SSIZE_MAX) {
+
+ /* The page size field can be used for any row type, or it may
+ be zero for an original 16k page size.
+ Validate the page shift size is within allowed range. */
+
+ return(false);
- /* The page size field can be used for any row type, or it may
- be zero for an original 16k page size.
- Validate the page shift size is within allowed range. */
- ut_a(page_ssize <= UNIV_PAGE_SSIZE_MAX);
- ut_a((UNIV_PAGE_SIZE == UNIV_PAGE_SIZE_ORIG) || (page_ssize));
+ } else if (UNIV_PAGE_SIZE != UNIV_PAGE_SIZE_ORIG && !page_ssize) {
+ return(false);
+ }
#if UNIV_FORMAT_MAX != UNIV_FORMAT_B
# error "UNIV_FORMAT_MAX != UNIV_FORMAT_B, Add more validations."
#endif
- /* Return the flags sent in if we did not fail an assert. */
- return(flags);
+ /* The DATA_DIR field can be used for any row type so there is
+ nothing here to validate. */
+
+ return(true);
}
/********************************************************************//**
@@ -208,9 +217,98 @@ fsp_flags_set_page_size(
flags = FSP_FLAGS_SET_PAGE_SSIZE(flags, ssize);
- ut_ad(flags == fsp_flags_validate(flags));
+ ut_ad(fsp_flags_is_valid(flags));
return(flags);
}
+/********************************************************************//**
+Calculates the descriptor index within a descriptor page.
+@return descriptor index */
+UNIV_INLINE
+ulint
+xdes_calc_descriptor_index(
+/*=======================*/
+ ulint zip_size, /*!< in: compressed page size in bytes;
+ 0 for uncompressed pages */
+ ulint offset) /*!< in: page offset */
+{
+ ut_ad(ut_is_2pow(zip_size));
+
+ if (zip_size == 0) {
+ return(ut_2pow_remainder(offset, UNIV_PAGE_SIZE)
+ / FSP_EXTENT_SIZE);
+ } else {
+ return(ut_2pow_remainder(offset, zip_size) / FSP_EXTENT_SIZE);
+ }
+}
+
+/**********************************************************************//**
+Gets a descriptor bit of a page.
+@return TRUE if free */
+UNIV_INLINE
+ibool
+xdes_get_bit(
+/*=========*/
+ const xdes_t* descr, /*!< in: descriptor */
+ ulint bit, /*!< in: XDES_FREE_BIT or XDES_CLEAN_BIT */
+ ulint offset) /*!< in: page offset within extent:
+ 0 ... FSP_EXTENT_SIZE - 1 */
+{
+ ut_ad(offset < FSP_EXTENT_SIZE);
+ ut_ad(bit == XDES_FREE_BIT || bit == XDES_CLEAN_BIT);
+
+ ulint index = bit + XDES_BITS_PER_PAGE * offset;
+
+ ulint bit_index = index % 8;
+ ulint byte_index = index / 8;
+
+ return(ut_bit_get_nth(
+ mach_read_ulint(descr + XDES_BITMAP + byte_index,
+ MLOG_1BYTE),
+ bit_index));
+}
+
+/********************************************************************//**
+Calculates the page where the descriptor of a page resides.
+@return descriptor page offset */
+UNIV_INLINE
+ulint
+xdes_calc_descriptor_page(
+/*======================*/
+ ulint zip_size, /*!< in: compressed page size in bytes;
+ 0 for uncompressed pages */
+ ulint offset) /*!< in: page offset */
+{
+#ifndef DOXYGEN /* Doxygen gets confused by these */
+# if UNIV_PAGE_SIZE_MAX <= XDES_ARR_OFFSET \
+ + (UNIV_PAGE_SIZE_MAX / FSP_EXTENT_SIZE_MAX) \
+ * XDES_SIZE_MAX
+# error
+# endif
+# if UNIV_ZIP_SIZE_MIN <= XDES_ARR_OFFSET \
+ + (UNIV_ZIP_SIZE_MIN / FSP_EXTENT_SIZE_MIN) \
+ * XDES_SIZE_MIN
+# error
+# endif
+#endif /* !DOXYGEN */
+
+ ut_ad(UNIV_PAGE_SIZE > XDES_ARR_OFFSET
+ + (UNIV_PAGE_SIZE / FSP_EXTENT_SIZE)
+ * XDES_SIZE);
+ ut_ad(UNIV_ZIP_SIZE_MIN > XDES_ARR_OFFSET
+ + (UNIV_ZIP_SIZE_MIN / FSP_EXTENT_SIZE)
+ * XDES_SIZE);
+
+ ut_ad(ut_is_2pow(zip_size));
+
+ if (zip_size == 0) {
+ return(ut_2pow_round(offset, UNIV_PAGE_SIZE));
+ } else {
+ ut_ad(zip_size > XDES_ARR_OFFSET
+ + (zip_size / FSP_EXTENT_SIZE) * XDES_SIZE);
+ return(ut_2pow_round(offset, zip_size));
+ }
+}
+
#endif /* !UNIV_INNOCHECKSUM */
diff --git a/storage/innobase/include/fts0ast.h b/storage/innobase/include/fts0ast.h
index da40e2bbc96..7f2525dc450 100644
--- a/storage/innobase/include/fts0ast.h
+++ b/storage/innobase/include/fts0ast.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2007, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2007, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -29,7 +29,7 @@ Created 2007/03/16/03 Sunny Bains
#include "mem0mem.h"
/* The type of AST Node */
-enum fts_ast_type_enum {
+enum fts_ast_type_t {
FTS_AST_OPER, /*!< Operator */
FTS_AST_NUMB, /*!< Number */
FTS_AST_TERM, /*!< Term (or word) */
@@ -39,7 +39,7 @@ enum fts_ast_type_enum {
};
/* The FTS query operators that we support */
-enum fts_ast_oper_enum {
+enum fts_ast_oper_t {
FTS_NONE, /*!< No operator */
FTS_IGNORE, /*!< Ignore rows that contain
@@ -58,20 +58,18 @@ enum fts_ast_oper_enum {
FTS_DECR_RATING, /*!< Decrease the rank for this
word*/
- FTS_DISTANCE /*!< Proximity distance */
+ FTS_DISTANCE, /*!< Proximity distance */
+ FTS_IGNORE_SKIP /*!< Transient node operator
+ signifies that this is a
+ FTS_IGNORE node, and ignored in
+ the first pass of
+ fts_ast_visit() */
};
-/* Enum types used by the FTS parser */
-typedef enum fts_ast_type_enum fts_ast_type_t;
-typedef enum fts_ast_oper_enum fts_ast_oper_t;
-
/* Data types used by the FTS parser */
-typedef struct fts_lexer_struct fts_lexer_t;
-typedef struct fts_ast_text_struct fts_ast_text_t;
-typedef struct fts_ast_term_struct fts_ast_term_t;
-typedef struct fts_ast_node_struct fts_ast_node_t;
-typedef struct fts_ast_list_struct fts_ast_list_t;
-typedef struct fts_ast_state_struct fts_ast_state_t;
+struct fts_lexer_t;
+struct fts_ast_node_t;
+struct fts_ast_state_t;
typedef ulint (*fts_ast_callback)(fts_ast_oper_t, fts_ast_node_t*, void*);
@@ -180,60 +178,76 @@ fts_ast_state_free(
/*===============*/
fts_ast_state_t*state); /*!< in: state instance
to free */
-/********************************************************************
-Traverse the AST.*/
-ulint
+/******************************************************************//**
+Traverse the AST - in-order traversal.
+@return DB_SUCCESS if all went well */
+UNIV_INTERN
+dberr_t
fts_ast_visit(
/*==========*/
fts_ast_oper_t oper, /*!< in: FTS operator */
fts_ast_node_t* node, /*!< in: instance to traverse*/
fts_ast_callback visitor, /*!< in: callback */
- void* arg); /*!< in: callback arg */
-/********************************************************************
-Traverse the sub expression list.*/
-ulint
+ void* arg, /*!< in: callback arg */
+ bool* has_ignore) /*!< out: whether we encounter
+ and ignored processing an
+ operator, currently we only
+ ignore FTS_IGNORE operator */
+ __attribute__((nonnull, warn_unused_result));
+/*****************************************************************//**
+Process (nested) sub-expression, create a new result set to store the
+sub-expression result by processing nodes under current sub-expression
+list. Merge the sub-expression result with that of parent expression list.
+@return DB_SUCCESS if all went well */
+UNIV_INTERN
+dberr_t
fts_ast_visit_sub_exp(
-/*==========*/
+/*==================*/
fts_ast_node_t* node, /*!< in: instance to traverse*/
fts_ast_callback visitor, /*!< in: callback */
- void* arg); /*!< in: callback arg */
+ void* arg) /*!< in: callback arg */
+ __attribute__((nonnull, warn_unused_result));
/********************************************************************
Create a lex instance.*/
+UNIV_INTERN
fts_lexer_t*
fts_lexer_create(
/*=============*/
ibool boolean_mode, /*!< in: query type */
const byte* query, /*!< in: query string */
- ulint query_len); /*!< in: query string len */
+ ulint query_len) /*!< in: query string len */
+ __attribute__((nonnull, malloc, warn_unused_result));
/********************************************************************
Free an fts_lexer_t instance.*/
+UNIV_INTERN
void
fts_lexer_free(
/*===========*/
- fts_lexer_t* fts_lexer); /*!< in: lexer instance to
+ fts_lexer_t* fts_lexer) /*!< in: lexer instance to
free */
+ __attribute__((nonnull));
/* Query term type */
-struct fts_ast_term_struct {
+struct fts_ast_term_t {
byte* ptr; /*!< Pointer to term string.*/
ibool wildcard; /*!< TRUE if wild card set.*/
};
/* Query text type */
-struct fts_ast_text_struct {
+struct fts_ast_text_t {
byte* ptr; /*!< Pointer to term string.*/
ulint distance; /*!< > 0 if proximity distance
set */
};
/* The list of nodes in an expr list */
-struct fts_ast_list_struct {
+struct fts_ast_list_t {
fts_ast_node_t* head; /*!< Children list head */
fts_ast_node_t* tail; /*!< Children list tail */
};
/* FTS AST node to store the term, text, operator and sub-expressions.*/
-struct fts_ast_node_struct {
+struct fts_ast_node_t {
fts_ast_type_t type; /*!< The type of node */
fts_ast_text_t text; /*!< Text node */
fts_ast_term_t term; /*!< Term node */
@@ -241,10 +255,12 @@ struct fts_ast_node_struct {
fts_ast_list_t list; /*!< Expression list */
fts_ast_node_t* next; /*!< Link for expr list */
fts_ast_node_t* next_alloc; /*!< For tracking allocations */
+ bool visited; /*!< whether this node is
+ already processed */
};
/* To track state during parsing */
-struct fts_ast_state_struct {
+struct fts_ast_state_t {
mem_heap_t* heap; /*!< Heap to use for alloc */
fts_ast_node_t* root; /*!< If all goes OK, then this
will point to the root.*/
diff --git a/storage/innobase/include/fts0fts.h b/storage/innobase/include/fts0fts.h
index e515772bdbd..f2f8617012a 100644
--- a/storage/innobase/include/fts0fts.h
+++ b/storage/innobase/include/fts0fts.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2011, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2011, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -114,16 +114,16 @@ to mark invalid states.
NOTE: Do not change the order or value of these, fts_trx_row_get_new_state
depends on them being exactly as they are. */
-typedef enum {
+enum fts_row_state {
FTS_INSERT = 0,
FTS_MODIFY,
FTS_DELETE,
FTS_NOTHING,
FTS_INVALID
-} fts_row_state;
+};
/** The FTS table types. */
-enum fts_table_type_enum {
+enum fts_table_type_t {
FTS_INDEX_TABLE, /*!< FTS auxiliary table that is
specific to a particular FTS index
on a table */
@@ -132,21 +132,11 @@ enum fts_table_type_enum {
for all FTS index on a table */
};
-typedef struct fts_struct fts_t;
-typedef struct fts_doc_struct fts_doc_t;
-typedef struct fts_trx_struct fts_trx_t;
-typedef struct fts_table_struct fts_table_t;
-typedef struct fts_cache_struct fts_cache_t;
-typedef struct fts_token_struct fts_token_t;
-typedef struct fts_string_struct fts_string_t;
-typedef struct fts_result_struct fts_result_t;
-typedef struct fts_ranking_struct fts_ranking_t;
-typedef struct fts_trx_row_struct fts_trx_row_t;
-typedef struct fts_doc_ids_struct fts_doc_ids_t;
-typedef enum fts_table_type_enum fts_table_type_t;
-typedef struct fts_trx_table_struct fts_trx_table_t;
-typedef struct fts_savepoint_struct fts_savepoint_t;
-typedef struct fts_index_cache_struct fts_index_cache_t;
+struct fts_doc_t;
+struct fts_cache_t;
+struct fts_token_t;
+struct fts_doc_ids_t;
+struct fts_index_cache_t;
/** Initialize the "fts_table" for internal query into FTS auxiliary
@@ -172,7 +162,7 @@ do { \
/** Information about changes in a single transaction affecting
the FTS system. */
-struct fts_trx_struct {
+struct fts_trx_t {
trx_t* trx; /*!< InnoDB transaction */
ib_vector_t* savepoints; /*!< Active savepoints, must have at
@@ -184,7 +174,7 @@ struct fts_trx_struct {
};
/** Information required for transaction savepoint handling. */
-struct fts_savepoint_struct {
+struct fts_savepoint_t {
char* name; /*!< First entry is always NULL, the
default instance. Otherwise the name
of the savepoint */
@@ -193,7 +183,7 @@ struct fts_savepoint_struct {
};
/** Information about changed rows in a transaction for a single table. */
-struct fts_trx_table_struct {
+struct fts_trx_table_t {
dict_table_t* table; /*!< table */
fts_trx_t* fts_trx; /*!< link to parent */
@@ -209,7 +199,7 @@ struct fts_trx_table_struct {
};
/** Information about one changed row in a transaction. */
-struct fts_trx_row_struct {
+struct fts_trx_row_t {
doc_id_t doc_id; /*!< Id of the ins/upd/del document */
fts_row_state state; /*!< state of the row */
@@ -220,7 +210,7 @@ struct fts_trx_row_struct {
/** List of document ids that were added during a transaction. This
list is passed on to a background 'Add' thread and OPTIMIZE, so it
needs its own memory heap. */
-struct fts_doc_ids_struct {
+struct fts_doc_ids_t {
ib_vector_t* doc_ids; /*!< document ids (each element is
of type doc_id_t). */
@@ -237,7 +227,7 @@ as our in-memory format. This typedef is a single such character. */
typedef unsigned short ib_uc_t;
/** An UTF-16 ro UTF-8 string. */
-struct fts_string_struct {
+struct fts_string_t {
byte* f_str; /*!< string, not necessary terminated in
any way */
ulint f_len; /*!< Length of the string in bytes */
@@ -245,7 +235,7 @@ struct fts_string_struct {
};
/** Query ranked doc ids. */
-struct fts_ranking_struct {
+struct fts_ranking_t {
doc_id_t doc_id; /*!< Document id */
fts_rank_t rank; /*!< Rank is between 0 .. 1 */
@@ -256,7 +246,7 @@ struct fts_ranking_struct {
};
/** Query result. */
-struct fts_result_struct {
+struct fts_result_t {
ib_rbt_node_t* current; /*!< Current element */
ib_rbt_t* rankings_by_id; /*!< RB tree of type fts_ranking_t
@@ -268,7 +258,7 @@ struct fts_result_struct {
/** This is used to generate the FTS auxiliary table name, we need the
table id and the index id to generate the column specific FTS auxiliary
table name. */
-struct fts_table_struct {
+struct fts_table_t {
const char* parent; /*!< Parent table name, this is
required only for the database
name */
@@ -311,10 +301,10 @@ enum fts_status {
typedef enum fts_status fts_status_t;
/** The state of the FTS sub system. */
-struct fts_struct {
+struct fts_t {
/*!< mutex protecting bg_threads* and
fts_add_wq. */
- mutex_t bg_threads_mutex;
+ ib_mutex_t bg_threads_mutex;
ulint bg_threads; /*!< number of background threads
accessing this table */
@@ -339,10 +329,10 @@ struct fts_struct {
ib_vector_t* indexes; /*!< Vector of FTS indexes, this is
mainly for caching purposes. */
- mem_heap_t* fts_heap; /*!< heap for fts_struct allocation */
+ mem_heap_t* fts_heap; /*!< heap for fts_t allocation */
};
-typedef struct fts_stopword_struct fts_stopword_t;
+struct fts_stopword_t;
/** status bits for fts_stopword_t status field. */
#define STOPWORD_NOT_INIT 0x1
@@ -395,15 +385,15 @@ fts_cache_index_cache_create(
/******************************************************************//**
Get the next available document id. This function creates a new
-transaction to generate the document id. */
+transaction to generate the document id.
+@return DB_SUCCESS if OK */
UNIV_INTERN
-ulint
+dberr_t
fts_get_next_doc_id(
/*================*/
- /*!< out: DB_SUCCESS if OK */
- const dict_table_t* table, /*!< in: table */
- doc_id_t* doc_id); /*!< out: new document id */
-
+ const dict_table_t* table, /*!< in: table */
+ doc_id_t* doc_id) /*!< out: new document id */
+ __attribute__((nonnull));
/*********************************************************************//**
Update the next and last Doc ID in the CONFIG table to be the input
"doc_id" value (+ 1). We would do so after each FTS index build or
@@ -412,28 +402,17 @@ UNIV_INTERN
void
fts_update_next_doc_id(
/*===================*/
+ trx_t* trx, /*!< in/out: transaction */
const dict_table_t* table, /*!< in: table */
- const char* table_name, /*!< in: table name */
- doc_id_t doc_id); /*!< in: DOC ID to set */
-
-/******************************************************************//**
-Update the last document id. This function could create a new
-transaction to update the last document id. */
-UNIV_INTERN
-ulint
-fts_update_sync_doc_id(
-/*===================*/
- /*!< out: DB_SUCCESS if OK */
- const dict_table_t* table, /*!< in: table */
- const char* table_name, /*!< in: table name */
- doc_id_t doc_id, /*!< in: last document id */
- trx_t* trx); /*!< in: update trx */
+ const char* table_name, /*!< in: table name, or NULL */
+ doc_id_t doc_id) /*!< in: DOC ID to set */
+ __attribute__((nonnull(2)));
/******************************************************************//**
Create a new document id .
@return DB_SUCCESS if all went well else error */
UNIV_INTERN
-ulint
+dberr_t
fts_create_doc_id(
/*==============*/
dict_table_t* table, /*!< in: row is of this
@@ -442,8 +421,8 @@ fts_create_doc_id(
value to this row. This is the
current row that is being
inserted. */
- mem_heap_t* heap); /*!< in: heap */
-
+ mem_heap_t* heap) /*!< in: heap */
+ __attribute__((nonnull));
/******************************************************************//**
Create a new fts_doc_ids_t.
@return new fts_doc_ids_t. */
@@ -488,7 +467,7 @@ on the given table. row_mysql_lock_data_dictionary must have been
called before this.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_create_common_tables(
/*=====================*/
trx_t* trx, /*!< in: transaction handle */
@@ -496,27 +475,27 @@ fts_create_common_tables(
table, /*!< in: table with one FTS
index */
const char* name, /*!< in: table name */
- ibool skip_doc_id_index);
- /*!< in: Skip index on doc id */
+ bool skip_doc_id_index) /*!< in: Skip index on doc id */
+ __attribute__((nonnull, warn_unused_result));
/******************************************************************//**
Wrapper function of fts_create_index_tables_low(), create auxiliary
tables for an FTS index
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_create_index_tables(
/*====================*/
trx_t* trx, /*!< in: transaction handle */
- const dict_index_t* index); /*!< in: the FTS index
+ const dict_index_t* index) /*!< in: the FTS index
instance */
-
+ __attribute__((nonnull, warn_unused_result));
/******************************************************************//**
Creates the column specific ancillary tables needed for supporting an
FTS index on the given table. row_mysql_lock_data_dictionary must have
been called before this.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_create_index_tables_low(
/*========================*/
trx_t* trx, /*!< in: transaction handle */
@@ -524,16 +503,17 @@ fts_create_index_tables_low(
index, /*!< in: the FTS index
instance */
const char* table_name, /*!< in: the table name */
- table_id_t table_id); /*!< in: the table id */
-
+ table_id_t table_id) /*!< in: the table id */
+ __attribute__((nonnull, warn_unused_result));
/******************************************************************//**
Add the FTS document id hidden column. */
UNIV_INTERN
void
fts_add_doc_id_column(
/*==================*/
- dict_table_t* table); /*!< in/out: Table with
- FTS index */
+ dict_table_t* table, /*!< in/out: Table with FTS index */
+ mem_heap_t* heap) /*!< in: temporary memory heap, or NULL */
+ __attribute__((nonnull(1)));
/*********************************************************************//**
Drops the ancillary tables needed for supporting an FTS index on the
@@ -541,28 +521,29 @@ given table. row_mysql_lock_data_dictionary must have been called before
this.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_drop_tables(
/*============*/
trx_t* trx, /*!< in: transaction */
- dict_table_t* table); /*!< in: table has the FTS
+ dict_table_t* table) /*!< in: table has the FTS
index */
-
+ __attribute__((nonnull));
/******************************************************************//**
The given transaction is about to be committed; do whatever is necessary
from the FTS system's POV.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_commit(
/*=======*/
- trx_t* trx); /*!< in: transaction */
+ trx_t* trx) /*!< in: transaction */
+ __attribute__((nonnull, warn_unused_result));
/*******************************************************************//**
FTS Query entry point.
@return DB_SUCCESS if successful otherwise error code */
UNIV_INTERN
-ulint
+dberr_t
fts_query(
/*======*/
trx_t* trx, /*!< in: transaction */
@@ -571,8 +552,9 @@ fts_query(
const byte* query, /*!< in: FTS query */
ulint query_len, /*!< in: FTS query string len
in bytes */
- fts_result_t** result); /*!< out: query result, to be
+ fts_result_t** result) /*!< out: query result, to be
freed by the caller.*/
+ __attribute__((nonnull, warn_unused_result));
/******************************************************************//**
Retrieve the FTS Relevance Ranking result for doc with doc_id
@@ -686,10 +668,11 @@ fts_free(
Run OPTIMIZE on the given table.
@return DB_SUCCESS if all OK */
UNIV_INTERN
-ulint
+dberr_t
fts_optimize_table(
/*===============*/
- dict_table_t* table); /*!< in: table to optimiza */
+ dict_table_t* table) /*!< in: table to optimiza */
+ __attribute__((nonnull));
/**********************************************************************//**
Startup the optimize thread and create the work queue. */
@@ -710,11 +693,12 @@ fts_optimize_is_init(void);
Drops index ancillary tables for a FTS index
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_drop_index_tables(
/*==================*/
trx_t* trx, /*!< in: transaction */
- dict_index_t* index); /*!< in: Index to drop */
+ dict_index_t* index) /*!< in: Index to drop */
+ __attribute__((nonnull, warn_unused_result));
/******************************************************************//**
Remove the table from the OPTIMIZER's list. We do wait for
@@ -740,24 +724,22 @@ fts_optimize_end(void);
/*===================*/
/**********************************************************************//**
-Take a FTS savepoint.
-@return DB_SUCCESS or error code */
+Take a FTS savepoint. */
UNIV_INTERN
void
fts_savepoint_take(
/*===============*/
trx_t* trx, /*!< in: transaction */
- const char* name); /*!< in: savepoint name */
-
+ const char* name) /*!< in: savepoint name */
+ __attribute__((nonnull));
/**********************************************************************//**
-Refresh last statement savepoint.
-@return DB_SUCCESS or error code */
+Refresh last statement savepoint. */
UNIV_INTERN
void
fts_savepoint_laststmt_refresh(
/*===========================*/
- trx_t* trx); /*!< in: transaction */
-
+ trx_t* trx) /*!< in: transaction */
+ __attribute__((nonnull));
/**********************************************************************//**
Release the savepoint data identified by name. */
UNIV_INTERN
@@ -821,26 +803,26 @@ fts_drop_orphaned_tables(void);
/*==========================*/
/******************************************************************//**
-Since we do a horizontal split on the index table, we need to drop the
-all the split tables. */
+Since we do a horizontal split on the index table, we need to drop
+all the split tables.
+@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_drop_index_split_tables(
/*========================*/
- /*!< out: DB_SUCCESS
- or error code */
trx_t* trx, /*!< in: transaction */
- dict_index_t* index); /*!< in: fts instance */
+ dict_index_t* index) /*!< in: fts instance */
+ __attribute__((nonnull, warn_unused_result));
/****************************************************************//**
Run SYNC on the table, i.e., write out data from the cache to the
-FTS auxiliary INDEX table and clear the cache at the end.
-@return DB_SUCCESS if all OK */
+FTS auxiliary INDEX table and clear the cache at the end. */
UNIV_INTERN
-ulint
+void
fts_sync_table(
/*===========*/
- dict_table_t* table); /*!< in: table */
+ dict_table_t* table) /*!< in: table */
+ __attribute__((nonnull));
/****************************************************************//**
Free the query graph but check whether dict_sys->mutex is already
@@ -978,9 +960,9 @@ fts_get_docs_create(
/****************************************************************//**
Read the rows from the FTS index
-@return vector of rows fetched */
+@return DB_SUCCESS if OK */
UNIV_INTERN
-ulint
+dberr_t
fts_table_fetch_doc_ids(
/*====================*/
trx_t* trx, /*!< in: transaction */
@@ -1011,12 +993,13 @@ fts_add_index(
Drop auxiliary tables related to an FTS index
@return DB_SUCCESS or error number */
UNIV_INTERN
-ulint
+dberr_t
fts_drop_index(
/*===========*/
dict_table_t* table, /*!< in: Table where indexes are dropped */
dict_index_t* index, /*!< in: Index to be dropped */
- trx_t* trx); /*!< in: Transaction for the drop */
+ trx_t* trx) /*!< in: Transaction for the drop */
+ __attribute__((nonnull));
/*******************************************************************//**
Check indexes in the fts->indexes is also present in index cache and
diff --git a/storage/innobase/include/fts0priv.h b/storage/innobase/include/fts0priv.h
index 8524f988e47..c6aca27f6ec 100644
--- a/storage/innobase/include/fts0priv.h
+++ b/storage/innobase/include/fts0priv.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2011, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2011, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -113,23 +113,25 @@ component.
/******************************************************************//**
Parse an SQL string. %s is replaced with the table's id.
-@return DB_SUCCESS or error code */
+@return query graph */
UNIV_INTERN
que_t*
fts_parse_sql(
/*==========*/
fts_table_t* fts_table, /*!< in: FTS aux table */
pars_info_t* info, /*!< in: info struct, or NULL */
- const char* sql); /*!< in: SQL string to evaluate */
+ const char* sql) /*!< in: SQL string to evaluate */
+ __attribute__((nonnull(3), malloc, warn_unused_result));
/******************************************************************//**
Evaluate a parsed SQL statement
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_eval_sql(
/*=========*/
trx_t* trx, /*!< in: transaction */
- que_t* graph); /*!< in: Parsed statement */
+ que_t* graph) /*!< in: Parsed statement */
+ __attribute__((nonnull, warn_unused_result));
/******************************************************************//**
Construct the name of an ancillary FTS table for the given table.
@return own: table name, must be freed with mem_free() */
@@ -138,7 +140,8 @@ char*
fts_get_table_name(
/*===============*/
const fts_table_t*
- fts_table); /*!< in: FTS aux table info */
+ fts_table) /*!< in: FTS aux table info */
+ __attribute__((nonnull, malloc, warn_unused_result));
/******************************************************************//**
Construct the column specification part of the SQL string for selecting the
indexed FTS columns for the given table. Adds the necessary bound
@@ -160,7 +163,8 @@ fts_get_select_columns_str(
/*=======================*/
dict_index_t* index, /*!< in: FTS index */
pars_info_t* info, /*!< in/out: parser info */
- mem_heap_t* heap); /*!< in: memory heap */
+ mem_heap_t* heap) /*!< in: memory heap */
+ __attribute__((nonnull, warn_unused_result));
/** define for fts_doc_fetch_by_doc_id() "option" value, defines whether
we want to get Doc whose ID is equal to or greater or smaller than supplied
@@ -174,41 +178,45 @@ Fetch document (= a single row's indexed text) with the given
document id.
@return: DB_SUCCESS if fetch is successful, else error */
UNIV_INTERN
-ulint
+dberr_t
fts_doc_fetch_by_doc_id(
/*====================*/
fts_get_doc_t* get_doc, /*!< in: state */
doc_id_t doc_id, /*!< in: id of document to fetch */
- dict_index_t* index_to_use, /*!< in: caller supplied FTS index */
+ dict_index_t* index_to_use, /*!< in: caller supplied FTS index,
+ or NULL */
ulint option, /*!< in: search option, if it is
greater than doc_id or equal */
fts_sql_callback
callback, /*!< in: callback to read
records */
- void* arg); /*!< in: callback arg */
+ void* arg) /*!< in: callback arg */
+ __attribute__((nonnull(6)));
/*******************************************************************//**
Callback function for fetch that stores the text of an FTS document,
converting each column to UTF-16.
-@return: always returns NULL */
+@return always FALSE */
UNIV_INTERN
ibool
fts_query_expansion_fetch_doc(
/*==========================*/
void* row, /*!< in: sel_node_t* */
- void* user_arg); /*!< in: fts_doc_t* */
+ void* user_arg) /*!< in: fts_doc_t* */
+ __attribute__((nonnull));
/********************************************************************
Write out a single word's data as new entry/entries in the INDEX table.
@return DB_SUCCESS if all OK. */
UNIV_INTERN
-ulint
+dberr_t
fts_write_node(
/*===========*/
trx_t* trx, /*!< in: transaction */
que_t** graph, /*!< in: query graph */
fts_table_t* fts_table, /*!< in: the FTS aux index */
fts_string_t* word, /*!< in: word in UTF-8 */
- fts_node_t* node); /*!< in: node columns */
+ fts_node_t* node) /*!< in: node columns */
+ __attribute__((nonnull, warn_unused_result));
/*******************************************************************//**
Tokenize a document. */
UNIV_INTERN
@@ -217,8 +225,10 @@ fts_tokenize_document(
/*==================*/
fts_doc_t* doc, /*!< in/out: document to
tokenize */
- fts_doc_t* result); /*!< out: if provided, save
+ fts_doc_t* result) /*!< out: if provided, save
result tokens here */
+ __attribute__((nonnull(1)));
+
/*******************************************************************//**
Continue to tokenize a document. */
UNIV_INTERN
@@ -229,16 +239,18 @@ fts_tokenize_document_next(
tokenize */
ulint add_pos, /*!< in: add this position to all
tokens from this tokenization */
- fts_doc_t* result); /*!< out: if provided, save
+ fts_doc_t* result) /*!< out: if provided, save
result tokens here */
+ __attribute__((nonnull(1)));
/******************************************************************//**
-Create a new empty document.
-@return own: new document */
+Initialize a document. */
UNIV_INTERN
-fts_doc_t*
+void
fts_doc_init(
/*=========*/
- fts_doc_t* doc); /*!< in: doc to initialize */
+ fts_doc_t* doc) /*!< in: doc to initialize */
+ __attribute__((nonnull));
+
/******************************************************************//**
Do a binary search for a doc id in the array
@return +ve index if found -ve index where it should be
@@ -250,26 +262,29 @@ fts_bsearch(
fts_update_t* array, /*!< in: array to sort */
int lower, /*!< in: lower bound of array*/
int upper, /*!< in: upper bound of array*/
- doc_id_t doc_id); /*!< in: doc id to lookup */
+ doc_id_t doc_id) /*!< in: doc id to lookup */
+ __attribute__((nonnull, warn_unused_result));
/******************************************************************//**
Free document. */
UNIV_INTERN
void
fts_doc_free(
/*=========*/
- fts_doc_t* doc); /*!< in: document */
+ fts_doc_t* doc) /*!< in: document */
+ __attribute__((nonnull));
/******************************************************************//**
Free fts_optimizer_word_t instanace.*/
-
+UNIV_INTERN
void
fts_word_free(
/*==========*/
- fts_word_t* word); /*!< in: instance to free.*/
+ fts_word_t* word) /*!< in: instance to free.*/
+ __attribute__((nonnull));
/******************************************************************//**
Read the rows from the FTS inde
-@return vector of rows fetched */
+@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_index_fetch_nodes(
/*==================*/
trx_t* trx, /*!< in: transaction */
@@ -277,7 +292,8 @@ fts_index_fetch_nodes(
fts_table_t* fts_table, /*!< in: FTS aux table */
const fts_string_t*
word, /*!< in: the word to fetch */
- fts_fetch_t* fetch); /*!< in: fetch callback.*/
+ fts_fetch_t* fetch) /*!< in: fetch callback.*/
+ __attribute__((nonnull));
/******************************************************************//**
Create a fts_optimizer_word_t instance.
@return new instance */
@@ -287,7 +303,8 @@ fts_word_init(
/*==========*/
fts_word_t* word, /*!< in: word to initialize */
byte* utf8, /*!< in: UTF-8 string */
- ulint len); /*!< in: length of string in bytes */
+ ulint len) /*!< in: length of string in bytes */
+ __attribute__((nonnull));
/******************************************************************//**
Compare two fts_trx_table_t instances, we actually compare the
table id's here.
@@ -297,7 +314,8 @@ int
fts_trx_table_cmp(
/*==============*/
const void* v1, /*!< in: id1 */
- const void* v2); /*!< in: id2 */
+ const void* v2) /*!< in: id2 */
+ __attribute__((nonnull, warn_unused_result));
/******************************************************************//**
Compare a table id with a trx_table_t table id.
@return < 0 if n1 < n2, 0 if n1 == n2, > 0 if n1 > n2 */
@@ -306,23 +324,26 @@ int
fts_trx_table_id_cmp(
/*=================*/
const void* p1, /*!< in: id1 */
- const void* p2); /*!< in: id2 */
+ const void* p2) /*!< in: id2 */
+ __attribute__((nonnull, warn_unused_result));
/******************************************************************//**
Commit a transaction.
@return DB_SUCCESS if all OK */
UNIV_INTERN
-ulint
+dberr_t
fts_sql_commit(
/*===========*/
- trx_t* trx); /*!< in: transaction */
+ trx_t* trx) /*!< in: transaction */
+ __attribute__((nonnull));
/******************************************************************//**
Rollback a transaction.
@return DB_SUCCESS if all OK */
UNIV_INTERN
-ulint
+dberr_t
fts_sql_rollback(
/*=============*/
- trx_t* trx); /*!< in: transaction */
+ trx_t* trx) /*!< in: transaction */
+ __attribute__((nonnull));
/******************************************************************//**
Parse an SQL string. %s is replaced with the table's id. Don't acquire
the dict mutex
@@ -333,41 +354,44 @@ fts_parse_sql_no_dict_lock(
/*=======================*/
fts_table_t* fts_table, /*!< in: table with FTS index */
pars_info_t* info, /*!< in: parser info */
- const char* sql); /*!< in: SQL string to evaluate */
+ const char* sql) /*!< in: SQL string to evaluate */
+ __attribute__((nonnull(3), malloc, warn_unused_result));
/******************************************************************//**
Get value from config table. The caller must ensure that enough
space is allocated for value to hold the column contents
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_config_get_value(
/*=================*/
trx_t* trx, /* transaction */
fts_table_t* fts_table, /*!< in: the indexed FTS table */
const char* name, /*!< in: get config value for
this parameter name */
- fts_string_t* value); /*!< out: value read from
+ fts_string_t* value) /*!< out: value read from
config table */
+ __attribute__((nonnull));
/******************************************************************//**
Get value specific to an FTS index from the config table. The caller
must ensure that enough space is allocated for value to hold the
column contents.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_config_get_index_value(
/*=======================*/
trx_t* trx, /*!< transaction */
dict_index_t* index, /*!< in: index */
const char* param, /*!< in: get config value for
this parameter name */
- fts_string_t* value); /*!< out: value read from
+ fts_string_t* value) /*!< out: value read from
config table */
+ __attribute__((nonnull, warn_unused_result));
/******************************************************************//**
Set the value in the config table for name.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_config_set_value(
/*=================*/
trx_t* trx, /*!< transaction */
@@ -375,89 +399,96 @@ fts_config_set_value(
const char* name, /*!< in: get config value for
this parameter name */
const fts_string_t*
- value); /*!< in: value to update */
+ value) /*!< in: value to update */
+ __attribute__((nonnull));
/****************************************************************//**
Set an ulint value in the config table.
@return DB_SUCCESS if all OK else error code */
UNIV_INTERN
-ulint
+dberr_t
fts_config_set_ulint(
/*=================*/
trx_t* trx, /*!< in: transaction */
fts_table_t* fts_table, /*!< in: the indexed FTS table */
const char* name, /*!< in: param name */
- ulint int_value); /*!< in: value */
-
+ ulint int_value) /*!< in: value */
+ __attribute__((nonnull, warn_unused_result));
/******************************************************************//**
Set the value specific to an FTS index in the config table.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_config_set_index_value(
/*=======================*/
trx_t* trx, /*!< transaction */
dict_index_t* index, /*!< in: index */
const char* param, /*!< in: get config value for
this parameter name */
- fts_string_t* value); /*!< out: value read from
+ fts_string_t* value) /*!< out: value read from
config table */
+ __attribute__((nonnull, warn_unused_result));
/******************************************************************//**
Increment the value in the config table for column name.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_config_increment_value(
/*=======================*/
trx_t* trx, /*!< transaction */
fts_table_t* fts_table, /*!< in: the indexed FTS table */
const char* name, /*!< in: increment config value
for this parameter name */
- ulint delta); /*!< in: increment by this much */
+ ulint delta) /*!< in: increment by this much */
+ __attribute__((nonnull, warn_unused_result));
/******************************************************************//**
Increment the per index value in the config table for column name.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_config_increment_index_value(
/*=============================*/
trx_t* trx, /*!< transaction */
dict_index_t* index, /*!< in: FTS index */
const char* name, /*!< in: increment config value
for this parameter name */
- ulint delta); /*!< in: increment by this much */
+ ulint delta) /*!< in: increment by this much */
+ __attribute__((nonnull));
/******************************************************************//**
Get an ulint value from the config table.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_config_get_index_ulint(
/*=======================*/
trx_t* trx, /*!< in: transaction */
dict_index_t* index, /*!< in: FTS index */
const char* name, /*!< in: param name */
- ulint* int_value); /*!< out: value */
+ ulint* int_value) /*!< out: value */
+ __attribute__((nonnull, warn_unused_result));
/******************************************************************//**
Set an ulint value int the config table.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_config_set_index_ulint(
/*=======================*/
trx_t* trx, /*!< in: transaction */
dict_index_t* index, /*!< in: FTS index */
const char* name, /*!< in: param name */
- ulint int_value); /*!< in: value */
+ ulint int_value) /*!< in: value */
+ __attribute__((nonnull, warn_unused_result));
/******************************************************************//**
Get an ulint value from the config table.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_config_get_ulint(
/*=================*/
trx_t* trx, /*!< in: transaction */
fts_table_t* fts_table, /*!< in: the indexed FTS table */
const char* name, /*!< in: param name */
- ulint* int_value); /*!< out: value */
+ ulint* int_value) /*!< out: value */
+ __attribute__((nonnull));
/******************************************************************//**
Search cache for word.
@return the word node vector if found else NULL */
@@ -468,7 +499,8 @@ fts_cache_find_word(
const fts_index_cache_t*
index_cache, /*!< in: cache to search */
const fts_string_t*
- text); /*!< in: word to search for */
+ text) /*!< in: word to search for */
+ __attribute__((nonnull, warn_unused_result));
/******************************************************************//**
Check cache for deleted doc id.
@return TRUE if deleted */
@@ -478,7 +510,8 @@ fts_cache_is_deleted_doc_id(
/*========================*/
const fts_cache_t*
cache, /*!< in: cache ito search */
- doc_id_t doc_id); /*!< in: doc id to search for */
+ doc_id_t doc_id) /*!< in: doc id to search for */
+ __attribute__((nonnull, warn_unused_result));
/******************************************************************//**
Append deleted doc ids to vector and sort the vector. */
UNIV_INTERN
@@ -502,35 +535,31 @@ fts_wait_for_background_thread_to_start(
ulint max_wait); /*!< in: time in microseconds, if set
to 0 then it disables timeout
checking */
-/*********************************************************************//**
-Get the total number of documents in the FTS.
-@return estimated number of rows in the table */
-UNIV_INTERN
-ulint
-fts_get_total_document_count(
-/*=========================*/
- dict_table_t* table); /*!< in: table instance */
+#ifdef FTS_DOC_STATS_DEBUG
/******************************************************************//**
Get the total number of words in the FTS for a particular FTS index.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
fts_get_total_word_count(
/*=====================*/
trx_t* trx, /*!< in: transaction */
dict_index_t* index, /*!< in: for this index */
- ulint* total); /*!< out: total words */
+ ulint* total) /*!< out: total words */
+ __attribute__((nonnull, warn_unused_result));
+#endif
/******************************************************************//**
Search the index specific cache for a particular FTS index.
@return the index specific cache else NULL */
UNIV_INTERN
-const fts_index_cache_t*
+fts_index_cache_t*
fts_find_index_cache(
/*================*/
const fts_cache_t*
cache, /*!< in: cache to search */
const dict_index_t*
- index); /*!< in: index to search for */
+ index) /*!< in: index to search for */
+ __attribute__((nonnull, warn_unused_result));
/******************************************************************//**
Write the table id to the given buffer (including final NUL). Buffer must be
at least FTS_AUX_MIN_TABLE_ID_LENGTH bytes long.
@@ -539,8 +568,9 @@ UNIV_INLINE
int
fts_write_object_id(
/*================*/
- ib_id_t id, /*!< in: a table/index id */
- char* str); /*!< in: buffer to write the id to */
+ ib_id_t id, /*!< in: a table/index id */
+ char* str) /*!< in: buffer to write the id to */
+ __attribute__((nonnull));
/******************************************************************//**
Read the table id from the string generated by fts_write_object_id().
@return TRUE if parse successful */
@@ -549,7 +579,8 @@ ibool
fts_read_object_id(
/*===============*/
ib_id_t* id, /*!< out: a table id */
- const char* str); /*!< in: buffer to read from */
+ const char* str) /*!< in: buffer to read from */
+ __attribute__((nonnull, warn_unused_result));
/******************************************************************//**
Get the table id.
@return number of bytes written */
@@ -559,23 +590,26 @@ fts_get_table_id(
/*=============*/
const fts_table_t*
fts_table, /*!< in: FTS Auxiliary table */
- char* table_id); /*!< out: table id, must be at least
+ char* table_id) /*!< out: table id, must be at least
FTS_AUX_MIN_TABLE_ID_LENGTH bytes
long */
+ __attribute__((nonnull, warn_unused_result));
/******************************************************************//**
Add the table to add to the OPTIMIZER's list. */
UNIV_INTERN
void
fts_optimize_add_table(
/*===================*/
- dict_table_t* table); /*!< in: table to add */
+ dict_table_t* table) /*!< in: table to add */
+ __attribute__((nonnull));
/******************************************************************//**
Optimize a table. */
UNIV_INTERN
void
fts_optimize_do_table(
/*==================*/
- dict_table_t* table); /*!< in: table to optimize */
+ dict_table_t* table) /*!< in: table to optimize */
+ __attribute__((nonnull));
/******************************************************************//**
Construct the prefix name of an FTS table.
@return own: table name, must be freed with mem_free() */
@@ -584,7 +618,8 @@ char*
fts_get_table_name_prefix(
/*======================*/
const fts_table_t*
- fts_table); /*!< in: Auxiliary table type */
+ fts_table) /*!< in: Auxiliary table type */
+ __attribute__((nonnull, malloc, warn_unused_result));
/******************************************************************//**
Add node positions. */
UNIV_INTERN
@@ -594,7 +629,8 @@ fts_cache_node_add_positions(
fts_cache_t* cache, /*!< in: cache */
fts_node_t* node, /*!< in: word node */
doc_id_t doc_id, /*!< in: doc id */
- ib_vector_t* positions); /*!< in: fts_token_t::positions */
+ ib_vector_t* positions) /*!< in: fts_token_t::positions */
+ __attribute__((nonnull(2,4)));
/******************************************************************//**
Create the config table name for retrieving index specific value.
@@ -604,7 +640,8 @@ char*
fts_config_create_index_param_name(
/*===============================*/
const char* param, /*!< in: base name of param */
- const dict_index_t* index); /*!< in: index for config */
+ const dict_index_t* index) /*!< in: index for config */
+ __attribute__((nonnull, malloc, warn_unused_result));
#ifndef UNIV_NONINL
#include "fts0priv.ic"
diff --git a/storage/innobase/include/fts0priv.ic b/storage/innobase/include/fts0priv.ic
index 716ea4713b5..268bb7e2227 100644
--- a/storage/innobase/include/fts0priv.ic
+++ b/storage/innobase/include/fts0priv.ic
@@ -31,15 +31,9 @@ UNIV_INLINE
int
fts_write_object_id(
/*================*/
- ib_id_t id, /* in: a table/index id */
+ ib_id_t id, /* in: a table/index id */
char* str) /* in: buffer to write the id to */
{
-#ifdef __WIN__
-# define UINT64PFx "%016I64u"
-#else
-# define UINT64PFx "%016"PRIx64
-# endif /* __WIN__ */
-
// FIXME: Use ut_snprintf()
return(sprintf(str, UINT64PFx, id));
}
@@ -54,6 +48,45 @@ fts_read_object_id(
ib_id_t* id, /* out: an id */
const char* str) /* in: buffer to read from */
{
- return(sscanf(str, IB_ID_FMT, id) == 2);
+ return(sscanf(str, UINT64PFx, id) == 1);
+}
+
+/******************************************************************//**
+Compare two fts_trx_table_t instances.
+@return < 0 if n1 < n2, 0 if n1 == n2, > 0 if n1 > n2 */
+UNIV_INLINE
+int
+fts_trx_table_cmp(
+/*==============*/
+ const void* p1, /*!< in: id1 */
+ const void* p2) /*!< in: id2 */
+{
+ const dict_table_t* table1 = (*(const fts_trx_table_t**) p1)->table;
+ const dict_table_t* table2 = (*(const fts_trx_table_t**) p2)->table;
+
+ return((table1->id > table2->id)
+ ? 1
+ : (table1->id == table2->id)
+ ? 0
+ : -1);
}
+/******************************************************************//**
+Compare a table id with a fts_trx_table_t table id.
+@return < 0 if n1 < n2, 0 if n1 == n2, > 0 if n1 > n2 */
+UNIV_INLINE
+int
+fts_trx_table_id_cmp(
+/*=================*/
+ const void* p1, /*!< in: id1 */
+ const void* p2) /*!< in: id2 */
+{
+ const ullint* table_id = (const ullint*) p1;
+ const dict_table_t* table2 = (*(const fts_trx_table_t**) p2)->table;
+
+ return((*table_id > table2->id)
+ ? 1
+ : (*table_id == table2->id)
+ ? 0
+ : -1);
+}
diff --git a/storage/innobase/include/fts0types.h b/storage/innobase/include/fts0types.h
index 5b28f2c9473..8fc52c9fc5e 100644
--- a/storage/innobase/include/fts0types.h
+++ b/storage/innobase/include/fts0types.h
@@ -32,41 +32,35 @@ Created 2007-03-27 Sunny Bains
#include "ut0rbt.h"
#include "fts0fts.h"
-/** Types (aliases) used within FTS. */
-typedef struct fts_que_struct fts_que_t;
-typedef struct fts_node_struct fts_node_t;
-typedef struct fts_word_struct fts_word_t;
-typedef struct fts_fetch_struct fts_fetch_t;
-typedef struct fts_update_struct fts_update_t;
-typedef struct fts_get_doc_struct fts_get_doc_t;
-typedef struct fts_utf8_str_struct fts_utf8_str_t;
-typedef struct fts_doc_stats_struct fts_doc_stats_t;
-typedef struct fts_tokenizer_word_struct fts_tokenizer_word_t;
-typedef struct fts_index_selector_struct fts_index_selector_t;
+/** Types used within FTS. */
+struct fts_que_t;
+struct fts_node_t;
+struct fts_utf8_str_t;
/** Callbacks used within FTS. */
typedef pars_user_func_cb_t fts_sql_callback;
typedef void (*fts_filter)(void*, fts_node_t*, void*, ulint len);
/** Statistics relevant to a particular document, used during retrieval. */
-struct fts_doc_stats_struct {
+struct fts_doc_stats_t {
doc_id_t doc_id; /*!< Document id */
ulint word_count; /*!< Total words in the document */
};
/** It's main purpose is to store the SQL prepared statements that
are required to retrieve a document from the database. */
-struct fts_get_doc_struct {
+struct fts_get_doc_t {
fts_index_cache_t*
index_cache; /*!< The index cache instance */
/*!< Parsed sql statement */
que_t* get_document_graph;
+ fts_cache_t* cache; /*!< The parent cache */
};
/** Since we can have multiple FTS indexes on a table, we keep a
per index cache of words etc. */
-struct fts_index_cache_struct {
+struct fts_index_cache_t {
dict_index_t* index; /*!< The FTS index instance */
ib_rbt_t* words; /*!< Nodes; indexed by fts_string_t*,
@@ -88,7 +82,7 @@ struct fts_index_cache_struct {
/** For supporting the tracking of updates on multiple FTS indexes we need
to track which FTS indexes need to be updated. For INSERT and DELETE we
update all fts indexes. */
-struct fts_update_struct {
+struct fts_update_t {
doc_id_t doc_id; /*!< The doc id affected */
ib_vector_t* fts_indexes; /*!< The FTS indexes that need to be
@@ -100,7 +94,7 @@ struct fts_update_struct {
};
/** Stop word control infotmation. */
-struct fts_stopword_struct {
+struct fts_stopword_t {
ulint status; /*!< Status of the stopword tree */
ib_alloc_t* heap; /*!< The memory allocator to use */
ib_rbt_t* cached_stopword;/*!< This stores all active stopwords */
@@ -109,7 +103,7 @@ struct fts_stopword_struct {
/** The SYNC state of the cache. There is one instance of this struct
associated with each ADD thread. */
-struct fts_sync_struct {
+struct fts_sync_t {
trx_t* trx; /*!< The transaction used for SYNCing
the cache to disk */
dict_table_t* table; /*!< Table with FTS index(es) */
@@ -131,12 +125,10 @@ struct fts_sync_struct {
ib_time_t start_time; /*!< SYNC start time */
};
-typedef struct fts_sync_struct fts_sync_t;
-
/** The cache for the FTS system. It is a memory-based inverted index
that new entries are added to, until it grows over the configured maximum
size, at which time its contents are written to the INDEX table. */
-struct fts_cache_struct {
+struct fts_cache_t {
rw_lock_t lock; /*!< lock protecting all access to the
memory buffer. FIXME: this needs to
be our new upgrade-capable rw-lock */
@@ -145,11 +137,11 @@ struct fts_cache_struct {
intialization, it has different
SYNC level as above cache lock */
- mutex_t optimize_lock; /*!< Lock for OPTIMIZE */
+ ib_mutex_t optimize_lock; /*!< Lock for OPTIMIZE */
- mutex_t deleted_lock; /*!< Lock covering deleted_doc_ids */
+ ib_mutex_t deleted_lock; /*!< Lock covering deleted_doc_ids */
- mutex_t doc_id_lock; /*!< Lock covering Doc ID */
+ ib_mutex_t doc_id_lock; /*!< Lock covering Doc ID */
ib_vector_t* deleted_doc_ids;/*!< Array of deleted doc ids, each
element is of type fts_update_t */
@@ -200,7 +192,7 @@ struct fts_cache_struct {
};
/** Columns of the FTS auxiliary INDEX table */
-struct fts_node_struct {
+struct fts_node_t {
doc_id_t first_doc_id; /*!< First document id in ilist. */
doc_id_t last_doc_id; /*!< Last document id in ilist. */
@@ -223,7 +215,7 @@ struct fts_node_struct {
};
/** A tokenizer word. Contains information about one word. */
-struct fts_tokenizer_word_struct {
+struct fts_tokenizer_word_t {
fts_string_t text; /*!< Token text. */
ib_vector_t* nodes; /*!< Word node ilists, each element is
@@ -231,7 +223,7 @@ struct fts_tokenizer_word_struct {
};
/** Word text plus it's array of nodes as on disk in FTS index */
-struct fts_word_struct {
+struct fts_word_t {
fts_string_t text; /*!< Word value in UTF-8 */
ib_vector_t* nodes; /*!< Nodes read from disk */
@@ -239,7 +231,7 @@ struct fts_word_struct {
};
/** Callback for reading and filtering nodes that are read from FTS index */
-struct fts_fetch_struct {
+struct fts_fetch_t {
void* read_arg; /*!< Arg for the sql_callback */
fts_sql_callback
@@ -248,7 +240,7 @@ struct fts_fetch_struct {
};
/** For horizontally splitting an FTS auxiliary index */
-struct fts_index_selector_struct {
+struct fts_index_selector_t {
ulint value; /*!< Character value at which
to split */
@@ -256,7 +248,7 @@ struct fts_index_selector_struct {
};
/** This type represents a single document. */
-struct fts_doc_struct {
+struct fts_doc_t {
fts_string_t text; /*!< document text */
ibool found; /*!< TRUE if the document was found
@@ -276,7 +268,7 @@ struct fts_doc_struct {
};
/** A token and its positions within a document. */
-struct fts_token_struct {
+struct fts_token_t {
fts_string_t text; /*!< token text */
ib_vector_t* positions; /*!< an array of the positions the
diff --git a/storage/innobase/include/fts0types.ic b/storage/innobase/include/fts0types.ic
index 2734a331a86..b96c3f9dac8 100644
--- a/storage/innobase/include/fts0types.ic
+++ b/storage/innobase/include/fts0types.ic
@@ -37,46 +37,6 @@ extern const ulint UTF8_ERROR;
#define fts_utf8_is_valid(b) (((b) & 0xC0) == 0x80)
/******************************************************************//**
-Compare two fts_trx_table_t instances.
-@return < 0 if n1 < n2, 0 if n1 == n2, > 0 if n1 > n2 */
-UNIV_INLINE
-int
-fts_trx_table_cmp(
-/*==============*/
- const void* p1, /*!< in: id1 */
- const void* p2) /*!< in: id2 */
-{
- const dict_table_t* table1 = (*(const fts_trx_table_t**) p1)->table;
- const dict_table_t* table2 = (*(const fts_trx_table_t**) p2)->table;
-
- return((table1->id > table2->id)
- ? 1
- : (table1->id == table2->id)
- ? 0
- : -1);
-}
-
-/******************************************************************//**
-Compare a table id with a fts_trx_table_t table id.
-@return < 0 if n1 < n2, 0 if n1 == n2,> 0 if n1 > n2 */
-UNIV_INLINE
-int
-fts_trx_table_id_cmp(
-/*=================*/
- const void* p1, /*!< in: id1 */
- const void* p2) /*!< in: id2 */
-{
- const ullint* table_id = (const ullint*) p1;
- const dict_table_t* table2 = (*(const fts_trx_table_t**) p2)->table;
-
- return((*table_id > table2->id)
- ? 1
- : (*table_id == table2->id)
- ? 0
- : -1);
-}
-
-/******************************************************************//**
Duplicate an UTF-8 string.
@return < 0 if n1 < n2, 0 if n1 == n2, > 0 if n1 > n2 */
UNIV_INLINE
diff --git a/storage/innobase/include/ha0ha.h b/storage/innobase/include/ha0ha.h
index 1a2b8dac014..07ab20ab995 100644
--- a/storage/innobase/include/ha0ha.h
+++ b/storage/innobase/include/ha0ha.h
@@ -38,7 +38,7 @@ Looks for an element in a hash table.
@return pointer to the data of the first hash table node in chain
having the fold number, NULL if not found */
UNIV_INLINE
-rec_t*
+const rec_t*
ha_search_and_get_data(
/*===================*/
hash_table_t* table, /*!< in: hash table */
@@ -53,11 +53,11 @@ ha_search_and_update_if_found_func(
/*===============================*/
hash_table_t* table, /*!< in/out: hash table */
ulint fold, /*!< in: folded value of the searched data */
- rec_t* data, /*!< in: pointer to the data */
+ const rec_t* data, /*!< in: pointer to the data */
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
buf_block_t* new_block,/*!< in: block containing new_data */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
- rec_t* new_data);/*!< in: new pointer to the data */
+ const rec_t* new_data);/*!< in: new pointer to the data */
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/** Looks for an element when we know the pointer to the data and
@@ -221,15 +221,12 @@ ha_print_info(
#endif /* !UNIV_HOTBACKUP */
/** The hash table external chain node */
-typedef struct ha_node_struct ha_node_t;
-
-/** The hash table external chain node */
-struct ha_node_struct {
+struct ha_node_t {
ha_node_t* next; /*!< next chain node or NULL if none */
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
buf_block_t* block; /*!< buffer block containing the data, or NULL */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
- rec_t* data; /*!< pointer to the data */
+ const rec_t* data; /*!< pointer to the data */
ulint fold; /*!< fold value for the data */
};
diff --git a/storage/innobase/include/ha0ha.ic b/storage/innobase/include/ha0ha.ic
index 91794e8f1fc..c478ff54303 100644
--- a/storage/innobase/include/ha0ha.ic
+++ b/storage/innobase/include/ha0ha.ic
@@ -58,7 +58,7 @@ ha_node_set_data_func(
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
buf_block_t* block, /*!< in: buffer block containing the data */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
- rec_t* data) /*!< in: pointer to the data */
+ const rec_t* data) /*!< in: pointer to the data */
{
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
node->block = block;
@@ -161,7 +161,7 @@ Looks for an element in a hash table.
@return pointer to the data of the first hash table node in chain
having the fold number, NULL if not found */
UNIV_INLINE
-rec_t*
+const rec_t*
ha_search_and_get_data(
/*===================*/
hash_table_t* table, /*!< in: hash table */
diff --git a/storage/innobase/include/ha0storage.h b/storage/innobase/include/ha0storage.h
index caf42abfcfe..0073930b502 100644
--- a/storage/innobase/include/ha0storage.h
+++ b/storage/innobase/include/ha0storage.h
@@ -39,7 +39,7 @@ constant per ha_storage's lifetime. */
#define HA_STORAGE_DEFAULT_HASH_CELLS 4096
/** Hash storage */
-typedef struct ha_storage_struct ha_storage_t;
+struct ha_storage_t;
/*******************************************************************//**
Creates a hash storage. If any of the parameters is 0, then a default
diff --git a/storage/innobase/include/ha0storage.ic b/storage/innobase/include/ha0storage.ic
index ce6e7406b43..7150ca045ec 100644
--- a/storage/innobase/include/ha0storage.ic
+++ b/storage/innobase/include/ha0storage.ic
@@ -31,7 +31,7 @@ Created September 24, 2007 Vasil Dimov
#include "mem0mem.h"
/** Hash storage for strings */
-struct ha_storage_struct {
+struct ha_storage_t {
mem_heap_t* heap; /*!< memory heap from which memory is
allocated */
hash_table_t* hash; /*!< hash table used to avoid
@@ -39,9 +39,7 @@ struct ha_storage_struct {
};
/** Objects of this type are stored in ha_storage_t */
-typedef struct ha_storage_node_struct ha_storage_node_t;
-/** Objects of this type are stored in ha_storage_struct */
-struct ha_storage_node_struct {
+struct ha_storage_node_t {
ulint data_len;/*!< length of the data */
const void* data; /*!< pointer to data */
ha_storage_node_t* next; /*!< next node in hash chain */
diff --git a/storage/innobase/include/ha_prototypes.h b/storage/innobase/include/ha_prototypes.h
index 5512bf7c62f..fb4b0120bbb 100644
--- a/storage/innobase/include/ha_prototypes.h
+++ b/storage/innobase/include/ha_prototypes.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2006, 2010, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2006, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -28,15 +28,19 @@ Created 5/11/2006 Osku Salerma
#define HA_INNODB_PROTOTYPES_H
#include "my_dbug.h"
+#include "mysqld_error.h"
#include "my_compare.h"
#include "my_sys.h"
#include "m_string.h"
+#include "debug_sync.h"
+#include "my_base.h"
#include "trx0types.h"
#include "m_ctype.h" /* CHARSET_INFO */
-// Forward declaration
-typedef struct fts_string_struct fts_string_t;
+// Forward declarations
+class Field;
+struct fts_string_t;
/*********************************************************************//**
Wrapper around MySQL's copy_and_convert function.
@@ -105,7 +109,7 @@ innobase_convert_name(
ulint buflen, /*!< in: length of buf, in bytes */
const char* id, /*!< in: identifier to convert */
ulint idlen, /*!< in: length of id, in bytes */
- void* thd, /*!< in: MySQL connection thread, or NULL */
+ THD* thd, /*!< in: MySQL connection thread, or NULL */
ibool table_id);/*!< in: TRUE=id is a table or database name;
FALSE=id is an index name */
@@ -120,7 +124,19 @@ UNIV_INTERN
ibool
thd_is_replication_slave_thread(
/*============================*/
- void* thd); /*!< in: thread handle (THD*) */
+ THD* thd); /*!< in: thread handle */
+
+/******************************************************************//**
+Gets information on the durability property requested by thread.
+Used when writing either a prepare or commit record to the log
+buffer.
+@return the durability property. */
+UNIV_INTERN
+enum durability_properties
+thd_requested_durability(
+/*=====================*/
+ const THD* thd) /*!< in: thread handle */
+ __attribute__((nonnull, warn_unused_result));
/******************************************************************//**
Returns true if the transaction this thread is processing has edited
@@ -132,7 +148,7 @@ UNIV_INTERN
ibool
thd_has_edited_nontrans_tables(
/*===========================*/
- void* thd); /*!< in: thread handle (THD*) */
+ THD* thd); /*!< in: thread handle */
/*************************************************************//**
Prints info of a THD object (== user session thread) to the given file. */
@@ -141,21 +157,10 @@ void
innobase_mysql_print_thd(
/*=====================*/
FILE* f, /*!< in: output stream */
- void* thd, /*!< in: pointer to a MySQL THD object */
+ THD* thd, /*!< in: pointer to a MySQL THD object */
uint max_query_len); /*!< in: max query length to print, or 0 to
use the default max length */
-/*****************************************************************//**
-Log code calls this whenever log has been written and/or flushed up
-to a new position. We use this to notify upper layer of a new commit
-checkpoint when necessary.*/
-UNIV_INTERN
-void
-innobase_mysql_log_notify(
-/*===============*/
- ib_uint64_t write_lsn, /*!< in: LSN written to log file */
- ib_uint64_t flush_lsn); /*!< in: LSN flushed to disk */
-
/*************************************************************//**
InnoDB uses this function to compare two data fields for which the data type
is such that we must use MySQL code to compare them.
@@ -173,6 +178,18 @@ innobase_mysql_cmp(
unsigned int b_length) /*!< in: data field length,
not UNIV_SQL_NULL */
__attribute__((nonnull, warn_unused_result));
+
+/*****************************************************************//**
+Log code calls this whenever log has been written and/or flushed up
+to a new position. We use this to notify upper layer of a new commit
+checkpoint when necessary.*/
+extern "C" UNIV_INTERN
+void
+innobase_mysql_log_notify(
+/*===============*/
+ ib_uint64_t write_lsn, /*!< in: LSN written to log file */
+ ib_uint64_t flush_lsn); /*!< in: LSN flushed to disk */
+
/**************************************************************//**
Converts a MySQL type to an InnoDB type. Note that this function returns
the 'mtype' of InnoDB. InnoDB differentiates between MySQL's old <= 4.1
@@ -233,11 +250,11 @@ innobase_basename(
/******************************************************************//**
Returns true if the thread is executing a SELECT statement.
@return true if thd is executing SELECT */
-
+UNIV_INTERN
ibool
thd_is_select(
/*==========*/
- const void* thd); /*!< in: thread handle (THD*) */
+ const THD* thd); /*!< in: thread handle */
/******************************************************************//**
Converts an identifier to a table name. */
@@ -276,7 +293,7 @@ UNIV_INTERN
struct charset_info_st*
innobase_get_charset(
/*=================*/
- void* mysql_thd); /*!< in: MySQL thread handle */
+ THD* thd); /*!< in: MySQL thread handle */
/**********************************************************************//**
Determines the current SQL statement.
@return SQL statement string */
@@ -284,7 +301,7 @@ UNIV_INTERN
const char*
innobase_get_stmt(
/*==============*/
- void* mysql_thd, /*!< in: MySQL thread handle */
+ THD* thd, /*!< in: MySQL thread handle */
size_t* length) /*!< out: length of the SQL statement */
__attribute__((nonnull));
/******************************************************************//**
@@ -321,17 +338,17 @@ UNIV_INTERN
ibool
thd_supports_xa(
/*============*/
- void* thd); /*!< in: thread handle (THD*), or NULL to query
+ THD* thd); /*!< in: thread handle, or NULL to query
the global innodb_supports_xa */
/******************************************************************//**
Returns the lock wait timeout for the current connection.
@return the lock wait timeout, in seconds */
-
+UNIV_INTERN
ulong
thd_lock_wait_timeout(
/*==================*/
- void* thd); /*!< in: thread handle (THD*), or NULL to query
+ THD* thd); /*!< in: thread handle, or NULL to query
the global innodb_lock_wait_timeout */
/******************************************************************//**
Add up the time waited for the lock for the current query. */
@@ -339,7 +356,7 @@ UNIV_INTERN
void
thd_set_lock_wait_time(
/*===================*/
- void* thd, /*!< in: thread handle (THD*) */
+ THD* thd, /*!< in/out: thread handle */
ulint value); /*!< in: time waited for the lock */
/**********************************************************************//**
@@ -363,6 +380,15 @@ ulint
innobase_get_lower_case_table_names(void);
/*=====================================*/
+/*****************************************************************//**
+Frees a possible InnoDB trx object associated with the current THD.
+@return 0 or error number */
+UNIV_INTERN
+int
+innobase_close_thd(
+/*===============*/
+ THD* thd); /*!< in: MySQL thread handle for
+ which to close the connection */
/*************************************************************//**
Get the next token from the given string and store it in *token. */
UNIV_INTERN
@@ -414,7 +440,7 @@ UNIV_INTERN
ibool
thd_trx_is_read_only(
/*=================*/
- void* thd); /*!< in: thread handle (THD*) */
+ THD* thd); /*!< in/out: thread handle */
/******************************************************************//**
Check if the transaction is an auto-commit transaction. TRUE also
@@ -424,5 +450,139 @@ UNIV_INTERN
ibool
thd_trx_is_auto_commit(
/*===================*/
- void* thd); /*!< in: thread handle (THD*) can be NULL */
+ THD* thd); /*!< in: thread handle, or NULL */
+
+/*****************************************************************//**
+A wrapper function of innobase_convert_name(), convert a table or
+index name to the MySQL system_charset_info (UTF-8) and quote it if needed.
+@return pointer to the end of buf */
+UNIV_INTERN
+void
+innobase_format_name(
+/*==================*/
+ char* buf, /*!< out: buffer for converted
+ identifier */
+ ulint buflen, /*!< in: length of buf, in bytes */
+ const char* name, /*!< in: index or table name
+ to format */
+ ibool is_index_name) /*!< in: index name */
+ __attribute__((nonnull));
+
+/** Corresponds to Sql_condition::enum_warning_level. */
+enum ib_log_level_t {
+ IB_LOG_LEVEL_INFO,
+ IB_LOG_LEVEL_WARN,
+ IB_LOG_LEVEL_ERROR,
+ IB_LOG_LEVEL_FATAL
+};
+
+/******************************************************************//**
+Use this when the args are first converted to a formatted string and then
+passed to the format string from errmsg-utf8.txt. The error message format
+must be: "Some string ... %s".
+
+Push a warning message to the client; it is a wrapper around:
+
+void push_warning_printf(
+ THD *thd, Sql_condition::enum_warning_level level,
+ uint code, const char *format, ...);
+*/
+UNIV_INTERN
+void
+ib_errf(
+/*====*/
+ THD* thd, /*!< in/out: session */
+ ib_log_level_t level, /*!< in: warning level */
+ ib_uint32_t code, /*!< MySQL error code */
+ const char* format, /*!< printf format */
+ ...) /*!< Args */
+ __attribute__((format(printf, 4, 5)));
+
+/******************************************************************//**
+Use this when the args are passed to the format string from
+errmsg-utf8.txt directly as is.
+
+Push a warning message to the client; it is a wrapper around:
+
+void push_warning_printf(
+ THD *thd, Sql_condition::enum_warning_level level,
+ uint code, const char *format, ...);
+*/
+UNIV_INTERN
+void
+ib_senderrf(
+/*========*/
+ THD* thd, /*!< in/out: session */
+ ib_log_level_t level, /*!< in: warning level */
+ ib_uint32_t code, /*!< MySQL error code */
+ ...); /*!< Args */
+
+/******************************************************************//**
+Write a message to the MySQL log, prefixed with "InnoDB: ".
+Wrapper around sql_print_information() */
+UNIV_INTERN
+void
+ib_logf(
+/*====*/
+ ib_log_level_t level, /*!< in: warning level */
+ const char* format, /*!< printf format */
+ ...) /*!< Args */
+ __attribute__((format(printf, 2, 3)));
+
+/******************************************************************//**
+Returns the NUL terminated value of glob_hostname.
+@return pointer to glob_hostname. */
+UNIV_INTERN
+const char*
+server_get_hostname();
+/*=================*/
+
+/******************************************************************//**
+Get the error message format string.
+@return the format string or 0 if not found. */
+UNIV_INTERN
+const char*
+innobase_get_err_msg(
+/*=================*/
+ int error_code); /*!< in: MySQL error code */
+
+/*********************************************************************//**
+Compute the next autoinc value.
+
+For MySQL replication the autoincrement values can be partitioned among
+the nodes. The offset is the start or origin of the autoincrement value
+for a particular node. For n nodes the increment will be n and the offset
+will be in the interval [1, n]. The formula tries to allocate the next
+value for a particular node.
+
+Note: This function is also called with increment set to the number of
+values we want to reserve for multi-value inserts e.g.,
+
+ INSERT INTO T VALUES(), (), ();
+
+innobase_next_autoinc() will be called with increment set to 3 where
+autoinc_lock_mode != TRADITIONAL because we want to reserve 3 values for
+the multi-value INSERT above.
+@return the next value */
+UNIV_INTERN
+ulonglong
+innobase_next_autoinc(
+/*==================*/
+ ulonglong current, /*!< in: Current value */
+ ulonglong need, /*!< in: count of values needed */
+ ulonglong step, /*!< in: AUTOINC increment step */
+ ulonglong offset, /*!< in: AUTOINC offset */
+ ulonglong max_value) /*!< in: max value for type */
+ __attribute__((pure, warn_unused_result));
+
+/********************************************************************//**
+Get the upper limit of the MySQL integral and floating-point type.
+@return maximum allowed value for the field */
+UNIV_INTERN
+ulonglong
+innobase_get_int_col_max_value(
+/*===========================*/
+ const Field* field) /*!< in: MySQL field */
+ __attribute__((nonnull, pure, warn_unused_result));
+
#endif /* HA_INNODB_PROTOTYPES_H */
diff --git a/storage/innobase/include/handler0alter.h b/storage/innobase/include/handler0alter.h
index c5d439ef21b..52aaf2d25ef 100644
--- a/storage/innobase/include/handler0alter.h
+++ b/storage/innobase/include/handler0alter.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2005, 2010, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2005, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -27,11 +27,34 @@ UNIV_INTERN
void
innobase_rec_to_mysql(
/*==================*/
- struct TABLE* table, /*!< in/out: MySQL table */
- const rec_t* rec, /*!< in: record */
- const dict_index_t* index, /*!< in: index */
- const ulint* offsets); /*!< in: rec_get_offsets(
- rec, index, ...) */
+ struct TABLE* table, /*!< in/out: MySQL table */
+ const rec_t* rec, /*!< in: record */
+ const dict_index_t* index, /*!< in: index */
+ const ulint* offsets)/*!< in: rec_get_offsets(
+ rec, index, ...) */
+ __attribute__((nonnull));
+
+/*************************************************************//**
+Copies an InnoDB index entry to table->record[0]. */
+UNIV_INTERN
+void
+innobase_fields_to_mysql(
+/*=====================*/
+ struct TABLE* table, /*!< in/out: MySQL table */
+ const dict_index_t* index, /*!< in: InnoDB index */
+ const dfield_t* fields) /*!< in: InnoDB index fields */
+ __attribute__((nonnull));
+
+/*************************************************************//**
+Copies an InnoDB row to table->record[0]. */
+UNIV_INTERN
+void
+innobase_row_to_mysql(
+/*==================*/
+ struct TABLE* table, /*!< in/out: MySQL table */
+ const dict_table_t* itab, /*!< in: InnoDB table */
+ const dtuple_t* row) /*!< in: InnoDB row */
+ __attribute__((nonnull));
/*************************************************************//**
Resets table->record[0]. */
@@ -39,4 +62,53 @@ UNIV_INTERN
void
innobase_rec_reset(
/*===============*/
- struct TABLE* table); /*!< in/out: MySQL table */
+ struct TABLE* table) /*!< in/out: MySQL table */
+ __attribute__((nonnull));
+
+/** Generate the next autoinc based on a snapshot of the session
+auto_increment_increment and auto_increment_offset variables. */
+struct ib_sequence_t {
+
+ /**
+ @param thd - the session
+ @param start_value - the lower bound
+ @param max_value - the upper bound (inclusive) */
+ ib_sequence_t(THD* thd, ulonglong start_value, ulonglong max_value);
+
+ /**
+ Postfix increment
+ @return the value to insert */
+ ulonglong operator++(int) UNIV_NOTHROW;
+
+ /** Check if the autoinc "sequence" is exhausted.
+ @return true if the sequence is exhausted */
+ bool eof() const UNIV_NOTHROW
+ {
+ return(m_eof);
+ }
+
+ /**
+ @return the next value in the sequence */
+ ulonglong last() const UNIV_NOTHROW
+ {
+ ut_ad(m_next_value > 0);
+
+ return(m_next_value);
+ }
+
+ /** Maximum column value if adding an AUTOINC column else 0. Once
+ we reach the end of the sequence it will be set to ~0. */
+ const ulonglong m_max_value;
+
+ /** Value of auto_increment_increment */
+ ulong m_increment;
+
+ /** Value of auto_increment_offset */
+ ulong m_offset;
+
+ /** Next value in the sequence */
+ ulonglong m_next_value;
+
+ /** true if no more values left in the sequence */
+ bool m_eof;
+};
diff --git a/storage/innobase/include/hash0hash.h b/storage/innobase/include/hash0hash.h
index 1c19ea53a23..6f9a628df5d 100644
--- a/storage/innobase/include/hash0hash.h
+++ b/storage/innobase/include/hash0hash.h
@@ -33,8 +33,8 @@ Created 5/20/1997 Heikki Tuuri
# include "sync0rw.h"
#endif /* !UNIV_HOTBACKUP */
-typedef struct hash_table_struct hash_table_t;
-typedef struct hash_cell_struct hash_cell_t;
+struct hash_table_t;
+struct hash_cell_t;
typedef void* hash_node_t;
@@ -382,7 +382,7 @@ hash_get_heap(
Gets the nth mutex in a hash table.
@return mutex */
UNIV_INLINE
-mutex_t*
+ib_mutex_t*
hash_get_nth_mutex(
/*===============*/
hash_table_t* table, /*!< in: hash table */
@@ -400,7 +400,7 @@ hash_get_nth_lock(
Gets the mutex for a fold value in a hash table.
@return mutex */
UNIV_INLINE
-mutex_t*
+ib_mutex_t*
hash_get_mutex(
/*===========*/
hash_table_t* table, /*!< in: hash table */
@@ -451,7 +451,7 @@ void
hash_mutex_exit_all_but(
/*====================*/
hash_table_t* table, /*!< in: hash table */
- mutex_t* keep_mutex); /*!< in: mutex to keep */
+ ib_mutex_t* keep_mutex); /*!< in: mutex to keep */
/************************************************************//**
s-lock a lock for a fold value in a hash table. */
UNIV_INTERN
@@ -524,12 +524,12 @@ hash_unlock_x_all_but(
# define hash_unlock_x_all_but(t, l) ((void) 0)
#endif /* !UNIV_HOTBACKUP */
-struct hash_cell_struct{
+struct hash_cell_t{
void* node; /*!< hash chain node, NULL if none */
};
/* The hash table structure */
-struct hash_table_struct {
+struct hash_table_t {
enum hash_table_sync_t type; /*<! type of hash_table. */
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
# ifndef UNIV_HOTBACKUP
@@ -547,7 +547,7 @@ struct hash_table_struct {
rw_locks depending on the type.
Must be a power of 2 */
union {
- mutex_t* mutexes;/* NULL, or an array of mutexes
+ ib_mutex_t* mutexes;/* NULL, or an array of mutexes
used to protect segments of the
hash table */
rw_lock_t* rw_locks;/* NULL, or an array of rw_lcoks
diff --git a/storage/innobase/include/hash0hash.ic b/storage/innobase/include/hash0hash.ic
index 1e5474601d5..254f3f82e5d 100644
--- a/storage/innobase/include/hash0hash.ic
+++ b/storage/innobase/include/hash0hash.ic
@@ -150,7 +150,7 @@ hash_get_heap(
Gets the nth mutex in a hash table.
@return mutex */
UNIV_INLINE
-mutex_t*
+ib_mutex_t*
hash_get_nth_mutex(
/*===============*/
hash_table_t* table, /*!< in: hash table */
@@ -168,7 +168,7 @@ hash_get_nth_mutex(
Gets the mutex for a fold value in a hash table.
@return mutex */
UNIV_INLINE
-mutex_t*
+ib_mutex_t*
hash_get_mutex(
/*===========*/
hash_table_t* table, /*!< in: hash table */
diff --git a/storage/innobase/include/ibuf0ibuf.h b/storage/innobase/include/ibuf0ibuf.h
index f405ebf8d11..e64f067d364 100644
--- a/storage/innobase/include/ibuf0ibuf.h
+++ b/storage/innobase/include/ibuf0ibuf.h
@@ -376,24 +376,16 @@ will be merged from ibuf trees to the pages read, 0 if ibuf is
empty */
UNIV_INTERN
ulint
-ibuf_contract(
-/*==========*/
- ibool sync); /*!< in: TRUE if the caller wants to wait for the
- issued read with the highest tablespace address
- to complete */
-/*********************************************************************//**
-Contracts insert buffer trees by reading pages to the buffer pool.
-@return a lower limit for the combined size in bytes of entries which
-will be merged from ibuf trees to the pages read, 0 if ibuf is
-empty */
-UNIV_INTERN
-ulint
ibuf_contract_in_background(
/*========================*/
- ibool full); /*!< in: TRUE if the caller wants to do a full
- contract based on PCT_IO(100). If FALSE then
- the size of contract batch is determined based
- on the current size of the ibuf tree. */
+ table_id_t table_id, /*!< in: if merge should be done only
+ for a specific table, for all tables
+ this should be 0 */
+ ibool full); /*!< in: TRUE if the caller wants to
+ do a full contract based on PCT_IO(100).
+ If FALSE then the size of contract
+ batch is determined based on the
+ current size of the ibuf tree. */
#endif /* !UNIV_HOTBACKUP */
/*********************************************************************//**
Parses a redo log record of an ibuf bitmap page init.
@@ -449,6 +441,17 @@ void
ibuf_close(void);
/*============*/
+/******************************************************************//**
+Checks the insert buffer bitmaps on IMPORT TABLESPACE.
+@return DB_SUCCESS or error code */
+UNIV_INTERN
+dberr_t
+ibuf_check_bitmap_on_import(
+/*========================*/
+ const trx_t* trx, /*!< in: transaction */
+ ulint space_id) /*!< in: tablespace identifier */
+ __attribute__((nonnull, warn_unused_result));
+
#define IBUF_HEADER_PAGE_NO FSP_IBUF_HEADER_PAGE_NO
#define IBUF_TREE_ROOT_PAGE_NO FSP_IBUF_TREE_ROOT_PAGE_NO
@@ -462,36 +465,6 @@ for the file segment from which the pages for the ibuf tree are allocated */
/* The insert buffer tree itself is always located in space 0. */
#define IBUF_SPACE_ID 0
-/** Insert buffer struct */
-struct ibuf_struct{
- ulint size; /*!< current size of the ibuf index
- tree, in pages */
- ulint max_size; /*!< recommended maximum size of the
- ibuf index tree, in pages */
- ulint seg_size; /*!< allocated pages of the file
- segment containing ibuf header and
- tree */
- ibool empty; /*!< Protected by the page
- latch of the root page of the
- insert buffer tree
- (FSP_IBUF_TREE_ROOT_PAGE_NO). TRUE
- if and only if the insert
- buffer tree is empty. */
- ulint free_list_len; /*!< length of the free list */
- ulint height; /*!< tree height */
- dict_index_t* index; /*!< insert buffer index */
-
- ulint n_merges; /*!< number of pages merged */
- ulint n_merged_ops[IBUF_OP_COUNT];
- /*!< number of operations of each type
- merged to index pages */
- ulint n_discarded_ops[IBUF_OP_COUNT];
- /*!< number of operations of each type
- discarded without merging due to the
- tablespace being deleted or the
- index being dropped */
-};
-
#ifndef UNIV_NONINL
#include "ibuf0ibuf.ic"
#endif
diff --git a/storage/innobase/include/ibuf0ibuf.ic b/storage/innobase/include/ibuf0ibuf.ic
index 8a4ec633b01..92ca2cbb9a2 100644
--- a/storage/innobase/include/ibuf0ibuf.ic
+++ b/storage/innobase/include/ibuf0ibuf.ic
@@ -58,6 +58,36 @@ ibuf_mtr_commit(
mtr_commit(mtr);
}
+/** Insert buffer struct */
+struct ibuf_t{
+ ulint size; /*!< current size of the ibuf index
+ tree, in pages */
+ ulint max_size; /*!< recommended maximum size of the
+ ibuf index tree, in pages */
+ ulint seg_size; /*!< allocated pages of the file
+ segment containing ibuf header and
+ tree */
+ ibool empty; /*!< Protected by the page
+ latch of the root page of the
+ insert buffer tree
+ (FSP_IBUF_TREE_ROOT_PAGE_NO). TRUE
+ if and only if the insert
+ buffer tree is empty. */
+ ulint free_list_len; /*!< length of the free list */
+ ulint height; /*!< tree height */
+ dict_index_t* index; /*!< insert buffer index */
+
+ ulint n_merges; /*!< number of pages merged */
+ ulint n_merged_ops[IBUF_OP_COUNT];
+ /*!< number of operations of each type
+ merged to index pages */
+ ulint n_discarded_ops[IBUF_OP_COUNT];
+ /*!< number of operations of each type
+ discarded without merging due to the
+ tablespace being deleted or the
+ index being dropped */
+};
+
/************************************************************************//**
Sets the free bit of the page in the ibuf bitmap. This is done in a separate
mini-transaction, hence this operation does not restrict further work to only
@@ -97,6 +127,7 @@ ibuf_should_try(
return(ibuf_use != IBUF_USE_NONE
&& ibuf->max_size != 0
&& !dict_index_is_clust(index)
+ && index->table->quiesce == QUIESCE_NONE
&& (ignore_sec_unique || !dict_index_is_unique(index)));
}
diff --git a/storage/innobase/include/ibuf0types.h b/storage/innobase/include/ibuf0types.h
index e404b62a011..3fdbf078b0b 100644
--- a/storage/innobase/include/ibuf0types.h
+++ b/storage/innobase/include/ibuf0types.h
@@ -26,6 +26,6 @@ Created 7/29/1997 Heikki Tuuri
#ifndef ibuf0types_h
#define ibuf0types_h
-typedef struct ibuf_struct ibuf_t;
+struct ibuf_t;
#endif
diff --git a/storage/innobase/include/lock0iter.h b/storage/innobase/include/lock0iter.h
index 42b4f7281e4..0054850b526 100644
--- a/storage/innobase/include/lock0iter.h
+++ b/storage/innobase/include/lock0iter.h
@@ -29,13 +29,13 @@ Created July 16, 2007 Vasil Dimov
#include "univ.i"
#include "lock0types.h"
-typedef struct lock_queue_iterator_struct {
+struct lock_queue_iterator_t {
const lock_t* current_lock;
/* In case this is a record lock queue (not table lock queue)
then bit_no is the record number within the heap in which the
record is stored. */
ulint bit_no;
-} lock_queue_iterator_t;
+};
/*******************************************************************//**
Initialize lock queue iterator so that it starts to iterate from
diff --git a/storage/innobase/include/lock0lock.h b/storage/innobase/include/lock0lock.h
index f97a11b9483..8e6fdaed3d5 100644
--- a/storage/innobase/include/lock0lock.h
+++ b/storage/innobase/include/lock0lock.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -275,7 +275,7 @@ the query thread to the lock wait state and inserts a waiting request
for a gap x-lock to the lock queue.
@return DB_SUCCESS, DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */
UNIV_INTERN
-ulint
+dberr_t
lock_rec_insert_check_and_lock(
/*===========================*/
ulint flags, /*!< in: if BTR_NO_LOCKING_FLAG bit is
@@ -285,10 +285,11 @@ lock_rec_insert_check_and_lock(
dict_index_t* index, /*!< in: index */
que_thr_t* thr, /*!< in: query thread */
mtr_t* mtr, /*!< in/out: mini-transaction */
- ibool* inherit);/*!< out: set to TRUE if the new
+ ibool* inherit)/*!< out: set to TRUE if the new
inserted record maybe should inherit
LOCK_GAP type locks from the successor
record */
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Checks if locks of other transactions prevent an immediate modify (update,
delete mark, or delete unmark) of a clustered index record. If they do,
@@ -298,7 +299,7 @@ lock wait state and inserts a waiting request for a record x-lock to the
lock queue.
@return DB_SUCCESS, DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */
UNIV_INTERN
-ulint
+dberr_t
lock_clust_rec_modify_check_and_lock(
/*=================================*/
ulint flags, /*!< in: if BTR_NO_LOCKING_FLAG
@@ -308,13 +309,14 @@ lock_clust_rec_modify_check_and_lock(
modified */
dict_index_t* index, /*!< in: clustered index */
const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */
- que_thr_t* thr); /*!< in: query thread */
+ que_thr_t* thr) /*!< in: query thread */
+ __attribute__((warn_unused_result, nonnull));
/*********************************************************************//**
Checks if locks of other transactions prevent an immediate modify
(delete mark or delete unmark) of a secondary index record.
@return DB_SUCCESS, DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */
UNIV_INTERN
-ulint
+dberr_t
lock_sec_rec_modify_check_and_lock(
/*===============================*/
ulint flags, /*!< in: if BTR_NO_LOCKING_FLAG
@@ -326,15 +328,17 @@ lock_sec_rec_modify_check_and_lock(
clustered index record first: see the
comment below */
dict_index_t* index, /*!< in: secondary index */
- que_thr_t* thr, /*!< in: query thread */
- mtr_t* mtr); /*!< in/out: mini-transaction */
+ que_thr_t* thr, /*!< in: query thread
+ (can be NULL if BTR_NO_LOCKING_FLAG) */
+ mtr_t* mtr) /*!< in/out: mini-transaction */
+ __attribute__((warn_unused_result, nonnull(2,3,4,6)));
/*********************************************************************//**
Like lock_clust_rec_read_check_and_lock(), but reads a
secondary index record.
@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, DB_LOCK_WAIT, DB_DEADLOCK,
or DB_QUE_THR_SUSPENDED */
UNIV_INTERN
-enum db_err
+dberr_t
lock_sec_rec_read_check_and_lock(
/*=============================*/
ulint flags, /*!< in: if BTR_NO_LOCKING_FLAG
@@ -364,7 +368,7 @@ lock on the record.
@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, DB_LOCK_WAIT, DB_DEADLOCK,
or DB_QUE_THR_SUSPENDED */
UNIV_INTERN
-enum db_err
+dberr_t
lock_clust_rec_read_check_and_lock(
/*===============================*/
ulint flags, /*!< in: if BTR_NO_LOCKING_FLAG
@@ -395,7 +399,7 @@ lock_clust_rec_read_check_and_lock() that does not require the parameter
"offsets".
@return DB_SUCCESS, DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */
UNIV_INTERN
-ulint
+dberr_t
lock_clust_rec_read_check_and_lock_alt(
/*===================================*/
ulint flags, /*!< in: if BTR_NO_LOCKING_FLAG
@@ -413,13 +417,14 @@ lock_clust_rec_read_check_and_lock_alt(
SELECT FOR UPDATE */
ulint gap_mode,/*!< in: LOCK_ORDINARY, LOCK_GAP, or
LOCK_REC_NOT_GAP */
- que_thr_t* thr); /*!< in: query thread */
+ que_thr_t* thr) /*!< in: query thread */
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Checks that a record is seen in a consistent read.
-@return TRUE if sees, or FALSE if an earlier version of the record
+@return true if sees, or false if an earlier version of the record
should be retrieved */
UNIV_INTERN
-ibool
+bool
lock_clust_rec_cons_read_sees(
/*==========================*/
const rec_t* rec, /*!< in: user record which should be read or
@@ -431,26 +436,27 @@ lock_clust_rec_cons_read_sees(
Checks that a non-clustered index record is seen in a consistent read.
NOTE that a non-clustered index page contains so little information on
-its modifications that also in the case FALSE, the present version of
+its modifications that also in the case false, the present version of
rec may be the right, but we must check this from the clustered index
record.
-@return TRUE if certainly sees, or FALSE if an earlier version of the
+@return true if certainly sees, or false if an earlier version of the
clustered index record might be needed */
UNIV_INTERN
-ulint
+bool
lock_sec_rec_cons_read_sees(
/*========================*/
const rec_t* rec, /*!< in: user record which
should be read or passed over
by a read cursor */
- const read_view_t* view); /*!< in: consistent read view */
+ const read_view_t* view) /*!< in: consistent read view */
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Locks the specified database table in the mode given. If the lock cannot
be granted immediately, the query thread is put to wait.
@return DB_SUCCESS, DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */
UNIV_INTERN
-ulint
+dberr_t
lock_table(
/*=======*/
ulint flags, /*!< in: if BTR_NO_LOCKING_FLAG bit is set,
@@ -458,7 +464,8 @@ lock_table(
dict_table_t* table, /*!< in/out: database table
in dictionary cache */
enum lock_mode mode, /*!< in: lock mode */
- que_thr_t* thr); /*!< in: query thread */
+ que_thr_t* thr) /*!< in: query thread */
+ __attribute__((nonnull, warn_unused_result));
/*************************************************************//**
Removes a granted record lock of a transaction from the queue and grants
locks to other transactions waiting in the queue if they now are entitled
@@ -780,7 +787,7 @@ was selected as a deadlock victim, or if it has to wait then cancel
the wait lock.
@return DB_DEADLOCK, DB_LOCK_WAIT or DB_SUCCESS */
UNIV_INTERN
-enum db_err
+dberr_t
lock_trx_handle_wait(
/*=================*/
trx_t* trx) /*!< in/out: trx lock state */
@@ -881,20 +888,18 @@ lock_trx_has_sys_table_locks(
((type_mode & (LOCK_CONV_BY_OTHER | LOCK_WAIT)) == LOCK_WAIT)
/** Lock operation struct */
-typedef struct lock_op_struct lock_op_t;
-/** Lock operation struct */
-struct lock_op_struct{
+struct lock_op_t{
dict_table_t* table; /*!< table to be locked */
enum lock_mode mode; /*!< lock mode */
};
/** The lock system struct */
-struct lock_sys_struct{
- mutex_t mutex; /*!< Mutex protecting the
+struct lock_sys_t{
+ ib_mutex_t mutex; /*!< Mutex protecting the
locks */
hash_table_t* rec_hash; /*!< hash table of the record
locks */
- mutex_t wait_mutex; /*!< Mutex protecting the
+ ib_mutex_t wait_mutex; /*!< Mutex protecting the
next two fields */
srv_slot_t* waiting_threads; /*!< Array of user threads
suspended while waiting for
@@ -909,6 +914,16 @@ struct lock_sys_struct{
recovered transactions is
complete. Protected by
lock_sys->mutex */
+
+ ulint n_lock_max_wait_time; /*!< Max wait time */
+
+ os_event_t timeout_event; /*!< Set to the event that is
+ created in the lock wait monitor
+ thread. A value of 0 means the
+ thread is not active */
+
+ bool timeout_thread_active; /*!< True if the timeout thread
+ is running */
};
/** The lock system */
@@ -943,14 +958,6 @@ extern lock_sys_t* lock_sys;
mutex_exit(&lock_sys->wait_mutex); \
} while (0)
-// FIXME: Move these to lock_sys_t
-extern ibool srv_lock_timeout_active;
-extern ulint srv_n_lock_wait_count;
-extern ulint srv_n_lock_wait_current_count;
-extern ib_int64_t srv_n_lock_wait_time;
-extern ulint srv_n_lock_max_wait_time;
-extern os_event_t srv_lock_timeout_thread_event;
-
#ifndef UNIV_NONINL
#include "lock0lock.ic"
#endif
diff --git a/storage/innobase/include/lock0priv.h b/storage/innobase/include/lock0priv.h
index d516289e1f2..9f7ab9f76b6 100644
--- a/storage/innobase/include/lock0priv.h
+++ b/storage/innobase/include/lock0priv.h
@@ -40,9 +40,7 @@ those functions in lock/ */
#include "ut0lst.h"
/** A table lock */
-typedef struct lock_table_struct lock_table_t;
-/** A table lock */
-struct lock_table_struct {
+struct lock_table_t {
dict_table_t* table; /*!< database table in dictionary
cache */
UT_LIST_NODE_T(lock_t)
@@ -51,9 +49,7 @@ struct lock_table_struct {
};
/** Record lock for a page */
-typedef struct lock_rec_struct lock_rec_t;
-/** Record lock for a page */
-struct lock_rec_struct {
+struct lock_rec_t {
ulint space; /*!< space id */
ulint page_no; /*!< page number */
ulint n_bits; /*!< number of bits in the lock
@@ -63,7 +59,7 @@ struct lock_rec_struct {
};
/** Lock struct; protected by lock_sys->mutex */
-struct lock_struct {
+struct lock_t {
trx_t* trx; /*!< transaction owning the
lock */
UT_LIST_NODE_T(lock_t)
diff --git a/storage/innobase/include/lock0types.h b/storage/innobase/include/lock0types.h
index 16e6b2e0113..cf32e72f864 100644
--- a/storage/innobase/include/lock0types.h
+++ b/storage/innobase/include/lock0types.h
@@ -27,8 +27,8 @@ Created 5/7/1996 Heikki Tuuri
#define lock0types_h
#define lock_t ib_lock_t
-typedef struct lock_struct lock_t;
-typedef struct lock_sys_struct lock_sys_t;
+struct lock_t;
+struct lock_sys_t;
/* Basic lock modes */
enum lock_mode {
diff --git a/storage/innobase/include/log0log.h b/storage/innobase/include/log0log.h
index 5d72c7a96da..dd5e37012b7 100644
--- a/storage/innobase/include/log0log.h
+++ b/storage/innobase/include/log0log.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2011, Oracle and/or its affiliates. All rights reserved.
+Copyright (c) 1995, 2012, Oracle and/or its affiliates. All rights reserved.
Copyright (c) 2009, Google Inc.
Portions of this file contain modifications contributed and copyrighted by
@@ -48,9 +48,9 @@ typedef ib_uint64_t lsn_t;
#define LSN_PF UINT64PF
/** Redo log buffer */
-typedef struct log_struct log_t;
+struct log_t;
/** Redo log group */
-typedef struct log_group_struct log_group_t;
+struct log_group_t;
#ifdef UNIV_DEBUG
/** Flag: write to log file? */
@@ -67,7 +67,7 @@ extern ibool log_debug_writes;
#define LOG_WAIT_ONE_GROUP 92
#define LOG_WAIT_ALL_GROUPS 93
/* @} */
-/** Maximum number of log groups in log_group_struct::checkpoint_buf */
+/** Maximum number of log groups in log_group_t::checkpoint_buf */
#define LOG_MAX_N_GROUPS 32
/*******************************************************************//**
@@ -161,6 +161,14 @@ UNIV_INLINE
lsn_t
log_get_capacity(void);
/*==================*/
+/****************************************************************
+Get log_sys::max_modified_age_async. It is OK to read the value without
+holding log_sys::mutex because it is constant.
+@return max_modified_age_async */
+UNIV_INLINE
+lsn_t
+log_get_max_modified_age_async(void);
+/*================================*/
/******************************************************//**
Initializes the log. */
UNIV_INTERN
@@ -223,15 +231,6 @@ void
log_buffer_sync_in_background(
/*==========================*/
ibool flush); /*<! in: flush the logs to disk */
-/****************************************************************//**
-Checks if an asynchronous flushing of dirty pages is required in the
-background. This function is only called from the page cleaner thread.
-@return lsn to which the flushing should happen or LSN_MAX
-if flushing is not required */
-UNIV_INTERN
-lsn_t
-log_async_flush_lsn(void);
-/*=====================*/
/******************************************************//**
Makes a checkpoint. Note that this function does not flush dirty
blocks from the buffer pool: it only checks what is lsn of the oldest
@@ -550,13 +549,19 @@ UNIV_INTERN
void
log_refresh_stats(void);
/*===================*/
-/**********************************************************
+/********************************************************//**
+Closes all log groups. */
+UNIV_INTERN
+void
+log_group_close_all(void);
+/*=====================*/
+/********************************************************//**
Shutdown the log system but do not release all the memory. */
UNIV_INTERN
void
log_shutdown(void);
/*==============*/
-/**********************************************************
+/********************************************************//**
Free the log system data structures. */
UNIV_INTERN
void
@@ -712,7 +717,7 @@ extern log_t* log_sys;
/** Log group consists of a number of log files, each of the same size; a log
group is implemented as a space in the sense of the module fil0fil. */
-struct log_group_struct{
+struct log_group_t{
/* The following fields are protected by log_sys->mutex */
ulint id; /*!< log group id */
ulint n_files; /*!< number of files in the group */
@@ -764,7 +769,7 @@ struct log_group_struct{
};
/** Redo log buffer */
-struct log_struct{
+struct log_t{
byte pad[64]; /*!< padding to prevent other memory
update hotspots from residing on the
same memory cache line */
@@ -772,9 +777,9 @@ struct log_struct{
ulint buf_free; /*!< first free offset within the log
buffer */
#ifndef UNIV_HOTBACKUP
- mutex_t mutex; /*!< mutex protecting the log */
+ ib_mutex_t mutex; /*!< mutex protecting the log */
- mutex_t log_flush_order_mutex;/*!< mutex to serialize access to
+ ib_mutex_t log_flush_order_mutex;/*!< mutex to serialize access to
the flush list when we are putting
dirty blocks in the list. The idea
behind this mutex is to be able
diff --git a/storage/innobase/include/log0log.ic b/storage/innobase/include/log0log.ic
index ad7b7e790a2..67792395ac9 100644
--- a/storage/innobase/include/log0log.ic
+++ b/storage/innobase/include/log0log.ic
@@ -446,6 +446,18 @@ log_get_capacity(void)
return(log_sys->log_group_capacity);
}
+/****************************************************************
+Get log_sys::max_modified_age_async. It is OK to read the value without
+holding log_sys::mutex because it is constant.
+@return max_modified_age_async */
+UNIV_INLINE
+lsn_t
+log_get_max_modified_age_async(void)
+/*================================*/
+{
+ return(log_sys->max_modified_age_async);
+}
+
/***********************************************************************//**
Checks if there is need for a log buffer flush or a new checkpoint, and does
this if yes. Any database operation should call this when it has modified
diff --git a/storage/innobase/include/log0recv.h b/storage/innobase/include/log0recv.h
index 218298a1698..dcdd4bdd8aa 100644
--- a/storage/innobase/include/log0recv.h
+++ b/storage/innobase/include/log0recv.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1997, 2010, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1997, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -128,7 +128,7 @@ recv_recovery_from_checkpoint_finish should be called later to complete
the recovery and free the resources used in it.
@return error code or DB_SUCCESS */
UNIV_INTERN
-ulint
+dberr_t
recv_recovery_from_checkpoint_start_func(
/*=====================================*/
#ifdef UNIV_LOG_ARCHIVE
@@ -212,18 +212,18 @@ UNIV_INTERN
void
recv_reset_logs(
/*============*/
- lsn_t lsn, /*!< in: reset to this lsn
- rounded up to be divisible by
- OS_FILE_LOG_BLOCK_SIZE, after
- which we add
- LOG_BLOCK_HDR_SIZE */
#ifdef UNIV_LOG_ARCHIVE
ulint arch_log_no, /*!< in: next archived log file number */
-#endif /* UNIV_LOG_ARCHIVE */
- ibool new_logs_created);/*!< in: TRUE if resetting logs
+ ibool new_logs_created,/*!< in: TRUE if resetting logs
is done at the log creation;
FALSE if it is done after
archive recovery */
+#endif /* UNIV_LOG_ARCHIVE */
+ lsn_t lsn); /*!< in: reset to this lsn
+ rounded up to be divisible by
+ OS_FILE_LOG_BLOCK_SIZE, after
+ which we add
+ LOG_BLOCK_HDR_SIZE */
#ifdef UNIV_HOTBACKUP
/******************************************************//**
Creates new log files after a backup has been restored. */
@@ -318,9 +318,7 @@ recv_recovery_from_archive_finish(void);
#endif /* UNIV_LOG_ARCHIVE */
/** Block of log record data */
-typedef struct recv_data_struct recv_data_t;
-/** Block of log record data */
-struct recv_data_struct{
+struct recv_data_t{
recv_data_t* next; /*!< pointer to the next block or NULL */
/*!< the log record data is stored physically
immediately after this struct, max amount
@@ -328,9 +326,7 @@ struct recv_data_struct{
};
/** Stored log record struct */
-typedef struct recv_struct recv_t;
-/** Stored log record struct */
-struct recv_struct{
+struct recv_t{
byte type; /*!< log record type */
ulint len; /*!< log record body length in bytes */
recv_data_t* data; /*!< chain of blocks containing the log record
@@ -347,7 +343,7 @@ struct recv_struct{
rec_list;/*!< list of log records for this page */
};
-/** States of recv_addr_struct */
+/** States of recv_addr_t */
enum recv_addr_state {
/** not yet processed */
RECV_NOT_PROCESSED,
@@ -361,9 +357,7 @@ enum recv_addr_state {
};
/** Hashed page file address struct */
-typedef struct recv_addr_struct recv_addr_t;
-/** Hashed page file address struct */
-struct recv_addr_struct{
+struct recv_addr_t{
enum recv_addr_state state;
/*!< recovery state of the page */
unsigned space:32;/*!< space id */
@@ -374,13 +368,14 @@ struct recv_addr_struct{
};
/** Recovery system data structure */
-typedef struct recv_sys_struct recv_sys_t;
-/** Recovery system data structure */
-struct recv_sys_struct{
+struct recv_sys_t{
#ifndef UNIV_HOTBACKUP
- mutex_t mutex; /*!< mutex protecting the fields apply_log_recs,
+ ib_mutex_t mutex; /*!< mutex protecting the fields apply_log_recs,
n_addrs, and the state field in each recv_addr
struct */
+ ib_mutex_t writer_mutex;/*!< mutex coordinating
+ flushing between recv_writer_thread and
+ the recovery thread. */
#endif /* !UNIV_HOTBACKUP */
ibool apply_log_recs;
/*!< this is TRUE when log rec application to
diff --git a/storage/innobase/include/mach0data.h b/storage/innobase/include/mach0data.h
index 3066070ef39..d0087f56aaa 100644
--- a/storage/innobase/include/mach0data.h
+++ b/storage/innobase/include/mach0data.h
@@ -374,6 +374,40 @@ mach_read_int_type(
const byte* src, /*!< in: where to read from */
ulint len, /*!< in: length of src */
ibool unsigned_type); /*!< in: signed or unsigned flag */
+/***********************************************************//**
+Convert integral type from host byte order to (big-endian) storage
+byte order. */
+UNIV_INLINE
+void
+mach_write_int_type(
+/*================*/
+ byte* dest, /*!< in: where to write*/
+ const byte* src, /*!< in: where to read from */
+ ulint len, /*!< in: length of src */
+ bool usign); /*!< in: signed or unsigned flag */
+
+/*************************************************************
+Convert a ulonglong integer from host byte order to (big-endian)
+storage byte order. */
+UNIV_INLINE
+void
+mach_write_ulonglong(
+/*=================*/
+ byte* dest, /*!< in: where to write */
+ ulonglong src, /*!< in: where to read from */
+ ulint len, /*!< in: length of dest */
+ bool usign); /*!< in: signed or unsigned flag */
+
+/********************************************************//**
+Reads 1 - 4 bytes from a file page buffered in the buffer pool.
+@return value read */
+UNIV_INLINE
+ulint
+mach_read_ulint(
+/*============*/
+ const byte* ptr, /*!< in: pointer from where to read */
+ ulint type); /*!< in: MLOG_1BYTE, MLOG_2BYTES, MLOG_4BYTES */
+
#endif /* !UNIV_HOTBACKUP */
#endif /* !UNIV_INNOCHECKSUM */
diff --git a/storage/innobase/include/mach0data.ic b/storage/innobase/include/mach0data.ic
index ec1a28bca47..fffef87f09d 100644
--- a/storage/innobase/include/mach0data.ic
+++ b/storage/innobase/include/mach0data.ic
@@ -776,5 +776,104 @@ mach_read_int_type(
return(ret);
}
+/*********************************************************//**
+Swap byte ordering. */
+UNIV_INLINE
+void
+mach_swap_byte_order(
+/*=================*/
+ byte* dest, /*!< out: where to write */
+ const byte* from, /*!< in: where to read from */
+ ulint len) /*!< in: length of src */
+{
+ ut_ad(len > 0);
+ ut_ad(len <= 8);
+
+ dest += len;
+
+ switch (len & 0x7) {
+ case 0: *--dest = *from++;
+ case 7: *--dest = *from++;
+ case 6: *--dest = *from++;
+ case 5: *--dest = *from++;
+ case 4: *--dest = *from++;
+ case 3: *--dest = *from++;
+ case 2: *--dest = *from++;
+ case 1: *--dest = *from;
+ }
+}
+
+/*************************************************************
+Convert integral type from host byte order to (big-endian) storage
+byte order. */
+UNIV_INLINE
+void
+mach_write_int_type(
+/*================*/
+ byte* dest, /*!< in: where to write */
+ const byte* src, /*!< in: where to read from */
+ ulint len, /*!< in: length of src */
+ bool usign) /*!< in: signed or unsigned flag */
+{
+#ifdef WORDS_BIGENDIAN
+ memcpy(dest, src, len);
+#else
+ mach_swap_byte_order(dest, src, len);
+#endif /* WORDS_BIGENDIAN */
+
+ if (!usign) {
+ *dest ^= 0x80;
+ }
+}
+
+/*************************************************************
+Convert a ulonglong integer from host byte order to (big-endian)
+storage byte order. */
+UNIV_INLINE
+void
+mach_write_ulonglong(
+/*=================*/
+ byte* dest, /*!< in: where to write */
+ ulonglong src, /*!< in: where to read from */
+ ulint len, /*!< in: length of dest */
+ bool usign) /*!< in: signed or unsigned flag */
+{
+ byte* ptr = reinterpret_cast<byte*>(&src);
+
+ ut_ad(len <= sizeof(ulonglong));
+
+#ifdef WORDS_BIGENDIAN
+ memcpy(dest, ptr + (sizeof(src) - len), len);
+#else
+ mach_swap_byte_order(dest, reinterpret_cast<byte*>(ptr), len);
+#endif /* WORDS_BIGENDIAN */
+
+ if (!usign) {
+ *dest ^= 0x80;
+ }
+}
+
+/********************************************************//**
+Reads 1 - 4 bytes from a file page buffered in the buffer pool.
+@return value read */
+UNIV_INLINE
+ulint
+mach_read_ulint(
+/*============*/
+ const byte* ptr, /*!< in: pointer from where to read */
+ ulint type) /*!< in: 1,2 or 4 bytes */
+{
+ switch (type) {
+ case 1:
+ return(mach_read_from_1(ptr));
+ case 2:
+ return(mach_read_from_2(ptr));
+ case 4:
+ return(mach_read_from_4(ptr));
+ default:
+ ut_error;
+ }
+}
+
#endif /* !UNIV_HOTBACKUP */
#endif /* !UNIV_INNOCHECKSUM */
diff --git a/storage/innobase/include/mem0dbg.h b/storage/innobase/include/mem0dbg.h
index 9f95e84c81e..cc339b82910 100644
--- a/storage/innobase/include/mem0dbg.h
+++ b/storage/innobase/include/mem0dbg.h
@@ -32,7 +32,7 @@ check fields whose sizes are given below */
/* The mutex which protects in the debug version the hash table
containing the list of live memory heaps, and also the global
variables in mem0dbg.cc. */
-extern mutex_t mem_hash_mutex;
+extern ib_mutex_t mem_hash_mutex;
# endif /* !UNIV_HOTBACKUP */
#define MEM_FIELD_HEADER_SIZE ut_calc_align(2 * sizeof(ulint),\
diff --git a/storage/innobase/include/mem0mem.h b/storage/innobase/include/mem0mem.h
index 6851a5bc01b..c36ef06b554 100644
--- a/storage/innobase/include/mem0mem.h
+++ b/storage/innobase/include/mem0mem.h
@@ -38,15 +38,12 @@ Created 6/9/1994 Heikki Tuuri
/* -------------------- MEMORY HEAPS ----------------------------- */
-/* The info structure stored at the beginning of a heap block */
-typedef struct mem_block_info_struct mem_block_info_t;
-
/* A block of a memory heap consists of the info structure
followed by an area of memory */
-typedef mem_block_info_t mem_block_t;
+typedef struct mem_block_info_t mem_block_t;
/* A memory heap is a nonempty linear list of memory blocks */
-typedef mem_block_t mem_heap_t;
+typedef mem_block_t mem_heap_t;
/* Types of allocation for memory heaps: DYNAMIC means allocation from the
dynamic memory pool of the C compiler, BUFFER means allocation from the
@@ -343,9 +340,8 @@ mem_validate_all_blocks(void);
/*#######################################################################*/
-/* The info header of a block in a memory heap */
-
-struct mem_block_info_struct {
+/** The info structure stored at the beginning of a heap block */
+struct mem_block_info_t {
ulint magic_n;/* magic number for debugging */
char file_name[8];/* file name where the mem heap was created */
ulint line; /*!< line number where the mem heap was created */
diff --git a/storage/innobase/include/mem0mem.ic b/storage/innobase/include/mem0mem.ic
index eee3806dd52..7f0e128cc40 100644
--- a/storage/innobase/include/mem0mem.ic
+++ b/storage/innobase/include/mem0mem.ic
@@ -247,16 +247,13 @@ mem_heap_free_heap_top(
{
mem_block_t* block;
mem_block_t* prev_block;
-#ifdef UNIV_MEM_DEBUG
+#if defined UNIV_MEM_DEBUG || defined UNIV_DEBUG
ibool error;
ulint total_size;
ulint size;
-#endif
ut_ad(mem_heap_check(heap));
-#ifdef UNIV_MEM_DEBUG
-
/* Validate the heap and get its total allocated size */
mem_heap_validate_or_print(heap, NULL, FALSE, &error, &total_size,
NULL, NULL);
@@ -294,9 +291,9 @@ mem_heap_free_heap_top(
/* Set the free field of block */
mem_block_set_free(block, old_top - (byte*) block);
-#ifdef UNIV_MEM_DEBUG
ut_ad(mem_block_get_start(block) <= mem_block_get_free(block));
-
+ UNIV_MEM_ASSERT_W(old_top, (byte*) block + block->len - old_top);
+#if defined UNIV_MEM_DEBUG
/* In the debug version erase block from top up */
mem_erase_buf(old_top, (byte*) block + block->len - old_top);
@@ -304,8 +301,6 @@ mem_heap_free_heap_top(
mutex_enter(&mem_hash_mutex);
mem_current_allocated_memory -= (total_size - size);
mutex_exit(&mem_hash_mutex);
-#else /* UNIV_MEM_DEBUG */
- UNIV_MEM_ASSERT_W(old_top, (byte*) block + block->len - old_top);
#endif /* UNIV_MEM_DEBUG */
UNIV_MEM_ALLOC(old_top, (byte*) block + block->len - old_top);
diff --git a/storage/innobase/include/mem0pool.h b/storage/innobase/include/mem0pool.h
index 451055e857f..a65ba50fdf9 100644
--- a/storage/innobase/include/mem0pool.h
+++ b/storage/innobase/include/mem0pool.h
@@ -30,17 +30,14 @@ Created 6/9/1994 Heikki Tuuri
#include "os0file.h"
#include "ut0lst.h"
-/** Memory area header */
-typedef struct mem_area_struct mem_area_t;
/** Memory pool */
-typedef struct mem_pool_struct mem_pool_t;
+struct mem_pool_t;
/** The common memory pool */
extern mem_pool_t* mem_comm_pool;
/** Memory area header */
-
-struct mem_area_struct{
+struct mem_area_t{
ulint size_and_free; /*!< memory area size is obtained by
anding with ~MEM_AREA_FREE; area in
a free list if ANDing with
@@ -50,7 +47,7 @@ struct mem_area_struct{
};
/** Each memory area takes this many extra bytes for control information */
-#define MEM_AREA_EXTRA_SIZE (ut_calc_align(sizeof(struct mem_area_struct),\
+#define MEM_AREA_EXTRA_SIZE (ut_calc_align(sizeof(struct mem_area_t),\
UNIV_MEM_ALIGNMENT))
/********************************************************************//**
diff --git a/storage/innobase/include/mtr0log.h b/storage/innobase/include/mtr0log.h
index 1427a981bef..18a345d050f 100644
--- a/storage/innobase/include/mtr0log.h
+++ b/storage/innobase/include/mtr0log.h
@@ -32,8 +32,8 @@ Created 12/7/1995 Heikki Tuuri
#ifndef UNIV_HOTBACKUP
/********************************************************//**
-Writes 1 - 4 bytes to a file page buffered in the buffer pool.
-Writes the corresponding log record to the mini-transaction log. */
+Writes 1, 2 or 4 bytes to a file page. Writes the corresponding log
+record to the mini-transaction log if mtr is not NULL. */
UNIV_INTERN
void
mlog_write_ulint(
@@ -43,8 +43,8 @@ mlog_write_ulint(
byte type, /*!< in: MLOG_1BYTE, MLOG_2BYTES, MLOG_4BYTES */
mtr_t* mtr); /*!< in: mini-transaction handle */
/********************************************************//**
-Writes 8 bytes to a file page buffered in the buffer pool.
-Writes the corresponding log record to the mini-transaction log. */
+Writes 8 bytes to a file page. Writes the corresponding log
+record to the mini-transaction log, only if mtr is not NULL */
UNIV_INTERN
void
mlog_write_ull(
@@ -217,12 +217,13 @@ UNIV_INTERN
byte*
mlog_open_and_write_index(
/*======================*/
- mtr_t* mtr, /*!< in: mtr */
- const byte* rec, /*!< in: index record or page */
- dict_index_t* index, /*!< in: record descriptor */
- byte type, /*!< in: log item type */
- ulint size); /*!< in: requested buffer size in bytes
- (if 0, calls mlog_close() and returns NULL) */
+ mtr_t* mtr, /*!< in: mtr */
+ const byte* rec, /*!< in: index record or page */
+ const dict_index_t* index, /*!< in: record descriptor */
+ byte type, /*!< in: log item type */
+ ulint size); /*!< in: requested buffer size in bytes
+ (if 0, calls mlog_close() and
+ returns NULL) */
#endif /* !UNIV_HOTBACKUP */
/********************************************************//**
diff --git a/storage/innobase/include/mtr0mtr.h b/storage/innobase/include/mtr0mtr.h
index fd84f1119cc..f8c1874412c 100644
--- a/storage/innobase/include/mtr0mtr.h
+++ b/storage/innobase/include/mtr0mtr.h
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1995, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2012, Facebook Inc.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -39,6 +40,7 @@ Created 11/26/1995 Heikki Tuuri
#define MTR_LOG_ALL 21 /* default mode: log all operations
modifying disk-based data */
#define MTR_LOG_NONE 22 /* log no operations */
+#define MTR_LOG_NO_REDO 23 /* Don't generate REDO */
/*#define MTR_LOG_SPACE 23 */ /* log only operations modifying
file space page allocation data
(operations in fsp0fsp.* ) */
@@ -180,7 +182,11 @@ For 1 - 8 bytes, the flag value must give the length also! @{ */
#define MLOG_ZIP_WRITE_HEADER ((byte)50) /*!< write to compressed page
header */
#define MLOG_ZIP_PAGE_COMPRESS ((byte)51) /*!< compress an index page */
-#define MLOG_BIGGEST_TYPE ((byte)51) /*!< biggest value (used in
+#define MLOG_ZIP_PAGE_COMPRESS_NO_DATA ((byte)52)/*!< compress an index page
+ without logging its image */
+#define MLOG_ZIP_PAGE_REORGANIZE ((byte)53) /*!< reorganize a compressed
+ page */
+#define MLOG_BIGGEST_TYPE ((byte)53) /*!< biggest value (used in
assertions) */
/* @} */
@@ -358,15 +364,14 @@ mtr_memo_push(
void* object, /*!< in: object */
ulint type); /*!< in: object type: MTR_MEMO_S_LOCK, ... */
-/* Type definition of a mini-transaction memo stack slot. */
-typedef struct mtr_memo_slot_struct mtr_memo_slot_t;
-struct mtr_memo_slot_struct{
+/** Mini-transaction memo stack slot. */
+struct mtr_memo_slot_t{
ulint type; /*!< type of the stored object (MTR_MEMO_S_LOCK, ...) */
void* object; /*!< pointer to the object */
};
/* Mini-transaction handle and buffer */
-struct mtr_struct{
+struct mtr_t{
#ifdef UNIV_DEBUG
ulint state; /*!< MTR_ACTIVE, MTR_COMMITTING, MTR_COMMITTED */
#endif
diff --git a/storage/innobase/include/mtr0mtr.ic b/storage/innobase/include/mtr0mtr.ic
index dcd9826b380..bb24734c9bb 100644
--- a/storage/innobase/include/mtr0mtr.ic
+++ b/storage/innobase/include/mtr0mtr.ic
@@ -269,7 +269,7 @@ mtr_s_lock_func(
ut_ad(mtr);
ut_ad(lock);
- rw_lock_s_lock_func(lock, 0, file, line);
+ rw_lock_s_lock_inline(lock, 0, file, line);
mtr_memo_push(mtr, lock, MTR_MEMO_S_LOCK);
}
@@ -288,7 +288,7 @@ mtr_x_lock_func(
ut_ad(mtr);
ut_ad(lock);
- rw_lock_x_lock_func(lock, 0, file, line);
+ rw_lock_x_lock_inline(lock, 0, file, line);
mtr_memo_push(mtr, lock, MTR_MEMO_X_LOCK);
}
diff --git a/storage/innobase/include/mtr0types.h b/storage/innobase/include/mtr0types.h
index 7a2bcefadb9..43368c0b726 100644
--- a/storage/innobase/include/mtr0types.h
+++ b/storage/innobase/include/mtr0types.h
@@ -26,6 +26,6 @@ Created 11/26/1995 Heikki Tuuri
#ifndef mtr0types_h
#define mtr0types_h
-typedef struct mtr_struct mtr_t;
+struct mtr_t;
#endif
diff --git a/storage/innobase/include/os0file.h b/storage/innobase/include/os0file.h
index 8f84193cb0f..4a744c1b268 100644
--- a/storage/innobase/include/os0file.h
+++ b/storage/innobase/include/os0file.h
@@ -1,6 +1,6 @@
/***********************************************************************
-Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2012, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2009, Percona Inc.
Portions of this file contain modifications contributed and copyrighted
@@ -44,7 +44,7 @@ Created 10/21/1995 Heikki Tuuri
#endif
/** File node of a tablespace or the log data space */
-typedef struct fil_node_struct fil_node_t;
+struct fil_node_t;
extern ibool os_has_said_disk_full;
/** Flag: enable debug printout for asynchronous i/o */
@@ -102,7 +102,7 @@ log. */
#define OS_FILE_LOG_BLOCK_SIZE 512
/** Options for os_file_create_func @{ */
-typedef enum os_file_create_enum {
+enum os_file_create_t {
OS_FILE_OPEN = 51, /*!< to open an existing file (if
doesn't exist, error) */
OS_FILE_CREATE, /*!< to create new file (if
@@ -122,7 +122,7 @@ typedef enum os_file_create_enum {
the log unless it is a fatal error,
this flag is only used if
ON_ERROR_NO_EXIT is set */
-} os_file_create_t;
+};
#define OS_FILE_READ_ONLY 333
#define OS_FILE_READ_WRITE 444
@@ -217,10 +217,10 @@ used to register actual file read, write and flush */
# define register_pfs_file_open_begin(state, locker, key, op, name, \
src_file, src_line) \
do { \
- locker = PSI_CALL(get_thread_file_name_locker)( \
+ locker = PSI_FILE_CALL(get_thread_file_name_locker)( \
state, key, op, name, &locker); \
if (UNIV_LIKELY(locker != NULL)) { \
- PSI_CALL(start_file_open_wait)( \
+ PSI_FILE_CALL(start_file_open_wait)( \
locker, src_file, src_line); \
} \
} while (0)
@@ -228,7 +228,7 @@ do { \
# define register_pfs_file_open_end(locker, file) \
do { \
if (UNIV_LIKELY(locker != NULL)) { \
- PSI_CALL(end_file_open_wait_and_bind_to_descriptor)( \
+ PSI_FILE_CALL(end_file_open_wait_and_bind_to_descriptor)(\
locker, file); \
} \
} while (0)
@@ -236,10 +236,10 @@ do { \
# define register_pfs_file_io_begin(state, locker, file, count, op, \
src_file, src_line) \
do { \
- locker = PSI_CALL(get_thread_file_descriptor_locker)( \
+ locker = PSI_FILE_CALL(get_thread_file_descriptor_locker)( \
state, file, op); \
if (UNIV_LIKELY(locker != NULL)) { \
- PSI_CALL(start_file_wait)( \
+ PSI_FILE_CALL(start_file_wait)( \
locker, count, src_file, src_line); \
} \
} while (0)
@@ -247,7 +247,7 @@ do { \
# define register_pfs_file_io_end(locker, count) \
do { \
if (UNIV_LIKELY(locker != NULL)) { \
- PSI_CALL(end_file_wait)(locker, count); \
+ PSI_FILE_CALL(end_file_wait)(locker, count); \
} \
} while (0)
#endif /* UNIV_PFS_IO */
@@ -345,13 +345,12 @@ to original un-instrumented file I/O APIs */
/* File types for directory entry data type */
-enum os_file_type_enum{
+enum os_file_type_t {
OS_FILE_TYPE_UNKNOWN = 0,
OS_FILE_TYPE_FILE, /* regular file */
OS_FILE_TYPE_DIR, /* directory */
OS_FILE_TYPE_LINK /* symbolic link */
};
-typedef enum os_file_type_enum os_file_type_t;
/* Maximum path string length in bytes when referring to tables with in the
'./databasename/tablename.ibd' path format; we can allocate at least 2 buffers
@@ -359,16 +358,18 @@ of this size from the thread stack; that is why this should not be made much
bigger than 4000 bytes */
#define OS_FILE_MAX_PATH 4000
-/* Struct used in fetching information of a file in a directory */
-struct os_file_stat_struct{
+/** Struct used in fetching information of a file in a directory */
+struct os_file_stat_t {
char name[OS_FILE_MAX_PATH]; /*!< path to a file */
os_file_type_t type; /*!< file type */
ib_int64_t size; /*!< file size */
time_t ctime; /*!< creation time */
time_t mtime; /*!< modification time */
time_t atime; /*!< access time */
+ bool rw_perm; /*!< true if can be opened
+ in read-write mode. Only valid
+ if type == OS_FILE_TYPE_FILE */
};
-typedef struct os_file_stat_struct os_file_stat_t;
#ifdef __WIN__
typedef HANDLE os_file_dir_t; /*!< directory stream */
@@ -525,7 +526,7 @@ os_file_create_func(
Deletes a file. The file has to be closed before calling this.
@return TRUE if success */
UNIV_INTERN
-ibool
+bool
os_file_delete(
/*===========*/
const char* name); /*!< in: file path as a null-terminated
@@ -535,7 +536,7 @@ os_file_delete(
Deletes a file if it exists. The file has to be closed before calling this.
@return TRUE if success */
UNIV_INTERN
-ibool
+bool
os_file_delete_if_exists(
/*=====================*/
const char* name); /*!< in: file path as a null-terminated
@@ -826,7 +827,7 @@ UNIV_INTERN
ulint
os_file_get_last_error(
/*===================*/
- ibool report_all_errors); /*!< in: TRUE if we want an error message
+ bool report_all_errors); /*!< in: TRUE if we want an error message
printed of all errors */
/*******************************************************************//**
NOTE! Use the corresponding macro os_file_read(), not directly this function!
@@ -925,6 +926,60 @@ os_file_dirname(
/*============*/
const char* path); /*!< in: pathname */
/****************************************************************//**
+This function returns a new path name after replacing the basename
+in an old path with a new basename. The old_path is a full path
+name including the extension. The tablename is in the normal
+form "databasename/tablename". The new base name is found after
+the forward slash. Both input strings are null terminated.
+
+This function allocates memory to be returned. It is the caller's
+responsibility to free the return value after it is no longer needed.
+
+@return own: new full pathname */
+UNIV_INTERN
+char*
+os_file_make_new_pathname(
+/*======================*/
+ const char* old_path, /*!< in: pathname */
+ const char* new_name); /*!< in: new file name */
+/****************************************************************//**
+This function returns a remote path name by combining a data directory
+path provided in a DATA DIRECTORY clause with the tablename which is
+in the form 'database/tablename'. It strips the file basename (which
+is the tablename) found after the last directory in the path provided.
+The full filepath created will include the database name as a directory
+under the path provided. The filename is the tablename with the '.ibd'
+extension. All input and output strings are null-terminated.
+
+This function allocates memory to be returned. It is the caller's
+responsibility to free the return value after it is no longer needed.
+
+@return own: A full pathname; data_dir_path/databasename/tablename.ibd */
+UNIV_INTERN
+char*
+os_file_make_remote_pathname(
+/*=========================*/
+ const char* data_dir_path, /*!< in: pathname */
+ const char* tablename, /*!< in: tablename */
+ const char* extention); /*!< in: file extension; ibd,cfg*/
+/****************************************************************//**
+This function reduces a null-terminated full remote path name into
+the path that is sent by MySQL for DATA DIRECTORY clause. It replaces
+the 'databasename/tablename.ibd' found at the end of the path with just
+'tablename'.
+
+Since the result is always smaller than the path sent in, no new memory
+is allocated. The caller should allocate memory for the path sent in.
+This function manipulates that path in place.
+
+If the path format is not as expected, just return. The result is used
+to inform a SHOW CREATE TABLE command. */
+UNIV_INTERN
+void
+os_file_make_data_dir_path(
+/*========================*/
+ char* data_dir_path); /*!< in/out: full path/data_dir_path */
+/****************************************************************//**
Creates all missing subdirectories along the given path.
@return TRUE if call succeeded FALSE otherwise */
UNIV_INTERN
@@ -1108,14 +1163,16 @@ os_aio_all_slots_free(void);
/*******************************************************************//**
This function returns information about the specified file
-@return TRUE if stat information found */
+@return DB_SUCCESS if all OK */
UNIV_INTERN
-ibool
+dberr_t
os_file_get_status(
/*===============*/
- const char* path, /*!< in: pathname of the file */
- os_file_stat_t* stat_info); /*!< information of a file in a
+ const char* path, /*!< in: pathname of the file */
+ os_file_stat_t* stat_info, /*!< information of a file in a
directory */
+ bool check_rw_perm); /*!< in: for testing whether the
+ file can be opened in RW mode */
#if !defined(UNIV_HOTBACKUP)
/*********************************************************************//**
diff --git a/storage/innobase/include/os0sync.h b/storage/innobase/include/os0sync.h
index d68823b72ca..d3ce68253ec 100644
--- a/storage/innobase/include/os0sync.h
+++ b/storage/innobase/include/os0sync.h
@@ -54,22 +54,19 @@ typedef pthread_cond_t os_cond_t;
/** Structure that includes Performance Schema Probe pfs_psi
in the os_fast_mutex structure if UNIV_PFS_MUTEX is defined */
-typedef struct os_fast_mutex_struct {
+struct os_fast_mutex_t {
fast_mutex_t mutex; /*!< os_fast_mutex */
#ifdef UNIV_PFS_MUTEX
struct PSI_mutex* pfs_psi;/*!< The performance schema
instrumentation hook */
#endif
-} os_fast_mutex_t;
-
+};
-/** Operating system event */
-typedef struct os_event_struct os_event_struct_t;
/** Operating system event handle */
-typedef os_event_struct_t* os_event_t;
+typedef struct os_event* os_event_t;
/** An asynchronous signal sent between threads */
-struct os_event_struct {
+struct os_event {
#ifdef __WIN__
HANDLE handle; /*!< kernel event object, slow,
used on older Windows */
@@ -84,7 +81,7 @@ struct os_event_struct {
the event becomes signaled */
os_cond_t cond_var; /*!< condition variable is used in
waiting for the event */
- UT_LIST_NODE_T(os_event_struct_t) os_event_list;
+ UT_LIST_NODE_T(os_event_t) os_event_list;
/*!< list of all created events */
};
@@ -94,16 +91,11 @@ struct os_event_struct {
/** Return value of os_event_wait_time() when the time is exceeded */
#define OS_SYNC_TIME_EXCEEDED 1
-/** Operating system mutex */
-typedef struct os_mutex_struct os_mutex_str_t;
/** Operating system mutex handle */
-typedef os_mutex_str_t* os_mutex_t;
-
-/** Return value of os_event_wait_time() when the time is exceeded */
-#define OS_SYNC_TIME_EXCEEDED 1
+typedef struct os_mutex_t* os_ib_mutex_t;
/** Mutex protecting counts and the event and OS 'slow' mutex lists */
-extern os_mutex_t os_sync_mutex;
+extern os_ib_mutex_t os_sync_mutex;
/** This is incremented by 1 in os_thread_create and decremented by 1 in
os_thread_exit */
@@ -132,10 +124,8 @@ explicitly by calling sync_os_reset_event.
@return the event handle */
UNIV_INTERN
os_event_t
-os_event_create(
-/*============*/
- const char* name); /*!< in: the name of the event, if NULL
- the event is created without a name */
+os_event_create(void);
+/*==================*/
/**********************************************************//**
Sets an event semaphore to the signaled state: lets waiting threads
proceed. */
@@ -191,7 +181,7 @@ os_event_wait_low(
os_event_reset(). */
#define os_event_wait(event) os_event_wait_low(event, 0)
-#define os_event_wait_time(e, t) os_event_wait_time_low(event, t, 0)
+#define os_event_wait_time(event, t) os_event_wait_time_low(event, t, 0)
/**********************************************************//**
Waits for an event object until it is in the signaled state or
@@ -210,10 +200,10 @@ os_event_wait_time_low(
os_event_reset(). */
/*********************************************************//**
Creates an operating system mutex semaphore. Because these are slow, the
-mutex semaphore of InnoDB itself (mutex_t) should be used where possible.
+mutex semaphore of InnoDB itself (ib_mutex_t) should be used where possible.
@return the mutex handle */
UNIV_INTERN
-os_mutex_t
+os_ib_mutex_t
os_mutex_create(void);
/*=================*/
/**********************************************************//**
@@ -222,21 +212,21 @@ UNIV_INTERN
void
os_mutex_enter(
/*===========*/
- os_mutex_t mutex); /*!< in: mutex to acquire */
+ os_ib_mutex_t mutex); /*!< in: mutex to acquire */
/**********************************************************//**
Releases ownership of a mutex. */
UNIV_INTERN
void
os_mutex_exit(
/*==========*/
- os_mutex_t mutex); /*!< in: mutex to release */
+ os_ib_mutex_t mutex); /*!< in: mutex to release */
/**********************************************************//**
Frees an mutex object. */
UNIV_INTERN
void
os_mutex_free(
/*==========*/
- os_mutex_t mutex); /*!< in: mutex to free */
+ os_ib_mutex_t mutex); /*!< in: mutex to free */
/**********************************************************//**
Acquires ownership of a fast mutex. Currently in Windows this is the same
as os_fast_mutex_lock!
@@ -365,7 +355,11 @@ Atomic compare-and-swap and increment for InnoDB. */
#if defined(HAVE_IB_GCC_ATOMIC_BUILTINS)
-#define HAVE_ATOMIC_BUILTINS
+# define HAVE_ATOMIC_BUILTINS
+
+# ifdef HAVE_IB_GCC_ATOMIC_BUILTINS_64
+# define HAVE_ATOMIC_BUILTINS_64
+# endif
/**********************************************************//**
Returns true if swapped, ptr is pointer to target, old_val is value to
@@ -419,6 +413,9 @@ amount to decrement. */
# define os_atomic_decrement_ulint(ptr, amount) \
os_atomic_decrement(ptr, amount)
+# define os_atomic_decrement_uint64(ptr, amount) \
+ os_atomic_decrement(ptr, amount)
+
/**********************************************************//**
Returns the old value of *ptr, atomically sets *ptr to new_val */
@@ -430,12 +427,13 @@ Returns the old value of *ptr, atomically sets *ptr to new_val */
#elif defined(HAVE_IB_SOLARIS_ATOMICS)
-#define HAVE_ATOMIC_BUILTINS
+# define HAVE_ATOMIC_BUILTINS
+# define HAVE_ATOMIC_BUILTINS_64
/* If not compiling with GCC or GCC doesn't support the atomic
intrinsics and running on Solaris >= 10 use Solaris atomics */
-#include <atomic.h>
+# include <atomic.h>
/**********************************************************//**
Returns true if swapped, ptr is pointer to target, old_val is value to
@@ -487,6 +485,9 @@ amount to decrement. */
# define os_atomic_decrement_ulint(ptr, amount) \
os_atomic_increment_ulint(ptr, -(amount))
+# define os_atomic_decrement_uint64(ptr, amount) \
+ os_atomic_increment_uint64(ptr, -(amount))
+
/**********************************************************//**
Returns the old value of *ptr, atomically sets *ptr to new_val */
@@ -498,7 +499,11 @@ Returns the old value of *ptr, atomically sets *ptr to new_val */
#elif defined(HAVE_WINDOWS_ATOMICS)
-#define HAVE_ATOMIC_BUILTINS
+# define HAVE_ATOMIC_BUILTINS
+
+# ifndef _WIN32
+# define HAVE_ATOMIC_BUILTINS_64
+# endif
/**********************************************************//**
Atomic compare and exchange of signed integers (both 32 and 64 bit).
@@ -574,8 +579,10 @@ amount of increment. */
# define os_atomic_increment_ulint(ptr, amount) \
((ulint) (win_xchg_and_add((lint*) ptr, (lint) amount) + amount))
-# define os_atomic_increment_uint64(ptr, amount) \
- ((ulint) (win_xchg_and_add(ptr, (lint) amount) + amount))
+# define os_atomic_increment_uint64(ptr, amount) \
+ ((ib_uint64_t) (InterlockedExchangeAdd64( \
+ (ib_int64_t*) ptr, \
+ (ib_int64_t) amount) + amount))
/**********************************************************//**
Returns the resulting value, ptr is pointer to target, amount is the
@@ -587,6 +594,11 @@ amount to decrement. There is no atomic substract function on Windows */
# define os_atomic_decrement_ulint(ptr, amount) \
((ulint) (win_xchg_and_add((lint*) ptr, -(lint) amount) - amount))
+# define os_atomic_decrement_uint64(ptr, amount) \
+ ((ib_uint64_t) (InterlockedExchangeAdd64( \
+ (ib_int64_t*) ptr, \
+ -(ib_int64_t) amount) - amount))
+
/**********************************************************//**
Returns the old value of *ptr, atomically sets *ptr to new_val.
InterlockedExchange() operates on LONG, and the LONG will be
diff --git a/storage/innobase/include/os0sync.ic b/storage/innobase/include/os0sync.ic
index 0d907b31366..33c238ceb47 100644
--- a/storage/innobase/include/os0sync.ic
+++ b/storage/innobase/include/os0sync.ic
@@ -66,7 +66,7 @@ pfs_os_fast_mutex_init(
os_fast_mutex_t* fast_mutex) /*!< out: fast mutex */
{
#ifdef HAVE_PSI_MUTEX_INTERFACE
- fast_mutex->pfs_psi = PSI_CALL(init_mutex)(key, &fast_mutex->mutex);
+ fast_mutex->pfs_psi = PSI_MUTEX_CALL(init_mutex)(key, &fast_mutex->mutex);
#else
fast_mutex->pfs_psi = NULL;
#endif
@@ -86,7 +86,7 @@ pfs_os_fast_mutex_free(
{
#ifdef HAVE_PSI_MUTEX_INTERFACE
if (fast_mutex->pfs_psi != NULL)
- PSI_CALL(destroy_mutex)(fast_mutex->pfs_psi);
+ PSI_MUTEX_CALL(destroy_mutex)(fast_mutex->pfs_psi);
#endif
fast_mutex->pfs_psi = NULL;
@@ -112,13 +112,13 @@ pfs_os_fast_mutex_lock(
PSI_mutex_locker* locker;
PSI_mutex_locker_state state;
- locker = PSI_CALL(start_mutex_wait)(&state, fast_mutex->pfs_psi,
+ locker = PSI_MUTEX_CALL(start_mutex_wait)(&state, fast_mutex->pfs_psi,
PSI_MUTEX_LOCK, file_name, line);
os_fast_mutex_lock_func(&fast_mutex->mutex);
if (locker != NULL)
- PSI_CALL(end_mutex_wait)(locker, 0);
+ PSI_MUTEX_CALL(end_mutex_wait)(locker, 0);
}
else
#endif
@@ -141,7 +141,7 @@ pfs_os_fast_mutex_unlock(
{
#ifdef HAVE_PSI_MUTEX_INTERFACE
if (fast_mutex->pfs_psi != NULL)
- PSI_CALL(unlock_mutex)(fast_mutex->pfs_psi);
+ PSI_MUTEX_CALL(unlock_mutex)(fast_mutex->pfs_psi);
#endif
os_fast_mutex_unlock_func(&fast_mutex->mutex);
diff --git a/storage/innobase/include/page0cur.h b/storage/innobase/include/page0cur.h
index 52f5c5de58a..038a05edbd0 100644
--- a/storage/innobase/include/page0cur.h
+++ b/storage/innobase/include/page0cur.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2009, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -170,8 +170,11 @@ page_cur_tuple_insert(
page_cur_t* cursor, /*!< in/out: a page cursor */
const dtuple_t* tuple, /*!< in: pointer to a data tuple */
dict_index_t* index, /*!< in: record descriptor */
+ ulint** offsets,/*!< out: offsets on *rec */
+ mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */
ulint n_ext, /*!< in: number of externally stored columns */
- mtr_t* mtr); /*!< in: mini-transaction handle, or NULL */
+ mtr_t* mtr) /*!< in: mini-transaction handle, or NULL */
+ __attribute__((nonnull(1,2,3,4,5), warn_unused_result));
#endif /* !UNIV_HOTBACKUP */
/***********************************************************//**
Inserts a record next to page cursor. Returns pointer to inserted record if
@@ -238,10 +241,11 @@ UNIV_INTERN
void
page_cur_delete_rec(
/*================*/
- page_cur_t* cursor, /*!< in/out: a page cursor */
- dict_index_t* index, /*!< in: record descriptor */
- const ulint* offsets,/*!< in: rec_get_offsets(cursor->rec, index) */
- mtr_t* mtr); /*!< in: mini-transaction handle */
+ page_cur_t* cursor, /*!< in/out: a page cursor */
+ const dict_index_t* index, /*!< in: record descriptor */
+ const ulint* offsets,/*!< in: rec_get_offsets(
+ cursor->rec, index) */
+ mtr_t* mtr); /*!< in: mini-transaction handle */
#ifndef UNIV_HOTBACKUP
/****************************************************************//**
Searches the right position for a page cursor.
@@ -331,10 +335,24 @@ page_cur_parse_delete_rec(
buf_block_t* block, /*!< in: page or NULL */
dict_index_t* index, /*!< in: record descriptor */
mtr_t* mtr); /*!< in: mtr or NULL */
+/*******************************************************//**
+Removes the record from a leaf page. This function does not log
+any changes. It is used by the IMPORT tablespace functions.
+@return true if success, i.e., the page did not become too empty */
+UNIV_INTERN
+bool
+page_delete_rec(
+/*============*/
+ const dict_index_t* index, /*!< in: The index that the record
+ belongs to */
+ page_cur_t* pcur, /*!< in/out: page cursor on record
+ to delete */
+ page_zip_des_t* page_zip,/*!< in: compressed page descriptor */
+ const ulint* offsets);/*!< in: offsets for record */
/** Index page cursor */
-struct page_cur_struct{
+struct page_cur_t{
byte* rec; /*!< pointer to a record on page */
buf_block_t* block; /*!< pointer to the block containing rec */
};
diff --git a/storage/innobase/include/page0cur.ic b/storage/innobase/include/page0cur.ic
index a065f9ff30d..90a5a690487 100644
--- a/storage/innobase/include/page0cur.ic
+++ b/storage/innobase/include/page0cur.ic
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -245,33 +245,36 @@ page_cur_tuple_insert(
page_cur_t* cursor, /*!< in/out: a page cursor */
const dtuple_t* tuple, /*!< in: pointer to a data tuple */
dict_index_t* index, /*!< in: record descriptor */
+ ulint** offsets,/*!< out: offsets on *rec */
+ mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */
ulint n_ext, /*!< in: number of externally stored columns */
mtr_t* mtr) /*!< in: mini-transaction handle, or NULL */
{
- mem_heap_t* heap;
- ulint* offsets;
ulint size
= rec_get_converted_size(index, tuple, n_ext);
rec_t* rec;
- heap = mem_heap_create(size
- + (4 + REC_OFFS_HEADER_SIZE
- + dtuple_get_n_fields(tuple))
- * sizeof *offsets);
- rec = rec_convert_dtuple_to_rec((byte*) mem_heap_alloc(heap, size),
+ if (!*heap) {
+ *heap = mem_heap_create(size
+ + (4 + REC_OFFS_HEADER_SIZE
+ + dtuple_get_n_fields(tuple))
+ * sizeof **offsets);
+ }
+
+ rec = rec_convert_dtuple_to_rec((byte*) mem_heap_alloc(*heap, size),
index, tuple, n_ext);
- offsets = rec_get_offsets(rec, index, NULL, ULINT_UNDEFINED, &heap);
+ *offsets = rec_get_offsets(
+ rec, index, *offsets, ULINT_UNDEFINED, heap);
if (buf_block_get_page_zip(cursor->block)) {
rec = page_cur_insert_rec_zip(&cursor->rec, cursor->block,
- index, rec, offsets, mtr);
+ index, rec, *offsets, mtr);
} else {
rec = page_cur_insert_rec_low(cursor->rec,
- index, rec, offsets, mtr);
+ index, rec, *offsets, mtr);
}
- ut_ad(!rec || !cmp_dtuple_rec(tuple, rec, offsets));
- mem_heap_free(heap);
+ ut_ad(!rec || !cmp_dtuple_rec(tuple, rec, *offsets));
return(rec);
}
#endif /* !UNIV_HOTBACKUP */
diff --git a/storage/innobase/include/page0page.h b/storage/innobase/include/page0page.h
index e4571b69376..773ec4c2177 100644
--- a/storage/innobase/include/page0page.h
+++ b/storage/innobase/include/page0page.h
@@ -551,6 +551,16 @@ page_rec_get_next_const(
/*====================*/
const rec_t* rec); /*!< in: pointer to record */
/************************************************************//**
+Gets the pointer to the next non delete-marked record on the page.
+If all subsequent records are delete-marked, then this function
+will return the supremum record.
+@return pointer to next non delete-marked record or pointer to supremum */
+UNIV_INLINE
+const rec_t*
+page_rec_get_next_non_del_marked(
+/*=============================*/
+ const rec_t* rec); /*!< in: pointer to record */
+/************************************************************//**
Sets the pointer to the next record on the page. */
UNIV_INLINE
void
@@ -737,11 +747,14 @@ UNIV_INLINE
void
page_mem_free(
/*==========*/
- page_t* page, /*!< in/out: index page */
- page_zip_des_t* page_zip,/*!< in/out: compressed page, or NULL */
- rec_t* rec, /*!< in: pointer to the (origin of) record */
- dict_index_t* index, /*!< in: index of rec */
- const ulint* offsets);/*!< in: array returned by rec_get_offsets() */
+ page_t* page, /*!< in/out: index page */
+ page_zip_des_t* page_zip,/*!< in/out: compressed page,
+ or NULL */
+ rec_t* rec, /*!< in: pointer to the (origin of)
+ record */
+ const dict_index_t* index, /*!< in: index of rec */
+ const ulint* offsets);/*!< in: array returned by
+ rec_get_offsets() */
/**********************************************************//**
Create an uncompressed B-tree index page.
@return pointer to the page */
@@ -1031,7 +1044,6 @@ page_find_rec_with_heap_no(
/*=======================*/
const page_t* page, /*!< in: index page */
ulint heap_no);/*!< in: heap number */
-
#ifdef UNIV_MATERIALIZE
#undef UNIV_INLINE
#define UNIV_INLINE UNIV_INLINE_ORIGINAL
diff --git a/storage/innobase/include/page0page.ic b/storage/innobase/include/page0page.ic
index e73e547e92b..c2e20d81a29 100644
--- a/storage/innobase/include/page0page.ic
+++ b/storage/innobase/include/page0page.ic
@@ -776,6 +776,30 @@ page_rec_get_next_const(
}
/************************************************************//**
+Gets the pointer to the next non delete-marked record on the page.
+If all subsequent records are delete-marked, then this function
+will return the supremum record.
+@return pointer to next non delete-marked record or pointer to supremum */
+UNIV_INLINE
+const rec_t*
+page_rec_get_next_non_del_marked(
+/*=============================*/
+ const rec_t* rec) /*!< in: pointer to record */
+{
+ const rec_t* r;
+ ulint page_is_compact = page_rec_is_comp(rec);
+
+ for (r = page_rec_get_next_const(rec);
+ !page_rec_is_supremum(r)
+ && rec_get_deleted_flag(r, page_is_compact);
+ r = page_rec_get_next_const(r)) {
+ /* noop */
+ }
+
+ return(r);
+}
+
+/************************************************************//**
Sets the pointer to the next record on the page. */
UNIV_INLINE
void
@@ -1085,11 +1109,14 @@ UNIV_INLINE
void
page_mem_free(
/*==========*/
- page_t* page, /*!< in/out: index page */
- page_zip_des_t* page_zip,/*!< in/out: compressed page, or NULL */
- rec_t* rec, /*!< in: pointer to the (origin of) record */
- dict_index_t* index, /*!< in: index of rec */
- const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
+ page_t* page, /*!< in/out: index page */
+ page_zip_des_t* page_zip, /*!< in/out: compressed page,
+ or NULL */
+ rec_t* rec, /*!< in: pointer to the
+ (origin of) record */
+ const dict_index_t* index, /*!< in: index of rec */
+ const ulint* offsets) /*!< in: array returned by
+ rec_get_offsets() */
{
rec_t* free;
ulint garbage;
diff --git a/storage/innobase/include/page0types.h b/storage/innobase/include/page0types.h
index da2ac1c7de2..533b0d3cf98 100644
--- a/storage/innobase/include/page0types.h
+++ b/storage/innobase/include/page0types.h
@@ -26,6 +26,10 @@ Created 2/2/1994 Heikki Tuuri
#ifndef page0types_h
#define page0types_h
+using namespace std;
+
+#include <map>
+
#include "univ.i"
#include "dict0types.h"
#include "mtr0types.h"
@@ -35,12 +39,12 @@ Created 2/2/1994 Heikki Tuuri
/** Type of the index page */
typedef byte page_t;
/** Index page cursor */
-typedef struct page_cur_struct page_cur_t;
+struct page_cur_t;
/** Compressed index page */
typedef byte page_zip_t;
/** Compressed page descriptor */
-typedef struct page_zip_des_struct page_zip_des_t;
+struct page_zip_des_t;
/* The following definitions would better belong to page0zip.h,
but we cannot include page0zip.h from rem0rec.ic, because
@@ -60,12 +64,14 @@ ssize, which is the number of shifts from 512. */
#endif
/** Compressed page descriptor */
-struct page_zip_des_struct
+struct page_zip_des_t
{
page_zip_t* data; /*!< compressed page data */
#ifdef UNIV_DEBUG
unsigned m_start:16; /*!< start offset of modification log */
+ bool m_external; /*!< Allocated externally, not from the
+ buffer pool */
#endif /* UNIV_DEBUG */
unsigned m_end:16; /*!< end offset of modification log */
unsigned m_nonempty:1; /*!< TRUE if the modification log
@@ -80,7 +86,7 @@ struct page_zip_des_struct
};
/** Compression statistics for a given page size */
-struct page_zip_stat_struct {
+struct page_zip_stat_t {
/** Number of page compressions */
ulint compressed;
/** Number of successful page compressions */
@@ -91,13 +97,29 @@ struct page_zip_stat_struct {
ib_uint64_t compressed_usec;
/** Duration of page decompressions in microseconds */
ib_uint64_t decompressed_usec;
+ page_zip_stat_t() :
+ /* Initialize members to 0 so that when we do
+ stlmap[key].compressed++ and element with "key" does not
+ exist it gets inserted with zeroed members. */
+ compressed(0),
+ compressed_ok(0),
+ decompressed(0),
+ compressed_usec(0),
+ decompressed_usec(0)
+ { }
};
-/** Compression statistics */
-typedef struct page_zip_stat_struct page_zip_stat_t;
-
-/** Statistics on compression, indexed by page_zip_des_struct::ssize - 1 */
-extern page_zip_stat_t page_zip_stat[PAGE_ZIP_SSIZE_MAX];
+/** Compression statistics types */
+typedef map<index_id_t, page_zip_stat_t> page_zip_stat_per_index_t;
+
+/** Statistics on compression, indexed by page_zip_des_t::ssize - 1 */
+extern page_zip_stat_t page_zip_stat[PAGE_ZIP_SSIZE_MAX];
+/** Statistics on compression, indexed by dict_index_t::id */
+extern page_zip_stat_per_index_t page_zip_stat_per_index;
+extern ib_mutex_t page_zip_stat_per_index_mutex;
+#ifdef HAVE_PSI_INTERFACE
+extern mysql_pfs_key_t page_zip_stat_per_index_mutex_key;
+#endif /* HAVE_PSI_INTERFACE */
/**********************************************************************//**
Write the "deleted" flag of a record on a compressed page. The flag must
diff --git a/storage/innobase/include/page0zip.h b/storage/innobase/include/page0zip.h
index 2c84f75b2ab..12781bd61b8 100644
--- a/storage/innobase/include/page0zip.h
+++ b/storage/innobase/include/page0zip.h
@@ -1,6 +1,7 @@
/*****************************************************************************
-Copyright (c) 2005, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2005, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2012, Facebook Inc.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -39,6 +40,16 @@ Created June 2005 by Marko Makela
#include "trx0types.h"
#include "mem0mem.h"
+/* Compression level to be used by zlib. Settable by user. */
+extern ulint page_compression_level;
+
+/* Default compression level. */
+#define DEFAULT_COMPRESSION_LEVEL 6
+
+/* Whether or not to log compressed page images to avoid possible
+compression algorithm changes in zlib. */
+extern bool page_log_compressed_pages;
+
/**********************************************************************//**
Determine the size of a compressed page in bytes.
@return size in bytes */
@@ -114,6 +125,7 @@ page_zip_compress(
m_start, m_end, m_nonempty */
const page_t* page, /*!< in: uncompressed page */
dict_index_t* index, /*!< in: index of the B-tree node */
+ ulint level, /*!< in: commpression level */
mtr_t* mtr) /*!< in: mini-transaction, or NULL */
__attribute__((nonnull(1,2,3)));
@@ -337,11 +349,12 @@ UNIV_INTERN
void
page_zip_dir_delete(
/*================*/
- page_zip_des_t* page_zip,/*!< in/out: compressed page */
- byte* rec, /*!< in: deleted record */
- dict_index_t* index, /*!< in: index of rec */
- const ulint* offsets,/*!< in: rec_get_offsets(rec) */
- const byte* free) /*!< in: previous start of the free list */
+ page_zip_des_t* page_zip, /*!< in/out: compressed page */
+ byte* rec, /*!< in: deleted record */
+ const dict_index_t* index, /*!< in: index of rec */
+ const ulint* offsets, /*!< in: rec_get_offsets(rec) */
+ const byte* free) /*!< in: previous start of
+ the free list */
__attribute__((nonnull(1,2,3,4)));
/**********************************************************************//**
@@ -461,14 +474,49 @@ page_zip_verify_checksum(
/*=====================*/
const void* data, /*!< in: compressed page */
ulint size); /*!< in: size of compressed page */
+/**********************************************************************//**
+Write a log record of compressing an index page without the data on the page. */
+UNIV_INLINE
+void
+page_zip_compress_write_log_no_data(
+/*================================*/
+ ulint level, /*!< in: compression level */
+ const page_t* page, /*!< in: page that is compressed */
+ dict_index_t* index, /*!< in: index */
+ mtr_t* mtr); /*!< in: mtr */
+/**********************************************************************//**
+Parses a log record of compressing an index page without the data.
+@return end of log record or NULL */
+UNIV_INLINE
+byte*
+page_zip_parse_compress_no_data(
+/*============================*/
+ byte* ptr, /*!< in: buffer */
+ byte* end_ptr, /*!< in: buffer end */
+ page_t* page, /*!< in: uncompressed page */
+ page_zip_des_t* page_zip, /*!< out: compressed page */
+ dict_index_t* index) /*!< in: index */
+ __attribute__((nonnull(1,2)));
+
+/**********************************************************************//**
+Reset the counters used for filling
+INFORMATION_SCHEMA.innodb_cmp_per_index. */
+UNIV_INLINE
+void
+page_zip_reset_stat_per_index();
+/*===========================*/
#ifndef UNIV_HOTBACKUP
/** Check if a pointer to an uncompressed page matches a compressed page.
+When we IMPORT a tablespace the blocks and accompanying frames are allocted
+from outside the buffer pool.
@param ptr pointer to an uncompressed page frame
@param page_zip compressed page descriptor
@return TRUE if ptr and page_zip refer to the same block */
-# define PAGE_ZIP_MATCH(ptr, page_zip) \
- (buf_frame_get_page_zip(ptr) == (page_zip))
+# define PAGE_ZIP_MATCH(ptr, page_zip) \
+ (((page_zip)->m_external \
+ && (page_align(ptr) + UNIV_PAGE_SIZE == (page_zip)->data)) \
+ || buf_frame_get_page_zip(ptr) == (page_zip))
#else /* !UNIV_HOTBACKUP */
/** Check if a pointer to an uncompressed page matches a compressed page.
@param ptr pointer to an uncompressed page frame
diff --git a/storage/innobase/include/page0zip.ic b/storage/innobase/include/page0zip.ic
index c9300aa4e9f..0062e1cb39f 100644
--- a/storage/innobase/include/page0zip.ic
+++ b/storage/innobase/include/page0zip.ic
@@ -1,6 +1,7 @@
/*****************************************************************************
-Copyright (c) 2005, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2005, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2012, Facebook Inc.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -29,6 +30,7 @@ Created June 2005 by Marko Makela
#endif
#include "page0zip.h"
+#include "mtr0log.h"
#include "page0page.h"
/* The format of compressed pages is as follows.
@@ -389,6 +391,75 @@ page_zip_write_header(
}
}
+/**********************************************************************//**
+Write a log record of compressing an index page without the data on the page. */
+UNIV_INLINE
+void
+page_zip_compress_write_log_no_data(
+/*================================*/
+ ulint level, /*!< in: compression level */
+ const page_t* page, /*!< in: page that is compressed */
+ dict_index_t* index, /*!< in: index */
+ mtr_t* mtr) /*!< in: mtr */
+{
+ byte* log_ptr = mlog_open_and_write_index(
+ mtr, page, index, MLOG_ZIP_PAGE_COMPRESS_NO_DATA, 1);
+
+ if (log_ptr) {
+ mach_write_to_1(log_ptr, level);
+ mlog_close(mtr, log_ptr + 1);
+ }
+}
+
+/**********************************************************************//**
+Parses a log record of compressing an index page without the data.
+@return end of log record or NULL */
+UNIV_INLINE
+byte*
+page_zip_parse_compress_no_data(
+/*============================*/
+ byte* ptr, /*!< in: buffer */
+ byte* end_ptr, /*!< in: buffer end */
+ page_t* page, /*!< in: uncompressed page */
+ page_zip_des_t* page_zip, /*!< out: compressed page */
+ dict_index_t* index) /*!< in: index */
+{
+ ulint level;
+ if (end_ptr == ptr) {
+ return(NULL);
+ }
+
+ level = mach_read_from_1(ptr);
+
+ /* If page compression fails then there must be something wrong
+ because a compress log record is logged only if the compression
+ was successful. Crash in this case. */
+
+ if (page
+ && !page_zip_compress(page_zip, page, index, level, NULL)) {
+ ut_error;
+ }
+
+ return(ptr + 1);
+}
+
+/**********************************************************************//**
+Reset the counters used for filling
+INFORMATION_SCHEMA.innodb_cmp_per_index. */
+UNIV_INLINE
+void
+page_zip_reset_stat_per_index()
+/*===========================*/
+{
+ mutex_enter(&page_zip_stat_per_index_mutex);
+
+ page_zip_stat_per_index.erase(
+ page_zip_stat_per_index.begin(),
+ page_zip_stat_per_index.end());
+
+ mutex_exit(&page_zip_stat_per_index_mutex);
+}
+
#ifdef UNIV_MATERIALIZE
# undef UNIV_INLINE
# define UNIV_INLINE UNIV_INLINE_ORIGINAL
diff --git a/storage/innobase/include/pars0pars.h b/storage/innobase/include/pars0pars.h
index 9eb8aeb747f..65ff7533828 100644
--- a/storage/innobase/include/pars0pars.h
+++ b/storage/innobase/include/pars0pars.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -469,9 +469,10 @@ que_thr_t*
pars_complete_graph_for_exec(
/*=========================*/
que_node_t* node, /*!< in: root node for an incomplete
- query graph */
+ query graph, or NULL for dummy graph */
trx_t* trx, /*!< in: transaction handle */
- mem_heap_t* heap); /*!< in: memory heap from which allocated */
+ mem_heap_t* heap) /*!< in: memory heap from which allocated */
+ __attribute__((nonnull(2,3), warn_unused_result));
/****************************************************************//**
Create parser info struct.
@@ -618,6 +619,18 @@ pars_info_add_ull_literal(
ib_uint64_t val); /*!< in: value */
/****************************************************************//**
+If the literal value already exists then it rebinds otherwise it
+creates a new entry. */
+UNIV_INTERN
+void
+pars_info_bind_ull_literal(
+/*=======================*/
+ pars_info_t* info, /*!< in: info struct */
+ const char* name, /*!< in: name */
+ const ib_uint64_t* val) /*!< in: value */
+ __attribute__((nonnull));
+
+/****************************************************************//**
Add bound id. */
UNIV_INTERN
void
@@ -628,16 +641,6 @@ pars_info_add_id(
const char* id); /*!< in: id */
/****************************************************************//**
-Get user function with the given name.
-@return user func, or NULL if not found */
-UNIV_INTERN
-pars_user_func_t*
-pars_info_get_user_func(
-/*====================*/
- pars_info_t* info, /*!< in: info struct */
- const char* name); /*!< in: function name to find*/
-
-/****************************************************************//**
Get bound literal with the given name.
@return bound literal, or NULL if not found */
UNIV_INTERN
@@ -665,7 +668,7 @@ pars_lexer_close(void);
/*==================*/
/** Extra information supplied for pars_sql(). */
-struct pars_info_struct {
+struct pars_info_t {
mem_heap_t* heap; /*!< our own memory heap */
ib_vector_t* funcs; /*!< user functions, or NUll
@@ -680,14 +683,14 @@ struct pars_info_struct {
};
/** User-supplied function and argument. */
-struct pars_user_func_struct {
+struct pars_user_func_t {
const char* name; /*!< function name */
pars_user_func_cb_t func; /*!< function address */
void* arg; /*!< user-supplied argument */
};
/** Bound literal. */
-struct pars_bound_lit_struct {
+struct pars_bound_lit_t {
const char* name; /*!< name */
const void* address; /*!< address */
ulint length; /*!< length of data */
@@ -697,20 +700,20 @@ struct pars_bound_lit_struct {
};
/** Bound identifier. */
-struct pars_bound_id_struct {
+struct pars_bound_id_t {
const char* name; /*!< name */
const char* id; /*!< identifier */
};
/** Struct used to denote a reserved word in a parsing tree */
-struct pars_res_word_struct{
+struct pars_res_word_t{
int code; /*!< the token code for the reserved word from
pars0grm.h */
};
/** A predefined function or operator node in a parsing tree; this construct
is also used for some non-functions like the assignment ':=' */
-struct func_node_struct{
+struct func_node_t{
que_common_t common; /*!< type: QUE_NODE_FUNC */
int func; /*!< token code of the function name */
ulint fclass; /*!< class of the function */
@@ -725,14 +728,14 @@ struct func_node_struct{
};
/** An order-by node in a select */
-struct order_node_struct{
+struct order_node_t{
que_common_t common; /*!< type: QUE_NODE_ORDER */
sym_node_t* column; /*!< order-by column */
ibool asc; /*!< TRUE if ascending, FALSE if descending */
};
/** Procedure definition node */
-struct proc_node_struct{
+struct proc_node_t{
que_common_t common; /*!< type: QUE_NODE_PROC */
sym_node_t* proc_id; /*!< procedure name symbol in the symbol
table of this same procedure */
@@ -742,14 +745,14 @@ struct proc_node_struct{
};
/** elsif-element node */
-struct elsif_node_struct{
+struct elsif_node_t{
que_common_t common; /*!< type: QUE_NODE_ELSIF */
que_node_t* cond; /*!< if condition */
que_node_t* stat_list; /*!< statement list */
};
/** if-statement node */
-struct if_node_struct{
+struct if_node_t{
que_common_t common; /*!< type: QUE_NODE_IF */
que_node_t* cond; /*!< if condition */
que_node_t* stat_list; /*!< statement list */
@@ -758,14 +761,14 @@ struct if_node_struct{
};
/** while-statement node */
-struct while_node_struct{
+struct while_node_t{
que_common_t common; /*!< type: QUE_NODE_WHILE */
que_node_t* cond; /*!< while condition */
que_node_t* stat_list; /*!< statement list */
};
/** for-loop-statement node */
-struct for_node_struct{
+struct for_node_t{
que_common_t common; /*!< type: QUE_NODE_FOR */
sym_node_t* loop_var; /*!< loop variable: this is the
dereferenced symbol from the
@@ -782,24 +785,24 @@ struct for_node_struct{
};
/** exit statement node */
-struct exit_node_struct{
+struct exit_node_t{
que_common_t common; /*!< type: QUE_NODE_EXIT */
};
/** return-statement node */
-struct return_node_struct{
+struct return_node_t{
que_common_t common; /*!< type: QUE_NODE_RETURN */
};
/** Assignment statement node */
-struct assign_node_struct{
+struct assign_node_t{
que_common_t common; /*!< type: QUE_NODE_ASSIGNMENT */
sym_node_t* var; /*!< variable to set */
que_node_t* val; /*!< value to assign */
};
/** Column assignment node */
-struct col_assign_node_struct{
+struct col_assign_node_t{
que_common_t common; /*!< type: QUE_NODE_COL_ASSIGN */
sym_node_t* col; /*!< column to set */
que_node_t* val; /*!< value to assign */
diff --git a/storage/innobase/include/pars0sym.h b/storage/innobase/include/pars0sym.h
index 4b3b342a533..bcf73639228 100644
--- a/storage/innobase/include/pars0sym.h
+++ b/storage/innobase/include/pars0sym.h
@@ -119,9 +119,9 @@ sym_tab_add_bound_id(
sym_tab_t* sym_tab, /*!< in: symbol table */
const char* name); /*!< in: name of bound id */
-/** Index of sym_node_struct::field_nos corresponding to the clustered index */
+/** Index of sym_node_t::field_nos corresponding to the clustered index */
#define SYM_CLUST_FIELD_NO 0
-/** Index of sym_node_struct::field_nos corresponding to a secondary index */
+/** Index of sym_node_t::field_nos corresponding to a secondary index */
#define SYM_SEC_FIELD_NO 1
/** Types of a symbol table node */
@@ -143,7 +143,7 @@ enum sym_tab_entry {
};
/** Symbol table node */
-struct sym_node_struct{
+struct sym_node_t{
que_common_t common; /*!< node type:
QUE_NODE_SYMBOL */
/* NOTE: if the data field in 'common.val' is not NULL and the symbol
@@ -227,7 +227,7 @@ struct sym_node_struct{
};
/** Symbol table */
-struct sym_tab_struct{
+struct sym_tab_t{
que_t* query_graph;
/*!< query graph generated by the
parser */
diff --git a/storage/innobase/include/pars0types.h b/storage/innobase/include/pars0types.h
index 13ae53f3fd6..47f4b432d20 100644
--- a/storage/innobase/include/pars0types.h
+++ b/storage/innobase/include/pars0types.h
@@ -26,24 +26,24 @@ Created 1/11/1998 Heikki Tuuri
#ifndef pars0types_h
#define pars0types_h
-typedef struct pars_info_struct pars_info_t;
-typedef struct pars_user_func_struct pars_user_func_t;
-typedef struct pars_bound_lit_struct pars_bound_lit_t;
-typedef struct pars_bound_id_struct pars_bound_id_t;
-typedef struct sym_node_struct sym_node_t;
-typedef struct sym_tab_struct sym_tab_t;
-typedef struct pars_res_word_struct pars_res_word_t;
-typedef struct func_node_struct func_node_t;
-typedef struct order_node_struct order_node_t;
-typedef struct proc_node_struct proc_node_t;
-typedef struct elsif_node_struct elsif_node_t;
-typedef struct if_node_struct if_node_t;
-typedef struct while_node_struct while_node_t;
-typedef struct for_node_struct for_node_t;
-typedef struct exit_node_struct exit_node_t;
-typedef struct return_node_struct return_node_t;
-typedef struct assign_node_struct assign_node_t;
-typedef struct col_assign_node_struct col_assign_node_t;
+struct pars_info_t;
+struct pars_user_func_t;
+struct pars_bound_lit_t;
+struct pars_bound_id_t;
+struct sym_node_t;
+struct sym_tab_t;
+struct pars_res_word_t;
+struct func_node_t;
+struct order_node_t;
+struct proc_node_t;
+struct elsif_node_t;
+struct if_node_t;
+struct while_node_t;
+struct for_node_t;
+struct exit_node_t;
+struct return_node_t;
+struct assign_node_t;
+struct col_assign_node_t;
typedef UT_LIST_BASE_NODE_T(sym_node_t) sym_node_list_t;
diff --git a/storage/innobase/include/que0que.h b/storage/innobase/include/que0que.h
index 531794ce688..ba8828623af 100644
--- a/storage/innobase/include/que0que.h
+++ b/storage/innobase/include/que0que.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -42,7 +42,7 @@ of SQL execution in the UNIV_SQL_DEBUG version */
extern ibool que_trace_on;
/** Mutex protecting the query threads. */
-extern mutex_t que_thr_mutex;
+extern ib_mutex_t que_thr_mutex;
/***********************************************************************//**
Creates a query graph fork node.
@@ -310,7 +310,7 @@ que_node_print_info(
Evaluate the given SQL
@return error code or DB_SUCCESS */
UNIV_INTERN
-enum db_err
+dberr_t
que_eval_sql(
/*=========*/
pars_info_t* info, /*!< in: info struct, or NULL */
@@ -349,7 +349,7 @@ que_close(void);
/* Query graph query thread node: the fields are protected by the
trx_t::mutex with the exceptions named below */
-struct que_thr_struct{
+struct que_thr_t{
que_common_t common; /*!< type: QUE_NODE_THR */
ulint magic_n; /*!< magic number to catch memory
corruption */
@@ -374,7 +374,7 @@ struct que_thr_struct{
thus far */
ulint lock_state; /*!< lock state of thread (table or
row) */
- struct srv_slot_struct*
+ struct srv_slot_t*
slot; /* The thread slot in the wait
array in srv_sys_t */
/*------------------------------*/
@@ -398,7 +398,7 @@ struct que_thr_struct{
#define QUE_THR_MAGIC_FREED 123461526
/* Query graph fork node: its fields are protected by the query thread mutex */
-struct que_fork_struct{
+struct que_fork_t{
que_common_t common; /*!< type: QUE_NODE_FORK */
que_t* graph; /*!< query graph of this node */
ulint fork_type; /*!< fork type */
diff --git a/storage/innobase/include/que0types.h b/storage/innobase/include/que0types.h
index b165b817d87..0f11cad301a 100644
--- a/storage/innobase/include/que0types.h
+++ b/storage/innobase/include/que0types.h
@@ -32,18 +32,15 @@ Created 5/27/1996 Heikki Tuuri
/* Pseudotype for all graph nodes */
typedef void que_node_t;
-typedef struct que_fork_struct que_fork_t;
-
/* Query graph root is a fork node */
-typedef que_fork_t que_t;
+typedef struct que_fork_t que_t;
-typedef struct que_thr_struct que_thr_t;
-typedef struct que_common_struct que_common_t;
+struct que_thr_t;
/* Common struct at the beginning of each query graph node; the name of this
substruct must be 'common' */
-struct que_common_struct{
+struct que_common_t{
ulint type; /*!< query node type */
que_node_t* parent; /*!< back pointer to parent node, or NULL */
que_node_t* brother;/* pointer to a possible brother node */
diff --git a/storage/innobase/include/read0read.h b/storage/innobase/include/read0read.h
index 6ea57fffcd2..980faddf98e 100644
--- a/storage/innobase/include/read0read.h
+++ b/storage/innobase/include/read0read.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1997, 2009, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1997, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -57,12 +57,14 @@ read_view_purge_open(
mem_heap_t* heap); /*!< in: memory heap from which
allocated */
/*********************************************************************//**
-Remove read view from the trx_sys->view_list. */
-UNIV_INTERN
+Remove a read view from the trx_sys->view_list. */
+UNIV_INLINE
void
read_view_remove(
/*=============*/
- read_view_t* view); /*!< in: read view */
+ read_view_t* view, /*!< in: read view, can be 0 */
+ bool own_mutex); /*!< in: true if caller owns the
+ trx_sys_t::mutex */
/*********************************************************************//**
Closes a consistent read view for MySQL. This function is called at an SQL
statement end if the trx isolation level is <= TRX_ISO_READ_COMMITTED. */
@@ -73,13 +75,14 @@ read_view_close_for_mysql(
trx_t* trx); /*!< in: trx which has a read view */
/*********************************************************************//**
Checks if a read view sees the specified transaction.
-@return TRUE if sees */
+@return true if sees */
UNIV_INLINE
-ibool
+bool
read_view_sees_trx_id(
/*==================*/
const read_view_t* view, /*!< in: read view */
- trx_id_t trx_id);/*!< in: trx id */
+ trx_id_t trx_id) /*!< in: trx id */
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Prints a read view to stderr. */
UNIV_INTERN
@@ -119,7 +122,7 @@ read_cursor_set_for_mysql(
/** Read view lists the trx ids of those transactions for which a consistent
read should not see the modifications to the database. */
-struct read_view_struct{
+struct read_view_t{
ulint type; /*!< VIEW_NORMAL, VIEW_HIGH_GRANULARITY */
undo_no_t undo_no;/*!< 0 or if type is
VIEW_HIGH_GRANULARITY
@@ -145,7 +148,7 @@ struct read_view_struct{
trx_id_t* trx_ids;/*!< Additional trx ids which the read should
not see: typically, these are the read-write
active transactions at the time when the read
- is serialized, except the reading transaction
+ is serialized, except the reading transaction
itself; the trx ids in this array are in a
descending order. These trx_ids should be
between the "low" and "high" water marks,
@@ -173,7 +176,7 @@ struct read_view_struct{
cursors. This struct holds both heap where consistent read view
is allocated and pointer to a read view. */
-struct cursor_view_struct{
+struct cursor_view_t{
mem_heap_t* heap;
/*!< Memory heap for the cursor view */
read_view_t* read_view;
diff --git a/storage/innobase/include/read0read.ic b/storage/innobase/include/read0read.ic
index 436800e1585..82c1028f12e 100644
--- a/storage/innobase/include/read0read.ic
+++ b/storage/innobase/include/read0read.ic
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1997, 2009, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1997, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -23,11 +23,64 @@ Cursor read
Created 2/16/1997 Heikki Tuuri
*******************************************************/
+#include "trx0sys.h"
+
+#ifdef UNIV_DEBUG
+/*********************************************************************//**
+Validates a read view object. */
+static
+bool
+read_view_validate(
+/*===============*/
+ const read_view_t* view) /*!< in: view to validate */
+{
+ ut_ad(mutex_own(&trx_sys->mutex));
+
+ /* Check that the view->trx_ids array is in descending order. */
+ for (ulint i = 1; i < view->n_trx_ids; ++i) {
+
+ ut_a(view->trx_ids[i] < view->trx_ids[i - 1]);
+ }
+
+ return(true);
+}
+
+/** Functor to validate the view list. */
+struct ViewCheck {
+
+ ViewCheck() : m_prev_view(0) { }
+
+ void operator()(const read_view_t* view)
+ {
+ ut_a(m_prev_view == NULL
+ || m_prev_view->low_limit_no >= view->low_limit_no);
+
+ m_prev_view = view;
+ }
+
+ const read_view_t* m_prev_view;
+};
+
+/*********************************************************************//**
+Validates a read view list. */
+static
+bool
+read_view_list_validate(void)
+/*=========================*/
+{
+ ut_ad(mutex_own(&trx_sys->mutex));
+
+ ut_list_map(trx_sys->view_list, &read_view_t::view_list, ViewCheck());
+
+ return(true);
+}
+#endif /* UNIV_DEBUG */
+
/*********************************************************************//**
Checks if a read view sees the specified transaction.
-@return TRUE if sees */
+@return true if sees */
UNIV_INLINE
-ibool
+bool
read_view_sees_trx_id(
/*==================*/
const read_view_t* view, /*!< in: read view */
@@ -35,10 +88,10 @@ read_view_sees_trx_id(
{
if (trx_id < view->up_limit_id) {
- return(TRUE);
+ return(true);
} else if (trx_id >= view->low_limit_id) {
- return(FALSE);
+ return(false);
} else {
ulint lower = 0;
ulint upper = view->n_trx_ids - 1;
@@ -63,5 +116,33 @@ read_view_sees_trx_id(
} while (lower <= upper);
}
- return(TRUE);
+ return(true);
+}
+
+/*********************************************************************//**
+Remove a read view from the trx_sys->view_list. */
+UNIV_INLINE
+void
+read_view_remove(
+/*=============*/
+ read_view_t* view, /*!< in: read view, can be 0 */
+ bool own_mutex) /*!< in: true if caller owns the
+ trx_sys_t::mutex */
+{
+ if (view != 0) {
+ if (!own_mutex) {
+ mutex_enter(&trx_sys->mutex);
+ }
+
+ ut_ad(read_view_validate(view));
+
+ UT_LIST_REMOVE(view_list, trx_sys->view_list, view);
+
+ ut_ad(read_view_list_validate());
+
+ if (!own_mutex) {
+ mutex_exit(&trx_sys->mutex);
+ }
+ }
}
+
diff --git a/storage/innobase/include/read0types.h b/storage/innobase/include/read0types.h
index 0b6aa132b88..969f4ebb637 100644
--- a/storage/innobase/include/read0types.h
+++ b/storage/innobase/include/read0types.h
@@ -26,7 +26,7 @@ Created 2/16/1997 Heikki Tuuri
#ifndef read0types_h
#define read0types_h
-typedef struct read_view_struct read_view_t;
-typedef struct cursor_view_struct cursor_view_t;
+struct read_view_t;
+struct cursor_view_t;
#endif
diff --git a/storage/innobase/include/rem0cmp.h b/storage/innobase/include/rem0cmp.h
index ed6486aa603..cb3c85ac2c8 100644
--- a/storage/innobase/include/rem0cmp.h
+++ b/storage/innobase/include/rem0cmp.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -156,21 +156,28 @@ respectively, when only the common first fields are compared, or until
the first externally stored field in rec */
UNIV_INTERN
int
-cmp_dtuple_rec_with_match(
-/*======================*/
+cmp_dtuple_rec_with_match_low(
+/*==========================*/
const dtuple_t* dtuple, /*!< in: data tuple */
const rec_t* rec, /*!< in: physical record which differs from
dtuple in some of the common fields, or which
has an equal number or more fields than
dtuple */
const ulint* offsets,/*!< in: array returned by rec_get_offsets() */
- ulint* matched_fields, /*!< in/out: number of already completely
+ ulint n_cmp, /*!< in: number of fields to compare */
+ ulint* matched_fields,
+ /*!< in/out: number of already completely
matched fields; when function returns,
contains the value for current comparison */
- ulint* matched_bytes); /*!< in/out: number of already matched
+ ulint* matched_bytes)
+ /*!< in/out: number of already matched
bytes within the first field not completely
matched; when function returns, contains the
value for current comparison */
+ __attribute__((nonnull));
+#define cmp_dtuple_rec_with_match(tuple,rec,offsets,fields,bytes) \
+ cmp_dtuple_rec_with_match_low( \
+ tuple,rec,offsets,dtuple_get_n_fields_cmp(tuple),fields,bytes)
/**************************************************************//**
Compares a data tuple to a physical record.
@see cmp_dtuple_rec_with_match
@@ -196,7 +203,9 @@ cmp_dtuple_is_prefix_of_rec(
/*************************************************************//**
Compare two physical records that contain the same number of columns,
none of which are stored externally.
-@return 1, 0, -1 if rec1 is greater, equal, less, respectively, than rec2 */
+@retval 1 if rec1 (including non-ordering columns) is greater than rec2
+@retval -1 if rec1 (including non-ordering columns) is less than rec2
+@retval 0 if rec1 is a duplicate of rec2 */
UNIV_INTERN
int
cmp_rec_rec_simple(
@@ -206,8 +215,10 @@ cmp_rec_rec_simple(
const ulint* offsets1,/*!< in: rec_get_offsets(rec1, ...) */
const ulint* offsets2,/*!< in: rec_get_offsets(rec2, ...) */
const dict_index_t* index, /*!< in: data dictionary index */
- ibool* null_eq);/*!< out: set to TRUE if
- found matching null values */
+ struct TABLE* table) /*!< in: MySQL table, for reporting
+ duplicate key value if applicable,
+ or NULL */
+ __attribute__((nonnull(1,2,3,4), warn_unused_result));
/*************************************************************//**
This function is used to compare two physical records. Only the common
first fields are compared, and if an externally stored field is
diff --git a/storage/innobase/include/rem0rec.h b/storage/innobase/include/rem0rec.h
index 671f8052afa..2a84aee7a6f 100644
--- a/storage/innobase/include/rem0rec.h
+++ b/storage/innobase/include/rem0rec.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -66,6 +66,15 @@ The status is stored in the low-order bits. */
/* Length of a B-tree node pointer, in bytes */
#define REC_NODE_PTR_SIZE 4
+/** SQL null flag in a 1-byte offset of ROW_FORMAT=REDUNDANT records */
+#define REC_1BYTE_SQL_NULL_MASK 0x80UL
+/** SQL null flag in a 2-byte offset of ROW_FORMAT=REDUNDANT records */
+#define REC_2BYTE_SQL_NULL_MASK 0x8000UL
+
+/** In a 2-byte offset of ROW_FORMAT=REDUNDANT records, the second most
+significant bit denotes that the tail of a field is stored off-page. */
+#define REC_2BYTE_EXTERN_MASK 0x4000UL
+
#ifdef UNIV_DEBUG
/* Length of the rec_get_offsets() header */
# define REC_OFFS_HEADER_SIZE 4
@@ -88,7 +97,8 @@ const rec_t*
rec_get_next_ptr_const(
/*===================*/
const rec_t* rec, /*!< in: physical record */
- ulint comp); /*!< in: nonzero=compact page format */
+ ulint comp) /*!< in: nonzero=compact page format */
+ __attribute__((nonnull, pure, warn_unused_result));
/******************************************************//**
The following function is used to get the pointer of the next chained record
on the same page.
@@ -98,7 +108,8 @@ rec_t*
rec_get_next_ptr(
/*=============*/
rec_t* rec, /*!< in: physical record */
- ulint comp); /*!< in: nonzero=compact page format */
+ ulint comp) /*!< in: nonzero=compact page format */
+ __attribute__((nonnull, pure, warn_unused_result));
/******************************************************//**
The following function is used to get the offset of the
next chained record on the same page.
@@ -108,7 +119,8 @@ ulint
rec_get_next_offs(
/*==============*/
const rec_t* rec, /*!< in: physical record */
- ulint comp); /*!< in: nonzero=compact page format */
+ ulint comp) /*!< in: nonzero=compact page format */
+ __attribute__((nonnull, pure, warn_unused_result));
/******************************************************//**
The following function is used to set the next record offset field
of an old-style record. */
@@ -117,7 +129,8 @@ void
rec_set_next_offs_old(
/*==================*/
rec_t* rec, /*!< in: old-style physical record */
- ulint next); /*!< in: offset of the next record */
+ ulint next) /*!< in: offset of the next record */
+ __attribute__((nonnull));
/******************************************************//**
The following function is used to set the next record offset field
of a new-style record. */
@@ -126,7 +139,8 @@ void
rec_set_next_offs_new(
/*==================*/
rec_t* rec, /*!< in/out: new-style physical record */
- ulint next); /*!< in: offset of the next record */
+ ulint next) /*!< in: offset of the next record */
+ __attribute__((nonnull));
/******************************************************//**
The following function is used to get the number of fields
in an old-style record.
@@ -135,7 +149,8 @@ UNIV_INLINE
ulint
rec_get_n_fields_old(
/*=================*/
- const rec_t* rec); /*!< in: physical record */
+ const rec_t* rec) /*!< in: physical record */
+ __attribute__((nonnull, pure, warn_unused_result));
/******************************************************//**
The following function is used to get the number of fields
in a record.
@@ -145,7 +160,8 @@ ulint
rec_get_n_fields(
/*=============*/
const rec_t* rec, /*!< in: physical record */
- const dict_index_t* index); /*!< in: record descriptor */
+ const dict_index_t* index) /*!< in: record descriptor */
+ __attribute__((nonnull, pure, warn_unused_result));
/******************************************************//**
The following function is used to get the number of records owned by the
previous directory record.
@@ -154,7 +170,8 @@ UNIV_INLINE
ulint
rec_get_n_owned_old(
/*================*/
- const rec_t* rec); /*!< in: old-style physical record */
+ const rec_t* rec) /*!< in: old-style physical record */
+ __attribute__((nonnull, pure, warn_unused_result));
/******************************************************//**
The following function is used to set the number of owned records. */
UNIV_INLINE
@@ -162,7 +179,8 @@ void
rec_set_n_owned_old(
/*================*/
rec_t* rec, /*!< in: old-style physical record */
- ulint n_owned); /*!< in: the number of owned */
+ ulint n_owned) /*!< in: the number of owned */
+ __attribute__((nonnull));
/******************************************************//**
The following function is used to get the number of records owned by the
previous directory record.
@@ -171,7 +189,8 @@ UNIV_INLINE
ulint
rec_get_n_owned_new(
/*================*/
- const rec_t* rec); /*!< in: new-style physical record */
+ const rec_t* rec) /*!< in: new-style physical record */
+ __attribute__((nonnull, pure, warn_unused_result));
/******************************************************//**
The following function is used to set the number of owned records. */
UNIV_INLINE
@@ -180,7 +199,8 @@ rec_set_n_owned_new(
/*================*/
rec_t* rec, /*!< in/out: new-style physical record */
page_zip_des_t* page_zip,/*!< in/out: compressed page, or NULL */
- ulint n_owned);/*!< in: the number of owned */
+ ulint n_owned)/*!< in: the number of owned */
+ __attribute__((nonnull(1)));
/******************************************************//**
The following function is used to retrieve the info bits of
a record.
@@ -190,7 +210,8 @@ ulint
rec_get_info_bits(
/*==============*/
const rec_t* rec, /*!< in: physical record */
- ulint comp); /*!< in: nonzero=compact page format */
+ ulint comp) /*!< in: nonzero=compact page format */
+ __attribute__((nonnull, pure, warn_unused_result));
/******************************************************//**
The following function is used to set the info bits of a record. */
UNIV_INLINE
@@ -198,7 +219,8 @@ void
rec_set_info_bits_old(
/*==================*/
rec_t* rec, /*!< in: old-style physical record */
- ulint bits); /*!< in: info bits */
+ ulint bits) /*!< in: info bits */
+ __attribute__((nonnull));
/******************************************************//**
The following function is used to set the info bits of a record. */
UNIV_INLINE
@@ -206,7 +228,8 @@ void
rec_set_info_bits_new(
/*==================*/
rec_t* rec, /*!< in/out: new-style physical record */
- ulint bits); /*!< in: info bits */
+ ulint bits) /*!< in: info bits */
+ __attribute__((nonnull));
/******************************************************//**
The following function retrieves the status bits of a new-style record.
@return status bits */
@@ -214,7 +237,8 @@ UNIV_INLINE
ulint
rec_get_status(
/*===========*/
- const rec_t* rec); /*!< in: physical record */
+ const rec_t* rec) /*!< in: physical record */
+ __attribute__((nonnull, pure, warn_unused_result));
/******************************************************//**
The following function is used to set the status bits of a new-style record. */
@@ -223,7 +247,8 @@ void
rec_set_status(
/*===========*/
rec_t* rec, /*!< in/out: physical record */
- ulint bits); /*!< in: info bits */
+ ulint bits) /*!< in: info bits */
+ __attribute__((nonnull));
/******************************************************//**
The following function is used to retrieve the info and status
@@ -234,7 +259,8 @@ ulint
rec_get_info_and_status_bits(
/*=========================*/
const rec_t* rec, /*!< in: physical record */
- ulint comp); /*!< in: nonzero=compact page format */
+ ulint comp) /*!< in: nonzero=compact page format */
+ __attribute__((nonnull, pure, warn_unused_result));
/******************************************************//**
The following function is used to set the info and status
bits of a record. (Only compact records have status bits.) */
@@ -243,7 +269,8 @@ void
rec_set_info_and_status_bits(
/*=========================*/
rec_t* rec, /*!< in/out: compact physical record */
- ulint bits); /*!< in: info bits */
+ ulint bits) /*!< in: info bits */
+ __attribute__((nonnull));
/******************************************************//**
The following function tells if record is delete marked.
@@ -253,7 +280,8 @@ ulint
rec_get_deleted_flag(
/*=================*/
const rec_t* rec, /*!< in: physical record */
- ulint comp); /*!< in: nonzero=compact page format */
+ ulint comp) /*!< in: nonzero=compact page format */
+ __attribute__((nonnull, pure, warn_unused_result));
/******************************************************//**
The following function is used to set the deleted bit. */
UNIV_INLINE
@@ -261,7 +289,8 @@ void
rec_set_deleted_flag_old(
/*=====================*/
rec_t* rec, /*!< in: old-style physical record */
- ulint flag); /*!< in: nonzero if delete marked */
+ ulint flag) /*!< in: nonzero if delete marked */
+ __attribute__((nonnull));
/******************************************************//**
The following function is used to set the deleted bit. */
UNIV_INLINE
@@ -270,7 +299,8 @@ rec_set_deleted_flag_new(
/*=====================*/
rec_t* rec, /*!< in/out: new-style physical record */
page_zip_des_t* page_zip,/*!< in/out: compressed page, or NULL */
- ulint flag); /*!< in: nonzero if delete marked */
+ ulint flag) /*!< in: nonzero if delete marked */
+ __attribute__((nonnull(1)));
/******************************************************//**
The following function tells if a new-style record is a node pointer.
@return TRUE if node pointer */
@@ -278,7 +308,8 @@ UNIV_INLINE
ibool
rec_get_node_ptr_flag(
/*==================*/
- const rec_t* rec); /*!< in: physical record */
+ const rec_t* rec) /*!< in: physical record */
+ __attribute__((nonnull, pure, warn_unused_result));
/******************************************************//**
The following function is used to get the order number
of an old-style record in the heap of the index page.
@@ -287,7 +318,8 @@ UNIV_INLINE
ulint
rec_get_heap_no_old(
/*================*/
- const rec_t* rec); /*!< in: physical record */
+ const rec_t* rec) /*!< in: physical record */
+ __attribute__((nonnull, pure, warn_unused_result));
/******************************************************//**
The following function is used to set the heap number
field in an old-style record. */
@@ -296,7 +328,8 @@ void
rec_set_heap_no_old(
/*================*/
rec_t* rec, /*!< in: physical record */
- ulint heap_no);/*!< in: the heap number */
+ ulint heap_no)/*!< in: the heap number */
+ __attribute__((nonnull));
/******************************************************//**
The following function is used to get the order number
of a new-style record in the heap of the index page.
@@ -305,7 +338,8 @@ UNIV_INLINE
ulint
rec_get_heap_no_new(
/*================*/
- const rec_t* rec); /*!< in: physical record */
+ const rec_t* rec) /*!< in: physical record */
+ __attribute__((nonnull, pure, warn_unused_result));
/******************************************************//**
The following function is used to set the heap number
field in a new-style record. */
@@ -314,7 +348,8 @@ void
rec_set_heap_no_new(
/*================*/
rec_t* rec, /*!< in/out: physical record */
- ulint heap_no);/*!< in: the heap number */
+ ulint heap_no)/*!< in: the heap number */
+ __attribute__((nonnull));
/******************************************************//**
The following function is used to test whether the data offsets
in the record are stored in one-byte or two-byte format.
@@ -323,7 +358,57 @@ UNIV_INLINE
ibool
rec_get_1byte_offs_flag(
/*====================*/
- const rec_t* rec); /*!< in: physical record */
+ const rec_t* rec) /*!< in: physical record */
+ __attribute__((nonnull, pure, warn_unused_result));
+
+/******************************************************//**
+The following function is used to set the 1-byte offsets flag. */
+UNIV_INLINE
+void
+rec_set_1byte_offs_flag(
+/*====================*/
+ rec_t* rec, /*!< in: physical record */
+ ibool flag) /*!< in: TRUE if 1byte form */
+ __attribute__((nonnull));
+
+/******************************************************//**
+Returns the offset of nth field end if the record is stored in the 1-byte
+offsets form. If the field is SQL null, the flag is ORed in the returned
+value.
+@return offset of the start of the field, SQL null flag ORed */
+UNIV_INLINE
+ulint
+rec_1_get_field_end_info(
+/*=====================*/
+ const rec_t* rec, /*!< in: record */
+ ulint n) /*!< in: field index */
+ __attribute__((nonnull, pure, warn_unused_result));
+
+/******************************************************//**
+Returns the offset of nth field end if the record is stored in the 2-byte
+offsets form. If the field is SQL null, the flag is ORed in the returned
+value.
+@return offset of the start of the field, SQL null flag and extern
+storage flag ORed */
+UNIV_INLINE
+ulint
+rec_2_get_field_end_info(
+/*=====================*/
+ const rec_t* rec, /*!< in: record */
+ ulint n) /*!< in: field index */
+ __attribute__((nonnull, pure, warn_unused_result));
+
+/******************************************************//**
+Returns nonzero if the field is stored off-page.
+@retval 0 if the field is stored in-page
+@retval REC_2BYTE_EXTERN_MASK if the field is stored externally */
+UNIV_INLINE
+ulint
+rec_2_is_field_extern(
+/*==================*/
+ const rec_t* rec, /*!< in: record */
+ ulint n) /*!< in: field index */
+ __attribute__((nonnull, pure, warn_unused_result));
/******************************************************//**
Determine how many of the first n columns in a compact
@@ -333,9 +418,10 @@ UNIV_INTERN
ulint
rec_get_n_extern_new(
/*=================*/
- const rec_t* rec, /*!< in: compact physical record */
- dict_index_t* index, /*!< in: record descriptor */
- ulint n); /*!< in: number of columns to scan */
+ const rec_t* rec, /*!< in: compact physical record */
+ const dict_index_t* index, /*!< in: record descriptor */
+ ulint n) /*!< in: number of columns to scan */
+ __attribute__((nonnull, warn_unused_result));
/******************************************************//**
The following function determines the offsets to each field
@@ -356,7 +442,8 @@ rec_get_offsets_func(
(ULINT_UNDEFINED if all fields) */
mem_heap_t** heap, /*!< in/out: memory heap */
const char* file, /*!< in: file name where called */
- ulint line); /*!< in: line number where called */
+ ulint line) /*!< in: line number where called */
+ __attribute__((nonnull(1,2,5,6),warn_unused_result));
#define rec_get_offsets(rec,index,offsets,n,heap) \
rec_get_offsets_func(rec,index,offsets,n,heap,__FILE__,__LINE__)
@@ -375,9 +462,10 @@ rec_get_offsets_reverse(
const dict_index_t* index, /*!< in: record descriptor */
ulint node_ptr,/*!< in: nonzero=node pointer,
0=leaf node */
- ulint* offsets);/*!< in/out: array consisting of
+ ulint* offsets)/*!< in/out: array consisting of
offsets[0] allocated elements */
-
+ __attribute__((nonnull));
+#ifdef UNIV_DEBUG
/************************************************************//**
Validates offsets returned by rec_get_offsets().
@return TRUE if valid */
@@ -387,9 +475,9 @@ rec_offs_validate(
/*==============*/
const rec_t* rec, /*!< in: record or NULL */
const dict_index_t* index, /*!< in: record descriptor or NULL */
- const ulint* offsets);/*!< in: array returned by
+ const ulint* offsets)/*!< in: array returned by
rec_get_offsets() */
-#ifdef UNIV_DEBUG
+ __attribute__((nonnull(3), warn_unused_result));
/************************************************************//**
Updates debug data in offsets, in order to avoid bogus
rec_offs_validate() failures. */
@@ -399,8 +487,9 @@ rec_offs_make_valid(
/*================*/
const rec_t* rec, /*!< in: record */
const dict_index_t* index, /*!< in: record descriptor */
- ulint* offsets);/*!< in: array returned by
+ ulint* offsets)/*!< in: array returned by
rec_get_offsets() */
+ __attribute__((nonnull));
#else
# define rec_offs_make_valid(rec, index, offsets) ((void) 0)
#endif /* UNIV_DEBUG */
@@ -415,8 +504,9 @@ rec_get_nth_field_offs_old(
/*=======================*/
const rec_t* rec, /*!< in: record */
ulint n, /*!< in: index of the field */
- ulint* len); /*!< out: length of the field; UNIV_SQL_NULL
+ ulint* len) /*!< out: length of the field; UNIV_SQL_NULL
if SQL null */
+ __attribute__((nonnull));
#define rec_get_nth_field_old(rec, n, len) \
((rec) + rec_get_nth_field_offs_old(rec, n, len))
/************************************************************//**
@@ -429,7 +519,8 @@ ulint
rec_get_nth_field_size(
/*===================*/
const rec_t* rec, /*!< in: record */
- ulint n); /*!< in: index of the field */
+ ulint n) /*!< in: index of the field */
+ __attribute__((nonnull, pure, warn_unused_result));
/************************************************************//**
The following function is used to get an offset to the nth
data field in a record.
@@ -440,8 +531,9 @@ rec_get_nth_field_offs(
/*===================*/
const ulint* offsets,/*!< in: array returned by rec_get_offsets() */
ulint n, /*!< in: index of the field */
- ulint* len); /*!< out: length of the field; UNIV_SQL_NULL
+ ulint* len) /*!< out: length of the field; UNIV_SQL_NULL
if SQL null */
+ __attribute__((nonnull));
#define rec_get_nth_field(rec, offsets, n, len) \
((rec) + rec_get_nth_field_offs(offsets, n, len))
/******************************************************//**
@@ -452,7 +544,8 @@ UNIV_INLINE
ulint
rec_offs_comp(
/*==========*/
- const ulint* offsets);/*!< in: array returned by rec_get_offsets() */
+ const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
+ __attribute__((nonnull, pure, warn_unused_result));
/******************************************************//**
Determine if the offsets are for a record containing
externally stored columns.
@@ -461,8 +554,8 @@ UNIV_INLINE
ulint
rec_offs_any_extern(
/*================*/
- const ulint* offsets);/*!< in: array returned by rec_get_offsets() */
-#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
+ const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
+ __attribute__((nonnull, pure, warn_unused_result));
/******************************************************//**
Determine if the offsets are for a record containing null BLOB pointers.
@return first field containing a null BLOB pointer, or NULL if none found */
@@ -472,8 +565,7 @@ rec_offs_any_null_extern(
/*=====================*/
const rec_t* rec, /*!< in: record */
const ulint* offsets) /*!< in: rec_get_offsets(rec) */
- __attribute__((nonnull, warn_unused_result));
-#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */
+ __attribute__((nonnull, pure, warn_unused_result));
/******************************************************//**
Returns nonzero if the extern bit is set in nth field of rec.
@return nonzero if externally stored */
@@ -482,7 +574,8 @@ ulint
rec_offs_nth_extern(
/*================*/
const ulint* offsets,/*!< in: array returned by rec_get_offsets() */
- ulint n); /*!< in: nth field */
+ ulint n) /*!< in: nth field */
+ __attribute__((nonnull, pure, warn_unused_result));
/******************************************************//**
Returns nonzero if the SQL NULL bit is set in nth field of rec.
@return nonzero if SQL NULL */
@@ -491,7 +584,8 @@ ulint
rec_offs_nth_sql_null(
/*==================*/
const ulint* offsets,/*!< in: array returned by rec_get_offsets() */
- ulint n); /*!< in: nth field */
+ ulint n) /*!< in: nth field */
+ __attribute__((nonnull, pure, warn_unused_result));
/******************************************************//**
Gets the physical size of a field.
@return length of field */
@@ -500,7 +594,8 @@ ulint
rec_offs_nth_size(
/*==============*/
const ulint* offsets,/*!< in: array returned by rec_get_offsets() */
- ulint n); /*!< in: nth field */
+ ulint n) /*!< in: nth field */
+ __attribute__((nonnull, pure, warn_unused_result));
/******************************************************//**
Returns the number of extern bits set in a record.
@@ -509,7 +604,8 @@ UNIV_INLINE
ulint
rec_offs_n_extern(
/*==============*/
- const ulint* offsets);/*!< in: array returned by rec_get_offsets() */
+ const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
+ __attribute__((nonnull, pure, warn_unused_result));
/***********************************************************//**
This is used to modify the value of an already existing field in a record.
The previous value must have exactly the same size as the new value. If len
@@ -524,11 +620,12 @@ rec_set_nth_field(
const ulint* offsets,/*!< in: array returned by rec_get_offsets() */
ulint n, /*!< in: index number of the field */
const void* data, /*!< in: pointer to the data if not SQL null */
- ulint len); /*!< in: length of the data or UNIV_SQL_NULL.
+ ulint len) /*!< in: length of the data or UNIV_SQL_NULL.
If not SQL null, must have the same
length as the previous value.
If SQL null, previous value must be
SQL null. */
+ __attribute__((nonnull(1,2)));
/**********************************************************//**
The following function returns the data size of an old-style physical
record, that is the sum of field lengths. SQL null fields
@@ -539,7 +636,8 @@ UNIV_INLINE
ulint
rec_get_data_size_old(
/*==================*/
- const rec_t* rec); /*!< in: physical record */
+ const rec_t* rec) /*!< in: physical record */
+ __attribute__((nonnull, pure, warn_unused_result));
/**********************************************************//**
The following function returns the number of allocated elements
for an array of offsets.
@@ -548,7 +646,8 @@ UNIV_INLINE
ulint
rec_offs_get_n_alloc(
/*=================*/
- const ulint* offsets);/*!< in: array for rec_get_offsets() */
+ const ulint* offsets)/*!< in: array for rec_get_offsets() */
+ __attribute__((nonnull, pure, warn_unused_result));
/**********************************************************//**
The following function sets the number of allocated elements
for an array of offsets. */
@@ -558,7 +657,8 @@ rec_offs_set_n_alloc(
/*=================*/
ulint* offsets, /*!< out: array for rec_get_offsets(),
must be allocated */
- ulint n_alloc); /*!< in: number of elements */
+ ulint n_alloc) /*!< in: number of elements */
+ __attribute__((nonnull));
#define rec_offs_init(offsets) \
rec_offs_set_n_alloc(offsets, (sizeof offsets) / sizeof *offsets)
/**********************************************************//**
@@ -568,7 +668,8 @@ UNIV_INLINE
ulint
rec_offs_n_fields(
/*==============*/
- const ulint* offsets);/*!< in: array returned by rec_get_offsets() */
+ const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
+ __attribute__((nonnull, pure, warn_unused_result));
/**********************************************************//**
The following function returns the data size of a physical
record, that is the sum of field lengths. SQL null fields
@@ -579,7 +680,8 @@ UNIV_INLINE
ulint
rec_offs_data_size(
/*===============*/
- const ulint* offsets);/*!< in: array returned by rec_get_offsets() */
+ const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
+ __attribute__((nonnull, pure, warn_unused_result));
/**********************************************************//**
Returns the total size of record minus data size of record.
The value returned by the function is the distance from record
@@ -589,7 +691,8 @@ UNIV_INLINE
ulint
rec_offs_extra_size(
/*================*/
- const ulint* offsets);/*!< in: array returned by rec_get_offsets() */
+ const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
+ __attribute__((nonnull, pure, warn_unused_result));
/**********************************************************//**
Returns the total size of a physical record.
@return size */
@@ -597,7 +700,8 @@ UNIV_INLINE
ulint
rec_offs_size(
/*==========*/
- const ulint* offsets);/*!< in: array returned by rec_get_offsets() */
+ const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
+ __attribute__((nonnull, pure, warn_unused_result));
#ifdef UNIV_DEBUG
/**********************************************************//**
Returns a pointer to the start of the record.
@@ -607,7 +711,8 @@ byte*
rec_get_start(
/*==========*/
const rec_t* rec, /*!< in: pointer to record */
- const ulint* offsets);/*!< in: array returned by rec_get_offsets() */
+ const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
+ __attribute__((nonnull, pure, warn_unused_result));
/**********************************************************//**
Returns a pointer to the end of the record.
@return pointer to end */
@@ -616,7 +721,8 @@ byte*
rec_get_end(
/*========*/
const rec_t* rec, /*!< in: pointer to record */
- const ulint* offsets);/*!< in: array returned by rec_get_offsets() */
+ const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
+ __attribute__((nonnull, pure, warn_unused_result));
#else /* UNIV_DEBUG */
# define rec_get_start(rec, offsets) ((rec) - rec_offs_extra_size(offsets))
# define rec_get_end(rec, offsets) ((rec) + rec_offs_data_size(offsets))
@@ -687,7 +793,8 @@ rec_copy_prefix_to_buf(
byte** buf, /*!< in/out: memory buffer
for the copied prefix,
or NULL */
- ulint* buf_size); /*!< in/out: buffer size */
+ ulint* buf_size) /*!< in/out: buffer size */
+ __attribute__((nonnull));
/************************************************************//**
Folds a prefix of a physical record to a ulint.
@return the folded value */
@@ -703,7 +810,7 @@ rec_fold(
ulint n_bytes, /*!< in: number of bytes to fold
in an incomplete last field */
index_id_t tree_id) /*!< in: index tree id */
- __attribute__((pure));
+ __attribute__((nonnull, pure, warn_unused_result));
#endif /* !UNIV_HOTBACKUP */
/*********************************************************//**
Builds a physical record out of a data tuple and
@@ -717,8 +824,9 @@ rec_convert_dtuple_to_rec(
physical record */
const dict_index_t* index, /*!< in: record descriptor */
const dtuple_t* dtuple, /*!< in: data tuple */
- ulint n_ext); /*!< in: number of
+ ulint n_ext) /*!< in: number of
externally stored columns */
+ __attribute__((nonnull, warn_unused_result));
/**********************************************************//**
Returns the extra size of an old-style physical record if we know its
data size and number of fields.
@@ -730,7 +838,7 @@ rec_get_converted_extra_size(
ulint data_size, /*!< in: data size */
ulint n_fields, /*!< in: number of fields */
ulint n_ext) /*!< in: number of externally stored columns */
- __attribute__((const));
+ __attribute__((const));
/**********************************************************//**
Determines the size of a data tuple prefix in ROW_FORMAT=COMPACT.
@return total size */
@@ -741,7 +849,8 @@ rec_get_converted_size_comp_prefix(
const dict_index_t* index, /*!< in: record descriptor */
const dfield_t* fields, /*!< in: array of data fields */
ulint n_fields,/*!< in: number of data fields */
- ulint* extra); /*!< out: extra size */
+ ulint* extra) /*!< out: extra size */
+ __attribute__((warn_unused_result, nonnull(1,2)));
/**********************************************************//**
Determines the size of a data tuple in ROW_FORMAT=COMPACT.
@return total size */
@@ -756,7 +865,8 @@ rec_get_converted_size_comp(
ulint status, /*!< in: status bits of the record */
const dfield_t* fields, /*!< in: array of data fields */
ulint n_fields,/*!< in: number of data fields */
- ulint* extra); /*!< out: extra size */
+ ulint* extra) /*!< out: extra size */
+ __attribute__((nonnull(1,3)));
/**********************************************************//**
The following function returns the size of a data tuple when converted to
a physical record.
@@ -767,7 +877,8 @@ rec_get_converted_size(
/*===================*/
dict_index_t* index, /*!< in: record descriptor */
const dtuple_t* dtuple, /*!< in: data tuple */
- ulint n_ext); /*!< in: number of externally stored columns */
+ ulint n_ext) /*!< in: number of externally stored columns */
+ __attribute__((warn_unused_result, nonnull));
#ifndef UNIV_HOTBACKUP
/**************************************************************//**
Copies the first n fields of a physical record to a data tuple.
@@ -781,7 +892,8 @@ rec_copy_prefix_to_dtuple(
const dict_index_t* index, /*!< in: record descriptor */
ulint n_fields, /*!< in: number of fields
to copy */
- mem_heap_t* heap); /*!< in: memory heap */
+ mem_heap_t* heap) /*!< in: memory heap */
+ __attribute__((nonnull));
#endif /* !UNIV_HOTBACKUP */
/***************************************************************//**
Validates the consistency of a physical record.
@@ -791,7 +903,8 @@ ibool
rec_validate(
/*=========*/
const rec_t* rec, /*!< in: physical record */
- const ulint* offsets);/*!< in: array returned by rec_get_offsets() */
+ const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
+ __attribute__((nonnull));
/***************************************************************//**
Prints an old-style physical record. */
UNIV_INTERN
@@ -799,7 +912,8 @@ void
rec_print_old(
/*==========*/
FILE* file, /*!< in: file where to print */
- const rec_t* rec); /*!< in: physical record */
+ const rec_t* rec) /*!< in: physical record */
+ __attribute__((nonnull));
#ifndef UNIV_HOTBACKUP
/***************************************************************//**
Prints a physical record in ROW_FORMAT=COMPACT. Ignores the
@@ -810,7 +924,8 @@ rec_print_comp(
/*===========*/
FILE* file, /*!< in: file where to print */
const rec_t* rec, /*!< in: physical record */
- const ulint* offsets);/*!< in: array returned by rec_get_offsets() */
+ const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
+ __attribute__((nonnull));
/***************************************************************//**
Prints a physical record. */
UNIV_INTERN
@@ -819,7 +934,8 @@ rec_print_new(
/*==========*/
FILE* file, /*!< in: file where to print */
const rec_t* rec, /*!< in: physical record */
- const ulint* offsets);/*!< in: array returned by rec_get_offsets() */
+ const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
+ __attribute__((nonnull));
/***************************************************************//**
Prints a physical record. */
UNIV_INTERN
@@ -828,7 +944,21 @@ rec_print(
/*======*/
FILE* file, /*!< in: file where to print */
const rec_t* rec, /*!< in: physical record */
- const dict_index_t* index); /*!< in: record descriptor */
+ const dict_index_t* index) /*!< in: record descriptor */
+ __attribute__((nonnull));
+
+# ifdef UNIV_DEBUG
+/************************************************************//**
+Reads the DB_TRX_ID of a clustered index record.
+@return the value of DB_TRX_ID */
+UNIV_INTERN
+trx_id_t
+rec_get_trx_id(
+/*===========*/
+ const rec_t* rec, /*!< in: record */
+ const dict_index_t* index) /*!< in: clustered index */
+ __attribute__((nonnull, warn_unused_result));
+# endif /* UNIV_DEBUG */
#endif /* UNIV_HOTBACKUP */
/* Maximum lengths for the data in a physical record if the offsets
diff --git a/storage/innobase/include/rem0rec.ic b/storage/innobase/include/rem0rec.ic
index 6950263fe81..18a7deb9d26 100644
--- a/storage/innobase/include/rem0rec.ic
+++ b/storage/innobase/include/rem0rec.ic
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -118,17 +118,6 @@ and the shift needed to obtain each bit-field of the record. */
#define REC_INFO_BITS_MASK 0xF0UL
#define REC_INFO_BITS_SHIFT 0
-/* The following masks are used to filter the SQL null bit from
-one-byte and two-byte offsets */
-
-#define REC_1BYTE_SQL_NULL_MASK 0x80UL
-#define REC_2BYTE_SQL_NULL_MASK 0x8000UL
-
-/* In a 2-byte offset the second most significant bit denotes
-a field stored to another page: */
-
-#define REC_2BYTE_EXTERN_MASK 0x4000UL
-
#if REC_OLD_SHORT_MASK << (8 * (REC_OLD_SHORT - 3)) \
^ REC_OLD_N_FIELDS_MASK << (8 * (REC_OLD_N_FIELDS - 4)) \
^ REC_HEAP_NO_MASK << (8 * (REC_OLD_HEAP_NO - 4)) \
@@ -883,6 +872,20 @@ rec_2_get_field_end_info(
return(mach_read_from_2(rec - (REC_N_OLD_EXTRA_BYTES + 2 * n + 2)));
}
+/******************************************************//**
+Returns nonzero if the field is stored off-page.
+@retval 0 if the field is stored in-page
+@retval REC_2BYTE_EXTERN_MASK if the field is stored externally */
+UNIV_INLINE
+ulint
+rec_2_is_field_extern(
+/*==================*/
+ const rec_t* rec, /*!< in: record */
+ ulint n) /*!< in: field index */
+{
+ return(rec_2_get_field_end_info(rec, n) & REC_2BYTE_EXTERN_MASK);
+}
+
/* Get the base address of offsets. The extra_size is stored at
this position, and following positions hold the end offsets of
the fields. */
@@ -1084,7 +1087,6 @@ rec_offs_any_extern(
return(*rec_offs_base(offsets) & REC_OFFS_EXTERNAL);
}
-#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
/******************************************************//**
Determine if the offsets are for a record containing null BLOB pointers.
@return first field containing a null BLOB pointer, or NULL if none found */
@@ -1120,7 +1122,6 @@ rec_offs_any_null_extern(
return(NULL);
}
-#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */
/******************************************************//**
Returns nonzero if the extern bit is set in nth field of rec.
diff --git a/storage/innobase/include/rem0types.h b/storage/innobase/include/rem0types.h
index 2f1ead43c07..f8133f77466 100644
--- a/storage/innobase/include/rem0types.h
+++ b/storage/innobase/include/rem0types.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2009, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -34,6 +34,15 @@ typedef byte rec_t;
#define REC_MAX_HEAP_NO (2 * 8192 - 1)
#define REC_MAX_N_OWNED (16 - 1)
+/* Maximum number of user defined fields/columns. The reserved columns
+are the ones InnoDB adds internally: DB_ROW_ID, DB_TRX_ID, DB_ROLL_PTR.
+We need "* 2" because mlog_parse_index() creates a dummy table object
+possibly, with some of the system columns in it, and then adds the 3
+system columns (again) using dict_table_add_system_columns(). The problem
+is that mlog_parse_index() cannot recognize the system columns by
+just having n_fields, n_uniq and the lengths of the columns. */
+#define REC_MAX_N_USER_FIELDS (REC_MAX_N_FIELDS - DATA_N_SYS_COLS * 2)
+
/* REC_ANTELOPE_MAX_INDEX_COL_LEN is measured in bytes and is the maximum
indexed field length (or indexed prefix length) for indexes on tables of
ROW_FORMAT=REDUNDANT and ROW_FORMAT=COMPACT format.
diff --git a/storage/innobase/include/row0ext.h b/storage/innobase/include/row0ext.h
index 60aaf16c09a..a098e2f9b29 100644
--- a/storage/innobase/include/row0ext.h
+++ b/storage/innobase/include/row0ext.h
@@ -84,7 +84,7 @@ row_ext_lookup(
DICT_MAX_FIELD_LEN_BY_FORMAT() */
/** Prefixes of externally stored columns */
-struct row_ext_struct{
+struct row_ext_t{
ulint n_ext; /*!< number of externally stored columns */
const ulint* ext; /*!< col_no's of externally stored columns */
byte* buf; /*!< backing store of the column prefix cache */
diff --git a/storage/innobase/include/row0ftsort.h b/storage/innobase/include/row0ftsort.h
index cc5efea026f..4a486450efc 100644
--- a/storage/innobase/include/row0ftsort.h
+++ b/storage/innobase/include/row0ftsort.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2010, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2010, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -56,16 +56,16 @@ typedef UT_LIST_BASE_NODE_T(fts_doc_item_t) fts_doc_list_t;
#define FTS_PLL_MERGE 1
/** Sort information passed to each individual parallel sort thread */
-typedef struct fts_psort_struct fts_psort_t;
+struct fts_psort_t;
/** Common info passed to each parallel sort thread */
-struct fts_psort_common_struct {
- struct TABLE* table; /*!< MySQL table */
+struct fts_psort_common_t {
+ row_merge_dup_t* dup; /*!< descriptor of FTS index */
dict_table_t* new_table; /*!< source table */
trx_t* trx; /*!< transaction */
- dict_index_t* sort_index; /*!< FTS index */
fts_psort_t* all_info; /*!< all parallel sort info */
os_event_t sort_event; /*!< sort event */
+ os_event_t merge_event; /*!< merge event */
ibool opt_doc_id_size;/*!< whether to use 4 bytes
instead of 8 bytes integer to
store Doc ID during sort, if
@@ -73,9 +73,7 @@ struct fts_psort_common_struct {
to use 8 bytes value */
};
-typedef struct fts_psort_common_struct fts_psort_common_t;
-
-struct fts_psort_struct {
+struct fts_psort_t {
ulint psort_id; /*!< Parallel sort ID */
row_merge_buf_t* merge_buf[FTS_NUM_AUX_INDEX];
/*!< sort buffer */
@@ -89,6 +87,7 @@ struct fts_psort_struct {
ulint state; /*!< child thread state */
fts_doc_list_t fts_doc_list; /*!< doc list to process */
fts_psort_common_t* psort_common; /*!< ptr to all psort info */
+ os_thread_t thread_hdl; /*!< thread handler */
};
/** Structure stores information from string tokenization operation */
@@ -126,6 +125,7 @@ typedef struct fts_psort_insert fts_psort_insert_t;
/** status bit used for communication between parent and child thread */
#define FTS_PARENT_COMPLETE 1
#define FTS_CHILD_COMPLETE 1
+#define FTS_CHILD_EXITING 2
/** Print some debug information */
#define FTSORT_PRINT
@@ -171,18 +171,19 @@ ibool
row_fts_psort_info_init(
/*====================*/
trx_t* trx, /*!< in: transaction */
- struct TABLE* table, /*!< in: MySQL table object */
+ row_merge_dup_t* dup, /*!< in,own: descriptor of
+ FTS index being created */
const dict_table_t* new_table,/*!< in: table where indexes are
created */
- dict_index_t* index, /*!< in: FTS index to be created */
ibool opt_doc_id_size,
/*!< in: whether to use 4 bytes
instead of 8 bytes integer to
store Doc ID during sort */
fts_psort_t** psort, /*!< out: parallel sort info to be
instantiated */
- fts_psort_t** merge); /*!< out: parallel merge info
+ fts_psort_t** merge) /*!< out: parallel merge info
to be instantiated */
+ __attribute__((nonnull));
/********************************************************************//**
Clean up and deallocate FTS parallel sort structures, and close
temparary merge sort files */
@@ -231,19 +232,6 @@ row_fts_start_parallel_merge(
/*=========================*/
fts_psort_t* merge_info); /*!< in: parallel sort info */
/********************************************************************//**
-Insert processed FTS data to the auxillary tables.
-@return DB_SUCCESS if insertion runs fine */
-UNIV_INTERN
-ulint
-row_merge_write_fts_word(
-/*=====================*/
- trx_t* trx, /*!< in: transaction */
- que_t** ins_graph, /*!< in: Insert query graphs */
- fts_tokenizer_word_t*word, /*!< in: sorted and tokenized
- word */
- fts_table_t* fts_table, /*!< in: fts aux table instance */
- CHARSET_INFO* charset); /*!< in: charset */
-/********************************************************************//**
Read sorted FTS data files and insert data tuples to auxillary tables.
@return DB_SUCCESS or error number */
UNIV_INTERN
@@ -275,13 +263,13 @@ Read sorted file containing index data tuples and insert these data
tuples to the index
@return DB_SUCCESS or error number */
UNIV_INTERN
-ulint
+dberr_t
row_fts_merge_insert(
/*=================*/
dict_index_t* index, /*!< in: index */
dict_table_t* table, /*!< in: new table */
fts_psort_t* psort_info, /*!< parallel sort info */
- ulint id); /* !< in: which auxiliary table's data
+ ulint id) /* !< in: which auxiliary table's data
to insert to */
-
+ __attribute__((nonnull));
#endif /* row0ftsort_h */
diff --git a/storage/innobase/include/row0import.h b/storage/innobase/include/row0import.h
new file mode 100644
index 00000000000..aa46fdb7c27
--- /dev/null
+++ b/storage/innobase/include/row0import.h
@@ -0,0 +1,91 @@
+/*****************************************************************************
+
+Copyright (c) 2012, Oracle and/or its affiliates. All Rights Reserved.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
+
+*****************************************************************************/
+
+/**************************************************//**
+@file include/row0import.h
+Header file for import tablespace functions.
+
+Created 2012-02-08 by Sunny Bains
+*******************************************************/
+
+#ifndef row0import_h
+#define row0import_h
+
+#include "univ.i"
+#include "db0err.h"
+#include "dict0types.h"
+
+// Forward declarations
+struct trx_t;
+struct dict_table_t;
+struct row_prebuilt_t;
+
+/*****************************************************************//**
+Imports a tablespace. The space id in the .ibd file must match the space id
+of the table in the data dictionary.
+@return error code or DB_SUCCESS */
+UNIV_INTERN
+dberr_t
+row_import_for_mysql(
+/*=================*/
+ dict_table_t* table, /*!< in/out: table */
+ row_prebuilt_t* prebuilt) /*!< in: prebuilt struct
+ in MySQL */
+ __attribute__((nonnull, warn_unused_result));
+
+/*****************************************************************//**
+Update the DICT_TF2_DISCARDED flag in SYS_TABLES.
+@return DB_SUCCESS or error code. */
+UNIV_INTERN
+dberr_t
+row_import_update_discarded_flag(
+/*=============================*/
+ trx_t* trx, /*!< in/out: transaction that
+ covers the update */
+ table_id_t table_id, /*!< in: Table for which we want
+ to set the root table->flags2 */
+ bool discarded, /*!< in: set MIX_LEN column bit
+ to discarded, if true */
+ bool dict_locked) /*!< in: Set to true if the
+ caller already owns the
+ dict_sys_t:: mutex. */
+ __attribute__((nonnull, warn_unused_result));
+
+/*****************************************************************//**
+Update the (space, root page) of a table's indexes from the values
+in the data dictionary.
+@return DB_SUCCESS or error code */
+UNIV_INTERN
+dberr_t
+row_import_update_index_root(
+/*=========================*/
+ trx_t* trx, /*!< in/out: transaction that
+ covers the update */
+ const dict_table_t* table, /*!< in: Table for which we want
+ to set the root page_no */
+ bool reset, /*!< in: if true then set to
+ FIL_NUL */
+ bool dict_locked) /*!< in: Set to true if the
+ caller already owns the
+ dict_sys_t:: mutex. */
+ __attribute__((nonnull, warn_unused_result));
+#ifndef UNIV_NONINL
+#include "row0import.ic"
+#endif
+
+#endif /* row0import_h */
diff --git a/storage/innobase/include/row0import.ic b/storage/innobase/include/row0import.ic
new file mode 100644
index 00000000000..c5bbab49f6f
--- /dev/null
+++ b/storage/innobase/include/row0import.ic
@@ -0,0 +1,25 @@
+/*****************************************************************************
+
+Copyright (c) 2012, Oracle and/or its affiliates. All Rights Reserved.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
+
+*****************************************************************************/
+
+/**************************************************//**
+@file include/row0import.ic
+
+Import tablespace inline functions.
+
+Created 2012-02-08 Sunny Bains
+*******************************************************/
diff --git a/storage/innobase/include/row0ins.h b/storage/innobase/include/row0ins.h
index 54ad7241a4f..2a892d2f5df 100644
--- a/storage/innobase/include/row0ins.h
+++ b/storage/innobase/include/row0ins.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2009, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -40,7 +40,7 @@ the caller must have a shared latch on dict_foreign_key_check_lock.
@return DB_SUCCESS, DB_LOCK_WAIT, DB_NO_REFERENCED_ROW, or
DB_ROW_IS_REFERENCED */
UNIV_INTERN
-ulint
+dberr_t
row_ins_check_foreign_constraint(
/*=============================*/
ibool check_ref,/*!< in: TRUE If we want to check that
@@ -52,7 +52,8 @@ row_ins_check_foreign_constraint(
dict_table_t* table, /*!< in: if check_ref is TRUE, then the foreign
table, else the referenced table */
dtuple_t* entry, /*!< in: index entry for index */
- que_thr_t* thr); /*!< in: query thread */
+ que_thr_t* thr) /*!< in: query thread */
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Creates an insert node struct.
@return own: insert node struct */
@@ -74,21 +75,110 @@ ins_node_set_new_row(
ins_node_t* node, /*!< in: insert node */
dtuple_t* row); /*!< in: new row (or first row) for the node */
/***************************************************************//**
-Inserts an index entry to index. Tries first optimistic, then pessimistic
-descent down the tree. If the entry matches enough to a delete marked record,
-performs the insert by updating or delete unmarking the delete marked
-record.
-@return DB_SUCCESS, DB_LOCK_WAIT, DB_DUPLICATE_KEY, or some other error code */
+Tries to insert an entry into a clustered index, ignoring foreign key
+constraints. If a record with the same unique key is found, the other
+record is necessarily marked deleted by a committed transaction, or a
+unique key violation error occurs. The delete marked record is then
+updated to an existing record, and we must write an undo log record on
+the delete marked record.
+@retval DB_SUCCESS on success
+@retval DB_LOCK_WAIT on lock wait when !(flags & BTR_NO_LOCKING_FLAG)
+@retval DB_FAIL if retry with BTR_MODIFY_TREE is needed
+@return error code */
UNIV_INTERN
-ulint
-row_ins_index_entry(
-/*================*/
- dict_index_t* index, /*!< in: index */
+dberr_t
+row_ins_clust_index_entry_low(
+/*==========================*/
+ ulint flags, /*!< in: undo logging and locking flags */
+ ulint mode, /*!< in: BTR_MODIFY_LEAF or BTR_MODIFY_TREE,
+ depending on whether we wish optimistic or
+ pessimistic descent down the index tree */
+ dict_index_t* index, /*!< in: clustered index */
+ ulint n_uniq, /*!< in: 0 or index->n_uniq */
dtuple_t* entry, /*!< in/out: index entry to insert */
ulint n_ext, /*!< in: number of externally stored columns */
- ibool foreign,/*!< in: TRUE=check foreign key constraints
- (foreign=FALSE only during CREATE INDEX) */
- que_thr_t* thr); /*!< in: query thread */
+ que_thr_t* thr) /*!< in: query thread or NULL */
+ __attribute__((nonnull, warn_unused_result));
+/***************************************************************//**
+Tries to insert an entry into a secondary index. If a record with exactly the
+same fields is found, the other record is necessarily marked deleted.
+It is then unmarked. Otherwise, the entry is just inserted to the index.
+@retval DB_SUCCESS on success
+@retval DB_LOCK_WAIT on lock wait when !(flags & BTR_NO_LOCKING_FLAG)
+@retval DB_FAIL if retry with BTR_MODIFY_TREE is needed
+@return error code */
+UNIV_INTERN
+dberr_t
+row_ins_sec_index_entry_low(
+/*========================*/
+ ulint flags, /*!< in: undo logging and locking flags */
+ ulint mode, /*!< in: BTR_MODIFY_LEAF or BTR_MODIFY_TREE,
+ depending on whether we wish optimistic or
+ pessimistic descent down the index tree */
+ dict_index_t* index, /*!< in: secondary index */
+ mem_heap_t* offsets_heap,
+ /*!< in/out: memory heap that can be emptied */
+ mem_heap_t* heap, /*!< in/out: memory heap */
+ dtuple_t* entry, /*!< in/out: index entry to insert */
+ trx_id_t trx_id, /*!< in: PAGE_MAX_TRX_ID during
+ row_log_table_apply(), or 0 */
+ que_thr_t* thr) /*!< in: query thread */
+ __attribute__((nonnull, warn_unused_result));
+/***************************************************************//**
+Tries to insert the externally stored fields (off-page columns)
+of a clustered index entry.
+@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */
+UNIV_INTERN
+dberr_t
+row_ins_index_entry_big_rec_func(
+/*=============================*/
+ const dtuple_t* entry, /*!< in/out: index entry to insert */
+ const big_rec_t* big_rec,/*!< in: externally stored fields */
+ ulint* offsets,/*!< in/out: rec offsets */
+ mem_heap_t** heap, /*!< in/out: memory heap */
+ dict_index_t* index, /*!< in: index */
+ const char* file, /*!< in: file name of caller */
+#ifndef DBUG_OFF
+ const void* thd, /*!< in: connection, or NULL */
+#endif /* DBUG_OFF */
+ ulint line) /*!< in: line number of caller */
+ __attribute__((nonnull(1,2,3,4,5,6), warn_unused_result));
+#ifdef DBUG_OFF
+# define row_ins_index_entry_big_rec(e,big,ofs,heap,index,thd,file,line) \
+ row_ins_index_entry_big_rec_func(e,big,ofs,heap,index,file,line)
+#else /* DBUG_OFF */
+# define row_ins_index_entry_big_rec(e,big,ofs,heap,index,thd,file,line) \
+ row_ins_index_entry_big_rec_func(e,big,ofs,heap,index,file,thd,line)
+#endif /* DBUG_OFF */
+/***************************************************************//**
+Inserts an entry into a clustered index. Tries first optimistic,
+then pessimistic descent down the tree. If the entry matches enough
+to a delete marked record, performs the insert by updating or delete
+unmarking the delete marked record.
+@return DB_SUCCESS, DB_LOCK_WAIT, DB_DUPLICATE_KEY, or some other error code */
+UNIV_INTERN
+dberr_t
+row_ins_clust_index_entry(
+/*======================*/
+ dict_index_t* index, /*!< in: clustered index */
+ dtuple_t* entry, /*!< in/out: index entry to insert */
+ que_thr_t* thr, /*!< in: query thread */
+ ulint n_ext) /*!< in: number of externally stored columns */
+ __attribute__((nonnull, warn_unused_result));
+/***************************************************************//**
+Inserts an entry into a secondary index. Tries first optimistic,
+then pessimistic descent down the tree. If the entry matches enough
+to a delete marked record, performs the insert by updating or delete
+unmarking the delete marked record.
+@return DB_SUCCESS, DB_LOCK_WAIT, DB_DUPLICATE_KEY, or some other error code */
+UNIV_INTERN
+dberr_t
+row_ins_sec_index_entry(
+/*====================*/
+ dict_index_t* index, /*!< in: secondary index */
+ dtuple_t* entry, /*!< in/out: index entry to insert */
+ que_thr_t* thr) /*!< in: query thread */
+ __attribute__((nonnull, warn_unused_result));
/***********************************************************//**
Inserts a row to a table. This is a high-level function used in
SQL execution graphs.
@@ -98,17 +188,10 @@ que_thr_t*
row_ins_step(
/*=========*/
que_thr_t* thr); /*!< in: query thread */
-/***********************************************************//**
-Creates an entry template for each index of a table. */
-UNIV_INTERN
-void
-ins_node_create_entry_list(
-/*=======================*/
- ins_node_t* node); /*!< in: row insert node */
/* Insert node structure */
-struct ins_node_struct{
+struct ins_node_t{
que_common_t common; /*!< node type: QUE_NODE_INSERT */
ulint ins_type;/* INS_VALUES, INS_SEARCHED, or INS_DIRECT */
dtuple_t* row; /*!< row to insert */
diff --git a/storage/innobase/include/row0log.h b/storage/innobase/include/row0log.h
new file mode 100644
index 00000000000..984d907d390
--- /dev/null
+++ b/storage/innobase/include/row0log.h
@@ -0,0 +1,241 @@
+/*****************************************************************************
+
+Copyright (c) 2011, 2012, Oracle and/or its affiliates. All Rights Reserved.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
+
+*****************************************************************************/
+
+/**************************************************//**
+@file include/row0log.h
+Modification log for online index creation and online table rebuild
+
+Created 2011-05-26 Marko Makela
+*******************************************************/
+
+#ifndef row0log_h
+#define row0log_h
+
+#include "univ.i"
+#include "mtr0types.h"
+#include "row0types.h"
+#include "rem0types.h"
+#include "data0types.h"
+#include "dict0types.h"
+#include "trx0types.h"
+#include "que0types.h"
+
+/******************************************************//**
+Allocate the row log for an index and flag the index
+for online creation.
+@retval true if success, false if not */
+UNIV_INTERN
+bool
+row_log_allocate(
+/*=============*/
+ dict_index_t* index, /*!< in/out: index */
+ dict_table_t* table, /*!< in/out: new table being rebuilt,
+ or NULL when creating a secondary index */
+ bool same_pk,/*!< in: whether the definition of the
+ PRIMARY KEY has remained the same */
+ const dtuple_t* add_cols,
+ /*!< in: default values of
+ added columns, or NULL */
+ const ulint* col_map)/*!< in: mapping of old column
+ numbers to new ones, or NULL if !table */
+ __attribute__((nonnull(1), warn_unused_result));
+
+/******************************************************//**
+Free the row log for an index that was being created online. */
+UNIV_INTERN
+void
+row_log_free(
+/*=========*/
+ row_log_t*& log) /*!< in,own: row log */
+ __attribute__((nonnull));
+
+/******************************************************//**
+Free the row log for an index on which online creation was aborted. */
+UNIV_INLINE
+void
+row_log_abort_sec(
+/*==============*/
+ dict_index_t* index) /*!< in/out: index (x-latched) */
+ __attribute__((nonnull));
+
+/******************************************************//**
+Try to log an operation to a secondary index that is
+(or was) being created.
+@retval true if the operation was logged or can be ignored
+@retval false if online index creation is not taking place */
+UNIV_INLINE
+bool
+row_log_online_op_try(
+/*==================*/
+ dict_index_t* index, /*!< in/out: index, S or X latched */
+ const dtuple_t* tuple, /*!< in: index tuple */
+ trx_id_t trx_id) /*!< in: transaction ID for insert,
+ or 0 for delete */
+ __attribute__((nonnull, warn_unused_result));
+/******************************************************//**
+Logs an operation to a secondary index that is (or was) being created. */
+UNIV_INTERN
+void
+row_log_online_op(
+/*==============*/
+ dict_index_t* index, /*!< in/out: index, S or X latched */
+ const dtuple_t* tuple, /*!< in: index tuple */
+ trx_id_t trx_id) /*!< in: transaction ID for insert,
+ or 0 for delete */
+ UNIV_COLD __attribute__((nonnull));
+
+/******************************************************//**
+Gets the error status of the online index rebuild log.
+@return DB_SUCCESS or error code */
+UNIV_INTERN
+dberr_t
+row_log_table_get_error(
+/*====================*/
+ const dict_index_t* index) /*!< in: clustered index of a table
+ that is being rebuilt online */
+ __attribute__((nonnull, warn_unused_result));
+
+/******************************************************//**
+Logs a delete operation to a table that is being rebuilt.
+This will be merged in row_log_table_apply_delete(). */
+UNIV_INTERN
+void
+row_log_table_delete(
+/*=================*/
+ const rec_t* rec, /*!< in: clustered index leaf page record,
+ page X-latched */
+ dict_index_t* index, /*!< in/out: clustered index, S-latched
+ or X-latched */
+ const ulint* offsets,/*!< in: rec_get_offsets(rec,index) */
+ trx_id_t trx_id) /*!< in: DB_TRX_ID of the record before
+ it was deleted */
+ UNIV_COLD __attribute__((nonnull));
+
+/******************************************************//**
+Logs an update operation to a table that is being rebuilt.
+This will be merged in row_log_table_apply_update(). */
+UNIV_INTERN
+void
+row_log_table_update(
+/*=================*/
+ const rec_t* rec, /*!< in: clustered index leaf page record,
+ page X-latched */
+ dict_index_t* index, /*!< in/out: clustered index, S-latched
+ or X-latched */
+ const ulint* offsets,/*!< in: rec_get_offsets(rec,index) */
+ const dtuple_t* old_pk) /*!< in: row_log_table_get_pk()
+ before the update */
+ UNIV_COLD __attribute__((nonnull(1,2,3)));
+
+/******************************************************//**
+Constructs the old PRIMARY KEY and DB_TRX_ID,DB_ROLL_PTR
+of a table that is being rebuilt.
+@return tuple of PRIMARY KEY,DB_TRX_ID,DB_ROLL_PTR in the rebuilt table,
+or NULL if the PRIMARY KEY definition does not change */
+UNIV_INTERN
+const dtuple_t*
+row_log_table_get_pk(
+/*=================*/
+ const rec_t* rec, /*!< in: clustered index leaf page record,
+ page X-latched */
+ dict_index_t* index, /*!< in/out: clustered index, S-latched
+ or X-latched */
+ const ulint* offsets,/*!< in: rec_get_offsets(rec,index),
+ or NULL */
+ mem_heap_t** heap) /*!< in/out: memory heap where allocated */
+ UNIV_COLD __attribute__((nonnull(1,2,4), warn_unused_result));
+
+/******************************************************//**
+Logs an insert to a table that is being rebuilt.
+This will be merged in row_log_table_apply_insert(). */
+UNIV_INTERN
+void
+row_log_table_insert(
+/*=================*/
+ const rec_t* rec, /*!< in: clustered index leaf page record,
+ page X-latched */
+ dict_index_t* index, /*!< in/out: clustered index, S-latched
+ or X-latched */
+ const ulint* offsets)/*!< in: rec_get_offsets(rec,index) */
+ UNIV_COLD __attribute__((nonnull));
+
+/******************************************************//**
+Notes that a transaction is being rolled back. */
+UNIV_INTERN
+void
+row_log_table_rollback(
+/*===================*/
+ dict_index_t* index, /*!< in/out: clustered index */
+ trx_id_t trx_id) /*!< in: transaction being rolled back */
+ UNIV_COLD __attribute__((nonnull));
+
+/******************************************************//**
+Check if a transaction rollback has been initiated.
+@return true if inserts of this transaction were rolled back */
+UNIV_INTERN
+bool
+row_log_table_is_rollback(
+/*======================*/
+ const dict_index_t* index, /*!< in: clustered index */
+ trx_id_t trx_id) /*!< in: transaction id */
+ __attribute__((nonnull));
+
+/******************************************************//**
+Apply the row_log_table log to a table upon completing rebuild.
+@return DB_SUCCESS, or error code on failure */
+UNIV_INTERN
+dberr_t
+row_log_table_apply(
+/*================*/
+ que_thr_t* thr, /*!< in: query graph */
+ dict_table_t* old_table,
+ /*!< in: old table */
+ struct TABLE* table) /*!< in/out: MySQL table
+ (for reporting duplicates) */
+ __attribute__((nonnull, warn_unused_result));
+
+/******************************************************//**
+Get the latest transaction ID that has invoked row_log_online_op()
+during online creation.
+@return latest transaction ID, or 0 if nothing was logged */
+UNIV_INTERN
+trx_id_t
+row_log_get_max_trx(
+/*================*/
+ dict_index_t* index) /*!< in: index, must be locked */
+ __attribute__((nonnull, warn_unused_result));
+
+/******************************************************//**
+Merge the row log to the index upon completing index creation.
+@return DB_SUCCESS, or error code on failure */
+UNIV_INTERN
+dberr_t
+row_log_apply(
+/*==========*/
+ trx_t* trx, /*!< in: transaction (for checking if
+ the operation was interrupted) */
+ dict_index_t* index, /*!< in/out: secondary index */
+ struct TABLE* table) /*!< in/out: MySQL table
+ (for reporting duplicates) */
+ __attribute__((nonnull, warn_unused_result));
+
+#ifndef UNIV_NONINL
+#include "row0log.ic"
+#endif
+
+#endif /* row0log.h */
diff --git a/storage/innobase/include/row0log.ic b/storage/innobase/include/row0log.ic
new file mode 100644
index 00000000000..b0f37dbd8e7
--- /dev/null
+++ b/storage/innobase/include/row0log.ic
@@ -0,0 +1,84 @@
+/*****************************************************************************
+
+Copyright (c) 2011, 2012, Oracle and/or its affiliates. All Rights Reserved.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
+
+*****************************************************************************/
+
+/**************************************************//**
+@file include/row0log.ic
+Modification log for online index creation and online table rebuild
+
+Created 2012-10-18 Marko Makela
+*******************************************************/
+
+#include "dict0dict.h"
+
+/******************************************************//**
+Free the row log for an index on which online creation was aborted. */
+UNIV_INLINE
+void
+row_log_abort_sec(
+/*===============*/
+ dict_index_t* index) /*!< in/out: index (x-latched) */
+{
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(rw_lock_own(dict_index_get_lock(index), RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+
+ ut_ad(!dict_index_is_clust(index));
+ dict_index_set_online_status(index, ONLINE_INDEX_ABORTED);
+ row_log_free(index->online_log);
+}
+
+/******************************************************//**
+Try to log an operation to a secondary index that is
+(or was) being created.
+@retval true if the operation was logged or can be ignored
+@retval false if online index creation is not taking place */
+UNIV_INLINE
+bool
+row_log_online_op_try(
+/*==================*/
+ dict_index_t* index, /*!< in/out: index, S or X latched */
+ const dtuple_t* tuple, /*!< in: index tuple */
+ trx_id_t trx_id) /*!< in: transaction ID for insert,
+ or 0 for delete */
+{
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(rw_lock_own(dict_index_get_lock(index), RW_LOCK_SHARED)
+ || rw_lock_own(dict_index_get_lock(index), RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+
+ switch (dict_index_get_online_status(index)) {
+ case ONLINE_INDEX_COMPLETE:
+ /* This is a normal index. Do not log anything.
+ The caller must perform the operation on the
+ index tree directly. */
+ return(false);
+ case ONLINE_INDEX_CREATION:
+ /* The index is being created online. Log the
+ operation. */
+ row_log_online_op(index, tuple, trx_id);
+ break;
+ case ONLINE_INDEX_ABORTED:
+ case ONLINE_INDEX_ABORTED_DROPPED:
+ /* The index was created online, but the operation was
+ aborted. Do not log the operation and tell the caller
+ to skip the operation. */
+ break;
+ }
+
+ return(true);
+}
diff --git a/storage/innobase/include/row0merge.h b/storage/innobase/include/row0merge.h
index 95c6d85075c..f464e46ae5b 100644
--- a/storage/innobase/include/row0merge.h
+++ b/storage/innobase/include/row0merge.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2005, 2010, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2005, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -40,15 +40,17 @@ Created 13/06/2005 Jan Lindstrom
#include "lock0types.h"
#include "srv0srv.h"
+// Forward declaration
+struct ib_sequence_t;
+
/** @brief Block size for I/O operations in merge sort.
The minimum is UNIV_PAGE_SIZE, or page_get_free_space_of_empty()
rounded to a power of 2.
When not creating a PRIMARY KEY that contains column prefixes, this
-can be set as small as UNIV_PAGE_SIZE / 2. See the comment above
-ut_ad(data_size < sizeof(row_merge_block_t)). */
-typedef byte row_merge_block_t;
+can be set as small as UNIV_PAGE_SIZE / 2. */
+typedef byte row_merge_block_t;
/** @brief Secondary buffer for I/O operations of merge records.
@@ -64,114 +66,146 @@ The format is the same as a record in ROW_FORMAT=COMPACT with the
exception that the REC_N_NEW_EXTRA_BYTES are omitted. */
typedef byte mrec_t;
+/** Merge record in row_merge_buf_t */
+struct mtuple_t {
+ dfield_t* fields; /*!< data fields */
+};
+
/** Buffer for sorting in main memory. */
-struct row_merge_buf_struct {
+struct row_merge_buf_t {
mem_heap_t* heap; /*!< memory heap where allocated */
dict_index_t* index; /*!< the index the tuples belong to */
ulint total_size; /*!< total amount of data bytes */
ulint n_tuples; /*!< number of data tuples */
ulint max_tuples; /*!< maximum number of data tuples */
- const dfield_t**tuples; /*!< array of pointers to
- arrays of fields that form
- the data tuples */
- const dfield_t**tmp_tuples; /*!< temporary copy of tuples,
+ mtuple_t* tuples; /*!< array of data tuples */
+ mtuple_t* tmp_tuples; /*!< temporary copy of tuples,
for sorting */
};
-/** Buffer for sorting in main memory. */
-typedef struct row_merge_buf_struct row_merge_buf_t;
-
/** Information about temporary files used in merge sort */
-struct merge_file_struct {
+struct merge_file_t {
int fd; /*!< file descriptor */
ulint offset; /*!< file offset (end of file) */
ib_uint64_t n_rec; /*!< number of records in the file */
};
-/** Information about temporary files used in merge sort */
-typedef struct merge_file_struct merge_file_t;
-
/** Index field definition */
-struct merge_index_field_struct {
+struct index_field_t {
+ ulint col_no; /*!< column offset */
ulint prefix_len; /*!< column prefix length, or 0
if indexing the whole column */
- const char* field_name; /*!< field name */
};
-/** Index field definition */
-typedef struct merge_index_field_struct merge_index_field_t;
-
/** Definition of an index being created */
-struct merge_index_def_struct {
- const char* name; /*!< index name */
- ulint ind_type; /*!< 0, DICT_UNIQUE,
- or DICT_CLUSTERED */
- ulint n_fields; /*!< number of fields
- in index */
- merge_index_field_t* fields; /*!< field definitions */
+struct index_def_t {
+ const char* name; /*!< index name */
+ ulint ind_type; /*!< 0, DICT_UNIQUE,
+ or DICT_CLUSTERED */
+ ulint key_number; /*!< MySQL key number,
+ or ULINT_UNDEFINED if none */
+ ulint n_fields; /*!< number of fields in index */
+ index_field_t* fields; /*!< field definitions */
};
-/** Definition of an index being created */
-typedef struct merge_index_def_struct merge_index_def_t;
-
/** Structure for reporting duplicate records. */
-struct row_merge_dup_struct {
- const dict_index_t* index; /*!< index being sorted */
- struct TABLE* table; /*!< MySQL table object */
- ulint n_dup; /*!< number of duplicates */
+struct row_merge_dup_t {
+ dict_index_t* index; /*!< index being sorted */
+ struct TABLE* table; /*!< MySQL table object */
+ const ulint* col_map;/*!< mapping of column numbers
+ in table to the rebuilt table
+ (index->table), or NULL if not
+ rebuilding table */
+ ulint n_dup; /*!< number of duplicates */
};
-/** Structure for reporting duplicate records. */
-typedef struct row_merge_dup_struct row_merge_dup_t;
-
+/*************************************************************//**
+Report a duplicate key. */
+UNIV_INTERN
+void
+row_merge_dup_report(
+/*=================*/
+ row_merge_dup_t* dup, /*!< in/out: for reporting duplicates */
+ const dfield_t* entry) /*!< in: duplicate index entry */
+ __attribute__((nonnull));
/*********************************************************************//**
Sets an exclusive lock on a table, for the duration of creating indexes.
@return error code or DB_SUCCESS */
UNIV_INTERN
-ulint
+dberr_t
row_merge_lock_table(
/*=================*/
trx_t* trx, /*!< in/out: transaction */
dict_table_t* table, /*!< in: table to lock */
- enum lock_mode mode); /*!< in: LOCK_X or LOCK_S */
+ enum lock_mode mode) /*!< in: LOCK_X or LOCK_S */
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
-Drop an index from the InnoDB system tables. The data dictionary must
-have been locked exclusively by the caller, because the transaction
-will not be committed. */
+Drop indexes that were created before an error occurred.
+The data dictionary must have been locked exclusively by the caller,
+because the transaction will not be committed. */
UNIV_INTERN
void
-row_merge_drop_index(
-/*=================*/
- dict_index_t* index, /*!< in: index to be removed */
- dict_table_t* table, /*!< in: table */
- trx_t* trx); /*!< in: transaction handle */
+row_merge_drop_indexes_dict(
+/*========================*/
+ trx_t* trx, /*!< in/out: dictionary transaction */
+ table_id_t table_id)/*!< in: table identifier */
+ __attribute__((nonnull));
/*********************************************************************//**
-Drop those indexes which were created before an error occurred when
-building an index. The data dictionary must have been locked
-exclusively by the caller, because the transaction will not be
-committed. */
+Drop those indexes which were created before an error occurred.
+The data dictionary must have been locked exclusively by the caller,
+because the transaction will not be committed. */
UNIV_INTERN
void
row_merge_drop_indexes(
/*===================*/
- trx_t* trx, /*!< in: transaction */
- dict_table_t* table, /*!< in: table containing the indexes */
- dict_index_t** index, /*!< in: indexes to drop */
- ulint num_created); /*!< in: number of elements in
- index[] */
+ trx_t* trx, /*!< in/out: transaction */
+ dict_table_t* table, /*!< in/out: table containing the indexes */
+ ibool locked) /*!< in: TRUE=table locked,
+ FALSE=may need to do a lazy drop */
+ __attribute__((nonnull));
/*********************************************************************//**
Drop all partially created indexes during crash recovery. */
UNIV_INTERN
void
row_merge_drop_temp_indexes(void);
/*=============================*/
+
+/*********************************************************************//**
+Creates temporary merge files, and if UNIV_PFS_IO defined, register
+the file descriptor with Performance Schema.
+@return File descriptor */
+UNIV_INTERN
+int
+row_merge_file_create_low(void)
+/*===========================*/
+ __attribute__((warn_unused_result));
+/*********************************************************************//**
+Destroy a merge file. And de-register the file from Performance Schema
+if UNIV_PFS_IO is defined. */
+UNIV_INTERN
+void
+row_merge_file_destroy_low(
+/*=======================*/
+ int fd); /*!< in: merge file descriptor */
+
+/*********************************************************************//**
+Provide a new pathname for a table that is being renamed if it belongs to
+a file-per-table tablespace. The caller is responsible for freeing the
+memory allocated for the return value.
+@return new pathname of tablespace file, or NULL if space = 0 */
+UNIV_INTERN
+char*
+row_make_new_pathname(
+/*==================*/
+ dict_table_t* table, /*!< in: table to be renamed */
+ const char* new_name); /*!< in: new name */
/*********************************************************************//**
Rename the tables in the data dictionary. The data dictionary must
have been locked exclusively by the caller, because the transaction
will not be committed.
@return error code or DB_SUCCESS */
UNIV_INTERN
-ulint
+dberr_t
row_merge_rename_tables(
/*====================*/
dict_table_t* old_table, /*!< in/out: old table, renamed to
@@ -179,32 +213,35 @@ row_merge_rename_tables(
dict_table_t* new_table, /*!< in/out: new table, renamed to
old_table->name */
const char* tmp_name, /*!< in: new name for old_table */
- trx_t* trx); /*!< in: transaction handle */
+ trx_t* trx) /*!< in: transaction handle */
+ __attribute__((nonnull, warn_unused_result));
+
/*********************************************************************//**
-Create a temporary table for creating a primary key, using the definition
-of an existing table.
-@return table, or NULL on error */
+Rename an index in the dictionary that was created. The data
+dictionary must have been locked exclusively by the caller, because
+the transaction will not be committed.
+@return DB_SUCCESS if all OK */
UNIV_INTERN
-dict_table_t*
-row_merge_create_temporary_table(
-/*=============================*/
- const char* table_name, /*!< in: new table name */
- const merge_index_def_t*index_def, /*!< in: the index definition
- of the primary key */
- const dict_table_t* table, /*!< in: old table definition */
- trx_t* trx); /*!< in/out: transaction
- (sets error_state) */
+dberr_t
+row_merge_rename_index_to_add(
+/*==========================*/
+ trx_t* trx, /*!< in/out: transaction */
+ table_id_t table_id, /*!< in: table identifier */
+ index_id_t index_id) /*!< in: index identifier */
+ __attribute__((nonnull));
/*********************************************************************//**
-Rename the temporary indexes in the dictionary to permanent ones. The
-data dictionary must have been locked exclusively by the caller,
-because the transaction will not be committed.
+Rename an index in the dictionary that is to be dropped. The data
+dictionary must have been locked exclusively by the caller, because
+the transaction will not be committed.
@return DB_SUCCESS if all OK */
UNIV_INTERN
-ulint
-row_merge_rename_indexes(
-/*=====================*/
+dberr_t
+row_merge_rename_index_to_drop(
+/*===========================*/
trx_t* trx, /*!< in/out: transaction */
- dict_table_t* table); /*!< in/out: table with new indexes */
+ table_id_t table_id, /*!< in: table identifier */
+ index_id_t index_id) /*!< in: index identifier */
+ __attribute__((nonnull));
/*********************************************************************//**
Create the index and load in to the dictionary.
@return index, or NULL on error */
@@ -214,7 +251,7 @@ row_merge_create_index(
/*===================*/
trx_t* trx, /*!< in/out: trx (sets error_state) */
dict_table_t* table, /*!< in: the index is on this table */
- const merge_index_def_t*index_def);
+ const index_def_t* index_def);
/*!< in: the index definition */
/*********************************************************************//**
Check if a transaction can use an index.
@@ -226,22 +263,25 @@ row_merge_is_index_usable(
const trx_t* trx, /*!< in: transaction */
const dict_index_t* index); /*!< in: index to check */
/*********************************************************************//**
-If there are views that refer to the old table name then we "attach" to
-the new instance of the table else we drop it immediately.
+Drop a table. The caller must have ensured that the background stats
+thread is not processing the table. This can be done by calling
+dict_stats_wait_bg_to_stop_using_tables() after locking the dictionary and
+before calling this function.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
row_merge_drop_table(
/*=================*/
trx_t* trx, /*!< in: transaction */
- dict_table_t* table); /*!< in: table instance to drop */
+ dict_table_t* table) /*!< in: table instance to drop */
+ __attribute__((nonnull));
/*********************************************************************//**
Build indexes on a table by reading a clustered index,
creating a temporary file containing index entries, merge sorting
these index entries and inserting sorted index entries to indexes.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
row_merge_build_indexes(
/*====================*/
trx_t* trx, /*!< in: transaction */
@@ -250,11 +290,24 @@ row_merge_build_indexes(
dict_table_t* new_table, /*!< in: table where indexes are
created; identical to old_table
unless creating a PRIMARY KEY */
+ bool online, /*!< in: true if creating indexes
+ online */
dict_index_t** indexes, /*!< in: indexes to be created */
+ const ulint* key_numbers, /*!< in: MySQL key numbers */
ulint n_indexes, /*!< in: size of indexes[] */
- struct TABLE* table); /*!< in/out: MySQL table, for
+ struct TABLE* table, /*!< in/out: MySQL table, for
reporting erroneous key value
if applicable */
+ const dtuple_t* add_cols, /*!< in: default values of
+ added columns, or NULL */
+ const ulint* col_map, /*!< in: mapping of old column
+ numbers to new ones, or NULL
+ if old_table == new_table */
+ ulint add_autoinc, /*!< in: number of added
+ AUTO_INCREMENT column, or
+ ULINT_UNDEFINED if none is added */
+ ib_sequence_t& sequence) /*!< in/out: autoinc sequence */
+ __attribute__((nonnull(1,2,3,5,6,8), warn_unused_result));
/********************************************************************//**
Write a buffer to a block. */
UNIV_INTERN
@@ -263,15 +316,18 @@ row_merge_buf_write(
/*================*/
const row_merge_buf_t* buf, /*!< in: sorted buffer */
const merge_file_t* of, /*!< in: output file */
- row_merge_block_t* block); /*!< out: buffer for writing to file */
+ row_merge_block_t* block) /*!< out: buffer for writing to file */
+ __attribute__((nonnull));
/********************************************************************//**
Sort a buffer. */
UNIV_INTERN
void
row_merge_buf_sort(
/*===============*/
- row_merge_buf_t* buf, /*!< in/out: sort buffer */
- row_merge_dup_t* dup); /*!< in/out: for reporting duplicates */
+ row_merge_buf_t* buf, /*!< in/out: sort buffer */
+ row_merge_dup_t* dup) /*!< in/out: reporter of duplicates
+ (NULL if non-unique index) */
+ __attribute__((nonnull(1)));
/********************************************************************//**
Write a merge block to the file system.
@return TRUE if request was successful, FALSE if fail */
@@ -290,30 +346,32 @@ UNIV_INTERN
row_merge_buf_t*
row_merge_buf_empty(
/*================*/
- row_merge_buf_t* buf); /*!< in,own: sort buffer */
+ row_merge_buf_t* buf) /*!< in,own: sort buffer */
+ __attribute__((warn_unused_result, nonnull));
/*********************************************************************//**
-Create a merge file. */
+Create a merge file.
+@return file descriptor, or -1 on failure */
UNIV_INTERN
int
row_merge_file_create(
/*==================*/
- merge_file_t* merge_file); /*!< out: merge file structure */
+ merge_file_t* merge_file) /*!< out: merge file structure */
+ __attribute__((nonnull));
/*********************************************************************//**
Merge disk files.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
row_merge_sort(
/*===========*/
trx_t* trx, /*!< in: transaction */
- const dict_index_t* index, /*!< in: index being created */
+ const row_merge_dup_t* dup, /*!< in: descriptor of
+ index being created */
merge_file_t* file, /*!< in/out: file containing
index entries */
row_merge_block_t* block, /*!< in/out: 3 buffers */
- int* tmpfd, /*!< in/out: temporary file handle */
- struct TABLE* table); /*!< in/out: MySQL table, for
- reporting erroneous key value
- if applicable */
+ int* tmpfd) /*!< in/out: temporary file handle */
+ __attribute__((nonnull));
/*********************************************************************//**
Allocate a sort buffer.
@return own: sort buffer */
@@ -321,37 +379,24 @@ UNIV_INTERN
row_merge_buf_t*
row_merge_buf_create(
/*=================*/
- dict_index_t* index); /*!< in: secondary index */
+ dict_index_t* index) /*!< in: secondary index */
+ __attribute__((warn_unused_result, nonnull, malloc));
/*********************************************************************//**
Deallocate a sort buffer. */
UNIV_INTERN
void
row_merge_buf_free(
/*===============*/
- row_merge_buf_t* buf); /*!< in,own: sort buffer, to be freed */
+ row_merge_buf_t* buf) /*!< in,own: sort buffer to be freed */
+ __attribute__((nonnull));
/*********************************************************************//**
Destroy a merge file. */
UNIV_INTERN
void
row_merge_file_destroy(
/*===================*/
- merge_file_t* merge_file); /*!< out: merge file structure */
-/*********************************************************************//**
-Compare two merge records.
-@return 1, 0, -1 if mrec1 is greater, equal, less, respectively, than mrec2 */
-UNIV_INTERN
-int
-row_merge_cmp(
-/*==========*/
- const mrec_t* mrec1, /*!< in: first merge
- record to be compared */
- const mrec_t* mrec2, /*!< in: second merge
- record to be compared */
- const ulint* offsets1, /*!< in: first record offsets */
- const ulint* offsets2, /*!< in: second record offsets */
- const dict_index_t* index, /*!< in: index */
- ibool* null_eq); /*!< out: set to TRUE if
- found matching null values */
+ merge_file_t* merge_file) /*!< in/out: merge file structure */
+ __attribute__((nonnull));
/********************************************************************//**
Read a merge block from the file system.
@return TRUE if request was successful, FALSE if fail */
@@ -367,7 +412,7 @@ row_merge_read(
/********************************************************************//**
Read a merge record.
@return pointer to next record, or NULL on I/O error or end of list */
-UNIV_INTERN __attribute__((nonnull))
+UNIV_INTERN
const byte*
row_merge_read_rec(
/*===============*/
@@ -380,5 +425,6 @@ row_merge_read_rec(
const mrec_t** mrec, /*!< out: pointer to merge record,
or NULL on end of list
(non-NULL on I/O error) */
- ulint* offsets);/*!< out: offsets of mrec */
+ ulint* offsets)/*!< out: offsets of mrec */
+ __attribute__((nonnull, warn_unused_result));
#endif /* row0merge.h */
diff --git a/storage/innobase/include/row0mysql.h b/storage/innobase/include/row0mysql.h
index 17a29e38ec7..1e0f3b30f8c 100644
--- a/storage/innobase/include/row0mysql.h
+++ b/storage/innobase/include/row0mysql.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2000, 2010, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2000, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -36,9 +36,12 @@ Created 9/17/2000 Heikki Tuuri
#include "btr0pcur.h"
#include "trx0types.h"
+// Forward declaration
+struct SysIndexCallback;
+
extern ibool row_rollback_on_timeout;
-typedef struct row_prebuilt_struct row_prebuilt_t;
+struct row_prebuilt_t;
/*******************************************************************//**
Frees the blob heap in prebuilt when no longer needed. */
@@ -152,18 +155,19 @@ row_mysql_store_col_in_innobase_format(
ulint comp); /*!< in: nonzero=compact format */
/****************************************************************//**
Handles user errors and lock waits detected by the database engine.
-@return TRUE if it was a lock wait and we should continue running the
+@return true if it was a lock wait and we should continue running the
query thread */
UNIV_INTERN
-ibool
+bool
row_mysql_handle_errors(
/*====================*/
- ulint* new_err,/*!< out: possible new error encountered in
+ dberr_t* new_err,/*!< out: possible new error encountered in
rollback, or the old error which was
during the function entry */
trx_t* trx, /*!< in: transaction */
- que_thr_t* thr, /*!< in: query thread */
- trx_savept_t* savept);/*!< in: savepoint */
+ que_thr_t* thr, /*!< in: query thread, or NULL */
+ trx_savept_t* savept) /*!< in: savepoint, or NULL */
+ __attribute__((nonnull(1,2)));
/********************************************************************//**
Create a prebuilt struct for a MySQL table handle.
@return own: a prebuilt struct */
@@ -200,16 +204,17 @@ It is not compatible with another AUTO_INC or exclusive lock on the
table.
@return error code or DB_SUCCESS */
UNIV_INTERN
-int
+dberr_t
row_lock_table_autoinc_for_mysql(
/*=============================*/
- row_prebuilt_t* prebuilt); /*!< in: prebuilt struct in the MySQL
+ row_prebuilt_t* prebuilt) /*!< in: prebuilt struct in the MySQL
table handle */
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Sets a table lock on the table mentioned in prebuilt.
@return error code or DB_SUCCESS */
UNIV_INTERN
-int
+dberr_t
row_lock_table_for_mysql(
/*=====================*/
row_prebuilt_t* prebuilt, /*!< in: prebuilt struct in the MySQL
@@ -218,19 +223,20 @@ row_lock_table_for_mysql(
if prebuilt->table should be
locked as
prebuilt->select_lock_type */
- ulint mode); /*!< in: lock mode of table
+ ulint mode) /*!< in: lock mode of table
(ignored if table==NULL) */
-
+ __attribute__((nonnull(1)));
/*********************************************************************//**
Does an insert for MySQL.
@return error code or DB_SUCCESS */
UNIV_INTERN
-int
+dberr_t
row_insert_for_mysql(
/*=================*/
byte* mysql_rec, /*!< in: row in the MySQL format */
- row_prebuilt_t* prebuilt); /*!< in: prebuilt struct in MySQL
+ row_prebuilt_t* prebuilt) /*!< in: prebuilt struct in MySQL
handle */
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Builds a dummy query graph used in selects. */
UNIV_INTERN
@@ -263,13 +269,14 @@ row_table_got_default_clust_index(
Does an update or delete of a row for MySQL.
@return error code or DB_SUCCESS */
UNIV_INTERN
-int
+dberr_t
row_update_for_mysql(
/*=================*/
byte* mysql_rec, /*!< in: the row to be updated, in
the MySQL format */
- row_prebuilt_t* prebuilt); /*!< in: prebuilt struct in MySQL
+ row_prebuilt_t* prebuilt) /*!< in: prebuilt struct in MySQL
handle */
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
This can only be used when srv_locks_unsafe_for_binlog is TRUE or this
session is using a READ COMMITTED or READ UNCOMMITTED isolation level.
@@ -278,19 +285,31 @@ initialized prebuilt->new_rec_locks to store the information which new
record locks really were set. This function removes a newly set
clustered index record lock under prebuilt->pcur or
prebuilt->clust_pcur. Thus, this implements a 'mini-rollback' that
-releases the latest clustered index record lock we set.
-@return error code or DB_SUCCESS */
+releases the latest clustered index record lock we set. */
UNIV_INTERN
-int
+void
row_unlock_for_mysql(
/*=================*/
row_prebuilt_t* prebuilt, /*!< in/out: prebuilt struct in MySQL
handle */
- ibool has_latches_on_recs);/*!< in: TRUE if called
+ ibool has_latches_on_recs)/*!< in: TRUE if called
so that we have the latches on
the records under pcur and
clust_pcur, and we do not need
to reposition the cursors. */
+ __attribute__((nonnull));
+/*********************************************************************//**
+Checks if a table name contains the string "/#sql" which denotes temporary
+tables in MySQL.
+@return true if temporary table */
+UNIV_INTERN
+bool
+row_is_mysql_tmp_table_name(
+/*========================*/
+ const char* name) __attribute__((warn_unused_result));
+ /*!< in: table name in the form
+ 'database/tablename' */
+
/*********************************************************************//**
Creates an query graph node of 'update' type to be used in the MySQL
interface.
@@ -305,13 +324,14 @@ row_create_update_node_for_mysql(
Does a cascaded delete or set null in a foreign key operation.
@return error code or DB_SUCCESS */
UNIV_INTERN
-ulint
+dberr_t
row_update_cascade_for_mysql(
/*=========================*/
que_thr_t* thr, /*!< in: query thread */
upd_node_t* node, /*!< in: update node used in the cascade
or set null operation */
- dict_table_t* table); /*!< in: table where we do the operation */
+ dict_table_t* table) /*!< in: table where we do the operation */
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Locks the data dictionary exclusively for performing a table create or other
data dictionary modification operation. */
@@ -355,33 +375,38 @@ Creates a table for MySQL. If the name of the table ends in
one of "innodb_monitor", "innodb_lock_monitor", "innodb_tablespace_monitor",
"innodb_table_monitor", then this will also start the printing of monitor
output by the master thread. If the table name ends in "innodb_mem_validate",
-InnoDB will try to invoke mem_validate().
+InnoDB will try to invoke mem_validate(). On failure the transaction will
+be rolled back.
@return error code or DB_SUCCESS */
UNIV_INTERN
-int
+dberr_t
row_create_table_for_mysql(
/*=======================*/
- dict_table_t* table, /*!< in, own: table definition
- (will be freed) */
- trx_t* trx); /*!< in: transaction handle */
+ dict_table_t* table, /*!< in, own: table definition
+ (will be freed, or on DB_SUCCESS
+ added to the data dictionary cache) */
+ trx_t* trx, /*!< in/out: transaction */
+ bool commit) /*!< in: if true, commit the transaction */
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Does an index creation operation for MySQL. TODO: currently failure
to create an index results in dropping the whole table! This is no problem
currently as all indexes must be created at the same time as the table.
@return error number or DB_SUCCESS */
UNIV_INTERN
-int
+dberr_t
row_create_index_for_mysql(
/*=======================*/
dict_index_t* index, /*!< in, own: index definition
(will be freed) */
trx_t* trx, /*!< in: transaction handle */
- const ulint* field_lengths); /*!< in: if not NULL, must contain
+ const ulint* field_lengths) /*!< in: if not NULL, must contain
dict_index_get_n_fields(index)
actual field lengths for the
index columns, which are
then checked for not being too
large. */
+ __attribute__((nonnull(1,2), warn_unused_result));
/*********************************************************************//**
Scans a table create SQL string and adds to the data dictionary
the foreign key constraints declared in the string. This function
@@ -391,7 +416,7 @@ both participating tables. The indexes are allowed to contain more
fields than mentioned in the constraint.
@return error code or DB_SUCCESS */
UNIV_INTERN
-int
+dberr_t
row_table_add_foreign_constraints(
/*==============================*/
trx_t* trx, /*!< in: transaction */
@@ -404,10 +429,10 @@ row_table_add_foreign_constraints(
const char* name, /*!< in: table full name in the
normalized form
database_name/table_name */
- ibool reject_fks); /*!< in: if TRUE, fail with error
+ ibool reject_fks) /*!< in: if TRUE, fail with error
code DB_CANNOT_ADD_CONSTRAINT if
any foreign keys are found. */
-
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
The master thread in srv0srv.cc calls this regularly to drop tables which
we must drop in background after queries to them have ended. Such lazy
@@ -426,14 +451,28 @@ ulint
row_get_background_drop_list_len_low(void);
/*======================================*/
/*********************************************************************//**
+Sets an exclusive lock on a table.
+@return error code or DB_SUCCESS */
+UNIV_INTERN
+dberr_t
+row_mysql_lock_table(
+/*=================*/
+ trx_t* trx, /*!< in/out: transaction */
+ dict_table_t* table, /*!< in: table to lock */
+ enum lock_mode mode, /*!< in: LOCK_X or LOCK_S */
+ const char* op_info) /*!< in: string for trx->op_info */
+ __attribute__((nonnull, warn_unused_result));
+
+/*********************************************************************//**
Truncates a table for MySQL.
@return error code or DB_SUCCESS */
UNIV_INTERN
-int
+dberr_t
row_truncate_table_for_mysql(
/*=========================*/
dict_table_t* table, /*!< in: table handle */
- trx_t* trx); /*!< in: transaction handle */
+ trx_t* trx) /*!< in: transaction handle */
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Drops a table for MySQL. If the name of the dropped table ends in
one of "innodb_monitor", "innodb_lock_monitor", "innodb_tablespace_monitor",
@@ -443,12 +482,16 @@ by the transaction, the transaction will be committed. Otherwise, the
data dictionary will remain locked.
@return error code or DB_SUCCESS */
UNIV_INTERN
-int
+dberr_t
row_drop_table_for_mysql(
/*=====================*/
const char* name, /*!< in: table name */
- trx_t* trx, /*!< in: transaction handle */
- ibool drop_db);/*!< in: TRUE=dropping whole database */
+ trx_t* trx, /*!< in: dictionary transaction handle */
+ bool drop_db,/*!< in: true=dropping whole database */
+ bool nonatomic = true)
+ /*!< in: whether it is permitted
+ to release and reacquire dict_operation_lock */
+ __attribute__((nonnull));
/*********************************************************************//**
Drop all temporary tables during crash recovery. */
UNIV_INTERN
@@ -462,66 +505,70 @@ means that this function deletes the .ibd file and assigns a new table id for
the table. Also the flag table->ibd_file_missing is set TRUE.
@return error code or DB_SUCCESS */
UNIV_INTERN
-int
+dberr_t
row_discard_tablespace_for_mysql(
/*=============================*/
const char* name, /*!< in: table name */
- trx_t* trx); /*!< in: transaction handle */
+ trx_t* trx) /*!< in: transaction handle */
+ __attribute__((nonnull, warn_unused_result));
/*****************************************************************//**
Imports a tablespace. The space id in the .ibd file must match the space id
of the table in the data dictionary.
@return error code or DB_SUCCESS */
UNIV_INTERN
-int
+dberr_t
row_import_tablespace_for_mysql(
/*============================*/
- const char* name, /*!< in: table name */
- trx_t* trx); /*!< in: transaction handle */
+ dict_table_t* table, /*!< in/out: table */
+ row_prebuilt_t* prebuilt) /*!< in: prebuilt struct in MySQL */
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Drops a database for MySQL.
@return error code or DB_SUCCESS */
UNIV_INTERN
-int
+dberr_t
row_drop_database_for_mysql(
/*========================*/
const char* name, /*!< in: database name which ends to '/' */
- trx_t* trx); /*!< in: transaction handle */
+ trx_t* trx) /*!< in: transaction handle */
+ __attribute__((nonnull));
/*********************************************************************//**
Renames a table for MySQL.
@return error code or DB_SUCCESS */
UNIV_INTERN
-ulint
+dberr_t
row_rename_table_for_mysql(
/*=======================*/
const char* old_name, /*!< in: old table name */
const char* new_name, /*!< in: new table name */
- trx_t* trx, /*!< in: transaction handle */
- ibool commit); /*!< in: if TRUE then commit trx */
+ trx_t* trx, /*!< in/out: transaction */
+ bool commit) /*!< in: whether to commit trx */
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Checks that the index contains entries in an ascending order, unique
constraint is not broken, and calculates the number of index entries
in the read view of the current transaction.
-@return DB_SUCCESS if ok */
+@return true if ok */
UNIV_INTERN
-ulint
+bool
row_check_index_for_mysql(
/*======================*/
row_prebuilt_t* prebuilt, /*!< in: prebuilt struct
in MySQL handle */
const dict_index_t* index, /*!< in: index */
- ulint* n_rows); /*!< out: number of entries
+ ulint* n_rows) /*!< out: number of entries
seen in the consistent read */
-
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Determines if a table is a magic monitor table.
-@return TRUE if monitor table */
+@return true if monitor table */
UNIV_INTERN
-ibool
+bool
row_is_magic_monitor_table(
/*=======================*/
- const char* table_name); /*!< in: name of the table, in the
+ const char* table_name) /*!< in: name of the table, in the
form database/table_name */
-
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Initialize this module */
UNIV_INTERN
@@ -536,13 +583,24 @@ void
row_mysql_close(void);
/*=================*/
+/*********************************************************************//**
+Reassigns the table identifier of a table.
+@return error code or DB_SUCCESS */
+UNIV_INTERN
+dberr_t
+row_mysql_table_id_reassign(
+/*========================*/
+ dict_table_t* table, /*!< in/out: table */
+ trx_t* trx, /*!< in/out: transaction */
+ table_id_t* new_id) /*!< out: new table id */
+ __attribute__((nonnull, warn_unused_result));
+
/* A struct describing a place for an individual column in the MySQL
row format which is presented to the table handler in ha_innobase.
This template struct is used to speed up row transformations between
Innobase and MySQL. */
-typedef struct mysql_row_templ_struct mysql_row_templ_t;
-struct mysql_row_templ_struct {
+struct mysql_row_templ_t {
ulint col_no; /*!< column number of the column */
ulint rec_field_no; /*!< field number of the column in an
Innobase record in the current index;
@@ -597,7 +655,7 @@ struct mysql_row_templ_struct {
/** A struct for (sometimes lazily) prebuilt structures in an Innobase table
handle used within MySQL; these are used to save CPU time. */
-struct row_prebuilt_struct {
+struct row_prebuilt_t {
ulint magic_n; /*!< this magic number is set to
ROW_PREBUILT_ALLOCATED when created,
or ROW_PREBUILT_FREED when the
@@ -682,8 +740,11 @@ struct row_prebuilt_struct {
columns in the table */
upd_node_t* upd_node; /*!< Innobase SQL update node used
to perform updates and deletes */
+ trx_id_t trx_id; /*!< The table->def_trx_id when
+ ins_graph was built */
que_fork_t* ins_graph; /*!< Innobase SQL query graph used
- in inserts */
+ in inserts. Will be rebuilt on
+ trx_id or n_indexes mismatch. */
que_fork_t* upd_graph; /*!< Innobase SQL query graph used
in updates or deletes */
btr_pcur_t pcur; /*!< persistent cursor used in selects
@@ -780,7 +841,7 @@ struct row_prebuilt_struct {
to this heap */
mem_heap_t* old_vers_heap; /*!< memory heap where a previous
version is built in consistent read */
- fts_result_t* result; /* The result of an FTS query */
+	bool		in_fts_query;	/*!< Whether we are in an FTS query */
/*----------------------*/
ulonglong autoinc_last_value;
/*!< last value of AUTO-INC interval */
@@ -791,7 +852,7 @@ struct row_prebuilt_struct {
ulonglong autoinc_offset; /*!< The offset passed to
get_auto_increment() by MySQL. Required
to calculate the next value */
- ulint autoinc_error; /*!< The actual error code encountered
+ dberr_t autoinc_error; /*!< The actual error code encountered
while trying to init or read the
autoinc value from the table. We
store it here so that we can return
@@ -806,6 +867,20 @@ struct row_prebuilt_struct {
/*----------------------*/
ulint magic_n2; /*!< this should be the same as
magic_n */
+ /*----------------------*/
+ unsigned innodb_api:1; /*!< whether this is a InnoDB API
+ query */
+ const rec_t* innodb_api_rec; /*!< InnoDB API search result */
+};
+
+/** Callback for row_mysql_sys_index_iterate() */
+struct SysIndexCallback {
+ virtual ~SysIndexCallback() { }
+
+ /** Callback method
+ @param mtr - current mini transaction
+ @param pcur - persistent cursor. */
+ virtual void operator()(mtr_t* mtr, btr_pcur_t* pcur) throw() = 0;
};
#define ROW_PREBUILT_FETCH_MAGIC_N 465765687
@@ -829,4 +904,4 @@ struct row_prebuilt_struct {
#include "row0mysql.ic"
#endif
-#endif
+#endif /* row0mysql.h */
diff --git a/storage/innobase/include/row0purge.h b/storage/innobase/include/row0purge.h
index 740771fa3eb..93dcf9cf49b 100644
--- a/storage/innobase/include/row0purge.h
+++ b/storage/innobase/include/row0purge.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1997, 2009, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1997, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -46,7 +46,8 @@ row_purge_node_create(
/*==================*/
que_thr_t* parent, /*!< in: parent node, i.e., a
thr node */
- mem_heap_t* heap); /*!< in: memory heap where created */
+ mem_heap_t* heap) /*!< in: memory heap where created */
+ __attribute__((nonnull, warn_unused_result));
/***********************************************************//**
Determines if it is possible to remove a secondary index entry.
Removal is possible if the secondary index entry does not refer to any
@@ -56,19 +57,20 @@ is newer than the purge view.
NOTE: This function should only be called by the purge thread, only
while holding a latch on the leaf page of the secondary index entry
(or keeping the buffer pool watch on the page). It is possible that
-this function first returns TRUE and then FALSE, if a user transaction
+this function first returns true and then false, if a user transaction
inserts a record that the secondary index entry would refer to.
However, in that case, the user transaction would also re-insert the
secondary index entry after purge has removed it and released the leaf
page latch.
-@return TRUE if the secondary index record can be purged */
+@return true if the secondary index record can be purged */
UNIV_INTERN
-ibool
+bool
row_purge_poss_sec(
/*===============*/
purge_node_t* node, /*!< in/out: row purge node */
dict_index_t* index, /*!< in: secondary index */
- const dtuple_t* entry); /*!< in: secondary index entry */
+ const dtuple_t* entry) /*!< in: secondary index entry */
+ __attribute__((nonnull, warn_unused_result));
/***************************************************************
Does the purge operation for a single undo log record. This is a high-level
function used in an SQL execution graph.
@@ -77,11 +79,12 @@ UNIV_INTERN
que_thr_t*
row_purge_step(
/*===========*/
- que_thr_t* thr); /*!< in: query thread */
+ que_thr_t* thr) /*!< in: query thread */
+ __attribute__((nonnull, warn_unused_result));
/* Purge node structure */
-struct purge_node_struct{
+struct purge_node_t{
que_common_t common; /*!< node type: QUE_NODE_PURGE */
/*----------------------*/
/* Local storage for this graph node */
diff --git a/storage/innobase/include/row0quiesce.h b/storage/innobase/include/row0quiesce.h
new file mode 100644
index 00000000000..1d6d11291b8
--- /dev/null
+++ b/storage/innobase/include/row0quiesce.h
@@ -0,0 +1,74 @@
+/*****************************************************************************
+
+Copyright (c) 2012, Oracle and/or its affiliates. All Rights Reserved.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
+
+*****************************************************************************/
+
+/**************************************************//**
+@file include/row0quiesce.h
+
+Header file for tablespace quiesce functions.
+
+Created 2012-02-08 by Sunny Bains
+*******************************************************/
+
+#ifndef row0quiesce_h
+#define row0quiesce_h
+
+#include "univ.i"
+#include "dict0types.h"
+
+struct trx_t;
+
+/** The version number of the export meta-data text file. */
+#define IB_EXPORT_CFG_VERSION_V1 0x1UL
+
+/*********************************************************************//**
+Quiesce the tablespace that the table resides in. */
+UNIV_INTERN
+void
+row_quiesce_table_start(
+/*====================*/
+ dict_table_t* table, /*!< in: quiesce this table */
+ trx_t* trx) /*!< in/out: transaction/session */
+ __attribute__((nonnull));
+
+/*********************************************************************//**
+Set a table's quiesce state.
+@return DB_SUCCESS or error code. */
+UNIV_INTERN
+dberr_t
+row_quiesce_set_state(
+/*==================*/
+ dict_table_t* table, /*!< in: quiesce this table */
+ ib_quiesce_t state, /*!< in: quiesce state to set */
+ trx_t* trx) /*!< in/out: transaction */
+ __attribute__((nonnull, warn_unused_result));
+
+/*********************************************************************//**
+Cleanup after table quiesce. */
+UNIV_INTERN
+void
+row_quiesce_table_complete(
+/*=======================*/
+ dict_table_t* table, /*!< in: quiesce this table */
+ trx_t* trx) /*!< in/out: transaction/session */
+ __attribute__((nonnull));
+
+#ifndef UNIV_NONINL
+#include "row0quiesce.ic"
+#endif
+
+#endif /* row0quiesce_h */
diff --git a/storage/innobase/include/row0quiesce.ic b/storage/innobase/include/row0quiesce.ic
new file mode 100644
index 00000000000..f570a6aed05
--- /dev/null
+++ b/storage/innobase/include/row0quiesce.ic
@@ -0,0 +1,26 @@
+/*****************************************************************************
+
+Copyright (c) 2012, Oracle and/or its affiliates. All Rights Reserved.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
+
+*****************************************************************************/
+
+/**************************************************//**
+@file include/row0quiesce.ic
+
+Quiesce a tablespace.
+
+Created 2012-02-08 Sunny Bains
+*******************************************************/
+
diff --git a/storage/innobase/include/row0row.h b/storage/innobase/include/row0row.h
index cf253ab2347..a4e5e0dd2fa 100644
--- a/storage/innobase/include/row0row.h
+++ b/storage/innobase/include/row0row.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -73,20 +73,41 @@ row_get_rec_roll_ptr(
/*****************************************************************//**
When an insert or purge to a table is performed, this function builds
the entry to be inserted into or purged from an index on the table.
+@return index entry which should be inserted or purged
+@retval NULL if the externally stored columns in the clustered index record
+are unavailable and ext != NULL, or row is missing some needed columns. */
+UNIV_INTERN
+dtuple_t*
+row_build_index_entry_low(
+/*======================*/
+ const dtuple_t* row, /*!< in: row which should be
+ inserted or purged */
+ const row_ext_t* ext, /*!< in: externally stored column
+ prefixes, or NULL */
+ dict_index_t* index, /*!< in: index on the table */
+ mem_heap_t* heap) /*!< in: memory heap from which
+ the memory for the index entry
+ is allocated */
+ __attribute__((warn_unused_result, nonnull(1,3,4)));
+/*****************************************************************//**
+When an insert or purge to a table is performed, this function builds
+the entry to be inserted into or purged from an index on the table.
@return index entry which should be inserted or purged, or NULL if the
externally stored columns in the clustered index record are
unavailable and ext != NULL */
-UNIV_INTERN
+UNIV_INLINE
dtuple_t*
row_build_index_entry(
/*==================*/
- const dtuple_t* row, /*!< in: row which should be
- inserted or purged */
- row_ext_t* ext, /*!< in: externally stored column prefixes,
- or NULL */
- dict_index_t* index, /*!< in: index on the table */
- mem_heap_t* heap); /*!< in: memory heap from which the memory for
- the index entry is allocated */
+ const dtuple_t* row, /*!< in: row which should be
+ inserted or purged */
+ const row_ext_t* ext, /*!< in: externally stored column
+ prefixes, or NULL */
+ dict_index_t* index, /*!< in: index on the table */
+ mem_heap_t* heap) /*!< in: memory heap from which
+ the memory for the index entry
+ is allocated */
+ __attribute__((warn_unused_result, nonnull(1,3,4)));
/*******************************************************************//**
An inverse function to row_build_index_entry. Builds a row from a
record in a clustered index.
@@ -124,11 +145,17 @@ row_build(
consulted instead; the user
columns in this table should be
the same columns as in index->table */
+ const dtuple_t* add_cols,
+ /*!< in: default values of
+ added columns, or NULL */
+ const ulint* col_map,/*!< in: mapping of old column
+ numbers to new ones, or NULL */
row_ext_t** ext, /*!< out, own: cache of
externally stored column
prefixes, or NULL */
- mem_heap_t* heap); /*!< in: memory heap from which
+ mem_heap_t* heap) /*!< in: memory heap from which
the memory needed is allocated */
+ __attribute__((nonnull(2,3,9)));
/*******************************************************************//**
Converts an index record to a typed data tuple.
@return index entry built; does not set info_bits, and the data fields
@@ -142,37 +169,25 @@ row_rec_to_index_entry_low(
const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */
ulint* n_ext, /*!< out: number of externally
stored columns */
- mem_heap_t* heap); /*!< in: memory heap from which
+ mem_heap_t* heap) /*!< in: memory heap from which
the memory needed is allocated */
+ __attribute__((nonnull, warn_unused_result));
/*******************************************************************//**
Converts an index record to a typed data tuple. NOTE that externally
stored (often big) fields are NOT copied to heap.
-@return own: index entry built; see the NOTE below! */
+@return own: index entry built */
UNIV_INTERN
dtuple_t*
row_rec_to_index_entry(
/*===================*/
- ulint type, /*!< in: ROW_COPY_DATA, or
- ROW_COPY_POINTERS: the former
- copies also the data fields to
- heap as the latter only places
- pointers to data fields on the
- index page */
- const rec_t* rec, /*!< in: record in the index;
- NOTE: in the case
- ROW_COPY_POINTERS the data
- fields in the row will point
- directly into this record,
- therefore, the buffer page of
- this record must be at least
- s-latched and the latch held
- as long as the dtuple is used! */
+ const rec_t* rec, /*!< in: record in the index */
const dict_index_t* index, /*!< in: index */
- ulint* offsets,/*!< in/out: rec_get_offsets(rec) */
+ const ulint* offsets,/*!< in/out: rec_get_offsets(rec) */
ulint* n_ext, /*!< out: number of externally
stored columns */
- mem_heap_t* heap); /*!< in: memory heap from which
+ mem_heap_t* heap) /*!< in: memory heap from which
the memory needed is allocated */
+ __attribute__((nonnull, warn_unused_result));
/*******************************************************************//**
Builds from a secondary index record a row reference with which we can
search the clustered index record.
@@ -193,8 +208,9 @@ row_build_row_ref(
the buffer page of this record must be
at least s-latched and the latch held
as long as the row reference is used! */
- mem_heap_t* heap); /*!< in: memory heap from which the memory
+ mem_heap_t* heap) /*!< in: memory heap from which the memory
needed is allocated */
+ __attribute__((nonnull, warn_unused_result));
/*******************************************************************//**
Builds from a secondary index record a row reference with which we can
search the clustered index record. */
@@ -215,7 +231,8 @@ row_build_row_ref_in_tuple(
const dict_index_t* index, /*!< in: secondary index */
ulint* offsets,/*!< in: rec_get_offsets(rec, index)
or NULL */
- trx_t* trx); /*!< in: transaction */
+ trx_t* trx) /*!< in: transaction or NULL */
+ __attribute__((nonnull(1,2,3)));
/*******************************************************************//**
Builds from a secondary index record a row reference with which we can
search the clustered index record. */
@@ -245,7 +262,8 @@ row_search_on_row_ref(
ulint mode, /*!< in: BTR_MODIFY_LEAF, ... */
const dict_table_t* table, /*!< in: table */
const dtuple_t* ref, /*!< in: row reference */
- mtr_t* mtr); /*!< in/out: mtr */
+ mtr_t* mtr) /*!< in/out: mtr */
+ __attribute__((nonnull, warn_unused_result));
/*********************************************************************//**
Fetches the clustered index record for a secondary index record. The latches
on the secondary index record are preserved.
@@ -258,7 +276,8 @@ row_get_clust_rec(
const rec_t* rec, /*!< in: record in a secondary index */
dict_index_t* index, /*!< in: secondary index */
dict_index_t** clust_index,/*!< out: clustered index */
- mtr_t* mtr); /*!< in: mtr */
+ mtr_t* mtr) /*!< in: mtr */
+ __attribute__((nonnull, warn_unused_result));
/** Result of row_search_index_entry */
enum row_search_result {
@@ -285,8 +304,8 @@ row_search_index_entry(
ulint mode, /*!< in: BTR_MODIFY_LEAF, ... */
btr_pcur_t* pcur, /*!< in/out: persistent cursor, which must
be closed by the caller */
- mtr_t* mtr); /*!< in: mtr */
-
+ mtr_t* mtr) /*!< in: mtr */
+ __attribute__((nonnull, warn_unused_result));
#define ROW_COPY_DATA 1
#define ROW_COPY_POINTERS 2
@@ -313,8 +332,9 @@ row_raw_format(
in bytes */
const dict_field_t* dict_field, /*!< in: index field */
char* buf, /*!< out: output buffer */
- ulint buf_size); /*!< in: output buffer size
+ ulint buf_size) /*!< in: output buffer size
in bytes */
+ __attribute__((nonnull, warn_unused_result));
#ifndef UNIV_NONINL
#include "row0row.ic"
diff --git a/storage/innobase/include/row0row.ic b/storage/innobase/include/row0row.ic
index 8e9f3460519..ac62422be1f 100644
--- a/storage/innobase/include/row0row.ic
+++ b/storage/innobase/include/row0row.ic
@@ -104,6 +104,33 @@ row_get_rec_roll_ptr(
return(trx_read_roll_ptr(rec + offset + DATA_TRX_ID_LEN));
}
+/*****************************************************************//**
+When an insert or purge to a table is performed, this function builds
+the entry to be inserted into or purged from an index on the table.
+@return index entry which should be inserted or purged, or NULL if the
+externally stored columns in the clustered index record are
+unavailable and ext != NULL */
+UNIV_INLINE
+dtuple_t*
+row_build_index_entry(
+/*==================*/
+ const dtuple_t* row, /*!< in: row which should be
+ inserted or purged */
+ const row_ext_t* ext, /*!< in: externally stored column
+ prefixes, or NULL */
+ dict_index_t* index, /*!< in: index on the table */
+ mem_heap_t* heap) /*!< in: memory heap from which
+ the memory for the index entry
+ is allocated */
+{
+ dtuple_t* entry;
+
+ ut_ad(dtuple_check_typed(row));
+ entry = row_build_index_entry_low(row, ext, index, heap);
+ ut_ad(!entry || dtuple_check_typed(entry));
+ return(entry);
+}
+
/*******************************************************************//**
Builds from a secondary index record a row reference with which we can
search the clustered index record. */
diff --git a/storage/innobase/include/row0sel.h b/storage/innobase/include/row0sel.h
index fa3c93b6b9a..c8be80f89d9 100644
--- a/storage/innobase/include/row0sel.h
+++ b/storage/innobase/include/row0sel.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1997, 2010, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1997, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -148,7 +148,7 @@ position and fetch next or fetch prev must not be tried to the cursor!
@return DB_SUCCESS, DB_RECORD_NOT_FOUND, DB_END_OF_INDEX, DB_DEADLOCK,
DB_LOCK_TABLE_FULL, or DB_TOO_BIG_RECORD */
UNIV_INTERN
-ulint
+dberr_t
row_search_for_mysql(
/*=================*/
byte* buf, /*!< in/out: buffer for the fetched
@@ -163,11 +163,12 @@ row_search_for_mysql(
'mode' */
ulint match_mode, /*!< in: 0 or ROW_SEL_EXACT or
ROW_SEL_EXACT_PREFIX */
- ulint direction); /*!< in: 0 or ROW_SEL_NEXT or
+ ulint direction) /*!< in: 0 or ROW_SEL_NEXT or
ROW_SEL_PREV; NOTE: if this is != 0,
then prebuilt must have a pcur
with stored position! In opening of a
cursor 'direction' should be 0. */
+ __attribute__((nonnull, warn_unused_result));
/*******************************************************************//**
Checks if MySQL at the moment is allowed for this table to retrieve a
consistent read result, or store it to the query cache.
@@ -179,28 +180,20 @@ row_search_check_if_query_cache_permitted(
trx_t* trx, /*!< in: transaction object */
const char* norm_name); /*!< in: concatenation of database name,
'/' char, table name */
-void
-row_create_key(
-/*===========*/
- dtuple_t* tuple, /* in: tuple where to build;
- NOTE: we assume that the type info
- in the tuple is already according
- to index! */
- dict_index_t* index, /* in: index of the key value */
- doc_id_t* doc_id); /* in: doc id to lookup.*/
/*******************************************************************//**
Read the max AUTOINC value from an index.
@return DB_SUCCESS if all OK else error code */
UNIV_INTERN
-ulint
+dberr_t
row_search_max_autoinc(
/*===================*/
dict_index_t* index, /*!< in: index to search */
const char* col_name, /*!< in: autoinc column name */
- ib_uint64_t* value); /*!< out: AUTOINC value read */
+ ib_uint64_t* value) /*!< out: AUTOINC value read */
+ __attribute__((nonnull, warn_unused_result));
/** A structure for caching column values for prefetched rows */
-struct sel_buf_struct{
+struct sel_buf_t{
byte* data; /*!< data, or NULL; if not NULL, this field
has allocated memory which must be explicitly
freed; can be != NULL even when len is
@@ -213,7 +206,7 @@ struct sel_buf_struct{
};
/** Query plan */
-struct plan_struct{
+struct plan_t{
dict_table_t* table; /*!< table struct in the dictionary
cache */
dict_index_t* index; /*!< table index used in the search */
@@ -299,7 +292,7 @@ enum sel_node_state {
};
/** Select statement node */
-struct sel_node_struct{
+struct sel_node_t{
que_common_t common; /*!< node type: QUE_NODE_SELECT */
enum sel_node_state
state; /*!< node state */
@@ -352,7 +345,7 @@ struct sel_node_struct{
};
/** Fetch statement node */
-struct fetch_node_struct{
+struct fetch_node_t{
que_common_t common; /*!< type: QUE_NODE_FETCH */
sel_node_t* cursor_def; /*!< cursor definition */
sym_node_t* into_list; /*!< variables to set */
@@ -379,7 +372,7 @@ enum open_node_op {
};
/** Open or close cursor statement node */
-struct open_node_struct{
+struct open_node_t{
que_common_t common; /*!< type: QUE_NODE_OPEN */
enum open_node_op
op_type; /*!< operation type: open or
@@ -388,7 +381,7 @@ struct open_node_struct{
};
/** Row printf statement node */
-struct row_printf_node_struct{
+struct row_printf_node_t{
que_common_t common; /*!< type: QUE_NODE_ROW_PRINTF */
sel_node_t* sel_node; /*!< select */
};
diff --git a/storage/innobase/include/row0types.h b/storage/innobase/include/row0types.h
index 463651b43b8..52c89cb01fa 100644
--- a/storage/innobase/include/row0types.h
+++ b/storage/innobase/include/row0types.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2010, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -26,32 +26,28 @@ Created 12/27/1996 Heikki Tuuri
#ifndef row0types_h
#define row0types_h
-typedef struct plan_struct plan_t;
+struct plan_t;
-typedef struct upd_struct upd_t;
+struct upd_t;
+struct upd_field_t;
+struct upd_node_t;
+struct del_node_t;
+struct ins_node_t;
+struct sel_node_t;
+struct open_node_t;
+struct fetch_node_t;
-typedef struct upd_field_struct upd_field_t;
+struct row_printf_node_t;
+struct sel_buf_t;
-typedef struct upd_node_struct upd_node_t;
+struct undo_node_t;
-typedef struct del_node_struct del_node_t;
+struct purge_node_t;
-typedef struct ins_node_struct ins_node_t;
+struct row_ext_t;
-typedef struct sel_node_struct sel_node_t;
-
-typedef struct open_node_struct open_node_t;
-
-typedef struct fetch_node_struct fetch_node_t;
-
-typedef struct row_printf_node_struct row_printf_node_t;
-typedef struct sel_buf_struct sel_buf_t;
-
-typedef struct undo_node_struct undo_node_t;
-
-typedef struct purge_node_struct purge_node_t;
-
-typedef struct row_ext_struct row_ext_t;
+/** Buffer for logging modifications during online index creation */
+struct row_log_t;
/* MySQL data types */
struct TABLE;
diff --git a/storage/innobase/include/row0uins.h b/storage/innobase/include/row0uins.h
index 5f3a7212ee1..ebf4881208a 100644
--- a/storage/innobase/include/row0uins.h
+++ b/storage/innobase/include/row0uins.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1997, 2009, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1997, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -42,11 +42,11 @@ if it figures out that an index record will be removed in the purge
anyway, it will remove it in the rollback.
@return DB_SUCCESS */
UNIV_INTERN
-ulint
+dberr_t
row_undo_ins(
/*=========*/
- undo_node_t* node); /*!< in: row undo node */
-
+ undo_node_t* node) /*!< in: row undo node */
+ __attribute__((nonnull, warn_unused_result));
#ifndef UNIV_NONINL
#include "row0uins.ic"
#endif
diff --git a/storage/innobase/include/row0umod.h b/storage/innobase/include/row0umod.h
index 84831e59d90..f89d5a334fc 100644
--- a/storage/innobase/include/row0umod.h
+++ b/storage/innobase/include/row0umod.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1997, 2009, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1997, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -38,12 +38,12 @@ Created 2/27/1997 Heikki Tuuri
Undoes a modify operation on a row of a table.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
row_undo_mod(
/*=========*/
undo_node_t* node, /*!< in: row undo node */
- que_thr_t* thr); /*!< in: query thread */
-
+ que_thr_t* thr) /*!< in: query thread */
+ __attribute__((nonnull, warn_unused_result));
#ifndef UNIV_NONINL
#include "row0umod.ic"
diff --git a/storage/innobase/include/row0undo.h b/storage/innobase/include/row0undo.h
index 47f9afdc74a..5dddfb4eae1 100644
--- a/storage/innobase/include/row0undo.h
+++ b/storage/innobase/include/row0undo.h
@@ -95,7 +95,7 @@ enum undo_exec {
};
/** Undo node structure */
-struct undo_node_struct{
+struct undo_node_t{
que_common_t common; /*!< node type: QUE_NODE_UNDO */
enum undo_exec state; /*!< node execution state */
trx_t* trx; /*!< trx for which undo is done */
diff --git a/storage/innobase/include/row0upd.h b/storage/innobase/include/row0upd.h
index a7687bb1ded..27dedeb65a7 100644
--- a/storage/innobase/include/row0upd.h
+++ b/storage/innobase/include/row0upd.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -101,7 +101,7 @@ byte*
row_upd_write_sys_vals_to_log(
/*==========================*/
dict_index_t* index, /*!< in: clustered index */
- trx_t* trx, /*!< in: transaction */
+ trx_id_t trx_id, /*!< in: transaction id */
roll_ptr_t roll_ptr,/*!< in: roll ptr of the undo log record */
byte* log_ptr,/*!< pointer to a buffer of size > 20 opened
in mlog */
@@ -118,8 +118,9 @@ row_upd_rec_sys_fields(
uncompressed part will be updated, or NULL */
dict_index_t* index, /*!< in: clustered index */
const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */
- trx_t* trx, /*!< in: transaction */
- roll_ptr_t roll_ptr);/*!< in: roll ptr of the undo log record */
+ const trx_t* trx, /*!< in: transaction */
+ roll_ptr_t roll_ptr);/*!< in: roll ptr of the undo log record,
+ can be 0 during IMPORT */
/*********************************************************************//**
Sets the trx id or roll ptr field of a clustered index entry. */
UNIV_INTERN
@@ -165,6 +166,15 @@ row_upd_changes_field_size_or_external(
dict_index_t* index, /*!< in: index */
const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */
const upd_t* update);/*!< in: update vector */
+/***********************************************************//**
+Returns true if row update contains disowned external fields.
+@return true if the update contains disowned external fields. */
+UNIV_INTERN
+bool
+row_upd_changes_disowned_external(
+/*==============================*/
+ const upd_t* update) /*!< in: update vector */
+ __attribute__((nonnull, warn_unused_result));
#endif /* !UNIV_HOTBACKUP */
/***********************************************************//**
Replaces the new column values stored in the update vector to the
@@ -192,11 +202,12 @@ UNIV_INTERN
upd_t*
row_upd_build_sec_rec_difference_binary(
/*====================================*/
+ const rec_t* rec, /*!< in: secondary index record */
dict_index_t* index, /*!< in: index */
+ const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */
const dtuple_t* entry, /*!< in: entry to insert */
- const rec_t* rec, /*!< in: secondary index record */
- trx_t* trx, /*!< in: transaction */
- mem_heap_t* heap); /*!< in: memory heap from which allocated */
+ mem_heap_t* heap) /*!< in: memory heap from which allocated */
+ __attribute__((warn_unused_result, nonnull));
/***************************************************************//**
Builds an update vector from those fields, excluding the roll ptr and
trx id fields, which in an index entry differ from a record that has
@@ -204,14 +215,19 @@ the equal ordering fields. NOTE: we compare the fields as binary strings!
@return own: update vector of differing fields, excluding roll ptr and
trx id */
UNIV_INTERN
-upd_t*
+const upd_t*
row_upd_build_difference_binary(
/*============================*/
dict_index_t* index, /*!< in: clustered index */
const dtuple_t* entry, /*!< in: entry to insert */
const rec_t* rec, /*!< in: clustered index record */
- trx_t* trx, /*!< in: transaction */
- mem_heap_t* heap); /*!< in: memory heap from which allocated */
+ const ulint* offsets,/*!< in: rec_get_offsets(rec,index), or NULL */
+ bool no_sys, /*!< in: skip the system columns
+ DB_TRX_ID and DB_ROLL_PTR */
+ trx_t* trx, /*!< in: transaction (for diagnostics),
+ or NULL */
+ mem_heap_t* heap) /*!< in: memory heap from which allocated */
+ __attribute__((nonnull(1,2,3,7), warn_unused_result));
/***********************************************************//**
Replaces the new column values stored in the update vector to the index entry
given. */
@@ -315,25 +331,14 @@ row_upd_changes_fts_column(
upd_field_t* upd_field); /*!< in: field to check */
/***********************************************************//**
Checks if an FTS Doc ID column is affected by an UPDATE.
-@return TRUE if Doc ID column is affected */
+@return whether Doc ID column is affected */
UNIV_INTERN
-ulint
+bool
row_upd_changes_doc_id(
/*===================*/
dict_table_t* table, /*!< in: table */
- upd_field_t* upd_field); /*!< in: field to check */
-/***********************************************************//**
-Checks if an update vector changes the table's FTS-indexed columns.
-NOTE: must not be called for tables which do not have an FTS-index.
-Also, the vector returned must be explicitly freed as it's allocated
-using the ut_malloc() allocator.
-@return vector of FTS indexes that were affected by the update else NULL */
-UNIV_INTERN
-ib_vector_t*
-row_upd_changes_fts_columns(
-/*========================*/
- dict_table_t* table, /*!< in: table */
- upd_t* update); /*!< in: update vector for the row */
+ upd_field_t* upd_field) /*!< in: field to check */
+ __attribute__((nonnull, warn_unused_result));
/***********************************************************//**
Checks if an update vector changes an ordering field of an index record.
This function is fast if the update vector is short or the number of ordering
@@ -397,7 +402,7 @@ row_upd_index_parse(
/* Update vector field */
-struct upd_field_struct{
+struct upd_field_t{
unsigned field_no:16; /*!< field number in an index, usually
the clustered index, but in updating
a secondary index record in btr0cur.cc
@@ -416,7 +421,7 @@ struct upd_field_struct{
};
/* Update vector structure */
-struct upd_struct{
+struct upd_t{
ulint info_bits; /*!< new value of info bits to record;
default is 0 */
ulint n_fields; /*!< number of update fields */
@@ -427,7 +432,7 @@ struct upd_struct{
/* Update node structure which also implements the delete operation
of a row */
-struct upd_node_struct{
+struct upd_node_t{
que_common_t common; /*!< node type: QUE_NODE_UPDATE */
ibool is_delete;/* TRUE if delete, FALSE if update */
ibool searched_update;
diff --git a/storage/innobase/include/row0upd.ic b/storage/innobase/include/row0upd.ic
index d054662c080..618a77fa4bf 100644
--- a/storage/innobase/include/row0upd.ic
+++ b/storage/innobase/include/row0upd.ic
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -46,7 +46,6 @@ upd_create(
update = (upd_t*) mem_heap_zalloc(heap, sizeof(upd_t));
- update->info_bits = 0;
update->n_fields = n;
update->fields = (upd_field_t*)
mem_heap_zalloc(heap, sizeof(upd_field_t) * n);
@@ -111,6 +110,7 @@ upd_field_set_field_no(
fprintf(stderr, "\n"
"InnoDB: but index only has %lu fields\n",
(ulong) dict_index_get_n_fields(index));
+ ut_ad(0);
}
dict_col_copy_type(dict_index_get_nth_col(index, field_no),
@@ -152,8 +152,9 @@ row_upd_rec_sys_fields(
uncompressed part will be updated, or NULL */
dict_index_t* index, /*!< in: clustered index */
const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */
- trx_t* trx, /*!< in: transaction */
- roll_ptr_t roll_ptr)/*!< in: roll ptr of the undo log record */
+ const trx_t* trx, /*!< in: transaction */
+ roll_ptr_t roll_ptr)/*!< in: roll ptr of the undo log record,
+ can be 0 during IMPORT */
{
ut_ad(dict_index_is_clust(index));
ut_ad(rec_offs_validate(rec, index, offsets));
@@ -172,8 +173,14 @@ row_upd_rec_sys_fields(
#if DATA_TRX_ID + 1 != DATA_ROLL_PTR
# error "DATA_TRX_ID + 1 != DATA_ROLL_PTR"
#endif
- ut_ad(lock_check_trx_id_sanity(trx_read_trx_id(rec + offset),
- rec, index, offsets));
+ /* During IMPORT the trx id in the record can be in the
+ future, if the .ibd file is being imported from another
+ instance. During IMPORT roll_ptr will be 0. */
+ ut_ad(roll_ptr == 0
+ || lock_check_trx_id_sanity(
+ trx_read_trx_id(rec + offset),
+ rec, index, offsets));
+
trx_write_trx_id(rec + offset, trx->id);
trx_write_roll_ptr(rec + offset + DATA_TRX_ID_LEN, roll_ptr);
}
diff --git a/storage/innobase/include/row0vers.h b/storage/innobase/include/row0vers.h
index d9e3471b3dc..1df5b4d3e98 100644
--- a/storage/innobase/include/row0vers.h
+++ b/storage/innobase/include/row0vers.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1997, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1997, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -87,7 +87,7 @@ read should see. We assume that the trx id stored in rec is such that
the consistent read should not see rec in its present version.
@return DB_SUCCESS or DB_MISSING_HISTORY */
UNIV_INTERN
-ulint
+dberr_t
row_vers_build_for_consistent_read(
/*===============================*/
const rec_t* rec, /*!< in: record in a clustered index; the
@@ -106,16 +106,17 @@ row_vers_build_for_consistent_read(
*old_vers is allocated; memory for possible
intermediate versions is allocated and freed
locally within the function */
- rec_t** old_vers);/*!< out, own: old version, or NULL if the
- record does not exist in the view, that is,
+ rec_t** old_vers)/*!< out, own: old version, or NULL
+ if the history is missing or the record
+ does not exist in the view, that is,
it was freshly inserted afterwards */
+ __attribute__((nonnull(1,2,3,4,5,6,7)));
/*****************************************************************//**
Constructs the last committed version of a clustered index record,
-which should be seen by a semi-consistent read.
-@return DB_SUCCESS or DB_MISSING_HISTORY */
+which should be seen by a semi-consistent read. */
UNIV_INTERN
-ulint
+void
row_vers_build_for_semi_consistent_read(
/*====================================*/
const rec_t* rec, /*!< in: record in a clustered index; the
@@ -132,9 +133,10 @@ row_vers_build_for_semi_consistent_read(
*old_vers is allocated; memory for possible
intermediate versions is allocated and freed
locally within the function */
- const rec_t** old_vers);/*!< out: rec, old version, or NULL if the
+ const rec_t** old_vers)/*!< out: rec, old version, or NULL if the
record does not exist in the view, that is,
it was freshly inserted afterwards */
+ __attribute__((nonnull(1,2,3,4,5)));
#ifndef UNIV_NONINL
diff --git a/storage/innobase/include/srv0mon.h b/storage/innobase/include/srv0mon.h
index 5e47f82f416..48d4b94dcae 100644
--- a/storage/innobase/include/srv0mon.h
+++ b/storage/innobase/include/srv0mon.h
@@ -1,6 +1,7 @@
/***********************************************************************
-Copyright (c) 2010, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2010, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2012, Facebook Inc.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
@@ -55,7 +56,7 @@ fill in counter information as described in "monitor_info_t" and
create the internal counter ID in "monitor_id_t". */
/** Structure containing the actual values of a monitor counter. */
-struct monitor_value_struct {
+struct monitor_value_t {
ib_time_t mon_start_time; /*!< Start time of monitoring */
ib_time_t mon_stop_time; /*!< Stop time of monitoring */
ib_time_t mon_reset_time; /*!< Time counter resetted */
@@ -70,11 +71,9 @@ struct monitor_value_struct {
monitor_running_t mon_status; /* whether monitor still running */
};
-typedef struct monitor_value_struct monitor_value_t;
-
/** Follwoing defines are possible values for "monitor_type" field in
"struct monitor_info" */
-enum monitor_type_value {
+enum monitor_type_t {
MONITOR_NONE = 0, /*!< No monitoring */
MONITOR_MODULE = 1, /*!< This is a monitor module type,
not a counter */
@@ -97,8 +96,6 @@ enum monitor_type_value {
metrics table */
};
-typedef enum monitor_type_value monitor_type_t;
-
/** Counter minimum value is initialized to be max value of
mon_type_t (ib_int64_t) */
#define MIN_RESERVED ((mon_type_t) (IB_ULONGLONG_MAX >> 1))
@@ -117,7 +114,7 @@ name shall start with MONITOR_OVLD
Please refer to "innodb_counter_info" in srv/srv0mon.cc for detail
information for each monitor counter */
-enum monitor_id_value {
+enum monitor_id_t {
/* This is to identify the default value set by the metrics
control global variables */
MONITOR_DEFAULT_START = 0,
@@ -154,14 +151,15 @@ enum monitor_id_value {
MONITOR_OVLD_BUF_POOL_READS,
MONITOR_OVLD_BUF_POOL_READ_REQUESTS,
MONITOR_OVLD_BUF_POOL_WRITE_REQUEST,
- MONITOR_PAGE_INFLUSH,
MONITOR_OVLD_BUF_POOL_WAIT_FREE,
MONITOR_OVLD_BUF_POOL_READ_AHEAD,
MONITOR_OVLD_BUF_POOL_READ_AHEAD_EVICTED,
MONITOR_OVLD_BUF_POOL_PAGE_TOTAL,
MONITOR_OVLD_BUF_POOL_PAGE_MISC,
MONITOR_OVLD_BUF_POOL_PAGES_DATA,
+ MONITOR_OVLD_BUF_POOL_BYTES_DATA,
MONITOR_OVLD_BUF_POOL_PAGES_DIRTY,
+ MONITOR_OVLD_BUF_POOL_BYTES_DIRTY,
MONITOR_OVLD_BUF_POOL_PAGES_FREE,
MONITOR_OVLD_PAGE_CREATED,
MONITOR_OVLD_PAGES_WRITTEN,
@@ -177,15 +175,15 @@ enum monitor_id_value {
MONITOR_FLUSH_NEIGHBOR_TOTAL_PAGE,
MONITOR_FLUSH_NEIGHBOR_COUNT,
MONITOR_FLUSH_NEIGHBOR_PAGES,
- MONITOR_FLUSH_MAX_DIRTY_TOTAL_PAGE,
- MONITOR_FLUSH_MAX_DIRTY_COUNT,
- MONITOR_FLUSH_MAX_DIRTY_PAGES,
+ MONITOR_FLUSH_N_TO_FLUSH_REQUESTED,
+ MONITOR_FLUSH_AVG_PAGE_RATE,
+ MONITOR_FLUSH_LSN_AVG_RATE,
+ MONITOR_FLUSH_PCT_FOR_DIRTY,
+ MONITOR_FLUSH_PCT_FOR_LSN,
+ MONITOR_FLUSH_SYNC_WAITS,
MONITOR_FLUSH_ADAPTIVE_TOTAL_PAGE,
MONITOR_FLUSH_ADAPTIVE_COUNT,
MONITOR_FLUSH_ADAPTIVE_PAGES,
- MONITOR_FLUSH_ASYNC_TOTAL_PAGE,
- MONITOR_FLUSH_ASYNC_COUNT,
- MONITOR_FLUSH_ASYNC_PAGES,
MONITOR_FLUSH_SYNC_TOTAL_PAGE,
MONITOR_FLUSH_SYNC_COUNT,
MONITOR_FLUSH_SYNC_PAGES,
@@ -303,6 +301,8 @@ enum monitor_id_value {
MONITOR_MODULE_PAGE,
MONITOR_PAGE_COMPRESS,
MONITOR_PAGE_DECOMPRESS,
+ MONITOR_PAD_INCREMENTS,
+ MONITOR_PAD_DECREMENTS,
/* Index related counters */
MONITOR_MODULE_INDEX,
@@ -367,7 +367,10 @@ enum monitor_id_value {
/* Data DDL related counters */
MONITOR_MODULE_DDL_STATS,
+ MONITOR_BACKGROUND_DROP_INDEX,
MONITOR_BACKGROUND_DROP_TABLE,
+ MONITOR_ONLINE_CREATE_INDEX,
+ MONITOR_PENDING_ALTER_TABLE,
MONITOR_MODULE_ICP,
MONITOR_ICP_ATTEMPTS,
@@ -383,8 +386,6 @@ enum monitor_id_value {
NUM_MONITOR
};
-typedef enum monitor_id_value monitor_id_t;
-
/** This informs the monitor control system to turn
on/off and reset monitor counters through wild card match */
#define MONITOR_WILDCARD_MATCH (NUM_MONITOR + 1)
@@ -394,7 +395,7 @@ on/off and reset monitor counters through wild card match */
/** struct monitor_info describes the basic/static information
about each monitor counter. */
-struct monitor_info_struct {
+struct monitor_info_t {
const char* monitor_name; /*!< Monitor name */
const char* monitor_module; /*!< Sub Module the monitor
belongs to */
@@ -408,12 +409,10 @@ struct monitor_info_struct {
monitor_id_t */
};
-typedef struct monitor_info_struct monitor_info_t;
-
/** Following are the "set_option" values allowed for
srv_mon_process_existing_counter() and srv_mon_process_existing_counter()
functions. To turn on/off/reset the monitor counters. */
-enum mon_set_option {
+enum mon_option_t {
MONITOR_TURN_ON = 1, /*!< Turn on the counter */
MONITOR_TURN_OFF, /*!< Turn off the counter */
MONITOR_RESET_VALUE, /*!< Reset current values */
@@ -423,8 +422,6 @@ enum mon_set_option {
function */
};
-typedef enum mon_set_option mon_option_t;
-
/** Number of bit in a ulint datatype */
#define NUM_BITS_ULINT (sizeof(ulint) * CHAR_BIT)
@@ -533,8 +530,37 @@ on the counters */
} \
}
-#ifdef HAVE_ATOMIC_BUILTINS
+/** Increment a monitor counter under mutex protection.
+Use MONITOR_INC if appropriate mutex protection already exists.
+@param monitor monitor to be incremented by 1
+@param mutex mutex to acquire and relese */
+# define MONITOR_MUTEX_INC(mutex, monitor) \
+ ut_ad(!mutex_own(mutex)); \
+ if (MONITOR_IS_ON(monitor)) { \
+ mutex_enter(mutex); \
+ if (++MONITOR_VALUE(monitor) > MONITOR_MAX_VALUE(monitor)) { \
+ MONITOR_MAX_VALUE(monitor) = MONITOR_VALUE(monitor); \
+ } \
+ mutex_exit(mutex); \
+ }
+/** Decrement a monitor counter under mutex protection.
+Use MONITOR_DEC if appropriate mutex protection already exists.
+@param monitor monitor to be decremented by 1
+@param mutex mutex to acquire and relese */
+# define MONITOR_MUTEX_DEC(mutex, monitor) \
+ ut_ad(!mutex_own(mutex)); \
+ if (MONITOR_IS_ON(monitor)) { \
+ mutex_enter(mutex); \
+ if (--MONITOR_VALUE(monitor) < MONITOR_MIN_VALUE(monitor)) { \
+ MONITOR_MIN_VALUE(monitor) = MONITOR_VALUE(monitor); \
+ } \
+ mutex_exit(mutex); \
+ }
+#if defined HAVE_ATOMIC_BUILTINS_64
+/** Atomically increment a monitor counter.
+Use MONITOR_INC if appropriate mutex protection exists.
+@param monitor monitor to be incremented by 1 */
# define MONITOR_ATOMIC_INC(monitor) \
if (MONITOR_IS_ON(monitor)) { \
ib_uint64_t value; \
@@ -547,10 +573,13 @@ on the counters */
} \
}
+/** Atomically decrement a monitor counter.
+Use MONITOR_DEC if appropriate mutex protection exists.
+@param monitor monitor to be decremented by 1 */
# define MONITOR_ATOMIC_DEC(monitor) \
if (MONITOR_IS_ON(monitor)) { \
ib_uint64_t value; \
- value = os_atomic_decrement_ulint( \
+ value = os_atomic_decrement_uint64( \
(ib_uint64_t*) &MONITOR_VALUE(monitor), 1); \
/* Note: This is not 100% accurate because of the \
inherent race, we ignore it due to performance. */ \
@@ -558,7 +587,34 @@ on the counters */
MONITOR_MIN_VALUE(monitor) = value; \
} \
}
-#endif /* HAVE_ATOMIC_BUILTINS */
+# define srv_mon_create() ((void) 0)
+# define srv_mon_free() ((void) 0)
+#else /* HAVE_ATOMIC_BUILTINS_64 */
+/** Mutex protecting atomic operations on platforms that lack
+built-in operations for atomic memory access */
+extern ib_mutex_t monitor_mutex;
+/****************************************************************//**
+Initialize the monitor subsystem. */
+UNIV_INTERN
+void
+srv_mon_create(void);
+/*================*/
+/****************************************************************//**
+Close the monitor subsystem. */
+UNIV_INTERN
+void
+srv_mon_free(void);
+/*==============*/
+
+/** Atomically increment a monitor counter.
+Use MONITOR_INC if appropriate mutex protection exists.
+@param monitor monitor to be incremented by 1 */
+# define MONITOR_ATOMIC_INC(monitor) MONITOR_MUTEX_INC(&monitor_mutex, monitor)
+/** Atomically decrement a monitor counter.
+Use MONITOR_DEC if appropriate mutex protection exists.
+@param monitor monitor to be decremented by 1 */
+# define MONITOR_ATOMIC_DEC(monitor) MONITOR_MUTEX_DEC(&monitor_mutex, monitor)
+#endif /* HAVE_ATOMIC_BUILTINS_64 */
#define MONITOR_DEC(monitor) \
if (MONITOR_IS_ON(monitor)) { \
@@ -568,7 +624,17 @@ on the counters */
} \
}
+#ifdef UNIV_DEBUG_VALGRIND
+# define MONITOR_CHECK_DEFINED(value) do { \
+ mon_type_t m = value; \
+ UNIV_MEM_ASSERT_RW(&m, sizeof m); \
+} while (0)
+#else /* UNIV_DEBUG_VALGRIND */
+# define MONITOR_CHECK_DEFINED(value) (void) 0
+#endif /* UNIV_DEBUG_VALGRIND */
+
#define MONITOR_INC_VALUE(monitor, value) \
+ MONITOR_CHECK_DEFINED(value); \
if (MONITOR_IS_ON(monitor)) { \
MONITOR_VALUE(monitor) += (mon_type_t) (value); \
if (MONITOR_VALUE(monitor) > MONITOR_MAX_VALUE(monitor)) { \
@@ -577,6 +643,7 @@ on the counters */
}
#define MONITOR_DEC_VALUE(monitor, value) \
+ MONITOR_CHECK_DEFINED(value); \
if (MONITOR_IS_ON(monitor)) { \
ut_ad(MONITOR_VALUE(monitor) >= (mon_type_t) (value); \
MONITOR_VALUE(monitor) -= (mon_type_t) (value); \
@@ -605,6 +672,7 @@ could already be checked as a module group */
/** Directly set a monitor counter's value */
#define MONITOR_SET(monitor, value) \
+ MONITOR_CHECK_DEFINED(value); \
if (MONITOR_IS_ON(monitor)) { \
MONITOR_VALUE(monitor) = (mon_type_t) (value); \
if (MONITOR_VALUE(monitor) > MONITOR_MAX_VALUE(monitor)) { \
@@ -617,9 +685,10 @@ could already be checked as a module group */
/** Add time difference between now and input "value" (in seconds) to the
monitor counter
-@monitor monitor to update for the time difference
-@value the start time value */
+@param monitor monitor to update for the time difference
+@param value the start time value */
#define MONITOR_INC_TIME_IN_MICRO_SECS(monitor, value) \
+ MONITOR_CHECK_DEFINED(value); \
if (MONITOR_IS_ON(monitor)) { \
ullint old_time = (value); \
value = ut_time_us(NULL); \
@@ -629,15 +698,16 @@ monitor counter
/** This macro updates 3 counters in one call. However, it only checks the
main/first monitor counter 'monitor', to see it is on or off to decide
whether to do the update.
-@monitor the main monitor counter to update. It accounts for
+@param monitor the main monitor counter to update. It accounts for
the accumulative value for the counter.
-@monitor_n_calls counter that counts number of times this macro is
+@param monitor_n_calls counter that counts number of times this macro is
called
-@monitor_per_call counter that records the current and max value of
+@param monitor_per_call counter that records the current and max value of
each incremental value
-@value incremental value to record this time */
+@param value incremental value to record this time */
#define MONITOR_INC_VALUE_CUMULATIVE( \
monitor, monitor_n_calls, monitor_per_call, value) \
+ MONITOR_CHECK_DEFINED(value); \
if (MONITOR_IS_ON(monitor)) { \
MONITOR_VALUE(monitor_n_calls)++; \
MONITOR_VALUE(monitor_per_call) = (mon_type_t) (value); \
@@ -655,6 +725,7 @@ whether to do the update.
/** Directly set a monitor counter's value, and if the value
is monotonically increasing, only max value needs to be updated */
#define MONITOR_SET_UPD_MAX_ONLY(monitor, value) \
+ MONITOR_CHECK_DEFINED(value); \
if (MONITOR_IS_ON(monitor)) { \
MONITOR_VALUE(monitor) = (mon_type_t) (value); \
if (MONITOR_VALUE(monitor) > MONITOR_MAX_VALUE(monitor)) { \
@@ -665,6 +736,7 @@ is monotonically increasing, only max value needs to be updated */
/** Some values such as log sequence number are montomically increasing
number, do not need to record max/min values */
#define MONITOR_SET_SIMPLE(monitor, value) \
+ MONITOR_CHECK_DEFINED(value); \
if (MONITOR_IS_ON(monitor)) { \
MONITOR_VALUE(monitor) = (mon_type_t) (value); \
}
@@ -693,9 +765,11 @@ consolidate information from existing system status variables. */
/** Save the passed-in value to mon_start_value field of monitor
counters */
-#define MONITOR_SAVE_START(monitor, value) \
+#define MONITOR_SAVE_START(monitor, value) do { \
+ MONITOR_CHECK_DEFINED(value); \
(MONITOR_START_VALUE(monitor) = \
- (mon_type_t) (value) - MONITOR_VALUE_RESET(monitor))
+ (mon_type_t) (value) - MONITOR_VALUE_RESET(monitor)); \
+ } while (0)
/** Save the passed-in value to mon_last_value field of monitor
counters */
diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h
index 65257baa4bb..1e98cf690d8 100644
--- a/storage/innobase/include/srv0srv.h
+++ b/storage/innobase/include/srv0srv.h
@@ -50,22 +50,91 @@ Created 10/10/1995 Heikki Tuuri
#include "trx0types.h"
#include "srv0conc.h"
#include "buf0checksum.h"
+#include "ut0counter.h"
+
+/* Global counters used inside InnoDB. */
+struct srv_stats_t {
+ typedef ib_counter_t<lsn_t, 1, single_indexer_t> lsn_ctr_1_t;
+ typedef ib_counter_t<ulint, 1, single_indexer_t> ulint_ctr_1_t;
+ typedef ib_counter_t<lint, 1, single_indexer_t> lint_ctr_1_t;
+ typedef ib_counter_t<ulint, 64> ulint_ctr_64_t;
+ typedef ib_counter_t<ib_int64_t, 1, single_indexer_t> ib_int64_ctr_1_t;
+
+ /** Count the amount of data written in total (in bytes) */
+ ulint_ctr_1_t data_written;
+
+ /** Number of the log write requests done */
+ ulint_ctr_1_t log_write_requests;
+
+ /** Number of physical writes to the log performed */
+ ulint_ctr_1_t log_writes;
+
+ /** Amount of data written to the log files in bytes */
+ lsn_ctr_1_t os_log_written;
+
+ /** Number of writes being done to the log files */
+ lint_ctr_1_t os_log_pending_writes;
+
+ /** We increase this counter, when we don't have enough
+ space in the log buffer and have to flush it */
+ ulint_ctr_1_t log_waits;
+
+ /** Count the number of times the doublewrite buffer was flushed */
+ ulint_ctr_1_t dblwr_writes;
+
+ /** Store the number of pages that have been flushed to the
+ doublewrite buffer */
+ ulint_ctr_1_t dblwr_pages_written;
+
+ /** Store the number of write requests issued */
+ ulint_ctr_1_t buf_pool_write_requests;
+
+ /** Store the number of times when we had to wait for a free page
+ in the buffer pool. It happens when the buffer pool is full and we
+ need to make a flush, in order to be able to read or create a page. */
+ ulint_ctr_1_t buf_pool_wait_free;
+
+ /** Count the number of pages that were written from buffer
+ pool to the disk */
+ ulint_ctr_1_t buf_pool_flushed;
+
+ /** Number of buffer pool reads that led to the reading of
+ a disk page */
+ ulint_ctr_1_t buf_pool_reads;
+
+ /** Number of data read in total (in bytes) */
+ ulint_ctr_1_t data_read;
+
+ /** Wait time of database locks */
+ ib_int64_ctr_1_t n_lock_wait_time;
+
+ /** Number of database lock waits */
+ ulint_ctr_1_t n_lock_wait_count;
+
+ /** Number of threads currently waiting on database locks */
+ lint_ctr_1_t n_lock_wait_current_count;
+
+ /** Number of rows read. */
+ ulint_ctr_64_t n_rows_read;
+
+ /** Number of rows updated */
+ ulint_ctr_64_t n_rows_updated;
+
+ /** Number of rows deleted */
+ ulint_ctr_64_t n_rows_deleted;
+
+ /** Number of rows inserted */
+ ulint_ctr_64_t n_rows_inserted;
+};
extern const char* srv_main_thread_op_info;
/** Prefix used by MySQL to indicate pre-5.1 table name encoding */
extern const char srv_mysql50_table_name_prefix[10];
-/* When this event is set the lock timeout and InnoDB monitor
-thread starts running */
-extern os_event_t srv_lock_timeout_thread_event;
-
/* The monitor thread waits on this event. */
extern os_event_t srv_monitor_event;
-/* The lock timeout thread waits on this event. */
-extern os_event_t srv_timeout_event;
-
/* The error monitor thread waits on this event. */
extern os_event_t srv_error_event;
@@ -89,20 +158,20 @@ at a time */
#define SRV_AUTO_EXTEND_INCREMENT \
(srv_auto_extend_increment * ((1024 * 1024) / UNIV_PAGE_SIZE))
-/* Mutex for locking srv_monitor_file */
-extern mutex_t srv_monitor_file_mutex;
+/* Mutex for locking srv_monitor_file. Not created if srv_read_only_mode */
+extern ib_mutex_t srv_monitor_file_mutex;
/* Temporary file for innodb monitor output */
extern FILE* srv_monitor_file;
-/* Mutex for locking srv_dict_tmpfile.
+/* Mutex for locking srv_dict_tmpfile. Only created if !srv_read_only_mode.
This mutex has a very high rank; threads reserving it should not
be holding any InnoDB latches. */
-extern mutex_t srv_dict_tmpfile_mutex;
+extern ib_mutex_t srv_dict_tmpfile_mutex;
/* Temporary file for output from the data dictionary */
extern FILE* srv_dict_tmpfile;
-/* Mutex for locking srv_misc_tmpfile.
+/* Mutex for locking srv_misc_tmpfile. Only created if !srv_read_only_mode.
This mutex has a very low rank; threads reserving it should not
acquire any further latches or sleep before releasing this one. */
-extern mutex_t srv_misc_tmpfile_mutex;
+extern ib_mutex_t srv_misc_tmpfile_mutex;
/* Temporary file for miscellanous diagnostic output */
extern FILE* srv_misc_tmpfile;
@@ -114,6 +183,10 @@ extern char* srv_data_home;
extern char* srv_arch_dir;
#endif /* UNIV_LOG_ARCHIVE */
+/** Set if InnoDB must operate in read-only mode. We don't do any
+recovery and open all tables in RO mode instead of RW mode. We don't
+sync the max trx id to disk either. */
+extern my_bool srv_read_only_mode;
/** store to its own file each table created by an user; data
dictionary tables are in the system tablespace 0 */
extern my_bool srv_file_per_table;
@@ -134,8 +207,10 @@ extern ulint srv_max_file_format_at_startup;
on duplicate key checking and foreign key checking */
extern ibool srv_locks_unsafe_for_binlog;
-/* Variable specifying the FTS parallel sort buffer size */
+/** Sort buffer size in index creation */
extern ulong srv_sort_buf_size;
+/** Maximum modification log file size for online index creation */
+extern unsigned long long srv_online_max_size;
/* If this flag is TRUE, then we will use the native aio of the
OS (provided we compiled Innobase with it in), otherwise we will
@@ -153,6 +228,9 @@ extern char* srv_undo_dir;
/** Number of undo tablespaces to use. */
extern ulong srv_undo_tablespaces;
+/** The number of UNDO tablespaces that are open and ready to use. */
+extern ulint srv_undo_tablespaces_open;
+
/* The number of undo segments to use */
extern ulong srv_undo_logs;
@@ -163,17 +241,20 @@ extern ulint* srv_data_file_is_raw_partition;
extern ibool srv_auto_extend_last_data_file;
extern ulint srv_last_file_size_max;
-extern char** srv_log_group_home_dirs;
+extern char* srv_log_group_home_dir;
#ifndef UNIV_HOTBACKUP
extern ulong srv_auto_extend_increment;
extern ibool srv_created_new_raw;
-extern ulint srv_n_log_groups;
-extern ulint srv_n_log_files;
+/** Maximum number of srv_n_log_files, or innodb_log_files_in_group */
+#define SRV_N_LOG_FILES_MAX 100
+extern ulong srv_n_log_files;
extern ib_uint64_t srv_log_file_size;
+extern ib_uint64_t srv_log_file_size_requested;
extern ulint srv_log_buffer_size;
extern ulong srv_flush_log_at_trx_commit;
+extern uint srv_flush_log_at_timeout;
extern char srv_adaptive_flushing;
/* If this flag is TRUE, then we will load the indexes' (and tables') metadata
@@ -195,7 +276,7 @@ extern ulong srv_n_page_hash_locks; /*!< number of locks to
protect buf_pool->page_hash */
extern ulong srv_LRU_scan_depth; /*!< Scan depth for LRU
flush batch */
-extern my_bool srv_flush_neighbors; /*!< whether or not to flush
+extern ulong srv_flush_neighbors; /*!< whether or not to flush
neighbors of a block */
extern ulint srv_buf_pool_old_size; /*!< previously requested size */
extern ulint srv_buf_pool_curr_size; /*!< current size in bytes */
@@ -210,6 +291,12 @@ extern ulint srv_n_write_io_threads;
/* Number of IO operations per second the server can do */
extern ulong srv_io_capacity;
+
+/* We use this dummy default value at startup for max_io_capacity.
+The real value is set based on the value of io_capacity. */
+#define SRV_MAX_IO_CAPACITY_DUMMY_DEFAULT (~0UL)
+#define SRV_MAX_IO_CAPACITY_LIMIT (~0UL)
+extern ulong srv_max_io_capacity;
/* Returns the number of IO operations that is X percent of the
capacity. PCT_IO(5) -> returns the number of IO operations that
is 5% of the max where max is srv_io_capacity. */
@@ -232,9 +319,16 @@ extern ulint srv_win_file_flush_method;
extern ulint srv_max_n_open_files;
-extern ulint srv_max_dirty_pages_pct;
+extern ulong srv_max_dirty_pages_pct;
+extern ulong srv_max_dirty_pages_pct_lwm;
-extern ulint srv_force_recovery;
+extern ulong srv_adaptive_flushing_lwm;
+extern ulong srv_flushing_avg_loops;
+
+extern ulong srv_force_recovery;
+#ifndef DBUG_OFF
+extern ulong srv_force_recovery_crash;
+#endif /* !DBUG_OFF */
extern ulint srv_fast_shutdown; /*!< If this is 1, do not do a
purge and index buffer merge.
@@ -246,7 +340,9 @@ extern ulint srv_fast_shutdown; /*!< If this is 1, do not do a
extern ibool srv_innodb_status;
extern unsigned long long srv_stats_transient_sample_pages;
+extern my_bool srv_stats_persistent;
extern unsigned long long srv_stats_persistent_sample_pages;
+extern my_bool srv_stats_auto_recalc;
extern ibool srv_use_doublewrite_buf;
extern ulong srv_doublewrite_batch_size;
@@ -264,11 +360,6 @@ extern ulong srv_max_purge_lag_delay;
extern ulong srv_replication_delay;
/*-------------------------------------------*/
-extern ulint srv_n_rows_inserted;
-extern ulint srv_n_rows_updated;
-extern ulint srv_n_rows_deleted;
-extern ulint srv_n_rows_read;
-
extern ibool srv_print_innodb_monitor;
extern ibool srv_print_innodb_lock_monitor;
extern ibool srv_print_innodb_tablespace_monitor;
@@ -279,21 +370,21 @@ extern ibool srv_print_verbose_log;
"tables instead, see " REFMAN "innodb-i_s-tables.html"
extern ibool srv_print_innodb_table_monitor;
-extern ibool srv_lock_timeout_active;
extern ibool srv_monitor_active;
extern ibool srv_error_monitor_active;
/* TRUE during the lifetime of the buffer pool dump/load thread */
extern ibool srv_buf_dump_thread_active;
+/* TRUE during the lifetime of the stats thread */
+extern ibool srv_dict_stats_thread_active;
+
extern ulong srv_n_spin_wait_rounds;
extern ulong srv_n_free_tickets_to_enter;
extern ulong srv_thread_sleep_delay;
extern ulong srv_spin_wait_delay;
extern ibool srv_priority_boost;
-extern ulint srv_n_lock_wait_count;
-
extern ulint srv_truncated_status_writes;
extern ulint srv_available_undo_logs;
@@ -314,16 +405,21 @@ extern ibool srv_print_latch_waits;
# define srv_print_latch_waits FALSE
#endif /* UNIV_DEBUG */
-extern ulint srv_fatal_semaphore_wait_threshold;
-extern ulint srv_dml_needed_delay;
+#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
+extern my_bool srv_ibuf_disable_background_merge;
+#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
#ifdef UNIV_DEBUG
extern my_bool srv_purge_view_update_only_debug;
#endif /* UNIV_DEBUG */
+extern ulint srv_fatal_semaphore_wait_threshold;
+#define SRV_SEMAPHORE_WAIT_EXTENSION 7200
+extern ulint srv_dml_needed_delay;
+
#ifndef HAVE_ATOMIC_BUILTINS
/** Mutex protecting some server global variables. */
-extern mutex_t server_mutex;
+extern ib_mutex_t server_mutex;
#endif /* !HAVE_ATOMIC_BUILTINS */
#define SRV_MAX_N_IO_THREADS 130
@@ -333,22 +429,6 @@ i/o handler thread */
extern const char* srv_io_thread_op_info[];
extern const char* srv_io_thread_function[];
-/* the number of the log write requests done */
-extern ulint srv_log_write_requests;
-
-/* the number of physical writes to the log performed */
-extern ulint srv_log_writes;
-
-/* amount of data written to the log files in bytes */
-extern lsn_t srv_os_log_written;
-
-/* amount of writes being done to the log files */
-extern ulint srv_os_log_pending_writes;
-
-/* we increase this counter, when there we don't have enough space in the
-log buffer and have to flush it */
-extern ulint srv_log_waits;
-
/* the number of purge threads to use from the worker pool (currently 0 or 1) */
extern ulong srv_n_purge_threads;
@@ -358,50 +438,16 @@ extern ulong srv_purge_batch_size;
/* the number of sync wait arrays */
extern ulong srv_sync_array_size;
-/* variable that counts amount of data read in total (in bytes) */
-extern ulint srv_data_read;
-
-/* here we count the amount of data written in total (in bytes) */
-extern ulint srv_data_written;
-
-/* this variable counts the amount of times, when the doublewrite buffer
-was flushed */
-extern ulint srv_dblwr_writes;
-
-/* here we store the number of pages that have been flushed to the
-doublewrite buffer */
-extern ulint srv_dblwr_pages_written;
-
-/* in this variable we store the number of write requests issued */
-extern ulint srv_buf_pool_write_requests;
-
-/* here we store the number of times when we had to wait for a free page
-in the buffer pool. It happens when the buffer pool is full and we need
-to make a flush, in order to be able to read or create a page. */
-extern ulint srv_buf_pool_wait_free;
-
-/* variable to count the number of pages that were written from the
-buffer pool to disk */
-extern ulint srv_buf_pool_flushed;
-
-/** Number of buffer pool reads that led to the
-reading of a disk page */
-extern ulint srv_buf_pool_reads;
-
-/** print all user-level transactions deadlocks to mysqld stderr */
+/* print all user-level transactions deadlocks to mysqld stderr */
extern my_bool srv_print_all_deadlocks;
-/** Status variables to be passed to MySQL */
-typedef struct export_var_struct export_struc;
-
-/** Thread slot in the thread table */
-typedef struct srv_slot_struct srv_slot_t;
-
-/** Thread table is an array of slots */
-typedef srv_slot_t srv_table_t;
+extern my_bool srv_cmp_per_index_enabled;
/** Status variables to be passed to MySQL */
-extern export_struc export_vars;
+extern struct export_var_t export_vars;
+
+/** Global counters */
+extern srv_stats_t srv_stats;
# ifdef UNIV_PFS_THREAD
/* Keys to register InnoDB threads with performance schema */
@@ -413,19 +459,20 @@ extern mysql_pfs_key_t srv_error_monitor_thread_key;
extern mysql_pfs_key_t srv_monitor_thread_key;
extern mysql_pfs_key_t srv_master_thread_key;
extern mysql_pfs_key_t srv_purge_thread_key;
+extern mysql_pfs_key_t recv_writer_thread_key;
/* This macro register the current thread and its key with performance
schema */
# define pfs_register_thread(key) \
do { \
- struct PSI_thread* psi = PSI_CALL(new_thread)(key, NULL, 0);\
- PSI_CALL(set_thread)(psi); \
+ struct PSI_thread* psi = PSI_THREAD_CALL(new_thread)(key, NULL, 0);\
+ PSI_THREAD_CALL(set_thread)(psi); \
} while (0)
/* This macro delist the current thread from performance schema */
# define pfs_delete_thread() \
do { \
- PSI_CALL(delete_current_thread)(); \
+ PSI_THREAD_CALL(delete_current_thread)(); \
} while (0)
# endif /* UNIV_PFS_THREAD */
@@ -448,8 +495,19 @@ enum {
when writing data files, but do flush
after writing to log files */
SRV_UNIX_NOSYNC, /*!< do not flush after writing */
- SRV_UNIX_O_DIRECT /*!< invoke os_file_set_nocache() on
- data files */
+ SRV_UNIX_O_DIRECT, /*!< invoke os_file_set_nocache() on
+ data files. This implies using
+ non-buffered IO but still using fsync,
+ the reason for which is that some FS
+ do not flush meta-data when
+ unbuffered IO happens */
+ SRV_UNIX_O_DIRECT_NO_FSYNC
+ /*!< do not use fsync() when using
+ direct IO i.e.: it can be set to avoid
+ the fsync() call that we make when
+ using SRV_UNIX_O_DIRECT. However, in
+ this case user/DBA should be sure about
+ the integrity of the meta-data */
};
/** Alternatives for file i/o in Windows */
@@ -508,10 +566,9 @@ enum srv_thread_type {
};
/*********************************************************************//**
-Boots Innobase server.
-@return DB_SUCCESS or error code */
+Boots Innobase server. */
UNIV_INTERN
-ulint
+void
srv_boot(void);
/*==========*/
/*********************************************************************//**
@@ -542,6 +599,12 @@ srv_set_io_thread_op_info(
ulint i, /*!< in: the 'segment' of the i/o thread */
const char* str); /*!< in: constant char string describing the
state */
+/*********************************************************************//**
+Resets the info describing an i/o thread current state. */
+UNIV_INTERN
+void
+srv_reset_io_thread_op_info();
+/*=========================*/
/*******************************************************************//**
Tells the purge thread that there has been activity in the database
and wakes up the purge thread if it is suspended (not sleeping). Note
@@ -723,7 +786,7 @@ srv_purge_wakeup(void);
/*==================*/
/** Status variables to be passed to MySQL */
-struct export_var_struct{
+struct export_var_t{
ulint innodb_data_pending_reads; /*!< Pending reads */
ulint innodb_data_pending_writes; /*!< Pending writes */
ulint innodb_data_pending_fsyncs; /*!< Pending fsyncs */
@@ -783,14 +846,14 @@ struct export_var_struct{
ulint innodb_truncated_status_writes; /*!< srv_truncated_status_writes */
ulint innodb_available_undo_logs; /*!< srv_available_undo_logs */
#ifdef UNIV_DEBUG
- ulint innodb_purge_trx_id_age; /*!< max_trx_id - purged trx_id */
+ ulint innodb_purge_trx_id_age; /*!< rw_max_trx_id - purged trx_id */
ulint innodb_purge_view_trx_id_age; /*!< rw_max_trx_id
- purged view's min trx_id */
#endif /* UNIV_DEBUG */
};
/** Thread slot in the thread table. */
-struct srv_slot_struct{
+struct srv_slot_t{
srv_thread_type type; /*!< thread type: user,
utility etc. */
ibool in_use; /*!< TRUE if this slot
@@ -819,6 +882,7 @@ struct srv_slot_struct{
# define srv_use_native_aio FALSE
# define srv_force_recovery 0UL
# define srv_set_io_thread_op_info(t,info) ((void) 0)
+# define srv_reset_io_thread_op_info() ((void) 0)
# define srv_is_being_started 0
# define srv_win_file_flush_method SRV_WIN_IO_UNBUFFERED
# define srv_unix_file_flush_method SRV_UNIX_O_DSYNC
diff --git a/storage/innobase/include/srv0start.h b/storage/innobase/include/srv0start.h
index 9d948675011..e136f30f96a 100644
--- a/storage/innobase/include/srv0start.h
+++ b/storage/innobase/include/srv0start.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2009, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -83,24 +83,50 @@ Starts Innobase and creates a new database if database files
are not found and the user wants.
@return DB_SUCCESS or error code */
UNIV_INTERN
-int
+dberr_t
innobase_start_or_create_for_mysql(void);
/*====================================*/
/****************************************************************//**
Shuts down the Innobase database.
@return DB_SUCCESS or error code */
UNIV_INTERN
-int
+dberr_t
innobase_shutdown_for_mysql(void);
/********************************************************************
Signal all per-table background threads to shutdown, and wait for them to do
so. */
-
+UNIV_INTERN
void
srv_shutdown_table_bg_threads(void);
-
/*=============================*/
+
+/*************************************************************//**
+Copy the file path component of the physical file to parameter. It will
+copy up to and including the terminating path separator.
+@return number of bytes copied or ULINT_UNDEFINED if destination buffer
+ is smaller than the path to be copied. */
+UNIV_INTERN
+ulint
+srv_path_copy(
+/*==========*/
+ char* dest, /*!< out: destination buffer */
+ ulint dest_len, /*!< in: max bytes to copy */
+ const char* basedir, /*!< in: base directory */
+ const char* table_name) /*!< in: source table name */
+ __attribute__((nonnull, warn_unused_result));
+
+/*****************************************************************//**
+Get the meta-data filename from the table name. */
+UNIV_INTERN
+void
+srv_get_meta_data_filename(
+/*======================*/
+ dict_table_t* table, /*!< in: table */
+ char* filename, /*!< out: filename */
+ ulint max_len) /*!< in: filename max length */
+ __attribute__((nonnull));
+
/** Log sequence number at shutdown */
extern lsn_t srv_shutdown_lsn;
/** Log sequence number immediately after startup */
diff --git a/storage/innobase/include/sync0arr.h b/storage/innobase/include/sync0arr.h
index 56f9ff78c49..bb4d1037a62 100644
--- a/storage/innobase/include/sync0arr.h
+++ b/storage/innobase/include/sync0arr.h
@@ -32,9 +32,9 @@ Created 9/5/1995 Heikki Tuuri
#include "os0thread.h"
/** Synchronization wait array cell */
-typedef struct sync_cell_struct sync_cell_t;
+struct sync_cell_t;
/** Synchronization wait array */
-typedef struct sync_array_struct sync_array_t;
+struct sync_array_t;
/******************************************************************//**
Reserves a wait array cell for waiting for an object.
diff --git a/storage/innobase/include/sync0rw.h b/storage/innobase/include/sync0rw.h
index b0c21d0c76b..c268098d1ea 100644
--- a/storage/innobase/include/sync0rw.h
+++ b/storage/innobase/include/sync0rw.h
@@ -36,6 +36,7 @@ Created 9/11/1995 Heikki Tuuri
#include "univ.i"
#ifndef UNIV_HOTBACKUP
#include "ut0lst.h"
+#include "ut0counter.h"
#include "sync0sync.h"
#include "os0sync.h"
@@ -44,6 +45,43 @@ in MySQL: */
#undef rw_lock_t
#endif /* !UNIV_HOTBACKUP */
+/** Counters for RW locks. */
+struct rw_lock_stats_t {
+ typedef ib_counter_t<ib_int64_t, IB_N_SLOTS> ib_int64_counter_t;
+
+ /** number of spin waits on rw-latches,
+ resulted during shared (read) locks */
+ ib_int64_counter_t rw_s_spin_wait_count;
+
+ /** number of spin loop rounds on rw-latches,
+ resulted during shared (read) locks */
+ ib_int64_counter_t rw_s_spin_round_count;
+
+ /** number of OS waits on rw-latches,
+ resulted during shared (read) locks */
+ ib_int64_counter_t rw_s_os_wait_count;
+
+ /** number of unlocks (that unlock shared locks),
+ set only when UNIV_SYNC_PERF_STAT is defined */
+ ib_int64_counter_t rw_s_exit_count;
+
+ /** number of spin waits on rw-latches,
+ resulted during exclusive (write) locks */
+ ib_int64_counter_t rw_x_spin_wait_count;
+
+ /** number of spin loop rounds on rw-latches,
+ resulted during exclusive (write) locks */
+ ib_int64_counter_t rw_x_spin_round_count;
+
+ /** number of OS waits on rw-latches,
+ resulted during exclusive (write) locks */
+ ib_int64_counter_t rw_x_os_wait_count;
+
+ /** number of unlocks (that unlock exclusive locks),
+ set only when UNIV_SYNC_PERF_STAT is defined */
+ ib_int64_counter_t rw_x_exit_count;
+};
+
/* Latch types; these are used also in btr0btr.h: keep the numerical values
smaller than 30 and the order of the numerical values like below! */
#define RW_S_LATCH 1
@@ -57,22 +95,22 @@ of concurrent read locks before the rw_lock breaks. The current value of
0x00100000 allows 1,048,575 concurrent readers and 2047 recursive writers.*/
#define X_LOCK_DECR 0x00100000
-typedef struct rw_lock_struct rw_lock_t;
+struct rw_lock_t;
#ifdef UNIV_SYNC_DEBUG
-typedef struct rw_lock_debug_struct rw_lock_debug_t;
+struct rw_lock_debug_t;
#endif /* UNIV_SYNC_DEBUG */
typedef UT_LIST_BASE_NODE_T(rw_lock_t) rw_lock_list_t;
extern rw_lock_list_t rw_lock_list;
-extern mutex_t rw_lock_list_mutex;
+extern ib_mutex_t rw_lock_list_mutex;
#ifdef UNIV_SYNC_DEBUG
/* The global mutex which protects debug info lists of all rw-locks.
To modify the debug info list of an rw-lock, this mutex has to be
acquired in addition to the mutex protecting the lock. */
-extern mutex_t rw_lock_debug_mutex;
+extern ib_mutex_t rw_lock_debug_mutex;
extern os_event_t rw_lock_debug_event; /*!< If deadlock detection does
not get immediately the mutex it
may wait for this event */
@@ -80,30 +118,8 @@ extern ibool rw_lock_debug_waiters; /*!< This is set to TRUE, if
there may be waiters for the event */
#endif /* UNIV_SYNC_DEBUG */
-/** number of spin waits on rw-latches,
-resulted during exclusive (write) locks */
-extern ib_int64_t rw_s_spin_wait_count;
-/** number of spin loop rounds on rw-latches,
-resulted during exclusive (write) locks */
-extern ib_int64_t rw_s_spin_round_count;
-/** number of unlocks (that unlock shared locks),
-set only when UNIV_SYNC_PERF_STAT is defined */
-extern ib_int64_t rw_s_exit_count;
-/** number of OS waits on rw-latches,
-resulted during shared (read) locks */
-extern ib_int64_t rw_s_os_wait_count;
-/** number of spin waits on rw-latches,
-resulted during shared (read) locks */
-extern ib_int64_t rw_x_spin_wait_count;
-/** number of spin loop rounds on rw-latches,
-resulted during shared (read) locks */
-extern ib_int64_t rw_x_spin_round_count;
-/** number of OS waits on rw-latches,
-resulted during exclusive (write) locks */
-extern ib_int64_t rw_x_os_wait_count;
-/** number of unlocks (that unlock exclusive locks),
-set only when UNIV_SYNC_PERF_STAT is defined */
-extern ib_int64_t rw_x_exit_count;
+/** Counters for RW locks. */
+extern rw_lock_stats_t rw_lock_stats;
#ifdef UNIV_PFS_RWLOCK
/* Following are rwlock keys used to register with MySQL
@@ -121,10 +137,10 @@ extern mysql_pfs_key_t checkpoint_lock_key;
extern mysql_pfs_key_t fil_space_latch_key;
extern mysql_pfs_key_t fts_cache_rw_lock_key;
extern mysql_pfs_key_t fts_cache_init_rw_lock_key;
-extern mysql_pfs_key_t index_tree_rw_lock_key;
extern mysql_pfs_key_t trx_i_s_cache_lock_key;
extern mysql_pfs_key_t trx_purge_latch_key;
extern mysql_pfs_key_t index_tree_rw_lock_key;
+extern mysql_pfs_key_t index_online_log_key;
extern mysql_pfs_key_t dict_table_stats_latch_key;
extern mysql_pfs_key_t trx_sys_rw_lock_key;
extern mysql_pfs_key_t hash_table_rw_lock_key;
@@ -159,6 +175,9 @@ unlocking, not the corresponding function. */
# define rw_lock_s_lock(M) \
rw_lock_s_lock_func((M), 0, __FILE__, __LINE__)
+# define rw_lock_s_lock_inline(M, P, F, L) \
+ rw_lock_s_lock_func((M), (P), (F), (L))
+
# define rw_lock_s_lock_gen(M, P) \
rw_lock_s_lock_func((M), (P), __FILE__, __LINE__)
@@ -175,12 +194,18 @@ unlocking, not the corresponding function. */
# define rw_lock_x_lock(M) \
rw_lock_x_lock_func((M), 0, __FILE__, __LINE__)
+# define rw_lock_x_lock_inline(M, P, F, L) \
+ rw_lock_x_lock_func((M), (P), (F), (L))
+
# define rw_lock_x_lock_gen(M, P) \
rw_lock_x_lock_func((M), (P), __FILE__, __LINE__)
# define rw_lock_x_lock_nowait(M) \
rw_lock_x_lock_func_nowait((M), __FILE__, __LINE__)
+# define rw_lock_x_lock_func_nowait_inline(M, F, L) \
+ rw_lock_x_lock_func_nowait((M), (F), (L))
+
# ifdef UNIV_SYNC_DEBUG
# define rw_lock_x_unlock_gen(L, P) rw_lock_x_unlock_func(P, L)
# else
@@ -212,6 +237,9 @@ unlocking, not the corresponding function. */
# define rw_lock_s_lock(M) \
pfs_rw_lock_s_lock_func((M), 0, __FILE__, __LINE__)
+# define rw_lock_s_lock_inline(M, P, F, L) \
+ pfs_rw_lock_s_lock_func((M), (P), (F), (L))
+
# define rw_lock_s_lock_gen(M, P) \
pfs_rw_lock_s_lock_func((M), (P), __FILE__, __LINE__)
@@ -227,12 +255,18 @@ unlocking, not the corresponding function. */
# define rw_lock_x_lock(M) \
pfs_rw_lock_x_lock_func((M), 0, __FILE__, __LINE__)
+# define rw_lock_x_lock_inline(M, P, F, L) \
+ pfs_rw_lock_x_lock_func((M), (P), (F), (L))
+
# define rw_lock_x_lock_gen(M, P) \
pfs_rw_lock_x_lock_func((M), (P), __FILE__, __LINE__)
# define rw_lock_x_lock_nowait(M) \
pfs_rw_lock_x_lock_func_nowait((M), __FILE__, __LINE__)
+# define rw_lock_x_lock_func_nowait_inline(M, F, L) \
+ pfs_rw_lock_x_lock_func_nowait((M), (F), (L))
+
# ifdef UNIV_SYNC_DEBUG
# define rw_lock_x_unlock_gen(L, P) pfs_rw_lock_x_unlock_func(P, L)
# else
@@ -367,30 +401,6 @@ rw_lock_x_unlock_func(
been passed to another thread to unlock */
#endif
rw_lock_t* lock); /*!< in/out: rw-lock */
-
-
-/******************************************************************//**
-Low-level function which locks an rw-lock in s-mode when we know that it
-is possible and none else is currently accessing the rw-lock structure.
-Then we can do the locking without reserving the mutex. */
-UNIV_INLINE
-void
-rw_lock_s_lock_direct(
-/*==================*/
- rw_lock_t* lock, /*!< in/out: rw-lock */
- const char* file_name, /*!< in: file name where requested */
- ulint line); /*!< in: line where lock requested */
-/******************************************************************//**
-Low-level function which locks an rw-lock in x-mode when we know that it
-is not locked and none else is currently accessing the rw-lock structure.
-Then we can do the locking without reserving the mutex. */
-UNIV_INLINE
-void
-rw_lock_x_lock_direct(
-/*==================*/
- rw_lock_t* lock, /*!< in/out: rw-lock */
- const char* file_name, /*!< in: file name where requested */
- ulint line); /*!< in: line where lock requested */
/******************************************************************//**
This function is used in the insert buffer to move the ownership of an
x-latch on a buffer frame to the current thread. The x-latch was set by
@@ -558,7 +568,7 @@ shared locks are allowed. To prevent starving of a writer blocked by
readers, a writer may queue for x-lock by decrementing lock_word: no
new readers will be let in while the thread waits for readers to
exit. */
-struct rw_lock_struct {
+struct rw_lock_t {
volatile lint lock_word;
/*!< Holds the state of the lock. */
volatile ulint waiters;/*!< 1: there are waiters */
@@ -583,7 +593,7 @@ struct rw_lock_struct {
/*!< Event for next-writer to wait on. A thread
must decrement lock_word before waiting. */
#ifndef INNODB_RW_LOCKS_USE_ATOMICS
- mutex_t mutex; /*!< The mutex protecting rw_lock_struct */
+ ib_mutex_t mutex; /*!< The mutex protecting rw_lock_t */
#endif /* INNODB_RW_LOCKS_USE_ATOMICS */
UT_LIST_NODE_T(rw_lock_t) list;
@@ -615,7 +625,7 @@ struct rw_lock_struct {
unsigned last_x_line:14; /*!< Line number where last time x-locked */
#ifdef UNIV_DEBUG
ulint magic_n; /*!< RW_LOCK_MAGIC_N */
-/** Value of rw_lock_struct::magic_n */
+/** Value of rw_lock_t::magic_n */
#define RW_LOCK_MAGIC_N 22643
#endif /* UNIV_DEBUG */
};
@@ -623,7 +633,7 @@ struct rw_lock_struct {
#ifdef UNIV_SYNC_DEBUG
/** The structure for storing debug info of an rw-lock. All access to this
structure must be protected by rw_lock_debug_mutex_enter(). */
-struct rw_lock_debug_struct {
+struct rw_lock_debug_t {
os_thread_id_t thread_id; /*!< The thread id of the thread which
locked the rw-lock */
diff --git a/storage/innobase/include/sync0rw.ic b/storage/innobase/include/sync0rw.ic
index eab89e2619e..8786ad84643 100644
--- a/storage/innobase/include/sync0rw.ic
+++ b/storage/innobase/include/sync0rw.ic
@@ -90,7 +90,7 @@ rw_lock_set_waiter_flag(
rw_lock_t* lock) /*!< in/out: rw-lock */
{
#ifdef INNODB_RW_LOCKS_USE_ATOMICS
- os_compare_and_swap_ulint(&lock->waiters, 0, 1);
+ (void) os_compare_and_swap_ulint(&lock->waiters, 0, 1);
#else /* INNODB_RW_LOCKS_USE_ATOMICS */
lock->waiters = 1;
#endif /* INNODB_RW_LOCKS_USE_ATOMICS */
@@ -107,7 +107,7 @@ rw_lock_reset_waiter_flag(
rw_lock_t* lock) /*!< in/out: rw-lock */
{
#ifdef INNODB_RW_LOCKS_USE_ATOMICS
- os_compare_and_swap_ulint(&lock->waiters, 1, 0);
+ (void) os_compare_and_swap_ulint(&lock->waiters, 1, 0);
#else /* INNODB_RW_LOCKS_USE_ATOMICS */
lock->waiters = 0;
#endif /* INNODB_RW_LOCKS_USE_ATOMICS */
@@ -128,7 +128,7 @@ rw_lock_get_writer(
/* return NOT_LOCKED in s-lock state, like the writer
member of the old lock implementation. */
return(RW_LOCK_NOT_LOCKED);
- } else if (((-lock_word) % X_LOCK_DECR) == 0) {
+ } else if ((lock_word == 0) || (lock_word <= -X_LOCK_DECR)) {
return(RW_LOCK_EX);
} else {
ut_ad(lock_word > -X_LOCK_DECR);
@@ -158,7 +158,7 @@ rw_lock_get_reader_count(
#ifndef INNODB_RW_LOCKS_USE_ATOMICS
UNIV_INLINE
-mutex_t*
+ib_mutex_t*
rw_lock_get_mutex(
/*==============*/
rw_lock_t* lock)
@@ -178,11 +178,10 @@ rw_lock_get_x_lock_count(
const rw_lock_t* lock) /*!< in: rw-lock */
{
lint lock_copy = lock->lock_word;
- /* If there is a reader, lock_word is not divisible by X_LOCK_DECR */
- if (lock_copy > 0 || (-lock_copy) % X_LOCK_DECR != 0) {
+ if ((lock_copy != 0) && (lock_copy > -X_LOCK_DECR)) {
return(0);
}
- return(((-lock_copy) / X_LOCK_DECR) + 1);
+ return((lock_copy == 0) ? 1 : (2 - (lock_copy + X_LOCK_DECR)));
}
/******************************************************************//**
@@ -325,58 +324,6 @@ rw_lock_s_lock_low(
}
/******************************************************************//**
-Low-level function which locks an rw-lock in s-mode when we know that it
-is possible and none else is currently accessing the rw-lock structure.
-Then we can do the locking without reserving the mutex. */
-UNIV_INLINE
-void
-rw_lock_s_lock_direct(
-/*==================*/
- rw_lock_t* lock, /*!< in/out: rw-lock */
- const char* file_name, /*!< in: file name where requested */
- ulint line) /*!< in: line where lock requested */
-{
- ut_ad(lock->lock_word == X_LOCK_DECR);
-
- /* Indicate there is a new reader by decrementing lock_word */
- lock->lock_word--;
-
- lock->last_s_file_name = file_name;
- lock->last_s_line = line;
-
-#ifdef UNIV_SYNC_DEBUG
- rw_lock_add_debug_info(lock, 0, RW_LOCK_SHARED, file_name, line);
-#endif
-}
-
-/******************************************************************//**
-Low-level function which locks an rw-lock in x-mode when we know that it
-is not locked and none else is currently accessing the rw-lock structure.
-Then we can do the locking without reserving the mutex. */
-UNIV_INLINE
-void
-rw_lock_x_lock_direct(
-/*==================*/
- rw_lock_t* lock, /*!< in/out: rw-lock */
- const char* file_name, /*!< in: file name where requested */
- ulint line) /*!< in: line where lock requested */
-{
- ut_ad(rw_lock_validate(lock));
- ut_ad(lock->lock_word == X_LOCK_DECR);
-
- lock->lock_word -= X_LOCK_DECR;
- lock->writer_thread = os_thread_get_curr_id();
- lock->recursive = TRUE;
-
- lock->last_x_file_name = file_name;
- lock->last_x_line = line;
-
-#ifdef UNIV_SYNC_DEBUG
- rw_lock_add_debug_info(lock, 0, RW_LOCK_EX, file_name, line);
-#endif
-}
-
-/******************************************************************//**
NOTE! Use the corresponding macro, not directly this function! Lock an
rw-lock in shared mode for the current thread. If the rw-lock is locked
in exclusive mode, or there is an exclusive lock request waiting, the
@@ -458,10 +405,11 @@ rw_lock_x_lock_func_nowait(
/* Relock: this lock_word modification is safe since no other
threads can modify (lock, unlock, or reserve) lock_word while
there is an exclusive writer and this is the writer thread. */
- lock->lock_word -= X_LOCK_DECR;
-
- /* Recursive x-locks must be multiples of X_LOCK_DECR. */
- ut_ad(((-lock->lock_word) % X_LOCK_DECR) == 0);
+ if (lock->lock_word == 0) {
+ lock->lock_word = -X_LOCK_DECR;
+ } else {
+ lock->lock_word--;
+ }
/* Watch for too many recursive locks */
ut_ad(lock->lock_word < 0);
@@ -494,7 +442,9 @@ rw_lock_s_unlock_func(
#endif
rw_lock_t* lock) /*!< in/out: rw-lock */
{
- ut_ad((lock->lock_word % X_LOCK_DECR) != 0);
+ ut_ad(lock->lock_word > -X_LOCK_DECR);
+ ut_ad(lock->lock_word != 0);
+ ut_ad(lock->lock_word < X_LOCK_DECR);
#ifdef UNIV_SYNC_DEBUG
rw_lock_remove_debug_info(lock, pass, RW_LOCK_SHARED);
@@ -530,7 +480,7 @@ rw_lock_x_unlock_func(
#endif
rw_lock_t* lock) /*!< in/out: rw-lock */
{
- ut_ad((lock->lock_word % X_LOCK_DECR) == 0);
+ ut_ad(lock->lock_word == 0 || lock->lock_word <= -X_LOCK_DECR);
/* lock->recursive flag also indicates if lock->writer_thread is
valid or stale. If we are the last of the recursive callers
@@ -541,15 +491,23 @@ rw_lock_x_unlock_func(
if (lock->lock_word == 0) {
/* Last caller in a possible recursive chain. */
lock->recursive = FALSE;
- UNIV_MEM_INVALID(&lock->writer_thread,
- sizeof lock->writer_thread);
}
#ifdef UNIV_SYNC_DEBUG
rw_lock_remove_debug_info(lock, pass, RW_LOCK_EX);
#endif
- if (rw_lock_lock_word_incr(lock, X_LOCK_DECR) == X_LOCK_DECR) {
+ ulint x_lock_incr;
+ if (lock->lock_word == 0) {
+ x_lock_incr = X_LOCK_DECR;
+ } else if (lock->lock_word == -X_LOCK_DECR) {
+ x_lock_incr = X_LOCK_DECR;
+ } else {
+ ut_ad(lock->lock_word < -X_LOCK_DECR);
+ x_lock_incr = 1;
+ }
+
+ if (rw_lock_lock_word_incr(lock, x_lock_incr) == X_LOCK_DECR) {
/* Lock is now free. May have to signal read/write waiters.
We do not need to signal wait_ex waiters, since they cannot
exist when there is a writer. */
@@ -590,7 +548,7 @@ pfs_rw_lock_create_func(
ulint cline) /*!< in: file line where created */
{
/* Initialize the rwlock for performance schema */
- lock->pfs_psi = PSI_CALL(init_rwlock)(key, lock);
+ lock->pfs_psi = PSI_RWLOCK_CALL(init_rwlock)(key, lock);
/* The actual function to initialize an rwlock */
rw_lock_create_func(lock,
@@ -623,13 +581,13 @@ pfs_rw_lock_x_lock_func(
PSI_rwlock_locker_state state;
/* Record the entry of rw x lock request in performance schema */
- locker = PSI_CALL(start_rwlock_wrwait)(
+ locker = PSI_RWLOCK_CALL(start_rwlock_wrwait)(
&state, lock->pfs_psi, PSI_RWLOCK_WRITELOCK, file_name, line);
rw_lock_x_lock_func(lock, pass, file_name, line);
if (locker != NULL)
- PSI_CALL(end_rwlock_wrwait)(locker, 0);
+ PSI_RWLOCK_CALL(end_rwlock_wrwait)(locker, 0);
}
else
{
@@ -659,13 +617,13 @@ pfs_rw_lock_x_lock_func_nowait(
PSI_rwlock_locker_state state;
/* Record the entry of rw x lock request in performance schema */
- locker = PSI_CALL(start_rwlock_wrwait)(
+ locker = PSI_RWLOCK_CALL(start_rwlock_wrwait)(
&state, lock->pfs_psi, PSI_RWLOCK_WRITELOCK, file_name, line);
ret = rw_lock_x_lock_func_nowait(lock, file_name, line);
if (locker != NULL)
- PSI_CALL(end_rwlock_wrwait)(locker, ret);
+ PSI_RWLOCK_CALL(end_rwlock_wrwait)(locker, ret);
}
else
{
@@ -686,7 +644,7 @@ pfs_rw_lock_free_func(
{
if (lock->pfs_psi != NULL)
{
- PSI_CALL(destroy_rwlock)(lock->pfs_psi);
+ PSI_RWLOCK_CALL(destroy_rwlock)(lock->pfs_psi);
lock->pfs_psi = NULL;
}
@@ -714,13 +672,13 @@ pfs_rw_lock_s_lock_func(
PSI_rwlock_locker_state state;
/* Instrumented to inform we are aquiring a shared rwlock */
- locker = PSI_CALL(start_rwlock_rdwait)(
+ locker = PSI_RWLOCK_CALL(start_rwlock_rdwait)(
&state, lock->pfs_psi, PSI_RWLOCK_READLOCK, file_name, line);
rw_lock_s_lock_func(lock, pass, file_name, line);
if (locker != NULL)
- PSI_CALL(end_rwlock_rdwait)(locker, 0);
+ PSI_RWLOCK_CALL(end_rwlock_rdwait)(locker, 0);
}
else
{
@@ -753,13 +711,13 @@ pfs_rw_lock_s_lock_low(
PSI_rwlock_locker_state state;
/* Instrumented to inform we are aquiring a shared rwlock */
- locker = PSI_CALL(start_rwlock_rdwait)(
+ locker = PSI_RWLOCK_CALL(start_rwlock_rdwait)(
&state, lock->pfs_psi, PSI_RWLOCK_READLOCK, file_name, line);
ret = rw_lock_s_lock_low(lock, pass, file_name, line);
if (locker != NULL)
- PSI_CALL(end_rwlock_rdwait)(locker, ret);
+ PSI_RWLOCK_CALL(end_rwlock_rdwait)(locker, ret);
}
else
{
@@ -786,7 +744,7 @@ pfs_rw_lock_x_unlock_func(
{
/* Inform performance schema we are unlocking the lock */
if (lock->pfs_psi != NULL)
- PSI_CALL(unlock_rwlock)(lock->pfs_psi);
+ PSI_RWLOCK_CALL(unlock_rwlock)(lock->pfs_psi);
rw_lock_x_unlock_func(
#ifdef UNIV_SYNC_DEBUG
@@ -812,7 +770,7 @@ pfs_rw_lock_s_unlock_func(
{
/* Inform performance schema we are unlocking the lock */
if (lock->pfs_psi != NULL)
- PSI_CALL(unlock_rwlock)(lock->pfs_psi);
+ PSI_RWLOCK_CALL(unlock_rwlock)(lock->pfs_psi);
rw_lock_s_unlock_func(
#ifdef UNIV_SYNC_DEBUG
diff --git a/storage/innobase/include/sync0sync.h b/storage/innobase/include/sync0sync.h
index 1adcf938903..9950a6fbf6b 100644
--- a/storage/innobase/include/sync0sync.h
+++ b/storage/innobase/include/sync0sync.h
@@ -1,7 +1,8 @@
/*****************************************************************************
-Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2012, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
+Copyright (c) 2012, Facebook Inc.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -95,6 +96,7 @@ extern mysql_pfs_key_t mem_pool_mutex_key;
extern mysql_pfs_key_t mutex_list_mutex_key;
extern mysql_pfs_key_t purge_sys_bh_mutex_key;
extern mysql_pfs_key_t recv_sys_mutex_key;
+extern mysql_pfs_key_t recv_writer_mutex_key;
extern mysql_pfs_key_t rseg_mutex_key;
# ifdef UNIV_SYNC_DEBUG
extern mysql_pfs_key_t rw_lock_debug_mutex_key;
@@ -120,9 +122,13 @@ extern mysql_pfs_key_t srv_sys_tasks_mutex_key;
#ifndef HAVE_ATOMIC_BUILTINS
extern mysql_pfs_key_t srv_conc_mutex_key;
#endif /* !HAVE_ATOMIC_BUILTINS */
+#ifndef HAVE_ATOMIC_BUILTINS_64
+extern mysql_pfs_key_t monitor_mutex_key;
+#endif /* !HAVE_ATOMIC_BUILTINS_64 */
extern mysql_pfs_key_t event_os_mutex_key;
extern mysql_pfs_key_t ut_list_mutex_key;
extern mysql_pfs_key_t os_mutex_key;
+extern mysql_pfs_key_t zip_pad_mutex_key;
#endif /* UNIV_PFS_MUTEX */
/******************************************************************//**
@@ -223,7 +229,7 @@ UNIV_INTERN
void
mutex_create_func(
/*==============*/
- mutex_t* mutex, /*!< in: pointer to memory */
+ ib_mutex_t* mutex, /*!< in: pointer to memory */
#ifdef UNIV_DEBUG
const char* cmutex_name, /*!< in: mutex name */
# ifdef UNIV_SYNC_DEBUG
@@ -242,7 +248,7 @@ UNIV_INTERN
void
mutex_free_func(
/*============*/
- mutex_t* mutex); /*!< in: mutex */
+ ib_mutex_t* mutex); /*!< in: mutex */
/**************************************************************//**
NOTE! The following macro should be used in mutex locking, not the
corresponding function. */
@@ -259,7 +265,7 @@ UNIV_INLINE
void
mutex_enter_func(
/*=============*/
- mutex_t* mutex, /*!< in: pointer to mutex */
+ ib_mutex_t* mutex, /*!< in: pointer to mutex */
const char* file_name, /*!< in: file name where locked */
ulint line); /*!< in: line where locked */
/********************************************************************//**
@@ -271,7 +277,7 @@ UNIV_INTERN
ulint
mutex_enter_nowait_func(
/*====================*/
- mutex_t* mutex, /*!< in: pointer to mutex */
+ ib_mutex_t* mutex, /*!< in: pointer to mutex */
const char* file_name, /*!< in: file name where mutex
requested */
ulint line); /*!< in: line where requested */
@@ -282,7 +288,7 @@ UNIV_INLINE
void
mutex_exit_func(
/*============*/
- mutex_t* mutex); /*!< in: pointer to mutex */
+ ib_mutex_t* mutex); /*!< in: pointer to mutex */
#ifdef UNIV_PFS_MUTEX
@@ -297,7 +303,7 @@ void
pfs_mutex_create_func(
/*==================*/
PSI_mutex_key key, /*!< in: Performance Schema key */
- mutex_t* mutex, /*!< in: pointer to memory */
+ ib_mutex_t* mutex, /*!< in: pointer to memory */
# ifdef UNIV_DEBUG
const char* cmutex_name, /*!< in: mutex name */
# ifdef UNIV_SYNC_DEBUG
@@ -315,7 +321,7 @@ UNIV_INLINE
void
pfs_mutex_enter_func(
/*=================*/
- mutex_t* mutex, /*!< in: pointer to mutex */
+ ib_mutex_t* mutex, /*!< in: pointer to mutex */
const char* file_name, /*!< in: file name where locked */
ulint line); /*!< in: line where locked */
/********************************************************************//**
@@ -328,7 +334,7 @@ UNIV_INLINE
ulint
pfs_mutex_enter_nowait_func(
/*========================*/
- mutex_t* mutex, /*!< in: pointer to mutex */
+ ib_mutex_t* mutex, /*!< in: pointer to mutex */
const char* file_name, /*!< in: file name where mutex
requested */
ulint line); /*!< in: line where requested */
@@ -341,7 +347,7 @@ UNIV_INLINE
void
pfs_mutex_exit_func(
/*================*/
- mutex_t* mutex); /*!< in: pointer to mutex */
+ ib_mutex_t* mutex); /*!< in: pointer to mutex */
/******************************************************************//**
NOTE! Please use the corresponding macro mutex_free(), not directly
@@ -352,7 +358,7 @@ UNIV_INLINE
void
pfs_mutex_free_func(
/*================*/
- mutex_t* mutex); /*!< in: mutex */
+ ib_mutex_t* mutex); /*!< in: mutex */
#endif /* UNIV_PFS_MUTEX */
@@ -390,7 +396,7 @@ UNIV_INTERN
ibool
mutex_validate(
/*===========*/
- const mutex_t* mutex); /*!< in: mutex */
+ const ib_mutex_t* mutex); /*!< in: mutex */
/******************************************************************//**
Checks that the current thread owns the mutex. Works only
in the debug version.
@@ -399,7 +405,7 @@ UNIV_INTERN
ibool
mutex_own(
/*======*/
- const mutex_t* mutex) /*!< in: mutex */
+ const ib_mutex_t* mutex) /*!< in: mutex */
__attribute__((warn_unused_result));
#endif /* UNIV_DEBUG */
#ifdef UNIV_SYNC_DEBUG
@@ -470,7 +476,7 @@ UNIV_INTERN
void
mutex_get_debug_info(
/*=================*/
- mutex_t* mutex, /*!< in: mutex */
+ ib_mutex_t* mutex, /*!< in: mutex */
const char** file_name, /*!< out: file where requested */
ulint* line, /*!< out: line where requested */
os_thread_id_t* thread_id); /*!< out: id of the thread which owns
@@ -490,7 +496,7 @@ UNIV_INLINE
lock_word_t
mutex_get_lock_word(
/*================*/
- const mutex_t* mutex); /*!< in: mutex */
+ const ib_mutex_t* mutex); /*!< in: mutex */
#ifdef UNIV_SYNC_DEBUG
/******************************************************************//**
NOT to be used outside this module except in debugging! Gets the waiters
@@ -500,7 +506,7 @@ UNIV_INLINE
ulint
mutex_get_waiters(
/*==============*/
- const mutex_t* mutex); /*!< in: mutex */
+ const ib_mutex_t* mutex); /*!< in: mutex */
#endif /* UNIV_SYNC_DEBUG */
/*
@@ -662,6 +668,7 @@ or row lock! */
#define SYNC_FTS_CACHE 1005 /* FTS cache rwlock */
#define SYNC_DICT 1000
#define SYNC_DICT_AUTOINC_MUTEX 999
+#define SYNC_STATS_AUTO_RECALC 997
#define SYNC_DICT_HEADER 995
#define SYNC_IBUF_HEADER 914
#define SYNC_IBUF_PESS_INSERT_MUTEX 912
@@ -679,14 +686,16 @@ or row lock! */
#define SYNC_EXTERN_STORAGE 500
#define SYNC_FSP 400
#define SYNC_FSP_PAGE 395
-/*------------------------------------- Insert buffer headers */
+/*------------------------------------- Change buffer headers */
#define SYNC_IBUF_MUTEX 370 /* ibuf_mutex */
-/*------------------------------------- Insert buffer tree */
+/*------------------------------------- Change buffer tree */
#define SYNC_IBUF_INDEX_TREE 360
#define SYNC_IBUF_TREE_NODE_NEW 359
#define SYNC_IBUF_TREE_NODE 358
#define SYNC_IBUF_BITMAP_MUTEX 351
#define SYNC_IBUF_BITMAP 350
+/*------------------------------------- Change log for online create index */
+#define SYNC_INDEX_ONLINE_LOG 340
/*------------------------------------- MySQL query cache mutex */
/*------------------------------------- MySQL binlog mutex */
/*-------------------------------*/
@@ -733,7 +742,7 @@ Do not use its fields directly! The structure used in the spin lock
implementation of a mutual exclusion semaphore. */
/** InnoDB mutex */
-struct mutex_struct {
+struct ib_mutex_t {
os_event_t event; /*!< Used by sync0arr.cc for the wait queue */
volatile lock_word_t lock_word; /*!< lock_word is the target
of the atomic test-and-set instruction when
@@ -748,7 +757,7 @@ struct mutex_struct {
may be) threads waiting in the global wait
array for this mutex to be released.
Otherwise, this is 0. */
- UT_LIST_NODE_T(mutex_t) list; /*!< All allocated mutexes are put into
+ UT_LIST_NODE_T(ib_mutex_t) list; /*!< All allocated mutexes are put into
a list. Pointers to the next and prev. */
#ifdef UNIV_SYNC_DEBUG
const char* file_name; /*!< File where the mutex was locked */
@@ -757,23 +766,17 @@ struct mutex_struct {
#endif /* UNIV_SYNC_DEBUG */
const char* cfile_name;/*!< File name where mutex created */
ulint cline; /*!< Line where created */
+ ulong count_os_wait; /*!< count of os_wait */
#ifdef UNIV_DEBUG
+
+/** Value of mutex_t::magic_n */
+# define MUTEX_MAGIC_N 979585UL
+
os_thread_id_t thread_id; /*!< The thread id of the thread
which locked the mutex. */
ulint magic_n; /*!< MUTEX_MAGIC_N */
-/** Value of mutex_struct::magic_n */
-# define MUTEX_MAGIC_N (ulint)979585
-#endif /* UNIV_DEBUG */
- ulong count_os_wait; /*!< count of os_wait */
-#ifdef UNIV_DEBUG
- ulong count_using; /*!< count of times mutex used */
- ulong count_spin_loop; /*!< count of spin loops */
- ulong count_spin_rounds;/*!< count of spin rounds */
- ulong count_os_yield; /*!< count of os_wait */
- ulonglong lspent_time; /*!< mutex os_wait timer msec */
- ulonglong lmax_spent_time;/*!< mutex os_wait timer msec */
const char* cmutex_name; /*!< mutex name */
- ulint mutex_type; /*!< 0=usual mutex, 1=rw_lock mutex */
+ ulint ib_mutex_type; /*!< 0=usual mutex, 1=rw_lock mutex */
#endif /* UNIV_DEBUG */
#ifdef UNIV_PFS_MUTEX
struct PSI_mutex* pfs_psi; /*!< The performance schema
@@ -799,12 +802,12 @@ extern ibool sync_order_checks_on;
extern ibool sync_initialized;
/** Global list of database mutexes (not OS mutexes) created. */
-typedef UT_LIST_BASE_NODE_T(mutex_t) ut_list_base_node_t;
+typedef UT_LIST_BASE_NODE_T(ib_mutex_t) ut_list_base_node_t;
/** Global list of database mutexes (not OS mutexes) created. */
extern ut_list_base_node_t mutex_list;
/** Mutex protecting the mutex_list variable */
-extern mutex_t mutex_list_mutex;
+extern ib_mutex_t mutex_list_mutex;
#ifndef HAVE_ATOMIC_BUILTINS
/**********************************************************//**
@@ -813,7 +816,7 @@ UNIV_INLINE
void
os_atomic_dec_ulint_func(
/*=====================*/
- mutex_t* mutex, /*!< in: mutex guarding the
+ ib_mutex_t* mutex, /*!< in: mutex guarding the
decrement */
volatile ulint* var, /*!< in/out: variable to
decrement */
@@ -824,7 +827,7 @@ UNIV_INLINE
void
os_atomic_inc_ulint_func(
/*=====================*/
- mutex_t* mutex, /*!< in: mutex guarding the
+ ib_mutex_t* mutex, /*!< in: mutex guarding the
increment */
volatile ulint* var, /*!< in/out: variable to
increment */
diff --git a/storage/innobase/include/sync0sync.ic b/storage/innobase/include/sync0sync.ic
index 746e73ebee7..ad77ad6d5a4 100644
--- a/storage/innobase/include/sync0sync.ic
+++ b/storage/innobase/include/sync0sync.ic
@@ -36,7 +36,7 @@ UNIV_INTERN
void
mutex_set_waiters(
/*==============*/
- mutex_t* mutex, /*!< in: mutex */
+ ib_mutex_t* mutex, /*!< in: mutex */
ulint n); /*!< in: value to set */
/******************************************************************//**
Reserves a mutex for the current thread. If the mutex is reserved, the
@@ -46,7 +46,7 @@ UNIV_INTERN
void
mutex_spin_wait(
/*============*/
- mutex_t* mutex, /*!< in: pointer to mutex */
+ ib_mutex_t* mutex, /*!< in: pointer to mutex */
const char* file_name, /*!< in: file name where mutex
requested */
ulint line); /*!< in: line where requested */
@@ -57,7 +57,7 @@ UNIV_INTERN
void
mutex_set_debug_info(
/*=================*/
- mutex_t* mutex, /*!< in: mutex */
+ ib_mutex_t* mutex, /*!< in: mutex */
const char* file_name, /*!< in: file where requested */
ulint line); /*!< in: line where requested */
#endif /* UNIV_SYNC_DEBUG */
@@ -67,7 +67,7 @@ UNIV_INTERN
void
mutex_signal_object(
/*================*/
- mutex_t* mutex); /*!< in: mutex */
+ ib_mutex_t* mutex); /*!< in: mutex */
/******************************************************************//**
Performs an atomic test-and-set instruction to the lock_word field of a
@@ -75,9 +75,9 @@ mutex.
@return the previous value of lock_word: 0 or 1 */
UNIV_INLINE
byte
-mutex_test_and_set(
+ib_mutex_test_and_set(
/*===============*/
- mutex_t* mutex) /*!< in: mutex */
+ ib_mutex_t* mutex) /*!< in: mutex */
{
#if defined(HAVE_ATOMIC_BUILTINS)
return(os_atomic_test_and_set_byte(&mutex->lock_word, 1));
@@ -105,7 +105,7 @@ UNIV_INLINE
void
mutex_reset_lock_word(
/*==================*/
- mutex_t* mutex) /*!< in: mutex */
+ ib_mutex_t* mutex) /*!< in: mutex */
{
#if defined(HAVE_ATOMIC_BUILTINS)
/* In theory __sync_lock_release should be used to release the lock.
@@ -125,7 +125,7 @@ UNIV_INLINE
lock_word_t
mutex_get_lock_word(
/*================*/
- const mutex_t* mutex) /*!< in: mutex */
+ const ib_mutex_t* mutex) /*!< in: mutex */
{
ut_ad(mutex);
@@ -139,7 +139,7 @@ UNIV_INLINE
ulint
mutex_get_waiters(
/*==============*/
- const mutex_t* mutex) /*!< in: mutex */
+ const ib_mutex_t* mutex) /*!< in: mutex */
{
const volatile ulint* ptr; /*!< declared volatile to ensure that
the value is read from memory */
@@ -158,7 +158,7 @@ UNIV_INLINE
void
mutex_exit_func(
/*============*/
- mutex_t* mutex) /*!< in: pointer to mutex */
+ ib_mutex_t* mutex) /*!< in: pointer to mutex */
{
ut_ad(mutex_own(mutex));
@@ -199,7 +199,7 @@ UNIV_INLINE
void
mutex_enter_func(
/*=============*/
- mutex_t* mutex, /*!< in: pointer to mutex */
+ ib_mutex_t* mutex, /*!< in: pointer to mutex */
const char* file_name, /*!< in: file name where locked */
ulint line) /*!< in: line where locked */
{
@@ -209,9 +209,7 @@ mutex_enter_func(
/* Note that we do not peek at the value of lock_word before trying
the atomic test_and_set; we could peek, and possibly save time. */
- ut_d(mutex->count_using++);
-
- if (!mutex_test_and_set(mutex)) {
+ if (!ib_mutex_test_and_set(mutex)) {
ut_d(mutex->thread_id = os_thread_get_curr_id());
#ifdef UNIV_SYNC_DEBUG
mutex_set_debug_info(mutex, file_name, line);
@@ -232,28 +230,28 @@ UNIV_INLINE
void
pfs_mutex_enter_func(
/*=================*/
- mutex_t* mutex, /*!< in: pointer to mutex */
+ ib_mutex_t* mutex, /*!< in: pointer to mutex */
const char* file_name, /*!< in: file name where locked */
ulint line) /*!< in: line where locked */
{
- if (mutex->pfs_psi != NULL)
- {
+ if (mutex->pfs_psi != NULL) {
PSI_mutex_locker* locker;
PSI_mutex_locker_state state;
- locker = PSI_CALL(start_mutex_wait)(&state, mutex->pfs_psi,
+ locker = PSI_MUTEX_CALL(start_mutex_wait)(
+ &state, mutex->pfs_psi,
PSI_MUTEX_LOCK, file_name, line);
mutex_enter_func(mutex, file_name, line);
- if (locker != NULL)
- PSI_CALL(end_mutex_wait)(locker, 0);
- }
- else
- {
+ if (locker != NULL) {
+ PSI_MUTEX_CALL(end_mutex_wait)(locker, 0);
+ }
+ } else {
mutex_enter_func(mutex, file_name, line);
}
}
+
/********************************************************************//**
NOTE! Please use the corresponding macro mutex_enter_nowait(), not directly
this function!
@@ -264,33 +262,33 @@ UNIV_INLINE
ulint
pfs_mutex_enter_nowait_func(
/*========================*/
- mutex_t* mutex, /*!< in: pointer to mutex */
+ ib_mutex_t* mutex, /*!< in: pointer to mutex */
const char* file_name, /*!< in: file name where mutex
requested */
ulint line) /*!< in: line where requested */
{
- ulint ret;
+ ulint ret;
- if (mutex->pfs_psi != NULL)
- {
+ if (mutex->pfs_psi != NULL) {
PSI_mutex_locker* locker;
PSI_mutex_locker_state state;
- locker = PSI_CALL(start_mutex_wait)(&state, mutex->pfs_psi,
+ locker = PSI_MUTEX_CALL(start_mutex_wait)(
+ &state, mutex->pfs_psi,
PSI_MUTEX_TRYLOCK, file_name, line);
ret = mutex_enter_nowait_func(mutex, file_name, line);
- if (locker != NULL)
- PSI_CALL(end_mutex_wait)(locker, (int) ret);
- }
- else
- {
+ if (locker != NULL) {
+ PSI_MUTEX_CALL(end_mutex_wait)(locker, (int) ret);
+ }
+ } else {
ret = mutex_enter_nowait_func(mutex, file_name, line);
}
return(ret);
}
+
/******************************************************************//**
NOTE! Please use the corresponding macro mutex_exit(), not directly
this function!
@@ -300,10 +298,11 @@ UNIV_INLINE
void
pfs_mutex_exit_func(
/*================*/
- mutex_t* mutex) /*!< in: pointer to mutex */
+ ib_mutex_t* mutex) /*!< in: pointer to mutex */
{
- if (mutex->pfs_psi != NULL)
- PSI_CALL(unlock_mutex)(mutex->pfs_psi);
+ if (mutex->pfs_psi != NULL) {
+ PSI_MUTEX_CALL(unlock_mutex)(mutex->pfs_psi);
+ }
mutex_exit_func(mutex);
}
@@ -319,7 +318,7 @@ void
pfs_mutex_create_func(
/*==================*/
mysql_pfs_key_t key, /*!< in: Performance Schema key */
- mutex_t* mutex, /*!< in: pointer to memory */
+ ib_mutex_t* mutex, /*!< in: pointer to memory */
# ifdef UNIV_DEBUG
const char* cmutex_name, /*!< in: mutex name */
# ifdef UNIV_SYNC_DEBUG
@@ -329,7 +328,7 @@ pfs_mutex_create_func(
const char* cfile_name, /*!< in: file name where created */
ulint cline) /*!< in: file line where created */
{
- mutex->pfs_psi = PSI_CALL(init_mutex)(key, mutex);
+ mutex->pfs_psi = PSI_MUTEX_CALL(init_mutex)(key, mutex);
mutex_create_func(mutex,
# ifdef UNIV_DEBUG
@@ -341,6 +340,7 @@ pfs_mutex_create_func(
cfile_name,
cline);
}
+
/******************************************************************//**
NOTE! Please use the corresponding macro mutex_free(), not directly
this function!
@@ -350,11 +350,10 @@ UNIV_INLINE
void
pfs_mutex_free_func(
/*================*/
- mutex_t* mutex) /*!< in: mutex */
+ ib_mutex_t* mutex) /*!< in: mutex */
{
- if (mutex->pfs_psi != NULL)
- {
- PSI_CALL(destroy_mutex)(mutex->pfs_psi);
+ if (mutex->pfs_psi != NULL) {
+ PSI_MUTEX_CALL(destroy_mutex)(mutex->pfs_psi);
mutex->pfs_psi = NULL;
}
@@ -370,7 +369,7 @@ UNIV_INLINE
void
os_atomic_dec_ulint_func(
/*=====================*/
- mutex_t* mutex, /*!< in: mutex guarding the dec */
+ ib_mutex_t* mutex, /*!< in: mutex guarding the dec */
volatile ulint* var, /*!< in/out: variable to decrement */
ulint delta) /*!< in: delta to decrement */
{
@@ -391,7 +390,7 @@ UNIV_INLINE
void
os_atomic_inc_ulint_func(
/*=====================*/
- mutex_t* mutex, /*!< in: mutex guarding the increment */
+ ib_mutex_t* mutex, /*!< in: mutex guarding the increment */
volatile ulint* var, /*!< in/out: variable to increment */
ulint delta) /*!< in: delta to increment */
{
diff --git a/storage/innobase/include/sync0types.h b/storage/innobase/include/sync0types.h
index 679cf6a9074..0d143004a7a 100644
--- a/storage/innobase/include/sync0types.h
+++ b/storage/innobase/include/sync0types.h
@@ -26,9 +26,6 @@ Created 9/5/1995 Heikki Tuuri
#ifndef sync0types_h
#define sync0types_h
-/** Rename mutex_t to avoid name space collision on some systems */
-#define mutex_t ib_mutex_t
-/** InnoDB mutex */
-typedef struct mutex_struct mutex_t;
+struct ib_mutex_t;
#endif
diff --git a/storage/innobase/include/trx0i_s.h b/storage/innobase/include/trx0i_s.h
index c286fc4d9ae..662971a7841 100644
--- a/storage/innobase/include/trx0i_s.h
+++ b/storage/innobase/include/trx0i_s.h
@@ -79,25 +79,21 @@ do { \
} while (0)
/** A row of INFORMATION_SCHEMA.innodb_locks */
-typedef struct i_s_locks_row_struct i_s_locks_row_t;
-/** A row of INFORMATION_SCHEMA.innodb_trx */
-typedef struct i_s_trx_row_struct i_s_trx_row_t;
-/** A row of INFORMATION_SCHEMA.innodb_lock_waits */
-typedef struct i_s_lock_waits_row_struct i_s_lock_waits_row_t;
+struct i_s_locks_row_t;
/** Objects of trx_i_s_cache_t::locks_hash */
-typedef struct i_s_hash_chain_struct i_s_hash_chain_t;
+struct i_s_hash_chain_t;
/** Objects of this type are added to the hash table
trx_i_s_cache_t::locks_hash */
-struct i_s_hash_chain_struct {
+struct i_s_hash_chain_t {
i_s_locks_row_t* value; /*!< row of
INFORMATION_SCHEMA.innodb_locks*/
i_s_hash_chain_t* next; /*!< next item in the hash chain */
};
/** This structure represents INFORMATION_SCHEMA.innodb_locks row */
-struct i_s_locks_row_struct {
+struct i_s_locks_row_t {
trx_id_t lock_trx_id; /*!< transaction identifier */
const char* lock_mode; /*!< lock mode from
lock_get_mode_str() */
@@ -128,16 +124,16 @@ struct i_s_locks_row_struct {
};
/** This structure represents INFORMATION_SCHEMA.innodb_trx row */
-struct i_s_trx_row_struct {
+struct i_s_trx_row_t {
trx_id_t trx_id; /*!< transaction identifier */
const char* trx_state; /*!< transaction state from
trx_get_que_state_str() */
- ib_time_t trx_started; /*!< trx_struct::start_time */
+ ib_time_t trx_started; /*!< trx_t::start_time */
const i_s_locks_row_t* requested_lock_row;
/*!< pointer to a row
in innodb_locks if trx
is waiting, or NULL */
- ib_time_t trx_wait_started; /*!< trx_struct::wait_started */
+ ib_time_t trx_wait_started; /*!< trx_t::wait_started */
ullint trx_weight; /*!< TRX_WEIGHT() */
ulint trx_mysql_thread_id; /*!< thd_get_thread_id() */
const char* trx_query; /*!< MySQL statement being
@@ -145,36 +141,34 @@ struct i_s_trx_row_struct {
struct charset_info_st* trx_query_cs;
/*!< charset encode the MySQL
statement */
- const char* trx_operation_state; /*!< trx_struct::op_info */
+ const char* trx_operation_state; /*!< trx_t::op_info */
ulint trx_tables_in_use;/*!< n_mysql_tables_in_use in
- trx_struct */
+ trx_t */
ulint trx_tables_locked;
/*!< mysql_n_tables_locked in
- trx_struct */
+ trx_t */
ulint trx_lock_structs;/*!< list len of trx_locks in
- trx_struct */
+ trx_t */
ulint trx_lock_memory_bytes;
/*!< mem_heap_get_size(
trx->lock_heap) */
ulint trx_rows_locked;/*!< lock_number_of_rows_locked() */
- ullint trx_rows_modified;/*!< trx_struct::undo_no */
+ ullint trx_rows_modified;/*!< trx_t::undo_no */
ulint trx_concurrency_tickets;
/*!< n_tickets_to_enter_innodb in
- trx_struct */
+ trx_t */
const char* trx_isolation_level;
- /*!< isolation_level in trx_struct*/
+ /*!< isolation_level in trx_t */
ibool trx_unique_checks;
- /*!< check_unique_secondary in
- trx_struct*/
+ /*!< check_unique_secondary in trx_t*/
ibool trx_foreign_key_checks;
- /*!< check_foreigns in trx_struct */
+ /*!< check_foreigns in trx_t */
const char* trx_foreign_key_error;
- /*!< detailed_error in trx_struct */
+ /*!< detailed_error in trx_t */
ibool trx_has_search_latch;
- /*!< has_search_latch in trx_struct */
+ /*!< has_search_latch in trx_t */
ulint trx_search_latch_timeout;
- /*!< search_latch_timeout in
- trx_struct */
+ /*!< search_latch_timeout in trx_t */
ulint trx_is_read_only;
/*!< trx_t::read_only */
ulint trx_is_autocommit_non_locking;
@@ -183,13 +177,13 @@ struct i_s_trx_row_struct {
};
/** This structure represents INFORMATION_SCHEMA.innodb_lock_waits row */
-struct i_s_lock_waits_row_struct {
+struct i_s_lock_waits_row_t {
const i_s_locks_row_t* requested_lock_row; /*!< requested lock */
const i_s_locks_row_t* blocking_lock_row; /*!< blocking lock */
};
/** Cache of INFORMATION_SCHEMA table data */
-typedef struct trx_i_s_cache_struct trx_i_s_cache_t;
+struct trx_i_s_cache_t;
/** Auxiliary enum used by functions that need to select one of the
INFORMATION_SCHEMA tables */
diff --git a/storage/innobase/include/trx0purge.h b/storage/innobase/include/trx0purge.h
index 0199083467c..1e13c883800 100644
--- a/storage/innobase/include/trx0purge.h
+++ b/storage/innobase/include/trx0purge.h
@@ -108,7 +108,8 @@ enum purge_state_t {
PURGE_STATE_INIT, /*!< Purge instance created */
PURGE_STATE_RUN, /*!< Purge should be running */
PURGE_STATE_STOP, /*!< Purge should be stopped */
- PURGE_STATE_EXIT /*!< Purge has been shutdown */
+ PURGE_STATE_EXIT, /*!< Purge has been shutdown */
+ PURGE_STATE_DISABLED /*!< Purge was never started */
};
/*******************************************************************//**
@@ -121,16 +122,16 @@ trx_purge_state(void);
/** This is the purge pointer/iterator. We need both the undo no and the
transaction no up to which purge has parsed and applied the records. */
-typedef struct purge_iter_struct {
+struct purge_iter_t {
trx_id_t trx_no; /*!< Purge has advanced past all
transactions whose number is less
than this */
undo_no_t undo_no; /*!< Purge has advanced past all records
whose undo number is less than this */
-} purge_iter_t;
+};
/** The control structure used in the purge operation */
-struct trx_purge_struct{
+struct trx_purge_t{
sess_t* sess; /*!< System session running the purge
query */
trx_t* trx; /*!< System transaction running the
@@ -146,7 +147,8 @@ struct trx_purge_struct{
protects state and running */
os_event_t event; /*!< State signal event */
ulint n_stop; /*!< Counter to track number stops */
- bool running; /*!< true, if purge is active */
+ volatile bool running; /*!< true, if purge is active,
+ we check this without the latch too */
volatile purge_state_t state; /*!< Purge coordinator thread states,
we check this in several places
without holding the latch. */
@@ -171,6 +173,10 @@ struct trx_purge_struct{
purge_iter_t limit; /* The 'purge pointer' which advances
during a purge, and which is used in
history list truncation */
+#ifdef UNIV_DEBUG
+ purge_iter_t done; /* Indicate 'purge pointer' which have
+ purged already accurately. */
+#endif /* UNIV_DEBUG */
/*-----------------------------*/
ibool next_stored; /*!< TRUE if the info of the next record
to purge is stored below: if yes, then
@@ -196,17 +202,15 @@ struct trx_purge_struct{
ib_bh_t* ib_bh; /*!< Binary min-heap, ordered on
rseg_queue_t::trx_no. It is protected
by the bh_mutex */
- mutex_t bh_mutex; /*!< Mutex protecting ib_bh */
+ ib_mutex_t bh_mutex; /*!< Mutex protecting ib_bh */
};
/** Info required to purge a record */
-struct trx_purge_rec_struct {
+struct trx_purge_rec_t {
trx_undo_rec_t* undo_rec; /*!< Record to purge */
roll_ptr_t roll_ptr; /*!< File pointr to UNDO record */
};
-typedef struct trx_purge_rec_struct trx_purge_rec_t;
-
#ifndef UNIV_NONINL
#include "trx0purge.ic"
#endif
diff --git a/storage/innobase/include/trx0rec.h b/storage/innobase/include/trx0rec.h
index c9fae45dad4..cd1ecc096fd 100644
--- a/storage/innobase/include/trx0rec.h
+++ b/storage/innobase/include/trx0rec.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2009, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -105,10 +105,11 @@ trx_undo_rec_get_pars(
TRX_UNDO_INSERT_REC, ... */
ulint* cmpl_info, /*!< out: compiler info, relevant only
for update type records */
- ibool* updated_extern, /*!< out: TRUE if we updated an
+ bool* updated_extern, /*!< out: true if we updated an
externally stored fild */
undo_no_t* undo_no, /*!< out: undo log record number */
- table_id_t* table_id); /*!< out: table id */
+ table_id_t* table_id) /*!< out: table id */
+ __attribute__((nonnull, warn_unused_result));
/*******************************************************************//**
Builds a row reference from an undo log record.
@return pointer to remaining part of undo record */
@@ -178,8 +179,9 @@ trx_undo_update_rec_get_update(
needed is allocated */
upd_t** upd); /*!< out, own: update vector */
/*******************************************************************//**
-Builds a partial row from an update undo log record. It contains the
-columns which occur as ordering in any index of the table.
+Builds a partial row from an update undo log record, for purge.
+It contains the columns which occur as ordering in any index of the table.
+Any missing columns are indicated by col->mtype == DATA_MISSING.
@return pointer to remaining part of undo record */
UNIV_INTERN
byte*
@@ -197,8 +199,9 @@ trx_undo_rec_get_partial_row(
ibool ignore_prefix, /*!< in: flag to indicate if we
expect blob prefixes in undo. Used
only in the assertion. */
- mem_heap_t* heap); /*!< in: memory heap from which the memory
+ mem_heap_t* heap) /*!< in: memory heap from which the memory
needed is allocated */
+ __attribute__((nonnull, warn_unused_result));
/***********************************************************************//**
Writes information to an undo log about an insert, update, or a delete marking
of a clustered index record. This information is used in a rollback of the
@@ -206,7 +209,7 @@ transaction and in consistent reads that must look to the history of this
transaction.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
trx_undo_report_row_operation(
/*==========================*/
ulint flags, /*!< in: if BTR_NO_UNDO_LOG_FLAG bit is
@@ -225,10 +228,12 @@ trx_undo_report_row_operation(
const rec_t* rec, /*!< in: case of an update or delete
marking, the record in the clustered
index, otherwise NULL */
- roll_ptr_t* roll_ptr); /*!< out: rollback pointer to the
+ const ulint* offsets, /*!< in: rec_get_offsets(rec) */
+ roll_ptr_t* roll_ptr) /*!< out: rollback pointer to the
inserted undo log record,
0 if BTR_NO_UNDO_LOG
flag was specified */
+ __attribute__((nonnull(3,4,10), warn_unused_result));
/******************************************************************//**
Copies an undo record to heap. This function can be called if we know that
the undo log record exists.
@@ -238,16 +243,17 @@ trx_undo_rec_t*
trx_undo_get_undo_rec_low(
/*======================*/
roll_ptr_t roll_ptr, /*!< in: roll pointer to record */
- mem_heap_t* heap); /*!< in: memory heap where copied */
+ mem_heap_t* heap) /*!< in: memory heap where copied */
+ __attribute__((nonnull, warn_unused_result));
/*******************************************************************//**
Build a previous version of a clustered index record. The caller must
-hold a latch on the index page of the clustered index record, to
-guarantee that the stack of versions is locked all the way down to the
-purge_sys->view.
-@return DB_SUCCESS, or DB_MISSING_HISTORY if the previous version is
-earlier than purge_view, which means that it may have been removed */
+hold a latch on the index page of the clustered index record.
+@retval true if previous version was built, or if it was an insert
+or the table has been rebuilt
+@retval false if the previous version is earlier than purge_view,
+which means that it may have been removed */
UNIV_INTERN
-ulint
+bool
trx_undo_prev_version_build(
/*========================*/
const rec_t* index_rec,/*!< in: clustered index record in the
@@ -256,12 +262,13 @@ trx_undo_prev_version_build(
index_rec page and purge_view */
const rec_t* rec, /*!< in: version of a clustered index record */
dict_index_t* index, /*!< in: clustered index */
- ulint* offsets,/*!< in: rec_get_offsets(rec, index) */
+ ulint* offsets,/*!< in/out: rec_get_offsets(rec, index) */
mem_heap_t* heap, /*!< in: memory heap from which the memory
needed is allocated */
- rec_t** old_vers);/*!< out, own: previous version, or NULL if
+ rec_t** old_vers)/*!< out, own: previous version, or NULL if
rec is the first inserted version, or if
history data has been deleted */
+ __attribute__((nonnull));
#endif /* !UNIV_HOTBACKUP */
/***********************************************************//**
Parses a redo log record of adding an undo log record.
diff --git a/storage/innobase/include/trx0rec.ic b/storage/innobase/include/trx0rec.ic
index 847c26f03a8..08704f6b821 100644
--- a/storage/innobase/include/trx0rec.ic
+++ b/storage/innobase/include/trx0rec.ic
@@ -90,7 +90,7 @@ trx_undo_rec_get_offset(
/*====================*/
undo_no_t undo_no) /*!< in: undo no read from node */
{
- return (3 + mach_ull_get_much_compressed_size(undo_no));
+ return(3 + mach_ull_get_much_compressed_size(undo_no));
}
/***********************************************************************//**
diff --git a/storage/innobase/include/trx0roll.h b/storage/innobase/include/trx0roll.h
index 3b724e03830..9d020a10725 100644
--- a/storage/innobase/include/trx0roll.h
+++ b/storage/innobase/include/trx0roll.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -146,29 +146,32 @@ trx_rollback_step(
Rollback a transaction used in MySQL.
@return error code or DB_SUCCESS */
UNIV_INTERN
-int
+dberr_t
trx_rollback_for_mysql(
/*===================*/
- trx_t* trx); /*!< in/out: transaction */
+ trx_t* trx) /*!< in/out: transaction */
+ __attribute__((nonnull));
/*******************************************************************//**
Rollback the latest SQL statement for MySQL.
@return error code or DB_SUCCESS */
UNIV_INTERN
-int
+dberr_t
trx_rollback_last_sql_stat_for_mysql(
/*=================================*/
- trx_t* trx); /*!< in/out: transaction */
+ trx_t* trx) /*!< in/out: transaction */
+ __attribute__((nonnull));
/*******************************************************************//**
Rollback a transaction to a given savepoint or do a complete rollback.
@return error code or DB_SUCCESS */
UNIV_INTERN
-int
+dberr_t
trx_rollback_to_savepoint(
/*======================*/
trx_t* trx, /*!< in: transaction handle */
- trx_savept_t* savept);/*!< in: pointer to savepoint undo number, if
+ trx_savept_t* savept) /*!< in: pointer to savepoint undo number, if
partial rollback requested, or NULL for
complete rollback */
+ __attribute__((nonnull(1)));
/*******************************************************************//**
Rolls back a transaction back to a named savepoint. Modifications after the
savepoint are undone but InnoDB does NOT release the corresponding locks
@@ -179,17 +182,18 @@ were set after this savepoint are deleted.
@return if no savepoint of the name found then DB_NO_SAVEPOINT,
otherwise DB_SUCCESS */
UNIV_INTERN
-ulint
+dberr_t
trx_rollback_to_savepoint_for_mysql(
/*================================*/
trx_t* trx, /*!< in: transaction handle */
const char* savepoint_name, /*!< in: savepoint name */
- ib_int64_t* mysql_binlog_cache_pos);/*!< out: the MySQL binlog cache
+ ib_int64_t* mysql_binlog_cache_pos) /*!< out: the MySQL binlog cache
position corresponding to this
savepoint; MySQL needs this
information to remove the
binlog entries of the queries
executed after the savepoint */
+ __attribute__((nonnull, warn_unused_result));
/*******************************************************************//**
Creates a named savepoint. If the transaction is not yet started, starts it.
If there is already a savepoint of the same name, this call erases that old
@@ -197,28 +201,28 @@ savepoint and replaces it with a new. Savepoints are deleted in a transaction
commit or rollback.
@return always DB_SUCCESS */
UNIV_INTERN
-ulint
+dberr_t
trx_savepoint_for_mysql(
/*====================*/
trx_t* trx, /*!< in: transaction handle */
const char* savepoint_name, /*!< in: savepoint name */
- ib_int64_t binlog_cache_pos); /*!< in: MySQL binlog cache
+ ib_int64_t binlog_cache_pos) /*!< in: MySQL binlog cache
position corresponding to this
connection at the time of the
savepoint */
-
+ __attribute__((nonnull));
/*******************************************************************//**
Releases a named savepoint. Savepoints which
were set after this savepoint are deleted.
@return if no savepoint of the name found then DB_NO_SAVEPOINT,
otherwise DB_SUCCESS */
UNIV_INTERN
-ulint
+dberr_t
trx_release_savepoint_for_mysql(
/*============================*/
trx_t* trx, /*!< in: transaction handle */
- const char* savepoint_name); /*!< in: savepoint name */
-
+ const char* savepoint_name) /*!< in: savepoint name */
+ __attribute__((nonnull, warn_unused_result));
/*******************************************************************//**
Frees savepoint structs starting from savep. */
UNIV_INTERN
@@ -230,8 +234,8 @@ trx_roll_savepoints_free(
if this is NULL, free all savepoints
of trx */
-/** A cell of trx_undo_arr_struct; used during a rollback and a purge */
-struct trx_undo_inf_struct{
+/** A cell of trx_undo_arr_t; used during a rollback and a purge */
+struct trx_undo_inf_t{
ibool in_use; /*!< true if cell is being used */
trx_id_t trx_no; /*!< transaction number: not defined during
a rollback */
@@ -241,7 +245,7 @@ struct trx_undo_inf_struct{
/** During a rollback and a purge, undo numbers of undo records currently being
processed are stored in this array */
-struct trx_undo_arr_struct{
+struct trx_undo_arr_t{
ulint n_cells; /*!< number of cells in the array */
ulint n_used; /*!< number of cells in use */
trx_undo_inf_t* infos; /*!< the array of undo infos */
@@ -258,7 +262,7 @@ enum roll_node_state {
};
/** Rollback command node in a query graph */
-struct roll_node_struct{
+struct roll_node_t{
que_common_t common; /*!< node type: QUE_NODE_ROLLBACK */
enum roll_node_state state; /*!< node execution state */
ibool partial;/*!< TRUE if we want a partial
@@ -270,7 +274,7 @@ struct roll_node_struct{
};
/** A savepoint set with SQL's "SAVEPOINT savepoint_id" command */
-struct trx_named_savept_struct{
+struct trx_named_savept_t{
char* name; /*!< savepoint name */
trx_savept_t savept; /*!< the undo number corresponding to
the savepoint */
diff --git a/storage/innobase/include/trx0rseg.h b/storage/innobase/include/trx0rseg.h
index 66e5449cf57..185b05876b4 100644
--- a/storage/innobase/include/trx0rseg.h
+++ b/storage/innobase/include/trx0rseg.h
@@ -151,11 +151,11 @@ trx_rseg_get_n_undo_tablespaces(
#define TRX_RSEG_MAX_N_TRXS (TRX_RSEG_N_SLOTS / 2)
/* The rollback segment memory object */
-struct trx_rseg_struct{
+struct trx_rseg_t{
/*--------------------------------------------------------*/
ulint id; /*!< rollback segment id == the index of
its slot in the trx system file copy */
- mutex_t mutex; /*!< mutex protecting the fields in this
+ ib_mutex_t mutex; /*!< mutex protecting the fields in this
struct except id, which is constant */
ulint space; /*!< space where the rollback segment is
header is placed */
@@ -192,13 +192,11 @@ struct trx_rseg_struct{
};
/** For prioritising the rollback segments for purge. */
-struct rseg_queue_struct {
+struct rseg_queue_t {
trx_id_t trx_no; /*!< trx_rseg_t::last_trx_no */
trx_rseg_t* rseg; /*!< Rollback segment */
};
-typedef struct rseg_queue_struct rseg_queue_t;
-
/* Undo log segment slot in a rollback segment header */
/*-------------------------------------------------------------*/
#define TRX_RSEG_SLOT_PAGE_NO 0 /* Page number of the header page of
diff --git a/storage/innobase/include/trx0sys.h b/storage/innobase/include/trx0sys.h
index b1aa3d2224c..70f214d1ac7 100644
--- a/storage/innobase/include/trx0sys.h
+++ b/storage/innobase/include/trx0sys.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -426,7 +426,7 @@ trx_sys_file_format_max_get(void);
Check for the max file format tag stored on disk.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
trx_sys_file_format_max_check(
/*==========================*/
ulint max_format_id); /*!< in: the max format id to check */
@@ -600,18 +600,28 @@ identifier is added to this 64-bit constant. */
#ifndef UNIV_HOTBACKUP
/** The transaction system central memory data structure. */
-struct trx_sys_struct{
+struct trx_sys_t{
- mutex_t mutex; /*!< mutex protecting most fields in
+ ib_mutex_t mutex; /*!< mutex protecting most fields in
this structure except when noted
otherwise */
- ulint n_mysql_trx; /*!< Number of transactions currently
- allocated for MySQL */
ulint n_prepared_trx; /*!< Number of transactions currently
in the XA PREPARED state */
+ ulint n_prepared_recovered_trx; /*!< Number of transactions
+ currently in XA PREPARED state that are
+ also recovered. Such transactions cannot
+ be added during runtime. They can only
+ occur after recovery if mysqld crashed
+ while there were XA PREPARED
+ transactions. We disable query cache
+ if such transactions exist. */
trx_id_t max_trx_id; /*!< The smallest number not yet
assigned as a transaction id or
transaction number */
+#ifdef UNIV_DEBUG
+ trx_id_t rw_max_trx_id; /*!< Max trx id of read-write transactions
+ which exist or existed */
+#endif
trx_list_t rw_trx_list; /*!< List of active and committed in
memory read-write transactions, sorted
on trx id, biggest first. Recovered
diff --git a/storage/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h
index 3e6cfc7d0da..bb84c1806f2 100644
--- a/storage/innobase/include/trx0trx.h
+++ b/storage/innobase/include/trx0trx.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -121,20 +121,69 @@ UNIV_INTERN
void
trx_lists_init_at_db_start(void);
/*============================*/
+
+#ifdef UNIV_DEBUG
+#define trx_start_if_not_started_xa(t) \
+ { \
+ (t)->start_line = __LINE__; \
+ (t)->start_file = __FILE__; \
+ trx_start_if_not_started_xa_low((t)); \
+ }
+#else
+#define trx_start_if_not_started_xa(t) \
+ trx_start_if_not_started_xa_low((t))
+#endif /* UNIV_DEBUG */
+
/*************************************************************//**
Starts the transaction if it is not yet started. */
UNIV_INTERN
void
-trx_start_if_not_started_xa(
-/*========================*/
+trx_start_if_not_started_xa_low(
+/*============================*/
trx_t* trx); /*!< in: transaction */
/*************************************************************//**
Starts the transaction if it is not yet started. */
UNIV_INTERN
void
-trx_start_if_not_started(
-/*=====================*/
+trx_start_if_not_started_low(
+/*=========================*/
trx_t* trx); /*!< in: transaction */
+
+#ifdef UNIV_DEBUG
+#define trx_start_if_not_started(t) \
+ { \
+ (t)->start_line = __LINE__; \
+ (t)->start_file = __FILE__; \
+ trx_start_if_not_started_low((t)); \
+ }
+#else
+#define trx_start_if_not_started(t) \
+ trx_start_if_not_started_low((t))
+#endif /* UNIV_DEBUG */
+
+/*************************************************************//**
+Starts the transaction for a DDL operation. */
+UNIV_INTERN
+void
+trx_start_for_ddl_low(
+/*==================*/
+ trx_t* trx, /*!< in/out: transaction */
+ trx_dict_op_t op) /*!< in: dictionary operation type */
+ __attribute__((nonnull));
+
+#ifdef UNIV_DEBUG
+#define trx_start_for_ddl(t, o) \
+ { \
+ ut_ad((t)->start_file == 0); \
+ (t)->start_line = __LINE__; \
+ (t)->start_file = __FILE__; \
+ trx_start_for_ddl_low((t), (o)); \
+ }
+#else
+#define trx_start_for_ddl(t, o) \
+ trx_start_for_ddl_low((t), (o))
+#endif /* UNIV_DEBUG */
+
/****************************************************************//**
Commits a transaction. */
UNIV_INTERN
@@ -155,7 +204,7 @@ trx_cleanup_at_db_startup(
Does the transaction commit for MySQL.
@return DB_SUCCESS or error number */
UNIV_INTERN
-ulint
+dberr_t
trx_commit_for_mysql(
/*=================*/
trx_t* trx); /*!< in/out: transaction */
@@ -189,13 +238,13 @@ trx_get_trx_by_xid(
const XID* xid); /*!< in: X/Open XA transaction identifier */
/**********************************************************************//**
If required, flushes the log to disk if we called trx_commit_for_mysql()
-with trx->flush_log_later == TRUE.
-@return 0 or error number */
+with trx->flush_log_later == TRUE. */
UNIV_INTERN
-ulint
+void
trx_commit_complete_for_mysql(
/*==========================*/
- trx_t* trx); /*!< in: trx handle */
+ trx_t* trx) /*!< in/out: transaction */
+ __attribute__((nonnull));
/**********************************************************************//**
Marks the latest SQL statement ended. */
UNIV_INTERN
@@ -251,9 +300,9 @@ trx_print_low(
ulint max_query_len,
/*!< in: max query length to print,
or 0 to use the default max length */
- ulint n_lock_rec,
+ ulint n_rec_locks,
/*!< in: lock_number_of_rows_locked(&trx->lock) */
- ulint n_lock_struct,
+ ulint n_trx_locks,
/*!< in: length of trx->lock.trx_locks */
ulint heap_size)
/*!< in: mem_heap_get_size(trx->lock.lock_heap) */
@@ -286,26 +335,11 @@ trx_print(
or 0 to use the default max length */
__attribute__((nonnull));
-/** Type of data dictionary operation */
-typedef enum trx_dict_op {
- /** The transaction is not modifying the data dictionary. */
- TRX_DICT_OP_NONE = 0,
- /** The transaction is creating a table or an index, or
- dropping a table. The table must be dropped in crash
- recovery. This and TRX_DICT_OP_NONE are the only possible
- operation modes in crash recovery. */
- TRX_DICT_OP_TABLE = 1,
- /** The transaction is creating or dropping an index in an
- existing table. In crash recovery, the data dictionary
- must be locked, but the table must not be dropped. */
- TRX_DICT_OP_INDEX = 2
-} trx_dict_op_t;
-
/**********************************************************************//**
Determine if a transaction is a dictionary operation.
@return dictionary operation mode */
UNIV_INLINE
-enum trx_dict_op
+enum trx_dict_op_t
trx_get_dict_operation(
/*===================*/
const trx_t* trx) /*!< in: transaction */
@@ -317,7 +351,7 @@ void
trx_set_dict_operation(
/*===================*/
trx_t* trx, /*!< in/out: transaction */
- enum trx_dict_op op); /*!< in: operation, not
+ enum trx_dict_op_t op); /*!< in: operation, not
TRX_DICT_OP_NONE */
#ifndef UNIV_HOTBACKUP
@@ -359,7 +393,7 @@ UNIV_INTERN
ibool
trx_is_interrupted(
/*===============*/
- trx_t* trx); /*!< in: transaction */
+ const trx_t* trx); /*!< in: transaction */
/**********************************************************************//**
Determines if the currently running transaction is in strict mode.
@return TRUE if strict */
@@ -405,6 +439,15 @@ trx_get_que_state_str(
/*==================*/
const trx_t* trx); /*!< in: transaction */
+/****************************************************************//**
+Assign a read-only transaction a rollback-segment, if it is attempting
+to write to a TEMPORARY table. */
+UNIV_INTERN
+void
+trx_assign_rseg(
+/*============*/
+ trx_t* trx); /*!< A read-only transaction that
+ needs to be assigned a RBS. */
/*******************************************************************//**
Transactions that aren't started by the MySQL server don't set
the trx_t::mysql_thd field. For such transactions we set the lock
@@ -450,7 +493,6 @@ non-locking select */
ut_ad(!trx_is_autocommit_non_locking((t))); \
switch ((t)->state) { \
case TRX_STATE_PREPARED: \
- ut_a(!(t)->read_only); \
/* fall through */ \
case TRX_STATE_ACTIVE: \
case TRX_STATE_COMMITTED_IN_MEMORY: \
@@ -463,7 +505,7 @@ non-locking select */
#ifdef UNIV_DEBUG
/*******************************************************************//**
-Assert that an autocommit non-locking slect cannot be in the
+Assert that an autocommit non-locking select cannot be in the
ro_trx_list nor the rw_trx_list and that it is a read-only transaction.
The tranasction must be in the mysql_trx_list. */
# define assert_trx_nonlocking_or_in_list(t) \
@@ -511,7 +553,7 @@ code and no mutex is required when the query thread is no longer waiting. */
/** The locks and state of an active transaction. Protected by
lock_sys->mutex, trx->mutex or both. */
-struct trx_lock_struct {
+struct trx_lock_t {
ulint n_active_thrs; /*!< number of active query threads */
trx_que_t que_state; /*!< valid when trx->state
@@ -620,10 +662,10 @@ lock_rec_convert_impl_to_expl()) will access transactions associated
to other connections. The locks of transactions are protected by
lock_sys->mutex and sometimes by trx->mutex. */
-struct trx_struct{
+struct trx_t{
ulint magic_n;
- mutex_t mutex; /*!< Mutex protecting the fields
+ ib_mutex_t mutex; /*!< Mutex protecting the fields
state and lock
(except some fields of lock, which
are protected by lock_sys->mutex) */
@@ -657,8 +699,7 @@ struct trx_struct{
Latching and various transaction lists membership rules:
- XA (2PC) transactions are always treated as read-write and
- non-autocommit.
+ XA (2PC) transactions are always treated as non-autocommit.
Transitions to ACTIVE or NOT_STARTED occur when
!in_rw_trx_list and !in_ro_trx_list (no trx_sys->mutex needed).
@@ -793,9 +834,9 @@ struct trx_struct{
transaction branch */
lsn_t commit_lsn; /*!< lsn at the time of the commit */
table_id_t table_id; /*!< Table to drop iff dict_operation
- is TRUE, or 0. */
+ == TRX_DICT_OP_TABLE, or 0. */
/*------------------------------*/
- void* mysql_thd; /*!< MySQL thread handle corresponding
+ THD* mysql_thd; /*!< MySQL thread handle corresponding
to this trx, or NULL */
const char* mysql_log_file_name;
/*!< if MySQL binlog is used, this field
@@ -838,7 +879,7 @@ struct trx_struct{
trx_sys->mysql_trx_list */
#endif /* UNIV_DEBUG */
/*------------------------------*/
- enum db_err error_state; /*!< 0 if no error, otherwise error
+ dberr_t error_state; /*!< 0 if no error, otherwise error
number; NOTE That ONLY the thread
doing the transaction is allowed to
set this field: this is NOT protected
@@ -873,7 +914,7 @@ struct trx_struct{
trx_savepoints; /*!< savepoints set with SAVEPOINT ...,
oldest first */
/*------------------------------*/
- mutex_t undo_mutex; /*!< mutex protecting the fields in this
+ ib_mutex_t undo_mutex; /*!< mutex protecting the fields in this
section (down to undo_no_arr), EXCEPT
last_sql_stat_start, which can be
accessed only when we know that there
@@ -929,12 +970,24 @@ struct trx_struct{
ulint will_lock; /*!< Will acquire some locks. Increment
each time we determine that a lock will
be acquired by the MySQL layer. */
+ bool ddl; /*!< true if it is a transaction that
+ is being started for a DDL operation */
/*------------------------------*/
- fts_trx_t* fts_trx; /* FTS information, or NULL if
+ fts_trx_t* fts_trx; /*!< FTS information, or NULL if
transaction hasn't modified tables
with FTS indexes (yet). */
doc_id_t fts_next_doc_id;/* The document id used for updates */
/*------------------------------*/
+ ulint flush_tables; /*!< if "covering" the FLUSH TABLES",
+ count of tables being flushed. */
+
+ /*------------------------------*/
+#ifdef UNIV_DEBUG
+ ulint start_line; /*!< Track where it was started from */
+ const char* start_file; /*!< Filename where it was started */
+#endif /* UNIV_DEBUG */
+
+ /*------------------------------*/
char detailed_error[256]; /*!< detailed error message for last
error, or empty. */
};
@@ -1003,7 +1056,7 @@ enum commit_node_state {
};
/** Commit command node in a query graph */
-struct commit_node_struct{
+struct commit_node_t{
que_common_t common; /*!< node type: QUE_NODE_COMMIT */
enum commit_node_state
state; /*!< node execution state */
diff --git a/storage/innobase/include/trx0trx.ic b/storage/innobase/include/trx0trx.ic
index ceeb121ab70..69ee17ea98b 100644
--- a/storage/innobase/include/trx0trx.ic
+++ b/storage/innobase/include/trx0trx.ic
@@ -44,7 +44,7 @@ trx_state_eq(
#ifdef UNIV_DEBUG
switch (trx->state) {
case TRX_STATE_PREPARED:
- assert_trx_in_rw_list(trx);
+ ut_ad(!trx_is_autocommit_non_locking(trx));
return(trx->state == state);
case TRX_STATE_ACTIVE:
@@ -108,12 +108,12 @@ trx_get_que_state_str(
Determine if a transaction is a dictionary operation.
@return dictionary operation mode */
UNIV_INLINE
-enum trx_dict_op
+enum trx_dict_op_t
trx_get_dict_operation(
/*===================*/
const trx_t* trx) /*!< in: transaction */
{
- enum trx_dict_op op = (enum trx_dict_op) trx->dict_operation;
+ trx_dict_op_t op = static_cast<trx_dict_op_t>(trx->dict_operation);
#ifdef UNIV_DEBUG
switch (op) {
@@ -124,7 +124,7 @@ trx_get_dict_operation(
}
ut_error;
#endif /* UNIV_DEBUG */
- return((enum trx_dict_op) op);
+ return(op);
}
/**********************************************************************//**
Flag a transaction a dictionary operation. */
@@ -133,11 +133,11 @@ void
trx_set_dict_operation(
/*===================*/
trx_t* trx, /*!< in/out: transaction */
- enum trx_dict_op op) /*!< in: operation, not
+ enum trx_dict_op_t op) /*!< in: operation, not
TRX_DICT_OP_NONE */
{
#ifdef UNIV_DEBUG
- enum trx_dict_op old_op = trx_get_dict_operation(trx);
+ enum trx_dict_op_t old_op = trx_get_dict_operation(trx);
switch (op) {
case TRX_DICT_OP_NONE:
@@ -159,6 +159,7 @@ trx_set_dict_operation(
ok:
#endif /* UNIV_DEBUG */
+ trx->ddl = true;
trx->dict_operation = op;
}
diff --git a/storage/innobase/include/trx0types.h b/storage/innobase/include/trx0types.h
index 650d5878e64..4f515cb5248 100644
--- a/storage/innobase/include/trx0types.h
+++ b/storage/innobase/include/trx0types.h
@@ -36,7 +36,7 @@ the terminating NUL character. */
#define TRX_ID_MAX_LEN 17
/** Transaction execution states when trx->state == TRX_STATE_ACTIVE */
-enum trx_que_enum {
+enum trx_que_t {
TRX_QUE_RUNNING, /*!< transaction is running */
TRX_QUE_LOCK_WAIT, /*!< transaction is waiting for
a lock */
@@ -45,43 +45,54 @@ enum trx_que_enum {
};
/** Transaction states (trx_t::state) */
-enum trx_state_enum {
+enum trx_state_t {
TRX_STATE_NOT_STARTED,
TRX_STATE_ACTIVE,
TRX_STATE_PREPARED, /* Support for 2PC/XA */
TRX_STATE_COMMITTED_IN_MEMORY
};
+/** Type of data dictionary operation */
+enum trx_dict_op_t {
+ /** The transaction is not modifying the data dictionary. */
+ TRX_DICT_OP_NONE = 0,
+ /** The transaction is creating a table or an index, or
+ dropping a table. The table must be dropped in crash
+ recovery. This and TRX_DICT_OP_NONE are the only possible
+ operation modes in crash recovery. */
+ TRX_DICT_OP_TABLE = 1,
+ /** The transaction is creating or dropping an index in an
+ existing table. In crash recovery, the data dictionary
+ must be locked, but the table must not be dropped. */
+ TRX_DICT_OP_INDEX = 2
+};
+
/** Memory objects */
/* @{ */
/** Transaction */
-typedef struct trx_struct trx_t;
+struct trx_t;
/** The locks and state of an active transaction */
-typedef struct trx_lock_struct trx_lock_t;
+struct trx_lock_t;
/** Transaction system */
-typedef struct trx_sys_struct trx_sys_t;
+struct trx_sys_t;
/** Signal */
-typedef struct trx_sig_struct trx_sig_t;
+struct trx_sig_t;
/** Rollback segment */
-typedef struct trx_rseg_struct trx_rseg_t;
+struct trx_rseg_t;
/** Transaction undo log */
-typedef struct trx_undo_struct trx_undo_t;
+struct trx_undo_t;
/** Array of undo numbers of undo records being rolled back or purged */
-typedef struct trx_undo_arr_struct trx_undo_arr_t;
+struct trx_undo_arr_t;
/** A cell of trx_undo_arr_t */
-typedef struct trx_undo_inf_struct trx_undo_inf_t;
+struct trx_undo_inf_t;
/** The control structure used in the purge operation */
-typedef struct trx_purge_struct trx_purge_t;
+struct trx_purge_t;
/** Rollback command node in a query graph */
-typedef struct roll_node_struct roll_node_t;
+struct roll_node_t;
/** Commit command node in a query graph */
-typedef struct commit_node_struct commit_node_t;
+struct commit_node_t;
/** SAVEPOINT command node in a query graph */
-typedef struct trx_named_savept_struct trx_named_savept_t;
-/** Transaction concurrency state */
-typedef enum trx_state_enum trx_state_t;
-/** Transaction query thread state */
-typedef enum trx_que_enum trx_que_t;
+struct trx_named_savept_t;
/* @} */
/** Rollback contexts */
@@ -109,9 +120,7 @@ typedef ib_id_t roll_ptr_t;
typedef ib_id_t undo_no_t;
/** Transaction savepoint */
-typedef struct trx_savept_struct trx_savept_t;
-/** Transaction savepoint */
-struct trx_savept_struct{
+struct trx_savept_t{
undo_no_t least_undo_no; /*!< least undo number to undo */
};
diff --git a/storage/innobase/include/trx0undo.h b/storage/innobase/include/trx0undo.h
index ed2ce66bbb6..4021d71c68a 100644
--- a/storage/innobase/include/trx0undo.h
+++ b/storage/innobase/include/trx0undo.h
@@ -65,6 +65,15 @@ ibool
trx_undo_roll_ptr_is_insert(
/*========================*/
roll_ptr_t roll_ptr); /*!< in: roll pointer */
+/***********************************************************************//**
+Returns true if the record is of the insert type.
+@return true if the record was freshly inserted (not updated). */
+UNIV_INLINE
+bool
+trx_undo_trx_id_is_insert(
+/*======================*/
+ const byte* trx_id) /*!< in: DB_TRX_ID, followed by DB_ROLL_PTR */
+ __attribute__((nonnull, pure, warn_unused_result));
#endif /* !UNIV_HOTBACKUP */
/*****************************************************************//**
Writes a roll ptr to an index page. In case that the size changes in
@@ -285,11 +294,12 @@ undo log reused.
are: DB_TOO_MANY_CONCURRENT_TRXS DB_OUT_OF_FILE_SPACE DB_READ_ONLY
DB_OUT_OF_MEMORY */
UNIV_INTERN
-ulint
+dberr_t
trx_undo_assign_undo(
/*=================*/
trx_t* trx, /*!< in: transaction */
- ulint type); /*!< in: TRX_UNDO_INSERT or TRX_UNDO_UPDATE */
+ ulint type) /*!< in: TRX_UNDO_INSERT or TRX_UNDO_UPDATE */
+ __attribute__((nonnull, warn_unused_result));
/******************************************************************//**
Sets the state of the undo log segment at a transaction finish.
@return undo log segment header page, x-latched */
@@ -404,7 +414,7 @@ trx_undo_mem_free(
/** Transaction undo log memory object; this is protected by the undo_mutex
in the corresponding transaction object */
-struct trx_undo_struct{
+struct trx_undo_t{
/*-----------------------------*/
ulint id; /*!< undo log slot number within the
rollback segment */
diff --git a/storage/innobase/include/trx0undo.ic b/storage/innobase/include/trx0undo.ic
index 4b38e63297c..577759d6c3d 100644
--- a/storage/innobase/include/trx0undo.ic
+++ b/storage/innobase/include/trx0undo.ic
@@ -101,6 +101,21 @@ trx_undo_roll_ptr_is_insert(
ut_ad(roll_ptr < (1ULL << 56));
return((ibool) (roll_ptr >> 55));
}
+
+/***********************************************************************//**
+Returns true if the record is of the insert type.
+@return true if the record was freshly inserted (not updated). */
+UNIV_INLINE
+bool
+trx_undo_trx_id_is_insert(
+/*======================*/
+ const byte* trx_id) /*!< in: DB_TRX_ID, followed by DB_ROLL_PTR */
+{
+#if DATA_TRX_ID + 1 != DATA_ROLL_PTR
+# error
+#endif
+ return(static_cast<bool>(trx_id[DATA_TRX_ID_LEN] >> 7));
+}
#endif /* !UNIV_HOTBACKUP */
/*****************************************************************//**
diff --git a/storage/innobase/include/univ.i b/storage/innobase/include/univ.i
index 422828e76f4..fbb62e8de01 100644
--- a/storage/innobase/include/univ.i
+++ b/storage/innobase/include/univ.i
@@ -42,8 +42,6 @@ Created 1/20/1994 Heikki Tuuri
#define _IB_TO_STR(s) #s
#define IB_TO_STR(s) _IB_TO_STR(s)
-#include <mysql_version.h>
-
#define INNODB_VERSION_MAJOR 1
#define INNODB_VERSION_MINOR 2
#define INNODB_VERSION_BUGFIX MYSQL_VERSION_PATCH
@@ -57,7 +55,10 @@ component, i.e. we show M.N.P as M.N */
#define INNODB_VERSION_SHORT \
(INNODB_VERSION_MAJOR << 8 | INNODB_VERSION_MINOR)
-#define INNODB_VERSION_STR MYSQL_SERVER_VERSION
+#define INNODB_VERSION_STR \
+ IB_TO_STR(INNODB_VERSION_MAJOR) "." \
+ IB_TO_STR(INNODB_VERSION_MINOR) "." \
+ IB_TO_STR(INNODB_VERSION_BUGFIX)
#define REFMAN "http://dev.mysql.com/doc/refman/" \
IB_TO_STR(MYSQL_VERSION_MAJOR) "." \
@@ -379,11 +380,16 @@ This number varies depending on UNIV_PAGE_SIZE. */
/** Maximum number of parallel threads in a parallelized operation */
#define UNIV_MAX_PARALLELISM 32
-/** The maximum length of a table name. This is the MySQL limit and is
-defined in mysql_com.h like NAME_CHAR_LEN*SYSTEM_CHARSET_MBMAXLEN, the
-number does not include a terminating '\0'. InnoDB probably can handle
-longer names internally */
-#define MAX_TABLE_NAME_LEN 192
+/** This is the "mbmaxlen" for my_charset_filename (defined in
+strings/ctype-utf8.c), which is used to encode File and Database names. */
+#define FILENAME_CHARSET_MAXNAMLEN 5
+
+/** The maximum length of an encode table name in bytes. The max
+table and database names are NAME_CHAR_LEN (64) characters. After the
+encoding, the max length would be NAME_CHAR_LEN (64) *
+FILENAME_CHARSET_MAXNAMLEN (5) = 320 bytes. The number does not include a
+terminating '\0'. InnoDB can handle longer names internally */
+#define MAX_TABLE_NAME_LEN 320
/** The maximum length of a database name. Like MAX_TABLE_NAME_LEN this is
the MySQL's NAME_LEN, see check_and_convert_db_name(). */
@@ -397,6 +403,16 @@ database name and table name. In addition, 14 bytes is added for:
#define MAX_FULL_NAME_LEN \
(MAX_TABLE_NAME_LEN + MAX_DATABASE_NAME_LEN + 14)
+/** The maximum length in bytes that a database name can occupy when stored in
+UTF8, including the terminating '\0', see dict_fs2utf8(). You must include
+mysql_com.h if you are to use this macro. */
+#define MAX_DB_UTF8_LEN (NAME_LEN + 1)
+
+/** The maximum length in bytes that a table name can occupy when stored in
+UTF8, including the terminating '\0', see dict_fs2utf8(). You must include
+mysql_com.h if you are to use this macro. */
+#define MAX_TABLE_UTF8_LEN (NAME_LEN + sizeof(srv_mysql50_table_name_prefix))
+
/*
UNIVERSAL TYPE DEFINITIONS
==========================
@@ -416,6 +432,7 @@ macro ULINTPF. */
# define UINT32PF "%I32u"
# define INT64PF "%I64d"
# define UINT64PF "%I64u"
+# define UINT64PFx "%016I64u"
typedef __int64 ib_int64_t;
typedef unsigned __int64 ib_uint64_t;
typedef unsigned __int32 ib_uint32_t;
@@ -424,6 +441,7 @@ typedef unsigned __int32 ib_uint32_t;
# define UINT32PF "%"PRIu32
# define INT64PF "%"PRId64
# define UINT64PF "%"PRIu64
+# define UINT64PFx "%016"PRIx64
typedef int64_t ib_int64_t;
typedef uint64_t ib_uint64_t;
typedef uint32_t ib_uint32_t;
@@ -488,6 +506,8 @@ headers may define 'bool' differently. Do not assume that 'bool' is a ulint! */
#endif
+#define UNIV_NOTHROW
+
/** The following number as the length of a logical field means that the field
has the SQL NULL as its value. NOTE that because we assume that the length
of a field is a 32-bit integer when we store it, for example, to an undo log
@@ -587,15 +607,23 @@ typedef void* os_thread_ret_t;
# define UNIV_MEM_ALLOC(addr, size) VALGRIND_MAKE_MEM_UNDEFINED(addr, size)
# define UNIV_MEM_DESC(addr, size) VALGRIND_CREATE_BLOCK(addr, size, #addr)
# define UNIV_MEM_UNDESC(b) VALGRIND_DISCARD(b)
-# define UNIV_MEM_ASSERT_RW(addr, size) do { \
+# define UNIV_MEM_ASSERT_RW_LOW(addr, size, should_abort) do { \
const void* _p = (const void*) (ulint) \
VALGRIND_CHECK_MEM_IS_DEFINED(addr, size); \
- if (UNIV_LIKELY_NULL(_p)) \
+ if (UNIV_LIKELY_NULL(_p)) { \
fprintf(stderr, "%s:%d: %p[%u] undefined at %ld\n", \
__FILE__, __LINE__, \
(const void*) (addr), (unsigned) (size), (long) \
(((const char*) _p) - ((const char*) (addr)))); \
- } while (0)
+ if (should_abort) { \
+ ut_error; \
+ } \
+ } \
+} while (0)
+# define UNIV_MEM_ASSERT_RW(addr, size) \
+ UNIV_MEM_ASSERT_RW_LOW(addr, size, false)
+# define UNIV_MEM_ASSERT_RW_ABORT(addr, size) \
+ UNIV_MEM_ASSERT_RW_LOW(addr, size, true)
# define UNIV_MEM_ASSERT_W(addr, size) do { \
const void* _p = (const void*) (ulint) \
VALGRIND_CHECK_MEM_IS_ADDRESSABLE(addr, size); \
@@ -612,7 +640,9 @@ typedef void* os_thread_ret_t;
# define UNIV_MEM_ALLOC(addr, size) do {} while(0)
# define UNIV_MEM_DESC(addr, size) do {} while(0)
# define UNIV_MEM_UNDESC(b) do {} while(0)
+# define UNIV_MEM_ASSERT_RW_LOW(addr, size, should_abort) do {} while(0)
# define UNIV_MEM_ASSERT_RW(addr, size) do {} while(0)
+# define UNIV_MEM_ASSERT_RW_ABORT(addr, size) do {} while(0)
# define UNIV_MEM_ASSERT_W(addr, size) do {} while(0)
#endif
#define UNIV_MEM_ASSERT_AND_FREE(addr, size) do { \
diff --git a/storage/innobase/include/usr0sess.h b/storage/innobase/include/usr0sess.h
index 4a0710c5060..b5c80b97b43 100644
--- a/storage/innobase/include/usr0sess.h
+++ b/storage/innobase/include/usr0sess.h
@@ -53,7 +53,7 @@ sess_close(
/* The session handle. This data structure is only used by purge and is
not really necessary. We should get rid of it. */
-struct sess_struct{
+struct sess_t{
ulint state; /*!< state of the session */
trx_t* trx; /*!< transaction object permanently
assigned for the session: the
diff --git a/storage/innobase/include/usr0types.h b/storage/innobase/include/usr0types.h
index 403ad0223a8..6ba937cacc8 100644
--- a/storage/innobase/include/usr0types.h
+++ b/storage/innobase/include/usr0types.h
@@ -26,6 +26,6 @@ Created 6/25/1996 Heikki Tuuri
#ifndef usr0types_h
#define usr0types_h
-typedef struct sess_struct sess_t;
+struct sess_t;
#endif
diff --git a/storage/innobase/include/ut0bh.h b/storage/innobase/include/ut0bh.h
index 4c029e256a9..84ea6dd915a 100644
--- a/storage/innobase/include/ut0bh.h
+++ b/storage/innobase/include/ut0bh.h
@@ -31,7 +31,7 @@ Created 2010-05-28 by Sunny Bains
/** Comparison function for objects in the binary heap. */
typedef int (*ib_bh_cmp_t)(const void* p1, const void* p2);
-typedef struct ib_bh_struct ib_bh_t;
+struct ib_bh_t;
/**********************************************************************//**
Get the number of elements in the binary heap.
@@ -138,7 +138,7 @@ ib_bh_pop(
ib_bh_t* ib_bh); /*!< in/out: instance */
/** Binary heap data structure */
-struct ib_bh_struct {
+struct ib_bh_t {
ulint max_elems; /*!< max elements allowed */
ulint n_elems; /*!< current size */
ulint sizeof_elem; /*!< sizeof element */
diff --git a/storage/innobase/include/ut0counter.h b/storage/innobase/include/ut0counter.h
new file mode 100644
index 00000000000..fe0f36dfff2
--- /dev/null
+++ b/storage/innobase/include/ut0counter.h
@@ -0,0 +1,203 @@
+/*****************************************************************************
+
+Copyright (c) 2012, Oracle and/or its affiliates. All Rights Reserved.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
+
+*****************************************************************************/
+
+/**************************************************//**
+@file include/ut0counter.h
+
+Counter utility class
+
+Created 2012/04/12 by Sunny Bains
+*******************************************************/
+
+#ifndef UT0COUNTER_H
+#define UT0COUNTER_H
+
+#include "univ.i"
+#include <string.h>
+#include "os0thread.h"
+
+/** CPU cache line size */
+#define CACHE_LINE_SIZE 64
+
+/** Default number of slots to use in ib_counter_t */
+#define IB_N_SLOTS 64
+
+/** Get the offset into the counter array. */
+template <typename Type, int N>
+struct generic_indexer_t {
+ /** Default constructor/destructor should be OK. */
+
+ /** @return offset within m_counter */
+ size_t offset(size_t index) const UNIV_NOTHROW {
+ return(((index % N) + 1) * (CACHE_LINE_SIZE / sizeof(Type)));
+ }
+};
+
+#ifdef HAVE_SCHED_GETCPU
+#include <utmpx.h>
+/** Use the cpu id to index into the counter array. If it fails then
+use the thread id. */
+template <typename Type, int N>
+struct get_sched_indexer_t : public generic_indexer_t<Type, N> {
+ /** Default constructor/destructor should be OK. */
+
+ /* @return result from sched_getcpu(), the thread id if it fails. */
+ size_t get_rnd_index() const UNIV_NOTHROW {
+
+ size_t cpu = sched_getcpu();
+ if (cpu == -1) {
+ cpu = (lint) os_thread_get_curr_id();
+ }
+
+ return(cpu);
+ }
+};
+#endif /* HAVE_SCHED_GETCPU */
+
+/** Use the thread id to index into the counter array. */
+template <typename Type, int N>
+struct thread_id_indexer_t : public generic_indexer_t<Type, N> {
+ /** Default constructor/destructor should are OK. */
+
+ /* @return a random number, currently we use the thread id. Where
+ thread id is represented as a pointer, it may not work as
+ effectively. */
+ size_t get_rnd_index() const UNIV_NOTHROW {
+ return((lint) os_thread_get_curr_id());
+ }
+};
+
+/** For counters wher N=1 */
+template <typename Type, int N=1>
+struct single_indexer_t {
+ /** Default constructor/destructor should are OK. */
+
+ /** @return offset within m_counter */
+ size_t offset(size_t index) const UNIV_NOTHROW {
+ ut_ad(N == 1);
+ return((CACHE_LINE_SIZE / sizeof(Type)));
+ }
+
+ /* @return 1 */
+ size_t get_rnd_index() const UNIV_NOTHROW {
+ ut_ad(N == 1);
+ return(1);
+ }
+};
+
+/** Class for using fuzzy counters. The counter is not protected by any
+mutex and the results are not guaranteed to be 100% accurate but close
+enough. Creates an array of counters and separates each element by the
+CACHE_LINE_SIZE bytes */
+template <
+ typename Type,
+ int N = IB_N_SLOTS,
+ template<typename, int> class Indexer = thread_id_indexer_t>
+class ib_counter_t {
+public:
+ ib_counter_t() { memset(m_counter, 0x0, sizeof(m_counter)); }
+
+ ~ib_counter_t()
+ {
+ ut_ad(validate());
+ }
+
+ bool validate() UNIV_NOTHROW {
+#ifdef UNIV_DEBUG
+ size_t n = (CACHE_LINE_SIZE / sizeof(Type));
+
+ /* Check that we aren't writing outside our defined bounds. */
+ for (size_t i = 0; i < UT_ARR_SIZE(m_counter); i += n) {
+ for (size_t j = 1; j < n - 1; ++j) {
+ ut_ad(m_counter[i + j] == 0);
+ }
+ }
+#endif /* UNIV_DEBUG */
+ return(true);
+ }
+
+ /** If you can't use a good index id. Increment by 1. */
+ void inc() UNIV_NOTHROW { add(1); }
+
+ /** If you can't use a good index id.
+ * @param n - is the amount to increment */
+ void add(Type n) UNIV_NOTHROW {
+ size_t i = m_policy.offset(m_policy.get_rnd_index());
+
+ ut_ad(i < UT_ARR_SIZE(m_counter));
+
+ m_counter[i] += n;
+ }
+
+ /** Use this if you can use a unique indentifier, saves a
+ call to get_rnd_index().
+ @param i - index into a slot
+ @param n - amount to increment */
+ void add(size_t index, Type n) UNIV_NOTHROW {
+ size_t i = m_policy.offset(index);
+
+ ut_ad(i < UT_ARR_SIZE(m_counter));
+
+ m_counter[i] += n;
+ }
+
+ /** If you can't use a good index id. Decrement by 1. */
+ void dec() UNIV_NOTHROW { sub(1); }
+
+ /** If you can't use a good index id.
+ * @param - n is the amount to decrement */
+ void sub(Type n) UNIV_NOTHROW {
+ size_t i = m_policy.offset(m_policy.get_rnd_index());
+
+ ut_ad(i < UT_ARR_SIZE(m_counter));
+
+ m_counter[i] -= n;
+ }
+
+ /** Use this if you can use a unique indentifier, saves a
+ call to get_rnd_index().
+ @param i - index into a slot
+ @param n - amount to decrement */
+ void sub(size_t index, Type n) UNIV_NOTHROW {
+ size_t i = m_policy.offset(index);
+
+ ut_ad(i < UT_ARR_SIZE(m_counter));
+
+ m_counter[i] -= n;
+ }
+
+ /* @return total value - not 100% accurate, since it is not atomic. */
+ operator Type() const UNIV_NOTHROW {
+ Type total = 0;
+
+ for (size_t i = 0; i < N; ++i) {
+ total += m_counter[m_policy.offset(i)];
+ }
+
+ return(total);
+ }
+
+private:
+ /** Indexer into the array */
+ Indexer<Type, N>m_policy;
+
+ /** Slot 0 is unused. */
+ Type m_counter[(N + 1) * (CACHE_LINE_SIZE / sizeof(Type))];
+};
+
+#endif /* UT0COUNTER_H */
diff --git a/storage/innobase/include/ut0crc32.h b/storage/innobase/include/ut0crc32.h
index 456648001aa..86217692764 100644
--- a/storage/innobase/include/ut0crc32.h
+++ b/storage/innobase/include/ut0crc32.h
@@ -45,4 +45,7 @@ or 0x1EDC6F41 without the high-order bit) */
typedef ib_uint32_t (*ib_ut_crc32_t)(const byte* ptr, ulint len);
extern ib_ut_crc32_t ut_crc32;
+
+extern bool ut_crc32_sse2_enabled;
+
#endif /* ut0crc32_h */
diff --git a/storage/innobase/include/ut0dbg.h b/storage/innobase/include/ut0dbg.h
index e9ad62fb81b..0f2da165da7 100644
--- a/storage/innobase/include/ut0dbg.h
+++ b/storage/innobase/include/ut0dbg.h
@@ -145,10 +145,10 @@ ut_dbg_stop_thread(
#include <sys/resource.h>
/** structure used for recording usage statistics */
-typedef struct speedo_struct {
+struct speedo_t {
struct rusage ru; /*!< getrusage() result */
struct timeval tv; /*!< gettimeofday() result */
-} speedo_t;
+};
/*******************************************************************//**
Resets a speedo (records the current time in it). */
diff --git a/storage/innobase/include/ut0list.h b/storage/innobase/include/ut0list.h
index 57d6bdc33a6..29fc8669ce4 100644
--- a/storage/innobase/include/ut0list.h
+++ b/storage/innobase/include/ut0list.h
@@ -48,9 +48,8 @@ automatically freeing the list node when the item's heap is freed.
#include "mem0mem.h"
-typedef struct ib_list_struct ib_list_t;
-typedef struct ib_list_node_struct ib_list_node_t;
-typedef struct ib_list_helper_struct ib_list_helper_t;
+struct ib_list_t;
+struct ib_list_node_t;
/****************************************************************//**
Create a new list using mem_alloc. Lists created with this function must be
@@ -152,7 +151,7 @@ ib_list_is_empty(
const ib_list_t* list); /* in: list */
/* List. */
-struct ib_list_struct {
+struct ib_list_t {
ib_list_node_t* first; /*!< first node */
ib_list_node_t* last; /*!< last node */
ibool is_heap_list; /*!< TRUE if this list was
@@ -160,7 +159,7 @@ struct ib_list_struct {
};
/* A list node. */
-struct ib_list_node_struct {
+struct ib_list_node_t {
ib_list_node_t* prev; /*!< previous node */
ib_list_node_t* next; /*!< next node */
void* data; /*!< user data */
@@ -169,7 +168,7 @@ struct ib_list_node_struct {
/* Quite often, the only additional piece of data you need is the per-item
memory heap, so we have this generic struct available to use in those
cases. */
-struct ib_list_helper_struct {
+struct ib_list_helper_t {
mem_heap_t* heap; /*!< memory heap */
void* data; /*!< user data */
};
diff --git a/storage/innobase/include/ut0lst.h b/storage/innobase/include/ut0lst.h
index 51c89f15a77..b53e7ade4c1 100644
--- a/storage/innobase/include/ut0lst.h
+++ b/storage/innobase/include/ut0lst.h
@@ -65,8 +65,7 @@ The name of the field in the node struct should be the name given
to the list.
@param TYPE the list node type name */
/* Example:
-typedef struct LRU_node_struct LRU_node_t;
-struct LRU_node_struct {
+struct LRU_node_t {
UT_LIST_NODE_T(LRU_node_t) LRU_list;
...
}
diff --git a/storage/innobase/include/ut0rbt.h b/storage/innobase/include/ut0rbt.h
index e8a4430e76b..e0593e99bde 100644
--- a/storage/innobase/include/ut0rbt.h
+++ b/storage/innobase/include/ut0rbt.h
@@ -44,25 +44,19 @@ Created 2007-03-20 Sunny Bains
#define FALSE 0
#endif
-/* Red black tree typedefs */
-typedef struct ib_rbt_struct ib_rbt_t;
-typedef struct ib_rbt_node_struct ib_rbt_node_t;
-/* FIXME: Iterator is a better name than _bound_ */
-typedef struct ib_rbt_bound_struct ib_rbt_bound_t;
+struct ib_rbt_node_t;
typedef void (*ib_rbt_print_node)(const ib_rbt_node_t* node);
typedef int (*ib_rbt_compare)(const void* p1, const void* p2);
typedef int (*ib_rbt_arg_compare)(const void*, const void* p1, const void* p2);
/** Red black tree color types */
-enum ib_rbt_color_enum {
+enum ib_rbt_color_t {
IB_RBT_RED,
IB_RBT_BLACK
};
-typedef enum ib_rbt_color_enum ib_rbt_color_t;
-
/** Red black tree node */
-struct ib_rbt_node_struct {
+struct ib_rbt_node_t {
ib_rbt_color_t color; /* color of this node */
ib_rbt_node_t* left; /* points left child */
@@ -73,7 +67,7 @@ struct ib_rbt_node_struct {
};
/** Red black tree instance.*/
-struct ib_rbt_struct {
+struct ib_rbt_t {
ib_rbt_node_t* nil; /* Black colored node that is
used as a sentinel. This is
pre-allocated too.*/
@@ -89,12 +83,12 @@ struct ib_rbt_struct {
compare_with_arg; /* Fn. to use for comparison
with argument */
ulint sizeof_value; /* Sizeof the item in bytes */
- const void* cmp_arg; /* Compare func argument */
+ void* cmp_arg; /* Compare func argument */
};
/** The result of searching for a key in the tree, this is useful for
a speedy lookup and insert if key doesn't exist.*/
-struct ib_rbt_bound_struct {
+struct ib_rbt_bound_t {
const ib_rbt_node_t*
last; /* Last node visited */
@@ -142,7 +136,7 @@ rbt_create_arg_cmp(
size_t sizeof_value, /*!< in: size in bytes */
ib_rbt_arg_compare
compare, /*!< in: comparator */
- const void* cmp_arg); /*!< in: compare fn arg */
+ void* cmp_arg); /*!< in: compare fn arg */
/**********************************************************************//**
Delete a node from the red black tree, identified by key */
UNIV_INTERN
diff --git a/storage/innobase/include/ut0ut.h b/storage/innobase/include/ut0ut.h
index 35b8a580e68..1260e0381bf 100644
--- a/storage/innobase/include/ut0ut.h
+++ b/storage/innobase/include/ut0ut.h
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -65,16 +65,16 @@ typedef time_t ib_time_t;
# elif defined(HAVE_FAKE_PAUSE_INSTRUCTION)
# define UT_RELAX_CPU() __asm__ __volatile__ ("rep; nop")
-# elif defined(HAVE_ATOMIC_BUILTINS)
-# define UT_RELAX_CPU() do { \
- volatile lint volatile_var; \
- os_compare_and_swap_lint(&volatile_var, 0, 1); \
- } while (0)
# elif defined(HAVE_WINDOWS_ATOMICS)
/* In the Win32 API, the x86 PAUSE instruction is executed by calling
the YieldProcessor macro defined in WinNT.h. It is a CPU architecture-
independent way by using YieldProcessor. */
# define UT_RELAX_CPU() YieldProcessor()
+# elif defined(HAVE_ATOMIC_BUILTINS)
+# define UT_RELAX_CPU() do { \
+ volatile lint volatile_var; \
+ os_compare_and_swap_lint(&volatile_var, 0, 1); \
+ } while (0)
# else
# define UT_RELAX_CPU() ((void)0) /* avoid warning for an empty statement */
# endif
@@ -345,7 +345,7 @@ ut_print_filename(
#ifndef UNIV_HOTBACKUP
/* Forward declaration of transaction handle */
-struct trx_struct;
+struct trx_t;
/**********************************************************************//**
Outputs a fixed-length string, quoted as an SQL identifier.
@@ -357,7 +357,7 @@ void
ut_print_name(
/*==========*/
FILE* f, /*!< in: output stream */
- struct trx_struct*trx, /*!< in: transaction */
+ const trx_t* trx, /*!< in: transaction */
ibool table_id,/*!< in: TRUE=print a table name,
FALSE=print other identifier */
const char* name); /*!< in: name to print */
@@ -372,13 +372,31 @@ void
ut_print_namel(
/*===========*/
FILE* f, /*!< in: output stream */
- struct trx_struct*trx, /*!< in: transaction (NULL=no quotes) */
+ const trx_t* trx, /*!< in: transaction (NULL=no quotes) */
ibool table_id,/*!< in: TRUE=print a table name,
FALSE=print other identifier */
const char* name, /*!< in: name to print */
ulint namelen);/*!< in: length of name */
/**********************************************************************//**
+Formats a table or index name, quoted as an SQL identifier. If the name
+contains a slash '/', the result will contain two identifiers separated by
+a period (.), as in SQL database_name.identifier.
+@return pointer to 'formatted' */
+UNIV_INTERN
+char*
+ut_format_name(
+/*===========*/
+ const char* name, /*!< in: table or index name, must be
+ '\0'-terminated */
+ ibool is_table, /*!< in: if TRUE then 'name' is a table
+ name */
+ char* formatted, /*!< out: formatted result, will be
+ '\0'-terminated */
+ ulint formatted_size);/*!< out: no more than this number of
+ bytes will be written to 'formatted' */
+
+/**********************************************************************//**
Catenate files. */
UNIV_INTERN
void
@@ -442,7 +460,7 @@ UNIV_INTERN
const char*
ut_strerr(
/*======*/
- enum db_err num); /*!< in: error number */
+ dberr_t num); /*!< in: error number */
/****************************************************************
Sort function for ulint arrays. */
diff --git a/storage/innobase/include/ut0vec.h b/storage/innobase/include/ut0vec.h
index f2a5aba8116..432fb348a09 100644
--- a/storage/innobase/include/ut0vec.h
+++ b/storage/innobase/include/ut0vec.h
@@ -29,8 +29,8 @@ Created 4/6/2006 Osku Salerma
#include "univ.i"
#include "mem0mem.h"
-typedef struct ib_alloc_struct ib_alloc_t;
-typedef struct ib_vector_struct ib_vector_t;
+struct ib_alloc_t;
+struct ib_vector_t;
typedef void* (*ib_mem_alloc_t)(
/* out: Pointer to allocated memory */
@@ -64,7 +64,7 @@ freeing it when done with the vector.
/********************************************************************
Create a new vector with the given initial size. */
-
+UNIV_INTERN
ib_vector_t*
ib_vector_create(
/*=============*/
@@ -124,7 +124,7 @@ ib_vector_size(
/********************************************************************
Increase the size of the vector. */
-
+UNIV_INTERN
void
ib_vector_resize(
/*=============*/
@@ -311,7 +311,7 @@ ib_ut_allocator_free(
ib_alloc_t* ib_ut_alloc); /* in: alloc instace to free */
/* Allocator used by ib_vector_t. */
-struct ib_alloc_struct {
+struct ib_alloc_t {
ib_mem_alloc_t mem_malloc; /* For allocating memory */
ib_mem_free_t mem_release; /* For freeing memory */
ib_mem_resize_t mem_resize; /* For resizing memory */
@@ -320,7 +320,7 @@ struct ib_alloc_struct {
};
/* See comment at beginning of file. */
-struct ib_vector_struct {
+struct ib_vector_t {
ib_alloc_t* allocator; /* Allocator, because one size
doesn't fit all */
void* data; /* data elements */
diff --git a/storage/innobase/include/ut0vec.ic b/storage/innobase/include/ut0vec.ic
index 1255caee2d9..f41a85e1d1d 100644
--- a/storage/innobase/include/ut0vec.ic
+++ b/storage/innobase/include/ut0vec.ic
@@ -346,9 +346,10 @@ ib_vector_remove(
ib_vector_t* vec, /*!< in: vector */
const void* elem) /*!< in: value to remove */
{
- void* current;
+ void* current = NULL;
void* next;
ulint i;
+ ulint old_used_count = vec->used;
for (i = 0; i < vec->used; i++) {
current = ib_vector_get(vec, i);
@@ -359,14 +360,14 @@ ib_vector_remove(
}
next = ib_vector_get(vec, i + 1);
- memcpy(current, next, vec->sizeof_value
- * (vec->used - i - 1));
+ memmove(current, next, vec->sizeof_value
+ * (vec->used - i - 1));
+ --vec->used;
+ break;
}
}
- --vec->used;
-
- return(current);
+ return((old_used_count != vec->used) ? current : NULL);
}
/********************************************************************
diff --git a/storage/innobase/include/ut0wqueue.h b/storage/innobase/include/ut0wqueue.h
index ed4e65e4dc6..33385ddf2d4 100644
--- a/storage/innobase/include/ut0wqueue.h
+++ b/storage/innobase/include/ut0wqueue.h
@@ -37,7 +37,7 @@ processing.
#include "os0sync.h"
#include "sync0types.h"
-typedef struct ib_wqueue_struct ib_wqueue_t;
+struct ib_wqueue_t;
/****************************************************************//**
Create a new work queue.
@@ -96,8 +96,8 @@ ib_wqueue_timedwait(
ib_time_t wait_in_usecs); /* in: wait time in micro seconds */
/* Work queue. */
-struct ib_wqueue_struct {
- mutex_t mutex; /*!< mutex protecting everything */
+struct ib_wqueue_t {
+ ib_mutex_t mutex; /*!< mutex protecting everything */
ib_list_t* items; /*!< work item list */
os_event_t event; /*!< event we use to signal additions to list */
};
diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc
index fff59852704..1152152cc77 100644
--- a/storage/innobase/lock/lock0lock.cc
+++ b/storage/innobase/lock/lock0lock.cc
@@ -37,15 +37,17 @@ Created 5/7/1996 Heikki Tuuri
#include "usr0sess.h"
#include "trx0purge.h"
#include "dict0mem.h"
+#include "dict0boot.h"
#include "trx0sys.h"
#include "pars0pars.h" /* pars_complete_graph_for_exec() */
#include "que0que.h" /* que_node_get_parent() */
#include "row0mysql.h" /* row_mysql_handle_errors() */
-#include "row0sel.h" /* sel_node_create(), sel_node_struct */
+#include "row0sel.h" /* sel_node_create(), sel_node_t */
#include "row0types.h" /* sel_node_t */
#include "srv0mon.h"
#include "ut0vec.h"
#include "btr0btr.h"
+#include "dict0boot.h"
/* Restricts the length of search we will do in the waits-for
graph of transactions */
@@ -345,10 +347,7 @@ static const byte lock_strength_matrix[5][5] = {
};
/** Deadlock check context. */
-typedef struct lock_deadlock_ctx_struct lock_deadlock_ctx_t;
-
-/** Deadlock check context. */
-struct lock_deadlock_ctx_struct {
+struct lock_deadlock_ctx_t {
const trx_t* start; /*!< Joining transaction that is
requesting a lock in an incompatible
mode */
@@ -366,10 +365,8 @@ struct lock_deadlock_ctx_struct {
was aborted */
};
-typedef struct lock_stack_struct lock_stack_t;
-
/** DFS visited node information used during deadlock checking. */
-struct lock_stack_struct {
+struct lock_stack_t {
const lock_t* lock; /*!< Current lock */
const lock_t* wait_lock; /*!< Waiting for lock */
unsigned heap_no:16; /*!< heap number if rec lock */
@@ -415,9 +412,10 @@ lock_rec_validate_page(
/* The lock system */
UNIV_INTERN lock_sys_t* lock_sys = NULL;
-/* We store info on the latest deadlock error to this buffer. InnoDB
+/** We store info on the latest deadlock error to this buffer. InnoDB
Monitor will then fetch it and print */
UNIV_INTERN ibool lock_deadlock_found = FALSE;
+/** Only created if !srv_read_only_mode */
static FILE* lock_latest_err_file;
/********************************************************************//**
@@ -502,7 +500,7 @@ lock_check_trx_id_sanity(
dict_index_t* index, /*!< in: index */
const ulint* offsets) /*!< in: rec_get_offsets(rec, index) */
{
- ibool is_ok;
+ bool is_ok;
trx_id_t max_trx_id;
ut_ad(rec_offs_validate(rec, index, offsets));
@@ -520,10 +518,10 @@ lock_check_trx_id_sanity(
/*********************************************************************//**
Checks that a record is seen in a consistent read.
-@return TRUE if sees, or FALSE if an earlier version of the record
+@return true if sees, or false if an earlier version of the record
should be retrieved */
UNIV_INTERN
-ibool
+bool
lock_clust_rec_cons_read_sees(
/*==========================*/
const rec_t* rec, /*!< in: user record which should be read or
@@ -550,14 +548,14 @@ lock_clust_rec_cons_read_sees(
Checks that a non-clustered index record is seen in a consistent read.
NOTE that a non-clustered index page contains so little information on
-its modifications that also in the case FALSE, the present version of
+its modifications that also in the case false, the present version of
rec may be the right, but we must check this from the clustered index
record.
-@return TRUE if certainly sees, or FALSE if an earlier version of the
+@return true if certainly sees, or false if an earlier version of the
clustered index record might be needed */
UNIV_INTERN
-ulint
+bool
lock_sec_rec_cons_read_sees(
/*========================*/
const rec_t* rec, /*!< in: user record which
@@ -574,7 +572,7 @@ lock_sec_rec_cons_read_sees(
if (recv_recovery_is_on()) {
- return(FALSE);
+ return(false);
}
max_trx_id = page_get_max_trx_id(page_align(rec));
@@ -593,12 +591,6 @@ lock_sys_create(
{
ulint lock_sys_sz;
- srv_n_lock_wait_count = 0;
- srv_n_lock_wait_time = 0;
- srv_n_lock_max_wait_time = 0;
- srv_lock_timeout_active = FALSE;
- srv_n_lock_wait_current_count = 0;
-
lock_sys_sz = sizeof(*lock_sys)
+ OS_THREAD_MAX_N * sizeof(srv_slot_t);
@@ -618,12 +610,14 @@ lock_sys_create(
mutex_create(lock_sys_wait_mutex_key,
&lock_sys->wait_mutex, SYNC_LOCK_WAIT_SYS);
- lock_sys->rec_hash = hash_create(n_cells);
+ lock_sys->timeout_event = os_event_create();
- lock_latest_err_file = os_file_create_tmpfile();
- ut_a(lock_latest_err_file);
+ lock_sys->rec_hash = hash_create(n_cells);
- srv_timeout_event = os_event_create(NULL);
+ if (!srv_read_only_mode) {
+ lock_latest_err_file = os_file_create_tmpfile();
+ ut_a(lock_latest_err_file);
+ }
}
/*********************************************************************//**
@@ -862,7 +856,6 @@ lock_reset_lock_and_trx_wait(
ut_ad(lock_mutex_own());
/* Reset the back pointer in trx to this waiting lock request */
-
if (!(lock->type_mode & LOCK_CONV_BY_OTHER)) {
ut_ad(lock->trx->lock.wait_lock == lock);
lock->trx->lock.wait_lock = NULL;
@@ -1760,6 +1753,7 @@ lock_rec_create(
ut_ad(lock_mutex_own());
ut_ad(caller_owns_trx_mutex == trx_mutex_own(trx));
+ ut_ad(dict_index_is_clust(index) || !dict_index_is_online_ddl(index));
/* Non-locking autocommit read-only transactions should not set
any locks. */
@@ -1842,7 +1836,7 @@ DB_SUCCESS_LOCKED_REC; DB_SUCCESS_LOCKED_REC means that
there was a deadlock, but another transaction was chosen as a victim,
and we got the lock immediately: no need to wait then */
static
-enum db_err
+dberr_t
lock_rec_enqueue_waiting(
/*=====================*/
ulint type_mode,/*!< in: lock mode this
@@ -1866,6 +1860,7 @@ lock_rec_enqueue_waiting(
trx_id_t victim_trx_id;
ut_ad(lock_mutex_own());
+ ut_ad(dict_index_is_clust(index) || !dict_index_is_online_ddl(index));
trx = thr_get_trx(thr);
@@ -1994,6 +1989,7 @@ lock_rec_add_to_queue(
ut_ad(lock_mutex_own());
ut_ad(caller_owns_trx_mutex == trx_mutex_own(trx));
+ ut_ad(dict_index_is_clust(index) || !dict_index_is_online_ddl(index));
#ifdef UNIV_DEBUG
switch (type_mode & LOCK_MODE_MASK) {
case LOCK_X:
@@ -2115,6 +2111,7 @@ lock_rec_lock_fast(
ut_ad(mode - (LOCK_MODE_MASK & mode) == LOCK_GAP
|| mode - (LOCK_MODE_MASK & mode) == 0
|| mode - (LOCK_MODE_MASK & mode) == LOCK_REC_NOT_GAP);
+ ut_ad(dict_index_is_clust(index) || !dict_index_is_online_ddl(index));
lock = lock_rec_get_first_on_page(block);
@@ -2161,7 +2158,7 @@ lock, or in the case of a page supremum record, a gap type lock.
@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, DB_LOCK_WAIT, DB_DEADLOCK,
or DB_QUE_THR_SUSPENDED */
static
-enum db_err
+dberr_t
lock_rec_lock_slow(
/*===============*/
ibool impl, /*!< in: if TRUE, no lock is set
@@ -2179,7 +2176,7 @@ lock_rec_lock_slow(
{
trx_t* trx;
lock_t* lock;
- enum db_err err = DB_SUCCESS;
+ dberr_t err = DB_SUCCESS;
ut_ad(lock_mutex_own());
ut_ad((LOCK_MODE_MASK & mode) != LOCK_S
@@ -2191,6 +2188,7 @@ lock_rec_lock_slow(
ut_ad(mode - (LOCK_MODE_MASK & mode) == LOCK_GAP
|| mode - (LOCK_MODE_MASK & mode) == 0
|| mode - (LOCK_MODE_MASK & mode) == LOCK_REC_NOT_GAP);
+ ut_ad(dict_index_is_clust(index) || !dict_index_is_online_ddl(index));
trx = thr_get_trx(thr);
@@ -2258,7 +2256,7 @@ of a page supremum record, a gap type lock.
@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, DB_LOCK_WAIT, DB_DEADLOCK,
or DB_QUE_THR_SUSPENDED */
static
-enum db_err
+dberr_t
lock_rec_lock(
/*==========*/
ibool impl, /*!< in: if TRUE, no lock is set
@@ -2284,6 +2282,7 @@ lock_rec_lock(
ut_ad(mode - (LOCK_MODE_MASK & mode) == LOCK_GAP
|| mode - (LOCK_MODE_MASK & mode) == LOCK_REC_NOT_GAP
|| mode - (LOCK_MODE_MASK & mode) == 0);
+ ut_ad(dict_index_is_clust(index) || !dict_index_is_online_ddl(index));
/* We try a simplified and faster subroutine for the most
common cases */
@@ -3483,11 +3482,13 @@ lock_deadlock_start_print()
/*=======================*/
{
ut_ad(lock_mutex_own());
+ ut_ad(!srv_read_only_mode);
rewind(lock_latest_err_file);
ut_print_timestamp(lock_latest_err_file);
if (srv_print_all_deadlocks) {
+ ut_print_timestamp(stderr);
fprintf(stderr, "InnoDB: transactions deadlock detected, "
"dumping detailed information.\n");
ut_print_timestamp(stderr);
@@ -3502,10 +3503,12 @@ lock_deadlock_fputs(
/*================*/
const char* msg) /*!< in: message to print */
{
- fputs(msg, lock_latest_err_file);
+ if (!srv_read_only_mode) {
+ fputs(msg, lock_latest_err_file);
- if (srv_print_all_deadlocks) {
- fputs(msg, stderr);
+ if (srv_print_all_deadlocks) {
+ fputs(msg, stderr);
+ }
}
}
@@ -3519,24 +3522,21 @@ lock_deadlock_trx_print(
ulint max_query_len) /*!< in: max query length to print,
or 0 to use the default max length */
{
- ulint n_lock_rec;
- ulint n_lock_struct;
- ulint heap_size;
-
ut_ad(lock_mutex_own());
+ ut_ad(!srv_read_only_mode);
- n_lock_rec = lock_number_of_rows_locked(&trx->lock);
- n_lock_struct = UT_LIST_GET_LEN(trx->lock.trx_locks);
- heap_size = mem_heap_get_size(trx->lock.lock_heap);
+ ulint n_rec_locks = lock_number_of_rows_locked(&trx->lock);
+ ulint n_trx_locks = UT_LIST_GET_LEN(trx->lock.trx_locks);
+ ulint heap_size = mem_heap_get_size(trx->lock.lock_heap);
mutex_enter(&trx_sys->mutex);
trx_print_low(lock_latest_err_file, trx, max_query_len,
- n_lock_rec, n_lock_struct, heap_size);
+ n_rec_locks, n_trx_locks, heap_size);
if (srv_print_all_deadlocks) {
trx_print_low(stderr, trx, max_query_len,
- n_lock_rec, n_lock_struct, heap_size);
+ n_rec_locks, n_trx_locks, heap_size);
}
mutex_exit(&trx_sys->mutex);
@@ -3551,6 +3551,7 @@ lock_deadlock_lock_print(
const lock_t* lock) /*!< in: record or table type lock */
{
ut_ad(lock_mutex_own());
+ ut_ad(!srv_read_only_mode);
if (lock_get_type_low(lock) == LOCK_REC) {
lock_rec_print(lock_latest_err_file, lock);
@@ -3673,6 +3674,7 @@ lock_deadlock_notify(
deadlock */
{
ut_ad(lock_mutex_own());
+ ut_ad(!srv_read_only_mode);
lock_deadlock_start_print();
@@ -3692,9 +3694,15 @@ lock_deadlock_notify(
lock_deadlock_lock_print(lock);
- lock_deadlock_fputs("*** (2) WAITING FOR THIS LOCK TO BE GRANTED:\n");
+ /* It is possible that the joining transaction was granted its
+ lock when we rolled back some other waiting transaction. */
- lock_deadlock_lock_print(ctx->start->lock.wait_lock);
+ if (ctx->start->lock.wait_lock != 0) {
+ lock_deadlock_fputs(
+ "*** (2) WAITING FOR THIS LOCK TO BE GRANTED:\n");
+
+ lock_deadlock_lock_print(ctx->start->lock.wait_lock);
+ }
#ifdef UNIV_DEBUG
if (lock_print_waits) {
@@ -3713,6 +3721,7 @@ lock_deadlock_select_victim(
const lock_deadlock_ctx_t* ctx) /*!< in: deadlock context */
{
ut_ad(lock_mutex_own());
+ ut_ad(ctx->start->lock.wait_lock != 0);
ut_ad(ctx->wait_lock->trx != ctx->start);
if (trx_weight_ge(ctx->wait_lock->trx, ctx->start)) {
@@ -3738,8 +3747,10 @@ lock_deadlock_check(
{
ut_ad(lock_mutex_own());
- /* If it is the joining transaction wait lock. */
- if (lock == ctx->start->lock.wait_lock) {
+ /* If it is the joining transaction wait lock or the joining
+ transaction was granted its lock due to deadlock detection. */
+ if (lock == ctx->start->lock.wait_lock
+ || ctx->start->lock.wait_lock == NULL) {
; /* Skip */
} else if (lock == ctx->wait_lock) {
@@ -3820,7 +3831,8 @@ lock_deadlock_push(
}
/********************************************************************//**
-Looks iteratively for a deadlock.
+Looks iteratively for a deadlock. Note: the joining transaction may
+have been granted its lock by the deadlock checks.
@return 0 if no deadlock else the victim transaction id.*/
static
trx_id_t
@@ -3855,7 +3867,9 @@ lock_deadlock_search(
/* Found a cycle. */
- lock_deadlock_notify(ctx, lock);
+ if (!srv_read_only_mode) {
+ lock_deadlock_notify(ctx, lock);
+ }
return(lock_deadlock_select_victim(ctx)->id);
@@ -3926,6 +3940,7 @@ lock_deadlock_joining_trx_print(
const lock_t* lock) /*!< in: lock trx wants */
{
ut_ad(lock_mutex_own());
+ ut_ad(!srv_read_only_mode);
/* If the lock search exceeds the max step
or the max depth, the current trx will be
@@ -4012,7 +4027,9 @@ lock_deadlock_check_and_resolve(
ut_a(trx == ctx.start);
ut_a(victim_trx_id == trx->id);
- lock_deadlock_joining_trx_print(trx, lock);
+ if (!srv_read_only_mode) {
+ lock_deadlock_joining_trx_print(trx, lock);
+ }
MONITOR_INC(MONITOR_DEADLOCK);
@@ -4248,7 +4265,7 @@ DB_SUCCESS; DB_SUCCESS means that there was a deadlock, but another
transaction was chosen as a victim, and we got the lock immediately:
no need to wait then */
static
-ulint
+dberr_t
lock_table_enqueue_waiting(
/*=======================*/
ulint mode, /*!< in: lock mode this transaction is
@@ -4378,7 +4395,7 @@ Locks the specified database table in the mode given. If the lock cannot
be granted immediately, the query thread is put to wait.
@return DB_SUCCESS, DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */
UNIV_INTERN
-ulint
+dberr_t
lock_table(
/*=======*/
ulint flags, /*!< in: if BTR_NO_LOCKING_FLAG bit is set,
@@ -4389,7 +4406,7 @@ lock_table(
que_thr_t* thr) /*!< in: query thread */
{
trx_t* trx;
- ulint err;
+ dberr_t err;
const lock_t* wait_for;
ut_ad(table && thr);
@@ -4615,11 +4632,38 @@ lock_release(
lock = UT_LIST_GET_LAST(trx->lock.trx_locks)) {
if (lock_get_type_low(lock) == LOCK_REC) {
- lock_rec_dequeue_from_page(lock);
+#ifdef UNIV_DEBUG
+ /* Check if the transcation locked a record
+ in a system table in X mode. It should have set
+ the dict_op code correctly if it did. */
+ if (lock->index->table->id < DICT_HDR_FIRST_ID
+ && lock_get_mode(lock) == LOCK_X) {
+
+ ut_ad(lock_get_mode(lock) != LOCK_IX);
+ ut_ad(trx->dict_operation != TRX_DICT_OP_NONE);
+ }
+#endif /* UNIV_DEBUG */
+
+ lock_rec_dequeue_from_page(lock);
} else {
+ dict_table_t* table;
+
+ table = lock->un_member.tab_lock.table;
+#ifdef UNIV_DEBUG
ut_ad(lock_get_type_low(lock) & LOCK_TABLE);
+ /* Check if the transcation locked a system table
+ in IX mode. It should have set the dict_op code
+ correctly if it did. */
+ if (table->id < DICT_HDR_FIRST_ID
+ && (lock_get_mode(lock) == LOCK_X
+ || lock_get_mode(lock) == LOCK_IX)) {
+
+ ut_ad(trx->dict_operation != TRX_DICT_OP_NONE);
+ }
+#endif /* UNIV_DEBUG */
+
if (lock_get_mode(lock) != LOCK_IS
&& trx->undo_no != 0) {
@@ -4627,8 +4671,7 @@ lock_release(
block the use of the MySQL query cache for
all currently active transactions. */
- lock->un_member.tab_lock.table
- ->query_cache_inv_trx_id = max_trx_id;
+ table->query_cache_inv_trx_id = max_trx_id;
}
lock_table_dequeue(lock);
@@ -5104,7 +5147,9 @@ lock_print_info_summary(
"LATEST DETECTED DEADLOCK\n"
"------------------------\n", file);
- ut_copy_file(file, lock_latest_err_file);
+ if (!srv_read_only_mode) {
+ ut_copy_file(file, lock_latest_err_file);
+ }
}
fputs("------------\n"
@@ -5130,6 +5175,10 @@ lock_print_info_summary(
/* Should never be in this state while the system is running. */
ut_error;
+ case PURGE_STATE_DISABLED:
+ fprintf(file, "disabled");
+ break;
+
case PURGE_STATE_RUN:
fprintf(file, "running");
/* Check if it is waiting for more data to arrive. */
@@ -5463,6 +5512,8 @@ lock_rec_queue_validate(
ut_ad(rec_offs_validate(rec, index, offsets));
ut_ad(!page_rec_is_comp(rec) == !rec_offs_comp(offsets));
ut_ad(lock_mutex_own() == locked_lock_trx_sys);
+ ut_ad(!index || dict_index_is_clust(index)
+ || !dict_index_is_online_ddl(index));
heap_no = page_rec_get_heap_no(rec);
@@ -5739,20 +5790,26 @@ lock_rec_block_validate(
If the lock exists in lock_rec_validate_page() we assert
!block->page.file_page_was_freed. */
+ buf_block_t* block;
mtr_t mtr;
- mtr_start(&mtr);
+ /* Make sure that the tablespace is not deleted while we are
+ trying to access the page. */
+ if (!fil_inc_pending_ops(space)) {
+ mtr_start(&mtr);
+ block = buf_page_get_gen(
+ space, fil_space_get_zip_size(space),
+ page_no, RW_X_LATCH, NULL,
+ BUF_GET_POSSIBLY_FREED,
+ __FILE__, __LINE__, &mtr);
- buf_block_t* block = buf_page_get_gen(
- space, fil_space_get_zip_size(space),
- page_no, RW_X_LATCH, NULL,
- BUF_GET_POSSIBLY_FREED,
- __FILE__, __LINE__, &mtr);
+ buf_block_dbg_add_level(block, SYNC_NO_ORDER_CHECK);
- buf_block_dbg_add_level(block, SYNC_NO_ORDER_CHECK);
+ ut_ad(lock_rec_validate_page(block));
+ mtr_commit(&mtr);
- ut_ad(lock_rec_validate_page(block));
- mtr_commit(&mtr);
+ fil_decr_pending_ops(space);
+ }
}
/*********************************************************************//**
@@ -5810,7 +5867,7 @@ the query thread to the lock wait state and inserts a waiting request
for a gap x-lock to the lock queue.
@return DB_SUCCESS, DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */
UNIV_INTERN
-ulint
+dberr_t
lock_rec_insert_check_and_lock(
/*===========================*/
ulint flags, /*!< in: if BTR_NO_LOCKING_FLAG bit is
@@ -5828,10 +5885,13 @@ lock_rec_insert_check_and_lock(
const rec_t* next_rec;
trx_t* trx;
lock_t* lock;
- ulint err;
+ dberr_t err;
ulint next_rec_heap_no;
ut_ad(block->frame == page_align(rec));
+ ut_ad(!dict_index_is_online_ddl(index)
+ || dict_index_is_clust(index)
+ || (flags & BTR_CREATE_FLAG));
if (flags & BTR_NO_LOCKING_FLAG) {
@@ -5848,11 +5908,9 @@ lock_rec_insert_check_and_lock(
to hold trx->mutex here. */
/* When inserting a record into an index, the table must be at
- least IX-locked or we must be building an index, in which case
- the table must be at least S-locked. */
- ut_ad(lock_table_has(trx, index->table, LOCK_IX)
- || (*index->name == TEMP_INDEX_PREFIX
- && lock_table_has(trx, index->table, LOCK_S)));
+ least IX-locked. When we are building an index, we would pass
+ BTR_NO_LOCKING_FLAG and skip the locking altogether. */
+ ut_ad(lock_table_has(trx, index->table, LOCK_IX));
lock = lock_rec_get_first(block, next_rec_heap_no);
@@ -5916,6 +5974,9 @@ lock_rec_insert_check_and_lock(
page_update_max_trx_id(block,
buf_block_get_page_zip(block),
trx->id, mtr);
+ default:
+ /* We only care about the two return values. */
+ break;
}
#ifdef UNIV_DEBUG
@@ -5965,6 +6026,7 @@ lock_rec_convert_impl_to_expl(
this transaction. The transaction may have been
committed a long time ago. */
} else {
+ ut_ad(!dict_index_is_online_ddl(index));
trx_id = lock_sec_rec_some_has_impl(rec, index, offsets);
/* The transaction can be committed before the
trx_is_active(trx_id, NULL) check below, because we are not
@@ -6001,7 +6063,8 @@ lock_rec_convert_impl_to_expl(
(LOCK_X | LOCK_REC_NOT_GAP), block,
heap_no, impl_trx)) {
- type_mode |= (LOCK_WAIT | LOCK_CONV_BY_OTHER);
+ type_mode |= (LOCK_WAIT
+ | LOCK_CONV_BY_OTHER);
}
lock_rec_add_to_queue(
@@ -6022,7 +6085,7 @@ lock wait state and inserts a waiting request for a record x-lock to the
lock queue.
@return DB_SUCCESS, DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */
UNIV_INTERN
-ulint
+dberr_t
lock_clust_rec_modify_check_and_lock(
/*=================================*/
ulint flags, /*!< in: if BTR_NO_LOCKING_FLAG
@@ -6034,7 +6097,7 @@ lock_clust_rec_modify_check_and_lock(
const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */
que_thr_t* thr) /*!< in: query thread */
{
- ulint err;
+ dberr_t err;
ulint heap_no;
ut_ad(rec_offs_validate(rec, index, offsets));
@@ -6080,7 +6143,7 @@ Checks if locks of other transactions prevent an immediate modify (delete
mark or delete unmark) of a secondary index record.
@return DB_SUCCESS, DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */
UNIV_INTERN
-ulint
+dberr_t
lock_sec_rec_modify_check_and_lock(
/*===============================*/
ulint flags, /*!< in: if BTR_NO_LOCKING_FLAG
@@ -6092,13 +6155,15 @@ lock_sec_rec_modify_check_and_lock(
clustered index record first: see the
comment below */
dict_index_t* index, /*!< in: secondary index */
- que_thr_t* thr, /*!< in: query thread */
+ que_thr_t* thr, /*!< in: query thread
+ (can be NULL if BTR_NO_LOCKING_FLAG) */
mtr_t* mtr) /*!< in/out: mini-transaction */
{
- ulint err;
+ dberr_t err;
ulint heap_no;
ut_ad(!dict_index_is_clust(index));
+ ut_ad(!dict_index_is_online_ddl(index) || (flags & BTR_CREATE_FLAG));
ut_ad(block->frame == page_align(rec));
if (flags & BTR_NO_LOCKING_FLAG) {
@@ -6163,7 +6228,7 @@ secondary index record.
@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, DB_LOCK_WAIT, DB_DEADLOCK,
or DB_QUE_THR_SUSPENDED */
UNIV_INTERN
-enum db_err
+dberr_t
lock_sec_rec_read_check_and_lock(
/*=============================*/
ulint flags, /*!< in: if BTR_NO_LOCKING_FLAG
@@ -6184,10 +6249,11 @@ lock_sec_rec_read_check_and_lock(
LOCK_REC_NOT_GAP */
que_thr_t* thr) /*!< in: query thread */
{
- enum db_err err;
- ulint heap_no;
+ dberr_t err;
+ ulint heap_no;
ut_ad(!dict_index_is_clust(index));
+ ut_ad(!dict_index_is_online_ddl(index));
ut_ad(block->frame == page_align(rec));
ut_ad(page_rec_is_user_rec(rec) || page_rec_is_supremum(rec));
ut_ad(rec_offs_validate(rec, index, offsets));
@@ -6240,7 +6306,7 @@ lock on the record.
@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, DB_LOCK_WAIT, DB_DEADLOCK,
or DB_QUE_THR_SUSPENDED */
UNIV_INTERN
-enum db_err
+dberr_t
lock_clust_rec_read_check_and_lock(
/*===============================*/
ulint flags, /*!< in: if BTR_NO_LOCKING_FLAG
@@ -6261,8 +6327,8 @@ lock_clust_rec_read_check_and_lock(
LOCK_REC_NOT_GAP */
que_thr_t* thr) /*!< in: query thread */
{
- enum db_err err;
- ulint heap_no;
+ dberr_t err;
+ ulint heap_no;
ut_ad(dict_index_is_clust(index));
ut_ad(block->frame == page_align(rec));
@@ -6290,7 +6356,8 @@ lock_clust_rec_read_check_and_lock(
ut_ad(mode != LOCK_S
|| lock_table_has(thr_get_trx(thr), index->table, LOCK_IS));
- err = lock_rec_lock(FALSE, mode | gap_mode, block, heap_no, index, thr);
+ err = lock_rec_lock(FALSE, mode | gap_mode,
+ block, heap_no, index, thr);
MONITOR_INC(MONITOR_NUM_RECLOCK_REQ);
@@ -6311,7 +6378,7 @@ lock_clust_rec_read_check_and_lock() that does not require the parameter
"offsets".
@return DB_SUCCESS, DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */
UNIV_INTERN
-ulint
+dberr_t
lock_clust_rec_read_check_and_lock_alt(
/*===================================*/
ulint flags, /*!< in: if BTR_NO_LOCKING_FLAG
@@ -6334,7 +6401,7 @@ lock_clust_rec_read_check_and_lock_alt(
mem_heap_t* tmp_heap = NULL;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
ulint* offsets = offsets_;
- ulint err;
+ dberr_t err;
rec_offs_init(offsets_);
offsets = rec_get_offsets(rec, index, offsets,
@@ -6529,6 +6596,8 @@ lock_get_table(
{
switch (lock_get_type_low(lock)) {
case LOCK_REC:
+ ut_ad(dict_index_is_clust(lock->index)
+ || !dict_index_is_online_ddl(lock->index));
return(lock->index->table);
case LOCK_TABLE:
return(lock->un_member.tab_lock.table);
@@ -6581,6 +6650,8 @@ lock_rec_get_index(
const lock_t* lock) /*!< in: lock */
{
ut_a(lock_get_type_low(lock) == LOCK_REC);
+ ut_ad(dict_index_is_clust(lock->index)
+ || !dict_index_is_online_ddl(lock->index));
return(lock->index);
}
@@ -6596,6 +6667,8 @@ lock_rec_get_index_name(
const lock_t* lock) /*!< in: lock */
{
ut_a(lock_get_type_low(lock) == LOCK_REC);
+ ut_ad(dict_index_is_clust(lock->index)
+ || !dict_index_is_online_ddl(lock->index));
return(lock->index->name);
}
@@ -6717,10 +6790,14 @@ lock_trx_release_locks(
{
assert_trx_in_list(trx);
- if (UNIV_UNLIKELY(trx_state_eq(trx, TRX_STATE_PREPARED))) {
+ if (trx_state_eq(trx, TRX_STATE_PREPARED)) {
mutex_enter(&trx_sys->mutex);
ut_a(trx_sys->n_prepared_trx > 0);
trx_sys->n_prepared_trx--;
+ if (trx->is_recovered) {
+ ut_a(trx_sys->n_prepared_recovered_trx > 0);
+ trx_sys->n_prepared_recovered_trx--;
+ }
mutex_exit(&trx_sys->mutex);
} else {
ut_ad(trx_state_eq(trx, TRX_STATE_ACTIVE));
@@ -6775,12 +6852,12 @@ was selected as a deadlock victim, or if it has to wait then cancel
the wait lock.
@return DB_DEADLOCK, DB_LOCK_WAIT or DB_SUCCESS */
UNIV_INTERN
-enum db_err
+dberr_t
lock_trx_handle_wait(
/*=================*/
trx_t* trx) /*!< in/out: trx lock state */
{
- enum db_err err;
+ dberr_t err;
lock_mutex_enter();
@@ -6861,6 +6938,8 @@ lock_table_locks_lookup(
ut_a(lock->trx == trx);
if (lock_get_type_low(lock) == LOCK_REC) {
+ ut_ad(!dict_index_is_online_ddl(lock->index)
+ || dict_index_is_clust(lock->index));
if (lock->index->table == table) {
return(lock);
}
@@ -6889,18 +6968,89 @@ lock_table_has_locks(
lock_mutex_enter();
+ has_locks = UT_LIST_GET_LEN(table->locks) > 0 || table->n_rec_locks > 0;
+
#ifdef UNIV_DEBUG
- mutex_enter(&trx_sys->mutex);
+ if (!has_locks) {
+ mutex_enter(&trx_sys->mutex);
- ut_ad(lock_table_locks_lookup(table, &trx_sys->rw_trx_list) == NULL);
- ut_ad(lock_table_locks_lookup(table, &trx_sys->ro_trx_list) == NULL);
+ ut_ad(!lock_table_locks_lookup(table, &trx_sys->rw_trx_list));
+ ut_ad(!lock_table_locks_lookup(table, &trx_sys->ro_trx_list));
- mutex_exit(&trx_sys->mutex);
+ mutex_exit(&trx_sys->mutex);
+ }
#endif /* UNIV_DEBUG */
- has_locks = UT_LIST_GET_LEN(table->locks) > 0 || table->n_rec_locks > 0;
-
lock_mutex_exit();
return(has_locks);
}
+
+#ifdef UNIV_DEBUG
+/*******************************************************************//**
+Check if the transaction holds any locks on the sys tables
+or its records.
+@return the strongest lock found on any sys table or 0 for none */
+UNIV_INTERN
+const lock_t*
+lock_trx_has_sys_table_locks(
+/*=========================*/
+ const trx_t* trx) /*!< in: transaction to check */
+{
+ lint i;
+ const lock_t* strongest_lock = 0;
+ lock_mode strongest = LOCK_NONE;
+
+ lock_mutex_enter();
+
+ /* Find a valid mode. Note: ib_vector_size() can be 0. */
+ for (i = ib_vector_size(trx->lock.table_locks) - 1; i >= 0; --i) {
+ const lock_t* lock;
+
+ lock = *static_cast<const lock_t**>(
+ ib_vector_get(trx->lock.table_locks, i));
+
+ if (lock != NULL
+ && dict_is_sys_table(lock->un_member.tab_lock.table->id)) {
+
+ strongest = lock_get_mode(lock);
+ ut_ad(strongest != LOCK_NONE);
+ strongest_lock = lock;
+ break;
+ }
+ }
+
+ if (strongest == LOCK_NONE) {
+ lock_mutex_exit();
+ return(NULL);
+ }
+
+ for (/* No op */; i >= 0; --i) {
+ const lock_t* lock;
+
+ lock = *static_cast<const lock_t**>(
+ ib_vector_get(trx->lock.table_locks, i));
+
+ if (lock == NULL) {
+ continue;
+ }
+
+ ut_ad(trx == lock->trx);
+ ut_ad(lock_get_type_low(lock) & LOCK_TABLE);
+ ut_ad(lock->un_member.tab_lock.table != NULL);
+
+ lock_mode mode = lock_get_mode(lock);
+
+ if (dict_is_sys_table(lock->un_member.tab_lock.table->id)
+ && lock_mode_stronger_or_eq(mode, strongest)) {
+
+ strongest = mode;
+ strongest_lock = lock;
+ }
+ }
+
+ lock_mutex_exit();
+
+ return(strongest_lock);
+}
+#endif /* UNIV_DEBUG */
diff --git a/storage/innobase/lock/lock0wait.cc b/storage/innobase/lock/lock0wait.cc
index 99059f19813..fc355d8bb6d 100644
--- a/storage/innobase/lock/lock0wait.cc
+++ b/storage/innobase/lock/lock0wait.cc
@@ -33,14 +33,6 @@ Created 25/5/2010 Sunny Bains
#include "ha_prototypes.h"
#include "lock0priv.h"
-UNIV_INTERN ibool srv_lock_timeout_active = FALSE;
-UNIV_INTERN ulint srv_n_lock_wait_count = 0;
-UNIV_INTERN ulint srv_n_lock_wait_current_count = 0;
-UNIV_INTERN ib_int64_t srv_n_lock_wait_time = 0;
-UNIV_INTERN ulint srv_n_lock_max_wait_time = 0;
-
-UNIV_INTERN os_event_t srv_timeout_event;
-
/*********************************************************************//**
Print the contents of the lock_sys_t::waiting_threads array. */
static
@@ -156,7 +148,7 @@ lock_wait_table_reserve_slot(
slot->thr->slot = slot;
if (slot->event == NULL) {
- slot->event = os_event_create(NULL);
+ slot->event = os_event_create();
ut_a(slot->event);
}
@@ -257,8 +249,8 @@ lock_wait_suspend_thread(
slot = lock_wait_table_reserve_slot(thr, lock_wait_timeout);
if (thr->lock_state == QUE_THR_LOCK_ROW) {
- srv_n_lock_wait_count++;
- srv_n_lock_wait_current_count++;
+ srv_stats.n_lock_wait_count.inc();
+ srv_stats.n_lock_wait_current_count.inc();
if (ut_usectime(&sec, &ms) == -1) {
start_time = -1;
@@ -269,7 +261,7 @@ lock_wait_suspend_thread(
/* Wake the lock timeout monitor thread, if it is suspended */
- os_event_set(srv_timeout_event);
+ os_event_set(lock_sys->timeout_event);
lock_wait_mutex_exit();
trx_mutex_exit(trx);
@@ -282,6 +274,8 @@ lock_wait_suspend_thread(
case RW_S_LATCH:
/* Release foreign key check latch */
row_mysql_unfreeze_data_dictionary(trx);
+
+ DEBUG_SYNC_C("lock_wait_release_s_latch_before_sleep");
break;
default:
/* There should never be a lock wait when the
@@ -341,14 +335,16 @@ lock_wait_suspend_thread(
diff_time = (ulint) (finish_time - start_time);
- srv_n_lock_wait_current_count--;
- srv_n_lock_wait_time = srv_n_lock_wait_time + diff_time;
+ srv_stats.n_lock_wait_current_count.dec();
+ srv_stats.n_lock_wait_time.add(diff_time);
- if (diff_time > srv_n_lock_max_wait_time &&
- /* only update the variable if we successfully
- retrieved the start and finish times. See Bug#36819. */
- start_time != -1 && finish_time != -1) {
- srv_n_lock_max_wait_time = diff_time;
+ /* Only update the variable if we successfully
+ retrieved the start and finish times. See Bug#36819. */
+ if (diff_time > lock_sys->n_lock_max_wait_time
+ && start_time != -1
+ && finish_time != -1) {
+
+ lock_sys->n_lock_max_wait_time = diff_time;
}
}
@@ -463,11 +459,15 @@ DECLARE_THREAD(lock_wait_timeout_thread)(
os_thread_create */
{
ib_int64_t sig_count = 0;
+ os_event_t event = lock_sys->timeout_event;
+
+ ut_ad(!srv_read_only_mode);
#ifdef UNIV_PFS_THREAD
pfs_register_thread(srv_lock_timeout_thread_key);
-#endif
- srv_lock_timeout_active = TRUE;
+#endif /* UNIV_PFS_THREAD */
+
+ lock_sys->timeout_thread_active = true;
do {
srv_slot_t* slot;
@@ -475,7 +475,8 @@ DECLARE_THREAD(lock_wait_timeout_thread)(
/* When someone is waiting for a lock, we wake up every second
and check if a timeout has passed for a lock wait */
- os_event_wait_time_low(srv_timeout_event, 1000000, sig_count);
+ os_event_wait_time_low(event, 1000000, sig_count);
+ sig_count = os_event_reset(event);
if (srv_shutdown_state >= SRV_SHUTDOWN_CLEANUP) {
break;
@@ -500,13 +501,13 @@ DECLARE_THREAD(lock_wait_timeout_thread)(
}
}
- sig_count = os_event_reset(srv_timeout_event);
+ sig_count = os_event_reset(event);
lock_wait_mutex_exit();
} while (srv_shutdown_state < SRV_SHUTDOWN_CLEANUP);
- srv_lock_timeout_active = FALSE;
+ lock_sys->timeout_thread_active = false;
/* We count the number of threads in os_thread_exit(). A created
thread should always use that to exit and not use return() to exit. */
diff --git a/storage/innobase/log/log0log.cc b/storage/innobase/log/log0log.cc
index 5e4a9dcf515..b6909f4771a 100644
--- a/storage/innobase/log/log0log.cc
+++ b/storage/innobase/log/log0log.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2012, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2009, Google Inc.
Portions of this file contain modifications contributed and copyrighted by
@@ -37,7 +37,6 @@ Created 12/9/1995 Heikki Tuuri
#endif
#ifndef UNIV_HOTBACKUP
-#include "ha_prototypes.h"
#include "mem0mem.h"
#include "buf0buf.h"
#include "buf0flu.h"
@@ -49,6 +48,7 @@ Created 12/9/1995 Heikki Tuuri
#include "srv0start.h"
#include "trx0sys.h"
#include "trx0trx.h"
+#include "ha_prototypes.h"
#include "srv0mon.h"
/*
@@ -223,7 +223,7 @@ loop:
log_buffer_flush_to_disk();
- srv_log_waits++;
+ srv_stats.log_waits.inc();
ut_ad(++count < 50);
@@ -328,7 +328,7 @@ part_loop:
goto part_loop;
}
- srv_log_write_requests++;
+ srv_stats.log_write_requests.inc();
}
/************************************************************//**
@@ -748,9 +748,6 @@ log_init(void)
log_sys->lsn = LOG_START_LSN;
- MONITOR_SET(MONITOR_LSN_CHECKPOINT_AGE,
- log_sys->lsn - log_sys->last_checkpoint_lsn);
-
ut_a(LOG_BUFFER_SIZE >= 16 * OS_FILE_LOG_BLOCK_SIZE);
ut_a(LOG_BUFFER_SIZE >= 4 * UNIV_PAGE_SIZE);
@@ -784,11 +781,11 @@ log_init(void)
log_sys->n_pending_writes = 0;
- log_sys->no_flush_event = os_event_create(NULL);
+ log_sys->no_flush_event = os_event_create();
os_event_set(log_sys->no_flush_event);
- log_sys->one_flushed_event = os_event_create(NULL);
+ log_sys->one_flushed_event = os_event_create();
os_event_set(log_sys->one_flushed_event);
@@ -796,7 +793,6 @@ log_init(void)
log_sys->next_checkpoint_no = 0;
log_sys->last_checkpoint_lsn = log_sys->lsn;
- MONITOR_SET(MONITOR_LSN_CHECKPOINT_AGE, 0);
log_sys->n_pending_checkpoint_writes = 0;
@@ -832,7 +828,7 @@ log_init(void)
/* memset(log_sys->archive_buf, '\0', LOG_ARCHIVE_BUF_SIZE); */
- log_sys->archiving_on = os_event_create(NULL);
+ log_sys->archiving_on = os_event_create();
#endif /* UNIV_LOG_ARCHIVE */
/*----------------------------*/
@@ -1163,7 +1159,7 @@ log_group_file_header_flush(
MONITOR_INC(MONITOR_LOG_IO);
- srv_os_log_pending_writes++;
+ srv_stats.os_log_pending_writes.inc();
fil_io(OS_FILE_WRITE | OS_FILE_LOG, TRUE, group->space_id, 0,
(ulint) (dest_offset / UNIV_PAGE_SIZE),
@@ -1171,7 +1167,7 @@ log_group_file_header_flush(
OS_FILE_LOG_BLOCK_SIZE,
buf, group);
- srv_os_log_pending_writes--;
+ srv_stats.os_log_pending_writes.dec();
}
}
@@ -1238,8 +1234,9 @@ loop:
log_group_file_header_flush(group, (ulint)
(next_offset / group->file_size),
start_lsn);
- srv_os_log_written += OS_FILE_LOG_BLOCK_SIZE;
- srv_log_writes++;
+ srv_stats.os_log_written.add(OS_FILE_LOG_BLOCK_SIZE);
+
+ srv_stats.log_writes.inc();
}
if ((next_offset % group->file_size) + len > group->file_size) {
@@ -1289,7 +1286,7 @@ loop:
MONITOR_INC(MONITOR_LOG_IO);
- srv_os_log_pending_writes++;
+ srv_stats.os_log_pending_writes.inc();
ut_a(next_offset / UNIV_PAGE_SIZE <= ULINT_MAX);
@@ -1298,10 +1295,10 @@ loop:
(ulint) (next_offset % UNIV_PAGE_SIZE), write_len, buf,
group);
- srv_os_log_pending_writes--;
+ srv_stats.os_log_pending_writes.dec();
- srv_os_log_written += write_len;
- srv_log_writes++;
+ srv_stats.os_log_written.add(write_len);
+ srv_stats.log_writes.inc();
}
if (write_len < len) {
@@ -1345,6 +1342,8 @@ log_write_up_to(
ib_uint64_t write_lsn;
ib_uint64_t flush_lsn;
+ ut_ad(!srv_read_only_mode);
+
if (recv_no_ibuf_operations) {
/* Recovery is running and no operations on the log files are
allowed yet (the variable name .._no_ibuf_.. is misleading) */
@@ -1560,6 +1559,7 @@ log_buffer_flush_to_disk(void)
{
lsn_t lsn;
+ ut_ad(!srv_read_only_mode);
mutex_enter(&(log_sys->mutex));
lsn = log_sys->lsn;
@@ -1626,15 +1626,16 @@ log_flush_margin(void)
Advances the smallest lsn for which there are unflushed dirty blocks in the
buffer pool. NOTE: this function may only be called if the calling thread owns
no synchronization objects!
-@return FALSE if there was a flush batch of the same type running,
+@return false if there was a flush batch of the same type running,
which means that we could not start this flush batch */
static
-ibool
+bool
log_preflush_pool_modified_pages(
/*=============================*/
lsn_t new_oldest) /*!< in: try to advance oldest_modified_lsn
at least to this lsn */
{
+ bool success;
ulint n_pages;
if (recv_recovery_on) {
@@ -1650,13 +1651,12 @@ log_preflush_pool_modified_pages(
recv_apply_hashed_log_recs(TRUE);
}
- n_pages = buf_flush_list(ULINT_MAX, new_oldest);
+ success = buf_flush_list(ULINT_MAX, new_oldest, &n_pages);
buf_flush_wait_batch_end(NULL, BUF_FLUSH_LIST);
- if (n_pages == ULINT_UNDEFINED) {
-
- return(FALSE);
+ if (!success) {
+ MONITOR_INC(MONITOR_FLUSH_SYNC_WAITS);
}
MONITOR_INC_VALUE_CUMULATIVE(
@@ -1665,7 +1665,7 @@ log_preflush_pool_modified_pages(
MONITOR_FLUSH_SYNC_PAGES,
n_pages);
- return(TRUE);
+ return(success);
}
/******************************************************//**
@@ -1765,6 +1765,7 @@ log_group_checkpoint(
byte* buf;
ulint i;
+ ut_ad(!srv_read_only_mode);
ut_ad(mutex_own(&(log_sys->mutex)));
#if LOG_CHECKPOINT_SIZE > OS_FILE_LOG_BLOCK_SIZE
# error "LOG_CHECKPOINT_SIZE > OS_FILE_LOG_BLOCK_SIZE"
@@ -1952,12 +1953,13 @@ log_groups_write_checkpoint_info(void)
ut_ad(mutex_own(&(log_sys->mutex)));
- group = UT_LIST_GET_FIRST(log_sys->log_groups);
-
- while (group) {
- log_group_checkpoint(group);
+ if (!srv_read_only_mode) {
+ for (group = UT_LIST_GET_FIRST(log_sys->log_groups);
+ group;
+ group = UT_LIST_GET_NEXT(log_groups, group)) {
- group = UT_LIST_GET_NEXT(log_groups, group);
+ log_group_checkpoint(group);
+ }
}
}
@@ -1982,6 +1984,8 @@ log_checkpoint(
{
lsn_t oldest_lsn;
+ ut_ad(!srv_read_only_mode);
+
if (recv_recovery_is_on()) {
recv_apply_hashed_log_recs(TRUE);
}
@@ -2088,38 +2092,6 @@ log_make_checkpoint_at(
}
/****************************************************************//**
-Checks if an asynchronous flushing of dirty pages is required in the
-background. This function is only called from the page cleaner thread.
-@return lsn to which the flushing should happen or LSN_MAX
-if flushing is not required */
-UNIV_INTERN
-lsn_t
-log_async_flush_lsn(void)
-/*=====================*/
-{
- lsn_t age;
- lsn_t oldest_lsn;
- lsn_t new_lsn = LSN_MAX;
-
- mutex_enter(&log_sys->mutex);
-
- oldest_lsn = log_buf_pool_get_oldest_modification();
-
- ut_a(log_sys->lsn >= oldest_lsn);
- age = log_sys->lsn - oldest_lsn;
-
- if (age > log_sys->max_modified_age_async) {
- /* An asynchronous preflush is required */
- ut_a(log_sys->lsn >= log_sys->max_modified_age_async);
- new_lsn = log_sys->lsn - log_sys->max_modified_age_async;
- }
-
- mutex_exit(&log_sys->mutex);
-
- return(new_lsn);
-}
-
-/****************************************************************//**
Tries to establish a big enough margin of free space in the log groups, such
that a new log entry can be catenated without an immediate need for a
checkpoint. NOTE: this function may only be called if the calling thread
@@ -2136,7 +2108,7 @@ log_checkpoint_margin(void)
lsn_t oldest_lsn;
ibool checkpoint_sync;
ibool do_checkpoint;
- ibool success;
+ bool success;
loop:
checkpoint_sync = FALSE;
do_checkpoint = FALSE;
@@ -3131,10 +3103,8 @@ logs_empty_and_mark_files_at_shutdown(void)
const char* thread_name;
ibool server_busy;
- if (srv_print_verbose_log) {
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Starting shutdown...\n");
- }
+ ib_logf(IB_LOG_LEVEL_INFO, "Starting shutdown...");
+
/* Wait until the master thread and all other operations are idle: our
algorithm only works if the server is idle at shutdown */
@@ -3155,9 +3125,8 @@ loop:
threads check will be done later. */
if (srv_print_verbose_log && count > 600) {
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Waiting for %s to exit\n",
- thread_name);
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Waiting for %s to exit", thread_name);
count = 0;
}
@@ -3174,9 +3143,8 @@ loop:
if (total_trx > 0) {
if (srv_print_verbose_log && count > 600) {
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Waiting for %lu "
- "active transactions to finish\n",
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Waiting for %lu active transactions to finish",
(ulong) total_trx);
count = 0;
@@ -3221,9 +3189,9 @@ loop:
break;
}
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Waiting for %s "
- "to be suspended\n", thread_type);
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Waiting for %s to be suspended",
+ thread_type);
count = 0;
}
@@ -3239,10 +3207,9 @@ loop:
++count;
os_thread_sleep(100000);
if (srv_print_verbose_log && count > 600) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Waiting for page_cleaner to "
- "finish flushing of buffer pool\n");
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Waiting for page_cleaner to "
+ "finish flushing of buffer pool");
count = 0;
}
}
@@ -3257,10 +3224,9 @@ loop:
if (server_busy) {
if (srv_print_verbose_log && count > 600) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Pending checkpoint_writes: %lu\n"
- " InnoDB: Pending log flush writes: %lu\n",
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Pending checkpoint_writes: %lu. "
+ "Pending log flush writes: %lu",
(ulong) log_sys->n_pending_checkpoint_writes,
(ulong) log_sys->n_pending_writes);
count = 0;
@@ -3272,9 +3238,8 @@ loop:
if (pending_io) {
if (srv_print_verbose_log && count > 600) {
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Waiting for %lu buffer page "
- "I/Os to complete\n",
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Waiting for %lu buffer page I/Os to complete",
(ulong) pending_io);
count = 0;
}
@@ -3286,41 +3251,50 @@ loop:
log_archive_all();
#endif /* UNIV_LOG_ARCHIVE */
if (srv_fast_shutdown == 2) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: MySQL has requested a very fast shutdown"
- " without flushing "
- "the InnoDB buffer pool to data files."
- " At the next mysqld startup "
- "InnoDB will do a crash recovery!\n");
-
- /* In this fastest shutdown we do not flush the buffer pool:
- it is essentially a 'crash' of the InnoDB server. Make sure
- that the log is all flushed to disk, so that we can recover
- all committed transactions in a crash recovery. We must not
- write the lsn stamps to the data files, since at a startup
- InnoDB deduces from the stamps if the previous shutdown was
- clean. */
-
- log_buffer_flush_to_disk();
-
- /* Check that the background threads stay suspended */
- thread_name = srv_any_background_threads_are_active();
- if (thread_name != NULL) {
- fprintf(stderr,
- "InnoDB: Warning: background thread %s"
- " woke up during shutdown\n", thread_name);
- goto loop;
+ if (!srv_read_only_mode) {
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "MySQL has requested a very fast shutdown "
+ "without flushing the InnoDB buffer pool to "
+ "data files. At the next mysqld startup "
+ "InnoDB will do a crash recovery!");
+
+ /* In this fastest shutdown we do not flush the
+ buffer pool:
+
+ it is essentially a 'crash' of the InnoDB server.
+ Make sure that the log is all flushed to disk, so
+ that we can recover all committed transactions in
+ a crash recovery. We must not write the lsn stamps
+ to the data files, since at a startup InnoDB deduces
+ from the stamps if the previous shutdown was clean. */
+
+ log_buffer_flush_to_disk();
+
+ /* Check that the background threads stay suspended */
+ thread_name = srv_any_background_threads_are_active();
+
+ if (thread_name != NULL) {
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Background thread %s woke up "
+ "during shutdown", thread_name);
+ goto loop;
+ }
}
srv_shutdown_state = SRV_SHUTDOWN_LAST_PHASE;
+
fil_close_all_files();
+
thread_name = srv_any_background_threads_are_active();
+
ut_a(!thread_name);
+
return;
}
- log_make_checkpoint_at(LSN_MAX, TRUE);
+ if (!srv_read_only_mode) {
+ log_make_checkpoint_at(LSN_MAX, TRUE);
+ }
mutex_enter(&log_sys->mutex);
@@ -3356,15 +3330,17 @@ loop:
/* Check that the background threads stay suspended */
thread_name = srv_any_background_threads_are_active();
if (thread_name != NULL) {
- fprintf(stderr,
- "InnoDB: Warning: background thread %s"
- " woke up during shutdown\n", thread_name);
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Background thread %s woke up during shutdown",
+ thread_name);
goto loop;
}
- fil_flush_file_spaces(FIL_TABLESPACE);
- fil_flush_file_spaces(FIL_LOG);
+ if (!srv_read_only_mode) {
+ fil_flush_file_spaces(FIL_TABLESPACE);
+ fil_flush_file_spaces(FIL_LOG);
+ }
/* The call fil_write_flushed_lsn_to_data_files() will pass the buffer
pool: therefore it is essential that the buffer pool has been
@@ -3374,9 +3350,8 @@ loop:
if (!buf_all_freed()) {
if (srv_print_verbose_log && count > 600) {
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Waiting for dirty buffer "
- "pages to be flushed\n");
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Waiting for dirty buffer pages to be flushed");
count = 0;
}
@@ -3386,31 +3361,38 @@ loop:
srv_shutdown_state = SRV_SHUTDOWN_LAST_PHASE;
/* Make some checks that the server really is quiet */
- ut_a(srv_get_active_thread_type() == SRV_NONE);
+ srv_thread_type type = srv_get_active_thread_type();
+ ut_a(type == SRV_NONE);
+
+ bool freed = buf_all_freed();
+ ut_a(freed);
- ut_a(buf_all_freed());
ut_a(lsn == log_sys->lsn);
if (lsn < srv_start_lsn) {
- fprintf(stderr,
- "InnoDB: Error: log sequence number"
- " at shutdown " LSN_PF "\n"
- "InnoDB: is lower than at startup " LSN_PF "!\n",
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Log sequence number at shutdown " LSN_PF " "
+ "is lower than at startup " LSN_PF "!",
lsn, srv_start_lsn);
}
srv_shutdown_lsn = lsn;
- fil_write_flushed_lsn_to_data_files(lsn, arch_log_no);
+ if (!srv_read_only_mode) {
+ fil_write_flushed_lsn_to_data_files(lsn, arch_log_no);
- fil_flush_file_spaces(FIL_TABLESPACE);
+ fil_flush_file_spaces(FIL_TABLESPACE);
+ }
fil_close_all_files();
/* Make some checks that the server really is quiet */
- ut_a(srv_get_active_thread_type() == SRV_NONE);
+ type = srv_get_active_thread_type();
+ ut_a(type == SRV_NONE);
+
+ freed = buf_all_freed();
+ ut_a(freed);
- ut_a(buf_all_freed());
ut_a(lsn == log_sys->lsn);
}
@@ -3544,7 +3526,7 @@ log_refresh_stats(void)
log_sys->last_printout_time = time(NULL);
}
-/**********************************************************************
+/********************************************************//**
Closes a log group. */
static
void
@@ -3574,12 +3556,12 @@ log_group_close(
mem_free(group);
}
-/**********************************************************
-Shutdown the log system but do not release all the memory. */
+/********************************************************//**
+Closes all log groups. */
UNIV_INTERN
void
-log_shutdown(void)
-/*==============*/
+log_group_close_all(void)
+/*=====================*/
{
log_group_t* group;
@@ -3593,6 +3575,16 @@ log_shutdown(void)
log_group_close(prev_group);
}
+}
+
+/********************************************************//**
+Shutdown the log system but do not release all the memory. */
+UNIV_INTERN
+void
+log_shutdown(void)
+/*==============*/
+{
+ log_group_close_all();
mem_free(log_sys->buf_ptr);
log_sys->buf_ptr = NULL;
@@ -3610,7 +3602,7 @@ log_shutdown(void)
#ifdef UNIV_LOG_ARCHIVE
rw_lock_free(&log_sys->archive_lock);
- os_event_create(log_sys->archiving_on);
+ os_event_create();
#endif /* UNIV_LOG_ARCHIVE */
#ifdef UNIV_LOG_DEBUG
@@ -3620,7 +3612,7 @@ log_shutdown(void)
recv_sys_close();
}
-/**********************************************************
+/********************************************************//**
Free the log system data structures. */
UNIV_INTERN
void
diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc
index c2b9c152a44..8cefa9e4b70 100644
--- a/storage/innobase/log/log0recv.cc
+++ b/storage/innobase/log/log0recv.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
-Copyright (c) 1997, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1997, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2012, Facebook Inc.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -42,8 +43,6 @@ Created 9/20/1997 Heikki Tuuri
#include "trx0undo.h"
#include "trx0rec.h"
#include "fil0fil.h"
-#include "buf0dblwr.h"
-#include "srv0mon.h"
#ifndef UNIV_HOTBACKUP
# include "buf0rea.h"
# include "srv0srv.h"
@@ -158,6 +157,20 @@ UNIV_INTERN mysql_pfs_key_t trx_rollback_clean_thread_key;
UNIV_INTERN mysql_pfs_key_t recv_sys_mutex_key;
#endif /* UNIV_PFS_MUTEX */
+#ifndef UNIV_HOTBACKUP
+# ifdef UNIV_PFS_THREAD
+UNIV_INTERN mysql_pfs_key_t recv_writer_thread_key;
+# endif /* UNIV_PFS_THREAD */
+
+# ifdef UNIV_PFS_MUTEX
+UNIV_INTERN mysql_pfs_key_t recv_writer_mutex_key;
+# endif /* UNIV_PFS_MUTEX */
+
+/** Flag indicating if recv_writer thread is active. */
+UNIV_INTERN bool recv_writer_thread_active = false;
+UNIV_INTERN os_thread_t recv_writer_thread_handle = 0;
+#endif /* !UNIV_HOTBACKUP */
+
/* prototypes */
#ifndef UNIV_HOTBACKUP
@@ -186,6 +199,11 @@ recv_sys_create(void)
mutex_create(recv_sys_mutex_key, &recv_sys->mutex, SYNC_RECV);
+#ifndef UNIV_HOTBACKUP
+ mutex_create(recv_writer_mutex_key, &recv_sys->writer_mutex,
+ SYNC_LEVEL_VARYING);
+#endif /* !UNIV_HOTBACKUP */
+
recv_sys->heap = NULL;
recv_sys->addr_hash = NULL;
}
@@ -214,6 +232,11 @@ recv_sys_close(void)
mem_free(recv_sys->last_block_buf_start);
}
+#ifndef UNIV_HOTBACKUP
+ ut_ad(!recv_writer_thread_active);
+ mutex_free(&recv_sys->writer_mutex);
+#endif /* !UNIV_HOTBACKUP */
+
mutex_free(&recv_sys->mutex);
mem_free(recv_sys);
@@ -290,6 +313,58 @@ recv_sys_var_init(void)
recv_max_page_lsn = 0;
}
+
+/******************************************************************//**
+recv_writer thread tasked with flushing dirty pages from the buffer
+pools.
+@return a dummy parameter */
+extern "C" UNIV_INTERN
+os_thread_ret_t
+DECLARE_THREAD(recv_writer_thread)(
+/*===============================*/
+ void* arg __attribute__((unused)))
+ /*!< in: a dummy parameter required by
+ os_thread_create */
+{
+ ut_ad(!srv_read_only_mode);
+
+#ifdef UNIV_PFS_THREAD
+ pfs_register_thread(recv_writer_thread_key);
+#endif /* UNIV_PFS_THREAD */
+
+#ifdef UNIV_DEBUG_THREAD_CREATION
+ fprintf(stderr, "InnoDB: recv_writer thread running, id %lu\n",
+ os_thread_pf(os_thread_get_curr_id()));
+#endif /* UNIV_DEBUG_THREAD_CREATION */
+
+ recv_writer_thread_active = true;
+
+ while (srv_shutdown_state == SRV_SHUTDOWN_NONE) {
+
+ os_thread_sleep(100000);
+
+ mutex_enter(&recv_sys->writer_mutex);
+
+ if (!recv_recovery_on) {
+ mutex_exit(&recv_sys->writer_mutex);
+ break;
+ }
+
+ /* Flush pages from end of LRU if required */
+ buf_flush_LRU_tail();
+
+ mutex_exit(&recv_sys->writer_mutex);
+ }
+
+ recv_writer_thread_active = false;
+
+ /* We count the number of threads in os_thread_exit().
+ A created thread should always use that to exit and not
+ use return() to exit. */
+ os_thread_exit(NULL);
+
+ OS_THREAD_DUMMY_RETURN;
+}
#endif /* !UNIV_HOTBACKUP */
/************************************************************
@@ -310,9 +385,7 @@ recv_sys_init(
flush_list during recovery process.
As this initialization is done while holding the buffer pool
mutex we perform it before acquiring recv_sys->mutex. */
-#ifndef UNIV_HOTBACKUP
buf_flush_init_flush_rbt();
-#endif /* !UNIV_HOTBACKUP */
mutex_enter(&(recv_sys->mutex));
@@ -406,6 +479,7 @@ recv_sys_debug_free(void)
}
# endif /* UNIV_LOG_DEBUG */
+# ifdef UNIV_LOG_ARCHIVE
/********************************************************//**
Truncates possible corrupted or extra records from a log group. */
static
@@ -427,7 +501,6 @@ recv_truncate_group(
lsn_t finish_lsn1;
lsn_t finish_lsn2;
lsn_t finish_lsn;
- ulint i;
if (archived_lsn == LSN_MAX) {
/* Checkpoint was taken in the NOARCHIVELOG mode */
@@ -455,11 +528,7 @@ recv_truncate_group(
ut_a(RECV_SCAN_SIZE <= log_sys->buf_size);
- /* Write the log buffer full of zeros */
- for (i = 0; i < RECV_SCAN_SIZE; i++) {
-
- *(log_sys->buf + i) = '\0';
- }
+ memset(log_sys->buf, 0, RECV_SCAN_SIZE);
start_lsn = ut_uint64_align_down(recovered_lsn,
OS_FILE_LOG_BLOCK_SIZE);
@@ -499,11 +568,7 @@ recv_truncate_group(
return;
}
- /* Write the log buffer full of zeros */
- for (i = 0; i < RECV_SCAN_SIZE; i++) {
-
- *(log_sys->buf + i) = '\0';
- }
+ memset(log_sys->buf, 0, RECV_SCAN_SIZE);
start_lsn = end_lsn;
}
@@ -560,6 +625,7 @@ recv_copy_group(
start_lsn = end_lsn;
}
}
+# endif /* UNIV_LOG_ARCHIVE */
/********************************************************//**
Copies a log segment from the most up-to-date log group to the other log
@@ -570,10 +636,12 @@ static
void
recv_synchronize_groups(
/*====================*/
- log_group_t* up_to_date_group) /*!< in: the most up-to-date
+#ifdef UNIV_LOG_ARCHIVE
+ log_group_t* up_to_date_group /*!< in: the most up-to-date
log group */
+#endif
+ )
{
- log_group_t* group;
lsn_t start_lsn;
lsn_t end_lsn;
lsn_t recovered_lsn;
@@ -590,11 +658,17 @@ recv_synchronize_groups(
ut_a(start_lsn != end_lsn);
log_group_read_log_seg(LOG_RECOVER, recv_sys->last_block,
- up_to_date_group, start_lsn, end_lsn);
-
- group = UT_LIST_GET_FIRST(log_sys->log_groups);
+#ifdef UNIV_LOG_ARCHIVE
+ up_to_date_group,
+#else /* UNIV_LOG_ARCHIVE */
+ UT_LIST_GET_FIRST(log_sys->log_groups),
+#endif /* UNIV_LOG_ARCHIVE */
+ start_lsn, end_lsn);
- while (group) {
+ for (log_group_t* group = UT_LIST_GET_FIRST(log_sys->log_groups);
+ group;
+ group = UT_LIST_GET_NEXT(log_groups, group)) {
+#ifdef UNIV_LOG_ARCHIVE
if (group != up_to_date_group) {
/* Copy log data if needed */
@@ -602,13 +676,11 @@ recv_synchronize_groups(
recv_copy_group(group, up_to_date_group,
recovered_lsn);
}
-
+#endif /* UNIV_LOG_ARCHIVE */
/* Update the fields in the group struct to correspond to
recovered_lsn */
log_group_set_fields(group, recovered_lsn);
-
- group = UT_LIST_GET_NEXT(log_groups, group);
}
/* Copy the checkpoint info to the groups; remember that we have
@@ -661,8 +733,8 @@ recv_check_cp_is_consistent(
/********************************************************//**
Looks for the maximum consistent checkpoint from the log groups.
@return error code or DB_SUCCESS */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
recv_find_max_checkpoint(
/*=====================*/
log_group_t** max_group, /*!< out: max group */
@@ -1154,18 +1226,22 @@ recv_parse_or_apply_log_rec_body(
ptr, end_ptr, block, index, mtr);
}
break;
- case MLOG_PAGE_REORGANIZE: case MLOG_COMP_PAGE_REORGANIZE:
+ case MLOG_PAGE_REORGANIZE:
+ case MLOG_COMP_PAGE_REORGANIZE:
+ case MLOG_ZIP_PAGE_REORGANIZE:
ut_ad(!page || page_type == FIL_PAGE_INDEX);
if (NULL != (ptr = mlog_parse_index(
ptr, end_ptr,
- type == MLOG_COMP_PAGE_REORGANIZE,
+ type != MLOG_PAGE_REORGANIZE,
&index))) {
ut_a(!page
|| (ibool)!!page_is_comp(page)
== dict_table_is_comp(index->table));
- ptr = btr_parse_page_reorganize(ptr, end_ptr, index,
- block, mtr);
+ ptr = btr_parse_page_reorganize(
+ ptr, end_ptr, index,
+ type == MLOG_ZIP_PAGE_REORGANIZE,
+ block, mtr);
}
break;
case MLOG_PAGE_CREATE: case MLOG_COMP_PAGE_CREATE:
@@ -1263,6 +1339,16 @@ recv_parse_or_apply_log_rec_body(
ptr = page_zip_parse_compress(ptr, end_ptr,
page, page_zip);
break;
+ case MLOG_ZIP_PAGE_COMPRESS_NO_DATA:
+ if (NULL != (ptr = mlog_parse_index(
+ ptr, end_ptr, TRUE, &index))) {
+
+ ut_a(!page || ((ibool)!!page_is_comp(page)
+ == dict_table_is_comp(index->table)));
+ ptr = page_zip_parse_compress_no_data(
+ ptr, end_ptr, page, page_zip, index);
+ }
+ break;
default:
ptr = NULL;
recv_sys->found_corrupt_log = TRUE;
@@ -1747,7 +1833,6 @@ recv_apply_hashed_log_recs(
{
recv_addr_t* recv_addr;
ulint i;
- ulint n_pages;
ibool has_printed = FALSE;
mtr_t mtr;
loop:
@@ -1785,11 +1870,11 @@ loop:
if (recv_addr->state == RECV_NOT_PROCESSED) {
if (!has_printed) {
- ut_print_timestamp(stderr);
- fputs(" InnoDB: Starting an"
- " apply batch of log records"
- " to the database...\n"
- "InnoDB: Progress in percents: ",
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Starting an apply batch"
+ " of log records"
+ " to the database...");
+ fputs("InnoDB: Progress in percent: ",
stderr);
has_printed = TRUE;
}
@@ -1846,6 +1931,8 @@ loop:
}
if (!allow_ibuf) {
+ bool success;
+
/* Flush all the file pages to disk and invalidate them in
the buffer pool */
@@ -1853,13 +1940,24 @@ loop:
mutex_exit(&(recv_sys->mutex));
mutex_exit(&(log_sys->mutex));
- n_pages = buf_flush_list(ULINT_MAX, LSN_MAX);
- ut_a(n_pages != ULINT_UNDEFINED);
+ /* Stop the recv_writer thread from issuing any LRU
+ flush batches. */
+ mutex_enter(&recv_sys->writer_mutex);
+
+ /* Wait for any currently run batch to end. */
+ buf_flush_wait_LRU_batch_end();
+
+ success = buf_flush_list(ULINT_MAX, LSN_MAX, NULL);
+
+ ut_a(success);
buf_flush_wait_batch_end(NULL, BUF_FLUSH_LIST);
buf_pool_invalidate();
+ /* Allow batches from recv_writer thread. */
+ mutex_exit(&recv_sys->writer_mutex);
+
mutex_enter(&(log_sys->mutex));
mutex_enter(&(recv_sys->mutex));
ut_d(recv_no_log_write = FALSE);
@@ -1899,9 +1997,10 @@ recv_apply_log_recs_for_backup(void)
block = back_block1;
- fputs("InnoDB: Starting an apply batch of log records"
- " to the database...\n"
- "InnoDB: Progress in percents: ", stderr);
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Starting an apply batch of log records to the database...");
+
+ fputs("InnoDB: Progress in percent: ", stderr);
n_hash_cells = hash_get_n_cells(recv_sys->addr_hash);
@@ -2693,11 +2792,21 @@ recv_scan_log_recs(
if (recv_log_scan_is_startup_type
&& !recv_needed_recovery) {
- fprintf(stderr,
- "InnoDB: Log scan progressed"
- " past the checkpoint lsn " LSN_PF "\n",
- recv_sys->scanned_lsn);
- recv_init_crash_recovery();
+ if (!srv_read_only_mode) {
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Log scan progressed past the "
+ "checkpoint lsn " LSN_PF "",
+ recv_sys->scanned_lsn);
+
+ recv_init_crash_recovery();
+ } else {
+
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Recovery skipped, "
+ "--innodb-read-only set!");
+
+ return(TRUE);
+ }
}
#endif /* !UNIV_HOTBACKUP */
@@ -2845,20 +2954,15 @@ void
recv_init_crash_recovery(void)
/*==========================*/
{
+ ut_ad(!srv_read_only_mode);
ut_a(!recv_needed_recovery);
recv_needed_recovery = TRUE;
- ut_print_timestamp(stderr);
-
- fprintf(stderr,
- " InnoDB: Database was not"
- " shut down normally!\n"
- "InnoDB: Starting crash recovery.\n");
-
- fprintf(stderr,
- "InnoDB: Reading tablespace information"
- " from the .ibd files...\n");
+ ib_logf(IB_LOG_LEVEL_INFO, "Database was not shutdown normally!");
+ ib_logf(IB_LOG_LEVEL_INFO, "Starting crash recovery.");
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Reading tablespace information from the .ibd files...");
fil_load_single_table_tablespaces();
@@ -2869,11 +2973,12 @@ recv_init_crash_recovery(void)
if (srv_force_recovery < SRV_FORCE_NO_LOG_REDO) {
- fprintf(stderr,
- "InnoDB: Restoring possible"
- " half-written data pages from"
- " the doublewrite\n"
- "InnoDB: buffer...\n");
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Restoring possible half-written data pages ");
+
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "from the doublewrite buffer...");
+
buf_dblwr_init_or_restore_pages(TRUE);
}
}
@@ -2885,7 +2990,7 @@ recv_recovery_from_checkpoint_finish should be called later to complete
the recovery and free the resources used in it.
@return error code or DB_SUCCESS */
UNIV_INTERN
-ulint
+dberr_t
recv_recovery_from_checkpoint_start_func(
/*=====================================*/
#ifdef UNIV_LOG_ARCHIVE
@@ -2897,19 +3002,18 @@ recv_recovery_from_checkpoint_start_func(
{
log_group_t* group;
log_group_t* max_cp_group;
- log_group_t* up_to_date_group;
ulint max_cp_field;
lsn_t checkpoint_lsn;
ib_uint64_t checkpoint_no;
- lsn_t old_scanned_lsn;
lsn_t group_scanned_lsn = 0;
lsn_t contiguous_lsn;
#ifdef UNIV_LOG_ARCHIVE
+ log_group_t* up_to_date_group;
lsn_t archived_lsn;
#endif /* UNIV_LOG_ARCHIVE */
byte* buf;
byte log_hdr_buf[LOG_FILE_HDR_SIZE];
- ulint err;
+ dberr_t err;
#ifdef UNIV_LOG_ARCHIVE
ut_ad(type != LOG_CHECKPOINT || limit_lsn == LSN_MAX);
@@ -2930,10 +3034,10 @@ recv_recovery_from_checkpoint_start_func(
}
if (srv_force_recovery >= SRV_FORCE_NO_LOG_REDO) {
- fprintf(stderr,
- "InnoDB: The user has set SRV_FORCE_NO_LOG_REDO on\n");
- fprintf(stderr,
- "InnoDB: Skipping log redo\n");
+
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "The user has set SRV_FORCE_NO_LOG_REDO on, "
+ "skipping log redo");
return(DB_SUCCESS);
}
@@ -2974,17 +3078,24 @@ recv_recovery_from_checkpoint_start_func(
if (0 == ut_memcmp(log_hdr_buf + LOG_FILE_WAS_CREATED_BY_HOT_BACKUP,
(byte*)"ibbackup", (sizeof "ibbackup") - 1)) {
+
+ if (srv_read_only_mode) {
+
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Cannot restore from ibbackup, InnoDB running "
+ "in read-only mode!");
+
+ return(DB_ERROR);
+ }
+
/* This log file was created by ibbackup --restore: print
a note to the user about it */
- fprintf(stderr,
- "InnoDB: The log file was created by"
- " ibbackup --apply-log at\n"
- "InnoDB: %s\n",
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "The log file was created by ibbackup --apply-log "
+ "at %s. The following crash recovery is part of a "
+ "normal restore.",
log_hdr_buf + LOG_FILE_WAS_CREATED_BY_HOT_BACKUP);
- fprintf(stderr,
- "InnoDB: NOTE: the following crash recovery"
- " is part of a normal restore.\n");
/* Wipe over the label now */
@@ -3024,9 +3135,9 @@ recv_recovery_from_checkpoint_start_func(
contiguous_lsn = ut_uint64_align_down(recv_sys->scanned_lsn,
OS_FILE_LOG_BLOCK_SIZE);
+#ifdef UNIV_LOG_ARCHIVE
if (TYPE_CHECKPOINT) {
up_to_date_group = max_cp_group;
-#ifdef UNIV_LOG_ARCHIVE
} else {
ulint capacity;
@@ -3062,8 +3173,8 @@ recv_recovery_from_checkpoint_start_func(
group->scanned_lsn = group_scanned_lsn;
up_to_date_group = group;
-#endif /* UNIV_LOG_ARCHIVE */
}
+#endif /* UNIV_LOG_ARCHIVE */
ut_ad(RECV_SCAN_SIZE <= log_sys->buf_size);
@@ -3078,19 +3189,21 @@ recv_recovery_from_checkpoint_start_func(
/* Set the flag to publish that we are doing startup scan. */
recv_log_scan_is_startup_type = TYPE_CHECKPOINT;
while (group) {
- old_scanned_lsn = recv_sys->scanned_lsn;
+#ifdef UNIV_LOG_ARCHIVE
+ lsn_t old_scanned_lsn = recv_sys->scanned_lsn;
+#endif /* UNIV_LOG_ARCHIVE */
recv_group_scan_log_recs(group, &contiguous_lsn,
&group_scanned_lsn);
group->scanned_lsn = group_scanned_lsn;
+#ifdef UNIV_LOG_ARCHIVE
if (old_scanned_lsn < group_scanned_lsn) {
/* We found a more up-to-date group */
up_to_date_group = group;
}
-#ifdef UNIV_LOG_ARCHIVE
if ((type == LOG_ARCHIVE)
&& (group == recv_sys->archive_group)) {
group = UT_LIST_GET_NEXT(log_groups, group);
@@ -3111,70 +3224,73 @@ recv_recovery_from_checkpoint_start_func(
|| checkpoint_lsn != min_flushed_lsn) {
if (checkpoint_lsn < max_flushed_lsn) {
- fprintf(stderr,
- "InnoDB: #########################"
- "#################################\n"
- "InnoDB: "
- "WARNING!\n"
- "InnoDB: The log sequence number"
- " in ibdata files is higher\n"
- "InnoDB: than the log sequence number"
- " in the ib_logfiles! Are you sure\n"
- "InnoDB: you are using the right"
- " ib_logfiles to start up"
- " the database?\n"
- "InnoDB: Log sequence number in"
- " ib_logfiles is " LSN_PF ", log\n"
- "InnoDB: sequence numbers stamped"
- " to ibdata file headers are between\n"
- "InnoDB: " LSN_PF " and " LSN_PF ".\n"
- "InnoDB: #########################"
- "#################################\n",
+
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "The log sequence number "
+ "in the ibdata files is higher "
+ "than the log sequence number "
+ "in the ib_logfiles! Are you sure "
+ "you are using the right "
+ "ib_logfiles to start up the database. "
+ "Log sequence number in the "
+ "ib_logfiles is " LSN_PF ", log"
+ "sequence numbers stamped "
+ "to ibdata file headers are between "
+ "" LSN_PF " and " LSN_PF ".",
checkpoint_lsn,
min_flushed_lsn,
max_flushed_lsn);
}
if (!recv_needed_recovery) {
- fprintf(stderr,
- "InnoDB: The log sequence number"
- " in ibdata files does not match\n"
- "InnoDB: the log sequence number"
- " in the ib_logfiles!\n");
- recv_init_crash_recovery();
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "The log sequence numbers "
+ LSN_PF " and " LSN_PF
+ " in ibdata files do not match"
+ " the log sequence number "
+ LSN_PF
+ " in the ib_logfiles!",
+ min_flushed_lsn,
+ max_flushed_lsn,
+ checkpoint_lsn);
+
+ if (!srv_read_only_mode) {
+ recv_init_crash_recovery();
+ } else {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Can't initiate database "
+ "recovery, running "
+ "in read-only-mode.");
+ return(DB_READ_ONLY);
+ }
}
}
- if (!recv_needed_recovery) {
- /* Init the doublewrite buffer memory structure */
- buf_dblwr_init_or_restore_pages(FALSE);
+ if (!srv_read_only_mode) {
+ if (recv_needed_recovery) {
+ /* Spawn the background thread to
+ flush dirty pages from the buffer
+ pools. */
+ recv_writer_thread_handle =
+ os_thread_create(
+ recv_writer_thread, 0, 0);
+ } else {
+ /* Init the doublewrite buffer memory
+ structure */
+ buf_dblwr_init_or_restore_pages(FALSE);
+ }
}
}
/* We currently have only one log group */
- if (group_scanned_lsn < checkpoint_lsn) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: ERROR: We were only able to scan the log"
- " up to\n"
- "InnoDB: " LSN_PF ", but a checkpoint was at "
- LSN_PF ".\n"
- "InnoDB: It is possible that"
- " the database is now corrupt!\n",
- group_scanned_lsn,
- checkpoint_lsn);
- }
-
- if (group_scanned_lsn < recv_max_page_lsn) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: ERROR: We were only able to scan the log"
- " up to " LSN_PF "\n"
- "InnoDB: but a database page a had an lsn " LSN_PF "."
- " It is possible that the\n"
- "InnoDB: database is now corrupt!\n",
- group_scanned_lsn,
- recv_max_page_lsn);
+ if (group_scanned_lsn < checkpoint_lsn
+ || group_scanned_lsn < recv_max_page_lsn) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "We scanned the log up to "
+ LSN_PF ". A checkpoint was at " LSN_PF
+ " and the maximum LSN on a database page was " LSN_PF
+ ". It is possible that the database is now corrupt!",
+ group_scanned_lsn, checkpoint_lsn, recv_max_page_lsn);
}
if (recv_sys->recovered_lsn < checkpoint_lsn) {
@@ -3186,7 +3302,10 @@ recv_recovery_from_checkpoint_start_func(
return(DB_SUCCESS);
}
- ut_error;
+ /* No harm in trying to do RO access. */
+ if (!srv_read_only_mode) {
+ ut_error;
+ }
return(DB_ERROR);
}
@@ -3199,9 +3318,11 @@ recv_recovery_from_checkpoint_start_func(
#ifdef UNIV_LOG_ARCHIVE
log_sys->archived_lsn = archived_lsn;
-#endif /* UNIV_LOG_ARCHIVE */
recv_synchronize_groups(up_to_date_group);
+#else /* UNIV_LOG_ARCHIVE */
+ recv_synchronize_groups();
+#endif /* UNIV_LOG_ARCHIVE */
if (!recv_needed_recovery) {
ut_a(checkpoint_lsn == recv_sys->recovered_lsn);
@@ -3232,13 +3353,13 @@ recv_recovery_from_checkpoint_start_func(
}
#endif /* UNIV_LOG_ARCHIVE */
- mutex_enter(&(recv_sys->mutex));
+ mutex_enter(&recv_sys->mutex);
recv_sys->apply_log_recs = TRUE;
- mutex_exit(&(recv_sys->mutex));
+ mutex_exit(&recv_sys->mutex);
- mutex_exit(&(log_sys->mutex));
+ mutex_exit(&log_sys->mutex);
recv_lsn_checks_on = TRUE;
@@ -3294,10 +3415,40 @@ recv_recovery_from_checkpoint_finish(void)
"InnoDB: a backup!\n");
}
- /* Free the resources of the recovery system */
+ /* Make sure that the recv_writer thread is done. This is
+ required because it grabs various mutexes and we want to
+ ensure that when we enable sync_order_checks there is no
+ mutex currently held by any thread. */
+ mutex_enter(&recv_sys->writer_mutex);
+ /* Free the resources of the recovery system */
recv_recovery_on = FALSE;
+ /* By acquring the mutex we ensure that the recv_writer thread
+ won't trigger any more LRU batchtes. Now wait for currently
+ in progress batches to finish. */
+ buf_flush_wait_LRU_batch_end();
+
+ mutex_exit(&recv_sys->writer_mutex);
+
+ ulint count = 0;
+ while (recv_writer_thread_active) {
+ ++count;
+ os_thread_sleep(100000);
+ if (srv_print_verbose_log && count > 600) {
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Waiting for recv_writer to "
+ "finish flushing of buffer pool");
+ count = 0;
+ }
+ }
+
+#ifdef __WIN__
+ if (recv_writer_thread_handle) {
+ CloseHandle(recv_writer_thread_handle);
+ }
+#endif /* __WIN__ */
+
#ifndef UNIV_LOG_DEBUG
recv_sys_debug_free();
#endif
@@ -3317,20 +3468,22 @@ void
recv_recovery_rollback_active(void)
/*===============================*/
{
- int i;
-
#ifdef UNIV_SYNC_DEBUG
/* Wait for a while so that created threads have time to suspend
themselves before we switch the latching order checks on */
os_thread_sleep(1000000);
+ ut_ad(!recv_writer_thread_active);
+
/* Switch latching order checks on in sync0sync.cc */
sync_order_checks_on = TRUE;
#endif
/* We can't start any (DDL) transactions if UNDO logging
has been disabled, additionally disable ROLLBACK of recovered
user transactions. */
- if (srv_force_recovery < SRV_FORCE_NO_TRX_UNDO) {
+ if (srv_force_recovery < SRV_FORCE_NO_TRX_UNDO
+ && !srv_read_only_mode) {
+
/* Drop partially created indexes. */
row_merge_drop_temp_indexes();
/* Drop temporary tables. */
@@ -3345,7 +3498,7 @@ recv_recovery_rollback_active(void)
/* Rollback the uncommitted transactions which have no user
session */
- os_thread_create(trx_rollback_or_clean_all_recovered, &i, NULL);
+ os_thread_create(trx_rollback_or_clean_all_recovered, 0, 0);
}
}
@@ -3355,18 +3508,18 @@ UNIV_INTERN
void
recv_reset_logs(
/*============*/
- lsn_t lsn, /*!< in: reset to this lsn
- rounded up to be divisible by
- OS_FILE_LOG_BLOCK_SIZE, after
- which we add
- LOG_BLOCK_HDR_SIZE */
#ifdef UNIV_LOG_ARCHIVE
ulint arch_log_no, /*!< in: next archived log file number */
-#endif /* UNIV_LOG_ARCHIVE */
- ibool new_logs_created)/*!< in: TRUE if resetting logs
+ ibool new_logs_created,/*!< in: TRUE if resetting logs
is done at the log creation;
FALSE if it is done after
archive recovery */
+#endif /* UNIV_LOG_ARCHIVE */
+ lsn_t lsn) /*!< in: reset to this lsn
+ rounded up to be divisible by
+ OS_FILE_LOG_BLOCK_SIZE, after
+ which we add
+ LOG_BLOCK_HDR_SIZE */
{
log_group_t* group;
@@ -3382,12 +3535,12 @@ recv_reset_logs(
#ifdef UNIV_LOG_ARCHIVE
group->archived_file_no = arch_log_no;
group->archived_offset = 0;
-#endif /* UNIV_LOG_ARCHIVE */
if (!new_logs_created) {
recv_truncate_group(group, group->lsn, group->lsn,
group->lsn, group->lsn);
}
+#endif /* UNIV_LOG_ARCHIVE */
group = UT_LIST_GET_NEXT(log_groups, group);
}
@@ -3812,7 +3965,7 @@ recv_recovery_from_archive_start(
recv_apply_hashed_log_recs(FALSE);
- recv_reset_logs(recv_sys->recovered_lsn, 0, FALSE);
+ recv_reset_logs(0, FALSE, recv_sys->recovered_lsn);
}
mutex_exit(&(log_sys->mutex));
diff --git a/storage/innobase/mem/mem0dbg.cc b/storage/innobase/mem/mem0dbg.cc
index 83e14ad6071..308c2979551 100644
--- a/storage/innobase/mem/mem0dbg.cc
+++ b/storage/innobase/mem/mem0dbg.cc
@@ -30,7 +30,7 @@ Created 6/9/1994 Heikki Tuuri
/* The mutex which protects in the debug version the hash table
containing the list of live memory heaps, and also the global
variables below. */
-UNIV_INTERN mutex_t mem_hash_mutex;
+UNIV_INTERN ib_mutex_t mem_hash_mutex;
#ifdef UNIV_PFS_MUTEX
/* Key to register mem_hash_mutex with performance schema */
@@ -58,8 +58,7 @@ static ibool mem_hash_initialized = FALSE;
/* The node of the list containing currently allocated memory heaps */
-typedef struct mem_hash_node_struct mem_hash_node_t;
-struct mem_hash_node_struct {
+struct mem_hash_node_t {
UT_LIST_NODE_T(mem_hash_node_t)
list; /*!< hash list node */
mem_heap_t* heap; /*!< memory heap */
diff --git a/storage/innobase/mem/mem0pool.cc b/storage/innobase/mem/mem0pool.cc
index 2135926a26f..fe9a84d21fa 100644
--- a/storage/innobase/mem/mem0pool.cc
+++ b/storage/innobase/mem/mem0pool.cc
@@ -100,12 +100,12 @@ pool, and after that its locks will grow into the buffer pool. */
/** Data structure for a memory pool. The space is allocated using the buddy
algorithm, where free list i contains areas of size 2 to power i. */
-struct mem_pool_struct{
+struct mem_pool_t{
byte* buf; /*!< memory pool */
ulint size; /*!< memory common pool size */
ulint reserved; /*!< amount of currently allocated
memory */
- mutex_t mutex; /*!< mutex protecting this struct */
+ ib_mutex_t mutex; /*!< mutex protecting this struct */
UT_LIST_BASE_NODE_T(mem_area_t)
free_list[64]; /*!< lists of free memory areas: an
area is put to the list whose number
@@ -116,7 +116,7 @@ struct mem_pool_struct{
UNIV_INTERN mem_pool_t* mem_comm_pool = NULL;
#ifdef UNIV_PFS_MUTEX
-/* Key to register mutex in mem_pool_struct with performance schema */
+/* Key to register mutex in mem_pool_t with performance schema */
UNIV_INTERN mysql_pfs_key_t mem_pool_mutex_key;
#endif /* UNIV_PFS_MUTEX */
diff --git a/storage/innobase/mtr/mtr0log.cc b/storage/innobase/mtr/mtr0log.cc
index d549de8802e..5335cb4c9ef 100644
--- a/storage/innobase/mtr/mtr0log.cc
+++ b/storage/innobase/mtr/mtr0log.cc
@@ -240,8 +240,8 @@ mlog_parse_nbytes(
}
/********************************************************//**
-Writes 1 - 4 bytes to a file page buffered in the buffer pool.
-Writes the corresponding log record to the mini-transaction log. */
+Writes 1, 2 or 4 bytes to a file page. Writes the corresponding log
+record to the mini-transaction log if mtr is not NULL. */
UNIV_INTERN
void
mlog_write_ulint(
@@ -251,8 +251,6 @@ mlog_write_ulint(
byte type, /*!< in: MLOG_1BYTE, MLOG_2BYTES, MLOG_4BYTES */
mtr_t* mtr) /*!< in: mini-transaction handle */
{
- byte* log_ptr;
-
switch (type) {
case MLOG_1BYTE:
mach_write_to_1(ptr, val);
@@ -267,27 +265,29 @@ mlog_write_ulint(
ut_error;
}
- log_ptr = mlog_open(mtr, 11 + 2 + 5);
+ if (mtr != 0) {
+ byte* log_ptr = mlog_open(mtr, 11 + 2 + 5);
- /* If no logging is requested, we may return now */
- if (log_ptr == NULL) {
+ /* If no logging is requested, we may return now */
- return;
- }
+ if (log_ptr != 0) {
- log_ptr = mlog_write_initial_log_record_fast(ptr, type, log_ptr, mtr);
+ log_ptr = mlog_write_initial_log_record_fast(
+ ptr, type, log_ptr, mtr);
- mach_write_to_2(log_ptr, page_offset(ptr));
- log_ptr += 2;
+ mach_write_to_2(log_ptr, page_offset(ptr));
+ log_ptr += 2;
- log_ptr += mach_write_compressed(log_ptr, val);
+ log_ptr += mach_write_compressed(log_ptr, val);
- mlog_close(mtr, log_ptr);
+ mlog_close(mtr, log_ptr);
+ }
+ }
}
/********************************************************//**
-Writes 8 bytes to a file page buffered in the buffer pool.
-Writes the corresponding log record to the mini-transaction log. */
+Writes 8 bytes to a file page. Writes the corresponding log
+record to the mini-transaction log, only if mtr is not NULL */
UNIV_INTERN
void
mlog_write_ull(
@@ -296,29 +296,25 @@ mlog_write_ull(
ib_uint64_t val, /*!< in: value to write */
mtr_t* mtr) /*!< in: mini-transaction handle */
{
- byte* log_ptr;
-
- ut_ad(ptr && mtr);
-
mach_write_to_8(ptr, val);
- log_ptr = mlog_open(mtr, 11 + 2 + 9);
-
- /* If no logging is requested, we may return now */
- if (log_ptr == NULL) {
+ if (mtr != 0) {
+ byte* log_ptr = mlog_open(mtr, 11 + 2 + 9);
- return;
- }
+ /* If no logging is requested, we may return now */
+ if (log_ptr != 0) {
- log_ptr = mlog_write_initial_log_record_fast(ptr, MLOG_8BYTES,
- log_ptr, mtr);
+ log_ptr = mlog_write_initial_log_record_fast(
+ ptr, MLOG_8BYTES, log_ptr, mtr);
- mach_write_to_2(log_ptr, page_offset(ptr));
- log_ptr += 2;
+ mach_write_to_2(log_ptr, page_offset(ptr));
+ log_ptr += 2;
- log_ptr += mach_ull_write_compressed(log_ptr, val);
+ log_ptr += mach_ull_write_compressed(log_ptr, val);
- mlog_close(mtr, log_ptr);
+ mlog_close(mtr, log_ptr);
+ }
+ }
}
#ifndef UNIV_HOTBACKUP
@@ -439,12 +435,13 @@ UNIV_INTERN
byte*
mlog_open_and_write_index(
/*======================*/
- mtr_t* mtr, /*!< in: mtr */
- const byte* rec, /*!< in: index record or page */
- dict_index_t* index, /*!< in: record descriptor */
- byte type, /*!< in: log item type */
- ulint size) /*!< in: requested buffer size in bytes
- (if 0, calls mlog_close() and returns NULL) */
+ mtr_t* mtr, /*!< in: mtr */
+ const byte* rec, /*!< in: index record or page */
+ const dict_index_t* index, /*!< in: record descriptor */
+ byte type, /*!< in: log item type */
+ ulint size) /*!< in: requested buffer size in bytes
+ (if 0, calls mlog_close() and
+ returns NULL) */
{
byte* log_ptr;
const byte* log_start;
diff --git a/storage/innobase/mtr/mtr0mtr.cc b/storage/innobase/mtr/mtr0mtr.cc
index 4832e8c7710..10b4686b720 100644
--- a/storage/innobase/mtr/mtr0mtr.cc
+++ b/storage/innobase/mtr/mtr0mtr.cc
@@ -142,9 +142,9 @@ mtr_memo_slot_note_modification(
mtr_t* mtr, /*!< in: mtr */
mtr_memo_slot_t* slot) /*!< in: memo slot */
{
- ut_ad(mtr);
- ut_ad(mtr->magic_n == MTR_MAGIC_N);
ut_ad(mtr->modifications);
+ ut_ad(!srv_read_only_mode);
+ ut_ad(mtr->magic_n == MTR_MAGIC_N);
if (slot->object != NULL && slot->type == MTR_MEMO_PAGE_X_FIX) {
buf_block_t* block = (buf_block_t*) slot->object;
@@ -170,7 +170,7 @@ mtr_memo_note_modifications(
dyn_array_t* memo;
ulint offset;
- ut_ad(mtr);
+ ut_ad(!srv_read_only_mode);
ut_ad(mtr->magic_n == MTR_MAGIC_N);
ut_ad(mtr->state == MTR_COMMITTING); /* Currently only used in
commit */
@@ -191,19 +191,51 @@ mtr_memo_note_modifications(
}
/************************************************************//**
+Append the dirty pages to the flush list. */
+static
+void
+mtr_add_dirtied_pages_to_flush_list(
+/*================================*/
+ mtr_t* mtr) /*!< in/out: mtr */
+{
+ ut_ad(!srv_read_only_mode);
+
+ /* No need to acquire log_flush_order_mutex if this mtr has
+ not dirtied a clean page. log_flush_order_mutex is used to
+ ensure ordered insertions in the flush_list. We need to
+ insert in the flush_list iff the page in question was clean
+ before modifications. */
+ if (mtr->made_dirty) {
+ log_flush_order_mutex_enter();
+ }
+
+ /* It is now safe to release the log mutex because the
+ flush_order mutex will ensure that we are the first one
+ to insert into the flush list. */
+ log_release();
+
+ if (mtr->modifications) {
+ mtr_memo_note_modifications(mtr);
+ }
+
+ if (mtr->made_dirty) {
+ log_flush_order_mutex_exit();
+ }
+}
+
+/************************************************************//**
Writes the contents of a mini-transaction log, if any, to the database log. */
static
void
mtr_log_reserve_and_write(
/*======================*/
- mtr_t* mtr) /*!< in: mtr */
+ mtr_t* mtr) /*!< in/out: mtr */
{
dyn_array_t* mlog;
- dyn_block_t* block;
ulint data_size;
byte* first_data;
- ut_ad(mtr);
+ ut_ad(!srv_read_only_mode);
mlog = &(mtr->log);
@@ -217,14 +249,21 @@ mtr_log_reserve_and_write(
}
if (mlog->heap == NULL) {
+ ulint len;
+
+ len = mtr->log_mode != MTR_LOG_NO_REDO
+ ? dyn_block_get_used(mlog) : 0;
+
mtr->end_lsn = log_reserve_and_write_fast(
- first_data, dyn_block_get_used(mlog),
- &mtr->start_lsn);
+ first_data, len, &mtr->start_lsn);
+
if (mtr->end_lsn) {
/* Success. We have the log mutex.
Add pages to flush list and exit */
- goto func_exit;
+ mtr_add_dirtied_pages_to_flush_list(mtr);
+
+ return;
}
}
@@ -235,43 +274,24 @@ mtr_log_reserve_and_write(
if (mtr->log_mode == MTR_LOG_ALL) {
- block = mlog;
+ for (dyn_block_t* block = mlog;
+ block != 0;
+ block = dyn_array_get_next_block(mlog, block)) {
- while (block != NULL) {
- log_write_low(dyn_block_get_data(block),
- dyn_block_get_used(block));
- block = dyn_array_get_next_block(mlog, block);
+ log_write_low(
+ dyn_block_get_data(block),
+ dyn_block_get_used(block));
}
+
} else {
- ut_ad(mtr->log_mode == MTR_LOG_NONE);
+ ut_ad(mtr->log_mode == MTR_LOG_NONE
+ || mtr->log_mode == MTR_LOG_NO_REDO);
/* Do nothing */
}
mtr->end_lsn = log_close();
-func_exit:
-
- /* No need to acquire log_flush_order_mutex if this mtr has
- not dirtied a clean page. log_flush_order_mutex is used to
- ensure ordered insertions in the flush_list. We need to
- insert in the flush_list iff the page in question was clean
- before modifications. */
- if (mtr->made_dirty) {
- log_flush_order_mutex_enter();
- }
-
- /* It is now safe to release the log mutex because the
- flush_order mutex will ensure that we are the first one
- to insert into the flush list. */
- log_release();
-
- if (mtr->modifications) {
- mtr_memo_note_modifications(mtr);
- }
-
- if (mtr->made_dirty) {
- log_flush_order_mutex_exit();
- }
+ mtr_add_dirtied_pages_to_flush_list(mtr);
}
#endif /* !UNIV_HOTBACKUP */
@@ -294,6 +314,7 @@ mtr_commit(
ut_ad(!recv_no_log_write);
if (mtr->modifications && mtr->n_log_recs) {
+ ut_ad(!srv_read_only_mode);
mtr_log_reserve_and_write(mtr);
}
@@ -376,14 +397,8 @@ mtr_read_ulint(
ut_ad(mtr->state == MTR_ACTIVE);
ut_ad(mtr_memo_contains_page(mtr, ptr, MTR_MEMO_PAGE_S_FIX)
|| mtr_memo_contains_page(mtr, ptr, MTR_MEMO_PAGE_X_FIX));
- if (type == MLOG_1BYTE) {
- return(mach_read_from_1(ptr));
- } else if (type == MLOG_2BYTES) {
- return(mach_read_from_2(ptr));
- } else {
- ut_ad(type == MLOG_4BYTES);
- return(mach_read_from_4(ptr));
- }
+
+ return(mach_read_ulint(ptr, type));
}
#ifdef UNIV_DEBUG
diff --git a/storage/innobase/os/os0file.cc b/storage/innobase/os/os0file.cc
index 49d6b00d271..b4e4f52a0f7 100644
--- a/storage/innobase/os/os0file.cc
+++ b/storage/innobase/os/os0file.cc
@@ -1,6 +1,6 @@
/***********************************************************************
-Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1995, 2012, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2009, Percona Inc.
Portions of this file contain modifications contributed and copyrighted
@@ -60,24 +60,29 @@ Created 10/21/1995 Heikki Tuuri
#include <libaio.h>
#endif
+/** Insert buffer segment id */
+static const ulint IO_IBUF_SEGMENT = 0;
+
+/** Log segment id */
+static const ulint IO_LOG_SEGMENT = 1;
+
/* This specifies the file permissions InnoDB uses when it creates files in
Unix; the value of os_innodb_umask is initialized in ha_innodb.cc to
my_umask */
#ifndef __WIN__
/** Umask for creating files */
-UNIV_INTERN ulint os_innodb_umask
- = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP;
+UNIV_INTERN ulint os_innodb_umask = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP;
#else
/** Umask for creating files */
-UNIV_INTERN ulint os_innodb_umask = 0;
-#endif
+UNIV_INTERN ulint os_innodb_umask = 0;
+#endif /* __WIN__ */
#ifndef UNIV_HOTBACKUP
/* We use these mutexes to protect lseek + file i/o operation, if the
OS does not provide an atomic pread or pwrite, or similar */
#define OS_FILE_N_SEEK_MUTEXES 16
-UNIV_INTERN os_mutex_t os_file_seek_mutexes[OS_FILE_N_SEEK_MUTEXES];
+UNIV_INTERN os_ib_mutex_t os_file_seek_mutexes[OS_FILE_N_SEEK_MUTEXES];
/* In simulated aio, merge at most this many consecutive i/os */
#define OS_AIO_MERGE_N_CONSECUTIVE 64
@@ -147,10 +152,7 @@ UNIV_INTERN mysql_pfs_key_t innodb_file_temp_key;
#endif /* UNIV_PFS_IO */
/** The asynchronous i/o array slot structure */
-typedef struct os_aio_slot_struct os_aio_slot_t;
-
-/** The asynchronous i/o array slot structure */
-struct os_aio_slot_struct{
+struct os_aio_slot_t{
ibool is_read; /*!< TRUE if a read operation */
ulint pos; /*!< index of the slot in the aio
array */
@@ -182,15 +184,12 @@ struct os_aio_slot_struct{
struct iocb control; /* Linux control block for aio */
int n_bytes; /* bytes written/read. */
int ret; /* AIO return code */
-#endif
+#endif /* WIN_ASYNC_IO */
};
/** The asynchronous i/o array structure */
-typedef struct os_aio_array_struct os_aio_array_t;
-
-/** The asynchronous i/o array structure */
-struct os_aio_array_struct{
- os_mutex_t mutex; /*!< the mutex protecting the aio array */
+struct os_aio_array_t{
+ os_ib_mutex_t mutex; /*!< the mutex protecting the aio array */
os_event_t not_full;
/*!< The event which is set to the
signaled state when there is space in
@@ -223,7 +222,7 @@ struct os_aio_array_struct{
order. This can be used in
WaitForMultipleObjects; used only in
Windows */
-#endif
+#endif /* __WIN__ */
#if defined(LINUX_NATIVE_AIO)
io_context_t* aio_ctx;
@@ -235,7 +234,7 @@ struct os_aio_array_struct{
There is one such event for each
possible pending IO. The size of the
array is equal to n_slots. */
-#endif
+#endif /* LINUX_NATIV_AIO */
};
#if defined(LINUX_NATIVE_AIO)
@@ -283,7 +282,7 @@ UNIV_INTERN ibool os_has_said_disk_full = FALSE;
#if !defined(UNIV_HOTBACKUP) \
&& (!defined(HAVE_ATOMIC_BUILTINS) || UNIV_WORD_SIZE < 8)
/** The mutex protecting the following counts of pending I/O operations */
-static os_mutex_t os_file_count_mutex;
+static os_ib_mutex_t os_file_count_mutex;
#endif /* !UNIV_HOTBACKUP && (!HAVE_ATOMIC_BUILTINS || UNIV_WORD_SIZE < 8) */
/** Number of pending os_file_pread() operations */
@@ -336,7 +335,7 @@ ulint
os_get_os_version(void)
/*===================*/
{
- OSVERSIONINFO os_info;
+ OSVERSIONINFO os_info;
os_info.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
@@ -350,15 +349,15 @@ os_get_os_version(void)
switch (os_info.dwMajorVersion) {
case 3:
case 4:
- return OS_WINNT;
+ return(OS_WINNT);
case 5:
- return (os_info.dwMinorVersion == 0) ? OS_WIN2000
- : OS_WINXP;
+ return (os_info.dwMinorVersion == 0)
+ ? OS_WIN2000 : OS_WINXP;
case 6:
- return (os_info.dwMinorVersion == 0) ? OS_WINVISTA
- : OS_WIN7;
+ return (os_info.dwMinorVersion == 0)
+ ? OS_WINVISTA : OS_WIN7;
default:
- return OS_WIN7;
+ return(OS_WIN7);
}
} else {
ut_error;
@@ -377,16 +376,17 @@ static
ulint
os_file_get_last_error_low(
/*=======================*/
- ibool report_all_errors, /*!< in: TRUE if we want an error
+ bool report_all_errors, /*!< in: TRUE if we want an error
message printed of all errors */
- ibool on_error_silent) /*!< in: TRUE then don't print any
+ bool on_error_silent) /*!< in: TRUE then don't print any
diagnostic to the log */
{
- ulint err;
-
#ifdef __WIN__
- err = (ulint) GetLastError();
+ ulint err = (ulint) GetLastError();
+ if (err == ERROR_SUCCESS) {
+ return(0);
+ }
if (report_all_errors
|| (!on_error_silent
@@ -469,15 +469,18 @@ os_file_get_last_error_low(
return(100 + err);
}
#else
- err = (ulint) errno;
+ int err = errno;
+ if (err == 0) {
+ return(0);
+ }
if (report_all_errors
|| (err != ENOSPC && err != EEXIST && !on_error_silent)) {
ut_print_timestamp(stderr);
fprintf(stderr,
- " InnoDB: Operating system error number %lu"
- " in a file operation.\n", (ulong) err);
+ " InnoDB: Operating system error number %d"
+ " in a file operation.\n", err);
if (err == ENOENT) {
fprintf(stderr,
@@ -497,11 +500,11 @@ os_file_get_last_error_low(
" the access rights to\n"
"InnoDB: the directory.\n");
} else {
- if (strerror((int) err) != NULL) {
+ if (strerror(err) != NULL) {
fprintf(stderr,
- "InnoDB: Error number %lu"
+ "InnoDB: Error number %d"
" means '%s'.\n",
- err, strerror((int) err));
+ err, strerror(err));
}
@@ -552,10 +555,10 @@ UNIV_INTERN
ulint
os_file_get_last_error(
/*===================*/
- ibool report_all_errors) /*!< in: TRUE if we want an error
+ bool report_all_errors) /*!< in: TRUE if we want an error
message printed of all errors */
{
- return(os_file_get_last_error_low(report_all_errors, FALSE));
+ return(os_file_get_last_error_low(report_all_errors, false));
}
/****************************************************************//**
@@ -577,7 +580,7 @@ os_file_handle_error_cond_exit(
{
ulint err;
- err = os_file_get_last_error_low(FALSE, on_error_silent);
+ err = os_file_get_last_error_low(false, on_error_silent);
switch (err) {
case OS_FILE_DISK_FULL:
@@ -645,7 +648,8 @@ os_file_handle_error_cond_exit(
ut_print_timestamp(stderr);
fprintf(stderr, " InnoDB: File operation call: "
- "'%s'.\n", operation);
+ "'%s' returned OS error " ULINTPF ".\n",
+ operation, err);
}
if (should_exit) {
@@ -654,7 +658,9 @@ os_file_handle_error_cond_exit(
"operation.\n");
fflush(stderr);
- ut_error;
+
+ ut_ad(0); /* Report call stack, etc only in debug code. */
+ exit(1);
}
}
@@ -712,19 +718,23 @@ os_file_lock(
const char* name) /*!< in: file name */
{
struct flock lk;
+
+ ut_ad(!srv_read_only_mode);
+
lk.l_type = F_WRLCK;
lk.l_whence = SEEK_SET;
lk.l_start = lk.l_len = 0;
+
if (fcntl(fd, F_SETLK, &lk) == -1) {
- fprintf(stderr,
- "InnoDB: Unable to lock %s, error: %d\n", name, errno);
+
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Unable to lock %s, error: %d", name, errno);
if (errno == EAGAIN || errno == EACCES) {
- fprintf(stderr,
- "InnoDB: Check that you do not already have"
- " another mysqld process\n"
- "InnoDB: using the same InnoDB data"
- " or log files.\n");
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Check that you do not already have "
+ "another mysqld process using the "
+ "same InnoDB data or log files.");
}
return(-1);
@@ -742,13 +752,11 @@ void
os_io_init_simple(void)
/*===================*/
{
- ulint i;
-
#if !defined(HAVE_ATOMIC_BUILTINS) || UNIV_WORD_SIZE < 8
os_file_count_mutex = os_mutex_create();
#endif /* !HAVE_ATOMIC_BUILTINS || UNIV_WORD_SIZE < 8 */
- for (i = 0; i < OS_FILE_N_SEEK_MUTEXES; i++) {
+ for (ulint i = 0; i < OS_FILE_N_SEEK_MUTEXES; i++) {
os_file_seek_mutexes[i] = os_mutex_create();
}
}
@@ -765,6 +773,8 @@ os_file_create_tmpfile(void)
FILE* file = NULL;
int fd = innobase_mysql_tmpfile();
+ ut_ad(!srv_read_only_mode);
+
if (fd >= 0) {
file = fdopen(fd, "w+b");
}
@@ -840,7 +850,7 @@ os_file_opendir(
}
return(dir);
-#endif
+#endif /* __WIN__ */
}
/***********************************************************************//**
@@ -874,7 +884,7 @@ os_file_closedir(
}
return(ret);
-#endif
+#endif /* __WIN__ */
}
/***********************************************************************//**
@@ -1054,10 +1064,12 @@ next_file:
}
/*****************************************************************//**
-This function attempts to create a directory named pathname. The new directory
-gets default permissions. On Unix the permissions are (0770 & ~umask). If the
-directory exists already, nothing is done and the call succeeds, unless the
-fail_if_exists arguments is true.
+This function attempts to create a directory named pathname. The new
+directory gets default permissions. On Unix the permissions are
+(0770 & ~umask). If the directory exists already, nothing is done and
+the call succeeds, unless the fail_if_exists arguments is true.
+If another error occurs, such as a permission error, this does not crash,
+but reports the error and returns FALSE.
@return TRUE if call succeeds, FALSE on error */
UNIV_INTERN
ibool
@@ -1075,13 +1087,14 @@ os_file_create_directory(
if (!(rcode != 0
|| (GetLastError() == ERROR_ALREADY_EXISTS
&& !fail_if_exists))) {
- /* failure */
- os_file_handle_error(pathname, "CreateDirectory");
+
+ os_file_handle_error_no_exit(
+ pathname, "CreateDirectory", FALSE);
return(FALSE);
}
- return (TRUE);
+ return(TRUE);
#else
int rcode;
@@ -1089,13 +1102,13 @@ os_file_create_directory(
if (!(rcode == 0 || (errno == EEXIST && !fail_if_exists))) {
/* failure */
- os_file_handle_error(pathname, "mkdir");
+ os_file_handle_error_no_exit(pathname, "mkdir", FALSE);
return(FALSE);
}
return (TRUE);
-#endif
+#endif /* __WIN__ */
}
/****************************************************************//**
@@ -1115,129 +1128,180 @@ os_file_create_simple_func(
OS_FILE_READ_WRITE */
ibool* success)/*!< out: TRUE if succeed, FALSE if error */
{
-#ifdef __WIN__
os_file_t file;
- DWORD create_flag;
+ ibool retry;
+
+#ifdef __WIN__
DWORD access;
+ DWORD create_flag;
DWORD attributes = 0;
- ibool retry;
ut_a(!(create_mode & OS_FILE_ON_ERROR_SILENT));
ut_a(!(create_mode & OS_FILE_ON_ERROR_NO_EXIT));
-try_again:
- ut_a(name);
if (create_mode == OS_FILE_OPEN) {
+
+ create_flag = OPEN_EXISTING;
+
+ } else if (srv_read_only_mode) {
+
create_flag = OPEN_EXISTING;
+
} else if (create_mode == OS_FILE_CREATE) {
+
create_flag = CREATE_NEW;
+
} else if (create_mode == OS_FILE_CREATE_PATH) {
- /* create subdirs along the path if needed */
+
+ ut_a(!srv_read_only_mode);
+
+ /* Create subdirs along the path if needed */
*success = os_file_create_subdirs_if_needed(name);
+
if (!*success) {
- ut_error;
+
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Unable to create subdirectories '%s'",
+ name);
+
+ return((os_file_t) -1);
}
+
create_flag = CREATE_NEW;
create_mode = OS_FILE_CREATE;
+
} else {
- create_flag = 0;
- ut_error;
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Unknown file create mode (%lu) for file '%s'",
+ create_mode, name);
+
+ return((os_file_t) -1);
}
if (access_type == OS_FILE_READ_ONLY) {
access = GENERIC_READ;
+ } else if (srv_read_only_mode) {
+
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "read only mode set. Unable to "
+ "open file '%s' in RW mode, trying RO mode", name);
+
+ access = GENERIC_READ;
+
} else if (access_type == OS_FILE_READ_WRITE) {
access = GENERIC_READ | GENERIC_WRITE;
} else {
- access = 0;
- ut_error;
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Unknown file access type (%lu) for file '%s'",
+ access_type, name);
+
+ return((os_file_t) -1);
}
- file = CreateFile((LPCTSTR) name,
- access,
- FILE_SHARE_READ | FILE_SHARE_WRITE,
- /* file can be read and written also
- by other processes */
- NULL, /* default security attributes */
- create_flag,
- attributes,
- NULL); /*!< no template file */
+ do {
+ /* Use default security attributes and no template file. */
- if (file == INVALID_HANDLE_VALUE) {
- *success = FALSE;
+ file = CreateFile(
+ (LPCTSTR) name, access, FILE_SHARE_READ, NULL,
+ create_flag, attributes, NULL);
+
+ if (file == INVALID_HANDLE_VALUE) {
- retry = os_file_handle_error(name,
- create_mode == OS_FILE_OPEN ?
- "open" : "create");
- if (retry) {
- goto try_again;
+ *success = FALSE;
+
+ retry = os_file_handle_error(
+ name, create_mode == OS_FILE_OPEN ?
+ "open" : "create");
+
+ } else {
+ *success = TRUE;
+ retry = false;
}
- } else {
- *success = TRUE;
- }
- return(file);
+ } while (retry);
+
#else /* __WIN__ */
- os_file_t file;
int create_flag;
- ibool retry;
ut_a(!(create_mode & OS_FILE_ON_ERROR_SILENT));
ut_a(!(create_mode & OS_FILE_ON_ERROR_NO_EXIT));
-try_again:
- ut_a(name);
-
if (create_mode == OS_FILE_OPEN) {
+
if (access_type == OS_FILE_READ_ONLY) {
create_flag = O_RDONLY;
+ } else if (srv_read_only_mode) {
+ create_flag = O_RDONLY;
} else {
create_flag = O_RDWR;
}
+
+ } else if (srv_read_only_mode) {
+
+ create_flag = O_RDONLY;
+
} else if (create_mode == OS_FILE_CREATE) {
+
create_flag = O_RDWR | O_CREAT | O_EXCL;
+
} else if (create_mode == OS_FILE_CREATE_PATH) {
- /* create subdirs along the path if needed */
+
+ /* Create subdirs along the path if needed */
+
*success = os_file_create_subdirs_if_needed(name);
+
if (!*success) {
- return (-1);
+
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Unable to create subdirectories '%s'",
+ name);
+
+ return((os_file_t) -1);
}
+
create_flag = O_RDWR | O_CREAT | O_EXCL;
create_mode = OS_FILE_CREATE;
} else {
- create_flag = 0;
- ut_error;
- }
- if (create_mode == OS_FILE_CREATE) {
- file = open(name, create_flag, S_IRUSR | S_IWUSR
- | S_IRGRP | S_IWGRP);
- } else {
- file = open(name, create_flag);
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Unknown file create mode (%lu) for file '%s'",
+ create_mode, name);
+
+ return((os_file_t) -1);
}
- if (file == -1) {
- *success = FALSE;
+ do {
+ file = ::open(name, create_flag, os_innodb_umask);
- retry = os_file_handle_error(name,
- create_mode == OS_FILE_OPEN ?
- "open" : "create");
- if (retry) {
- goto try_again;
+ if (file == -1) {
+ *success = FALSE;
+
+ retry = os_file_handle_error(
+ name,
+ create_mode == OS_FILE_OPEN
+ ? "open" : "create");
+ } else {
+ *success = TRUE;
+ retry = false;
}
+
+ } while (retry);
+
#ifdef USE_FILE_LOCK
- } else if (access_type == OS_FILE_READ_WRITE
- && os_file_lock(file, name)) {
+ if (!srv_read_only_mode
+ && *success
+ && access_type == OS_FILE_READ_WRITE
+ && os_file_lock(file, name)) {
+
*success = FALSE;
close(file);
file = -1;
-#endif
- } else {
- *success = TRUE;
}
+#endif /* USE_FILE_LOCK */
- return(file);
#endif /* __WIN__ */
+
+ return(file);
}
/****************************************************************//**
@@ -1259,12 +1323,13 @@ os_file_create_simple_no_error_handling_func(
used by a backup program reading the file */
ibool* success)/*!< out: TRUE if succeed, FALSE if error */
{
-#ifdef __WIN__
os_file_t file;
- DWORD create_flag;
+
+#ifdef __WIN__
DWORD access;
+ DWORD create_flag;
DWORD attributes = 0;
- DWORD share_mode = FILE_SHARE_READ | FILE_SHARE_WRITE;
+ DWORD share_mode = FILE_SHARE_READ;
ut_a(name);
@@ -1273,46 +1338,53 @@ os_file_create_simple_no_error_handling_func(
if (create_mode == OS_FILE_OPEN) {
create_flag = OPEN_EXISTING;
+ } else if (srv_read_only_mode) {
+ create_flag = OPEN_EXISTING;
} else if (create_mode == OS_FILE_CREATE) {
create_flag = CREATE_NEW;
} else {
- create_flag = 0;
- ut_error;
+
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Unknown file create mode (%lu) for file '%s'",
+ create_mode, name);
+
+ return((os_file_t) -1);
}
if (access_type == OS_FILE_READ_ONLY) {
access = GENERIC_READ;
+ } else if (srv_read_only_mode) {
+ access = GENERIC_READ;
} else if (access_type == OS_FILE_READ_WRITE) {
access = GENERIC_READ | GENERIC_WRITE;
} else if (access_type == OS_FILE_READ_ALLOW_DELETE) {
+
+ ut_a(!srv_read_only_mode);
+
access = GENERIC_READ;
- share_mode = FILE_SHARE_DELETE | FILE_SHARE_READ
- | FILE_SHARE_WRITE; /*!< A backup program has to give
- mysqld the maximum freedom to
- do what it likes with the
- file */
+
+ /*!< A backup program has to give mysqld the maximum
+ freedom to do what it likes with the file */
+
+ share_mode |= FILE_SHARE_DELETE | FILE_SHARE_WRITE;
} else {
- access = 0;
- ut_error;
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Unknown file access type (%lu) for file '%s'",
+ access_type, name);
+
+ return((os_file_t) -1);
}
file = CreateFile((LPCTSTR) name,
access,
share_mode,
- NULL, /* default security attributes */
+ NULL, // Security attributes
create_flag,
attributes,
- NULL); /*!< no template file */
-
- if (file == INVALID_HANDLE_VALUE) {
- *success = FALSE;
- } else {
- *success = TRUE;
- }
+ NULL); // No template file
- return(file);
+ *success = (file != INVALID_HANDLE_VALUE);
#else /* __WIN__ */
- os_file_t file;
int create_flag;
ut_a(name);
@@ -1321,40 +1393,59 @@ os_file_create_simple_no_error_handling_func(
ut_a(!(create_mode & OS_FILE_ON_ERROR_NO_EXIT));
if (create_mode == OS_FILE_OPEN) {
+
if (access_type == OS_FILE_READ_ONLY) {
+
create_flag = O_RDONLY;
+
+ } else if (srv_read_only_mode) {
+
+ create_flag = O_RDONLY;
+
} else {
+
+ ut_a(access_type == OS_FILE_READ_WRITE
+ || access_type == OS_FILE_READ_ALLOW_DELETE);
+
create_flag = O_RDWR;
}
+
+ } else if (srv_read_only_mode) {
+
+ create_flag = O_RDONLY;
+
} else if (create_mode == OS_FILE_CREATE) {
+
create_flag = O_RDWR | O_CREAT | O_EXCL;
- } else {
- create_flag = 0;
- ut_error;
- }
- if (create_mode == OS_FILE_CREATE) {
- file = open(name, create_flag, S_IRUSR | S_IWUSR
- | S_IRGRP | S_IWGRP);
} else {
- file = open(name, create_flag);
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Unknown file create mode (%lu) for file '%s'",
+ create_mode, name);
+
+ return((os_file_t) -1);
}
- if (file == -1) {
- *success = FALSE;
+ file = ::open(name, create_flag, os_innodb_umask);
+
+ *success = file == -1 ? FALSE : TRUE;
+
#ifdef USE_FILE_LOCK
- } else if (access_type == OS_FILE_READ_WRITE
- && os_file_lock(file, name)) {
+ if (!srv_read_only_mode
+ && *success
+ && access_type == OS_FILE_READ_WRITE
+ && os_file_lock(file, name)) {
+
*success = FALSE;
close(file);
file = -1;
-#endif
- } else {
- *success = TRUE;
+
}
+#endif /* USE_FILE_LOCK */
- return(file);
#endif /* __WIN__ */
+
+ return(file);
}
/****************************************************************//**
@@ -1364,80 +1455,43 @@ void
os_file_set_nocache(
/*================*/
int fd /*!< in: file descriptor to alter */
- __attribute__((unused)),
- const char* file_name /*!< in: used in the diagnostic message */
- __attribute__((unused)),
+ __attribute__((unused)),
+ const char* file_name /*!< in: used in the diagnostic
+ message */
+ __attribute__((unused)),
const char* operation_name __attribute__((unused)))
- /*!< in: "open" or "create"; used in the
- diagnostic message */
+ /*!< in: "open" or "create"; used
+ in the diagnostic message */
{
/* some versions of Solaris may not have DIRECTIO_ON */
#if defined(UNIV_SOLARIS) && defined(DIRECTIO_ON)
if (directio(fd, DIRECTIO_ON) == -1) {
- int errno_save;
- errno_save = (int) errno;
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Failed to set DIRECTIO_ON "
- "on file %s: %s: %s, continuing anyway\n",
+ int errno_save = errno;
+
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Failed to set DIRECTIO_ON on file %s: %s: %s, "
+ "continuing anyway.",
file_name, operation_name, strerror(errno_save));
}
#elif defined(O_DIRECT)
if (fcntl(fd, F_SETFL, O_DIRECT) == -1) {
- int errno_save;
- errno_save = (int) errno;
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Failed to set O_DIRECT "
- "on file %s: %s: %s, continuing anyway\n",
+ int errno_save = errno;
+
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Failed to set O_DIRECT on file %s: %s: %s, "
+ "continuing anyway",
file_name, operation_name, strerror(errno_save));
+
if (errno_save == EINVAL) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: O_DIRECT is known to result in "
- "'Invalid argument' on Linux on tmpfs, "
- "see MySQL Bug#26662\n");
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "O_DIRECT is known to result in 'Invalid "
+ "argument' on Linux on tmpfs, see MySQL "
+ "Bug#26662");
}
}
-#endif
+#endif /* defined(UNIV_SOLARIS) && defined(DIRECTIO_ON) */
}
-
-#ifdef __linux__
-#include <sys/ioctl.h>
-#ifndef DFS_IOCTL_ATOMIC_WRITE_SET
-#define DFS_IOCTL_ATOMIC_WRITE_SET _IOW(0x95, 2, uint)
-#endif
-static int os_file_set_atomic_writes(os_file_t file, const char *name)
-{
- int atomic_option = 1;
-
- int ret = ioctl (file, DFS_IOCTL_ATOMIC_WRITE_SET, &atomic_option);
-
- if (ret) {
- fprintf(stderr,
- "InnoDB : can't use atomic write on %s, errno %d\n",
- name, errno);
- return ret;
- }
- return ret;
-}
-#else
-static int os_file_set_atomic_writes(os_file_t file, const char *name)
-{
- fprintf(stderr,
- "InnoDB : can't use atomic writes on %s - not implemented on this platform."
- "innodb_use_atomic_writes needs to be 0.\n",
- name);
-#ifdef _WIN32
- SetLastError(ERROR_INVALID_FUNCTION);
-#else
- errno = EINVAL;
-#endif
- return -1;
-}
-#endif
-
/****************************************************************//**
NOTE! Use the corresponding macro os_file_create(), not directly
this function!
@@ -1461,145 +1515,155 @@ os_file_create_func(
ulint type, /*!< in: OS_DATA_FILE or OS_LOG_FILE */
ibool* success)/*!< out: TRUE if succeed, FALSE if error */
{
+ os_file_t file;
+ ibool retry;
ibool on_error_no_exit;
ibool on_error_silent;
#ifdef __WIN__
- os_file_t file;
- DWORD share_mode = FILE_SHARE_READ;
+ DBUG_EXECUTE_IF(
+ "ib_create_table_fail_disk_full",
+ *success = FALSE;
+ SetLastError(ERROR_DISK_FULL);
+ return((os_file_t) -1);
+ );
+#else /* __WIN__ */
+ DBUG_EXECUTE_IF(
+ "ib_create_table_fail_disk_full",
+ *success = FALSE;
+ errno = ENOSPC;
+ return((os_file_t) -1);
+ );
+#endif /* __WIN__ */
+
+#ifdef __WIN__
DWORD create_flag;
- DWORD attributes;
- ibool retry;
+ DWORD share_mode = FILE_SHARE_READ;
on_error_no_exit = create_mode & OS_FILE_ON_ERROR_NO_EXIT
? TRUE : FALSE;
+
on_error_silent = create_mode & OS_FILE_ON_ERROR_SILENT
? TRUE : FALSE;
create_mode &= ~OS_FILE_ON_ERROR_NO_EXIT;
create_mode &= ~OS_FILE_ON_ERROR_SILENT;
+ if (create_mode == OS_FILE_OPEN_RAW) {
- DBUG_EXECUTE_IF(
- "ib_create_table_fail_disk_full",
- *success = FALSE;
- SetLastError(ERROR_DISK_FULL);
- return((os_file_t) -1);
- );
-try_again:
- ut_a(name);
+ ut_a(!srv_read_only_mode);
- if (create_mode == OS_FILE_OPEN_RAW) {
create_flag = OPEN_EXISTING;
- share_mode = FILE_SHARE_WRITE;
+
+ /* On Windows Physical devices require admin privileges and
+ have to have the write-share mode set. See the remarks
+ section for the CreateFile() function documentation in MSDN. */
+
+ share_mode |= FILE_SHARE_WRITE;
+
} else if (create_mode == OS_FILE_OPEN
|| create_mode == OS_FILE_OPEN_RETRY) {
+
create_flag = OPEN_EXISTING;
+
+ } else if (srv_read_only_mode) {
+
+ create_flag = OPEN_EXISTING;
+
} else if (create_mode == OS_FILE_CREATE) {
+
create_flag = CREATE_NEW;
+
} else if (create_mode == OS_FILE_OVERWRITE) {
+
create_flag = CREATE_ALWAYS;
+
} else {
- create_flag = 0;
- ut_error;
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Unknown file create mode (%lu) for file '%s'",
+ create_mode, name);
+
+ return((os_file_t) -1);
}
+ DWORD attributes = 0;
+
+#ifdef UNIV_HOTBACKUP
+ attributes |= FILE_FLAG_NO_BUFFERING;
+#else
if (purpose == OS_FILE_AIO) {
+
+#ifdef WIN_ASYNC_IO
/* If specified, use asynchronous (overlapped) io and no
buffering of writes in the OS */
- attributes = 0;
-#ifdef WIN_ASYNC_IO
+
if (srv_use_native_aio) {
- attributes = attributes | FILE_FLAG_OVERLAPPED;
+ attributes |= FILE_FLAG_OVERLAPPED;
}
-#endif
-#ifdef UNIV_NON_BUFFERED_IO
-# ifndef UNIV_HOTBACKUP
- if (type == OS_LOG_FILE && srv_flush_log_at_trx_commit == 2) {
- /* Do not use unbuffered i/o to log files because
- value 2 denotes that we do not flush the log at every
- commit, but only once per second */
- } else if (srv_win_file_flush_method
- == SRV_WIN_IO_UNBUFFERED) {
- attributes = attributes | FILE_FLAG_NO_BUFFERING;
- }
-# else /* !UNIV_HOTBACKUP */
- attributes = attributes | FILE_FLAG_NO_BUFFERING;
-# endif /* !UNIV_HOTBACKUP */
-#endif /* UNIV_NON_BUFFERED_IO */
+#endif /* WIN_ASYNC_IO */
+
} else if (purpose == OS_FILE_NORMAL) {
- attributes = 0;
-#ifdef UNIV_NON_BUFFERED_IO
-# ifndef UNIV_HOTBACKUP
- if (type == OS_LOG_FILE && srv_flush_log_at_trx_commit == 2) {
- /* Do not use unbuffered i/o to log files because
- value 2 denotes that we do not flush the log at every
- commit, but only once per second */
- } else if (srv_win_file_flush_method
- == SRV_WIN_IO_UNBUFFERED) {
- attributes = attributes | FILE_FLAG_NO_BUFFERING;
- }
-# else /* !UNIV_HOTBACKUP */
- attributes = attributes | FILE_FLAG_NO_BUFFERING;
-# endif /* !UNIV_HOTBACKUP */
-#endif /* UNIV_NON_BUFFERED_IO */
+ /* Use default setting. */
} else {
- attributes = 0;
- ut_error;
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Unknown purpose flag (%lu) while opening file '%s'",
+ purpose, name);
+
+ return((os_file_t)(-1));
}
- file = CreateFile((LPCTSTR) name,
- GENERIC_READ | GENERIC_WRITE, /* read and write
- access */
- share_mode, /* File can be read also by other
- processes; we must give the read
- permission because of ibbackup. We do
- not give the write permission to
- others because if one would succeed to
- start 2 instances of mysqld on the
- SAME files, that could cause severe
- database corruption! When opening
- raw disk partitions, Microsoft manuals
- say that we must give also the write
- permission. */
- NULL, /* default security attributes */
- create_flag,
- attributes,
- NULL); /*!< no template file */
+#ifdef UNIV_NON_BUFFERED_IO
+ // TODO: Create a bug, this looks wrong. The flush log
+ // parameter is dynamic.
+ if (type == OS_LOG_FILE && srv_flush_log_at_trx_commit == 2) {
- if (file == INVALID_HANDLE_VALUE) {
- const char* operation;
+ /* Do not use unbuffered i/o for the log files because
+ value 2 denotes that we do not flush the log at every
+ commit, but only once per second */
- operation = create_mode == OS_FILE_CREATE ? "create" : "open";
+ } else if (srv_win_file_flush_method == SRV_WIN_IO_UNBUFFERED) {
- *success = FALSE;
+ attributes |= FILE_FLAG_NO_BUFFERING;
+ }
+#endif /* UNIV_NON_BUFFERED_IO */
- if (on_error_no_exit) {
- retry = os_file_handle_error_no_exit(
- name, operation, on_error_silent);
- } else {
- retry = os_file_handle_error(name, operation);
- }
+#endif /* UNIV_HOTBACKUP */
+ DWORD access = GENERIC_READ;
- if (retry) {
- goto try_again;
- }
- } else {
- *success = TRUE;
+ if (!srv_read_only_mode) {
+ access |= GENERIC_WRITE;
}
- if (srv_use_atomic_writes && type == OS_DATA_FILE &&
- os_file_set_atomic_writes(file, name)) {
- CloseHandle(file);
+ do {
+ /* Use default security attributes and no template file. */
+ file = CreateFile(
+ (LPCTSTR) name, access, share_mode, NULL,
+ create_flag, attributes, NULL);
+
+ if (file == INVALID_HANDLE_VALUE) {
+ const char* operation;
+
+ operation = (create_mode == OS_FILE_CREATE
+ && !srv_read_only_mode)
+ ? "create" : "open";
+
*success = FALSE;
- file = INVALID_HANDLE_VALUE;
- }
- return(file);
+ if (on_error_no_exit) {
+ retry = os_file_handle_error_no_exit(
+ name, operation, on_error_silent);
+ } else {
+ retry = os_file_handle_error(name, operation);
+ }
+ } else {
+ *success = TRUE;
+ retry = FALSE;
+ }
+
+ } while (retry);
+
#else /* __WIN__ */
- os_file_t file;
int create_flag;
- ibool retry;
const char* mode_str = NULL;
on_error_no_exit = create_mode & OS_FILE_ON_ERROR_NO_EXIT
@@ -1610,28 +1674,36 @@ try_again:
create_mode &= ~OS_FILE_ON_ERROR_NO_EXIT;
create_mode &= ~OS_FILE_ON_ERROR_SILENT;
- DBUG_EXECUTE_IF(
- "ib_create_table_fail_disk_full",
- *success = FALSE;
- errno = ENOSPC;
- return((os_file_t) -1);
- );
-try_again:
- ut_a(name);
-
- if (create_mode == OS_FILE_OPEN || create_mode == OS_FILE_OPEN_RAW
+ if (create_mode == OS_FILE_OPEN
+ || create_mode == OS_FILE_OPEN_RAW
|| create_mode == OS_FILE_OPEN_RETRY) {
+
+ mode_str = "OPEN";
+
+ create_flag = srv_read_only_mode ? O_RDONLY : O_RDWR;
+
+ } else if (srv_read_only_mode) {
+
mode_str = "OPEN";
- create_flag = O_RDWR;
+
+ create_flag = O_RDONLY;
+
} else if (create_mode == OS_FILE_CREATE) {
+
mode_str = "CREATE";
create_flag = O_RDWR | O_CREAT | O_EXCL;
+
} else if (create_mode == OS_FILE_OVERWRITE) {
+
mode_str = "OVERWRITE";
create_flag = O_RDWR | O_CREAT | O_TRUNC;
+
} else {
- create_flag = 0;
- ut_error;
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Unknown file create mode (%lu) for file '%s'",
+ create_mode, name);
+
+ return((os_file_t) -1);
}
ut_a(type == OS_LOG_FILE || type == OS_DATA_FILE);
@@ -1641,69 +1713,75 @@ try_again:
/* We let O_SYNC only affect log files; note that we map O_DSYNC to
O_SYNC because the datasync options seemed to corrupt files in 2001
in both Linux and Solaris */
- if (type == OS_LOG_FILE
- && srv_unix_file_flush_method == SRV_UNIX_O_DSYNC) {
-# if 0
- fprintf(stderr, "Using O_SYNC for file %s\n", name);
-# endif
+ if (!srv_read_only_mode
+ && type == OS_LOG_FILE
+ && srv_unix_file_flush_method == SRV_UNIX_O_DSYNC) {
- create_flag = create_flag | O_SYNC;
+ create_flag |= O_SYNC;
}
#endif /* O_SYNC */
- file = open(name, create_flag, os_innodb_umask);
-
- if (file == -1) {
- const char* operation;
+ do {
+ file = ::open(name, create_flag, os_innodb_umask);
- operation = create_mode == OS_FILE_CREATE ? "create" : "open";
+ if (file == -1) {
+ const char* operation;
- *success = FALSE;
+ operation = (create_mode == OS_FILE_CREATE
+ && !srv_read_only_mode)
+ ? "create" : "open";
- if (on_error_no_exit) {
- retry = os_file_handle_error_no_exit(
- name, operation, on_error_silent);
- } else {
- retry = os_file_handle_error(name, operation);
- }
+ *success = FALSE;
- if (retry) {
- goto try_again;
+ if (on_error_no_exit) {
+ retry = os_file_handle_error_no_exit(
+ name, operation, on_error_silent);
+ } else {
+ retry = os_file_handle_error(name, operation);
+ }
} else {
- return(file /* -1 */);
+ *success = TRUE;
+ retry = false;
}
- }
- /* else */
- *success = TRUE;
+ } while (retry);
/* We disable OS caching (O_DIRECT) only on data files */
- if (type != OS_LOG_FILE
- && srv_unix_file_flush_method == SRV_UNIX_O_DIRECT) {
+
+ if (!srv_read_only_mode
+ && *success
+ && type != OS_LOG_FILE
+ && (srv_unix_file_flush_method == SRV_UNIX_O_DIRECT
+ || srv_unix_file_flush_method == SRV_UNIX_O_DIRECT_NO_FSYNC)) {
os_file_set_nocache(file, name, mode_str);
}
#ifdef USE_FILE_LOCK
- if (create_mode != OS_FILE_OPEN_RAW && os_file_lock(file, name)) {
+ if (!srv_read_only_mode
+ && *success
+ && create_mode != OS_FILE_OPEN_RAW
+ && os_file_lock(file, name)) {
if (create_mode == OS_FILE_OPEN_RETRY) {
- int i;
- ut_print_timestamp(stderr);
- fputs(" InnoDB: Retrying to lock"
- " the first data file\n",
- stderr);
- for (i = 0; i < 100; i++) {
+
+ ut_a(!srv_read_only_mode);
+
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Retrying to lock the first data file");
+
+ for (int i = 0; i < 100; i++) {
os_thread_sleep(1000000);
+
if (!os_file_lock(file, name)) {
*success = TRUE;
return(file);
}
}
- ut_print_timestamp(stderr);
- fputs(" InnoDB: Unable to open the first data file\n",
- stderr);
+
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Unable to open the first data file");
}
*success = FALSE;
@@ -1711,29 +1789,24 @@ try_again:
file = -1;
}
#endif /* USE_FILE_LOCK */
- if (srv_use_atomic_writes && type == OS_DATA_FILE
- && os_file_set_atomic_writes(file, name)) {
- close(file);
- *success = FALSE;
- file = -1;
- }
- return(file);
#endif /* __WIN__ */
+
+ return(file);
}
/***********************************************************************//**
Deletes a file if it exists. The file has to be closed before calling this.
@return TRUE if success */
UNIV_INTERN
-ibool
+bool
os_file_delete_if_exists(
/*=====================*/
const char* name) /*!< in: file path as a null-terminated
string */
{
#ifdef __WIN__
- BOOL ret;
+ bool ret;
ulint count = 0;
loop:
/* In Windows, deleting an .ibd file may fail if ibbackup is copying
@@ -1742,31 +1815,30 @@ loop:
ret = DeleteFile((LPCTSTR) name);
if (ret) {
- return(TRUE);
+ return(true);
}
- if (GetLastError() == ERROR_FILE_NOT_FOUND) {
+ DWORD lasterr = GetLastError();
+ if (lasterr == ERROR_FILE_NOT_FOUND
+ || lasterr == ERROR_PATH_NOT_FOUND) {
/* the file does not exist, this not an error */
- return(TRUE);
+ return(true);
}
count++;
if (count > 100 && 0 == (count % 10)) {
- fprintf(stderr,
- "InnoDB: Warning: cannot delete file %s\n"
- "InnoDB: Are you running ibbackup"
- " to back up the file?\n", name);
+ os_file_get_last_error(true); /* print error information */
- os_file_get_last_error(TRUE); /* print error information */
+ ib_logf(IB_LOG_LEVEL_WARN, "Delete of file %s failed.", name);
}
os_thread_sleep(1000000); /* sleep for a second */
if (count > 2000) {
- return(FALSE);
+ return(false);
}
goto loop;
@@ -1778,18 +1850,18 @@ loop:
if (ret != 0 && errno != ENOENT) {
os_file_handle_error_no_exit(name, "delete", FALSE);
- return(FALSE);
+ return(false);
}
- return(TRUE);
-#endif
+ return(true);
+#endif /* __WIN__ */
}
/***********************************************************************//**
Deletes a file. The file has to be closed before calling this.
@return TRUE if success */
UNIV_INTERN
-ibool
+bool
os_file_delete(
/*===========*/
const char* name) /*!< in: file path as a null-terminated
@@ -1805,32 +1877,32 @@ loop:
ret = DeleteFile((LPCTSTR) name);
if (ret) {
- return(TRUE);
+ return(true);
}
if (GetLastError() == ERROR_FILE_NOT_FOUND) {
/* If the file does not exist, we classify this as a 'mild'
error and return */
- return(FALSE);
+ return(false);
}
count++;
if (count > 100 && 0 == (count % 10)) {
+ os_file_get_last_error(true); /* print error information */
+
fprintf(stderr,
"InnoDB: Warning: cannot delete file %s\n"
"InnoDB: Are you running ibbackup"
" to back up the file?\n", name);
-
- os_file_get_last_error(TRUE); /* print error information */
}
os_thread_sleep(1000000); /* sleep for a second */
if (count > 2000) {
- return(FALSE);
+ return(false);
}
goto loop;
@@ -1842,10 +1914,10 @@ loop:
if (ret != 0) {
os_file_handle_error_no_exit(name, "delete", FALSE);
- return(FALSE);
+ return(false);
}
- return(TRUE);
+ return(true);
#endif
}
@@ -1862,6 +1934,19 @@ os_file_rename_func(
string */
const char* newpath)/*!< in: new file path */
{
+#ifdef UNIV_DEBUG
+ os_file_type_t type;
+ ibool exists;
+
+ /* New path must not exist. */
+ ut_ad(os_file_status(newpath, &exists, &type));
+ ut_ad(!exists);
+
+ /* Old path must exist. */
+ ut_ad(os_file_status(oldpath, &exists, &type));
+ ut_ad(exists);
+#endif /* UNIV_DEBUG */
+
#ifdef __WIN__
BOOL ret;
@@ -1886,7 +1971,7 @@ os_file_rename_func(
}
return(TRUE);
-#endif
+#endif /* __WIN__ */
}
/***********************************************************************//**
@@ -1926,7 +2011,7 @@ os_file_close_func(
}
return(TRUE);
-#endif
+#endif /* __WIN__ */
}
#ifdef UNIV_HOTBACKUP
@@ -1962,7 +2047,7 @@ os_file_close_no_error_handling(
}
return(TRUE);
-#endif
+#endif /* __WIN__ */
}
#endif /* UNIV_HOTBACKUP */
@@ -1991,7 +2076,7 @@ os_file_get_size(
return(offset);
#else
return((os_offset_t) lseek(file, 0, SEEK_END));
-#endif
+#endif /* __WIN__ */
}
/***********************************************************************//**
@@ -2014,28 +2099,6 @@ os_file_set_size(
current_size = 0;
-#ifdef HAVE_POSIX_FALLOCATE
- if (srv_use_posix_fallocate) {
- if (posix_fallocate(file, current_size, size) == -1) {
- fprintf(stderr,
- "InnoDB: Error: preallocating data for"
- " file %s failed at\n"
- "InnoDB: offset 0 size %lld. Operating system"
- " error number %d.\n"
- "InnoDB: Check that the disk is not full"
- " or a disk quota exceeded.\n"
- "InnoDB: Some operating system error numbers"
- " are described at\n"
- "InnoDB: "
- REFMAN "operating-system-error-codes.html\n",
- name, (longlong)size, errno);
-
- return (FALSE);
- }
- return (TRUE);
- }
-#endif
-
/* Write up to 1 megabyte at a time. */
buf_size = ut_min(64, (ulint) (size / UNIV_PAGE_SIZE))
* UNIV_PAGE_SIZE;
@@ -2246,10 +2309,7 @@ os_file_flush_func(
return(TRUE);
}
- ut_print_timestamp(stderr);
-
- fprintf(stderr,
- " InnoDB: Error: the OS said file flush did not succeed\n");
+ ib_logf(IB_LOG_LEVEL_ERROR, "The OS said file flush did not succeed");
os_file_handle_error(NULL, "flush");
@@ -2286,9 +2346,9 @@ os_file_pread(
offs = (off_t) offset;
if (sizeof(off_t) <= 4) {
- if (UNIV_UNLIKELY(offset != (os_offset_t) offs)) {
- fprintf(stderr,
- "InnoDB: Error: file read at offset > 4 GB\n");
+ if (offset != (os_offset_t) offs) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "File read at offset > 4 GB");
}
}
@@ -2389,16 +2449,16 @@ os_file_pwrite(
off_t offs;
ut_ad(n);
+ ut_ad(!srv_read_only_mode);
/* If off_t is > 4 bytes in size, then we assume we can pass a
64-bit address */
offs = (off_t) offset;
if (sizeof(off_t) <= 4) {
- if (UNIV_UNLIKELY(offset != (os_offset_t) offs)) {
- fprintf(stderr,
- "InnoDB: Error: file write"
- " at offset > 4 GB\n");
+ if (offset != (os_offset_t) offs) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "File write at offset > 4 GB.");
}
}
@@ -2473,7 +2533,7 @@ func_exit:
return(ret);
}
-#endif
+#endif /* !UNIV_HOTBACKUP */
}
#endif
@@ -2574,11 +2634,9 @@ try_again:
return(TRUE);
}
- fprintf(stderr,
- "InnoDB: Error: tried to read "ULINTPF" bytes at offset "
- UINT64PF"\n"
- "InnoDB: Was only able to read %ld.\n",
- n, offset, (lint) ret);
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Tried to read "ULINTPF" bytes at offset " UINT64PF". "
+ "Was only able to read %ld.", n, offset, (lint) ret);
#endif /* __WIN__ */
#ifdef __WIN__
error_handling:
@@ -2596,7 +2654,7 @@ error_handling:
(ulong) GetLastError()
#else
(ulong) errno
-#endif
+#endif /* __WIN__ */
);
fflush(stderr);
@@ -2754,6 +2812,8 @@ os_file_write_func(
os_offset_t offset, /*!< in: file offset where to write */
ulint n) /*!< in: number of bytes to write */
{
+ ut_ad(!srv_read_only_mode);
+
#ifdef __WIN__
BOOL ret;
DWORD len;
@@ -2913,8 +2973,8 @@ retry:
(ulint) errno);
if (strerror(errno) != NULL) {
fprintf(stderr,
- "InnoDB: Error number %lu means '%s'.\n",
- (ulint) errno, strerror(errno));
+ "InnoDB: Error number %d means '%s'.\n",
+ errno, strerror(errno));
}
fprintf(stderr,
@@ -2937,15 +2997,15 @@ UNIV_INTERN
ibool
os_file_status(
/*===========*/
- const char* path, /*!< in: pathname of the file */
+ const char* path, /*!< in: pathname of the file */
ibool* exists, /*!< out: TRUE if file exists */
os_file_type_t* type) /*!< out: type of the file (if it exists) */
{
#ifdef __WIN__
int ret;
- struct _stat statinfo;
+ struct _stat64 statinfo;
- ret = _stat(path, &statinfo);
+ ret = _stat64(path, &statinfo);
if (ret && (errno == ENOENT || errno == ENOTDIR)) {
/* file does not exist */
*exists = FALSE;
@@ -3004,47 +3064,73 @@ os_file_status(
/*******************************************************************//**
This function returns information about the specified file
-@return TRUE if stat information found */
+@return DB_SUCCESS if all OK */
UNIV_INTERN
-ibool
+dberr_t
os_file_get_status(
/*===============*/
const char* path, /*!< in: pathname of the file */
- os_file_stat_t* stat_info) /*!< information of a file in a
+ os_file_stat_t* stat_info, /*!< information of a file in a
directory */
+ bool check_rw_perm) /*!< in: for testing whether the
+ file can be opened in RW mode */
{
-#ifdef __WIN__
int ret;
- struct _stat statinfo;
- ret = _stat(path, &statinfo);
+#ifdef __WIN__
+ struct _stat64 statinfo;
+
+ ret = _stat64(path, &statinfo);
+
if (ret && (errno == ENOENT || errno == ENOTDIR)) {
/* file does not exist */
- return(FALSE);
+ return(DB_NOT_FOUND);
+
} else if (ret) {
/* file exists, but stat call failed */
os_file_handle_error_no_exit(path, "stat", FALSE);
- return(FALSE);
- }
- if (_S_IFDIR & statinfo.st_mode) {
+ return(DB_FAIL);
+
+ } else if (_S_IFDIR & statinfo.st_mode) {
stat_info->type = OS_FILE_TYPE_DIR;
} else if (_S_IFREG & statinfo.st_mode) {
+
+ DWORD access = GENERIC_READ;
+
+ if (!srv_read_only_mode) {
+ access |= GENERIC_WRITE;
+ }
+
stat_info->type = OS_FILE_TYPE_FILE;
+
+ /* Check if we can open it in read-only mode. */
+
+ if (check_rw_perm) {
+ HANDLE fh;
+
+ fh = CreateFile(
+ (LPCTSTR) path, // File to open
+ access,
+ 0, // No sharing
+ NULL, // Default security
+ OPEN_EXISTING, // Existing file only
+ FILE_ATTRIBUTE_NORMAL, // Normal file
+ NULL); // No attr. template
+
+ if (fh == INVALID_HANDLE_VALUE) {
+ stat_info->rw_perm = false;
+ } else {
+ stat_info->rw_perm = true;
+ CloseHandle(fh);
+ }
+ }
} else {
stat_info->type = OS_FILE_TYPE_UNKNOWN;
}
-
- stat_info->ctime = statinfo.st_ctime;
- stat_info->atime = statinfo.st_atime;
- stat_info->mtime = statinfo.st_mtime;
- stat_info->size = statinfo.st_size;
-
- return(TRUE);
#else
- int ret;
struct stat statinfo;
ret = stat(path, &statinfo);
@@ -3052,32 +3138,49 @@ os_file_get_status(
if (ret && (errno == ENOENT || errno == ENOTDIR)) {
/* file does not exist */
- return(FALSE);
+ return(DB_NOT_FOUND);
+
} else if (ret) {
/* file exists, but stat call failed */
os_file_handle_error_no_exit(path, "stat", FALSE);
- return(FALSE);
- }
+ return(DB_FAIL);
- if (S_ISDIR(statinfo.st_mode)) {
+ } else if (S_ISDIR(statinfo.st_mode)) {
stat_info->type = OS_FILE_TYPE_DIR;
} else if (S_ISLNK(statinfo.st_mode)) {
stat_info->type = OS_FILE_TYPE_LINK;
} else if (S_ISREG(statinfo.st_mode)) {
stat_info->type = OS_FILE_TYPE_FILE;
+
+ if (check_rw_perm) {
+ int fh;
+ int access;
+
+ access = !srv_read_only_mode ? O_RDWR : O_RDONLY;
+
+ fh = ::open(path, access, os_innodb_umask);
+
+ if (fh == -1) {
+ stat_info->rw_perm = false;
+ } else {
+ stat_info->rw_perm = true;
+ close(fh);
+ }
+ }
} else {
stat_info->type = OS_FILE_TYPE_UNKNOWN;
}
+#endif /* _WIN_ */
+
stat_info->ctime = statinfo.st_ctime;
stat_info->atime = statinfo.st_atime;
stat_info->mtime = statinfo.st_mtime;
- stat_info->size = statinfo.st_size;
+ stat_info->size = statinfo.st_size;
- return(TRUE);
-#endif
+ return(DB_SUCCESS);
}
/* path name separator character */
@@ -3088,6 +3191,153 @@ os_file_get_status(
#endif
/****************************************************************//**
+This function returns a new path name after replacing the basename
+in an old path with a new basename. The old_path is a full path
+name including the extension. The tablename is in the normal
+form "databasename/tablename". The new base name is found after
+the forward slash. Both input strings are null terminated.
+
+This function allocates memory to be returned. It is the callers
+responsibility to free the return value after it is no longer needed.
+
+@return own: new full pathname */
+UNIV_INTERN
+char*
+os_file_make_new_pathname(
+/*======================*/
+ const char* old_path, /*!< in: pathname */
+ const char* tablename) /*!< in: contains new base name */
+{
+ ulint dir_len;
+ char* last_slash;
+ char* base_name;
+ char* new_path;
+ ulint new_path_len;
+
+ /* Split the tablename into its database and table name components.
+ They are separated by a '/'. */
+ last_slash = strrchr((char*) tablename, '/');
+ base_name = last_slash ? last_slash + 1 : (char*) tablename;
+
+ /* Find the offset of the last slash. We will strip off the
+ old basename.ibd which starts after that slash. */
+ last_slash = strrchr((char*) old_path, OS_FILE_PATH_SEPARATOR);
+ dir_len = last_slash ? last_slash - old_path : strlen(old_path);
+
+ /* allocate a new path and move the old directory path to it. */
+ new_path_len = dir_len + strlen(base_name) + sizeof "/.ibd";
+ new_path = static_cast<char*>(mem_alloc(new_path_len));
+ memcpy(new_path, old_path, dir_len);
+
+ ut_snprintf(new_path + dir_len,
+ new_path_len - dir_len,
+ "%c%s.ibd",
+ OS_FILE_PATH_SEPARATOR,
+ base_name);
+
+ return(new_path);
+}
+
+/****************************************************************//**
+This function returns a remote path name by combining a data directory
+path provided in a DATA DIRECTORY clause with the tablename which is
+in the form 'database/tablename'. It strips the file basename (which
+is the tablename) found after the last directory in the path provided.
+The full filepath created will include the database name as a directory
+under the path provided. The filename is the tablename with the '.ibd'
+extension. All input and output strings are null-terminated.
+
+This function allocates memory to be returned. It is the callers
+responsibility to free the return value after it is no longer needed.
+
+@return own: A full pathname; data_dir_path/databasename/tablename.ibd */
+UNIV_INTERN
+char*
+os_file_make_remote_pathname(
+/*=========================*/
+ const char* data_dir_path, /*!< in: pathname */
+ const char* tablename, /*!< in: tablename */
+ const char* extention) /*!< in: file extention; ibd,cfg */
+{
+ ulint data_dir_len;
+ char* last_slash;
+ char* new_path;
+ ulint new_path_len;
+
+ ut_ad(extention && strlen(extention) == 3);
+
+ /* Find the offset of the last slash. We will strip off the
+ old basename or tablename which starts after that slash. */
+ last_slash = strrchr((char*) data_dir_path, OS_FILE_PATH_SEPARATOR);
+ data_dir_len = last_slash ? last_slash - data_dir_path : strlen(data_dir_path);
+
+ /* allocate a new path and move the old directory path to it. */
+ new_path_len = data_dir_len + strlen(tablename)
+ + sizeof "/." + strlen(extention);
+ new_path = static_cast<char*>(mem_alloc(new_path_len));
+ memcpy(new_path, data_dir_path, data_dir_len);
+ ut_snprintf(new_path + data_dir_len,
+ new_path_len - data_dir_len,
+ "%c%s.%s",
+ OS_FILE_PATH_SEPARATOR,
+ tablename,
+ extention);
+
+ srv_normalize_path_for_win(new_path);
+
+ return(new_path);
+}
+
+/****************************************************************//**
+This function reduces a null-terminated full remote path name into
+the path that is sent by MySQL for DATA DIRECTORY clause. It replaces
+the 'databasename/tablename.ibd' found at the end of the path with just
+'tablename'.
+
+Since the result is always smaller than the path sent in, no new memory
+is allocated. The caller should allocate memory for the path sent in.
+This function manipulates that path in place.
+
+If the path format is not as expected, just return. The result is used
+to inform a SHOW CREATE TABLE command. */
+UNIV_INTERN
+void
+os_file_make_data_dir_path(
+/*========================*/
+ char* data_dir_path) /*!< in/out: full path/data_dir_path */
+{
+ char* ptr;
+ char* tablename;
+ ulint tablename_len;
+
+ /* Replace the period before the extension with a null byte. */
+ ptr = strrchr((char*) data_dir_path, '.');
+ if (!ptr) {
+ return;
+ }
+ ptr[0] = '\0';
+
+ /* The tablename starts after the last slash. */
+ ptr = strrchr((char*) data_dir_path, OS_FILE_PATH_SEPARATOR);
+ if (!ptr) {
+ return;
+ }
+ ptr[0] = '\0';
+ tablename = ptr + 1;
+
+ /* The databasename starts after the next to last slash. */
+ ptr = strrchr((char*) data_dir_path, OS_FILE_PATH_SEPARATOR);
+ if (!ptr) {
+ return;
+ }
+ tablename_len = ut_strlen(tablename);
+
+ ut_memmove(++ptr, tablename, tablename_len);
+
+ ptr[tablename_len] = '\0';
+}
+
+/****************************************************************//**
The function os_file_dirname returns a directory component of a
null-terminated pathname string. In the usual case, dirname returns
the string up to, but not including, the final '/', and basename
@@ -3151,11 +3401,18 @@ os_file_create_subdirs_if_needed(
/*=============================*/
const char* path) /*!< in: path name */
{
- char* subdir;
- ibool success, subdir_exists;
- os_file_type_t type;
+ if (srv_read_only_mode) {
+
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "read only mode set. Can't create subdirectories '%s'",
+ path);
+
+ return(FALSE);
+
+ }
+
+ char* subdir = os_file_dirname(path);
- subdir = os_file_dirname(path);
if (strlen(subdir) == 1
&& (*subdir == OS_FILE_PATH_SEPARATOR || *subdir == '.')) {
/* subdir is root or cwd, nothing to do */
@@ -3165,15 +3422,21 @@ os_file_create_subdirs_if_needed(
}
/* Test if subdir exists */
- success = os_file_status(subdir, &subdir_exists, &type);
+ os_file_type_t type;
+ ibool subdir_exists;
+ ibool success = os_file_status(subdir, &subdir_exists, &type);
+
if (success && !subdir_exists) {
+
/* subdir does not exist, create it */
success = os_file_create_subdirs_if_needed(subdir);
+
if (!success) {
mem_free(subdir);
return(FALSE);
}
+
success = os_file_create_directory(subdir, FALSE);
}
@@ -3195,7 +3458,7 @@ os_aio_array_get_nth_slot(
{
ut_a(index < array->n_slots);
- return((array->slots) + index);
+ return(&array->slots[index]);
}
#if defined(LINUX_NATIVE_AIO)
@@ -3297,43 +3560,74 @@ os_aio_native_aio_supported(void)
/*=============================*/
{
int fd;
- byte* buf;
- byte* ptr;
- struct io_event io_event;
io_context_t io_ctx;
- struct iocb iocb;
- struct iocb* p_iocb;
- int err;
+ char name[1000];
if (!os_aio_linux_create_io_ctx(1, &io_ctx)) {
/* The platform does not support native aio. */
return(FALSE);
- }
+ } else if (!srv_read_only_mode) {
+ /* Now check if tmpdir supports native aio ops. */
+ fd = innobase_mysql_tmpfile();
- /* Now check if tmpdir supports native aio ops. */
- fd = innobase_mysql_tmpfile();
+ if (fd < 0) {
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Unable to create temp file to check "
+ "native AIO support.");
- if (fd < 0) {
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Error: unable to create "
- "temp file to check native AIO support.\n");
+ return(FALSE);
+ }
+ } else {
- return(FALSE);
+ srv_normalize_path_for_win(srv_log_group_home_dir);
+
+ ulint dirnamelen = strlen(srv_log_group_home_dir);
+ ut_a(dirnamelen < (sizeof name) - 10 - sizeof "ib_logfile");
+ memcpy(name, srv_log_group_home_dir, dirnamelen);
+
+ /* Add a path separator if needed. */
+ if (dirnamelen && name[dirnamelen - 1] != SRV_PATH_SEPARATOR) {
+ name[dirnamelen++] = SRV_PATH_SEPARATOR;
+ }
+
+ strcpy(name + dirnamelen, "ib_logfile0");
+
+ fd = ::open(name, O_RDONLY);
+
+ if (fd == -1) {
+
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Unable to open \"%s\" to check "
+ "native AIO read support.", name);
+
+ return(FALSE);
+ }
}
+ struct io_event io_event;
+
memset(&io_event, 0x0, sizeof(io_event));
- buf = static_cast<byte*>(ut_malloc(UNIV_PAGE_SIZE * 2));
- ptr = static_cast<byte*>(ut_align(buf, UNIV_PAGE_SIZE));
+ byte* buf = static_cast<byte*>(ut_malloc(UNIV_PAGE_SIZE * 2));
+ byte* ptr = static_cast<byte*>(ut_align(buf, UNIV_PAGE_SIZE));
+
+ struct iocb iocb;
/* Suppress valgrind warning. */
memset(buf, 0x00, UNIV_PAGE_SIZE * 2);
-
memset(&iocb, 0x0, sizeof(iocb));
- p_iocb = &iocb;
- io_prep_pwrite(p_iocb, fd, ptr, UNIV_PAGE_SIZE, 0);
- err = io_submit(io_ctx, 1, &p_iocb);
+ struct iocb* p_iocb = &iocb;
+
+ if (!srv_read_only_mode) {
+ io_prep_pwrite(p_iocb, fd, ptr, UNIV_PAGE_SIZE, 0);
+ } else {
+ ut_a(UNIV_PAGE_SIZE >= 512);
+ io_prep_pread(p_iocb, fd, ptr, 512, 0);
+ }
+
+ int err = io_submit(io_ctx, 1, &p_iocb);
+
if (err >= 1) {
/* Now collect the submitted IO request. */
err = io_getevents(io_ctx, 1, 1, &io_event, NULL);
@@ -3348,22 +3642,18 @@ os_aio_native_aio_supported(void)
case -EINVAL:
case -ENOSYS:
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Error: Linux Native AIO is not"
- " supported on tmpdir.\n"
- "InnoDB: You can either move tmpdir to a"
- " file system that supports native AIO\n"
- "InnoDB: or you can set"
- " innodb_use_native_aio to FALSE to avoid"
- " this message.\n");
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Linux Native AIO not supported. You can either "
+ "move %s to a file system that supports native "
+ "AIO or you can set innodb_use_native_aio to "
+ "FALSE to avoid this message.",
+ srv_read_only_mode ? name : "tmpdir");
/* fall through. */
default:
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Error: Linux Native AIO check"
- " on tmpdir returned error[%d]\n", -err);
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Linux Native AIO check on %s returned error[%d]",
+ srv_read_only_mode ? name : "tmpdir", -err);
}
return(FALSE);
@@ -3385,34 +3675,33 @@ os_aio_array_create(
ulint n_segments) /*!< in: number of segments in the aio array */
{
os_aio_array_t* array;
- ulint i;
- os_aio_slot_t* slot;
#ifdef WIN_ASYNC_IO
OVERLAPPED* over;
#elif defined(LINUX_NATIVE_AIO)
struct io_event* io_event = NULL;
-#endif
+#endif /* WIN_ASYNC_IO */
ut_a(n > 0);
ut_a(n_segments > 0);
- array = static_cast<os_aio_array_t*>(ut_malloc(sizeof(os_aio_array_t)));
+ array = static_cast<os_aio_array_t*>(ut_malloc(sizeof(*array)));
+ memset(array, 0x0, sizeof(*array));
- array->mutex = os_mutex_create();
- array->not_full = os_event_create(NULL);
- array->is_empty = os_event_create(NULL);
+ array->mutex = os_mutex_create();
+ array->not_full = os_event_create();
+ array->is_empty = os_event_create();
os_event_set(array->is_empty);
- array->n_slots = n;
- array->n_segments = n_segments;
- array->n_reserved = 0;
- array->cur_seg = 0;
+ array->n_slots = n;
+ array->n_segments = n_segments;
array->slots = static_cast<os_aio_slot_t*>(
- ut_malloc(n * sizeof(os_aio_slot_t)));
+ ut_malloc(n * sizeof(*array->slots)));
+
+ memset(array->slots, 0x0, sizeof(n * sizeof(*array->slots)));
#ifdef __WIN__
array->handles = static_cast<HANDLE*>(ut_malloc(n * sizeof(HANDLE)));
-#endif
+#endif /* __WIN__ */
#if defined(LINUX_NATIVE_AIO)
array->aio_ctx = NULL;
@@ -3430,7 +3719,7 @@ os_aio_array_create(
array->aio_ctx = static_cast<io_context**>(
ut_malloc(n_segments * sizeof(*array->aio_ctx)));
- for (i = 0; i < n_segments; ++i) {
+ for (ulint i = 0; i < n_segments; ++i) {
if (!os_aio_linux_create_io_ctx(n/n_segments,
&array->aio_ctx[i])) {
/* If something bad happened during aio setup
@@ -3463,7 +3752,9 @@ os_aio_array_create(
skip_native_aio:
#endif /* LINUX_NATIVE_AIO */
- for (i = 0; i < n; i++) {
+ for (ulint i = 0; i < n; i++) {
+ os_aio_slot_t* slot;
+
slot = os_aio_array_get_nth_slot(array, i);
slot->pos = i;
@@ -3471,18 +3762,17 @@ skip_native_aio:
#ifdef WIN_ASYNC_IO
slot->handle = CreateEvent(NULL,TRUE, FALSE, NULL);
- over = &(slot->control);
+ over = &slot->control;
over->hEvent = slot->handle;
- *((array->handles) + i) = over->hEvent;
+ array->handles[i] = over->hEvent;
#elif defined(LINUX_NATIVE_AIO)
-
memset(&slot->control, 0x0, sizeof(slot->control));
slot->n_bytes = 0;
slot->ret = 0;
-#endif
+#endif /* WIN_ASYNC_IO */
}
return(array);
@@ -3494,7 +3784,7 @@ static
void
os_aio_array_free(
/*==============*/
- os_aio_array_t* array) /*!< in, own: array to free */
+ os_aio_array_t*& array) /*!< in, own: array to free */
{
#ifdef WIN_ASYNC_IO
ulint i;
@@ -3521,6 +3811,8 @@ os_aio_array_free(
ut_free(array->slots);
ut_free(array);
+
+ array = 0;
}
/***********************************************************************
@@ -3541,93 +3833,100 @@ os_aio_init(
ulint n_slots_sync) /*<! in: number of slots in the sync aio
array */
{
- ulint i;
- ulint n_segments = 2 + n_read_segs + n_write_segs;
-
- ut_ad(n_segments >= 4);
-
os_io_init_simple();
#if defined(LINUX_NATIVE_AIO)
/* Check if native aio is supported on this system and tmpfs */
- if (srv_use_native_aio
- && !os_aio_native_aio_supported()) {
+ if (srv_use_native_aio && !os_aio_native_aio_supported()) {
+
+ ib_logf(IB_LOG_LEVEL_WARN, "Linux Native AIO disabled.");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Warning: Linux Native AIO"
- " disabled.\n");
srv_use_native_aio = FALSE;
}
#endif /* LINUX_NATIVE_AIO */
- for (i = 0; i < n_segments; i++) {
- srv_set_io_thread_op_info(i, "not started yet");
- }
-
+ srv_reset_io_thread_op_info();
- /* fprintf(stderr, "Array n per seg %lu\n", n_per_seg); */
+ os_aio_read_array = os_aio_array_create(
+ n_read_segs * n_per_seg, n_read_segs);
- os_aio_ibuf_array = os_aio_array_create(n_per_seg, 1);
- if (os_aio_ibuf_array == NULL) {
- goto err_exit;
+ if (os_aio_read_array == NULL) {
+ return(FALSE);
}
- srv_io_thread_function[0] = "insert buffer thread";
+ ulint start = (srv_read_only_mode) ? 0 : 2;
+ ulint n_segs = n_read_segs + start;
- os_aio_log_array = os_aio_array_create(n_per_seg, 1);
- if (os_aio_log_array == NULL) {
- goto err_exit;
+ /* 0 is the ibuf segment and 1 is the insert buffer segment. */
+ for (ulint i = start; i < n_segs; ++i) {
+ ut_a(i < SRV_MAX_N_IO_THREADS);
+ srv_io_thread_function[i] = "read thread";
}
- srv_io_thread_function[1] = "log thread";
+ ulint n_segments = n_read_segs;
- os_aio_read_array = os_aio_array_create(n_read_segs * n_per_seg,
- n_read_segs);
- if (os_aio_read_array == NULL) {
- goto err_exit;
- }
+ if (!srv_read_only_mode) {
- for (i = 2; i < 2 + n_read_segs; i++) {
- ut_a(i < SRV_MAX_N_IO_THREADS);
- srv_io_thread_function[i] = "read thread";
- }
+ os_aio_log_array = os_aio_array_create(n_per_seg, 1);
- os_aio_write_array = os_aio_array_create(n_write_segs * n_per_seg,
- n_write_segs);
- if (os_aio_write_array == NULL) {
- goto err_exit;
- }
+ if (os_aio_log_array == NULL) {
+ return(FALSE);
+ }
- for (i = 2 + n_read_segs; i < n_segments; i++) {
- ut_a(i < SRV_MAX_N_IO_THREADS);
- srv_io_thread_function[i] = "write thread";
+ ++n_segments;
+
+ srv_io_thread_function[1] = "log thread";
+
+ os_aio_ibuf_array = os_aio_array_create(n_per_seg, 1);
+
+ if (os_aio_ibuf_array == NULL) {
+ return(FALSE);
+ }
+
+ ++n_segments;
+
+ srv_io_thread_function[0] = "insert buffer thread";
+
+ os_aio_write_array = os_aio_array_create(
+ n_write_segs * n_per_seg, n_write_segs);
+
+ if (os_aio_write_array == NULL) {
+ return(FALSE);
+ }
+
+ n_segments += n_write_segs;
+
+ for (ulint i = start + n_read_segs; i < n_segments; ++i) {
+ ut_a(i < SRV_MAX_N_IO_THREADS);
+ srv_io_thread_function[i] = "write thread";
+ }
+
+ ut_ad(n_segments >= 4);
+ } else {
+ ut_ad(n_segments > 0);
}
os_aio_sync_array = os_aio_array_create(n_slots_sync, 1);
+
if (os_aio_sync_array == NULL) {
- goto err_exit;
+ return(FALSE);
}
-
os_aio_n_segments = n_segments;
os_aio_validate();
- os_aio_segment_wait_events = static_cast<os_event_struct_t**>(
- ut_malloc(n_segments * sizeof(void*)));
+ os_aio_segment_wait_events = static_cast<os_event_t*>(
+ ut_malloc(n_segments * sizeof *os_aio_segment_wait_events));
- for (i = 0; i < n_segments; i++) {
- os_aio_segment_wait_events[i] = os_event_create(NULL);
+ for (ulint i = 0; i < n_segments; ++i) {
+ os_aio_segment_wait_events[i] = os_event_create();
}
- os_last_printout = time(NULL);
+ os_last_printout = ut_time();
return(TRUE);
-err_exit:
- return(FALSE);
-
}
/***********************************************************************
@@ -3637,20 +3936,25 @@ void
os_aio_free(void)
/*=============*/
{
- ulint i;
+ if (os_aio_ibuf_array != 0) {
+ os_aio_array_free(os_aio_ibuf_array);
+ }
+
+ if (os_aio_log_array != 0) {
+ os_aio_array_free(os_aio_log_array);
+ }
+
+ if (os_aio_write_array != 0) {
+ os_aio_array_free(os_aio_write_array);
+ }
+
+ if (os_aio_sync_array != 0) {
+ os_aio_array_free(os_aio_sync_array);
+ }
- os_aio_array_free(os_aio_ibuf_array);
- os_aio_ibuf_array = NULL;
- os_aio_array_free(os_aio_log_array);
- os_aio_log_array = NULL;
os_aio_array_free(os_aio_read_array);
- os_aio_read_array = NULL;
- os_aio_array_free(os_aio_write_array);
- os_aio_write_array = NULL;
- os_aio_array_free(os_aio_sync_array);
- os_aio_sync_array = NULL;
- for (i = 0; i < os_aio_n_segments; i++) {
+ for (ulint i = 0; i < os_aio_n_segments; i++) {
os_event_free(os_aio_segment_wait_events[i]);
}
@@ -3686,14 +3990,20 @@ void
os_aio_wake_all_threads_at_shutdown(void)
/*=====================================*/
{
- ulint i;
-
#ifdef WIN_ASYNC_IO
/* This code wakes up all ai/o threads in Windows native aio */
os_aio_array_wake_win_aio_at_shutdown(os_aio_read_array);
- os_aio_array_wake_win_aio_at_shutdown(os_aio_write_array);
- os_aio_array_wake_win_aio_at_shutdown(os_aio_ibuf_array);
- os_aio_array_wake_win_aio_at_shutdown(os_aio_log_array);
+ if (os_aio_write_array != 0) {
+ os_aio_array_wake_win_aio_at_shutdown(os_aio_write_array);
+ }
+
+ if (os_aio_ibuf_array != 0) {
+ os_aio_array_wake_win_aio_at_shutdown(os_aio_ibuf_array);
+ }
+
+ if (os_aio_log_array != 0) {
+ os_aio_array_wake_win_aio_at_shutdown(os_aio_log_array);
+ }
#elif defined(LINUX_NATIVE_AIO)
@@ -3705,12 +4015,14 @@ os_aio_wake_all_threads_at_shutdown(void)
if (srv_use_native_aio) {
return;
}
+
/* Fall through to simulated AIO handler wakeup if we are
not using native AIO. */
-#endif
+#endif /* !WIN_ASYNC_AIO */
+
/* This loop wakes up all simulated ai/o threads */
- for (i = 0; i < os_aio_n_segments; i++) {
+ for (ulint i = 0; i < os_aio_n_segments; i++) {
os_event_set(os_aio_segment_wait_events[i]);
}
@@ -3724,6 +4036,7 @@ void
os_aio_wait_until_no_pending_writes(void)
/*=====================================*/
{
+ ut_ad(!srv_read_only_mode);
os_event_wait(os_aio_write_array->is_empty);
}
@@ -3742,10 +4055,14 @@ os_aio_get_segment_no_from_slot(
ulint seg_len;
if (array == os_aio_ibuf_array) {
- segment = 0;
+ ut_ad(!srv_read_only_mode);
+
+ segment = IO_IBUF_SEGMENT;
} else if (array == os_aio_log_array) {
- segment = 1;
+ ut_ad(!srv_read_only_mode);
+
+ segment = IO_LOG_SEGMENT;
} else if (array == os_aio_read_array) {
seg_len = os_aio_read_array->n_slots
@@ -3753,7 +4070,9 @@ os_aio_get_segment_no_from_slot(
segment = 2 + slot->pos / seg_len;
} else {
+ ut_ad(!srv_read_only_mode);
ut_a(array == os_aio_write_array);
+
seg_len = os_aio_write_array->n_slots
/ os_aio_write_array->n_segments;
@@ -3774,15 +4093,19 @@ os_aio_get_array_and_local_segment(
os_aio_array_t** array, /*!< out: aio wait array */
ulint global_segment)/*!< in: global segment number */
{
- ulint segment;
+ ulint segment;
ut_a(global_segment < os_aio_n_segments);
- if (global_segment == 0) {
+ if (srv_read_only_mode) {
+ *array = os_aio_read_array;
+
+ return(global_segment);
+ } else if (global_segment == IO_IBUF_SEGMENT) {
*array = os_aio_ibuf_array;
segment = 0;
- } else if (global_segment == 1) {
+ } else if (global_segment == IO_LOG_SEGMENT) {
*array = os_aio_log_array;
segment = 0;
@@ -3830,7 +4153,7 @@ os_aio_array_reserve_slot(
struct iocb* iocb;
off_t aio_offset;
-#endif
+#endif /* WIN_ASYNC_IO */
ulint i;
ulint counter;
ulint slots_per_seg;
@@ -3838,7 +4161,7 @@ os_aio_array_reserve_slot(
#ifdef WIN_ASYNC_IO
ut_a((len & 0xFFFFFFFFUL) == len);
-#endif
+#endif /* WIN_ASYNC_IO */
/* No need of a mutex. Only reading constant fields */
slots_per_seg = array->n_slots / array->n_segments;
@@ -3871,9 +4194,11 @@ loop:
local segment and do a full scan of the array. We are
guaranteed to find a slot in full scan. */
for (i = local_seg * slots_per_seg, counter = 0;
- counter < array->n_slots; i++, counter++) {
+ counter < array->n_slots;
+ i++, counter++) {
i %= array->n_slots;
+
slot = os_aio_array_get_nth_slot(array, i);
if (slot->reserved == FALSE) {
@@ -3897,7 +4222,7 @@ found:
}
slot->reserved = TRUE;
- slot->reservation_time = time(NULL);
+ slot->reservation_time = ut_time();
slot->message1 = message1;
slot->message2 = message2;
slot->file = file;
@@ -3909,7 +4234,7 @@ found:
slot->io_already_done = FALSE;
#ifdef WIN_ASYNC_IO
- control = &(slot->control);
+ control = &slot->control;
control->Offset = (DWORD) offset & 0xFFFFFFFF;
control->OffsetHigh = (DWORD) (offset >> 32);
ResetEvent(slot->handle);
@@ -3940,7 +4265,6 @@ found:
iocb->data = (void*) slot;
slot->n_bytes = 0;
slot->ret = 0;
- /*fprintf(stderr, "Filled up Linux native iocb.\n");*/
skip_native_aio:
#endif /* LINUX_NATIVE_AIO */
@@ -3958,9 +4282,6 @@ os_aio_array_free_slot(
os_aio_array_t* array, /*!< in: aio array */
os_aio_slot_t* slot) /*!< in: pointer to slot */
{
- ut_ad(array);
- ut_ad(slot);
-
os_mutex_enter(array->mutex);
ut_ad(slot->reserved);
@@ -4009,36 +4330,42 @@ os_aio_simulated_wake_handler_thread(
arrays */
{
os_aio_array_t* array;
- os_aio_slot_t* slot;
ulint segment;
- ulint n;
- ulint i;
ut_ad(!srv_use_native_aio);
segment = os_aio_get_array_and_local_segment(&array, global_segment);
- n = array->n_slots / array->n_segments;
+ ulint n = array->n_slots / array->n_segments;
+
+ segment *= n;
/* Look through n slots after the segment * n'th slot */
os_mutex_enter(array->mutex);
- for (i = 0; i < n; i++) {
- slot = os_aio_array_get_nth_slot(array, i + segment * n);
+ for (ulint i = 0; i < n; ++i) {
+ const os_aio_slot_t* slot;
+
+ slot = os_aio_array_get_nth_slot(array, segment + i);
if (slot->reserved) {
+
/* Found an i/o request */
- break;
+ os_mutex_exit(array->mutex);
+
+ os_event_t event;
+
+ event = os_aio_segment_wait_events[global_segment];
+
+ os_event_set(event);
+
+ return;
}
}
os_mutex_exit(array->mutex);
-
- if (i < n) {
- os_event_set(os_aio_segment_wait_events[global_segment]);
- }
}
/**********************************************************************//**
@@ -4048,8 +4375,6 @@ void
os_aio_simulated_wake_handler_threads(void)
/*=======================================*/
{
- ulint i;
-
if (srv_use_native_aio) {
/* We do not use simulated aio: do nothing */
@@ -4058,7 +4383,7 @@ os_aio_simulated_wake_handler_threads(void)
os_aio_recommend_sleep_for_read_threads = FALSE;
- for (i = 0; i < os_aio_n_segments; i++) {
+ for (ulint i = 0; i < os_aio_n_segments; i++) {
os_aio_simulated_wake_handler_thread(i);
}
}
@@ -4080,7 +4405,6 @@ background threads too eagerly to allow for coalescing during
readahead requests. */
#ifdef __WIN__
os_aio_array_t* array;
- ulint g;
if (srv_use_native_aio) {
/* We do not use simulated aio: do nothing */
@@ -4090,12 +4414,12 @@ readahead requests. */
os_aio_recommend_sleep_for_read_threads = TRUE;
- for (g = 0; g < os_aio_n_segments; g++) {
- os_aio_get_array_and_local_segment(&array, g);
+ for (ulint i = 0; i < os_aio_n_segments; i++) {
+ os_aio_get_array_and_local_segment(&array, i);
if (array == os_aio_read_array) {
- os_event_reset(os_aio_segment_wait_events[g]);
+ os_event_reset(os_aio_segment_wait_events[i]);
}
}
#endif /* __WIN__ */
@@ -4193,11 +4517,10 @@ os_aio_func(
ibool retval;
BOOL ret = TRUE;
DWORD len = (DWORD) n;
- struct fil_node_struct * dummy_mess1;
+ struct fil_node_t* dummy_mess1;
void* dummy_mess2;
ulint dummy_type;
#endif /* WIN_ASYNC_IO */
- ibool retry;
ulint wake_later;
ut_ad(file);
@@ -4235,6 +4558,7 @@ os_aio_func(
return(os_file_read_func(file, buf, offset, n));
}
+ ut_ad(!srv_read_only_mode);
ut_a(type == OS_FILE_WRITE);
return(os_file_write_func(name, file, buf, offset, n));
@@ -4243,9 +4567,12 @@ os_aio_func(
try_again:
switch (mode) {
case OS_AIO_NORMAL:
- array = (type == OS_FILE_READ)
- ? os_aio_read_array
- : os_aio_write_array;
+ if (type == OS_FILE_READ) {
+ array = os_aio_read_array;
+ } else {
+ ut_ad(!srv_read_only_mode);
+ array = os_aio_write_array;
+ }
break;
case OS_AIO_IBUF:
ut_ad(type == OS_FILE_READ);
@@ -4254,14 +4581,21 @@ try_again:
wake_later = FALSE;
- array = os_aio_ibuf_array;
+ if (srv_read_only_mode) {
+ array = os_aio_read_array;
+ } else {
+ array = os_aio_ibuf_array;
+ }
break;
case OS_AIO_LOG:
- array = os_aio_log_array;
+ if (srv_read_only_mode) {
+ array = os_aio_read_array;
+ } else {
+ array = os_aio_log_array;
+ }
break;
case OS_AIO_SYNC:
array = os_aio_sync_array;
-
#if defined(LINUX_NATIVE_AIO)
/* In Linux native AIO we don't use sync IO array. */
ut_a(!srv_use_native_aio);
@@ -4286,7 +4620,7 @@ try_again:
if (!os_aio_linux_dispatch(array, slot)) {
goto err_exit;
}
-#endif
+#endif /* WIN_ASYNC_IO */
} else {
if (!wake_later) {
os_aio_simulated_wake_handler_thread(
@@ -4295,6 +4629,7 @@ try_again:
}
}
} else if (type == OS_FILE_WRITE) {
+ ut_ad(!srv_read_only_mode);
if (srv_use_native_aio) {
os_n_file_writes++;
#ifdef WIN_ASYNC_IO
@@ -4305,7 +4640,7 @@ try_again:
if (!os_aio_linux_dispatch(array, slot)) {
goto err_exit;
}
-#endif
+#endif /* WIN_ASYNC_IO */
} else {
if (!wake_later) {
os_aio_simulated_wake_handler_thread(
@@ -4329,11 +4664,10 @@ try_again:
we must use the same wait mechanism as for
async i/o */
- retval = os_aio_windows_handle(ULINT_UNDEFINED,
- slot->pos,
- &dummy_mess1,
- &dummy_mess2,
- &dummy_type);
+ retval = os_aio_windows_handle(
+ ULINT_UNDEFINED, slot->pos,
+ &dummy_mess1, &dummy_mess2,
+ &dummy_type);
return(retval);
}
@@ -4352,10 +4686,8 @@ err_exit:
#endif /* LINUX_NATIVE_AIO || WIN_ASYNC_IO */
os_aio_array_free_slot(array, slot);
- retry = os_file_handle_error(name,
- type == OS_FILE_READ
- ? "aio read" : "aio write");
- if (retry) {
+ if (os_file_handle_error(
+ name,type == OS_FILE_READ ? "aio read" : "aio write")) {
goto try_again;
}
@@ -4405,8 +4737,8 @@ os_aio_windows_handle(
BOOL retry = FALSE;
if (segment == ULINT_UNDEFINED) {
- array = os_aio_sync_array;
segment = 0;
+ array = os_aio_sync_array;
} else {
segment = os_aio_get_array_and_local_segment(&array, segment);
}
@@ -4420,16 +4752,21 @@ os_aio_windows_handle(
n = array->n_slots / array->n_segments;
if (array == os_aio_sync_array) {
+
WaitForSingleObject(
os_aio_array_get_nth_slot(array, pos)->handle,
INFINITE);
+
i = pos;
+
} else {
- srv_set_io_thread_op_info(orig_seg, "wait Windows aio");
- i = WaitForMultipleObjects((DWORD) n,
- array->handles + segment * n,
- FALSE,
- INFINITE);
+ if (orig_seg != ULINT_UNDEFINED) {
+ srv_set_io_thread_op_info(orig_seg, "wait Windows aio");
+ }
+
+ i = WaitForMultipleObjects(
+ (DWORD) n, array->handles + segment * n,
+ FALSE, INFINITE);
}
os_mutex_enter(array->mutex);
@@ -4449,8 +4786,8 @@ os_aio_windows_handle(
ut_a(slot->reserved);
if (orig_seg != ULINT_UNDEFINED) {
- srv_set_io_thread_op_info(orig_seg,
- "get windows aio return value");
+ srv_set_io_thread_op_info(
+ orig_seg, "get windows aio return value");
}
ret = GetOverlappedResult(slot->file, &(slot->control), &len, TRUE);
@@ -4753,7 +5090,7 @@ found:
*type = slot->type;
- if ((slot->ret == 0) && (slot->n_bytes == (long) slot->len)) {
+ if (slot->ret == 0 && slot->n_bytes == (long) slot->len) {
ret = TRUE;
} else {
@@ -4802,8 +5139,6 @@ os_aio_simulated_handle(
{
os_aio_array_t* array;
ulint segment;
- os_aio_slot_t* slot;
- os_aio_slot_t* slot2;
os_aio_slot_t* consecutive_ios[OS_AIO_MERGE_N_CONSECUTIVE];
ulint n_consecutive;
ulint total_len;
@@ -4816,7 +5151,7 @@ os_aio_simulated_handle(
ibool ret;
ibool any_reserved;
ulint n;
- ulint i;
+ os_aio_slot_t* aio_slot;
/* Fix compiler warning */
*consecutive_ios = NULL;
@@ -4854,7 +5189,9 @@ restart:
os_mutex_enter(array->mutex);
- for (i = 0; i < n; i++) {
+ for (ulint i = 0; i < n; i++) {
+ os_aio_slot_t* slot;
+
slot = os_aio_array_get_nth_slot(array, i + segment * n);
if (!slot->reserved) {
@@ -4868,8 +5205,8 @@ restart:
(ulong) i);
}
+ aio_slot = slot;
ret = TRUE;
-
goto slot_io_done;
} else {
any_reserved = TRUE;
@@ -4879,9 +5216,7 @@ restart:
/* There is no completed request.
If there is no pending request at all,
and the system is being shut down, exit. */
- if (UNIV_UNLIKELY
- (!any_reserved
- && srv_shutdown_state == SRV_SHUTDOWN_EXIT_THREADS)) {
+ if (!any_reserved && srv_shutdown_state == SRV_SHUTDOWN_EXIT_THREADS) {
os_mutex_exit(array->mutex);
*message1 = NULL;
*message2 = NULL;
@@ -4897,12 +5232,15 @@ restart:
biggest_age = 0;
lowest_offset = IB_UINT64_MAX;
- for (i = 0; i < n; i++) {
+ for (ulint i = 0; i < n; i++) {
+ os_aio_slot_t* slot;
+
slot = os_aio_array_get_nth_slot(array, i + segment * n);
if (slot->reserved) {
- age = (ulint) difftime(time(NULL),
- slot->reservation_time);
+
+ age = (ulint) difftime(
+ ut_time(), slot->reservation_time);
if ((age >= 2 && age > biggest_age)
|| (age >= 2 && age == biggest_age
@@ -4926,9 +5264,11 @@ restart:
lowest_offset = IB_UINT64_MAX;
- for (i = 0; i < n; i++) {
- slot = os_aio_array_get_nth_slot(array,
- i + segment * n);
+ for (ulint i = 0; i < n; i++) {
+ os_aio_slot_t* slot;
+
+ slot = os_aio_array_get_nth_slot(
+ array, i + segment * n);
if (slot->reserved && slot->offset < lowest_offset) {
@@ -4954,25 +5294,28 @@ restart:
ut_ad(n_consecutive != 0);
ut_ad(consecutive_ios[0] != NULL);
- slot = consecutive_ios[0];
+ aio_slot = consecutive_ios[0];
/* Check if there are several consecutive blocks to read or write */
consecutive_loop:
- for (i = 0; i < n; i++) {
- slot2 = os_aio_array_get_nth_slot(array, i + segment * n);
+ for (ulint i = 0; i < n; i++) {
+ os_aio_slot_t* slot;
+
+ slot = os_aio_array_get_nth_slot(array, i + segment * n);
- if (slot2->reserved && slot2 != slot
- && slot2->offset == slot->offset + slot->len
- && slot2->type == slot->type
- && slot2->file == slot->file) {
+ if (slot->reserved
+ && slot != aio_slot
+ && slot->offset == slot->offset + aio_slot->len
+ && slot->type == aio_slot->type
+ && slot->file == aio_slot->file) {
/* Found a consecutive i/o request */
- consecutive_ios[n_consecutive] = slot2;
+ consecutive_ios[n_consecutive] = slot;
n_consecutive++;
- slot = slot2;
+ aio_slot = slot;
if (n_consecutive < OS_AIO_MERGE_N_CONSECUTIVE) {
@@ -4990,15 +5333,15 @@ consecutive_loop:
i/o */
total_len = 0;
- slot = consecutive_ios[0];
+ aio_slot = consecutive_ios[0];
- for (i = 0; i < n_consecutive; i++) {
+ for (ulint i = 0; i < n_consecutive; i++) {
total_len += consecutive_ios[i]->len;
}
if (n_consecutive == 1) {
/* We can use the buffer of the i/o request */
- combined_buf = slot->buf;
+ combined_buf = aio_slot->buf;
combined_buf2 = NULL;
} else {
combined_buf2 = static_cast<byte*>(
@@ -5016,50 +5359,41 @@ consecutive_loop:
os_mutex_exit(array->mutex);
- if (slot->type == OS_FILE_WRITE && n_consecutive > 1) {
+ if (aio_slot->type == OS_FILE_WRITE && n_consecutive > 1) {
/* Copy the buffers to the combined buffer */
offs = 0;
- for (i = 0; i < n_consecutive; i++) {
+ for (ulint i = 0; i < n_consecutive; i++) {
ut_memcpy(combined_buf + offs, consecutive_ios[i]->buf,
consecutive_ios[i]->len);
+
offs += consecutive_ios[i]->len;
}
}
srv_set_io_thread_op_info(global_segment, "doing file i/o");
- if (os_aio_print_debug) {
- fprintf(stderr,
- "InnoDB: doing i/o of type %lu at offset " UINT64PF
- ", length %lu\n",
- (ulong) slot->type, slot->offset, (ulong) total_len);
- }
-
/* Do the i/o with ordinary, synchronous i/o functions: */
- if (slot->type == OS_FILE_WRITE) {
- ret = os_file_write(slot->name, slot->file, combined_buf,
- slot->offset, total_len);
+ if (aio_slot->type == OS_FILE_WRITE) {
+ ut_ad(!srv_read_only_mode);
+ ret = os_file_write(
+ aio_slot->name, aio_slot->file, combined_buf,
+ aio_slot->offset, total_len);
} else {
- ret = os_file_read(slot->file, combined_buf,
- slot->offset, total_len);
+ ret = os_file_read(
+ aio_slot->file, combined_buf,
+ aio_slot->offset, total_len);
}
ut_a(ret);
srv_set_io_thread_op_info(global_segment, "file i/o done");
-#if 0
- fprintf(stderr,
- "aio: %lu consecutive %lu:th segment, first offs %lu blocks\n",
- n_consecutive, global_segment, slot->offset / UNIV_PAGE_SIZE);
-#endif
-
- if (slot->type == OS_FILE_READ && n_consecutive > 1) {
+ if (aio_slot->type == OS_FILE_READ && n_consecutive > 1) {
/* Copy the combined buffer to individual buffers */
offs = 0;
- for (i = 0; i < n_consecutive; i++) {
+ for (ulint i = 0; i < n_consecutive; i++) {
ut_memcpy(consecutive_ios[i]->buf, combined_buf + offs,
consecutive_ios[i]->len);
@@ -5075,7 +5409,7 @@ consecutive_loop:
/* Mark the i/os done in slots */
- for (i = 0; i < n_consecutive; i++) {
+ for (ulint i = 0; i < n_consecutive; i++) {
consecutive_ios[i]->io_already_done = TRUE;
}
@@ -5085,16 +5419,16 @@ consecutive_loop:
slot_io_done:
- ut_a(slot->reserved);
+ ut_a(aio_slot->reserved);
- *message1 = slot->message1;
- *message2 = slot->message2;
+ *message1 = aio_slot->message1;
+ *message2 = aio_slot->message2;
- *type = slot->type;
+ *type = aio_slot->type;
os_mutex_exit(array->mutex);
- os_aio_array_free_slot(array, slot);
+ os_aio_array_free_slot(array, aio_slot);
return(ret);
@@ -5113,30 +5447,20 @@ recommended_sleep:
os_event_wait(os_aio_segment_wait_events[global_segment]);
- if (os_aio_print_debug) {
- fprintf(stderr,
- "InnoDB: i/o handler thread for i/o"
- " segment %lu wakes up\n",
- (ulong) global_segment);
- }
-
goto restart;
}
/**********************************************************************//**
Validates the consistency of an aio array.
-@return TRUE if ok */
+@return true if ok */
static
-ibool
+bool
os_aio_array_validate(
/*==================*/
os_aio_array_t* array) /*!< in: aio wait array */
{
- os_aio_slot_t* slot;
- ulint n_reserved = 0;
ulint i;
-
- ut_a(array);
+ ulint n_reserved = 0;
os_mutex_enter(array->mutex);
@@ -5144,6 +5468,8 @@ os_aio_array_validate(
ut_a(array->n_segments > 0);
for (i = 0; i < array->n_slots; i++) {
+ os_aio_slot_t* slot;
+
slot = os_aio_array_get_nth_slot(array, i);
if (slot->reserved) {
@@ -5156,7 +5482,7 @@ os_aio_array_validate(
os_mutex_exit(array->mutex);
- return(TRUE);
+ return(true);
}
/**********************************************************************//**
@@ -5168,10 +5494,22 @@ os_aio_validate(void)
/*=================*/
{
os_aio_array_validate(os_aio_read_array);
- os_aio_array_validate(os_aio_write_array);
- os_aio_array_validate(os_aio_ibuf_array);
- os_aio_array_validate(os_aio_log_array);
- os_aio_array_validate(os_aio_sync_array);
+
+ if (os_aio_write_array != 0) {
+ os_aio_array_validate(os_aio_write_array);
+ }
+
+ if (os_aio_ibuf_array != 0) {
+ os_aio_array_validate(os_aio_ibuf_array);
+ }
+
+ if (os_aio_log_array != 0) {
+ os_aio_array_validate(os_aio_log_array);
+ }
+
+ if (os_aio_sync_array != 0) {
+ os_aio_array_validate(os_aio_sync_array);
+ }
return(TRUE);
}
@@ -5211,65 +5549,36 @@ os_aio_print_segment_info(
}
/**********************************************************************//**
-Prints info of the aio arrays. */
+Prints info about the aio array. */
UNIV_INTERN
void
-os_aio_print(
-/*=========*/
- FILE* file) /*!< in: file where to print */
+os_aio_print_array(
+/*==============*/
+ FILE* file, /*!< in: file where to print */
+ os_aio_array_t* array) /*!< in: aio array to print */
{
- os_aio_array_t* array;
- os_aio_slot_t* slot;
- ulint n_reserved;
- ulint n_res_seg[SRV_MAX_N_IO_THREADS];
- time_t current_time;
- double time_elapsed;
- double avg_bytes_read;
- ulint i;
-
- for (i = 0; i < srv_n_file_io_threads; i++) {
- fprintf(file, "I/O thread %lu state: %s (%s)", (ulong) i,
- srv_io_thread_op_info[i],
- srv_io_thread_function[i]);
-
-#ifndef __WIN__
- if (os_aio_segment_wait_events[i]->is_set) {
- fprintf(file, " ev set");
- }
-#endif
-
- fprintf(file, "\n");
- }
-
- fputs("Pending normal aio reads:", file);
-
- array = os_aio_read_array;
-loop:
- ut_a(array);
+ ulint n_reserved = 0;
+ ulint n_res_seg[SRV_MAX_N_IO_THREADS];
os_mutex_enter(array->mutex);
ut_a(array->n_slots > 0);
ut_a(array->n_segments > 0);
- n_reserved = 0;
-
memset(n_res_seg, 0x0, sizeof(n_res_seg));
- for (i = 0; i < array->n_slots; i++) {
- ulint seg_no;
+ for (ulint i = 0; i < array->n_slots; ++i) {
+ os_aio_slot_t* slot;
+ ulint seg_no;
slot = os_aio_array_get_nth_slot(array, i);
seg_no = (i * array->n_segments) / array->n_slots;
+
if (slot->reserved) {
- n_reserved++;
- n_res_seg[seg_no]++;
-#if 0
- fprintf(stderr, "Reserved slot, messages %p %p\n",
- (void*) slot->message1,
- (void*) slot->message2);
-#endif
+ ++n_reserved;
+ ++n_res_seg[seg_no];
+
ut_a(slot->len > 0);
}
}
@@ -5281,38 +5590,61 @@ loop:
os_aio_print_segment_info(file, n_res_seg, array);
os_mutex_exit(array->mutex);
+}
- if (array == os_aio_read_array) {
- fputs(", aio writes:", file);
+/**********************************************************************//**
+Prints info of the aio arrays. */
+UNIV_INTERN
+void
+os_aio_print(
+/*=========*/
+ FILE* file) /*!< in: file where to print */
+{
+ time_t current_time;
+ double time_elapsed;
+ double avg_bytes_read;
- array = os_aio_write_array;
+ for (ulint i = 0; i < srv_n_file_io_threads; ++i) {
+ fprintf(file, "I/O thread %lu state: %s (%s)",
+ (ulong) i,
+ srv_io_thread_op_info[i],
+ srv_io_thread_function[i]);
- goto loop;
+#ifndef __WIN__
+ if (os_aio_segment_wait_events[i]->is_set) {
+ fprintf(file, " ev set");
+ }
+#endif /* __WIN__ */
+
+ fprintf(file, "\n");
}
- if (array == os_aio_write_array) {
- fputs(",\n ibuf aio reads:", file);
- array = os_aio_ibuf_array;
+ fputs("Pending normal aio reads:", file);
- goto loop;
+ os_aio_print_array(file, os_aio_read_array);
+
+ if (os_aio_write_array != 0) {
+ fputs(", aio writes:", file);
+ os_aio_print_array(file, os_aio_write_array);
}
- if (array == os_aio_ibuf_array) {
- fputs(", log i/o's:", file);
- array = os_aio_log_array;
+ if (os_aio_ibuf_array != 0) {
+ fputs(",\n ibuf aio reads:", file);
+ os_aio_print_array(file, os_aio_ibuf_array);
+ }
- goto loop;
+ if (os_aio_log_array != 0) {
+ fputs(", log i/o's:", file);
+ os_aio_print_array(file, os_aio_log_array);
}
- if (array == os_aio_log_array) {
+ if (os_aio_sync_array != 0) {
fputs(", sync i/o's:", file);
- array = os_aio_sync_array;
-
- goto loop;
+ os_aio_print_array(file, os_aio_sync_array);
}
putc('\n', file);
- current_time = time(NULL);
+ current_time = ut_time();
time_elapsed = 0.001 + difftime(current_time, os_last_printout);
fprintf(file,
@@ -5320,7 +5652,8 @@ loop:
"%lu OS file reads, %lu OS file writes, %lu OS fsyncs\n",
(ulong) fil_n_pending_log_flushes,
(ulong) fil_n_pending_tablespace_flushes,
- (ulong) os_n_file_reads, (ulong) os_n_file_writes,
+ (ulong) os_n_file_reads,
+ (ulong) os_n_file_writes,
(ulong) os_n_fsyncs);
if (os_file_n_pending_preads != 0 || os_file_n_pending_pwrites != 0) {
@@ -5392,21 +5725,29 @@ os_aio_all_slots_free(void)
os_mutex_exit(array->mutex);
- array = os_aio_write_array;
+ if (!srv_read_only_mode) {
+ ut_a(os_aio_write_array == 0);
- os_mutex_enter(array->mutex);
+ array = os_aio_write_array;
- n_res += array->n_reserved;
+ os_mutex_enter(array->mutex);
- os_mutex_exit(array->mutex);
+ n_res += array->n_reserved;
- array = os_aio_ibuf_array;
+ os_mutex_exit(array->mutex);
- os_mutex_enter(array->mutex);
+ ut_a(os_aio_ibuf_array == 0);
- n_res += array->n_reserved;
+ array = os_aio_ibuf_array;
- os_mutex_exit(array->mutex);
+ os_mutex_enter(array->mutex);
+
+ n_res += array->n_reserved;
+
+ os_mutex_exit(array->mutex);
+ }
+
+ ut_a(os_aio_log_array == 0);
array = os_aio_log_array;
diff --git a/storage/innobase/os/os0sync.cc b/storage/innobase/os/os0sync.cc
index c2e2e7e477f..392dbe0d7a7 100644
--- a/storage/innobase/os/os0sync.cc
+++ b/storage/innobase/os/os0sync.cc
@@ -38,7 +38,7 @@ Created 9/6/1995 Heikki Tuuri
#include "srv0srv.h"
/* Type definition for an operating system mutex struct */
-struct os_mutex_struct{
+struct os_mutex_t{
os_event_t event; /*!< Used by sync0arr.cc for queing threads */
void* handle; /*!< OS handle to mutex */
ulint count; /*!< we use this counter to check
@@ -47,12 +47,12 @@ struct os_mutex_struct{
do not assume that the OS mutex
supports recursive locking, though
NT seems to do that */
- UT_LIST_NODE_T(os_mutex_str_t) os_mutex_list;
+ UT_LIST_NODE_T(os_mutex_t) os_mutex_list;
/* list of all 'slow' OS mutexes created */
};
/** Mutex protecting counts and the lists of OS mutexes and events */
-UNIV_INTERN os_mutex_t os_sync_mutex;
+UNIV_INTERN os_ib_mutex_t os_sync_mutex;
/** TRUE if os_sync_mutex has been initialized */
static ibool os_sync_mutex_inited = FALSE;
/** TRUE when os_sync_free() is being executed */
@@ -63,10 +63,10 @@ os_thread_exit */
UNIV_INTERN ulint os_thread_count = 0;
/** The list of all events created */
-static UT_LIST_BASE_NODE_T(os_event_struct_t) os_event_list;
+static UT_LIST_BASE_NODE_T(os_event) os_event_list;
/** The list of all OS 'slow' mutexes */
-static UT_LIST_BASE_NODE_T(os_mutex_str_t) os_mutex_list;
+static UT_LIST_BASE_NODE_T(os_mutex_t) os_mutex_list;
UNIV_INTERN ulint os_event_count = 0;
UNIV_INTERN ulint os_mutex_count = 0;
@@ -329,7 +329,7 @@ os_sync_free(void)
/*==============*/
{
os_event_t event;
- os_mutex_t mutex;
+ os_ib_mutex_t mutex;
os_sync_free_called = TRUE;
event = UT_LIST_GET_FIRST(os_event_list);
@@ -365,10 +365,8 @@ must be reset explicitly by calling sync_os_reset_event.
@return the event handle */
UNIV_INTERN
os_event_t
-os_event_create(
-/*============*/
- const char* name) /*!< in: the name of the event, if NULL
- the event is created without a name */
+os_event_create(void)
+/*==================*/
{
os_event_t event;
@@ -377,10 +375,7 @@ os_event_create(
event = static_cast<os_event_t>(ut_malloc(sizeof(*event)));
- event->handle = CreateEvent(NULL,
- TRUE,
- FALSE,
- (LPCTSTR) name);
+ event->handle = CreateEvent(NULL, TRUE, FALSE, NULL);
if (!event->handle) {
fprintf(stderr,
"InnoDB: Could not create a Windows event"
@@ -390,10 +385,7 @@ os_event_create(
} else /* Windows with condition variables */
#endif
{
- UT_NOT_USED(name);
-
- event = static_cast<os_event_struct_t*>(
- ut_malloc(sizeof(struct os_event_struct)));
+ event = static_cast<os_event_t>(ut_malloc(sizeof *event));
#ifndef PFS_SKIP_EVENT_MUTEX
os_fast_mutex_init(event_os_mutex_key, &event->os_mutex);
@@ -739,27 +731,26 @@ os_event_wait_time_low(
/*********************************************************//**
Creates an operating system mutex semaphore. Because these are slow, the
-mutex semaphore of InnoDB itself (mutex_t) should be used where possible.
+mutex semaphore of InnoDB itself (ib_mutex_t) should be used where possible.
@return the mutex handle */
UNIV_INTERN
-os_mutex_t
+os_ib_mutex_t
os_mutex_create(void)
/*=================*/
{
os_fast_mutex_t* mutex;
- os_mutex_t mutex_str;
+ os_ib_mutex_t mutex_str;
mutex = static_cast<os_fast_mutex_t*>(
ut_malloc(sizeof(os_fast_mutex_t)));
os_fast_mutex_init(os_mutex_key, mutex);
- mutex_str = static_cast<os_mutex_t>(
- ut_malloc(sizeof(os_mutex_str_t)));
+ mutex_str = static_cast<os_ib_mutex_t>(ut_malloc(sizeof *mutex_str));
mutex_str->handle = mutex;
mutex_str->count = 0;
- mutex_str->event = os_event_create(NULL);
+ mutex_str->event = os_event_create();
if (UNIV_LIKELY(os_sync_mutex_inited)) {
/* When creating os_sync_mutex itself we cannot reserve it */
@@ -783,7 +774,7 @@ UNIV_INTERN
void
os_mutex_enter(
/*===========*/
- os_mutex_t mutex) /*!< in: mutex to acquire */
+ os_ib_mutex_t mutex) /*!< in: mutex to acquire */
{
os_fast_mutex_lock(static_cast<os_fast_mutex_t*>(mutex->handle));
@@ -798,7 +789,7 @@ UNIV_INTERN
void
os_mutex_exit(
/*==========*/
- os_mutex_t mutex) /*!< in: mutex to release */
+ os_ib_mutex_t mutex) /*!< in: mutex to release */
{
ut_a(mutex);
@@ -814,7 +805,7 @@ UNIV_INTERN
void
os_mutex_free(
/*==========*/
- os_mutex_t mutex) /*!< in: mutex to free */
+ os_ib_mutex_t mutex) /*!< in: mutex to free */
{
ut_a(mutex);
diff --git a/storage/innobase/os/os0thread.cc b/storage/innobase/os/os0thread.cc
index 48ee61e9402..9cc09a847b1 100644
--- a/storage/innobase/os/os0thread.cc
+++ b/storage/innobase/os/os0thread.cc
@@ -132,8 +132,10 @@ os_thread_create_func(
if (thread_id) {
*thread_id = win_thread_id;
}
-
- return(thread);
+ if (thread) {
+ CloseHandle(thread);
+ }
+ return((os_thread_t)win_thread_id);
#else
int ret;
os_thread_t pthread;
diff --git a/storage/innobase/page/page0cur.cc b/storage/innobase/page/page0cur.cc
index 9046338f377..f416d38cc35 100644
--- a/storage/innobase/page/page0cur.cc
+++ b/storage/innobase/page/page0cur.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
-Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2012, Facebook Inc.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -973,6 +974,9 @@ page_cur_insert_rec_low(
page = page_align(current_rec);
ut_ad(dict_table_is_comp(index->table)
== (ibool) !!page_is_comp(page));
+ ut_ad(fil_page_get_type(page) == FIL_PAGE_INDEX);
+ ut_ad(mach_read_from_8(page + PAGE_HEADER + PAGE_INDEX_ID)
+ == index->id || recv_recovery_is_on() || mtr->inside_ibuf);
ut_ad(!page_rec_is_supremum(current_rec));
@@ -1007,8 +1011,8 @@ page_cur_insert_rec_low(
rec_offs_init(foffsets_);
- foffsets = rec_get_offsets(free_rec, index, foffsets,
- ULINT_UNDEFINED, &heap);
+ foffsets = rec_get_offsets(
+ free_rec, index, foffsets, ULINT_UNDEFINED, &heap);
if (rec_offs_size(foffsets) < rec_size) {
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
@@ -1167,14 +1171,27 @@ page_cur_insert_rec_zip_reorg(
buf_block_t* block, /*!< in: buffer block */
dict_index_t* index, /*!< in: record descriptor */
rec_t* rec, /*!< in: inserted record */
+ ulint rec_size,/*!< in: size of the inserted record */
page_t* page, /*!< in: uncompressed page */
page_zip_des_t* page_zip,/*!< in: compressed page */
mtr_t* mtr) /*!< in: mini-transaction, or NULL */
{
ulint pos;
+ /* Make a local copy as the values can change dynamically. */
+ bool log_compressed = page_log_compressed_pages;
+ ulint level = page_compression_level;
+
/* Recompress or reorganize and recompress the page. */
- if (page_zip_compress(page_zip, page, index, mtr)) {
+ if (page_zip_compress(page_zip, page, index, level,
+ log_compressed ? mtr : NULL)) {
+ if (!log_compressed) {
+ page_cur_insert_rec_write_log(
+ rec, rec_size, *current_rec, index, mtr);
+ page_zip_compress_write_log_no_data(
+ level, page, index, mtr);
+ }
+
return(rec);
}
@@ -1246,6 +1263,9 @@ page_cur_insert_rec_zip(
page = page_align(*current_rec);
ut_ad(dict_table_is_comp(index->table));
ut_ad(page_is_comp(page));
+ ut_ad(fil_page_get_type(page) == FIL_PAGE_INDEX);
+ ut_ad(mach_read_from_8(page + PAGE_HEADER + PAGE_INDEX_ID)
+ == index->id || mtr->inside_ibuf || recv_recovery_is_on());
ut_ad(!page_rec_is_supremum(*current_rec));
#ifdef UNIV_ZIP_DEBUG
@@ -1281,10 +1301,27 @@ page_cur_insert_rec_zip(
index, rec, offsets,
NULL);
- if (UNIV_LIKELY(insert_rec != NULL)) {
+ /* If recovery is on, this implies that the compression
+ of the page was successful during runtime. Had that not
+ been the case or had the redo logging of compressed
+ pages been enabled during runtime then we'd have seen
+ a MLOG_ZIP_PAGE_COMPRESS redo record. Therefore, we
+ know that we don't need to reorganize the page. We,
+ however, do need to recompress the page. That will
+ happen when the next redo record is read which must
+ be of type MLOG_ZIP_PAGE_COMPRESS_NO_DATA and it must
+ contain a valid compression level value.
+ This implies that during recovery from this point till
+ the next redo is applied the uncompressed and
+ compressed versions are not identical and
+ page_zip_validate will fail but that is OK because
+ we call page_zip_validate only after processing
+ all changes to a page under a single mtr during
+ recovery. */
+ if (insert_rec != NULL && !recv_recovery_is_on()) {
insert_rec = page_cur_insert_rec_zip_reorg(
current_rec, block, index, insert_rec,
- page, page_zip, mtr);
+ rec_size, page, page_zip, mtr);
#ifdef UNIV_DEBUG
if (insert_rec) {
rec_offs_make_valid(
@@ -1781,9 +1818,9 @@ UNIV_INLINE
void
page_cur_delete_rec_write_log(
/*==========================*/
- rec_t* rec, /*!< in: record to be deleted */
- dict_index_t* index, /*!< in: record descriptor */
- mtr_t* mtr) /*!< in: mini-transaction handle */
+ rec_t* rec, /*!< in: record to be deleted */
+ const dict_index_t* index, /*!< in: record descriptor */
+ mtr_t* mtr) /*!< in: mini-transaction handle */
{
byte* log_ptr;
@@ -1865,10 +1902,11 @@ UNIV_INTERN
void
page_cur_delete_rec(
/*================*/
- page_cur_t* cursor, /*!< in/out: a page cursor */
- dict_index_t* index, /*!< in: record descriptor */
- const ulint* offsets,/*!< in: rec_get_offsets(cursor->rec, index) */
- mtr_t* mtr) /*!< in: mini-transaction handle */
+ page_cur_t* cursor, /*!< in/out: a page cursor */
+ const dict_index_t* index, /*!< in: record descriptor */
+ const ulint* offsets,/*!< in: rec_get_offsets(
+ cursor->rec, index) */
+ mtr_t* mtr) /*!< in: mini-transaction handle */
{
page_dir_slot_t* cur_dir_slot;
page_dir_slot_t* prev_slot;
@@ -1881,8 +1919,6 @@ page_cur_delete_rec(
ulint cur_n_owned;
rec_t* rec;
- ut_ad(cursor && mtr);
-
page = page_cur_get_page(cursor);
page_zip = page_cur_get_page_zip(cursor);
@@ -1897,17 +1933,23 @@ page_cur_delete_rec(
current_rec = cursor->rec;
ut_ad(rec_offs_validate(current_rec, index, offsets));
ut_ad(!!page_is_comp(page) == dict_table_is_comp(index->table));
+ ut_ad(fil_page_get_type(page) == FIL_PAGE_INDEX);
+ ut_ad(mach_read_from_8(page + PAGE_HEADER + PAGE_INDEX_ID)
+ == index->id || mtr->inside_ibuf || recv_recovery_is_on());
/* The record must not be the supremum or infimum record. */
ut_ad(page_rec_is_user_rec(current_rec));
/* Save to local variables some data associated with current_rec */
cur_slot_no = page_dir_find_owner_slot(current_rec);
+ ut_ad(cur_slot_no > 0);
cur_dir_slot = page_dir_get_nth_slot(page, cur_slot_no);
cur_n_owned = page_dir_slot_get_n_owned(cur_dir_slot);
/* 0. Write the log record */
- page_cur_delete_rec_write_log(current_rec, index, mtr);
+ if (mtr != 0) {
+ page_cur_delete_rec_write_log(current_rec, index, mtr);
+ }
/* 1. Reset the last insert info in the page header and increment
the modify clock for the frame */
@@ -1915,9 +1957,13 @@ page_cur_delete_rec(
page_header_set_ptr(page, page_zip, PAGE_LAST_INSERT, NULL);
/* The page gets invalid for optimistic searches: increment the
- frame modify clock */
+ frame modify clock only if there is an mini-transaction covering
+ the change. During IMPORT we allocate local blocks that are not
+ part of the buffer pool. */
- buf_block_modify_clock_inc(page_cur_get_block(cursor));
+ if (mtr != 0) {
+ buf_block_modify_clock_inc(page_cur_get_block(cursor));
+ }
/* 2. Find the next and the previous record. Note that the cursor is
left at the next record. */
@@ -1961,14 +2007,15 @@ page_cur_delete_rec(
page_dir_slot_set_n_owned(cur_dir_slot, page_zip, cur_n_owned - 1);
/* 6. Free the memory occupied by the record */
- btr_blob_dbg_remove_rec(current_rec, index, offsets, "delete");
+ btr_blob_dbg_remove_rec(current_rec, const_cast<dict_index_t*>(index),
+ offsets, "delete");
page_mem_free(page, page_zip, current_rec, index, offsets);
/* 7. Now we have decremented the number of owned records of the slot.
If the number drops below PAGE_DIR_SLOT_MIN_N_OWNED, we balance the
slots. */
- if (UNIV_UNLIKELY(cur_n_owned <= PAGE_DIR_SLOT_MIN_N_OWNED)) {
+ if (cur_n_owned <= PAGE_DIR_SLOT_MIN_N_OWNED) {
page_dir_balance_slot(page, page_zip, cur_slot_no);
}
diff --git a/storage/innobase/page/page0page.cc b/storage/innobase/page/page0page.cc
index deef6935f08..6b7b8424856 100644
--- a/storage/innobase/page/page0page.cc
+++ b/storage/innobase/page/page0page.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
Copyright (c) 1994, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2012, Facebook Inc.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -512,7 +513,8 @@ page_create_zip(
page = page_create_low(block, TRUE);
mach_write_to_2(page + PAGE_HEADER + PAGE_LEVEL, level);
- if (UNIV_UNLIKELY(!page_zip_compress(page_zip, page, index, mtr))) {
+ if (!page_zip_compress(page_zip, page, index,
+ page_compression_level, mtr)) {
/* The compression of a newly created page
should always succeed. */
ut_error;
@@ -658,7 +660,11 @@ page_copy_rec_list_end(
if (new_page_zip) {
mtr_set_log_mode(mtr, log_mode);
- if (!page_zip_compress(new_page_zip, new_page, index, mtr)) {
+ if (!page_zip_compress(new_page_zip,
+ new_page,
+ index,
+ page_compression_level,
+ mtr)) {
/* Before trying to reorganize the page,
store the number of preceding records on the page. */
ulint ret_pos
@@ -781,8 +787,9 @@ page_copy_rec_list_start(
DBUG_EXECUTE_IF("page_copy_rec_list_start_compress_fail",
goto zip_reorganize;);
- if (UNIV_UNLIKELY
- (!page_zip_compress(new_page_zip, new_page, index, mtr))) {
+ if (!page_zip_compress(new_page_zip, new_page, index,
+ page_compression_level, mtr)) {
+
ulint ret_pos;
#ifndef DBUG_OFF
zip_reorganize:
@@ -793,8 +800,8 @@ zip_reorganize:
/* Before copying, "ret" was the predecessor
of the predefined supremum record. If it was
the predefined infimum record, then it would
- still be the infimum. Thus, the assertion
- ut_a(ret_pos > 0) would fail here. */
+ still be the infimum, and we would have
+ ret_pos == 0. */
if (UNIV_UNLIKELY
(!page_zip_reorganize(new_block, index, mtr))) {
@@ -1049,6 +1056,7 @@ page_delete_rec_list_end(
n_owned = rec_get_n_owned_new(rec2) - count;
slot_index = page_dir_find_owner_slot(rec2);
+ ut_ad(slot_index > 0);
slot = page_dir_get_nth_slot(page, slot_index);
} else {
rec_t* rec2 = rec;
@@ -1064,6 +1072,7 @@ page_delete_rec_list_end(
n_owned = rec_get_n_owned_old(rec2) - count;
slot_index = page_dir_find_owner_slot(rec2);
+ ut_ad(slot_index > 0);
slot = page_dir_get_nth_slot(page, slot_index);
}
@@ -1470,6 +1479,10 @@ page_rec_get_nth_const(
ulint n_owned;
const rec_t* rec;
+ if (nth == 0) {
+ return(page_get_infimum_rec(page));
+ }
+
ut_ad(nth < UNIV_PAGE_SIZE / (REC_N_NEW_EXTRA_BYTES + 1));
for (i = 0;; i++) {
@@ -2313,6 +2326,20 @@ page_validate(
}
}
+ if (dict_index_is_sec_or_ibuf(index) && page_is_leaf(page)
+ && page_get_n_recs(page) > 0) {
+ trx_id_t max_trx_id = page_get_max_trx_id(page);
+ trx_id_t sys_max_trx_id = trx_sys_get_max_trx_id();
+
+ if (max_trx_id == 0 || max_trx_id > sys_max_trx_id) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "PAGE_MAX_TRX_ID out of bounds: "
+ TRX_ID_FMT ", " TRX_ID_FMT,
+ max_trx_id, sys_max_trx_id);
+ goto func_exit2;
+ }
+ }
+
heap = mem_heap_create(UNIV_PAGE_SIZE + 200);
/* The following buffer is used to check that the
@@ -2602,3 +2629,60 @@ page_find_rec_with_heap_no(
}
}
#endif /* !UNIV_HOTBACKUP */
+
+/*******************************************************//**
+Removes the record from a leaf page. This function does not log
+any changes. It is used by the IMPORT tablespace functions.
+The cursor is moved to the next record after the deleted one.
+@return true if success, i.e., the page did not become too empty */
+UNIV_INTERN
+bool
+page_delete_rec(
+/*============*/
+ const dict_index_t* index, /*!< in: The index that the record
+ belongs to */
+ page_cur_t* pcur, /*!< in/out: page cursor on record
+ to delete */
+ page_zip_des_t* page_zip,/*!< in: compressed page descriptor */
+ const ulint* offsets)/*!< in: offsets for record */
+{
+ bool no_compress_needed;
+ buf_block_t* block = pcur->block;
+ page_t* page = buf_block_get_frame(block);
+
+ ut_ad(page_is_leaf(page));
+
+ if (!rec_offs_any_extern(offsets)
+ && ((page_get_data_size(page) - rec_offs_size(offsets)
+ < BTR_CUR_PAGE_COMPRESS_LIMIT)
+ || (mach_read_from_4(page + FIL_PAGE_NEXT) == FIL_NULL
+ && mach_read_from_4(page + FIL_PAGE_PREV) == FIL_NULL)
+ || (page_get_n_recs(page) < 2))) {
+
+ ulint root_page_no = dict_index_get_page(index);
+
+ /* The page fillfactor will drop below a predefined
+ minimum value, OR the level in the B-tree contains just
+ one page, OR the page will become empty: we recommend
+ compression if this is not the root page. */
+
+ no_compress_needed = page_get_page_no(page) == root_page_no;
+ } else {
+ no_compress_needed = true;
+ }
+
+ if (no_compress_needed) {
+#ifdef UNIV_ZIP_DEBUG
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
+#endif /* UNIV_ZIP_DEBUG */
+
+ page_cur_delete_rec(pcur, index, offsets, 0);
+
+#ifdef UNIV_ZIP_DEBUG
+ ut_a(!page_zip || page_zip_validate(page_zip, page, index));
+#endif /* UNIV_ZIP_DEBUG */
+ }
+
+ return(no_compress_needed);
+}
+
diff --git a/storage/innobase/page/page0zip.cc b/storage/innobase/page/page0zip.cc
index 35a8f458fb2..dee37580002 100644
--- a/storage/innobase/page/page0zip.cc
+++ b/storage/innobase/page/page0zip.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
-Copyright (c) 2005, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2005, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2012, Facebook Inc.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -23,6 +24,9 @@ Compressed page interface
Created June 2005 by Marko Makela
*******************************************************/
+#include <map>
+using namespace std;
+
#define THIS_MODULE
#include "page0zip.h"
#ifdef UNIV_NONINL
@@ -54,9 +58,23 @@ Created June 2005 by Marko Makela
#ifndef UNIV_HOTBACKUP
/** Statistics on compression, indexed by page_zip_des_t::ssize - 1 */
-UNIV_INTERN page_zip_stat_t page_zip_stat[PAGE_ZIP_SSIZE_MAX];
+UNIV_INTERN page_zip_stat_t page_zip_stat[PAGE_ZIP_SSIZE_MAX];
+/** Statistics on compression, indexed by index->id */
+UNIV_INTERN page_zip_stat_per_index_t page_zip_stat_per_index;
+/** Mutex protecting page_zip_stat_per_index */
+UNIV_INTERN ib_mutex_t page_zip_stat_per_index_mutex;
+#ifdef HAVE_PSI_INTERFACE
+UNIV_INTERN mysql_pfs_key_t page_zip_stat_per_index_mutex_key;
+#endif /* HAVE_PSI_INTERFACE */
#endif /* !UNIV_HOTBACKUP */
+/* Compression level to be used by zlib. Settable by user. */
+UNIV_INTERN ulint page_compression_level = 6;
+
+/* Whether or not to log compressed page images to avoid possible
+compression algorithm changes in zlib. */
+UNIV_INTERN bool page_log_compressed_pages = true;
+
/* Please refer to ../include/page0zip.ic for a description of the
compressed page format. */
@@ -386,7 +404,7 @@ page_zip_get_n_prev_extern(
compressed page */
const rec_t* rec, /*!< in: compact physical record
on a B-tree leaf page */
- dict_index_t* index) /*!< in: record descriptor */
+ const dict_index_t* index) /*!< in: record descriptor */
{
const page_t* page = page_align(rec);
ulint n_ext = 0;
@@ -1181,6 +1199,7 @@ page_zip_compress(
m_start, m_end, m_nonempty */
const page_t* page, /*!< in: uncompressed page */
dict_index_t* index, /*!< in: index of the B-tree node */
+ ulint level, /*!< in: commpression level */
mtr_t* mtr) /*!< in: mini-transaction, or NULL */
{
z_stream c_stream;
@@ -1194,7 +1213,6 @@ page_zip_compress(
const rec_t** recs; /*!< dense page directory, sorted by address */
mem_heap_t* heap;
ulint trx_id_col;
- ulint* offsets = NULL;
ulint n_blobs = 0;
byte* storage;/* storage of uncompressed columns */
#ifndef UNIV_HOTBACKUP
@@ -1203,6 +1221,10 @@ page_zip_compress(
#ifdef PAGE_ZIP_COMPRESS_DBG
FILE* logfile = NULL;
#endif
+ /* A local copy of srv_cmp_per_index_enabled to avoid reading that
+ variable multiple times in this function since it can be changed at
+ anytime. */
+ my_bool cmp_per_index_enabled = srv_cmp_per_index_enabled;
ut_a(page_is_comp(page));
ut_a(fil_page_get_type(page) == FIL_PAGE_INDEX);
@@ -1265,6 +1287,11 @@ page_zip_compress(
#endif /* PAGE_ZIP_COMPRESS_DBG */
#ifndef UNIV_HOTBACKUP
page_zip_stat[page_zip->ssize - 1].compressed++;
+ if (cmp_per_index_enabled) {
+ mutex_enter(&page_zip_stat_per_index_mutex);
+ page_zip_stat_per_index[index->id].compressed++;
+ mutex_exit(&page_zip_stat_per_index_mutex);
+ }
#endif /* !UNIV_HOTBACKUP */
if (UNIV_UNLIKELY(n_dense * PAGE_ZIP_DIR_SLOT_SIZE
@@ -1276,7 +1303,8 @@ page_zip_compress(
MONITOR_INC(MONITOR_PAGE_COMPRESS);
heap = mem_heap_create(page_zip_get_size(page_zip)
- + n_fields * (2 + sizeof *offsets)
+ + n_fields * (2 + sizeof(ulint))
+ + REC_OFFS_HEADER_SIZE
+ n_dense * ((sizeof *recs)
- PAGE_ZIP_DIR_SLOT_SIZE)
+ UNIV_PAGE_SIZE * 4
@@ -1295,7 +1323,7 @@ page_zip_compress(
/* Compress the data payload. */
page_zip_set_alloc(&c_stream, heap);
- err = deflateInit2(&c_stream, Z_DEFAULT_COMPRESSION,
+ err = deflateInit2(&c_stream, level,
Z_DEFLATED, UNIV_PAGE_SIZE_SHIFT,
MAX_MEM_LEVEL, Z_DEFAULT_STRATEGY);
ut_a(err == Z_OK);
@@ -1408,8 +1436,19 @@ err_exit:
}
#endif /* PAGE_ZIP_COMPRESS_DBG */
#ifndef UNIV_HOTBACKUP
+ if (page_is_leaf(page)) {
+ dict_index_zip_failure(index);
+ }
+
+ ullint time_diff = ut_time_us(NULL) - usec;
page_zip_stat[page_zip->ssize - 1].compressed_usec
- += ut_time_us(NULL) - usec;
+ += time_diff;
+ if (cmp_per_index_enabled) {
+ mutex_enter(&page_zip_stat_per_index_mutex);
+ page_zip_stat_per_index[index->id].compressed_usec
+ += time_diff;
+ mutex_exit(&page_zip_stat_per_index_mutex);
+ }
#endif /* !UNIV_HOTBACKUP */
return(FALSE);
}
@@ -1469,11 +1508,18 @@ err_exit:
}
#endif /* PAGE_ZIP_COMPRESS_DBG */
#ifndef UNIV_HOTBACKUP
- {
- page_zip_stat_t* zip_stat
- = &page_zip_stat[page_zip->ssize - 1];
- zip_stat->compressed_ok++;
- zip_stat->compressed_usec += ut_time_us(NULL) - usec;
+ ullint time_diff = ut_time_us(NULL) - usec;
+ page_zip_stat[page_zip->ssize - 1].compressed_ok++;
+ page_zip_stat[page_zip->ssize - 1].compressed_usec += time_diff;
+ if (cmp_per_index_enabled) {
+ mutex_enter(&page_zip_stat_per_index_mutex);
+ page_zip_stat_per_index[index->id].compressed_ok++;
+ page_zip_stat_per_index[index->id].compressed_usec += time_diff;
+ mutex_exit(&page_zip_stat_per_index_mutex);
+ }
+
+ if (page_is_leaf(page)) {
+ dict_index_zip_success(index);
}
#endif /* !UNIV_HOTBACKUP */
@@ -1518,6 +1564,7 @@ page_zip_fields_free(
{
if (index) {
dict_table_t* table = index->table;
+ os_fast_mutex_free(&index->zip_pad.mutex);
mem_heap_free(index->heap);
mutex_free(&(table->autoinc_mutex));
ut_free(table->name);
@@ -3075,11 +3122,17 @@ err_exit:
page_zip_fields_free(index);
mem_heap_free(heap);
#ifndef UNIV_HOTBACKUP
- {
- page_zip_stat_t* zip_stat
- = &page_zip_stat[page_zip->ssize - 1];
- zip_stat->decompressed++;
- zip_stat->decompressed_usec += ut_time_us(NULL) - usec;
+ ullint time_diff = ut_time_us(NULL) - usec;
+ page_zip_stat[page_zip->ssize - 1].decompressed++;
+ page_zip_stat[page_zip->ssize - 1].decompressed_usec += time_diff;
+
+ index_id_t index_id = btr_page_get_index_id(page);
+
+ if (srv_cmp_per_index_enabled) {
+ mutex_enter(&page_zip_stat_per_index_mutex);
+ page_zip_stat_per_index[index_id].decompressed++;
+ page_zip_stat_per_index[index_id].decompressed_usec += time_diff;
+ mutex_exit(&page_zip_stat_per_index_mutex);
}
#endif /* !UNIV_HOTBACKUP */
@@ -3177,7 +3230,7 @@ page_zip_validate_low(
UNIV_MEM_ASSERT_RW() checks fail. The v-bits of page[],
page_zip->data[] or page_zip could be viewed at temp_page[] or
temp_page_zip in a debugger when running valgrind --db-attach. */
- VALGRIND_GET_VBITS(page, temp_page, UNIV_PAGE_SIZE);
+ (void) VALGRIND_GET_VBITS(page, temp_page, UNIV_PAGE_SIZE);
UNIV_MEM_ASSERT_RW(page, UNIV_PAGE_SIZE);
# if UNIV_WORD_SIZE == 4
VALGRIND_GET_VBITS(page_zip, &temp_page_zip, sizeof temp_page_zip);
@@ -3186,8 +3239,8 @@ page_zip_validate_low(
pad bytes. */
UNIV_MEM_ASSERT_RW(page_zip, sizeof *page_zip);
# endif
- VALGRIND_GET_VBITS(page_zip->data, temp_page,
- page_zip_get_size(page_zip));
+ (void) VALGRIND_GET_VBITS(page_zip->data, temp_page,
+ page_zip_get_size(page_zip));
UNIV_MEM_ASSERT_RW(page_zip->data, page_zip_get_size(page_zip));
#endif /* UNIV_DEBUG_VALGRIND */
@@ -3295,7 +3348,7 @@ page_zip_validate_low(
"record list: 0x%02x!=0x%02x\n",
(unsigned) page_offset(rec),
(unsigned) page_offset(trec)));
- valid = FALSE;
+ valid = FALSE;
break;
}
@@ -4042,10 +4095,10 @@ static
void
page_zip_clear_rec(
/*===============*/
- page_zip_des_t* page_zip,/*!< in/out: compressed page */
- byte* rec, /*!< in: record to clear */
- dict_index_t* index, /*!< in: index of rec */
- const ulint* offsets)/*!< in: rec_get_offsets(rec, index) */
+ page_zip_des_t* page_zip, /*!< in/out: compressed page */
+ byte* rec, /*!< in: record to clear */
+ const dict_index_t* index, /*!< in: index of rec */
+ const ulint* offsets) /*!< in: rec_get_offsets(rec, index) */
{
ulint heap_no;
page_t* page = page_align(rec);
@@ -4256,11 +4309,12 @@ UNIV_INTERN
void
page_zip_dir_delete(
/*================*/
- page_zip_des_t* page_zip,/*!< in/out: compressed page */
- byte* rec, /*!< in: record to delete */
- dict_index_t* index, /*!< in: index of rec */
- const ulint* offsets,/*!< in: rec_get_offsets(rec) */
- const byte* free) /*!< in: previous start of the free list */
+ page_zip_des_t* page_zip, /*!< in/out: compressed page */
+ byte* rec, /*!< in: deleted record */
+ const dict_index_t* index, /*!< in: index of rec */
+ const ulint* offsets, /*!< in: rec_get_offsets(rec) */
+ const byte* free) /*!< in: previous start of
+ the free list */
{
byte* slot_rec;
byte* slot_free;
@@ -4576,7 +4630,8 @@ page_zip_reorganize(
/* Restore logging. */
mtr_set_log_mode(mtr, log_mode);
- if (!page_zip_compress(page_zip, page, index, mtr)) {
+ if (!page_zip_compress(page_zip, page, index,
+ page_compression_level, mtr)) {
#ifndef UNIV_HOTBACKUP
buf_block_free(temp_block);
diff --git a/storage/innobase/pars/lexyy.cc b/storage/innobase/pars/lexyy.cc
index 9de8ea51efd..48ab04e1eff 100644
--- a/storage/innobase/pars/lexyy.cc
+++ b/storage/innobase/pars/lexyy.cc
@@ -35,7 +35,7 @@
#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
/* C99 says to define __STDC_LIMIT_MACROS before including stdint.h,
- * if you want the limit (max/min) macros for int types.
+ * if you want the limit (max/min) macros for int types.
*/
#ifndef __STDC_LIMIT_MACROS
#define __STDC_LIMIT_MACROS 1
@@ -55,7 +55,6 @@ typedef int flex_int32_t;
typedef unsigned char flex_uint8_t;
typedef unsigned short int flex_uint16_t;
typedef unsigned int flex_uint32_t;
-#endif /* ! C99 */
/* Limits of integral types. */
#ifndef INT8_MIN
@@ -86,6 +85,8 @@ typedef unsigned int flex_uint32_t;
#define UINT32_MAX (4294967295U)
#endif
+#endif /* ! C99 */
+
#endif /* ! FLEXINT_H */
#ifdef __cplusplus
@@ -142,7 +143,15 @@ typedef unsigned int flex_uint32_t;
/* Size of default input buffer. */
#ifndef YY_BUF_SIZE
+#ifdef __ia64__
+/* On IA-64, the buffer size is 16k, not 8k.
+ * Moreover, YY_BUF_SIZE is 2*YY_READ_BUF_SIZE in the general case.
+ * Ditto for the __ia64__ case accordingly.
+ */
+#define YY_BUF_SIZE 32768
+#else
#define YY_BUF_SIZE 16384
+#endif /* __ia64__ */
#endif
/* The state buf must be large enough to hold one state per character in the main buffer.
@@ -276,7 +285,7 @@ static yy_size_t yy_n_chars; /* number of characters read into yy_ch_buf */
yy_size_t yyleng;
/* Points to current character in buffer. */
-static char *yy_c_buf_p = (char*) 0;
+static char *yy_c_buf_p = (char *) 0;
static int yy_init = 0; /* whether we need to initialize */
static int yy_start = 0; /* start state number */
@@ -338,7 +347,7 @@ void yyfree (void * );
typedef unsigned char YY_CHAR;
-FILE *yyin = (FILE*) 0, *yyout = (FILE*) 0;
+FILE *yyin = (FILE *) 0, *yyout = (FILE *) 0;
typedef int yy_state_type;
@@ -373,7 +382,7 @@ struct yy_trans_info
flex_int32_t yy_verify;
flex_int32_t yy_nxt;
};
-static yyconst flex_int16_t yy_accept[424] =
+static yyconst flex_int16_t yy_accept[425] =
{ 0,
0, 0, 119, 119, 0, 0, 0, 0, 125, 123,
122, 122, 8, 123, 114, 5, 103, 109, 112, 110,
@@ -382,46 +391,46 @@ static yyconst flex_int16_t yy_accept[424] =
96, 96, 96, 96, 96, 96, 96, 96, 96, 96,
115, 116, 119, 120, 6, 7, 9, 10, 122, 4,
98, 118, 2, 1, 3, 99, 100, 102, 101, 0,
- 96, 96, 96, 96, 96, 96, 44, 96, 96, 96,
+ 96, 0, 96, 96, 96, 96, 96, 44, 96, 96,
96, 96, 96, 96, 96, 96, 96, 96, 96, 96,
- 96, 96, 96, 28, 17, 25, 96, 96, 96, 96,
+ 96, 96, 96, 96, 28, 17, 25, 96, 96, 96,
- 96, 96, 54, 63, 96, 14, 96, 96, 96, 96,
- 96, 96, 96, 96, 96, 96, 96, 96, 96, 96,
- 96, 96, 96, 96, 119, 120, 120, 121, 6, 7,
- 9, 10, 2, 0, 97, 13, 45, 96, 96, 96,
- 96, 96, 96, 96, 96, 96, 96, 96, 96, 96,
- 96, 96, 96, 96, 96, 96, 27, 96, 96, 96,
- 41, 96, 96, 96, 96, 21, 96, 96, 96, 96,
- 96, 15, 96, 96, 96, 18, 96, 96, 96, 96,
- 96, 82, 96, 96, 96, 51, 96, 12, 96, 36,
+ 96, 96, 96, 54, 63, 96, 14, 96, 96, 96,
96, 96, 96, 96, 96, 96, 96, 96, 96, 96,
-
- 96, 0, 97, 96, 96, 96, 96, 20, 96, 24,
+ 96, 96, 96, 96, 96, 119, 120, 120, 121, 6,
+ 7, 9, 10, 2, 0, 97, 13, 45, 96, 96,
96, 96, 96, 96, 96, 96, 96, 96, 96, 96,
- 96, 46, 96, 96, 30, 96, 89, 96, 96, 39,
- 96, 96, 96, 96, 96, 48, 96, 94, 91, 32,
- 93, 96, 11, 66, 96, 96, 96, 42, 96, 96,
- 96, 96, 96, 96, 96, 96, 96, 96, 29, 96,
- 96, 96, 96, 96, 96, 96, 96, 96, 87, 0,
- 96, 26, 96, 96, 96, 68, 96, 96, 96, 96,
- 37, 96, 96, 96, 96, 96, 96, 96, 31, 67,
- 23, 96, 59, 96, 77, 96, 96, 96, 43, 96,
-
- 96, 96, 96, 96, 96, 96, 96, 92, 96, 96,
- 56, 96, 96, 96, 96, 96, 96, 96, 40, 33,
- 0, 81, 95, 19, 96, 96, 85, 96, 76, 55,
- 96, 65, 96, 52, 96, 96, 96, 47, 96, 78,
- 96, 80, 96, 96, 34, 96, 96, 96, 35, 74,
- 96, 96, 96, 96, 60, 96, 50, 49, 96, 96,
- 96, 57, 53, 64, 96, 96, 96, 22, 96, 96,
- 75, 83, 96, 96, 79, 96, 70, 96, 96, 96,
- 96, 96, 38, 96, 90, 69, 96, 86, 96, 96,
- 96, 88, 96, 96, 61, 96, 16, 96, 72, 71,
-
- 96, 58, 96, 84, 96, 96, 96, 96, 96, 96,
- 96, 96, 96, 96, 73, 96, 96, 96, 96, 96,
- 96, 62, 0
+ 96, 96, 96, 96, 96, 96, 96, 27, 96, 96,
+ 96, 41, 96, 96, 96, 96, 21, 96, 96, 96,
+ 96, 96, 15, 96, 96, 96, 18, 96, 96, 96,
+ 96, 96, 82, 96, 96, 96, 51, 96, 12, 96,
+ 36, 96, 96, 96, 96, 96, 96, 96, 96, 96,
+
+ 96, 96, 0, 97, 96, 96, 96, 96, 20, 96,
+ 24, 96, 96, 96, 96, 96, 96, 96, 96, 96,
+ 96, 96, 46, 96, 96, 30, 96, 89, 96, 96,
+ 39, 96, 96, 96, 96, 96, 48, 96, 94, 91,
+ 32, 93, 96, 11, 66, 96, 96, 96, 42, 96,
+ 96, 96, 96, 96, 96, 96, 96, 96, 96, 29,
+ 96, 96, 96, 96, 96, 96, 96, 96, 96, 87,
+ 0, 96, 26, 96, 96, 96, 68, 96, 96, 96,
+ 96, 37, 96, 96, 96, 96, 96, 96, 96, 31,
+ 67, 23, 96, 59, 96, 77, 96, 96, 96, 43,
+
+ 96, 96, 96, 96, 96, 96, 96, 96, 92, 96,
+ 96, 56, 96, 96, 96, 96, 96, 96, 96, 40,
+ 33, 0, 81, 95, 19, 96, 96, 85, 96, 76,
+ 55, 96, 65, 96, 52, 96, 96, 96, 47, 96,
+ 78, 96, 80, 96, 96, 34, 96, 96, 96, 35,
+ 74, 96, 96, 96, 96, 60, 96, 50, 49, 96,
+ 96, 96, 57, 53, 64, 96, 96, 96, 22, 96,
+ 96, 75, 83, 96, 96, 79, 96, 70, 96, 96,
+ 96, 96, 96, 38, 96, 90, 69, 96, 86, 96,
+ 96, 96, 88, 96, 96, 61, 96, 16, 96, 72,
+
+ 71, 96, 58, 96, 84, 96, 96, 96, 96, 96,
+ 96, 96, 96, 96, 96, 73, 96, 96, 96, 96,
+ 96, 96, 62, 0
} ;
static yyconst flex_int32_t yy_ec[256] =
@@ -432,14 +441,14 @@ static yyconst flex_int32_t yy_ec[256] =
1, 2, 1, 4, 5, 6, 7, 1, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 17, 17,
17, 17, 17, 17, 17, 17, 17, 18, 19, 20,
- 21, 22, 23, 1, 24, 25, 26, 27, 28, 29,
- 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
- 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
- 1, 1, 1, 1, 50, 1, 33, 33, 33, 33,
-
- 33, 33, 33, 33, 33, 33, 33, 51, 33, 33,
- 33, 33, 52, 33, 53, 33, 33, 33, 33, 33,
- 33, 33, 54, 1, 55, 1, 1, 1, 1, 1,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+ 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+ 1, 1, 1, 1, 51, 1, 34, 34, 34, 34,
+
+ 34, 34, 34, 34, 34, 34, 34, 52, 34, 34,
+ 34, 34, 53, 34, 54, 34, 34, 34, 34, 34,
+ 34, 34, 55, 1, 56, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
@@ -456,328 +465,438 @@ static yyconst flex_int32_t yy_ec[256] =
1, 1, 1, 1, 1
} ;
-static yyconst flex_int32_t yy_meta[56] =
+static yyconst flex_int32_t yy_meta[57] =
{ 0,
1, 1, 1, 2, 3, 1, 1, 4, 1, 1,
5, 1, 1, 1, 1, 6, 7, 1, 1, 1,
- 8, 1, 1, 9, 9, 9, 9, 9, 9, 9,
+ 8, 1, 1, 6, 9, 9, 9, 9, 9, 9,
9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
- 9, 9, 9, 1, 1
+ 9, 9, 9, 9, 1, 1
} ;
-static yyconst flex_int16_t yy_base[436] =
+static yyconst flex_int16_t yy_base[438] =
{ 0,
- 0, 0, 849, 848, 850, 849, 852, 851, 854, 861,
- 54, 56, 861, 0, 861, 861, 861, 861, 861, 861,
- 861, 861, 838, 841, 45, 830, 861, 42, 861, 829,
- 861, 45, 49, 54, 58, 56, 72, 833, 83, 86,
- 63, 67, 90, 53, 105, 107, 106, 120, 51, 101,
- 861, 861, 0, 55, 0, 840, 0, 843, 106, 0,
- 861, 861, 829, 61, 824, 861, 861, 861, 861, 839,
- 827, 88, 124, 130, 132, 125, 826, 129, 133, 136,
- 52, 138, 148, 140, 142, 145, 149, 152, 151, 159,
- 162, 169, 165, 825, 172, 824, 173, 170, 175, 179,
-
- 176, 177, 823, 822, 180, 182, 184, 200, 201, 195,
- 189, 202, 204, 207, 205, 210, 218, 220, 213, 215,
- 223, 230, 238, 217, 0, 240, 244, 861, 0, 829,
- 0, 832, 818, 781, 0, 817, 816, 233, 237, 243,
- 248, 251, 246, 252, 255, 257, 258, 262, 264, 263,
- 265, 267, 266, 269, 273, 270, 815, 274, 275, 287,
- 814, 290, 292, 291, 293, 294, 297, 300, 304, 298,
- 307, 313, 308, 309, 317, 813, 314, 315, 323, 318,
- 324, 328, 331, 332, 333, 812, 336, 811, 338, 810,
- 340, 339, 342, 344, 343, 341, 347, 346, 348, 349,
-
- 359, 773, 0, 356, 369, 370, 360, 808, 371, 807,
- 372, 375, 376, 378, 379, 380, 382, 383, 388, 393,
- 394, 806, 396, 397, 805, 398, 804, 399, 400, 803,
- 403, 404, 408, 413, 405, 802, 415, 801, 800, 799,
- 798, 406, 797, 796, 416, 417, 420, 795, 422, 418,
- 423, 425, 424, 426, 439, 429, 437, 440, 794, 446,
- 450, 453, 454, 455, 457, 458, 459, 460, 793, 757,
- 461, 791, 463, 464, 466, 790, 467, 468, 473, 474,
- 789, 475, 476, 477, 478, 480, 485, 486, 788, 787,
- 786, 489, 785, 491, 784, 498, 493, 494, 783, 499,
-
- 504, 509, 511, 513, 516, 514, 517, 782, 520, 519,
- 781, 521, 523, 527, 525, 528, 526, 529, 780, 779,
- 780, 776, 773, 530, 533, 535, 772, 534, 771, 770,
- 541, 769, 550, 760, 543, 548, 551, 753, 552, 736,
- 554, 730, 556, 557, 723, 558, 566, 563, 693, 692,
- 569, 572, 565, 578, 691, 574, 690, 689, 567, 585,
- 588, 688, 687, 685, 571, 589, 591, 683, 592, 593,
- 681, 680, 595, 596, 679, 597, 678, 599, 604, 602,
- 605, 608, 676, 606, 675, 674, 609, 673, 607, 610,
- 614, 670, 620, 623, 668, 628, 667, 630, 665, 664,
-
- 625, 663, 629, 112, 627, 626, 631, 632, 647, 633,
- 636, 637, 644, 650, 110, 652, 659, 657, 660, 661,
- 662, 57, 861, 710, 719, 728, 731, 734, 738, 747,
- 756, 765, 774, 781, 784
+ 0, 0, 293, 287, 284, 281, 272, 256, 254, 1357,
+ 55, 57, 1357, 0, 1357, 1357, 1357, 1357, 1357, 1357,
+ 1357, 1357, 238, 227, 46, 205, 1357, 43, 1357, 203,
+ 1357, 46, 50, 56, 52, 66, 64, 51, 81, 92,
+ 91, 94, 96, 111, 113, 116, 130, 134, 53, 143,
+ 1357, 1357, 0, 106, 0, 212, 0, 210, 141, 0,
+ 1357, 1357, 192, 56, 173, 1357, 1357, 1357, 1357, 168,
+ 140, 150, 152, 154, 155, 161, 167, 171, 177, 172,
+ 184, 174, 188, 189, 191, 194, 203, 212, 215, 217,
+ 219, 221, 226, 228, 231, 240, 233, 235, 246, 251,
+
+ 258, 253, 255, 256, 269, 271, 278, 272, 285, 283,
+ 287, 289, 296, 305, 298, 315, 319, 321, 322, 326,
+ 332, 333, 342, 339, 343, 0, 112, 173, 1357, 0,
+ 155, 0, 156, 132, 93, 0, 355, 357, 358, 360,
+ 364, 367, 374, 370, 379, 380, 389, 383, 390, 392,
+ 395, 408, 411, 409, 415, 418, 425, 427, 429, 436,
+ 431, 441, 446, 448, 450, 452, 453, 462, 471, 464,
+ 473, 474, 478, 485, 488, 490, 491, 494, 500, 501,
+ 504, 506, 507, 517, 518, 519, 520, 521, 522, 523,
+ 533, 536, 538, 543, 549, 554, 555, 561, 556, 566,
+
+ 567, 576, 60, 0, 573, 578, 580, 582, 583, 593,
+ 589, 596, 598, 603, 605, 607, 610, 617, 619, 621,
+ 622, 628, 633, 634, 635, 639, 640, 649, 650, 652,
+ 653, 655, 659, 664, 668, 669, 665, 671, 674, 678,
+ 681, 685, 687, 688, 692, 697, 698, 701, 703, 704,
+ 707, 708, 717, 713, 728, 730, 724, 740, 734, 745,
+ 746, 750, 751, 756, 757, 760, 761, 762, 771, 773,
+ 42, 778, 782, 783, 787, 789, 792, 794, 793, 804,
+ 805, 808, 809, 810, 819, 823, 826, 828, 829, 830,
+ 835, 840, 844, 846, 847, 856, 857, 858, 859, 860,
+
+ 863, 872, 873, 878, 879, 882, 885, 889, 894, 895,
+ 896, 898, 905, 910, 908, 912, 914, 915, 926, 930,
+ 931, 73, 932, 933, 935, 937, 942, 944, 946, 947,
+ 948, 949, 951, 958, 961, 965, 967, 972, 978, 979,
+ 981, 984, 983, 985, 994, 988, 999, 1000, 1001, 1004,
+ 1013, 1015, 1022, 1016, 1019, 1026, 1032, 1033, 1035, 1036,
+ 1038, 1039, 1048, 1049, 1050, 1051, 1053, 1054, 1060, 1063,
+ 1065, 1066, 1069, 1070, 1072, 1082, 1084, 1085, 1087, 1096,
+ 1097, 1098, 1099, 1101, 1113, 1114, 1115, 1116, 1117, 1118,
+ 1119, 1128, 1130, 1131, 1134, 1133, 1135, 1137, 1150, 1151,
+
+ 1153, 1155, 1157, 1162, 1160, 1167, 1172, 1173, 1174, 1176,
+ 1185, 1190, 1183, 1187, 1189, 1199, 1204, 1206, 1208, 1210,
+ 1215, 1220, 1222, 1357, 1269, 1278, 1287, 1290, 1293, 1297,
+ 1306, 1315, 1324, 1333, 1340, 1344, 1347
} ;
-static yyconst flex_int16_t yy_def[436] =
+static yyconst flex_int16_t yy_def[438] =
{ 0,
- 423, 1, 424, 424, 425, 425, 426, 426, 423, 423,
- 423, 423, 423, 427, 423, 423, 423, 423, 423, 423,
- 423, 423, 423, 423, 423, 428, 423, 423, 423, 423,
- 423, 429, 429, 429, 429, 429, 429, 429, 429, 429,
- 429, 429, 429, 429, 429, 429, 429, 429, 429, 429,
- 423, 423, 430, 431, 432, 423, 433, 423, 423, 427,
- 423, 423, 423, 423, 428, 423, 423, 423, 423, 434,
- 429, 429, 429, 429, 429, 429, 429, 429, 429, 429,
- 429, 429, 429, 429, 429, 429, 429, 429, 429, 429,
- 429, 429, 429, 429, 429, 429, 429, 429, 429, 429,
-
- 429, 429, 429, 429, 429, 429, 429, 429, 429, 429,
- 429, 429, 429, 429, 429, 429, 429, 429, 429, 429,
- 429, 429, 429, 429, 430, 431, 431, 423, 432, 423,
- 433, 423, 423, 423, 435, 429, 429, 429, 429, 429,
- 429, 429, 429, 429, 429, 429, 429, 429, 429, 429,
- 429, 429, 429, 429, 429, 429, 429, 429, 429, 429,
- 429, 429, 429, 429, 429, 429, 429, 429, 429, 429,
- 429, 429, 429, 429, 429, 429, 429, 429, 429, 429,
- 429, 429, 429, 429, 429, 429, 429, 429, 429, 429,
- 429, 429, 429, 429, 429, 429, 429, 429, 429, 429,
-
- 429, 423, 435, 429, 429, 429, 429, 429, 429, 429,
- 429, 429, 429, 429, 429, 429, 429, 429, 429, 429,
- 429, 429, 429, 429, 429, 429, 429, 429, 429, 429,
- 429, 429, 429, 429, 429, 429, 429, 429, 429, 429,
- 429, 429, 429, 429, 429, 429, 429, 429, 429, 429,
- 429, 429, 429, 429, 429, 429, 429, 429, 429, 429,
- 429, 429, 429, 429, 429, 429, 429, 429, 429, 423,
- 429, 429, 429, 429, 429, 429, 429, 429, 429, 429,
- 429, 429, 429, 429, 429, 429, 429, 429, 429, 429,
- 429, 429, 429, 429, 429, 429, 429, 429, 429, 429,
-
- 429, 429, 429, 429, 429, 429, 429, 429, 429, 429,
- 429, 429, 429, 429, 429, 429, 429, 429, 429, 429,
- 423, 429, 429, 429, 429, 429, 429, 429, 429, 429,
- 429, 429, 429, 429, 429, 429, 429, 429, 429, 429,
- 429, 429, 429, 429, 429, 429, 429, 429, 429, 429,
- 429, 429, 429, 429, 429, 429, 429, 429, 429, 429,
- 429, 429, 429, 429, 429, 429, 429, 429, 429, 429,
- 429, 429, 429, 429, 429, 429, 429, 429, 429, 429,
- 429, 429, 429, 429, 429, 429, 429, 429, 429, 429,
- 429, 429, 429, 429, 429, 429, 429, 429, 429, 429,
-
- 429, 429, 429, 429, 429, 429, 429, 429, 429, 429,
- 429, 429, 429, 429, 429, 429, 429, 429, 429, 429,
- 429, 429, 0, 423, 423, 423, 423, 423, 423, 423,
- 423, 423, 423, 423, 423
+ 424, 1, 425, 425, 426, 426, 427, 427, 424, 424,
+ 424, 424, 424, 428, 424, 424, 424, 424, 424, 424,
+ 424, 424, 424, 424, 424, 429, 424, 424, 424, 424,
+ 424, 430, 430, 430, 430, 430, 34, 430, 430, 430,
+ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+ 424, 424, 431, 432, 433, 424, 434, 424, 424, 428,
+ 424, 424, 424, 424, 429, 424, 424, 424, 424, 435,
+ 430, 436, 430, 430, 430, 430, 430, 430, 430, 430,
+ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+
+ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+ 430, 430, 430, 430, 430, 431, 432, 432, 424, 433,
+ 424, 434, 424, 424, 424, 437, 430, 430, 430, 430,
+ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+
+ 430, 430, 424, 437, 430, 430, 430, 430, 430, 430,
+ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+ 424, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+
+ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+ 430, 424, 430, 430, 430, 430, 430, 430, 430, 430,
+ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+
+ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+ 430, 430, 430, 430, 430, 430, 430, 430, 430, 430,
+ 430, 430, 430, 0, 424, 424, 424, 424, 424, 424,
+ 424, 424, 424, 424, 424, 424, 424
} ;
-static yyconst flex_int16_t yy_nxt[917] =
+static yyconst flex_int16_t yy_nxt[1414] =
{ 0,
10, 11, 12, 13, 10, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
- 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
- 38, 39, 38, 38, 40, 41, 42, 43, 44, 38,
- 45, 46, 47, 48, 49, 50, 38, 38, 38, 38,
- 38, 38, 38, 51, 52, 59, 59, 59, 59, 63,
- 70, 64, 67, 68, 70, 127, 70, 70, 70, 70,
- 128, 70, 70, 70, 122, 63, 74, 64, 70, 149,
- 75, 72, 70, 76, 78, 83, 73, 70, 79, 84,
- 86, 80, 87, 108, 81, 85, 77, 82, 70, 89,
-
- 100, 70, 88, 70, 101, 70, 90, 59, 59, 91,
- 102, 94, 92, 97, 136, 93, 70, 98, 103, 95,
- 70, 70, 70, 99, 96, 70, 104, 70, 105, 117,
- 106, 123, 109, 107, 112, 70, 118, 113, 124, 70,
- 70, 110, 111, 119, 70, 70, 114, 70, 70, 137,
- 115, 70, 143, 70, 116, 70, 120, 70, 121, 139,
- 70, 140, 142, 70, 70, 138, 70, 70, 141, 155,
- 144, 146, 147, 151, 70, 157, 145, 70, 150, 148,
- 70, 154, 152, 158, 70, 70, 156, 70, 70, 153,
- 70, 70, 70, 159, 70, 70, 160, 70, 164, 70,
-
- 169, 163, 161, 168, 70, 171, 162, 174, 175, 167,
- 70, 173, 170, 165, 166, 70, 70, 70, 172, 70,
- 70, 182, 70, 183, 179, 70, 176, 187, 70, 189,
- 70, 177, 70, 70, 184, 70, 185, 178, 70, 180,
- 190, 188, 192, 181, 186, 70, 195, 193, 70, 197,
- 423, 191, 70, 70, 127, 423, 196, 201, 70, 128,
- 204, 70, 194, 70, 198, 199, 70, 70, 205, 200,
- 70, 207, 70, 70, 206, 208, 209, 70, 70, 70,
- 70, 70, 70, 215, 70, 70, 210, 217, 70, 70,
- 70, 222, 213, 211, 221, 214, 212, 225, 216, 220,
-
- 228, 226, 70, 218, 219, 70, 70, 70, 70, 70,
- 229, 223, 70, 70, 224, 70, 227, 231, 232, 70,
- 233, 235, 70, 70, 70, 230, 237, 238, 70, 70,
- 70, 236, 70, 70, 241, 234, 240, 239, 70, 70,
- 247, 242, 243, 70, 245, 244, 70, 70, 70, 248,
- 246, 70, 249, 70, 70, 70, 70, 70, 70, 70,
- 254, 70, 70, 70, 70, 252, 257, 250, 260, 261,
- 265, 70, 264, 258, 70, 70, 255, 251, 259, 256,
- 262, 253, 263, 268, 70, 70, 70, 70, 267, 266,
- 70, 70, 269, 70, 70, 70, 271, 70, 70, 276,
-
- 274, 279, 280, 70, 275, 272, 273, 278, 70, 70,
- 283, 70, 70, 70, 70, 70, 285, 277, 70, 70,
- 70, 70, 281, 70, 282, 284, 289, 287, 70, 290,
- 70, 70, 70, 70, 296, 70, 286, 70, 70, 70,
- 70, 70, 291, 298, 70, 292, 288, 301, 294, 305,
- 293, 307, 70, 295, 70, 70, 299, 297, 303, 300,
- 310, 70, 306, 302, 304, 70, 308, 311, 70, 70,
- 70, 309, 70, 70, 70, 70, 70, 312, 70, 70,
- 313, 70, 70, 70, 316, 318, 319, 320, 70, 70,
- 70, 70, 70, 70, 326, 70, 314, 315, 328, 317,
-
- 70, 70, 330, 322, 70, 323, 70, 334, 70, 70,
- 327, 324, 331, 70, 70, 325, 329, 332, 333, 70,
- 337, 335, 336, 340, 70, 339, 70, 342, 70, 70,
- 343, 70, 70, 338, 70, 70, 70, 341, 70, 347,
- 70, 70, 70, 70, 70, 70, 353, 345, 70, 70,
- 70, 344, 355, 357, 348, 346, 70, 352, 70, 349,
- 350, 351, 354, 70, 356, 70, 70, 70, 365, 70,
- 358, 70, 70, 70, 360, 361, 362, 364, 70, 359,
- 70, 70, 70, 363, 70, 366, 70, 70, 367, 70,
- 369, 373, 368, 70, 374, 376, 375, 371, 372, 370,
-
- 70, 379, 378, 70, 70, 377, 70, 70, 70, 380,
- 70, 70, 70, 383, 70, 382, 381, 70, 386, 70,
- 70, 70, 70, 70, 70, 70, 391, 385, 388, 70,
- 392, 384, 389, 387, 395, 70, 397, 390, 70, 393,
- 70, 70, 70, 70, 70, 70, 70, 70, 70, 398,
- 402, 70, 70, 394, 400, 396, 403, 399, 404, 70,
- 406, 405, 70, 413, 412, 70, 409, 70, 408, 401,
- 407, 411, 70, 414, 70, 70, 70, 70, 70, 70,
- 70, 410, 70, 70, 415, 70, 418, 417, 70, 70,
- 70, 70, 419, 70, 70, 70, 70, 420, 70, 416,
-
- 70, 421, 70, 70, 70, 70, 70, 70, 70, 422,
- 53, 53, 53, 53, 53, 53, 53, 53, 53, 55,
- 55, 55, 55, 55, 55, 55, 55, 55, 57, 57,
- 57, 57, 57, 57, 57, 57, 57, 60, 70, 60,
- 65, 65, 65, 71, 71, 70, 71, 125, 125, 125,
- 125, 70, 125, 125, 125, 125, 126, 126, 126, 126,
- 126, 126, 126, 126, 126, 129, 129, 129, 70, 129,
- 129, 129, 129, 129, 131, 70, 131, 131, 131, 131,
- 131, 131, 131, 135, 70, 70, 70, 70, 70, 135,
- 203, 70, 203, 135, 70, 70, 70, 70, 70, 70,
-
- 70, 70, 70, 70, 70, 70, 70, 321, 70, 70,
- 70, 70, 70, 70, 70, 70, 70, 70, 70, 70,
- 70, 70, 70, 70, 270, 70, 70, 70, 70, 70,
- 70, 70, 70, 202, 133, 132, 130, 70, 70, 70,
- 70, 70, 70, 134, 423, 133, 132, 130, 70, 69,
- 66, 62, 61, 423, 58, 58, 56, 56, 54, 54,
- 9, 423, 423, 423, 423, 423, 423, 423, 423, 423,
- 423, 423, 423, 423, 423, 423, 423, 423, 423, 423,
- 423, 423, 423, 423, 423, 423, 423, 423, 423, 423,
- 423, 423, 423, 423, 423, 423, 423, 423, 423, 423,
-
- 423, 423, 423, 423, 423, 423, 423, 423, 423, 423,
- 423, 423, 423, 423, 423, 423
+ 29, 30, 31, 10, 32, 33, 34, 35, 36, 37,
+ 38, 38, 39, 38, 38, 40, 41, 42, 43, 44,
+ 38, 45, 46, 47, 48, 49, 50, 38, 38, 38,
+ 38, 38, 38, 38, 51, 52, 59, 59, 59, 59,
+ 63, 70, 64, 67, 68, 70, 70, 70, 70, 72,
+ 63, 70, 64, 72, 72, 72, 72, 123, 75, 72,
+ 84, 70, 76, 73, 85, 77, 136, 79, 74, 72,
+ 86, 80, 90, 322, 81, 71, 70, 82, 78, 91,
+
+ 83, 87, 92, 88, 72, 93, 70, 70, 94, 70,
+ 95, 70, 271, 89, 72, 72, 128, 72, 96, 72,
+ 98, 129, 424, 97, 99, 104, 70, 424, 70, 101,
+ 100, 70, 102, 105, 72, 106, 72, 107, 103, 72,
+ 108, 110, 59, 59, 113, 70, 203, 114, 134, 70,
+ 111, 112, 109, 72, 118, 70, 115, 72, 70, 133,
+ 116, 119, 131, 72, 117, 70, 72, 70, 120, 70,
+ 70, 121, 135, 122, 124, 72, 70, 72, 72, 137,
+ 138, 125, 70, 128, 72, 140, 70, 70, 129, 70,
+ 72, 141, 70, 424, 72, 72, 139, 72, 142, 70,
+
+ 72, 144, 150, 70, 70, 143, 70, 72, 134, 70,
+ 145, 72, 72, 133, 72, 152, 146, 72, 70, 131,
+ 147, 148, 156, 69, 153, 66, 72, 70, 149, 151,
+ 70, 154, 70, 155, 70, 72, 70, 62, 72, 158,
+ 72, 70, 72, 70, 72, 157, 70, 159, 70, 72,
+ 70, 72, 61, 424, 72, 70, 72, 161, 72, 58,
+ 160, 70, 162, 72, 163, 164, 70, 165, 70, 72,
+ 70, 70, 168, 70, 72, 58, 72, 170, 72, 72,
+ 169, 72, 166, 167, 70, 172, 70, 70, 56, 171,
+ 174, 56, 72, 70, 72, 72, 173, 54, 70, 175,
+
+ 70, 72, 70, 54, 70, 176, 72, 180, 72, 424,
+ 72, 70, 72, 70, 183, 177, 424, 178, 424, 72,
+ 70, 72, 181, 179, 184, 424, 182, 424, 72, 188,
+ 70, 186, 424, 189, 70, 185, 70, 70, 72, 187,
+ 190, 70, 72, 424, 72, 72, 193, 70, 70, 72,
+ 194, 191, 424, 424, 70, 72, 72, 70, 70, 424,
+ 198, 192, 72, 424, 196, 72, 72, 200, 424, 424,
+ 70, 201, 70, 70, 197, 70, 195, 199, 72, 70,
+ 72, 72, 70, 72, 202, 70, 205, 72, 424, 70,
+ 72, 208, 206, 72, 70, 70, 207, 72, 70, 209,
+
+ 210, 424, 72, 72, 70, 70, 72, 70, 424, 216,
+ 70, 211, 72, 72, 424, 72, 218, 424, 72, 424,
+ 424, 212, 213, 70, 70, 214, 70, 217, 215, 424,
+ 70, 72, 72, 70, 72, 223, 219, 220, 72, 222,
+ 70, 72, 70, 221, 70, 424, 70, 424, 72, 424,
+ 72, 70, 72, 226, 72, 230, 70, 227, 224, 72,
+ 225, 70, 229, 70, 72, 70, 424, 70, 70, 72,
+ 424, 72, 228, 72, 232, 72, 72, 70, 233, 70,
+ 234, 236, 231, 424, 424, 72, 70, 72, 70, 70,
+ 424, 237, 238, 70, 72, 235, 72, 72, 240, 239,
+
+ 70, 72, 242, 70, 424, 70, 70, 243, 72, 70,
+ 424, 72, 241, 72, 72, 70, 70, 72, 246, 70,
+ 244, 70, 70, 72, 72, 245, 248, 72, 249, 72,
+ 72, 247, 70, 70, 70, 70, 70, 70, 70, 250,
+ 72, 72, 72, 72, 72, 72, 72, 255, 70, 424,
+ 251, 70, 253, 70, 424, 424, 72, 252, 70, 72,
+ 424, 72, 256, 258, 70, 257, 72, 424, 254, 70,
+ 70, 70, 72, 259, 261, 262, 70, 72, 72, 72,
+ 260, 70, 70, 424, 72, 266, 263, 265, 70, 72,
+ 72, 70, 424, 70, 264, 70, 72, 70, 70, 72,
+
+ 267, 72, 269, 72, 70, 72, 72, 268, 70, 424,
+ 270, 70, 72, 70, 272, 273, 72, 274, 70, 72,
+ 70, 72, 70, 275, 277, 70, 72, 276, 72, 280,
+ 72, 281, 70, 72, 70, 279, 70, 70, 424, 424,
+ 72, 278, 72, 70, 72, 72, 286, 284, 70, 70,
+ 70, 72, 424, 282, 70, 70, 72, 72, 72, 285,
+ 283, 424, 72, 72, 70, 70, 288, 70, 70, 290,
+ 70, 287, 72, 72, 70, 72, 72, 424, 72, 70,
+ 70, 291, 72, 70, 70, 289, 70, 72, 72, 70,
+ 424, 72, 72, 70, 72, 292, 70, 72, 293, 297,
+
+ 70, 72, 70, 70, 72, 295, 294, 70, 72, 296,
+ 72, 72, 70, 70, 298, 72, 70, 424, 70, 70,
+ 72, 72, 70, 70, 72, 299, 72, 72, 70, 302,
+ 72, 72, 70, 424, 424, 424, 72, 424, 300, 70,
+ 72, 301, 306, 70, 424, 70, 303, 72, 304, 70,
+ 305, 72, 307, 72, 308, 70, 424, 72, 309, 424,
+ 70, 70, 312, 72, 311, 70, 70, 310, 72, 72,
+ 424, 70, 70, 72, 72, 70, 70, 70, 313, 72,
+ 72, 314, 424, 72, 72, 72, 70, 317, 70, 319,
+ 320, 424, 424, 70, 72, 315, 72, 70, 70, 321,
+
+ 316, 72, 70, 318, 70, 72, 72, 70, 70, 70,
+ 72, 424, 72, 424, 424, 72, 72, 72, 424, 70,
+ 70, 323, 327, 70, 70, 70, 324, 72, 72, 424,
+ 329, 72, 72, 72, 70, 325, 328, 331, 70, 326,
+ 424, 70, 72, 70, 70, 70, 72, 332, 330, 72,
+ 70, 72, 72, 72, 335, 70, 424, 424, 72, 70,
+ 333, 70, 70, 72, 334, 336, 337, 72, 424, 72,
+ 72, 70, 70, 70, 70, 70, 338, 424, 70, 72,
+ 72, 72, 72, 72, 424, 340, 72, 70, 70, 341,
+ 339, 424, 343, 70, 70, 72, 72, 70, 424, 344,
+
+ 70, 72, 72, 342, 70, 72, 348, 424, 72, 70,
+ 70, 70, 72, 70, 424, 346, 345, 72, 72, 72,
+ 70, 72, 347, 70, 424, 70, 349, 70, 72, 70,
+ 70, 72, 350, 72, 354, 72, 351, 72, 72, 352,
+ 356, 70, 353, 358, 355, 70, 70, 70, 70, 72,
+ 70, 357, 70, 72, 72, 72, 72, 70, 72, 70,
+ 72, 70, 70, 70, 70, 72, 70, 72, 359, 72,
+ 72, 72, 72, 70, 72, 424, 70, 424, 424, 361,
+ 70, 72, 70, 362, 72, 360, 365, 70, 72, 363,
+ 72, 366, 364, 70, 70, 72, 70, 424, 70, 70,
+
+ 70, 72, 72, 70, 72, 367, 72, 72, 72, 70,
+ 368, 72, 424, 424, 70, 70, 70, 72, 424, 70,
+ 369, 370, 72, 72, 72, 424, 374, 72, 70, 371,
+ 70, 70, 424, 375, 70, 372, 72, 70, 72, 72,
+ 373, 70, 72, 376, 379, 72, 377, 70, 70, 72,
+ 70, 70, 424, 70, 70, 72, 72, 378, 72, 72,
+ 380, 72, 72, 70, 70, 70, 70, 383, 70, 70,
+ 382, 72, 72, 72, 72, 70, 72, 72, 70, 381,
+ 70, 70, 424, 72, 70, 70, 72, 70, 72, 72,
+ 387, 386, 72, 72, 384, 72, 385, 70, 424, 70,
+
+ 70, 424, 70, 424, 389, 72, 388, 72, 72, 390,
+ 72, 70, 70, 70, 70, 392, 70, 424, 424, 72,
+ 72, 72, 72, 393, 72, 391, 396, 424, 70, 70,
+ 70, 70, 70, 70, 70, 394, 72, 72, 72, 72,
+ 72, 72, 72, 70, 398, 70, 70, 395, 70, 70,
+ 70, 72, 70, 72, 72, 424, 72, 72, 72, 424,
+ 72, 399, 403, 397, 404, 70, 70, 400, 70, 401,
+ 70, 424, 70, 72, 72, 70, 72, 70, 72, 405,
+ 72, 402, 70, 72, 424, 72, 424, 70, 70, 70,
+ 72, 70, 406, 424, 407, 72, 72, 72, 70, 72,
+
+ 70, 412, 70, 424, 70, 70, 72, 424, 72, 410,
+ 72, 408, 72, 72, 70, 409, 424, 413, 414, 70,
+ 415, 70, 72, 70, 411, 70, 424, 72, 416, 72,
+ 70, 72, 424, 72, 419, 70, 424, 70, 72, 417,
+ 418, 424, 424, 72, 420, 72, 424, 424, 421, 424,
+ 424, 424, 424, 424, 424, 424, 422, 424, 424, 424,
+ 424, 424, 424, 424, 424, 424, 424, 424, 423, 53,
+ 53, 53, 53, 53, 53, 53, 53, 53, 55, 55,
+ 55, 55, 55, 55, 55, 55, 55, 57, 57, 57,
+ 57, 57, 57, 57, 57, 57, 60, 424, 60, 65,
+
+ 65, 65, 71, 71, 424, 71, 126, 126, 126, 126,
+ 424, 126, 126, 126, 126, 127, 127, 127, 127, 127,
+ 127, 127, 127, 127, 130, 130, 130, 424, 130, 130,
+ 130, 130, 130, 132, 424, 132, 132, 132, 132, 132,
+ 132, 132, 136, 424, 424, 424, 424, 424, 136, 72,
+ 72, 424, 72, 204, 424, 204, 9, 424, 424, 424,
+ 424, 424, 424, 424, 424, 424, 424, 424, 424, 424,
+ 424, 424, 424, 424, 424, 424, 424, 424, 424, 424,
+ 424, 424, 424, 424, 424, 424, 424, 424, 424, 424,
+ 424, 424, 424, 424, 424, 424, 424, 424, 424, 424,
+
+ 424, 424, 424, 424, 424, 424, 424, 424, 424, 424,
+ 424, 424, 424
} ;
-static yyconst flex_int16_t yy_chk[917] =
+static yyconst flex_int16_t yy_chk[1414] =
{ 0,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 11, 11, 12, 12, 25,
- 32, 25, 28, 28, 33, 54, 49, 81, 44, 34,
- 54, 36, 422, 35, 49, 64, 33, 64, 41, 81,
- 33, 32, 42, 33, 34, 35, 32, 37, 34, 35,
- 36, 34, 36, 44, 34, 35, 33, 34, 39, 37,
-
- 41, 40, 36, 72, 42, 43, 37, 59, 59, 37,
- 42, 39, 37, 40, 72, 37, 50, 40, 43, 39,
- 45, 47, 46, 40, 39, 415, 43, 404, 43, 47,
- 43, 50, 45, 43, 46, 48, 47, 46, 50, 73,
- 76, 45, 45, 47, 78, 74, 46, 75, 79, 73,
- 46, 80, 78, 82, 46, 84, 48, 85, 48, 74,
- 86, 75, 76, 83, 87, 73, 89, 88, 75, 85,
- 79, 80, 80, 83, 90, 87, 79, 91, 82, 80,
- 93, 84, 83, 88, 92, 98, 86, 95, 97, 83,
- 99, 101, 102, 89, 100, 105, 90, 106, 95, 107,
-
- 99, 93, 91, 98, 111, 100, 92, 105, 106, 97,
- 110, 102, 99, 95, 95, 108, 109, 112, 101, 113,
- 115, 110, 114, 111, 109, 116, 107, 113, 119, 115,
- 120, 108, 124, 117, 111, 118, 112, 108, 121, 109,
- 115, 114, 117, 109, 112, 122, 120, 118, 138, 121,
- 126, 116, 139, 123, 127, 126, 120, 124, 140, 127,
- 138, 143, 119, 141, 122, 123, 142, 144, 139, 123,
- 145, 141, 146, 147, 140, 142, 142, 148, 150, 149,
- 151, 153, 152, 147, 154, 156, 143, 149, 155, 158,
- 159, 153, 146, 144, 152, 146, 145, 156, 148, 151,
-
- 159, 156, 160, 150, 150, 162, 164, 163, 165, 166,
- 160, 154, 167, 170, 155, 168, 158, 163, 164, 169,
- 165, 166, 171, 173, 174, 162, 167, 168, 172, 177,
- 178, 166, 175, 180, 171, 165, 170, 169, 179, 181,
- 178, 172, 173, 182, 175, 174, 183, 184, 185, 179,
- 177, 187, 180, 189, 192, 191, 196, 193, 195, 194,
- 185, 198, 197, 199, 200, 183, 191, 181, 194, 194,
- 197, 204, 196, 192, 201, 207, 187, 182, 193, 189,
- 194, 184, 195, 200, 205, 206, 209, 211, 199, 198,
- 212, 213, 201, 214, 215, 216, 204, 217, 218, 211,
-
- 207, 214, 215, 219, 209, 205, 206, 213, 220, 221,
- 218, 223, 224, 226, 228, 229, 220, 212, 231, 232,
- 235, 242, 216, 233, 217, 219, 226, 223, 234, 228,
- 237, 245, 246, 250, 235, 247, 221, 249, 251, 253,
- 252, 254, 229, 242, 256, 231, 224, 247, 233, 252,
- 232, 254, 257, 234, 255, 258, 245, 237, 250, 246,
- 257, 260, 253, 249, 251, 261, 255, 258, 262, 263,
- 264, 256, 265, 266, 267, 268, 271, 260, 273, 274,
- 261, 275, 277, 278, 264, 266, 267, 268, 279, 280,
- 282, 283, 284, 285, 277, 286, 262, 263, 279, 265,
-
- 287, 288, 282, 271, 292, 273, 294, 286, 297, 298,
- 278, 274, 283, 296, 300, 275, 280, 284, 285, 301,
- 292, 287, 288, 297, 302, 296, 303, 300, 304, 306,
- 301, 305, 307, 294, 310, 309, 312, 298, 313, 305,
- 315, 317, 314, 316, 318, 324, 313, 303, 325, 328,
- 326, 302, 315, 317, 306, 304, 331, 312, 335, 307,
- 309, 310, 314, 336, 316, 333, 337, 339, 335, 341,
- 318, 343, 344, 346, 325, 326, 328, 333, 348, 324,
- 353, 347, 359, 331, 351, 336, 365, 352, 337, 356,
- 341, 347, 339, 354, 348, 352, 351, 344, 346, 343,
-
- 360, 356, 354, 361, 366, 353, 367, 369, 370, 359,
- 373, 374, 376, 365, 378, 361, 360, 380, 369, 379,
- 381, 384, 389, 382, 387, 390, 378, 367, 373, 391,
- 379, 366, 374, 370, 382, 393, 387, 376, 394, 380,
- 401, 406, 405, 396, 403, 398, 407, 408, 410, 389,
- 394, 411, 412, 381, 391, 384, 396, 390, 398, 413,
- 403, 401, 409, 411, 410, 414, 407, 416, 406, 393,
- 405, 409, 418, 412, 417, 419, 420, 421, 402, 400,
- 399, 408, 397, 395, 413, 392, 417, 416, 388, 386,
- 385, 383, 418, 377, 375, 372, 371, 419, 368, 414,
-
- 364, 420, 363, 362, 358, 357, 355, 350, 349, 421,
- 424, 424, 424, 424, 424, 424, 424, 424, 424, 425,
+ 1, 1, 1, 1, 1, 1, 11, 11, 12, 12,
+ 25, 32, 25, 28, 28, 33, 38, 35, 49, 32,
+ 64, 34, 64, 33, 38, 35, 49, 49, 33, 34,
+ 35, 36, 33, 32, 35, 33, 322, 34, 32, 36,
+ 35, 34, 37, 271, 34, 37, 39, 34, 33, 37,
+
+ 34, 36, 37, 36, 39, 37, 41, 40, 37, 42,
+ 39, 43, 203, 36, 41, 40, 54, 42, 39, 43,
+ 40, 54, 127, 39, 40, 43, 44, 127, 45, 41,
+ 40, 46, 42, 43, 44, 43, 45, 43, 42, 46,
+ 43, 45, 59, 59, 46, 47, 135, 46, 134, 48,
+ 45, 45, 44, 47, 47, 71, 46, 48, 50, 133,
+ 46, 47, 131, 71, 46, 72, 50, 73, 47, 74,
+ 75, 48, 70, 48, 50, 73, 76, 74, 75, 73,
+ 74, 50, 77, 128, 76, 75, 78, 80, 128, 82,
+ 77, 76, 79, 65, 78, 80, 74, 82, 76, 81,
+
+ 79, 79, 82, 83, 84, 77, 85, 81, 63, 86,
+ 80, 83, 84, 58, 85, 84, 80, 86, 87, 56,
+ 81, 81, 86, 30, 84, 26, 87, 88, 81, 83,
+ 89, 84, 90, 85, 91, 88, 92, 24, 89, 88,
+ 90, 93, 91, 94, 92, 87, 95, 89, 97, 93,
+ 98, 94, 23, 9, 95, 96, 97, 91, 98, 8,
+ 90, 99, 92, 96, 93, 94, 100, 96, 102, 99,
+ 103, 104, 98, 101, 100, 7, 102, 100, 103, 104,
+ 99, 101, 96, 96, 105, 101, 106, 108, 6, 100,
+ 103, 5, 105, 107, 106, 108, 102, 4, 110, 106,
+
+ 109, 107, 111, 3, 112, 107, 110, 110, 109, 0,
+ 111, 113, 112, 115, 111, 108, 0, 109, 0, 113,
+ 114, 115, 110, 109, 112, 0, 110, 0, 114, 114,
+ 116, 113, 0, 115, 117, 112, 118, 119, 116, 113,
+ 116, 120, 117, 0, 118, 119, 118, 121, 122, 120,
+ 119, 116, 0, 0, 124, 121, 122, 123, 125, 0,
+ 122, 117, 124, 0, 121, 123, 125, 124, 0, 0,
+ 137, 124, 138, 139, 121, 140, 120, 123, 137, 141,
+ 138, 139, 142, 140, 125, 144, 139, 141, 0, 143,
+ 142, 142, 140, 144, 145, 146, 141, 143, 148, 143,
+
+ 143, 0, 145, 146, 147, 149, 148, 150, 0, 148,
+ 151, 144, 147, 149, 0, 150, 150, 0, 151, 0,
+ 0, 145, 146, 152, 154, 147, 153, 149, 147, 0,
+ 155, 152, 154, 156, 153, 154, 151, 151, 155, 153,
+ 157, 156, 158, 152, 159, 0, 161, 0, 157, 0,
+ 158, 160, 159, 157, 161, 161, 162, 157, 155, 160,
+ 156, 163, 160, 164, 162, 165, 0, 166, 167, 163,
+ 0, 164, 159, 165, 164, 166, 167, 168, 165, 170,
+ 166, 167, 163, 0, 0, 168, 169, 170, 171, 172,
+ 0, 167, 168, 173, 169, 166, 171, 172, 170, 169,
+
+ 174, 173, 172, 175, 0, 176, 177, 173, 174, 178,
+ 0, 175, 171, 176, 177, 179, 180, 178, 176, 181,
+ 174, 182, 183, 179, 180, 175, 179, 181, 180, 182,
+ 183, 178, 184, 185, 186, 187, 188, 189, 190, 181,
+ 184, 185, 186, 187, 188, 189, 190, 186, 191, 0,
+ 182, 192, 184, 193, 0, 0, 191, 183, 194, 192,
+ 0, 193, 188, 192, 195, 190, 194, 0, 185, 196,
+ 197, 199, 195, 193, 195, 195, 198, 196, 197, 199,
+ 194, 200, 201, 0, 198, 198, 195, 197, 205, 200,
+ 201, 202, 0, 206, 196, 207, 205, 208, 209, 202,
+
+ 199, 206, 201, 207, 211, 208, 209, 200, 210, 0,
+ 202, 212, 211, 213, 205, 206, 210, 207, 214, 212,
+ 215, 213, 216, 208, 212, 217, 214, 210, 215, 215,
+ 216, 216, 218, 217, 219, 214, 220, 221, 0, 0,
+ 218, 213, 219, 222, 220, 221, 221, 219, 223, 224,
+ 225, 222, 0, 217, 226, 227, 223, 224, 225, 220,
+ 218, 0, 226, 227, 228, 229, 224, 230, 231, 227,
+ 232, 222, 228, 229, 233, 230, 231, 0, 232, 234,
+ 237, 229, 233, 235, 236, 225, 238, 234, 237, 239,
+ 0, 235, 236, 240, 238, 230, 241, 239, 232, 236,
+
+ 242, 240, 243, 244, 241, 234, 233, 245, 242, 235,
+ 243, 244, 246, 247, 238, 245, 248, 0, 249, 250,
+ 246, 247, 251, 252, 248, 243, 249, 250, 254, 248,
+ 251, 252, 253, 0, 0, 0, 254, 0, 246, 257,
+ 253, 247, 253, 255, 0, 256, 250, 257, 251, 259,
+ 252, 255, 254, 256, 255, 258, 0, 259, 256, 0,
+ 260, 261, 259, 258, 258, 262, 263, 257, 260, 261,
+ 0, 264, 265, 262, 263, 266, 267, 268, 261, 264,
+ 265, 262, 0, 266, 267, 268, 269, 265, 270, 267,
+ 268, 0, 0, 272, 269, 263, 270, 273, 274, 269,
+
+ 264, 272, 275, 266, 276, 273, 274, 277, 279, 278,
+ 275, 0, 276, 0, 0, 277, 279, 278, 0, 280,
+ 281, 272, 278, 282, 283, 284, 274, 280, 281, 0,
+ 280, 282, 283, 284, 285, 275, 279, 283, 286, 276,
+ 0, 287, 285, 288, 289, 290, 286, 284, 281, 287,
+ 291, 288, 289, 290, 287, 292, 0, 0, 291, 293,
+ 285, 294, 295, 292, 286, 288, 289, 293, 0, 294,
+ 295, 296, 297, 298, 299, 300, 293, 0, 301, 296,
+ 297, 298, 299, 300, 0, 297, 301, 302, 303, 298,
+ 295, 0, 301, 304, 305, 302, 303, 306, 0, 302,
+
+ 307, 304, 305, 299, 308, 306, 306, 0, 307, 309,
+ 310, 311, 308, 312, 0, 304, 303, 309, 310, 311,
+ 313, 312, 305, 315, 0, 314, 307, 316, 313, 317,
+ 318, 315, 308, 314, 314, 316, 310, 317, 318, 311,
+ 316, 319, 313, 318, 315, 320, 321, 323, 324, 319,
+ 325, 317, 326, 320, 321, 323, 324, 327, 325, 328,
+ 326, 329, 330, 331, 332, 327, 333, 328, 319, 329,
+ 330, 331, 332, 334, 333, 0, 335, 0, 0, 326,
+ 336, 334, 337, 327, 335, 325, 334, 338, 336, 329,
+ 337, 336, 332, 339, 340, 338, 341, 0, 343, 342,
+
+ 344, 339, 340, 346, 341, 337, 343, 342, 344, 345,
+ 338, 346, 0, 0, 347, 348, 349, 345, 0, 350,
+ 340, 342, 347, 348, 349, 0, 348, 350, 351, 344,
+ 352, 354, 0, 349, 355, 345, 351, 353, 352, 354,
+ 347, 356, 355, 352, 355, 353, 353, 357, 358, 356,
+ 359, 360, 0, 361, 362, 357, 358, 354, 359, 360,
+ 357, 361, 362, 363, 364, 365, 366, 362, 367, 368,
+ 361, 363, 364, 365, 366, 369, 367, 368, 370, 360,
+ 371, 372, 0, 369, 373, 374, 370, 375, 371, 372,
+ 370, 368, 373, 374, 366, 375, 367, 376, 0, 377,
+
+ 378, 0, 379, 0, 374, 376, 371, 377, 378, 375,
+ 379, 380, 381, 382, 383, 379, 384, 0, 0, 380,
+ 381, 382, 383, 380, 384, 377, 383, 0, 385, 386,
+ 387, 388, 389, 390, 391, 381, 385, 386, 387, 388,
+ 389, 390, 391, 392, 388, 393, 394, 382, 396, 395,
+ 397, 392, 398, 393, 394, 0, 396, 395, 397, 0,
+ 398, 390, 395, 385, 397, 399, 400, 391, 401, 392,
+ 402, 0, 403, 399, 400, 405, 401, 404, 402, 399,
+ 403, 394, 406, 405, 0, 404, 0, 407, 408, 409,
+ 406, 410, 402, 0, 404, 407, 408, 409, 413, 410,
+
+ 411, 410, 414, 0, 415, 412, 413, 0, 411, 408,
+ 414, 406, 415, 412, 416, 407, 0, 411, 412, 417,
+ 413, 418, 416, 419, 409, 420, 0, 417, 414, 418,
+ 421, 419, 0, 420, 418, 422, 0, 423, 421, 415,
+ 417, 0, 0, 422, 419, 423, 0, 0, 420, 0,
+ 0, 0, 0, 0, 0, 0, 421, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 422, 425,
425, 425, 425, 425, 425, 425, 425, 425, 426, 426,
- 426, 426, 426, 426, 426, 426, 426, 427, 345, 427,
- 428, 428, 428, 429, 429, 342, 429, 430, 430, 430,
- 430, 340, 430, 430, 430, 430, 431, 431, 431, 431,
- 431, 431, 431, 431, 431, 432, 432, 432, 338, 432,
- 432, 432, 432, 432, 433, 334, 433, 433, 433, 433,
- 433, 433, 433, 434, 332, 330, 329, 327, 323, 434,
- 435, 322, 435, 321, 320, 319, 311, 308, 299, 295,
-
- 293, 291, 290, 289, 281, 276, 272, 270, 269, 259,
- 248, 244, 243, 241, 240, 239, 238, 236, 230, 227,
- 225, 222, 210, 208, 202, 190, 188, 186, 176, 161,
- 157, 137, 136, 134, 133, 132, 130, 104, 103, 96,
- 94, 77, 71, 70, 65, 63, 58, 56, 38, 30,
- 26, 24, 23, 9, 8, 7, 6, 5, 4, 3,
- 423, 423, 423, 423, 423, 423, 423, 423, 423, 423,
- 423, 423, 423, 423, 423, 423, 423, 423, 423, 423,
- 423, 423, 423, 423, 423, 423, 423, 423, 423, 423,
- 423, 423, 423, 423, 423, 423, 423, 423, 423, 423,
-
- 423, 423, 423, 423, 423, 423, 423, 423, 423, 423,
- 423, 423, 423, 423, 423, 423
+ 426, 426, 426, 426, 426, 426, 426, 427, 427, 427,
+ 427, 427, 427, 427, 427, 427, 428, 0, 428, 429,
+
+ 429, 429, 430, 430, 0, 430, 431, 431, 431, 431,
+ 0, 431, 431, 431, 431, 432, 432, 432, 432, 432,
+ 432, 432, 432, 432, 433, 433, 433, 0, 433, 433,
+ 433, 433, 433, 434, 0, 434, 434, 434, 434, 434,
+ 434, 434, 435, 0, 0, 0, 0, 0, 435, 436,
+ 436, 0, 436, 437, 0, 437, 424, 424, 424, 424,
+ 424, 424, 424, 424, 424, 424, 424, 424, 424, 424,
+ 424, 424, 424, 424, 424, 424, 424, 424, 424, 424,
+ 424, 424, 424, 424, 424, 424, 424, 424, 424, 424,
+ 424, 424, 424, 424, 424, 424, 424, 424, 424, 424,
+
+ 424, 424, 424, 424, 424, 424, 424, 424, 424, 424,
+ 424, 424, 424
} ;
static yy_state_type yy_last_accepting_state;
@@ -847,6 +966,7 @@ Created 12/14/1997 Heikki Tuuri
#define realloc(P, A) ut_realloc(P, A)
#define exit(A) ut_error
+/* Note: We cast &result to int* from yysize_t* */
#define YY_INPUT(buf, result, max_size) \
(result = pars_get_lex_chars(buf, max_size))
@@ -883,7 +1003,7 @@ string_append(
-#line 887 "lexyy.cc"
+#line 1006 "lexyy.cc"
#define INITIAL 0
#define comment 1
@@ -965,7 +1085,12 @@ static int input (void );
/* Amount of stuff to slurp up with each read. */
#ifndef YY_READ_BUF_SIZE
+#ifdef __ia64__
+/* On IA-64, the buffer size is 16k, not 8k */
+#define YY_READ_BUF_SIZE 16384
+#else
#define YY_READ_BUF_SIZE 8192
+#endif /* __ia64__ */
#endif
/* Copy whatever the last rule matched to the standard output. */
@@ -973,7 +1098,7 @@ static int input (void );
/* This used to be an fputs(), but since the string might contain NUL's,
* we now use fwrite().
*/
-#define ECHO fwrite( yytext, yyleng, 1, yyout )
+#define ECHO do { if (fwrite( yytext, yyleng, 1, yyout )) {} } while (0)
#endif
/* Gets input and stuffs it into "buf". number of characters read, or YY_NULL,
@@ -984,7 +1109,7 @@ static int input (void );
if ( YY_CURRENT_BUFFER_LVALUE->yy_is_interactive ) \
{ \
int c = '*'; \
- yy_size_t n; \
+ size_t n; \
for ( n = 0; n < max_size && \
(c = getc( yyin )) != EOF && c != '\n'; ++n ) \
buf[n] = (char) c; \
@@ -1069,7 +1194,7 @@ YY_DECL
#line 112 "pars0lex.l"
-#line 1073 "lexyy.cc"
+#line 1197 "lexyy.cc"
if ( !(yy_init) )
{
@@ -1122,13 +1247,13 @@ yy_match:
while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
{
yy_current_state = (int) yy_def[yy_current_state];
- if ( yy_current_state >= 424 )
+ if ( yy_current_state >= 425 )
yy_c = yy_meta[(unsigned int) yy_c];
}
yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
++yy_cp;
}
- while ( yy_current_state != 423 );
+ while ( yy_current_state != 424 );
yy_cp = (yy_last_accepting_cpos);
yy_current_state = (yy_last_accepting_state);
@@ -2109,7 +2234,7 @@ YY_RULE_SETUP
#line 691 "pars0lex.l"
YY_FATAL_ERROR( "flex scanner jammed" );
YY_BREAK
-#line 2113 "lexyy.cc"
+#line 2237 "lexyy.cc"
case YY_STATE_EOF(INITIAL):
case YY_STATE_EOF(comment):
case YY_STATE_EOF(quoted):
@@ -2299,7 +2424,7 @@ static int yy_get_next_buffer (void)
else
{
- yy_size_t num_to_read =
+ int num_to_read =
YY_CURRENT_BUFFER_LVALUE->yy_buf_size - number_to_move - 1;
while ( num_to_read <= 0 )
@@ -2313,16 +2438,16 @@ static int yy_get_next_buffer (void)
if ( b->yy_is_our_buffer )
{
- yy_size_t new_size = b->yy_buf_size * 2;
+ int new_size = b->yy_buf_size * 2;
if ( new_size <= 0 )
b->yy_buf_size += b->yy_buf_size / 8;
else
b->yy_buf_size *= 2;
- b->yy_ch_buf = (char*)
+ b->yy_ch_buf = (char *)
/* Include room in for 2 EOB chars. */
- yyrealloc((void*) b->yy_ch_buf,b->yy_buf_size + 2 );
+ yyrealloc((void *) b->yy_ch_buf,b->yy_buf_size + 2 );
}
else
/* Can't grow it, we don't own it. */
@@ -2344,7 +2469,7 @@ static int yy_get_next_buffer (void)
/* Read in more data. */
YY_INPUT( (&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move]),
- (yy_n_chars), num_to_read );
+ (yy_n_chars), (size_t) num_to_read );
YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
}
@@ -2371,7 +2496,7 @@ static int yy_get_next_buffer (void)
if ((yy_size_t) ((yy_n_chars) + number_to_move) > YY_CURRENT_BUFFER_LVALUE->yy_buf_size) {
/* Extend the array by 50%, plus the number we really need. */
yy_size_t new_size = (yy_n_chars) + number_to_move + ((yy_n_chars) >> 1);
- YY_CURRENT_BUFFER_LVALUE->yy_ch_buf = (char*) yyrealloc((void*) YY_CURRENT_BUFFER_LVALUE->yy_ch_buf,new_size );
+ YY_CURRENT_BUFFER_LVALUE->yy_ch_buf = (char *) yyrealloc((void *) YY_CURRENT_BUFFER_LVALUE->yy_ch_buf,new_size );
if ( ! YY_CURRENT_BUFFER_LVALUE->yy_ch_buf )
YY_FATAL_ERROR( "out of dynamic memory in yy_get_next_buffer()" );
}
@@ -2387,7 +2512,7 @@ static int yy_get_next_buffer (void)
/* yy_get_previous_state - get the state just before the EOB char was reached */
- static yy_state_type yy_get_previous_state (void)
+ yy_state_type yy_get_previous_state (void)
{
register yy_state_type yy_current_state;
register char *yy_cp;
@@ -2405,7 +2530,7 @@ static int yy_get_next_buffer (void)
while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
{
yy_current_state = (int) yy_def[yy_current_state];
- if ( yy_current_state >= 424 )
+ if ( yy_current_state >= 425 )
yy_c = yy_meta[(unsigned int) yy_c];
}
yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
@@ -2419,7 +2544,7 @@ static int yy_get_next_buffer (void)
* synopsis
* next_state = yy_try_NUL_trans( current_state );
*/
- static yy_state_type yy_try_NUL_trans (yy_state_type yy_current_state )
+ static yy_state_type yy_try_NUL_trans (yy_state_type yy_current_state )
{
register int yy_is_jam;
register char *yy_cp = (yy_c_buf_p);
@@ -2433,11 +2558,11 @@ static int yy_get_next_buffer (void)
while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
{
yy_current_state = (int) yy_def[yy_current_state];
- if ( yy_current_state >= 424 )
+ if ( yy_current_state >= 425 )
yy_c = yy_meta[(unsigned int) yy_c];
}
yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
- yy_is_jam = (yy_current_state == 423);
+ yy_is_jam = (yy_current_state == 424);
return yy_is_jam ? 0 : yy_current_state;
}
@@ -2466,7 +2591,7 @@ static int yy_get_next_buffer (void)
else
{ /* need more input */
- yy_size_t offset = (yy_c_buf_p) - (yytext_ptr);
+ int offset = (int)((yy_c_buf_p) - (yytext_ptr));
++(yy_c_buf_p);
switch ( yy_get_next_buffer( ) )
@@ -2490,7 +2615,7 @@ static int yy_get_next_buffer (void)
case EOB_ACT_END_OF_FILE:
{
if ( yywrap( ) )
- return 0;
+ return EOF;
if ( ! (yy_did_buffer_switch_on_eof) )
YY_NEW_FILE;
@@ -2508,7 +2633,7 @@ static int yy_get_next_buffer (void)
}
}
- c = *(unsigned char*) (yy_c_buf_p); /* cast for 8-bit char's */
+ c = *(unsigned char *) (yy_c_buf_p); /* cast for 8-bit char's */
*(yy_c_buf_p) = '\0'; /* preserve yytext */
(yy_hold_char) = *++(yy_c_buf_p);
@@ -2518,7 +2643,7 @@ static int yy_get_next_buffer (void)
/** Immediately switch to a different input stream.
* @param input_file A readable stream.
- *
+ *
* @note This function does not reset the start condition to @c INITIAL .
*/
void yyrestart (FILE * input_file )
@@ -2536,7 +2661,7 @@ static int yy_get_next_buffer (void)
/** Switch to a different input buffer.
* @param new_buffer The new input buffer.
- *
+ *
*/
__attribute__((unused)) static void yy_switch_to_buffer (YY_BUFFER_STATE new_buffer )
{
@@ -2580,7 +2705,7 @@ static void yy_load_buffer_state (void)
/** Allocate and initialize an input buffer state.
* @param file A readable stream.
* @param size The character buffer size in bytes. When in doubt, use @c YY_BUF_SIZE.
- *
+ *
* @return the allocated buffer state.
*/
static YY_BUFFER_STATE yy_create_buffer (FILE * file, int size )
@@ -2596,7 +2721,7 @@ static void yy_load_buffer_state (void)
/* yy_ch_buf has to be 2 characters longer than the size given because
* we need to put in 2 end-of-buffer characters.
*/
- b->yy_ch_buf = (char*) yyalloc(b->yy_buf_size + 2 );
+ b->yy_ch_buf = (char *) yyalloc(b->yy_buf_size + 2 );
if ( ! b->yy_ch_buf )
YY_FATAL_ERROR( "out of dynamic memory in yy_create_buffer()" );
@@ -2609,9 +2734,9 @@ static void yy_load_buffer_state (void)
/** Destroy the buffer.
* @param b a buffer created with yy_create_buffer()
- *
+ *
*/
- void yy_delete_buffer (YY_BUFFER_STATE b )
+ void yy_delete_buffer (YY_BUFFER_STATE b )
{
if ( ! b )
@@ -2621,20 +2746,20 @@ static void yy_load_buffer_state (void)
YY_CURRENT_BUFFER_LVALUE = (YY_BUFFER_STATE) 0;
if ( b->yy_is_our_buffer )
- yyfree((void*) b->yy_ch_buf );
+ yyfree((void *) b->yy_ch_buf );
- yyfree((void*) b );
+ yyfree((void *) b );
}
/* Initializes or reinitializes a buffer.
* This function is sometimes called more than once on the same buffer,
* such as during a yyrestart() or at EOF.
*/
- static void yy_init_buffer (YY_BUFFER_STATE b, FILE * file )
+ static void yy_init_buffer (YY_BUFFER_STATE b, FILE * file )
{
int oerrno = errno;
-
+
yy_flush_buffer(b );
b->yy_input_file = file;
@@ -2650,13 +2775,13 @@ static void yy_load_buffer_state (void)
}
b->yy_is_interactive = 0;
-
+
errno = oerrno;
}
/** Discard all buffered characters. On the next scan, YY_INPUT will be called.
* @param b the buffer state to be flushed, usually @c YY_CURRENT_BUFFER.
- *
+ *
*/
void yy_flush_buffer (YY_BUFFER_STATE b )
{
@@ -2685,7 +2810,7 @@ static void yy_load_buffer_state (void)
* the current state. This function will allocate the stack
* if necessary.
* @param new_buffer The new state.
- *
+ *
*/
void yypush_buffer_state (YY_BUFFER_STATE new_buffer )
{
@@ -2715,7 +2840,7 @@ void yypush_buffer_state (YY_BUFFER_STATE new_buffer )
/** Removes and deletes the top of the stack, if present.
* The next element becomes the new top.
- *
+ *
*/
void yypop_buffer_state (void)
{
@@ -2738,8 +2863,8 @@ void yypop_buffer_state (void)
*/
static void yyensure_buffer_stack (void)
{
- yy_size_t num_to_alloc;
-
+ int num_to_alloc;
+
if (!(yy_buffer_stack)) {
/* First allocation is just for 2 elements, since we don't know if this
@@ -2747,7 +2872,7 @@ static void yyensure_buffer_stack (void)
* immediate realloc on the next call.
*/
num_to_alloc = 1;
- (yy_buffer_stack) = (struct yy_buffer_state**) yyalloc
+ (yy_buffer_stack) = (struct yy_buffer_state**)yyalloc
(num_to_alloc * sizeof(struct yy_buffer_state*)
);
if ( ! (yy_buffer_stack) )
@@ -2766,7 +2891,7 @@ static void yyensure_buffer_stack (void)
int grow_size = 8 /* arbitrary grow size */;
num_to_alloc = (yy_buffer_stack_max) + grow_size;
- (yy_buffer_stack) = (struct yy_buffer_state**) yyrealloc
+ (yy_buffer_stack) = (struct yy_buffer_state**)yyrealloc
((yy_buffer_stack),
num_to_alloc * sizeof(struct yy_buffer_state*)
);
@@ -2809,7 +2934,7 @@ static void yy_fatal_error (yyconst char* msg )
/* Accessor methods (get/set functions) to struct members. */
/** Get the current line number.
- *
+ *
*/
int yyget_lineno (void)
{
@@ -2818,7 +2943,7 @@ int yyget_lineno (void)
}
/** Get the input stream.
- *
+ *
*/
FILE *yyget_in (void)
{
@@ -2826,7 +2951,7 @@ FILE *yyget_in (void)
}
/** Get the output stream.
- *
+ *
*/
FILE *yyget_out (void)
{
@@ -2834,7 +2959,7 @@ FILE *yyget_out (void)
}
/** Get the length of the current token.
- *
+ *
*/
yy_size_t yyget_leng (void)
{
@@ -2842,7 +2967,7 @@ yy_size_t yyget_leng (void)
}
/** Get the current token.
- *
+ *
*/
char *yyget_text (void)
@@ -2852,18 +2977,18 @@ char *yyget_text (void)
/** Set the current line number.
* @param line_number
- *
+ *
*/
void yyset_lineno (int line_number )
{
-
+
yylineno = line_number;
}
/** Set the input stream. This does not discard the current
* input buffer.
* @param in_str A readable stream.
- *
+ *
* @see yy_switch_to_buffer
*/
void yyset_in (FILE * in_str )
@@ -2895,7 +3020,7 @@ static int yy_init_globals (void)
(yy_buffer_stack) = 0;
(yy_buffer_stack_top) = 0;
(yy_buffer_stack_max) = 0;
- (yy_c_buf_p) = (char*) 0;
+ (yy_c_buf_p) = (char *) 0;
(yy_init) = 0;
(yy_start) = 0;
@@ -2904,8 +3029,8 @@ static int yy_init_globals (void)
yyin = stdin;
yyout = stdout;
#else
- yyin = (FILE*) 0;
- yyout = (FILE*) 0;
+ yyin = (FILE *) 0;
+ yyout = (FILE *) 0;
#endif
/* For future reference: Set errno on error, since we are called by
@@ -2917,7 +3042,7 @@ static int yy_init_globals (void)
/* yylex_destroy is for both reentrant and non-reentrant scanners. */
__attribute__((unused)) static int yylex_destroy (void)
{
-
+
/* Pop the buffer stack, destroying each element. */
while(YY_CURRENT_BUFFER){
yy_delete_buffer(YY_CURRENT_BUFFER );
@@ -2962,24 +3087,24 @@ static int yy_flex_strlen (yyconst char * s )
void *yyalloc (yy_size_t size )
{
- return (void*) malloc( size );
+ return (void *) malloc( size );
}
void *yyrealloc (void * ptr, yy_size_t size )
{
- /* The cast to (char*) in the following accommodates both
+ /* The cast to (char *) in the following accommodates both
* implementations that use char* generic pointers, and those
* that use void* generic pointers. It works with the latter
* because both ANSI C and C++ allow castless assignment from
* any pointer type to void*, and deal with argument conversions
* as though doing an assignment.
*/
- return (void*) realloc( (char*) ptr, size );
+ return (void *) realloc( (char *) ptr, size );
}
void yyfree (void * ptr )
{
- free( (char*) ptr ); /* see yyrealloc() for (char*) cast */
+ free( (char*) ptr ); /* see yyrealloc() for (char *) cast */
}
#define YYTABLES_NAME "yytables"
diff --git a/storage/innobase/pars/pars0lex.l b/storage/innobase/pars/pars0lex.l
index 2446e40cde8..83c3af4b6c5 100644
--- a/storage/innobase/pars/pars0lex.l
+++ b/storage/innobase/pars/pars0lex.l
@@ -102,7 +102,7 @@ string_append(
DIGIT [0-9]
ID [a-z_A-Z][a-z_A-Z0-9]*
-TABLE_NAME [a-z_A-Z][a-z_A-Z0-9]*\/(#sql-|[a-z_A-Z])[a-z_A-Z0-9]*
+TABLE_NAME [a-z_A-Z][@a-z_A-Z0-9]*\/(#sql-|[a-z_A-Z])[a-z_A-Z0-9]*
BOUND_LIT \:[a-z_A-Z0-9]+
BOUND_ID \$[a-z_A-Z0-9]+
diff --git a/storage/innobase/pars/pars0opt.cc b/storage/innobase/pars/pars0opt.cc
index e5f347eedd6..cbed2b39eeb 100644
--- a/storage/innobase/pars/pars0opt.cc
+++ b/storage/innobase/pars/pars0opt.cc
@@ -345,7 +345,7 @@ opt_calc_index_goodness(
/* At least for now we don't support using FTS indexes for queries
done through InnoDB's own SQL parser. */
- if (index->type == DICT_FTS) {
+ if (dict_index_is_online_ddl(index) || (index->type & DICT_FTS)) {
return(0);
}
@@ -400,7 +400,7 @@ opt_calc_index_goodness(
}
}
- /* We have to test for goodness here, as last_op may note be set */
+ /* We have to test for goodness here, as last_op may not be set */
if (goodness && dict_index_is_clust(index)) {
goodness++;
diff --git a/storage/innobase/pars/pars0pars.cc b/storage/innobase/pars/pars0pars.cc
index a4ab85adc36..f82610e62d0 100644
--- a/storage/innobase/pars/pars0pars.cc
+++ b/storage/innobase/pars/pars0pars.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -859,7 +859,8 @@ pars_retrieve_table_def(
sym_node->resolved = TRUE;
sym_node->token_type = SYM_TABLE_REF_COUNTED;
- sym_node->table = dict_table_open_on_name(sym_node->name, TRUE);
+ sym_node->table = dict_table_open_on_name(
+ sym_node->name, TRUE, FALSE, DICT_ERR_IGNORE_NONE);
ut_a(sym_node->table != NULL);
}
@@ -1115,8 +1116,8 @@ pars_function_declaration(
sym_node->token_type = SYM_FUNCTION;
/* Check that the function exists. */
- ut_a(pars_info_get_user_func(pars_sym_tab_global->info,
- sym_node->name));
+ ut_a(pars_info_lookup_user_func(
+ pars_sym_tab_global->info, sym_node->name));
return(sym_node);
}
@@ -1782,8 +1783,9 @@ pars_fetch_statement(
} else {
pars_resolve_exp_variables_and_types(NULL, user_func);
- node->func = pars_info_get_user_func(pars_sym_tab_global->info,
- user_func->name);
+ node->func = pars_info_lookup_user_func(
+ pars_sym_tab_global->info, user_func->name);
+
ut_a(node->func);
node->into_list = NULL;
@@ -1941,9 +1943,23 @@ pars_create_table(
const dtype_t* dtype;
ulint n_cols;
ulint flags = 0;
+ ulint flags2 = 0;
if (compact != NULL) {
+
+ /* System tables currently only use the REDUNDANT row
+ format therefore the check for srv_file_per_table should be
+ safe for now. */
+
flags |= DICT_TF_COMPACT;
+
+ /* FIXME: Ideally this should be part of the SQL syntax
+ or use some other mechanism. We want to reduce dependency
+ on global variables. There is an inherent race here but
+ that has always existed around this variable. */
+ if (srv_file_per_table) {
+ flags2 |= DICT_TF2_USE_TABLESPACE;
+ }
}
if (block_size != NULL) {
@@ -1974,10 +1990,8 @@ pars_create_table(
n_cols = que_node_list_get_len(column_defs);
- /* As the InnoDB SQL parser is for internal use only,
- for creating some system tables, this function will only
- create tables in the old (not compact) record format. */
- table = dict_mem_table_create(table_sym->name, 0, n_cols, flags, 0);
+ table = dict_mem_table_create(
+ table_sym->name, 0, n_cols, flags, flags2);
#ifdef UNIV_DEBUG
if (not_fit_in_memory != NULL) {
@@ -1998,7 +2012,7 @@ pars_create_table(
column = static_cast<sym_node_t*>(que_node_get_next(column));
}
- node = tab_create_graph_create(table, pars_sym_tab_global->heap);
+ node = tab_create_graph_create(table, pars_sym_tab_global->heap, true);
table_sym->resolved = TRUE;
table_sym->token_type = SYM_TABLE;
@@ -2052,7 +2066,7 @@ pars_create_index(
column = static_cast<sym_node_t*>(que_node_get_next(column));
}
- node = ind_create_graph_create(index, pars_sym_tab_global->heap);
+ node = ind_create_graph_create(index, pars_sym_tab_global->heap, true);
table_sym->resolved = TRUE;
table_sym->token_type = SYM_TABLE;
@@ -2251,7 +2265,7 @@ que_thr_t*
pars_complete_graph_for_exec(
/*=========================*/
que_node_t* node, /*!< in: root node for an incomplete
- query graph */
+ query graph, or NULL for dummy graph */
trx_t* trx, /*!< in: transaction handle */
mem_heap_t* heap) /*!< in: memory heap from which allocated */
{
@@ -2265,7 +2279,9 @@ pars_complete_graph_for_exec(
thr->child = node;
- que_node_set_parent(node, thr);
+ if (node) {
+ que_node_set_parent(node, thr);
+ }
trx->graph = NULL;
@@ -2478,7 +2494,7 @@ pars_info_bind_int8_literal(
const char* name, /* in: name */
const ib_uint64_t* val) /* in: value */
{
- pars_bound_lit_t* pbl;
+ pars_bound_lit_t* pbl;
pbl = pars_info_lookup_bound_lit(info, name);
@@ -2519,6 +2535,33 @@ pars_info_add_ull_literal(
}
/****************************************************************//**
+If the literal value already exists then it rebinds otherwise it
+creates a new entry. */
+UNIV_INTERN
+void
+pars_info_bind_ull_literal(
+/*=======================*/
+ pars_info_t* info, /*!< in: info struct */
+ const char* name, /*!< in: name */
+ const ib_uint64_t* val) /*!< in: value */
+{
+ pars_bound_lit_t* pbl;
+
+ pbl = pars_info_lookup_bound_lit(info, name);
+
+ if (!pbl) {
+ pars_info_add_literal(
+ info, name, val, sizeof(*val), DATA_FIXBINARY, 0);
+ } else {
+
+ pbl->address = val;
+ pbl->length = sizeof(*val);
+
+ sym_tab_rebind_lit(pbl->node, val, sizeof(*val));
+ }
+}
+
+/****************************************************************//**
Add user function. */
UNIV_INTERN
void
@@ -2605,19 +2648,6 @@ pars_info_get_bound_id(
}
/****************************************************************//**
-Get user function with the given name.
-@return user func, or NULL if not found */
-UNIV_INTERN
-pars_user_func_t*
-pars_info_get_user_func(
-/*====================*/
- pars_info_t* info, /*!< in: info struct */
- const char* name) /*!< in: function name to find*/
-{
- return(pars_info_lookup_user_func(info, name));
-}
-
-/****************************************************************//**
Get bound literal with the given name.
@return bound literal, or NULL if not found */
UNIV_INTERN
diff --git a/storage/innobase/pars/pars0sym.cc b/storage/innobase/pars/pars0sym.cc
index c71ad8a6b39..b01a69cb33a 100644
--- a/storage/innobase/pars/pars0sym.cc
+++ b/storage/innobase/pars/pars0sym.cc
@@ -84,7 +84,7 @@ sym_tab_free_private(
if (sym->token_type == SYM_TABLE_REF_COUNTED) {
- dict_table_close(sym->table, TRUE);
+ dict_table_close(sym->table, TRUE, FALSE);
sym->table = NULL;
sym->resolved = FALSE;
diff --git a/storage/innobase/que/que0que.cc b/storage/innobase/que/que0que.cc
index c023723685c..fb185959d56 100644
--- a/storage/innobase/que/que0que.cc
+++ b/storage/innobase/que/que0que.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -1248,7 +1248,7 @@ loop:
Evaluate the given SQL.
@return error code or DB_SUCCESS */
UNIV_INTERN
-enum db_err
+dberr_t
que_eval_sql(
/*=========*/
pars_info_t* info, /*!< in: info struct, or NULL */
diff --git a/storage/innobase/read/read0read.cc b/storage/innobase/read/read0read.cc
index 02d78d657c6..14dc9ee5e7f 100644
--- a/storage/innobase/read/read0read.cc
+++ b/storage/innobase/read/read0read.cc
@@ -174,59 +174,6 @@ The order does not matter. No new transactions can be created and no running
transaction can commit or rollback (or free views).
*/
-#ifdef UNIV_DEBUG
-/*********************************************************************//**
-Validates a read view object. */
-static
-ibool
-read_view_validate(
-/*===============*/
- const read_view_t* view) /*!< in: view to validate */
-{
- ulint i;
-
- ut_ad(mutex_own(&trx_sys->mutex));
-
- /* Check that the view->trx_ids array is in descending order. */
- for (i = 1; i < view->n_trx_ids; ++i) {
-
- ut_a(view->trx_ids[i] < view->trx_ids[i - 1]);
- }
-
- return(TRUE);
-}
-
-/** Functor to validate the view list. */
-struct Check {
-
- Check() : m_prev_view(0) { }
-
- void operator()(const read_view_t* view)
- {
- ut_a(m_prev_view == NULL
- || m_prev_view->low_limit_no >= view->low_limit_no);
-
- m_prev_view = view;
- }
-
- const read_view_t* m_prev_view;
-};
-
-/*********************************************************************//**
-Validates a read view list. */
-static
-ibool
-read_view_list_validate(void)
-/*=========================*/
-{
- ut_ad(mutex_own(&trx_sys->mutex));
-
- ut_list_map(trx_sys->view_list, &read_view_t::view_list, Check());
-
- return(TRUE);
-}
-#endif
-
/*********************************************************************//**
Creates a read view object.
@return own: read view struct */
@@ -530,25 +477,6 @@ read_view_purge_open(
}
/*********************************************************************//**
-Remove a read view from the trx_sys->view_list. */
-UNIV_INTERN
-void
-read_view_remove(
-/*=============*/
- read_view_t* view) /*!< in: read view */
-{
- mutex_enter(&trx_sys->mutex);
-
- ut_ad(read_view_validate(view));
-
- UT_LIST_REMOVE(view_list, trx_sys->view_list, view);
-
- ut_ad(read_view_list_validate());
-
- mutex_exit(&trx_sys->mutex);
-}
-
-/*********************************************************************//**
Closes a consistent read view for MySQL. This function is called at an SQL
statement end if the trx isolation level is <= TRX_ISO_READ_COMMITTED. */
UNIV_INTERN
@@ -559,7 +487,7 @@ read_view_close_for_mysql(
{
ut_a(trx->global_read_view);
- read_view_remove(trx->global_read_view);
+ read_view_remove(trx->global_read_view, false);
mem_heap_empty(trx->global_read_view_heap);
@@ -692,7 +620,7 @@ read_cursor_view_close_for_mysql(
belong to this transaction */
trx->n_mysql_tables_in_use += curview->n_mysql_tables_in_use;
- read_view_remove(curview->read_view);
+ read_view_remove(curview->read_view, false);
trx->read_view = trx->global_read_view;
diff --git a/storage/innobase/rem/rem0cmp.cc b/storage/innobase/rem/rem0cmp.cc
index 19f5633953a..db0fdf3ee21 100644
--- a/storage/innobase/rem/rem0cmp.cc
+++ b/storage/innobase/rem/rem0cmp.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -30,6 +30,7 @@ Created 7/1/1994 Heikki Tuuri
#endif
#include "ha_prototypes.h"
+#include "handler0alter.h"
#include "srv0srv.h"
/* ALPHABETICAL ORDER
@@ -69,10 +70,12 @@ cmp_debug_dtuple_rec_with_match(
has an equal number or more fields than
dtuple */
const ulint* offsets,/*!< in: array returned by rec_get_offsets() */
- ulint* matched_fields);/*!< in/out: number of already
+ ulint n_cmp, /*!< in: number of fields to compare */
+ ulint* matched_fields)/*!< in/out: number of already
completely matched fields; when function
returns, contains the value for current
comparison */
+ __attribute__((nonnull, warn_unused_result));
#endif /* UNIV_DEBUG */
/*************************************************************//**
This function is used to compare two data fields for which the data type
@@ -621,14 +624,15 @@ respectively, when only the common first fields are compared, or until
the first externally stored field in rec */
UNIV_INTERN
int
-cmp_dtuple_rec_with_match(
-/*======================*/
+cmp_dtuple_rec_with_match_low(
+/*==========================*/
const dtuple_t* dtuple, /*!< in: data tuple */
const rec_t* rec, /*!< in: physical record which differs from
dtuple in some of the common fields, or which
has an equal number or more fields than
dtuple */
const ulint* offsets,/*!< in: array returned by rec_get_offsets() */
+ ulint n_cmp, /*!< in: number of fields to compare */
ulint* matched_fields, /*!< in/out: number of already completely
matched fields; when function returns,
contains the value for current comparison */
@@ -652,7 +656,7 @@ cmp_dtuple_rec_with_match(
ulint cur_field; /* current field number */
ulint cur_bytes; /* number of already matched bytes
in current field */
- int ret = 3333; /* return value */
+ int ret; /* return value */
ut_ad(dtuple && rec && matched_fields && matched_bytes);
ut_ad(dtuple_check_typed(dtuple));
@@ -661,7 +665,9 @@ cmp_dtuple_rec_with_match(
cur_field = *matched_fields;
cur_bytes = *matched_bytes;
- ut_ad(cur_field <= dtuple_get_n_fields_cmp(dtuple));
+ ut_ad(n_cmp > 0);
+ ut_ad(n_cmp <= dtuple_get_n_fields(dtuple));
+ ut_ad(cur_field <= n_cmp);
ut_ad(cur_field <= rec_offs_n_fields(offsets));
if (cur_bytes == 0 && cur_field == 0) {
@@ -681,7 +687,7 @@ cmp_dtuple_rec_with_match(
/* Match fields in a loop; stop if we run out of fields in dtuple
or find an externally stored field */
- while (cur_field < dtuple_get_n_fields_cmp(dtuple)) {
+ while (cur_field < n_cmp) {
ulint mtype;
ulint prtype;
@@ -838,7 +844,7 @@ next_field:
order_resolved:
ut_ad((ret >= - 1) && (ret <= 1));
ut_ad(ret == cmp_debug_dtuple_rec_with_match(dtuple, rec, offsets,
- matched_fields));
+ n_cmp, matched_fields));
ut_ad(*matched_fields == cur_field); /* In the debug version, the
above cmp_debug_... sets
*matched_fields to a value */
@@ -909,156 +915,181 @@ cmp_dtuple_is_prefix_of_rec(
}
/*************************************************************//**
-Compare two physical records that contain the same number of columns,
-none of which are stored externally.
-@return 1, 0, -1 if rec1 is greater, equal, less, respectively, than rec2 */
-UNIV_INTERN
+Compare two physical record fields.
+@retval 1 if rec1 field is greater than rec2
+@retval -1 if rec1 field is less than rec2
+@retval 0 if rec1 field equals to rec2 */
+static __attribute__((nonnull, warn_unused_result))
int
-cmp_rec_rec_simple(
-/*===============*/
+cmp_rec_rec_simple_field(
+/*=====================*/
const rec_t* rec1, /*!< in: physical record */
const rec_t* rec2, /*!< in: physical record */
const ulint* offsets1,/*!< in: rec_get_offsets(rec1, ...) */
const ulint* offsets2,/*!< in: rec_get_offsets(rec2, ...) */
const dict_index_t* index, /*!< in: data dictionary index */
- ibool* null_eq)/*!< out: set to TRUE if
- found matching null values */
+ ulint n) /*!< in: field to compare */
{
- ulint rec1_f_len; /*!< length of current field in rec1 */
- const byte* rec1_b_ptr; /*!< pointer to the current byte
- in rec1 field */
- ulint rec1_byte; /*!< value of current byte to be
- compared in rec1 */
- ulint rec2_f_len; /*!< length of current field in rec2 */
- const byte* rec2_b_ptr; /*!< pointer to the current byte
- in rec2 field */
- ulint rec2_byte; /*!< value of current byte to be
- compared in rec2 */
- ulint cur_field; /*!< current field number */
- ulint n_uniq;
-
- n_uniq = dict_index_get_n_unique(index);
- ut_ad(rec_offs_n_fields(offsets1) >= n_uniq);
- ut_ad(rec_offs_n_fields(offsets2) >= n_uniq);
-
- ut_ad(rec_offs_comp(offsets1) == rec_offs_comp(offsets2));
+ const byte* rec1_b_ptr;
+ const byte* rec2_b_ptr;
+ ulint rec1_f_len;
+ ulint rec2_f_len;
+ const dict_col_t* col = dict_index_get_nth_col(index, n);
- for (cur_field = 0; cur_field < n_uniq; cur_field++) {
+ ut_ad(!rec_offs_nth_extern(offsets1, n));
+ ut_ad(!rec_offs_nth_extern(offsets2, n));
- ulint cur_bytes;
- ulint mtype;
- ulint prtype;
-
- {
- const dict_col_t* col
- = dict_index_get_nth_col(index, cur_field);
+ rec1_b_ptr = rec_get_nth_field(rec1, offsets1, n, &rec1_f_len);
+ rec2_b_ptr = rec_get_nth_field(rec2, offsets2, n, &rec2_f_len);
- mtype = col->mtype;
- prtype = col->prtype;
+ if (rec1_f_len == UNIV_SQL_NULL || rec2_f_len == UNIV_SQL_NULL) {
+ if (rec1_f_len == rec2_f_len) {
+ return(0);
}
+ /* We define the SQL null to be the smallest possible
+ value of a field in the alphabetical order */
+ return(rec1_f_len == UNIV_SQL_NULL ? -1 : 1);
+ }
- ut_ad(!rec_offs_nth_extern(offsets1, cur_field));
- ut_ad(!rec_offs_nth_extern(offsets2, cur_field));
-
- rec1_b_ptr = rec_get_nth_field(rec1, offsets1,
- cur_field, &rec1_f_len);
- rec2_b_ptr = rec_get_nth_field(rec2, offsets2,
- cur_field, &rec2_f_len);
+ if (col->mtype >= DATA_FLOAT
+ || (col->mtype == DATA_BLOB
+ && !(col->prtype & DATA_BINARY_TYPE)
+ && dtype_get_charset_coll(col->prtype)
+ != DATA_MYSQL_LATIN1_SWEDISH_CHARSET_COLL)) {
+ return(cmp_whole_field(col->mtype, col->prtype,
+ rec1_b_ptr, (unsigned) rec1_f_len,
+ rec2_b_ptr, (unsigned) rec2_f_len));
+ }
- if (rec1_f_len == UNIV_SQL_NULL
- || rec2_f_len == UNIV_SQL_NULL) {
+ /* Compare the fields */
+ for (ulint cur_bytes = 0;; cur_bytes++, rec1_b_ptr++, rec2_b_ptr++) {
+ ulint rec1_byte;
+ ulint rec2_byte;
- if (rec1_f_len == rec2_f_len) {
- if (null_eq) {
- *null_eq = TRUE;
- }
+ if (rec2_f_len <= cur_bytes) {
+ if (rec1_f_len <= cur_bytes) {
+ return(0);
+ }
- goto next_field;
+ rec2_byte = dtype_get_pad_char(
+ col->mtype, col->prtype);
- } else if (rec2_f_len == UNIV_SQL_NULL) {
+ if (rec2_byte == ULINT_UNDEFINED) {
+ return(1);
+ }
+ } else {
+ rec2_byte = *rec2_b_ptr;
+ }
- /* We define the SQL null to be the
- smallest possible value of a field
- in the alphabetical order */
+ if (rec1_f_len <= cur_bytes) {
+ rec1_byte = dtype_get_pad_char(
+ col->mtype, col->prtype);
- return(1);
- } else {
+ if (rec1_byte == ULINT_UNDEFINED) {
return(-1);
}
+ } else {
+ rec1_byte = *rec1_b_ptr;
}
- if (mtype >= DATA_FLOAT
- || (mtype == DATA_BLOB
- && 0 == (prtype & DATA_BINARY_TYPE)
- && dtype_get_charset_coll(prtype)
- != DATA_MYSQL_LATIN1_SWEDISH_CHARSET_COLL)) {
- int ret = cmp_whole_field(mtype, prtype,
- rec1_b_ptr,
- (unsigned) rec1_f_len,
- rec2_b_ptr,
- (unsigned) rec2_f_len);
- if (ret) {
- return(ret);
- }
+ if (rec1_byte == rec2_byte) {
+ /* If the bytes are equal, they will remain such
+ even after the collation transformation below */
+ continue;
+ }
- goto next_field;
+ if (col->mtype <= DATA_CHAR
+ || (col->mtype == DATA_BLOB
+ && !(col->prtype & DATA_BINARY_TYPE))) {
+
+ rec1_byte = cmp_collate(rec1_byte);
+ rec2_byte = cmp_collate(rec2_byte);
}
- /* Compare the fields */
- for (cur_bytes = 0;; cur_bytes++, rec1_b_ptr++, rec2_b_ptr++) {
- if (rec2_f_len <= cur_bytes) {
+ if (rec1_byte < rec2_byte) {
+ return(-1);
+ } else if (rec1_byte > rec2_byte) {
+ return(1);
+ }
+ }
+}
- if (rec1_f_len <= cur_bytes) {
+/*************************************************************//**
+Compare two physical records that contain the same number of columns,
+none of which are stored externally.
+@retval 1 if rec1 (including non-ordering columns) is greater than rec2
+@retval -1 if rec1 (including non-ordering columns) is less than rec2
+@retval 0 if rec1 is a duplicate of rec2 */
+UNIV_INTERN
+int
+cmp_rec_rec_simple(
+/*===============*/
+ const rec_t* rec1, /*!< in: physical record */
+ const rec_t* rec2, /*!< in: physical record */
+ const ulint* offsets1,/*!< in: rec_get_offsets(rec1, ...) */
+ const ulint* offsets2,/*!< in: rec_get_offsets(rec2, ...) */
+ const dict_index_t* index, /*!< in: data dictionary index */
+ struct TABLE* table) /*!< in: MySQL table, for reporting
+ duplicate key value if applicable,
+ or NULL */
+{
+ ulint n;
+ ulint n_uniq = dict_index_get_n_unique(index);
+ bool null_eq = false;
- goto next_field;
- }
+ ut_ad(rec_offs_n_fields(offsets1) >= n_uniq);
+ ut_ad(rec_offs_n_fields(offsets2) == rec_offs_n_fields(offsets2));
- rec2_byte = dtype_get_pad_char(mtype, prtype);
+ ut_ad(rec_offs_comp(offsets1) == rec_offs_comp(offsets2));
- if (rec2_byte == ULINT_UNDEFINED) {
- return(1);
- }
- } else {
- rec2_byte = *rec2_b_ptr;
- }
+ for (n = 0; n < n_uniq; n++) {
+ int cmp = cmp_rec_rec_simple_field(
+ rec1, rec2, offsets1, offsets2, index, n);
- if (rec1_f_len <= cur_bytes) {
- rec1_byte = dtype_get_pad_char(mtype, prtype);
+ if (cmp) {
+ return(cmp);
+ }
- if (rec1_byte == ULINT_UNDEFINED) {
- return(-1);
- }
- } else {
- rec1_byte = *rec1_b_ptr;
- }
+ /* If the fields are internally equal, they must both
+ be NULL or non-NULL. */
+ ut_ad(rec_offs_nth_sql_null(offsets1, n)
+ == rec_offs_nth_sql_null(offsets2, n));
- if (rec1_byte == rec2_byte) {
- /* If the bytes are equal, they will remain
- such even after the collation transformation
- below */
+ if (rec_offs_nth_sql_null(offsets1, n)) {
+ ut_ad(!(dict_index_get_nth_col(index, n)->prtype
+ & DATA_NOT_NULL));
+ null_eq = true;
+ }
+ }
- continue;
- }
+ /* If we ran out of fields, the ordering columns of rec1 were
+ equal to rec2. Issue a duplicate key error if needed. */
- if (mtype <= DATA_CHAR
- || (mtype == DATA_BLOB
- && !(prtype & DATA_BINARY_TYPE))) {
+ if (!null_eq && table && dict_index_is_unique(index)) {
+ /* Report erroneous row using new version of table. */
+ innobase_rec_to_mysql(table, rec1, index, offsets1);
+ return(0);
+ }
- rec1_byte = cmp_collate(rec1_byte);
- rec2_byte = cmp_collate(rec2_byte);
- }
+ /* Else, keep comparing so that we have the full internal
+ order. */
+ for (; n < dict_index_get_n_fields(index); n++) {
+ int cmp = cmp_rec_rec_simple_field(
+ rec1, rec2, offsets1, offsets2, index, n);
- if (rec1_byte < rec2_byte) {
- return(-1);
- } else if (rec1_byte > rec2_byte) {
- return(1);
- }
+ if (cmp) {
+ return(cmp);
}
-next_field:
- continue;
+
+ /* If the fields are internally equal, they must both
+ be NULL or non-NULL. */
+ ut_ad(rec_offs_nth_sql_null(offsets1, n)
+ == rec_offs_nth_sql_null(offsets2, n));
}
- /* If we ran out of fields, rec1 was equal to rec2. */
+ /* This should never be reached. Internally, an index must
+ never contain duplicate entries. */
+ ut_ad(0);
return(0);
}
@@ -1327,6 +1358,7 @@ cmp_debug_dtuple_rec_with_match(
has an equal number or more fields than
dtuple */
const ulint* offsets,/*!< in: array returned by rec_get_offsets() */
+ ulint n_cmp, /*!< in: number of fields to compare */
ulint* matched_fields) /*!< in/out: number of already
completely matched fields; when function
returns, contains the value for current
@@ -1339,14 +1371,16 @@ cmp_debug_dtuple_rec_with_match(
field data */
ulint rec_f_len; /* length of current field in rec */
const byte* rec_f_data; /* pointer to the current rec field */
- int ret = 3333; /* return value */
+ int ret; /* return value */
ulint cur_field; /* current field number */
ut_ad(dtuple && rec && matched_fields);
ut_ad(dtuple_check_typed(dtuple));
ut_ad(rec_offs_validate(rec, NULL, offsets));
- ut_ad(*matched_fields <= dtuple_get_n_fields_cmp(dtuple));
+ ut_ad(n_cmp > 0);
+ ut_ad(n_cmp <= dtuple_get_n_fields(dtuple));
+ ut_ad(*matched_fields <= n_cmp);
ut_ad(*matched_fields <= rec_offs_n_fields(offsets));
cur_field = *matched_fields;
@@ -1372,7 +1406,7 @@ cmp_debug_dtuple_rec_with_match(
/* Match fields in a loop; stop if we run out of fields in dtuple */
- while (cur_field < dtuple_get_n_fields_cmp(dtuple)) {
+ while (cur_field < n_cmp) {
ulint mtype;
ulint prtype;
diff --git a/storage/innobase/rem/rem0rec.cc b/storage/innobase/rem/rem0rec.cc
index d56eb59e0bb..3a5d2f579c3 100644
--- a/storage/innobase/rem/rem0rec.cc
+++ b/storage/innobase/rem/rem0rec.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -29,6 +29,7 @@ Created 5/30/1994 Heikki Tuuri
#include "rem0rec.ic"
#endif
+#include "page0page.h"
#include "mtr0mtr.h"
#include "mtr0log.h"
#include "fts0fts.h"
@@ -162,9 +163,9 @@ UNIV_INTERN
ulint
rec_get_n_extern_new(
/*=================*/
- const rec_t* rec, /*!< in: compact physical record */
- dict_index_t* index, /*!< in: record descriptor */
- ulint n) /*!< in: number of columns to scan */
+ const rec_t* rec, /*!< in: compact physical record */
+ const dict_index_t* index, /*!< in: record descriptor */
+ ulint n) /*!< in: number of columns to scan */
{
const byte* nulls;
const byte* lens;
@@ -246,7 +247,7 @@ rec_init_offsets_comp_ordinary(
/*===========================*/
const rec_t* rec, /*!< in: physical record in
ROW_FORMAT=COMPACT */
- ibool temp, /*!< in: whether to use the
+ bool temp, /*!< in: whether to use the
format for temporary files in
index creation */
const dict_index_t* index, /*!< in: record descriptor */
@@ -256,15 +257,15 @@ rec_init_offsets_comp_ordinary(
ulint i = 0;
ulint offs = 0;
ulint any_ext = 0;
+ ulint n_null = index->n_nullable;
const byte* nulls = temp
? rec - 1
: rec - (1 + REC_N_NEW_EXTRA_BYTES);
- const byte* lens = nulls
- - UT_BITS_IN_BYTES(index->n_nullable);
+ const byte* lens = nulls - UT_BITS_IN_BYTES(n_null);
ulint null_mask = 1;
#ifdef UNIV_DEBUG
- /* We cannot invoke rec_offs_make_valid() here if temp=TRUE.
+ /* We cannot invoke rec_offs_make_valid() here if temp=true.
Similarly, rec_offs_validate() will fail in that case, because
it invokes rec_get_status(). */
offsets[2] = (ulint) rec;
@@ -276,7 +277,7 @@ rec_init_offsets_comp_ordinary(
if (temp && dict_table_is_comp(index->table)) {
/* No need to do adjust fixed_len=0. We only need to
adjust it for ROW_FORMAT=REDUNDANT. */
- temp = FALSE;
+ temp = false;
}
/* read the lengths of fields 0..n */
@@ -289,6 +290,7 @@ rec_init_offsets_comp_ordinary(
if (!(col->prtype & DATA_NOT_NULL)) {
/* nullable field => read the null flag */
+ ut_ad(n_null--);
if (UNIV_UNLIKELY(!(byte) null_mask)) {
nulls--;
@@ -404,7 +406,7 @@ rec_init_offsets(
break;
case REC_STATUS_ORDINARY:
rec_init_offsets_comp_ordinary(
- rec, FALSE, index, offsets);
+ rec, false, index, offsets);
return;
}
@@ -793,28 +795,27 @@ rec_get_converted_size_comp_prefix_low(
const dfield_t* fields, /*!< in: array of data fields */
ulint n_fields,/*!< in: number of data fields */
ulint* extra, /*!< out: extra size */
- ibool temp) /*!< in: whether this is a
+ bool temp) /*!< in: whether this is a
temporary file record */
{
ulint extra_size;
ulint data_size;
ulint i;
- ut_ad(index);
- ut_ad(fields);
+ ulint n_null = index->n_nullable;
ut_ad(n_fields > 0);
ut_ad(n_fields <= dict_index_get_n_fields(index));
ut_ad(!temp || extra);
extra_size = temp
- ? UT_BITS_IN_BYTES(index->n_nullable)
+ ? UT_BITS_IN_BYTES(n_null)
: REC_N_NEW_EXTRA_BYTES
- + UT_BITS_IN_BYTES(index->n_nullable);
+ + UT_BITS_IN_BYTES(n_null);
data_size = 0;
if (temp && dict_table_is_comp(index->table)) {
/* No need to do adjust fixed_len=0. We only need to
adjust it for ROW_FORMAT=REDUNDANT. */
- temp = FALSE;
+ temp = false;
}
/* read the lengths of fields 0..n */
@@ -830,6 +831,8 @@ rec_get_converted_size_comp_prefix_low(
ut_ad(dict_col_type_assert_equal(col,
dfield_get_type(&fields[i])));
+ /* All NULLable fields must be included in the n_null count. */
+ ut_ad((col->prtype & DATA_NOT_NULL) || n_null--);
if (dfield_is_null(&fields[i])) {
/* No length is stored for NULL fields. */
@@ -844,7 +847,7 @@ rec_get_converted_size_comp_prefix_low(
if (temp && fixed_len
&& !dict_col_get_fixed_size(col, temp)) {
fixed_len = 0;
- }
+ }
/* If the maximum length of a variable-length field
is up to 255 bytes, the actual length is always stored
in one byte. If the maximum length is more than 255
@@ -903,7 +906,7 @@ rec_get_converted_size_comp_prefix(
{
ut_ad(dict_table_is_comp(index->table));
return(rec_get_converted_size_comp_prefix_low(
- index, fields, n_fields, extra, FALSE));
+ index, fields, n_fields, extra, false));
}
/**********************************************************//**
@@ -923,8 +926,6 @@ rec_get_converted_size_comp(
ulint* extra) /*!< out: extra size */
{
ulint size;
- ut_ad(index);
- ut_ad(fields);
ut_ad(n_fields > 0);
switch (UNIV_EXPECT(status, REC_STATUS_ORDINARY)) {
@@ -951,7 +952,7 @@ rec_get_converted_size_comp(
}
return(size + rec_get_converted_size_comp_prefix_low(
- index, fields, n_fields, extra, FALSE));
+ index, fields, n_fields, extra, false));
}
/***********************************************************//**
@@ -1137,7 +1138,7 @@ rec_convert_dtuple_to_rec_comp(
const dfield_t* fields, /*!< in: array of data fields */
ulint n_fields,/*!< in: number of data fields */
ulint status, /*!< in: status bits of the record */
- ibool temp) /*!< in: whether to use the
+ bool temp) /*!< in: whether to use the
format for temporary files in
index creation */
{
@@ -1151,6 +1152,8 @@ rec_convert_dtuple_to_rec_comp(
ulint n_node_ptr_field;
ulint fixed_len;
ulint null_mask = 1;
+ ulint n_null;
+
ut_ad(temp || dict_table_is_comp(index->table));
ut_ad(n_fields > 0);
@@ -1162,7 +1165,7 @@ rec_convert_dtuple_to_rec_comp(
if (dict_table_is_comp(index->table)) {
/* No need to do adjust fixed_len=0. We only
need to adjust it for ROW_FORMAT=REDUNDANT. */
- temp = FALSE;
+ temp = false;
}
} else {
nulls = rec - (REC_N_NEW_EXTRA_BYTES + 1);
@@ -1189,7 +1192,8 @@ rec_convert_dtuple_to_rec_comp(
}
end = rec;
- lens = nulls - UT_BITS_IN_BYTES(index->n_nullable);
+ n_null = index->n_nullable;
+ lens = nulls - UT_BITS_IN_BYTES(n_null);
/* clear the SQL-null flags */
memset(lens + 1, 0, nulls - lens);
@@ -1211,7 +1215,7 @@ rec_convert_dtuple_to_rec_comp(
if (!(dtype_get_prtype(type) & DATA_NOT_NULL)) {
/* nullable field */
- ut_ad(index->n_nullable > 0);
+ ut_ad(n_null--);
if (UNIV_UNLIKELY(!(byte) null_mask)) {
nulls--;
@@ -1303,13 +1307,12 @@ rec_convert_dtuple_to_rec_new(
rec_t* rec;
status = dtuple_get_info_bits(dtuple) & REC_NEW_STATUS_MASK;
- rec_get_converted_size_comp(index, status,
- dtuple->fields, dtuple->n_fields,
- &extra_size);
+ rec_get_converted_size_comp(
+ index, status, dtuple->fields, dtuple->n_fields, &extra_size);
rec = buf + extra_size;
rec_convert_dtuple_to_rec_comp(
- rec, index, dtuple->fields, dtuple->n_fields, status, FALSE);
+ rec, index, dtuple->fields, dtuple->n_fields, status, false);
/* Set the info bits of the record */
rec_set_info_and_status_bits(rec, dtuple_get_info_bits(dtuple));
@@ -1385,7 +1388,7 @@ rec_get_converted_size_temp(
ulint* extra) /*!< out: extra size */
{
return(rec_get_converted_size_comp_prefix_low(
- index, fields, n_fields, extra, TRUE));
+ index, fields, n_fields, extra, true));
}
/******************************************************//**
@@ -1400,7 +1403,7 @@ rec_init_offsets_temp(
ulint* offsets)/*!< in/out: array of offsets;
in: n=rec_offs_n_fields(offsets) */
{
- rec_init_offsets_comp_ordinary(rec, TRUE, index, offsets);
+ rec_init_offsets_comp_ordinary(rec, true, index, offsets);
}
/*********************************************************//**
@@ -1416,7 +1419,7 @@ rec_convert_dtuple_to_temp(
ulint n_fields) /*!< in: number of fields */
{
rec_convert_dtuple_to_rec_comp(rec, index, fields, n_fields,
- REC_STATUS_ORDINARY, TRUE);
+ REC_STATUS_ORDINARY, true);
}
/**************************************************************//**
@@ -1906,4 +1909,47 @@ rec_print(
}
}
}
+
+# ifdef UNIV_DEBUG
+/************************************************************//**
+Reads the DB_TRX_ID of a clustered index record.
+@return the value of DB_TRX_ID */
+UNIV_INTERN
+trx_id_t
+rec_get_trx_id(
+/*===========*/
+ const rec_t* rec, /*!< in: record */
+ const dict_index_t* index) /*!< in: clustered index */
+{
+ const page_t* page
+ = page_align(rec);
+ ulint trx_id_col
+ = dict_index_get_sys_col_pos(index, DATA_TRX_ID);
+ const byte* trx_id;
+ ulint len;
+ mem_heap_t* heap = NULL;
+ ulint offsets_[REC_OFFS_NORMAL_SIZE];
+ ulint* offsets = offsets_;
+ rec_offs_init(offsets_);
+
+ ut_ad(fil_page_get_type(page) == FIL_PAGE_INDEX);
+ ut_ad(mach_read_from_8(page + PAGE_HEADER + PAGE_INDEX_ID)
+ == index->id);
+ ut_ad(dict_index_is_clust(index));
+ ut_ad(trx_id_col > 0);
+ ut_ad(trx_id_col != ULINT_UNDEFINED);
+
+ offsets = rec_get_offsets(rec, index, offsets, trx_id_col + 1, &heap);
+
+ trx_id = rec_get_nth_field(rec, offsets, trx_id_col, &len);
+
+ ut_ad(len == DATA_TRX_ID_LEN);
+
+ if (heap) {
+ mem_heap_free(heap);
+ }
+
+ return(trx_read_trx_id(trx_id));
+}
+# endif /* UNIV_DEBUG */
#endif /* !UNIV_HOTBACKUP */
diff --git a/storage/innobase/row/row0ext.cc b/storage/innobase/row/row0ext.cc
index 8d4da9f034b..f084fa09c5a 100644
--- a/storage/innobase/row/row0ext.cc
+++ b/storage/innobase/row/row0ext.cc
@@ -95,6 +95,8 @@ row_ext_create(
row_ext_t* ret;
+ ut_ad(n_ext > 0);
+
ret = static_cast<row_ext_t*>(
mem_heap_alloc(heap,
(sizeof *ret) + (n_ext - 1) * sizeof ret->len));
diff --git a/storage/innobase/row/row0ftsort.cc b/storage/innobase/row/row0ftsort.cc
index 50b681361d8..9a6af50e09d 100644
--- a/storage/innobase/row/row0ftsort.cc
+++ b/storage/innobase/row/row0ftsort.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2010, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2010, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -23,6 +23,7 @@ Create Full Text Index with (parallel) merge sort
Created 10/13/2010 Jimmy Yang
*******************************************************/
+#include "dict0dict.h" /* dict_table_stats_lock() */
#include "row0merge.h"
#include "pars0pars.h"
#include "row0ftsort.h"
@@ -47,9 +48,6 @@ Created 10/13/2010 Jimmy Yang
/** Parallel sort degree */
UNIV_INTERN ulong fts_sort_pll_degree = 2;
-/** Parallel sort buffer size */
-UNIV_INTERN ulong srv_sort_buf_size = 1048576;
-
/*********************************************************************//**
Create a temporary "fts sort index" used to merge sort the
tokenized doc string. The index has three "fields":
@@ -124,7 +122,7 @@ row_merge_create_fts_sort_index(
if (DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS_ADD_DOC_ID)) {
/* If Doc ID column is being added by this create
index, then just check the number of rows in the table */
- if (table->stat_n_rows < MAX_DOC_ID_OPT_VAL) {
+ if (dict_table_get_n_rows(table) < MAX_DOC_ID_OPT_VAL) {
*opt_doc_id_size = TRUE;
}
} else {
@@ -173,10 +171,10 @@ ibool
row_fts_psort_info_init(
/*====================*/
trx_t* trx, /*!< in: transaction */
- struct TABLE* table, /*!< in: MySQL table object */
+ row_merge_dup_t* dup, /*!< in,own: descriptor of
+ FTS index being created */
const dict_table_t* new_table,/*!< in: table on which indexes are
created */
- dict_index_t* index, /*!< in: FTS index to be created */
ibool opt_doc_id_size,
/*!< in: whether to use 4 bytes
instead of 8 bytes integer to
@@ -192,7 +190,6 @@ row_fts_psort_info_init(
fts_psort_t* psort_info = NULL;
fts_psort_t* merge_info = NULL;
ulint block_size;
- os_event_t sort_event;
ibool ret = TRUE;
block_size = 3 * srv_sort_buf_size;
@@ -201,28 +198,28 @@ row_fts_psort_info_init(
fts_sort_pll_degree * sizeof *psort_info));
if (!psort_info) {
- return FALSE;
+ ut_free(dup);
+ return(FALSE);
}
- sort_event = os_event_create(NULL);
-
/* Common Info for all sort threads */
common_info = static_cast<fts_psort_common_t*>(
mem_alloc(sizeof *common_info));
- common_info->table = table;
+ if (!common_info) {
+ ut_free(dup);
+ mem_free(psort_info);
+ return(FALSE);
+ }
+
+ common_info->dup = dup;
common_info->new_table = (dict_table_t*) new_table;
common_info->trx = trx;
- common_info->sort_index = index;
common_info->all_info = psort_info;
- common_info->sort_event = sort_event;
+ common_info->sort_event = os_event_create();
+ common_info->merge_event = os_event_create();
common_info->opt_doc_id_size = opt_doc_id_size;
- if (!common_info) {
- mem_free(psort_info);
- return FALSE;
- }
-
/* There will be FTS_NUM_AUX_INDEX number of "sort buckets" for
each parallel sort thread. Each "sort bucket" holds records for
a particular "FTS index partition" */
@@ -242,9 +239,12 @@ row_fts_psort_info_init(
}
psort_info[j].merge_buf[i] = row_merge_buf_create(
- index);
+ dup->index);
- row_merge_file_create(psort_info[j].merge_file[i]);
+ if (row_merge_file_create(psort_info[j].merge_file[i])
+ < 0) {
+ goto func_exit;
+ }
/* Need to align memory for O_DIRECT write */
psort_info[j].block_alloc[i] =
@@ -314,6 +314,9 @@ row_fts_psort_info_destroy(
}
}
+ os_event_free(merge_info[0].psort_common->sort_event);
+ os_event_free(merge_info[0].psort_common->merge_event);
+ ut_free(merge_info[0].psort_common->dup);
mem_free(merge_info[0].psort_common);
mem_free(psort_info);
}
@@ -433,12 +436,11 @@ row_merge_fts_doc_tokenize(
ut_a(t_ctx->buf_used < FTS_NUM_AUX_INDEX);
idx = t_ctx->buf_used;
- buf->tuples[buf->n_tuples + n_tuple[idx]] = field =
- static_cast<dfield_t*>(mem_heap_alloc(
- buf->heap,
- FTS_NUM_FIELDS_SORT * sizeof *field));
+ mtuple_t* mtuple = &buf->tuples[buf->n_tuples + n_tuple[idx]];
- ut_a(field);
+ field = mtuple->fields = static_cast<dfield_t*>(
+ mem_heap_alloc(buf->heap,
+ FTS_NUM_FIELDS_SORT * sizeof *field));
/* The first field is the tokenized word */
dfield_set_data(field, t_str.f_str, t_str.f_len);
@@ -522,6 +524,10 @@ row_merge_fts_doc_tokenize(
/* Update the data length and the number of new word tuples
added in this round of tokenization */
for (i = 0; i < FTS_NUM_AUX_INDEX; i++) {
+ /* The computation of total_size below assumes that no
+ delete-mark flags will be stored and that all fields
+ are NOT NULL and fixed-length. */
+
sort_buf[i]->total_size += data_size[i];
sort_buf[i]->n_tuples += n_tuple[i];
@@ -560,7 +566,7 @@ fts_parallel_tokenization(
ulint mycount[FTS_NUM_AUX_INDEX];
ib_uint64_t total_rec = 0;
ulint num_doc_processed = 0;
- doc_id_t last_doc_id;
+ doc_id_t last_doc_id = 0;
ulint zip_size;
mem_heap_t* blob_heap = NULL;
fts_doc_t doc;
@@ -581,10 +587,10 @@ fts_parallel_tokenization(
memset(mycount, 0, FTS_NUM_AUX_INDEX * sizeof(int));
doc.charset = fts_index_get_charset(
- psort_info->psort_common->sort_index);
+ psort_info->psort_common->dup->index);
idx_field = dict_index_get_nth_field(
- psort_info->psort_common->sort_index, 0);
+ psort_info->psort_common->dup->index, 0);
word_dtype.prtype = idx_field->col->prtype;
word_dtype.mbminmaxlen = idx_field->col->mbminmaxlen;
word_dtype.mtype = (strcmp(doc.charset->name, "latin1_swedish_ci") == 0)
@@ -742,7 +748,12 @@ loop:
}
if (doc_item) {
- prev_doc_item = doc_item;
+ prev_doc_item = doc_item;
+
+ if (last_doc_id != doc_item->doc_id) {
+ t_ctx.init_pos = 0;
+ }
+
retried = 0;
} else if (psort_info->state == FTS_PARENT_COMPLETE) {
retried++;
@@ -751,16 +762,51 @@ loop:
goto loop;
exit:
+ /* Do a final sort of the last (or latest) batch of records
+ in block memory. Flush them to temp file if records cannot
+ be hold in one block memory */
for (i = 0; i < FTS_NUM_AUX_INDEX; i++) {
if (t_ctx.rows_added[i]) {
row_merge_buf_sort(buf[i], NULL);
row_merge_buf_write(
- buf[i], (const merge_file_t*) merge_file[i],
- block[i]);
- row_merge_write(merge_file[i]->fd,
- merge_file[i]->offset++, block[i]);
+ buf[i], merge_file[i], block[i]);
+
+ /* Write to temp file, only if records have
+ been flushed to temp file before (offset > 0):
+ The pseudo code for sort is following:
+
+ while (there are rows) {
+ tokenize rows, put result in block[]
+ if (block[] runs out) {
+ sort rows;
+ write to temp file with
+ row_merge_write();
+ offset++;
+ }
+ }
+
+ # write out the last batch
+ if (offset > 0) {
+ row_merge_write();
+ offset++;
+ } else {
+ # no need to write anything
+ offset stay as 0
+ }
+
+ so if merge_file[i]->offset is 0 when we come to
+ here as the last batch, this means rows have
+ never flush to temp file, it can be held all in
+ memory */
+ if (merge_file[i]->offset != 0) {
+ row_merge_write(merge_file[i]->fd,
+ merge_file[i]->offset++,
+ block[i]);
+
+ UNIV_MEM_INVALID(block[i][0],
+ srv_sort_buf_size);
+ }
- UNIV_MEM_INVALID(block[i][0], srv_sort_buf_size);
buf[i] = row_merge_buf_empty(buf[i]);
t_ctx.rows_added[i] = 0;
}
@@ -776,16 +822,19 @@ exit:
continue;
}
- tmpfd[i] = innobase_mysql_tmpfile();
+ tmpfd[i] = row_merge_file_create_low();
+ if (tmpfd[i] < 0) {
+ goto func_exit;
+ }
+
row_merge_sort(psort_info->psort_common->trx,
- psort_info->psort_common->sort_index,
- merge_file[i],
- (row_merge_block_t*) block[i], &tmpfd[i],
- psort_info->psort_common->table);
+ psort_info->psort_common->dup,
+ merge_file[i], block[i], &tmpfd[i]);
total_rec += merge_file[i]->n_rec;
close(tmpfd[i]);
}
+func_exit:
if (fts_enable_diag_print) {
DEBUG_FTS_SORT_PRINT(" InnoDB_FTS: complete merge sort\n");
}
@@ -794,8 +843,14 @@ exit:
psort_info->child_status = FTS_CHILD_COMPLETE;
os_event_set(psort_info->psort_common->sort_event);
+ psort_info->child_status = FTS_CHILD_EXITING;
+
+#ifdef __WIN__
+ CloseHandle(psort_info->thread_hdl);
+#endif /*__WIN__ */
os_thread_exit(NULL);
+
OS_THREAD_DUMMY_RETURN;
}
@@ -812,8 +867,9 @@ row_fts_start_psort(
for (i = 0; i < fts_sort_pll_degree; i++) {
psort_info[i].psort_id = i;
- os_thread_create(fts_parallel_tokenization,
- (void*) &psort_info[i], &thd_id);
+ psort_info[i].thread_hdl = os_thread_create(
+ fts_parallel_tokenization,
+ (void*) &psort_info[i], &thd_id);
}
}
@@ -833,14 +889,20 @@ fts_parallel_merge(
id = psort_info->psort_id;
- row_fts_merge_insert(psort_info->psort_common->sort_index,
+ row_fts_merge_insert(psort_info->psort_common->dup->index,
psort_info->psort_common->new_table,
psort_info->psort_common->all_info, id);
psort_info->child_status = FTS_CHILD_COMPLETE;
- os_event_set(psort_info->psort_common->sort_event);
+ os_event_set(psort_info->psort_common->merge_event);
+ psort_info->child_status = FTS_CHILD_EXITING;
+
+#ifdef __WIN__
+ CloseHandle(psort_info->thread_hdl);
+#endif /*__WIN__ */
os_thread_exit(NULL);
+
OS_THREAD_DUMMY_RETURN;
}
@@ -860,16 +922,16 @@ row_fts_start_parallel_merge(
merge_info[i].psort_id = i;
merge_info[i].child_status = 0;
- os_thread_create(fts_parallel_merge,
- (void*) &merge_info[i], &thd_id);
+ merge_info[i].thread_hdl = os_thread_create(
+ fts_parallel_merge, (void*) &merge_info[i], &thd_id);
}
}
/********************************************************************//**
Insert processed FTS data to auxillary index tables.
@return DB_SUCCESS if insertion runs fine */
-UNIV_INTERN
-ulint
+static __attribute__((nonnull))
+dberr_t
row_merge_write_fts_word(
/*=====================*/
trx_t* trx, /*!< in: transaction */
@@ -880,15 +942,15 @@ row_merge_write_fts_word(
CHARSET_INFO* charset) /*!< in: charset */
{
ulint selected;
- ulint ret = DB_SUCCESS;
+ dberr_t ret = DB_SUCCESS;
selected = fts_select_index(
charset, word->text.f_str, word->text.f_len);
fts_table->suffix = fts_get_suffix(selected);
/* Pop out each fts_node in word->nodes write them to auxiliary table */
- while(ib_vector_size(word->nodes) > 0) {
- ulint error;
+ while (ib_vector_size(word->nodes) > 0) {
+ dberr_t error;
fts_node_t* fts_node;
fts_node = static_cast<fts_node_t*>(ib_vector_pop(word->nodes));
@@ -900,8 +962,8 @@ row_merge_write_fts_word(
if (error != DB_SUCCESS) {
fprintf(stderr, "InnoDB: failed to write"
" word %s to FTS auxiliary index"
- " table, error (%lu) \n",
- word->text.f_str, error);
+ " table, error (%s) \n",
+ word->text.f_str, ut_strerr(error));
ret = error;
}
@@ -1064,7 +1126,6 @@ row_fts_sel_tree_propagate(
int child_left;
int child_right;
int selected;
- ibool null_eq = FALSE;
/* Find which parent this value will be propagated to */
parent = (propogated - 1) / 2;
@@ -1083,10 +1144,10 @@ row_fts_sel_tree_propagate(
} else if (child_right == -1
|| mrec[child_right] == NULL) {
selected = child_left;
- } else if (row_merge_cmp(mrec[child_left], mrec[child_right],
- offsets[child_left],
- offsets[child_right],
- index, &null_eq) < 0) {
+ } else if (cmp_rec_rec_simple(mrec[child_left], mrec[child_right],
+ offsets[child_left],
+ offsets[child_right],
+ index, NULL) < 0) {
selected = child_left;
} else {
selected = child_right;
@@ -1143,8 +1204,6 @@ row_fts_build_sel_tree_level(
num_item = (1 << level);
for (i = 0; i < num_item; i++) {
- ibool null_eq = FALSE;
-
child_left = sel_tree[(start + i) * 2 + 1];
child_right = sel_tree[(start + i) * 2 + 2];
@@ -1174,14 +1233,12 @@ row_fts_build_sel_tree_level(
}
/* Select the smaller one to set parent pointer */
- if (row_merge_cmp(mrec[child_left], mrec[child_right],
- offsets[child_left],
- offsets[child_right],
- index, &null_eq) < 0) {
- sel_tree[start + i] = child_left;
- } else {
- sel_tree[start + i] = child_right;
- }
+ int cmp = cmp_rec_rec_simple(
+ mrec[child_left], mrec[child_right],
+ offsets[child_left], offsets[child_right],
+ index, NULL);
+
+ sel_tree[start + i] = cmp < 0 ? child_left : child_right;
}
}
@@ -1231,7 +1288,7 @@ Read sorted file containing index data tuples and insert these data
tuples to the index
@return DB_SUCCESS or error number */
UNIV_INTERN
-ulint
+dberr_t
row_fts_merge_insert(
/*=================*/
dict_index_t* index, /*!< in: index */
@@ -1243,7 +1300,7 @@ row_fts_merge_insert(
const byte** b;
mem_heap_t* tuple_heap;
mem_heap_t* heap;
- ulint error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
ulint* foffs;
ulint** offsets;
fts_tokenizer_word_t new_word;
@@ -1317,7 +1374,7 @@ row_fts_merge_insert(
count_diag += (int) psort_info[i].merge_file[id]->n_rec;
}
- if (fts_enable_diag_print) {
+ if (fts_enable_diag_print) {
ut_print_timestamp(stderr);
fprintf(stderr, " InnoDB_FTS: to inserted %lu records\n",
(ulong) count_diag);
@@ -1349,8 +1406,13 @@ row_fts_merge_insert(
/* No Rows to read */
mrec[i] = b[i] = NULL;
} else {
- if (!row_merge_read(fd[i], foffs[i],
- (row_merge_block_t*) block[i])) {
+ /* Read from temp file only if it has been
+ written to. Otherwise, block memory holds
+ all the sorted records */
+ if (psort_info[i].merge_file[id]->offset > 0
+ && (!row_merge_read(
+ fd[i], foffs[i],
+ (row_merge_block_t*) block[i]))) {
error = DB_CORRUPTION;
goto exit;
}
@@ -1386,14 +1448,14 @@ row_fts_merge_insert(
}
for (i = min_rec + 1; i < fts_sort_pll_degree; i++) {
- ibool null_eq = FALSE;
if (!mrec[i]) {
continue;
}
- if (row_merge_cmp(mrec[i], mrec[min_rec],
- offsets[i], offsets[min_rec],
- index, &null_eq) < 0) {
+ if (cmp_rec_rec_simple(
+ mrec[i], mrec[min_rec],
+ offsets[i], offsets[min_rec],
+ index, NULL) < 0) {
min_rec = i;
}
}
diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc
new file mode 100644
index 00000000000..f5eb31191a5
--- /dev/null
+++ b/storage/innobase/row/row0import.cc
@@ -0,0 +1,3806 @@
+/*****************************************************************************
+
+Copyright (c) 2012, Oracle and/or its affiliates. All Rights Reserved.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
+
+*****************************************************************************/
+
+/**************************************************//**
+@file row/row0import.cc
+Import a tablespace to a running instance.
+
+Created 2012-02-08 by Sunny Bains.
+*******************************************************/
+
+#include "row0import.h"
+
+#ifdef UNIV_NONINL
+#include "row0import.ic"
+#endif
+
+#include "btr0pcur.h"
+#include "que0que.h"
+#include "dict0boot.h"
+#include "ibuf0ibuf.h"
+#include "pars0pars.h"
+#include "row0upd.h"
+#include "row0sel.h"
+#include "row0mysql.h"
+#include "srv0start.h"
+#include "row0quiesce.h"
+
+#include <vector>
+
+/** The size of the buffer to use for IO. Note: os_file_read() doesn't expect
+reads to fail. If you set the buffer size to be greater than a multiple of the
+file size then it will assert. TODO: Fix this limitation of the IO functions.
+@param n - page size of the tablespace.
+@retval number of pages */
+#define IO_BUFFER_SIZE(n) ((1024 * 1024) / n)
+
+/** For gathering stats on records during phase I */
+struct row_stats_t {
+ ulint m_n_deleted; /*!< Number of deleted records
+ found in the index */
+
+ ulint m_n_purged; /*!< Number of records purged
+ optimistically */
+
+ ulint m_n_rows; /*!< Number of rows */
+
+ ulint m_n_purge_failed; /*!< Number of deleted rows
+ that could not be purged */
+};
+
+/** Index information required by IMPORT. */
+struct row_index_t {
+ index_id_t m_id; /*!< Index id of the table
+ in the exporting server */
+ byte* m_name; /*!< Index name */
+
+ ulint m_space; /*!< Space where it is placed */
+
+ ulint m_page_no; /*!< Root page number */
+
+ ulint m_type; /*!< Index type */
+
+ ulint m_trx_id_offset; /*!< Relevant only for clustered
+ indexes, offset of transaction
+ id system column */
+
+ ulint m_n_user_defined_cols; /*!< User defined columns */
+
+ ulint m_n_uniq; /*!< Number of columns that can
+ uniquely identify the row */
+
+ ulint m_n_nullable; /*!< Number of nullable
+ columns */
+
+ ulint m_n_fields; /*!< Total number of fields */
+
+ dict_field_t* m_fields; /*!< Index fields */
+
+ const dict_index_t*
+ m_srv_index; /*!< Index instance in the
+ importing server */
+
+ row_stats_t m_stats; /*!< Statistics gathered during
+ the import phase */
+
+};
+
+/** Meta data required by IMPORT. */
+struct row_import {
+ row_import() UNIV_NOTHROW
+ :
+ m_table(),
+ m_version(),
+ m_hostname(),
+ m_table_name(),
+ m_autoinc(),
+ m_page_size(),
+ m_flags(),
+ m_n_cols(),
+ m_cols(),
+ m_col_names(),
+ m_n_indexes(),
+ m_indexes(),
+ m_missing(true) { }
+
+ ~row_import() UNIV_NOTHROW;
+
+ /**
+ Find the index entry in the indexes array.
+ @param name - index name
+ @return instance if found else 0. */
+ row_index_t* get_index(const char* name) const UNIV_NOTHROW;
+
+ /**
+ Get the number of rows in the index.
+ @param name - index name
+ @return number of rows (doesn't include delete marked rows). */
+ ulint get_n_rows(const char* name) const UNIV_NOTHROW;
+
+ /**
+ Find the ordinal value of the column name in the cfg table columns.
+ @param name - of column to look for.
+ @return ULINT_UNDEFINED if not found. */
+ ulint find_col(const char* name) const UNIV_NOTHROW;
+
+ /**
+ Find the index field entry in the cfg indexes fields.
+ @name - of the index to look for
+ @return instance if found else 0. */
+ const dict_field_t* find_field(
+ const row_index_t* cfg_index,
+ const char* name) const UNIV_NOTHROW;
+
+ /**
+ Get the number of rows for which purge failed during the convert phase.
+ @param name - index name
+ @return number of rows for which purge failed. */
+ ulint get_n_purge_failed(const char* name) const UNIV_NOTHROW;
+
+ /**
+ Check if the index is clean. ie. no delete-marked records
+ @param name - index name
+ @return true if index needs to be purged. */
+ bool requires_purge(const char* name) const UNIV_NOTHROW
+ {
+ return(get_n_purge_failed(name) > 0);
+ }
+
+ /**
+ Set the index root <space, pageno> using the index name */
+ void set_root_by_name() UNIV_NOTHROW;
+
+ /**
+ Set the index root <space, pageno> using a heuristic
+ @return DB_SUCCESS or error code */
+ dberr_t set_root_by_heuristic() UNIV_NOTHROW;
+
+ /** Check if the index schema that was read from the .cfg file
+ matches the in memory index definition.
+ Note: It will update row_import_t::m_srv_index to map the meta-data
+ read from the .cfg file to the server index instance.
+ @return DB_SUCCESS or error code. */
+ dberr_t match_index_columns(
+ THD* thd,
+ const dict_index_t* index) UNIV_NOTHROW;
+
+ /**
+ Check if the table schema that was read from the .cfg file matches the
+ in memory table definition.
+ @param thd - MySQL session variable
+ @return DB_SUCCESS or error code. */
+ dberr_t match_table_columns(
+ THD* thd) UNIV_NOTHROW;
+
+ /**
+ Check if the table (and index) schema that was read from the .cfg file
+ matches the in memory table definition.
+ @param thd - MySQL session variable
+ @return DB_SUCCESS or error code. */
+ dberr_t match_schema(
+ THD* thd) UNIV_NOTHROW;
+
+ dict_table_t* m_table; /*!< Table instance */
+
+ ulint m_version; /*!< Version of config file */
+
+ byte* m_hostname; /*!< Hostname where the
+ tablespace was exported */
+ byte* m_table_name; /*!< Exporting instance table
+ name */
+
+ ib_uint64_t m_autoinc; /*!< Next autoinc value */
+
+ ulint m_page_size; /*!< Tablespace page size */
+
+ ulint m_flags; /*!< Table flags */
+
+ ulint m_n_cols; /*!< Number of columns in the
+ meta-data file */
+
+ dict_col_t* m_cols; /*!< Column data */
+
+ byte** m_col_names; /*!< Column names, we store the
+ column names separately because
+ there is no field to store the
+ value in dict_col_t */
+
+ ulint m_n_indexes; /*!< Number of indexes,
+ including clustered index */
+
+ row_index_t* m_indexes; /*!< Index meta data */
+
+ bool m_missing; /*!< true if a .cfg file was
+ found and was readable */
+};
+
+/** Use the page cursor to iterate over records in a block. */
+class RecIterator {
+public:
+ /**
+ Default constructor */
+ RecIterator() UNIV_NOTHROW
+ {
+ memset(&m_cur, 0x0, sizeof(m_cur));
+ }
+
+ /**
+ Position the cursor on the first user record. */
+ void open(buf_block_t* block) UNIV_NOTHROW
+ {
+ page_cur_set_before_first(block, &m_cur);
+
+ if (!end()) {
+ next();
+ }
+ }
+
+ /**
+ Move to the next record. */
+ void next() UNIV_NOTHROW
+ {
+ page_cur_move_to_next(&m_cur);
+ }
+
+ /**
+ @return the current record */
+ rec_t* current() UNIV_NOTHROW
+ {
+ ut_ad(!end());
+ return(page_cur_get_rec(&m_cur));
+ }
+
+ /**
+ @return true if cursor is at the end */
+ bool end() UNIV_NOTHROW
+ {
+ return(page_cur_is_after_last(&m_cur) == TRUE);
+ }
+
+ /** Remove the current record
+ @return true on success */
+ bool remove(
+ const dict_index_t* index,
+ page_zip_des_t* page_zip,
+ ulint* offsets) UNIV_NOTHROW
+ {
+ /* We can't end up with an empty page unless it is root. */
+ if (page_get_n_recs(m_cur.block->frame) <= 1) {
+ return(false);
+ }
+
+ return(page_delete_rec(index, &m_cur, page_zip, offsets));
+ }
+
+private:
+ page_cur_t m_cur;
+};
+
+/** Class that purges delete marked records from indexes, both secondary
+and cluster. It does a pessimistic delete. This should only be done if we
+couldn't purge the delete marked records during Phase I. */
+class IndexPurge {
+public:
+ /** Constructor
+ @param trx - the user transaction covering the import tablespace
+ @param index - to be imported
+ @param space_id - space id of the tablespace */
+ IndexPurge(
+ trx_t* trx,
+ dict_index_t* index) UNIV_NOTHROW
+ :
+ m_trx(trx),
+ m_index(index),
+ m_n_rows(0)
+ {
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Phase II - Purge records from index %s",
+ index->name);
+ }
+
+ /** Destructor */
+ ~IndexPurge() UNIV_NOTHROW { }
+
+ /** Purge delete marked records.
+ @return DB_SUCCESS or error code. */
+ dberr_t garbage_collect() UNIV_NOTHROW;
+
+ /** The number of records that are not delete marked.
+ @return total records in the index after purge */
+ ulint get_n_rows() const UNIV_NOTHROW
+ {
+ return(m_n_rows);
+ }
+
+private:
+ /**
+ Begin import, position the cursor on the first record. */
+ void open() UNIV_NOTHROW;
+
+ /**
+ Close the persistent cursor and commit the mini-transaction. */
+ void close() UNIV_NOTHROW;
+
+ /**
+ Position the cursor on the next record.
+ @return DB_SUCCESS or error code */
+ dberr_t next() UNIV_NOTHROW;
+
+ /**
+ Store the persistent cursor position and reopen the
+ B-tree cursor in BTR_MODIFY_TREE mode, because the
+ tree structure may be changed during a pessimistic delete. */
+ void purge_pessimistic_delete() UNIV_NOTHROW;
+
+ /**
+ Purge delete-marked records.
+ @param offsets - current row offsets. */
+ void purge() UNIV_NOTHROW;
+
+protected:
+ // Disable copying
+ IndexPurge();
+ IndexPurge(const IndexPurge&);
+ IndexPurge &operator=(const IndexPurge&);
+
+private:
+ trx_t* m_trx; /*!< User transaction */
+ mtr_t m_mtr; /*!< Mini-transaction */
+ btr_pcur_t m_pcur; /*!< Persistent cursor */
+ dict_index_t* m_index; /*!< Index to be processed */
+ ulint m_n_rows; /*!< Records in index */
+};
+
+/** Functor that is called for each physical page that is read from the
+tablespace file. */
+class AbstractCallback : public PageCallback {
+public:
+ /** Constructor
+ @param trx - covering transaction */
+ AbstractCallback(trx_t* trx)
+ :
+ m_trx(trx),
+ m_space(ULINT_UNDEFINED),
+ m_xdes(),
+ m_xdes_page_no(ULINT_UNDEFINED),
+ m_space_flags(ULINT_UNDEFINED),
+ m_table_flags(ULINT_UNDEFINED) UNIV_NOTHROW { }
+
+ /**
+ Free any extent descriptor instance */
+ virtual ~AbstractCallback()
+ {
+ delete [] m_xdes;
+ }
+
+ /** Determine the page size to use for traversing the tablespace
+ @param file_size - size of the tablespace file in bytes
+ @param block - contents of the first page in the tablespace file.
+ @retval DB_SUCCESS or error code. */
+ virtual dberr_t init(
+ os_offset_t file_size,
+ const buf_block_t* block) UNIV_NOTHROW;
+
+ /** @return true if compressed table. */
+ bool is_compressed_table() const UNIV_NOTHROW
+ {
+ return(get_zip_size() > 0);
+ }
+
+protected:
+ /**
+ Get the data page depending on the table type, compressed or not.
+ @param block - block read from disk
+ @retval the buffer frame */
+ buf_frame_t* get_frame(buf_block_t* block) const UNIV_NOTHROW
+ {
+ if (is_compressed_table()) {
+ return(block->page.zip.data);
+ }
+
+ return(buf_block_get_frame(block));
+ }
+
+ /** Check for session interrupt. If required we could
+ even flush to disk here every N pages.
+ @retval DB_SUCCESS or error code */
+ dberr_t periodic_check() UNIV_NOTHROW
+ {
+ if (trx_is_interrupted(m_trx)) {
+ return(DB_INTERRUPTED);
+ }
+
+ return(DB_SUCCESS);
+ }
+
+ /**
+ Get the physical offset of the extent descriptor within the page.
+ @param page_no - page number of the extent descriptor
+ @param page - contents of the page containing the extent descriptor.
+ @return the start of the xdes array in a page */
+ const xdes_t* xdes(
+ ulint page_no,
+ const page_t* page) const UNIV_NOTHROW
+ {
+ ulint offset;
+
+ offset = xdes_calc_descriptor_index(get_zip_size(), page_no);
+
+ return(page + XDES_ARR_OFFSET + XDES_SIZE * offset);
+ }
+
+ /**
+ Set the current page directory (xdes). If the extent descriptor is
+ marked as free then free the current extent descriptor and set it to
+ 0. This implies that all pages that are covered by this extent
+ descriptor are also freed.
+
+ @param page_no - offset of page within the file
+ @param page - page contents
+ @return DB_SUCCESS or error code. */
+ dberr_t set_current_xdes(
+ ulint page_no,
+ const page_t* page) UNIV_NOTHROW
+ {
+ m_xdes_page_no = page_no;
+
+ delete[] m_xdes;
+
+ m_xdes = 0;
+
+ ulint state;
+ const xdes_t* xdesc = page + XDES_ARR_OFFSET;
+
+ state = mach_read_ulint(xdesc + XDES_STATE, MLOG_4BYTES);
+
+ if (state != XDES_FREE) {
+
+ m_xdes = new(std::nothrow) xdes_t[m_page_size];
+
+ /* Trigger OOM */
+ DBUG_EXECUTE_IF("ib_import_OOM_13",
+ delete [] m_xdes; m_xdes = 0;);
+
+ if (m_xdes == 0) {
+ return(DB_OUT_OF_MEMORY);
+ }
+
+ memcpy(m_xdes, page, m_page_size);
+ }
+
+ return(DB_SUCCESS);
+ }
+
+ /**
+ @return true if it is a root page */
+ bool is_root_page(const page_t* page) const UNIV_NOTHROW
+ {
+ ut_ad(fil_page_get_type(page) == FIL_PAGE_INDEX);
+
+ return(mach_read_from_4(page + FIL_PAGE_NEXT) == FIL_NULL
+ && mach_read_from_4(page + FIL_PAGE_PREV) == FIL_NULL);
+ }
+
+ /**
+ Check if the page is marked as free in the extent descriptor.
+ @param page_no - page number to check in the extent descriptor.
+ @return true if the page is marked as free */
+ bool is_free(ulint page_no) const UNIV_NOTHROW
+ {
+ ut_a(xdes_calc_descriptor_page(get_zip_size(), page_no)
+ == m_xdes_page_no);
+
+ if (m_xdes != 0) {
+ const xdes_t* xdesc = xdes(page_no, m_xdes);
+ ulint pos = page_no % FSP_EXTENT_SIZE;
+
+ return(xdes_get_bit(xdesc, XDES_FREE_BIT, pos));
+ }
+
+ /* If the current xdes was free, the page must be free. */
+ return(true);
+ }
+
+protected:
+ /** Covering transaction. */
+ trx_t* m_trx;
+
+ /** Space id of the file being iterated over. */
+ ulint m_space;
+
+ /** Minimum page number for which the free list has not been
+ initialized: the pages >= this limit are, by definition, free;
+ note that in a single-table tablespace where size < 64 pages,
+ this number is 64, i.e., we have initialized the space about
+ the first extent, but have not physically allocated those pages
+ to the file. @see FSP_LIMIT. */
+ ulint m_free_limit;
+
+ /** Current size of the space in pages */
+ ulint m_size;
+
+ /** Current extent descriptor page */
+ xdes_t* m_xdes;
+
+ /** Physical page offset in the file of the extent descriptor */
+ ulint m_xdes_page_no;
+
+ /** Flags value read from the header page */
+ ulint m_space_flags;
+
+ /** Derived from m_space_flags and row format type, the row format
+ type is determined from the page header. */
+ ulint m_table_flags;
+};
+
+/** Determine the page size to use for traversing the tablespace
+@param file_size - size of the tablespace file in bytes
+@param block - contents of the first page in the tablespace file.
+@retval DB_SUCCESS or error code. */
+dberr_t
+AbstractCallback::init(
+	os_offset_t file_size,
+	const buf_block_t* block) UNIV_NOTHROW
+{
+	const page_t* page = block->frame;
+
+	m_space_flags = fsp_header_get_flags(page);
+
+	/* Since we don't know whether it is a compressed table
+	or not, the data is always read into the block->frame. */
+
+	dberr_t err = set_zip_size(block->frame);
+
+	if (err != DB_SUCCESS) {
+		/* NOTE(review): the specific error from set_zip_size() is
+		deliberately collapsed into DB_CORRUPTION here. */
+		return(DB_CORRUPTION);
+	}
+
+	/* Set the page size used to traverse the tablespace. */
+
+	m_page_size = (is_compressed_table())
+		? get_zip_size() : fsp_flags_get_page_size(m_space_flags);
+
+	if (m_page_size == 0) {
+		ib_logf(IB_LOG_LEVEL_ERROR, "Page size is 0");
+		return(DB_CORRUPTION);
+	} else if (!is_compressed_table() && m_page_size != UNIV_PAGE_SIZE) {
+
+		ib_logf(IB_LOG_LEVEL_ERROR,
+			"Page size %lu of ibd file is not the same "
+			"as the server page size %lu",
+			m_page_size, UNIV_PAGE_SIZE);
+
+		return(DB_CORRUPTION);
+
+	} else if ((file_size % m_page_size)) {
+
+		ib_logf(IB_LOG_LEVEL_ERROR,
+			"File size " UINT64PF " is not a multiple "
+			"of the page size %lu",
+			(ib_uint64_t) file_size, (ulong) m_page_size);
+
+		return(DB_CORRUPTION);
+	}
+
+	/* init() must run exactly once: the space id may not have been
+	set already. */
+	ut_a(m_space == ULINT_UNDEFINED);
+
+	m_size = mach_read_from_4(page + FSP_SIZE);
+	m_free_limit = mach_read_from_4(page + FSP_FREE_LIMIT);
+	m_space = mach_read_from_4(page + FSP_HEADER_OFFSET + FSP_SPACE_ID);
+
+	if ((err = set_current_xdes(0, page)) != DB_SUCCESS) {
+		return(err);
+	}
+
+	return(DB_SUCCESS);
+}
+
+/**
+Try and determine the index root pages by checking if the next/prev
+pointers are both FIL_NULL. We need to ensure that we skip deleted pages. */
+struct FetchIndexRootPages : public AbstractCallback {
+
+	/** Index information gathered from the .ibd file. */
+	struct Index {
+
+		Index(index_id_t id, ulint page_no)
+			:
+			m_id(id),
+			m_page_no(page_no) { }
+
+		index_id_t m_id; /*!< Index id */
+		ulint m_page_no; /*!< Root page number */
+	};
+
+	typedef std::vector<Index> Indexes;
+
+	/** Constructor
+	@param trx - covering (user) transaction
+	@param table - table definition in server .*/
+	FetchIndexRootPages(const dict_table_t* table, trx_t* trx)
+		:
+		AbstractCallback(trx),
+		m_table(table) UNIV_NOTHROW { }
+
+	/** Destructor */
+	virtual ~FetchIndexRootPages() UNIV_NOTHROW { }
+
+	/**
+	@retval the space id of the tablespace being iterated over */
+	virtual ulint get_space_id() const UNIV_NOTHROW
+	{
+		return(m_space);
+	}
+
+	/**
+	Check if the .ibd file row format is the same as the table's.
+	@param ibd_table_flags - determined from space and page.
+	@return DB_SUCCESS or error code. */
+	dberr_t check_row_format(ulint ibd_table_flags) UNIV_NOTHROW
+	{
+		dberr_t err;
+		rec_format_t ibd_rec_format;
+		rec_format_t table_rec_format;
+
+		if (!dict_tf_is_valid(ibd_table_flags)) {
+
+			/* Fixed typo in the user-visible message:
+			"invlad" -> "invalid". */
+			ib_errf(m_trx->mysql_thd, IB_LOG_LEVEL_ERROR,
+				ER_TABLE_SCHEMA_MISMATCH,
+				".ibd file has invalid table flags: %lx",
+				ibd_table_flags);
+
+			return(DB_CORRUPTION);
+		}
+
+		ibd_rec_format = dict_tf_get_rec_format(ibd_table_flags);
+		table_rec_format = dict_tf_get_rec_format(m_table->flags);
+
+		if (table_rec_format != ibd_rec_format) {
+
+			ib_errf(m_trx->mysql_thd, IB_LOG_LEVEL_ERROR,
+				ER_TABLE_SCHEMA_MISMATCH,
+				"Table has %s row format, .ibd "
+				"file has %s row format.",
+				dict_tf_to_row_format_string(m_table->flags),
+				dict_tf_to_row_format_string(ibd_table_flags));
+
+			err = DB_CORRUPTION;
+		} else {
+			err = DB_SUCCESS;
+		}
+
+		return(err);
+	}
+
+	/**
+	Called for each block as it is read from the file.
+	@param offset - physical offset in the file
+	@param block - block to convert, it is not from the buffer pool.
+	@retval DB_SUCCESS or error code. */
+	virtual dberr_t operator() (
+		os_offset_t offset,
+		buf_block_t* block) UNIV_NOTHROW;
+
+	/** Update the import configuration that will be used to import
+	the tablespace. */
+	dberr_t build_row_import(row_import* cfg) const UNIV_NOTHROW;
+
+	/** Table definition in server. */
+	const dict_table_t* m_table;
+
+	/** Index information */
+	Indexes m_indexes;
+};
+
+/**
+Called for each block as it is read from the file. Check index pages to
+determine the exact row format. We can't get that from the tablespace
+header flags alone.
+
+@param offset - physical offset in the file
+@param block - block to convert, it is not from the buffer pool.
+@retval DB_SUCCESS or error code. */
+dberr_t
+FetchIndexRootPages::operator() (
+	os_offset_t offset,
+	buf_block_t* block) UNIV_NOTHROW
+{
+	dberr_t err;
+
+	/* Abort early if the covering transaction was interrupted. */
+	if ((err = periodic_check()) != DB_SUCCESS) {
+		return(err);
+	}
+
+	const page_t* page = get_frame(block);
+
+	ulint page_type = fil_page_get_type(page);
+
+	if (block->page.offset * m_page_size != offset) {
+		/* NOTE(review): the value logged as "file offset" below is
+		offset / m_page_size, i.e. a page number rather than a byte
+		offset -- confirm the intended units of the message. */
+		ib_logf(IB_LOG_LEVEL_ERROR,
+			"Page offset doesn't match file offset: "
+			"page offset: %lu, file offset: %lu",
+			(ulint) block->page.offset,
+			(ulint) (offset / m_page_size));
+
+		err = DB_CORRUPTION;
+	} else if (page_type == FIL_PAGE_TYPE_XDES) {
+		err = set_current_xdes(block->page.offset, page);
+	} else if (page_type == FIL_PAGE_INDEX
+		   && !is_free(block->page.offset)
+		   && is_root_page(page)) {
+
+		index_id_t id = btr_page_get_index_id(page);
+		ulint page_no = buf_block_get_page_no(block);
+
+		m_indexes.push_back(Index(id, page_no));
+
+		/* The first root page seen determines the row format;
+		only validate it once. */
+		if (m_indexes.size() == 1) {
+
+			m_table_flags = dict_sys_tables_type_to_tf(
+				m_space_flags,
+				page_is_comp(page) ? DICT_N_COLS_COMPACT : 0);
+
+			err = check_row_format(m_table_flags);
+		}
+	}
+
+	return(err);
+}
+
+/**
+Update the import configuration that will be used to import the tablespace.
+@return error code or DB_SUCCESS */
+dberr_t
+FetchIndexRootPages::build_row_import(row_import* cfg) const UNIV_NOTHROW
+{
+	Indexes::const_iterator end = m_indexes.end();
+
+	ut_a(cfg->m_table == m_table);
+	cfg->m_page_size = m_page_size;
+	cfg->m_n_indexes = m_indexes.size();
+
+	if (cfg->m_n_indexes == 0) {
+
+		ib_logf(IB_LOG_LEVEL_ERROR, "No B+Tree found in tablespace");
+
+		return(DB_CORRUPTION);
+	}
+
+	cfg->m_indexes = new(std::nothrow) row_index_t[cfg->m_n_indexes];
+
+	/* Trigger OOM */
+	DBUG_EXECUTE_IF("ib_import_OOM_11",
+			delete [] cfg->m_indexes; cfg->m_indexes = 0;);
+
+	if (cfg->m_indexes == 0) {
+		return(DB_OUT_OF_MEMORY);
+	}
+
+	memset(cfg->m_indexes, 0x0, sizeof(*cfg->m_indexes) * cfg->m_n_indexes);
+
+	row_index_t* cfg_index = cfg->m_indexes;
+
+	for (Indexes::const_iterator it = m_indexes.begin();
+	     it != end;
+	     ++it, ++cfg_index) {
+
+		/* Synthesize a name from the index id: there is no .cfg
+		file, so the real index names are unknown here. */
+		char name[BUFSIZ];
+
+		ut_snprintf(name, sizeof(name), "index" IB_ID_FMT, it->m_id);
+
+		ulint len = strlen(name) + 1;
+
+		cfg_index->m_name = new(std::nothrow) byte[len];
+
+		/* Trigger OOM */
+		DBUG_EXECUTE_IF("ib_import_OOM_12",
+				delete [] cfg_index->m_name;
+				cfg_index->m_name = 0;);
+
+		if (cfg_index->m_name == 0) {
+			return(DB_OUT_OF_MEMORY);
+		}
+
+		memcpy(cfg_index->m_name, name, len);
+
+		cfg_index->m_id = it->m_id;
+
+		cfg_index->m_space = m_space;
+
+		cfg_index->m_page_no = it->m_page_no;
+	}
+
+	return(DB_SUCCESS);
+}
+
+/* Functor that is called for each physical page that is read from the
+tablespace file.
+
+  1. Check each page for corruption.
+
+  2. Update the space id and LSN on every page
+     * For the header page
+       - Validate the flags
+       - Update the LSN
+
+  3. On Btree pages
+     * Set the index id
+     * Update the max trx id
+     * In a cluster index, update the system columns
+     * In a cluster index, update the BLOB ptr, set the space id
+     * Purge delete marked records, but only if they can be easily
+       removed from the page
+     * Keep a counter of number of rows, ie. non-delete-marked rows
+     * Keep a counter of number of delete marked rows
+     * Keep a counter of number of purge failure
+     * If a page is stamped with an index id that isn't in the .cfg file
+       we assume it is deleted and the page can be ignored.
+
+   4. Set the page state to dirty so that it will be written to disk.
+*/
+class PageConverter : public AbstractCallback {
+public:
+	/** Constructor
+	* @param cfg - config of table being imported.
+	* @param trx - transaction covering the import */
+	PageConverter(row_import* cfg, trx_t* trx) UNIV_NOTHROW;
+
+	virtual ~PageConverter() UNIV_NOTHROW
+	{
+		if (m_heap != 0) {
+			mem_heap_free(m_heap);
+		}
+	}
+
+	/**
+	@retval the server space id of the tablespace being iterated over */
+	virtual ulint get_space_id() const UNIV_NOTHROW
+	{
+		return(m_cfg->m_table->space);
+	}
+
+	/**
+	Called for each block as it is read from the file.
+	@param offset - physical offset in the file
+	@param block - block to convert, it is not from the buffer pool.
+	@retval DB_SUCCESS or error code. */
+	virtual dberr_t operator() (
+		os_offset_t offset,
+		buf_block_t* block) UNIV_NOTHROW;
+private:
+
+	/** Status returned by PageConverter::validate() */
+	enum import_page_status_t {
+		IMPORT_PAGE_STATUS_OK, /*!< Page is OK */
+		IMPORT_PAGE_STATUS_ALL_ZERO, /*!< Page is all zeros */
+		IMPORT_PAGE_STATUS_CORRUPTED /*!< Page is corrupted */
+	};
+
+	/**
+	Update the page, set the space id, max trx id and index id.
+	@param block - block read from file
+	@param page_type - type of the page
+	@retval DB_SUCCESS or error code */
+	dberr_t update_page(
+		buf_block_t* block,
+		ulint& page_type) UNIV_NOTHROW;
+
+#if defined UNIV_DEBUG
+	/**
+	@return true error condition is enabled. */
+	bool trigger_corruption() UNIV_NOTHROW
+	{
+		return(false);
+	}
+	#else
+#define trigger_corruption()	(false)
+#endif /* UNIV_DEBUG */
+
+	/**
+	Update the space, index id, trx id.
+	@param block - block to convert
+	@return DB_SUCCESS or error code */
+	dberr_t update_index_page(buf_block_t* block) UNIV_NOTHROW;
+
+	/** Update the BLOB references and write UNDO log entries for
+	rows that can't be purged optimistically.
+	@param block - block to update
+	@retval DB_SUCCESS or error code */
+	dberr_t update_records(buf_block_t* block) UNIV_NOTHROW;
+
+	/**
+	Validate the page, check for corruption.
+	@param offset - physical offset within file.
+	@param page - page read from file.
+	@return 0 on success, 1 if all zero, 2 if corrupted */
+	import_page_status_t validate(
+		os_offset_t offset,
+		buf_block_t* page) UNIV_NOTHROW;
+
+	/**
+	Validate the space flags and update tablespace header page.
+	@param block - block read from file, not from the buffer pool.
+	@retval DB_SUCCESS or error code */
+	dberr_t update_header(buf_block_t* block) UNIV_NOTHROW;
+
+	/**
+	Adjust the BLOB reference for a single column that is externally stored
+	@param rec - record to update
+	@param offsets - column offsets for the record
+	@param i - column ordinal value
+	@return DB_SUCCESS or error code */
+	dberr_t adjust_cluster_index_blob_column(
+		rec_t* rec,
+		const ulint* offsets,
+		ulint i) UNIV_NOTHROW;
+
+	/**
+	Adjusts the BLOB reference in the clustered index row for all
+	externally stored columns.
+	@param rec - record to update
+	@param offsets - column offsets for the record
+	@return DB_SUCCESS or error code */
+	dberr_t adjust_cluster_index_blob_columns(
+		rec_t* rec,
+		const ulint* offsets) UNIV_NOTHROW;
+
+	/**
+	In the clustered index, adjust the BLOB pointers as needed.
+	Also update the BLOB reference, write the new space id.
+	@param rec - record to update
+	@param offsets - column offsets for the record
+	@return DB_SUCCESS or error code */
+	dberr_t adjust_cluster_index_blob_ref(
+		rec_t* rec,
+		const ulint* offsets) UNIV_NOTHROW;
+
+	/**
+	Purge delete-marked records, only if it is possible to do
+	so without re-organising the B+tree.
+	@param offsets - current row offsets.
+	@retval true if purged */
+	bool purge(const ulint* offsets) UNIV_NOTHROW;
+
+	/**
+	Adjust the BLOB references and sys fields for the current record.
+	@param index - the index being converted
+	@param rec - record to update
+	@param offsets - column offsets for the record
+	@param deleted - true if row is delete marked
+	@return DB_SUCCESS or error code. */
+	dberr_t adjust_cluster_record(
+		const dict_index_t* index,
+		rec_t* rec,
+		const ulint* offsets,
+		bool deleted) UNIV_NOTHROW;
+
+	/**
+	Find an index with the matching id.
+	@return row_index_t* instance or 0 */
+	row_index_t* find_index(index_id_t id) UNIV_NOTHROW
+	{
+		row_index_t* index = &m_cfg->m_indexes[0];
+
+		/* Linear scan: the number of indexes is small. */
+		for (ulint i = 0; i < m_cfg->m_n_indexes; ++i, ++index) {
+			if (id == index->m_id) {
+				return(index);
+			}
+		}
+
+		return(0);
+
+	}
+private:
+	/** Config for table that is being imported. */
+	row_import* m_cfg;
+
+	/** Current index whose pages are being imported */
+	row_index_t* m_index;
+
+	/** Current system LSN */
+	lsn_t m_current_lsn;
+
+	/** Alias for m_page_zip, only set for compressed pages. */
+	page_zip_des_t* m_page_zip_ptr;
+
+	/** Iterator over records in a block */
+	RecIterator m_rec_iter;
+
+	/** Record offset */
+	ulint m_offsets_[REC_OFFS_NORMAL_SIZE];
+
+	/** Pointer to m_offsets_ */
+	ulint* m_offsets;
+
+	/** Memory heap for the record offsets */
+	mem_heap_t* m_heap;
+
+	/** Cluster index instance */
+	dict_index_t* m_cluster_index;
+};
+
+/**
+row_import destructor. Frees all heap-allocated members; every name and
+field array is owned by this object (allocated with new[]). */
+row_import::~row_import() UNIV_NOTHROW
+{
+	for (ulint i = 0; m_indexes != 0 && i < m_n_indexes; ++i) {
+		delete [] m_indexes[i].m_name;
+
+		if (m_indexes[i].m_fields == 0) {
+			continue;
+		}
+
+		dict_field_t* fields = m_indexes[i].m_fields;
+		ulint n_fields = m_indexes[i].m_n_fields;
+
+		for (ulint j = 0; j < n_fields; ++j) {
+			delete [] fields[j].name;
+		}
+
+		delete [] fields;
+	}
+
+	for (ulint i = 0; m_col_names != 0 && i < m_n_cols; ++i) {
+		delete [] m_col_names[i];
+	}
+
+	/* delete[] on a null pointer is a no-op, so unconditional
+	deletes are safe here. */
+	delete [] m_cols;
+	delete [] m_indexes;
+	delete [] m_col_names;
+	delete [] m_table_name;
+	delete [] m_hostname;
+}
+
+/**
+Find the index entry in the indexes array.
+@param name - index name
+@return instance if found else 0. */
+row_index_t*
+row_import::get_index(
+	const char* name) const UNIV_NOTHROW
+{
+	for (ulint i = 0; i < m_n_indexes; ++i) {
+		const char* index_name;
+		row_index_t* index = &m_indexes[i];
+
+		index_name = reinterpret_cast<const char*>(index->m_name);
+
+		if (strcmp(index_name, name) == 0) {
+
+			return(index);
+		}
+	}
+
+	return(0);
+}
+
+/**
+Get the number of rows in the index.
+@param name - index name, must refer to an existing index
+@return number of rows (doesn't include delete marked rows). */
+ulint
+row_import::get_n_rows(
+	const char* name) const UNIV_NOTHROW
+{
+	const row_index_t* index = get_index(name);
+
+	/* Assert on the looked-up entry, not just the name: previously
+	only "name" was checked while "index" was dereferenced below,
+	so a missing index crashed instead of asserting. */
+	ut_a(name != 0);
+	ut_a(index != 0);
+
+	return(index->m_stats.m_n_rows);
+}
+
+/**
+Get the number of rows for which purge failed during the convert phase.
+@param name - index name, must refer to an existing index
+@return number of rows for which purge failed. */
+ulint
+row_import::get_n_purge_failed(
+	const char* name) const UNIV_NOTHROW
+{
+	const row_index_t* index = get_index(name);
+
+	/* Assert on the looked-up entry, not just the name: previously
+	only "name" was checked while "index" was dereferenced below,
+	so a missing index crashed instead of asserting. */
+	ut_a(name != 0);
+	ut_a(index != 0);
+
+	return(index->m_stats.m_n_purge_failed);
+}
+
+/**
+Find the ordinal value of the column name in the cfg table columns.
+@param name - of column to look for.
+@return column ordinal, or ULINT_UNDEFINED if not found. */
+ulint
+row_import::find_col(
+	const char* name) const UNIV_NOTHROW
+{
+	for (ulint i = 0; i < m_n_cols; ++i) {
+		const char* col_name;
+
+		col_name = reinterpret_cast<const char*>(m_col_names[i]);
+
+		if (strcmp(col_name, name) == 0) {
+			return(i);
+		}
+	}
+
+	return(ULINT_UNDEFINED);
+}
+
+/**
+Find the index field entry in the cfg indexes fields.
+@param cfg_index - index whose fields to search
+@param name - of the field to look for
+@return instance if found else 0. */
+const dict_field_t*
+row_import::find_field(
+	const row_index_t* cfg_index,
+	const char* name) const UNIV_NOTHROW
+{
+	const dict_field_t* field = cfg_index->m_fields;
+
+	for (ulint i = 0; i < cfg_index->m_n_fields; ++i, ++field) {
+		const char* field_name;
+
+		field_name = reinterpret_cast<const char*>(field->name);
+
+		if (strcmp(field_name, name) == 0) {
+			return(field);
+		}
+	}
+
+	return(0);
+}
+
+/**
+Check if the index schema that was read from the .cfg file matches the
+in memory index definition.
+@param thd - MySQL session variable
+@param index - in-memory index definition to compare against the .cfg
+@return DB_SUCCESS or error code. */
+dberr_t
+row_import::match_index_columns(
+	THD* thd,
+	const dict_index_t* index) UNIV_NOTHROW
+{
+	row_index_t* cfg_index;
+	dberr_t err = DB_SUCCESS;
+
+	cfg_index = get_index(index->name);
+
+	if (cfg_index == 0) {
+		ib_errf(thd, IB_LOG_LEVEL_ERROR,
+			ER_TABLE_SCHEMA_MISMATCH,
+			"Index %s not found in tablespace meta-data file.",
+			index->name);
+
+		return(DB_ERROR);
+	}
+
+	cfg_index->m_srv_index = index;
+
+	const dict_field_t* field = index->fields;
+
+	/* Report every field mismatch rather than stopping at the
+	first one, so the user sees all schema differences at once. */
+	for (ulint i = 0; i < index->n_fields; ++i, ++field) {
+
+		const dict_field_t* cfg_field;
+
+		cfg_field = find_field(cfg_index, field->name);
+
+		if (cfg_field == 0) {
+			ib_errf(thd, IB_LOG_LEVEL_ERROR,
+				ER_TABLE_SCHEMA_MISMATCH,
+				"Index %s field %s not found in tablespace "
+				"meta-data file.",
+				index->name, field->name);
+
+			err = DB_ERROR;
+		} else {
+
+			if (cfg_field->prefix_len != field->prefix_len) {
+				ib_errf(thd, IB_LOG_LEVEL_ERROR,
+					ER_TABLE_SCHEMA_MISMATCH,
+					"Index %s field %s prefix len %lu "
+					"doesn't match meta-data file value "
+					"%lu",
+					index->name, field->name,
+					(ulong) field->prefix_len,
+					(ulong) cfg_field->prefix_len);
+
+				err = DB_ERROR;
+			}
+
+			if (cfg_field->fixed_len != field->fixed_len) {
+				ib_errf(thd, IB_LOG_LEVEL_ERROR,
+					ER_TABLE_SCHEMA_MISMATCH,
+					"Index %s field %s fixed len %lu "
+					"doesn't match meta-data file value "
+					"%lu",
+					index->name, field->name,
+					(ulong) field->fixed_len,
+					(ulong) cfg_field->fixed_len);
+
+				err = DB_ERROR;
+			}
+		}
+	}
+
+	return(err);
+}
+
+/**
+Check if the table schema that was read from the .cfg file matches the
+in memory table definition.
+@param thd - MySQL session variable
+@return DB_SUCCESS or error code. */
+dberr_t
+row_import::match_table_columns(
+	THD* thd) UNIV_NOTHROW
+{
+	dberr_t err = DB_SUCCESS;
+	const dict_col_t* col = m_table->cols;
+
+	/* Report every column mismatch rather than stopping at the
+	first one. */
+	for (ulint i = 0; i < m_table->n_cols; ++i, ++col) {
+
+		const char* col_name;
+		ulint cfg_col_index;
+
+		col_name = dict_table_get_col_name(
+			m_table, dict_col_get_no(col));
+
+		cfg_col_index = find_col(col_name);
+
+		if (cfg_col_index == ULINT_UNDEFINED) {
+
+			ib_errf(thd, IB_LOG_LEVEL_ERROR,
+				ER_TABLE_SCHEMA_MISMATCH,
+				"Column %s not found in tablespace.",
+				col_name);
+
+			err = DB_ERROR;
+		} else if (cfg_col_index != col->ind) {
+
+			ib_errf(thd, IB_LOG_LEVEL_ERROR,
+				ER_TABLE_SCHEMA_MISMATCH,
+				"Column %s ordinal value mismatch, it's at "
+				"%lu in the table and %lu in the tablespace "
+				"meta-data file",
+				col_name,
+				(ulong) col->ind, (ulong) cfg_col_index);
+
+			err = DB_ERROR;
+		} else {
+			const dict_col_t* cfg_col;
+
+			cfg_col = &m_cols[cfg_col_index];
+			ut_a(cfg_col->ind == cfg_col_index);
+
+			if (cfg_col->prtype != col->prtype) {
+				ib_errf(thd,
+					IB_LOG_LEVEL_ERROR,
+					ER_TABLE_SCHEMA_MISMATCH,
+					"Column %s precise type mismatch.",
+					col_name);
+				err = DB_ERROR;
+			}
+
+			if (cfg_col->mtype != col->mtype) {
+				ib_errf(thd,
+					IB_LOG_LEVEL_ERROR,
+					ER_TABLE_SCHEMA_MISMATCH,
+					"Column %s main type mismatch.",
+					col_name);
+				err = DB_ERROR;
+			}
+
+			if (cfg_col->len != col->len) {
+				ib_errf(thd,
+					IB_LOG_LEVEL_ERROR,
+					ER_TABLE_SCHEMA_MISMATCH,
+					"Column %s length mismatch.",
+					col_name);
+				err = DB_ERROR;
+			}
+
+			if (cfg_col->mbminmaxlen != col->mbminmaxlen) {
+				ib_errf(thd,
+					IB_LOG_LEVEL_ERROR,
+					ER_TABLE_SCHEMA_MISMATCH,
+					"Column %s multi-byte len mismatch.",
+					col_name);
+				err = DB_ERROR;
+			}
+
+			/* NOTE(review): unreachable in practice -- this
+			branch is only entered when cfg_col_index == col->ind
+			and cfg_col->ind == cfg_col_index was asserted above. */
+			if (cfg_col->ind != col->ind) {
+				err = DB_ERROR;
+			}
+
+			if (cfg_col->ord_part != col->ord_part) {
+				ib_errf(thd,
+					IB_LOG_LEVEL_ERROR,
+					ER_TABLE_SCHEMA_MISMATCH,
+					"Column %s ordering mismatch.",
+					col_name);
+				err = DB_ERROR;
+			}
+
+			if (cfg_col->max_prefix != col->max_prefix) {
+				ib_errf(thd,
+					IB_LOG_LEVEL_ERROR,
+					ER_TABLE_SCHEMA_MISMATCH,
+					"Column %s max prefix mismatch.",
+					col_name);
+				err = DB_ERROR;
+			}
+		}
+	}
+
+	return(err);
+}
+
+/**
+Check if the table (and index) schema that was read from the .cfg file
+matches the in memory table definition.
+@param thd - MySQL session variable
+@return DB_SUCCESS or error code. */
+dberr_t
+row_import::match_schema(
+	THD* thd) UNIV_NOTHROW
+{
+	/* Do some simple checks. */
+
+	if (m_flags != m_table->flags) {
+		/* Bug fix: the message printed m_table->n_cols where the
+		server table's flags were meant, making the reported
+		"server table has 0x%lx" value wrong. */
+		ib_errf(thd, IB_LOG_LEVEL_ERROR, ER_TABLE_SCHEMA_MISMATCH,
+			"Table flags don't match, server table has 0x%lx "
+			"and the meta-data file has 0x%lx",
+			(ulong) m_table->flags, (ulong) m_flags);
+
+		return(DB_ERROR);
+	} else if (m_table->n_cols != m_n_cols) {
+		ib_errf(thd, IB_LOG_LEVEL_ERROR, ER_TABLE_SCHEMA_MISMATCH,
+			"Number of columns don't match, table has %lu "
+			"columns but the tablespace meta-data file has "
+			"%lu columns",
+			(ulong) m_table->n_cols, (ulong) m_n_cols);
+
+		return(DB_ERROR);
+	} else if (UT_LIST_GET_LEN(m_table->indexes) != m_n_indexes) {
+
+		/* If the number of indexes don't match then it is better
+		to abort the IMPORT. It is easy for the user to create a
+		table matching the IMPORT definition. */
+
+		ib_errf(thd, IB_LOG_LEVEL_ERROR, ER_TABLE_SCHEMA_MISMATCH,
+			"Number of indexes don't match, table has %lu "
+			"indexes but the tablespace meta-data file has "
+			"%lu indexes",
+			(ulong) UT_LIST_GET_LEN(m_table->indexes),
+			(ulong) m_n_indexes);
+
+		return(DB_ERROR);
+	}
+
+	dberr_t err = match_table_columns(thd);
+
+	if (err != DB_SUCCESS) {
+		return(err);
+	}
+
+	/* Check if the index definitions match. */
+
+	const dict_index_t* index;
+
+	for (index = UT_LIST_GET_FIRST(m_table->indexes);
+	     index != 0;
+	     index = UT_LIST_GET_NEXT(indexes, index)) {
+
+		dberr_t index_err;
+
+		index_err = match_index_columns(thd, index);
+
+		if (index_err != DB_SUCCESS) {
+			err = index_err;
+		}
+	}
+
+	return(err);
+}
+
+/**
+Set the index root <space, pageno>, using index name. The caller must
+already have verified (via match_schema()) that every .cfg index exists
+in the dictionary. */
+void
+row_import::set_root_by_name() UNIV_NOTHROW
+{
+	row_index_t* cfg_index = m_indexes;
+
+	for (ulint i = 0; i < m_n_indexes; ++i, ++cfg_index) {
+		dict_index_t* index;
+
+		const char* index_name;
+
+		index_name = reinterpret_cast<const char*>(cfg_index->m_name);
+
+		index = dict_table_get_index_on_name(m_table, index_name);
+
+		/* We've already checked that it exists. */
+		ut_a(index != 0);
+
+		/* Set the root page number and space id. */
+		index->space = m_table->space;
+		index->page = cfg_index->m_page_no;
+	}
+}
+
+/**
+Set the index root <space, pageno>, using a heuristic. Used when there is
+no .cfg file: the i-th root page found in the file is assigned to the i-th
+dictionary index, in ordinal order.
+@return DB_SUCCESS or error code */
+dberr_t
+row_import::set_root_by_heuristic() UNIV_NOTHROW
+{
+	row_index_t* cfg_index = m_indexes;
+
+	ut_a(m_n_indexes > 0);
+
+	// TODO: For now use brute force, based on ordinality
+
+	if (UT_LIST_GET_LEN(m_table->indexes) != m_n_indexes) {
+
+		char table_name[MAX_FULL_NAME_LEN + 1];
+
+		innobase_format_name(
+			table_name, sizeof(table_name), m_table->name, FALSE);
+
+		ib_logf(IB_LOG_LEVEL_WARN,
+			"Table %s should have %lu indexes but the tablespace "
+			"has %lu indexes",
+			table_name,
+			UT_LIST_GET_LEN(m_table->indexes),
+			m_n_indexes);
+	}
+
+	dict_mutex_enter_for_mysql();
+
+	ulint i = 0;
+	dberr_t err = DB_SUCCESS;
+
+	for (dict_index_t* index = UT_LIST_GET_FIRST(m_table->indexes);
+	     index != 0;
+	     index = UT_LIST_GET_NEXT(indexes, index)) {
+
+		if (index->type & DICT_FTS) {
+			/* FTS indexes cannot be matched heuristically;
+			mark them corrupt so they are rebuilt later. */
+			index->type |= DICT_CORRUPT;
+			ib_logf(IB_LOG_LEVEL_WARN,
+				"Skipping FTS index: %s", index->name);
+		} else if (i < m_n_indexes) {
+
+			/* Replace the synthesized "index<id>" name with
+			the real dictionary index name. */
+			delete [] cfg_index[i].m_name;
+
+			ulint len = strlen(index->name) + 1;
+
+			cfg_index[i].m_name = new(std::nothrow) byte[len];
+
+			/* Trigger OOM */
+			DBUG_EXECUTE_IF("ib_import_OOM_14",
+					delete[] cfg_index[i].m_name;
+					cfg_index[i].m_name = 0;);
+
+			if (cfg_index[i].m_name == 0) {
+				err = DB_OUT_OF_MEMORY;
+				break;
+			}
+
+			memcpy(cfg_index[i].m_name, index->name, len);
+
+			cfg_index[i].m_srv_index = index;
+
+			index->space = m_table->space;
+			index->page = cfg_index[i].m_page_no;
+
+			++i;
+		}
+	}
+
+	dict_mutex_exit_for_mysql();
+
+	return(err);
+}
+
+/**
+Purge delete marked records. Scans the whole index, counting live rows
+and purging delete-marked ones.
+@return DB_SUCCESS or error code. */
+dberr_t
+IndexPurge::garbage_collect() UNIV_NOTHROW
+{
+	dberr_t err;
+	ibool comp = dict_table_is_comp(m_index->table);
+
+	/* Open the persistent cursor and start the mini-transaction. */
+
+	open();
+
+	while ((err = next()) == DB_SUCCESS) {
+
+		rec_t* rec = btr_pcur_get_rec(&m_pcur);
+		ibool deleted = rec_get_deleted_flag(rec, comp);
+
+		if (!deleted) {
+			++m_n_rows;
+		} else {
+			purge();
+		}
+	}
+
+	/* Close the persistent cursor and commit the mini-transaction. */
+
+	close();
+
+	/* DB_END_OF_INDEX is the normal loop-exit condition. */
+	return(err == DB_END_OF_INDEX ? DB_SUCCESS : err);
+}
+
+/**
+Begin import, position the cursor on the first record. Redo logging is
+disabled: the tablespace is not yet visible and will be flushed/synced
+explicitly. */
+void
+IndexPurge::open() UNIV_NOTHROW
+{
+	mtr_start(&m_mtr);
+
+	mtr_set_log_mode(&m_mtr, MTR_LOG_NO_REDO);
+
+	btr_pcur_open_at_index_side(
+		true, m_index, BTR_MODIFY_LEAF, &m_pcur, true, 0, &m_mtr);
+}
+
+/**
+Close the persistent cursor and commit the mini-transaction. */
+void
+IndexPurge::close() UNIV_NOTHROW
+{
+	btr_pcur_close(&m_pcur);
+	mtr_commit(&m_mtr);
+}
+
+/**
+Position the cursor on the next record.
+@return DB_SUCCESS, DB_END_OF_INDEX at the end of the index, or
+DB_INTERRUPTED if the covering transaction was killed */
+dberr_t
+IndexPurge::next() UNIV_NOTHROW
+{
+	btr_pcur_move_to_next_on_page(&m_pcur);
+
+	/* When switching pages, commit the mini-transaction
+	in order to release the latch on the old page. */
+
+	if (!btr_pcur_is_after_last_on_page(&m_pcur)) {
+		return(DB_SUCCESS);
+	} else if (trx_is_interrupted(m_trx)) {
+		/* Check after every page because the check
+		is expensive. */
+		return(DB_INTERRUPTED);
+	}
+
+	/* Persist the cursor position across the mtr commit, then
+	re-latch and continue on the next page. */
+	btr_pcur_store_position(&m_pcur, &m_mtr);
+
+	mtr_commit(&m_mtr);
+
+	mtr_start(&m_mtr);
+
+	mtr_set_log_mode(&m_mtr, MTR_LOG_NO_REDO);
+
+	btr_pcur_restore_position(BTR_MODIFY_LEAF, &m_pcur, &m_mtr);
+
+	if (!btr_pcur_move_to_next_user_rec(&m_pcur, &m_mtr)) {
+
+		return(DB_END_OF_INDEX);
+	}
+
+	return(DB_SUCCESS);
+}
+
+/**
+Store the persistent cursor position and reopen the
+B-tree cursor in BTR_MODIFY_TREE mode, because the
+tree structure may be changed during a pessimistic delete.
+The mtr is committed on return; the caller (purge()) restarts
+it and restores the cursor in BTR_MODIFY_LEAF mode. */
+void
+IndexPurge::purge_pessimistic_delete() UNIV_NOTHROW
+{
+	dberr_t err;
+
+	btr_pcur_restore_position(BTR_MODIFY_TREE, &m_pcur, &m_mtr);
+
+	ut_ad(rec_get_deleted_flag(
+		btr_pcur_get_rec(&m_pcur),
+		dict_table_is_comp(m_index->table)));
+
+	/* RB_NONE: no rollback/undo processing for the delete. */
+	btr_cur_pessimistic_delete(
+		&err, FALSE, btr_pcur_get_btr_cur(&m_pcur), 0, RB_NONE, &m_mtr);
+
+	ut_a(err == DB_SUCCESS);
+
+	/* Reopen the B-tree cursor in BTR_MODIFY_LEAF mode */
+	mtr_commit(&m_mtr);
+}
+
+/**
+Purge delete-marked records. Saves the cursor position, performs the
+pessimistic delete (which commits the mtr), then restarts the mtr and
+restores the cursor. */
+void
+IndexPurge::purge() UNIV_NOTHROW
+{
+	btr_pcur_store_position(&m_pcur, &m_mtr);
+
+	purge_pessimistic_delete();
+
+	mtr_start(&m_mtr);
+
+	mtr_set_log_mode(&m_mtr, MTR_LOG_NO_REDO);
+
+	btr_pcur_restore_position(BTR_MODIFY_LEAF, &m_pcur, &m_mtr);
+}
+
+/**
+Constructor
+* @param cfg - config of table being imported.
+* @param trx - transaction covering the import */
+PageConverter::PageConverter(
+	row_import* cfg,
+	trx_t* trx)
+	:
+	AbstractCallback(trx),
+	m_cfg(cfg),
+	m_page_zip_ptr(0),
+	m_heap(0) UNIV_NOTHROW
+{
+	/* Start with the first cfg index; operator() switches indexes
+	as pages with other index ids are encountered. */
+	m_index = m_cfg->m_indexes;
+
+	m_current_lsn = log_get_lsn();
+	ut_a(m_current_lsn > 0);
+
+	m_offsets = m_offsets_;
+	rec_offs_init(m_offsets_);
+
+	/* The first index of the table is its clustered index. */
+	m_cluster_index = dict_table_get_first_index(m_cfg->m_table);
+}
+
+/**
+Adjust the BLOB reference for a single column that is externally stored:
+rewrite the space id stored inside the 20-byte extern field reference.
+@param rec - record to update
+@param offsets - column offsets for the record
+@param i - column ordinal value
+@return DB_SUCCESS or error code */
+dberr_t
+PageConverter::adjust_cluster_index_blob_column(
+	rec_t* rec,
+	const ulint* offsets,
+	ulint i) UNIV_NOTHROW
+{
+	ulint len;
+	byte* field;
+
+	field = rec_get_nth_field(rec, offsets, i, &len);
+
+	DBUG_EXECUTE_IF("ib_import_trigger_corruption_2",
+			len = BTR_EXTERN_FIELD_REF_SIZE - 1;);
+
+	/* An externally stored column must hold at least the full
+	extern field reference; anything shorter is corruption. */
+	if (len < BTR_EXTERN_FIELD_REF_SIZE) {
+
+		char index_name[MAX_FULL_NAME_LEN + 1];
+
+		innobase_format_name(
+			index_name, sizeof(index_name),
+			m_cluster_index->name, TRUE);
+
+		ib_errf(m_trx->mysql_thd, IB_LOG_LEVEL_ERROR,
+			ER_INNODB_INDEX_CORRUPT,
+			"Externally stored column(%lu) has a reference "
+			"length of %lu in the cluster index %s",
+			(ulong) i, (ulong) len, index_name);
+
+		return(DB_CORRUPTION);
+	}
+
+	/* Point at the space id field inside the extern reference,
+	which sits at the end of the column value. */
+	field += BTR_EXTERN_SPACE_ID - BTR_EXTERN_FIELD_REF_SIZE + len;
+
+	if (is_compressed_table()) {
+		mach_write_to_4(field, get_space_id());
+
+		page_zip_write_blob_ptr(
+			m_page_zip_ptr, rec, m_cluster_index, offsets, i, 0);
+	} else {
+		mlog_write_ulint(field, get_space_id(), MLOG_4BYTES, 0);
+	}
+
+	return(DB_SUCCESS);
+}
+
+/**
+Adjusts the BLOB reference in the clustered index row for all externally
+stored columns.
+@param rec - record to update
+@param offsets - column offsets for the record
+@return DB_SUCCESS or error code */
+dberr_t
+PageConverter::adjust_cluster_index_blob_columns(
+	rec_t* rec,
+	const ulint* offsets) UNIV_NOTHROW
+{
+	ut_ad(rec_offs_any_extern(offsets));
+
+	/* Adjust the space_id in the BLOB pointers. */
+
+	for (ulint i = 0; i < rec_offs_n_fields(offsets); ++i) {
+
+		/* Only if the column is stored "externally". */
+
+		if (rec_offs_nth_extern(offsets, i)) {
+			dberr_t err;
+
+			err = adjust_cluster_index_blob_column(rec, offsets, i);
+
+			if (err != DB_SUCCESS) {
+				return(err);
+			}
+		}
+	}
+
+	return(DB_SUCCESS);
+}
+
+/**
+In the clustered index, adjust BLOB pointers as needed. Also update the
+BLOB reference, write the new space id. No-op for records with no
+externally stored columns.
+@param rec - record to update
+@param offsets - column offsets for the record
+@return DB_SUCCESS or error code */
+dberr_t
+PageConverter::adjust_cluster_index_blob_ref(
+	rec_t* rec,
+	const ulint* offsets) UNIV_NOTHROW
+{
+	if (rec_offs_any_extern(offsets)) {
+		dberr_t err;
+
+		err = adjust_cluster_index_blob_columns(rec, offsets);
+
+		if (err != DB_SUCCESS) {
+			return(err);
+		}
+	}
+
+	return(DB_SUCCESS);
+}
+
+/**
+Purge delete-marked records, only if it is possible to do so without
+re-organising the B+tree.
+@param offsets - current row offsets.
+@return true if purge succeeded */
+bool
+PageConverter::purge(const ulint* offsets) UNIV_NOTHROW
+{
+	const dict_index_t* index = m_index->m_srv_index;
+
+	/* We can't have a page that is empty and not root. */
+	if (m_rec_iter.remove(index, m_page_zip_ptr, m_offsets)) {
+
+		++m_index->m_stats.m_n_purged;
+
+		return(true);
+	} else {
+		/* Purge failures are counted; the record is left for
+		the server-side purge to handle after import. */
+		++m_index->m_stats.m_n_purge_failed;
+	}
+
+	return(false);
+}
+
+/**
+Adjust the BLOB references and sys fields for the current record.
+@param index - the index being converted
+@param rec - record to update
+@param offsets - column offsets for the record
+@param deleted - true if row is delete marked
+@return DB_SUCCESS or error code. */
+dberr_t
+PageConverter::adjust_cluster_record(
+	const dict_index_t* index,
+	rec_t* rec,
+	const ulint* offsets,
+	bool deleted) UNIV_NOTHROW
+{
+	dberr_t err;
+
+	/* NOTE(review): the "index" and "deleted" parameters are currently
+	unused; the system columns are reset for every clustered record. */
+	if ((err = adjust_cluster_index_blob_ref(rec, offsets)) == DB_SUCCESS) {
+
+		/* Reset DB_TRX_ID and DB_ROLL_PTR. Normally, these fields
+		are only written in conjunction with other changes to the
+		record. */
+
+		row_upd_rec_sys_fields(
+			rec, m_page_zip_ptr, m_cluster_index, m_offsets,
+			m_trx, 0);
+	}
+
+	return(err);
+}
+
+/**
+Update the BLOB references and write UNDO log entries for
+rows that can't be purged optimistically.
+@param block - block to update
+@retval DB_SUCCESS or error code */
+dberr_t
+PageConverter::update_records(
+	buf_block_t* block) UNIV_NOTHROW
+{
+	ibool comp = dict_table_is_comp(m_cfg->m_table);
+	bool clust_index = m_index->m_srv_index == m_cluster_index;
+
+	/* This will also position the cursor on the first user record. */
+
+	m_rec_iter.open(block);
+
+	while (!m_rec_iter.end()) {
+
+		rec_t* rec = m_rec_iter.current();
+
+		/* FIXME: Move out of the loop */
+
+		/* Non-leaf (node pointer) pages carry no user rows. */
+		if (rec_get_status(rec) == REC_STATUS_NODE_PTR) {
+			break;
+		}
+
+		ibool deleted = rec_get_deleted_flag(rec, comp);
+
+		/* For the clustered index we have to adjust the BLOB
+		reference and the system fields irrespective of the
+		delete marked flag. The adjustment of delete marked
+		cluster records is required for purge to work later. */
+
+		if (deleted || clust_index) {
+			m_offsets = rec_get_offsets(
+				rec, m_index->m_srv_index, m_offsets,
+				ULINT_UNDEFINED, &m_heap);
+		}
+
+		if (clust_index) {
+
+			dberr_t err = adjust_cluster_record(
+				m_index->m_srv_index, rec, m_offsets,
+				deleted);
+
+			if (err != DB_SUCCESS) {
+				return(err);
+			}
+		}
+
+		/* If it is a delete marked record then try an
+		optimistic delete. */
+
+		if (deleted) {
+			/* A successful purge will move the cursor to the
+			next record. */
+
+			if (!purge(m_offsets)) {
+				m_rec_iter.next();
+			}
+
+			++m_index->m_stats.m_n_deleted;
+		} else {
+			++m_index->m_stats.m_n_rows;
+			m_rec_iter.next();
+		}
+	}
+
+	return(DB_SUCCESS);
+}
+
+/**
+Update the space, index id, trx id.
+@param block - index page to convert
+@return DB_SUCCESS or error code */
+dberr_t
+PageConverter::update_index_page(
+	buf_block_t* block) UNIV_NOTHROW
+{
+	index_id_t id;
+	buf_frame_t* page = block->frame;
+
+	if (is_free(buf_block_get_page_no(block))) {
+		/* Free pages need no conversion. */
+		return(DB_SUCCESS);
+	} else if ((id = btr_page_get_index_id(page)) != m_index->m_id) {
+
+		row_index_t* index = find_index(id);
+
+		if (index == 0) {
+			/* Unknown index id: treat the page as corrupt
+			(unless the .cfg is missing, see below). */
+			m_index = 0;
+			return(DB_CORRUPTION);
+		}
+
+		/* Update current index */
+		m_index = index;
+	}
+
+	/* If the .cfg file is missing and there is an index mismatch
+	then ignore the error. */
+	if (m_cfg->m_missing && (m_index == 0 || m_index->m_srv_index == 0)) {
+		return(DB_SUCCESS);
+	}
+
+#ifdef UNIV_ZIP_DEBUG
+	ut_a(!is_compressed_table()
+	     || page_zip_validate(m_page_zip_ptr, page, m_index->m_srv_index));
+#endif /* UNIV_ZIP_DEBUG */
+
+	/* This has to be written to uncompressed index header. Set it to
+	the current index id. */
+	btr_page_set_index_id(
+		page, m_page_zip_ptr, m_index->m_srv_index->id, 0);
+
+	page_set_max_trx_id(block, m_page_zip_ptr, m_trx->id, 0);
+
+	if (page_get_n_recs(block->frame) == 0) {
+
+		/* Only a root page can be empty. */
+		if (!is_root_page(block->frame)) {
+			// TODO: We should relax this and skip secondary
+			// indexes. Mark them as corrupt because they can
+			// always be rebuilt.
+			return(DB_CORRUPTION);
+		}
+
+		return(DB_SUCCESS);
+	}
+
+	return(update_records(block));
+}
+
/**
Validate the space flags and update tablespace header page.
@param block - block read from file, not from the buffer pool.
@retval DB_SUCCESS or error code */
dberr_t
PageConverter::update_header(
	buf_block_t*	block) UNIV_NOTHROW
{
	/* Check for valid header */
	switch(fsp_header_get_space_id(get_frame(block))) {
	case 0:
		/* Space id 0 is reserved; a header claiming it is bogus. */
		return(DB_CORRUPTION);
	case ULINT_UNDEFINED:
		/* Not fatal: warn and continue with the import. */
		ib_logf(IB_LOG_LEVEL_WARN,
			"Space id check in the header failed "
			"- ignored");
	}

	ulint	space_flags = fsp_header_get_flags(get_frame(block));

	if (!fsp_flags_is_valid(space_flags)) {

		ib_logf(IB_LOG_LEVEL_ERROR,
			"Unsupported tablespace format %lu",
			(ulong) space_flags);

		return(DB_UNSUPPORTED);
	}

	/* Stamp the current LSN into the header's flush LSN field. */
	mach_write_to_8(
		get_frame(block) + FIL_PAGE_FILE_FLUSH_LSN, m_current_lsn);

	/* Write space_id to the tablespace header, page 0. */
	mach_write_to_4(
		get_frame(block) + FSP_HEADER_OFFSET + FSP_SPACE_ID,
		get_space_id());

	/* This is on every page in the tablespace. */
	mach_write_to_4(
		get_frame(block) + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID,
		get_space_id());

	return(DB_SUCCESS);
}
+
/**
Update the page, set the space id, max trx id and index id.
@param block - block read from file
@param page_type - out: type of the page, as read from the frame
@retval DB_SUCCESS or error code */
dberr_t
PageConverter::update_page(
	buf_block_t*	block,
	ulint&		page_type) UNIV_NOTHROW
{
	dberr_t		err = DB_SUCCESS;

	/* Note: page_type is always assigned here before any return,
	so the caller may rely on it after a successful call. */
	switch (page_type = fil_page_get_type(get_frame(block))) {
	case FIL_PAGE_TYPE_FSP_HDR:
		/* Work directly on the uncompressed page headers. */
		ut_a(buf_block_get_page_no(block) == 0);
		return(update_header(block));

	case FIL_PAGE_INDEX:
		/* We need to decompress the contents into block->frame
		before we can do any thing with Btree pages. */

		if (is_compressed_table() && !buf_zip_decompress(block, TRUE)) {
			return(DB_CORRUPTION);
		}

		/* This is on every page in the tablespace. */
		mach_write_to_4(
			get_frame(block)
			+ FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID, get_space_id());

		/* Only update the Btree nodes. */
		return(update_index_page(block));

	case FIL_PAGE_TYPE_SYS:
		/* This is page 0 in the system tablespace. */
		return(DB_CORRUPTION);

	case FIL_PAGE_TYPE_XDES:
		err = set_current_xdes(
			buf_block_get_page_no(block), get_frame(block));
		/* Fall through: the XDES page also needs its space id
		rewritten below; err carries any failure forward. */
	case FIL_PAGE_INODE:
	case FIL_PAGE_TYPE_TRX_SYS:
	case FIL_PAGE_IBUF_FREE_LIST:
	case FIL_PAGE_TYPE_ALLOCATED:
	case FIL_PAGE_IBUF_BITMAP:
	case FIL_PAGE_TYPE_BLOB:
	case FIL_PAGE_TYPE_ZBLOB:
	case FIL_PAGE_TYPE_ZBLOB2:

		/* Work directly on the uncompressed page headers. */
		/* This is on every page in the tablespace. */
		mach_write_to_4(
			get_frame(block)
			+ FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID, get_space_id());

		return(err);
	}

	ib_logf(IB_LOG_LEVEL_WARN, "Unknown page type (%lu)", page_type);

	return(DB_CORRUPTION);
}
+
/**
Validate the page
@param offset - physical offset within file.
@param block - block read from file (page - page read from file).
@return status */
PageConverter::import_page_status_t
PageConverter::validate(
	os_offset_t	offset,
	buf_block_t*	block) UNIV_NOTHROW
{
	buf_frame_t*	page = get_frame(block);

	/* Check that the page number corresponds to the offset in
	the file. Flag as corrupt if it doesn't. Disable the check
	for LSN in buf_page_is_corrupted() */

	if (buf_page_is_corrupted(false, page, get_zip_size())
	    || (page_get_page_no(page) != offset / m_page_size
		&& page_get_page_no(page) != 0)) {

		return(IMPORT_PAGE_STATUS_CORRUPTED);

	} else if (offset > 0 && page_get_page_no(page) == 0) {
		const byte*	b = page;
		const byte*	e = b + m_page_size;

		/* If the page number is zero and offset > 0 then
		the entire page MUST consist of zeroes. If not then
		we flag it as corrupt. */

		while (b != e) {

			/* trigger_corruption() appears to be a debug
			hook to inject corruption - NOTE(review):
			confirm against its definition. */
			if (*b++ && !trigger_corruption()) {
				return(IMPORT_PAGE_STATUS_CORRUPTED);
			}
		}

		/* The page is all zero: do nothing. */
		return(IMPORT_PAGE_STATUS_ALL_ZERO);
	}

	return(IMPORT_PAGE_STATUS_OK);
}
+
/**
Called for every page in the tablespace. If the page was not
updated then its state must be set to BUF_PAGE_NOT_USED.
@param offset - physical offset within the file
@param block - block read from file, note it is not from the buffer pool
@retval DB_SUCCESS or error code. */
dberr_t
PageConverter::operator() (
	os_offset_t	offset,
	buf_block_t*	block) UNIV_NOTHROW
{
	/* Assigned by update_page() before it is read below. */
	ulint		page_type;
	dberr_t		err = DB_SUCCESS;

	if ((err = periodic_check()) != DB_SUCCESS) {
		return(err);
	}

	if (is_compressed_table()) {
		m_page_zip_ptr = &block->page.zip;
	} else {
		ut_ad(m_page_zip_ptr == 0);
	}

	switch(validate(offset, block)) {
	case IMPORT_PAGE_STATUS_OK:

		/* We have to decompress the compressed pages before
		we can work on them */

		if ((err = update_page(block, page_type)) != DB_SUCCESS) {
			return(err);
		}

		/* Note: For compressed pages this function will write to the
		zip descriptor and for uncompressed pages it will write to
		page (ie. the block->frame). Therefore the caller should write
		out the descriptor contents and not block->frame for compressed
		pages. */

		if (!is_compressed_table() || page_type == FIL_PAGE_INDEX) {

			buf_flush_init_for_writing(
				!is_compressed_table()
				? block->frame : block->page.zip.data,
				!is_compressed_table() ? 0 : m_page_zip_ptr,
				m_current_lsn);
		} else {
			/* Calculate and update the checksum of non-btree
			pages for compressed tables explicitly here. */

			buf_flush_update_zip_checksum(
				get_frame(block), get_zip_size(),
				m_current_lsn);
		}

		break;

	case IMPORT_PAGE_STATUS_ALL_ZERO:
		/* The page is all zero: leave it as is. */
		break;

	case IMPORT_PAGE_STATUS_CORRUPTED:

		ib_logf(IB_LOG_LEVEL_WARN,
			"%s: Page %lu at offset " UINT64PF " looks corrupted.",
			m_filepath, (ulong) (offset / m_page_size), offset);

		return(DB_CORRUPTION);
	}

	return(err);
}
+
/*****************************************************************//**
Clean up after import tablespace failure, this function will acquire
the dictionary latches on behalf of the transaction if the transaction
hasn't already acquired them. */
static	__attribute__((nonnull))
void
row_import_discard_changes(
/*=======================*/
	row_prebuilt_t*	prebuilt,	/*!< in/out: prebuilt from handler */
	trx_t*		trx,		/*!< in/out: transaction for import */
	dberr_t		err)		/*!< in: error code */
{
	dict_table_t*	table = prebuilt->table;

	/* Only called on failure paths. */
	ut_a(err != DB_SUCCESS);

	prebuilt->trx->error_info = NULL;

	char	table_name[MAX_FULL_NAME_LEN + 1];

	innobase_format_name(
		table_name, sizeof(table_name),
		prebuilt->table->name, FALSE);

	ib_logf(IB_LOG_LEVEL_INFO,
		"Discarding tablespace of table %s: %s",
		table_name, ut_strerr(err));

	/* Take the dictionary X latch if the caller does not hold it;
	either way we must end up holding it below. */
	if (trx->dict_operation_lock_mode != RW_X_LATCH) {
		ut_a(trx->dict_operation_lock_mode == 0);
		row_mysql_lock_data_dictionary(trx);
	}

	ut_a(trx->dict_operation_lock_mode == RW_X_LATCH);

	/* Since we update the index root page numbers on disk after
	we've done a successful import. The table will not be loadable.
	However, we need to ensure that the in memory root page numbers
	are reset to "NULL". */

	for (dict_index_t* index = UT_LIST_GET_FIRST(table->indexes);
		index != 0;
		index = UT_LIST_GET_NEXT(indexes, index)) {

		index->page = FIL_NULL;
		index->space = FIL_NULL;
	}

	/* Mark the tablespace as gone; it will be closed below. */
	table->ibd_file_missing = TRUE;

	fil_close_tablespace(trx, table->space);
}
+
/*****************************************************************//**
Clean up after import tablespace. Discards changes on failure, then
commits the dictionary transaction, releases the dictionary latch,
frees the transaction and forces a log checkpoint.
@return the error code that was passed in */
static	__attribute__((nonnull, warn_unused_result))
dberr_t
row_import_cleanup(
/*===============*/
	row_prebuilt_t*	prebuilt,	/*!< in/out: prebuilt from handler */
	trx_t*		trx,		/*!< in/out: transaction for import */
	dberr_t		err)		/*!< in: error code */
{
	/* The import uses its own transaction, distinct from the
	user transaction in prebuilt. */
	ut_a(prebuilt->trx != trx);

	if (err != DB_SUCCESS) {
		row_import_discard_changes(prebuilt, trx, err);
	}

	ut_a(trx->dict_operation_lock_mode == RW_X_LATCH);

	DBUG_EXECUTE_IF("ib_import_before_commit_crash", DBUG_SUICIDE(););

	trx_commit_for_mysql(trx);

	row_mysql_unlock_data_dictionary(trx);

	trx_free_for_mysql(trx);

	prebuilt->trx->op_info = "";

	DBUG_EXECUTE_IF("ib_import_before_checkpoint_crash", DBUG_SUICIDE(););

	/* Make sure the import is durable before returning. */
	log_make_checkpoint_at(IB_ULONGLONG_MAX, TRUE);

	return(err);
}
+
+/*****************************************************************//**
+Report error during tablespace import. */
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
+row_import_error(
+/*=============*/
+ row_prebuilt_t* prebuilt, /*!< in/out: prebuilt from handler */
+ trx_t* trx, /*!< in/out: transaction for import */
+ dberr_t err) /*!< in: error code */
+{
+ if (!trx_is_interrupted(trx)) {
+ char table_name[MAX_FULL_NAME_LEN + 1];
+
+ innobase_format_name(
+ table_name, sizeof(table_name),
+ prebuilt->table->name, FALSE);
+
+ ib_senderrf(
+ trx->mysql_thd, IB_LOG_LEVEL_WARN,
+ ER_INNODB_IMPORT_ERROR,
+ table_name, (ulong) err, ut_strerr(err));
+ }
+
+ return(row_import_cleanup(prebuilt, trx, err));
+}
+
/*****************************************************************//**
Adjust the root page index node and leaf node segment headers, update
with the new space id. For all the table's secondary indexes. Indexes
that fail adjustment or purge validation are marked DICT_CORRUPT but
the import continues so that the data can be recovered.
@return error code */
static	__attribute__((nonnull, warn_unused_result))
dberr_t
row_import_adjust_root_pages_of_secondary_indexes(
/*==============================================*/
	row_prebuilt_t*		prebuilt,	/*!< in/out: prebuilt from
						handler */
	trx_t*			trx,		/*!< in: transaction used for
						the import */
	dict_table_t*		table,		/*!< in: table the indexes
						belong to */
	const row_import&	cfg)		/*!< Import context */
{
	dict_index_t*		index;
	ulint			n_rows_in_table;
	dberr_t			err = DB_SUCCESS;

	/* Skip the clustered index. */
	index = dict_table_get_first_index(table);

	/* The clustered index row count is the reference value that
	every secondary index must match after purge. */
	n_rows_in_table = cfg.get_n_rows(index->name);

	DBUG_EXECUTE_IF("ib_import_sec_rec_count_mismatch_failure",
			n_rows_in_table++;);

	/* Adjust the root pages of the secondary indexes only. */
	while ((index = dict_table_get_next_index(index)) != NULL) {
		char		index_name[MAX_FULL_NAME_LEN + 1];

		innobase_format_name(
			index_name, sizeof(index_name), index->name, TRUE);

		ut_a(!dict_index_is_clust(index));

		if (!(index->type & DICT_CORRUPT)
		    && index->space != FIL_NULL
		    && index->page != FIL_NULL) {

			/* Update the Btree segment headers for index node and
			leaf nodes in the root page. Set the new space id. */

			err = btr_root_adjust_on_import(index);
		} else {
			ib_logf(IB_LOG_LEVEL_WARN,
				"Skip adjustment of root pages for "
				"index %s.", index->name);

			err = DB_CORRUPTION;
		}

		if (err != DB_SUCCESS) {

			/* A corrupt clustered index is fatal; bail out. */
			if (index->type & DICT_CLUSTERED) {
				break;
			}

			ib_errf(trx->mysql_thd,
				IB_LOG_LEVEL_WARN,
				ER_INNODB_INDEX_CORRUPT,
				"Index '%s' not found or corrupt, "
				"you should recreate this index.",
				index_name);

			/* Do not bail out, so that the data
			can be recovered. */

			err = DB_SUCCESS;
			index->type |= DICT_CORRUPT;
			continue;
		}

		/* If we failed to purge any records in the index then
		do it the hard way.

		TODO: We can do this in the first pass by generating UNDO log
		records for the failed rows. */

		if (!cfg.requires_purge(index->name)) {
			continue;
		}

		IndexPurge   purge(trx, index);

		trx->op_info = "secondary: purge delete marked records";

		err = purge.garbage_collect();

		trx->op_info = "";

		if (err != DB_SUCCESS) {
			break;
		} else if (purge.get_n_rows() != n_rows_in_table) {

			/* Row count mismatch with the clustered index:
			flag the index corrupt but keep going. */
			ib_errf(trx->mysql_thd,
				IB_LOG_LEVEL_WARN,
				ER_INNODB_INDEX_CORRUPT,
				"Index '%s' contains %lu entries, "
				"should be %lu, you should recreate "
				"this index.", index_name,
				(ulong) purge.get_n_rows(),
				(ulong) n_rows_in_table);

			index->type |= DICT_CORRUPT;

			/* Do not bail out, so that the data
			can be recovered. */

			err = DB_SUCCESS;
		}
	}

	return(err);
}
+
/*****************************************************************//**
Ensure that dict_sys->row_id exceeds SELECT MAX(DB_ROW_ID). Reads the
last user record of the clustered index (without redo logging) and, if
its DB_ROW_ID is >= the current system row id, bumps and flushes it.
@return error code */
static	__attribute__((nonnull, warn_unused_result))
dberr_t
row_import_set_sys_max_row_id(
/*==========================*/
	row_prebuilt_t*		prebuilt,	/*!< in/out: prebuilt from
						handler */
	const dict_table_t*	table)		/*!< in: table to import */
{
	dberr_t			err;
	const rec_t*		rec;
	mtr_t			mtr;
	btr_pcur_t		pcur;
	row_id_t		row_id	= 0;
	dict_index_t*		index;

	index = dict_table_get_first_index(table);
	ut_a(dict_index_is_clust(index));

	mtr_start(&mtr);

	/* Read-only scan: no redo needed. */
	mtr_set_log_mode(&mtr, MTR_LOG_NO_REDO);

	btr_pcur_open_at_index_side(
		false,		// High end
		index,
		BTR_SEARCH_LEAF,
		&pcur,
		true,		// Init cursor
		0,		// Leaf level
		&mtr);

	/* Step back from the supremum to the last user record. */
	btr_pcur_move_to_prev_on_page(&pcur);
	rec = btr_pcur_get_rec(&pcur);

	/* Check for empty table. */
	if (!page_rec_is_infimum(rec)) {
		ulint		len;
		const byte*	field;
		mem_heap_t*	heap = NULL;
		ulint		offsets_[1 + REC_OFFS_HEADER_SIZE];
		ulint*		offsets;

		rec_offs_init(offsets_);

		offsets = rec_get_offsets(
			rec, index, offsets_, ULINT_UNDEFINED, &heap);

		field = rec_get_nth_field(
			rec, offsets,
			dict_index_get_sys_col_pos(index, DATA_ROW_ID),
			&len);

		/* DB_ROW_ID is stored in 6 bytes; any other length
		means the record is corrupt. */
		if (len == DATA_ROW_ID_LEN) {
			row_id = mach_read_from_6(field);
			err = DB_SUCCESS;
		} else {
			err = DB_CORRUPTION;
		}

		if (heap != NULL) {
			mem_heap_free(heap);
		}
	} else {
		/* The table is empty. */
		err = DB_SUCCESS;
	}

	btr_pcur_close(&pcur);
	mtr_commit(&mtr);

	DBUG_EXECUTE_IF("ib_import_set_max_rowid_failure",
			err = DB_CORRUPTION;);

	if (err != DB_SUCCESS) {
		char		index_name[MAX_FULL_NAME_LEN + 1];

		innobase_format_name(
			index_name, sizeof(index_name), index->name, TRUE);

		ib_errf(prebuilt->trx->mysql_thd,
			IB_LOG_LEVEL_WARN,
			ER_INNODB_INDEX_CORRUPT,
			"Index '%s' corruption detected, invalid DB_ROW_ID "
			"in index.", index_name);

		return(err);

	} else if (row_id > 0) {

		/* Update the system row id if the imported index row id is
		greater than the max system row id. */

		mutex_enter(&dict_sys->mutex);

		if (row_id >= dict_sys->row_id) {
			dict_sys->row_id = row_id + 1;
			dict_hdr_flush_row_id();
		}

		mutex_exit(&dict_sys->mutex);
	}

	return(DB_SUCCESS);
}
+
+/*****************************************************************//**
+Read the a string from the meta data file.
+@return DB_SUCCESS or error code. */
+static
+dberr_t
+row_import_cfg_read_string(
+/*=======================*/
+ FILE* file, /*!< in/out: File to read from */
+ byte* ptr, /*!< out: string to read */
+ ulint max_len) /*!< in: maximum length of the output
+ buffer in bytes */
+{
+ DBUG_EXECUTE_IF("ib_import_string_read_error",
+ errno = EINVAL; return(DB_IO_ERROR););
+
+ ulint len = 0;
+
+ while (!feof(file)) {
+ int ch = fgetc(file);
+
+ if (ch == EOF) {
+ break;
+ } else if (ch != 0) {
+ if (len < max_len) {
+ ptr[len++] = ch;
+ } else {
+ break;
+ }
+ /* max_len includes the NUL byte */
+ } else if (len != max_len - 1) {
+ break;
+ } else {
+ ptr[len] = 0;
+ return(DB_SUCCESS);
+ }
+ }
+
+ errno = EINVAL;
+
+ return(DB_IO_ERROR);
+}
+
/*********************************************************************//**
Read the meta data (index user fields) from the config file. Allocates
index->m_fields and fills in each field's prefix length, fixed length
and name.
@return DB_SUCCESS or error code. */
static	__attribute__((nonnull, warn_unused_result))
dberr_t
row_import_cfg_read_index_fields(
/*=============================*/
	FILE*		file,	/*!< in: file to read from */
	THD*		thd,	/*!< in/out: session */
	row_index_t*	index,	/*!< Index being read in */
	row_import*	cfg)	/*!< in/out: meta-data read */
{
	/* Each field row: prefix_len, fixed_len, name length. */
	byte			row[sizeof(ib_uint32_t) * 3];
	ulint			n_fields = index->m_n_fields;

	index->m_fields = new(std::nothrow) dict_field_t[n_fields];

	/* Trigger OOM */
	DBUG_EXECUTE_IF("ib_import_OOM_4",
			delete [] index->m_fields; index->m_fields = 0;);

	if (index->m_fields == 0) {
		return(DB_OUT_OF_MEMORY);
	}

	dict_field_t*	field = index->m_fields;

	memset(field, 0x0, sizeof(*field) * n_fields);

	for (ulint i = 0; i < n_fields; ++i, ++field) {
		byte*		ptr = row;

		/* Trigger EOF */
		DBUG_EXECUTE_IF("ib_import_io_read_error_1",
				(void) fseek(file, 0L, SEEK_END););

		if (fread(row, 1, sizeof(row), file) != sizeof(row)) {

			ib_senderrf(
				thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR,
				errno, strerror(errno),
				"while reading index fields.");

			return(DB_IO_ERROR);
		}

		field->prefix_len = mach_read_from_4(ptr);
		ptr += sizeof(ib_uint32_t);

		field->fixed_len = mach_read_from_4(ptr);
		ptr += sizeof(ib_uint32_t);

		/* Include the NUL byte in the length. */
		ulint	len = mach_read_from_4(ptr);

		byte*	name = new(std::nothrow) byte[len];

		/* Trigger OOM */
		DBUG_EXECUTE_IF("ib_import_OOM_5", delete [] name; name = 0;);

		if (name == 0) {
			return(DB_OUT_OF_MEMORY);
		}

		/* Ownership of the name buffer passes to the field. */
		field->name = reinterpret_cast<const char*>(name);

		dberr_t	err = row_import_cfg_read_string(file, name, len);

		if (err != DB_SUCCESS) {

			ib_senderrf(
				thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR,
				errno, strerror(errno),
				"while parsing table name.");

			return(err);
		}
	}

	return(DB_SUCCESS);
}
+
/*****************************************************************//**
Read the index names and root page numbers of the indexes and set the
values. Row format [id, space, page_no, type, trx_id_offset,
n_user_defined_cols, n_uniq, n_nullable, n_fields, len of name, name,
fields...].
@return DB_SUCCESS or error code. */
static	__attribute__((nonnull, warn_unused_result))
dberr_t
row_import_read_index_data(
/*=======================*/
	FILE*		file,	/*!< in: File to read from */
	THD*		thd,	/*!< in: session */
	row_import*	cfg)	/*!< in/out: meta-data read */
{
	byte*		ptr;
	row_index_t*	cfg_index;
	byte		row[sizeof(index_id_t) + sizeof(ib_uint32_t) * 9];

	/* FIXME: What is the max value? */
	ut_a(cfg->m_n_indexes > 0);
	ut_a(cfg->m_n_indexes < 1024);

	cfg->m_indexes = new(std::nothrow) row_index_t[cfg->m_n_indexes];

	/* Trigger OOM */
	DBUG_EXECUTE_IF("ib_import_OOM_6",
			delete [] cfg->m_indexes; cfg->m_indexes = 0;);

	if (cfg->m_indexes == 0) {
		return(DB_OUT_OF_MEMORY);
	}

	memset(cfg->m_indexes, 0x0, sizeof(*cfg->m_indexes) * cfg->m_n_indexes);

	cfg_index = cfg->m_indexes;

	for (ulint i = 0; i < cfg->m_n_indexes; ++i, ++cfg_index) {
		/* Trigger EOF */
		DBUG_EXECUTE_IF("ib_import_io_read_error_2",
				(void) fseek(file, 0L, SEEK_END););

		/* Read the index data. */
		size_t	n_bytes = fread(row, 1, sizeof(row), file);

		/* Trigger EOF */
		DBUG_EXECUTE_IF("ib_import_io_read_error",
				(void) fseek(file, 0L, SEEK_END););

		if (n_bytes != sizeof(row)) {
			char	msg[BUFSIZ];

			ut_snprintf(msg, sizeof(msg),
				    "while reading index meta-data, expected "
				    "to read %lu bytes but read only %lu "
				    "bytes",
				    (ulong) sizeof(row), (ulong) n_bytes);

			ib_senderrf(
				thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR,
				errno, strerror(errno), msg);

			ib_logf(IB_LOG_LEVEL_ERROR, "IO Error: %s", msg);

			return(DB_IO_ERROR);
		}

		ptr = row;

		cfg_index->m_id = mach_read_from_8(ptr);
		ptr += sizeof(index_id_t);

		cfg_index->m_space = mach_read_from_4(ptr);
		ptr += sizeof(ib_uint32_t);

		cfg_index->m_page_no = mach_read_from_4(ptr);
		ptr += sizeof(ib_uint32_t);

		cfg_index->m_type = mach_read_from_4(ptr);
		ptr += sizeof(ib_uint32_t);

		cfg_index->m_trx_id_offset = mach_read_from_4(ptr);
		/* Detect truncation when m_trx_id_offset is narrower
		than 32 bits: re-read and compare. */
		if (cfg_index->m_trx_id_offset != mach_read_from_4(ptr)) {
			ut_ad(0);
			/* Overflow. Pretend that the clustered index
			has a variable-length PRIMARY KEY. */
			cfg_index->m_trx_id_offset = 0;
		}
		ptr += sizeof(ib_uint32_t);

		cfg_index->m_n_user_defined_cols = mach_read_from_4(ptr);
		ptr += sizeof(ib_uint32_t);

		cfg_index->m_n_uniq = mach_read_from_4(ptr);
		ptr += sizeof(ib_uint32_t);

		cfg_index->m_n_nullable = mach_read_from_4(ptr);
		ptr += sizeof(ib_uint32_t);

		cfg_index->m_n_fields = mach_read_from_4(ptr);
		ptr += sizeof(ib_uint32_t);

		/* The NUL byte is included in the name length. */
		ulint	len = mach_read_from_4(ptr);

		if (len > OS_FILE_MAX_PATH) {
			ib_errf(thd, IB_LOG_LEVEL_ERROR,
				ER_INNODB_INDEX_CORRUPT,
				"Index name length (%lu) is too long, "
				"the meta-data is corrupt", len);

			return(DB_CORRUPTION);
		}

		cfg_index->m_name = new(std::nothrow) byte[len];

		/* Trigger OOM */
		DBUG_EXECUTE_IF("ib_import_OOM_7",
				delete [] cfg_index->m_name;
				cfg_index->m_name = 0;);

		if (cfg_index->m_name == 0) {
			return(DB_OUT_OF_MEMORY);
		}

		dberr_t	err;

		err = row_import_cfg_read_string(file, cfg_index->m_name, len);

		if (err != DB_SUCCESS) {

			ib_senderrf(
				thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR,
				errno, strerror(errno),
				"while parsing index name.");

			return(err);
		}

		err = row_import_cfg_read_index_fields(
			file, thd, cfg_index, cfg);

		if (err != DB_SUCCESS) {
			return(err);
		}

	}

	return(DB_SUCCESS);
}
+
/*****************************************************************//**
Read the number of indexes, sanity check it, then read the per-index
meta data (v1 format).
@return DB_SUCCESS or error code. */
static
dberr_t
row_import_read_indexes(
/*====================*/
	FILE*		file,	/*!< in: File to read from */
	THD*		thd,	/*!< in: session */
	row_import*	cfg)	/*!< in/out: meta-data read */
{
	byte		row[sizeof(ib_uint32_t)];

	/* Trigger EOF */
	DBUG_EXECUTE_IF("ib_import_io_read_error_3",
			(void) fseek(file, 0L, SEEK_END););

	/* Read the number of indexes. */
	if (fread(row, 1, sizeof(row), file) != sizeof(row)) {
		ib_senderrf(
			thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR,
			errno, strerror(errno),
			"while reading number of indexes.");

		return(DB_IO_ERROR);
	}

	cfg->m_n_indexes = mach_read_from_4(row);

	if (cfg->m_n_indexes == 0) {
		ib_errf(thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR,
			"Number of indexes in meta-data file is 0");

		return(DB_CORRUPTION);

	} else if (cfg->m_n_indexes > 1024) {
		// FIXME: What is the upper limit?
		ib_errf(thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR,
			"Number of indexes in meta-data file is too high: %lu",
			(ulong) cfg->m_n_indexes);
		cfg->m_n_indexes = 0;

		return(DB_CORRUPTION);
	}

	return(row_import_read_index_data(file, thd, cfg));
}
+
+/*********************************************************************//**
+Read the meta data (table columns) config file. Deserialise the contents of
+dict_col_t structure, along with the column name. */
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
+row_import_read_columns(
+/*====================*/
+ FILE* file, /*!< in: file to write to */
+ THD* thd, /*!< in/out: session */
+ row_import* cfg) /*!< in/out: meta-data read */
+{
+ dict_col_t* col;
+ byte row[sizeof(ib_uint32_t) * 8];
+
+ /* FIXME: What should the upper limit be? */
+ ut_a(cfg->m_n_cols > 0);
+ ut_a(cfg->m_n_cols < 1024);
+
+ cfg->m_cols = new(std::nothrow) dict_col_t[cfg->m_n_cols];
+
+ /* Trigger OOM */
+ DBUG_EXECUTE_IF("ib_import_OOM_8",
+ delete [] cfg->m_cols; cfg->m_cols = 0;);
+
+ if (cfg->m_cols == 0) {
+ return(DB_OUT_OF_MEMORY);
+ }
+
+ cfg->m_col_names = new(std::nothrow) byte* [cfg->m_n_cols];
+
+ /* Trigger OOM */
+ DBUG_EXECUTE_IF("ib_import_OOM_9",
+ delete [] cfg->m_col_names; cfg->m_col_names = 0;);
+
+ if (cfg->m_col_names == 0) {
+ return(DB_OUT_OF_MEMORY);
+ }
+
+ memset(cfg->m_cols, 0x0, sizeof(cfg->m_cols) * cfg->m_n_cols);
+ memset(cfg->m_col_names, 0x0, sizeof(cfg->m_col_names) * cfg->m_n_cols);
+
+ col = cfg->m_cols;
+
+ for (ulint i = 0; i < cfg->m_n_cols; ++i, ++col) {
+ byte* ptr = row;
+
+ /* Trigger EOF */
+ DBUG_EXECUTE_IF("ib_import_io_read_error_4",
+ (void) fseek(file, 0L, SEEK_END););
+
+ if (fread(row, 1, sizeof(row), file) != sizeof(row)) {
+ ib_senderrf(
+ thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR,
+ errno, strerror(errno),
+ "while reading table column meta-data.");
+
+ return(DB_IO_ERROR);
+ }
+
+ col->prtype = mach_read_from_4(ptr);
+ ptr += sizeof(ib_uint32_t);
+
+ col->mtype = mach_read_from_4(ptr);
+ ptr += sizeof(ib_uint32_t);
+
+ col->len = mach_read_from_4(ptr);
+ ptr += sizeof(ib_uint32_t);
+
+ col->mbminmaxlen = mach_read_from_4(ptr);
+ ptr += sizeof(ib_uint32_t);
+
+ col->ind = mach_read_from_4(ptr);
+ ptr += sizeof(ib_uint32_t);
+
+ col->ord_part = mach_read_from_4(ptr);
+ ptr += sizeof(ib_uint32_t);
+
+ col->max_prefix = mach_read_from_4(ptr);
+ ptr += sizeof(ib_uint32_t);
+
+ /* Read in the column name as [len, byte array]. The len
+ includes the NUL byte. */
+
+ ulint len = mach_read_from_4(ptr);
+
+ /* FIXME: What is the maximum column name length? */
+ if (len == 0 || len > 128) {
+ ib_errf(thd, IB_LOG_LEVEL_ERROR,
+ ER_IO_READ_ERROR,
+ "Column name length %lu, is invalid",
+ (ulong) len);
+
+ return(DB_CORRUPTION);
+ }
+
+ cfg->m_col_names[i] = new(std::nothrow) byte[len];
+
+ /* Trigger OOM */
+ DBUG_EXECUTE_IF("ib_import_OOM_10",
+ delete [] cfg->m_col_names[i];
+ cfg->m_col_names[i] = 0;);
+
+ if (cfg->m_col_names[i] == 0) {
+ return(DB_OUT_OF_MEMORY);
+ }
+
+ dberr_t err;
+
+ err = row_import_cfg_read_string(
+ file, cfg->m_col_names[i], len);
+
+ if (err != DB_SUCCESS) {
+
+ ib_senderrf(
+ thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR,
+ errno, strerror(errno),
+ "while parsing table column name.");
+
+ return(err);
+ }
+ }
+
+ return(DB_SUCCESS);
+}
+
/*****************************************************************//**
Read the contents of the <tablespace>.cfg file, version 1 layout:
hostname, table name, autoinc value, page size, tablespace flags,
number of columns, columns, indexes.
@return DB_SUCCESS or error code. */
static	__attribute__((nonnull, warn_unused_result))
dberr_t
row_import_read_v1(
/*===============*/
	FILE*		file,		/*!< in: File to read from */
	THD*		thd,		/*!< in: session */
	row_import*	cfg)		/*!< out: meta data */
{
	byte		value[sizeof(ib_uint32_t)];

	/* Trigger EOF */
	DBUG_EXECUTE_IF("ib_import_io_read_error_5",
			(void) fseek(file, 0L, SEEK_END););

	/* Read the hostname where the tablespace was exported. */
	if (fread(value, 1, sizeof(value), file) != sizeof(value)) {
		ib_senderrf(
			thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR,
			errno, strerror(errno),
			"while reading meta-data export hostname length.");

		return(DB_IO_ERROR);
	}

	ulint	len = mach_read_from_4(value);

	/* NUL byte is part of name length. */
	cfg->m_hostname = new(std::nothrow) byte[len];

	/* Trigger OOM */
	DBUG_EXECUTE_IF("ib_import_OOM_1",
			delete [] cfg->m_hostname; cfg->m_hostname = 0;);

	if (cfg->m_hostname == 0) {
		return(DB_OUT_OF_MEMORY);
	}

	dberr_t	err = row_import_cfg_read_string(file, cfg->m_hostname, len);

	if (err != DB_SUCCESS) {

		ib_senderrf(
			thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR,
			errno, strerror(errno),
			"while parsing export hostname.");

		return(err);
	}

	/* Trigger EOF */
	DBUG_EXECUTE_IF("ib_import_io_read_error_6",
			(void) fseek(file, 0L, SEEK_END););

	/* Read the table name of tablespace that was exported. */
	if (fread(value, 1, sizeof(value), file) != sizeof(value)) {
		ib_senderrf(
			thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR,
			errno, strerror(errno),
			"while reading meta-data table name length.");

		return(DB_IO_ERROR);
	}

	len = mach_read_from_4(value);

	/* NUL byte is part of name length. */
	cfg->m_table_name = new(std::nothrow) byte[len];

	/* Trigger OOM */
	DBUG_EXECUTE_IF("ib_import_OOM_2",
			delete [] cfg->m_table_name; cfg->m_table_name = 0;);

	if (cfg->m_table_name == 0) {
		return(DB_OUT_OF_MEMORY);
	}

	err = row_import_cfg_read_string(file, cfg->m_table_name, len);

	if (err != DB_SUCCESS) {
		ib_senderrf(
			thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR,
			errno, strerror(errno),
			"while parsing table name.");

		return(err);
	}

	ib_logf(IB_LOG_LEVEL_INFO,
		"Importing tablespace for table '%s' that was exported "
		"from host '%s'", cfg->m_table_name, cfg->m_hostname);

	/* row is reused first for the 8 byte autoinc value, then
	for the 3 x 4 byte header (page size, flags, n_cols). */
	byte		row[sizeof(ib_uint32_t) * 3];

	/* Trigger EOF */
	DBUG_EXECUTE_IF("ib_import_io_read_error_7",
			(void) fseek(file, 0L, SEEK_END););

	/* Read the autoinc value. */
	if (fread(row, 1, sizeof(ib_uint64_t), file) != sizeof(ib_uint64_t)) {
		ib_senderrf(
			thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR,
			errno, strerror(errno),
			"while reading autoinc value.");

		return(DB_IO_ERROR);
	}

	cfg->m_autoinc = mach_read_from_8(row);

	/* Trigger EOF */
	DBUG_EXECUTE_IF("ib_import_io_read_error_8",
			(void) fseek(file, 0L, SEEK_END););

	/* Read the tablespace page size. */
	if (fread(row, 1, sizeof(row), file) != sizeof(row)) {
		ib_senderrf(
			thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR,
			errno, strerror(errno),
			"while reading meta-data header.");

		return(DB_IO_ERROR);
	}

	byte*		ptr = row;

	cfg->m_page_size = mach_read_from_4(ptr);
	ptr += sizeof(ib_uint32_t);

	/* The exporting and importing server must use the same
	page size. */
	if (cfg->m_page_size != UNIV_PAGE_SIZE) {

		ib_errf(thd, IB_LOG_LEVEL_ERROR, ER_TABLE_SCHEMA_MISMATCH,
			"Tablespace to be imported has a different "
			"page size than this server. Server page size "
			"is %lu, whereas tablespace page size is %lu",
			UNIV_PAGE_SIZE, (ulong) cfg->m_page_size);

		return(DB_ERROR);
	}

	cfg->m_flags = mach_read_from_4(ptr);
	ptr += sizeof(ib_uint32_t);

	cfg->m_n_cols = mach_read_from_4(ptr);

	if (!dict_tf_is_valid(cfg->m_flags)) {

		return(DB_CORRUPTION);

	} else if ((err = row_import_read_columns(file, thd, cfg))
		   != DB_SUCCESS) {

		return(err);

	} else  if ((err = row_import_read_indexes(file, thd, cfg))
		    != DB_SUCCESS) {

		return(err);
	}

	ut_a(err == DB_SUCCESS);
	return(err);
}
+
+/**
+Read the contents of the <tablespace>.cfg file.
+@return DB_SUCCESS or error code. */
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
+row_import_read_meta_data(
+/*======================*/
+ dict_table_t* table, /*!< in: table */
+ FILE* file, /*!< in: File to read from */
+ THD* thd, /*!< in: session */
+ row_import& cfg) /*!< out: contents of the .cfg file */
+{
+ byte row[sizeof(ib_uint32_t)];
+
+ /* Trigger EOF */
+ DBUG_EXECUTE_IF("ib_import_io_read_error_9",
+ (void) fseek(file, 0L, SEEK_END););
+
+ if (fread(&row, 1, sizeof(row), file) != sizeof(row)) {
+ ib_senderrf(
+ thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR,
+ errno, strerror(errno),
+ "while reading meta-data version.");
+
+ return(DB_IO_ERROR);
+ }
+
+ cfg.m_version = mach_read_from_4(row);
+
+ /* Check the version number. */
+ switch (cfg.m_version) {
+ case IB_EXPORT_CFG_VERSION_V1:
+
+ return(row_import_read_v1(file, thd, &cfg));
+ default:
+ ib_errf(thd, IB_LOG_LEVEL_ERROR, ER_IO_READ_ERROR,
+ "Unsupported meta-data version number (%lu), "
+ "file ignored", (ulong) cfg.m_version);
+ }
+
+ return(DB_ERROR);
+}
+
+/**
+Read the contents of the <tablename>.cfg file.
+@return DB_SUCCESS or error code. */
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
+row_import_read_cfg(
+/*================*/
+ dict_table_t* table, /*!< in: table */
+ THD* thd, /*!< in: session */
+ row_import& cfg) /*!< out: contents of the .cfg file */
+{
+ dberr_t err;
+ char name[OS_FILE_MAX_PATH];
+
+ cfg.m_table = table;
+
+ srv_get_meta_data_filename(table, name, sizeof(name));
+
+ FILE* file = fopen(name, "rb");
+
+ if (file == NULL) {
+ char msg[BUFSIZ];
+
+ ut_snprintf(msg, sizeof(msg),
+ "Error opening '%s', will attempt to import "
+ "without schema verification", name);
+
+ ib_senderrf(
+ thd, IB_LOG_LEVEL_WARN, ER_IO_READ_ERROR,
+ errno, strerror(errno), msg);
+
+ cfg.m_missing = true;
+
+ err = DB_FAIL;
+ } else {
+
+ cfg.m_missing = false;
+
+ err = row_import_read_meta_data(table, file, thd, cfg);
+ fclose(file);
+ }
+
+ return(err);
+}
+
+/*****************************************************************//**
+Update the <space, root page> of a table's indexes from the values
+in the data dictionary.
+@return DB_SUCCESS or error code */
+UNIV_INTERN
+dberr_t
+row_import_update_index_root(
+/*=========================*/
+ trx_t* trx, /*!< in/out: transaction that
+ covers the update */
+ const dict_table_t* table, /*!< in: Table for which we want
+ to set the root page_no */
+ bool reset, /*!< in: if true then set to
+ FIL_NULL */
+ bool dict_locked) /*!< in: Set to true if the
+ caller already owns the
+ dict_sys_t::mutex. */
+
+{
+ const dict_index_t* index;
+ que_t* graph = 0;
+ dberr_t err = DB_SUCCESS;
+
+ static const char sql[] = {
+ "PROCEDURE UPDATE_INDEX_ROOT() IS\n"
+ "BEGIN\n"
+ "UPDATE SYS_INDEXES\n"
+ "SET SPACE = :space,\n"
+ " PAGE_NO = :page,\n"
+ " TYPE = :type\n"
+ "WHERE TABLE_ID = :table_id AND ID = :index_id;\n"
+ "END;\n"};
+
+ if (!dict_locked) {
+ mutex_enter(&dict_sys->mutex);
+ }
+
+ for (index = dict_table_get_first_index(table);
+ index != 0;
+ index = dict_table_get_next_index(index)) {
+
+ pars_info_t* info;
+ ib_uint32_t page;
+ ib_uint32_t space;
+ ib_uint32_t type;
+ index_id_t index_id;
+ table_id_t table_id;
+
+ info = (graph != 0) ? graph->info : pars_info_create();
+
+ mach_write_to_4(
+ reinterpret_cast<byte*>(&type),
+ index->type);
+
+ mach_write_to_4(
+ reinterpret_cast<byte*>(&page),
+ reset ? FIL_NULL : index->page);
+
+ mach_write_to_4(
+ reinterpret_cast<byte*>(&space),
+ reset ? FIL_NULL : index->space);
+
+ mach_write_to_8(
+ reinterpret_cast<byte*>(&index_id),
+ index->id);
+
+ mach_write_to_8(
+ reinterpret_cast<byte*>(&table_id),
+ table->id);
+
+ /* If we set the corrupt bit during the IMPORT phase then
+ we need to update the system tables. */
+ pars_info_bind_int4_literal(info, "type", &type);
+ pars_info_bind_int4_literal(info, "space", &space);
+ pars_info_bind_int4_literal(info, "page", &page);
+ pars_info_bind_ull_literal(info, "index_id", &index_id);
+ pars_info_bind_ull_literal(info, "table_id", &table_id);
+
+ if (graph == 0) {
+ graph = pars_sql(info, sql);
+ ut_a(graph);
+ graph->trx = trx;
+ }
+
+ que_thr_t* thr;
+
+ graph->fork_type = QUE_FORK_MYSQL_INTERFACE;
+
+ ut_a(thr = que_fork_start_command(graph));
+
+ que_run_threads(thr);
+
+ DBUG_EXECUTE_IF("ib_import_internal_error",
+ trx->error_state = DB_ERROR;);
+
+ err = trx->error_state;
+
+ if (err != DB_SUCCESS) {
+ char index_name[MAX_FULL_NAME_LEN + 1];
+
+ innobase_format_name(
+ index_name, sizeof(index_name),
+ index->name, TRUE);
+
+ ib_errf(trx->mysql_thd, IB_LOG_LEVEL_ERROR,
+ ER_INTERNAL_ERROR,
+ "While updating the <space, root page "
+ "number> of index %s - %s",
+ index_name, ut_strerr(err));
+
+ break;
+ }
+ }
+
+ que_graph_free(graph);
+
+ if (!dict_locked) {
+ mutex_exit(&dict_sys->mutex);
+ }
+
+ return(err);
+}
+
+/** Callback arg for row_import_set_discarded. */
+struct discard_t {
+ ib_uint32_t flags2; /*!< Value read from column */
+ bool state; /*!< New state of the flag */
+ ulint n_recs; /*!< Number of recs processed */
+};
+
+/******************************************************************//**
+Fetch callback that sets or unsets the DISCARDED tablespace flag in
+SYS_TABLES. The flag is stored in the MIX_LEN column.
+@return FALSE if all OK */
+static
+ibool
+row_import_set_discarded(
+/*=====================*/
+ void* row, /*!< in: sel_node_t* */
+ void* user_arg) /*!< in: bool set/unset flag */
+{
+ sel_node_t* node = static_cast<sel_node_t*>(row);
+ discard_t* discard = static_cast<discard_t*>(user_arg);
+ dfield_t* dfield = que_node_get_val(node->select_list);
+ dtype_t* type = dfield_get_type(dfield);
+ ulint len = dfield_get_len(dfield);
+
+ ut_a(dtype_get_mtype(type) == DATA_INT);
+ ut_a(len == sizeof(ib_uint32_t));
+
+ ulint flags2 = mach_read_from_4(
+ static_cast<byte*>(dfield_get_data(dfield)));
+
+ if (discard->state) {
+ flags2 |= DICT_TF2_DISCARDED;
+ } else {
+ flags2 &= ~DICT_TF2_DISCARDED;
+ }
+
+ mach_write_to_4(reinterpret_cast<byte*>(&discard->flags2), flags2);
+
+ ++discard->n_recs;
+
+ /* There should be at most one matching record. */
+ ut_a(discard->n_recs == 1);
+
+ return(FALSE);
+}
+
+/*****************************************************************//**
+Update the DICT_TF2_DISCARDED flag in SYS_TABLES.
+@return DB_SUCCESS or error code. */
+UNIV_INTERN
+dberr_t
+row_import_update_discarded_flag(
+/*=============================*/
+ trx_t* trx, /*!< in/out: transaction that
+ covers the update */
+ table_id_t table_id, /*!< in: Table for which we want
+ to set the root table->flags2 */
+ bool discarded, /*!< in: set MIX_LEN column bit
+ to discarded, if true */
+ bool dict_locked) /*!< in: set to true if the
+ caller already owns the
+ dict_sys_t::mutex. */
+
+{
+ pars_info_t* info;
+ discard_t discard;
+
+ static const char sql[] =
+ "PROCEDURE UPDATE_DISCARDED_FLAG() IS\n"
+ "DECLARE FUNCTION my_func;\n"
+ "DECLARE CURSOR c IS\n"
+ " SELECT MIX_LEN "
+ " FROM SYS_TABLES "
+ " WHERE ID = :table_id FOR UPDATE;"
+ "\n"
+ "BEGIN\n"
+ "OPEN c;\n"
+ "WHILE 1 = 1 LOOP\n"
+ " FETCH c INTO my_func();\n"
+ " IF c % NOTFOUND THEN\n"
+ " EXIT;\n"
+ " END IF;\n"
+ "END LOOP;\n"
+ "UPDATE SYS_TABLES"
+ " SET MIX_LEN = :flags2"
+ " WHERE ID = :table_id;\n"
+ "CLOSE c;\n"
+ "END;\n";
+
+ discard.n_recs = 0;
+ discard.state = discarded;
+ discard.flags2 = ULINT32_UNDEFINED;
+
+ info = pars_info_create();
+
+ pars_info_add_ull_literal(info, "table_id", table_id);
+ pars_info_bind_int4_literal(info, "flags2", &discard.flags2);
+
+ pars_info_bind_function(
+ info, "my_func", row_import_set_discarded, &discard);
+
+ dberr_t err = que_eval_sql(info, sql, !dict_locked, trx);
+
+ ut_a(discard.n_recs == 1);
+ ut_a(discard.flags2 != ULINT32_UNDEFINED);
+
+ return(err);
+}
+
+/*****************************************************************//**
+Imports a tablespace. The space id in the .ibd file must match the space id
+of the table in the data dictionary.
+@return error code or DB_SUCCESS */
+UNIV_INTERN
+dberr_t
+row_import_for_mysql(
+/*=================*/
+ dict_table_t* table, /*!< in/out: table */
+ row_prebuilt_t* prebuilt) /*!< in: prebuilt struct in MySQL */
+{
+ dberr_t err;
+ trx_t* trx;
+ ib_uint64_t autoinc = 0;
+ char table_name[MAX_FULL_NAME_LEN + 1];
+ char* filepath = NULL;
+
+ ut_ad(!srv_read_only_mode);
+
+ innobase_format_name(
+ table_name, sizeof(table_name), table->name, FALSE);
+
+ ut_a(table->space);
+ ut_ad(prebuilt->trx);
+ ut_a(table->ibd_file_missing);
+
+ trx_start_if_not_started(prebuilt->trx);
+
+ trx = trx_allocate_for_mysql();
+
+ /* So that the table is not DROPped during recovery. */
+ trx_set_dict_operation(trx, TRX_DICT_OP_INDEX);
+
+ trx_start_if_not_started(trx);
+
+ /* So that we can send error messages to the user. */
+ trx->mysql_thd = prebuilt->trx->mysql_thd;
+
+ /* Ensure that the table will be dropped by trx_rollback_active()
+ in case of a crash. */
+
+ trx->table_id = table->id;
+
+ /* Assign an undo segment for the transaction, so that the
+ transaction will be recovered after a crash. */
+
+ mutex_enter(&trx->undo_mutex);
+
+ err = trx_undo_assign_undo(trx, TRX_UNDO_UPDATE);
+
+ mutex_exit(&trx->undo_mutex);
+
+ DBUG_EXECUTE_IF("ib_import_undo_assign_failure",
+ err = DB_TOO_MANY_CONCURRENT_TRXS;);
+
+ if (err != DB_SUCCESS) {
+
+ return(row_import_cleanup(prebuilt, trx, err));
+
+ } else if (trx->update_undo == 0) {
+
+ err = DB_TOO_MANY_CONCURRENT_TRXS;
+ return(row_import_cleanup(prebuilt, trx, err));
+ }
+
+ prebuilt->trx->op_info = "read meta-data file";
+
+ /* Prevent DDL operations while we are checking. */
+
+ rw_lock_s_lock_func(&dict_operation_lock, 0, __FILE__, __LINE__);
+
+ row_import cfg;
+
+ memset(&cfg, 0x0, sizeof(cfg));
+
+ err = row_import_read_cfg(table, trx->mysql_thd, cfg);
+
+ /* Check if the table column definitions match the contents
+ of the config file. */
+
+ if (err == DB_SUCCESS) {
+
+ /* We have a schema file, try to match it against our
+ data dictionary. */
+
+ err = cfg.match_schema(trx->mysql_thd);
+
+ /* Update index->page and SYS_INDEXES.PAGE_NO to match the
+ B-tree root page numbers in the tablespace. Use the index
+ name from the .cfg file to find match. */
+
+ if (err == DB_SUCCESS) {
+ cfg.set_root_by_name();
+ autoinc = cfg.m_autoinc;
+ }
+
+ rw_lock_s_unlock_gen(&dict_operation_lock, 0);
+
+ DBUG_EXECUTE_IF("ib_import_set_index_root_failure",
+ err = DB_TOO_MANY_CONCURRENT_TRXS;);
+
+ } else if (cfg.m_missing) {
+
+ rw_lock_s_unlock_gen(&dict_operation_lock, 0);
+
+ /* We don't have a schema file, we will have to discover
+ the index root pages from the .ibd file and skip the schema
+ matching step. */
+
+ ut_a(err == DB_FAIL);
+
+ cfg.m_page_size = UNIV_PAGE_SIZE;
+
+ FetchIndexRootPages fetchIndexRootPages(table, trx);
+
+ err = fil_tablespace_iterate(
+ table, IO_BUFFER_SIZE(cfg.m_page_size),
+ fetchIndexRootPages);
+
+ if (err == DB_SUCCESS) {
+
+ err = fetchIndexRootPages.build_row_import(&cfg);
+
+ /* Update index->page and SYS_INDEXES.PAGE_NO
+ to match the B-tree root page numbers in the
+ tablespace. */
+
+ if (err == DB_SUCCESS) {
+ err = cfg.set_root_by_heuristic();
+ }
+ }
+
+ } else {
+ rw_lock_s_unlock_gen(&dict_operation_lock, 0);
+ }
+
+ if (err != DB_SUCCESS) {
+ return(row_import_error(prebuilt, trx, err));
+ }
+
+ prebuilt->trx->op_info = "importing tablespace";
+
+ ib_logf(IB_LOG_LEVEL_INFO, "Phase I - Update all pages");
+
+ /* Iterate over all the pages and do the sanity checking and
+ the conversion required to import the tablespace. */
+
+ PageConverter converter(&cfg, trx);
+
+ /* Set the IO buffer size in pages. */
+
+ err = fil_tablespace_iterate(
+ table, IO_BUFFER_SIZE(cfg.m_page_size), converter);
+
+ DBUG_EXECUTE_IF("ib_import_reset_space_and_lsn_failure",
+ err = DB_TOO_MANY_CONCURRENT_TRXS;);
+
+ if (err != DB_SUCCESS) {
+ char table_name[MAX_FULL_NAME_LEN + 1];
+
+ innobase_format_name(
+ table_name, sizeof(table_name), table->name, FALSE);
+
+ ib_errf(trx->mysql_thd, IB_LOG_LEVEL_ERROR,
+ ER_INTERNAL_ERROR,
+ "Cannot reset LSNs in table '%s' : %s",
+ table_name, ut_strerr(err));
+
+ return(row_import_cleanup(prebuilt, trx, err));
+ }
+
+ row_mysql_lock_data_dictionary(trx);
+
+ /* If the table is stored in a remote tablespace, we need to
+ determine that filepath from the link file and system tables.
+ Find the space ID in SYS_TABLES since this is an ALTER TABLE. */
+ if (DICT_TF_HAS_DATA_DIR(table->flags)) {
+ dict_get_and_save_data_dir_path(table, true);
+ ut_a(table->data_dir_path);
+
+ filepath = os_file_make_remote_pathname(
+ table->data_dir_path, table->name, "ibd");
+ } else {
+ filepath = fil_make_ibd_name(table->name, false);
+ }
+ ut_a(filepath);
+
+ /* Open the tablespace so that we can access via the buffer pool.
+ We set the 2nd param (fix_dict = true) here because we already
+ have an x-lock on dict_operation_lock and dict_sys->mutex. */
+
+ err = fil_open_single_table_tablespace(
+ true, true, table->space,
+ dict_tf_to_fsp_flags(table->flags),
+ table->name, filepath);
+
+ DBUG_EXECUTE_IF("ib_import_open_tablespace_failure",
+ err = DB_TABLESPACE_NOT_FOUND;);
+
+ if (err != DB_SUCCESS) {
+ row_mysql_unlock_data_dictionary(trx);
+
+ ib_senderrf(trx->mysql_thd, IB_LOG_LEVEL_ERROR,
+ ER_FILE_NOT_FOUND,
+ filepath, err, ut_strerr(err));
+
+ mem_free(filepath);
+
+ return(row_import_cleanup(prebuilt, trx, err));
+ }
+
+ row_mysql_unlock_data_dictionary(trx);
+
+ mem_free(filepath);
+
+ err = ibuf_check_bitmap_on_import(trx, table->space);
+
+ DBUG_EXECUTE_IF("ib_import_check_bitmap_failure", err = DB_CORRUPTION;);
+
+ if (err != DB_SUCCESS) {
+ return(row_import_cleanup(prebuilt, trx, err));
+ }
+
+ /* The first index must always be the clustered index. */
+
+ dict_index_t* index = dict_table_get_first_index(table);
+
+ if (!dict_index_is_clust(index)) {
+ return(row_import_error(prebuilt, trx, DB_CORRUPTION));
+ }
+
+ /* Update the Btree segment headers for index node and
+ leaf nodes in the root page. Set the new space id. */
+
+ err = btr_root_adjust_on_import(index);
+
+ DBUG_EXECUTE_IF("ib_import_cluster_root_adjust_failure",
+ err = DB_CORRUPTION;);
+
+ if (err != DB_SUCCESS) {
+ return(row_import_error(prebuilt, trx, err));
+ }
+
+ if (err != DB_SUCCESS) {
+ return(row_import_error(prebuilt, trx, err));
+ } else if (cfg.requires_purge(index->name)) {
+
+ /* Purge any delete-marked records that couldn't be
+ purged during the page conversion phase from the
+ cluster index. */
+
+ IndexPurge purge(trx, index);
+
+ trx->op_info = "cluster: purging delete marked records";
+
+ err = purge.garbage_collect();
+
+ trx->op_info = "";
+ }
+
+ DBUG_EXECUTE_IF("ib_import_cluster_failure", err = DB_CORRUPTION;);
+
+ if (err != DB_SUCCESS) {
+ return(row_import_error(prebuilt, trx, err));
+ }
+
+ /* For secondary indexes, purge any records that couldn't be purged
+ during the page conversion phase. */
+
+ err = row_import_adjust_root_pages_of_secondary_indexes(
+ prebuilt, trx, table, cfg);
+
+ DBUG_EXECUTE_IF("ib_import_sec_root_adjust_failure",
+ err = DB_CORRUPTION;);
+
+ if (err != DB_SUCCESS) {
+ return(row_import_error(prebuilt, trx, err));
+ }
+
+ /* Ensure that the next available DB_ROW_ID is not smaller than
+ any DB_ROW_ID stored in the table. */
+
+ if (prebuilt->clust_index_was_generated) {
+
+ err = row_import_set_sys_max_row_id(prebuilt, table);
+
+ if (err != DB_SUCCESS) {
+ return(row_import_error(prebuilt, trx, err));
+ }
+ }
+
+ ib_logf(IB_LOG_LEVEL_INFO, "Phase III - Flush changes to disk");
+
+ /* Ensure that all pages dirtied during the IMPORT make it to disk.
+ The only dirty pages generated should be from the pessimistic purge
+ of delete marked records that couldn't be purged in Phase I. */
+
+ buf_LRU_flush_or_remove_pages(
+ prebuilt->table->space, BUF_REMOVE_FLUSH_WRITE, trx);
+
+ if (trx_is_interrupted(trx)) {
+ ib_logf(IB_LOG_LEVEL_INFO, "Phase III - Flush interrupted");
+ return(row_import_error(prebuilt, trx, DB_INTERRUPTED));
+ } else {
+ ib_logf(IB_LOG_LEVEL_INFO, "Phase IV - Flush complete");
+ }
+
+ /* The dictionary latches will be released in row_import_cleanup()
+ after the transaction commit, for both success and error. */
+
+ row_mysql_lock_data_dictionary(trx);
+
+ /* Update the root pages of the table's indexes. */
+ err = row_import_update_index_root(trx, table, false, true);
+
+ if (err != DB_SUCCESS) {
+ return(row_import_error(prebuilt, trx, err));
+ }
+
+ /* Update the table's discarded flag, unset it. */
+ err = row_import_update_discarded_flag(trx, table->id, false, true);
+
+ if (err != DB_SUCCESS) {
+ return(row_import_error(prebuilt, trx, err));
+ }
+
+ table->ibd_file_missing = false;
+ table->flags2 &= ~DICT_TF2_DISCARDED;
+
+ if (autoinc != 0) {
+ char table_name[MAX_FULL_NAME_LEN + 1];
+
+ innobase_format_name(
+ table_name, sizeof(table_name), table->name, FALSE);
+
+ ib_logf(IB_LOG_LEVEL_INFO, "%s autoinc value set to " IB_ID_FMT,
+ table_name, autoinc);
+
+ dict_table_autoinc_lock(table);
+ dict_table_autoinc_initialize(table, autoinc);
+ dict_table_autoinc_unlock(table);
+ }
+
+ ut_a(err == DB_SUCCESS);
+
+ return(row_import_cleanup(prebuilt, trx, err));
+}
+
diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc
index e8d15fb539c..c1c27152831 100644
--- a/storage/innobase/row/row0ins.cc
+++ b/storage/innobase/row/row0ins.cc
@@ -23,11 +23,8 @@ Insert into a table
Created 4/20/1996 Heikki Tuuri
*******************************************************/
-#include "m_string.h" /* for my_sys.h */
#include "row0ins.h"
-#define DEBUG_SYNC_C_IF_THD(A,B) DEBUG_SYNC(A,B)
-
#ifdef UNIV_NONINL
#include "row0ins.ic"
#endif
@@ -35,6 +32,7 @@ Created 4/20/1996 Heikki Tuuri
#include "ha_prototypes.h"
#include "dict0dict.h"
#include "dict0boot.h"
+#include "trx0rec.h"
#include "trx0undo.h"
#include "btr0btr.h"
#include "btr0cur.h"
@@ -43,6 +41,7 @@ Created 4/20/1996 Heikki Tuuri
#include "row0upd.h"
#include "row0sel.h"
#include "row0row.h"
+#include "row0log.h"
#include "rem0cmp.h"
#include "lock0lock.h"
#include "log0log.h"
@@ -52,6 +51,7 @@ Created 4/20/1996 Heikki Tuuri
#include "buf0lru.h"
#include "fts0fts.h"
#include "fts0types.h"
+#include "m_string.h"
/*************************************************************************
IMPORTANT NOTE: Any operation that generates redo MUST check that there
@@ -101,7 +101,7 @@ ins_node_create(
/***********************************************************//**
Creates an entry template for each index of a table. */
-UNIV_INTERN
+static
void
ins_node_create_entry_list(
/*=======================*/
@@ -222,68 +222,92 @@ Does an insert operation by updating a delete-marked existing record
in the index. This situation can occur if the delete-marked record is
kept in the index for consistent reads.
@return DB_SUCCESS or error code */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_ins_sec_index_entry_by_modify(
/*==============================*/
+ ulint flags, /*!< in: undo logging and locking flags */
ulint mode, /*!< in: BTR_MODIFY_LEAF or BTR_MODIFY_TREE,
depending on whether mtr holds just a leaf
latch or also a tree latch */
btr_cur_t* cursor, /*!< in: B-tree cursor */
+ ulint** offsets,/*!< in/out: offsets on cursor->page_cur.rec */
+ mem_heap_t* offsets_heap,
+ /*!< in/out: memory heap that can be emptied */
+ mem_heap_t* heap, /*!< in/out: memory heap */
const dtuple_t* entry, /*!< in: index entry to insert */
que_thr_t* thr, /*!< in: query thread */
mtr_t* mtr) /*!< in: mtr; must be committed before
latching any further pages */
{
big_rec_t* dummy_big_rec;
- mem_heap_t* heap;
upd_t* update;
rec_t* rec;
- ulint err;
+ dberr_t err;
rec = btr_cur_get_rec(cursor);
ut_ad(!dict_index_is_clust(cursor->index));
- ut_ad(rec_get_deleted_flag(rec,
- dict_table_is_comp(cursor->index->table)));
+ ut_ad(rec_offs_validate(rec, cursor->index, *offsets));
+ ut_ad(!entry->info_bits);
/* We know that in the alphabetical ordering, entry and rec are
identified. But in their binary form there may be differences if
there are char fields in them. Therefore we have to calculate the
difference. */
- heap = mem_heap_create(1024);
-
update = row_upd_build_sec_rec_difference_binary(
- cursor->index, entry, rec, thr_get_trx(thr), heap);
+ rec, cursor->index, *offsets, entry, heap);
+
+ if (!rec_get_deleted_flag(rec, rec_offs_comp(*offsets))) {
+ /* We should never insert in place of a record that
+ has not been delete-marked. The only exception is when
+ online CREATE INDEX copied the changes that we already
+ made to the clustered index, and completed the
+ secondary index creation before we got here. In this
+ case, the change would already be there. The CREATE
+ INDEX should be waiting for a MySQL meta-data lock
+ upgrade at least until this INSERT or UPDATE
+ returns. After that point, the TEMP_INDEX_PREFIX
+ would be dropped from the index name in
+ commit_inplace_alter_table(). */
+ ut_a(update->n_fields == 0);
+ ut_a(*cursor->index->name == TEMP_INDEX_PREFIX);
+ ut_ad(!dict_index_is_online_ddl(cursor->index));
+ return(DB_SUCCESS);
+ }
+
if (mode == BTR_MODIFY_LEAF) {
/* Try an optimistic updating of the record, keeping changes
within the page */
- err = btr_cur_optimistic_update(BTR_KEEP_SYS_FLAG, cursor,
- update, 0, thr, mtr);
+ /* TODO: pass only *offsets */
+ err = btr_cur_optimistic_update(
+ flags | BTR_KEEP_SYS_FLAG, cursor,
+ offsets, &offsets_heap, update, 0, thr,
+ thr_get_trx(thr)->id, mtr);
switch (err) {
case DB_OVERFLOW:
case DB_UNDERFLOW:
case DB_ZIP_OVERFLOW:
err = DB_FAIL;
+ default:
+ break;
}
} else {
ut_a(mode == BTR_MODIFY_TREE);
if (buf_LRU_buf_pool_running_out()) {
- err = DB_LOCK_TABLE_FULL;
-
- goto func_exit;
+ return(DB_LOCK_TABLE_FULL);
}
- err = btr_cur_pessimistic_update(BTR_KEEP_SYS_FLAG, cursor,
- &heap, &dummy_big_rec, update,
- 0, thr, mtr);
+ err = btr_cur_pessimistic_update(
+ flags | BTR_KEEP_SYS_FLAG, cursor,
+ offsets, &offsets_heap,
+ heap, &dummy_big_rec, update, 0,
+ thr, thr_get_trx(thr)->id, mtr);
ut_ad(!dummy_big_rec);
}
-func_exit:
- mem_heap_free(heap);
return(err);
}
@@ -293,15 +317,20 @@ Does an insert operation by delete unmarking and updating a delete marked
existing record in the index. This situation can occur if the delete marked
record is kept in the index for consistent reads.
@return DB_SUCCESS, DB_FAIL, or error code */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_ins_clust_index_entry_by_modify(
/*================================*/
+ ulint flags, /*!< in: undo logging and locking flags */
ulint mode, /*!< in: BTR_MODIFY_LEAF or BTR_MODIFY_TREE,
depending on whether mtr holds just a leaf
latch or also a tree latch */
btr_cur_t* cursor, /*!< in: B-tree cursor */
- mem_heap_t** heap, /*!< in/out: pointer to memory heap, or NULL */
+ ulint** offsets,/*!< out: offsets on cursor->page_cur.rec */
+ mem_heap_t** offsets_heap,
+ /*!< in/out: pointer to memory heap that can
+ be emptied, or NULL */
+ mem_heap_t* heap, /*!< in/out: memory heap */
big_rec_t** big_rec,/*!< out: possible big rec vector of fields
which have to be stored externally by the
caller */
@@ -310,9 +339,9 @@ row_ins_clust_index_entry_by_modify(
mtr_t* mtr) /*!< in: mtr; must be committed before
latching any further pages */
{
- rec_t* rec;
- upd_t* update;
- ulint err;
+ const rec_t* rec;
+ const upd_t* update;
+ dberr_t err;
ut_ad(dict_index_is_clust(cursor->index));
@@ -323,38 +352,40 @@ row_ins_clust_index_entry_by_modify(
ut_ad(rec_get_deleted_flag(rec,
dict_table_is_comp(cursor->index->table)));
- if (!*heap) {
- *heap = mem_heap_create(1024);
- }
-
/* Build an update vector containing all the fields to be modified;
NOTE that this vector may NOT contain system columns trx_id or
roll_ptr */
- update = row_upd_build_difference_binary(cursor->index, entry, rec,
- thr_get_trx(thr), *heap);
- if (mode == BTR_MODIFY_LEAF) {
+ update = row_upd_build_difference_binary(
+ cursor->index, entry, rec, NULL, true,
+ thr_get_trx(thr), heap);
+ if (mode != BTR_MODIFY_TREE) {
+ ut_ad((mode & ~BTR_ALREADY_S_LATCHED) == BTR_MODIFY_LEAF);
+
/* Try optimistic updating of the record, keeping changes
within the page */
- err = btr_cur_optimistic_update(0, cursor, update, 0, thr,
- mtr);
+ err = btr_cur_optimistic_update(
+ flags, cursor, offsets, offsets_heap, update, 0, thr,
+ thr_get_trx(thr)->id, mtr);
switch (err) {
case DB_OVERFLOW:
case DB_UNDERFLOW:
case DB_ZIP_OVERFLOW:
err = DB_FAIL;
+ default:
+ break;
}
} else {
- ut_a(mode == BTR_MODIFY_TREE);
if (buf_LRU_buf_pool_running_out()) {
return(DB_LOCK_TABLE_FULL);
}
err = btr_cur_pessimistic_update(
- BTR_KEEP_POS_FLAG, cursor, heap, big_rec, update,
- 0, thr, mtr);
+ flags | BTR_KEEP_POS_FLAG,
+ cursor, offsets, offsets_heap, heap,
+ big_rec, update, 0, thr, thr_get_trx(thr)->id, mtr);
}
return(err);
@@ -394,7 +425,7 @@ row_ins_cascade_ancestor_updates_table(
Returns the number of ancestor UPDATE or DELETE nodes of a
cascaded update/delete node.
@return number of ancestors */
-static
+static __attribute__((nonnull, warn_unused_result))
ulint
row_ins_cascade_n_ancestors(
/*========================*/
@@ -420,7 +451,7 @@ a cascaded update.
can also be 0 if no foreign key fields changed; the returned value is
ULINT_UNDEFINED if the column type in the child table is too short to
fit the new value in the parent table: that means the update fails */
-static
+static __attribute__((nonnull, warn_unused_result))
ulint
row_ins_cascade_calc_update_vec(
/*============================*/
@@ -691,6 +722,8 @@ row_ins_set_detailed(
trx_t* trx, /*!< in: transaction */
dict_foreign_t* foreign) /*!< in: foreign key constraint */
{
+ ut_ad(!srv_read_only_mode);
+
mutex_enter(&srv_misc_tmpfile_mutex);
rewind(srv_misc_tmpfile);
@@ -717,13 +750,17 @@ row_ins_foreign_trx_print(
/*======================*/
trx_t* trx) /*!< in: transaction */
{
- ulint n_lock_rec;
- ulint n_lock_struct;
+ ulint n_rec_locks;
+ ulint n_trx_locks;
ulint heap_size;
+ if (srv_read_only_mode) {
+ return;
+ }
+
lock_mutex_enter();
- n_lock_rec = lock_number_of_rows_locked(&trx->lock);
- n_lock_struct = UT_LIST_GET_LEN(trx->lock.trx_locks);
+ n_rec_locks = lock_number_of_rows_locked(&trx->lock);
+ n_trx_locks = UT_LIST_GET_LEN(trx->lock.trx_locks);
heap_size = mem_heap_get_size(trx->lock.lock_heap);
lock_mutex_exit();
@@ -735,7 +772,7 @@ row_ins_foreign_trx_print(
fputs(" Transaction:\n", dict_foreign_err_file);
trx_print_low(dict_foreign_err_file, trx, 600,
- n_lock_rec, n_lock_struct, heap_size);
+ n_rec_locks, n_trx_locks, heap_size);
mutex_exit(&trx_sys->mutex);
@@ -759,6 +796,10 @@ row_ins_foreign_report_err(
const dtuple_t* entry) /*!< in: index entry in the parent
table */
{
+ if (srv_read_only_mode) {
+ return;
+ }
+
FILE* ef = dict_foreign_err_file;
trx_t* trx = thr_get_trx(thr);
@@ -810,6 +851,10 @@ row_ins_foreign_report_add_err(
const dtuple_t* entry) /*!< in: index entry to insert in the
child table */
{
+ if (srv_read_only_mode) {
+ return;
+ }
+
FILE* ef = dict_foreign_err_file;
row_ins_set_detailed(trx, foreign);
@@ -879,8 +924,8 @@ Perform referential actions or checks when a parent row is deleted or updated
and the constraint had an ON DELETE or ON UPDATE condition which was not
RESTRICT.
@return DB_SUCCESS, DB_LOCK_WAIT, or error code */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_ins_foreign_check_on_constraint(
/*================================*/
que_thr_t* thr, /*!< in: query thread whose run_node
@@ -906,7 +951,7 @@ row_ins_foreign_check_on_constraint(
const buf_block_t* clust_block;
upd_t* update;
ulint n_to_update;
- ulint err;
+ dberr_t err;
ulint i;
trx_t* trx;
mem_heap_t* tmp_heap = NULL;
@@ -1242,6 +1287,9 @@ row_ins_foreign_check_on_constraint(
release the latch. */
row_mysql_unfreeze_data_dictionary(thr_get_trx(thr));
+
+ DEBUG_SYNC_C("innodb_dml_cascade_dict_unfreeze");
+
row_mysql_freeze_data_dictionary(thr_get_trx(thr));
mtr_start(mtr);
@@ -1284,7 +1332,7 @@ Sets a shared lock on a record. Used in locking possible duplicate key
records and also in checking foreign key constraints.
@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, or error code */
static
-enum db_err
+dberr_t
row_ins_set_shared_rec_lock(
/*========================*/
ulint type, /*!< in: LOCK_ORDINARY, LOCK_GAP, or
@@ -1295,7 +1343,7 @@ row_ins_set_shared_rec_lock(
const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */
que_thr_t* thr) /*!< in: query thread */
{
- enum db_err err;
+ dberr_t err;
ut_ad(rec_offs_validate(rec, index, offsets));
@@ -1315,7 +1363,7 @@ Sets a exclusive lock on a record. Used in locking possible duplicate key
records
@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, or error code */
static
-enum db_err
+dberr_t
row_ins_set_exclusive_rec_lock(
/*===========================*/
ulint type, /*!< in: LOCK_ORDINARY, LOCK_GAP, or
@@ -1326,7 +1374,7 @@ row_ins_set_exclusive_rec_lock(
const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */
que_thr_t* thr) /*!< in: query thread */
{
- enum db_err err;
+ dberr_t err;
ut_ad(rec_offs_validate(rec, index, offsets));
@@ -1347,7 +1395,7 @@ which lock either the success or the failure of the constraint. NOTE that
the caller must have a shared latch on dict_operation_lock.
@return DB_SUCCESS, DB_NO_REFERENCED_ROW, or DB_ROW_IS_REFERENCED */
UNIV_INTERN
-ulint
+dberr_t
row_ins_check_foreign_constraint(
/*=============================*/
ibool check_ref,/*!< in: TRUE if we want to check that
@@ -1361,7 +1409,7 @@ row_ins_check_foreign_constraint(
dtuple_t* entry, /*!< in: index entry for index */
que_thr_t* thr) /*!< in: query thread */
{
- ulint err;
+ dberr_t err;
upd_node_t* upd_node;
dict_table_t* check_table;
dict_index_t* check_index;
@@ -1433,9 +1481,11 @@ run_again:
check_index = foreign->foreign_index;
}
- if (check_table == NULL || check_table->ibd_file_missing
+ if (check_table == NULL
+ || check_table->ibd_file_missing
|| check_index == NULL) {
- if (check_ref) {
+
+ if (!srv_read_only_mode && check_ref) {
FILE* ef = dict_foreign_err_file;
row_ins_set_detailed(trx, foreign);
@@ -1611,6 +1661,8 @@ run_again:
} else {
err = DB_SUCCESS;
}
+ default:
+ break;
}
goto end_scan;
@@ -1635,18 +1687,43 @@ end_scan:
do_possible_lock_wait:
if (err == DB_LOCK_WAIT) {
- trx->error_state = static_cast<enum db_err>(err);
+ bool verified = false;
+
+ trx->error_state = err;
que_thr_stop_for_mysql(thr);
lock_wait_suspend_thread(thr);
- if (trx->error_state == DB_SUCCESS) {
+ if (check_table->to_be_dropped) {
+ /* The table is being dropped. We shall timeout
+ this operation */
+ err = DB_LOCK_WAIT_TIMEOUT;
+ goto exit_func;
+ }
- goto run_again;
+ /* We had temporarily released dict_operation_lock in
+ above lock sleep wait, now we have the lock again, and
+ we will need to re-check whether the foreign key has been
+ dropped */
+ for (const dict_foreign_t* check_foreign = UT_LIST_GET_FIRST(
+ table->referenced_list);
+ check_foreign;
+ check_foreign = UT_LIST_GET_NEXT(
+ referenced_list, check_foreign)) {
+ if (check_foreign == foreign) {
+ verified = true;
+ break;
+ }
}
- err = trx->error_state;
+ if (!verified) {
+ err = DB_DICT_CHANGED;
+ } else if (trx->error_state == DB_SUCCESS) {
+ goto run_again;
+ } else {
+ err = trx->error_state;
+ }
}
exit_func:
@@ -1663,8 +1740,8 @@ Otherwise does searches to the indexes of referenced tables and
sets shared locks which lock either the success or the failure of
a constraint.
@return DB_SUCCESS or error code */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_ins_check_foreign_constraints(
/*==============================*/
dict_table_t* table, /*!< in: table */
@@ -1673,7 +1750,7 @@ row_ins_check_foreign_constraints(
que_thr_t* thr) /*!< in: query thread */
{
dict_foreign_t* foreign;
- ulint err;
+ dberr_t err;
trx_t* trx;
ibool got_s_lock = FALSE;
@@ -1681,14 +1758,21 @@ row_ins_check_foreign_constraints(
foreign = UT_LIST_GET_FIRST(table->foreign_list);
+ DEBUG_SYNC_C_IF_THD(thr_get_trx(thr)->mysql_thd,
+ "foreign_constraint_check_for_ins");
+
while (foreign) {
if (foreign->foreign_index == index) {
dict_table_t* ref_table = NULL;
+ dict_table_t* foreign_table = foreign->foreign_table;
+ dict_table_t* referenced_table
+ = foreign->referenced_table;
- if (foreign->referenced_table == NULL) {
+ if (referenced_table == NULL) {
ref_table = dict_table_open_on_name(
- foreign->referenced_table_name_lookup, FALSE);
+ foreign->referenced_table_name_lookup,
+ FALSE, FALSE, DICT_ERR_IGNORE_NONE);
}
if (0 == trx->dict_operation_lock_mode) {
@@ -1697,9 +1781,9 @@ row_ins_check_foreign_constraints(
row_mysql_freeze_data_dictionary(trx);
}
- if (foreign->referenced_table) {
+ if (referenced_table) {
os_inc_counter(dict_sys->mutex,
- foreign->foreign_table
+ foreign_table
->n_foreign_key_checks_running);
}
@@ -1711,9 +1795,12 @@ row_ins_check_foreign_constraints(
err = row_ins_check_foreign_constraint(
TRUE, foreign, table, entry, thr);
- if (foreign->referenced_table) {
+ DBUG_EXECUTE_IF("row_ins_dict_change_err",
+ err = DB_DICT_CHANGED;);
+
+ if (referenced_table) {
os_dec_counter(dict_sys->mutex,
- foreign->foreign_table
+ foreign_table
->n_foreign_key_checks_running);
}
@@ -1722,7 +1809,7 @@ row_ins_check_foreign_constraints(
}
if (ref_table != NULL) {
- dict_table_close(ref_table, FALSE);
+ dict_table_close(ref_table, FALSE, FALSE);
}
if (err != DB_SUCCESS) {
@@ -1778,8 +1865,7 @@ row_ins_dupl_error_with_rec(
if (!dict_index_is_clust(index)) {
for (i = 0; i < n_unique; i++) {
- if (UNIV_SQL_NULL == dfield_get_len(
- dtuple_get_nth_field(entry, i))) {
+ if (dfield_is_null(dtuple_get_nth_field(entry, i))) {
return(FALSE);
}
@@ -1794,26 +1880,30 @@ Scans a unique non-clustered index at a given index entry to determine
whether a uniqueness violation has occurred for the key value of the entry.
Set shared locks on possible duplicate records.
@return DB_SUCCESS, DB_DUPLICATE_KEY, or DB_LOCK_WAIT */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_ins_scan_sec_index_for_duplicate(
/*=================================*/
+ ulint flags, /*!< in: undo logging and locking flags */
dict_index_t* index, /*!< in: non-clustered unique index */
dtuple_t* entry, /*!< in: index entry */
- que_thr_t* thr) /*!< in: query thread */
+ que_thr_t* thr, /*!< in: query thread */
+ bool s_latch,/*!< in: whether index->lock is being held */
+ mtr_t* mtr, /*!< in/out: mini-transaction */
+ mem_heap_t* offsets_heap)
+ /*!< in/out: memory heap that can be emptied */
{
ulint n_unique;
- ulint i;
int cmp;
ulint n_fields_cmp;
btr_pcur_t pcur;
- ulint err = DB_SUCCESS;
+ dberr_t err = DB_SUCCESS;
ulint allow_duplicates;
- mtr_t mtr;
- mem_heap_t* heap = NULL;
- ulint offsets_[REC_OFFS_NORMAL_SIZE];
- ulint* offsets = offsets_;
- rec_offs_init(offsets_);
+ ulint* offsets = NULL;
+
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(s_latch == rw_lock_own(&index->lock, RW_LOCK_SHARED));
+#endif /* UNIV_SYNC_DEBUG */
n_unique = dict_index_get_n_unique(index);
@@ -1821,7 +1911,7 @@ row_ins_scan_sec_index_for_duplicate(
n_unique first fields is NULL, a unique key violation cannot occur,
since we define NULL != NULL in this case */
- for (i = 0; i < n_unique; i++) {
+ for (ulint i = 0; i < n_unique; i++) {
if (UNIV_SQL_NULL == dfield_get_len(
dtuple_get_nth_field(entry, i))) {
@@ -1829,15 +1919,17 @@ row_ins_scan_sec_index_for_duplicate(
}
}
- mtr_start(&mtr);
-
/* Store old value on n_fields_cmp */
n_fields_cmp = dtuple_get_n_fields_cmp(entry);
- dtuple_set_n_fields_cmp(entry, dict_index_get_n_unique(index));
+ dtuple_set_n_fields_cmp(entry, n_unique);
- btr_pcur_open(index, entry, PAGE_CUR_GE, BTR_SEARCH_LEAF, &pcur, &mtr);
+ btr_pcur_open(index, entry, PAGE_CUR_GE,
+ s_latch
+ ? BTR_SEARCH_LEAF | BTR_ALREADY_S_LATCHED
+ : BTR_SEARCH_LEAF,
+ &pcur, mtr);
allow_duplicates = thr_get_trx(thr)->duplicates;
@@ -1853,9 +1945,12 @@ row_ins_scan_sec_index_for_duplicate(
}
offsets = rec_get_offsets(rec, index, offsets,
- ULINT_UNDEFINED, &heap);
+ ULINT_UNDEFINED, &offsets_heap);
- if (allow_duplicates) {
+ if (flags & BTR_NO_LOCKING_FLAG) {
+ /* Set no locks when applying log
+ in online table rebuild. */
+ } else if (allow_duplicates) {
/* If the SQL-query will update or replace
duplicate key we will take X-lock for
@@ -1901,37 +1996,115 @@ row_ins_scan_sec_index_for_duplicate(
ut_a(cmp < 0);
goto end_scan;
}
- } while (btr_pcur_move_to_next(&pcur, &mtr));
+ } while (btr_pcur_move_to_next(&pcur, mtr));
end_scan:
- if (UNIV_LIKELY_NULL(heap)) {
- mem_heap_free(heap);
- }
- mtr_commit(&mtr);
-
/* Restore old value */
dtuple_set_n_fields_cmp(entry, n_fields_cmp);
return(err);
}
+/** Checks for a duplicate when the table is being rebuilt online.
+@retval DB_SUCCESS when no duplicate is detected
+@retval DB_SUCCESS_LOCKED_REC when rec is an exact match of entry or
+a newer version of entry (the entry should not be inserted)
+@retval DB_DUPLICATE_KEY when entry is a duplicate of rec */
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
+row_ins_duplicate_online(
+/*=====================*/
+ ulint n_uniq, /*!< in: offset of DB_TRX_ID */
+ const dtuple_t* entry, /*!< in: entry that is being inserted */
+ const rec_t* rec, /*!< in: clustered index record */
+ ulint* offsets)/*!< in/out: rec_get_offsets(rec) */
+{
+ ulint fields = 0;
+ ulint bytes = 0;
+
+ /* During rebuild, there should not be any delete-marked rows
+ in the new table. */
+ ut_ad(!rec_get_deleted_flag(rec, rec_offs_comp(offsets)));
+ ut_ad(dtuple_get_n_fields_cmp(entry) == n_uniq);
+
+ /* Compare the PRIMARY KEY fields and the
+ DB_TRX_ID, DB_ROLL_PTR. */
+ cmp_dtuple_rec_with_match_low(
+ entry, rec, offsets, n_uniq + 2, &fields, &bytes);
+
+ if (fields < n_uniq) {
+ /* Not a duplicate. */
+ return(DB_SUCCESS);
+ }
+
+ if (fields == n_uniq + 2) {
+ /* rec is an exact match of entry. */
+ ut_ad(bytes == 0);
+ return(DB_SUCCESS_LOCKED_REC);
+ }
+
+ return(DB_DUPLICATE_KEY);
+}
+
+/** Checks for a duplicate when the table is being rebuilt online.
+@retval DB_SUCCESS when no duplicate is detected
+@retval DB_SUCCESS_LOCKED_REC when rec is an exact match of entry or
+a newer version of entry (the entry should not be inserted)
+@retval DB_DUPLICATE_KEY when entry is a duplicate of rec */
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
+row_ins_duplicate_error_in_clust_online(
+/*====================================*/
+ ulint n_uniq, /*!< in: offset of DB_TRX_ID */
+ const dtuple_t* entry, /*!< in: entry that is being inserted */
+ const btr_cur_t*cursor, /*!< in: cursor on insert position */
+ ulint** offsets,/*!< in/out: rec_get_offsets(rec) */
+ mem_heap_t** heap) /*!< in/out: heap for offsets */
+{
+ dberr_t err = DB_SUCCESS;
+ const rec_t* rec = btr_cur_get_rec(cursor);
+
+ if (cursor->low_match >= n_uniq && !page_rec_is_infimum(rec)) {
+ *offsets = rec_get_offsets(rec, cursor->index, *offsets,
+ ULINT_UNDEFINED, heap);
+ err = row_ins_duplicate_online(n_uniq, entry, rec, *offsets);
+ if (err != DB_SUCCESS) {
+ return(err);
+ }
+ }
+
+ rec = page_rec_get_next_const(btr_cur_get_rec(cursor));
+
+ if (cursor->up_match >= n_uniq && !page_rec_is_supremum(rec)) {
+ *offsets = rec_get_offsets(rec, cursor->index, *offsets,
+ ULINT_UNDEFINED, heap);
+ err = row_ins_duplicate_online(n_uniq, entry, rec, *offsets);
+ }
+
+ return(err);
+}
+
/***************************************************************//**
Checks if a unique key violation error would occur at an index entry
insert. Sets shared locks on possible duplicate records. Works only
for a clustered index!
-@return DB_SUCCESS if no error, DB_DUPLICATE_KEY if error,
-DB_LOCK_WAIT if we have to wait for a lock on a possible duplicate
-record */
-static
-ulint
+@retval DB_SUCCESS if no error
+@retval DB_DUPLICATE_KEY if error,
+@retval DB_LOCK_WAIT if we have to wait for a lock on a possible duplicate
+record
+@retval DB_SUCCESS_LOCKED_REC if an exact match of the record was found
+in online table rebuild (flags & (BTR_KEEP_SYS_FLAG | BTR_NO_LOCKING_FLAG)) */
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_ins_duplicate_error_in_clust(
/*=============================*/
+ ulint flags, /*!< in: undo logging and locking flags */
btr_cur_t* cursor, /*!< in: B-tree cursor */
const dtuple_t* entry, /*!< in: entry to insert */
que_thr_t* thr, /*!< in: query thread */
mtr_t* mtr) /*!< in: mtr */
{
- ulint err;
+ dberr_t err;
rec_t* rec;
ulint n_unique;
trx_t* trx = thr_get_trx(thr);
@@ -1942,8 +2115,7 @@ row_ins_duplicate_error_in_clust(
UT_NOT_USED(mtr);
- ut_a(dict_index_is_clust(cursor->index));
- ut_ad(dict_index_is_unique(cursor->index));
+ ut_ad(dict_index_is_clust(cursor->index));
/* NOTE: For unique non-clustered indexes there may be any number
of delete marked records with the same value for the non-clustered
@@ -2002,6 +2174,7 @@ row_ins_duplicate_error_in_clust(
if (row_ins_dupl_error_with_rec(
rec, entry, cursor->index, offsets)) {
+duplicate:
trx->error_info = cursor->index;
err = DB_DUPLICATE_KEY;
goto func_exit;
@@ -2046,14 +2219,12 @@ row_ins_duplicate_error_in_clust(
if (row_ins_dupl_error_with_rec(
rec, entry, cursor->index, offsets)) {
- trx->error_info = cursor->index;
- err = DB_DUPLICATE_KEY;
- goto func_exit;
+ goto duplicate;
}
}
- ut_a(!dict_index_is_clust(cursor->index));
/* This should never happen */
+ ut_error;
}
err = DB_SUCCESS;
@@ -2081,12 +2252,12 @@ row_ins_must_modify_rec(
/*====================*/
const btr_cur_t* cursor) /*!< in: B-tree cursor */
{
- /* NOTE: (compare to the note in row_ins_duplicate_error) Because node
- pointers on upper levels of the B-tree may match more to entry than
- to actual user records on the leaf level, we have to check if the
- candidate record is actually a user record. In a clustered index
- node pointers contain index->n_unique first fields, and in the case
- of a secondary index, all fields of the index. */
+ /* NOTE: (compare to the note in row_ins_duplicate_error_in_clust)
+ Because node pointers on upper levels of the B-tree may match more
+ to entry than to actual user records on the leaf level, we
+ have to check if the candidate record is actually a user record.
+ A clustered index node pointer contains index->n_unique first fields,
+ and a secondary index node pointer contains all index fields. */
return(cursor->low_match
>= dict_index_get_n_unique_in_tree(cursor->index)
@@ -2094,56 +2265,359 @@ row_ins_must_modify_rec(
}
/***************************************************************//**
-Tries to insert an index entry to an index. If the index is clustered
-and a record with the same unique key is found, the other record is
-necessarily marked deleted by a committed transaction, or a unique key
-violation error occurs. The delete marked record is then updated to an
-existing record, and we must write an undo log record on the delete
-marked record. If the index is secondary, and a record with exactly the
-same fields is found, the other record is necessarily marked deleted.
-It is then unmarked. Otherwise, the entry is just inserted to the index.
-@return DB_SUCCESS, DB_LOCK_WAIT, DB_FAIL if pessimistic retry needed,
-or error code */
-static
-ulint
-row_ins_index_entry_low(
-/*====================*/
+Tries to insert an entry into a clustered index, ignoring foreign key
+constraints. If a record with the same unique key is found, the other
+record is necessarily marked deleted by a committed transaction, or a
+unique key violation error occurs. The delete marked record is then
+updated to an existing record, and we must write an undo log record on
+the delete marked record.
+@retval DB_SUCCESS on success
+@retval DB_LOCK_WAIT on lock wait when !(flags & BTR_NO_LOCKING_FLAG)
+@retval DB_FAIL if retry with BTR_MODIFY_TREE is needed
+@return error code */
+UNIV_INTERN
+dberr_t
+row_ins_clust_index_entry_low(
+/*==========================*/
+ ulint flags, /*!< in: undo logging and locking flags */
ulint mode, /*!< in: BTR_MODIFY_LEAF or BTR_MODIFY_TREE,
depending on whether we wish optimistic or
pessimistic descent down the index tree */
- dict_index_t* index, /*!< in: index */
+ dict_index_t* index, /*!< in: clustered index */
+ ulint n_uniq, /*!< in: 0 or index->n_uniq */
dtuple_t* entry, /*!< in/out: index entry to insert */
ulint n_ext, /*!< in: number of externally stored columns */
que_thr_t* thr) /*!< in: query thread */
{
btr_cur_t cursor;
- ulint search_mode;
- ibool modify = FALSE;
- rec_t* insert_rec;
- rec_t* rec;
- ulint* offsets;
- ulint err;
- ulint n_unique;
- big_rec_t* big_rec = NULL;
+ ulint* offsets = NULL;
+ dberr_t err;
+ big_rec_t* big_rec = NULL;
mtr_t mtr;
- mem_heap_t* heap = NULL;
+ mem_heap_t* offsets_heap = NULL;
- log_free_check();
+ ut_ad(dict_index_is_clust(index));
+ ut_ad(!dict_index_is_unique(index)
+ || n_uniq == dict_index_get_n_unique(index));
+ ut_ad(!n_uniq || n_uniq == dict_index_get_n_unique(index));
mtr_start(&mtr);
+ if (mode == BTR_MODIFY_LEAF && dict_index_is_online_ddl(index)) {
+ mode = BTR_MODIFY_LEAF | BTR_ALREADY_S_LATCHED;
+ mtr_s_lock(dict_index_get_lock(index), &mtr);
+ }
+
cursor.thr = thr;
/* Note that we use PAGE_CUR_LE as the search mode, because then
the function will return in both low_match and up_match of the
cursor sensible values */
- if (dict_index_is_clust(index)) {
- search_mode = mode;
- } else if (!(thr_get_trx(thr)->check_unique_secondary)) {
- search_mode = mode | BTR_INSERT | BTR_IGNORE_SEC_UNIQUE;
+ btr_cur_search_to_nth_level(index, 0, entry, PAGE_CUR_LE, mode,
+ &cursor, 0, __FILE__, __LINE__, &mtr);
+
+#ifdef UNIV_DEBUG
+ {
+ page_t* page = btr_cur_get_page(&cursor);
+ rec_t* first_rec = page_rec_get_next(
+ page_get_infimum_rec(page));
+
+ ut_ad(page_rec_is_supremum(first_rec)
+ || rec_get_n_fields(first_rec, index)
+ == dtuple_get_n_fields(entry));
+ }
+#endif
+
+ if (n_uniq && (cursor.up_match >= n_uniq
+ || cursor.low_match >= n_uniq)) {
+
+ if (flags
+ == (BTR_CREATE_FLAG | BTR_NO_LOCKING_FLAG
+ | BTR_NO_UNDO_LOG_FLAG | BTR_KEEP_SYS_FLAG)) {
+ /* Set no locks when applying log
+ in online table rebuild. Only check for duplicates. */
+ err = row_ins_duplicate_error_in_clust_online(
+ n_uniq, entry, &cursor,
+ &offsets, &offsets_heap);
+
+ switch (err) {
+ case DB_SUCCESS:
+ break;
+ default:
+ ut_ad(0);
+ /* fall through */
+ case DB_SUCCESS_LOCKED_REC:
+ case DB_DUPLICATE_KEY:
+ thr_get_trx(thr)->error_info = cursor.index;
+ }
+ } else {
+ /* Note that the following may return also
+ DB_LOCK_WAIT */
+
+ err = row_ins_duplicate_error_in_clust(
+ flags, &cursor, entry, thr, &mtr);
+ }
+
+ if (err != DB_SUCCESS) {
+err_exit:
+ mtr_commit(&mtr);
+ goto func_exit;
+ }
+ }
+
+ if (row_ins_must_modify_rec(&cursor)) {
+ /* There is already an index entry with a long enough common
+ prefix, we must convert the insert into a modify of an
+ existing record */
+ mem_heap_t* entry_heap = mem_heap_create(1024);
+
+ err = row_ins_clust_index_entry_by_modify(
+ flags, mode, &cursor, &offsets, &offsets_heap,
+ entry_heap, &big_rec, entry, thr, &mtr);
+
+ rec_t* rec = btr_cur_get_rec(&cursor);
+
+ if (big_rec) {
+ ut_a(err == DB_SUCCESS);
+ /* Write out the externally stored
+ columns while still x-latching
+ index->lock and block->lock. Allocate
+ pages for big_rec in the mtr that
+ modified the B-tree, but be sure to skip
+ any pages that were freed in mtr. We will
+ write out the big_rec pages before
+ committing the B-tree mini-transaction. If
+ the system crashes so that crash recovery
+ will not replay the mtr_commit(&mtr), the
+ big_rec pages will be left orphaned until
+ the pages are allocated for something else.
+
+ TODO: If the allocation extends the
+ tablespace, it will not be redo
+ logged, in either mini-transaction.
+ Tablespace extension should be
+ redo-logged in the big_rec
+ mini-transaction, so that recovery
+ will not fail when the big_rec was
+ written to the extended portion of the
+ file, in case the file was somehow
+ truncated in the crash. */
+
+ DEBUG_SYNC_C_IF_THD(
+ thr_get_trx(thr)->mysql_thd,
+ "before_row_ins_upd_extern");
+ err = btr_store_big_rec_extern_fields(
+ index, btr_cur_get_block(&cursor),
+ rec, offsets, big_rec, &mtr,
+ BTR_STORE_INSERT_UPDATE);
+ DEBUG_SYNC_C_IF_THD(
+ thr_get_trx(thr)->mysql_thd,
+ "after_row_ins_upd_extern");
+ /* If writing big_rec fails (for
+ example, because of DB_OUT_OF_FILE_SPACE),
+ the record will be corrupted. Even if
+ we did not update any externally
+ stored columns, our update could cause
+ the record to grow so that a
+ non-updated column was selected for
+ external storage. This non-update
+ would not have been written to the
+ undo log, and thus the record cannot
+ be rolled back.
+
+ However, because we have not executed
+ mtr_commit(mtr) yet, the update will
+ not be replayed in crash recovery, and
+ the following assertion failure will
+ effectively "roll back" the operation. */
+ ut_a(err == DB_SUCCESS);
+ dtuple_big_rec_free(big_rec);
+ }
+
+ if (err == DB_SUCCESS && dict_index_is_online_ddl(index)) {
+ row_log_table_insert(rec, index, offsets);
+ }
+
+ mtr_commit(&mtr);
+ mem_heap_free(entry_heap);
} else {
- search_mode = mode | BTR_INSERT;
+ rec_t* insert_rec;
+
+ if (mode != BTR_MODIFY_TREE) {
+ ut_ad((mode & ~BTR_ALREADY_S_LATCHED)
+ == BTR_MODIFY_LEAF);
+ err = btr_cur_optimistic_insert(
+ flags, &cursor, &offsets, &offsets_heap,
+ entry, &insert_rec, &big_rec,
+ n_ext, thr, &mtr);
+ } else {
+ if (buf_LRU_buf_pool_running_out()) {
+
+ err = DB_LOCK_TABLE_FULL;
+ goto err_exit;
+ }
+
+ err = btr_cur_optimistic_insert(
+ flags, &cursor,
+ &offsets, &offsets_heap,
+ entry, &insert_rec, &big_rec,
+ n_ext, thr, &mtr);
+
+ if (err == DB_FAIL) {
+ err = btr_cur_pessimistic_insert(
+ flags, &cursor,
+ &offsets, &offsets_heap,
+ entry, &insert_rec, &big_rec,
+ n_ext, thr, &mtr);
+ }
+ }
+
+ if (UNIV_LIKELY_NULL(big_rec)) {
+ mtr_commit(&mtr);
+
+ /* Online table rebuild could read (and
+ ignore) the incomplete record at this point.
+ If online rebuild is in progress, the
+ row_ins_index_entry_big_rec() will write log. */
+
+ DBUG_EXECUTE_IF(
+ "row_ins_extern_checkpoint",
+ log_make_checkpoint_at(
+ IB_ULONGLONG_MAX, TRUE););
+ err = row_ins_index_entry_big_rec(
+ entry, big_rec, offsets, &offsets_heap, index,
+ thr_get_trx(thr)->mysql_thd,
+ __FILE__, __LINE__);
+ dtuple_convert_back_big_rec(index, entry, big_rec);
+ } else {
+ if (err == DB_SUCCESS
+ && dict_index_is_online_ddl(index)) {
+ row_log_table_insert(
+ insert_rec, index, offsets);
+ }
+
+ mtr_commit(&mtr);
+ }
+ }
+
+func_exit:
+ if (offsets_heap) {
+ mem_heap_free(offsets_heap);
+ }
+
+ return(err);
+}
+
+/***************************************************************//**
+Starts a mini-transaction and checks if the index will be dropped.
+@return true if the index is to be dropped */
+static __attribute__((nonnull, warn_unused_result))
+bool
+row_ins_sec_mtr_start_and_check_if_aborted(
+/*=======================================*/
+ mtr_t* mtr, /*!< out: mini-transaction */
+ dict_index_t* index, /*!< in/out: secondary index */
+ bool check, /*!< in: whether to check */
+ ulint search_mode)
+ /*!< in: flags */
+{
+ ut_ad(!dict_index_is_clust(index));
+
+ mtr_start(mtr);
+
+ if (!check) {
+ return(false);
+ }
+
+ if (search_mode & BTR_ALREADY_S_LATCHED) {
+ mtr_s_lock(dict_index_get_lock(index), mtr);
+ } else {
+ mtr_x_lock(dict_index_get_lock(index), mtr);
+ }
+
+ switch (index->online_status) {
+ case ONLINE_INDEX_ABORTED:
+ case ONLINE_INDEX_ABORTED_DROPPED:
+ ut_ad(*index->name == TEMP_INDEX_PREFIX);
+ return(true);
+ case ONLINE_INDEX_COMPLETE:
+ return(false);
+ case ONLINE_INDEX_CREATION:
+ break;
+ }
+
+ ut_error;
+ return(true);
+}
+
+/***************************************************************//**
+Tries to insert an entry into a secondary index. If a record with exactly the
+same fields is found, the other record is necessarily marked deleted.
+It is then unmarked. Otherwise, the entry is just inserted to the index.
+@retval DB_SUCCESS on success
+@retval DB_LOCK_WAIT on lock wait when !(flags & BTR_NO_LOCKING_FLAG)
+@retval DB_FAIL if retry with BTR_MODIFY_TREE is needed
+@return error code */
+UNIV_INTERN
+dberr_t
+row_ins_sec_index_entry_low(
+/*========================*/
+ ulint flags, /*!< in: undo logging and locking flags */
+ ulint mode, /*!< in: BTR_MODIFY_LEAF or BTR_MODIFY_TREE,
+ depending on whether we wish optimistic or
+ pessimistic descent down the index tree */
+ dict_index_t* index, /*!< in: secondary index */
+ mem_heap_t* offsets_heap,
+ /*!< in/out: memory heap that can be emptied */
+ mem_heap_t* heap, /*!< in/out: memory heap */
+ dtuple_t* entry, /*!< in/out: index entry to insert */
+ trx_id_t trx_id, /*!< in: PAGE_MAX_TRX_ID during
+ row_log_table_apply(), or 0 */
+ que_thr_t* thr) /*!< in: query thread */
+{
+ btr_cur_t cursor;
+ ulint search_mode = mode | BTR_INSERT;
+ dberr_t err = DB_SUCCESS;
+ ulint n_unique;
+ mtr_t mtr;
+ ulint* offsets = NULL;
+
+ ut_ad(!dict_index_is_clust(index));
+ ut_ad(mode == BTR_MODIFY_LEAF || mode == BTR_MODIFY_TREE);
+
+ cursor.thr = thr;
+ ut_ad(thr_get_trx(thr)->id);
+ mtr_start(&mtr);
+
+ /* Ensure that we acquire index->lock when inserting into an
+ index with index->online_status == ONLINE_INDEX_COMPLETE, but
+ could still be subject to rollback_inplace_alter_table().
+ This prevents a concurrent change of index->online_status.
+ The memory object cannot be freed as long as we have an open
+ reference to the table, or index->table->n_ref_count > 0. */
+ const bool check = *index->name == TEMP_INDEX_PREFIX;
+ if (check) {
+ DEBUG_SYNC_C("row_ins_sec_index_enter");
+ if (mode == BTR_MODIFY_LEAF) {
+ search_mode |= BTR_ALREADY_S_LATCHED;
+ mtr_s_lock(dict_index_get_lock(index), &mtr);
+ } else {
+ mtr_x_lock(dict_index_get_lock(index), &mtr);
+ }
+
+ if (row_log_online_op_try(
+ index, entry, thr_get_trx(thr)->id)) {
+ goto func_exit;
+ }
+ }
+
+ /* Note that we use PAGE_CUR_LE as the search mode, because then
+ the function will return in both low_match and up_match of the
+ cursor sensible values */
+
+ if (!thr_get_trx(thr)->check_unique_secondary) {
+ search_mode |= BTR_IGNORE_SEC_UNIQUE;
}
btr_cur_search_to_nth_level(index, 0, entry, PAGE_CUR_LE,
@@ -2151,13 +2625,8 @@ row_ins_index_entry_low(
&cursor, 0, __FILE__, __LINE__, &mtr);
if (cursor.flag == BTR_CUR_INSERT_TO_IBUF) {
- /* The insertion was made to the insert buffer already during
- the search: we are done */
-
- ut_ad(search_mode & BTR_INSERT);
- err = DB_SUCCESS;
-
- goto function_exit;
+ /* The insert was buffered during the search: we are done */
+ goto func_exit;
}
#ifdef UNIV_DEBUG
@@ -2174,213 +2643,250 @@ row_ins_index_entry_low(
n_unique = dict_index_get_n_unique(index);
- if (dict_index_is_unique(index) && (cursor.up_match >= n_unique
- || cursor.low_match >= n_unique)) {
+ if (dict_index_is_unique(index)
+ && (cursor.low_match >= n_unique || cursor.up_match >= n_unique)) {
+ mtr_commit(&mtr);
+
+ DEBUG_SYNC_C("row_ins_sec_index_unique");
- if (dict_index_is_clust(index)) {
- /* Note that the following may return also
- DB_LOCK_WAIT */
+ if (row_ins_sec_mtr_start_and_check_if_aborted(
+ &mtr, index, check, search_mode)) {
+ goto func_exit;
+ }
- err = row_ins_duplicate_error_in_clust(
- &cursor, entry, thr, &mtr);
- if (err != DB_SUCCESS) {
+ err = row_ins_scan_sec_index_for_duplicate(
+ flags, index, entry, thr, check, &mtr, offsets_heap);
- goto function_exit;
- }
- } else {
- mtr_commit(&mtr);
- err = row_ins_scan_sec_index_for_duplicate(
- index, entry, thr);
- mtr_start(&mtr);
+ mtr_commit(&mtr);
- if (err != DB_SUCCESS) {
- goto function_exit;
+ switch (err) {
+ case DB_SUCCESS:
+ break;
+ case DB_DUPLICATE_KEY:
+ if (*index->name == TEMP_INDEX_PREFIX) {
+ ut_ad(!thr_get_trx(thr)
+ ->dict_operation_lock_mode);
+ mutex_enter(&dict_sys->mutex);
+ dict_set_corrupted_index_cache_only(
+ index, index->table);
+ mutex_exit(&dict_sys->mutex);
+ /* Do not return any error to the
+ caller. The duplicate will be reported
+ by ALTER TABLE or CREATE UNIQUE INDEX.
+ Unfortunately we cannot report the
+ duplicate key value to the DDL thread,
+ because the altered_table object is
+ private to its call stack. */
+ err = DB_SUCCESS;
}
+ /* fall through */
+ default:
+ return(err);
+ }
- /* We did not find a duplicate and we have now
- locked with s-locks the necessary records to
- prevent any insertion of a duplicate by another
- transaction. Let us now reposition the cursor and
- continue the insertion. */
-
- btr_cur_search_to_nth_level(index, 0, entry,
- PAGE_CUR_LE,
- mode | BTR_INSERT,
- &cursor, 0,
- __FILE__, __LINE__, &mtr);
+ if (row_ins_sec_mtr_start_and_check_if_aborted(
+ &mtr, index, check, search_mode)) {
+ goto func_exit;
}
- }
- modify = row_ins_must_modify_rec(&cursor);
+ /* We did not find a duplicate and we have now
+ locked with s-locks the necessary records to
+ prevent any insertion of a duplicate by another
+ transaction. Let us now reposition the cursor and
+ continue the insertion. */
- if (modify) {
+ btr_cur_search_to_nth_level(
+ index, 0, entry, PAGE_CUR_LE,
+ search_mode & ~(BTR_INSERT | BTR_IGNORE_SEC_UNIQUE),
+ &cursor, 0, __FILE__, __LINE__, &mtr);
+ }
+
+ if (row_ins_must_modify_rec(&cursor)) {
/* There is already an index entry with a long enough common
prefix, we must convert the insert into a modify of an
existing record */
+ offsets = rec_get_offsets(
+ btr_cur_get_rec(&cursor), index, offsets,
+ ULINT_UNDEFINED, &offsets_heap);
- if (dict_index_is_clust(index)) {
- err = row_ins_clust_index_entry_by_modify(
- mode, &cursor, &heap, &big_rec, entry,
- thr, &mtr);
-
- if (big_rec) {
- ut_a(err == DB_SUCCESS);
- /* Write out the externally stored
- columns while still x-latching
- index->lock and block->lock. Allocate
- pages for big_rec in the mtr that
- modified the B-tree, but be sure to skip
- any pages that were freed in mtr. We will
- write out the big_rec pages before
- committing the B-tree mini-transaction. If
- the system crashes so that crash recovery
- will not replay the mtr_commit(&mtr), the
- big_rec pages will be left orphaned until
- the pages are allocated for something else.
-
- TODO: If the allocation extends the
- tablespace, it will not be redo
- logged, in either mini-transaction.
- Tablespace extension should be
- redo-logged in the big_rec
- mini-transaction, so that recovery
- will not fail when the big_rec was
- written to the extended portion of the
- file, in case the file was somehow
- truncated in the crash. */
-
- rec = btr_cur_get_rec(&cursor);
- offsets = rec_get_offsets(
- rec, index, NULL,
- ULINT_UNDEFINED, &heap);
-
- DEBUG_SYNC_C_IF_THD((THD*)
- thr_get_trx(thr)->mysql_thd,
- "before_row_ins_upd_extern");
- err = btr_store_big_rec_extern_fields(
- index, btr_cur_get_block(&cursor),
- rec, offsets, big_rec, &mtr,
- BTR_STORE_INSERT_UPDATE);
- DEBUG_SYNC_C_IF_THD((THD*)
- thr_get_trx(thr)->mysql_thd,
- "after_row_ins_upd_extern");
- /* If writing big_rec fails (for
- example, because of DB_OUT_OF_FILE_SPACE),
- the record will be corrupted. Even if
- we did not update any externally
- stored columns, our update could cause
- the record to grow so that a
- non-updated column was selected for
- external storage. This non-update
- would not have been written to the
- undo log, and thus the record cannot
- be rolled back.
-
- However, because we have not executed
- mtr_commit(mtr) yet, the update will
- not be replayed in crash recovery, and
- the following assertion failure will
- effectively "roll back" the operation. */
- ut_a(err == DB_SUCCESS);
- goto stored_big_rec;
- }
- } else {
- ut_ad(!n_ext);
- err = row_ins_sec_index_entry_by_modify(
- mode, &cursor, entry, thr, &mtr);
- }
+ err = row_ins_sec_index_entry_by_modify(
+ flags, mode, &cursor, &offsets,
+ offsets_heap, heap, entry, thr, &mtr);
} else {
+ rec_t* insert_rec;
+ big_rec_t* big_rec;
+
if (mode == BTR_MODIFY_LEAF) {
err = btr_cur_optimistic_insert(
- 0, &cursor, entry, &insert_rec, &big_rec,
- n_ext, thr, &mtr);
+ flags, &cursor, &offsets, &offsets_heap,
+ entry, &insert_rec,
+ &big_rec, 0, thr, &mtr);
} else {
- ut_a(mode == BTR_MODIFY_TREE);
+ ut_ad(mode == BTR_MODIFY_TREE);
if (buf_LRU_buf_pool_running_out()) {
err = DB_LOCK_TABLE_FULL;
-
- goto function_exit;
+ goto func_exit;
}
err = btr_cur_optimistic_insert(
- 0, &cursor, entry, &insert_rec, &big_rec,
- n_ext, thr, &mtr);
-
+ flags, &cursor,
+ &offsets, &offsets_heap,
+ entry, &insert_rec,
+ &big_rec, 0, thr, &mtr);
if (err == DB_FAIL) {
err = btr_cur_pessimistic_insert(
- 0, &cursor, entry, &insert_rec,
- &big_rec, n_ext, thr, &mtr);
+ flags, &cursor,
+ &offsets, &offsets_heap,
+ entry, &insert_rec,
+ &big_rec, 0, thr, &mtr);
}
}
+
+ if (err == DB_SUCCESS && trx_id) {
+ page_update_max_trx_id(
+ btr_cur_get_block(&cursor),
+ btr_cur_get_page_zip(&cursor),
+ trx_id, &mtr);
+ }
+
+ ut_ad(!big_rec);
}
-function_exit:
+func_exit:
mtr_commit(&mtr);
+ return(err);
+}
- if (UNIV_LIKELY_NULL(big_rec)) {
- DBUG_EXECUTE_IF(
- "row_ins_extern_checkpoint",
- log_make_checkpoint_at(IB_ULONGLONG_MAX, TRUE););
-
- mtr_start(&mtr);
-
- DEBUG_SYNC_C_IF_THD((THD*)
- thr_get_trx(thr)->mysql_thd,
- "before_row_ins_extern_latch");
- btr_cur_search_to_nth_level(index, 0, entry, PAGE_CUR_LE,
- BTR_MODIFY_TREE, &cursor, 0,
- __FILE__, __LINE__, &mtr);
- rec = btr_cur_get_rec(&cursor);
- offsets = rec_get_offsets(rec, index, NULL,
- ULINT_UNDEFINED, &heap);
-
- DEBUG_SYNC_C_IF_THD((THD*)
- thr_get_trx(thr)->mysql_thd,
- "before_row_ins_extern");
- err = btr_store_big_rec_extern_fields(
- index, btr_cur_get_block(&cursor),
- rec, offsets, big_rec, &mtr, BTR_STORE_INSERT);
- DEBUG_SYNC_C_IF_THD((THD*)
- thr_get_trx(thr)->mysql_thd,
- "after_row_ins_extern");
-
-stored_big_rec:
- if (modify) {
- dtuple_big_rec_free(big_rec);
- } else {
- dtuple_convert_back_big_rec(index, entry, big_rec);
+/***************************************************************//**
+Tries to insert the externally stored fields (off-page columns)
+of a clustered index entry.
+@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */
+UNIV_INTERN
+dberr_t
+row_ins_index_entry_big_rec_func(
+/*=============================*/
+ const dtuple_t* entry, /*!< in/out: index entry to insert */
+ const big_rec_t* big_rec,/*!< in: externally stored fields */
+ ulint* offsets,/*!< in/out: rec offsets */
+ mem_heap_t** heap, /*!< in/out: memory heap */
+ dict_index_t* index, /*!< in: index */
+ const char* file, /*!< in: file name of caller */
+#ifndef DBUG_OFF
+ const void* thd, /*!< in: connection, or NULL */
+#endif /* DBUG_OFF */
+ ulint line) /*!< in: line number of caller */
+{
+ mtr_t mtr;
+ btr_cur_t cursor;
+ rec_t* rec;
+ dberr_t error;
+
+ ut_ad(dict_index_is_clust(index));
+
+ DEBUG_SYNC_C_IF_THD(thd, "before_row_ins_extern_latch");
+
+ mtr_start(&mtr);
+ btr_cur_search_to_nth_level(index, 0, entry, PAGE_CUR_LE,
+ BTR_MODIFY_TREE, &cursor, 0,
+ file, line, &mtr);
+ rec = btr_cur_get_rec(&cursor);
+ offsets = rec_get_offsets(rec, index, offsets,
+ ULINT_UNDEFINED, heap);
+
+ DEBUG_SYNC_C_IF_THD(thd, "before_row_ins_extern");
+ error = btr_store_big_rec_extern_fields(
+ index, btr_cur_get_block(&cursor),
+ rec, offsets, big_rec, &mtr, BTR_STORE_INSERT);
+ DEBUG_SYNC_C_IF_THD(thd, "after_row_ins_extern");
+
+ if (error == DB_SUCCESS
+ && dict_index_is_online_ddl(index)) {
+ row_log_table_insert(rec, index, offsets);
+ }
+
+ mtr_commit(&mtr);
+
+ return(error);
+}
+
+/***************************************************************//**
+Inserts an entry into a clustered index. Tries first optimistic,
+then pessimistic descent down the tree. If the entry matches enough
+to a delete marked record, performs the insert by updating or delete
+unmarking the delete marked record.
+@return DB_SUCCESS, DB_LOCK_WAIT, DB_DUPLICATE_KEY, or some other error code */
+UNIV_INTERN
+dberr_t
+row_ins_clust_index_entry(
+/*======================*/
+ dict_index_t* index, /*!< in: clustered index */
+ dtuple_t* entry, /*!< in/out: index entry to insert */
+ que_thr_t* thr, /*!< in: query thread */
+ ulint n_ext) /*!< in: number of externally stored columns */
+{
+ dberr_t err;
+ ulint n_uniq;
+
+ if (UT_LIST_GET_FIRST(index->table->foreign_list)) {
+ err = row_ins_check_foreign_constraints(
+ index->table, index, entry, thr);
+ if (err != DB_SUCCESS) {
+
+ return(err);
}
+ }
- mtr_commit(&mtr);
+ n_uniq = dict_index_is_unique(index) ? index->n_uniq : 0;
+
+ /* Try first optimistic descent to the B-tree */
+
+ log_free_check();
+
+ err = row_ins_clust_index_entry_low(
+ 0, BTR_MODIFY_LEAF, index, n_uniq, entry, n_ext, thr);
+
+#ifdef UNIV_DEBUG
+ /* Work around Bug#14626800 ASSERTION FAILURE IN DEBUG_SYNC().
+ Once it is fixed, remove the 'ifdef', 'if' and this comment. */
+ if (!thr_get_trx(thr)->ddl) {
+ DEBUG_SYNC_C_IF_THD(thr_get_trx(thr)->mysql_thd,
+ "after_row_ins_clust_index_entry_leaf");
}
+#endif /* UNIV_DEBUG */
- if (UNIV_LIKELY_NULL(heap)) {
- mem_heap_free(heap);
+ if (err != DB_FAIL) {
+ DEBUG_SYNC_C("row_ins_clust_index_entry_leaf_after");
+ return(err);
}
- return(err);
+
+ /* Try then pessimistic descent to the B-tree */
+
+ log_free_check();
+
+ return(row_ins_clust_index_entry_low(
+ 0, BTR_MODIFY_TREE, index, n_uniq, entry, n_ext, thr));
}
/***************************************************************//**
-Inserts an index entry to index. Tries first optimistic, then pessimistic
-descent down the tree. If the entry matches enough to a delete marked record,
-performs the insert by updating or delete unmarking the delete marked
-record.
+Inserts an entry into a secondary index. Tries first optimistic,
+then pessimistic descent down the tree. If the entry matches enough
+to a delete marked record, performs the insert by updating or delete
+unmarking the delete marked record.
@return DB_SUCCESS, DB_LOCK_WAIT, DB_DUPLICATE_KEY, or some other error code */
UNIV_INTERN
-ulint
-row_ins_index_entry(
-/*================*/
- dict_index_t* index, /*!< in: index */
+dberr_t
+row_ins_sec_index_entry(
+/*====================*/
+ dict_index_t* index, /*!< in: secondary index */
dtuple_t* entry, /*!< in/out: index entry to insert */
- ulint n_ext, /*!< in: number of externally stored columns */
- ibool foreign,/*!< in: TRUE=check foreign key constraints
- (foreign=FALSE only during CREATE INDEX) */
que_thr_t* thr) /*!< in: query thread */
{
- ulint err;
+ dberr_t err;
+ mem_heap_t* offsets_heap;
+ mem_heap_t* heap;
- if (foreign && UT_LIST_GET_FIRST(index->table->foreign_list)) {
+ if (UT_LIST_GET_FIRST(index->table->foreign_list)) {
err = row_ins_check_foreign_constraints(index->table, index,
entry, thr);
if (err != DB_SUCCESS) {
@@ -2389,29 +2895,59 @@ row_ins_index_entry(
}
}
+ ut_ad(thr_get_trx(thr)->id);
+
+ offsets_heap = mem_heap_create(1024);
+ heap = mem_heap_create(1024);
+
/* Try first optimistic descent to the B-tree */
- err = row_ins_index_entry_low(BTR_MODIFY_LEAF, index, entry,
- n_ext, thr);
- if (err != DB_FAIL) {
- if (index == dict_table_get_first_index(index->table)
- && thr_get_trx(thr)->mysql_thd != 0) {
- DEBUG_SYNC_C("row_ins_clust_index_entry_leaf_after");
- }
- return(err);
- }
+ log_free_check();
- /* Try then pessimistic descent to the B-tree */
+ err = row_ins_sec_index_entry_low(
+ 0, BTR_MODIFY_LEAF, index, offsets_heap, heap, entry, 0, thr);
+ if (err == DB_FAIL) {
+ mem_heap_empty(heap);
- err = row_ins_index_entry_low(BTR_MODIFY_TREE, index, entry,
- n_ext, thr);
+ /* Try then pessimistic descent to the B-tree */
+
+ log_free_check();
+
+ err = row_ins_sec_index_entry_low(
+ 0, BTR_MODIFY_TREE, index,
+ offsets_heap, heap, entry, 0, thr);
+ }
+
+ mem_heap_free(heap);
+ mem_heap_free(offsets_heap);
return(err);
}
+/***************************************************************//**
+Inserts an index entry to index. Tries first optimistic, then pessimistic
+descent down the tree. If the entry matches enough to a delete marked record,
+performs the insert by updating or delete unmarking the delete marked
+record.
+@return DB_SUCCESS, DB_LOCK_WAIT, DB_DUPLICATE_KEY, or some other error code */
+static
+dberr_t
+row_ins_index_entry(
+/*================*/
+ dict_index_t* index, /*!< in: index */
+ dtuple_t* entry, /*!< in/out: index entry to insert */
+ que_thr_t* thr) /*!< in: query thread */
+{
+ if (dict_index_is_clust(index)) {
+ return(row_ins_clust_index_entry(index, entry, thr, 0));
+ } else {
+ return(row_ins_sec_index_entry(index, entry, thr));
+ }
+}
+
/***********************************************************//**
Sets the values of the dtuple fields in entry from the values of appropriate
columns in row. */
-static
+static __attribute__((nonnull))
void
row_ins_index_entry_set_vals(
/*=========================*/
@@ -2422,8 +2958,6 @@ row_ins_index_entry_set_vals(
ulint n_fields;
ulint i;
- ut_ad(entry && row);
-
n_fields = dtuple_get_n_fields(entry);
for (i = 0; i < n_fields; i++) {
@@ -2466,14 +3000,14 @@ row_ins_index_entry_set_vals(
Inserts a single index entry to the table.
@return DB_SUCCESS if operation successfully completed, else error
code or DB_LOCK_WAIT */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_ins_index_entry_step(
/*=====================*/
ins_node_t* node, /*!< in: row insert node */
que_thr_t* thr) /*!< in: query thread */
{
- ulint err;
+ dberr_t err;
ut_ad(dtuple_check_typed(node->row));
@@ -2481,7 +3015,16 @@ row_ins_index_entry_step(
ut_ad(dtuple_check_typed(node->entry));
- err = row_ins_index_entry(node->index, node->entry, 0, TRUE, thr);
+ err = row_ins_index_entry(node->index, node->entry, thr);
+
+#ifdef UNIV_DEBUG
+ /* Work around Bug#14626800 ASSERTION FAILURE IN DEBUG_SYNC().
+ Once it is fixed, remove the 'ifdef', 'if' and this comment. */
+ if (!thr_get_trx(thr)->ddl) {
+ DEBUG_SYNC_C_IF_THD(thr_get_trx(thr)->mysql_thd,
+ "after_row_ins_index_entry_step");
+ }
+#endif /* UNIV_DEBUG */
return(err);
}
@@ -2580,16 +3123,14 @@ row_ins_get_row_from_select(
Inserts a row to a table.
@return DB_SUCCESS if operation successfully completed, else error
code or DB_LOCK_WAIT */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_ins(
/*====*/
ins_node_t* node, /*!< in: row insert node */
que_thr_t* thr) /*!< in: query thread */
{
- ulint err;
-
- ut_ad(node && thr);
+ dberr_t err;
if (node->state == INS_NODE_ALLOC_ROW_ID) {
@@ -2625,6 +3166,10 @@ row_ins(
node->index = dict_table_get_next_index(node->index);
node->entry = UT_LIST_GET_NEXT(tuple_list, node->entry);
+ DBUG_EXECUTE_IF(
+ "row_ins_skip_sec",
+ node->index = NULL; node->entry = NULL; break;);
+
/* Skip corrupted secondary index and its entry */
while (node->index && dict_index_is_corrupted(node->index)) {
@@ -2654,7 +3199,7 @@ row_ins_step(
que_node_t* parent;
sel_node_t* sel_node;
trx_t* trx;
- ulint err;
+ dberr_t err;
ut_ad(thr);
@@ -2687,6 +3232,8 @@ row_ins_step(
if (node->state == INS_NODE_SET_IX_LOCK) {
+ node->state = INS_NODE_ALLOC_ROW_ID;
+
/* It may be that the current session has not yet started
its transaction, or it has been committed: */
@@ -2698,6 +3245,9 @@ row_ins_step(
err = lock_table(0, node->table, LOCK_IX, thr);
+ DBUG_EXECUTE_IF("ib_row_ins_ix_lock_wait",
+ err = DB_LOCK_WAIT;);
+
if (err != DB_SUCCESS) {
goto error_handling;
@@ -2705,8 +3255,6 @@ row_ins_step(
node->trx_id = trx->id;
same_trx:
- node->state = INS_NODE_ALLOC_ROW_ID;
-
if (node->ins_type == INS_SEARCHED) {
/* Reset the cursor */
sel_node->state = SEL_NODE_OPEN;
@@ -2735,7 +3283,7 @@ same_trx:
err = row_ins(node, thr);
error_handling:
- trx->error_state = static_cast<enum db_err>(err);
+ trx->error_state = err;
if (err != DB_SUCCESS) {
/* err == DB_LOCK_WAIT or SQL error detected */
diff --git a/storage/innobase/row/row0log.cc b/storage/innobase/row/row0log.cc
new file mode 100644
index 00000000000..b373b70ab7a
--- /dev/null
+++ b/storage/innobase/row/row0log.cc
@@ -0,0 +1,3219 @@
+/*****************************************************************************
+
+Copyright (c) 2011, 2012, Oracle and/or its affiliates. All Rights Reserved.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
+
+*****************************************************************************/
+
+/**************************************************//**
+@file row/row0log.cc
+Modification log for online index creation and online table rebuild
+
+Created 2011-05-26 Marko Makela
+*******************************************************/
+
+#include "row0log.h"
+
+#ifdef UNIV_NONINL
+#include "row0log.ic"
+#endif
+
+#include "row0row.h"
+#include "row0ins.h"
+#include "row0upd.h"
+#include "row0merge.h"
+#include "row0ext.h"
+#include "data0data.h"
+#include "que0que.h"
+#include "handler0alter.h"
+
+#include<set>
+
+/** Table row modification operations during online table rebuild.
+Delete-marked records are not copied to the rebuilt table. */
+enum row_tab_op {
+ /** Insert a record */
+ ROW_T_INSERT = 0x41,
+ /** Update a record in place */
+ ROW_T_UPDATE,
+ /** Delete (purge) a record */
+ ROW_T_DELETE
+};
+
+/** Index record modification operations during online index creation */
+enum row_op {
+ /** Insert a record */
+ ROW_OP_INSERT = 0x61,
+ /** Delete a record */
+ ROW_OP_DELETE
+};
+
+#ifdef UNIV_DEBUG
+/** Write information about the applied record to the error log */
+# define ROW_LOG_APPLY_PRINT
+#endif /* UNIV_DEBUG */
+
+#ifdef ROW_LOG_APPLY_PRINT
+/** When set, write information about the applied record to the error log */
+static bool row_log_apply_print;
+#endif /* ROW_LOG_APPLY_PRINT */
+
+/** Size of the modification log entry header, in bytes */
+#define ROW_LOG_HEADER_SIZE 2/*op, extra_size*/
+
+/** Log block for modifications during online index creation */
+struct row_log_buf_t {
+ byte* block; /*!< file block buffer */
+ mrec_buf_t buf; /*!< buffer for accessing a record
+ that spans two blocks */
+ ulint blocks; /*!< current position in blocks */
+ ulint bytes; /*!< current position within buf */
+};
+
+/** Set of transactions that rolled back inserts of BLOBs during
+online table rebuild */
+typedef std::set<trx_id_t> trx_id_set;
+
+/** @brief Buffer for logging modifications during online index creation
+
+All modifications to an index that is being created will be logged by
+row_log_online_op() to this buffer.
+
+All modifications to a table that is being rebuilt will be logged by
+row_log_table_delete(), row_log_table_update(), row_log_table_insert()
+to this buffer.
+
+When head.blocks == tail.blocks, the reader will access tail.block
+directly. When also head.bytes == tail.bytes, both counts will be
+reset to 0 and the file will be truncated. */
+struct row_log_t {
+ int fd; /*!< file descriptor */
+ ib_mutex_t mutex; /*!< mutex protecting trx_log, error,
+ max_trx and tail */
+ trx_id_set* trx_rb; /*!< set of transactions that rolled back
+ inserts of BLOBs during online table rebuild;
+ protected by mutex */
+ dict_table_t* table; /*!< table that is being rebuilt,
+ or NULL when this is a secondary
+ index that is being created online */
+ bool same_pk;/*!< whether the definition of the PRIMARY KEY
+ has remained the same */
+ const dtuple_t* add_cols;
+ /*!< default values of added columns, or NULL */
+ const ulint* col_map;/*!< mapping of old column numbers to
+ new ones, or NULL if !table */
+ dberr_t error; /*!< error that occurred during online
+ table rebuild */
+ trx_id_t max_trx;/*!< biggest observed trx_id in
+ row_log_online_op();
+ protected by mutex and index->lock S-latch,
+ or by index->lock X-latch only */
+ row_log_buf_t tail; /*!< writer context;
+ protected by mutex and index->lock S-latch,
+ or by index->lock X-latch only */
+ row_log_buf_t head; /*!< reader context; protected by MDL only;
+ modifiable by row_log_apply_ops() */
+ ulint size; /*!< allocated size */
+};
+
+/******************************************************//**
+Logs an operation to a secondary index that is (or was) being created. */
+UNIV_INTERN
+void
+row_log_online_op(
+/*==============*/
+ dict_index_t* index, /*!< in/out: index, S or X latched */
+ const dtuple_t* tuple, /*!< in: index tuple */
+ trx_id_t trx_id) /*!< in: transaction ID for insert,
+ or 0 for delete */
+{
+ byte* b;
+ ulint extra_size;
+ ulint size;
+ ulint mrec_size;
+ ulint avail_size;
+ row_log_t* log;
+
+ ut_ad(dtuple_validate(tuple));
+ ut_ad(dtuple_get_n_fields(tuple) == dict_index_get_n_fields(index));
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(rw_lock_own(dict_index_get_lock(index), RW_LOCK_SHARED)
+ || rw_lock_own(dict_index_get_lock(index), RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+
+ if (dict_index_is_corrupted(index)) {
+ return;
+ }
+
+ ut_ad(dict_index_is_online_ddl(index));
+
+ /* Compute the size of the record. This differs from
+ row_merge_buf_encode(), because here we do not encode
+ extra_size+1 (and reserve 0 as the end-of-chunk marker). */
+
+ size = rec_get_converted_size_temp(
+ index, tuple->fields, tuple->n_fields, &extra_size);
+ ut_ad(size >= extra_size);
+ ut_ad(size <= sizeof log->tail.buf);
+
+ mrec_size = ROW_LOG_HEADER_SIZE
+ + (extra_size >= 0x80) + size
+ + (trx_id ? DATA_TRX_ID_LEN : 0);
+
+ log = index->online_log;
+ mutex_enter(&log->mutex);
+
+ if (trx_id > log->max_trx) {
+ log->max_trx = trx_id;
+ }
+
+ UNIV_MEM_INVALID(log->tail.buf, sizeof log->tail.buf);
+
+ ut_ad(log->tail.bytes < srv_sort_buf_size);
+ avail_size = srv_sort_buf_size - log->tail.bytes;
+
+ if (mrec_size > avail_size) {
+ b = log->tail.buf;
+ } else {
+ b = log->tail.block + log->tail.bytes;
+ }
+
+ if (trx_id != 0) {
+ *b++ = ROW_OP_INSERT;
+ trx_write_trx_id(b, trx_id);
+ b += DATA_TRX_ID_LEN;
+ } else {
+ *b++ = ROW_OP_DELETE;
+ }
+
+ if (extra_size < 0x80) {
+ *b++ = (byte) extra_size;
+ } else {
+ ut_ad(extra_size < 0x8000);
+ *b++ = (byte) (0x80 | (extra_size >> 8));
+ *b++ = (byte) extra_size;
+ }
+
+ rec_convert_dtuple_to_temp(
+ b + extra_size, index, tuple->fields, tuple->n_fields);
+ b += size;
+
+ if (mrec_size >= avail_size) {
+ const os_offset_t byte_offset
+ = (os_offset_t) log->tail.blocks
+ * srv_sort_buf_size;
+ ibool ret;
+
+ if (byte_offset + srv_sort_buf_size >= srv_online_max_size) {
+ goto write_failed;
+ }
+
+ if (mrec_size == avail_size) {
+ ut_ad(b == &log->tail.block[srv_sort_buf_size]);
+ } else {
+ ut_ad(b == log->tail.buf + mrec_size);
+ memcpy(log->tail.block + log->tail.bytes,
+ log->tail.buf, avail_size);
+ }
+ UNIV_MEM_ASSERT_RW(log->tail.block, srv_sort_buf_size);
+ ret = os_file_write(
+ "(modification log)",
+ OS_FILE_FROM_FD(log->fd),
+ log->tail.block, byte_offset, srv_sort_buf_size);
+ log->tail.blocks++;
+ if (!ret) {
+write_failed:
+ /* We set the flag directly instead of invoking
+ dict_set_corrupted_index_cache_only(index) here,
+ because the index is not "public" yet. */
+ index->type |= DICT_CORRUPT;
+ }
+ UNIV_MEM_INVALID(log->tail.block, srv_sort_buf_size);
+ memcpy(log->tail.block, log->tail.buf + avail_size,
+ mrec_size - avail_size);
+ log->tail.bytes = mrec_size - avail_size;
+ } else {
+ log->tail.bytes += mrec_size;
+ ut_ad(b == log->tail.block + log->tail.bytes);
+ }
+
+ UNIV_MEM_INVALID(log->tail.buf, sizeof log->tail.buf);
+ mutex_exit(&log->mutex);
+}
+
+/******************************************************//**
+Gets the error status of the online index rebuild log.
+@return DB_SUCCESS or error code */
+UNIV_INTERN
+dberr_t
+row_log_table_get_error(
+/*====================*/
+ const dict_index_t* index) /*!< in: clustered index of a table
+ that is being rebuilt online */
+{
+ ut_ad(dict_index_is_clust(index));
+ ut_ad(dict_index_is_online_ddl(index));
+ return(index->online_log->error);
+}
+
+/******************************************************//**
+Starts logging an operation to a table that is being rebuilt.
+@return pointer to log, or NULL if no logging is necessary */
+static __attribute__((nonnull, warn_unused_result))
+byte*
+row_log_table_open(
+/*===============*/
+ row_log_t* log, /*!< in/out: online rebuild log */
+ ulint size, /*!< in: size of log record */
+ ulint* avail) /*!< out: available size for log record */
+{
+ mutex_enter(&log->mutex);
+
+ UNIV_MEM_INVALID(log->tail.buf, sizeof log->tail.buf);
+
+ if (log->error != DB_SUCCESS) {
+ mutex_exit(&log->mutex);
+ return(NULL);
+ }
+
+ ut_ad(log->tail.bytes < srv_sort_buf_size);
+ *avail = srv_sort_buf_size - log->tail.bytes;
+
+ if (size > *avail) {
+ return(log->tail.buf);
+ } else {
+ return(log->tail.block + log->tail.bytes);
+ }
+}
+
+/******************************************************//**
+Stops logging an operation to a table that is being rebuilt. */
+static __attribute__((nonnull))
+void
+row_log_table_close_func(
+/*=====================*/
+ row_log_t* log, /*!< in/out: online rebuild log */
+#ifdef UNIV_DEBUG
+ const byte* b, /*!< in: end of log record */
+#endif /* UNIV_DEBUG */
+ ulint size, /*!< in: size of log record */
+ ulint avail) /*!< in: available size for log record */
+{
+ ut_ad(mutex_own(&log->mutex));
+
+ if (size >= avail) {
+ const os_offset_t byte_offset
+ = (os_offset_t) log->tail.blocks
+ * srv_sort_buf_size;
+ ibool ret;
+
+ if (byte_offset + srv_sort_buf_size >= srv_online_max_size) {
+ goto write_failed;
+ }
+
+ if (size == avail) {
+ ut_ad(b == &log->tail.block[srv_sort_buf_size]);
+ } else {
+ ut_ad(b == log->tail.buf + size);
+ memcpy(log->tail.block + log->tail.bytes,
+ log->tail.buf, avail);
+ }
+ UNIV_MEM_ASSERT_RW(log->tail.block, srv_sort_buf_size);
+ ret = os_file_write(
+ "(modification log)",
+ OS_FILE_FROM_FD(log->fd),
+ log->tail.block, byte_offset, srv_sort_buf_size);
+ log->tail.blocks++;
+ if (!ret) {
+write_failed:
+ log->error = DB_ONLINE_LOG_TOO_BIG;
+ }
+ UNIV_MEM_INVALID(log->tail.block, srv_sort_buf_size);
+ memcpy(log->tail.block, log->tail.buf + avail, size - avail);
+ log->tail.bytes = size - avail;
+ } else {
+ log->tail.bytes += size;
+ ut_ad(b == log->tail.block + log->tail.bytes);
+ }
+
+ UNIV_MEM_INVALID(log->tail.buf, sizeof log->tail.buf);
+ mutex_exit(&log->mutex);
+}
+
+#ifdef UNIV_DEBUG
+# define row_log_table_close(log, b, size, avail) \
+ row_log_table_close_func(log, b, size, avail)
+#else /* UNIV_DEBUG */
+# define row_log_table_close(log, b, size, avail) \
+ row_log_table_close_func(log, size, avail)
+#endif /* UNIV_DEBUG */
+
+/******************************************************//**
+Logs a delete operation to a table that is being rebuilt.
+This will be merged in row_log_table_apply_delete(). */
+UNIV_INTERN
+void
+row_log_table_delete(
+/*=================*/
+ const rec_t* rec, /*!< in: clustered index leaf page record,
+ page X-latched */
+ dict_index_t* index, /*!< in/out: clustered index, S-latched
+ or X-latched */
+ const ulint* offsets,/*!< in: rec_get_offsets(rec,index) */
+ trx_id_t trx_id) /*!< in: DB_TRX_ID of the record before
+ it was deleted */
+{
+ ulint old_pk_extra_size;
+ ulint old_pk_size;
+ ulint ext_size = 0;
+ ulint mrec_size;
+ ulint avail_size;
+ mem_heap_t* heap = NULL;
+ const dtuple_t* old_pk;
+ row_ext_t* ext;
+
+ ut_ad(dict_index_is_clust(index));
+ ut_ad(rec_offs_validate(rec, index, offsets));
+ ut_ad(rec_offs_n_fields(offsets) == dict_index_get_n_fields(index));
+ ut_ad(rec_offs_size(offsets) <= sizeof index->online_log->tail.buf);
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(rw_lock_own(&index->lock, RW_LOCK_SHARED)
+ || rw_lock_own(&index->lock, RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+
+ if (dict_index_is_corrupted(index)
+ || !dict_index_is_online_ddl(index)
+ || index->online_log->error != DB_SUCCESS) {
+ return;
+ }
+
+ dict_table_t* new_table = index->online_log->table;
+ dict_index_t* new_index = dict_table_get_first_index(new_table);
+
+ ut_ad(dict_index_is_clust(new_index));
+ ut_ad(!dict_index_is_online_ddl(new_index));
+
+ /* Create the tuple PRIMARY KEY, DB_TRX_ID in the new_table. */
+ if (index->online_log->same_pk) {
+ byte* db_trx_id;
+ dtuple_t* tuple;
+ ut_ad(new_index->n_uniq == index->n_uniq);
+
+ /* The PRIMARY KEY and DB_TRX_ID are in the first
+ fields of the record. */
+ heap = mem_heap_create(
+ DATA_TRX_ID_LEN
+ + DTUPLE_EST_ALLOC(new_index->n_uniq + 1));
+ old_pk = tuple = dtuple_create(heap, new_index->n_uniq + 1);
+ dict_index_copy_types(tuple, new_index, tuple->n_fields);
+ dtuple_set_n_fields_cmp(tuple, new_index->n_uniq);
+
+ for (ulint i = 0; i < new_index->n_uniq; i++) {
+ ulint len;
+ const void* field = rec_get_nth_field(
+ rec, offsets, i, &len);
+ dfield_t* dfield = dtuple_get_nth_field(
+ tuple, i);
+ ut_ad(len != UNIV_SQL_NULL);
+ ut_ad(!rec_offs_nth_extern(offsets, i));
+ dfield_set_data(dfield, field, len);
+ }
+
+ db_trx_id = static_cast<byte*>(
+ mem_heap_alloc(heap, DATA_TRX_ID_LEN));
+ trx_write_trx_id(db_trx_id, trx_id);
+
+ dfield_set_data(dtuple_get_nth_field(tuple, new_index->n_uniq),
+ db_trx_id, DATA_TRX_ID_LEN);
+ } else {
+ /* The PRIMARY KEY has changed. Translate the tuple. */
+ dfield_t* dfield;
+
+ old_pk = row_log_table_get_pk(rec, index, offsets, &heap);
+
+ if (!old_pk) {
+ ut_ad(index->online_log->error != DB_SUCCESS);
+ return;
+ }
+
+ /* Remove DB_ROLL_PTR. */
+ ut_ad(dtuple_get_n_fields_cmp(old_pk)
+ == dict_index_get_n_unique(new_index));
+ ut_ad(dtuple_get_n_fields(old_pk)
+ == dict_index_get_n_unique(new_index) + 2);
+ const_cast<ulint&>(old_pk->n_fields)--;
+
+ /* Overwrite DB_TRX_ID with the old trx_id. */
+ dfield = dtuple_get_nth_field(old_pk, new_index->n_uniq);
+ ut_ad(dfield_get_type(dfield)->mtype == DATA_SYS);
+ ut_ad(dfield_get_type(dfield)->prtype
+ == (DATA_NOT_NULL | DATA_TRX_ID));
+ ut_ad(dfield_get_len(dfield) == DATA_TRX_ID_LEN);
+ trx_write_trx_id(static_cast<byte*>(dfield->data), trx_id);
+ }
+
+ ut_ad(dtuple_get_n_fields(old_pk) > 1);
+ ut_ad(DATA_TRX_ID_LEN == dtuple_get_nth_field(
+ old_pk, old_pk->n_fields - 1)->len);
+ old_pk_size = rec_get_converted_size_temp(
+ new_index, old_pk->fields, old_pk->n_fields,
+ &old_pk_extra_size);
+ ut_ad(old_pk_extra_size < 0x100);
+
+ mrec_size = 4 + old_pk_size;
+
+ /* If the row is marked as rollback, we will need to
+ log the enough prefix of the BLOB unless both the
+ old and new table are in COMPACT or REDUNDANT format */
+ if ((dict_table_get_format(index->table) >= UNIV_FORMAT_B
+ || dict_table_get_format(new_table) >= UNIV_FORMAT_B)
+ && row_log_table_is_rollback(index, trx_id)) {
+ if (rec_offs_any_extern(offsets)) {
+ /* Build a cache of those off-page column
+ prefixes that are referenced by secondary
+ indexes. It can be that none of the off-page
+ columns are needed. */
+ row_build(ROW_COPY_DATA, index, rec,
+ offsets, NULL, NULL, NULL, &ext, heap);
+ if (ext) {
+ /* Log the row_ext_t, ext->ext and ext->buf */
+ ext_size = ext->n_ext * ext->max_len
+ + sizeof(*ext)
+ + ext->n_ext * sizeof(ulint)
+ + (ext->n_ext - 1) * sizeof ext->len;
+ mrec_size += ext_size;
+ }
+ }
+ }
+
+ if (byte* b = row_log_table_open(index->online_log,
+ mrec_size, &avail_size)) {
+ *b++ = ROW_T_DELETE;
+ *b++ = static_cast<byte>(old_pk_extra_size);
+
+ /* Log the size of external prefix we saved */
+ mach_write_to_2(b, ext_size);
+ b += 2;
+
+ rec_convert_dtuple_to_temp(
+ b + old_pk_extra_size, new_index,
+ old_pk->fields, old_pk->n_fields);
+
+ b += old_pk_size;
+
+ if (ext_size) {
+ ulint cur_ext_size = sizeof(*ext)
+ + (ext->n_ext - 1) * sizeof ext->len;
+
+ memcpy(b, ext, cur_ext_size);
+ b += cur_ext_size;
+
+ /* Check if we need to col_map to adjust the column
+ number. If columns were added/removed/reordered,
+ adjust the column number. */
+ if (const ulint* col_map =
+ index->online_log->col_map) {
+ for (ulint i = 0; i < ext->n_ext; i++) {
+ const_cast<ulint&>(ext->ext[i]) =
+ col_map[ext->ext[i]];
+ }
+ }
+
+ memcpy(b, ext->ext, ext->n_ext * sizeof(*ext->ext));
+ b += ext->n_ext * sizeof(*ext->ext);
+
+ ext_size -= cur_ext_size
+ + ext->n_ext * sizeof(*ext->ext);
+ memcpy(b, ext->buf, ext_size);
+ b += ext_size;
+ }
+
+ row_log_table_close(
+ index->online_log, b, mrec_size, avail_size);
+ }
+
+ mem_heap_free(heap);
+}
+
+/******************************************************//**
+Logs an insert or update to a table that is being rebuilt. */
+static __attribute__((nonnull(1,2,3)))
+void
+row_log_table_low_redundant(
+/*========================*/
+ const rec_t* rec, /*!< in: clustered index leaf
+ page record in ROW_FORMAT=REDUNDANT,
+ page X-latched */
+ dict_index_t* index, /*!< in/out: clustered index, S-latched
+ or X-latched */
+ const ulint* offsets,/*!< in: rec_get_offsets(rec,index) */
+ bool insert, /*!< in: true if insert,
+ false if update */
+ const dtuple_t* old_pk, /*!< in: old PRIMARY KEY value
+ (if !insert and a PRIMARY KEY
+ is being created) */
+ const dict_index_t* new_index)
+ /*!< in: clustered index of the
+ new table, not latched */
+{
+ ulint old_pk_size;
+ ulint old_pk_extra_size;
+ ulint size;
+ ulint extra_size;
+ ulint mrec_size;
+ ulint avail_size;
+ mem_heap_t* heap = NULL;
+ dtuple_t* tuple;
+
+ ut_ad(!page_is_comp(page_align(rec)));
+ ut_ad(dict_index_get_n_fields(index) == rec_get_n_fields_old(rec));
+
+ heap = mem_heap_create(DTUPLE_EST_ALLOC(index->n_fields));
+ tuple = dtuple_create(heap, index->n_fields);
+ dict_index_copy_types(tuple, index, index->n_fields);
+ dtuple_set_n_fields_cmp(tuple, dict_index_get_n_unique(index));
+
+ if (rec_get_1byte_offs_flag(rec)) {
+ for (ulint i = 0; i < index->n_fields; i++) {
+ dfield_t* dfield;
+ ulint len;
+ const void* field;
+
+ dfield = dtuple_get_nth_field(tuple, i);
+ field = rec_get_nth_field_old(rec, i, &len);
+
+ dfield_set_data(dfield, field, len);
+ }
+ } else {
+ for (ulint i = 0; i < index->n_fields; i++) {
+ dfield_t* dfield;
+ ulint len;
+ const void* field;
+
+ dfield = dtuple_get_nth_field(tuple, i);
+ field = rec_get_nth_field_old(rec, i, &len);
+
+ dfield_set_data(dfield, field, len);
+
+ if (rec_2_is_field_extern(rec, i)) {
+ dfield_set_ext(dfield);
+ }
+ }
+ }
+
+ size = rec_get_converted_size_temp(
+ index, tuple->fields, tuple->n_fields, &extra_size);
+
+ mrec_size = ROW_LOG_HEADER_SIZE + size + (extra_size >= 0x80);
+
+ if (insert || index->online_log->same_pk) {
+ ut_ad(!old_pk);
+ old_pk_extra_size = old_pk_size = 0;
+ } else {
+ ut_ad(old_pk);
+ ut_ad(old_pk->n_fields == 2 + old_pk->n_fields_cmp);
+ ut_ad(DATA_TRX_ID_LEN == dtuple_get_nth_field(
+ old_pk, old_pk->n_fields - 2)->len);
+ ut_ad(DATA_ROLL_PTR_LEN == dtuple_get_nth_field(
+ old_pk, old_pk->n_fields - 1)->len);
+
+ old_pk_size = rec_get_converted_size_temp(
+ new_index, old_pk->fields, old_pk->n_fields,
+ &old_pk_extra_size);
+ ut_ad(old_pk_extra_size < 0x100);
+ mrec_size += 1/*old_pk_extra_size*/ + old_pk_size;
+ }
+
+ if (byte* b = row_log_table_open(index->online_log,
+ mrec_size, &avail_size)) {
+ *b++ = insert ? ROW_T_INSERT : ROW_T_UPDATE;
+
+ if (old_pk_size) {
+ *b++ = static_cast<byte>(old_pk_extra_size);
+
+ rec_convert_dtuple_to_temp(
+ b + old_pk_extra_size, new_index,
+ old_pk->fields, old_pk->n_fields);
+ b += old_pk_size;
+ }
+
+ if (extra_size < 0x80) {
+ *b++ = static_cast<byte>(extra_size);
+ } else {
+ ut_ad(extra_size < 0x8000);
+ *b++ = static_cast<byte>(0x80 | (extra_size >> 8));
+ *b++ = static_cast<byte>(extra_size);
+ }
+
+ rec_convert_dtuple_to_temp(
+ b + extra_size, index, tuple->fields, tuple->n_fields);
+ b += size;
+
+ row_log_table_close(
+ index->online_log, b, mrec_size, avail_size);
+ }
+
+ mem_heap_free(heap);
+}
+
+/******************************************************//**
+Logs an insert or update to a table that is being rebuilt. */
+static __attribute__((nonnull(1,2,3)))
+void
+row_log_table_low(
+/*==============*/
+ const rec_t* rec, /*!< in: clustered index leaf page record,
+ page X-latched */
+ dict_index_t* index, /*!< in/out: clustered index, S-latched
+ or X-latched */
+ const ulint* offsets,/*!< in: rec_get_offsets(rec,index) */
+ bool insert, /*!< in: true if insert, false if update */
+ const dtuple_t* old_pk) /*!< in: old PRIMARY KEY value (if !insert
+ and a PRIMARY KEY is being created) */
+{
+ ulint omit_size;
+ ulint old_pk_size;
+ ulint old_pk_extra_size;
+ ulint extra_size;
+ ulint mrec_size;
+ ulint avail_size;
+ const dict_index_t* new_index = dict_table_get_first_index(
+ index->online_log->table);
+ ut_ad(dict_index_is_clust(index));
+ ut_ad(dict_index_is_clust(new_index));
+ ut_ad(!dict_index_is_online_ddl(new_index));
+ ut_ad(rec_offs_validate(rec, index, offsets));
+ ut_ad(rec_offs_n_fields(offsets) == dict_index_get_n_fields(index));
+ ut_ad(rec_offs_size(offsets) <= sizeof index->online_log->tail.buf);
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(rw_lock_own(&index->lock, RW_LOCK_SHARED)
+ || rw_lock_own(&index->lock, RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+ ut_ad(fil_page_get_type(page_align(rec)) == FIL_PAGE_INDEX);
+ ut_ad(page_is_leaf(page_align(rec)));
+ ut_ad(!page_is_comp(page_align(rec)) == !rec_offs_comp(offsets));
+
+ if (dict_index_is_corrupted(index)
+ || !dict_index_is_online_ddl(index)
+ || index->online_log->error != DB_SUCCESS) {
+ return;
+ }
+
+ if (!rec_offs_comp(offsets)) {
+ row_log_table_low_redundant(
+ rec, index, offsets, insert, old_pk, new_index);
+ return;
+ }
+
+ ut_ad(page_is_comp(page_align(rec)));
+ ut_ad(rec_get_status(rec) == REC_STATUS_ORDINARY);
+
+ omit_size = REC_N_NEW_EXTRA_BYTES;
+
+ extra_size = rec_offs_extra_size(offsets) - omit_size;
+
+ mrec_size = rec_offs_size(offsets) - omit_size
+ + ROW_LOG_HEADER_SIZE + (extra_size >= 0x80);
+
+ if (insert || index->online_log->same_pk) {
+ ut_ad(!old_pk);
+ old_pk_extra_size = old_pk_size = 0;
+ } else {
+ ut_ad(old_pk);
+ ut_ad(old_pk->n_fields == 2 + old_pk->n_fields_cmp);
+ ut_ad(DATA_TRX_ID_LEN == dtuple_get_nth_field(
+ old_pk, old_pk->n_fields - 2)->len);
+ ut_ad(DATA_ROLL_PTR_LEN == dtuple_get_nth_field(
+ old_pk, old_pk->n_fields - 1)->len);
+
+ old_pk_size = rec_get_converted_size_temp(
+ new_index, old_pk->fields, old_pk->n_fields,
+ &old_pk_extra_size);
+ ut_ad(old_pk_extra_size < 0x100);
+ mrec_size += 1/*old_pk_extra_size*/ + old_pk_size;
+ }
+
+ if (byte* b = row_log_table_open(index->online_log,
+ mrec_size, &avail_size)) {
+ *b++ = insert ? ROW_T_INSERT : ROW_T_UPDATE;
+
+ if (old_pk_size) {
+ *b++ = static_cast<byte>(old_pk_extra_size);
+
+ rec_convert_dtuple_to_temp(
+ b + old_pk_extra_size, new_index,
+ old_pk->fields, old_pk->n_fields);
+ b += old_pk_size;
+ }
+
+ if (extra_size < 0x80) {
+ *b++ = static_cast<byte>(extra_size);
+ } else {
+ ut_ad(extra_size < 0x8000);
+ *b++ = static_cast<byte>(0x80 | (extra_size >> 8));
+ *b++ = static_cast<byte>(extra_size);
+ }
+
+ memcpy(b, rec - rec_offs_extra_size(offsets), extra_size);
+ b += extra_size;
+ memcpy(b, rec, rec_offs_data_size(offsets));
+ b += rec_offs_data_size(offsets);
+
+ row_log_table_close(
+ index->online_log, b, mrec_size, avail_size);
+ }
+}
+
+/******************************************************//**
+Logs an update to a table that is being rebuilt.
+This will be merged in row_log_table_apply_update(). */
+UNIV_INTERN
+void
+row_log_table_update(
+/*=================*/
+ const rec_t* rec, /*!< in: clustered index leaf page record,
+ page X-latched */
+ dict_index_t* index, /*!< in/out: clustered index, S-latched
+ or X-latched */
+ const ulint* offsets,/*!< in: rec_get_offsets(rec,index) */
+ const dtuple_t* old_pk) /*!< in: row_log_table_get_pk()
+ before the update */
+{
+ row_log_table_low(rec, index, offsets, false, old_pk);
+}
+
+/******************************************************//**
+Constructs the old PRIMARY KEY and DB_TRX_ID,DB_ROLL_PTR
+of a table that is being rebuilt.
+@return tuple of PRIMARY KEY,DB_TRX_ID,DB_ROLL_PTR in the rebuilt table,
+or NULL if the PRIMARY KEY definition does not change */
+UNIV_INTERN
+const dtuple_t*
+row_log_table_get_pk(
+/*=================*/
+ const rec_t* rec, /*!< in: clustered index leaf page record,
+ page X-latched */
+ dict_index_t* index, /*!< in/out: clustered index, S-latched
+ or X-latched */
+ const ulint* offsets,/*!< in: rec_get_offsets(rec,index) */
+ mem_heap_t** heap) /*!< in/out: memory heap where allocated */
+{
+ dtuple_t* tuple = NULL;
+ row_log_t* log = index->online_log;
+
+ ut_ad(dict_index_is_clust(index));
+ ut_ad(dict_index_is_online_ddl(index));
+ ut_ad(!offsets || rec_offs_validate(rec, index, offsets));
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(rw_lock_own(&index->lock, RW_LOCK_SHARED)
+ || rw_lock_own(&index->lock, RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+
+ ut_ad(log);
+ ut_ad(log->table);
+
+ if (log->same_pk) {
+ /* The PRIMARY KEY columns are unchanged. */
+ return(NULL);
+ }
+
+ mutex_enter(&log->mutex);
+
+ /* log->error is protected by log->mutex. */
+ if (log->error == DB_SUCCESS) {
+ dict_table_t* new_table = log->table;
+ dict_index_t* new_index
+ = dict_table_get_first_index(new_table);
+ const ulint new_n_uniq
+ = dict_index_get_n_unique(new_index);
+
+ if (!*heap) {
+ ulint size = 0;
+
+ if (!offsets) {
+ size += (1 + REC_OFFS_HEADER_SIZE
+ + index->n_fields)
+ * sizeof *offsets;
+ }
+
+ for (ulint i = 0; i < new_n_uniq; i++) {
+ size += dict_col_get_min_size(
+ dict_index_get_nth_col(new_index, i));
+ }
+
+ *heap = mem_heap_create(
+ DTUPLE_EST_ALLOC(new_n_uniq + 2) + size);
+ }
+
+ if (!offsets) {
+ offsets = rec_get_offsets(rec, index, NULL,
+ ULINT_UNDEFINED, heap);
+ }
+
+ tuple = dtuple_create(*heap, new_n_uniq + 2);
+ dict_index_copy_types(tuple, new_index, tuple->n_fields);
+ dtuple_set_n_fields_cmp(tuple, new_n_uniq);
+
+ for (ulint new_i = 0; new_i < new_n_uniq; new_i++) {
+ dict_field_t* ifield;
+ dfield_t* dfield;
+ const dict_col_t* new_col;
+ const dict_col_t* col;
+ ulint col_no;
+ ulint i;
+ ulint len;
+ const byte* field;
+
+ ifield = dict_index_get_nth_field(new_index, new_i);
+ dfield = dtuple_get_nth_field(tuple, new_i);
+ new_col = dict_field_get_col(ifield);
+ col_no = new_col->ind;
+
+ for (ulint old_i = 0; old_i < index->table->n_cols;
+ old_i++) {
+ if (col_no == log->col_map[old_i]) {
+ col_no = old_i;
+ goto copy_col;
+ }
+ }
+
+ /* No matching column was found in the old
+ table, so this must be an added column.
+ Copy the default value. */
+ ut_ad(log->add_cols);
+ dfield_copy(dfield,
+ dtuple_get_nth_field(
+ log->add_cols, col_no));
+ continue;
+
+copy_col:
+ col = dict_table_get_nth_col(index->table, col_no);
+
+ i = dict_col_get_clust_pos(col, index);
+
+ if (i == ULINT_UNDEFINED) {
+ ut_ad(0);
+ log->error = DB_CORRUPTION;
+ tuple = NULL;
+ goto func_exit;
+ }
+
+ field = rec_get_nth_field(rec, offsets, i, &len);
+
+ if (len == UNIV_SQL_NULL) {
+ log->error = DB_INVALID_NULL;
+ tuple = NULL;
+ goto func_exit;
+ }
+
+ if (rec_offs_nth_extern(offsets, i)) {
+ ulint field_len = ifield->prefix_len;
+ byte* blob_field;
+ const ulint max_len =
+ DICT_MAX_FIELD_LEN_BY_FORMAT(
+ new_table);
+
+ if (!field_len) {
+ field_len = ifield->fixed_len;
+ if (!field_len) {
+ field_len = max_len + 1;
+ }
+ }
+
+ blob_field = static_cast<byte*>(
+ mem_heap_alloc(*heap, field_len));
+
+ len = btr_copy_externally_stored_field_prefix(
+ blob_field, field_len,
+ dict_table_zip_size(index->table),
+ field, len);
+ if (len == max_len + 1) {
+ log->error = DB_TOO_BIG_INDEX_COL;
+ tuple = NULL;
+ goto func_exit;
+ }
+
+ dfield_set_data(dfield, blob_field, len);
+ } else {
+ if (ifield->prefix_len
+ && ifield->prefix_len < len) {
+ len = ifield->prefix_len;
+ }
+
+ dfield_set_data(
+ dfield,
+ mem_heap_dup(*heap, field, len), len);
+ }
+ }
+
+ const byte* trx_roll = rec
+ + row_get_trx_id_offset(index, offsets);
+
+ dfield_set_data(dtuple_get_nth_field(tuple, new_n_uniq),
+ trx_roll, DATA_TRX_ID_LEN);
+ dfield_set_data(dtuple_get_nth_field(tuple, new_n_uniq + 1),
+ trx_roll + DATA_TRX_ID_LEN, DATA_ROLL_PTR_LEN);
+ }
+
+func_exit:
+ mutex_exit(&log->mutex);
+ return(tuple);
+}
+
+/******************************************************//**
+Logs an insert to a table that is being rebuilt.
+This will be merged in row_log_table_apply_insert(). */
+UNIV_INTERN
+void
+row_log_table_insert(
+/*=================*/
+ const rec_t* rec, /*!< in: clustered index leaf page record,
+ page X-latched */
+ dict_index_t* index, /*!< in/out: clustered index, S-latched
+ or X-latched */
+ const ulint* offsets)/*!< in: rec_get_offsets(rec,index) */
+{
+ row_log_table_low(rec, index, offsets, true, NULL);
+}
+
+/******************************************************//**
+Notes that a transaction is being rolled back. */
+UNIV_INTERN
+void
+row_log_table_rollback(
+/*===================*/
+ dict_index_t* index, /*!< in/out: clustered index */
+ trx_id_t trx_id) /*!< in: transaction being rolled back */
+{
+ ut_ad(dict_index_is_clust(index));
+#ifdef UNIV_DEBUG
+ ibool corrupt = FALSE;
+ ut_ad(trx_rw_is_active(trx_id, &corrupt));
+ ut_ad(!corrupt);
+#endif /* UNIV_DEBUG */
+
+ /* Protect transitions of index->online_status and access to
+ index->online_log. */
+ rw_lock_s_lock(&index->lock);
+
+ if (dict_index_is_online_ddl(index)) {
+ ut_ad(index->online_log);
+ ut_ad(index->online_log->table);
+ mutex_enter(&index->online_log->mutex);
+ trx_id_set* trxs = index->online_log->trx_rb;
+
+ if (!trxs) {
+ index->online_log->trx_rb = trxs = new trx_id_set();
+ }
+
+ trxs->insert(trx_id);
+
+ mutex_exit(&index->online_log->mutex);
+ }
+
+ rw_lock_s_unlock(&index->lock);
+}
+
+/******************************************************//**
+Check if a transaction rollback has been initiated.
+@return true if inserts of this transaction were rolled back */
+UNIV_INTERN
+bool
+row_log_table_is_rollback(
+/*======================*/
+ const dict_index_t* index, /*!< in: clustered index */
+ trx_id_t trx_id) /*!< in: transaction id */
+{
+ ut_ad(dict_index_is_clust(index));
+ ut_ad(dict_index_is_online_ddl(index));
+ ut_ad(index->online_log);
+
+ if (const trx_id_set* trxs = index->online_log->trx_rb) {
+ mutex_enter(&index->online_log->mutex);
+ bool is_rollback = trxs->find(trx_id) != trxs->end();
+ mutex_exit(&index->online_log->mutex);
+
+ return(is_rollback);
+ }
+
+ return(false);
+}
+
+/******************************************************//**
+Converts a log record to a table row.
+@return converted row, or NULL if the conversion fails
+or the transaction has been rolled back */
+static __attribute__((nonnull, warn_unused_result))
+const dtuple_t*
+row_log_table_apply_convert_mrec(
+/*=============================*/
+ const mrec_t* mrec, /*!< in: merge record */
+ dict_index_t* index, /*!< in: index of mrec */
+ const ulint* offsets, /*!< in: offsets of mrec */
+ const row_log_t* log, /*!< in: rebuild context */
+ mem_heap_t* heap, /*!< in/out: memory heap */
+ trx_id_t trx_id, /*!< in: DB_TRX_ID of mrec */
+ dberr_t* error) /*!< out: DB_SUCCESS or
+ reason of failure */
+{
+ dtuple_t* row;
+
+#ifdef UNIV_SYNC_DEBUG
+ /* This prevents BLOBs from being freed, in case an insert
+ transaction rollback starts after row_log_table_is_rollback(). */
+ ut_ad(rw_lock_own(dict_index_get_lock(index), RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+
+ if (row_log_table_is_rollback(index, trx_id)) {
+ row = NULL;
+ goto func_exit;
+ }
+
+ /* This is based on row_build(). */
+ if (log->add_cols) {
+ row = dtuple_copy(log->add_cols, heap);
+ /* dict_table_copy_types() would set the fields to NULL */
+ for (ulint i = 0; i < dict_table_get_n_cols(log->table); i++) {
+ dict_col_copy_type(
+ dict_table_get_nth_col(log->table, i),
+ dfield_get_type(dtuple_get_nth_field(row, i)));
+ }
+ } else {
+ row = dtuple_create(heap, dict_table_get_n_cols(log->table));
+ dict_table_copy_types(row, log->table);
+ }
+
+ for (ulint i = 0; i < rec_offs_n_fields(offsets); i++) {
+ const dict_field_t* ind_field
+ = dict_index_get_nth_field(index, i);
+
+ if (ind_field->prefix_len) {
+ /* Column prefixes can only occur in key
+ fields, which cannot be stored externally. For
+ a column prefix, there should also be the full
+ field in the clustered index tuple. The row
+ tuple comprises full fields, not prefixes. */
+ ut_ad(!rec_offs_nth_extern(offsets, i));
+ continue;
+ }
+
+ const dict_col_t* col
+ = dict_field_get_col(ind_field);
+ ulint col_no
+ = log->col_map[dict_col_get_no(col)];
+
+ if (col_no == ULINT_UNDEFINED) {
+ /* dropped column */
+ continue;
+ }
+
+ dfield_t* dfield
+ = dtuple_get_nth_field(row, col_no);
+ ulint len;
+ const void* data;
+
+ if (rec_offs_nth_extern(offsets, i)) {
+ ut_ad(rec_offs_any_extern(offsets));
+ data = btr_rec_copy_externally_stored_field(
+ mrec, offsets,
+ dict_table_zip_size(index->table),
+ i, &len, heap);
+ ut_a(data);
+ } else {
+ data = rec_get_nth_field(mrec, offsets, i, &len);
+ }
+
+ dfield_set_data(dfield, data, len);
+
+ /* See if any columns were changed to NULL or NOT NULL. */
+ const dict_col_t* new_col
+ = dict_table_get_nth_col(log->table, col_no);
+ ut_ad(new_col->mtype == col->mtype);
+
+ /* Assert that prtype matches except for nullability. */
+ ut_ad(!((new_col->prtype ^ col->prtype) & ~DATA_NOT_NULL));
+ ut_ad(!((new_col->prtype ^ dfield_get_type(dfield)->prtype)
+ & ~DATA_NOT_NULL));
+
+ if (new_col->prtype == col->prtype) {
+ continue;
+ }
+
+ if ((new_col->prtype & DATA_NOT_NULL)
+ && dfield_is_null(dfield)) {
+ /* We got a NULL value for a NOT NULL column. */
+ *error = DB_INVALID_NULL;
+ return(NULL);
+ }
+
+ /* Adjust the DATA_NOT_NULL flag in the parsed row. */
+ dfield_get_type(dfield)->prtype = new_col->prtype;
+
+ ut_ad(dict_col_type_assert_equal(new_col,
+ dfield_get_type(dfield)));
+ }
+
+func_exit:
+ *error = DB_SUCCESS;
+ return(row);
+}
+
+/******************************************************//**
+Replays an insert operation on a table that was rebuilt.
+@return DB_SUCCESS or error code */
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
+row_log_table_apply_insert_low(
+/*===========================*/
+ que_thr_t* thr, /*!< in: query graph */
+ const dtuple_t* row, /*!< in: table row
+ in the old table definition */
+ trx_id_t trx_id, /*!< in: trx_id of the row */
+ mem_heap_t* offsets_heap, /*!< in/out: memory heap
+ that can be emptied */
+ mem_heap_t* heap, /*!< in/out: memory heap */
+ row_merge_dup_t* dup) /*!< in/out: for reporting
+ duplicate key errors */
+{
+ dberr_t error;
+ dtuple_t* entry;
+ const row_log_t*log = dup->index->online_log;
+ dict_index_t* index = dict_table_get_first_index(log->table);
+
+ ut_ad(dtuple_validate(row));
+ ut_ad(trx_id);
+
+#ifdef ROW_LOG_APPLY_PRINT
+ if (row_log_apply_print) {
+ fprintf(stderr, "table apply insert "
+ IB_ID_FMT " " IB_ID_FMT "\n",
+ index->table->id, index->id);
+ dtuple_print(stderr, row);
+ }
+#endif /* ROW_LOG_APPLY_PRINT */
+
+ static const ulint flags
+ = (BTR_CREATE_FLAG
+ | BTR_NO_LOCKING_FLAG
+ | BTR_NO_UNDO_LOG_FLAG
+ | BTR_KEEP_SYS_FLAG);
+
+ entry = row_build_index_entry(row, NULL, index, heap);
+
+ error = row_ins_clust_index_entry_low(
+ flags, BTR_MODIFY_TREE, index, index->n_uniq, entry, 0, thr);
+
+ switch (error) {
+ case DB_SUCCESS:
+ break;
+ case DB_SUCCESS_LOCKED_REC:
+ /* The row had already been copied to the table. */
+ return(DB_SUCCESS);
+ default:
+ return(error);
+ }
+
+ do {
+ if (!(index = dict_table_get_next_index(index))) {
+ break;
+ }
+
+ if (index->type & DICT_FTS) {
+ continue;
+ }
+
+ entry = row_build_index_entry(row, NULL, index, heap);
+ error = row_ins_sec_index_entry_low(
+ flags, BTR_MODIFY_TREE,
+ index, offsets_heap, heap, entry, trx_id, thr);
+ } while (error == DB_SUCCESS);
+
+ return(error);
+}
+
+/******************************************************//**
+Replays an insert operation on a table that was rebuilt.
+@return DB_SUCCESS or error code */
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
+row_log_table_apply_insert(
+/*=======================*/
+ que_thr_t* thr, /*!< in: query graph */
+ const mrec_t* mrec, /*!< in: record to insert */
+ const ulint* offsets, /*!< in: offsets of mrec */
+ mem_heap_t* offsets_heap, /*!< in/out: memory heap
+ that can be emptied */
+ mem_heap_t* heap, /*!< in/out: memory heap */
+ row_merge_dup_t* dup, /*!< in/out: for reporting
+ duplicate key errors */
+ trx_id_t trx_id) /*!< in: DB_TRX_ID of mrec */
+{
+ const row_log_t*log = dup->index->online_log;
+ dberr_t error;
+ const dtuple_t* row = row_log_table_apply_convert_mrec(
+ mrec, dup->index, offsets, log, heap, trx_id, &error);
+
+ ut_ad(error == DB_SUCCESS || !row);
+ /* Handling of duplicate key error requires storing
+ of offending key in a record buffer. */
+ ut_ad(error != DB_DUPLICATE_KEY);
+
+ if (error != DB_SUCCESS)
+ return(error);
+
+ if (row) {
+ error = row_log_table_apply_insert_low(
+ thr, row, trx_id, offsets_heap, heap, dup);
+ if (error != DB_SUCCESS) {
+ /* Report the erroneous row using the new
+ version of the table. */
+ innobase_row_to_mysql(dup->table, log->table, row);
+ }
+ }
+ return(error);
+}
+
+/******************************************************//**
+Deletes a record from a table that is being rebuilt.
+@return DB_SUCCESS or error code */
+static __attribute__((nonnull(1, 2, 4, 5), warn_unused_result))
+dberr_t
+row_log_table_apply_delete_low(
+/*===========================*/
+ btr_pcur_t* pcur, /*!< in/out: B-tree cursor,
+ will be trashed */
+ const ulint* offsets, /*!< in: offsets on pcur */
+ const row_ext_t* save_ext, /*!< in: saved external field
+ info, or NULL */
+ mem_heap_t* heap, /*!< in/out: memory heap */
+ mtr_t* mtr) /*!< in/out: mini-transaction,
+ will be committed */
+{
+ dberr_t error;
+ row_ext_t* ext;
+ dtuple_t* row;
+ dict_index_t* index = btr_pcur_get_btr_cur(pcur)->index;
+
+ ut_ad(dict_index_is_clust(index));
+
+#ifdef ROW_LOG_APPLY_PRINT
+ if (row_log_apply_print) {
+ fprintf(stderr, "table apply delete "
+ IB_ID_FMT " " IB_ID_FMT "\n",
+ index->table->id, index->id);
+ rec_print_new(stderr, btr_pcur_get_rec(pcur), offsets);
+ }
+#endif /* ROW_LOG_APPLY_PRINT */
+ if (dict_table_get_next_index(index)) {
+ /* Build a row template for purging secondary index entries. */
+ row = row_build(
+ ROW_COPY_DATA, index, btr_pcur_get_rec(pcur),
+ offsets, NULL, NULL, NULL,
+ save_ext ? NULL : &ext, heap);
+ if (!save_ext) {
+ save_ext = ext;
+ }
+ } else {
+ row = NULL;
+ }
+
+ btr_cur_pessimistic_delete(&error, FALSE, btr_pcur_get_btr_cur(pcur),
+ BTR_CREATE_FLAG, RB_NONE, mtr);
+ mtr_commit(mtr);
+
+ if (error != DB_SUCCESS) {
+ return(error);
+ }
+
+ while ((index = dict_table_get_next_index(index)) != NULL) {
+ if (index->type & DICT_FTS) {
+ continue;
+ }
+
+ const dtuple_t* entry = row_build_index_entry(
+ row, save_ext, index, heap);
+ mtr_start(mtr);
+ btr_pcur_open(index, entry, PAGE_CUR_LE,
+ BTR_MODIFY_TREE, pcur, mtr);
+#ifdef UNIV_DEBUG
+ switch (btr_pcur_get_btr_cur(pcur)->flag) {
+ case BTR_CUR_DELETE_REF:
+ case BTR_CUR_DEL_MARK_IBUF:
+ case BTR_CUR_DELETE_IBUF:
+ case BTR_CUR_INSERT_TO_IBUF:
+ /* We did not request buffering. */
+ break;
+ case BTR_CUR_HASH:
+ case BTR_CUR_HASH_FAIL:
+ case BTR_CUR_BINARY:
+ goto flag_ok;
+ }
+ ut_ad(0);
+flag_ok:
+#endif /* UNIV_DEBUG */
+
+ if (page_rec_is_infimum(btr_pcur_get_rec(pcur))
+ || btr_pcur_get_low_match(pcur) < index->n_uniq) {
+ /* All secondary index entries should be
+ found, because new_table is being modified by
+ this thread only, and all indexes should be
+ updated in sync. */
+ mtr_commit(mtr);
+ return(DB_INDEX_CORRUPT);
+ }
+
+ btr_cur_pessimistic_delete(&error, FALSE,
+ btr_pcur_get_btr_cur(pcur),
+ BTR_CREATE_FLAG, RB_NONE, mtr);
+ mtr_commit(mtr);
+ }
+
+ return(error);
+}
+
+/******************************************************//**
+Replays a delete operation on a table that was rebuilt.
+@return DB_SUCCESS or error code */
+static __attribute__((nonnull(1, 3, 4, 5, 6, 7), warn_unused_result))
+dberr_t
+row_log_table_apply_delete(
+/*=======================*/
+ que_thr_t* thr, /*!< in: query graph */
+ ulint trx_id_col, /*!< in: position of
+ DB_TRX_ID in the new
+ clustered index */
+ const mrec_t* mrec, /*!< in: merge record */
+ const ulint* moffsets, /*!< in: offsets of mrec */
+ mem_heap_t* offsets_heap, /*!< in/out: memory heap
+ that can be emptied */
+ mem_heap_t* heap, /*!< in/out: memory heap */
+ dict_table_t* new_table, /*!< in: rebuilt table */
+ const row_ext_t* save_ext) /*!< in: saved external field
+ info, or NULL */
+{
+ dict_index_t* index = dict_table_get_first_index(new_table);
+ dtuple_t* old_pk;
+ mtr_t mtr;
+ btr_pcur_t pcur;
+ ulint* offsets;
+
+ ut_ad(rec_offs_n_fields(moffsets)
+ == dict_index_get_n_unique(index) + 1);
+ ut_ad(!rec_offs_any_extern(moffsets));
+
+ /* Convert the row to a search tuple. */
+ old_pk = dtuple_create(heap, index->n_uniq + 1);
+ dict_index_copy_types(old_pk, index, old_pk->n_fields);
+ dtuple_set_n_fields_cmp(old_pk, index->n_uniq);
+
+ for (ulint i = 0; i <= index->n_uniq; i++) {
+ ulint len;
+ const void* field;
+ field = rec_get_nth_field(mrec, moffsets, i, &len);
+ ut_ad(len != UNIV_SQL_NULL);
+ dfield_set_data(dtuple_get_nth_field(old_pk, i),
+ field, len);
+ }
+
+ mtr_start(&mtr);
+ btr_pcur_open(index, old_pk, PAGE_CUR_LE,
+ BTR_MODIFY_TREE, &pcur, &mtr);
+#ifdef UNIV_DEBUG
+ switch (btr_pcur_get_btr_cur(&pcur)->flag) {
+ case BTR_CUR_DELETE_REF:
+ case BTR_CUR_DEL_MARK_IBUF:
+ case BTR_CUR_DELETE_IBUF:
+ case BTR_CUR_INSERT_TO_IBUF:
+ /* We did not request buffering. */
+ break;
+ case BTR_CUR_HASH:
+ case BTR_CUR_HASH_FAIL:
+ case BTR_CUR_BINARY:
+ goto flag_ok;
+ }
+ ut_ad(0);
+flag_ok:
+#endif /* UNIV_DEBUG */
+
+ if (page_rec_is_infimum(btr_pcur_get_rec(&pcur))
+ || btr_pcur_get_low_match(&pcur) < index->n_uniq) {
+all_done:
+ mtr_commit(&mtr);
+ /* The record was not found. All done. */
+ return(DB_SUCCESS);
+ }
+
+ offsets = rec_get_offsets(btr_pcur_get_rec(&pcur), index, NULL,
+ ULINT_UNDEFINED, &offsets_heap);
+#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
+ ut_a(!rec_offs_any_null_extern(btr_pcur_get_rec(&pcur), offsets));
+#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */
+
+ /* Only remove the record if DB_TRX_ID matches what was
+ buffered. */
+
+ {
+ ulint len;
+ const void* mrec_trx_id
+ = rec_get_nth_field(mrec, moffsets, trx_id_col, &len);
+ ut_ad(len == DATA_TRX_ID_LEN);
+ const void* rec_trx_id
+ = rec_get_nth_field(btr_pcur_get_rec(&pcur), offsets,
+ trx_id_col, &len);
+ ut_ad(len == DATA_TRX_ID_LEN);
+ if (memcmp(mrec_trx_id, rec_trx_id, DATA_TRX_ID_LEN)) {
+ goto all_done;
+ }
+ }
+
+ return(row_log_table_apply_delete_low(&pcur, offsets, save_ext,
+ heap, &mtr));
+}
+
+/******************************************************//**
+Replays an update operation on a table that was rebuilt.
+@return DB_SUCCESS or error code */
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
+row_log_table_apply_update(
+/*=======================*/
+ que_thr_t* thr, /*!< in: query graph */
+ ulint trx_id_col, /*!< in: position of
+ DB_TRX_ID in the
+ old clustered index */
+ ulint new_trx_id_col, /*!< in: position of
+ DB_TRX_ID in the new
+ clustered index */
+ const mrec_t* mrec, /*!< in: new value */
+ const ulint* offsets, /*!< in: offsets of mrec */
+ mem_heap_t* offsets_heap, /*!< in/out: memory heap
+ that can be emptied */
+ mem_heap_t* heap, /*!< in/out: memory heap */
+ row_merge_dup_t* dup, /*!< in/out: for reporting
+ duplicate key errors */
+ trx_id_t trx_id, /*!< in: DB_TRX_ID of mrec */
+ const dtuple_t* old_pk) /*!< in: PRIMARY KEY and
+ DB_TRX_ID,DB_ROLL_PTR
+ of the old value,
+ or PRIMARY KEY if same_pk */
+{
+ const row_log_t*log = dup->index->online_log;
+ const dtuple_t* row;
+ dict_index_t* index = dict_table_get_first_index(log->table);
+ mtr_t mtr;
+ btr_pcur_t pcur;
+ dberr_t error;
+
+ ut_ad(dtuple_get_n_fields_cmp(old_pk)
+ == dict_index_get_n_unique(index));
+ ut_ad(dtuple_get_n_fields(old_pk)
+ == dict_index_get_n_unique(index)
+ + (dup->index->online_log->same_pk ? 0 : 2));
+
+ row = row_log_table_apply_convert_mrec(
+ mrec, dup->index, offsets, log, heap, trx_id, &error);
+
+ ut_ad(error == DB_SUCCESS || !row);
+ /* Handling of duplicate key error requires storing
+ of offending key in a record buffer. */
+ ut_ad(error != DB_DUPLICATE_KEY);
+
+ if (!row) {
+ return(error);
+ }
+
+ mtr_start(&mtr);
+ btr_pcur_open(index, old_pk, PAGE_CUR_LE,
+ BTR_MODIFY_TREE, &pcur, &mtr);
+#ifdef UNIV_DEBUG
+ switch (btr_pcur_get_btr_cur(&pcur)->flag) {
+ case BTR_CUR_DELETE_REF:
+ case BTR_CUR_DEL_MARK_IBUF:
+ case BTR_CUR_DELETE_IBUF:
+ case BTR_CUR_INSERT_TO_IBUF:
+ ut_ad(0);/* We did not request buffering. */
+ case BTR_CUR_HASH:
+ case BTR_CUR_HASH_FAIL:
+ case BTR_CUR_BINARY:
+ break;
+ }
+#endif /* UNIV_DEBUG */
+
+ if (page_rec_is_infimum(btr_pcur_get_rec(&pcur))
+ || btr_pcur_get_low_match(&pcur) < index->n_uniq) {
+ mtr_commit(&mtr);
+insert:
+ ut_ad(mtr.state == MTR_COMMITTED);
+ /* The row was not found. Insert it. */
+ error = row_log_table_apply_insert_low(
+ thr, row, trx_id, offsets_heap, heap, dup);
+ if (error != DB_SUCCESS) {
+err_exit:
+ /* Report the erroneous row using the new
+ version of the table. */
+ innobase_row_to_mysql(dup->table, log->table, row);
+ }
+
+ return(error);
+ }
+
+ /* Update the record. */
+ ulint* cur_offsets = rec_get_offsets(
+ btr_pcur_get_rec(&pcur),
+ index, NULL, ULINT_UNDEFINED, &offsets_heap);
+
+ dtuple_t* entry = row_build_index_entry(
+ row, NULL, index, heap);
+ const upd_t* update = row_upd_build_difference_binary(
+ index, entry, btr_pcur_get_rec(&pcur), cur_offsets,
+ false, NULL, heap);
+
+ error = DB_SUCCESS;
+
+ if (!update->n_fields) {
+ /* Nothing to do. */
+ goto func_exit;
+ }
+
+ if (rec_offs_any_extern(cur_offsets)) {
+ /* If the record contains any externally stored
+ columns, perform the update by delete and insert,
+ because we will not write any undo log that would
+ allow purge to free any orphaned externally stored
+ columns. */
+delete_insert:
+ error = row_log_table_apply_delete_low(
+ &pcur, cur_offsets, NULL, heap, &mtr);
+ ut_ad(mtr.state == MTR_COMMITTED);
+
+ if (error != DB_SUCCESS) {
+ goto err_exit;
+ }
+
+ goto insert;
+ }
+
+ if (upd_get_nth_field(update, 0)->field_no < new_trx_id_col) {
+ if (dup->index->online_log->same_pk) {
+ /* The ROW_T_UPDATE log record should only be
+ written when the PRIMARY KEY fields of the
+ record did not change in the old table. We
+ can only get a change of PRIMARY KEY columns
+ in the rebuilt table if the PRIMARY KEY was
+ redefined (!same_pk). */
+ ut_ad(0);
+ error = DB_CORRUPTION;
+ goto func_exit;
+ }
+
+ /* The PRIMARY KEY columns have changed.
+ Delete the record with the old PRIMARY KEY value,
+ provided that it carries the same
+ DB_TRX_ID,DB_ROLL_PTR. Then, insert the new row. */
+ ulint len;
+ const byte* cur_trx_roll = rec_get_nth_field(
+ mrec, offsets, trx_id_col, &len);
+ ut_ad(len == DATA_TRX_ID_LEN);
+ const dfield_t* new_trx_roll = dtuple_get_nth_field(
+ old_pk, new_trx_id_col);
+ /* We assume that DB_TRX_ID,DB_ROLL_PTR are stored
+ in one contiguous block. */
+ ut_ad(rec_get_nth_field(mrec, offsets, trx_id_col + 1, &len)
+ == cur_trx_roll + DATA_TRX_ID_LEN);
+ ut_ad(len == DATA_ROLL_PTR_LEN);
+ ut_ad(new_trx_roll->len == DATA_TRX_ID_LEN);
+ ut_ad(dtuple_get_nth_field(old_pk, new_trx_id_col + 1)
+ -> len == DATA_ROLL_PTR_LEN);
+ ut_ad(static_cast<const byte*>(
+ dtuple_get_nth_field(old_pk, new_trx_id_col + 1)
+ ->data)
+ == static_cast<const byte*>(new_trx_roll->data)
+ + DATA_TRX_ID_LEN);
+
+ if (!memcmp(cur_trx_roll, new_trx_roll->data,
+ DATA_TRX_ID_LEN + DATA_ROLL_PTR_LEN)) {
+ /* The old row exists. Remove it. */
+ goto delete_insert;
+ }
+
+ /* Unless we called row_log_table_apply_delete_low(),
+ this will likely cause a duplicate key error. */
+ mtr_commit(&mtr);
+ goto insert;
+ }
+
+ dtuple_t* old_row;
+ row_ext_t* old_ext;
+
+ if (dict_table_get_next_index(index)) {
+ /* Construct the row corresponding to the old value of
+ the record. */
+ old_row = row_build(
+ ROW_COPY_DATA, index, btr_pcur_get_rec(&pcur),
+ cur_offsets, NULL, NULL, NULL, &old_ext, heap);
+ ut_ad(old_row);
+#ifdef ROW_LOG_APPLY_PRINT
+ if (row_log_apply_print) {
+ fprintf(stderr, "table apply update "
+ IB_ID_FMT " " IB_ID_FMT "\n",
+ index->table->id, index->id);
+ dtuple_print(stderr, old_row);
+ dtuple_print(stderr, row);
+ }
+#endif /* ROW_LOG_APPLY_PRINT */
+ } else {
+ old_row = NULL;
+ old_ext = NULL;
+ }
+
+ big_rec_t* big_rec;
+
+ error = btr_cur_pessimistic_update(
+ BTR_CREATE_FLAG | BTR_NO_LOCKING_FLAG
+ | BTR_NO_UNDO_LOG_FLAG | BTR_KEEP_SYS_FLAG
+ | BTR_KEEP_POS_FLAG,
+ btr_pcur_get_btr_cur(&pcur),
+ &cur_offsets, &offsets_heap, heap, &big_rec,
+ update, 0, NULL, 0, &mtr);
+
+ if (big_rec) {
+ if (error == DB_SUCCESS) {
+ error = btr_store_big_rec_extern_fields(
+ index, btr_pcur_get_block(&pcur),
+ btr_pcur_get_rec(&pcur), cur_offsets,
+ big_rec, &mtr, BTR_STORE_UPDATE);
+ }
+
+ dtuple_big_rec_free(big_rec);
+ }
+
+ while ((index = dict_table_get_next_index(index)) != NULL) {
+ if (error != DB_SUCCESS) {
+ break;
+ }
+
+ if (index->type & DICT_FTS) {
+ continue;
+ }
+
+ if (!row_upd_changes_ord_field_binary(
+ index, update, thr, old_row, NULL)) {
+ continue;
+ }
+
+ mtr_commit(&mtr);
+
+ entry = row_build_index_entry(old_row, old_ext, index, heap);
+ if (!entry) {
+ ut_ad(0);
+ return(DB_CORRUPTION);
+ }
+
+ mtr_start(&mtr);
+
+ if (ROW_FOUND != row_search_index_entry(
+ index, entry, BTR_MODIFY_TREE, &pcur, &mtr)) {
+ ut_ad(0);
+ error = DB_CORRUPTION;
+ break;
+ }
+
+ btr_cur_pessimistic_delete(
+ &error, FALSE, btr_pcur_get_btr_cur(&pcur),
+ BTR_CREATE_FLAG, RB_NONE, &mtr);
+
+ if (error != DB_SUCCESS) {
+ break;
+ }
+
+ mtr_commit(&mtr);
+
+ entry = row_build_index_entry(row, NULL, index, heap);
+ error = row_ins_sec_index_entry_low(
+ BTR_CREATE_FLAG | BTR_NO_LOCKING_FLAG
+ | BTR_NO_UNDO_LOG_FLAG | BTR_KEEP_SYS_FLAG,
+ BTR_MODIFY_TREE, index, offsets_heap, heap,
+ entry, trx_id, thr);
+
+ mtr_start(&mtr);
+ }
+
+func_exit:
+ mtr_commit(&mtr);
+ if (error != DB_SUCCESS) {
+ goto err_exit;
+ }
+
+ return(error);
+}
+
+/******************************************************//**
+Applies an operation to a table that was rebuilt.
+@return NULL on failure (mrec corruption) or when out of data;
+pointer to next record on success */
+static __attribute__((nonnull, warn_unused_result))
+const mrec_t*
+row_log_table_apply_op(
+/*===================*/
+ que_thr_t* thr, /*!< in: query graph */
+ ulint trx_id_col, /*!< in: position of
+ DB_TRX_ID in old index */
+ ulint new_trx_id_col, /*!< in: position of
+ DB_TRX_ID in new index */
+ row_merge_dup_t* dup, /*!< in/out: for reporting
+ duplicate key errors */
+ dberr_t* error, /*!< out: DB_SUCCESS
+ or error code */
+ mem_heap_t* offsets_heap, /*!< in/out: memory heap
+ that can be emptied */
+ mem_heap_t* heap, /*!< in/out: memory heap */
+ const mrec_t* mrec, /*!< in: merge record */
+ const mrec_t* mrec_end, /*!< in: end of buffer */
+ ulint* offsets) /*!< in/out: work area
+ for parsing mrec */
+{
+ const row_log_t*log = dup->index->online_log;
+ dict_index_t* new_index = dict_table_get_first_index(log->table);
+ ulint extra_size;
+ const mrec_t* next_mrec;
+ dtuple_t* old_pk;
+ row_ext_t* ext;
+ ulint ext_size;
+
+ ut_ad(dict_index_is_clust(dup->index));
+ ut_ad(dup->index->table != log->table);
+
+ *error = DB_SUCCESS;
+
+ /* 3 = 1 (op type) + 1 (ext_size) + at least 1 byte payload */
+ if (mrec + 3 >= mrec_end) {
+ return(NULL);
+ }
+
+ switch (*mrec++) {
+ default:
+ ut_ad(0);
+ *error = DB_CORRUPTION;
+ return(NULL);
+ case ROW_T_INSERT:
+ extra_size = *mrec++;
+
+ if (extra_size >= 0x80) {
+ /* Read another byte of extra_size. */
+
+ extra_size = (extra_size & 0x7f) << 8;
+ extra_size |= *mrec++;
+ }
+
+ mrec += extra_size;
+
+ if (mrec > mrec_end) {
+ return(NULL);
+ }
+
+ rec_offs_set_n_fields(offsets, dup->index->n_fields);
+ rec_init_offsets_temp(mrec, dup->index, offsets);
+
+ next_mrec = mrec + rec_offs_data_size(offsets);
+
+ if (next_mrec > mrec_end) {
+ return(NULL);
+ } else {
+ ulint len;
+ const byte* db_trx_id
+ = rec_get_nth_field(
+ mrec, offsets, trx_id_col, &len);
+ ut_ad(len == DATA_TRX_ID_LEN);
+ *error = row_log_table_apply_insert(
+ thr, mrec, offsets, offsets_heap,
+ heap, dup, trx_read_trx_id(db_trx_id));
+ }
+ break;
+
+ case ROW_T_DELETE:
+ /* 1 (extra_size) + 2 (ext_size) + at least 1 (payload) */
+ if (mrec + 4 >= mrec_end) {
+ return(NULL);
+ }
+
+ extra_size = *mrec++;
+ ext_size = mach_read_from_2(mrec);
+ mrec += 2;
+ ut_ad(mrec < mrec_end);
+
+ /* We assume extra_size < 0x100 for the PRIMARY KEY prefix.
+ For fixed-length PRIMARY key columns, it is 0. */
+ mrec += extra_size;
+
+ rec_offs_set_n_fields(offsets, new_index->n_uniq + 1);
+ rec_init_offsets_temp(mrec, new_index, offsets);
+ next_mrec = mrec + rec_offs_data_size(offsets) + ext_size;
+ if (next_mrec > mrec_end) {
+ return(NULL);
+ }
+
+ /* If there are external fields, retrieve those logged
+ prefix info and reconstruct the row_ext_t */
+ if (ext_size) {
+ /* We use memcpy to avoid unaligned
+ access on some non-x86 platforms.*/
+ ext = static_cast<row_ext_t*>(
+ mem_heap_dup(heap,
+ mrec + rec_offs_data_size(offsets),
+ ext_size));
+
+ byte* ext_start = reinterpret_cast<byte*>(ext);
+
+ ulint ext_len = sizeof(*ext)
+ + (ext->n_ext - 1) * sizeof ext->len;
+
+ ext->ext = reinterpret_cast<ulint*>(ext_start + ext_len);
+ ext_len += ext->n_ext * sizeof(*ext->ext);
+
+ ext->buf = static_cast<byte*>(ext_start + ext_len);
+ } else {
+ ext = NULL;
+ }
+
+ *error = row_log_table_apply_delete(
+ thr, new_trx_id_col,
+ mrec, offsets, offsets_heap, heap,
+ log->table, ext);
+ break;
+
+ case ROW_T_UPDATE:
+ /* Logically, the log entry consists of the
+ (PRIMARY KEY,DB_TRX_ID) of the old value (converted
+ to the new primary key definition) followed by
+ the new value in the old table definition. If the
+ definition of the columns belonging to PRIMARY KEY
+ is not changed, the log will only contain
+ DB_TRX_ID,new_row. */
+
+ if (dup->index->online_log->same_pk) {
+ ut_ad(new_index->n_uniq == dup->index->n_uniq);
+
+ extra_size = *mrec++;
+
+ if (extra_size >= 0x80) {
+ /* Read another byte of extra_size. */
+
+ extra_size = (extra_size & 0x7f) << 8;
+ extra_size |= *mrec++;
+ }
+
+ mrec += extra_size;
+
+ if (mrec > mrec_end) {
+ return(NULL);
+ }
+
+ rec_offs_set_n_fields(offsets, dup->index->n_fields);
+ rec_init_offsets_temp(mrec, dup->index, offsets);
+
+ next_mrec = mrec + rec_offs_data_size(offsets);
+
+ if (next_mrec > mrec_end) {
+ return(NULL);
+ }
+
+ old_pk = dtuple_create(heap, new_index->n_uniq);
+ dict_index_copy_types(
+ old_pk, new_index, old_pk->n_fields);
+
+ /* Copy the PRIMARY KEY fields from mrec to old_pk. */
+ for (ulint i = 0; i < new_index->n_uniq; i++) {
+ const void* field;
+ ulint len;
+ dfield_t* dfield;
+
+ ut_ad(!rec_offs_nth_extern(offsets, i));
+
+ field = rec_get_nth_field(
+ mrec, offsets, i, &len);
+ ut_ad(len != UNIV_SQL_NULL);
+
+ dfield = dtuple_get_nth_field(old_pk, i);
+ dfield_set_data(dfield, field, len);
+ }
+ } else {
+ /* We assume extra_size < 0x100
+ for the PRIMARY KEY prefix. */
+ mrec += *mrec + 1;
+
+ if (mrec > mrec_end) {
+ return(NULL);
+ }
+
+ /* Get offsets for PRIMARY KEY,
+ DB_TRX_ID, DB_ROLL_PTR. */
+ rec_offs_set_n_fields(offsets, new_index->n_uniq + 2);
+ rec_init_offsets_temp(mrec, new_index, offsets);
+
+ next_mrec = mrec + rec_offs_data_size(offsets);
+ if (next_mrec + 2 > mrec_end) {
+ return(NULL);
+ }
+
+ /* Copy the PRIMARY KEY fields and
+ DB_TRX_ID, DB_ROLL_PTR from mrec to old_pk. */
+ old_pk = dtuple_create(heap, new_index->n_uniq + 2);
+ dict_index_copy_types(old_pk, new_index,
+ old_pk->n_fields);
+
+ for (ulint i = 0;
+ i < dict_index_get_n_unique(new_index) + 2;
+ i++) {
+ const void* field;
+ ulint len;
+ dfield_t* dfield;
+
+ ut_ad(!rec_offs_nth_extern(offsets, i));
+
+ field = rec_get_nth_field(
+ mrec, offsets, i, &len);
+ ut_ad(len != UNIV_SQL_NULL);
+
+ dfield = dtuple_get_nth_field(old_pk, i);
+ dfield_set_data(dfield, field, len);
+ }
+
+ mrec = next_mrec;
+
+ /* Fetch the new value of the row as it was
+ in the old table definition. */
+ extra_size = *mrec++;
+
+ if (extra_size >= 0x80) {
+ /* Read another byte of extra_size. */
+
+ extra_size = (extra_size & 0x7f) << 8;
+ extra_size |= *mrec++;
+ }
+
+ mrec += extra_size;
+
+ if (mrec > mrec_end) {
+ return(NULL);
+ }
+
+ rec_offs_set_n_fields(offsets, dup->index->n_fields);
+ rec_init_offsets_temp(mrec, dup->index, offsets);
+
+ next_mrec = mrec + rec_offs_data_size(offsets);
+
+ if (next_mrec > mrec_end) {
+ return(NULL);
+ }
+ }
+
+ ut_ad(next_mrec <= mrec_end);
+ dtuple_set_n_fields_cmp(old_pk, new_index->n_uniq);
+
+ {
+ ulint len;
+ const byte* db_trx_id
+ = rec_get_nth_field(
+ mrec, offsets, trx_id_col, &len);
+ ut_ad(len == DATA_TRX_ID_LEN);
+ *error = row_log_table_apply_update(
+ thr, trx_id_col, new_trx_id_col,
+ mrec, offsets, offsets_heap,
+ heap, dup, trx_read_trx_id(db_trx_id), old_pk);
+ }
+
+ break;
+ }
+
+ mem_heap_empty(offsets_heap);
+ mem_heap_empty(heap);
+ return(next_mrec);
+}
+
+/******************************************************//**
+Applies operations to a table that was rebuilt.
+Drains the row log (tail block plus any blocks spilled to the
+temporary file) and replays each logged DML against the new table.
+@return DB_SUCCESS, or error code on failure */
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
+row_log_table_apply_ops(
+/*====================*/
+ que_thr_t* thr, /*!< in: query graph */
+ row_merge_dup_t*dup) /*!< in/out: for reporting duplicate key
+ errors */
+{
+ dberr_t error;
+ const mrec_t* mrec = NULL;
+ const mrec_t* next_mrec;
+ const mrec_t* mrec_end = NULL; /* silence bogus warning */
+ const mrec_t* next_mrec_end;
+ mem_heap_t* heap;
+ mem_heap_t* offsets_heap;
+ ulint* offsets;
+ bool has_index_lock;
+ dict_index_t* index = const_cast<dict_index_t*>(
+ dup->index);
+ dict_table_t* new_table = index->online_log->table;
+ dict_index_t* new_index = dict_table_get_first_index(
+ new_table);
+ /* Size of the offsets work area: must fit the wider of the
+ old clustered index and the new PK plus DB_TRX_ID,DB_ROLL_PTR. */
+ const ulint i = 1 + REC_OFFS_HEADER_SIZE
+ + ut_max(dict_index_get_n_fields(index),
+ dict_index_get_n_unique(new_index) + 2);
+ const ulint trx_id_col = dict_col_get_clust_pos(
+ dict_table_get_sys_col(index->table, DATA_TRX_ID), index);
+ const ulint new_trx_id_col = dict_col_get_clust_pos(
+ dict_table_get_sys_col(new_table, DATA_TRX_ID), new_index);
+ trx_t* trx = thr_get_trx(thr);
+
+ ut_ad(dict_index_is_clust(index));
+ ut_ad(dict_index_is_online_ddl(index));
+ ut_ad(trx->mysql_thd);
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(rw_lock_own(dict_index_get_lock(index), RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+ ut_ad(!dict_index_is_online_ddl(new_index));
+ ut_ad(trx_id_col > 0);
+ ut_ad(trx_id_col != ULINT_UNDEFINED);
+ ut_ad(new_trx_id_col > 0);
+ ut_ad(new_trx_id_col != ULINT_UNDEFINED);
+
+ UNIV_MEM_INVALID(&mrec_end, sizeof mrec_end);
+
+ offsets = static_cast<ulint*>(ut_malloc(i * sizeof *offsets));
+ offsets[0] = i;
+ offsets[1] = dict_index_get_n_fields(index);
+
+ heap = mem_heap_create(UNIV_PAGE_SIZE);
+ offsets_heap = mem_heap_create(UNIV_PAGE_SIZE);
+ has_index_lock = true;
+
+next_block:
+ ut_ad(has_index_lock);
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(rw_lock_own(dict_index_get_lock(index), RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+ ut_ad(index->online_log->head.bytes == 0);
+
+ if (trx_is_interrupted(trx)) {
+ goto interrupted;
+ }
+
+ if (dict_index_is_corrupted(index)) {
+ error = DB_INDEX_CORRUPT;
+ goto func_exit;
+ }
+
+ ut_ad(dict_index_is_online_ddl(index));
+
+ error = index->online_log->error;
+
+ if (error != DB_SUCCESS) {
+ goto func_exit;
+ }
+
+ if (UNIV_UNLIKELY(index->online_log->head.blocks
+ > index->online_log->tail.blocks)) {
+unexpected_eof:
+ fprintf(stderr, "InnoDB: unexpected end of temporary file"
+ " for table %s\n", index->table_name);
+corruption:
+ error = DB_CORRUPTION;
+ goto func_exit;
+ }
+
+ if (index->online_log->head.blocks
+ == index->online_log->tail.blocks) {
+ if (index->online_log->head.blocks) {
+#ifdef HAVE_FTRUNCATE
+ /* Truncate the file in order to save space. */
+ ftruncate(index->online_log->fd, 0);
+#endif /* HAVE_FTRUNCATE */
+ index->online_log->head.blocks
+ = index->online_log->tail.blocks = 0;
+ }
+
+ next_mrec = index->online_log->tail.block;
+ next_mrec_end = next_mrec + index->online_log->tail.bytes;
+
+ if (next_mrec_end == next_mrec) {
+ /* End of log reached. */
+all_done:
+ ut_ad(has_index_lock);
+ ut_ad(index->online_log->head.blocks == 0);
+ ut_ad(index->online_log->tail.blocks == 0);
+ index->online_log->head.bytes = 0;
+ index->online_log->tail.bytes = 0;
+ error = DB_SUCCESS;
+ goto func_exit;
+ }
+ } else {
+ os_offset_t ofs;
+ ibool success;
+
+ ofs = (os_offset_t) index->online_log->head.blocks
+ * srv_sort_buf_size;
+
+ ut_ad(has_index_lock);
+ has_index_lock = false;
+ rw_lock_x_unlock(dict_index_get_lock(index));
+
+ log_free_check();
+
+ ut_ad(dict_index_is_online_ddl(index));
+
+ success = os_file_read_no_error_handling(
+ OS_FILE_FROM_FD(index->online_log->fd),
+ index->online_log->head.block, ofs,
+ srv_sort_buf_size);
+
+ if (!success) {
+ fprintf(stderr, "InnoDB: unable to read temporary file"
+ " for table %s\n", index->table_name);
+ goto corruption;
+ }
+
+#ifdef POSIX_FADV_DONTNEED
+ /* Each block is read exactly once. Free up the file cache. */
+ posix_fadvise(index->online_log->fd,
+ ofs, srv_sort_buf_size, POSIX_FADV_DONTNEED);
+#endif /* POSIX_FADV_DONTNEED */
+#ifdef FALLOC_FL_PUNCH_HOLE
+ /* Try to deallocate the space for the file on disk.
+ This should work on ext4 on Linux 2.6.39 and later,
+ and be ignored when the operation is unsupported.
+ The length must be the block that was just read,
+ srv_sort_buf_size, matching the read and the
+ posix_fadvise() call above (was: srv_buf_size). */
+ fallocate(index->online_log->fd,
+ FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+ ofs, srv_sort_buf_size);
+#endif /* FALLOC_FL_PUNCH_HOLE */
+
+ next_mrec = index->online_log->head.block;
+ next_mrec_end = next_mrec + srv_sort_buf_size;
+ }
+
+ /* This read is not protected by index->online_log->mutex for
+ performance reasons. We will eventually notice any error that
+ was flagged by a DML thread. */
+ error = index->online_log->error;
+
+ if (error != DB_SUCCESS) {
+ goto func_exit;
+ }
+
+ if (mrec) {
+ /* A partial record was read from the previous block.
+ Copy the temporary buffer full, as we do not know the
+ length of the record. Parse subsequent records from
+ the bigger buffer index->online_log->head.block
+ or index->online_log->tail.block. */
+
+ ut_ad(mrec == index->online_log->head.buf);
+ ut_ad(mrec_end > mrec);
+ ut_ad(mrec_end < (&index->online_log->head.buf)[1]);
+
+ memcpy((mrec_t*) mrec_end, next_mrec,
+ (&index->online_log->head.buf)[1] - mrec_end);
+ mrec = row_log_table_apply_op(
+ thr, trx_id_col, new_trx_id_col,
+ dup, &error, offsets_heap, heap,
+ index->online_log->head.buf,
+ (&index->online_log->head.buf)[1], offsets);
+ if (error != DB_SUCCESS) {
+ goto func_exit;
+ } else if (UNIV_UNLIKELY(mrec == NULL)) {
+ /* The record was not reassembled properly. */
+ goto corruption;
+ }
+ /* The record was previously found out to be
+ truncated. Now that the parse buffer was extended,
+ it should proceed beyond the old end of the buffer. */
+ ut_a(mrec > mrec_end);
+
+ index->online_log->head.bytes = mrec - mrec_end;
+ next_mrec += index->online_log->head.bytes;
+ }
+
+ ut_ad(next_mrec <= next_mrec_end);
+ /* The following loop must not be parsing the temporary
+ buffer, but head.block or tail.block. */
+
+ /* mrec!=NULL means that the next record starts from the
+ middle of the block */
+ ut_ad((mrec == NULL) == (index->online_log->head.bytes == 0));
+
+#ifdef UNIV_DEBUG
+ if (next_mrec_end == index->online_log->head.block
+ + srv_sort_buf_size) {
+ /* If tail.bytes == 0, next_mrec_end can also be at
+ the end of tail.block. */
+ if (index->online_log->tail.bytes == 0) {
+ ut_ad(next_mrec == next_mrec_end);
+ ut_ad(index->online_log->tail.blocks == 0);
+ ut_ad(index->online_log->head.blocks == 0);
+ ut_ad(index->online_log->head.bytes == 0);
+ } else {
+ ut_ad(next_mrec == index->online_log->head.block
+ + index->online_log->head.bytes);
+ ut_ad(index->online_log->tail.blocks
+ > index->online_log->head.blocks);
+ }
+ } else if (next_mrec_end == index->online_log->tail.block
+ + index->online_log->tail.bytes) {
+ ut_ad(next_mrec == index->online_log->tail.block
+ + index->online_log->head.bytes);
+ ut_ad(index->online_log->tail.blocks == 0);
+ ut_ad(index->online_log->head.blocks == 0);
+ ut_ad(index->online_log->head.bytes
+ <= index->online_log->tail.bytes);
+ } else {
+ ut_error;
+ }
+#endif /* UNIV_DEBUG */
+
+ mrec_end = next_mrec_end;
+
+ while (!trx_is_interrupted(trx)) {
+ mrec = next_mrec;
+ ut_ad(mrec < mrec_end);
+
+ if (!has_index_lock) {
+ /* We are applying operations from a different
+ block than the one that is being written to.
+ We do not hold index->lock in order to
+ allow other threads to concurrently buffer
+ modifications. */
+ ut_ad(mrec >= index->online_log->head.block);
+ ut_ad(mrec_end == index->online_log->head.block
+ + srv_sort_buf_size);
+ ut_ad(index->online_log->head.bytes
+ < srv_sort_buf_size);
+
+ /* Take the opportunity to do a redo log
+ checkpoint if needed. */
+ log_free_check();
+ } else {
+ /* We are applying operations from the last block.
+ Do not allow other threads to buffer anything,
+ so that we can finally catch up and synchronize. */
+ ut_ad(index->online_log->head.blocks == 0);
+ ut_ad(index->online_log->tail.blocks == 0);
+ ut_ad(mrec_end == index->online_log->tail.block
+ + index->online_log->tail.bytes);
+ ut_ad(mrec >= index->online_log->tail.block);
+ }
+
+ /* This read is not protected by index->online_log->mutex
+ for performance reasons. We will eventually notice any
+ error that was flagged by a DML thread. */
+ error = index->online_log->error;
+
+ if (error != DB_SUCCESS) {
+ goto func_exit;
+ }
+
+ next_mrec = row_log_table_apply_op(
+ thr, trx_id_col, new_trx_id_col,
+ dup, &error, offsets_heap, heap,
+ mrec, mrec_end, offsets);
+
+ if (error != DB_SUCCESS) {
+ goto func_exit;
+ } else if (next_mrec == next_mrec_end) {
+ /* The record happened to end on a block boundary.
+ Do we have more blocks left? */
+ if (has_index_lock) {
+ /* The index will be locked while
+ applying the last block. */
+ goto all_done;
+ }
+
+ mrec = NULL;
+process_next_block:
+ rw_lock_x_lock(dict_index_get_lock(index));
+ has_index_lock = true;
+
+ index->online_log->head.bytes = 0;
+ index->online_log->head.blocks++;
+ goto next_block;
+ } else if (next_mrec != NULL) {
+ ut_ad(next_mrec < next_mrec_end);
+ index->online_log->head.bytes += next_mrec - mrec;
+ } else if (has_index_lock) {
+ /* When mrec is within tail.block, it should
+ be a complete record, because we are holding
+ index->lock and thus excluding the writer. */
+ ut_ad(index->online_log->tail.blocks == 0);
+ ut_ad(mrec_end == index->online_log->tail.block
+ + index->online_log->tail.bytes);
+ ut_ad(0);
+ goto unexpected_eof;
+ } else {
+ memcpy(index->online_log->head.buf, mrec,
+ mrec_end - mrec);
+ mrec_end += index->online_log->head.buf - mrec;
+ mrec = index->online_log->head.buf;
+ goto process_next_block;
+ }
+ }
+
+interrupted:
+ error = DB_INTERRUPTED;
+func_exit:
+ if (!has_index_lock) {
+ rw_lock_x_lock(dict_index_get_lock(index));
+ }
+
+ mem_heap_free(offsets_heap);
+ mem_heap_free(heap);
+ ut_free(offsets);
+ return(error);
+}
+
+/******************************************************//**
+Apply the row_log_table log to a table upon completing rebuild.
+@return DB_SUCCESS, or error code on failure */
+UNIV_INTERN
+dberr_t
+row_log_table_apply(
+/*================*/
+ que_thr_t* thr, /*!< in: query graph */
+ dict_table_t* old_table,
+ /*!< in: old table */
+ struct TABLE* table) /*!< in/out: MySQL table
+ (for reporting duplicates) */
+{
+ dict_index_t* clust_index;
+ dberr_t error;
+
+ thr_get_trx(thr)->error_key_num = 0;
+
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(!rw_lock_own(&dict_operation_lock, RW_LOCK_SHARED));
+#endif /* UNIV_SYNC_DEBUG */
+ clust_index = dict_table_get_first_index(old_table);
+
+ /* Exclude concurrent DML from the clustered index while
+ draining the rebuild log. */
+ rw_lock_x_lock(dict_index_get_lock(clust_index));
+
+ if (clust_index->online_log) {
+ row_merge_dup_t dup = {
+ clust_index, table,
+ clust_index->online_log->col_map, 0
+ };
+
+ error = row_log_table_apply_ops(thr, &dup);
+ } else {
+ ut_ad(dict_index_get_online_status(clust_index)
+ == ONLINE_INDEX_COMPLETE);
+ /* This function should not be called unless
+ rebuilding a table online. Build in some fault
+ tolerance. */
+ ut_ad(0);
+ error = DB_ERROR;
+ }
+
+ rw_lock_x_unlock(dict_index_get_lock(clust_index));
+ return(error);
+}
+
+/******************************************************//**
+Allocate the row log for an index and flag the index
+for online creation.
+@retval true if success, false if not */
+UNIV_INTERN
+bool
+row_log_allocate(
+/*=============*/
+ dict_index_t* index, /*!< in/out: index */
+ dict_table_t* table, /*!< in/out: new table being rebuilt,
+ or NULL when creating a secondary index */
+ bool same_pk,/*!< in: whether the definition of the
+ PRIMARY KEY has remained the same */
+ const dtuple_t* add_cols,
+ /*!< in: default values of
+ added columns, or NULL */
+ const ulint* col_map)/*!< in: mapping of old column
+ numbers to new ones, or NULL if !table */
+{
+ byte* buf;
+ row_log_t* log;
+ ulint size;
+
+ ut_ad(!dict_index_is_online_ddl(index));
+ ut_ad(dict_index_is_clust(index) == !!table);
+ ut_ad(!table || index->table != table);
+ ut_ad(same_pk || table);
+ ut_ad(!table || col_map);
+ ut_ad(!add_cols || col_map);
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(rw_lock_own(dict_index_get_lock(index), RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+ /* Single allocation: head block, tail block, then the
+ row_log_t struct itself at the end of the buffer. */
+ size = 2 * srv_sort_buf_size + sizeof *log;
+ buf = (byte*) os_mem_alloc_large(&size);
+ if (!buf) {
+ return(false);
+ }
+
+ log = (row_log_t*) &buf[2 * srv_sort_buf_size];
+ log->size = size;
+ /* Temporary file backing log blocks that overflow memory. */
+ log->fd = row_merge_file_create_low();
+ if (log->fd < 0) {
+ os_mem_free_large(buf, size);
+ return(false);
+ }
+ mutex_create(index_online_log_key, &log->mutex,
+ SYNC_INDEX_ONLINE_LOG);
+ log->trx_rb = NULL;
+ log->table = table;
+ log->same_pk = same_pk;
+ log->add_cols = add_cols;
+ log->col_map = col_map;
+ log->error = DB_SUCCESS;
+ log->max_trx = 0;
+ log->head.block = buf;
+ log->tail.block = buf + srv_sort_buf_size;
+ log->tail.blocks = log->tail.bytes = 0;
+ log->head.blocks = log->head.bytes = 0;
+ /* Publish the log only after it is fully initialized;
+ DML threads check the online status before logging. */
+ dict_index_set_online_status(index, ONLINE_INDEX_CREATION);
+ index->online_log = log;
+
+ /* While we might be holding an exclusive data dictionary lock
+ here, in row_log_abort_sec() we will not always be holding it. Use
+ atomic operations in both cases. */
+ MONITOR_ATOMIC_INC(MONITOR_ONLINE_CREATE_INDEX);
+
+ return(true);
+}
+
+/******************************************************//**
+Free the row log for an index that was being created online.
+Releases, in order: the rollback structure, the temporary file,
+the mutex, and the large buffer (head.block is the start of the
+single allocation made in row_log_allocate()). */
+UNIV_INTERN
+void
+row_log_free(
+/*=========*/
+ row_log_t*& log) /*!< in,own: row log */
+{
+ MONITOR_ATOMIC_DEC(MONITOR_ONLINE_CREATE_INDEX);
+
+ delete log->trx_rb;
+ row_merge_file_destroy_low(log->fd);
+ mutex_free(&log->mutex);
+ os_mem_free_large(log->head.block, log->size);
+ /* Clear the caller's pointer (reference parameter) to
+ prevent accidental reuse after free. */
+ log = 0;
+}
+
+/******************************************************//**
+Get the latest transaction ID that has invoked row_log_online_op()
+during online creation.
+@return latest transaction ID, or 0 if nothing was logged */
+UNIV_INTERN
+trx_id_t
+row_log_get_max_trx(
+/*================*/
+ dict_index_t* index) /*!< in: index, must be locked */
+{
+ ut_ad(dict_index_get_online_status(index) == ONLINE_INDEX_CREATION);
+#ifdef UNIV_SYNC_DEBUG
+ /* The caller must either hold the index lock shared together
+ with the log mutex, or hold the index lock exclusively. */
+ ut_ad((rw_lock_own(dict_index_get_lock(index), RW_LOCK_SHARED)
+ && mutex_own(&index->online_log->mutex))
+ || rw_lock_own(dict_index_get_lock(index), RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+ return(index->online_log->max_trx);
+}
+
+/******************************************************//**
+Applies an operation to a secondary index that was being created.
+Positions a B-tree cursor on the entry and performs the logged
+insert or delete, retrying pessimistically (BTR_MODIFY_TREE) when
+the optimistic leaf-page operation fails. */
+static __attribute__((nonnull))
+void
+row_log_apply_op_low(
+/*=================*/
+ dict_index_t* index, /*!< in/out: index */
+ row_merge_dup_t*dup, /*!< in/out: for reporting
+ duplicate key errors */
+ dberr_t* error, /*!< out: DB_SUCCESS or error code */
+ mem_heap_t* offsets_heap, /*!< in/out: memory heap for
+ allocating offsets; can be emptied */
+ bool has_index_lock, /*!< in: true if holding index->lock
+ in exclusive mode */
+ enum row_op op, /*!< in: operation being applied */
+ trx_id_t trx_id, /*!< in: transaction identifier */
+ const dtuple_t* entry) /*!< in: row */
+{
+ mtr_t mtr;
+ btr_cur_t cursor;
+ ulint* offsets = NULL;
+
+ ut_ad(!dict_index_is_clust(index));
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(rw_lock_own(dict_index_get_lock(index), RW_LOCK_EX)
+ == has_index_lock);
+#endif /* UNIV_SYNC_DEBUG */
+ ut_ad(!dict_index_is_corrupted(index));
+ ut_ad(trx_id != 0 || op == ROW_OP_DELETE);
+
+ mtr_start(&mtr);
+
+ /* We perform the pessimistic variant of the operations if we
+ already hold index->lock exclusively. First, search the
+ record. The operation may already have been performed,
+ depending on when the row in the clustered index was
+ scanned. */
+ btr_cur_search_to_nth_level(index, 0, entry, PAGE_CUR_LE,
+ has_index_lock
+ ? BTR_MODIFY_TREE
+ : BTR_MODIFY_LEAF,
+ &cursor, 0, __FILE__, __LINE__,
+ &mtr);
+
+ ut_ad(dict_index_get_n_unique(index) > 0);
+ /* This test is somewhat similar to row_ins_must_modify_rec(),
+ but not identical for unique secondary indexes. */
+ if (cursor.low_match >= dict_index_get_n_unique(index)
+ && !page_rec_is_infimum(btr_cur_get_rec(&cursor))) {
+ /* We have a matching record. */
+ bool exists = (cursor.low_match
+ == dict_index_get_n_fields(index));
+#ifdef UNIV_DEBUG
+ rec_t* rec = btr_cur_get_rec(&cursor);
+ ut_ad(page_rec_is_user_rec(rec));
+ ut_ad(!rec_get_deleted_flag(rec, page_rec_is_comp(rec)));
+#endif /* UNIV_DEBUG */
+
+ ut_ad(exists || dict_index_is_unique(index));
+
+ switch (op) {
+ case ROW_OP_DELETE:
+ if (!exists) {
+ /* The record was already deleted. */
+ goto func_exit;
+ }
+
+ if (btr_cur_optimistic_delete(
+ &cursor, BTR_CREATE_FLAG, &mtr)) {
+ *error = DB_SUCCESS;
+ break;
+ }
+
+ if (!has_index_lock) {
+ /* This needs a pessimistic operation.
+ Lock the index tree exclusively. */
+ mtr_commit(&mtr);
+ mtr_start(&mtr);
+ btr_cur_search_to_nth_level(
+ index, 0, entry, PAGE_CUR_LE,
+ BTR_MODIFY_TREE, &cursor, 0,
+ __FILE__, __LINE__, &mtr);
+
+ /* No other thread than the current one
+ is allowed to modify the index tree.
+ Thus, the record should still exist. */
+ ut_ad(cursor.low_match
+ >= dict_index_get_n_fields(index));
+ ut_ad(page_rec_is_user_rec(
+ btr_cur_get_rec(&cursor)));
+ }
+
+ /* As there are no externally stored fields in
+ a secondary index record, the parameter
+ rb_ctx = RB_NONE will be ignored. */
+
+ btr_cur_pessimistic_delete(
+ error, FALSE, &cursor,
+ BTR_CREATE_FLAG, RB_NONE, &mtr);
+ break;
+ case ROW_OP_INSERT:
+ if (exists) {
+ /* The record already exists. There
+ is nothing to be inserted. */
+ goto func_exit;
+ }
+
+ if (dtuple_contains_null(entry)) {
+ /* The UNIQUE KEY columns match, but
+ there is a NULL value in the key, and
+ NULL!=NULL. */
+ goto insert_the_rec;
+ }
+
+ /* Duplicate key error */
+ ut_ad(dict_index_is_unique(index));
+ row_merge_dup_report(dup, entry->fields);
+ goto func_exit;
+ }
+ } else {
+ switch (op) {
+ rec_t* rec;
+ big_rec_t* big_rec;
+ case ROW_OP_DELETE:
+ /* The record does not exist. */
+ goto func_exit;
+ case ROW_OP_INSERT:
+ if (dict_index_is_unique(index)
+ && (cursor.up_match
+ >= dict_index_get_n_unique(index)
+ || cursor.low_match
+ >= dict_index_get_n_unique(index))
+ && (!index->n_nullable
+ || !dtuple_contains_null(entry))) {
+ /* Duplicate key */
+ row_merge_dup_report(dup, entry->fields);
+ goto func_exit;
+ }
+insert_the_rec:
+ /* Insert the record. As we are inserting into
+ a secondary index, there cannot be externally
+ stored columns (!big_rec). */
+ *error = btr_cur_optimistic_insert(
+ BTR_NO_UNDO_LOG_FLAG
+ | BTR_NO_LOCKING_FLAG
+ | BTR_CREATE_FLAG,
+ &cursor, &offsets, &offsets_heap,
+ const_cast<dtuple_t*>(entry),
+ &rec, &big_rec, 0, NULL, &mtr);
+ ut_ad(!big_rec);
+ if (*error != DB_FAIL) {
+ break;
+ }
+
+ if (!has_index_lock) {
+ /* This needs a pessimistic operation.
+ Lock the index tree exclusively. */
+ mtr_commit(&mtr);
+ mtr_start(&mtr);
+ btr_cur_search_to_nth_level(
+ index, 0, entry, PAGE_CUR_LE,
+ BTR_MODIFY_TREE, &cursor, 0,
+ __FILE__, __LINE__, &mtr);
+ }
+
+ /* We already determined that the
+ record did not exist. No other thread
+ than the current one is allowed to
+ modify the index tree. Thus, the
+ record should still not exist. */
+
+ *error = btr_cur_pessimistic_insert(
+ BTR_NO_UNDO_LOG_FLAG
+ | BTR_NO_LOCKING_FLAG
+ | BTR_CREATE_FLAG,
+ &cursor, &offsets, &offsets_heap,
+ const_cast<dtuple_t*>(entry),
+ &rec, &big_rec,
+ 0, NULL, &mtr);
+ ut_ad(!big_rec);
+ break;
+ }
+ /* Discard the offsets allocated for this operation. */
+ mem_heap_empty(offsets_heap);
+ }
+
+ if (*error == DB_SUCCESS && trx_id) {
+ /* Record trx_id as the maximum transaction ID of
+ the modified page. */
+ page_update_max_trx_id(btr_cur_get_block(&cursor),
+ btr_cur_get_page_zip(&cursor),
+ trx_id, &mtr);
+ }
+
+func_exit:
+ mtr_commit(&mtr);
+}
+
+/******************************************************//**
+Applies an operation to a secondary index that was being created.
+Parses one merge record (operation byte, optional transaction ID,
+1- or 2-byte extra_size, then the record body) from the log buffer
+and applies it via row_log_apply_op_low().
+@return NULL on failure (mrec corruption) or when out of data;
+pointer to next record on success */
+static __attribute__((nonnull, warn_unused_result))
+const mrec_t*
+row_log_apply_op(
+/*=============*/
+ dict_index_t* index, /*!< in/out: index */
+ row_merge_dup_t*dup, /*!< in/out: for reporting
+ duplicate key errors */
+ dberr_t* error, /*!< out: DB_SUCCESS or error code */
+ mem_heap_t* offsets_heap, /*!< in/out: memory heap for
+ allocating offsets; can be emptied */
+ mem_heap_t* heap, /*!< in/out: memory heap for
+ allocating data tuples */
+ bool has_index_lock, /*!< in: true if holding index->lock
+ in exclusive mode */
+ const mrec_t* mrec, /*!< in: merge record */
+ const mrec_t* mrec_end, /*!< in: end of buffer */
+ ulint* offsets) /*!< in/out: work area for
+ rec_init_offsets_temp() */
+
+{
+ enum row_op op;
+ ulint extra_size;
+ ulint data_size;
+ ulint n_ext;
+ dtuple_t* entry;
+ trx_id_t trx_id;
+
+ /* Online index creation is only used for secondary indexes. */
+ ut_ad(!dict_index_is_clust(index));
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(rw_lock_own(dict_index_get_lock(index), RW_LOCK_EX)
+ == has_index_lock);
+#endif /* UNIV_SYNC_DEBUG */
+
+ if (dict_index_is_corrupted(index)) {
+ *error = DB_INDEX_CORRUPT;
+ return(NULL);
+ }
+
+ *error = DB_SUCCESS;
+
+ /* A NULL return with *error == DB_SUCCESS means "out of
+ data"; the caller will reassemble a truncated record. */
+ if (mrec + ROW_LOG_HEADER_SIZE >= mrec_end) {
+ return(NULL);
+ }
+
+ switch (*mrec) {
+ case ROW_OP_INSERT:
+ /* An insert record carries the transaction ID
+ right after the operation byte. */
+ if (ROW_LOG_HEADER_SIZE + DATA_TRX_ID_LEN + mrec >= mrec_end) {
+ return(NULL);
+ }
+
+ op = static_cast<enum row_op>(*mrec++);
+ trx_id = trx_read_trx_id(mrec);
+ mrec += DATA_TRX_ID_LEN;
+ break;
+ case ROW_OP_DELETE:
+ op = static_cast<enum row_op>(*mrec++);
+ trx_id = 0;
+ break;
+ default:
+corrupted:
+ ut_ad(0);
+ *error = DB_CORRUPTION;
+ return(NULL);
+ }
+
+ /* Read the 1- or 2-byte extra_size field. */
+ extra_size = *mrec++;
+
+ ut_ad(mrec < mrec_end);
+
+ if (extra_size >= 0x80) {
+ /* Read another byte of extra_size. */
+
+ extra_size = (extra_size & 0x7f) << 8;
+ extra_size |= *mrec++;
+ }
+
+ mrec += extra_size;
+
+ if (mrec > mrec_end) {
+ return(NULL);
+ }
+
+ rec_init_offsets_temp(mrec, index, offsets);
+
+ if (rec_offs_any_extern(offsets)) {
+ /* There should never be any externally stored fields
+ in a secondary index, which is what online index
+ creation is used for. Therefore, the log file must be
+ corrupted. */
+ goto corrupted;
+ }
+
+ data_size = rec_offs_data_size(offsets);
+
+ mrec += data_size;
+
+ if (mrec > mrec_end) {
+ return(NULL);
+ }
+
+ entry = row_rec_to_index_entry_low(
+ mrec - data_size, index, offsets, &n_ext, heap);
+ /* Online index creation is only implemented for secondary
+ indexes, which never contain off-page columns. */
+ ut_ad(n_ext == 0);
+#ifdef ROW_LOG_APPLY_PRINT
+ if (row_log_apply_print) {
+ fprintf(stderr, "apply " IB_ID_FMT " " TRX_ID_FMT " %u %u ",
+ index->id, trx_id,
+ unsigned (op), unsigned (has_index_lock));
+ for (const byte* m = mrec - data_size; m < mrec; m++) {
+ fprintf(stderr, "%02x", *m);
+ }
+ putc('\n', stderr);
+ }
+#endif /* ROW_LOG_APPLY_PRINT */
+ row_log_apply_op_low(index, dup, error, offsets_heap,
+ has_index_lock, op, trx_id, entry);
+ return(mrec);
+}
+
+/******************************************************//**
+Applies operations to a secondary index that was being created.
+Drains the row log (tail block plus any blocks spilled to the
+temporary file) and replays each logged insert/delete.
+@return DB_SUCCESS, or error code on failure */
+static __attribute__((nonnull))
+dberr_t
+row_log_apply_ops(
+/*==============*/
+ trx_t* trx, /*!< in: transaction (for checking if
+ the operation was interrupted) */
+ dict_index_t* index, /*!< in/out: index */
+ row_merge_dup_t*dup) /*!< in/out: for reporting duplicate key
+ errors */
+{
+ dberr_t error;
+ const mrec_t* mrec = NULL;
+ const mrec_t* next_mrec;
+ const mrec_t* mrec_end= NULL; /* silence bogus warning */
+ const mrec_t* next_mrec_end;
+ mem_heap_t* offsets_heap;
+ mem_heap_t* heap;
+ ulint* offsets;
+ bool has_index_lock;
+ const ulint i = 1 + REC_OFFS_HEADER_SIZE
+ + dict_index_get_n_fields(index);
+
+ ut_ad(dict_index_is_online_ddl(index));
+ ut_ad(*index->name == TEMP_INDEX_PREFIX);
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(rw_lock_own(dict_index_get_lock(index), RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+ ut_ad(index->online_log);
+ UNIV_MEM_INVALID(&mrec_end, sizeof mrec_end);
+
+ offsets = static_cast<ulint*>(ut_malloc(i * sizeof *offsets));
+ offsets[0] = i;
+ offsets[1] = dict_index_get_n_fields(index);
+
+ offsets_heap = mem_heap_create(UNIV_PAGE_SIZE);
+ heap = mem_heap_create(UNIV_PAGE_SIZE);
+ has_index_lock = true;
+
+next_block:
+ ut_ad(has_index_lock);
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(rw_lock_own(dict_index_get_lock(index), RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+ ut_ad(index->online_log->head.bytes == 0);
+
+ if (trx_is_interrupted(trx)) {
+ goto interrupted;
+ }
+
+ if (dict_index_is_corrupted(index)) {
+ error = DB_INDEX_CORRUPT;
+ goto func_exit;
+ }
+
+ if (UNIV_UNLIKELY(index->online_log->head.blocks
+ > index->online_log->tail.blocks)) {
+unexpected_eof:
+ fprintf(stderr, "InnoDB: unexpected end of temporary file"
+ " for index %s\n", index->name + 1);
+corruption:
+ error = DB_CORRUPTION;
+ goto func_exit;
+ }
+
+ if (index->online_log->head.blocks
+ == index->online_log->tail.blocks) {
+ if (index->online_log->head.blocks) {
+#ifdef HAVE_FTRUNCATE
+ /* Truncate the file in order to save space. */
+ ftruncate(index->online_log->fd, 0);
+#endif /* HAVE_FTRUNCATE */
+ index->online_log->head.blocks
+ = index->online_log->tail.blocks = 0;
+ }
+
+ next_mrec = index->online_log->tail.block;
+ next_mrec_end = next_mrec + index->online_log->tail.bytes;
+
+ if (next_mrec_end == next_mrec) {
+ /* End of log reached. */
+all_done:
+ ut_ad(has_index_lock);
+ ut_ad(index->online_log->head.blocks == 0);
+ ut_ad(index->online_log->tail.blocks == 0);
+ error = DB_SUCCESS;
+ goto func_exit;
+ }
+ } else {
+ os_offset_t ofs;
+ ibool success;
+
+ ofs = (os_offset_t) index->online_log->head.blocks
+ * srv_sort_buf_size;
+
+ ut_ad(has_index_lock);
+ has_index_lock = false;
+ rw_lock_x_unlock(dict_index_get_lock(index));
+
+ log_free_check();
+
+ success = os_file_read_no_error_handling(
+ OS_FILE_FROM_FD(index->online_log->fd),
+ index->online_log->head.block, ofs,
+ srv_sort_buf_size);
+
+ if (!success) {
+ fprintf(stderr, "InnoDB: unable to read temporary file"
+ " for index %s\n", index->name + 1);
+ goto corruption;
+ }
+
+#ifdef POSIX_FADV_DONTNEED
+ /* Each block is read exactly once. Free up the file cache. */
+ posix_fadvise(index->online_log->fd,
+ ofs, srv_sort_buf_size, POSIX_FADV_DONTNEED);
+#endif /* POSIX_FADV_DONTNEED */
+#ifdef FALLOC_FL_PUNCH_HOLE
+ /* Try to deallocate the space for the file on disk.
+ This should work on ext4 on Linux 2.6.39 and later,
+ and be ignored when the operation is unsupported.
+ The length must be the block that was just read,
+ srv_sort_buf_size, matching the read and the
+ posix_fadvise() call above (was: srv_buf_size). */
+ fallocate(index->online_log->fd,
+ FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+ ofs, srv_sort_buf_size);
+#endif /* FALLOC_FL_PUNCH_HOLE */
+
+ next_mrec = index->online_log->head.block;
+ next_mrec_end = next_mrec + srv_sort_buf_size;
+ }
+
+ if (mrec) {
+ /* A partial record was read from the previous block.
+ Copy the temporary buffer full, as we do not know the
+ length of the record. Parse subsequent records from
+ the bigger buffer index->online_log->head.block
+ or index->online_log->tail.block. */
+
+ ut_ad(mrec == index->online_log->head.buf);
+ ut_ad(mrec_end > mrec);
+ ut_ad(mrec_end < (&index->online_log->head.buf)[1]);
+
+ memcpy((mrec_t*) mrec_end, next_mrec,
+ (&index->online_log->head.buf)[1] - mrec_end);
+ mrec = row_log_apply_op(
+ index, dup, &error, offsets_heap, heap,
+ has_index_lock, index->online_log->head.buf,
+ (&index->online_log->head.buf)[1], offsets);
+ if (error != DB_SUCCESS) {
+ goto func_exit;
+ } else if (UNIV_UNLIKELY(mrec == NULL)) {
+ /* The record was not reassembled properly. */
+ goto corruption;
+ }
+ /* The record was previously found out to be
+ truncated. Now that the parse buffer was extended,
+ it should proceed beyond the old end of the buffer. */
+ ut_a(mrec > mrec_end);
+
+ index->online_log->head.bytes = mrec - mrec_end;
+ next_mrec += index->online_log->head.bytes;
+ }
+
+ ut_ad(next_mrec <= next_mrec_end);
+ /* The following loop must not be parsing the temporary
+ buffer, but head.block or tail.block. */
+
+ /* mrec!=NULL means that the next record starts from the
+ middle of the block */
+ ut_ad((mrec == NULL) == (index->online_log->head.bytes == 0));
+
+#ifdef UNIV_DEBUG
+ if (next_mrec_end == index->online_log->head.block
+ + srv_sort_buf_size) {
+ /* If tail.bytes == 0, next_mrec_end can also be at
+ the end of tail.block. */
+ if (index->online_log->tail.bytes == 0) {
+ ut_ad(next_mrec == next_mrec_end);
+ ut_ad(index->online_log->tail.blocks == 0);
+ ut_ad(index->online_log->head.blocks == 0);
+ ut_ad(index->online_log->head.bytes == 0);
+ } else {
+ ut_ad(next_mrec == index->online_log->head.block
+ + index->online_log->head.bytes);
+ ut_ad(index->online_log->tail.blocks
+ > index->online_log->head.blocks);
+ }
+ } else if (next_mrec_end == index->online_log->tail.block
+ + index->online_log->tail.bytes) {
+ ut_ad(next_mrec == index->online_log->tail.block
+ + index->online_log->head.bytes);
+ ut_ad(index->online_log->tail.blocks == 0);
+ ut_ad(index->online_log->head.blocks == 0);
+ ut_ad(index->online_log->head.bytes
+ <= index->online_log->tail.bytes);
+ } else {
+ ut_error;
+ }
+#endif /* UNIV_DEBUG */
+
+ mrec_end = next_mrec_end;
+
+ while (!trx_is_interrupted(trx)) {
+ mrec = next_mrec;
+ ut_ad(mrec < mrec_end);
+
+ if (!has_index_lock) {
+ /* We are applying operations from a different
+ block than the one that is being written to.
+ We do not hold index->lock in order to
+ allow other threads to concurrently buffer
+ modifications. */
+ ut_ad(mrec >= index->online_log->head.block);
+ ut_ad(mrec_end == index->online_log->head.block
+ + srv_sort_buf_size);
+ ut_ad(index->online_log->head.bytes
+ < srv_sort_buf_size);
+
+ /* Take the opportunity to do a redo log
+ checkpoint if needed. */
+ log_free_check();
+ } else {
+ /* We are applying operations from the last block.
+ Do not allow other threads to buffer anything,
+ so that we can finally catch up and synchronize. */
+ ut_ad(index->online_log->head.blocks == 0);
+ ut_ad(index->online_log->tail.blocks == 0);
+ ut_ad(mrec_end == index->online_log->tail.block
+ + index->online_log->tail.bytes);
+ ut_ad(mrec >= index->online_log->tail.block);
+ }
+
+ next_mrec = row_log_apply_op(
+ index, dup, &error, offsets_heap, heap,
+ has_index_lock, mrec, mrec_end, offsets);
+
+ if (error != DB_SUCCESS) {
+ goto func_exit;
+ } else if (next_mrec == next_mrec_end) {
+ /* The record happened to end on a block boundary.
+ Do we have more blocks left? */
+ if (has_index_lock) {
+ /* The index will be locked while
+ applying the last block. */
+ goto all_done;
+ }
+
+ mrec = NULL;
+process_next_block:
+ rw_lock_x_lock(dict_index_get_lock(index));
+ has_index_lock = true;
+
+ index->online_log->head.bytes = 0;
+ index->online_log->head.blocks++;
+ goto next_block;
+ } else if (next_mrec != NULL) {
+ ut_ad(next_mrec < next_mrec_end);
+ index->online_log->head.bytes += next_mrec - mrec;
+ } else if (has_index_lock) {
+ /* When mrec is within tail.block, it should
+ be a complete record, because we are holding
+ index->lock and thus excluding the writer. */
+ ut_ad(index->online_log->tail.blocks == 0);
+ ut_ad(mrec_end == index->online_log->tail.block
+ + index->online_log->tail.bytes);
+ ut_ad(0);
+ goto unexpected_eof;
+ } else {
+ memcpy(index->online_log->head.buf, mrec,
+ mrec_end - mrec);
+ mrec_end += index->online_log->head.buf - mrec;
+ mrec = index->online_log->head.buf;
+ goto process_next_block;
+ }
+ }
+
+interrupted:
+ error = DB_INTERRUPTED;
+func_exit:
+ if (!has_index_lock) {
+ rw_lock_x_lock(dict_index_get_lock(index));
+ }
+
+ switch (error) {
+ case DB_SUCCESS:
+ break;
+ case DB_INDEX_CORRUPT:
+ if (((os_offset_t) index->online_log->tail.blocks + 1)
+ * srv_sort_buf_size >= srv_online_max_size) {
+ /* The log file grew too big. */
+ error = DB_ONLINE_LOG_TOO_BIG;
+ }
+ /* fall through */
+ default:
+ /* We set the flag directly instead of invoking
+ dict_set_corrupted_index_cache_only(index) here,
+ because the index is not "public" yet. */
+ index->type |= DICT_CORRUPT;
+ }
+
+ mem_heap_free(heap);
+ mem_heap_free(offsets_heap);
+ ut_free(offsets);
+ return(error);
+}
+
+/******************************************************//**
+Apply the row log to the index upon completing index creation.
+On success the index goes to ONLINE_INDEX_COMPLETE; on error or
+duplicate key it is flagged corrupted and set to
+ONLINE_INDEX_ABORTED. The row log is freed in either case.
+@return DB_SUCCESS, or error code on failure */
+UNIV_INTERN
+dberr_t
+row_log_apply(
+/*==========*/
+ trx_t* trx, /*!< in: transaction (for checking if
+ the operation was interrupted) */
+ dict_index_t* index, /*!< in/out: secondary index */
+ struct TABLE* table) /*!< in/out: MySQL table
+ (for reporting duplicates) */
+{
+ dberr_t error;
+ row_log_t* log;
+ /* No col_map: this is index creation, not a table rebuild. */
+ row_merge_dup_t dup = { index, table, NULL, 0 };
+
+ ut_ad(dict_index_is_online_ddl(index));
+ ut_ad(!dict_index_is_clust(index));
+
+ log_free_check();
+
+ rw_lock_x_lock(dict_index_get_lock(index));
+
+ if (!dict_table_is_corrupted(index->table)) {
+ error = row_log_apply_ops(trx, index, &dup);
+ } else {
+ error = DB_SUCCESS;
+ }
+
+ if (error != DB_SUCCESS || dup.n_dup) {
+ ut_a(!dict_table_is_discarded(index->table));
+ /* We set the flag directly instead of invoking
+ dict_set_corrupted_index_cache_only(index) here,
+ because the index is not "public" yet. */
+ index->type |= DICT_CORRUPT;
+ index->table->drop_aborted = TRUE;
+
+ if (error == DB_SUCCESS) {
+ error = DB_DUPLICATE_KEY;
+ }
+
+ dict_index_set_online_status(index, ONLINE_INDEX_ABORTED);
+ } else {
+ dict_index_set_online_status(index, ONLINE_INDEX_COMPLETE);
+ }
+
+ log = index->online_log;
+ index->online_log = NULL;
+ /* We could remove the TEMP_INDEX_PREFIX and update the data
+ dictionary to say that this index is complete, if we had
+ access to the .frm file here. If the server crashes before
+ all requested indexes have been created, this completed index
+ will be dropped. */
+ rw_lock_x_unlock(dict_index_get_lock(index));
+
+ /* Free the log outside the index lock. */
+ row_log_free(log);
+
+ return(error);
+}
diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc
index 244aa0a69f1..a509e2c5ca8 100644
--- a/storage/innobase/row/row0merge.cc
+++ b/storage/innobase/row/row0merge.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1995, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2005, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -26,40 +26,18 @@ Completed by Sunny Bains and Marko Makela
#include "row0merge.h"
#include "row0ext.h"
-#include "row0row.h"
-#include "row0upd.h"
+#include "row0log.h"
#include "row0ins.h"
#include "row0sel.h"
-#include "dict0dict.h"
-#include "dict0mem.h"
-#include "dict0boot.h"
#include "dict0crea.h"
-#include "dict0load.h"
-#include "btr0btr.h"
-#include "mach0data.h"
-#include "trx0rseg.h"
-#include "trx0trx.h"
-#include "trx0roll.h"
-#include "trx0undo.h"
#include "trx0purge.h"
-#include "trx0rec.h"
-#include "que0que.h"
-#include "rem0cmp.h"
-#include "read0read.h"
-#include "os0file.h"
#include "lock0lock.h"
-#include "data0data.h"
-#include "data0type.h"
-#include "que0que.h"
#include "pars0pars.h"
-#include "mem0mem.h"
-#include "log0log.h"
#include "ut0sort.h"
-#include "handler0alter.h"
-#include "fts0fts.h"
-#include "fts0types.h"
-#include "fts0priv.h"
#include "row0ftsort.h"
+#include "row0import.h"
+#include "handler0alter.h"
+#include "ha_prototypes.h"
/* Ignore posix_fadvise() on those platforms where it does not exist */
#if defined __WIN__
@@ -69,8 +47,6 @@ Completed by Sunny Bains and Marko Makela
#ifdef UNIV_DEBUG
/** Set these in order ot enable debug printout. */
/* @{ */
-/** Log the outcome of each row_merge_cmp() call, comparing records. */
-static ibool row_merge_print_cmp;
/** Log each record read from temporary file. */
static ibool row_merge_print_read;
/** Log each record write to temporary file. */
@@ -86,39 +62,23 @@ static ibool row_merge_print_block_write;
#endif /* UNIV_DEBUG */
/* Whether to disable file system cache */
-UNIV_INTERN char srv_disable_sort_file_cache;
-
-/********************************************************************//**
-Read sorted file containing index data tuples and insert these data
-tuples to the index
-@return DB_SUCCESS or error number */
-static
-ulint
-row_merge_insert_index_tuples(
-/*==========================*/
- trx_t* trx, /*!< in: transaction */
- dict_index_t* index, /*!< in: index */
- dict_table_t* table, /*!< in: new table */
- ulint zip_size,/*!< in: compressed page size of
- the old table, or 0 if uncompressed */
- int fd, /*!< in: file descriptor */
- row_merge_block_t* block); /*!< in/out: file buffer */
+UNIV_INTERN char srv_disable_sort_file_cache;
#ifdef UNIV_DEBUG
/******************************************************//**
Display a merge tuple. */
-static
+static __attribute__((nonnull))
void
row_merge_tuple_print(
/*==================*/
FILE* f, /*!< in: output stream */
- const dfield_t* entry, /*!< in: tuple to print */
+ const mtuple_t* entry, /*!< in: tuple to print */
ulint n_fields)/*!< in: number of fields in the tuple */
{
ulint j;
for (j = 0; j < n_fields; j++) {
- const dfield_t* field = &entry[j];
+ const dfield_t* field = &entry->fields[j];
if (dfield_is_null(field)) {
fputs("\n NULL;", f);
@@ -141,16 +101,54 @@ row_merge_tuple_print(
#endif /* UNIV_DEBUG */
/******************************************************//**
+Encode an index record. */
+static __attribute__((nonnull))
+void
+row_merge_buf_encode(
+/*=================*/
+ byte** b, /*!< in/out: pointer to
+ current end of output buffer */
+ const dict_index_t* index, /*!< in: index */
+ const mtuple_t* entry, /*!< in: index fields
+ of the record to encode */
+ ulint n_fields) /*!< in: number of fields
+ in the entry */
+{
+ ulint size;
+ ulint extra_size;
+
+ size = rec_get_converted_size_temp(
+ index, entry->fields, n_fields, &extra_size);
+ ut_ad(size >= extra_size);
+
+ /* Encode extra_size + 1 */
+ if (extra_size + 1 < 0x80) {
+ *(*b)++ = (byte) (extra_size + 1);
+ } else {
+ ut_ad((extra_size + 1) < 0x8000);
+ *(*b)++ = (byte) (0x80 | ((extra_size + 1) >> 8));
+ *(*b)++ = (byte) (extra_size + 1);
+ }
+
+ rec_convert_dtuple_to_temp(*b + extra_size, index,
+ entry->fields, n_fields);
+
+ *b += size;
+}
+
+/******************************************************//**
Allocate a sort buffer.
@return own: sort buffer */
-static
+static __attribute__((malloc, nonnull))
row_merge_buf_t*
row_merge_buf_create_low(
/*=====================*/
mem_heap_t* heap, /*!< in: heap where allocated */
dict_index_t* index, /*!< in: secondary index */
- ulint max_tuples, /*!< in: maximum number of data tuples */
- ulint buf_size) /*!< in: size of the buffer, in bytes */
+ ulint max_tuples, /*!< in: maximum number of
+ data tuples */
+ ulint buf_size) /*!< in: size of the buffer,
+ in bytes */
{
row_merge_buf_t* buf;
@@ -162,7 +160,7 @@ row_merge_buf_create_low(
buf->heap = heap;
buf->index = index;
buf->max_tuples = max_tuples;
- buf->tuples = static_cast<const dfield_t**>(
+ buf->tuples = static_cast<mtuple_t*>(
ut_malloc(2 * max_tuples * sizeof *buf->tuples));
buf->tmp_tuples = buf->tuples + max_tuples;
@@ -204,13 +202,11 @@ row_merge_buf_empty(
/*================*/
row_merge_buf_t* buf) /*!< in,own: sort buffer */
{
- ulint buf_size;
+ ulint buf_size = sizeof *buf;
ulint max_tuples = buf->max_tuples;
mem_heap_t* heap = buf->heap;
dict_index_t* index = buf->index;
- void* tuple = buf->tuples;
-
- buf_size = (sizeof *buf);;
+ mtuple_t* tuples = buf->tuples;
mem_heap_empty(heap);
@@ -218,7 +214,7 @@ row_merge_buf_empty(
buf->heap = heap;
buf->index = index;
buf->max_tuples = max_tuples;
- buf->tuples = static_cast<const dfield_t**>(tuple);
+ buf->tuples = tuples;
buf->tmp_tuples = buf->tuples + max_tuples;
return(buf);
@@ -230,7 +226,7 @@ UNIV_INTERN
void
row_merge_buf_free(
/*===============*/
- row_merge_buf_t* buf) /*!< in,own: sort buffer, to be freed */
+ row_merge_buf_t* buf) /*!< in,own: sort buffer to be freed */
{
ut_free(buf->tuples);
mem_heap_free(buf->heap);
@@ -244,19 +240,18 @@ ulint
row_merge_buf_add(
/*==============*/
row_merge_buf_t* buf, /*!< in/out: sort buffer */
- dict_index_t* fts_index,/*!< fts index to be
- created */
+ dict_index_t* fts_index,/*!< in: fts index to be created */
+ const dict_table_t* old_table,/*!< in: original table */
fts_psort_t* psort_info, /*!< in: parallel sort info */
- const dtuple_t* row, /*!< in: row in clustered index */
+ const dtuple_t* row, /*!< in: table row */
const row_ext_t* ext, /*!< in: cache of externally stored
column prefixes, or NULL */
doc_id_t* doc_id) /*!< in/out: Doc ID if we are
creating FTS index */
-
{
ulint i;
const dict_index_t* index;
- dfield_t* entry;
+ mtuple_t* entry;
dfield_t* field;
const dict_field_t* ifield;
ulint n_fields;
@@ -267,9 +262,13 @@ row_merge_buf_add(
ulint n_row_added = 0;
if (buf->n_tuples >= buf->max_tuples) {
- return(FALSE);
+ return(0);
}
+ DBUG_EXECUTE_IF(
+ "ib_row_merge_buf_add_two",
+ if (buf->n_tuples >= 2) return(0););
+
UNIV_PREFETCH_R(row->fields);
/* If we are building FTS index, buf->index points to
@@ -279,11 +278,9 @@ row_merge_buf_add(
n_fields = dict_index_get_n_fields(index);
- entry = static_cast<dfield_t*>(
- mem_heap_alloc(buf->heap, n_fields * sizeof *entry));
-
- buf->tuples[buf->n_tuples] = entry;
- field = entry;
+ entry = &buf->tuples[buf->n_tuples];
+ field = entry->fields = static_cast<dfield_t*>(
+ mem_heap_alloc(buf->heap, n_fields * sizeof *entry->fields));
data_size = 0;
extra_size = UT_BITS_IN_BYTES(index->n_nullable);
@@ -296,30 +293,13 @@ row_merge_buf_add(
ulint col_no;
ulint fixed_len;
const dfield_t* row_field;
- ibool col_adjusted;
col = ifield->col;
col_no = dict_col_get_no(col);
- col_adjusted = FALSE;
-
- /* If we are creating a FTS index, a new Doc
- ID column is being added, so we need to adjust
- any column number positioned after this Doc ID */
- if (*doc_id > 0
- && DICT_TF2_FLAG_IS_SET(index->table,
- DICT_TF2_FTS_ADD_DOC_ID)
- && col_no > index->table->fts->doc_col) {
-
- ut_ad(index->table->fts);
-
- col_no--;
- col_adjusted = TRUE;
- }
/* Process the Doc ID column */
if (*doc_id > 0
- && col_no == index->table->fts->doc_col
- && !col_adjusted) {
+ && col_no == index->table->fts->doc_col) {
fts_write_doc_id((byte*) &write_doc_id, *doc_id);
/* Note: field->data now points to a value on the
@@ -487,7 +467,7 @@ row_merge_buf_add(
ulint extra;
size = rec_get_converted_size_temp(
- index, entry, n_fields, &extra);
+ index, entry->fields, n_fields, &extra);
ut_ad(data_size + extra_size == size);
ut_ad(extra_size == extra);
@@ -500,12 +480,6 @@ row_merge_buf_add(
of extra_size. */
data_size += (extra_size + 1) + ((extra_size + 1) >= 0x80);
- /* The following assertion may fail if row_merge_block_t is
- declared very small and a PRIMARY KEY is being created with
- many prefix columns. In that case, the record may exceed the
- page_zip_rec_needs_ext() limit. However, no further columns
- will be moved to external storage until the record is inserted
- to the clustered index B-tree. */
ut_ad(data_size < srv_sort_buf_size);
/* Reserve one byte for the end marker of row_merge_block_t. */
@@ -517,7 +491,7 @@ row_merge_buf_add(
buf->n_tuples++;
n_row_added++;
- field = entry;
+ field = entry->fields;
/* Copy the data fields. */
@@ -530,118 +504,120 @@ row_merge_buf_add(
/*************************************************************//**
Report a duplicate key. */
-static
+UNIV_INTERN
void
row_merge_dup_report(
/*=================*/
row_merge_dup_t* dup, /*!< in/out: for reporting duplicates */
const dfield_t* entry) /*!< in: duplicate index entry */
{
- mrec_buf_t* buf;
- const dtuple_t* tuple;
- dtuple_t tuple_store;
- const rec_t* rec;
- const dict_index_t* index = dup->index;
- ulint n_fields= dict_index_get_n_fields(index);
- mem_heap_t* heap;
- ulint* offsets;
- ulint n_ext;
-
- if (dup->n_dup++) {
+ if (!dup->n_dup++) {
/* Only report the first duplicate record,
but count all duplicate records. */
- return;
+ innobase_fields_to_mysql(dup->table, dup->index, entry);
}
-
- /* Convert the tuple to a record and then to MySQL format. */
- heap = mem_heap_create((1 + REC_OFFS_HEADER_SIZE + n_fields)
- * sizeof *offsets
- + sizeof *buf);
-
- buf = static_cast<mrec_buf_t*>(mem_heap_alloc(heap, sizeof *buf));
-
- tuple = dtuple_from_fields(&tuple_store, entry, n_fields);
- n_ext = dict_index_is_clust(index) ? dtuple_get_n_ext(tuple) : 0;
-
- rec = rec_convert_dtuple_to_rec(*buf, index, tuple, n_ext);
- offsets = rec_get_offsets(rec, index, NULL, ULINT_UNDEFINED, &heap);
-
- innobase_rec_to_mysql(dup->table, rec, index, offsets);
-
- mem_heap_free(heap);
}
/*************************************************************//**
Compare two tuples.
@return 1, 0, -1 if a is greater, equal, less, respectively, than b */
-static
+static __attribute__((warn_unused_result))
int
row_merge_tuple_cmp(
/*================*/
+ ulint n_uniq, /*!< in: number of unique fields */
ulint n_field,/*!< in: number of fields */
- const dfield_t* a, /*!< in: first tuple to be compared */
- const dfield_t* b, /*!< in: second tuple to be compared */
- row_merge_dup_t* dup) /*!< in/out: for reporting duplicates */
+ const mtuple_t& a, /*!< in: first tuple to be compared */
+ const mtuple_t& b, /*!< in: second tuple to be compared */
+ row_merge_dup_t* dup) /*!< in/out: for reporting duplicates,
+ NULL if non-unique index */
{
int cmp;
- const dfield_t* field = a;
+ const dfield_t* af = a.fields;
+ const dfield_t* bf = b.fields;
+ ulint n = n_uniq;
+
+ ut_ad(n_uniq > 0);
+ ut_ad(n_uniq <= n_field);
/* Compare the fields of the tuples until a difference is
found or we run out of fields to compare. If !cmp at the
end, the tuples are equal. */
do {
- cmp = cmp_dfield_dfield(a++, b++);
- } while (!cmp && --n_field);
+ cmp = cmp_dfield_dfield(af++, bf++);
+ } while (!cmp && --n);
- if (UNIV_UNLIKELY(!cmp) && UNIV_LIKELY_NULL(dup)) {
+ if (cmp) {
+ return(cmp);
+ }
+
+ if (dup) {
/* Report a duplicate value error if the tuples are
logically equal. NULL columns are logically inequal,
although they are equal in the sorting order. Find
out if any of the fields are NULL. */
- for (b = field; b != a; b++) {
- if (dfield_is_null(b)) {
-
- goto func_exit;
+ for (const dfield_t* df = a.fields; df != af; df++) {
+ if (dfield_is_null(df)) {
+ goto no_report;
}
}
- row_merge_dup_report(dup, field);
+ row_merge_dup_report(dup, a.fields);
}
-func_exit:
+no_report:
+ /* The n_uniq fields were equal, but we compare all fields so
+ that we will get the same (internal) order as in the B-tree. */
+ for (n = n_field - n_uniq + 1; --n; ) {
+ cmp = cmp_dfield_dfield(af++, bf++);
+ if (cmp) {
+ return(cmp);
+ }
+ }
+
+ /* This should never be reached, except in a secondary index
+ when creating a secondary index and a PRIMARY KEY, and there
+ is a duplicate in the PRIMARY KEY that has not been detected
+ yet. Internally, an index must never contain duplicates. */
return(cmp);
}
/** Wrapper for row_merge_tuple_sort() to inject some more context to
UT_SORT_FUNCTION_BODY().
-@param a array of tuples that being sorted
-@param b aux (work area), same size as tuples[]
-@param c lower bound of the sorting area, inclusive
-@param d upper bound of the sorting area, inclusive */
-#define row_merge_tuple_sort_ctx(a,b,c,d) \
- row_merge_tuple_sort(n_field, dup, a, b, c, d)
+@param tuples array of tuples that being sorted
+@param aux work area, same size as tuples[]
+@param low lower bound of the sorting area, inclusive
+@param high upper bound of the sorting area, inclusive */
+#define row_merge_tuple_sort_ctx(tuples, aux, low, high) \
+ row_merge_tuple_sort(n_uniq, n_field, dup, tuples, aux, low, high)
/** Wrapper for row_merge_tuple_cmp() to inject some more context to
UT_SORT_FUNCTION_BODY().
@param a first tuple to be compared
@param b second tuple to be compared
@return 1, 0, -1 if a is greater, equal, less, respectively, than b */
-#define row_merge_tuple_cmp_ctx(a,b) row_merge_tuple_cmp(n_field, a, b, dup)
+#define row_merge_tuple_cmp_ctx(a,b) \
+ row_merge_tuple_cmp(n_uniq, n_field, a, b, dup)
/**********************************************************************//**
Merge sort the tuple buffer in main memory. */
-static
+static __attribute__((nonnull(4,5)))
void
row_merge_tuple_sort(
/*=================*/
+ ulint n_uniq, /*!< in: number of unique fields */
ulint n_field,/*!< in: number of fields */
- row_merge_dup_t* dup, /*!< in/out: for reporting duplicates */
- const dfield_t** tuples, /*!< in/out: tuples */
- const dfield_t** aux, /*!< in/out: work area */
+ row_merge_dup_t* dup, /*!< in/out: reporter of duplicates
+ (NULL if non-unique index) */
+ mtuple_t* tuples, /*!< in/out: tuples */
+ mtuple_t* aux, /*!< in/out: work area */
ulint low, /*!< in: lower bound of the
sorting area, inclusive */
ulint high) /*!< in: upper bound of the
sorting area, exclusive */
{
+ ut_ad(n_field > 0);
+ ut_ad(n_uniq <= n_field);
+
UT_SORT_FUNCTION_BODY(row_merge_tuple_sort_ctx,
tuples, aux, low, high, row_merge_tuple_cmp_ctx);
}
@@ -653,9 +629,12 @@ void
row_merge_buf_sort(
/*===============*/
row_merge_buf_t* buf, /*!< in/out: sort buffer */
- row_merge_dup_t* dup) /*!< in/out: for reporting duplicates */
+ row_merge_dup_t* dup) /*!< in/out: reporter of duplicates
+ (NULL if non-unique index) */
{
- row_merge_tuple_sort(dict_index_get_n_unique(buf->index), dup,
+ row_merge_tuple_sort(dict_index_get_n_unique(buf->index),
+ dict_index_get_n_fields(buf->index),
+ dup,
buf->tuples, buf->tmp_tuples, 0, buf->n_tuples);
}
@@ -674,33 +653,11 @@ row_merge_buf_write(
ulint n_fields= dict_index_get_n_fields(index);
byte* b = &block[0];
- ulint i;
-
- for (i = 0; i < buf->n_tuples; i++) {
- ulint size;
- ulint extra_size;
- const dfield_t* entry = buf->tuples[i];
-
- size = rec_get_converted_size_temp(
- index, entry, n_fields, &extra_size);
- ut_ad(size >= extra_size);
-
- /* Encode extra_size + 1 */
- if (extra_size + 1 < 0x80) {
- *b++ = (byte) (extra_size + 1);
- } else {
- ut_ad((extra_size + 1) < 0x8000);
- *b++ = (byte) (0x80 | ((extra_size + 1) >> 8));
- *b++ = (byte) (extra_size + 1);
- }
-
- ut_ad(b + size < &block[srv_sort_buf_size]);
-
- rec_convert_dtuple_to_temp(b + extra_size, index,
- entry, n_fields);
-
- b += size;
+ for (ulint i = 0; i < buf->n_tuples; i++) {
+ const mtuple_t* entry = &buf->tuples[i];
+ row_merge_buf_encode(&b, index, entry, n_fields);
+ ut_ad(b < &block[srv_sort_buf_size]);
#ifdef UNIV_DEBUG
if (row_merge_print_write) {
fprintf(stderr, "row_merge_buf_write %p,%d,%lu %lu",
@@ -759,36 +716,6 @@ row_merge_heap_create(
return(heap);
}
-/**********************************************************************//**
-Search an index object by name and column names. If several indexes match,
-return the index with the max id.
-@return matching index, NULL if not found */
-static
-dict_index_t*
-row_merge_dict_table_get_index(
-/*===========================*/
- dict_table_t* table, /*!< in: table */
- const merge_index_def_t*index_def) /*!< in: index definition */
-{
- ulint i;
- dict_index_t* index;
- const char** column_names;
-
- column_names = static_cast<const char**>(
- mem_alloc(index_def->n_fields * sizeof *column_names));
-
- for (i = 0; i < index_def->n_fields; ++i) {
- column_names[i] = index_def->fields[i].field_name;
- }
-
- index = dict_table_get_index_by_max_id(
- table, index_def->name, column_names, index_def->n_fields);
-
- mem_free((void*) column_names);
-
- return(index);
-}
-
/********************************************************************//**
Read a merge block from the file system.
@return TRUE if request was successful, FALSE if fail */
@@ -854,10 +781,10 @@ row_merge_write(
os_offset_t ofs = buf_len * (os_offset_t) offset;
ibool ret;
- ret = os_file_write("(merge)", OS_FILE_FROM_FD(fd), buf, ofs, buf_len);
-
DBUG_EXECUTE_IF("row_merge_write_failure", return(FALSE););
+ ret = os_file_write("(merge)", OS_FILE_FROM_FD(fd), buf, ofs, buf_len);
+
#ifdef UNIV_DEBUG
if (row_merge_print_block_write) {
fprintf(stderr, "row_merge_write fd=%d ofs=%lu\n",
@@ -877,7 +804,7 @@ row_merge_write(
/********************************************************************//**
Read a merge record.
@return pointer to next record, or NULL on I/O error or end of list */
-UNIV_INTERN __attribute__((nonnull))
+UNIV_INTERN
const byte*
row_merge_read_rec(
/*===============*/
@@ -953,7 +880,7 @@ err_exit:
case. */
avail_size = &block[srv_sort_buf_size] - b;
-
+ ut_ad(avail_size < sizeof *buf);
memcpy(*buf, b, avail_size);
if (!row_merge_read(fd, ++(*foffs), block)) {
@@ -1193,46 +1120,12 @@ row_merge_write_eof(
return(&block[0]);
}
-/*************************************************************//**
-Compare two merge records.
-@return 1, 0, -1 if mrec1 is greater, equal, less, respectively, than mrec2 */
-UNIV_INTERN
-int
-row_merge_cmp(
-/*==========*/
- const mrec_t* mrec1, /*!< in: first merge
- record to be compared */
- const mrec_t* mrec2, /*!< in: second merge
- record to be compared */
- const ulint* offsets1, /*!< in: first record offsets */
- const ulint* offsets2, /*!< in: second record offsets */
- const dict_index_t* index, /*!< in: index */
- ibool* null_eq) /*!< out: set to TRUE if
- found matching null values */
-{
- int cmp;
-
- cmp = cmp_rec_rec_simple(mrec1, mrec2, offsets1, offsets2, index,
- null_eq);
-
-#ifdef UNIV_DEBUG
- if (row_merge_print_cmp) {
- fputs("row_merge_cmp1 ", stderr);
- rec_print_comp(stderr, mrec1, offsets1);
- fputs("\nrow_merge_cmp2 ", stderr);
- rec_print_comp(stderr, mrec2, offsets2);
- fprintf(stderr, "\nrow_merge_cmp=%d\n", cmp);
- }
-#endif /* UNIV_DEBUG */
-
- return(cmp);
-}
/********************************************************************//**
Reads clustered index of the table and create temporary files
containing the index entries for the indexes to be built.
@return DB_SUCCESS or error */
-static __attribute__((nonnull))
-ulint
+static __attribute__((nonnull(1,2,3,4,6,9,10,16), warn_unused_result))
+dberr_t
row_merge_read_clustered_index(
/*===========================*/
trx_t* trx, /*!< in: transaction */
@@ -1243,23 +1136,40 @@ row_merge_read_clustered_index(
const dict_table_t* new_table,/*!< in: table where indexes are
created; identical to old_table
unless creating a PRIMARY KEY */
+ bool online, /*!< in: true if creating indexes
+ online */
dict_index_t** index, /*!< in: indexes to be created */
dict_index_t* fts_sort_idx,
- /*!< in: indexes to be created */
- fts_psort_t* psort_info, /*!< in: parallel sort info */
+ /*!< in: full-text index to be created,
+ or NULL */
+ fts_psort_t* psort_info,
+ /*!< in: parallel sort info for
+ fts_sort_idx creation, or NULL */
merge_file_t* files, /*!< in: temporary files */
+ const ulint* key_numbers,
+ /*!< in: MySQL key numbers to create */
ulint n_index,/*!< in: number of indexes to create */
+ const dtuple_t* add_cols,
+ /*!< in: default values of
+ added columns, or NULL */
+ const ulint* col_map,/*!< in: mapping of old column
+ numbers to new ones, or NULL
+ if old_table == new_table */
+ ulint add_autoinc,
+ /*!< in: number of added
+ AUTO_INCREMENT column, or
+ ULINT_UNDEFINED if none is added */
+ ib_sequence_t& sequence,/*!< in/out: autoinc sequence */
row_merge_block_t* block) /*!< in/out: file buffer */
{
dict_index_t* clust_index; /* Clustered index */
mem_heap_t* row_heap; /* Heap memory to create
- clustered index records */
+ clustered index tuples */
row_merge_buf_t** merge_buf; /* Temporary list for records*/
- btr_pcur_t pcur; /* Persistent cursor on the
- clustered index */
+ btr_pcur_t pcur; /* Cursor on the clustered
+ index */
mtr_t mtr; /* Mini transaction */
- ulint err = DB_SUCCESS;/* Return code */
- ulint i;
+ dberr_t err = DB_SUCCESS;/* Return code */
ulint n_nonnull = 0; /* number of columns
changed to NOT NULL */
ulint* nonnull = NULL; /* NOT NULL columns */
@@ -1271,13 +1181,10 @@ row_merge_read_clustered_index(
ibool fts_pll_sort = FALSE;
ib_int64_t sig_count = 0;
- trx->op_info = "reading clustered index";
+ ut_ad((old_table == new_table) == !col_map);
+ ut_ad(!add_cols || col_map);
- ut_ad(trx);
- ut_ad(old_table);
- ut_ad(new_table);
- ut_ad(index);
- ut_ad(files);
+ trx->op_info = "reading clustered index";
#ifdef FTS_INTERNAL_DIAG_PRINT
DEBUG_FTS_SORT_PRINT("FTS_SORT: Start Create Index\n");
@@ -1288,8 +1195,7 @@ row_merge_read_clustered_index(
merge_buf = static_cast<row_merge_buf_t**>(
mem_alloc(n_index * sizeof *merge_buf));
-
- for (i = 0; i < n_index; i++) {
+ for (ulint i = 0; i < n_index; i++) {
if (index[i]->type & DICT_FTS) {
/* We are building a FT index, make sure
@@ -1301,14 +1207,14 @@ row_merge_read_clustered_index(
merge_buf[i] = row_merge_buf_create(fts_sort_idx);
add_doc_id = DICT_TF2_FLAG_IS_SET(
- old_table, DICT_TF2_FTS_ADD_DOC_ID);
+ new_table, DICT_TF2_FTS_ADD_DOC_ID);
/* If Doc ID does not exist in the table itself,
fetch the first FTS Doc ID */
if (add_doc_id) {
fts_get_next_doc_id(
(dict_table_t*) new_table,
- &doc_id);
+ &doc_id);
ut_ad(doc_id > 0);
}
@@ -1329,35 +1235,34 @@ row_merge_read_clustered_index(
clust_index = dict_table_get_first_index(old_table);
btr_pcur_open_at_index_side(
- TRUE, clust_index, BTR_SEARCH_LEAF, &pcur, TRUE, &mtr);
-
- if (UNIV_UNLIKELY(old_table != new_table)) {
- ulint n_cols = dict_table_get_n_cols(old_table);
-
- /* A primary key will be created. Identify the
- columns that were flagged NOT NULL in the new table,
- so that we can quickly check that the records in the
- (old) clustered index do not violate the added NOT
- NULL constraints. */
+ true, clust_index, BTR_SEARCH_LEAF, &pcur, true, 0, &mtr);
- if (!fts_sort_idx) {
- ut_a(n_cols == dict_table_get_n_cols(new_table));
- }
+ if (old_table != new_table) {
+ /* The table is being rebuilt. Identify the columns
+ that were flagged NOT NULL in the new table, so that
+ we can quickly check that the records in the old table
+ do not violate the added NOT NULL constraints. */
nonnull = static_cast<ulint*>(
- mem_alloc(n_cols * sizeof *nonnull));
+ mem_alloc(dict_table_get_n_cols(new_table)
+ * sizeof *nonnull));
- for (i = 0; i < n_cols; i++) {
+ for (ulint i = 0; i < dict_table_get_n_cols(old_table); i++) {
if (dict_table_get_nth_col(old_table, i)->prtype
& DATA_NOT_NULL) {
+ continue;
+ }
+ const ulint j = col_map[i];
+
+ if (j == ULINT_UNDEFINED) {
+ /* The column was dropped. */
continue;
}
- if (dict_table_get_nth_col(new_table, i)->prtype
+ if (dict_table_get_nth_col(new_table, j)->prtype
& DATA_NOT_NULL) {
-
- nonnull[n_nonnull++] = i;
+ nonnull[n_nonnull++] = j;
}
}
@@ -1373,81 +1278,221 @@ row_merge_read_clustered_index(
for (;;) {
const rec_t* rec;
ulint* offsets;
- dtuple_t* row = NULL;
+ const dtuple_t* row;
row_ext_t* ext;
- ibool has_next = TRUE;
+ page_cur_t* cur = btr_pcur_get_page_cur(&pcur);
- btr_pcur_move_to_next_on_page(&pcur);
+ page_cur_move_to_next(cur);
- /* When switching pages, commit the mini-transaction
- in order to release the latch on the old page. */
-
- if (btr_pcur_is_after_last_on_page(&pcur)) {
+ if (page_cur_is_after_last(cur)) {
if (UNIV_UNLIKELY(trx_is_interrupted(trx))) {
err = DB_INTERRUPTED;
trx->error_key_num = 0;
goto func_exit;
}
- /* Store the cursor position on the last user
- record on the page. */
- btr_pcur_move_to_prev_on_page(&pcur);
- /* Leaf pages must never be empty, unless
- this is the only page in the index tree. */
- ut_ad(btr_pcur_is_on_user_rec(&pcur)
- || buf_block_get_page_no(
- btr_pcur_get_block(&pcur))
- == clust_index->page);
-
- btr_pcur_store_position(&pcur, &mtr);
- mtr_commit(&mtr);
- mtr_start(&mtr);
- /* Restore position on the record, or its
- predecessor if the record was purged
- meanwhile. */
- btr_pcur_restore_position(BTR_SEARCH_LEAF,
- &pcur, &mtr);
- /* Move to the successor of the original record. */
- has_next = btr_pcur_move_to_next_user_rec(&pcur, &mtr);
+ if (online && old_table != new_table) {
+ err = row_log_table_get_error(clust_index);
+ if (err != DB_SUCCESS) {
+ trx->error_key_num = 0;
+ goto func_exit;
+ }
+ }
+#ifdef DBUG_OFF
+# define dbug_run_purge false
+#else /* DBUG_OFF */
+ bool dbug_run_purge = false;
+#endif /* DBUG_OFF */
+ DBUG_EXECUTE_IF(
+ "ib_purge_on_create_index_page_switch",
+ dbug_run_purge = true;);
+
+ if (dbug_run_purge
+ || rw_lock_get_waiters(
+ dict_index_get_lock(clust_index))) {
+ /* There are waiters on the clustered
+ index tree lock, likely the purge
+ thread. Store and restore the cursor
+ position, and yield so that scanning a
+ large table will not starve other
+ threads. */
+
+ /* Store the cursor position on the last user
+ record on the page. */
+ btr_pcur_move_to_prev_on_page(&pcur);
+ /* Leaf pages must never be empty, unless
+ this is the only page in the index tree. */
+ ut_ad(btr_pcur_is_on_user_rec(&pcur)
+ || buf_block_get_page_no(
+ btr_pcur_get_block(&pcur))
+ == clust_index->page);
+
+ btr_pcur_store_position(&pcur, &mtr);
+ mtr_commit(&mtr);
+
+ if (dbug_run_purge) {
+ /* This is for testing
+ purposes only (see
+ DBUG_EXECUTE_IF above). We
+ signal the purge thread and
+ hope that the purge batch will
+ complete before we execute
+ btr_pcur_restore_position(). */
+ trx_purge_run();
+ os_thread_sleep(1000000);
+ }
+
+ /* Give the waiters a chance to proceed. */
+ os_thread_yield();
+
+ mtr_start(&mtr);
+ /* Restore position on the record, or its
+ predecessor if the record was purged
+ meanwhile. */
+ btr_pcur_restore_position(
+ BTR_SEARCH_LEAF, &pcur, &mtr);
+ /* Move to the successor of the
+ original record. */
+ if (!btr_pcur_move_to_next_user_rec(
+ &pcur, &mtr)) {
+end_of_index:
+ row = NULL;
+ mtr_commit(&mtr);
+ mem_heap_free(row_heap);
+ if (nonnull) {
+ mem_free(nonnull);
+ }
+ goto write_buffers;
+ }
+ } else {
+ ulint next_page_no;
+ buf_block_t* block;
+
+ next_page_no = btr_page_get_next(
+ page_cur_get_page(cur), &mtr);
+
+ if (next_page_no == FIL_NULL) {
+ goto end_of_index;
+ }
+
+ block = page_cur_get_block(cur);
+ block = btr_block_get(
+ buf_block_get_space(block),
+ buf_block_get_zip_size(block),
+ next_page_no, BTR_SEARCH_LEAF,
+ clust_index, &mtr);
+
+ btr_leaf_page_release(page_cur_get_block(cur),
+ BTR_SEARCH_LEAF, &mtr);
+ page_cur_set_before_first(block, cur);
+ page_cur_move_to_next(cur);
+
+ ut_ad(!page_cur_is_after_last(cur));
+ }
}
- if (UNIV_LIKELY(has_next)) {
- rec = btr_pcur_get_rec(&pcur);
- offsets = rec_get_offsets(rec, clust_index, NULL,
- ULINT_UNDEFINED, &row_heap);
+ rec = page_cur_get_rec(cur);
+
+ offsets = rec_get_offsets(rec, clust_index, NULL,
+ ULINT_UNDEFINED, &row_heap);
+
+ if (online && new_table != old_table) {
+ /* When rebuilding the table online, perform a
+ REPEATABLE READ, so that row_log_table_apply()
+ will not see a newer state of the table when
+ applying the log. This is mainly to prevent
+ false duplicate key errors, because the log
+ will identify records by the PRIMARY KEY. */
+ ut_ad(trx->read_view);
+
+ if (!read_view_sees_trx_id(
+ trx->read_view,
+ row_get_rec_trx_id(
+ rec, clust_index, offsets))) {
+ rec_t* old_vers;
+
+ row_vers_build_for_consistent_read(
+ rec, &mtr, clust_index, &offsets,
+ trx->read_view, &row_heap,
+ row_heap, &old_vers);
+
+ rec = old_vers;
+
+ if (!rec) {
+ continue;
+ }
+ }
- /* Skip delete marked records. */
if (rec_get_deleted_flag(
- rec, dict_table_is_comp(old_table))) {
+ rec,
+ dict_table_is_comp(old_table))) {
+ /* This record was deleted in the latest
+ committed version, or it was deleted and
+ then reinserted-by-update before purge
+ kicked in. Skip it. */
continue;
}
- srv_n_rows_inserted++;
+ ut_ad(!rec_offs_any_null_extern(rec, offsets));
+ } else if (rec_get_deleted_flag(
+ rec, dict_table_is_comp(old_table))) {
+ /* Skip delete-marked records.
+
+ Skipping delete-marked records will make the
+ created indexes unuseable for transactions
+ whose read views were created before the index
+ creation completed, but preserving the history
+ would make it tricky to detect duplicate
+ keys. */
+ continue;
+ } else if (UNIV_LIKELY_NULL(rec_offs_any_null_extern(
+ rec, offsets))) {
+ /* This is essentially a READ UNCOMMITTED to
+ fetch the most recent version of the record. */
+#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
+ trx_id_t trx_id;
+ ulint trx_id_offset;
+
+ /* It is possible that the record was
+ just inserted and the off-page columns
+ have not yet been written. We will
+ ignore the record if this is the case,
+ because it should be covered by the
+ index->info.online log in that case. */
+
+ trx_id_offset = clust_index->trx_id_offset;
+ if (!trx_id_offset) {
+ trx_id_offset = row_get_trx_id_offset(
+ clust_index, offsets);
+ }
- /* Build a row based on the clustered index. */
+ trx_id = trx_read_trx_id(rec + trx_id_offset);
+ ut_a(trx_rw_is_active(trx_id, NULL));
+ ut_a(trx_undo_trx_id_is_insert(rec + trx_id_offset));
+#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */
- row = row_build(ROW_COPY_POINTERS, clust_index,
- rec, offsets,
- new_table, &ext, row_heap);
+ /* When !online, we are holding an X-lock on
+ old_table, preventing any inserts. */
+ ut_ad(online);
+ continue;
+ }
- if (UNIV_LIKELY_NULL(nonnull)) {
- for (i = 0; i < n_nonnull; i++) {
- dfield_t* field
- = &row->fields[nonnull[i]];
- dtype_t* field_type
- = dfield_get_type(field);
+ /* Build a row based on the clustered index. */
- ut_a(!(field_type->prtype
- & DATA_NOT_NULL));
+ row = row_build(ROW_COPY_POINTERS, clust_index,
+ rec, offsets, new_table,
+ add_cols, col_map, &ext, row_heap);
+ ut_ad(row);
- if (dfield_is_null(field)) {
- err = DB_PRIMARY_KEY_IS_NULL;
- trx->error_key_num = 0;
- goto func_exit;
- }
+ for (ulint i = 0; i < n_nonnull; i++) {
+ const dfield_t* field = &row->fields[nonnull[i]];
- field_type->prtype |= DATA_NOT_NULL;
- }
+ ut_ad(dfield_get_type(field)->prtype & DATA_NOT_NULL);
+
+ if (dfield_is_null(field)) {
+ err = DB_INVALID_NULL;
+ trx->error_key_num = 0;
+ goto func_exit;
}
}
@@ -1458,19 +1503,72 @@ row_merge_read_clustered_index(
doc_id = 0;
}
+ if (add_autoinc != ULINT_UNDEFINED) {
+
+ ut_ad(add_autoinc
+ < dict_table_get_n_user_cols(new_table));
+
+ const dfield_t* dfield;
+
+ dfield = dtuple_get_nth_field(row, add_autoinc);
+ if (dfield_is_null(dfield)) {
+ goto write_buffers;
+ }
+
+ const dtype_t* dtype = dfield_get_type(dfield);
+ byte* b = static_cast<byte*>(dfield_get_data(dfield));
+
+ if (sequence.eof()) {
+ err = DB_ERROR;
+ trx->error_key_num = 0;
+
+ ib_errf(trx->mysql_thd, IB_LOG_LEVEL_ERROR,
+ ER_AUTOINC_READ_FAILED, "[NULL]");
+
+ goto func_exit;
+ }
+
+ ulonglong value = sequence++;
+
+ switch (dtype_get_mtype(dtype)) {
+ case DATA_INT: {
+ ibool usign;
+ ulint len = dfield_get_len(dfield);
+
+ usign = dtype_get_prtype(dtype) & DATA_UNSIGNED;
+ mach_write_ulonglong(b, value, len, usign);
+
+ break;
+ }
+
+ case DATA_FLOAT:
+ mach_float_write(
+ b, static_cast<float>(value));
+ break;
+
+ case DATA_DOUBLE:
+ mach_double_write(
+ b, static_cast<double>(value));
+ break;
+
+ default:
+ ut_ad(0);
+ }
+ }
+
+write_buffers:
/* Build all entries for all the indexes to be created
in a single scan of the clustered index. */
- for (i = 0; i < n_index; i++) {
+ for (ulint i = 0; i < n_index; i++) {
row_merge_buf_t* buf = merge_buf[i];
merge_file_t* file = &files[i];
- const dict_index_t* index = buf->index;
ulint rows_added = 0;
if (UNIV_LIKELY
(row && (rows_added = row_merge_buf_add(
- buf, fts_index, psort_info,
- row, ext, &doc_id)))) {
+ buf, fts_index, old_table,
+ psort_info, row, ext, &doc_id)))) {
/* If we are creating FTS index,
a single row can generate more
@@ -1483,35 +1581,60 @@ row_merge_read_clustered_index(
continue;
}
- if ((!row || !doc_id)
- && index->type & DICT_FTS) {
+ if ((buf->index->type & DICT_FTS)
+ && (!row || !doc_id)) {
continue;
}
/* The buffer must be sufficiently large
- to hold at least one record. */
- ut_ad(buf->n_tuples || !has_next);
+ to hold at least one record. It may only
+ be empty when we reach the end of the
+ clustered index. row_merge_buf_add()
+ must not have been called in this loop. */
+ ut_ad(buf->n_tuples || row == NULL);
/* We have enough data tuples to form a block.
Sort them and write to disk. */
if (buf->n_tuples) {
- if (dict_index_is_unique(index)) {
- row_merge_dup_t dup;
- dup.index = buf->index;
- dup.table = table;
- dup.n_dup = 0;
+ if (dict_index_is_unique(buf->index)) {
+ row_merge_dup_t dup = {
+ buf->index, table, col_map, 0};
row_merge_buf_sort(buf, &dup);
if (dup.n_dup) {
err = DB_DUPLICATE_KEY;
- trx->error_key_num = i;
- goto func_exit;
+ trx->error_key_num
+ = key_numbers[i];
+ break;
}
} else {
row_merge_buf_sort(buf, NULL);
}
+ } else if (online && new_table == old_table) {
+ /* Note the newest transaction that
+ modified this index when the scan was
+ completed. We prevent older readers
+ from accessing this index, to ensure
+ read consistency. */
+
+ trx_id_t max_trx_id;
+
+ ut_a(row == NULL);
+ rw_lock_x_lock(
+ dict_index_get_lock(buf->index));
+ ut_a(dict_index_get_online_status(buf->index)
+ == ONLINE_INDEX_CREATION);
+
+ max_trx_id = row_log_get_max_trx(buf->index);
+
+ if (max_trx_id > buf->index->trx_id) {
+ buf->index->trx_id = max_trx_id;
+ }
+
+ rw_lock_x_unlock(
+ dict_index_get_lock(buf->index));
}
row_merge_buf_write(buf, file, block);
@@ -1520,7 +1643,7 @@ row_merge_read_clustered_index(
block)) {
err = DB_OUT_OF_FILE_SPACE;
trx->error_key_num = i;
- goto func_exit;
+ break;
}
UNIV_MEM_INVALID(&block[0], srv_sort_buf_size);
@@ -1533,14 +1656,11 @@ row_merge_read_clustered_index(
if (UNIV_UNLIKELY
(!(rows_added = row_merge_buf_add(
- buf, fts_index, psort_info, row,
- ext, &doc_id)))) {
+ buf, fts_index, old_table,
+ psort_info, row, ext,
+ &doc_id)))) {
/* An empty buffer should have enough
- room for at least one record.
- TODO: for FTS index building, we'll
- need to prepared for coping with very
- large text/blob data in a single row
- that could fill up the merge file */
+ room for at least one record. */
ut_error;
}
@@ -1548,27 +1668,40 @@ row_merge_read_clustered_index(
}
}
- mem_heap_empty(row_heap);
+ if (row == NULL) {
+ goto all_done;
+ }
- if (UNIV_UNLIKELY(!has_next)) {
+ if (err != DB_SUCCESS) {
goto func_exit;
}
+
+ mem_heap_empty(row_heap);
}
func_exit:
+ mtr_commit(&mtr);
+ mem_heap_free(row_heap);
+
+ if (nonnull) {
+ mem_free(nonnull);
+ }
+
+all_done:
#ifdef FTS_INTERNAL_DIAG_PRINT
DEBUG_FTS_SORT_PRINT("FTS_SORT: Complete Scan Table\n");
#endif
if (fts_pll_sort) {
- for (i = 0; i < fts_sort_pll_degree; i++) {
+ for (ulint i = 0; i < fts_sort_pll_degree; i++) {
psort_info[i].state = FTS_PARENT_COMPLETE;
}
wait_again:
os_event_wait_time_low(fts_parallel_sort_event,
1000000, sig_count);
- for (i = 0; i < fts_sort_pll_degree; i++) {
- if (psort_info[i].child_status != FTS_CHILD_COMPLETE) {
+ for (ulint i = 0; i < fts_sort_pll_degree; i++) {
+ if (psort_info[i].child_status != FTS_CHILD_COMPLETE
+ && psort_info[i].child_status != FTS_CHILD_EXITING) {
sig_count = os_event_reset(
fts_parallel_sort_event);
goto wait_again;
@@ -1579,17 +1712,7 @@ wait_again:
#ifdef FTS_INTERNAL_DIAG_PRINT
DEBUG_FTS_SORT_PRINT("FTS_SORT: Complete Tokenization\n");
#endif
-
- btr_pcur_close(&pcur);
- mtr_commit(&mtr);
- mem_heap_free(row_heap);
-
- if (UNIV_LIKELY_NULL(nonnull)) {
- mem_free(nonnull);
- }
-
-
- for (i = 0; i < n_index; i++) {
+ for (ulint i = 0; i < n_index; i++) {
row_merge_buf_free(merge_buf[i]);
}
@@ -1597,10 +1720,13 @@ wait_again:
mem_free(merge_buf);
+ btr_pcur_close(&pcur);
+
/* Update the next Doc ID we used. Table should be locked, so
no concurrent DML */
if (max_doc_id) {
- fts_update_next_doc_id(new_table, old_table->name, max_doc_id);
+ fts_update_next_doc_id(
+ 0, new_table, old_table->name, max_doc_id);
}
trx->op_info = "";
@@ -1609,24 +1735,20 @@ wait_again:
}
/** Write a record via buffer 2 and read the next record to buffer N.
-@param M FTS merge info structure
-@param N index into array of merge info structure
-@param INDEX the FTS index */
-
-
-/** Write a record via buffer 2 and read the next record to buffer N.
@param N number of the buffer (0 or 1)
+@param INDEX record descriptor
@param AT_END statement to execute at end of input */
-#define ROW_MERGE_WRITE_GET_NEXT(N, AT_END) \
+#define ROW_MERGE_WRITE_GET_NEXT(N, INDEX, AT_END) \
do { \
- b2 = row_merge_write_rec(&block[2 * srv_sort_buf_size], &buf[2], b2, \
+ b2 = row_merge_write_rec(&block[2 * srv_sort_buf_size], \
+ &buf[2], b2, \
of->fd, &of->offset, \
mrec##N, offsets##N); \
if (UNIV_UNLIKELY(!b2 || ++of->n_rec > file->n_rec)) { \
goto corrupt; \
} \
- b##N = row_merge_read_rec(&block[N * srv_sort_buf_size], &buf[N], \
- b##N, index, \
+ b##N = row_merge_read_rec(&block[N * srv_sort_buf_size],\
+ &buf[N], b##N, INDEX, \
file->fd, foffs##N, \
&mrec##N, offsets##N); \
if (UNIV_UNLIKELY(!b##N)) { \
@@ -1640,11 +1762,12 @@ wait_again:
/*************************************************************//**
Merge two blocks of records on disk and write a bigger block.
@return DB_SUCCESS or error code */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_merge_blocks(
/*=============*/
- const dict_index_t* index, /*!< in: index being created */
+ const row_merge_dup_t* dup, /*!< in: descriptor of
+ index being created */
const merge_file_t* file, /*!< in: file containing
index entries */
row_merge_block_t* block, /*!< in/out: 3 buffers */
@@ -1652,20 +1775,18 @@ row_merge_blocks(
source list in the file */
ulint* foffs1, /*!< in/out: offset of second
source list in the file */
- merge_file_t* of, /*!< in/out: output file */
- struct TABLE* table) /*!< in/out: MySQL table, for
- reporting erroneous key value
- if applicable */
+ merge_file_t* of) /*!< in/out: output file */
{
mem_heap_t* heap; /*!< memory heap for offsets0, offsets1 */
mrec_buf_t* buf; /*!< buffer for handling
split mrec in block[] */
const byte* b0; /*!< pointer to block[0] */
- const byte* b1; /*!< pointer to block[1] */
- byte* b2; /*!< pointer to block[2] */
+ const byte* b1; /*!< pointer to block[srv_sort_buf_size] */
+ byte* b2; /*!< pointer to block[2 * srv_sort_buf_size] */
const mrec_t* mrec0; /*!< merge rec, points to block[0] or buf[0] */
- const mrec_t* mrec1; /*!< merge rec, points to block[1] or buf[1] */
+ const mrec_t* mrec1; /*!< merge rec, points to
+ block[srv_sort_buf_size] or buf[1] */
ulint* offsets0;/* offsets of mrec0 */
ulint* offsets1;/* offsets of mrec1 */
@@ -1680,7 +1801,7 @@ row_merge_blocks(
}
#endif /* UNIV_DEBUG */
- heap = row_merge_heap_create(index, &buf, &offsets0, &offsets1);
+ heap = row_merge_heap_create(dup->index, &buf, &offsets0, &offsets1);
/* Write a record and read the next record. Split the output
file in two halves, which can be merged on the following pass. */
@@ -1696,10 +1817,13 @@ corrupt:
b1 = &block[srv_sort_buf_size];
b2 = &block[2 * srv_sort_buf_size];
- b0 = row_merge_read_rec(&block[0], &buf[0], b0, index, file->fd,
- foffs0, &mrec0, offsets0);
- b1 = row_merge_read_rec(&block[srv_sort_buf_size], &buf[srv_sort_buf_size], b1, index, file->fd,
- foffs1, &mrec1, offsets1);
+ b0 = row_merge_read_rec(
+ &block[0], &buf[0], b0, dup->index,
+ file->fd, foffs0, &mrec0, offsets0);
+ b1 = row_merge_read_rec(
+ &block[srv_sort_buf_size],
+ &buf[srv_sort_buf_size], b1, dup->index,
+ file->fd, foffs1, &mrec1, offsets1);
if (UNIV_UNLIKELY(!b0 && mrec0)
|| UNIV_UNLIKELY(!b1 && mrec1)) {
@@ -1707,56 +1831,49 @@ corrupt:
}
while (mrec0 && mrec1) {
- ibool null_eq = FALSE;
- switch (row_merge_cmp(mrec0, mrec1,
- offsets0, offsets1, index,
- &null_eq)) {
+ switch (cmp_rec_rec_simple(
+ mrec0, mrec1, offsets0, offsets1,
+ dup->index, dup->table)) {
case 0:
- if (UNIV_UNLIKELY
- (dict_index_is_unique(index) && !null_eq)) {
- innobase_rec_to_mysql(table, mrec0,
- index, offsets0);
- mem_heap_free(heap);
- return(DB_DUPLICATE_KEY);
- }
- /* fall through */
+ mem_heap_free(heap);
+ return(DB_DUPLICATE_KEY);
case -1:
- ROW_MERGE_WRITE_GET_NEXT(0, goto merged);
+ ROW_MERGE_WRITE_GET_NEXT(0, dup->index, goto merged);
break;
case 1:
- ROW_MERGE_WRITE_GET_NEXT(1, goto merged);
+ ROW_MERGE_WRITE_GET_NEXT(1, dup->index, goto merged);
break;
default:
ut_error;
}
-
}
merged:
if (mrec0) {
/* append all mrec0 to output */
for (;;) {
- ROW_MERGE_WRITE_GET_NEXT(0, goto done0);
+ ROW_MERGE_WRITE_GET_NEXT(0, dup->index, goto done0);
}
}
done0:
if (mrec1) {
/* append all mrec1 to output */
for (;;) {
- ROW_MERGE_WRITE_GET_NEXT(1, goto done1);
+ ROW_MERGE_WRITE_GET_NEXT(1, dup->index, goto done1);
}
}
done1:
mem_heap_free(heap);
- b2 = row_merge_write_eof(&block[2 * srv_sort_buf_size], b2, of->fd, &of->offset);
+ b2 = row_merge_write_eof(&block[2 * srv_sort_buf_size],
+ b2, of->fd, &of->offset);
return(b2 ? DB_SUCCESS : DB_CORRUPTION);
}
/*************************************************************//**
Copy a block of index entries.
@return TRUE on success, FALSE on failure */
-static __attribute__((nonnull))
+static __attribute__((nonnull, warn_unused_result))
ibool
row_merge_blocks_copy(
/*==================*/
@@ -1771,7 +1888,7 @@ row_merge_blocks_copy(
mrec_buf_t* buf; /*!< buffer for handling
split mrec in block[] */
const byte* b0; /*!< pointer to block[0] */
- byte* b2; /*!< pointer to block[2] */
+ byte* b2; /*!< pointer to block[2 * srv_sort_buf_size] */
const mrec_t* mrec0; /*!< merge rec, points to block[0] */
ulint* offsets0;/* offsets of mrec0 */
ulint* offsets1;/* dummy offsets */
@@ -1801,8 +1918,8 @@ corrupt:
b2 = &block[2 * srv_sort_buf_size];
- b0 = row_merge_read_rec(&block[0], &buf[0], b0, index, file->fd,
- foffs0, &mrec0, offsets0);
+ b0 = row_merge_read_rec(&block[0], &buf[0], b0, index,
+ file->fd, foffs0, &mrec0, offsets0);
if (UNIV_UNLIKELY(!b0 && mrec0)) {
goto corrupt;
@@ -1811,7 +1928,7 @@ corrupt:
if (mrec0) {
/* append all mrec0 to output */
for (;;) {
- ROW_MERGE_WRITE_GET_NEXT(0, goto done0);
+ ROW_MERGE_WRITE_GET_NEXT(0, index, goto done0);
}
}
done0:
@@ -1821,7 +1938,8 @@ done0:
(*foffs0)++;
mem_heap_free(heap);
- return(row_merge_write_eof(&block[2 * srv_sort_buf_size], b2, of->fd, &of->offset)
+ return(row_merge_write_eof(&block[2 * srv_sort_buf_size],
+ b2, of->fd, &of->offset)
!= NULL);
}
@@ -1829,18 +1947,16 @@ done0:
Merge disk files.
@return DB_SUCCESS or error code */
static __attribute__((nonnull))
-ulint
+dberr_t
row_merge(
/*======*/
trx_t* trx, /*!< in: transaction */
- const dict_index_t* index, /*!< in: index being created */
+ const row_merge_dup_t* dup, /*!< in: descriptor of
+ index being created */
merge_file_t* file, /*!< in/out: file containing
index entries */
row_merge_block_t* block, /*!< in/out: 3 buffers */
int* tmpfd, /*!< in/out: temporary file handle */
- struct TABLE* table, /*!< in/out: MySQL table, for
- reporting erroneous key value
- if applicable */
ulint* num_run,/*!< in/out: Number of runs remain
to be merged */
ulint* run_offset) /*!< in/out: Array contains the
@@ -1849,7 +1965,7 @@ row_merge(
{
ulint foffs0; /*!< first input offset */
ulint foffs1; /*!< second input offset */
- ulint error; /*!< error code */
+ dberr_t error; /*!< error code */
merge_file_t of; /*!< output file */
const ulint ihalf = run_offset[*num_run / 2];
/*!< half the input file */
@@ -1880,15 +1996,15 @@ row_merge(
for (; foffs0 < ihalf && foffs1 < file->offset; foffs0++, foffs1++) {
- if (UNIV_UNLIKELY(trx_is_interrupted(trx))) {
+ if (trx_is_interrupted(trx)) {
return(DB_INTERRUPTED);
}
/* Remember the offset number for this run */
run_offset[n_run++] = of.offset;
- error = row_merge_blocks(index, file, block,
- &foffs0, &foffs1, &of, table);
+ error = row_merge_blocks(dup, file, block,
+ &foffs0, &foffs1, &of);
if (error != DB_SUCCESS) {
return(error);
@@ -1906,7 +2022,8 @@ row_merge(
/* Remember the offset number for this run */
run_offset[n_run++] = of.offset;
- if (!row_merge_blocks_copy(index, file, block, &foffs0, &of)) {
+ if (!row_merge_blocks_copy(dup->index, file, block,
+ &foffs0, &of)) {
return(DB_CORRUPTION);
}
}
@@ -1914,14 +2031,15 @@ row_merge(
ut_ad(foffs0 == ihalf);
while (foffs1 < file->offset) {
- if (UNIV_UNLIKELY(trx_is_interrupted(trx))) {
+ if (trx_is_interrupted(trx)) {
return(DB_INTERRUPTED);
}
/* Remember the offset number for this run */
run_offset[n_run++] = of.offset;
- if (!row_merge_blocks_copy(index, file, block, &foffs1, &of)) {
+ if (!row_merge_blocks_copy(dup->index, file, block,
+ &foffs1, &of)) {
return(DB_CORRUPTION);
}
}
@@ -1959,23 +2077,21 @@ row_merge(
Merge disk files.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
row_merge_sort(
/*===========*/
trx_t* trx, /*!< in: transaction */
- const dict_index_t* index, /*!< in: index being created */
+ const row_merge_dup_t* dup, /*!< in: descriptor of
+ index being created */
merge_file_t* file, /*!< in/out: file containing
index entries */
row_merge_block_t* block, /*!< in/out: 3 buffers */
- int* tmpfd, /*!< in/out: temporary file handle */
- struct TABLE* table) /*!< in/out: MySQL table, for
- reporting erroneous key value
- if applicable */
+ int* tmpfd) /*!< in/out: temporary file handle */
{
- ulint half = file->offset / 2;
- ulint num_runs;
- ulint* run_offset;
- ulint error = DB_SUCCESS;
+ const ulint half = file->offset / 2;
+ ulint num_runs;
+ ulint* run_offset;
+ dberr_t error = DB_SUCCESS;
/* Record the number of merge runs we need to perform */
num_runs = file->offset;
@@ -1998,14 +2114,14 @@ row_merge_sort(
/* Merge the runs until we have one big run */
do {
- error = row_merge(trx, index, file, block, tmpfd,
- table, &num_runs, run_offset);
-
- UNIV_MEM_ASSERT_RW(run_offset, num_runs * sizeof *run_offset);
+ error = row_merge(trx, dup, file, block, tmpfd,
+ &num_runs, run_offset);
if (error != DB_SUCCESS) {
break;
}
+
+ UNIV_MEM_ASSERT_RW(run_offset, num_runs * sizeof *run_offset);
} while (num_runs > 1);
mem_free(run_offset);
@@ -2014,8 +2130,25 @@ row_merge_sort(
}
/*************************************************************//**
+Set blob fields empty */
+static __attribute__((nonnull))
+void
+row_merge_set_blob_empty(
+/*=====================*/
+ dtuple_t* tuple) /*!< in/out: data tuple */
+{
+ for (ulint i = 0; i < dtuple_get_n_fields(tuple); i++) {
+ dfield_t* field = dtuple_get_nth_field(tuple, i);
+
+ if (dfield_is_ext(field)) {
+ dfield_set_data(field, NULL, 0);
+ }
+ }
+}
+
+/*************************************************************//**
Copy externally stored columns to the data tuple. */
-static
+static __attribute__((nonnull))
void
row_merge_copy_blobs(
/*=================*/
@@ -2025,10 +2158,9 @@ row_merge_copy_blobs(
dtuple_t* tuple, /*!< in/out: data tuple */
mem_heap_t* heap) /*!< in/out: memory heap */
{
- ulint i;
- ulint n_fields = dtuple_get_n_fields(tuple);
+ ut_ad(rec_offs_any_extern(offsets));
- for (i = 0; i < n_fields; i++) {
+ for (ulint i = 0; i < dtuple_get_n_fields(tuple); i++) {
ulint len;
const void* data;
dfield_t* field = dtuple_get_nth_field(tuple, i);
@@ -2039,11 +2171,12 @@ row_merge_copy_blobs(
ut_ad(!dfield_is_null(field));
- /* The table is locked during index creation.
- Therefore, externally stored columns cannot possibly
- be freed between the time the BLOB pointers are read
- (row_merge_read_clustered_index()) and dereferenced
- (below). */
+ /* During the creation of a PRIMARY KEY, the table is
+ X-locked, and we skip copying records that have been
+ marked for deletion. Therefore, externally stored
+ columns cannot possibly be freed between the time the
+ BLOB pointers are read (row_merge_read_clustered_index())
+ and dereferenced (below). */
data = btr_rec_copy_externally_stored_field(
mrec, offsets, zip_size, i, &len, heap);
/* Because we have locked the table, any records
@@ -2060,54 +2193,38 @@ row_merge_copy_blobs(
Read sorted file containing index data tuples and insert these data
tuples to the index
@return DB_SUCCESS or error number */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_merge_insert_index_tuples(
/*==========================*/
- trx_t* trx, /*!< in: transaction */
+ trx_id_t trx_id, /*!< in: transaction identifier */
dict_index_t* index, /*!< in: index */
- dict_table_t* table, /*!< in: new table */
- ulint zip_size,/*!< in: compressed page size of
- the old table, or 0 if uncompressed */
+ const dict_table_t* old_table,/*!< in: old table */
int fd, /*!< in: file descriptor */
row_merge_block_t* block) /*!< in/out: file buffer */
{
const byte* b;
- que_thr_t* thr;
- ins_node_t* node;
+ mem_heap_t* heap;
mem_heap_t* tuple_heap;
- mem_heap_t* graph_heap;
- ulint error = DB_SUCCESS;
+ mem_heap_t* ins_heap;
+ dberr_t error = DB_SUCCESS;
ulint foffs = 0;
ulint* offsets;
+ mrec_buf_t* buf;
- ut_ad(trx);
- ut_ad(index);
- ut_ad(table);
-
+ ut_ad(!srv_read_only_mode);
ut_ad(!(index->type & DICT_FTS));
-
- /* We use the insert query graph as the dummy graph
- needed in the row module call */
-
- trx->op_info = "inserting index entries";
-
- graph_heap = mem_heap_create(500 + sizeof(mrec_buf_t));
- node = ins_node_create(INS_DIRECT, table, graph_heap);
-
- thr = pars_complete_graph_for_exec(node, trx, graph_heap);
-
- que_thr_move_to_run_state_for_mysql(thr, trx);
+ ut_ad(trx_id);
tuple_heap = mem_heap_create(1000);
{
ulint i = 1 + REC_OFFS_HEADER_SIZE
+ dict_index_get_n_fields(index);
-
+ heap = mem_heap_create(sizeof *buf + i * sizeof *offsets);
+ ins_heap = mem_heap_create(sizeof *buf + i * sizeof *offsets);
offsets = static_cast<ulint*>(
- mem_heap_alloc(graph_heap, i * sizeof *offsets));
-
+ mem_heap_alloc(heap, i * sizeof *offsets));
offsets[0] = i;
offsets[1] = dict_index_get_n_fields(index);
}
@@ -2117,15 +2234,17 @@ row_merge_insert_index_tuples(
if (!row_merge_read(fd, foffs, block)) {
error = DB_CORRUPTION;
} else {
- mrec_buf_t* buf;
-
buf = static_cast<mrec_buf_t*>(
- mem_heap_alloc(graph_heap, sizeof *buf));
+ mem_heap_alloc(heap, sizeof *buf));
for (;;) {
const mrec_t* mrec;
dtuple_t* dtuple;
ulint n_ext;
+ big_rec_t* big_rec;
+ rec_t* rec;
+ btr_cur_t cursor;
+ mtr_t mtr;
b = row_merge_read_rec(block, buf, b, index,
fd, &foffs, &mrec, offsets);
@@ -2137,55 +2256,164 @@ row_merge_insert_index_tuples(
break;
}
+ dict_index_t* old_index
+ = dict_table_get_first_index(old_table);
+
+ if (dict_index_is_clust(index)
+ && dict_index_is_online_ddl(old_index)) {
+ error = row_log_table_get_error(old_index);
+ if (error != DB_SUCCESS) {
+ break;
+ }
+ }
+
dtuple = row_rec_to_index_entry_low(
mrec, index, offsets, &n_ext, tuple_heap);
- if (UNIV_UNLIKELY(n_ext)) {
- row_merge_copy_blobs(mrec, offsets, zip_size,
- dtuple, tuple_heap);
- }
+ if (!n_ext) {
+ /* There are no externally stored columns. */
+ } else if (!dict_index_is_online_ddl(old_index)) {
+ ut_ad(dict_index_is_clust(index));
+ /* Modifications to the table are
+ blocked while we are not rebuilding it
+ or creating indexes. Off-page columns
+ can be fetched safely. */
+ row_merge_copy_blobs(
+ mrec, offsets,
+ dict_table_zip_size(old_table),
+ dtuple, tuple_heap);
+ } else {
+ ut_ad(dict_index_is_clust(index));
- node->row = dtuple;
- node->table = table;
- node->trx_id = trx->id;
+ ulint offset = index->trx_id_offset;
- ut_ad(dtuple_validate(dtuple));
+ if (!offset) {
+ offset = row_get_trx_id_offset(
+ index, offsets);
+ }
- do {
- thr->run_node = thr;
- thr->prev_node = thr->common.parent;
+ /* Copy the off-page columns while
+ holding old_index->lock, so
+ that they cannot be freed by
+ a rollback of a fresh insert. */
+ rw_lock_s_lock(&old_index->lock);
+
+ if (row_log_table_is_rollback(
+ old_index,
+ trx_read_trx_id(mrec + offset))) {
+ /* The row and BLOB could
+ already be freed. They
+ will be deleted by
+ row_undo_ins_remove_clust_rec
+ when rolling back a fresh
+ insert. So, no need to retrieve
+ the off-page column. */
+ row_merge_set_blob_empty(
+ dtuple);
+ } else {
+ row_merge_copy_blobs(
+ mrec, offsets,
+ dict_table_zip_size(old_table),
+ dtuple, tuple_heap);
+ }
- error = row_ins_index_entry(index, dtuple,
- 0, FALSE, thr);
+ rw_lock_s_unlock(&old_index->lock);
+ }
- if (UNIV_LIKELY(error == DB_SUCCESS)) {
+ ut_ad(dtuple_validate(dtuple));
+ log_free_check();
- goto next_rec;
- }
+ mtr_start(&mtr);
+ /* Insert after the last user record. */
+ btr_cur_open_at_index_side(
+ false, index, BTR_MODIFY_LEAF,
+ &cursor, 0, &mtr);
+ page_cur_position(
+ page_rec_get_prev(btr_cur_get_rec(&cursor)),
+ btr_cur_get_block(&cursor),
+ btr_cur_get_page_cur(&cursor));
+ cursor.flag = BTR_CUR_BINARY;
+#ifdef UNIV_DEBUG
+ /* Check that the records are inserted in order. */
+ rec = btr_cur_get_rec(&cursor);
+
+ if (!page_rec_is_infimum(rec)) {
+ ulint* rec_offsets = rec_get_offsets(
+ rec, index, offsets,
+ ULINT_UNDEFINED, &tuple_heap);
+ ut_ad(cmp_dtuple_rec(dtuple, rec, rec_offsets)
+ > 0);
+ }
+#endif /* UNIV_DEBUG */
+ ulint* ins_offsets = NULL;
+
+ error = btr_cur_optimistic_insert(
+ BTR_NO_UNDO_LOG_FLAG | BTR_NO_LOCKING_FLAG
+ | BTR_KEEP_SYS_FLAG | BTR_CREATE_FLAG,
+ &cursor, &ins_offsets, &ins_heap,
+ dtuple, &rec, &big_rec, 0, NULL, &mtr);
+
+ if (error == DB_FAIL) {
+ ut_ad(!big_rec);
+ mtr_commit(&mtr);
+ mtr_start(&mtr);
+ btr_cur_open_at_index_side(
+ false, index, BTR_MODIFY_TREE,
+ &cursor, 0, &mtr);
+ page_cur_position(
+ page_rec_get_prev(btr_cur_get_rec(
+ &cursor)),
+ btr_cur_get_block(&cursor),
+ btr_cur_get_page_cur(&cursor));
+
+ error = btr_cur_pessimistic_insert(
+ BTR_NO_UNDO_LOG_FLAG
+ | BTR_NO_LOCKING_FLAG
+ | BTR_KEEP_SYS_FLAG | BTR_CREATE_FLAG,
+ &cursor, &ins_offsets, &ins_heap,
+ dtuple, &rec, &big_rec, 0, NULL, &mtr);
+ }
- thr->lock_state = QUE_THR_LOCK_ROW;
+ if (!dict_index_is_clust(index)) {
+ page_update_max_trx_id(
+ btr_cur_get_block(&cursor),
+ btr_cur_get_page_zip(&cursor),
+ trx_id, &mtr);
+ }
- trx->error_state = static_cast<enum db_err>(
- error);
+ mtr_commit(&mtr);
- que_thr_stop_for_mysql(thr);
- thr->lock_state = QUE_THR_LOCK_NOLOCK;
- } while (row_mysql_handle_errors(&error, trx,
- thr, NULL));
+ if (UNIV_LIKELY_NULL(big_rec)) {
+ /* If the system crashes at this
+ point, the clustered index record will
+ contain a null BLOB pointer. This
+ should not matter, because the copied
+ table will be dropped on crash
+ recovery anyway. */
+
+ ut_ad(dict_index_is_clust(index));
+ ut_ad(error == DB_SUCCESS);
+ error = row_ins_index_entry_big_rec(
+ dtuple, big_rec,
+ ins_offsets, &ins_heap,
+ index, NULL, __FILE__, __LINE__);
+ dtuple_convert_back_big_rec(
+ index, dtuple, big_rec);
+ }
+
+ if (error != DB_SUCCESS) {
+ goto err_exit;
+ }
- goto err_exit;
-next_rec:
mem_heap_empty(tuple_heap);
+ mem_heap_empty(ins_heap);
}
}
- que_thr_stop_for_mysql_no_error(thr, trx);
err_exit:
- que_graph_free(thr->graph);
-
- trx->op_info = "";
-
mem_heap_free(tuple_heap);
+ mem_heap_free(ins_heap);
+ mem_heap_free(heap);
return(error);
}
@@ -2194,7 +2422,7 @@ err_exit:
Sets an exclusive lock on a table, for the duration of creating indexes.
@return error code or DB_SUCCESS */
UNIV_INTERN
-ulint
+dberr_t
row_merge_lock_table(
/*=================*/
trx_t* trx, /*!< in/out: transaction */
@@ -2203,10 +2431,10 @@ row_merge_lock_table(
{
mem_heap_t* heap;
que_thr_t* thr;
- ulint err;
+ dberr_t err;
sel_node_t* node;
- ut_ad(trx);
+ ut_ad(!srv_read_only_mode);
ut_ad(mode == LOCK_X || mode == LOCK_S);
heap = mem_heap_create(512);
@@ -2232,7 +2460,7 @@ run_again:
err = lock_table(0, table, mode, thr);
- trx->error_state =static_cast<enum db_err>( err);
+ trx->error_state = err;
if (UNIV_LIKELY(err == DB_SUCCESS)) {
que_thr_stop_for_mysql_no_error(thr, trx);
@@ -2240,7 +2468,7 @@ run_again:
que_thr_stop_for_mysql(thr);
if (err != DB_QUE_THR_SUSPENDED) {
- ibool was_lock_wait;
+ bool was_lock_wait;
was_lock_wait = row_mysql_handle_errors(
&err, trx, thr, NULL);
@@ -2274,105 +2502,312 @@ run_again:
}
/*********************************************************************//**
-Drop an index from the InnoDB system tables. The data dictionary must
-have been locked exclusively by the caller, because the transaction
-will not be committed. */
-UNIV_INTERN
+Drop an index that was created before an error occurred.
+The data dictionary must have been locked exclusively by the caller,
+because the transaction will not be committed. */
+static
void
-row_merge_drop_index(
-/*=================*/
- dict_index_t* index, /*!< in: index to be removed */
- dict_table_t* table, /*!< in: table */
- trx_t* trx) /*!< in: transaction handle */
+row_merge_drop_index_dict(
+/*======================*/
+ trx_t* trx, /*!< in/out: dictionary transaction */
+ index_id_t index_id)/*!< in: index identifier */
{
- db_err err;
- pars_info_t* info = pars_info_create();
-
- /* We use the private SQL parser of Innobase to generate the
- query graphs needed in deleting the dictionary data from system
- tables in Innobase. Deleting a row from SYS_INDEXES table also
- frees the file segments of the B-tree associated with the index. */
-
static const char sql[] =
"PROCEDURE DROP_INDEX_PROC () IS\n"
"BEGIN\n"
- /* Rename the index, so that it will be dropped by
- row_merge_drop_temp_indexes() at crash recovery
- if the server crashes before this trx is committed. */
- "UPDATE SYS_INDEXES SET NAME=CONCAT('"
- TEMP_INDEX_PREFIX_STR "', NAME) WHERE ID = :indexid;\n"
- "COMMIT WORK;\n"
- /* Drop the field definitions of the index. */
- "DELETE FROM SYS_FIELDS WHERE INDEX_ID = :indexid;\n"
- /* Drop the index definition and the B-tree. */
- "DELETE FROM SYS_INDEXES WHERE ID = :indexid;\n"
+ "DELETE FROM SYS_FIELDS WHERE INDEX_ID=:indexid;\n"
+ "DELETE FROM SYS_INDEXES WHERE ID=:indexid;\n"
"END;\n";
+ dberr_t error;
+ pars_info_t* info;
- ut_ad(index && table && trx);
+ ut_ad(!srv_read_only_mode);
+ ut_ad(mutex_own(&dict_sys->mutex));
+ ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH);
+ ut_ad(trx_get_dict_operation(trx) == TRX_DICT_OP_INDEX);
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
- pars_info_add_ull_literal(info, "indexid", index->id);
+ info = pars_info_create();
+ pars_info_add_ull_literal(info, "indexid", index_id);
+ trx->op_info = "dropping index from dictionary";
+ error = que_eval_sql(info, sql, FALSE, trx);
- trx_start_if_not_started_xa(trx);
- trx->op_info = "dropping index";
+ if (error != DB_SUCCESS) {
+ /* Even though we ensure that DDL transactions are WAIT
+ and DEADLOCK free, we could encounter other errors e.g.,
+ DB_TOO_MANY_CONCURRENT_TRXS. */
+ trx->error_state = DB_SUCCESS;
- ut_a(trx->dict_operation_lock_mode == RW_X_LATCH);
+ ut_print_timestamp(stderr);
+ fprintf(stderr, " InnoDB: Error: row_merge_drop_index_dict "
+ "failed with error code: %u.\n", (unsigned) error);
+ }
- err = static_cast<db_err>(que_eval_sql(info, sql, FALSE, trx));
+ trx->op_info = "";
+}
- DBUG_EXECUTE_IF(
- "ib_drop_index_too_many_concurrent_trxs",
- err = DB_TOO_MANY_CONCURRENT_TRXS;
- trx->error_state = err;);
+/*********************************************************************//**
+Drop indexes that were created before an error occurred.
+The data dictionary must have been locked exclusively by the caller,
+because the transaction will not be committed. */
+UNIV_INTERN
+void
+row_merge_drop_indexes_dict(
+/*========================*/
+ trx_t* trx, /*!< in/out: dictionary transaction */
+ table_id_t table_id)/*!< in: table identifier */
+{
+ static const char sql[] =
+ "PROCEDURE DROP_INDEXES_PROC () IS\n"
+ "ixid CHAR;\n"
+ "found INT;\n"
- if (err == DB_SUCCESS) {
+ "DECLARE CURSOR index_cur IS\n"
+ " SELECT ID FROM SYS_INDEXES\n"
+ " WHERE TABLE_ID=:tableid AND\n"
+ " SUBSTR(NAME,0,1)='" TEMP_INDEX_PREFIX_STR "'\n"
+ "FOR UPDATE;\n"
- /* If it is FTS index, drop from table->fts and also drop
- its auxiliary tables */
- if (index->type & DICT_FTS) {
- ut_a(table->fts);
- fts_drop_index(table, index, trx);
- }
+ "BEGIN\n"
+ "found := 1;\n"
+ "OPEN index_cur;\n"
+ "WHILE found = 1 LOOP\n"
+ " FETCH index_cur INTO ixid;\n"
+ " IF (SQL % NOTFOUND) THEN\n"
+ " found := 0;\n"
+ " ELSE\n"
+ " DELETE FROM SYS_FIELDS WHERE INDEX_ID=ixid;\n"
+ " DELETE FROM SYS_INDEXES WHERE CURRENT OF index_cur;\n"
+ " END IF;\n"
+ "END LOOP;\n"
+ "CLOSE index_cur;\n"
+
+ "END;\n";
+ dberr_t error;
+ pars_info_t* info;
- /* Replace this index with another equivalent index for all
- foreign key constraints on this table where this index is
- used */
+ ut_ad(!srv_read_only_mode);
+ ut_ad(mutex_own(&dict_sys->mutex));
+ ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH);
+ ut_ad(trx_get_dict_operation(trx) == TRX_DICT_OP_INDEX);
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
- dict_table_replace_index_in_foreign_list(table, index, trx);
- dict_index_remove_from_cache(table, index);
+ /* It is possible that table->n_ref_count > 1 when
+ locked=TRUE. In this case, all code that should have an open
+ handle to the table should be waiting for the next statement
+ to execute, or waiting for a meta-data lock.
- } else {
+ A concurrent purge will be prevented by dict_operation_lock. */
+
+ info = pars_info_create();
+ pars_info_add_ull_literal(info, "tableid", table_id);
+ trx->op_info = "dropping indexes";
+ error = que_eval_sql(info, sql, FALSE, trx);
+
+ if (error != DB_SUCCESS) {
/* Even though we ensure that DDL transactions are WAIT
and DEADLOCK free, we could encounter other errors e.g.,
- DB_TOO_MANY_TRANSACTIONS. */
+ DB_TOO_MANY_CONCURRENT_TRXS. */
trx->error_state = DB_SUCCESS;
ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Error: row_merge_drop_index failed "
- "with error code: %lu.\n", (ulint) err);
+ fprintf(stderr, " InnoDB: Error: row_merge_drop_indexes_dict "
+ "failed with error code: %u.\n", (unsigned) error);
}
trx->op_info = "";
}
/*********************************************************************//**
-Drop those indexes which were created before an error occurred when
-building an index. The data dictionary must have been locked
-exclusively by the caller, because the transaction will not be
-committed. */
+Drop indexes that were created before an error occurred.
+The data dictionary must have been locked exclusively by the caller,
+because the transaction will not be committed. */
UNIV_INTERN
void
row_merge_drop_indexes(
/*===================*/
- trx_t* trx, /*!< in: transaction */
- dict_table_t* table, /*!< in: table containing the indexes */
- dict_index_t** index, /*!< in: indexes to drop */
- ulint num_created) /*!< in: number of elements in index[] */
+ trx_t* trx, /*!< in/out: dictionary transaction */
+ dict_table_t* table, /*!< in/out: table containing the indexes */
+ ibool locked) /*!< in: TRUE=table locked,
+ FALSE=may need to do a lazy drop */
{
- ulint key_num;
+ dict_index_t* index;
+ dict_index_t* next_index;
+
+ ut_ad(!srv_read_only_mode);
+ ut_ad(mutex_own(&dict_sys->mutex));
+ ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH);
+ ut_ad(trx_get_dict_operation(trx) == TRX_DICT_OP_INDEX);
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+
+ index = dict_table_get_first_index(table);
+ ut_ad(dict_index_is_clust(index));
+ ut_ad(dict_index_get_online_status(index) == ONLINE_INDEX_COMPLETE);
+
+ /* the caller should have an open handle to the table */
+ ut_ad(table->n_ref_count >= 1);
+
+ /* It is possible that table->n_ref_count > 1 when
+ locked=TRUE. In this case, all code that should have an open
+ handle to the table should be waiting for the next statement
+ to execute, or waiting for a meta-data lock.
+
+ A concurrent purge will be prevented by dict_operation_lock. */
+
+ if (!locked && table->n_ref_count > 1) {
+ /* We will have to drop the indexes later, when the
+ table is guaranteed to be no longer in use. Mark the
+ indexes as incomplete and corrupted, so that other
+ threads will stop using them. Let dict_table_close()
+ or crash recovery or the next invocation of
+ prepare_inplace_alter_table() take care of dropping
+ the indexes. */
+
+ while ((index = dict_table_get_next_index(index)) != NULL) {
+ ut_ad(!dict_index_is_clust(index));
+
+ switch (dict_index_get_online_status(index)) {
+ case ONLINE_INDEX_ABORTED_DROPPED:
+ continue;
+ case ONLINE_INDEX_COMPLETE:
+ if (*index->name != TEMP_INDEX_PREFIX) {
+ /* Do nothing to already
+ published indexes. */
+ } else if (index->type & DICT_FTS) {
+ /* Drop a completed FULLTEXT
+ index, due to a timeout during
+ MDL upgrade for
+ commit_inplace_alter_table().
+ Because only concurrent reads
+ are allowed (and they are not
+ seeing this index yet) we
+ are safe to drop the index. */
+ dict_index_t* prev = UT_LIST_GET_PREV(
+ indexes, index);
+ /* At least there should be
+ the clustered index before
+ this one. */
+ ut_ad(prev);
+ ut_a(table->fts);
+ fts_drop_index(table, index, trx);
+ /* Since
+ INNOBASE_SHARE::idx_trans_tbl
+ is shared between all open
+ ha_innobase handles to this
+ table, no thread should be
+ accessing this dict_index_t
+ object. Also, we should be
+ holding LOCK=SHARED MDL on the
+ table even after the MDL
+ upgrade timeout. */
+
+ /* We can remove a DICT_FTS
+ index from the cache, because
+ we do not allow ADD FULLTEXT INDEX
+ with LOCK=NONE. If we allowed that,
+ we should exclude FTS entries from
+ prebuilt->ins_node->entry_list
+ in ins_node_create_entry_list(). */
+ dict_index_remove_from_cache(
+ table, index);
+ index = prev;
+ } else {
+ rw_lock_x_lock(
+ dict_index_get_lock(index));
+ dict_index_set_online_status(
+ index, ONLINE_INDEX_ABORTED);
+ index->type |= DICT_CORRUPT;
+ table->drop_aborted = TRUE;
+ goto drop_aborted;
+ }
+ continue;
+ case ONLINE_INDEX_CREATION:
+ rw_lock_x_lock(dict_index_get_lock(index));
+ ut_ad(*index->name == TEMP_INDEX_PREFIX);
+ row_log_abort_sec(index);
+ drop_aborted:
+ rw_lock_x_unlock(dict_index_get_lock(index));
+
+ DEBUG_SYNC_C("merge_drop_index_after_abort");
+ /* covered by dict_sys->mutex */
+ MONITOR_INC(MONITOR_BACKGROUND_DROP_INDEX);
+ /* fall through */
+ case ONLINE_INDEX_ABORTED:
+ /* Drop the index tree from the
+ data dictionary and free it from
+ the tablespace, but keep the object
+ in the data dictionary cache. */
+ row_merge_drop_index_dict(trx, index->id);
+ rw_lock_x_lock(dict_index_get_lock(index));
+ dict_index_set_online_status(
+ index, ONLINE_INDEX_ABORTED_DROPPED);
+ rw_lock_x_unlock(dict_index_get_lock(index));
+ table->drop_aborted = TRUE;
+ continue;
+ }
+ ut_error;
+ }
- for (key_num = 0; key_num < num_created; key_num++) {
- row_merge_drop_index(index[key_num], table, trx);
+ return;
}
+
+ row_merge_drop_indexes_dict(trx, table->id);
+
+ /* Invalidate all row_prebuilt_t::ins_graph that are referring
+ to this table. That is, force row_get_prebuilt_insert_row() to
+ rebuild prebuilt->ins_node->entry_list). */
+ ut_ad(table->def_trx_id <= trx->id);
+ table->def_trx_id = trx->id;
+
+ next_index = dict_table_get_next_index(index);
+
+ while ((index = next_index) != NULL) {
+ /* read the next pointer before freeing the index */
+ next_index = dict_table_get_next_index(index);
+
+ ut_ad(!dict_index_is_clust(index));
+
+ if (*index->name == TEMP_INDEX_PREFIX) {
+ /* If it is FTS index, drop from table->fts
+ and also drop its auxiliary tables */
+ if (index->type & DICT_FTS) {
+ ut_a(table->fts);
+ fts_drop_index(table, index, trx);
+ }
+
+ switch (dict_index_get_online_status(index)) {
+ case ONLINE_INDEX_CREATION:
+ /* This state should only be possible
+ when prepare_inplace_alter_table() fails
+ after invoking row_merge_create_index().
+ In inplace_alter_table(),
+ row_merge_build_indexes()
+ should never leave the index in this state.
+ It would invoke row_log_abort_sec() on
+ failure. */
+ case ONLINE_INDEX_COMPLETE:
+ /* In these cases, we are able to drop
+ the index straight. The DROP INDEX was
+ never deferred. */
+ break;
+ case ONLINE_INDEX_ABORTED:
+ case ONLINE_INDEX_ABORTED_DROPPED:
+ /* covered by dict_sys->mutex */
+ MONITOR_DEC(MONITOR_BACKGROUND_DROP_INDEX);
+ }
+
+ dict_index_remove_from_cache(table, index);
+ }
+ }
+
+ table->drop_aborted = FALSE;
+ ut_d(dict_table_check_for_dup_indexes(table, CHECK_ALL_COMPLETE));
}
/*********************************************************************//**
@@ -2382,9 +2817,32 @@ void
row_merge_drop_temp_indexes(void)
/*=============================*/
{
- trx_t* trx;
- btr_pcur_t pcur;
- mtr_t mtr;
+ static const char sql[] =
+ "PROCEDURE DROP_TEMP_INDEXES_PROC () IS\n"
+ "ixid CHAR;\n"
+ "found INT;\n"
+
+ "DECLARE CURSOR index_cur IS\n"
+ " SELECT ID FROM SYS_INDEXES\n"
+ " WHERE SUBSTR(NAME,0,1)='" TEMP_INDEX_PREFIX_STR "'\n"
+ "FOR UPDATE;\n"
+
+ "BEGIN\n"
+ "found := 1;\n"
+ "OPEN index_cur;\n"
+ "WHILE found = 1 LOOP\n"
+ " FETCH index_cur INTO ixid;\n"
+ " IF (SQL % NOTFOUND) THEN\n"
+ " found := 0;\n"
+ " ELSE\n"
+ " DELETE FROM SYS_FIELDS WHERE INDEX_ID=ixid;\n"
+ " DELETE FROM SYS_INDEXES WHERE CURRENT OF index_cur;\n"
+ " END IF;\n"
+ "END LOOP;\n"
+ "CLOSE index_cur;\n"
+ "END;\n";
+ trx_t* trx;
+ dberr_t error;
/* Load the table definitions that contain partially defined
indexes, so that the data dictionary information can be checked
@@ -2392,75 +2850,26 @@ row_merge_drop_temp_indexes(void)
trx = trx_allocate_for_background();
trx->op_info = "dropping partially created indexes";
row_mysql_lock_data_dictionary(trx);
+ /* Ensure that this transaction will be rolled back and locks
+ will be released, if the server gets killed before the commit
+ gets written to the redo log. */
+ trx_set_dict_operation(trx, TRX_DICT_OP_INDEX);
- mtr_start(&mtr);
-
- btr_pcur_open_at_index_side(
- TRUE,
- dict_table_get_first_index(dict_sys->sys_indexes),
- BTR_SEARCH_LEAF, &pcur, TRUE, &mtr);
-
- for (;;) {
- const rec_t* rec;
- const byte* field;
- ulint len;
- table_id_t table_id;
- dict_table_t* table;
-
- btr_pcur_move_to_next_user_rec(&pcur, &mtr);
-
- if (!btr_pcur_is_on_user_rec(&pcur)) {
- break;
- }
-
- rec = btr_pcur_get_rec(&pcur);
- field = rec_get_nth_field_old(
- rec, DICT_FLD__SYS_INDEXES__NAME, &len);
- if (len == UNIV_SQL_NULL || len == 0
- || (char) *field != TEMP_INDEX_PREFIX) {
- continue;
- }
-
- /* This is a temporary index. */
+ trx->op_info = "dropping indexes";
+ error = que_eval_sql(NULL, sql, FALSE, trx);
- field = rec_get_nth_field_old(
- rec, DICT_FLD__SYS_INDEXES__TABLE_ID, &len);
- if (len != 8) {
- /* Corrupted TABLE_ID */
- continue;
- }
-
- table_id = mach_read_from_8(field);
-
- btr_pcur_store_position(&pcur, &mtr);
- btr_pcur_commit_specify_mtr(&pcur, &mtr);
-
- table = dict_table_open_on_id(table_id, TRUE);
-
- if (table) {
- dict_index_t* index;
- dict_index_t* next_index;
-
- for (index = dict_table_get_first_index(table);
- index; index = next_index) {
-
- next_index = dict_table_get_next_index(index);
-
- if (*index->name == TEMP_INDEX_PREFIX) {
- row_merge_drop_index(index, table, trx);
- trx_commit_for_mysql(trx);
- }
- }
-
- dict_table_close(table, TRUE);
- }
+ if (error != DB_SUCCESS) {
+ /* Even though we ensure that DDL transactions are WAIT
+ and DEADLOCK free, we could encounter other errors e.g.,
+ DB_TOO_MANY_CONCURRENT_TRXS. */
+ trx->error_state = DB_SUCCESS;
- mtr_start(&mtr);
- btr_pcur_restore_position(BTR_SEARCH_LEAF, &pcur, &mtr);
+ ut_print_timestamp(stderr);
+ fprintf(stderr, " InnoDB: Error: row_merge_drop_temp_indexes "
+ "failed with error code: %u.\n", (unsigned) error);
}
- btr_pcur_close(&pcur);
- mtr_commit(&mtr);
+ trx_commit_for_mysql(trx);
row_mysql_unlock_data_dictionary(trx);
trx_free_for_background(trx);
}
@@ -2469,7 +2878,7 @@ row_merge_drop_temp_indexes(void)
Creates temporary merge files, and if UNIV_PFS_IO defined, register
the file descriptor with Performance Schema.
@return file descriptor, or -1 on failure */
-UNIV_INLINE
+UNIV_INTERN
int
row_merge_file_create_low(void)
/*===========================*/
@@ -2488,12 +2897,13 @@ row_merge_file_create_low(void)
#endif
fd = innobase_mysql_tmpfile();
#ifdef UNIV_PFS_IO
- register_pfs_file_open_end(locker, fd);
+ register_pfs_file_open_end(locker, fd);
#endif
+
if (fd < 0) {
- fprintf(stderr,
- "InnoDB: Error: Cannot create temporary merge file\n");
- return(-1);
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Cannot create temporary merge file");
+ return -1;
}
return(fd);
}
@@ -2508,18 +2918,22 @@ row_merge_file_create(
merge_file_t* merge_file) /*!< out: merge file structure */
{
merge_file->fd = row_merge_file_create_low();
- if (srv_disable_sort_file_cache) {
- os_file_set_nocache(merge_file->fd, "row0merge.c", "sort");
- }
merge_file->offset = 0;
merge_file->n_rec = 0;
+
+ if (merge_file->fd >= 0) {
+ if (srv_disable_sort_file_cache) {
+ os_file_set_nocache(merge_file->fd,
+ "row0merge.cc", "sort");
+ }
+ }
return(merge_file->fd);
}
/*********************************************************************//**
Destroy a merge file. And de-register the file from Performance Schema
if UNIV_PFS_IO is defined. */
-UNIV_INLINE
+UNIV_INTERN
void
row_merge_file_destroy_low(
/*=======================*/
@@ -2532,7 +2946,9 @@ row_merge_file_destroy_low(
fd, 0, PSI_FILE_CLOSE,
__FILE__, __LINE__);
#endif
- close(fd);
+ if (fd >= 0) {
+ close(fd);
+ }
#ifdef UNIV_PFS_IO
register_pfs_file_io_end(locker, 0);
#endif
@@ -2543,8 +2959,10 @@ UNIV_INTERN
void
row_merge_file_destroy(
/*===================*/
- merge_file_t* merge_file) /*!< out: merge file structure */
+ merge_file_t* merge_file) /*!< in/out: merge file structure */
{
+ ut_ad(!srv_read_only_mode);
+
if (merge_file->fd != -1) {
row_merge_file_destroy_low(merge_file->fd);
merge_file->fd = -1;
@@ -2552,173 +2970,109 @@ row_merge_file_destroy(
}
/*********************************************************************//**
-Determine the precise type of a column that is added to a tem
-if a column must be constrained NOT NULL.
-@return col->prtype, possibly ORed with DATA_NOT_NULL */
-UNIV_INLINE
-ulint
-row_merge_col_prtype(
-/*=================*/
- const dict_col_t* col, /*!< in: column */
- const char* col_name, /*!< in: name of the column */
- const merge_index_def_t*index_def) /*!< in: the index definition
- of the primary key */
-{
- ulint prtype = col->prtype;
- ulint i;
-
- ut_ad(index_def->ind_type & DICT_CLUSTERED);
-
- if (prtype & DATA_NOT_NULL) {
-
- return(prtype);
- }
-
- /* All columns that are included
- in the PRIMARY KEY must be NOT NULL. */
-
- for (i = 0; i < index_def->n_fields; i++) {
- if (!strcmp(col_name, index_def->fields[i].field_name)) {
- return(prtype | DATA_NOT_NULL);
- }
- }
-
- return(prtype);
-}
-
-/*********************************************************************//**
-Create a temporary table for creating a primary key, using the definition
-of an existing table.
-@return table, or NULL on error */
+Rename an index in the dictionary that was created. The data
+dictionary must have been locked exclusively by the caller, because
+the transaction will not be committed.
+@return DB_SUCCESS if all OK */
UNIV_INTERN
-dict_table_t*
-row_merge_create_temporary_table(
-/*=============================*/
- const char* table_name, /*!< in: new table name */
- const merge_index_def_t*index_def, /*!< in: the index definition
- of the primary key */
- const dict_table_t* table, /*!< in: old table definition */
- trx_t* trx) /*!< in/out: transaction
- (sets error_state) */
+dberr_t
+row_merge_rename_index_to_add(
+/*==========================*/
+ trx_t* trx, /*!< in/out: transaction */
+ table_id_t table_id, /*!< in: table identifier */
+ index_id_t index_id) /*!< in: index identifier */
{
- ulint i;
- dict_table_t* new_table = NULL;
- ulint n_cols = dict_table_get_n_user_cols(table);
- ulint error;
- mem_heap_t* heap = mem_heap_create(1000);
- ulint num_col;
-
- ut_ad(table_name);
- ut_ad(index_def);
- ut_ad(table);
- ut_ad(mutex_own(&dict_sys->mutex));
-
- num_col = DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS_ADD_DOC_ID)
- ? n_cols + 1
- : n_cols;
-
- new_table = dict_mem_table_create(
- table_name, 0, num_col, table->flags, table->flags2);
-
- for (i = 0; i < n_cols; i++) {
- const dict_col_t* col;
- const char* col_name;
+ dberr_t err = DB_SUCCESS;
+ pars_info_t* info = pars_info_create();
- col = dict_table_get_nth_col(table, i);
- col_name = dict_table_get_col_name(table, i);
+ /* We use the private SQL parser of Innobase to generate the
+ query graphs needed in renaming indexes. */
- dict_mem_table_add_col(new_table, heap, col_name, col->mtype,
- row_merge_col_prtype(col, col_name,
- index_def),
- col->len);
- }
+ static const char rename_index[] =
+ "PROCEDURE RENAME_INDEX_PROC () IS\n"
+ "BEGIN\n"
+ "UPDATE SYS_INDEXES SET NAME=SUBSTR(NAME,1,LENGTH(NAME)-1)\n"
+ "WHERE TABLE_ID = :tableid AND ID = :indexid;\n"
+ "END;\n";
- /* Add the FTS doc_id hidden column */
- if (DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS_ADD_DOC_ID)) {
- fts_add_doc_id_column(new_table);
- new_table->fts->doc_col = n_cols;
- }
+ ut_ad(trx);
+ ut_a(trx->dict_operation_lock_mode == RW_X_LATCH);
+ ut_ad(trx_get_dict_operation(trx) == TRX_DICT_OP_INDEX);
- error = row_create_table_for_mysql(new_table, trx);
- mem_heap_free(heap);
+ trx->op_info = "renaming index to add";
- if (error != DB_SUCCESS) {
- trx->error_state = static_cast<enum db_err>(error);
- new_table = NULL;
- } else {
- dict_table_t* temp_table;
+ pars_info_add_ull_literal(info, "tableid", table_id);
+ pars_info_add_ull_literal(info, "indexid", index_id);
- /* We need to bump up the table ref count and before we can
- use it we need to open the table. */
+ err = que_eval_sql(info, rename_index, FALSE, trx);
- temp_table = dict_table_open_on_name_no_stats(
- new_table->name, TRUE, DICT_ERR_IGNORE_NONE);
+ if (err != DB_SUCCESS) {
+ /* Even though we ensure that DDL transactions are WAIT
+ and DEADLOCK free, we could encounter other errors e.g.,
+ DB_TOO_MANY_CONCURRENT_TRXS. */
+ trx->error_state = DB_SUCCESS;
- ut_a(new_table == temp_table);
+ ut_print_timestamp(stderr);
+ fprintf(stderr,
+ " InnoDB: Error: row_merge_rename_index_to_add "
+ "failed with error code: %u.\n", (unsigned) err);
}
- return(new_table);
+ trx->op_info = "";
+
+ return(err);
}
/*********************************************************************//**
-Rename the temporary indexes in the dictionary to permanent ones. The
-data dictionary must have been locked exclusively by the caller,
-because the transaction will not be committed.
+Rename an index in the dictionary that is to be dropped. The data
+dictionary must have been locked exclusively by the caller, because
+the transaction will not be committed.
@return DB_SUCCESS if all OK */
UNIV_INTERN
-ulint
-row_merge_rename_indexes(
-/*=====================*/
+dberr_t
+row_merge_rename_index_to_drop(
+/*===========================*/
trx_t* trx, /*!< in/out: transaction */
- dict_table_t* table) /*!< in/out: table with new indexes */
+ table_id_t table_id, /*!< in: table identifier */
+ index_id_t index_id) /*!< in: index identifier */
{
- db_err err = DB_SUCCESS;
+ dberr_t err;
pars_info_t* info = pars_info_create();
+ ut_ad(!srv_read_only_mode);
+
/* We use the private SQL parser of Innobase to generate the
query graphs needed in renaming indexes. */
- static const char* sql =
- "PROCEDURE RENAME_INDEXES_PROC () IS\n"
+ static const char rename_index[] =
+ "PROCEDURE RENAME_INDEX_PROC () IS\n"
"BEGIN\n"
- "UPDATE SYS_INDEXES SET NAME=SUBSTR(NAME,1,LENGTH(NAME)-1)\n"
- "WHERE TABLE_ID = :tableid AND SUBSTR(NAME,0,1)='"
- TEMP_INDEX_PREFIX_STR "';\n"
+ "UPDATE SYS_INDEXES SET NAME=CONCAT('"
+ TEMP_INDEX_PREFIX_STR "',NAME)\n"
+ "WHERE TABLE_ID = :tableid AND ID = :indexid;\n"
"END;\n";
- ut_ad(table);
ut_ad(trx);
ut_a(trx->dict_operation_lock_mode == RW_X_LATCH);
+ ut_ad(trx_get_dict_operation(trx) == TRX_DICT_OP_INDEX);
- trx->op_info = "renaming indexes";
+ trx->op_info = "renaming index to drop";
- pars_info_add_ull_literal(info, "tableid", table->id);
+ pars_info_add_ull_literal(info, "tableid", table_id);
+ pars_info_add_ull_literal(info, "indexid", index_id);
- err = static_cast<db_err>(que_eval_sql(info, sql, FALSE, trx));
+ err = que_eval_sql(info, rename_index, FALSE, trx);
- DBUG_EXECUTE_IF(
- "ib_rename_indexes_too_many_concurrent_trxs",
- err = DB_TOO_MANY_CONCURRENT_TRXS;
- trx->error_state = static_cast<db_err>(err););
-
- if (err == DB_SUCCESS) {
- dict_index_t* index = dict_table_get_first_index(table);
- do {
- if (*index->name == TEMP_INDEX_PREFIX) {
- index->name++;
- }
- index = dict_table_get_next_index(index);
- } while (index);
- } else {
+ if (err != DB_SUCCESS) {
/* Even though we ensure that DDL transactions are WAIT
and DEADLOCK free, we could encounter other errors e.g.,
- DB_TOO_MANY_TRANSACTIONS. */
-
+ DB_TOO_MANY_CONCURRENT_TRXS. */
trx->error_state = DB_SUCCESS;
ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Error: row_merge_rename_indexes "
- "failed with error code: %lu.\n", (ulint) err);
+ fprintf(stderr,
+ " InnoDB: Error: row_merge_rename_index_to_drop "
+ "failed with error code: %u.\n", (unsigned) err);
}
trx->op_info = "";
@@ -2727,12 +3081,39 @@ row_merge_rename_indexes(
}
/*********************************************************************//**
+Provide a new pathname for a table that is being renamed if it belongs to
+a file-per-table tablespace. The caller is responsible for freeing the
+memory allocated for the return value.
+@return new pathname of tablespace file, or NULL if space = 0 */
+UNIV_INTERN
+char*
+row_make_new_pathname(
+/*==================*/
+ dict_table_t* table, /*!< in: table to be renamed */
+ const char* new_name) /*!< in: new name */
+{
+ char* new_path;
+ char* old_path;
+
+ ut_ad(table->space != TRX_SYS_SPACE);
+
+ old_path = fil_space_get_first_path(table->space);
+ ut_a(old_path);
+
+ new_path = os_file_make_new_pathname(old_path, new_name);
+
+ mem_free(old_path);
+
+ return(new_path);
+}
+
+/*********************************************************************//**
Rename the tables in the data dictionary. The data dictionary must
have been locked exclusively by the caller, because the transaction
will not be committed.
@return error code or DB_SUCCESS */
UNIV_INTERN
-ulint
+dberr_t
row_merge_rename_tables(
/*====================*/
dict_table_t* old_table, /*!< in/out: old table, renamed to
@@ -2742,28 +3123,32 @@ row_merge_rename_tables(
const char* tmp_name, /*!< in: new name for old_table */
trx_t* trx) /*!< in: transaction handle */
{
- ulint err = DB_ERROR;
+ dberr_t err = DB_ERROR;
pars_info_t* info;
char old_name[MAX_FULL_NAME_LEN + 1];
+ ut_ad(!srv_read_only_mode);
ut_ad(old_table != new_table);
ut_ad(mutex_own(&dict_sys->mutex));
-
ut_a(trx->dict_operation_lock_mode == RW_X_LATCH);
+ ut_ad(trx_get_dict_operation(trx) == TRX_DICT_OP_TABLE);
/* store the old/current name to an automatic variable */
if (strlen(old_table->name) + 1 <= sizeof(old_name)) {
memcpy(old_name, old_table->name, strlen(old_table->name) + 1);
} else {
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: too long table name: '%s', "
- "max length is %d\n", old_table->name,
- MAX_FULL_NAME_LEN);
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Too long table name: '%s', max length is %d",
+ old_table->name, MAX_FULL_NAME_LEN);
ut_error;
}
trx->op_info = "renaming tables";
+ DBUG_EXECUTE_IF(
+ "ib_rebuild_cannot_rename",
+ err = DB_ERROR; goto err_exit;);
+
/* We use the private SQL parser of Innobase to generate the query
graphs needed in updating the dictionary data in system tables. */
@@ -2782,8 +3167,63 @@ row_merge_rename_tables(
" WHERE NAME = :new_name;\n"
"END;\n", FALSE, trx);
- if (err != DB_SUCCESS) {
+ /* Update SYS_TABLESPACES and SYS_DATAFILES if the old
+ table is in a non-system tablespace where space > 0. */
+ if (err == DB_SUCCESS
+ && old_table->space != TRX_SYS_SPACE
+ && !old_table->ibd_file_missing) {
+ /* Make pathname to update SYS_DATAFILES. */
+ char* tmp_path = row_make_new_pathname(old_table, tmp_name);
+
+ info = pars_info_create();
+
+ pars_info_add_str_literal(info, "tmp_name", tmp_name);
+ pars_info_add_str_literal(info, "tmp_path", tmp_path);
+ pars_info_add_int4_literal(info, "old_space",
+ (lint) old_table->space);
+
+ err = que_eval_sql(info,
+ "PROCEDURE RENAME_OLD_SPACE () IS\n"
+ "BEGIN\n"
+ "UPDATE SYS_TABLESPACES"
+ " SET NAME = :tmp_name\n"
+ " WHERE SPACE = :old_space;\n"
+ "UPDATE SYS_DATAFILES"
+ " SET PATH = :tmp_path\n"
+ " WHERE SPACE = :old_space;\n"
+ "END;\n", FALSE, trx);
+
+ mem_free(tmp_path);
+ }
+
+ /* Update SYS_TABLESPACES and SYS_DATAFILES if the new
+ table is in a non-system tablespace where space > 0. */
+ if (err == DB_SUCCESS && new_table->space != TRX_SYS_SPACE) {
+ /* Make pathname to update SYS_DATAFILES. */
+ char* old_path = row_make_new_pathname(new_table, old_name);
+
+ info = pars_info_create();
+
+ pars_info_add_str_literal(info, "old_name", old_name);
+ pars_info_add_str_literal(info, "old_path", old_path);
+ pars_info_add_int4_literal(info, "new_space",
+ (lint) new_table->space);
+
+ err = que_eval_sql(info,
+ "PROCEDURE RENAME_NEW_SPACE () IS\n"
+ "BEGIN\n"
+ "UPDATE SYS_TABLESPACES"
+ " SET NAME = :old_name\n"
+ " WHERE SPACE = :new_space;\n"
+ "UPDATE SYS_DATAFILES"
+ " SET PATH = :old_path\n"
+ " WHERE SPACE = :new_space;\n"
+ "END;\n", FALSE, trx);
+
+ mem_free(old_path);
+ }
+ if (err != DB_SUCCESS) {
goto err_exit;
}
@@ -2812,13 +3252,39 @@ row_merge_rename_tables(
/* The following calls will also rename the .ibd data files if
the tables are stored in a single-table tablespace */
- if (!dict_table_rename_in_cache(old_table, tmp_name, FALSE)
- || !dict_table_rename_in_cache(new_table, old_name, FALSE)) {
+ err = dict_table_rename_in_cache(old_table, tmp_name, FALSE);
- err = DB_ERROR;
- goto err_exit;
+ if (err == DB_SUCCESS) {
+
+ ut_ad(dict_table_is_discarded(old_table)
+ == dict_table_is_discarded(new_table));
+
+ err = dict_table_rename_in_cache(new_table, old_name, FALSE);
+
+ if (err != DB_SUCCESS) {
+
+ if (dict_table_rename_in_cache(
+ old_table, old_name, FALSE)
+ != DB_SUCCESS) {
+
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Cannot undo the rename in cache "
+ "from %s to %s", old_name, tmp_name);
+ }
+
+ goto err_exit;
+ }
+
+ if (dict_table_is_discarded(new_table)) {
+
+ err = row_import_update_discarded_flag(
+ trx, new_table->id, true, true);
+ }
}
+ DBUG_EXECUTE_IF("ib_rebuild_cannot_load_fk",
+ err = DB_ERROR; goto err_exit;);
+
err = dict_load_foreigns(old_name, FALSE, TRUE);
if (err != DB_SUCCESS) {
@@ -2836,8 +3302,8 @@ err_exit:
/*********************************************************************//**
Create and execute a query graph for creating an index.
@return DB_SUCCESS or error code */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_merge_create_index_graph(
/*=========================*/
trx_t* trx, /*!< in: trx */
@@ -2847,7 +3313,7 @@ row_merge_create_index_graph(
ind_node_t* node; /*!< Index creation node */
mem_heap_t* heap; /*!< Memory heap */
que_thr_t* thr; /*!< Query thread */
- ulint err;
+ dberr_t err;
ut_ad(trx);
ut_ad(table);
@@ -2856,7 +3322,7 @@ row_merge_create_index_graph(
heap = mem_heap_create(512);
index->table = table;
- node = ind_create_graph_create(index, heap);
+ node = ind_create_graph_create(index, heap, false);
thr = pars_complete_graph_for_exec(node, trx, heap);
ut_a(thr == que_fork_start_command(
@@ -2880,14 +3346,16 @@ row_merge_create_index(
/*===================*/
trx_t* trx, /*!< in/out: trx (sets error_state) */
dict_table_t* table, /*!< in: the index is on this table */
- const merge_index_def_t*index_def)
+ const index_def_t* index_def)
/*!< in: the index definition */
{
dict_index_t* index;
- ulint err;
+ dberr_t err;
ulint n_fields = index_def->n_fields;
ulint i;
+ ut_ad(!srv_read_only_mode);
+
/* Create the index prototype, using the passed in def, this is not
a persistent operation. We pass 0 as the space id, and determine at
a lower level the space id where to store the table. */
@@ -2898,10 +3366,11 @@ row_merge_create_index(
ut_a(index);
for (i = 0; i < n_fields; i++) {
- merge_index_field_t* ifield = &index_def->fields[i];
+ index_field_t* ifield = &index_def->fields[i];
- dict_mem_index_add_field(index, ifield->field_name,
- ifield->prefix_len);
+ dict_mem_index_add_field(
+ index, dict_table_get_col_name(table, ifield->col_no),
+ ifield->prefix_len);
}
/* Add the index to SYS_INDEXES, using the index prototype. */
@@ -2909,15 +3378,14 @@ row_merge_create_index(
if (err == DB_SUCCESS) {
- index = row_merge_dict_table_get_index(
- table, index_def);
+ index = dict_table_get_index_on_name(table, index_def->name);
ut_a(index);
/* Note the id of the transaction that created this
index, we use it to restrict readers from accessing
this index, to ensure read consistency. */
- index->trx_id = trx->id;
+ ut_ad(index->trx_id == trx->id);
} else {
index = NULL;
}
@@ -2934,35 +3402,46 @@ row_merge_is_index_usable(
const trx_t* trx, /*!< in: transaction */
const dict_index_t* index) /*!< in: index to check */
{
+ if (!dict_index_is_clust(index)
+ && dict_index_is_online_ddl(index)) {
+ /* Indexes that are being created are not useable. */
+ return(FALSE);
+ }
+
return(!dict_index_is_corrupted(index)
- && (!trx->read_view
- || read_view_sees_trx_id(trx->read_view, index->trx_id)));
+ && (dict_table_is_temporary(index->table)
+ || !trx->read_view
+ || read_view_sees_trx_id(trx->read_view, index->trx_id)));
}
/*********************************************************************//**
-Drop the old table.
+Drop a table. The caller must have ensured that the background stats
+thread is not processing the table. This can be done by calling
+dict_stats_wait_bg_to_stop_using_tables() after locking the dictionary and
+before calling this function.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
row_merge_drop_table(
/*=================*/
trx_t* trx, /*!< in: transaction */
dict_table_t* table) /*!< in: table to drop */
{
+ ut_ad(!srv_read_only_mode);
+
/* There must be no open transactions on the table. */
ut_a(table->n_ref_count == 0);
- return(row_drop_table_for_mysql(table->name, trx, FALSE));
+ return(row_drop_table_for_mysql(table->name, trx, false, false));
}
-
/*********************************************************************//**
Build indexes on a table by reading a clustered index,
creating a temporary file containing index entries, merge sorting
these index entries and inserting sorted index entries to indexes.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
row_merge_build_indexes(
/*====================*/
trx_t* trx, /*!< in: transaction */
@@ -2971,54 +3450,59 @@ row_merge_build_indexes(
dict_table_t* new_table, /*!< in: table where indexes are
created; identical to old_table
unless creating a PRIMARY KEY */
+ bool online, /*!< in: true if creating indexes
+ online */
dict_index_t** indexes, /*!< in: indexes to be created */
+ const ulint* key_numbers, /*!< in: MySQL key numbers */
ulint n_indexes, /*!< in: size of indexes[] */
- struct TABLE* table) /*!< in/out: MySQL table, for
+ struct TABLE* table, /*!< in/out: MySQL table, for
reporting erroneous key value
if applicable */
+ const dtuple_t* add_cols, /*!< in: default values of
+ added columns, or NULL */
+ const ulint* col_map, /*!< in: mapping of old column
+ numbers to new ones, or NULL
+ if old_table == new_table */
+ ulint add_autoinc, /*!< in: number of added
+ AUTO_INCREMENT column, or
+ ULINT_UNDEFINED if none is added */
+ ib_sequence_t& sequence) /*!< in: autoinc instance if
+ add_autoinc != ULINT_UNDEFINED */
{
merge_file_t* merge_files;
row_merge_block_t* block;
ulint block_size;
ulint i;
ulint j;
- ulint error;
- int tmpfd = -1;
+ dberr_t error;
+ int tmpfd;
dict_index_t* fts_sort_idx = NULL;
fts_psort_t* psort_info = NULL;
fts_psort_t* merge_info = NULL;
ib_int64_t sig_count = 0;
- ut_ad(trx);
- ut_ad(old_table);
- ut_ad(new_table);
- ut_ad(indexes);
- ut_ad(n_indexes);
-
- trx_start_if_not_started_xa(trx);
+ ut_ad(!srv_read_only_mode);
+ ut_ad((old_table == new_table) == !col_map);
+ ut_ad(!add_cols || col_map);
/* Allocate memory for merge file data structure and initialize
fields */
- merge_files = static_cast<merge_file_t*>(
- mem_alloc(n_indexes * sizeof *merge_files));
-
block_size = 3 * srv_sort_buf_size;
block = static_cast<row_merge_block_t*>(
os_mem_alloc_large(&block_size));
- /* Initialize all the merge file descriptors, so that we
- don't call row_merge_file_destroy() on uninitialized
- merge file descriptor */
-
- for (i = 0; i < n_indexes; i++) {
- merge_files[i].fd = -1;
+ if (block == NULL) {
+ return(DB_OUT_OF_MEMORY);
}
- for (i = 0; i < n_indexes; i++) {
+ trx_start_if_not_started_xa(trx);
- if (row_merge_file_create(&merge_files[i]) < 0)
- {
+ merge_files = static_cast<merge_file_t*>(
+ mem_alloc(n_indexes * sizeof *merge_files));
+
+ for (i = 0; i < n_indexes; i++) {
+ if (row_merge_file_create(&merge_files[i]) < 0) {
error = DB_OUT_OF_MEMORY;
goto func_exit;
}
@@ -3031,19 +3515,24 @@ row_merge_build_indexes(
we need to build a "fts sort index" indexing
on above three 'fields' */
fts_sort_idx = row_merge_create_fts_sort_index(
- indexes[i], old_table,
- &opt_doc_id_size);
-
- row_fts_psort_info_init(trx, table, new_table,
- fts_sort_idx, opt_doc_id_size,
- &psort_info, &merge_info);
+ indexes[i], old_table, &opt_doc_id_size);
+
+ row_merge_dup_t* dup = static_cast<row_merge_dup_t*>(
+ ut_malloc(sizeof *dup));
+ dup->index = fts_sort_idx;
+ dup->table = table;
+ dup->col_map = col_map;
+ dup->n_dup = 0;
+
+ row_fts_psort_info_init(
+ trx, dup, new_table, opt_doc_id_size,
+ &psort_info, &merge_info);
}
}
tmpfd = row_merge_file_create_low();
- if (tmpfd < 0)
- {
+ if (tmpfd < 0) {
error = DB_OUT_OF_MEMORY;
goto func_exit;
}
@@ -3056,31 +3545,61 @@ row_merge_build_indexes(
secondary index entries for merge sort */
error = row_merge_read_clustered_index(
- trx, table, old_table, new_table, indexes,
- fts_sort_idx, psort_info, merge_files, n_indexes, block);
+ trx, table, old_table, new_table, online, indexes,
+ fts_sort_idx, psort_info, merge_files, key_numbers,
+ n_indexes, add_cols, col_map,
+ add_autoinc, sequence, block);
if (error != DB_SUCCESS) {
goto func_exit;
}
+ DEBUG_SYNC_C("row_merge_after_scan");
+
/* Now we have files containing index entries ready for
sorting and inserting. */
for (i = 0; i < n_indexes; i++) {
- dict_index_t* sort_idx;
-
- sort_idx = (indexes[i]->type & DICT_FTS)
- ? fts_sort_idx
- : indexes[i];
+ dict_index_t* sort_idx = indexes[i];
if (indexes[i]->type & DICT_FTS) {
os_event_t fts_parallel_merge_event;
+ bool all_exit = false;
+ ulint trial_count = 0;
+
+ sort_idx = fts_sort_idx;
+
+ /* Now all children should complete, wait
+ a bit until they all finish using event */
+ while (!all_exit && trial_count < 10000) {
+ all_exit = true;
+
+ for (j = 0; j < fts_sort_pll_degree;
+ j++) {
+ if (psort_info[j].child_status
+ != FTS_CHILD_EXITING) {
+ all_exit = false;
+ os_thread_sleep(1000);
+ break;
+ }
+ }
+ trial_count++;
+ }
+
+ if (!all_exit) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Not all child sort threads exited"
+ " when creating FTS index '%s'",
+ indexes[i]->name);
+ }
fts_parallel_merge_event
- = merge_info[0].psort_common->sort_event;
+ = merge_info[0].psort_common->merge_event;
if (FTS_PLL_MERGE) {
+ trial_count = 0;
+ all_exit = false;
os_event_reset(fts_parallel_merge_event);
row_fts_start_parallel_merge(merge_info);
wait_again:
@@ -3090,33 +3609,64 @@ wait_again:
for (j = 0; j < FTS_NUM_AUX_INDEX; j++) {
if (merge_info[j].child_status
- != FTS_CHILD_COMPLETE) {
+ != FTS_CHILD_COMPLETE
+ && merge_info[j].child_status
+ != FTS_CHILD_EXITING) {
sig_count = os_event_reset(
fts_parallel_merge_event);
goto wait_again;
}
}
+
+ /* Now all children should complete, wait
+ a bit until they all finish using event */
+ while (!all_exit && trial_count < 10000) {
+ all_exit = true;
+
+ for (j = 0; j < FTS_NUM_AUX_INDEX;
+ j++) {
+ if (merge_info[j].child_status
+ != FTS_CHILD_EXITING) {
+ all_exit = false;
+ os_thread_sleep(1000);
+ break;
+ }
+ }
+ trial_count++;
+ }
+
+ if (!all_exit) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Not all child merge threads"
+ " exited when creating FTS"
+ " index '%s'",
+ indexes[i]->name);
+ }
} else {
+ /* This cannot report duplicates; an
+ assertion would fail in that case. */
error = row_fts_merge_insert(
sort_idx, new_table,
psort_info, 0);
}
+#ifdef FTS_INTERNAL_DIAG_PRINT
+ DEBUG_FTS_SORT_PRINT("FTS_SORT: Complete Insert\n");
+#endif
} else {
- error = row_merge_sort(trx, sort_idx, &merge_files[i],
- block, &tmpfd, table);
+ row_merge_dup_t dup = {
+ sort_idx, table, col_map, 0};
+
+ error = row_merge_sort(
+ trx, &dup, &merge_files[i],
+ block, &tmpfd);
if (error == DB_SUCCESS) {
error = row_merge_insert_index_tuples(
- trx, sort_idx, new_table,
- dict_table_zip_size(old_table),
+ trx->id, sort_idx, old_table,
merge_files[i].fd, block);
}
-
-#ifdef FTS_INTERNAL_DIAG_PRINT
- DEBUG_FTS_SORT_PRINT("FTS_SORT: Complete Insert\n");
-#endif
}
/* Close the temporary file to free up space. */
@@ -3124,10 +3674,20 @@ wait_again:
if (indexes[i]->type & DICT_FTS) {
row_fts_psort_info_destroy(psort_info, merge_info);
+ } else if (error != DB_SUCCESS || !online) {
+ /* Do not apply any online log. */
+ } else if (old_table != new_table) {
+ ut_ad(!sort_idx->online_log);
+ ut_ad(sort_idx->online_status
+ == ONLINE_INDEX_COMPLETE);
+ } else {
+ DEBUG_SYNC_C("row_log_apply_before");
+ error = row_log_apply(trx, sort_idx, table);
+ DEBUG_SYNC_C("row_log_apply_after");
}
if (error != DB_SUCCESS) {
- trx->error_key_num = i;
+ trx->error_key_num = key_numbers[i];
goto func_exit;
}
@@ -3148,7 +3708,7 @@ func_exit:
DBUG_EXECUTE_IF(
"ib_build_indexes_too_many_concurrent_trxs",
error = DB_TOO_MANY_CONCURRENT_TRXS;
- trx->error_state = static_cast<db_err>(error););
+ trx->error_state = error;);
row_merge_file_destroy_low(tmpfd);
@@ -3163,5 +3723,45 @@ func_exit:
mem_free(merge_files);
os_mem_free_large(block, block_size);
+ DICT_TF2_FLAG_UNSET(new_table, DICT_TF2_FTS_ADD_DOC_ID);
+
+ if (online && old_table == new_table && error != DB_SUCCESS) {
+ /* On error, flag all online secondary index creation
+ as aborted. */
+ for (i = 0; i < n_indexes; i++) {
+ ut_ad(!(indexes[i]->type & DICT_FTS));
+ ut_ad(*indexes[i]->name == TEMP_INDEX_PREFIX);
+ ut_ad(!dict_index_is_clust(indexes[i]));
+
+ /* Completed indexes should be dropped as
+ well, and indexes whose creation was aborted
+ should be dropped from the persistent
+ storage. However, at this point we can only
+ set some flags in the not-yet-published
+ indexes. These indexes will be dropped later
+ in row_merge_drop_indexes(), called by
+ rollback_inplace_alter_table(). */
+
+ switch (dict_index_get_online_status(indexes[i])) {
+ case ONLINE_INDEX_COMPLETE:
+ break;
+ case ONLINE_INDEX_CREATION:
+ rw_lock_x_lock(
+ dict_index_get_lock(indexes[i]));
+ row_log_abort_sec(indexes[i]);
+ indexes[i]->type |= DICT_CORRUPT;
+ rw_lock_x_unlock(
+ dict_index_get_lock(indexes[i]));
+ new_table->drop_aborted = TRUE;
+ /* fall through */
+ case ONLINE_INDEX_ABORTED_DROPPED:
+ case ONLINE_INDEX_ABORTED:
+ MONITOR_MUTEX_INC(
+ &dict_sys->mutex,
+ MONITOR_BACKGROUND_DROP_INDEX);
+ }
+ }
+ }
+
return(error);
}
diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc
index 7a07833fa16..f46d202eed8 100644
--- a/storage/innobase/row/row0mysql.cc
+++ b/storage/innobase/row/row0mysql.cc
@@ -30,6 +30,9 @@ Created 9/17/2000 Heikki Tuuri
#include "row0mysql.ic"
#endif
+#include <debug_sync.h>
+#include <my_dbug.h>
+
#include "row0ins.h"
#include "row0merge.h"
#include "row0sel.h"
@@ -42,6 +45,7 @@ Created 9/17/2000 Heikki Tuuri
#include "dict0load.h"
#include "dict0boot.h"
#include "dict0stats.h"
+#include "dict0stats_bg.h"
#include "trx0roll.h"
#include "trx0purge.h"
#include "trx0rec.h"
@@ -54,16 +58,16 @@ Created 9/17/2000 Heikki Tuuri
#include "ibuf0ibuf.h"
#include "fts0fts.h"
#include "fts0types.h"
-#include "srv0mon.h"
+#include "srv0start.h"
+#include "row0import.h"
+#include "m_string.h"
+#include "my_sys.h"
/** Provide optional 4.x backwards compatibility for 5.0 and above */
UNIV_INTERN ibool row_rollback_on_timeout = FALSE;
/** Chain node of the list of tables to drop in the background. */
-typedef struct row_mysql_drop_struct row_mysql_drop_t;
-
-/** Chain node of the list of tables to drop in the background. */
-struct row_mysql_drop_struct{
+struct row_mysql_drop_t{
char* table_name; /*!< table name */
UT_LIST_NODE_T(row_mysql_drop_t)row_mysql_drop_list;
/*!< list chain node */
@@ -82,7 +86,7 @@ more. Protected by row_drop_list_mutex. */
static UT_LIST_BASE_NODE_T(row_mysql_drop_t) row_mysql_drop_list;
/** Mutex protecting the background table drop list. */
-static mutex_t row_drop_list_mutex;
+static ib_mutex_t row_drop_list_mutex;
/** Flag: has row_mysql_drop_list been initialized? */
static ibool row_mysql_drop_list_inited = FALSE;
@@ -570,21 +574,21 @@ next_column:
/****************************************************************//**
Handles user errors and lock waits detected by the database engine.
-@return TRUE if it was a lock wait and we should continue running the
+@return true if it was a lock wait and we should continue running the
query thread and in that case the thr is ALREADY in the running state. */
UNIV_INTERN
-ibool
+bool
row_mysql_handle_errors(
/*====================*/
- ulint* new_err,/*!< out: possible new error encountered in
+ dberr_t* new_err,/*!< out: possible new error encountered in
lock wait, or if no new error, the value
of trx->error_state at the entry of this
function */
trx_t* trx, /*!< in: transaction */
- que_thr_t* thr, /*!< in: query thread */
- trx_savept_t* savept) /*!< in: savepoint or NULL */
+ que_thr_t* thr, /*!< in: query thread, or NULL */
+ trx_savept_t* savept) /*!< in: savepoint, or NULL */
{
- ulint err;
+ dberr_t err;
handle_new_error:
err = trx->error_state;
@@ -612,6 +616,7 @@ handle_new_error:
case DB_READ_ONLY:
case DB_FTS_INVALID_DOCID:
case DB_INTERRUPTED:
+ case DB_DICT_CHANGED:
if (savept) {
/* Roll back the latest, possibly incomplete
insertion or update */
@@ -631,7 +636,7 @@ handle_new_error:
*new_err = err;
- return(TRUE);
+ return(true);
case DB_DEADLOCK:
case DB_LOCK_TABLE_FULL:
@@ -648,6 +653,7 @@ handle_new_error:
" a new data file to\n"
"InnoDB: my.cnf and restart the database.\n", stderr);
+ ut_ad(0);
exit(1);
case DB_CORRUPTION:
@@ -686,7 +692,7 @@ handle_new_error:
trx->error_state = DB_SUCCESS;
- return(FALSE);
+ return(false);
}
/********************************************************************//**
@@ -774,7 +780,7 @@ row_create_prebuilt(
prebuilt->clust_ref = ref;
- prebuilt->autoinc_error = 0;
+ prebuilt->autoinc_error = DB_SUCCESS;
prebuilt->autoinc_offset = 0;
/* Default to 1, we will set the actual value later in
@@ -883,7 +889,7 @@ row_prebuilt_free(
mem_free(base);
}
- dict_table_close(prebuilt->table, dict_locked);
+ dict_table_close(prebuilt->table, dict_locked, TRUE);
mem_heap_free(prebuilt->heap);
}
@@ -950,44 +956,62 @@ row_get_prebuilt_insert_row(
row_prebuilt_t* prebuilt) /*!< in: prebuilt struct in MySQL
handle */
{
- ins_node_t* node;
- dtuple_t* row;
- dict_table_t* table = prebuilt->table;
+ dict_table_t* table = prebuilt->table;
ut_ad(prebuilt && table && prebuilt->trx);
- if (prebuilt->ins_node == NULL) {
-
- /* Not called before for this handle: create an insert node
- and query graph to the prebuilt struct */
+ if (prebuilt->ins_node != 0) {
- node = ins_node_create(INS_DIRECT, table, prebuilt->heap);
+ /* Check if indexes have been dropped or added and we
+ may need to rebuild the row insert template. */
- prebuilt->ins_node = node;
+ if (prebuilt->trx_id == table->def_trx_id
+ && UT_LIST_GET_LEN(prebuilt->ins_node->entry_list)
+ == UT_LIST_GET_LEN(table->indexes)) {
- if (prebuilt->ins_upd_rec_buff == NULL) {
- prebuilt->ins_upd_rec_buff = static_cast<byte*>(
- mem_heap_alloc(
- prebuilt->heap,
- prebuilt->mysql_row_len));
+ return(prebuilt->ins_node->row);
}
- row = dtuple_create(prebuilt->heap,
- dict_table_get_n_cols(table));
+ ut_ad(prebuilt->trx_id < table->def_trx_id);
- dict_table_copy_types(row, table);
+ que_graph_free_recursive(prebuilt->ins_graph);
- ins_node_set_new_row(node, row);
+ prebuilt->ins_graph = 0;
+ }
- prebuilt->ins_graph = static_cast<que_fork_t*>(
- que_node_get_parent(
- pars_complete_graph_for_exec(
- node,
- prebuilt->trx, prebuilt->heap)));
+ /* Create an insert node and query graph to the prebuilt struct */
- prebuilt->ins_graph->state = QUE_FORK_ACTIVE;
+ ins_node_t* node;
+
+ node = ins_node_create(INS_DIRECT, table, prebuilt->heap);
+
+ prebuilt->ins_node = node;
+
+ if (prebuilt->ins_upd_rec_buff == 0) {
+ prebuilt->ins_upd_rec_buff = static_cast<byte*>(
+ mem_heap_alloc(
+ prebuilt->heap,
+ prebuilt->mysql_row_len));
}
+ dtuple_t* row;
+
+ row = dtuple_create(prebuilt->heap, dict_table_get_n_cols(table));
+
+ dict_table_copy_types(row, table);
+
+ ins_node_set_new_row(node, row);
+
+ prebuilt->ins_graph = static_cast<que_fork_t*>(
+ que_node_get_parent(
+ pars_complete_graph_for_exec(
+ node,
+ prebuilt->trx, prebuilt->heap)));
+
+ prebuilt->ins_graph->state = QUE_FORK_ACTIVE;
+
+ prebuilt->trx_id = table->def_trx_id;
+
return(prebuilt->ins_node->row);
}
@@ -1000,23 +1024,41 @@ row_update_statistics_if_needed(
/*============================*/
dict_table_t* table) /*!< in: table */
{
- ulint counter;
+ ib_uint64_t counter;
+ ib_uint64_t n_rows;
+
+ if (!table->stat_initialized) {
+ DBUG_EXECUTE_IF(
+ "test_upd_stats_if_needed_not_inited",
+ fprintf(stderr, "test_upd_stats_if_needed_not_inited "
+ "was executed\n");
+ );
+ return;
+ }
- counter = table->stat_modified_counter;
+ counter = table->stat_modified_counter++;
+ n_rows = dict_table_get_n_rows(table);
- table->stat_modified_counter = counter + 1;
+ if (dict_stats_is_persistent_enabled(table)) {
+ if (counter > n_rows / 10 /* 10% */
+ && dict_stats_auto_recalc_is_enabled(table)) {
+
+ dict_stats_recalc_pool_add(table);
+ table->stat_modified_counter = 0;
+ }
+ return;
+ }
/* Calculate new statistics if 1 / 16 of table has been modified
- since the last time a statistics batch was run, or if
- stat_modified_counter > 2 000 000 000 (to avoid wrap-around).
+ since the last time a statistics batch was run.
We calculate statistics at most every 16th round, since we may have
a counter table which is very small and updated very often. */
- if (counter > 2000000000
- || ((ib_int64_t) counter > 16 + table->stat_n_rows / 16)) {
+ if (counter > 16 + n_rows / 16 /* 6.25% */) {
ut_ad(!mutex_own(&dict_sys->mutex));
- dict_stats_update(table, DICT_STATS_FETCH, FALSE);
+ /* this will reset table->stat_modified_counter to 0 */
+ dict_stats_update(table, DICT_STATS_RECALC_TRANSIENT);
}
}
@@ -1028,7 +1070,7 @@ It is not compatible with another AUTO_INC or exclusive lock on the
table.
@return error code or DB_SUCCESS */
UNIV_INTERN
-int
+dberr_t
row_lock_table_autoinc_for_mysql(
/*=============================*/
row_prebuilt_t* prebuilt) /*!< in: prebuilt struct in the MySQL
@@ -1038,7 +1080,7 @@ row_lock_table_autoinc_for_mysql(
ins_node_t* node = prebuilt->ins_node;
const dict_table_t* table = prebuilt->table;
que_thr_t* thr;
- ulint err;
+ dberr_t err;
ibool was_lock_wait;
ut_ad(trx);
@@ -1053,10 +1095,8 @@ row_lock_table_autoinc_for_mysql(
trx->op_info = "setting auto-inc lock";
- if (node == NULL) {
- row_get_prebuilt_insert_row(prebuilt);
- node = prebuilt->ins_node;
- }
+ row_get_prebuilt_insert_row(prebuilt);
+ node = prebuilt->ins_node;
/* We use the insert query graph as the dummy graph needed
in the lock module call */
@@ -1076,7 +1116,7 @@ run_again:
err = lock_table(0, prebuilt->table, LOCK_AUTO_INC, thr);
- trx->error_state = static_cast<enum db_err>(err);
+ trx->error_state = err;
if (err != DB_SUCCESS) {
que_thr_stop_for_mysql(thr);
@@ -1089,21 +1129,21 @@ run_again:
trx->op_info = "";
- return((int) err);
+ return(err);
}
que_thr_stop_for_mysql_no_error(thr, trx);
trx->op_info = "";
- return((int) err);
+ return(err);
}
/*********************************************************************//**
Sets a table lock on the table mentioned in prebuilt.
@return error code or DB_SUCCESS */
UNIV_INTERN
-int
+dberr_t
row_lock_table_for_mysql(
/*=====================*/
row_prebuilt_t* prebuilt, /*!< in: prebuilt struct in the MySQL
@@ -1117,7 +1157,7 @@ row_lock_table_for_mysql(
{
trx_t* trx = prebuilt->trx;
que_thr_t* thr;
- ulint err;
+ dberr_t err;
ibool was_lock_wait;
ut_ad(trx);
@@ -1157,7 +1197,7 @@ run_again:
thr);
}
- trx->error_state = static_cast<enum db_err>(err);
+ trx->error_state = err;
if (err != DB_SUCCESS) {
que_thr_stop_for_mysql(thr);
@@ -1170,21 +1210,21 @@ run_again:
trx->op_info = "";
- return((int) err);
+ return(err);
}
que_thr_stop_for_mysql_no_error(thr, trx);
trx->op_info = "";
- return((int) err);
+ return(err);
}
/*********************************************************************//**
Does an insert for MySQL.
@return error code or DB_SUCCESS */
UNIV_INTERN
-int
+dberr_t
row_insert_for_mysql(
/*=================*/
byte* mysql_rec, /*!< in: row in the MySQL format */
@@ -1193,7 +1233,7 @@ row_insert_for_mysql(
{
trx_savept_t savept;
que_thr_t* thr;
- ulint err;
+ dberr_t err;
ibool was_lock_wait;
trx_t* trx = prebuilt->trx;
ins_node_t* node = prebuilt->ins_node;
@@ -1201,24 +1241,23 @@ row_insert_for_mysql(
ut_ad(trx);
- if (table->ibd_file_missing) {
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Error:\n"
- "InnoDB: MySQL is trying to use a table handle"
- " but the .ibd file for\n"
- "InnoDB: table %s does not exist.\n"
- "InnoDB: Have you deleted the .ibd file"
- " from the database directory under\n"
- "InnoDB: the MySQL datadir, or have you"
- " used DISCARD TABLESPACE?\n"
- "InnoDB: Look from\n"
- "InnoDB: " REFMAN "innodb-troubleshooting.html\n"
- "InnoDB: how you can resolve the problem.\n",
+ if (dict_table_is_discarded(prebuilt->table)) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "The table %s doesn't have a corresponding "
+ "tablespace, it was discarded.",
prebuilt->table->name);
- return(DB_ERROR);
- }
- if (UNIV_UNLIKELY(prebuilt->magic_n != ROW_PREBUILT_ALLOCATED)) {
+ return(DB_TABLESPACE_DELETED);
+
+ } else if (prebuilt->table->ibd_file_missing) {
+
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ ".ibd file is missing for table %s",
+ prebuilt->table->name);
+
+ return(DB_TABLESPACE_NOT_FOUND);
+
+ } else if (prebuilt->magic_n != ROW_PREBUILT_ALLOCATED) {
fprintf(stderr,
"InnoDB: Error: trying to free a corrupt\n"
"InnoDB: table handle. Magic n %lu, table name ",
@@ -1229,9 +1268,7 @@ row_insert_for_mysql(
mem_analyze_corruption(prebuilt);
ut_error;
- }
-
- if (UNIV_UNLIKELY(srv_created_new_raw || srv_force_recovery)) {
+ } else if (srv_created_new_raw || srv_force_recovery) {
fputs("InnoDB: A new raw disk partition was initialized or\n"
"InnoDB: innodb_force_recovery is on: we do not allow\n"
"InnoDB: database modifications by the user. Shut down\n"
@@ -1249,10 +1286,8 @@ row_insert_for_mysql(
trx_start_if_not_started_xa(trx);
- if (node == NULL) {
- row_get_prebuilt_insert_row(prebuilt);
- node = prebuilt->ins_node;
- }
+ row_get_prebuilt_insert_row(prebuilt);
+ node = prebuilt->ins_node;
row_mysql_convert_row_to_innobase(node->row, prebuilt, mysql_rec);
@@ -1290,12 +1325,14 @@ error_exit:
thr->lock_state = QUE_THR_LOCK_NOLOCK;
if (was_lock_wait) {
+ ut_ad(node->state == INS_NODE_INSERT_ENTRIES
+ || node->state == INS_NODE_ALLOC_ROW_ID);
goto run_again;
}
trx->op_info = "";
- return((int) err);
+ return(err);
}
if (dict_table_has_fts_index(table)) {
@@ -1353,19 +1390,18 @@ error_exit:
que_thr_stop_for_mysql_no_error(thr, trx);
- table->stat_n_rows++;
+ srv_stats.n_rows_inserted.add((size_t)trx->id, 1);
- srv_n_rows_inserted++;
-
- if (prebuilt->table->stat_n_rows == 0) {
- /* Avoid wrap-over */
- table->stat_n_rows--;
- }
+ /* Not protected by dict_table_stats_lock() for performance
+ reasons, we would rather get garbage in stat_n_rows (which is
+ just an estimate anyway) than protecting the following code
+ with a latch. */
+ dict_table_n_rows_inc(table);
row_update_statistics_if_needed(table);
trx->op_info = "";
- return((int) err);
+ return(err);
}
/*********************************************************************//**
@@ -1490,7 +1526,7 @@ row_fts_do_update(
Handles FTS matters for an update or a delete.
NOTE: should not be called if the table does not have an FTS index. .*/
static
-ulint
+dberr_t
row_fts_update_or_delete(
/*=====================*/
row_prebuilt_t* prebuilt) /* in: prebuilt struct in MySQL
@@ -1530,16 +1566,18 @@ void
init_fts_doc_id_for_ref(
/*====================*/
dict_table_t* table, /*!< in: table */
- ulint depth) /*!< in: recusive call depth */
+ ulint* depth) /*!< in: recusive call depth */
{
dict_foreign_t* foreign;
foreign = UT_LIST_GET_FIRST(table->referenced_list);
- depth++;
+ table->fk_max_recusive_level = 0;
+
+ (*depth)++;
/* Limit on tables involved in cascading delete/update */
- if (depth > FK_MAX_CASCADE_DEL) {
+ if (*depth > FK_MAX_CASCADE_DEL) {
return;
}
@@ -1563,7 +1601,7 @@ init_fts_doc_id_for_ref(
Does an update or delete of a row for MySQL.
@return error code or DB_SUCCESS */
UNIV_INTERN
-int
+dberr_t
row_update_for_mysql(
/*=================*/
byte* mysql_rec, /*!< in: the row to be updated, in
@@ -1572,7 +1610,7 @@ row_update_for_mysql(
handle */
{
trx_savept_t savept;
- ulint err;
+ dberr_t err;
que_thr_t* thr;
ibool was_lock_wait;
dict_index_t* clust_index;
@@ -1580,6 +1618,7 @@ row_update_for_mysql(
upd_node_t* node;
dict_table_t* table = prebuilt->table;
trx_t* trx = prebuilt->trx;
+ ulint fk_depth = 0;
ut_ad(prebuilt && trx);
UT_NOT_USED(mysql_rec);
@@ -1626,14 +1665,26 @@ row_update_for_mysql(
return(DB_ERROR);
}
+ DEBUG_SYNC_C("innodb_row_update_for_mysql_begin");
+
trx->op_info = "updating or deleting";
row_mysql_delay_if_needed();
- init_fts_doc_id_for_ref(table, 0);
-
trx_start_if_not_started_xa(trx);
+ if (dict_table_is_referenced_by_foreign_key(table)) {
+ /* Share lock the data dictionary to prevent any
+ table dictionary (for foreign constraint) change.
+ This is similar to row_ins_check_foreign_constraint
+ check protect by the dictionary lock as well.
+ In the future, this can be removed once the Foreign
+ key MDL is implemented */
+ row_mysql_freeze_data_dictionary(trx);
+ init_fts_doc_id_for_ref(table, &fk_depth);
+ row_mysql_unfreeze_data_dictionary(trx);
+ }
+
node = prebuilt->upd_node;
clust_index = dict_table_get_first_index(table);
@@ -1683,10 +1734,13 @@ run_again:
trx->error_state = DB_SUCCESS;
trx->op_info = "";
- return((int) err);
+ return(err);
}
thr->lock_state= QUE_THR_LOCK_ROW;
+
+ DEBUG_SYNC(trx->mysql_thd, "row_update_for_mysql_error");
+
was_lock_wait = row_mysql_handle_errors(&err, trx, thr,
&savept);
thr->lock_state= QUE_THR_LOCK_NOLOCK;
@@ -1697,7 +1751,7 @@ run_again:
trx->op_info = "";
- return((int) err);
+ return(err);
}
que_thr_stop_for_mysql_no_error(thr, trx);
@@ -1707,18 +1761,20 @@ run_again:
err = row_fts_update_or_delete(prebuilt);
if (err != DB_SUCCESS) {
trx->op_info = "";
- return((int) err);
+ return(err);
}
}
if (node->is_delete) {
- if (prebuilt->table->stat_n_rows > 0) {
- prebuilt->table->stat_n_rows--;
- }
+ /* Not protected by dict_table_stats_lock() for performance
+ reasons, we would rather get garbage in stat_n_rows (which is
+ just an estimate anyway) than protecting the following code
+ with a latch. */
+ dict_table_n_rows_dec(prebuilt->table);
- srv_n_rows_deleted++;
+ srv_stats.n_rows_deleted.add((size_t)trx->id, 1);
} else {
- srv_n_rows_updated++;
+ srv_stats.n_rows_updated.add((size_t)trx->id, 1);
}
/* We update table statistics only if it is a DELETE or UPDATE
@@ -1730,7 +1786,7 @@ run_again:
trx->op_info = "";
- return((int) err);
+ return(err);
}
/*********************************************************************//**
@@ -1744,7 +1800,7 @@ prebuilt->clust_pcur. Thus, this implements a 'mini-rollback' that
releases the latest clustered index record lock we set.
@return error code or DB_SUCCESS */
UNIV_INTERN
-int
+void
row_unlock_for_mysql(
/*=================*/
row_prebuilt_t* prebuilt, /*!< in/out: prebuilt struct in MySQL
@@ -1770,8 +1826,7 @@ row_unlock_for_mysql(
"InnoDB: innodb_locks_unsafe_for_binlog is FALSE and\n"
"InnoDB: this session is not using"
" READ COMMITTED isolation level.\n");
-
- return(DB_SUCCESS);
+ return;
}
trx->op_info = "unlock_row";
@@ -1863,15 +1918,13 @@ no_unlock:
}
trx->op_info = "";
-
- return(DB_SUCCESS);
}
/**********************************************************************//**
Does a cascaded delete or set null in a foreign key operation.
@return error code or DB_SUCCESS */
UNIV_INTERN
-ulint
+dberr_t
row_update_cascade_for_mysql(
/*=========================*/
que_thr_t* thr, /*!< in: query thread */
@@ -1879,7 +1932,7 @@ row_update_cascade_for_mysql(
or set null operation */
dict_table_t* table) /*!< in: table where we do the operation */
{
- ulint err;
+ dberr_t err;
trx_t* trx;
trx = thr_get_trx(thr);
@@ -1890,12 +1943,14 @@ row_update_cascade_for_mysql(
thr->fk_cascade_depth++;
if (thr->fk_cascade_depth > FK_MAX_CASCADE_DEL) {
- return (DB_FOREIGN_EXCEED_MAX_CASCADE);
+ return(DB_FOREIGN_EXCEED_MAX_CASCADE);
}
run_again:
thr->run_node = node;
thr->prev_node = node;
+ DEBUG_SYNC_C("foreign_constraint_update_cascade");
+
row_upd_step(thr);
/* The recursive call for cascading update/delete happens
@@ -1937,13 +1992,15 @@ run_again:
}
if (node->is_delete) {
- if (table->stat_n_rows > 0) {
- table->stat_n_rows--;
- }
+ /* Not protected by dict_table_stats_lock() for performance
+ reasons, we would rather get garbage in stat_n_rows (which is
+ just an estimate anyway) than protecting the following code
+ with a latch. */
+ dict_table_n_rows_dec(table);
- srv_n_rows_deleted++;
+ srv_stats.n_rows_deleted.add((size_t)trx->id, 1);
} else {
- srv_n_rows_updated++;
+ srv_stats.n_rows_updated.add((size_t)trx->id, 1);
}
row_update_statistics_if_needed(table);
@@ -1981,7 +2038,7 @@ row_mysql_freeze_data_dictionary_func(
{
ut_a(trx->dict_operation_lock_mode == 0);
- rw_lock_s_lock_func(&dict_operation_lock, 0, file, line);
+ rw_lock_s_lock_inline(&dict_operation_lock, 0, file, line);
trx->dict_operation_lock_mode = RW_S_LATCH;
}
@@ -1994,6 +2051,8 @@ row_mysql_unfreeze_data_dictionary(
/*===============================*/
trx_t* trx) /*!< in/out: transaction */
{
+ ut_ad(lock_trx_has_sys_table_locks(trx) == NULL);
+
ut_a(trx->dict_operation_lock_mode == RW_S_LATCH);
rw_lock_s_unlock(&dict_operation_lock);
@@ -2018,7 +2077,7 @@ row_mysql_lock_data_dictionary_func(
/* Serialize data dictionary operations with dictionary mutex:
no deadlocks or lock waits can occur then in these operations */
- rw_lock_x_lock_func(&dict_operation_lock, 0, file, line);
+ rw_lock_x_lock_inline(&dict_operation_lock, 0, file, line);
trx->dict_operation_lock_mode = RW_X_LATCH;
mutex_enter(&(dict_sys->mutex));
@@ -2032,6 +2091,8 @@ row_mysql_unlock_data_dictionary(
/*=============================*/
trx_t* trx) /*!< in/out: transaction */
{
+ ut_ad(lock_trx_has_sys_table_locks(trx) == NULL);
+
ut_a(trx->dict_operation_lock_mode == RW_X_LATCH);
/* Serialize data dictionary operations with dictionary mutex:
@@ -2052,19 +2113,21 @@ InnoDB will try to invoke mem_validate(). On failure the transaction will
be rolled back and the 'table' object will be freed.
@return error code or DB_SUCCESS */
UNIV_INTERN
-int
+dberr_t
row_create_table_for_mysql(
/*=======================*/
dict_table_t* table, /*!< in, own: table definition
- (will be freed) */
- trx_t* trx) /*!< in: transaction handle */
+ (will be freed, or on DB_SUCCESS
+ added to the data dictionary cache) */
+ trx_t* trx, /*!< in/out: transaction */
+ bool commit) /*!< in: if true, commit the transaction */
{
tab_node_t* node;
mem_heap_t* heap;
que_thr_t* thr;
const char* table_name;
ulint table_name_len;
- ulint err;
+ dberr_t err;
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
@@ -2072,6 +2135,11 @@ row_create_table_for_mysql(
ut_ad(mutex_own(&(dict_sys->mutex)));
ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH);
+ DBUG_EXECUTE_IF(
+ "ib_create_table_fail_at_start_of_row_create_table_for_mysql",
+ goto err_exit;
+ );
+
if (srv_created_new_raw) {
fputs("InnoDB: A new raw disk partition was initialized:\n"
"InnoDB: we do not allow database modifications"
@@ -2080,7 +2148,10 @@ row_create_table_for_mysql(
" is replaced with raw.\n", stderr);
err_exit:
dict_mem_table_free(table);
- trx_commit_for_mysql(trx);
+
+ if (commit) {
+ trx_commit_for_mysql(trx);
+ }
return(DB_ERROR);
}
@@ -2117,23 +2188,23 @@ err_exit:
/* The lock timeout monitor thread also takes care
of InnoDB monitor prints */
- os_event_set(srv_timeout_event);
+ os_event_set(lock_sys->timeout_event);
} else if (STR_EQ(table_name, table_name_len,
S_innodb_lock_monitor)) {
srv_print_innodb_monitor = TRUE;
srv_print_innodb_lock_monitor = TRUE;
- os_event_set(srv_timeout_event);
+ os_event_set(lock_sys->timeout_event);
} else if (STR_EQ(table_name, table_name_len,
S_innodb_tablespace_monitor)) {
srv_print_innodb_tablespace_monitor = TRUE;
- os_event_set(srv_timeout_event);
+ os_event_set(lock_sys->timeout_event);
} else if (STR_EQ(table_name, table_name_len,
S_innodb_table_monitor)) {
srv_print_innodb_table_monitor = TRUE;
- os_event_set(srv_timeout_event);
+ os_event_set(lock_sys->timeout_event);
#ifdef UNIV_MEM_DEBUG
} else if (STR_EQ(table_name, table_name_len,
S_innodb_mem_validate)) {
@@ -2152,12 +2223,21 @@ err_exit:
#endif /* UNIV_MEM_DEBUG */
}
-
heap = mem_heap_create(512);
- trx_set_dict_operation(trx, TRX_DICT_OP_TABLE);
+ switch (trx_get_dict_operation(trx)) {
+ case TRX_DICT_OP_NONE:
+ trx_set_dict_operation(trx, TRX_DICT_OP_TABLE);
+ case TRX_DICT_OP_TABLE:
+ break;
+ case TRX_DICT_OP_INDEX:
+ /* If the transaction was previously flagged as
+ TRX_DICT_OP_INDEX, we should be creating auxiliary
+ tables for full-text indexes. */
+ ut_ad(strstr(table->name, "/FTS_") != NULL);
+ }
- node = tab_create_graph_create(table, heap);
+ node = tab_create_graph_create(table, heap, commit);
thr = pars_complete_graph_for_exec(node, trx, heap);
@@ -2168,6 +2248,29 @@ err_exit:
err = trx->error_state;
+ if (table->space != TRX_SYS_SPACE) {
+ ut_a(DICT_TF2_FLAG_IS_SET(table, DICT_TF2_USE_TABLESPACE));
+
+ /* Update SYS_TABLESPACES and SYS_DATAFILES if a new
+ tablespace was created. */
+ if (err == DB_SUCCESS) {
+ char* path;
+ path = fil_space_get_first_path(table->space);
+
+ err = dict_create_add_tablespace_to_dictionary(
+ table->space, table->name,
+ fil_space_get_flags(table->space),
+ path, trx, commit);
+
+ mem_free(path);
+ }
+
+ if (err != DB_SUCCESS) {
+ /* We must delete the link file. */
+ fil_delete_link_file(table->name);
+ }
+ }
+
switch (err) {
case DB_SUCCESS:
break;
@@ -2181,8 +2284,8 @@ err_exit:
ut_print_name(stderr, trx, TRUE, table->name);
fputs(" because tablespace full\n", stderr);
- if (dict_table_open_on_name_no_stats(
- table->name, FALSE, DICT_ERR_IGNORE_NONE)) {
+ if (dict_table_open_on_name(table->name, TRUE, FALSE,
+ DICT_ERR_IGNORE_NONE)) {
/* Make things easy for the drop table code. */
@@ -2190,10 +2293,13 @@ err_exit:
dict_table_move_from_lru_to_non_lru(table);
}
- dict_table_close(table, FALSE);
+ dict_table_close(table, TRUE, FALSE);
row_drop_table_for_mysql(table->name, trx, FALSE);
- trx_commit_for_mysql(trx);
+
+ if (commit) {
+ trx_commit_for_mysql(trx);
+ }
} else {
dict_mem_table_free(table);
}
@@ -2203,7 +2309,12 @@ err_exit:
case DB_TOO_MANY_CONCURRENT_TRXS:
/* We already have .ibd file here. it should be deleted. */
- if (table->space && !fil_delete_tablespace(table->space)) {
+ if (table->space
+ && fil_delete_tablespace(
+ table->space,
+ BUF_REMOVE_FLUSH_NO_WRITE)
+ != DB_SUCCESS) {
+
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Error: not able to"
@@ -2215,10 +2326,8 @@ err_exit:
/* fall through */
case DB_DUPLICATE_KEY:
+ case DB_TABLESPACE_EXISTS:
default:
- /* We may also get err == DB_ERROR if the .ibd file for the
- table already exists */
-
trx->error_state = DB_SUCCESS;
trx_rollback_to_savepoint(trx, NULL);
dict_mem_table_free(table);
@@ -2229,7 +2338,7 @@ err_exit:
trx->op_info = "";
- return((int) err);
+ return(err);
}
/*********************************************************************//**
@@ -2238,7 +2347,7 @@ to create an index results in dropping the whole table! This is no problem
currently as all indexes must be created at the same time as the table.
@return error number or DB_SUCCESS */
UNIV_INTERN
-int
+dberr_t
row_create_index_for_mysql(
/*=======================*/
dict_index_t* index, /*!< in, own: index definition
@@ -2254,13 +2363,13 @@ row_create_index_for_mysql(
ind_node_t* node;
mem_heap_t* heap;
que_thr_t* thr;
- ulint err;
+ dberr_t err;
ulint i;
ulint len;
char* table_name;
char* index_name;
dict_table_t* table;
- ibool is_fts = FALSE;
+ ibool is_fts;
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
@@ -2277,8 +2386,8 @@ row_create_index_for_mysql(
is_fts = (index->type == DICT_FTS);
- table = dict_table_open_on_name_no_stats(table_name, TRUE,
- DICT_ERR_IGNORE_NONE);
+ table = dict_table_open_on_name(table_name, TRUE, TRUE,
+ DICT_ERR_IGNORE_NONE);
trx_start_if_not_started_xa(trx);
@@ -2292,6 +2401,11 @@ row_create_index_for_mysql(
len = ut_max(len, field_lengths[i]);
}
+ DBUG_EXECUTE_IF(
+ "ib_create_table_fail_at_create_index",
+ len = DICT_MAX_FIELD_LEN_BY_FORMAT(table) + 1;
+ );
+
/* Column or prefix length exceeds maximum column length */
if (len > (ulint) DICT_MAX_FIELD_LEN_BY_FORMAT(table)) {
err = DB_TOO_BIG_INDEX_COL;
@@ -2308,7 +2422,7 @@ row_create_index_for_mysql(
/* Note that the space id where we store the index is inherited from
the table in dict_build_index_def_step() in dict0crea.cc. */
- node = ind_create_graph_create(index, heap);
+ node = ind_create_graph_create(index, heap, true);
thr = pars_complete_graph_for_exec(node, trx, heap);
@@ -2332,7 +2446,7 @@ row_create_index_for_mysql(
}
error_handling:
- dict_table_close(table, TRUE);
+ dict_table_close(table, TRUE, FALSE);
if (err != DB_SUCCESS) {
/* We have special error handling here */
@@ -2353,7 +2467,7 @@ error_handling:
mem_free(table_name);
mem_free(index_name);
- return((int) err);
+ return(err);
}
/*********************************************************************//**
@@ -2366,7 +2480,7 @@ fields than mentioned in the constraint. Check also that foreign key
constraints which reference this table are ok.
@return error code or DB_SUCCESS */
UNIV_INTERN
-int
+dberr_t
row_table_add_foreign_constraints(
/*==============================*/
trx_t* trx, /*!< in: transaction */
@@ -2383,7 +2497,7 @@ row_table_add_foreign_constraints(
code DB_CANNOT_ADD_CONSTRAINT if
any foreign keys are found. */
{
- ulint err;
+ dberr_t err;
ut_ad(mutex_own(&(dict_sys->mutex)));
#ifdef UNIV_SYNC_DEBUG
@@ -2399,6 +2513,12 @@ row_table_add_foreign_constraints(
err = dict_create_foreign_constraints(trx, sql_string, sql_length,
name, reject_fks);
+
+ DBUG_EXECUTE_IF("ib_table_add_foreign_fail",
+ err = DB_DUPLICATE_KEY;);
+
+ DEBUG_SYNC_C("table_add_foreign_constraints");
+
if (err == DB_SUCCESS) {
/* Check that also referencing constraints are ok */
err = dict_load_foreigns(name, FALSE, TRUE);
@@ -2418,7 +2538,7 @@ row_table_add_foreign_constraints(
trx->error_state = DB_SUCCESS;
}
- return((int) err);
+ return(err);
}
/*********************************************************************//**
@@ -2430,12 +2550,12 @@ as a background operation, which is taken care of by the master thread
in srv0srv.cc.
@return error code or DB_SUCCESS */
static
-int
+dberr_t
row_drop_table_for_mysql_in_background(
/*===================================*/
const char* name) /*!< in: table name */
{
- ulint error;
+ dberr_t error;
trx_t* trx;
trx = trx_allocate_for_background();
@@ -2464,7 +2584,7 @@ row_drop_table_for_mysql_in_background(
trx_free_for_background(trx);
- return((int) error);
+ return(error);
}
/*********************************************************************//**
@@ -2498,8 +2618,8 @@ loop:
return(n_tables + n_tables_dropped);
}
- table = dict_table_open_on_name_no_stats(drop->table_name, FALSE,
- DICT_ERR_IGNORE_NONE);
+ table = dict_table_open_on_name(drop->table_name, FALSE, FALSE,
+ DICT_ERR_IGNORE_NONE);
if (table == NULL) {
/* If for some reason the table has already been dropped
@@ -2510,7 +2630,7 @@ loop:
ut_a(!table->can_be_evicted);
- dict_table_close(table, FALSE);
+ dict_table_close(table, FALSE, FALSE);
if (DB_SUCCESS != row_drop_table_for_mysql_in_background(
drop->table_name)) {
@@ -2617,356 +2737,429 @@ row_add_table_to_background_drop_list(
}
/*********************************************************************//**
-Discards the tablespace of a table which stored in an .ibd file. Discarding
-means that this function deletes the .ibd file and assigns a new table id for
-the table. Also the flag table->ibd_file_missing is set TRUE.
+Reassigns the table identifier of a table.
@return error code or DB_SUCCESS */
UNIV_INTERN
-int
-row_discard_tablespace_for_mysql(
-/*=============================*/
- const char* name, /*!< in: table name */
- trx_t* trx) /*!< in: transaction handle */
+dberr_t
+row_mysql_table_id_reassign(
+/*========================*/
+ dict_table_t* table, /*!< in/out: table */
+ trx_t* trx, /*!< in/out: transaction */
+ table_id_t* new_id) /*!< out: new table id */
{
- dict_foreign_t* foreign;
- table_id_t new_id;
- dict_table_t* table;
- ibool success;
- ulint err;
- pars_info_t* info = NULL;
+ dberr_t err;
+ pars_info_t* info = pars_info_create();
- /* How do we prevent crashes caused by ongoing operations on
- the table? Old operations could try to access non-existent
- pages.
+ dict_hdr_get_new_id(new_id, NULL, NULL);
- 1) SQL queries, INSERT, SELECT, ...: we must get an exclusive
- MySQL table lock on the table before we can do DISCARD
- TABLESPACE. Then there are no running queries on the table.
+ /* Remove all locks except the table-level S and X locks. */
+ lock_remove_all_on_table(table, FALSE);
- 2) Purge and rollback: we assign a new table id for the
- table. Since purge and rollback look for the table based on
- the table id, they see the table as 'dropped' and discard
- their operations.
+ pars_info_add_ull_literal(info, "old_id", table->id);
+ pars_info_add_ull_literal(info, "new_id", *new_id);
+
+ err = que_eval_sql(
+ info,
+ "PROCEDURE RENUMBER_TABLE_PROC () IS\n"
+ "BEGIN\n"
+ "UPDATE SYS_TABLES SET ID = :new_id\n"
+ " WHERE ID = :old_id;\n"
+ "UPDATE SYS_COLUMNS SET TABLE_ID = :new_id\n"
+ " WHERE TABLE_ID = :old_id;\n"
+ "UPDATE SYS_INDEXES SET TABLE_ID = :new_id\n"
+ " WHERE TABLE_ID = :old_id;\n"
+ "END;\n", FALSE, trx);
- 3) Insert buffer: we remove all entries for the tablespace in
- the insert buffer tree; as long as the tablespace mem object
- does not exist, ongoing insert buffer page merges are
- discarded in buf0rea.cc. If we recreate the tablespace mem
- object with IMPORT TABLESPACE later, then the tablespace will
- have the same id, but the tablespace_version field in the mem
- object is different, and ongoing old insert buffer page merges
- get discarded.
+ return(err);
+}
- 4) Linear readahead and random readahead: we use the same
- method as in 3) to discard ongoing operations.
+/*********************************************************************//**
+Setup the pre-requisites for DISCARD TABLESPACE. It will start the transaction,
+acquire the data dictionary lock in X mode and open the table.
+@return table instance or 0 if not found. */
+static
+dict_table_t*
+row_discard_tablespace_begin(
+/*=========================*/
+ const char* name, /*!< in: table name */
+ trx_t* trx) /*!< in: transaction handle */
+{
+ trx->op_info = "discarding tablespace";
- 5) FOREIGN KEY operations: if
- table->n_foreign_key_checks_running > 0, we do not allow the
- discard. We also reserve the data dictionary latch. */
+ trx_set_dict_operation(trx, TRX_DICT_OP_TABLE);
- trx->op_info = "discarding tablespace";
trx_start_if_not_started_xa(trx);
/* Serialize data dictionary operations with dictionary mutex:
- no deadlocks can occur then in these operations */
+ this is to avoid deadlocks during data dictionary operations */
row_mysql_lock_data_dictionary(trx);
- table = dict_table_open_on_name_no_stats(name, TRUE,
- DICT_ERR_IGNORE_NONE);
-
- if (!table) {
- err = DB_TABLE_NOT_FOUND;
-
- goto funct_exit;
- }
+ dict_table_t* table;
- if (table->space == 0) {
- ut_print_timestamp(stderr);
- fputs(" InnoDB: Error: table ", stderr);
- ut_print_name(stderr, trx, TRUE, name);
- fputs("\n"
- "InnoDB: is in the system tablespace 0"
- " which cannot be discarded\n", stderr);
- err = DB_ERROR;
+ table = dict_table_open_on_name(
+ name, TRUE, FALSE, DICT_ERR_IGNORE_NONE);
- goto funct_exit;
+ if (table) {
+ dict_stats_wait_bg_to_stop_using_tables(table, NULL, trx);
+ ut_a(table->space != TRX_SYS_SPACE);
+ ut_a(table->n_foreign_key_checks_running == 0);
}
- if (table->n_foreign_key_checks_running > 0) {
-
- ut_print_timestamp(stderr);
- fputs(" InnoDB: You are trying to DISCARD table ", stderr);
- ut_print_name(stderr, trx, TRUE, table->name);
- fputs("\n"
- "InnoDB: though there is a foreign key check"
- " running on it.\n"
- "InnoDB: Cannot discard the table.\n",
- stderr);
-
- err = DB_ERROR;
+ return(table);
+}
- goto funct_exit;
- }
+/*********************************************************************//**
+Do the foreign key constraint checks.
+@return DB_SUCCESS or error code. */
+static
+dberr_t
+row_discard_tablespace_foreign_key_checks(
+/*======================================*/
+ const trx_t* trx, /*!< in: transaction handle */
+ const dict_table_t* table) /*!< in: table to be discarded */
+{
+ const dict_foreign_t* foreign;
/* Check if the table is referenced by foreign key constraints from
some other table (not the table itself) */
- foreign = UT_LIST_GET_FIRST(table->referenced_list);
+ for (foreign = UT_LIST_GET_FIRST(table->referenced_list);
+ foreign && foreign->foreign_table == table;
+ foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) {
- while (foreign && foreign->foreign_table == table) {
- foreign = UT_LIST_GET_NEXT(referenced_list, foreign);
}
- if (foreign && trx->check_foreigns) {
+ if (!srv_read_only_mode && foreign && trx->check_foreigns) {
FILE* ef = dict_foreign_err_file;
/* We only allow discarding a referenced table if
FOREIGN_KEY_CHECKS is set to 0 */
- err = DB_CANNOT_DROP_CONSTRAINT;
-
mutex_enter(&dict_foreign_err_mutex);
+
rewind(ef);
+
ut_print_timestamp(ef);
fputs(" Cannot DISCARD table ", ef);
- ut_print_name(stderr, trx, TRUE, name);
+ ut_print_name(stderr, trx, TRUE, table->name);
fputs("\n"
"because it is referenced by ", ef);
ut_print_name(stderr, trx, TRUE, foreign->foreign_table_name);
putc('\n', ef);
+
mutex_exit(&dict_foreign_err_mutex);
- goto funct_exit;
+ return(DB_CANNOT_DROP_CONSTRAINT);
}
- dict_hdr_get_new_id(&new_id, NULL, NULL);
+ return(DB_SUCCESS);
+}
- /* Remove all locks except the table-level S and X locks. */
- lock_remove_all_on_table(table, FALSE);
+/*********************************************************************//**
+Cleanup after the DISCARD TABLESPACE operation.
+@return error code. */
+static
+dberr_t
+row_discard_tablespace_end(
+/*=======================*/
+ trx_t* trx, /*!< in/out: transaction handle */
+ dict_table_t* table, /*!< in/out: table to be discarded */
+ dberr_t err) /*!< in: error code */
+{
+ if (table != 0) {
+ dict_table_close(table, TRUE, FALSE);
+ }
- info = pars_info_create();
+ DBUG_EXECUTE_IF("ib_discard_before_commit_crash",
+ log_make_checkpoint_at(IB_ULONGLONG_MAX, TRUE);
+ DBUG_SUICIDE(););
- pars_info_add_str_literal(info, "table_name", name);
- pars_info_add_ull_literal(info, "new_id", new_id);
+ trx_commit_for_mysql(trx);
- err = que_eval_sql(info,
- "PROCEDURE DISCARD_TABLESPACE_PROC () IS\n"
- "old_id CHAR;\n"
- "BEGIN\n"
- "SELECT ID INTO old_id\n"
- "FROM SYS_TABLES\n"
- "WHERE NAME = :table_name\n"
- "LOCK IN SHARE MODE;\n"
- "IF (SQL % NOTFOUND) THEN\n"
- " COMMIT WORK;\n"
- " RETURN;\n"
- "END IF;\n"
- "UPDATE SYS_TABLES SET ID = :new_id\n"
- " WHERE ID = old_id;\n"
- "UPDATE SYS_COLUMNS SET TABLE_ID = :new_id\n"
- " WHERE TABLE_ID = old_id;\n"
- "UPDATE SYS_INDEXES SET TABLE_ID = :new_id\n"
- " WHERE TABLE_ID = old_id;\n"
- "COMMIT WORK;\n"
- "END;\n"
- , FALSE, trx);
+ DBUG_EXECUTE_IF("ib_discard_after_commit_crash",
+ log_make_checkpoint_at(IB_ULONGLONG_MAX, TRUE);
+ DBUG_SUICIDE(););
+
+ row_mysql_unlock_data_dictionary(trx);
+
+ trx->op_info = "";
+
+ return(err);
+}
+
+/*********************************************************************//**
+Do the DISCARD TABLESPACE operation.
+@return DB_SUCCESS or error code. */
+static
+dberr_t
+row_discard_tablespace(
+/*===================*/
+ trx_t* trx, /*!< in/out: transaction handle */
+ dict_table_t* table) /*!< in/out: table to be discarded */
+{
+ dberr_t err;
+
+ /* How do we prevent crashes caused by ongoing operations on
+ the table? Old operations could try to access non-existent
+ pages. MySQL will block all DML on the table using MDL and a
+ DISCARD will not start unless all existing operations on the
+ table to be discarded are completed.
+
+ 1) Acquire the data dictionary latch in X mode. To prevent any
+ internal operations that MySQL is not aware off and also for
+ the internal SQL parser.
+
+ 2) Purge and rollback: we assign a new table id for the
+ table. Since purge and rollback look for the table based on
+ the table id, they see the table as 'dropped' and discard
+ their operations.
+
+ 3) Insert buffer: we remove all entries for the tablespace in
+ the insert buffer tree.
+
+ 4) FOREIGN KEY operations: if table->n_foreign_key_checks_running > 0,
+ we do not allow the discard. */
+
+ /* Play safe and remove all insert buffer entries, though we should
+ have removed them already when DISCARD TABLESPACE was called */
+
+ ibuf_delete_for_discarded_space(table->space);
+
+ table_id_t new_id;
+
+ /* Set the TABLESPACE DISCARD flag in the table definition on disk. */
+
+ err = row_import_update_discarded_flag(trx, table->id, true, true);
if (err != DB_SUCCESS) {
- trx->error_state = DB_SUCCESS;
- trx_rollback_to_savepoint(trx, NULL);
- trx->error_state = DB_SUCCESS;
- } else {
- dict_table_change_id_in_cache(table, new_id);
+ return(err);
+ }
- success = fil_discard_tablespace(table->space);
+ /* Update the index root pages in the system tables, on disk */
- if (!success) {
- trx->error_state = DB_SUCCESS;
- trx_rollback_to_savepoint(trx, NULL);
- trx->error_state = DB_SUCCESS;
+ err = row_import_update_index_root(trx, table, true, true);
- err = DB_ERROR;
- } else {
- /* Set the flag which tells that now it is legal to
- IMPORT a tablespace for this table */
- table->tablespace_discarded = TRUE;
- table->ibd_file_missing = TRUE;
- }
+ if (err != DB_SUCCESS) {
+ return(err);
}
-funct_exit:
+ /* Drop all the FTS auxiliary tables. */
+ if (dict_table_has_fts_index(table)
+ || DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS_HAS_DOC_ID)) {
- if (table != NULL) {
- dict_table_close(table, TRUE);
+ fts_drop_tables(trx, table);
}
- trx_commit_for_mysql(trx);
+ /* Assign a new space ID to the table definition so that purge
+ can ignore the changes. Update the system table on disk. */
- row_mysql_unlock_data_dictionary(trx);
+ err = row_mysql_table_id_reassign(table, trx, &new_id);
- trx->op_info = "";
+ if (err != DB_SUCCESS) {
+ return(err);
+ }
- return((int) err);
+ /* Discard the physical file that is used for the tablespace. */
+
+ err = fil_discard_tablespace(table->space);
+
+ switch(err) {
+ case DB_SUCCESS:
+ case DB_IO_ERROR:
+ case DB_TABLESPACE_NOT_FOUND:
+ /* All persistent operations successful, update the
+ data dictionary memory cache. */
+
+ table->ibd_file_missing = TRUE;
+
+ table->flags2 |= DICT_TF2_DISCARDED;
+
+ dict_table_change_id_in_cache(table, new_id);
+
+ /* Reset the root page numbers. */
+
+ for (dict_index_t* index = UT_LIST_GET_FIRST(table->indexes);
+ index != 0;
+ index = UT_LIST_GET_NEXT(indexes, index)) {
+
+ index->page = FIL_NULL;
+ index->space = FIL_NULL;
+ }
+
+ /* If the tablespace did not already exist or we couldn't
+ write to it, we treat that as a successful DISCARD. It is
+ unusable anyway. */
+
+ err = DB_SUCCESS;
+ break;
+
+ default:
+ /* We need to rollback the disk changes, something failed. */
+
+ trx->error_state = DB_SUCCESS;
+
+ trx_rollback_to_savepoint(trx, NULL);
+
+ trx->error_state = DB_SUCCESS;
+ }
+
+ return(err);
}
-/*****************************************************************//**
-Imports a tablespace. The space id in the .ibd file must match the space id
-of the table in the data dictionary.
+/*********************************************************************//**
+Discards the tablespace of a table which stored in an .ibd file. Discarding
+means that this function renames the .ibd file and assigns a new table id for
+the table. Also the flag table->ibd_file_missing is set to TRUE.
@return error code or DB_SUCCESS */
UNIV_INTERN
-int
-row_import_tablespace_for_mysql(
-/*============================*/
+dberr_t
+row_discard_tablespace_for_mysql(
+/*=============================*/
const char* name, /*!< in: table name */
trx_t* trx) /*!< in: transaction handle */
{
+ dberr_t err;
dict_table_t* table;
- ibool success;
- lsn_t current_lsn;
- ulint err = DB_SUCCESS;
- trx_start_if_not_started_xa(trx);
+ /* Open the table and start the transaction if not started. */
- trx->op_info = "importing tablespace";
+ table = row_discard_tablespace_begin(name, trx);
- current_lsn = log_get_lsn();
+ if (table == 0) {
+ err = DB_TABLE_NOT_FOUND;
+ } else if (table->space == TRX_SYS_SPACE) {
+ char table_name[MAX_FULL_NAME_LEN + 1];
- /* It is possible, though very improbable, that the lsn's in the
- tablespace to be imported have risen above the current system lsn, if
- a lengthy purge, ibuf merge, or rollback was performed on a backup
- taken with ibbackup. If that is the case, reset page lsn's in the
- file. We assume that mysqld was shut down after it performed these
- cleanup operations on the .ibd file, so that it stamped the latest lsn
- to the FIL_PAGE_FILE_FLUSH_LSN in the first page of the .ibd file.
+ innobase_format_name(
+ table_name, sizeof(table_name), table->name, FALSE);
- TODO: reset also the trx id's in clustered index records and write
- a new space id to each data page. That would allow us to import clean
- .ibd files from another MySQL installation. */
+ ib_senderrf(trx->mysql_thd, IB_LOG_LEVEL_ERROR,
+ ER_TABLE_IN_SYSTEM_TABLESPACE, table_name);
- success = fil_reset_too_high_lsns(name, current_lsn);
+ err = DB_ERROR;
- if (!success) {
- ut_print_timestamp(stderr);
- fputs(" InnoDB: Error: cannot reset lsn's in table ", stderr);
- ut_print_name(stderr, trx, TRUE, name);
- fputs("\n"
- "InnoDB: in ALTER TABLE ... IMPORT TABLESPACE\n",
- stderr);
+ } else if (table->n_foreign_key_checks_running > 0) {
+ char table_name[MAX_FULL_NAME_LEN + 1];
+
+ innobase_format_name(
+ table_name, sizeof(table_name), table->name, FALSE);
+
+ ib_senderrf(trx->mysql_thd, IB_LOG_LEVEL_ERROR,
+ ER_DISCARD_FK_CHECKS_RUNNING, table_name);
err = DB_ERROR;
- row_mysql_lock_data_dictionary(trx);
- table = NULL;
+ } else {
+ /* Do foreign key constraint checks. */
- goto funct_exit;
- }
+ err = row_discard_tablespace_foreign_key_checks(trx, table);
- /* Serialize data dictionary operations with dictionary mutex:
- no deadlocks can occur then in these operations */
+ if (err == DB_SUCCESS) {
+ err = row_discard_tablespace(trx, table);
+ }
+ }
- row_mysql_lock_data_dictionary(trx);
+ return(row_discard_tablespace_end(trx, table, err));
+}
- table = dict_table_open_on_name_no_stats(name, TRUE,
- DICT_ERR_IGNORE_NONE);
+/*********************************************************************//**
+Sets an exclusive lock on a table.
+@return error code or DB_SUCCESS */
+UNIV_INTERN
+dberr_t
+row_mysql_lock_table(
+/*=================*/
+ trx_t* trx, /*!< in/out: transaction */
+ dict_table_t* table, /*!< in: table to lock */
+ enum lock_mode mode, /*!< in: LOCK_X or LOCK_S */
+ const char* op_info) /*!< in: string for trx->op_info */
+{
+ mem_heap_t* heap;
+ que_thr_t* thr;
+ dberr_t err;
+ sel_node_t* node;
- if (!table) {
- ut_print_timestamp(stderr);
- fputs(" InnoDB: table ", stderr);
- ut_print_name(stderr, trx, TRUE, name);
- fputs("\n"
- "InnoDB: does not exist in the InnoDB data dictionary\n"
- "InnoDB: in ALTER TABLE ... IMPORT TABLESPACE\n",
- stderr);
+ ut_ad(trx);
+ ut_ad(mode == LOCK_X || mode == LOCK_S);
- err = DB_TABLE_NOT_FOUND;
+ heap = mem_heap_create(512);
- goto funct_exit;
- }
+ trx->op_info = op_info;
- if (table->space == 0) {
- ut_print_timestamp(stderr);
- fputs(" InnoDB: Error: table ", stderr);
- ut_print_name(stderr, trx, TRUE, name);
- fputs("\n"
- "InnoDB: is in the system tablespace 0"
- " which cannot be imported\n", stderr);
- err = DB_ERROR;
+ node = sel_node_create(heap);
+ thr = pars_complete_graph_for_exec(node, trx, heap);
+ thr->graph->state = QUE_FORK_ACTIVE;
- goto funct_exit;
- }
+ /* We use the select query graph as the dummy graph needed
+ in the lock module call */
- if (!table->tablespace_discarded) {
- ut_print_timestamp(stderr);
- fputs(" InnoDB: Error: you are trying to"
- " IMPORT a tablespace\n"
- "InnoDB: ", stderr);
- ut_print_name(stderr, trx, TRUE, name);
- fputs(", though you have not called DISCARD on it yet\n"
- "InnoDB: during the lifetime of the mysqld process!\n",
- stderr);
+ thr = que_fork_get_first_thr(
+ static_cast<que_fork_t*>(que_node_get_parent(thr)));
- err = DB_ERROR;
+ que_thr_move_to_run_state_for_mysql(thr, trx);
- goto funct_exit;
- }
+run_again:
+ thr->run_node = thr;
+ thr->prev_node = thr->common.parent;
- /* Play safe and remove all insert buffer entries, though we should
- have removed them already when DISCARD TABLESPACE was called */
+ err = lock_table(0, table, mode, thr);
- ibuf_delete_for_discarded_space(table->space);
+ trx->error_state = err;
- success = fil_open_single_table_tablespace(
- TRUE, table->space,
- dict_tf_to_fsp_flags(table->flags),
- table->name);
- if (success) {
- table->ibd_file_missing = FALSE;
- table->tablespace_discarded = FALSE;
+ if (err == DB_SUCCESS) {
+ que_thr_stop_for_mysql_no_error(thr, trx);
} else {
- if (table->ibd_file_missing) {
- ut_print_timestamp(stderr);
- fputs(" InnoDB: cannot find or open in the"
- " database directory the .ibd file of\n"
- "InnoDB: table ", stderr);
- ut_print_name(stderr, trx, TRUE, name);
- fputs("\n"
- "InnoDB: in ALTER TABLE ... IMPORT TABLESPACE\n",
- stderr);
- }
+ que_thr_stop_for_mysql(thr);
- err = DB_ERROR;
- }
+ if (err != DB_QUE_THR_SUSPENDED) {
+ ibool was_lock_wait;
-funct_exit:
+ was_lock_wait = row_mysql_handle_errors(
+ &err, trx, thr, NULL);
- if (table != NULL) {
- dict_table_close(table, TRUE);
- }
+ if (was_lock_wait) {
+ goto run_again;
+ }
+ } else {
+ que_thr_t* run_thr;
+ que_node_t* parent;
- trx_commit_for_mysql(trx);
+ parent = que_node_get_parent(thr);
- row_mysql_unlock_data_dictionary(trx);
+ run_thr = que_fork_start_command(
+ static_cast<que_fork_t*>(parent));
+
+ ut_a(run_thr == thr);
+
+ /* There was a lock wait but the thread was not
+ in a ready to run or running state. */
+ trx->error_state = DB_LOCK_WAIT;
+ goto run_again;
+ }
+ }
+
+ que_graph_free(thr->graph);
trx->op_info = "";
- return((int) err);
+ return(err);
}
/*********************************************************************//**
Truncates a table for MySQL.
@return error code or DB_SUCCESS */
UNIV_INTERN
-int
+dberr_t
row_truncate_table_for_mysql(
/*=========================*/
dict_table_t* table, /*!< in: table handle */
trx_t* trx) /*!< in: transaction handle */
{
dict_foreign_t* foreign;
- ulint err;
+ dberr_t err;
mem_heap_t* heap;
byte* buf;
dtuple_t* tuple;
@@ -2978,17 +3171,15 @@ row_truncate_table_for_mysql(
ulint recreate_space = 0;
pars_info_t* info = NULL;
ibool has_internal_doc_id;
+ ulint old_space = table->space;
/* How do we prevent crashes caused by ongoing operations on
the table? Old operations could try to access non-existent
pages.
1) SQL queries, INSERT, SELECT, ...: we must get an exclusive
- MySQL table lock on the table before we can do TRUNCATE
- TABLE. Then there are no running queries on the table. This is
- guaranteed, because in ha_innobase::store_lock(), we do not
- weaken the TL_WRITE lock requested by MySQL when executing
- SQLCOM_TRUNCATE.
+ InnoDB table lock on the table before we can do TRUNCATE
+ TABLE. Then there are no running queries on the table.
2) Purge and rollback: we assign a new table id for the
table. Since purge and rollback look for the table based on
@@ -3031,9 +3222,15 @@ row_truncate_table_for_mysql(
return(DB_ERROR);
}
- trx->op_info = "truncating table";
+ if (dict_table_is_discarded(table)) {
+ return(DB_TABLESPACE_DELETED);
+ } else if (table->ibd_file_missing) {
+ return(DB_TABLESPACE_NOT_FOUND);
+ }
- trx_start_if_not_started_xa(trx);
+ trx_start_for_ddl(trx, TRX_DICT_OP_TABLE);
+
+ trx->op_info = "truncating table";
/* Serialize data dictionary operations with dictionary mutex:
no deadlocks can occur then in these operations */
@@ -3049,16 +3246,22 @@ row_truncate_table_for_mysql(
ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
+ dict_stats_wait_bg_to_stop_using_tables(table, NULL, trx);
+
/* Check if the table is referenced by foreign key constraints from
some other table (not the table itself) */
- foreign = UT_LIST_GET_FIRST(table->referenced_list);
+ for (foreign = UT_LIST_GET_FIRST(table->referenced_list);
+ foreign != 0 && foreign->foreign_table == table;
+ foreign = UT_LIST_GET_NEXT(referenced_list, foreign)) {
- while (foreign && foreign->foreign_table == table) {
- foreign = UT_LIST_GET_NEXT(referenced_list, foreign);
+ /* Do nothing. */
}
- if (foreign && trx->check_foreigns) {
+ if (!srv_read_only_mode
+ && foreign
+ && trx->check_foreigns) {
+
FILE* ef = dict_foreign_err_file;
/* We only allow truncating a referenced table if
@@ -3099,19 +3302,41 @@ row_truncate_table_for_mysql(
goto funct_exit;
}
- /* Remove all locks except the table-level S and X locks. */
+ /* Remove all locks except the table-level X lock. */
lock_remove_all_on_table(table, FALSE);
+ /* Ensure that the table will be dropped by
+ trx_rollback_active() in case of a crash. */
+
trx->table_id = table->id;
+ trx_set_dict_operation(trx, TRX_DICT_OP_TABLE);
+
+ /* Assign an undo segment for the transaction, so that the
+ transaction will be recovered after a crash. */
+
+ mutex_enter(&trx->undo_mutex);
+
+ err = trx_undo_assign_undo(trx, TRX_UNDO_UPDATE);
+
+ mutex_exit(&trx->undo_mutex);
+
+ if (err != DB_SUCCESS) {
+
+ goto funct_exit;
+ }
if (table->space && !table->dir_path_of_temp_table) {
/* Discard and create the single-table tablespace. */
ulint space = table->space;
ulint flags = fil_space_get_flags(space);
+ ut_a(!DICT_TF2_FLAG_IS_SET(table, DICT_TF2_TEMPORARY));
+
+ dict_get_and_save_data_dir_path(table, true);
+
if (flags != ULINT_UNDEFINED
- && fil_discard_tablespace(space)) {
+ && fil_discard_tablespace(space) == DB_SUCCESS) {
dict_index_t* index;
@@ -3124,15 +3349,18 @@ row_truncate_table_for_mysql(
if (space == ULINT_UNDEFINED
|| fil_create_new_single_table_tablespace(
- space, table->name, FALSE,
+ space, table->name,
+ table->data_dir_path,
flags, table->flags2,
- FIL_IBD_FILE_INITIAL_SIZE) != DB_SUCCESS) {
+ FIL_IBD_FILE_INITIAL_SIZE)
+ != DB_SUCCESS) {
dict_table_x_unlock_indexes(table);
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: TRUNCATE TABLE %s failed to"
- " create a new tablespace\n",
+
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "TRUNCATE TABLE %s failed to "
+ "create a new tablespace",
table->name);
+
table->ibd_file_missing = 1;
err = DB_ERROR;
goto funct_exit;
@@ -3240,7 +3468,6 @@ next_rec:
mtr_commit(&mtr);
mem_heap_free(heap);
-
/* Done with index truncation, release index tree locks,
subsequent work relates to table level metadata change */
dict_table_x_unlock_indexes(table);
@@ -3259,21 +3486,21 @@ next_rec:
fts_table.name = table->name;
fts_table.id = new_id;
- err = fts_create_common_tables(trx, &fts_table, table->name,
- TRUE);
+ err = fts_create_common_tables(
+ trx, &fts_table, table->name, TRUE);
- if (err == DB_SUCCESS) {
- for (i = 0; i < ib_vector_size(table->fts->indexes);
- i++) {
- dict_index_t* fts_index;
+ for (i = 0;
+ i < ib_vector_size(table->fts->indexes)
+ && err == DB_SUCCESS;
+ i++) {
- fts_index = static_cast<dict_index_t*>(
- ib_vector_getp(
- table->fts->indexes, i));
+ dict_index_t* fts_index;
- fts_create_index_tables_low(
- trx, fts_index, table->name, new_id);
- }
+ fts_index = static_cast<dict_index_t*>(
+ ib_vector_getp(table->fts->indexes, i));
+
+ err = fts_create_index_tables_low(
+ trx, fts_index, table->name, new_id);
}
if (err != DB_SUCCESS) {
@@ -3287,34 +3514,64 @@ next_rec:
fputs("\n", stderr);
goto funct_exit;
+ } else {
+ ut_ad(trx->state != TRX_STATE_NOT_STARTED);
}
}
info = pars_info_create();
- pars_info_add_int4_literal(info, "space", (lint) table->space);
+ pars_info_add_int4_literal(info, "new_space", (lint) table->space);
pars_info_add_ull_literal(info, "old_id", table->id);
pars_info_add_ull_literal(info, "new_id", new_id);
err = que_eval_sql(info,
- "PROCEDURE RENUMBER_TABLESPACE_PROC () IS\n"
+ "PROCEDURE RENUMBER_TABLE_ID_PROC () IS\n"
"BEGIN\n"
"UPDATE SYS_TABLES"
- " SET ID = :new_id, SPACE = :space\n"
+ " SET ID = :new_id, SPACE = :new_space\n"
" WHERE ID = :old_id;\n"
"UPDATE SYS_COLUMNS SET TABLE_ID = :new_id\n"
" WHERE TABLE_ID = :old_id;\n"
"UPDATE SYS_INDEXES"
- " SET TABLE_ID = :new_id, SPACE = :space\n"
+ " SET TABLE_ID = :new_id, SPACE = :new_space\n"
" WHERE TABLE_ID = :old_id;\n"
- "COMMIT WORK;\n"
"END;\n"
, FALSE, trx);
+ if (err == DB_SUCCESS && old_space != table->space) {
+ info = pars_info_create();
+
+ pars_info_add_int4_literal(info, "old_space", (lint) old_space);
+
+ pars_info_add_int4_literal(
+ info, "new_space", (lint) table->space);
+
+ err = que_eval_sql(info,
+ "PROCEDURE RENUMBER_TABLESPACE_PROC () IS\n"
+ "BEGIN\n"
+ "UPDATE SYS_TABLESPACES"
+ " SET SPACE = :new_space\n"
+ " WHERE SPACE = :old_space;\n"
+ "UPDATE SYS_DATAFILES"
+ " SET SPACE = :new_space"
+ " WHERE SPACE = :old_space;\n"
+ "END;\n"
+ , FALSE, trx);
+ }
+ DBUG_EXECUTE_IF("ib_ddl_crash_before_fts_truncate", err = DB_ERROR;);
+
if (err != DB_SUCCESS) {
trx->error_state = DB_SUCCESS;
trx_rollback_to_savepoint(trx, NULL);
trx->error_state = DB_SUCCESS;
+
+ /* Update system table failed. Table in memory metadata
+ could be in an inconsistent state, mark the in-memory
+ table->corrupted to be true. In the long run, this should
+ be fixed by atomic truncate table */
+ table->corrupted = true;
+
ut_print_timestamp(stderr);
fputs(" InnoDB: Unable to assign a new identifier to table ",
stderr);
@@ -3323,30 +3580,40 @@ next_rec:
"InnoDB: after truncating it. Background processes"
" may corrupt the table!\n", stderr);
- /* Fail to update the table id, so drop the new
+ /* Failed to update the table id, so drop the new
FTS auxiliary tables */
if (has_internal_doc_id) {
- dict_table_t fts_table;
+ ut_ad(trx->state == TRX_STATE_NOT_STARTED);
+
+ table_id_t id = table->id;
- fts_table.name = table->name;
- fts_table.id = new_id;
+ table->id = new_id;
- fts_drop_tables(trx, &fts_table);
+ fts_drop_tables(trx, table);
+
+ table->id = id;
+
+ ut_ad(trx->state != TRX_STATE_NOT_STARTED);
}
err = DB_ERROR;
} else {
/* Drop the old FTS index */
if (has_internal_doc_id) {
+ ut_ad(trx->state != TRX_STATE_NOT_STARTED);
fts_drop_tables(trx, table);
+ ut_ad(trx->state != TRX_STATE_NOT_STARTED);
}
+ DBUG_EXECUTE_IF("ib_truncate_crash_after_fts_drop",
+ DBUG_SUICIDE(););
+
dict_table_change_id_in_cache(table, new_id);
/* Reset the Doc ID in cache to 0 */
if (has_internal_doc_id && table->fts->cache) {
table->fts->fts_status |= TABLE_DICT_LOCKED;
- fts_update_next_doc_id(table, NULL, 0);
+ fts_update_next_doc_id(trx, table, NULL, 0);
fts_cache_clear(table->fts->cache, TRUE);
fts_cache_init(table->fts->cache);
table->fts->fts_status &= ~TABLE_DICT_LOCKED;
@@ -3364,16 +3631,13 @@ funct_exit:
row_mysql_unlock_data_dictionary(trx);
- /* We are supposed to recalc and save the stats only
- on ANALYZE, but it also makes sense to do so on TRUNCATE */
- dict_stats_update(table, DICT_STATS_RECALC_PERSISTENT_SILENT,
- FALSE);
+ dict_stats_update(table, DICT_STATS_EMPTY_TABLE);
trx->op_info = "";
srv_wake_master_thread();
- return((int) err);
+ return(err);
}
/*********************************************************************//**
@@ -3385,23 +3649,29 @@ by the transaction, the transaction will be committed. Otherwise, the
data dictionary will remain locked.
@return error code or DB_SUCCESS */
UNIV_INTERN
-int
+dberr_t
row_drop_table_for_mysql(
/*=====================*/
const char* name, /*!< in: table name */
trx_t* trx, /*!< in: transaction handle */
- ibool drop_db)/*!< in: TRUE=dropping whole database */
+ bool drop_db,/*!< in: true=dropping whole database */
+ bool nonatomic)
+ /*!< in: whether it is permitted
+ to release and reacquire dict_operation_lock */
{
+ dberr_t err;
dict_foreign_t* foreign;
dict_table_t* table;
- dict_index_t* index;
+ ibool print_msg;
ulint space_id;
- ulint err;
- const char* table_name;
+ char* filepath = NULL;
+ const char* tablename_minus_db;
+ char* tablename = NULL;
+ bool ibd_file_missing;
ulint namelen;
- ibool locked_dictionary = FALSE;
- ibool fts_bg_thread_exited = FALSE;
+ bool locked_dictionary = false;
pars_info_t* info = NULL;
+ mem_heap_t* heap = NULL;
ut_a(name != NULL);
@@ -3419,19 +3689,19 @@ row_drop_table_for_mysql(
Certain table names starting with 'innodb_' have their special
meaning regardless of the database name. Thus, we need to
ignore the database name prefix in the comparisons. */
- table_name = strchr(name, '/');
+ tablename_minus_db = strchr(name, '/');
- if (table_name) {
- table_name++;
+ if (tablename_minus_db) {
+ tablename_minus_db++;
} else {
/* Ancillary FTS tables don't have '/' characters. */
- table_name = name;
+ tablename_minus_db = name;
}
- namelen = strlen(table_name) + 1;
+ namelen = strlen(tablename_minus_db) + 1;
if (namelen == sizeof S_innodb_monitor
- && !memcmp(table_name, S_innodb_monitor,
+ && !memcmp(tablename_minus_db, S_innodb_monitor,
sizeof S_innodb_monitor)) {
/* Table name equals "innodb_monitor":
@@ -3440,17 +3710,17 @@ row_drop_table_for_mysql(
srv_print_innodb_monitor = FALSE;
srv_print_innodb_lock_monitor = FALSE;
} else if (namelen == sizeof S_innodb_lock_monitor
- && !memcmp(table_name, S_innodb_lock_monitor,
+ && !memcmp(tablename_minus_db, S_innodb_lock_monitor,
sizeof S_innodb_lock_monitor)) {
srv_print_innodb_monitor = FALSE;
srv_print_innodb_lock_monitor = FALSE;
} else if (namelen == sizeof S_innodb_tablespace_monitor
- && !memcmp(table_name, S_innodb_tablespace_monitor,
+ && !memcmp(tablename_minus_db, S_innodb_tablespace_monitor,
sizeof S_innodb_tablespace_monitor)) {
srv_print_innodb_tablespace_monitor = FALSE;
} else if (namelen == sizeof S_innodb_table_monitor
- && !memcmp(table_name, S_innodb_table_monitor,
+ && !memcmp(tablename_minus_db, S_innodb_table_monitor,
sizeof S_innodb_table_monitor)) {
srv_print_innodb_table_monitor = FALSE;
@@ -3461,7 +3731,10 @@ row_drop_table_for_mysql(
trx->op_info = "dropping table";
- trx_start_if_not_started(trx);
+ /* This function is called recursively via fts_drop_tables(). */
+ if (trx->state == TRX_STATE_NOT_STARTED) {
+ trx_start_for_ddl(trx, TRX_DICT_OP_TABLE);
+ }
if (trx->dict_operation_lock_mode != RW_X_LATCH) {
/* Prevent foreign key checks etc. while we are dropping the
@@ -3469,17 +3742,17 @@ row_drop_table_for_mysql(
row_mysql_lock_data_dictionary(trx);
- locked_dictionary = TRUE;
+ locked_dictionary = true;
+ nonatomic = true;
}
-retry:
ut_ad(mutex_own(&(dict_sys->mutex)));
#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */
- table = dict_table_open_on_name_no_stats(
- name, TRUE,
+ table = dict_table_open_on_name(
+ name, TRUE, FALSE,
static_cast<dict_err_ignore_t>(
DICT_ERR_IGNORE_INDEX_ROOT | DICT_ERR_IGNORE_CORRUPT));
@@ -3502,34 +3775,53 @@ retry:
goto funct_exit;
}
- if (table->fts) {
- fts_t* fts = table->fts;
+ /* Turn on this drop bit before we could release the dictionary
+ latch */
+ table->to_be_dropped = true;
- /* It is possible that background 'Add' thread fts_add_thread()
- just gets called and the fts_optimize_thread()
- is processing deleted records. There could be undetected
- deadlock between threads synchronization and dict_sys_mutex
- since fts_parse_sql() requires dict_sys->mutex. Ask the
- background thread to exit before proceeds to drop table to
- avoid undetected deadlocks */
- row_mysql_unlock_data_dictionary(trx);
+ if (nonatomic) {
+ /* This trx did not acquire any locks on dictionary
+ table records yet. Thus it is safe to release and
+ reacquire the data dictionary latches. */
+ if (table->fts) {
+ ut_ad(!table->fts->add_wq);
+ ut_ad(lock_trx_has_sys_table_locks(trx) == 0);
- if (fts->add_wq && (!fts_bg_thread_exited)) {
- /* Wait for any background threads accessing the table
- to exit. */
- mutex_enter(&fts->bg_threads_mutex);
- fts->fts_status |= BG_THREAD_STOP;
+ row_mysql_unlock_data_dictionary(trx);
+ fts_optimize_remove_table(table);
+ row_mysql_lock_data_dictionary(trx);
+ }
- dict_table_wait_for_bg_threads_to_exit(table, 250000);
+ /* Do not bother to deal with persistent stats for temp
+ tables since we know temp tables do not use persistent
+ stats. */
+ if (!dict_table_is_temporary(table)) {
+ dict_stats_wait_bg_to_stop_using_tables(
+ table, NULL, trx);
+ }
+ }
- mutex_exit(&fts->bg_threads_mutex);
+ /* make sure background stats thread is not running on the table */
+ ut_ad(!(table->stats_bg_flag & BG_STAT_IN_PROGRESS));
- row_mysql_lock_data_dictionary(trx);
- fts_bg_thread_exited = TRUE;
- goto retry;
- } else {
- fts_optimize_remove_table(table);
- row_mysql_lock_data_dictionary(trx);
+ /* Delete the link file if used. */
+ if (DICT_TF_HAS_DATA_DIR(table->flags)) {
+ fil_delete_link_file(name);
+ }
+
+ if (!dict_table_is_temporary(table)) {
+
+ dict_stats_recalc_pool_del(table);
+
+ /* Remove stats for this table and all of its indexes from the
+ persistent storage if it exists and if there are stats for this
+ table in there. This function creates its own trx and commits
+ it. */
+ char errstr[1024];
+ err = dict_stats_drop_table(name, errstr, sizeof(errstr));
+
+ if (err != DB_SUCCESS) {
+ ib_logf(IB_LOG_LEVEL_WARN, "%s", errstr);
}
}
@@ -3540,7 +3832,7 @@ retry:
dict_table_move_from_lru_to_non_lru(table);
}
- dict_table_close(table, TRUE);
+ dict_table_close(table, TRUE, FALSE);
/* Check if the table is referenced by foreign key constraints from
some other table (not the table itself) */
@@ -3552,7 +3844,9 @@ check_next_foreign:
foreign = UT_LIST_GET_NEXT(referenced_list, foreign);
}
- if (foreign && trx->check_foreigns
+ if (!srv_read_only_mode
+ && foreign
+ && trx->check_foreigns
&& !(drop_db && dict_tables_have_same_db(
name, foreign->foreign_table_name_lookup))) {
FILE* ef = dict_foreign_err_file;
@@ -3589,16 +3883,16 @@ check_next_foreign:
if (table->n_foreign_key_checks_running > 0) {
- const char* table_name = table->name;
+ const char* save_tablename = table->name;
ibool added;
- added = row_add_table_to_background_drop_list(table_name);
+ added = row_add_table_to_background_drop_list(save_tablename);
if (added) {
ut_print_timestamp(stderr);
fputs(" InnoDB: You are trying to drop table ",
stderr);
- ut_print_name(stderr, trx, TRUE, table_name);
+ ut_print_name(stderr, trx, TRUE, save_tablename);
fputs("\n"
"InnoDB: though there is a"
" foreign key check running on it.\n"
@@ -3663,23 +3957,54 @@ check_next_foreign:
goto funct_exit;
}
+ /* The "to_be_dropped" marks table that is to be dropped, but
+ has not been dropped, instead, was put in the background drop
+ list due to being used by concurrent DML operations. Clear it
+ here since there are no longer any concurrent activities on it,
+ and it is free to be dropped */
+ table->to_be_dropped = false;
+
/* If we get this far then the table to be dropped must not have
any table or record locks on it. */
ut_a(!lock_table_has_locks(table));
- trx_set_dict_operation(trx, TRX_DICT_OP_TABLE);
- trx->table_id = table->id;
+ switch (trx_get_dict_operation(trx)) {
+ case TRX_DICT_OP_NONE:
+ trx_set_dict_operation(trx, TRX_DICT_OP_TABLE);
+ trx->table_id = table->id;
+ case TRX_DICT_OP_TABLE:
+ break;
+ case TRX_DICT_OP_INDEX:
+ /* If the transaction was previously flagged as
+ TRX_DICT_OP_INDEX, we should be dropping auxiliary
+ tables for full-text indexes. */
+ ut_ad(strstr(table->name, "/FTS_") != NULL);
+ }
/* Mark all indexes unavailable in the data dictionary cache
before starting to drop the table. */
- for (index = dict_table_get_first_index(table);
+ unsigned* page_no;
+ unsigned* page_nos;
+ heap = mem_heap_create(
+ 200 + UT_LIST_GET_LEN(table->indexes) * sizeof *page_nos);
+ tablename = mem_heap_strdup(heap, name);
+
+ page_no = page_nos = static_cast<unsigned*>(
+ mem_heap_alloc(
+ heap,
+ UT_LIST_GET_LEN(table->indexes) * sizeof *page_no));
+
+ for (dict_index_t* index = dict_table_get_first_index(table);
index != NULL;
index = dict_table_get_next_index(index)) {
rw_lock_x_lock(dict_index_get_lock(index));
- ut_ad(!index->to_be_dropped);
- index->to_be_dropped = TRUE;
+ /* Save the page numbers so that we can restore them
+ if the operation fails. */
+ *page_no++ = index->page;
+ /* Mark the index unusable. */
+ index->page = FIL_NULL;
rw_lock_x_unlock(dict_index_get_lock(index));
}
@@ -3698,6 +4023,7 @@ check_next_foreign:
"table_id CHAR;\n"
"index_id CHAR;\n"
"foreign_id CHAR;\n"
+ "space_id INT;\n"
"found INT;\n"
"DECLARE CURSOR cur_fk IS\n"
@@ -3720,6 +4046,12 @@ check_next_foreign:
"IF (SQL % NOTFOUND) THEN\n"
" RETURN;\n"
"END IF;\n"
+ "SELECT SPACE INTO space_id\n"
+ "FROM SYS_TABLES\n"
+ "WHERE NAME = :table_name;\n"
+ "IF (SQL % NOTFOUND) THEN\n"
+ " RETURN;\n"
+ "END IF;\n"
"found := 1;\n"
"SELECT ID INTO sys_foreign_id\n"
"FROM SYS_TABLES\n"
@@ -3762,56 +4094,90 @@ check_next_foreign:
" END IF;\n"
"END LOOP;\n"
"CLOSE cur_idx;\n"
+ "DELETE FROM SYS_TABLESPACES\n"
+ "WHERE SPACE = space_id;\n"
+ "DELETE FROM SYS_DATAFILES\n"
+ "WHERE SPACE = space_id;\n"
"DELETE FROM SYS_COLUMNS\n"
"WHERE TABLE_ID = table_id;\n"
"DELETE FROM SYS_TABLES\n"
- "WHERE ID = table_id;\n"
+ "WHERE NAME = :table_name;\n"
"END;\n"
, FALSE, trx);
switch (err) {
- ibool is_temp;
- mem_heap_t* heap;
+ ibool is_temp;
case DB_SUCCESS:
-
- heap = mem_heap_create(200);
-
/* Clone the name, in case it has been allocated
from table->heap, which will be freed by
dict_table_remove_from_cache(table) below. */
- name = mem_heap_strdup(heap, name);
space_id = table->space;
+ ibd_file_missing = table->ibd_file_missing;
- is_temp = table->flags2 & DICT_TF2_TEMPORARY;
+ is_temp = DICT_TF2_FLAG_IS_SET(table, DICT_TF2_TEMPORARY);
+
+ /* If there is a temp path then the temp flag is set.
+ However, during recovery, we might have a temp flag but
+ not know the temp path */
ut_a(table->dir_path_of_temp_table == NULL || is_temp);
+ if (dict_table_is_discarded(table)
+ || table->ibd_file_missing) {
+ /* Do not attempt to drop known-to-be-missing
+ tablespaces. */
+ space_id = 0;
+ }
+
+ /* We do not allow temporary tables with a remote path. */
+ ut_a(!(is_temp && DICT_TF_HAS_DATA_DIR(table->flags)));
+
+ if (space_id && DICT_TF_HAS_DATA_DIR(table->flags)) {
+ dict_get_and_save_data_dir_path(table, true);
+ ut_a(table->data_dir_path);
+
+ filepath = os_file_make_remote_pathname(
+ table->data_dir_path, table->name, "ibd");
+ } else if (table->dir_path_of_temp_table) {
+ filepath = fil_make_ibd_name(
+ table->dir_path_of_temp_table, true);
+ } else {
+ filepath = fil_make_ibd_name(tablename, false);
+ }
if (dict_table_has_fts_index(table)
|| DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS_HAS_DOC_ID)) {
ut_ad(table->n_ref_count == 0);
+ ut_ad(trx->state != TRX_STATE_NOT_STARTED);
err = fts_drop_tables(trx, table);
if (err != DB_SUCCESS) {
ut_print_timestamp(stderr);
- fprintf(stderr," InnoDB: Error: (%lu) not "
+ fprintf(stderr," InnoDB: Error: (%s) not "
"able to remove ancillary FTS tables "
- "for table ", err);
- ut_print_name(stderr, trx, TRUE, name);
+ "for table ", ut_strerr(err));
+ ut_print_name(stderr, trx, TRUE, tablename);
fputs("\n", stderr);
goto funct_exit;
}
+ }
+ /* The table->fts flag can be set on the table for which
+ the cluster index is being rebuilt. Such table might not have
+ DICT_TF2_FTS flag set. So keep this out of above
+ dict_table_has_fts_index condition */
+ if (table->fts) {
fts_free(table);
}
dict_table_remove_from_cache(table);
- if (dict_load_table(name, TRUE, DICT_ERR_IGNORE_NONE) != NULL) {
+ if (dict_load_table(tablename, TRUE,
+ DICT_ERR_IGNORE_NONE) != NULL) {
ut_print_timestamp(stderr);
fputs(" InnoDB: Error: not able to remove table ",
stderr);
- ut_print_name(stderr, trx, TRUE, name);
+ ut_print_name(stderr, trx, TRUE, tablename);
fputs(" from the dictionary cache!\n", stderr);
err = DB_ERROR;
}
@@ -3819,23 +4185,46 @@ check_next_foreign:
/* Do not drop possible .ibd tablespace if something went
wrong: we do not want to delete valuable data of the user */
- if (err == DB_SUCCESS && space_id > 0) {
- if (!fil_space_for_table_exists_in_mem(
- space_id, name, FALSE, !is_temp)) {
+ /* Don't spam the log if we can't find the tablespace of
+ a temp table or if the tablesace has been discarded. */
+ print_msg = !(is_temp || ibd_file_missing);
+
+ if (err == DB_SUCCESS && space_id > TRX_SYS_SPACE) {
+ if (!is_temp
+ && !fil_space_for_table_exists_in_mem(
+ space_id, tablename, FALSE,
+ print_msg, false, NULL, 0)) {
+ /* This might happen if we are dropping a
+ discarded tablespace */
err = DB_SUCCESS;
+ if (print_msg) {
+ char msg_tablename[MAX_FULL_NAME_LEN + 1];
+
+ innobase_format_name(
+ msg_tablename, sizeof(tablename),
+ tablename, FALSE);
+
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Removed the table %s from "
+ "InnoDB's data dictionary",
+ msg_tablename);
+ }
+
+ /* Force a delete of any discarded
+ or temporary files. */
+
+ fil_delete_file(filepath);
+
+ } else if (fil_delete_tablespace(
+ space_id,
+ BUF_REMOVE_FLUSH_NO_WRITE)
+ != DB_SUCCESS) {
fprintf(stderr,
"InnoDB: We removed now the InnoDB"
" internal data dictionary entry\n"
"InnoDB: of table ");
- ut_print_name(stderr, trx, TRUE, name);
- fprintf(stderr, ".\n");
- } else if (!fil_delete_tablespace(space_id)) {
- fprintf(stderr,
- "InnoDB: We removed now the InnoDB"
- " internal data dictionary entry\n"
- "InnoDB: of table ");
- ut_print_name(stderr, trx, TRUE, name);
+ ut_print_name(stderr, trx, TRUE, tablename);
fprintf(stderr, ".\n");
ut_print_timestamp(stderr);
@@ -3843,13 +4232,12 @@ check_next_foreign:
" InnoDB: Error: not able to"
" delete tablespace %lu of table ",
(ulong) space_id);
- ut_print_name(stderr, trx, TRUE, name);
+ ut_print_name(stderr, trx, TRUE, tablename);
fputs("!\n", stderr);
err = DB_ERROR;
}
}
- mem_heap_free(heap);
break;
case DB_OUT_OF_FILE_SPACE:
@@ -3874,7 +4262,7 @@ check_next_foreign:
fprintf(stderr, "InnoDB: unknown error code %lu"
" while dropping table:", (ulong) err);
- ut_print_name(stderr, trx, TRUE, name);
+ ut_print_name(stderr, trx, TRUE, tablename);
fprintf(stderr, ".\n");
trx->error_state = DB_SUCCESS;
@@ -3884,16 +4272,25 @@ check_next_foreign:
/* Mark all indexes available in the data dictionary
cache again. */
- for (index = dict_table_get_first_index(table);
+ page_no = page_nos;
+
+ for (dict_index_t* index = dict_table_get_first_index(table);
index != NULL;
index = dict_table_get_next_index(index)) {
rw_lock_x_lock(dict_index_get_lock(index));
- index->to_be_dropped = FALSE;
+ ut_a(index->page == FIL_NULL);
+ index->page = *page_no++;
rw_lock_x_unlock(dict_index_get_lock(index));
}
}
funct_exit:
+ if (heap) {
+ mem_heap_free(heap);
+ }
+ if (filepath) {
+ mem_free(filepath);
+ }
if (locked_dictionary) {
trx_commit_for_mysql(trx);
@@ -3905,7 +4302,7 @@ funct_exit:
srv_wake_master_thread();
- return((int) err);
+ return(err);
}
/*********************************************************************//**
@@ -3929,9 +4326,9 @@ row_mysql_drop_temp_tables(void)
mtr_start(&mtr);
btr_pcur_open_at_index_side(
- TRUE,
+ true,
dict_table_get_first_index(dict_sys->sys_tables),
- BTR_SEARCH_LEAF, &pcur, TRUE, &mtr);
+ BTR_SEARCH_LEAF, &pcur, true, 0, &mtr);
for (;;) {
const rec_t* rec;
@@ -3950,6 +4347,8 @@ row_mysql_drop_temp_tables(void)
ROW_FORMAT=REDUNDANT. */
rec = btr_pcur_get_rec(&pcur);
field = rec_get_nth_field_old(
+ rec, DICT_FLD__SYS_TABLES__NAME, &len);
+ field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_TABLES__N_COLS, &len);
if (len != 4
|| !(mach_read_from_4(field) & DICT_N_COLS_COMPACT)) {
@@ -4003,15 +4402,15 @@ row_mysql_drop_temp_tables(void)
Drop all foreign keys in a database, see Bug#18942.
Called at the end of row_drop_database_for_mysql().
@return error code or DB_SUCCESS */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
drop_all_foreign_keys_in_db(
/*========================*/
const char* name, /*!< in: database name which ends to '/' */
trx_t* trx) /*!< in: transaction handle */
{
pars_info_t* pinfo;
- ulint err;
+ dberr_t err;
ut_a(name[strlen(name) - 1] == '/');
@@ -4063,22 +4462,24 @@ drop_all_foreign_keys_in_db(
Drops a database for MySQL.
@return error code or DB_SUCCESS */
UNIV_INTERN
-int
+dberr_t
row_drop_database_for_mysql(
/*========================*/
const char* name, /*!< in: database name which ends to '/' */
trx_t* trx) /*!< in: transaction handle */
{
- dict_table_t* table;
- char* table_name;
- int err = DB_SUCCESS;
- ulint namelen = strlen(name);
+ dict_table_t* table;
+ char* table_name;
+ dberr_t err = DB_SUCCESS;
+ ulint namelen = strlen(name);
ut_a(name != NULL);
ut_a(name[namelen - 1] == '/');
trx->op_info = "dropping database";
+ trx_set_dict_operation(trx, TRX_DICT_OP_TABLE);
+
trx_start_if_not_started_xa(trx);
loop:
row_mysql_lock_data_dictionary(trx);
@@ -4086,11 +4487,29 @@ loop:
while ((table_name = dict_get_first_table_name_in_db(name))) {
ut_a(memcmp(table_name, name, namelen) == 0);
- table = dict_table_open_on_name_no_stats(table_name, TRUE,
- DICT_ERR_IGNORE_NONE);
+ table = dict_table_open_on_name(
+ table_name, TRUE, FALSE, static_cast<dict_err_ignore_t>(
+ DICT_ERR_IGNORE_INDEX_ROOT
+ | DICT_ERR_IGNORE_CORRUPT));
- ut_a(table);
- ut_a(!table->can_be_evicted);
+ if (!table) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Cannot load table %s from InnoDB internal "
+ "data dictionary during drop database",
+ table_name);
+ mem_free(table_name);
+ err = DB_TABLE_NOT_FOUND;
+ break;
+
+ }
+
+ if (row_is_mysql_tmp_table_name(table->name)) {
+ /* There could be an orphan temp table left from
+ interupted alter table rebuild operation */
+ dict_table_close(table, TRUE, FALSE);
+ } else {
+ ut_a(!table->can_be_evicted || table->ibd_file_missing);
+ }
/* Wait until MySQL does not have any queries running on
the table */
@@ -4121,8 +4540,8 @@ loop:
if (err != DB_SUCCESS) {
fputs("InnoDB: DROP DATABASE ", stderr);
ut_print_name(stderr, trx, TRUE, name);
- fprintf(stderr, " failed with error %lu for table ",
- (ulint) err);
+ fprintf(stderr, " failed with error (%s) for table ",
+ ut_strerr(err));
ut_print_name(stderr, trx, TRUE, table_name);
putc('\n', stderr);
mem_free(table_name);
@@ -4135,7 +4554,7 @@ loop:
if (err == DB_SUCCESS) {
/* after dropping all tables try to drop all leftover
foreign keys in case orphaned ones exist */
- err = (int) drop_all_foreign_keys_in_db(name, trx);
+ err = drop_all_foreign_keys_in_db(name, trx);
if (err != DB_SUCCESS) {
fputs("InnoDB: DROP DATABASE ", stderr);
@@ -4157,9 +4576,9 @@ loop:
/*********************************************************************//**
Checks if a table name contains the string "/#sql" which denotes temporary
tables in MySQL.
-@return TRUE if temporary table */
-static
-ibool
+@return true if temporary table */
+UNIV_INTERN __attribute__((warn_unused_result))
+bool
row_is_mysql_tmp_table_name(
/*========================*/
const char* name) /*!< in: table name in the form
@@ -4172,8 +4591,8 @@ row_is_mysql_tmp_table_name(
/****************************************************************//**
Delete a single constraint.
@return error code or DB_SUCCESS */
-static
-int
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_delete_constraint_low(
/*======================*/
const char* id, /*!< in: constraint id */
@@ -4183,7 +4602,7 @@ row_delete_constraint_low(
pars_info_add_str_literal(info, "id", id);
- return((int) que_eval_sql(info,
+ return(que_eval_sql(info,
"PROCEDURE DELETE_CONSTRAINT () IS\n"
"BEGIN\n"
"DELETE FROM SYS_FOREIGN_COLS WHERE ID = :id;\n"
@@ -4195,8 +4614,8 @@ row_delete_constraint_low(
/****************************************************************//**
Delete a single constraint.
@return error code or DB_SUCCESS */
-static
-int
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_delete_constraint(
/*==================*/
const char* id, /*!< in: constraint id */
@@ -4205,7 +4624,7 @@ row_delete_constraint(
mem_heap_t* heap, /*!< in: memory heap */
trx_t* trx) /*!< in: transaction handle */
{
- ulint err;
+ dberr_t err;
/* New format constraints have ids <databasename>/<constraintname>. */
err = row_delete_constraint_low(
@@ -4222,29 +4641,30 @@ row_delete_constraint(
err = row_delete_constraint_low(id, trx);
}
- return((int) err);
+ return(err);
}
/*********************************************************************//**
Renames a table for MySQL.
@return error code or DB_SUCCESS */
UNIV_INTERN
-ulint
+dberr_t
row_rename_table_for_mysql(
/*=======================*/
const char* old_name, /*!< in: old table name */
const char* new_name, /*!< in: new table name */
- trx_t* trx, /*!< in: transaction handle */
- ibool commit) /*!< in: if TRUE then commit trx */
+ trx_t* trx, /*!< in/out: transaction */
+ bool commit) /*!< in: whether to commit trx */
{
dict_table_t* table = NULL;
ibool dict_locked = FALSE;
- ulint err = DB_ERROR;
+ dberr_t err = DB_ERROR;
mem_heap_t* heap = NULL;
const char** constraints_to_drop = NULL;
ulint n_constraints_to_drop = 0;
ibool old_is_tmp, new_is_tmp;
pars_info_t* info = NULL;
+ int retry;
ut_a(old_name != NULL);
ut_a(new_name != NULL);
@@ -4279,8 +4699,8 @@ row_rename_table_for_mysql(
dict_locked = trx->dict_operation_lock_mode == RW_X_LATCH;
- table = dict_table_open_on_name_no_stats(old_name, dict_locked,
- DICT_ERR_IGNORE_NONE);
+ table = dict_table_open_on_name(old_name, dict_locked, FALSE,
+ DICT_ERR_IGNORE_NONE);
if (!table) {
err = DB_TABLE_NOT_FOUND;
@@ -4299,18 +4719,19 @@ row_rename_table_for_mysql(
"InnoDB: " REFMAN "innodb-troubleshooting.html\n",
stderr);
goto funct_exit;
- } else if (table->ibd_file_missing) {
+
+ } else if (table->ibd_file_missing
+ && !dict_table_is_discarded(table)) {
+
err = DB_TABLE_NOT_FOUND;
- ut_print_timestamp(stderr);
- fputs(" InnoDB: Error: table ", stderr);
- ut_print_name(stderr, trx, TRUE, old_name);
- fputs(" does not have an .ibd file"
- " in the database directory.\n"
- "InnoDB: You can look for further help from\n"
- "InnoDB: " REFMAN "innodb-troubleshooting.html\n",
- stderr);
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Table %s does not have an .ibd file in the database "
+ "directory. See " REFMAN "innodb-troubleshooting.html",
+ old_name);
+
goto funct_exit;
+
} else if (new_is_tmp) {
/* MySQL is doing an ALTER TABLE command and it renames the
original table to a temporary table name. We want to preserve
@@ -4329,27 +4750,75 @@ row_rename_table_for_mysql(
}
}
+ /* Is a foreign key check running on this table? */
+ for (retry = 0; retry < 100
+ && table->n_foreign_key_checks_running > 0; ++retry) {
+ row_mysql_unlock_data_dictionary(trx);
+ os_thread_yield();
+ row_mysql_lock_data_dictionary(trx);
+ }
+
+ if (table->n_foreign_key_checks_running > 0) {
+ ut_print_timestamp(stderr);
+ fputs(" InnoDB: Error: in ALTER TABLE ", stderr);
+ ut_print_name(stderr, trx, TRUE, old_name);
+ fprintf(stderr, "\n"
+ "InnoDB: a FOREIGN KEY check is running.\n"
+ "InnoDB: Cannot rename table.\n");
+ err = DB_TABLE_IN_FK_CHECK;
+ goto funct_exit;
+ }
+
/* We use the private SQL parser of Innobase to generate the query
graphs needed in updating the dictionary data from system tables. */
info = pars_info_create();
pars_info_add_str_literal(info, "new_table_name", new_name);
-
pars_info_add_str_literal(info, "old_table_name", old_name);
err = que_eval_sql(info,
"PROCEDURE RENAME_TABLE () IS\n"
"BEGIN\n"
- "UPDATE SYS_TABLES SET NAME = :new_table_name\n"
+ "UPDATE SYS_TABLES"
+ " SET NAME = :new_table_name\n"
" WHERE NAME = :old_table_name;\n"
"END;\n"
, FALSE, trx);
- if (err != DB_SUCCESS) {
+ /* SYS_TABLESPACES and SYS_DATAFILES track non-system tablespaces
+ which have space IDs > 0. */
+ if (err == DB_SUCCESS
+ && table->space != TRX_SYS_SPACE
+ && !table->ibd_file_missing) {
+ /* Make a new pathname to update SYS_DATAFILES. */
+ char* new_path = row_make_new_pathname(table, new_name);
+
+ info = pars_info_create();
+ pars_info_add_str_literal(info, "new_table_name", new_name);
+ pars_info_add_str_literal(info, "new_path_name", new_path);
+ pars_info_add_int4_literal(info, "space_id", table->space);
+
+ err = que_eval_sql(info,
+ "PROCEDURE RENAME_SPACE () IS\n"
+ "BEGIN\n"
+ "UPDATE SYS_TABLESPACES"
+ " SET NAME = :new_table_name\n"
+ " WHERE SPACE = :space_id;\n"
+ "UPDATE SYS_DATAFILES"
+ " SET PATH = :new_path_name\n"
+ " WHERE SPACE = :space_id;\n"
+ "END;\n"
+ , FALSE, trx);
+
+ mem_free(new_path);
+ }
+ if (err != DB_SUCCESS) {
goto end;
- } else if (!new_is_tmp) {
+ }
+
+ if (!new_is_tmp) {
/* Rename all constraints. */
info = pars_info_create();
@@ -4486,12 +4955,12 @@ end:
/* The following call will also rename the .ibd data file if
the table is stored in a single-table tablespace */
- if (!dict_table_rename_in_cache(table, new_name,
- !new_is_tmp)) {
+ err = dict_table_rename_in_cache(
+ table, new_name, !new_is_tmp);
+ if (err != DB_SUCCESS) {
trx->error_state = DB_SUCCESS;
trx_rollback_to_savepoint(trx, NULL);
trx->error_state = DB_SUCCESS;
- err = DB_ERROR;
goto funct_exit;
}
@@ -4527,8 +4996,8 @@ end:
stderr);
}
- ut_a(dict_table_rename_in_cache(table,
- old_name, FALSE));
+ ut_a(DB_SUCCESS == dict_table_rename_in_cache(
+ table, old_name, FALSE));
trx->error_state = DB_SUCCESS;
trx_rollback_to_savepoint(trx, NULL);
trx->error_state = DB_SUCCESS;
@@ -4545,7 +5014,7 @@ end:
funct_exit:
if (table != NULL) {
- dict_table_close(table, dict_locked);
+ dict_table_close(table, dict_locked, FALSE);
}
if (commit) {
@@ -4565,9 +5034,9 @@ funct_exit:
Checks that the index contains entries in an ascending order, unique
constraint is not broken, and calculates the number of index entries
in the read view of the current transaction.
-@return TRUE if ok */
+@return true if ok */
UNIV_INTERN
-ibool
+bool
row_check_index_for_mysql(
/*======================*/
row_prebuilt_t* prebuilt, /*!< in: prebuilt struct
@@ -4582,7 +5051,7 @@ row_check_index_for_mysql(
byte* buf;
ulint ret;
rec_t* rec;
- ibool is_ok = TRUE;
+ bool is_ok = true;
int cmp;
ibool contains_null;
ulint i;
@@ -4595,10 +5064,20 @@ row_check_index_for_mysql(
*n_rows = 0;
- /* Full Text index are implemented by auxiliary tables,
- not the B-tree */
- if (index->type & DICT_FTS) {
- return(TRUE);
+ if (dict_index_is_clust(index)) {
+ /* The clustered index of a table is always available.
+ During online ALTER TABLE that rebuilds the table, the
+ clustered index in the old table will have
+ index->online_log pointing to the new table. All
+ indexes of the old table will remain valid and the new
+ table will be unaccessible to MySQL until the
+ completion of the ALTER TABLE. */
+ } else if (dict_index_is_online_ddl(index)
+ || (index->type & DICT_FTS)) {
+ /* Full Text index are implemented by auxiliary tables,
+ not the B-tree. We also skip secondary indexes that are
+ being created online. */
+ return(true);
}
buf = static_cast<byte*>(mem_alloc(UNIV_PAGE_SIZE));
@@ -4679,7 +5158,7 @@ not_ok:
"InnoDB: record ", stderr);
rec_print_new(stderr, rec, offsets);
putc('\n', stderr);
- is_ok = FALSE;
+ is_ok = false;
} else if (dict_index_is_unique(index)
&& !contains_null
&& matched_fields
@@ -4709,9 +5188,8 @@ not_ok:
mem_heap_empty(heap);
- prev_entry = row_rec_to_index_entry(ROW_COPY_DATA, rec,
- index, offsets,
- &n_ext, heap);
+ prev_entry = row_rec_to_index_entry(
+ rec, index, offsets, &n_ext, heap);
if (UNIV_LIKELY_NULL(tmp_heap)) {
mem_heap_free(tmp_heap);
@@ -4725,9 +5203,9 @@ not_ok:
/*********************************************************************//**
Determines if a table is a magic monitor table.
-@return TRUE if monitor table */
+@return true if monitor table */
UNIV_INTERN
-ibool
+bool
row_is_magic_monitor_table(
/*=======================*/
const char* table_name) /*!< in: name of the table, in the
@@ -4758,7 +5236,7 @@ row_mysql_init(void)
{
mutex_create(
row_drop_list_mutex_key,
- &row_drop_list_mutex, SYNC_NO_ORDER_CHECK);
+ &row_drop_list_mutex, SYNC_NO_ORDER_CHECK);
UT_LIST_INIT(row_mysql_drop_list);
diff --git a/storage/innobase/row/row0purge.cc b/storage/innobase/row/row0purge.cc
index ab28b396920..ee603be453a 100644
--- a/storage/innobase/row/row0purge.cc
+++ b/storage/innobase/row/row0purge.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1997, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1997, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -42,8 +42,10 @@ Created 3/14/1997 Heikki Tuuri
#include "row0upd.h"
#include "row0vers.h"
#include "row0mysql.h"
+#include "row0log.h"
#include "log0log.h"
#include "srv0mon.h"
+#include "srv0start.h"
/*************************************************************************
IMPORTANT NOTE: Any operation that generates redo MUST check that there
@@ -110,119 +112,134 @@ row_purge_reposition_pcur(
return(node->found_clust);
}
+/** Status of row_purge_remove_clust() */
+enum row_purge_status {
+ ROW_PURGE_DONE, /*!< The row has been removed. */
+ ROW_PURGE_FAIL, /*!< The purge was not successful. */
+ ROW_PURGE_SUSPEND/*!< Cannot purge now, due to online rebuild. */
+};
+
/***********************************************************//**
Removes a delete marked clustered index record if possible.
-@return TRUE if success, or if not found, or if modified after the
-delete marking */
-static
-ibool
+@retval ROW_PURGE_DONE if the row was not found, or it was successfully removed
+@retval ROW_PURGE_FAIL if the row was modified after the delete marking
+@retval ROW_PURGE_SUSPEND if the row refers to an off-page column and
+an online ALTER TABLE (table rebuild) is in progress. */
+static __attribute__((nonnull, warn_unused_result))
+enum row_purge_status
row_purge_remove_clust_if_poss_low(
/*===============================*/
- purge_node_t* node, /*!< in: row purge node */
+ purge_node_t* node, /*!< in/out: row purge node */
ulint mode) /*!< in: BTR_MODIFY_LEAF or BTR_MODIFY_TREE */
{
- dict_index_t* index;
- btr_pcur_t* pcur;
- btr_cur_t* btr_cur;
- ibool success;
- ulint err;
- mtr_t mtr;
- rec_t* rec;
- mem_heap_t* heap = NULL;
- ulint offsets_[REC_OFFS_NORMAL_SIZE];
+ dict_index_t* index;
+ enum row_purge_status status = ROW_PURGE_DONE;
+ mtr_t mtr;
+ rec_t* rec;
+ mem_heap_t* heap = NULL;
+ ulint* offsets;
+ ulint offsets_[REC_OFFS_NORMAL_SIZE];
rec_offs_init(offsets_);
- index = dict_table_get_first_index(node->table);
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_SHARED));
+#endif /* UNIV_SYNC_DEBUG */
- pcur = &node->pcur;
- btr_cur = btr_pcur_get_btr_cur(pcur);
+ index = dict_table_get_first_index(node->table);
log_free_check();
mtr_start(&mtr);
- success = row_purge_reposition_pcur(mode, node, &mtr);
-
- if (!success) {
- /* The record is already removed */
-
- btr_pcur_commit_specify_mtr(pcur, &mtr);
-
- return(TRUE);
+ if (!row_purge_reposition_pcur(mode, node, &mtr)) {
+ /* The record was already removed. */
+ goto func_exit;
}
- rec = btr_pcur_get_rec(pcur);
+ rec = btr_pcur_get_rec(&node->pcur);
- if (node->roll_ptr != row_get_rec_roll_ptr(
- rec, index, rec_get_offsets(rec, index, offsets_,
- ULINT_UNDEFINED, &heap))) {
- if (UNIV_LIKELY_NULL(heap)) {
- mem_heap_free(heap);
- }
- /* Someone else has modified the record later: do not remove */
- btr_pcur_commit_specify_mtr(pcur, &mtr);
+ offsets = rec_get_offsets(
+ rec, index, offsets_, ULINT_UNDEFINED, &heap);
- return(TRUE);
+ if (node->roll_ptr != row_get_rec_roll_ptr(rec, index, offsets)) {
+ /* Someone else has modified the record later: do not remove */
+ goto func_exit;
}
- if (UNIV_LIKELY_NULL(heap)) {
- mem_heap_free(heap);
+ if (dict_index_get_online_status(index) == ONLINE_INDEX_CREATION
+ && rec_offs_any_extern(offsets)) {
+ status = ROW_PURGE_SUSPEND;
+ goto func_exit;
}
if (mode == BTR_MODIFY_LEAF) {
- success = btr_cur_optimistic_delete(btr_cur, &mtr);
+ status = btr_cur_optimistic_delete(
+ btr_pcur_get_btr_cur(&node->pcur), 0, &mtr)
+ ? ROW_PURGE_DONE : ROW_PURGE_FAIL;
} else {
+ dberr_t err;
ut_ad(mode == BTR_MODIFY_TREE);
- btr_cur_pessimistic_delete(&err, FALSE, btr_cur,
- RB_NONE, &mtr);
+ btr_cur_pessimistic_delete(
+ &err, FALSE, btr_pcur_get_btr_cur(&node->pcur), 0,
+ RB_NONE, &mtr);
- if (err == DB_SUCCESS) {
- success = TRUE;
- } else if (err == DB_OUT_OF_FILE_SPACE) {
- success = FALSE;
- } else {
+ switch (err) {
+ case DB_SUCCESS:
+ break;
+ case DB_OUT_OF_FILE_SPACE:
+ status = ROW_PURGE_FAIL;
+ break;
+ default:
ut_error;
}
}
- btr_pcur_commit_specify_mtr(pcur, &mtr);
+func_exit:
+ if (heap) {
+ mem_heap_free(heap);
+ }
- return(success);
+ btr_pcur_commit_specify_mtr(&node->pcur, &mtr);
+
+ return(status);
}
/***********************************************************//**
Removes a clustered index record if it has not been modified after the delete
-marking. */
-static
-void
+marking.
+@retval true if the row was not found, or it was successfully removed
+@retval false the purge needs to be suspended, either because of
+running out of file space or because the row refers to an off-page
+column and an online ALTER TABLE (table rebuild) is in progress. */
+static __attribute__((nonnull, warn_unused_result))
+bool
row_purge_remove_clust_if_poss(
/*===========================*/
- purge_node_t* node) /*!< in: row purge node */
+ purge_node_t* node) /*!< in/out: row purge node */
{
- ibool success;
- ulint n_tries = 0;
-
- /* fputs("Purge: Removing clustered record\n", stderr); */
-
- success = row_purge_remove_clust_if_poss_low(node, BTR_MODIFY_LEAF);
- if (success) {
-
- return;
+ switch (row_purge_remove_clust_if_poss_low(node, BTR_MODIFY_LEAF)) {
+ case ROW_PURGE_DONE:
+ return(true);
+ case ROW_PURGE_SUSPEND:
+ return(false);
+ case ROW_PURGE_FAIL:
+ break;
}
-retry:
- success = row_purge_remove_clust_if_poss_low(node, BTR_MODIFY_TREE);
- /* The delete operation may fail if we have little
- file space left: TODO: easiest to crash the database
- and restart with more file space */
- if (!success && n_tries < BTR_CUR_RETRY_DELETE_N_TIMES) {
- n_tries++;
-
- os_thread_sleep(BTR_CUR_RETRY_SLEEP_TIME);
-
- goto retry;
+ for (ulint n_tries = 0;
+ n_tries < BTR_CUR_RETRY_DELETE_N_TIMES;
+ n_tries++) {
+ switch (row_purge_remove_clust_if_poss_low(
+ node, BTR_MODIFY_TREE)) {
+ case ROW_PURGE_DONE:
+ return(true);
+ case ROW_PURGE_SUSPEND:
+ return(false);
+ case ROW_PURGE_FAIL:
+ os_thread_sleep(BTR_CUR_RETRY_SLEEP_TIME);
+ }
}
- ut_a(success);
+ return(false);
}
/***********************************************************//**
@@ -234,21 +251,21 @@ is newer than the purge view.
NOTE: This function should only be called by the purge thread, only
while holding a latch on the leaf page of the secondary index entry
(or keeping the buffer pool watch on the page). It is possible that
-this function first returns TRUE and then FALSE, if a user transaction
+this function first returns true and then false, if a user transaction
inserts a record that the secondary index entry would refer to.
However, in that case, the user transaction would also re-insert the
secondary index entry after purge has removed it and released the leaf
page latch.
-@return TRUE if the secondary index record can be purged */
+@return true if the secondary index record can be purged */
UNIV_INTERN
-ibool
+bool
row_purge_poss_sec(
/*===============*/
purge_node_t* node, /*!< in/out: row purge node */
dict_index_t* index, /*!< in: secondary index */
const dtuple_t* entry) /*!< in: secondary index entry */
{
- ibool can_delete;
+ bool can_delete;
mtr_t mtr;
ut_ad(!dict_index_is_clust(index));
@@ -268,7 +285,7 @@ row_purge_poss_sec(
Removes a secondary index entry if possible, by modifying the
index tree. Does not try to buffer the delete.
@return TRUE if success or if not found */
-static
+static __attribute__((nonnull, warn_unused_result))
ibool
row_purge_remove_sec_if_poss_tree(
/*==============================*/
@@ -279,13 +296,35 @@ row_purge_remove_sec_if_poss_tree(
btr_pcur_t pcur;
btr_cur_t* btr_cur;
ibool success = TRUE;
- ulint err;
+ dberr_t err;
mtr_t mtr;
enum row_search_result search_result;
log_free_check();
mtr_start(&mtr);
+ if (*index->name == TEMP_INDEX_PREFIX) {
+ /* The index->online_status may change if the
+ index->name starts with TEMP_INDEX_PREFIX (meaning
+ that the index is or was being created online). It is
+ protected by index->lock. */
+ mtr_x_lock(dict_index_get_lock(index), &mtr);
+
+ if (dict_index_is_online_ddl(index)) {
+ /* Online secondary index creation will not
+ copy any delete-marked records. Therefore
+ there is nothing to be purged. We must also
+ skip the purge when a completed index is
+ dropped by rollback_inplace_alter_table(). */
+ goto func_exit_no_pcur;
+ }
+ } else {
+ /* For secondary indexes,
+ index->online_status==ONLINE_INDEX_CREATION unless
+ index->name starts with TEMP_INDEX_PREFIX. */
+ ut_ad(!dict_index_is_online_ddl(index));
+ }
+
search_result = row_search_index_entry(index, entry, BTR_MODIFY_TREE,
&pcur, &mtr);
@@ -327,7 +366,7 @@ row_purge_remove_sec_if_poss_tree(
& rec_get_info_bits(btr_cur_get_rec(btr_cur),
dict_table_is_comp(index->table)));
- btr_cur_pessimistic_delete(&err, FALSE, btr_cur,
+ btr_cur_pessimistic_delete(&err, FALSE, btr_cur, 0,
RB_NONE, &mtr);
switch (UNIV_EXPECT(err, DB_SUCCESS)) {
case DB_SUCCESS:
@@ -342,6 +381,7 @@ row_purge_remove_sec_if_poss_tree(
func_exit:
btr_pcur_close(&pcur);
+func_exit_no_pcur:
mtr_commit(&mtr);
return(success);
@@ -350,9 +390,10 @@ func_exit:
/***************************************************************
Removes a secondary index entry without modifying the index tree,
if possible.
-@return TRUE if success or if not found */
-static
-ibool
+@retval true if success or if not found
+@retval false if row_purge_remove_sec_if_poss_tree() should be invoked */
+static __attribute__((nonnull, warn_unused_result))
+bool
row_purge_remove_sec_if_poss_leaf(
/*==============================*/
purge_node_t* node, /*!< in: row purge node */
@@ -361,12 +402,40 @@ row_purge_remove_sec_if_poss_leaf(
{
mtr_t mtr;
btr_pcur_t pcur;
+ ulint mode;
enum row_search_result search_result;
+ bool success = true;
log_free_check();
mtr_start(&mtr);
+ if (*index->name == TEMP_INDEX_PREFIX) {
+ /* The index->online_status may change if the
+ index->name starts with TEMP_INDEX_PREFIX (meaning
+ that the index is or was being created online). It is
+ protected by index->lock. */
+ mtr_s_lock(dict_index_get_lock(index), &mtr);
+
+ if (dict_index_is_online_ddl(index)) {
+ /* Online secondary index creation will not
+ copy any delete-marked records. Therefore
+ there is nothing to be purged. We must also
+ skip the purge when a completed index is
+ dropped by rollback_inplace_alter_table(). */
+ goto func_exit_no_pcur;
+ }
+
+ mode = BTR_MODIFY_LEAF | BTR_ALREADY_S_LATCHED | BTR_DELETE;
+ } else {
+ /* For secondary indexes,
+ index->online_status==ONLINE_INDEX_CREATION unless
+ index->name starts with TEMP_INDEX_PREFIX. */
+ ut_ad(!dict_index_is_online_ddl(index));
+
+ mode = BTR_MODIFY_LEAF | BTR_DELETE;
+ }
+
/* Set the purge node for the call to row_purge_poss_sec(). */
pcur.btr_cur.purge_node = node;
/* Set the query thread, so that ibuf_insert_low() will be
@@ -374,10 +443,9 @@ row_purge_remove_sec_if_poss_leaf(
pcur.btr_cur.thr = static_cast<que_thr_t*>(que_node_get_parent(node));
search_result = row_search_index_entry(
- index, entry, BTR_MODIFY_LEAF | BTR_DELETE, &pcur, &mtr);
+ index, entry, mode, &pcur, &mtr);
switch (search_result) {
- ibool success;
case ROW_FOUND:
/* Before attempting to purge a record, check
if it is safe to do so. */
@@ -390,11 +458,10 @@ row_purge_remove_sec_if_poss_leaf(
btr_cur_get_rec(btr_cur),
dict_table_is_comp(index->table)));
- if (!btr_cur_optimistic_delete(btr_cur, &mtr)) {
+ if (!btr_cur_optimistic_delete(btr_cur, 0, &mtr)) {
/* The index entry could not be deleted. */
- success = FALSE;
- goto func_exit;
+ success = false;
}
}
/* fall through (the index entry is still needed,
@@ -405,9 +472,8 @@ row_purge_remove_sec_if_poss_leaf(
/* The deletion was buffered. */
case ROW_NOT_FOUND:
/* The index entry does not exist, nothing to do. */
- success = TRUE;
- func_exit:
btr_pcur_close(&pcur);
+ func_exit_no_pcur:
mtr_commit(&mtr);
return(success);
}
@@ -418,19 +484,26 @@ row_purge_remove_sec_if_poss_leaf(
/***********************************************************//**
Removes a secondary index entry if possible. */
-UNIV_INLINE
+UNIV_INLINE __attribute__((nonnull(1,2)))
void
row_purge_remove_sec_if_poss(
/*=========================*/
purge_node_t* node, /*!< in: row purge node */
dict_index_t* index, /*!< in: index */
- dtuple_t* entry) /*!< in: index entry */
+ const dtuple_t* entry) /*!< in: index entry */
{
ibool success;
ulint n_tries = 0;
/* fputs("Purge: Removing secondary record\n", stderr); */
+ if (!entry) {
+ /* The node->row must have lacked some fields of this
+ index. This is possible when the undo log record was
+ written before this index was created. */
+ return;
+ }
+
if (row_purge_remove_sec_if_poss_leaf(node, index, entry)) {
return;
@@ -454,18 +527,18 @@ retry:
}
/***********************************************************//**
-Purges a delete marking of a record. */
-static
-void
+Purges a delete marking of a record.
+@retval true if the row was not found, or it was successfully removed
+@retval false the purge needs to be suspended, either because of
+running out of file space or because the row refers to an off-page
+column and an online ALTER TABLE (table rebuild) is in progress. */
+static __attribute__((nonnull, warn_unused_result))
+bool
row_purge_del_mark(
/*===============*/
- purge_node_t* node) /*!< in: row purge node */
+ purge_node_t* node) /*!< in/out: row purge node */
{
mem_heap_t* heap;
- dtuple_t* entry;
- dict_index_t* index;
-
- ut_ad(node);
heap = mem_heap_create(1024);
@@ -477,13 +550,11 @@ row_purge_del_mark(
break;
}
- index = node->index;
-
if (node->index->type != DICT_FTS) {
- /* Build the index entry */
- entry = row_build_index_entry(node->row, NULL, index, heap);
- ut_a(entry);
- row_purge_remove_sec_if_poss(node, index, entry);
+ dtuple_t* entry = row_build_index_entry_low(
+ node->row, NULL, node->index, heap);
+ row_purge_remove_sec_if_poss(node, node->index, entry);
+ mem_heap_empty(heap);
}
node->index = dict_table_get_next_index(node->index);
@@ -491,14 +562,15 @@ row_purge_del_mark(
mem_heap_free(heap);
- row_purge_remove_clust_if_poss(node);
+ return(row_purge_remove_clust_if_poss(node));
}
/***********************************************************//**
Purges an update of an existing record. Also purges an update of a delete
-marked record if that record contained an externally stored field. */
-static
-void
+marked record if that record contained an externally stored field.
+@return true if purged, false if skipped */
+static __attribute__((nonnull, warn_unused_result))
+bool
row_purge_upd_exist_or_extern_func(
/*===============================*/
#ifdef UNIV_DEBUG
@@ -508,16 +580,24 @@ row_purge_upd_exist_or_extern_func(
trx_undo_rec_t* undo_rec) /*!< in: record to purge */
{
mem_heap_t* heap;
- dtuple_t* entry;
- dict_index_t* index;
- ibool is_insert;
- ulint rseg_id;
- ulint page_no;
- ulint offset;
- ulint i;
- mtr_t mtr;
- ut_ad(node);
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_SHARED));
+#endif /* UNIV_SYNC_DEBUG */
+
+ if (dict_index_get_online_status(dict_table_get_first_index(
+ node->table))
+ == ONLINE_INDEX_CREATION) {
+ for (ulint i = 0; i < upd_get_n_fields(node->update); i++) {
+
+ const upd_field_t* ufield
+ = upd_get_nth_field(node->update, i);
+
+ if (dfield_is_ext(&ufield->new_val)) {
+ return(false);
+ }
+ }
+ }
if (node->rec_type == TRX_UNDO_UPD_DEL_REC
|| (node->cmpl_info & UPD_NODE_NO_ORD_CHANGE)) {
@@ -534,15 +614,13 @@ row_purge_upd_exist_or_extern_func(
break;
}
- index = node->index;
-
if (row_upd_changes_ord_field_binary(node->index, node->update,
thr, NULL, NULL)) {
/* Build the older version of the index entry */
- entry = row_build_index_entry(node->row, NULL,
- index, heap);
- ut_a(entry);
- row_purge_remove_sec_if_poss(node, index, entry);
+ dtuple_t* entry = row_build_index_entry_low(
+ node->row, NULL, node->index, heap);
+ row_purge_remove_sec_if_poss(node, node->index, entry);
+ mem_heap_empty(heap);
}
node->index = dict_table_get_next_index(node->index);
@@ -552,7 +630,7 @@ row_purge_upd_exist_or_extern_func(
skip_secondaries:
/* Free possible externally stored fields */
- for (i = 0; i < upd_get_n_fields(node->update); i++) {
+ for (ulint i = 0; i < upd_get_n_fields(node->update); i++) {
const upd_field_t* ufield
= upd_get_nth_field(node->update, i);
@@ -562,6 +640,12 @@ skip_secondaries:
buf_block_t* block;
ulint internal_offset;
byte* data_field;
+ dict_index_t* index;
+ ibool is_insert;
+ ulint rseg_id;
+ ulint page_no;
+ ulint offset;
+ mtr_t mtr;
/* We use the fact that new_val points to
undo_rec and get thus the offset of
@@ -590,9 +674,17 @@ skip_secondaries:
index tree */
index = dict_table_get_first_index(node->table);
-
mtr_x_lock(dict_index_get_lock(index), &mtr);
-
+#ifdef UNIV_DEBUG
+ switch (dict_index_get_online_status(index)) {
+ case ONLINE_INDEX_CREATION:
+ case ONLINE_INDEX_ABORTED_DROPPED:
+ ut_ad(0);
+ case ONLINE_INDEX_COMPLETE:
+ case ONLINE_INDEX_ABORTED:
+ break;
+ }
+#endif /* UNIV_DEBUG */
/* NOTE: we must also acquire an X-latch to the
root page of the tree. We will need it when we
free pages from the tree. If the tree is of height 1,
@@ -622,6 +714,8 @@ skip_secondaries:
mtr_commit(&mtr);
}
}
+
+ return(true);
}
#ifdef UNIV_DEBUG
@@ -634,14 +728,14 @@ skip_secondaries:
/***********************************************************//**
Parses the row reference and other info in a modify undo log record.
-@return TRUE if purge operation required */
+@return true if purge operation required */
static
-ibool
+bool
row_purge_parse_undo_rec(
/*=====================*/
purge_node_t* node, /*!< in: row undo node */
trx_undo_rec_t* undo_rec, /*!< in: record to purge */
- ibool* updated_extern, /*!< out: TRUE if an externally
+ bool* updated_extern, /*!< out: true if an externally
stored field was updated */
que_thr_t* thr) /*!< in: query thread */
{
@@ -665,40 +759,29 @@ row_purge_parse_undo_rec(
if (type == TRX_UNDO_UPD_DEL_REC && !*updated_extern) {
- return(FALSE);
+ return(false);
}
ptr = trx_undo_update_rec_get_sys_cols(ptr, &trx_id, &roll_ptr,
&info_bits);
node->table = NULL;
- if (type == TRX_UNDO_UPD_EXIST_REC
- && node->cmpl_info & UPD_NODE_NO_ORD_CHANGE
- && !(*updated_extern)) {
-
- /* Purge requires no changes to indexes: we may return */
-
- return(FALSE);
- }
-
/* Prevent DROP TABLE etc. from running when we are doing the purge
for this row */
- rw_lock_s_lock_func(&dict_operation_lock, 0, __FILE__, __LINE__);
+ rw_lock_s_lock_inline(&dict_operation_lock, 0, __FILE__, __LINE__);
- node->table = dict_table_open_on_id(table_id, FALSE);
+ node->table = dict_table_open_on_id(table_id, FALSE, FALSE);
if (node->table == NULL) {
-err_exit:
/* The table has been dropped: no need to do purge */
- rw_lock_s_unlock_gen(&dict_operation_lock, 0);
- return(FALSE);
+ goto err_exit;
}
if (node->table->ibd_file_missing) {
/* We skip purge of missing .ibd files */
- dict_table_close(node->table, FALSE);
+ dict_table_close(node->table, FALSE, FALSE);
node->table = NULL;
@@ -708,12 +791,22 @@ err_exit:
clust_index = dict_table_get_first_index(node->table);
if (clust_index == NULL) {
+ /* The table was corrupt in the data dictionary.
+ dict_set_corrupted() works on an index, and
+ we do not have an index to call it with. */
+close_exit:
+ dict_table_close(node->table, FALSE, FALSE);
+err_exit:
+ rw_lock_s_unlock(&dict_operation_lock);
+ return(false);
+ }
- dict_table_close(node->table, FALSE);
-
- /* The table was corrupt in the data dictionary */
+ if (type == TRX_UNDO_UPD_EXIST_REC
+ && (node->cmpl_info & UPD_NODE_NO_ORD_CHANGE)
+ && !*updated_extern) {
- goto err_exit;
+ /* Purge requires no changes to indexes: we may return */
+ goto close_exit;
}
ptr = trx_undo_rec_get_row_ref(ptr, clust_index, &(node->ref),
@@ -734,13 +827,14 @@ err_exit:
node->heap);
}
- return(TRUE);
+ return(true);
}
/***********************************************************//**
-Purges the parsed record. */
-static
-void
+Purges the parsed record.
+@return true if purged, false if skipped */
+static __attribute__((nonnull, warn_unused_result))
+bool
row_purge_record_func(
/*==================*/
purge_node_t* node, /*!< in: row purge node */
@@ -748,10 +842,11 @@ row_purge_record_func(
#ifdef UNIV_DEBUG
const que_thr_t*thr, /*!< in: query thread */
#endif /* UNIV_DEBUG */
- ibool updated_extern) /*!< in: TRUE if external columns
+ bool updated_extern) /*!< in: whether external columns
were updated */
{
dict_index_t* clust_index;
+ bool purged = true;
clust_index = dict_table_get_first_index(node->table);
@@ -759,7 +854,10 @@ row_purge_record_func(
switch (node->rec_type) {
case TRX_UNDO_DEL_MARK_REC:
- row_purge_del_mark(node);
+ purged = row_purge_del_mark(node);
+ if (!purged) {
+ break;
+ }
MONITOR_INC(MONITOR_N_DEL_ROW_PURGE);
break;
default:
@@ -768,20 +866,25 @@ row_purge_record_func(
}
/* fall through */
case TRX_UNDO_UPD_EXIST_REC:
- row_purge_upd_exist_or_extern(thr, node, undo_rec);
+ purged = row_purge_upd_exist_or_extern(thr, node, undo_rec);
+ if (!purged) {
+ break;
+ }
MONITOR_INC(MONITOR_N_UPD_EXIST_EXTERN);
break;
}
if (node->found_clust) {
btr_pcur_close(&node->pcur);
+ node->found_clust = FALSE;
}
if (node->table != NULL) {
- dict_table_close(node->table, FALSE);
+ dict_table_close(node->table, FALSE, FALSE);
node->table = NULL;
}
+ return(purged);
}
#ifdef UNIV_DEBUG
@@ -804,18 +907,24 @@ row_purge(
trx_undo_rec_t* undo_rec, /*!< in: record to purge */
que_thr_t* thr) /*!< in: query thread */
{
- ut_ad(node);
- ut_ad(thr);
-
if (undo_rec != &trx_purge_dummy_rec) {
- ibool updated_extern;
+ bool updated_extern;
- if (row_purge_parse_undo_rec(
- node, undo_rec, &updated_extern, thr)) {
+ while (row_purge_parse_undo_rec(
+ node, undo_rec, &updated_extern, thr)) {
- row_purge_record(node, undo_rec, thr, updated_extern);
+ bool purged = row_purge_record(
+ node, undo_rec, thr, updated_extern);
+
+ rw_lock_s_unlock(&dict_operation_lock);
+
+ if (purged
+ || srv_shutdown_state != SRV_SHUTDOWN_NONE) {
+ return;
+ }
- rw_lock_s_unlock_gen(&dict_operation_lock, 0);
+ /* Retry the purge in a second. */
+ os_thread_sleep(1000000);
}
}
}
diff --git a/storage/innobase/row/row0quiesce.cc b/storage/innobase/row/row0quiesce.cc
new file mode 100644
index 00000000000..72e0bf43d77
--- /dev/null
+++ b/storage/innobase/row/row0quiesce.cc
@@ -0,0 +1,702 @@
+/*****************************************************************************
+
+Copyright (c) 2012, Oracle and/or its affiliates. All Rights Reserved.
+
+This program is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free Software
+Foundation; version 2 of the License.
+
+This program is distributed in the hope that it will be useful, but WITHOUT
+ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License along with
+this program; if not, write to the Free Software Foundation, Inc.,
+51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
+
+*****************************************************************************/
+
+/**************************************************//**
+@file row/row0quiesce.cc
+Quiesce a tablespace.
+
+Created 2012-02-08 by Sunny Bains.
+*******************************************************/
+
+#include "row0quiesce.h"
+#include "row0mysql.h"
+
+#ifdef UNIV_NONINL
+#include "row0quiesce.ic"
+#endif
+
+#include "ibuf0ibuf.h"
+#include "srv0start.h"
+#include "trx0purge.h"
+
+/*********************************************************************//**
+Write the meta data (index user fields) config file.
+@return DB_SUCCESS or error code. */
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
+row_quiesce_write_index_fields(
+/*===========================*/
+ const dict_index_t* index, /*!< in: write the meta data for
+ this index */
+ FILE* file, /*!< in: file to write to */
+ THD* thd) /*!< in/out: session */
+{
+ byte row[sizeof(ib_uint32_t) * 2];
+
+ for (ulint i = 0; i < index->n_fields; ++i) {
+ byte* ptr = row;
+ const dict_field_t* field = &index->fields[i];
+
+ mach_write_to_4(ptr, field->prefix_len);
+ ptr += sizeof(ib_uint32_t);
+
+ mach_write_to_4(ptr, field->fixed_len);
+
+ DBUG_EXECUTE_IF("ib_export_io_write_failure_9",
+ close(fileno(file)););
+
+ if (fwrite(row, 1, sizeof(row), file) != sizeof(row)) {
+
+ ib_senderrf(
+ thd, IB_LOG_LEVEL_WARN, ER_IO_WRITE_ERROR,
+ errno, strerror(errno),
+ "while writing index fields.");
+
+ return(DB_IO_ERROR);
+ }
+
+ /* Include the NUL byte in the length. */
+ ib_uint32_t len = strlen(field->name) + 1;
+ ut_a(len > 1);
+
+ mach_write_to_4(row, len);
+
+ DBUG_EXECUTE_IF("ib_export_io_write_failure_10",
+ close(fileno(file)););
+
+ if (fwrite(row, 1, sizeof(len), file) != sizeof(len)
+ || fwrite(field->name, 1, len, file) != len) {
+
+ ib_senderrf(
+ thd, IB_LOG_LEVEL_WARN, ER_IO_WRITE_ERROR,
+ errno, strerror(errno),
+ "while writing index column.");
+
+ return(DB_IO_ERROR);
+ }
+ }
+
+ return(DB_SUCCESS);
+}
+
+/*********************************************************************//**
+Write the meta data config file index information.
+@return DB_SUCCESS or error code. */
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
+row_quiesce_write_indexes(
+/*======================*/
+ const dict_table_t* table, /*!< in: write the meta data for
+ this table */
+ FILE* file, /*!< in: file to write to */
+ THD* thd) /*!< in/out: session */
+{
+ {
+ byte row[sizeof(ib_uint32_t)];
+
+ /* Write the number of indexes in the table. */
+ mach_write_to_4(row, UT_LIST_GET_LEN(table->indexes));
+
+ DBUG_EXECUTE_IF("ib_export_io_write_failure_11",
+ close(fileno(file)););
+
+ if (fwrite(row, 1, sizeof(row), file) != sizeof(row)) {
+ ib_senderrf(
+ thd, IB_LOG_LEVEL_WARN, ER_IO_WRITE_ERROR,
+ errno, strerror(errno),
+ "while writing index count.");
+
+ return(DB_IO_ERROR);
+ }
+ }
+
+ dberr_t err = DB_SUCCESS;
+
+ /* Write the index meta data. */
+ for (const dict_index_t* index = UT_LIST_GET_FIRST(table->indexes);
+ index != 0 && err == DB_SUCCESS;
+ index = UT_LIST_GET_NEXT(indexes, index)) {
+
+ byte* ptr;
+ byte row[sizeof(index_id_t)
+ + sizeof(ib_uint32_t) * 8];
+
+ ptr = row;
+
+ ut_ad(sizeof(index_id_t) == 8);
+ mach_write_to_8(ptr, index->id);
+ ptr += sizeof(index_id_t);
+
+ mach_write_to_4(ptr, index->space);
+ ptr += sizeof(ib_uint32_t);
+
+ mach_write_to_4(ptr, index->page);
+ ptr += sizeof(ib_uint32_t);
+
+ mach_write_to_4(ptr, index->type);
+ ptr += sizeof(ib_uint32_t);
+
+ mach_write_to_4(ptr, index->trx_id_offset);
+ ptr += sizeof(ib_uint32_t);
+
+ mach_write_to_4(ptr, index->n_user_defined_cols);
+ ptr += sizeof(ib_uint32_t);
+
+ mach_write_to_4(ptr, index->n_uniq);
+ ptr += sizeof(ib_uint32_t);
+
+ mach_write_to_4(ptr, index->n_nullable);
+ ptr += sizeof(ib_uint32_t);
+
+ mach_write_to_4(ptr, index->n_fields);
+
+ DBUG_EXECUTE_IF("ib_export_io_write_failure_12",
+ close(fileno(file)););
+
+ if (fwrite(row, 1, sizeof(row), file) != sizeof(row)) {
+
+ ib_senderrf(
+ thd, IB_LOG_LEVEL_WARN, ER_IO_WRITE_ERROR,
+ errno, strerror(errno),
+ "while writing index meta-data.");
+
+ return(DB_IO_ERROR);
+ }
+
+ /* Write the length of the index name.
+ NUL byte is included in the length. */
+ ib_uint32_t len = strlen(index->name) + 1;
+ ut_a(len > 1);
+
+ mach_write_to_4(row, len);
+
+ DBUG_EXECUTE_IF("ib_export_io_write_failure_1",
+ close(fileno(file)););
+
+ if (fwrite(row, 1, sizeof(len), file) != sizeof(len)
+ || fwrite(index->name, 1, len, file) != len) {
+
+ ib_senderrf(
+ thd, IB_LOG_LEVEL_WARN, ER_IO_WRITE_ERROR,
+ errno, strerror(errno),
+ "while writing index name.");
+
+ return(DB_IO_ERROR);
+ }
+
+ err = row_quiesce_write_index_fields(index, file, thd);
+ }
+
+ return(err);
+}
+
+/*********************************************************************//**
+Write the meta data (table columns) config file. Serialise the contents of
+dict_col_t structure, along with the column name. All fields are serialized
+as ib_uint32_t.
+@return DB_SUCCESS or error code. */
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
+row_quiesce_write_table(
+/*====================*/
+ const dict_table_t* table, /*!< in: write the meta data for
+ this table */
+ FILE* file, /*!< in: file to write to */
+ THD* thd) /*!< in/out: session */
+{
+ dict_col_t* col;
+ byte row[sizeof(ib_uint32_t) * 7];
+
+ col = table->cols;
+
+ for (ulint i = 0; i < table->n_cols; ++i, ++col) {
+ byte* ptr = row;
+
+ mach_write_to_4(ptr, col->prtype);
+ ptr += sizeof(ib_uint32_t);
+
+ mach_write_to_4(ptr, col->mtype);
+ ptr += sizeof(ib_uint32_t);
+
+ mach_write_to_4(ptr, col->len);
+ ptr += sizeof(ib_uint32_t);
+
+ mach_write_to_4(ptr, col->mbminmaxlen);
+ ptr += sizeof(ib_uint32_t);
+
+ mach_write_to_4(ptr, col->ind);
+ ptr += sizeof(ib_uint32_t);
+
+ mach_write_to_4(ptr, col->ord_part);
+ ptr += sizeof(ib_uint32_t);
+
+ mach_write_to_4(ptr, col->max_prefix);
+
+ DBUG_EXECUTE_IF("ib_export_io_write_failure_2",
+ close(fileno(file)););
+
+ if (fwrite(row, 1, sizeof(row), file) != sizeof(row)) {
+ ib_senderrf(
+ thd, IB_LOG_LEVEL_WARN, ER_IO_WRITE_ERROR,
+ errno, strerror(errno),
+ "while writing table column data.");
+
+ return(DB_IO_ERROR);
+ }
+
+ /* Write out the column name as [len, byte array]. The len
+ includes the NUL byte. */
+ ib_uint32_t len;
+ const char* col_name;
+
+ col_name = dict_table_get_col_name(table, dict_col_get_no(col));
+
+ /* Include the NUL byte in the length. */
+ len = strlen(col_name) + 1;
+ ut_a(len > 1);
+
+ mach_write_to_4(row, len);
+
+ DBUG_EXECUTE_IF("ib_export_io_write_failure_3",
+ close(fileno(file)););
+
+ if (fwrite(row, 1, sizeof(len), file) != sizeof(len)
+ || fwrite(col_name, 1, len, file) != len) {
+
+ ib_senderrf(
+ thd, IB_LOG_LEVEL_WARN, ER_IO_WRITE_ERROR,
+ errno, strerror(errno),
+ "while writing column name.");
+
+ return(DB_IO_ERROR);
+ }
+ }
+
+ return(DB_SUCCESS);
+}
+
+/*********************************************************************//**
+Write the meta data config file header.
+Layout written, in order: [version:4] [hostname len:4] [hostname incl. NUL]
+[table name len:4] [table name incl. NUL] [autoinc:8]
+[page size:4] [table flags:4] [n_cols:4]; all integers are serialized
+with mach_write_to_*() (InnoDB's canonical byte order).
+@return DB_SUCCESS or error code. */
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
+row_quiesce_write_header(
+/*=====================*/
+	const dict_table_t*	table,	/*!< in: write the meta data for
+					this table */
+	FILE*			file,	/*!< in: file to write to */
+	THD*			thd)	/*!< in/out: session */
+{
+	byte		value[sizeof(ib_uint32_t)];
+
+	/* Write the meta-data version number. */
+	mach_write_to_4(value, IB_EXPORT_CFG_VERSION_V1);
+
+	/* Fault injection point: closing the underlying descriptor
+	forces the following fwrite() to fail. */
+	DBUG_EXECUTE_IF("ib_export_io_write_failure_4", close(fileno(file)););
+
+	if (fwrite(&value, 1, sizeof(value), file) != sizeof(value)) {
+		ib_senderrf(
+			thd, IB_LOG_LEVEL_WARN, ER_IO_WRITE_ERROR,
+			errno, strerror(errno),
+			"while writing meta-data version number.");
+
+		return(DB_IO_ERROR);
+	}
+
+	/* Write the server hostname. */
+	ib_uint32_t	len;
+	const char*	hostname = server_get_hostname();
+
+	/* Play it safe and check for NULL. */
+	if (hostname == 0) {
+		static const char	NullHostname[] = "Hostname unknown";
+
+		ib_logf(IB_LOG_LEVEL_WARN,
+			"Unable to determine server hostname.");
+
+		hostname = NullHostname;
+	}
+
+	/* The server hostname includes the NUL byte. */
+	len = strlen(hostname) + 1;
+	mach_write_to_4(value, len);
+
+	DBUG_EXECUTE_IF("ib_export_io_write_failure_5", close(fileno(file)););
+
+	if (fwrite(&value, 1, sizeof(value), file) != sizeof(value)
+	    || fwrite(hostname, 1, len, file) != len) {
+
+		ib_senderrf(
+			thd, IB_LOG_LEVEL_WARN, ER_IO_WRITE_ERROR,
+			errno, strerror(errno),
+			"while writing hostname.");
+
+		return(DB_IO_ERROR);
+	}
+
+	/* The table name includes the NUL byte. */
+	ut_a(table->name != 0);
+	len = strlen(table->name) + 1;
+
+	/* Write the table name. */
+	mach_write_to_4(value, len);
+
+	DBUG_EXECUTE_IF("ib_export_io_write_failure_6", close(fileno(file)););
+
+	if (fwrite(&value, 1, sizeof(value), file) != sizeof(value)
+	    || fwrite(table->name, 1, len, file) != len) {
+
+		ib_senderrf(
+			thd, IB_LOG_LEVEL_WARN, ER_IO_WRITE_ERROR,
+			errno, strerror(errno),
+			"while writing table name.");
+
+		return(DB_IO_ERROR);
+	}
+
+	/* 12-byte scratch buffer: sized for the three 4-byte values
+	written at the end, and also large enough for the 8-byte
+	autoinc value written first. */
+	byte	row[sizeof(ib_uint32_t) * 3];
+
+	/* Write the next autoinc value. */
+	mach_write_to_8(row, table->autoinc);
+
+	DBUG_EXECUTE_IF("ib_export_io_write_failure_7", close(fileno(file)););
+
+	if (fwrite(row, 1, sizeof(ib_uint64_t), file) != sizeof(ib_uint64_t)) {
+		ib_senderrf(
+			thd, IB_LOG_LEVEL_WARN, ER_IO_WRITE_ERROR,
+			errno, strerror(errno),
+			"while writing table autoinc value.");
+
+		return(DB_IO_ERROR);
+	}
+
+	byte*		ptr = row;
+
+	/* Write the system page size. */
+	mach_write_to_4(ptr, UNIV_PAGE_SIZE);
+	ptr += sizeof(ib_uint32_t);
+
+	/* Write the table->flags. */
+	mach_write_to_4(ptr, table->flags);
+	ptr += sizeof(ib_uint32_t);
+
+	/* Write the number of columns in the table. */
+	mach_write_to_4(ptr, table->n_cols);
+
+	DBUG_EXECUTE_IF("ib_export_io_write_failure_8", close(fileno(file)););
+
+	/* The three values above are flushed as one 12-byte record. */
+	if (fwrite(row, 1, sizeof(row), file) != sizeof(row)) {
+		ib_senderrf(
+			thd, IB_LOG_LEVEL_WARN, ER_IO_WRITE_ERROR,
+			errno, strerror(errno),
+			"while writing table meta-data.");
+
+		return(DB_IO_ERROR);
+	}
+
+	return(DB_SUCCESS);
+}
+
+/*********************************************************************//**
+Write the table meta data after quiesce. Creates the .cfg meta-data
+file and writes the header, column and index sections into it in order,
+stopping at the first error. fflush()/fclose() failures are reported
+as warnings to the client but do not change the returned error code.
+@return DB_SUCCESS or error code. */
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
+row_quiesce_write_cfg(
+/*==================*/
+	dict_table_t*	table,	/*!< in: write the meta data for
+				this table */
+	THD*		thd)	/*!< in/out: session */
+{
+	dberr_t	err;
+	char	name[OS_FILE_MAX_PATH];
+
+	srv_get_meta_data_filename(table, name, sizeof(name));
+
+	ib_logf(IB_LOG_LEVEL_INFO, "Writing table metadata to '%s'", name);
+
+	FILE*	file = fopen(name, "w+b");
+
+	if (file == NULL) {
+		ib_errf(thd, IB_LOG_LEVEL_WARN, ER_CANT_CREATE_FILE,
+			name, errno, strerror(errno));
+
+		err = DB_IO_ERROR;
+	} else {
+		/* Write the three sections in order; skip the later
+		sections as soon as one of them fails. */
+		err = row_quiesce_write_header(table, file, thd);
+
+		if (err == DB_SUCCESS) {
+			err = row_quiesce_write_table(table, file, thd);
+		}
+
+		if (err == DB_SUCCESS) {
+			err = row_quiesce_write_indexes(table, file, thd);
+		}
+
+		if (fflush(file) != 0) {
+
+			char	msg[BUFSIZ];
+
+			ut_snprintf(msg, sizeof(msg), "%s flush() failed",
+				    name);
+
+			ib_senderrf(
+				thd, IB_LOG_LEVEL_WARN, ER_IO_WRITE_ERROR,
+				errno, strerror(errno), msg);
+		}
+
+		if (fclose(file) != 0) {
+			char	msg[BUFSIZ];
+
+			/* Fixed typo: message previously said "flose()". */
+			ut_snprintf(msg, sizeof(msg), "%s fclose() failed",
+				    name);
+
+			ib_senderrf(
+				thd, IB_LOG_LEVEL_WARN, ER_IO_WRITE_ERROR,
+				errno, strerror(errno), msg);
+		}
+	}
+
+	return(err);
+}
+
+/*********************************************************************//**
+Check whether a table has an FTS index defined on it.
+@return true if an FTS index exists on the table */
+static
+bool
+row_quiesce_table_has_fts_index(
+/*============================*/
+	const dict_table_t*	table)	/*!< in: quiesce this table */
+{
+	bool			found = false;
+	const dict_index_t*	index;
+
+	/* The dictionary mutex protects the table's index list
+	while we walk it. */
+	dict_mutex_enter_for_mysql();
+
+	index = UT_LIST_GET_FIRST(table->indexes);
+
+	while (index != 0 && !found) {
+		if (index->type & DICT_FTS) {
+			found = true;
+		} else {
+			index = UT_LIST_GET_NEXT(indexes, index);
+		}
+	}
+
+	dict_mutex_exit_for_mysql();
+
+	return(found);
+}
+
+/*********************************************************************//**
+Quiesce the tablespace that the table resides in: stop purge, merge any
+pending change buffer entries for the table, flush its pages to disk and
+write the .cfg meta-data file, then mark the table QUIESCE_COMPLETE. */
+UNIV_INTERN
+void
+row_quiesce_table_start(
+/*====================*/
+	dict_table_t*	table,		/*!< in: quiesce this table */
+	trx_t*		trx)		/*!< in/out: transaction/session */
+{
+	/* Removed a duplicated ut_a(trx->mysql_thd != 0) that appeared
+	twice in this function; one assertion suffices. */
+	ut_a(trx->mysql_thd != 0);
+	ut_a(srv_n_purge_threads > 0);
+	ut_ad(!srv_read_only_mode);
+
+	char	table_name[MAX_FULL_NAME_LEN + 1];
+
+	innobase_format_name(
+		table_name, sizeof(table_name), table->name, FALSE);
+
+	ib_logf(IB_LOG_LEVEL_INFO,
+		"Sync to disk of '%s' started.", table_name);
+
+	/* Stop purge so that no old row versions are removed while
+	the tablespace is being exported. */
+	if (trx_purge_state() != PURGE_STATE_DISABLED) {
+		trx_purge_stop();
+	}
+
+	ut_a(table->id > 0);
+
+	ulint	count = 0;
+
+	/* Merge buffered change buffer entries for this table; log
+	progress every 20 iterations so long merges are visible. */
+	while (ibuf_contract_in_background(table->id, TRUE) != 0) {
+		if (!(++count % 20)) {
+			ib_logf(IB_LOG_LEVEL_INFO,
+				"Merging change buffer entries for '%s'",
+				table_name);
+		}
+	}
+
+	if (!trx_is_interrupted(trx)) {
+		buf_LRU_flush_or_remove_pages(
+			table->space, BUF_REMOVE_FLUSH_WRITE, trx);
+
+		/* The flush may take long; re-check for interruption
+		before writing the meta-data file. */
+		if (trx_is_interrupted(trx)) {
+
+			ib_logf(IB_LOG_LEVEL_WARN, "Quiesce aborted!");
+
+		} else if (row_quiesce_write_cfg(table, trx->mysql_thd)
+			   != DB_SUCCESS) {
+
+			ib_logf(IB_LOG_LEVEL_WARN,
+				"There was an error writing to the "
+				"meta data file");
+		} else {
+			ib_logf(IB_LOG_LEVEL_INFO,
+				"Table '%s' flushed to disk", table_name);
+		}
+	} else {
+		ib_logf(IB_LOG_LEVEL_WARN, "Quiesce aborted!");
+	}
+
+	/* The state is advanced even when the quiesce was aborted, so
+	that row_quiesce_table_complete() can run the cleanup. */
+	dberr_t	err = row_quiesce_set_state(table, QUIESCE_COMPLETE, trx);
+	ut_a(err == DB_SUCCESS);
+}
+
+/*********************************************************************//**
+Cleanup after table quiesce: wait for the quiesce to reach
+QUIESCE_COMPLETE, delete the .cfg meta-data file, resume purge and
+reset the table's quiesce state to QUIESCE_NONE. */
+UNIV_INTERN
+void
+row_quiesce_table_complete(
+/*=======================*/
+	dict_table_t*	table,		/*!< in: quiesce this table */
+	trx_t*		trx)		/*!< in/out: transaction/session */
+{
+	ulint		count = 0;
+	char		table_name[MAX_FULL_NAME_LEN + 1];
+
+	ut_a(trx->mysql_thd != 0);
+
+	innobase_format_name(
+		table_name, sizeof(table_name), table->name, FALSE);
+
+	/* We need to wait for the operation to complete if the
+	transaction has been killed. */
+
+	/* Busy-wait, sleeping 1 s per iteration; with count starting
+	at 0 the warning is printed immediately and then once a
+	minute. */
+	while (table->quiesce != QUIESCE_COMPLETE) {
+
+		/* Print a warning after every minute. */
+		if (!(count % 60)) {
+			ib_logf(IB_LOG_LEVEL_WARN,
+				"Waiting for quiesce of '%s' to complete",
+				table_name);
+		}
+
+		/* Sleep for a second. */
+		os_thread_sleep(1000000);
+
+		++count;
+	}
+
+	/* Remove the .cfg file now that the user has resumed
+	normal operations. Otherwise it will cause problems when
+	the user tries to drop the database (remove directory). */
+	char		cfg_name[OS_FILE_MAX_PATH];
+
+	srv_get_meta_data_filename(table, cfg_name, sizeof(cfg_name));
+
+	os_file_delete_if_exists(cfg_name);
+
+	/* NOTE(review): this message is logged after the file has
+	already been deleted — consider logging before the deletion. */
+	ib_logf(IB_LOG_LEVEL_INFO,
+		"Deleting the meta-data file '%s'", cfg_name);
+
+	/* Resume purge, which row_quiesce_table_start() stopped. */
+	if (trx_purge_state() != PURGE_STATE_DISABLED) {
+		trx_purge_run();
+	}
+
+	dberr_t	err = row_quiesce_set_state(table, QUIESCE_NONE, trx);
+	ut_a(err == DB_SUCCESS);
+}
+
+/*********************************************************************//**
+Set a table's quiesce state. Refuses read-only mode and tables in the
+system tablespace (returns DB_UNSUPPORTED). Tables with FTS indexes or
+FTS auxiliary tables only get a warning — the state transition still
+proceeds for them. The valid transitions, enforced by assertions, are
+NONE -> START -> COMPLETE -> NONE.
+@return DB_SUCCESS or error code. */
+UNIV_INTERN
+dberr_t
+row_quiesce_set_state(
+/*==================*/
+	dict_table_t*	table,		/*!< in: quiesce this table */
+	ib_quiesce_t	state,		/*!< in: quiesce state to set */
+	trx_t*		trx)		/*!< in/out: transaction */
+{
+	ut_a(srv_n_purge_threads > 0);
+
+	if (srv_read_only_mode) {
+
+		ib_senderrf(trx->mysql_thd,
+			    IB_LOG_LEVEL_WARN, ER_READ_ONLY_MODE);
+
+		return(DB_UNSUPPORTED);
+
+	} else if (table->space == TRX_SYS_SPACE) {
+
+		/* Only tables in their own tablespace can be exported. */
+		char	table_name[MAX_FULL_NAME_LEN + 1];
+
+		innobase_format_name(
+			table_name, sizeof(table_name), table->name, FALSE);
+
+		ib_senderrf(trx->mysql_thd, IB_LOG_LEVEL_WARN,
+			    ER_TABLE_IN_SYSTEM_TABLESPACE, table_name);
+
+		return(DB_UNSUPPORTED);
+	} else if (row_quiesce_table_has_fts_index(table)) {
+
+		/* Warning only; the quiesce continues below. */
+		ib_senderrf(trx->mysql_thd, IB_LOG_LEVEL_WARN,
+			    ER_NOT_SUPPORTED_YET,
+			    "FLUSH TABLES on tables that have an FTS index. "
+			    "FTS auxiliary tables will not be flushed.");
+
+	} else if (DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS_HAS_DOC_ID)) {
+		/* If this flag is set then the table may not have any active
+		FTS indexes but it will still have the auxiliary tables. */
+
+		ib_senderrf(trx->mysql_thd, IB_LOG_LEVEL_WARN,
+			    ER_NOT_SUPPORTED_YET,
+			    "FLUSH TABLES on a table that had an FTS index, "
+			    "created on a hidden column, the "
+			    "auxiliary tables haven't been dropped as yet. "
+			    "FTS auxiliary tables will not be flushed.");
+	}
+
+	/* Lock order: data dictionary first, then the table's index
+	X-locks; released in the reverse order below. */
+	row_mysql_lock_data_dictionary(trx);
+
+	dict_table_x_lock_indexes(table);
+
+	/* Assert that the requested transition follows the
+	NONE -> START -> COMPLETE -> NONE cycle. */
+	switch (state) {
+	case QUIESCE_START:
+		ut_a(table->quiesce == QUIESCE_NONE);
+		break;
+
+	case QUIESCE_COMPLETE:
+		ut_a(table->quiesce == QUIESCE_START);
+		break;
+
+	case QUIESCE_NONE:
+		ut_a(table->quiesce == QUIESCE_COMPLETE);
+		break;
+	}
+
+	table->quiesce = state;
+
+	dict_table_x_unlock_indexes(table);
+
+	row_mysql_unlock_data_dictionary(trx);
+
+	return(DB_SUCCESS);
+}
+
diff --git a/storage/innobase/row/row0row.cc b/storage/innobase/row/row0row.cc
index 8c703b1e06c..be786f954fb 100644
--- a/storage/innobase/row/row0row.cc
+++ b/storage/innobase/row/row0row.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -50,28 +50,26 @@ Created 4/20/1996 Heikki Tuuri
/*****************************************************************//**
When an insert or purge to a table is performed, this function builds
the entry to be inserted into or purged from an index on the table.
-@return index entry which should be inserted or purged, or NULL if the
-externally stored columns in the clustered index record are
-unavailable and ext != NULL */
+@return index entry which should be inserted or purged
+@retval NULL if the externally stored columns in the clustered index record
+are unavailable and ext != NULL, or row is missing some needed columns. */
UNIV_INTERN
dtuple_t*
-row_build_index_entry(
-/*==================*/
- const dtuple_t* row, /*!< in: row which should be
- inserted or purged */
- row_ext_t* ext, /*!< in: externally stored column prefixes,
- or NULL */
- dict_index_t* index, /*!< in: index on the table */
- mem_heap_t* heap) /*!< in: memory heap from which the memory for
- the index entry is allocated */
+row_build_index_entry_low(
+/*======================*/
+ const dtuple_t* row, /*!< in: row which should be
+ inserted or purged */
+ const row_ext_t* ext, /*!< in: externally stored column
+ prefixes, or NULL */
+ dict_index_t* index, /*!< in: index on the table */
+ mem_heap_t* heap) /*!< in: memory heap from which
+ the memory for the index entry
+ is allocated */
{
dtuple_t* entry;
ulint entry_len;
ulint i;
- ut_ad(row && index && heap);
- ut_ad(dtuple_check_typed(row));
-
entry_len = dict_index_get_n_fields(index);
entry = dtuple_create(heap, entry_len);
@@ -96,8 +94,19 @@ row_build_index_entry(
= dtuple_get_nth_field(entry, i);
const dfield_t* dfield2
= dtuple_get_nth_field(row, col_no);
- ulint len
- = dfield_get_len(dfield2);
+ ulint len;
+
+#if DATA_MISSING != 0
+# error "DATA_MISSING != 0"
+#endif
+ if (UNIV_UNLIKELY(dfield_get_type(dfield2)->mtype
+ == DATA_MISSING)) {
+ /* The field has not been initialized in the row.
+ This should be from trx_undo_rec_get_partial_row(). */
+ return(NULL);
+ }
+
+ len = dfield_get_len(dfield2);
dfield_copy(dfield, dfield2);
@@ -171,8 +180,6 @@ row_build_index_entry(
}
}
- ut_ad(dtuple_check_typed(entry));
-
return(entry);
}
@@ -211,21 +218,23 @@ row_build(
of an index, or NULL if
index->table should be
consulted instead */
+ const dtuple_t* add_cols,
+ /*!< in: default values of
+ added columns, or NULL */
+ const ulint* col_map,/*!< in: mapping of old column
+ numbers to new ones, or NULL */
row_ext_t** ext, /*!< out, own: cache of
externally stored column
prefixes, or NULL */
mem_heap_t* heap) /*!< in: memory heap from which
the memory needed is allocated */
{
+ const byte* copy;
dtuple_t* row;
- const dict_table_t* table;
- ulint n_fields;
ulint n_ext_cols;
ulint* ext_cols = NULL; /* remove warning */
ulint len;
- ulint row_len;
byte* buf;
- ulint i;
ulint j;
mem_heap_t* tmp_heap = NULL;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
@@ -234,6 +243,7 @@ row_build(
ut_ad(index && rec && heap);
ut_ad(dict_index_is_clust(index));
ut_ad(!mutex_own(&trx_sys->mutex));
+ ut_ad(!col_map || col_table);
if (!offsets) {
offsets = rec_get_offsets(rec, index, offsets_,
@@ -260,55 +270,84 @@ row_build(
buf = static_cast<byte*>(
mem_heap_alloc(heap, rec_offs_size(offsets)));
- rec = rec_copy(buf, rec, offsets);
- /* Avoid a debug assertion in rec_offs_validate(). */
- rec_offs_make_valid(rec, index, (ulint*) offsets);
+ copy = rec_copy(buf, rec, offsets);
+ } else {
+ copy = rec;
}
- table = index->table;
- row_len = dict_table_get_n_cols(table);
-
- row = dtuple_create(heap, row_len);
-
- dict_table_copy_types(row, table);
-
- dtuple_set_info_bits(row, rec_get_info_bits(
- rec, dict_table_is_comp(table)));
-
- n_fields = rec_offs_n_fields(offsets);
n_ext_cols = rec_offs_n_extern(offsets);
if (n_ext_cols) {
ext_cols = static_cast<ulint*>(
mem_heap_alloc(heap, n_ext_cols * sizeof *ext_cols));
}
- for (i = j = 0; i < n_fields; i++) {
- dict_field_t* ind_field
+ /* Avoid a debug assertion in rec_offs_validate(). */
+ rec_offs_make_valid(copy, index, const_cast<ulint*>(offsets));
+
+ if (!col_table) {
+ ut_ad(!col_map);
+ ut_ad(!add_cols);
+ col_table = index->table;
+ }
+
+ if (add_cols) {
+ ut_ad(col_map);
+ row = dtuple_copy(add_cols, heap);
+ /* dict_table_copy_types() would set the fields to NULL */
+ for (ulint i = 0; i < dict_table_get_n_cols(col_table); i++) {
+ dict_col_copy_type(
+ dict_table_get_nth_col(col_table, i),
+ dfield_get_type(dtuple_get_nth_field(row, i)));
+ }
+ } else {
+ row = dtuple_create(heap, dict_table_get_n_cols(col_table));
+ dict_table_copy_types(row, col_table);
+ }
+
+ dtuple_set_info_bits(row, rec_get_info_bits(
+ copy, rec_offs_comp(offsets)));
+
+ j = 0;
+
+ for (ulint i = 0; i < rec_offs_n_fields(offsets); i++) {
+ const dict_field_t* ind_field
= dict_index_get_nth_field(index, i);
+
+ if (ind_field->prefix_len) {
+ /* Column prefixes can only occur in key
+ fields, which cannot be stored externally. For
+ a column prefix, there should also be the full
+ field in the clustered index tuple. The row
+ tuple comprises full fields, not prefixes. */
+ ut_ad(!rec_offs_nth_extern(offsets, i));
+ continue;
+ }
+
const dict_col_t* col
= dict_field_get_col(ind_field);
ulint col_no
= dict_col_get_no(col);
- dfield_t* dfield
- = dtuple_get_nth_field(row, col_no);
-
- if (ind_field->prefix_len == 0) {
- const byte* field = rec_get_nth_field(
- rec, offsets, i, &len);
+ if (col_map) {
+ col_no = col_map[col_no];
- dfield_set_data(dfield, field, len);
+ if (col_no == ULINT_UNDEFINED) {
+ /* dropped column */
+ continue;
+ }
}
+ dfield_t* dfield = dtuple_get_nth_field(row, col_no);
+
+ const byte* field = rec_get_nth_field(
+ copy, offsets, i, &len);
+
+ dfield_set_data(dfield, field, len);
+
if (rec_offs_nth_extern(offsets, i)) {
dfield_set_ext(dfield);
- if (UNIV_LIKELY_NULL(col_table)) {
- ut_a(col_no
- < dict_table_get_n_cols(col_table));
- col = dict_table_get_nth_col(
- col_table, col_no);
- }
+ col = dict_table_get_nth_col(col_table, col_no);
if (col->ord_part) {
/* We will have to fetch prefixes of
@@ -319,14 +358,20 @@ row_build(
}
}
+ rec_offs_make_valid(rec, index, const_cast<ulint*>(offsets));
+
ut_ad(dtuple_check_typed(row));
if (!ext) {
/* REDUNDANT and COMPACT formats store a local
768-byte prefix of each externally stored
- column. No cache is needed. */
- ut_ad(dict_table_get_format(index->table)
- < UNIV_FORMAT_B);
+ column. No cache is needed.
+
+ During online table rebuild,
+ row_log_table_apply_delete_low()
+ may use a cache that was set up by
+ row_log_table_delete(). */
+
} else if (j) {
*ext = row_ext_create(j, ext_cols, index->table->flags, row,
heap);
@@ -402,28 +447,14 @@ row_rec_to_index_entry_low(
/*******************************************************************//**
Converts an index record to a typed data tuple. NOTE that externally
stored (often big) fields are NOT copied to heap.
-@return own: index entry built; see the NOTE below! */
+@return own: index entry built */
UNIV_INTERN
dtuple_t*
row_rec_to_index_entry(
/*===================*/
- ulint type, /*!< in: ROW_COPY_DATA, or
- ROW_COPY_POINTERS: the former
- copies also the data fields to
- heap as the latter only places
- pointers to data fields on the
- index page */
- const rec_t* rec, /*!< in: record in the index;
- NOTE: in the case
- ROW_COPY_POINTERS the data
- fields in the row will point
- directly into this record,
- therefore, the buffer page of
- this record must be at least
- s-latched and the latch held
- as long as the dtuple is used! */
+ const rec_t* rec, /*!< in: record in the index */
const dict_index_t* index, /*!< in: index */
- ulint* offsets,/*!< in/out: rec_get_offsets(rec) */
+ const ulint* offsets,/*!< in: rec_get_offsets(rec) */
ulint* n_ext, /*!< out: number of externally
stored columns */
mem_heap_t* heap) /*!< in: memory heap from which
@@ -431,25 +462,21 @@ row_rec_to_index_entry(
{
dtuple_t* entry;
byte* buf;
+ const rec_t* copy_rec;
ut_ad(rec && heap && index);
ut_ad(rec_offs_validate(rec, index, offsets));
- if (type == ROW_COPY_DATA) {
- /* Take a copy of rec to heap */
- buf = static_cast<byte*>(
- mem_heap_alloc(heap, rec_offs_size(offsets)));
+ /* Take a copy of rec to heap */
+ buf = static_cast<byte*>(
+ mem_heap_alloc(heap, rec_offs_size(offsets)));
- rec = rec_copy(buf, rec, offsets);
- /* Avoid a debug assertion in rec_offs_validate(). */
- rec_offs_make_valid(rec, index, offsets);
-#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
- } else {
- ut_a(!rec_offs_any_null_extern(rec, offsets));
-#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */
- }
+ copy_rec = rec_copy(buf, rec, offsets);
- entry = row_rec_to_index_entry_low(rec, index, offsets, n_ext, heap);
+ rec_offs_make_valid(copy_rec, index, const_cast<ulint*>(offsets));
+ entry = row_rec_to_index_entry_low(
+ copy_rec, index, offsets, n_ext, heap);
+ rec_offs_make_valid(rec, index, const_cast<ulint*>(offsets));
dtuple_set_info_bits(entry,
rec_get_info_bits(rec, rec_offs_comp(offsets)));
diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc
index 96884e89511..bfda669d97a 100644
--- a/storage/innobase/row/row0sel.cc
+++ b/storage/innobase/row/row0sel.cc
@@ -57,7 +57,6 @@ Created 12/19/1997 Heikki Tuuri
#include "read0read.h"
#include "buf0lru.h"
#include "ha_prototypes.h"
-#include "srv0mon.h"
#include "my_compare.h" /* enum icp_result */
@@ -673,8 +672,8 @@ sel_enqueue_prefetched_row(
/*********************************************************************//**
Builds a previous version of a clustered index record for a consistent read
@return DB_SUCCESS or error code */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_sel_build_prev_vers(
/*====================*/
read_view_t* read_view, /*!< in: read view */
@@ -691,7 +690,7 @@ row_sel_build_prev_vers(
afterwards */
mtr_t* mtr) /*!< in: mtr */
{
- ulint err;
+ dberr_t err;
if (*old_vers_heap) {
mem_heap_empty(*old_vers_heap);
@@ -707,10 +706,9 @@ row_sel_build_prev_vers(
/*********************************************************************//**
Builds the last committed version of a clustered index record for a
-semi-consistent read.
-@return DB_SUCCESS or error code */
-static
-ulint
+semi-consistent read. */
+static __attribute__((nonnull))
+void
row_sel_build_committed_vers_for_mysql(
/*===================================*/
dict_index_t* clust_index, /*!< in: clustered index */
@@ -726,18 +724,16 @@ row_sel_build_committed_vers_for_mysql(
afterwards */
mtr_t* mtr) /*!< in: mtr */
{
- ulint err;
-
if (prebuilt->old_vers_heap) {
mem_heap_empty(prebuilt->old_vers_heap);
} else {
- prebuilt->old_vers_heap = mem_heap_create(200);
+ prebuilt->old_vers_heap = mem_heap_create(
+ rec_offs_size(*offsets));
}
- err = row_vers_build_for_semi_consistent_read(
+ row_vers_build_for_semi_consistent_read(
rec, mtr, clust_index, offsets, offset_heap,
prebuilt->old_vers_heap, old_vers);
- return(err);
}
/*********************************************************************//**
@@ -809,8 +805,8 @@ row_sel_test_other_conds(
Retrieves the clustered index record corresponding to a record in a
non-clustered index. Does the necessary locking.
@return DB_SUCCESS or error code */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_sel_get_clust_rec(
/*==================*/
sel_node_t* node, /*!< in: select_node */
@@ -828,7 +824,7 @@ row_sel_get_clust_rec(
dict_index_t* index;
rec_t* clust_rec;
rec_t* old_vers;
- ulint err;
+ dberr_t err;
mem_heap_t* heap = NULL;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
ulint* offsets = offsets_;
@@ -982,7 +978,7 @@ err_exit:
Sets a lock on a record.
@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, or error code */
UNIV_INLINE
-enum db_err
+dberr_t
sel_set_rec_lock(
/*=============*/
const buf_block_t* block, /*!< in: buffer block of rec */
@@ -995,7 +991,7 @@ sel_set_rec_lock(
que_thr_t* thr) /*!< in: query thread */
{
trx_t* trx;
- enum db_err err;
+ dberr_t err;
trx = thr_get_trx(thr);
@@ -1084,7 +1080,7 @@ row_sel_open_pcur(
(FALSE: no init) */
btr_pcur_open_at_index_side(plan->asc, index, BTR_SEARCH_LEAF,
- &(plan->pcur), FALSE, mtr);
+ &(plan->pcur), false, 0, mtr);
}
ut_ad(plan->n_rows_prefetched == 0);
@@ -1313,8 +1309,8 @@ func_exit:
/*********************************************************************//**
Performs a select step.
@return DB_SUCCESS or error code */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_sel(
/*====*/
sel_node_t* node, /*!< in: select node */
@@ -1347,7 +1343,7 @@ row_sel(
&mtr must be committed before we move
to the next non-clustered record */
ulint found_flag;
- ulint err;
+ dberr_t err;
mem_heap_t* heap = NULL;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
ulint* offsets = offsets_;
@@ -2083,11 +2079,9 @@ row_sel_step(
table_node = static_cast<sym_node_t*>(
que_node_get_next(table_node))) {
- enum db_err err;
-
- err = static_cast<enum db_err>(lock_table(
+ dberr_t err = lock_table(
0, table_node->table, i_lock_mode,
- thr));
+ thr);
if (err != DB_SUCCESS) {
trx_t* trx;
@@ -2120,7 +2114,7 @@ row_sel_step(
}
}
- enum db_err err = static_cast<enum db_err>(row_sel(node, thr));
+ dberr_t err = row_sel(node, thr);
/* NOTE! if queries are parallelized, the following assignment may
have problems; the assignment should be made only if thr is the
@@ -2305,42 +2299,6 @@ row_printf_step(
return(thr);
}
-/********************************************************************
-Creates a key in Innobase dtuple format.*/
-
-void
-row_create_key(
-/*===========*/
- dtuple_t* tuple, /* in: tuple where to build;
- NOTE: we assume that the type info
- in the tuple is already according
- to index! */
- dict_index_t* index, /* in: index of the key value */
- doc_id_t* doc_id) /* in: doc id to search. */
-{
- dtype_t type;
- dict_field_t* field;
- doc_id_t temp_doc_id;
- dfield_t* dfield = dtuple_get_nth_field(tuple, 0);
-
- ut_a(dict_index_get_n_unique(index) == 1);
-
- /* Permit us to access any field in the tuple (ULINT_MAX): */
- dtuple_set_n_fields(tuple, ULINT_MAX);
-
- field = dict_index_get_nth_field(index, 0);
- dict_col_copy_type(field->col, &type);
- ut_a(dtype_get_mtype(&type) == DATA_INT);
-
- /* Convert to storage byte order */
- mach_write_to_8((byte*) &temp_doc_id, *doc_id);
- *doc_id = temp_doc_id;
-
- ut_a(sizeof(*doc_id) == field->fixed_len);
- dfield_set_data(dfield, doc_id, field->fixed_len);
-
- dtuple_set_n_fields(tuple, 1);
-}
/****************************************************************//**
Converts a key value stored in MySQL format to an Innobase dtuple. The last
field of the key value may be just a prefix of a fixed length field: hence
@@ -2536,6 +2494,7 @@ row_sel_convert_mysql_key_to_innobase(
dfield_set_len(dfield, len
- (ulint) (key_ptr - key_end));
}
+ ut_ad(0);
}
n_fields++;
@@ -3008,8 +2967,8 @@ row_sel_store_mysql_rec(
/*********************************************************************//**
Builds a previous version of a clustered index record for a consistent read
@return DB_SUCCESS or error code */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_sel_build_prev_vers_for_mysql(
/*==============================*/
read_view_t* read_view, /*!< in: read view */
@@ -3026,7 +2985,7 @@ row_sel_build_prev_vers_for_mysql(
afterwards */
mtr_t* mtr) /*!< in: mtr */
{
- ulint err;
+ dberr_t err;
if (prebuilt->old_vers_heap) {
mem_heap_empty(prebuilt->old_vers_heap);
@@ -3045,8 +3004,8 @@ Retrieves the clustered index record corresponding to a record in a
non-clustered index. Does the necessary locking. Used in the MySQL
interface.
@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, or error code */
-static
-enum db_err
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_sel_get_clust_rec_for_mysql(
/*============================*/
row_prebuilt_t* prebuilt,/*!< in: prebuilt struct in the handle */
@@ -3073,7 +3032,7 @@ row_sel_get_clust_rec_for_mysql(
dict_index_t* clust_index;
const rec_t* clust_rec;
rec_t* old_vers;
- enum db_err err;
+ dberr_t err;
trx_t* trx;
*out_rec = NULL;
@@ -3172,17 +3131,13 @@ row_sel_get_clust_rec_for_mysql(
clust_rec, clust_index, *offsets,
trx->read_view)) {
- ulint db_err;
-
/* The following call returns 'offsets' associated with
'old_vers' */
- db_err = row_sel_build_prev_vers_for_mysql(
+ err = row_sel_build_prev_vers_for_mysql(
trx->read_view, clust_index, prebuilt,
clust_rec, offsets, offset_heap, &old_vers,
mtr);
- err = static_cast<enum db_err>(db_err);
-
if (err != DB_SUCCESS || old_vers == NULL) {
goto err_exit;
@@ -3226,7 +3181,10 @@ row_sel_get_clust_rec_for_mysql(
func_exit:
*out_rec = clust_rec;
- if (prebuilt->select_lock_type != LOCK_NONE) {
+ /* Store the current position if select_lock_type is not
+ LOCK_NONE or if we are scanning using InnoDB APIs */
+ if (prebuilt->select_lock_type != LOCK_NONE
+ || prebuilt->innodb_api) {
/* We may use the cursor in update or in unlock_row():
store its position */
@@ -3633,7 +3591,7 @@ row_search_idx_cond_check(
return(result);
case ICP_ERROR:
case ICP_ABORTED_BY_USER:
- return(result);
+ return(result);
}
ut_error;
@@ -3649,7 +3607,7 @@ position and fetch next or fetch prev must not be tried to the cursor!
@return DB_SUCCESS, DB_RECORD_NOT_FOUND, DB_END_OF_INDEX, DB_DEADLOCK,
DB_LOCK_TABLE_FULL, DB_CORRUPTION, or DB_TOO_BIG_RECORD */
UNIV_INTERN
-ulint
+dberr_t
row_search_for_mysql(
/*=================*/
byte* buf, /*!< in/out: buffer for the fetched
@@ -3678,9 +3636,9 @@ row_search_for_mysql(
dict_index_t* clust_index;
que_thr_t* thr;
const rec_t* rec;
- const rec_t* result_rec;
+ const rec_t* result_rec = NULL;
const rec_t* clust_rec;
- ulint err = DB_SUCCESS;
+ dberr_t err = DB_SUCCESS;
ibool unique_search = FALSE;
ibool mtr_has_extra_clust_latch = FALSE;
ibool moves_up = FALSE;
@@ -3701,48 +3659,41 @@ row_search_for_mysql(
ulint offsets_[REC_OFFS_NORMAL_SIZE];
ulint* offsets = offsets_;
ibool table_lock_waited = FALSE;
+ byte* next_buf = 0;
rec_offs_init(offsets_);
ut_ad(index && pcur && search_tuple);
- if (UNIV_UNLIKELY(prebuilt->table->ibd_file_missing)) {
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Error:\n"
- "InnoDB: MySQL is trying to use a table handle"
- " but the .ibd file for\n"
- "InnoDB: table %s does not exist.\n"
- "InnoDB: Have you deleted the .ibd file"
- " from the database directory under\n"
- "InnoDB: the MySQL datadir, or have you used"
- " DISCARD TABLESPACE?\n"
- "InnoDB: Look from\n"
- "InnoDB: " REFMAN "innodb-troubleshooting.html\n"
- "InnoDB: how you can resolve the problem.\n",
- prebuilt->table->name);
+ /* We don't support FTS queries from the HANDLER interfaces, because
+ we implemented FTS as reversed inverted index with auxiliary tables.
+ So anything related to traditional index query would not apply to
+ it. */
+ if (index->type & DICT_FTS) {
+ return(DB_END_OF_INDEX);
+ }
#ifdef UNIV_SYNC_DEBUG
- ut_ad(!sync_thread_levels_nonempty_trx(trx->has_search_latch));
+ ut_ad(!sync_thread_levels_nonempty_trx(trx->has_search_latch));
#endif /* UNIV_SYNC_DEBUG */
- return(DB_ERROR);
- }
- if (UNIV_UNLIKELY(!prebuilt->index_usable)) {
+ if (dict_table_is_discarded(prebuilt->table)) {
+
+ return(DB_TABLESPACE_DELETED);
+
+ } else if (prebuilt->table->ibd_file_missing) {
+
+ return(DB_TABLESPACE_NOT_FOUND);
+
+ } else if (!prebuilt->index_usable) {
-#ifdef UNIV_SYNC_DEBUG
- ut_ad(!sync_thread_levels_nonempty_trx(trx->has_search_latch));
-#endif /* UNIV_SYNC_DEBUG */
return(DB_MISSING_HISTORY);
- }
- if (dict_index_is_corrupted(index)) {
-#ifdef UNIV_SYNC_DEBUG
- ut_ad(!sync_thread_levels_nonempty_trx(trx->has_search_latch));
-#endif /* UNIV_SYNC_DEBUG */
+ } else if (dict_index_is_corrupted(index)) {
+
return(DB_CORRUPTION);
- }
- if (prebuilt->magic_n != ROW_PREBUILT_ALLOCATED) {
+ } else if (prebuilt->magic_n != ROW_PREBUILT_ALLOCATED) {
fprintf(stderr,
"InnoDB: Error: trying to free a corrupt\n"
"InnoDB: table handle. Magic n %lu, table name ",
@@ -3846,7 +3797,6 @@ row_search_for_mysql(
prebuilt->n_rows_fetched++;
- srv_n_rows_read++;
err = DB_SUCCESS;
goto func_exit;
}
@@ -3925,7 +3875,8 @@ row_search_for_mysql(
&& dict_index_is_clust(index)
&& !prebuilt->templ_contains_blob
&& !prebuilt->used_in_HANDLER
- && (prebuilt->mysql_row_len < UNIV_PAGE_SIZE / 8)) {
+ && (prebuilt->mysql_row_len < UNIV_PAGE_SIZE / 8)
+ && !prebuilt->innodb_api) {
mode = PAGE_CUR_GE;
@@ -3973,8 +3924,8 @@ row_search_for_mysql(
rec, offsets)) {
case ICP_NO_MATCH:
case ICP_OUT_OF_RANGE:
- case ICP_ERROR:
case ICP_ABORTED_BY_USER:
+ case ICP_ERROR:
goto shortcut_mismatch;
case ICP_MATCH:
goto shortcut_match;
@@ -4005,8 +3956,6 @@ row_search_for_mysql(
/* ut_print_name(stderr, index->name);
fputs(" shortcut\n", stderr); */
- srv_n_rows_read++;
-
err = DB_SUCCESS;
goto release_search_latch_if_needed;
@@ -4179,12 +4128,12 @@ wait_table_again:
/* Try to place a gap lock on the next index record
to prevent phantoms in ORDER BY ... DESC queries */
- const rec_t* next = page_rec_get_next_const(rec);
+ const rec_t* next_rec = page_rec_get_next_const(rec);
- offsets = rec_get_offsets(next, index, offsets,
+ offsets = rec_get_offsets(next_rec, index, offsets,
ULINT_UNDEFINED, &heap);
err = sel_set_rec_lock(btr_pcur_get_block(pcur),
- next, index, offsets,
+ next_rec, index, offsets,
prebuilt->select_lock_type,
LOCK_GAP, thr);
@@ -4197,16 +4146,10 @@ wait_table_again:
goto lock_wait_or_error;
}
}
- } else {
- if (mode == PAGE_CUR_G) {
- btr_pcur_open_at_index_side(
- TRUE, index, BTR_SEARCH_LEAF, pcur, FALSE,
- &mtr);
- } else if (mode == PAGE_CUR_L) {
- btr_pcur_open_at_index_side(
- FALSE, index, BTR_SEARCH_LEAF, pcur, FALSE,
- &mtr);
- }
+ } else if (mode == PAGE_CUR_G || mode == PAGE_CUR_L) {
+ btr_pcur_open_at_index_side(
+ mode == PAGE_CUR_G, index, BTR_SEARCH_LEAF,
+ pcur, false, 0, &mtr);
}
rec_loop:
@@ -4348,6 +4291,9 @@ wrong_offs:
/* Calculate the 'offsets' associated with 'rec' */
+ ut_ad(fil_page_get_type(btr_pcur_get_page(pcur)) == FIL_PAGE_INDEX);
+ ut_ad(btr_page_get_index_id(btr_pcur_get_page(pcur)) == index->id);
+
offsets = rec_get_offsets(rec, index, offsets, ULINT_UNDEFINED, &heap);
if (UNIV_UNLIKELY(srv_force_recovery > 0)) {
@@ -4539,15 +4485,10 @@ no_gap_lock:
/* The following call returns 'offsets'
associated with 'old_vers' */
- err = row_sel_build_committed_vers_for_mysql(
+ row_sel_build_committed_vers_for_mysql(
clust_index, prebuilt, rec,
&offsets, &heap, &old_vers, &mtr);
- if (err != DB_SUCCESS) {
-
- goto lock_wait_or_error;
- }
-
/* Check whether it was a deadlock or not, if not
a deadlock and the transaction had to wait then
release the lock it is waiting on. */
@@ -4649,8 +4590,8 @@ no_gap_lock:
case ICP_NO_MATCH:
goto next_rec;
case ICP_OUT_OF_RANGE:
- case ICP_ERROR:
case ICP_ABORTED_BY_USER:
+ case ICP_ERROR:
err = DB_RECORD_NOT_FOUND;
goto idx_cond_failed;
case ICP_MATCH:
@@ -4690,12 +4631,15 @@ locks_ok:
delete marked record and the record following it.
For now this is applicable only to clustered indexes while
- doing a unique search. There is scope for further optimization
+ doing a unique search except for HANDLER queries because
+ HANDLER allows NEXT and PREV even in unique search on
+ clustered index. There is scope for further optimization
applicable to unique secondary indexes. Current behaviour is
to widen the scope of a lock on an already delete marked record
if the same record is deleted twice by the same transaction */
if (index == clust_index && unique_search
- && !prebuilt->used_in_HANDLER) {
+ && !prebuilt->used_in_HANDLER) {
+
err = DB_RECORD_NOT_FOUND;
goto normal_return;
@@ -4712,8 +4656,8 @@ locks_ok:
}
goto next_rec;
case ICP_OUT_OF_RANGE:
- case ICP_ERROR:
case ICP_ABORTED_BY_USER:
+ case ICP_ERROR:
err = DB_RECORD_NOT_FOUND;
goto idx_cond_failed;
case ICP_MATCH:
@@ -4831,9 +4775,10 @@ requires_clust_rec:
&& !prebuilt->templ_contains_blob
&& !prebuilt->clust_index_was_generated
&& !prebuilt->used_in_HANDLER
+ && !prebuilt->innodb_api
&& prebuilt->template_type
!= ROW_MYSQL_DUMMY_TEMPLATE
- && !prebuilt->result) {
+ && !prebuilt->in_fts_query) {
/* Inside an update, for example, we do not cache rows,
since we may use the cursor position to do the actual
@@ -4849,29 +4794,58 @@ requires_clust_rec:
/* We only convert from InnoDB row format to MySQL row
format when ICP is disabled. */
- if (!prebuilt->idx_cond
- && !row_sel_store_mysql_rec(
- row_sel_fetch_last_buf(prebuilt),
- prebuilt, result_rec,
- result_rec != rec,
- result_rec != rec ? clust_index : index,
- offsets)) {
-
- /* Only fresh inserts may contain incomplete
- externally stored columns. Pretend that such
- records do not exist. Such records may only be
- accessed at the READ UNCOMMITTED isolation
- level or when rolling back a recovered
- transaction. Rollback happens at a lower
- level, not here. */
- goto next_rec;
- }
+ if (!prebuilt->idx_cond) {
- row_sel_enqueue_cache_row_for_mysql(buf, prebuilt);
+ /* We use next_buf to track the allocation of buffers
+ where we store and enqueue the buffers for our
+ pre-fetch optimisation.
+
+ If next_buf == 0 then we store the converted record
+ directly into the MySQL record buffer (buf). If it is
+ != 0 then we allocate a pre-fetch buffer and store the
+ converted record there.
+
+ If the conversion fails and the MySQL record buffer
+ was not written to then we reset next_buf so that
+ we can re-use the MySQL record buffer in the next
+ iteration. */
+
+ next_buf = next_buf
+ ? row_sel_fetch_last_buf(prebuilt) : buf;
+
+ if (!row_sel_store_mysql_rec(
+ next_buf, prebuilt, result_rec,
+ result_rec != rec,
+ result_rec != rec ? clust_index : index,
+ offsets)) {
+
+ if (next_buf == buf) {
+ ut_a(prebuilt->n_fetch_cached == 0);
+ next_buf = 0;
+ }
+
+ /* Only fresh inserts may contain incomplete
+ externally stored columns. Pretend that such
+ records do not exist. Such records may only be
+ accessed at the READ UNCOMMITTED isolation
+ level or when rolling back a recovered
+ transaction. Rollback happens at a lower
+ level, not here. */
+ goto next_rec;
+ }
+
+ if (next_buf != buf) {
+ row_sel_enqueue_cache_row_for_mysql(
+ next_buf, prebuilt);
+ }
+ } else {
+ row_sel_enqueue_cache_row_for_mysql(buf, prebuilt);
+ }
if (prebuilt->n_fetch_cached < MYSQL_FETCH_CACHE_SIZE) {
goto next_rec;
}
+
} else {
if (UNIV_UNLIKELY
(prebuilt->template_type == ROW_MYSQL_DUMMY_TEMPLATE)) {
@@ -4892,7 +4866,7 @@ requires_clust_rec:
rec_offs_size(offsets));
mach_write_to_4(buf,
rec_offs_extra_size(offsets) + 4);
- } else if (!prebuilt->idx_cond) {
+ } else if (!prebuilt->idx_cond && !prebuilt->innodb_api) {
/* The record was not yet converted to MySQL format. */
if (!row_sel_store_mysql_rec(
buf, prebuilt, result_rec,
@@ -4935,11 +4909,16 @@ idx_cond_failed:
|| !dict_index_is_clust(index)
|| direction != 0
|| prebuilt->select_lock_type != LOCK_NONE
- || prebuilt->used_in_HANDLER) {
+ || prebuilt->used_in_HANDLER
+ || prebuilt->innodb_api) {
/* Inside an update always store the cursor position */
btr_pcur_store_position(pcur, &mtr);
+
+ if (prebuilt->innodb_api) {
+ prebuilt->innodb_api_rec = result_rec;
+ }
}
goto normal_return;
@@ -5032,7 +5011,7 @@ lock_table_wait:
mtr_commit(&mtr);
mtr_has_extra_clust_latch = FALSE;
- trx->error_state = static_cast<enum db_err>(err);
+ trx->error_state = err;
/* The following is a patch for MySQL */
@@ -5101,8 +5080,23 @@ normal_return:
mtr_commit(&mtr);
- if (prebuilt->n_fetch_cached > 0) {
- row_sel_dequeue_cached_row_for_mysql(buf, prebuilt);
+ if (prebuilt->idx_cond != 0) {
+
+ /* When ICP is active we don't write to the MySQL buffer
+ directly, only to buffers that are enqueued in the pre-fetch
+ queue. We need to dequeue the first buffer and copy the contents
+ to the record buffer that was passed in by MySQL. */
+
+ if (prebuilt->n_fetch_cached > 0) {
+ row_sel_dequeue_cached_row_for_mysql(buf, prebuilt);
+ err = DB_SUCCESS;
+ }
+
+ } else if (next_buf != 0) {
+
+ /* We may or may not have enqueued some buffers to the
+ pre-fetch queue, but we definitely wrote to the record
+ buffer passed to us by MySQL
err = DB_SUCCESS;
}
@@ -5112,9 +5106,6 @@ normal_return:
dict_index_name_print(stderr, index);
fprintf(stderr, " cnt %lu ret value %lu err\n", cnt, err); */
#endif /* UNIV_SEARCH_DEBUG */
- if (err == DB_SUCCESS) {
- srv_n_rows_read++;
- }
func_exit:
trx->op_info = "";
@@ -5139,6 +5130,9 @@ func_exit:
#ifdef UNIV_SYNC_DEBUG
ut_ad(!sync_thread_levels_nonempty_trx(trx->has_search_latch));
#endif /* UNIV_SYNC_DEBUG */
+
+ DEBUG_SYNC_C("innodb_row_search_for_mysql_exit");
+
return(err);
}
@@ -5157,7 +5151,22 @@ row_search_check_if_query_cache_permitted(
dict_table_t* table;
ibool ret = FALSE;
- table = dict_table_open_on_name(norm_name, FALSE);
+ /* Disable query cache altogether for all tables if recovered XA
+ transactions in prepared state exist. This is because we do not
+ restore the table locks for those transactions and we may wrongly
+ set ret=TRUE above if "lock_table_get_n_locks(table) == 0". See
+ "Bug#14658648 XA ROLLBACK (DISTRIBUTED DATABASE) NOT WORKING WITH
+ QUERY CACHE ENABLED".
+ Read trx_sys->n_prepared_recovered_trx without mutex protection,
+ not possible to end up with a torn read since n_prepared_recovered_trx
+ is word size. */
+ if (trx_sys->n_prepared_recovered_trx > 0) {
+
+ return(FALSE);
+ }
+
+ table = dict_table_open_on_name(norm_name, FALSE, FALSE,
+ DICT_ERR_IGNORE_NONE);
if (table == NULL) {
@@ -5191,7 +5200,7 @@ row_search_check_if_query_cache_permitted(
}
}
- dict_table_close(table, FALSE);
+ dict_table_close(table, FALSE, FALSE);
return(ret);
}
@@ -5229,8 +5238,6 @@ row_search_autoinc_read_column(
data = rec_get_nth_field(rec, offsets, col_no, &len);
- ut_a(len != UNIV_SQL_NULL);
-
switch (mtype) {
case DATA_INT:
ut_a(len <= sizeof value);
@@ -5289,7 +5296,7 @@ Read the max AUTOINC value from an index.
@return DB_SUCCESS if all OK else error code, DB_RECORD_NOT_FOUND if
column name can't be found in index */
UNIV_INTERN
-ulint
+dberr_t
row_search_max_autoinc(
/*===================*/
dict_index_t* index, /*!< in: index to search */
@@ -5299,7 +5306,7 @@ row_search_max_autoinc(
ulint i;
ulint n_cols;
dict_field_t* dfield = NULL;
- ulint error = DB_SUCCESS;
+ dberr_t error = DB_SUCCESS;
n_cols = dict_index_get_n_ordering_defined_by_user(index);
@@ -5321,10 +5328,9 @@ row_search_max_autoinc(
mtr_start(&mtr);
- /* Open at the high/right end (FALSE), and INIT
- cursor (TRUE) */
+ /* Open at the high/right end (false), and init cursor */
btr_pcur_open_at_index_side(
- FALSE, index, BTR_SEARCH_LEAF, &pcur, TRUE, &mtr);
+ false, index, BTR_SEARCH_LEAF, &pcur, true, 0, &mtr);
if (page_get_n_recs(btr_pcur_get_page(&pcur)) > 0) {
const rec_t* rec;
diff --git a/storage/innobase/row/row0uins.cc b/storage/innobase/row/row0uins.cc
index 78fd4ad5199..25b2b6b62ce 100644
--- a/storage/innobase/row/row0uins.cc
+++ b/storage/innobase/row/row0uins.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1997, 2010, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1997, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -38,6 +38,7 @@ Created 2/25/1997 Heikki Tuuri
#include "mach0data.h"
#include "row0undo.h"
#include "row0vers.h"
+#include "row0log.h"
#include "trx0trx.h"
#include "trx0rec.h"
#include "row0row.h"
@@ -60,25 +61,64 @@ introduced where a call to log_free_check() is bypassed. */
Removes a clustered index record. The pcur in node was positioned on the
record, now it is detached.
@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_undo_ins_remove_clust_rec(
/*==========================*/
undo_node_t* node) /*!< in: undo node */
{
btr_cur_t* btr_cur;
ibool success;
- ulint err;
- ulint n_tries = 0;
+ dberr_t err;
+ ulint n_tries = 0;
mtr_t mtr;
+ dict_index_t* index = node->pcur.btr_cur.index;
+ bool online;
+
+ ut_ad(dict_index_is_clust(index));
mtr_start(&mtr);
- success = btr_pcur_restore_position(BTR_MODIFY_LEAF, &(node->pcur),
- &mtr);
+ /* This is similar to row_undo_mod_clust(). Even though we
+ call row_log_table_rollback() elsewhere, the DDL thread may
+ already have copied this row to the sort buffers or to the new
+ table. We must log the removal, so that the row will be
+ correctly purged. However, we can log the removal out of sync
+ with the B-tree modification. */
+
+ online = dict_index_is_online_ddl(index);
+ if (online) {
+ ut_ad(node->trx->dict_operation_lock_mode
+ != RW_X_LATCH);
+ ut_ad(node->table->id != DICT_INDEXES_ID);
+ mtr_s_lock(dict_index_get_lock(index), &mtr);
+ }
+
+ success = btr_pcur_restore_position(
+ online
+ ? BTR_MODIFY_LEAF | BTR_ALREADY_S_LATCHED
+ : BTR_MODIFY_LEAF, &node->pcur, &mtr);
ut_a(success);
+ btr_cur = btr_pcur_get_btr_cur(&node->pcur);
+
+ ut_ad(rec_get_trx_id(btr_cur_get_rec(btr_cur), btr_cur->index)
+ == node->trx->id);
+
+ if (online && dict_index_is_online_ddl(index)) {
+ const rec_t* rec = btr_cur_get_rec(btr_cur);
+ mem_heap_t* heap = NULL;
+ const ulint* offsets = rec_get_offsets(
+ rec, index, NULL, ULINT_UNDEFINED, &heap);
+ row_log_table_delete(
+ rec, index, offsets,
+ trx_read_trx_id(row_get_trx_id_offset(index, offsets)
+ + rec));
+ mem_heap_free(heap);
+ }
+
if (node->table->id == DICT_INDEXES_ID) {
+ ut_ad(!online);
ut_ad(node->trx->dict_operation_lock_mode == RW_X_LATCH);
/* Drop the index tree associated with the row in
@@ -90,14 +130,12 @@ row_undo_ins_remove_clust_rec(
mtr_start(&mtr);
- success = btr_pcur_restore_position(BTR_MODIFY_LEAF,
- &(node->pcur), &mtr);
+ success = btr_pcur_restore_position(
+ BTR_MODIFY_LEAF, &node->pcur, &mtr);
ut_a(success);
}
- btr_cur = btr_pcur_get_btr_cur(&(node->pcur));
-
- if (btr_cur_optimistic_delete(btr_cur, &mtr)) {
+ if (btr_cur_optimistic_delete(btr_cur, 0, &mtr)) {
err = DB_SUCCESS;
goto func_exit;
}
@@ -111,7 +149,7 @@ retry:
&(node->pcur), &mtr);
ut_a(success);
- btr_cur_pessimistic_delete(&err, FALSE, btr_cur,
+ btr_cur_pessimistic_delete(&err, FALSE, btr_cur, 0,
trx_is_recv(node->trx)
? RB_RECOVERY
: RB_NORMAL, &mtr);
@@ -142,8 +180,8 @@ func_exit:
/***************************************************************//**
Removes a secondary index entry if found.
@return DB_SUCCESS, DB_FAIL, or DB_OUT_OF_FILE_SPACE */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_undo_ins_remove_sec_low(
/*========================*/
ulint mode, /*!< in: BTR_MODIFY_LEAF or BTR_MODIFY_TREE,
@@ -154,22 +192,31 @@ row_undo_ins_remove_sec_low(
{
btr_pcur_t pcur;
btr_cur_t* btr_cur;
- ulint err;
+ dberr_t err = DB_SUCCESS;
mtr_t mtr;
enum row_search_result search_result;
+ log_free_check();
+
mtr_start(&mtr);
- btr_cur = btr_pcur_get_btr_cur(&pcur);
+ if (mode == BTR_MODIFY_LEAF) {
+ mode = BTR_MODIFY_LEAF | BTR_ALREADY_S_LATCHED;
+ mtr_s_lock(dict_index_get_lock(index), &mtr);
+ } else {
+ ut_ad(mode == BTR_MODIFY_TREE);
+ mtr_x_lock(dict_index_get_lock(index), &mtr);
+ }
- ut_ad(mode == BTR_MODIFY_TREE || mode == BTR_MODIFY_LEAF);
+ if (row_log_online_op_try(index, entry, 0)) {
+ goto func_exit_no_pcur;
+ }
search_result = row_search_index_entry(index, entry, mode,
&pcur, &mtr);
switch (search_result) {
case ROW_NOT_FOUND:
- err = DB_SUCCESS;
goto func_exit;
case ROW_FOUND:
break;
@@ -181,23 +228,24 @@ row_undo_ins_remove_sec_low(
ut_error;
}
- if (mode == BTR_MODIFY_LEAF) {
- err = btr_cur_optimistic_delete(btr_cur, &mtr)
+ btr_cur = btr_pcur_get_btr_cur(&pcur);
+
+ if (mode != BTR_MODIFY_TREE) {
+ err = btr_cur_optimistic_delete(btr_cur, 0, &mtr)
? DB_SUCCESS : DB_FAIL;
} else {
- ut_ad(mode == BTR_MODIFY_TREE);
-
/* No need to distinguish RB_RECOVERY here, because we
are deleting a secondary index record: the distinction
between RB_NORMAL and RB_RECOVERY only matters when
deleting a record that contains externally stored
columns. */
ut_ad(!dict_index_is_clust(index));
- btr_cur_pessimistic_delete(&err, FALSE, btr_cur,
+ btr_cur_pessimistic_delete(&err, FALSE, btr_cur, 0,
RB_NORMAL, &mtr);
}
func_exit:
btr_pcur_close(&pcur);
+func_exit_no_pcur:
mtr_commit(&mtr);
return(err);
@@ -207,14 +255,14 @@ func_exit:
Removes a secondary index entry from the index if found. Tries first
optimistic, then pessimistic descent down the tree.
@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_undo_ins_remove_sec(
/*====================*/
dict_index_t* index, /*!< in: index */
dtuple_t* entry) /*!< in: index entry to insert */
{
- ulint err;
+ dberr_t err;
ulint n_tries = 0;
/* Try first optimistic descent to the B-tree */
@@ -261,7 +309,7 @@ row_undo_ins_parse_undo_rec(
table_id_t table_id;
ulint type;
ulint dummy;
- ibool dummy_extern;
+ bool dummy_extern;
ut_ad(node);
@@ -271,12 +319,13 @@ row_undo_ins_parse_undo_rec(
node->rec_type = type;
node->update = NULL;
- node->table = dict_table_open_on_id(table_id, dict_locked);
+ node->table = dict_table_open_on_id(table_id, dict_locked, FALSE);
/* Skip the UNDO if we can't find the table or the .ibd file. */
if (UNIV_UNLIKELY(node->table == NULL)) {
} else if (UNIV_UNLIKELY(node->table->ibd_file_missing)) {
- dict_table_close(node->table, dict_locked);
+close_table:
+ dict_table_close(node->table, dict_locked, FALSE);
node->table = NULL;
} else {
clust_index = dict_table_get_first_index(node->table);
@@ -286,10 +335,7 @@ row_undo_ins_parse_undo_rec(
ptr, clust_index, &node->ref, node->heap);
if (!row_undo_search_clust_to_pcur(node)) {
-
- dict_table_close(node->table, dict_locked);
-
- node->table = NULL;
+ goto close_table;
}
} else {
@@ -299,10 +345,7 @@ row_undo_ins_parse_undo_rec(
node->table->name);
fprintf(stderr, " has no indexes, "
"ignoring the table\n");
-
- dict_table_close(node->table, dict_locked);
-
- node->table = NULL;
+ goto close_table;
}
}
}
@@ -310,27 +353,32 @@ row_undo_ins_parse_undo_rec(
/***************************************************************//**
Removes secondary index records.
@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_undo_ins_remove_sec_rec(
/*========================*/
undo_node_t* node) /*!< in/out: row undo node */
{
- ulint err = DB_SUCCESS;
+ dberr_t err = DB_SUCCESS;
+ dict_index_t* index = node->index;
mem_heap_t* heap;
heap = mem_heap_create(1024);
- while (node->index != NULL) {
+ while (index != NULL) {
dtuple_t* entry;
- if (node->index->type & DICT_FTS) {
- dict_table_next_uncorrupted_index(node->index);
+ if (index->type & DICT_FTS) {
+ dict_table_next_uncorrupted_index(index);
continue;
}
- entry = row_build_index_entry(node->row, node->ext,
- node->index, heap);
+ /* An insert undo record TRX_UNDO_INSERT_REC will
+ always contain all fields of the index. It does not
+ matter if any indexes were created afterwards; all
+ index entries can be reconstructed from the row. */
+ entry = row_build_index_entry(
+ node->row, node->ext, index, heap);
if (UNIV_UNLIKELY(!entry)) {
/* The database must have crashed after
inserting a clustered index record but before
@@ -343,9 +391,7 @@ row_undo_ins_remove_sec_rec(
transactions. */
ut_a(trx_is_recv(node->trx));
} else {
- log_free_check();
-
- err = row_undo_ins_remove_sec(node->index, entry);
+ err = row_undo_ins_remove_sec(index, entry);
if (UNIV_UNLIKELY(err != DB_SUCCESS)) {
goto func_exit;
@@ -353,10 +399,11 @@ row_undo_ins_remove_sec_rec(
}
mem_heap_empty(heap);
- dict_table_next_uncorrupted_index(node->index);
+ dict_table_next_uncorrupted_index(index);
}
func_exit:
+ node->index = index;
mem_heap_free(heap);
return(err);
}
@@ -369,15 +416,14 @@ if it figures out that an index record will be removed in the purge
anyway, it will remove it in the rollback.
@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */
UNIV_INTERN
-ulint
+dberr_t
row_undo_ins(
/*=========*/
undo_node_t* node) /*!< in: row undo node */
{
- ulint err;
- ibool dict_locked;
+ dberr_t err;
+ ibool dict_locked;
- ut_ad(node);
ut_ad(node->state == UNDO_NODE_INSERT);
dict_locked = node->trx->dict_operation_lock_mode == RW_X_LATCH;
@@ -392,24 +438,46 @@ row_undo_ins(
/* Iterate over all the indexes and undo the insert.*/
+ node->index = dict_table_get_first_index(node->table);
+ ut_ad(dict_index_is_clust(node->index));
+
+ if (dict_index_is_online_ddl(node->index)) {
+ /* Note that we are rolling back this transaction, so
+ that all inserts and updates with this DB_TRX_ID can
+ be skipped. */
+ row_log_table_rollback(node->index, node->trx->id);
+ }
+
/* Skip the clustered index (the first index) */
- node->index = dict_table_get_next_index(
- dict_table_get_first_index(node->table));
+ node->index = dict_table_get_next_index(node->index);
dict_table_skip_corrupt_index(node->index);
err = row_undo_ins_remove_sec_rec(node);
- if (UNIV_UNLIKELY(err != DB_SUCCESS)) {
- goto func_exit;
- }
+ if (err == DB_SUCCESS) {
- log_free_check();
+ log_free_check();
- err = row_undo_ins_remove_clust_rec(node);
+ if (node->table->id == DICT_INDEXES_ID) {
-func_exit:
- dict_table_close(node->table, dict_locked);
+ if (!dict_locked) {
+ mutex_enter(&dict_sys->mutex);
+ }
+ }
+
+ // FIXME: We need to update the dict_index_t::space and
+ // page number fields too.
+ err = row_undo_ins_remove_clust_rec(node);
+
+ if (node->table->id == DICT_INDEXES_ID
+ && !dict_locked) {
+
+ mutex_exit(&dict_sys->mutex);
+ }
+ }
+
+ dict_table_close(node->table, dict_locked, FALSE);
node->table = NULL;
diff --git a/storage/innobase/row/row0umod.cc b/storage/innobase/row/row0umod.cc
index 42034c5b80d..c1a4ba76052 100644
--- a/storage/innobase/row/row0umod.cc
+++ b/storage/innobase/row/row0umod.cc
@@ -37,6 +37,7 @@ Created 2/27/1997 Heikki Tuuri
#include "mach0data.h"
#include "row0undo.h"
#include "row0vers.h"
+#include "row0log.h"
#include "trx0trx.h"
#include "trx0rec.h"
#include "row0row.h"
@@ -71,11 +72,20 @@ introduced where a call to log_free_check() is bypassed. */
/***********************************************************//**
Undoes a modify in a clustered index record.
@return DB_SUCCESS, DB_FAIL, or error code: we may run out of file space */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_undo_mod_clust_low(
/*===================*/
undo_node_t* node, /*!< in: row undo node */
+ ulint** offsets,/*!< out: rec_get_offsets() on the record */
+ mem_heap_t** offsets_heap,
+ /*!< in/out: memory heap that can be emptied */
+ mem_heap_t* heap, /*!< in/out: memory heap */
+ const dtuple_t**rebuilt_old_pk,
+ /*!< out: row_log_table_get_pk()
+ before the update, or NULL if
+ the table is not being rebuilt online or
+ the PRIMARY KEY definition does not change */
que_thr_t* thr, /*!< in: query thread */
mtr_t* mtr, /*!< in: mtr; must be committed before
latching any further pages */
@@ -83,12 +93,12 @@ row_undo_mod_clust_low(
{
btr_pcur_t* pcur;
btr_cur_t* btr_cur;
- ulint err;
+ dberr_t err;
#ifdef UNIV_DEBUG
ibool success;
#endif /* UNIV_DEBUG */
- pcur = &(node->pcur);
+ pcur = &node->pcur;
btr_cur = btr_pcur_get_btr_cur(pcur);
#ifdef UNIV_DEBUG
@@ -97,31 +107,40 @@ row_undo_mod_clust_low(
btr_pcur_restore_position(mode, pcur, mtr);
ut_ad(success);
+ ut_ad(rec_get_trx_id(btr_cur_get_rec(btr_cur),
+ btr_cur_get_index(btr_cur))
+ == thr_get_trx(thr)->id);
+
+ if (mode != BTR_MODIFY_LEAF
+ && dict_index_is_online_ddl(btr_cur_get_index(btr_cur))) {
+ *rebuilt_old_pk = row_log_table_get_pk(
+ btr_cur_get_rec(btr_cur),
+ btr_cur_get_index(btr_cur), NULL, &heap);
+ } else {
+ *rebuilt_old_pk = NULL;
+ }
- if (mode == BTR_MODIFY_LEAF) {
+ if (mode != BTR_MODIFY_TREE) {
+ ut_ad((mode & ~BTR_ALREADY_S_LATCHED) == BTR_MODIFY_LEAF);
- err = btr_cur_optimistic_update(BTR_NO_LOCKING_FLAG
- | BTR_NO_UNDO_LOG_FLAG
- | BTR_KEEP_SYS_FLAG,
- btr_cur, node->update,
- node->cmpl_info, thr, mtr);
+ err = btr_cur_optimistic_update(
+ BTR_NO_LOCKING_FLAG | BTR_NO_UNDO_LOG_FLAG
+ | BTR_KEEP_SYS_FLAG,
+ btr_cur, offsets, offsets_heap,
+ node->update, node->cmpl_info,
+ thr, thr_get_trx(thr)->id, mtr);
} else {
- mem_heap_t* heap = NULL;
big_rec_t* dummy_big_rec;
- ut_ad(mode == BTR_MODIFY_TREE);
-
err = btr_cur_pessimistic_update(
BTR_NO_LOCKING_FLAG
| BTR_NO_UNDO_LOG_FLAG
| BTR_KEEP_SYS_FLAG,
- btr_cur, &heap, &dummy_big_rec, node->update,
- node->cmpl_info, thr, mtr);
+ btr_cur, offsets, offsets_heap, heap,
+ &dummy_big_rec, node->update,
+ node->cmpl_info, thr, thr_get_trx(thr)->id, mtr);
ut_a(!dummy_big_rec);
- if (UNIV_LIKELY_NULL(heap)) {
- mem_heap_free(heap);
- }
}
return(err);
@@ -134,8 +153,8 @@ delete-marked record and there no longer exist transactions
that would see the delete-marked record. In other words, we
roll back the insert by purging the record.
@return DB_SUCCESS, DB_FAIL, or error code: we may run out of file space */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_undo_mod_remove_clust_low(
/*==========================*/
undo_node_t* node, /*!< in: row undo node */
@@ -144,7 +163,7 @@ row_undo_mod_remove_clust_low(
ulint mode) /*!< in: BTR_MODIFY_LEAF or BTR_MODIFY_TREE */
{
btr_cur_t* btr_cur;
- ulint err;
+ dberr_t err;
ut_ad(node->rec_type == TRX_UNDO_UPD_DEL_REC);
@@ -159,8 +178,14 @@ row_undo_mod_remove_clust_low(
btr_cur = btr_pcur_get_btr_cur(&node->pcur);
+ /* We are about to remove an old, delete-marked version of the
+ record that may have been delete-marked by a different transaction
+ than the rolling-back one. */
+ ut_ad(rec_get_deleted_flag(btr_cur_get_rec(btr_cur),
+ dict_table_is_comp(node->table)));
+
if (mode == BTR_MODIFY_LEAF) {
- err = btr_cur_optimistic_delete(btr_cur, mtr)
+ err = btr_cur_optimistic_delete(btr_cur, 0, mtr)
? DB_SUCCESS
: DB_FAIL;
} else {
@@ -169,7 +194,7 @@ row_undo_mod_remove_clust_low(
/* This operation is analogous to purge, we can free also
inherited externally stored fields */
- btr_cur_pessimistic_delete(&err, FALSE, btr_cur,
+ btr_cur_pessimistic_delete(&err, FALSE, btr_cur, 0,
thr_is_recv(thr)
? RB_RECOVERY_PURGE_REC
: RB_NONE, mtr);
@@ -186,8 +211,8 @@ row_undo_mod_remove_clust_low(
Undoes a modify in a clustered index record. Sets also the node state for the
next round of undo.
@return DB_SUCCESS or error code: we may run out of file space */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_undo_mod_clust(
/*===============*/
undo_node_t* node, /*!< in: row undo node */
@@ -195,21 +220,42 @@ row_undo_mod_clust(
{
btr_pcur_t* pcur;
mtr_t mtr;
- ulint err;
+ dberr_t err;
+ dict_index_t* index;
+ bool online;
- ut_ad(node && thr);
+ ut_ad(thr_get_trx(thr) == node->trx);
+ ut_ad(node->trx->dict_operation_lock_mode);
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(rw_lock_own(&dict_operation_lock, RW_LOCK_SHARED)
+ || rw_lock_own(&dict_operation_lock, RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
log_free_check();
+ pcur = &node->pcur;
+ index = btr_cur_get_index(btr_pcur_get_btr_cur(pcur));
+ mtr_start(&mtr);
- pcur = &(node->pcur);
+ online = dict_index_is_online_ddl(index);
+ if (online) {
+ ut_ad(node->trx->dict_operation_lock_mode != RW_X_LATCH);
+ mtr_s_lock(dict_index_get_lock(index), &mtr);
+ }
- mtr_start(&mtr);
+ mem_heap_t* heap = mem_heap_create(1024);
+ mem_heap_t* offsets_heap = NULL;
+ ulint* offsets = NULL;
+ const dtuple_t* rebuilt_old_pk;
/* Try optimistic processing of the record, keeping changes within
the index page */
- err = row_undo_mod_clust_low(node, thr, &mtr, BTR_MODIFY_LEAF);
+ err = row_undo_mod_clust_low(node, &offsets, &offsets_heap,
+ heap, &rebuilt_old_pk,
+ thr, &mtr, online
+ ? BTR_MODIFY_LEAF | BTR_ALREADY_S_LATCHED
+ : BTR_MODIFY_LEAF);
if (err != DB_SUCCESS) {
btr_pcur_commit_specify_mtr(pcur, &mtr);
@@ -219,7 +265,40 @@ row_undo_mod_clust(
mtr_start(&mtr);
- err = row_undo_mod_clust_low(node, thr, &mtr, BTR_MODIFY_TREE);
+ err = row_undo_mod_clust_low(
+ node, &offsets, &offsets_heap, heap, &rebuilt_old_pk,
+ thr, &mtr, BTR_MODIFY_TREE);
+ ut_ad(err == DB_SUCCESS || err == DB_OUT_OF_FILE_SPACE);
+ }
+
+ /* Online rebuild cannot be initiated while we are holding
+ dict_operation_lock and index->lock. (It can be aborted.) */
+ ut_ad(online || !dict_index_is_online_ddl(index));
+
+ if (err == DB_SUCCESS && online) {
+#ifdef UNIV_SYNC_DEBUG
+ ut_ad(rw_lock_own(&index->lock, RW_LOCK_SHARED)
+ || rw_lock_own(&index->lock, RW_LOCK_EX));
+#endif /* UNIV_SYNC_DEBUG */
+ switch (node->rec_type) {
+ case TRX_UNDO_DEL_MARK_REC:
+ row_log_table_insert(
+ btr_pcur_get_rec(pcur), index, offsets);
+ break;
+ case TRX_UNDO_UPD_EXIST_REC:
+ row_log_table_update(
+ btr_pcur_get_rec(pcur), index, offsets,
+ rebuilt_old_pk);
+ break;
+ case TRX_UNDO_UPD_DEL_REC:
+ row_log_table_delete(
+ btr_pcur_get_rec(pcur), index, offsets,
+ node->trx->id);
+ break;
+ default:
+ ut_ad(0);
+ break;
+ }
}
btr_pcur_commit_specify_mtr(pcur, &mtr);
@@ -228,8 +307,11 @@ row_undo_mod_clust(
mtr_start(&mtr);
- err = row_undo_mod_remove_clust_low(node, thr, &mtr,
- BTR_MODIFY_LEAF);
+ /* It is not necessary to call row_log_table,
+ because the record is delete-marked and would thus
+ be omitted from the rebuilt copy of the table. */
+ err = row_undo_mod_remove_clust_low(
+ node, thr, &mtr, BTR_MODIFY_LEAF);
if (err != DB_SUCCESS) {
btr_pcur_commit_specify_mtr(pcur, &mtr);
@@ -240,6 +322,9 @@ row_undo_mod_clust(
err = row_undo_mod_remove_clust_low(node, thr, &mtr,
BTR_MODIFY_TREE);
+
+ ut_ad(err == DB_SUCCESS
+ || err == DB_OUT_OF_FILE_SPACE);
}
btr_pcur_commit_specify_mtr(pcur, &mtr);
@@ -249,14 +334,18 @@ row_undo_mod_clust(
trx_undo_rec_release(node->trx, node->undo_no);
+ if (offsets_heap) {
+ mem_heap_free(offsets_heap);
+ }
+ mem_heap_free(heap);
return(err);
}
/***********************************************************//**
Delete marks or removes a secondary index entry if found.
@return DB_SUCCESS, DB_FAIL, or DB_OUT_OF_FILE_SPACE */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_undo_mod_del_mark_or_remove_sec_low(
/*====================================*/
undo_node_t* node, /*!< in: row undo node */
@@ -270,7 +359,7 @@ row_undo_mod_del_mark_or_remove_sec_low(
btr_cur_t* btr_cur;
ibool success;
ibool old_has;
- ulint err;
+ dberr_t err = DB_SUCCESS;
mtr_t mtr;
mtr_t mtr_vers;
enum row_search_result search_result;
@@ -278,9 +367,30 @@ row_undo_mod_del_mark_or_remove_sec_low(
log_free_check();
mtr_start(&mtr);
- btr_cur = btr_pcur_get_btr_cur(&pcur);
+ if (*index->name == TEMP_INDEX_PREFIX) {
+ /* The index->online_status may change if the
+ index->name starts with TEMP_INDEX_PREFIX (meaning
+ that the index is or was being created online). It is
+ protected by index->lock. */
+ if (mode == BTR_MODIFY_LEAF) {
+ mode = BTR_MODIFY_LEAF | BTR_ALREADY_S_LATCHED;
+ mtr_s_lock(dict_index_get_lock(index), &mtr);
+ } else {
+ ut_ad(mode == BTR_MODIFY_TREE);
+ mtr_x_lock(dict_index_get_lock(index), &mtr);
+ }
+
+ if (row_log_online_op_try(index, entry, 0)) {
+ goto func_exit_no_pcur;
+ }
+ } else {
+ /* For secondary indexes,
+ index->online_status==ONLINE_INDEX_CREATION unless
+ index->name starts with TEMP_INDEX_PREFIX. */
+ ut_ad(!dict_index_is_online_ddl(index));
+ }
- ut_ad(mode == BTR_MODIFY_TREE || mode == BTR_MODIFY_LEAF);
+ btr_cur = btr_pcur_get_btr_cur(&pcur);
search_result = row_search_index_entry(index, entry, mode,
&pcur, &mtr);
@@ -296,8 +406,6 @@ row_undo_mod_del_mark_or_remove_sec_low(
In normal processing, if an update ends in a deadlock
before it has inserted all updated secondary index
records, then the undo will not find those records. */
-
- err = DB_SUCCESS;
goto func_exit;
case ROW_FOUND:
break;
@@ -329,16 +437,14 @@ row_undo_mod_del_mark_or_remove_sec_low(
} else {
/* Remove the index record */
- if (mode == BTR_MODIFY_LEAF) {
- success = btr_cur_optimistic_delete(btr_cur, &mtr);
+ if (mode != BTR_MODIFY_TREE) {
+ success = btr_cur_optimistic_delete(btr_cur, 0, &mtr);
if (success) {
err = DB_SUCCESS;
} else {
err = DB_FAIL;
}
} else {
- ut_ad(mode == BTR_MODIFY_TREE);
-
/* No need to distinguish RB_RECOVERY_PURGE here,
because we are deleting a secondary index record:
the distinction between RB_NORMAL and
@@ -346,7 +452,7 @@ row_undo_mod_del_mark_or_remove_sec_low(
record that contains externally stored
columns. */
ut_ad(!dict_index_is_clust(index));
- btr_cur_pessimistic_delete(&err, FALSE, btr_cur,
+ btr_cur_pessimistic_delete(&err, FALSE, btr_cur, 0,
RB_NORMAL, &mtr);
/* The delete operation may fail if we have little
@@ -359,6 +465,7 @@ row_undo_mod_del_mark_or_remove_sec_low(
func_exit:
btr_pcur_close(&pcur);
+func_exit_no_pcur:
mtr_commit(&mtr);
return(err);
@@ -373,8 +480,8 @@ not cause problems because in row0sel.cc, in queries we always retrieve the
clustered index record or an earlier version of it, if the secondary index
record through which we do the search is delete-marked.
@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_undo_mod_del_mark_or_remove_sec(
/*================================*/
undo_node_t* node, /*!< in: row undo node */
@@ -382,7 +489,7 @@ row_undo_mod_del_mark_or_remove_sec(
dict_index_t* index, /*!< in: index */
dtuple_t* entry) /*!< in: index entry */
{
- ulint err;
+ dberr_t err;
err = row_undo_mod_del_mark_or_remove_sec_low(node, thr, index,
entry, BTR_MODIFY_LEAF);
@@ -401,42 +508,67 @@ Delete unmarks a secondary index entry which must be found. It might not be
delete-marked at the moment, but it does not harm to unmark it anyway. We also
need to update the fields of the secondary index record if we updated its
fields but alphabetically they stayed the same, e.g., 'abc' -> 'aBc'.
-@return DB_FAIL or DB_SUCCESS or DB_OUT_OF_FILE_SPACE */
-static
-ulint
+@retval DB_SUCCESS on success
+@retval DB_FAIL if BTR_MODIFY_TREE should be tried
+@retval DB_OUT_OF_FILE_SPACE when running out of tablespace
+@retval DB_DUPLICATE_KEY if the value was missing
+ and an insert would lead to a duplicate */
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_undo_mod_del_unmark_sec_and_undo_update(
/*========================================*/
ulint mode, /*!< in: search mode: BTR_MODIFY_LEAF or
BTR_MODIFY_TREE */
que_thr_t* thr, /*!< in: query thread */
dict_index_t* index, /*!< in: index */
- const dtuple_t* entry) /*!< in: index entry */
+ dtuple_t* entry) /*!< in: index entry */
{
- mem_heap_t* heap;
btr_pcur_t pcur;
- btr_cur_t* btr_cur;
+ btr_cur_t* btr_cur = btr_pcur_get_btr_cur(&pcur);
upd_t* update;
- ulint err = DB_SUCCESS;
+ dberr_t err = DB_SUCCESS;
big_rec_t* dummy_big_rec;
mtr_t mtr;
trx_t* trx = thr_get_trx(thr);
+ const ulint flags
+ = BTR_KEEP_SYS_FLAG | BTR_NO_LOCKING_FLAG;
enum row_search_result search_result;
- /* Ignore indexes that are being created. */
- if (UNIV_UNLIKELY(*index->name == TEMP_INDEX_PREFIX)) {
-
- return(DB_SUCCESS);
- }
+ ut_ad(trx->id);
log_free_check();
mtr_start(&mtr);
- ut_ad(mode == BTR_MODIFY_TREE || mode == BTR_MODIFY_LEAF);
+ if (*index->name == TEMP_INDEX_PREFIX) {
+ /* The index->online_status may change if the
+ index->name starts with TEMP_INDEX_PREFIX (meaning
+ that the index is or was being created online). It is
+ protected by index->lock. */
+ if (mode == BTR_MODIFY_LEAF) {
+ mode = BTR_MODIFY_LEAF | BTR_ALREADY_S_LATCHED;
+ mtr_s_lock(dict_index_get_lock(index), &mtr);
+ } else {
+ ut_ad(mode == BTR_MODIFY_TREE);
+ mtr_x_lock(dict_index_get_lock(index), &mtr);
+ }
+
+ if (row_log_online_op_try(index, entry, trx->id)) {
+ goto func_exit_no_pcur;
+ }
+ } else {
+ /* For secondary indexes,
+ index->online_status==ONLINE_INDEX_CREATION unless
+ index->name starts with TEMP_INDEX_PREFIX. */
+ ut_ad(!dict_index_is_online_ddl(index));
+ }
search_result = row_search_index_entry(index, entry, mode,
&pcur, &mtr);
switch (search_result) {
+ mem_heap_t* heap;
+ mem_heap_t* offsets_heap;
+ ulint* offsets;
case ROW_BUFFERED:
case ROW_NOT_DELETED_REF:
/* These are invalid outcomes, because the mode passed
@@ -444,81 +576,184 @@ row_undo_mod_del_unmark_sec_and_undo_update(
flags BTR_INSERT, BTR_DELETE, or BTR_DELETE_MARK. */
ut_error;
case ROW_NOT_FOUND:
- fputs("InnoDB: error in sec index entry del undo in\n"
- "InnoDB: ", stderr);
- dict_index_name_print(stderr, trx, index);
- fputs("\n"
- "InnoDB: tuple ", stderr);
- dtuple_print(stderr, entry);
- fputs("\n"
- "InnoDB: record ", stderr);
- rec_print(stderr, btr_pcur_get_rec(&pcur), index);
- putc('\n', stderr);
- trx_print(stderr, trx, 0);
- fputs("\n"
- "InnoDB: Submit a detailed bug report"
- " to http://bugs.mysql.com\n", stderr);
- ut_ad(0);
+ if (*index->name != TEMP_INDEX_PREFIX) {
+ /* During online secondary index creation, it
+ is possible that MySQL is waiting for a
+ meta-data lock upgrade before invoking
+ ha_innobase::commit_inplace_alter_table()
+ while this ROLLBACK is executing. InnoDB has
+ finished building the index, but it does not
+ yet exist in MySQL. In this case, we suppress
+ the printout to the error log. */
+ fputs("InnoDB: error in sec index entry del undo in\n"
+ "InnoDB: ", stderr);
+ dict_index_name_print(stderr, trx, index);
+ fputs("\n"
+ "InnoDB: tuple ", stderr);
+ dtuple_print(stderr, entry);
+ fputs("\n"
+ "InnoDB: record ", stderr);
+ rec_print(stderr, btr_pcur_get_rec(&pcur), index);
+ putc('\n', stderr);
+ trx_print(stderr, trx, 0);
+ fputs("\n"
+ "InnoDB: Submit a detailed bug report"
+ " to http://bugs.mysql.com\n", stderr);
+
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "record in index %s was not found"
+ " on rollback, trying to insert",
+ index->name);
+ }
+
+ if (btr_cur->up_match >= dict_index_get_n_unique(index)
+ || btr_cur->low_match >= dict_index_get_n_unique(index)) {
+ if (*index->name != TEMP_INDEX_PREFIX) {
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "record in index %s was not found on"
+ " rollback, and a duplicate exists",
+ index->name);
+ }
+ err = DB_DUPLICATE_KEY;
+ break;
+ }
+
+ /* Insert the missing record that we were trying to
+ delete-unmark. */
+ big_rec_t* big_rec;
+ rec_t* insert_rec;
+ offsets = NULL;
+ offsets_heap = NULL;
+
+ err = btr_cur_optimistic_insert(
+ flags, btr_cur, &offsets, &offsets_heap,
+ entry, &insert_rec, &big_rec,
+ 0, thr, &mtr);
+ ut_ad(!big_rec);
+
+ if (err == DB_FAIL && mode == BTR_MODIFY_TREE) {
+ err = btr_cur_pessimistic_insert(
+ flags, btr_cur,
+ &offsets, &offsets_heap,
+ entry, &insert_rec, &big_rec,
+ 0, thr, &mtr);
+ /* There are no off-page columns in
+ secondary indexes. */
+ ut_ad(!big_rec);
+ }
+
+ if (err == DB_SUCCESS) {
+ page_update_max_trx_id(
+ btr_cur_get_block(btr_cur),
+ btr_cur_get_page_zip(btr_cur),
+ trx->id, &mtr);
+ }
+
+ if (offsets_heap) {
+ mem_heap_free(offsets_heap);
+ }
+
break;
case ROW_FOUND:
- btr_cur = btr_pcur_get_btr_cur(&pcur);
- err = btr_cur_del_mark_set_sec_rec(BTR_NO_LOCKING_FLAG,
- btr_cur, FALSE, thr, &mtr);
+ err = btr_cur_del_mark_set_sec_rec(
+ BTR_NO_LOCKING_FLAG,
+ btr_cur, FALSE, thr, &mtr);
ut_a(err == DB_SUCCESS);
- heap = mem_heap_create(100);
-
+ heap = mem_heap_create(
+ sizeof(upd_t)
+ + dtuple_get_n_fields(entry) * sizeof(upd_field_t));
+ offsets_heap = NULL;
+ offsets = rec_get_offsets(
+ btr_cur_get_rec(btr_cur),
+ index, NULL, ULINT_UNDEFINED, &offsets_heap);
update = row_upd_build_sec_rec_difference_binary(
- index, entry, btr_cur_get_rec(btr_cur), trx, heap);
+ btr_cur_get_rec(btr_cur), index, offsets, entry, heap);
if (upd_get_n_fields(update) == 0) {
/* Do nothing */
- } else if (mode == BTR_MODIFY_LEAF) {
+ } else if (mode != BTR_MODIFY_TREE) {
/* Try an optimistic updating of the record, keeping
changes within the page */
+ /* TODO: pass offsets, not &offsets */
err = btr_cur_optimistic_update(
- BTR_KEEP_SYS_FLAG | BTR_NO_LOCKING_FLAG,
- btr_cur, update, 0, thr, &mtr);
+ flags, btr_cur, &offsets, &offsets_heap,
+ update, 0, thr, thr_get_trx(thr)->id, &mtr);
switch (err) {
case DB_OVERFLOW:
case DB_UNDERFLOW:
case DB_ZIP_OVERFLOW:
err = DB_FAIL;
+ default:
+ break;
}
} else {
- ut_a(mode == BTR_MODIFY_TREE);
err = btr_cur_pessimistic_update(
- BTR_KEEP_SYS_FLAG | BTR_NO_LOCKING_FLAG,
- btr_cur, &heap, &dummy_big_rec,
- update, 0, thr, &mtr);
+ flags, btr_cur, &offsets, &offsets_heap,
+ heap, &dummy_big_rec,
+ update, 0, thr, thr_get_trx(thr)->id, &mtr);
ut_a(!dummy_big_rec);
}
mem_heap_free(heap);
+ mem_heap_free(offsets_heap);
}
btr_pcur_close(&pcur);
+func_exit_no_pcur:
mtr_commit(&mtr);
return(err);
}
/***********************************************************//**
+Flags a secondary index corrupted. */
+static __attribute__((nonnull))
+void
+row_undo_mod_sec_flag_corrupted(
+/*============================*/
+ trx_t* trx, /*!< in/out: transaction */
+ dict_index_t* index) /*!< in: secondary index */
+{
+ ut_ad(!dict_index_is_clust(index));
+
+ switch (trx->dict_operation_lock_mode) {
+ case RW_S_LATCH:
+ /* Because row_undo() is holding an S-latch
+ on the data dictionary during normal rollback,
+ we can only mark the index corrupted in the
+ data dictionary cache. TODO: fix this somehow.*/
+ mutex_enter(&dict_sys->mutex);
+ dict_set_corrupted_index_cache_only(index, index->table);
+ mutex_exit(&dict_sys->mutex);
+ break;
+ default:
+ ut_ad(0);
+ /* fall through */
+ case RW_X_LATCH:
+ /* This should be the rollback of a data dictionary
+ transaction. */
+ dict_set_corrupted(index, trx, "rollback");
+ }
+}
+
+/***********************************************************//**
Undoes a modify in secondary indexes when undo record type is UPD_DEL.
@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_undo_mod_upd_del_sec(
/*=====================*/
undo_node_t* node, /*!< in: row undo node */
que_thr_t* thr) /*!< in: query thread */
{
mem_heap_t* heap;
- ulint err = DB_SUCCESS;
+ dberr_t err = DB_SUCCESS;
ut_ad(node->rec_type == TRX_UNDO_UPD_DEL_REC);
ut_ad(!node->undo_row);
+
heap = mem_heap_create(1024);
while (node->index != NULL) {
@@ -530,6 +765,13 @@ row_undo_mod_upd_del_sec(
continue;
}
+ /* During online index creation,
+ HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE should
+ guarantee that any active transaction has not modified
+ indexed columns such that col->ord_part was 0 at the
+ time when the undo log record was written. When we get
+ to roll back an undo log entry TRX_UNDO_DEL_MARK_REC,
+ it should always cover all affected indexes. */
entry = row_build_index_entry(
node->row, node->ext, index, heap);
@@ -566,15 +808,15 @@ row_undo_mod_upd_del_sec(
/***********************************************************//**
Undoes a modify in secondary indexes when undo record type is DEL_MARK.
@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_undo_mod_del_mark_sec(
/*======================*/
undo_node_t* node, /*!< in: row undo node */
que_thr_t* thr) /*!< in: query thread */
{
mem_heap_t* heap;
- ulint err = DB_SUCCESS;
+ dberr_t err = DB_SUCCESS;
ut_ad(!node->undo_row);
@@ -589,6 +831,13 @@ row_undo_mod_del_mark_sec(
continue;
}
+ /* During online index creation,
+ HA_ALTER_INPLACE_NO_LOCK_AFTER_PREPARE should
+ guarantee that any active transaction has not modified
+ indexed columns such that col->ord_part was 0 at the
+ time when the undo log record was written. When we get
+ to roll back an undo log entry TRX_UNDO_DEL_MARK_REC,
+ it should always cover all affected indexes. */
entry = row_build_index_entry(
node->row, node->ext, index, heap);
@@ -601,8 +850,17 @@ row_undo_mod_del_mark_sec(
BTR_MODIFY_TREE, thr, index, entry);
}
- if (UNIV_UNLIKELY(err != DB_SUCCESS)) {
-
+ if (err == DB_DUPLICATE_KEY) {
+ row_undo_mod_sec_flag_corrupted(
+ thr_get_trx(thr), index);
+ err = DB_SUCCESS;
+ /* Do not return any error to the caller. The
+ duplicate will be reported by ALTER TABLE or
+ CREATE UNIQUE INDEX. Unfortunately we cannot
+ report the duplicate key value to the DDL
+ thread, because the altered_table object is
+ private to its call stack. */
+ } else if (err != DB_SUCCESS) {
break;
}
@@ -618,18 +876,18 @@ row_undo_mod_del_mark_sec(
/***********************************************************//**
Undoes a modify in secondary indexes when undo record type is UPD_EXIST.
@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_undo_mod_upd_exist_sec(
/*=======================*/
undo_node_t* node, /*!< in: row undo node */
que_thr_t* thr) /*!< in: query thread */
{
mem_heap_t* heap;
- ulint err = DB_SUCCESS;
+ dberr_t err = DB_SUCCESS;
if (node->index == NULL
- || (node->cmpl_info & UPD_NODE_NO_ORD_CHANGE)) {
+ || ((node->cmpl_info & UPD_NODE_NO_ORD_CHANGE))) {
/* No change in secondary indexes */
return(err);
@@ -715,7 +973,11 @@ row_undo_mod_upd_exist_sec(
BTR_MODIFY_TREE, thr, index, entry);
}
- if (UNIV_UNLIKELY(err != DB_SUCCESS)) {
+ if (err == DB_DUPLICATE_KEY) {
+ row_undo_mod_sec_flag_corrupted(
+ thr_get_trx(thr), index);
+ err = DB_SUCCESS;
+ } else if (err != DB_SUCCESS) {
break;
}
@@ -730,12 +992,11 @@ row_undo_mod_upd_exist_sec(
/***********************************************************//**
Parses the row reference and other info in a modify undo log record. */
-static
+static __attribute__((nonnull))
void
row_undo_mod_parse_undo_rec(
/*========================*/
undo_node_t* node, /*!< in: row undo node */
- que_thr_t* thr, /*!< in: query thread */
ibool dict_locked) /*!< in: TRUE if own dict_sys->mutex */
{
dict_index_t* clust_index;
@@ -747,16 +1008,13 @@ row_undo_mod_parse_undo_rec(
ulint info_bits;
ulint type;
ulint cmpl_info;
- ibool dummy_extern;
- trx_t* trx;
+ bool dummy_extern;
- ut_ad(node && thr);
- trx = thr_get_trx(thr);
ptr = trx_undo_rec_get_pars(node->undo_rec, &type, &cmpl_info,
&dummy_extern, &undo_no, &table_id);
node->rec_type = type;
- node->table = dict_table_open_on_id(table_id, dict_locked);
+ node->table = dict_table_open_on_id(table_id, dict_locked, FALSE);
/* TODO: other fixes associated with DROP TABLE + rollback in the
same table by another user */
@@ -767,7 +1025,7 @@ row_undo_mod_parse_undo_rec(
}
if (node->table->ibd_file_missing) {
- dict_table_close(node->table, dict_locked);
+ dict_table_close(node->table, dict_locked, FALSE);
/* We skip undo operations to missing .ibd files */
node->table = NULL;
@@ -784,14 +1042,14 @@ row_undo_mod_parse_undo_rec(
node->heap);
trx_undo_update_rec_get_update(ptr, clust_index, type, trx_id,
- roll_ptr, info_bits, trx,
+ roll_ptr, info_bits, node->trx,
node->heap, &(node->update));
node->new_trx_id = trx_id;
node->cmpl_info = cmpl_info;
if (!row_undo_search_clust_to_pcur(node)) {
- dict_table_close(node->table, dict_locked);
+ dict_table_close(node->table, dict_locked, FALSE);
node->table = NULL;
}
@@ -801,21 +1059,23 @@ row_undo_mod_parse_undo_rec(
Undoes a modify operation on a row of a table.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
row_undo_mod(
/*=========*/
undo_node_t* node, /*!< in: row undo node */
que_thr_t* thr) /*!< in: query thread */
{
- ulint err;
- ibool dict_locked;
+ dberr_t err;
+ ibool dict_locked;
ut_ad(node && thr);
ut_ad(node->state == UNDO_NODE_MODIFY);
dict_locked = thr_get_trx(thr)->dict_operation_lock_mode == RW_X_LATCH;
- row_undo_mod_parse_undo_rec(node, thr, dict_locked);
+ ut_ad(thr_get_trx(thr) == node->trx);
+
+ row_undo_mod_parse_undo_rec(node, dict_locked);
if (node->table == NULL) {
/* It is already undone, or will be undone by another query
@@ -827,8 +1087,18 @@ row_undo_mod(
return(DB_SUCCESS);
}
- node->index = dict_table_get_next_index(
- dict_table_get_first_index(node->table));
+ node->index = dict_table_get_first_index(node->table);
+ ut_ad(dict_index_is_clust(node->index));
+
+ if (dict_index_is_online_ddl(node->index)) {
+ /* Note that we are rolling back this transaction, so
+ that all inserts and updates with this DB_TRX_ID can
+ be skipped. */
+ row_log_table_rollback(node->index, node->trx->id);
+ }
+
+ /* Skip the clustered index (the first index) */
+ node->index = dict_table_get_next_index(node->index);
/* Skip all corrupted secondary index */
dict_table_skip_corrupt_index(node->index);
@@ -853,7 +1123,7 @@ row_undo_mod(
err = row_undo_mod_clust(node, thr);
}
- dict_table_close(node->table, dict_locked);
+ dict_table_close(node->table, dict_locked, FALSE);
node->table = NULL;
diff --git a/storage/innobase/row/row0undo.cc b/storage/innobase/row/row0undo.cc
index a73f858599d..9977a1e8f04 100644
--- a/storage/innobase/row/row0undo.cc
+++ b/storage/innobase/row/row0undo.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1997, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1997, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -216,7 +216,8 @@ row_undo_search_clust_to_pcur(
}
node->row = row_build(ROW_COPY_DATA, clust_index, rec,
- offsets, NULL, ext, node->heap);
+ offsets, NULL,
+ NULL, NULL, ext, node->heap);
if (node->rec_type == TRX_UNDO_UPD_EXIST_REC) {
node->undo_row = dtuple_copy(node->row, node->heap);
row_upd_replace(node->undo_row, &node->undo_ext,
@@ -244,14 +245,14 @@ Fetches an undo log record and does the undo for the recorded operation.
If none left, or a partial rollback completed, returns control to the
parent node, which is always a query thread node.
@return DB_SUCCESS if operation successfully completed, else error code */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_undo(
/*=====*/
undo_node_t* node, /*!< in: row undo node */
que_thr_t* thr) /*!< in: query thread */
{
- ulint err;
+ dberr_t err;
trx_t* trx;
roll_ptr_t roll_ptr;
ibool locked_data_dict;
@@ -332,7 +333,7 @@ row_undo_step(
/*==========*/
que_thr_t* thr) /*!< in: query thread */
{
- ulint err;
+ dberr_t err;
undo_node_t* node;
trx_t* trx;
@@ -348,17 +349,17 @@ row_undo_step(
err = row_undo(node, thr);
- trx->error_state = static_cast<enum db_err>(err);
+ trx->error_state = err;
if (err != DB_SUCCESS) {
/* SQL error detected */
- fprintf(stderr, "InnoDB: Fatal error %lu in rollback.\n",
- (ulong) err);
+ fprintf(stderr, "InnoDB: Fatal error (%s) in rollback.\n",
+ ut_strerr(err));
if (err == DB_OUT_OF_FILE_SPACE) {
fprintf(stderr,
- "InnoDB: Error 13 means out of tablespace.\n"
+ "InnoDB: Out of tablespace.\n"
"InnoDB: Consider increasing"
" your tablespace.\n");
diff --git a/storage/innobase/row/row0upd.cc b/storage/innobase/row/row0upd.cc
index 28faa59add8..f97c0c3c82b 100644
--- a/storage/innobase/row/row0upd.cc
+++ b/storage/innobase/row/row0upd.cc
@@ -23,14 +23,13 @@ Update of a row
Created 12/27/1996 Heikki Tuuri
*******************************************************/
-#include "m_string.h" /* for my_sys.h */
-#include "my_sys.h" /* DEBUG_SYNC_C */
#include "row0upd.h"
#ifdef UNIV_NONINL
#include "row0upd.ic"
#endif
+#include "ha_prototypes.h"
#include "dict0dict.h"
#include "trx0undo.h"
#include "rem0rec.h"
@@ -43,8 +42,9 @@ Created 12/27/1996 Heikki Tuuri
#include "que0que.h"
#include "row0ext.h"
#include "row0ins.h"
-#include "row0sel.h"
+#include "row0log.h"
#include "row0row.h"
+#include "row0sel.h"
#include "rem0cmp.h"
#include "lock0lock.h"
#include "log0log.h"
@@ -178,8 +178,8 @@ NOTE that this function will temporarily commit mtr and lose the
pcur position!
@return DB_SUCCESS or an error code */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_upd_check_references_constraints(
/*=================================*/
upd_node_t* node, /*!< in: row update node */
@@ -197,7 +197,7 @@ row_upd_check_references_constraints(
trx_t* trx;
const rec_t* rec;
ulint n_ext;
- ulint err;
+ dberr_t err;
ibool got_s_lock = FALSE;
if (UT_LIST_GET_FIRST(table->referenced_list) == NULL) {
@@ -212,11 +212,12 @@ row_upd_check_references_constraints(
heap = mem_heap_create(500);
- entry = row_rec_to_index_entry(ROW_COPY_DATA, rec, index, offsets,
- &n_ext, heap);
+ entry = row_rec_to_index_entry(rec, index, offsets, &n_ext, heap);
mtr_commit(mtr);
+ DEBUG_SYNC_C("foreign_constraint_check_for_update");
+
mtr_start(mtr);
if (trx->dict_operation_lock_mode == 0) {
@@ -225,6 +226,7 @@ row_upd_check_references_constraints(
row_mysql_freeze_data_dictionary(trx);
}
+run_again:
foreign = UT_LIST_GET_FIRST(table->referenced_list);
while (foreign) {
@@ -238,18 +240,20 @@ row_upd_check_references_constraints(
|| row_upd_changes_first_fields_binary(
entry, index, node->update,
foreign->n_fields))) {
+ dict_table_t* foreign_table = foreign->foreign_table;
dict_table_t* ref_table = NULL;
- if (foreign->foreign_table == NULL) {
+ if (foreign_table == NULL) {
ref_table = dict_table_open_on_name(
- foreign->foreign_table_name_lookup, FALSE);
+ foreign->foreign_table_name_lookup,
+ FALSE, FALSE, DICT_ERR_IGNORE_NONE);
}
- if (foreign->foreign_table) {
+ if (foreign_table) {
os_inc_counter(dict_sys->mutex,
- foreign->foreign_table
+ foreign_table
->n_foreign_key_checks_running);
}
@@ -261,18 +265,20 @@ row_upd_check_references_constraints(
err = row_ins_check_foreign_constraint(
FALSE, foreign, table, entry, thr);
- if (foreign->foreign_table) {
+ if (foreign_table) {
os_dec_counter(dict_sys->mutex,
- foreign->foreign_table
+ foreign_table
->n_foreign_key_checks_running);
}
if (ref_table != NULL) {
- dict_table_close(ref_table, FALSE);
+ dict_table_close(ref_table, FALSE, FALSE);
}
- if (err != DB_SUCCESS) {
-
+ /* Some table foreign key dropped, try again */
+ if (err == DB_DICT_CHANGED) {
+ goto run_again;
+ } else if (err != DB_SUCCESS) {
goto func_exit;
}
}
@@ -289,6 +295,8 @@ func_exit:
mem_heap_free(heap);
+ DEBUG_SYNC_C("foreign_constraint_check_for_update_done");
+
return(err);
}
@@ -465,6 +473,47 @@ row_upd_changes_field_size_or_external(
return(FALSE);
}
+
+/***********************************************************//**
+Returns true if row update contains disowned external fields.
+@return true if the update contains disowned external fields. */
+UNIV_INTERN
+bool
+row_upd_changes_disowned_external(
+/*==============================*/
+ const upd_t* update) /*!< in: update vector */
+{
+ const upd_field_t* upd_field;
+ const dfield_t* new_val;
+ ulint new_len;
+ ulint n_fields;
+ ulint i;
+
+ n_fields = upd_get_n_fields(update);
+
+ for (i = 0; i < n_fields; i++) {
+ const byte* field_ref;
+
+ upd_field = upd_get_nth_field(update, i);
+ new_val = &(upd_field->new_val);
+ new_len = dfield_get_len(new_val);
+
+ if (!dfield_is_ext(new_val)) {
+ continue;
+ }
+
+ ut_ad(new_len >= BTR_EXTERN_FIELD_REF_SIZE);
+
+ field_ref = static_cast<const byte*>(dfield_get_data(new_val))
+ + new_len - BTR_EXTERN_FIELD_REF_SIZE;
+
+ if (field_ref[BTR_EXTERN_LEN] & BTR_EXTERN_OWNER_FLAG) {
+ return(true);
+ }
+ }
+
+ return(false);
+}
#endif /* !UNIV_HOTBACKUP */
/***********************************************************//**
@@ -560,7 +609,7 @@ byte*
row_upd_write_sys_vals_to_log(
/*==========================*/
dict_index_t* index, /*!< in: clustered index */
- trx_t* trx, /*!< in: transaction */
+ trx_id_t trx_id, /*!< in: transaction id */
roll_ptr_t roll_ptr,/*!< in: roll ptr of the undo log record */
byte* log_ptr,/*!< pointer to a buffer of size > 20 opened
in mlog */
@@ -576,7 +625,7 @@ row_upd_write_sys_vals_to_log(
trx_write_roll_ptr(log_ptr, roll_ptr);
log_ptr += DATA_ROLL_PTR_LEN;
- log_ptr += mach_ull_write_compressed(log_ptr, trx->id);
+ log_ptr += mach_ull_write_compressed(log_ptr, trx_id);
return(log_ptr);
}
@@ -779,10 +828,10 @@ UNIV_INTERN
upd_t*
row_upd_build_sec_rec_difference_binary(
/*====================================*/
+ const rec_t* rec, /*!< in: secondary index record */
dict_index_t* index, /*!< in: index */
+ const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */
const dtuple_t* entry, /*!< in: entry to insert */
- const rec_t* rec, /*!< in: secondary index record */
- trx_t* trx, /*!< in: transaction */
mem_heap_t* heap) /*!< in: memory heap from which allocated */
{
upd_field_t* upd_field;
@@ -792,18 +841,16 @@ row_upd_build_sec_rec_difference_binary(
upd_t* update;
ulint n_diff;
ulint i;
- ulint offsets_[REC_OFFS_SMALL_SIZE];
- const ulint* offsets;
- rec_offs_init(offsets_);
/* This function is used only for a secondary index */
ut_a(!dict_index_is_clust(index));
+ ut_ad(rec_offs_validate(rec, index, offsets));
+ ut_ad(rec_offs_n_fields(offsets) == dtuple_get_n_fields(entry));
+ ut_ad(!rec_offs_any_extern(offsets));
update = upd_create(dtuple_get_n_fields(entry), heap);
n_diff = 0;
- offsets = rec_get_offsets(rec, index, offsets_,
- ULINT_UNDEFINED, &heap);
for (i = 0; i < dtuple_get_n_fields(entry); i++) {
@@ -828,7 +875,7 @@ row_upd_build_sec_rec_difference_binary(
dfield_copy(&(upd_field->new_val), dfield);
- upd_field_set_field_no(upd_field, i, index, trx);
+ upd_field_set_field_no(upd_field, i, index, NULL);
n_diff++;
}
@@ -846,12 +893,15 @@ the equal ordering fields. NOTE: we compare the fields as binary strings!
@return own: update vector of differing fields, excluding roll ptr and
trx id */
UNIV_INTERN
-upd_t*
+const upd_t*
row_upd_build_difference_binary(
/*============================*/
dict_index_t* index, /*!< in: clustered index */
const dtuple_t* entry, /*!< in: entry to insert */
const rec_t* rec, /*!< in: clustered index record */
+ const ulint* offsets,/*!< in: rec_get_offsets(rec,index), or NULL */
+ bool no_sys, /*!< in: skip the system columns
+ DB_TRX_ID and DB_ROLL_PTR */
trx_t* trx, /*!< in: transaction */
mem_heap_t* heap) /*!< in: memory heap from which allocated */
{
@@ -861,11 +911,9 @@ row_upd_build_difference_binary(
ulint len;
upd_t* update;
ulint n_diff;
- ulint roll_ptr_pos;
ulint trx_id_pos;
ulint i;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
- const ulint* offsets;
rec_offs_init(offsets_);
/* This function is used only for a clustered index */
@@ -875,11 +923,16 @@ row_upd_build_difference_binary(
n_diff = 0;
- roll_ptr_pos = dict_index_get_sys_col_pos(index, DATA_ROLL_PTR);
trx_id_pos = dict_index_get_sys_col_pos(index, DATA_TRX_ID);
+ ut_ad(dict_index_get_sys_col_pos(index, DATA_ROLL_PTR)
+ == trx_id_pos + 1);
- offsets = rec_get_offsets(rec, index, offsets_,
- ULINT_UNDEFINED, &heap);
+ if (!offsets) {
+ offsets = rec_get_offsets(rec, index, offsets_,
+ ULINT_UNDEFINED, &heap);
+ } else {
+ ut_ad(rec_offs_validate(rec, index, offsets));
+ }
for (i = 0; i < dtuple_get_n_fields(entry); i++) {
@@ -890,9 +943,9 @@ row_upd_build_difference_binary(
/* NOTE: we compare the fields as binary strings!
(No collation) */
- if (i == trx_id_pos || i == roll_ptr_pos) {
+ if (no_sys && (i == trx_id_pos || i == trx_id_pos + 1)) {
- goto skip_compare;
+ continue;
}
if (!dfield_is_ext(dfield)
@@ -907,8 +960,6 @@ row_upd_build_difference_binary(
n_diff++;
}
-skip_compare:
- ;
}
update->n_fields = n_diff;
@@ -1386,9 +1437,9 @@ row_upd_changes_some_index_ord_field_binary(
/***********************************************************//**
Checks if an FTS Doc ID column is affected by an UPDATE.
-@return TRUE if the Doc ID column is changed */
+@return whether the Doc ID column is changed */
UNIV_INTERN
-ulint
+bool
row_upd_changes_doc_id(
/*===================*/
dict_table_t* table, /*!< in: table */
@@ -1431,61 +1482,6 @@ row_upd_changes_fts_column(
}
/***********************************************************//**
-Checks if an update vector changes the table's FTS-indexed columns.
-NOTE: must not be called for tables which do not have an FTS-index.
-Also, the vector returned must be explicitly freed as it's allocated
-using the ut_malloc() allocator.
-@return vector of FTS indexes that were affected by the update */
-UNIV_INTERN
-ib_vector_t*
-row_upd_changes_fts_columns(
-/*========================*/
- dict_table_t* table, /*!< in: table */
- upd_t* update) /*!< in: update vector for the row */
-{
- ulint i;
- ulint offset;
- fts_t* fts = table->fts;
- ib_vector_t* updated_fts_indexes = NULL;
-
- for (i = 0; i < upd_get_n_fields(update); ++i) {
- upd_field_t* upd_field = upd_get_nth_field(update, i);
-
- offset = row_upd_changes_fts_column(table, upd_field);
-
- if (offset != ULINT_UNDEFINED) {
-
- dict_index_t* index;
-
- /* TODO: Investigate if we can check whether the
- existing set of affected indexes matches the new
- affected set. If matched then we don't need to
- do the extra malloc()/free(). */
-
- /* This vector is created from the ut_malloc()
- allocator because we only want to keep one instance
- around not matter how many times this row is
- updated. The old entry should be deleted when
- we update the FTS row info with this new vector. */
- if (updated_fts_indexes == NULL) {
- ib_alloc_t* ut_alloc;
-
- ut_alloc = ib_ut_allocator_create();
-
- updated_fts_indexes = ib_vector_create(
- ut_alloc, sizeof(dict_index_t*), 2);
- }
-
- index = static_cast<dict_index_t*>(
- ib_vector_getp(fts->indexes, offset));
- ib_vector_push(updated_fts_indexes, &index);
- }
- }
-
- return(updated_fts_indexes);
-}
-
-/***********************************************************//**
Checks if an update vector changes some of the first ordering fields of an
index record. This is only used in foreign key checks and we can assume
that index does not contain column prefixes.
@@ -1633,7 +1629,7 @@ row_upd_store_row(
}
node->row = row_build(ROW_COPY_DATA, clust_index, rec, offsets,
- NULL, ext, node->heap);
+ NULL, NULL, NULL, ext, node->heap);
if (node->is_delete) {
node->upd_row = NULL;
node->upd_ext = NULL;
@@ -1652,8 +1648,8 @@ row_upd_store_row(
Updates a secondary index entry of a row.
@return DB_SUCCESS if operation successfully completed, else error
code or DB_LOCK_WAIT */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_upd_sec_index_entry(
/*====================*/
upd_node_t* node, /*!< in: row update node */
@@ -1667,11 +1663,13 @@ row_upd_sec_index_entry(
dict_index_t* index;
btr_cur_t* btr_cur;
ibool referenced;
- ulint err = DB_SUCCESS;
+ dberr_t err = DB_SUCCESS;
trx_t* trx = thr_get_trx(thr);
- ulint mode = BTR_MODIFY_LEAF;
+ ulint mode;
enum row_search_result search_result;
+ ut_ad(trx->id);
+
index = node->index;
referenced = row_upd_index_is_referenced(index, trx);
@@ -1682,19 +1680,74 @@ row_upd_sec_index_entry(
entry = row_build_index_entry(node->row, node->ext, index, heap);
ut_a(entry);
+ log_free_check();
+
+#ifdef UNIV_DEBUG
+ /* Work around Bug#14626800 ASSERTION FAILURE IN DEBUG_SYNC().
+ Once it is fixed, remove the 'ifdef', 'if' and this comment. */
+ if (!trx->ddl) {
+ DEBUG_SYNC_C_IF_THD(trx->mysql_thd,
+ "before_row_upd_sec_index_entry");
+ }
+#endif /* UNIV_DEBUG */
+
mtr_start(&mtr);
+ if (*index->name == TEMP_INDEX_PREFIX) {
+ /* The index->online_status may change if the
+ index->name starts with TEMP_INDEX_PREFIX (meaning
+ that the index is or was being created online). It is
+ protected by index->lock. */
+
+ mtr_s_lock(dict_index_get_lock(index), &mtr);
+
+ switch (dict_index_get_online_status(index)) {
+ case ONLINE_INDEX_COMPLETE:
+ /* This is a normal index. Do not log anything.
+ Perform the update on the index tree directly. */
+ break;
+ case ONLINE_INDEX_CREATION:
+ /* Log a DELETE and optionally INSERT. */
+ row_log_online_op(index, entry, 0);
+
+ if (!node->is_delete) {
+ mem_heap_empty(heap);
+ entry = row_build_index_entry(
+ node->upd_row, node->upd_ext,
+ index, heap);
+ ut_a(entry);
+ row_log_online_op(index, entry, trx->id);
+ }
+ /* fall through */
+ case ONLINE_INDEX_ABORTED:
+ case ONLINE_INDEX_ABORTED_DROPPED:
+ mtr_commit(&mtr);
+ goto func_exit;
+ }
+
+ /* We can only buffer delete-mark operations if there
+ are no foreign key constraints referring to the index. */
+ mode = referenced
+ ? BTR_MODIFY_LEAF | BTR_ALREADY_S_LATCHED
+ : BTR_MODIFY_LEAF | BTR_ALREADY_S_LATCHED
+ | BTR_DELETE_MARK;
+ } else {
+ /* For secondary indexes,
+ index->online_status==ONLINE_INDEX_CREATION unless
+ index->name starts with TEMP_INDEX_PREFIX. */
+ ut_ad(!dict_index_is_online_ddl(index));
+
+ /* We can only buffer delete-mark operations if there
+ are no foreign key constraints referring to the index. */
+ mode = referenced
+ ? BTR_MODIFY_LEAF
+ : BTR_MODIFY_LEAF | BTR_DELETE_MARK;
+ }
+
/* Set the query thread, so that ibuf_insert_low() will be
able to invoke thd_get_trx(). */
btr_pcur_get_btr_cur(&pcur)->thr = thr;
- /* We can only try to use the insert/delete buffer to buffer
- delete-mark operations if the index we're modifying has no foreign
- key constraints referring to it. */
- if (!referenced) {
- mode |= BTR_DELETE_MARK;
- }
-
search_result = row_search_index_entry(index, entry, mode,
&pcur, &mtr);
@@ -1711,6 +1764,20 @@ row_upd_sec_index_entry(
break;
case ROW_NOT_FOUND:
+ if (*index->name == TEMP_INDEX_PREFIX) {
+ /* When online CREATE INDEX copied the update
+ that we already made to the clustered index,
+ and completed the secondary index creation
+ before we got here, the old secondary index
+ record would not exist. The CREATE INDEX
+ should be waiting for a MySQL meta-data lock
+ upgrade at least until this UPDATE
+ returns. After that point, the
+ TEMP_INDEX_PREFIX would be dropped from the
+ index name in commit_inplace_alter_table(). */
+ break;
+ }
+
fputs("InnoDB: error in sec index entry update in\n"
"InnoDB: ", stderr);
dict_index_name_print(stderr, trx, index);
@@ -1730,11 +1797,9 @@ row_upd_sec_index_entry(
case ROW_FOUND:
/* Delete mark the old index record; it can already be
delete marked if we return after a lock wait in
- row_ins_index_entry below */
-
+ row_ins_sec_index_entry() below */
if (!rec_get_deleted_flag(
- rec, dict_table_is_comp(index->table))) {
-
+ rec, dict_table_is_comp(index->table))) {
err = btr_cur_del_mark_set_sec_rec(
0, btr_cur, TRUE, thr, &mtr);
@@ -1764,13 +1829,15 @@ row_upd_sec_index_entry(
goto func_exit;
}
+ mem_heap_empty(heap);
+
/* Build a new index entry */
entry = row_build_index_entry(node->upd_row, node->upd_ext,
index, heap);
ut_a(entry);
/* Insert new index entry */
- err = row_ins_index_entry(index, entry, 0, TRUE, thr);
+ err = row_ins_sec_index_entry(index, entry, thr);
func_exit:
mem_heap_free(heap);
@@ -1783,8 +1850,8 @@ Updates the secondary index record if it is changed in the row update or
deletes it if this is a delete.
@return DB_SUCCESS if operation successfully completed, else error
code or DB_LOCK_WAIT */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_upd_sec_step(
/*=============*/
upd_node_t* node, /*!< in: row update node */
@@ -1897,8 +1964,8 @@ fields of the clustered index record change. This should be quite rare in
database applications.
@return DB_SUCCESS if operation successfully completed, else error
code or DB_LOCK_WAIT */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_upd_clust_rec_by_insert(
/*========================*/
upd_node_t* node, /*!< in/out: row update node */
@@ -1914,7 +1981,7 @@ row_upd_clust_rec_by_insert(
trx_t* trx;
dict_table_t* table;
dtuple_t* entry;
- ulint err;
+ dberr_t err;
ibool change_ownership = FALSE;
rec_t* rec;
ulint* offsets = NULL;
@@ -1939,7 +2006,7 @@ row_upd_clust_rec_by_insert(
default:
ut_error;
case UPD_NODE_INSERT_BLOB:
- /* A lock wait occurred in row_ins_index_entry() in
+ /* A lock wait occurred in row_ins_clust_index_entry() in
the previous invocation of this function. Mark the
off-page columns in the entry inherited. */
@@ -1948,7 +2015,7 @@ row_upd_clust_rec_by_insert(
ut_a(change_ownership);
/* fall through */
case UPD_NODE_INSERT_CLUSTERED:
- /* A lock wait occurred in row_ins_index_entry() in
+ /* A lock wait occurred in row_ins_clust_index_entry() in
the previous invocation of this function. */
break;
case UPD_NODE_UPDATE_CLUSTERED:
@@ -1961,8 +2028,8 @@ row_upd_clust_rec_by_insert(
ut_ad(page_rec_is_user_rec(rec));
err = btr_cur_del_mark_set_clust_rec(
- BTR_NO_LOCKING_FLAG, btr_cur_get_block(btr_cur),
- rec, index, offsets, TRUE, thr, mtr);
+ btr_cur_get_block(btr_cur), rec, index, offsets,
+ thr, mtr);
if (err != DB_SUCCESS) {
err_exit:
mtr_commit(mtr);
@@ -1999,9 +2066,9 @@ err_exit:
mtr_commit(mtr);
- err = row_ins_index_entry(index, entry,
- node->upd_ext ? node->upd_ext->n_ext : 0,
- TRUE, thr);
+ err = row_ins_clust_index_entry(
+ index, entry, thr,
+ node->upd_ext ? node->upd_ext->n_ext : 0);
node->state = change_ownership
? UPD_NODE_INSERT_BLOB
: UPD_NODE_INSERT_CLUSTERED;
@@ -2027,11 +2094,17 @@ err_exit:
offsets = rec_get_offsets(rec, index, offsets,
ULINT_UNDEFINED, &heap);
ut_ad(page_rec_is_user_rec(rec));
+ ut_ad(rec_get_deleted_flag(rec, rec_offs_comp(offsets)));
btr_cur_disown_inherited_fields(
btr_cur_get_page_zip(btr_cur),
rec, index, offsets, node->update, mtr);
+ /* It is not necessary to call row_log_table for
+ this, because during online table rebuild, purge will
+ not free any BLOBs in the table, whether or not they
+ are owned by the clustered index record. */
+
mtr_commit(mtr);
}
@@ -2045,20 +2118,24 @@ Updates a clustered index record of a row when the ordering fields do
not change.
@return DB_SUCCESS if operation successfully completed, else error
code or DB_LOCK_WAIT */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_upd_clust_rec(
/*==============*/
upd_node_t* node, /*!< in: row update node */
dict_index_t* index, /*!< in: clustered index */
+ ulint* offsets,/*!< in: rec_get_offsets() on node->pcur */
+ mem_heap_t** offsets_heap,
+ /*!< in/out: memory heap, can be emptied */
que_thr_t* thr, /*!< in: query thread */
mtr_t* mtr) /*!< in: mtr; gets committed here */
{
- mem_heap_t* heap = NULL;
- big_rec_t* big_rec = NULL;
+ mem_heap_t* heap = NULL;
+ big_rec_t* big_rec = NULL;
btr_pcur_t* pcur;
btr_cur_t* btr_cur;
- ulint err;
+ dberr_t err;
+ const dtuple_t* rebuilt_old_pk = NULL;
ut_ad(node);
ut_ad(dict_index_is_clust(index));
@@ -2066,33 +2143,48 @@ row_upd_clust_rec(
pcur = node->pcur;
btr_cur = btr_pcur_get_btr_cur(pcur);
- ut_ad(!rec_get_deleted_flag(btr_pcur_get_rec(pcur),
+ ut_ad(btr_cur_get_index(btr_cur) == index);
+ ut_ad(!rec_get_deleted_flag(btr_cur_get_rec(btr_cur),
dict_table_is_comp(index->table)));
+ ut_ad(rec_offs_validate(btr_cur_get_rec(btr_cur), index, offsets));
+
+ if (dict_index_is_online_ddl(index)) {
+ rebuilt_old_pk = row_log_table_get_pk(
+ btr_cur_get_rec(btr_cur), index, offsets, &heap);
+ }
/* Try optimistic updating of the record, keeping changes within
the page; we do not check locks because we assume the x-lock on the
record to update */
if (node->cmpl_info & UPD_NODE_NO_SIZE_CHANGE) {
- err = btr_cur_update_in_place(BTR_NO_LOCKING_FLAG,
- btr_cur, node->update,
- node->cmpl_info, thr, mtr);
+ err = btr_cur_update_in_place(
+ BTR_NO_LOCKING_FLAG, btr_cur,
+ offsets, node->update,
+ node->cmpl_info, thr, thr_get_trx(thr)->id, mtr);
} else {
- err = btr_cur_optimistic_update(BTR_NO_LOCKING_FLAG,
- btr_cur, node->update,
- node->cmpl_info, thr, mtr);
+ err = btr_cur_optimistic_update(
+ BTR_NO_LOCKING_FLAG, btr_cur,
+ &offsets, offsets_heap, node->update,
+ node->cmpl_info, thr, thr_get_trx(thr)->id, mtr);
+ }
+
+ if (err == DB_SUCCESS && dict_index_is_online_ddl(index)) {
+ row_log_table_update(btr_cur_get_rec(btr_cur),
+ index, offsets, rebuilt_old_pk);
}
mtr_commit(mtr);
if (UNIV_LIKELY(err == DB_SUCCESS)) {
- return(DB_SUCCESS);
+ goto func_exit;
}
if (buf_LRU_buf_pool_running_out()) {
- return(DB_LOCK_TABLE_FULL);
+ err = DB_LOCK_TABLE_FULL;
+ goto func_exit;
}
/* We may have to modify the tree structure: do a pessimistic descent
down the index tree */
@@ -2110,14 +2202,16 @@ row_upd_clust_rec(
ut_ad(!rec_get_deleted_flag(btr_pcur_get_rec(pcur),
dict_table_is_comp(index->table)));
+ if (!heap) {
+ heap = mem_heap_create(1024);
+ }
+
err = btr_cur_pessimistic_update(
BTR_NO_LOCKING_FLAG | BTR_KEEP_POS_FLAG, btr_cur,
- &heap, &big_rec, node->update, node->cmpl_info, thr, mtr);
+ &offsets, offsets_heap, heap, &big_rec,
+ node->update, node->cmpl_info,
+ thr, thr_get_trx(thr)->id, mtr);
if (big_rec) {
- ulint offsets_[REC_OFFS_NORMAL_SIZE];
- rec_t* rec;
- rec_offs_init(offsets_);
-
ut_a(err == DB_SUCCESS);
/* Write out the externally stored
columns while still x-latching
@@ -2140,12 +2234,10 @@ row_upd_clust_rec(
portion of the file, in case the file was somehow
truncated in the crash. */
- rec = btr_cur_get_rec(btr_cur);
DEBUG_SYNC_C("before_row_upd_extern");
err = btr_store_big_rec_extern_fields(
- index, btr_cur_get_block(btr_cur), rec,
- rec_get_offsets(rec, index, offsets_,
- ULINT_UNDEFINED, &heap),
+ index, btr_cur_get_block(btr_cur),
+ btr_cur_get_rec(btr_cur), offsets,
big_rec, mtr, BTR_STORE_UPDATE);
DEBUG_SYNC_C("after_row_upd_extern");
/* If writing big_rec fails (for example, because of
@@ -2164,9 +2256,14 @@ row_upd_clust_rec(
ut_a(err == DB_SUCCESS);
}
- mtr_commit(mtr);
+ if (err == DB_SUCCESS && dict_index_is_online_ddl(index)) {
+ row_log_table_update(btr_cur_get_rec(btr_cur),
+ index, offsets, rebuilt_old_pk);
+ }
- if (UNIV_LIKELY_NULL(heap)) {
+ mtr_commit(mtr);
+func_exit:
+ if (heap) {
mem_heap_free(heap);
}
@@ -2180,8 +2277,8 @@ row_upd_clust_rec(
/***********************************************************//**
Delete marks a clustered index record.
@return DB_SUCCESS if operation successfully completed, else error code */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_upd_del_mark_clust_rec(
/*=======================*/
upd_node_t* node, /*!< in: row update node */
@@ -2196,7 +2293,7 @@ row_upd_del_mark_clust_rec(
{
btr_pcur_t* pcur;
btr_cur_t* btr_cur;
- ulint err;
+ dberr_t err;
ut_ad(node);
ut_ad(dict_index_is_clust(index));
@@ -2214,8 +2311,8 @@ row_upd_del_mark_clust_rec(
locks, because we assume that we have an x-lock on the record */
err = btr_cur_del_mark_set_clust_rec(
- BTR_NO_LOCKING_FLAG, btr_cur_get_block(btr_cur),
- btr_cur_get_rec(btr_cur), index, offsets, TRUE, thr, mtr);
+ btr_cur_get_block(btr_cur), btr_cur_get_rec(btr_cur),
+ index, offsets, thr, mtr);
if (err == DB_SUCCESS && referenced) {
/* NOTE that the following call loses the position of pcur ! */
@@ -2232,8 +2329,8 @@ row_upd_del_mark_clust_rec(
Updates the clustered index record.
@return DB_SUCCESS if operation successfully completed, DB_LOCK_WAIT
in case of a lock wait, else error code */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_upd_clust_step(
/*===============*/
upd_node_t* node, /*!< in: row update node */
@@ -2242,11 +2339,10 @@ row_upd_clust_step(
dict_index_t* index;
btr_pcur_t* pcur;
ibool success;
- ulint err;
- mtr_t* mtr;
- mtr_t mtr_buf;
+ dberr_t err;
+ mtr_t mtr;
rec_t* rec;
- mem_heap_t* heap = NULL;
+ mem_heap_t* heap = NULL;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
ulint* offsets;
ibool referenced;
@@ -2259,9 +2355,8 @@ row_upd_clust_step(
pcur = node->pcur;
/* We have to restore the cursor to its position */
- mtr = &mtr_buf;
- mtr_start(mtr);
+ mtr_start(&mtr);
/* If the restoration does not succeed, then the same
transaction has deleted the record on which the cursor was,
@@ -2273,12 +2368,32 @@ row_upd_clust_step(
ut_a(pcur->rel_pos == BTR_PCUR_ON);
- success = btr_pcur_restore_position(BTR_MODIFY_LEAF, pcur, mtr);
+ ulint mode;
+
+#ifdef UNIV_DEBUG
+ /* Work around Bug#14626800 ASSERTION FAILURE IN DEBUG_SYNC().
+ Once it is fixed, remove the 'ifdef', 'if' and this comment. */
+ if (!thr_get_trx(thr)->ddl) {
+ DEBUG_SYNC_C_IF_THD(
+ thr_get_trx(thr)->mysql_thd,
+ "innodb_row_upd_clust_step_enter");
+ }
+#endif /* UNIV_DEBUG */
+
+ if (dict_index_is_online_ddl(index)) {
+ ut_ad(node->table->id != DICT_INDEXES_ID);
+ mode = BTR_MODIFY_LEAF | BTR_ALREADY_S_LATCHED;
+ mtr_s_lock(dict_index_get_lock(index), &mtr);
+ } else {
+ mode = BTR_MODIFY_LEAF;
+ }
+
+ success = btr_pcur_restore_position(mode, pcur, &mtr);
if (!success) {
err = DB_RECORD_NOT_FOUND;
- mtr_commit(mtr);
+ mtr_commit(&mtr);
return(err);
}
@@ -2289,18 +2404,20 @@ row_upd_clust_step(
if (node->is_delete && node->table->id == DICT_INDEXES_ID) {
- dict_drop_index_tree(btr_pcur_get_rec(pcur), mtr);
+ ut_ad(!dict_index_is_online_ddl(index));
- mtr_commit(mtr);
+ dict_drop_index_tree(btr_pcur_get_rec(pcur), &mtr);
- mtr_start(mtr);
+ mtr_commit(&mtr);
+
+ mtr_start(&mtr);
success = btr_pcur_restore_position(BTR_MODIFY_LEAF, pcur,
- mtr);
+ &mtr);
if (!success) {
err = DB_ERROR;
- mtr_commit(mtr);
+ mtr_commit(&mtr);
return(err);
}
@@ -2315,7 +2432,7 @@ row_upd_clust_step(
0, btr_pcur_get_block(pcur),
rec, index, offsets, thr);
if (err != DB_SUCCESS) {
- mtr_commit(mtr);
+ mtr_commit(&mtr);
goto exit_func;
}
}
@@ -2324,17 +2441,14 @@ row_upd_clust_step(
if (node->is_delete) {
err = row_upd_del_mark_clust_rec(
- node, index, offsets, thr, referenced, mtr);
+ node, index, offsets, thr, referenced, &mtr);
if (err == DB_SUCCESS) {
node->state = UPD_NODE_UPDATE_ALL_SEC;
node->index = dict_table_get_next_index(index);
}
-exit_func:
- if (UNIV_LIKELY_NULL(heap)) {
- mem_heap_free(heap);
- }
- return(err);
+
+ goto exit_func;
}
/* If the update is made for MySQL, we already have the update vector
@@ -2348,13 +2462,11 @@ exit_func:
row_upd_eval_new_vals(node->update);
}
- if (UNIV_LIKELY_NULL(heap)) {
- mem_heap_free(heap);
- }
-
if (node->cmpl_info & UPD_NODE_NO_ORD_CHANGE) {
- return(row_upd_clust_rec(node, index, thr, mtr));
+ err = row_upd_clust_rec(
+ node, index, offsets, &heap, thr, &mtr);
+ goto exit_func;
}
row_upd_store_row(node);
@@ -2374,20 +2486,21 @@ exit_func:
externally! */
err = row_upd_clust_rec_by_insert(
- node, index, thr, referenced, mtr);
+ node, index, thr, referenced, &mtr);
if (err != DB_SUCCESS) {
- return(err);
+ goto exit_func;
}
node->state = UPD_NODE_UPDATE_ALL_SEC;
} else {
- err = row_upd_clust_rec(node, index, thr, mtr);
+ err = row_upd_clust_rec(
+ node, index, offsets, &heap, thr, &mtr);
if (err != DB_SUCCESS) {
- return(err);
+ goto exit_func;
}
node->state = UPD_NODE_UPDATE_SOME_SEC;
@@ -2395,6 +2508,10 @@ exit_func:
node->index = dict_table_get_next_index(index);
+exit_func:
+ if (heap) {
+ mem_heap_free(heap);
+ }
return(err);
}
@@ -2404,14 +2521,14 @@ to this node, we assume that we have a persistent cursor which was on a
record, and the position of the cursor is stored in the cursor.
@return DB_SUCCESS if operation successfully completed, else error
code or DB_LOCK_WAIT */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
row_upd(
/*====*/
upd_node_t* node, /*!< in: row update node */
que_thr_t* thr) /*!< in: query thread */
{
- ulint err = DB_SUCCESS;
+ dberr_t err = DB_SUCCESS;
ut_ad(node && thr);
@@ -2449,6 +2566,17 @@ row_upd(
return(DB_SUCCESS);
}
+#ifdef UNIV_DEBUG
+ /* Work around Bug#14626800 ASSERTION FAILURE IN DEBUG_SYNC().
+ Once it is fixed, remove the 'ifdef', 'if' and this comment. */
+ if (!thr_get_trx(thr)->ddl) {
+ DEBUG_SYNC_C_IF_THD(thr_get_trx(thr)->mysql_thd,
+ "after_row_upd_clust");
+ }
+#endif /* UNIV_DEBUG */
+
+ DBUG_EXECUTE_IF("row_upd_skip_sec", node->index = NULL;);
+
do {
/* Skip corrupted index */
dict_table_skip_corrupt_index(node->index);
@@ -2458,7 +2586,6 @@ row_upd(
}
if (node->index->type != DICT_FTS) {
- log_free_check();
err = row_upd_sec_step(node, thr);
if (err != DB_SUCCESS) {
@@ -2500,7 +2627,7 @@ row_upd_step(
upd_node_t* node;
sel_node_t* sel_node;
que_node_t* parent;
- ulint err = DB_SUCCESS;
+ dberr_t err = DB_SUCCESS;
trx_t* trx;
ut_ad(thr);
@@ -2579,7 +2706,7 @@ row_upd_step(
err = row_upd(node, thr);
error_handling:
- trx->error_state = static_cast<enum db_err>(err);
+ trx->error_state = err;
if (err != DB_SUCCESS) {
return(NULL);
diff --git a/storage/innobase/row/row0vers.cc b/storage/innobase/row/row0vers.cc
index 0aad8675ff8..2c3191928fd 100644
--- a/storage/innobase/row/row0vers.cc
+++ b/storage/innobase/row/row0vers.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1997, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1997, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -114,7 +114,6 @@ row_vers_impl_x_locked_low(
on rec. */
for (version = clust_rec;; version = prev_version) {
- ulint err;
row_ext_t* ext;
const dtuple_t* row;
dtuple_t* entry;
@@ -128,24 +127,22 @@ row_vers_impl_x_locked_low(
heap = mem_heap_create(1024);
- err = trx_undo_prev_version_build(
+ trx_undo_prev_version_build(
clust_rec, mtr, version, clust_index, clust_offsets,
heap, &prev_version);
- /* Free version and clust_offsets. */
+ /* Free version and clust_offsets. */
mem_heap_free(old_heap);
if (prev_version == NULL) {
- /* clust_rec must be a fresh insert, because
+ /* clust_rec should be a fresh insert, because
no previous version was found or the transaction
has committed. The caller has to recheck as the
synopsis of this function states, whether trx_id
is active or not. */
- ut_a(err == DB_SUCCESS || err == DB_MISSING_HISTORY);
-
break;
}
@@ -155,15 +152,16 @@ row_vers_impl_x_locked_low(
vers_del = rec_get_deleted_flag(prev_version, comp);
- prev_trx_id = row_get_rec_trx_id(
- prev_version, clust_index, clust_offsets);
+ prev_trx_id = row_get_rec_trx_id(prev_version, clust_index,
+ clust_offsets);
/* The stack of versions is locked by mtr. Thus, it
is safe to fetch the prefixes for externally stored
columns. */
row = row_build(ROW_COPY_POINTERS, clust_index, prev_version,
- clust_offsets, NULL, &ext, heap);
+ clust_offsets,
+ NULL, NULL, NULL, &ext, heap);
entry = row_build_index_entry(row, ext, index, heap);
@@ -183,8 +181,6 @@ row_vers_impl_x_locked_low(
There is no guarantee that the transaction is still
active. */
- ut_ad(err == DB_SUCCESS);
-
/* We check if entry and rec are identified in the alphabetical
ordering */
@@ -355,7 +351,6 @@ row_vers_old_has_index_entry(
mem_heap_t* heap2;
const dtuple_t* row;
const dtuple_t* entry;
- ulint err;
ulint comp;
ut_ad(mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_X_FIX)
@@ -383,7 +378,8 @@ row_vers_old_has_index_entry(
Thus, it is safe to fetch the prefixes for
externally stored columns. */
row = row_build(ROW_COPY_POINTERS, clust_index,
- rec, clust_offsets, NULL, &ext, heap);
+ rec, clust_offsets,
+ NULL, NULL, NULL, &ext, heap);
entry = row_build_index_entry(row, ext, index, heap);
/* If entry == NULL, the record contains unset BLOB
@@ -420,12 +416,12 @@ row_vers_old_has_index_entry(
for (;;) {
heap2 = heap;
heap = mem_heap_create(1024);
- err = trx_undo_prev_version_build(rec, mtr, version,
- clust_index, clust_offsets,
- heap, &prev_version);
+ trx_undo_prev_version_build(rec, mtr, version,
+ clust_index, clust_offsets,
+ heap, &prev_version);
mem_heap_free(heap2); /* free version and clust_offsets */
- if (err != DB_SUCCESS || !prev_version) {
+ if (!prev_version) {
/* Versions end here */
mem_heap_free(heap);
@@ -444,7 +440,7 @@ row_vers_old_has_index_entry(
externally stored columns. */
row = row_build(ROW_COPY_POINTERS, clust_index,
prev_version, clust_offsets,
- NULL, &ext, heap);
+ NULL, NULL, NULL, &ext, heap);
entry = row_build_index_entry(row, ext, index, heap);
/* If entry == NULL, the record contains unset
@@ -477,7 +473,7 @@ read should see. We assume that the trx id stored in rec is such that
the consistent read should not see rec in its present version.
@return DB_SUCCESS or DB_MISSING_HISTORY */
UNIV_INTERN
-ulint
+dberr_t
row_vers_build_for_consistent_read(
/*===============================*/
const rec_t* rec, /*!< in: record in a clustered index; the
@@ -495,8 +491,9 @@ row_vers_build_for_consistent_read(
*old_vers is allocated; memory for possible
intermediate versions is allocated and freed
locally within the function */
- rec_t** old_vers)/*!< out, own: old version, or NULL if the
- record does not exist in the view, that is,
+ rec_t** old_vers)/*!< out, own: old version, or NULL
+ if the history is missing or the record
+ does not exist in the view, that is,
it was freshly inserted afterwards */
{
const rec_t* version;
@@ -504,7 +501,7 @@ row_vers_build_for_consistent_read(
trx_id_t trx_id;
mem_heap_t* heap = NULL;
byte* buf;
- ulint err;
+ dberr_t err;
ut_ad(dict_index_is_clust(index));
ut_ad(mtr_memo_contains_page(mtr, rec, MTR_MEMO_PAGE_X_FIX)
@@ -558,27 +555,21 @@ row_vers_build_for_consistent_read(
rec_offs_make_valid(*old_vers, index,
*offsets);
err = DB_SUCCESS;
-
break;
}
}
err = trx_undo_prev_version_build(rec, mtr, version, index,
*offsets, heap,
- &prev_version);
+ &prev_version)
+ ? DB_SUCCESS : DB_MISSING_HISTORY;
if (heap2) {
mem_heap_free(heap2); /* free version */
}
- if (err != DB_SUCCESS) {
- break;
- }
-
if (prev_version == NULL) {
/* It was a freshly inserted version */
*old_vers = NULL;
- err = DB_SUCCESS;
-
break;
}
@@ -602,8 +593,6 @@ row_vers_build_for_consistent_read(
*old_vers = rec_copy(buf, prev_version, *offsets);
rec_offs_make_valid(*old_vers, index, *offsets);
- err = DB_SUCCESS;
-
break;
}
@@ -617,10 +606,9 @@ row_vers_build_for_consistent_read(
/*****************************************************************//**
Constructs the last committed version of a clustered index record,
-which should be seen by a semi-consistent read.
-@return DB_SUCCESS or DB_MISSING_HISTORY */
+which should be seen by a semi-consistent read. */
UNIV_INTERN
-ulint
+void
row_vers_build_for_semi_consistent_read(
/*====================================*/
const rec_t* rec, /*!< in: record in a clustered index; the
@@ -644,7 +632,6 @@ row_vers_build_for_semi_consistent_read(
const rec_t* version;
mem_heap_t* heap = NULL;
byte* buf;
- ulint err;
trx_id_t rec_trx_id = 0;
ut_ad(dict_index_is_clust(index));
@@ -683,7 +670,7 @@ row_vers_build_for_semi_consistent_read(
mutex_exit(&trx_sys->mutex);
if (!version_trx) {
-
+committed_version_trx:
/* We found a version that belongs to a
committed transaction: return it. */
@@ -693,7 +680,6 @@ row_vers_build_for_semi_consistent_read(
if (rec == version) {
*old_vers = rec;
- err = DB_SUCCESS;
break;
}
@@ -721,30 +707,30 @@ row_vers_build_for_semi_consistent_read(
*old_vers = rec_copy(buf, version, *offsets);
rec_offs_make_valid(*old_vers, index, *offsets);
- err = DB_SUCCESS;
-
break;
}
+ DEBUG_SYNC_C("after_row_vers_check_trx_active");
+
heap2 = heap;
heap = mem_heap_create(1024);
- err = trx_undo_prev_version_build(rec, mtr, version, index,
- *offsets, heap,
- &prev_version);
- if (heap2) {
- mem_heap_free(heap2); /* free version */
+ if (!trx_undo_prev_version_build(rec, mtr, version, index,
+ *offsets, heap,
+ &prev_version)) {
+ mem_heap_free(heap);
+ heap = heap2;
+ heap2 = NULL;
+ goto committed_version_trx;
}
- if (UNIV_UNLIKELY(err != DB_SUCCESS)) {
- break;
+ if (heap2) {
+ mem_heap_free(heap2); /* free version */
}
if (prev_version == NULL) {
/* It was a freshly inserted version */
*old_vers = NULL;
- err = DB_SUCCESS;
-
break;
}
@@ -759,6 +745,4 @@ row_vers_build_for_semi_consistent_read(
if (heap) {
mem_heap_free(heap);
}
-
- return(err);
}
diff --git a/storage/innobase/srv/srv0conc.cc b/storage/innobase/srv/srv0conc.cc
index d5c949f3a06..820700a95a8 100644
--- a/storage/innobase/srv/srv0conc.cc
+++ b/storage/innobase/srv/srv0conc.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2011, 2012, Oracle and/or its affiliates. All Rights Reserved.
Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
@@ -40,7 +40,6 @@ Created 2011/04/18 Sunny Bains
#include "srv0srv.h"
#include "sync0sync.h"
#include "trx0trx.h"
-#include "ha_prototypes.h"
#include "mysql/plugin.h"
@@ -73,13 +72,11 @@ UNIV_INTERN ulong srv_thread_concurrency = 0;
/** This mutex protects srv_conc data structures */
static os_fast_mutex_t srv_conc_mutex;
-/** Slot for a thread waiting in the concurrency control queue. */
-typedef struct srv_conc_slot_struct srv_conc_slot_t;
-
/** Concurrency list node */
-typedef UT_LIST_NODE_T(srv_conc_slot_t) srv_conc_node_t;
+typedef UT_LIST_NODE_T(struct srv_conc_slot_t) srv_conc_node_t;
-struct srv_conc_slot_struct{
+/** Slot for a thread waiting in the concurrency control queue. */
+struct srv_conc_slot_t{
os_event_t event; /*!< event to wait */
ibool reserved; /*!< TRUE if slot
reserved */
@@ -106,10 +103,8 @@ UNIV_INTERN mysql_pfs_key_t srv_conc_mutex_key;
#endif /* !HAVE_ATOMIC_BUILTINS */
-typedef struct srv_conc_struct srv_conc_t;
-
/** Variables tracking the active and waiting threads. */
-struct srv_conc_struct {
+struct srv_conc_t {
char pad[64 - (sizeof(ulint) + sizeof(lint))];
/** Number of transactions that have declared_to_be_inside_innodb set.
@@ -148,7 +143,7 @@ srv_conc_init(void)
for (i = 0; i < OS_THREAD_MAX_N; i++) {
srv_conc_slot_t* conc_slot = &srv_conc_slots[i];
- conc_slot->event = os_event_create(NULL);
+ conc_slot->event = os_event_create();
ut_a(conc_slot->event);
}
#endif /* !HAVE_ATOMIC_BUILTINS */
@@ -224,9 +219,7 @@ srv_conc_enter_innodb_with_atomics(
(void) os_atomic_decrement_lint(
&srv_conc.n_waiting, 1);
- thd_wait_end(
- static_cast<THD*>(
- trx->mysql_thd));
+ thd_wait_end(trx->mysql_thd);
}
if (srv_adaptive_max_sleep_delay > 0) {
@@ -262,9 +255,7 @@ srv_conc_enter_innodb_with_atomics(
trx_search_latch_release_if_reserved(trx);
}
- thd_wait_begin(
- static_cast<THD*>(trx->mysql_thd),
- THD_WAIT_USER_LOCK);
+ thd_wait_begin(trx->mysql_thd, THD_WAIT_USER_LOCK);
notified_mysql = TRUE;
}
@@ -477,10 +468,10 @@ retry:
#endif /* UNIV_SYNC_DEBUG */
trx->op_info = "waiting in InnoDB queue";
- thd_wait_begin(static_cast<THD*>(trx->mysql_thd), THD_WAIT_USER_LOCK);
+ thd_wait_begin(trx->mysql_thd, THD_WAIT_USER_LOCK);
os_event_wait(slot->event);
- thd_wait_end(static_cast<THD*>(trx->mysql_thd));
+ thd_wait_end(trx->mysql_thd);
trx->op_info = "";
diff --git a/storage/innobase/srv/srv0mon.cc b/storage/innobase/srv/srv0mon.cc
index 9c6e56bcb9d..3b3da2f070f 100644
--- a/storage/innobase/srv/srv0mon.cc
+++ b/storage/innobase/srv/srv0mon.cc
@@ -1,6 +1,7 @@
/*****************************************************************************
-Copyright (c) 2010, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2010, 2012, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2012, Facebook Inc.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -33,7 +34,6 @@ Created 12/9/2009 Jimmy Yang
#include "trx0rseg.h"
#include "lock0lock.h"
#include "ibuf0ibuf.h"
-#include "btr0cur.h"
#ifdef UNIV_NONINL
#include "srv0mon.ic"
#endif
@@ -215,11 +215,6 @@ static monitor_info_t innodb_counter_info[] =
MONITOR_EXISTING | MONITOR_DEFAULT_ON),
MONITOR_DEFAULT_START, MONITOR_OVLD_BUF_POOL_WRITE_REQUEST},
- {"buffer_pool_pages_in_flush", "buffer",
- "Number of pages in flush list",
- MONITOR_NONE,
- MONITOR_DEFAULT_START, MONITOR_PAGE_INFLUSH},
-
{"buffer_pool_wait_free", "buffer",
"Number of times waited for free buffer"
" (innodb_buffer_pool_wait_free)",
@@ -259,12 +254,24 @@ static monitor_info_t innodb_counter_info[] =
MONITOR_EXISTING | MONITOR_DISPLAY_CURRENT | MONITOR_DEFAULT_ON),
MONITOR_DEFAULT_START, MONITOR_OVLD_BUF_POOL_PAGES_DATA},
+ {"buffer_pool_bytes_data", "buffer",
+ "Buffer bytes containing data (innodb_buffer_pool_bytes_data)",
+ static_cast<monitor_type_t>(
+ MONITOR_EXISTING | MONITOR_DISPLAY_CURRENT | MONITOR_DEFAULT_ON),
+ MONITOR_DEFAULT_START, MONITOR_OVLD_BUF_POOL_BYTES_DATA},
+
{"buffer_pool_pages_dirty", "buffer",
"Buffer pages currently dirty (innodb_buffer_pool_pages_dirty)",
static_cast<monitor_type_t>(
MONITOR_EXISTING | MONITOR_DISPLAY_CURRENT | MONITOR_DEFAULT_ON),
MONITOR_DEFAULT_START, MONITOR_OVLD_BUF_POOL_PAGES_DIRTY},
+ {"buffer_pool_bytes_dirty", "buffer",
+ "Buffer bytes currently dirty (innodb_buffer_pool_bytes_dirty)",
+ static_cast<monitor_type_t>(
+ MONITOR_EXISTING | MONITOR_DISPLAY_CURRENT | MONITOR_DEFAULT_ON),
+ MONITOR_DEFAULT_START, MONITOR_OVLD_BUF_POOL_BYTES_DIRTY},
+
{"buffer_pool_pages_free", "buffer",
"Buffer pages currently free (innodb_buffer_pool_pages_free)",
static_cast<monitor_type_t>(
@@ -350,25 +357,40 @@ static monitor_info_t innodb_counter_info[] =
MONITOR_SET_MEMBER, MONITOR_FLUSH_NEIGHBOR_TOTAL_PAGE,
MONITOR_FLUSH_NEIGHBOR_PAGES},
- /* Cumulative counter for flush batches because of max_dirty */
- {"buffer_flush_max_dirty_total_pages", "buffer",
- "Total pages flushed as part of max_dirty batches",
- MONITOR_SET_OWNER, MONITOR_FLUSH_MAX_DIRTY_COUNT,
- MONITOR_FLUSH_MAX_DIRTY_TOTAL_PAGE},
+ {"buffer_flush_n_to_flush_requested", "buffer",
+ "Number of pages requested for flushing.",
+ MONITOR_NONE,
+ MONITOR_DEFAULT_START, MONITOR_FLUSH_N_TO_FLUSH_REQUESTED},
+
+ {"buffer_flush_avg_page_rate", "buffer",
+ "Average number of pages at which flushing is happening",
+ MONITOR_NONE,
+ MONITOR_DEFAULT_START, MONITOR_FLUSH_AVG_PAGE_RATE},
+
+ {"buffer_flush_lsn_avg_rate", "buffer",
+ "Average redo generation rate",
+ MONITOR_NONE,
+ MONITOR_DEFAULT_START, MONITOR_FLUSH_LSN_AVG_RATE},
+
+ {"buffer_flush_pct_for_dirty", "buffer",
+ "Percent of IO capacity used to avoid max dirty page limit",
+ MONITOR_NONE,
+ MONITOR_DEFAULT_START, MONITOR_FLUSH_PCT_FOR_DIRTY},
- {"buffer_flush_max_dirty", "buffer",
- "Number of max_dirty batches",
- MONITOR_SET_MEMBER, MONITOR_FLUSH_MAX_DIRTY_TOTAL_PAGE,
- MONITOR_FLUSH_MAX_DIRTY_COUNT},
+ {"buffer_flush_pct_for_lsn", "buffer",
+ "Percent of IO capacity used to avoid reusable redo space limit",
+ MONITOR_NONE,
+ MONITOR_DEFAULT_START, MONITOR_FLUSH_PCT_FOR_LSN},
+
+ {"buffer_flush_sync_waits", "buffer",
+ "Number of times a wait happens due to sync flushing",
+ MONITOR_NONE,
+ MONITOR_DEFAULT_START, MONITOR_FLUSH_SYNC_WAITS},
- {"buffer_flush_max_dirty_pages", "buffer",
- "Pages queued as a max_dirty batch",
- MONITOR_SET_MEMBER, MONITOR_FLUSH_MAX_DIRTY_TOTAL_PAGE,
- MONITOR_FLUSH_MAX_DIRTY_PAGES},
- /* Cumulative counter for flush batches because of adaptive */
+ /* Cumulative counter for flush batches for adaptive flushing */
{"buffer_flush_adaptive_total_pages", "buffer",
- "Total pages flushed as part of adaptive batches",
+ "Total pages flushed as part of adaptive flushing",
MONITOR_SET_OWNER, MONITOR_FLUSH_ADAPTIVE_COUNT,
MONITOR_FLUSH_ADAPTIVE_TOTAL_PAGE},
@@ -382,22 +404,6 @@ static monitor_info_t innodb_counter_info[] =
MONITOR_SET_MEMBER, MONITOR_FLUSH_ADAPTIVE_TOTAL_PAGE,
MONITOR_FLUSH_ADAPTIVE_PAGES},
- /* Cumulative counter for flush batches because of async */
- {"buffer_flush_async_total_pages", "buffer",
- "Total pages flushed as part of async batches",
- MONITOR_SET_OWNER, MONITOR_FLUSH_ASYNC_COUNT,
- MONITOR_FLUSH_ASYNC_TOTAL_PAGE},
-
- {"buffer_flush_async", "buffer",
- "Number of async batches",
- MONITOR_SET_MEMBER, MONITOR_FLUSH_ASYNC_TOTAL_PAGE,
- MONITOR_FLUSH_ASYNC_COUNT},
-
- {"buffer_flush_async_pages", "buffer",
- "Pages queued as an async batch",
- MONITOR_SET_MEMBER, MONITOR_FLUSH_ASYNC_TOTAL_PAGE,
- MONITOR_FLUSH_ASYNC_PAGES},
-
/* Cumulative counter for flush batches because of sync */
{"buffer_flush_sync_total_pages", "buffer",
"Total pages flushed as part of sync batches",
@@ -859,6 +865,16 @@ static monitor_info_t innodb_counter_info[] =
MONITOR_NONE,
MONITOR_DEFAULT_START, MONITOR_PAGE_DECOMPRESS},
+ {"compression_pad_increments", "compression",
+ "Number of times padding is incremented to avoid compression failures",
+ MONITOR_NONE,
+ MONITOR_DEFAULT_START, MONITOR_PAD_INCREMENTS},
+
+ {"compression_pad_decrements", "compression",
+ "Number of times padding is decremented due to good compressibility",
+ MONITOR_NONE,
+ MONITOR_DEFAULT_START, MONITOR_PAD_DECREMENTS},
+
/* ========== Counters for Index ========== */
{"module_index", "index", "Index Manager",
MONITOR_MODULE,
@@ -1130,11 +1146,26 @@ static monitor_info_t innodb_counter_info[] =
MONITOR_MODULE,
MONITOR_DEFAULT_START, MONITOR_MODULE_DDL_STATS},
+ {"ddl_background_drop_indexes", "ddl",
+ "Number of indexes waiting to be dropped after failed index creation",
+ MONITOR_NONE,
+ MONITOR_DEFAULT_START, MONITOR_BACKGROUND_DROP_INDEX},
+
{"ddl_background_drop_tables", "ddl",
"Number of tables in background drop table list",
MONITOR_NONE,
MONITOR_DEFAULT_START, MONITOR_BACKGROUND_DROP_TABLE},
+ {"ddl_online_create_index", "ddl",
+ "Number of indexes being created online",
+ MONITOR_NONE,
+ MONITOR_DEFAULT_START, MONITOR_ONLINE_CREATE_INDEX},
+
+ {"ddl_pending_alter_table", "ddl",
+ "Number of ALTER TABLE, CREATE INDEX, DROP INDEX in progress",
+ MONITOR_NONE,
+ MONITOR_DEFAULT_START, MONITOR_PENDING_ALTER_TABLE},
+
/* ===== Counters for ICP (Index Condition Pushdown) Module ===== */
{"module_icp", "icp", "Index Condition Pushdown",
MONITOR_MODULE,
@@ -1171,6 +1202,34 @@ has been turned on/off. */
UNIV_INTERN ulint monitor_set_tbl[(NUM_MONITOR + NUM_BITS_ULINT
- 1) / NUM_BITS_ULINT];
+#ifndef HAVE_ATOMIC_BUILTINS_64
+/** Mutex protecting atomic operations on platforms that lack
+built-in operations for atomic memory access */
+ib_mutex_t monitor_mutex;
+
+/** Key to register monitor_mutex with performance schema */
+UNIV_INTERN mysql_pfs_key_t monitor_mutex_key;
+
+/****************************************************************//**
+Initialize the monitor subsystem. */
+UNIV_INTERN
+void
+srv_mon_create(void)
+/*================*/
+{
+ mutex_create(monitor_mutex_key, &monitor_mutex, SYNC_ANY_LATCH);
+}
+/****************************************************************//**
+Close the monitor subsystem. */
+UNIV_INTERN
+void
+srv_mon_free(void)
+/*==============*/
+{
+ mutex_free(&monitor_mutex);
+}
+#endif /* !HAVE_ATOMIC_BUILTINS_64 */
+
/****************************************************************//**
Get a monitor's "monitor_info" by its monitor id (index into the
innodb_counter_info array.
@@ -1359,13 +1418,14 @@ srv_mon_process_existing_counter(
mon_option_t set_option) /*!< in: Turn on/off reset the
counter */
{
- mon_type_t value;
- monitor_info_t* monitor_info;
- ibool update_min = FALSE;
- buf_pool_stat_t stat;
- ulint LRU_len;
- ulint free_len;
- ulint flush_list_len;
+ mon_type_t value;
+ monitor_info_t* monitor_info;
+ ibool update_min = FALSE;
+ buf_pool_stat_t stat;
+ buf_pools_list_size_t buf_pools_list_size;
+ ulint LRU_len;
+ ulint free_len;
+ ulint flush_list_len;
monitor_info = srv_mon_get_info(monitor_id);
@@ -1381,7 +1441,7 @@ srv_mon_process_existing_counter(
/* export_vars.innodb_buffer_pool_reads. Num Reads from
disk (page not in buffer) */
case MONITOR_OVLD_BUF_POOL_READS:
- value = srv_buf_pool_reads;
+ value = srv_stats.buf_pool_reads;
break;
/* innodb_buffer_pool_read_requests, the number of logical
@@ -1394,12 +1454,12 @@ srv_mon_process_existing_counter(
/* innodb_buffer_pool_write_requests, the number of
write request */
case MONITOR_OVLD_BUF_POOL_WRITE_REQUEST:
- value = srv_buf_pool_write_requests;
+ value = srv_stats.buf_pool_write_requests;
break;
/* innodb_buffer_pool_wait_free */
case MONITOR_OVLD_BUF_POOL_WAIT_FREE:
- value = srv_buf_pool_wait_free;
+ value = srv_stats.buf_pool_wait_free;
break;
/* innodb_buffer_pool_read_ahead */
@@ -1431,12 +1491,25 @@ srv_mon_process_existing_counter(
value = LRU_len;
break;
+ /* innodb_buffer_pool_bytes_data */
+ case MONITOR_OVLD_BUF_POOL_BYTES_DATA:
+ buf_get_total_list_size_in_bytes(&buf_pools_list_size);
+ value = buf_pools_list_size.LRU_bytes
+ + buf_pools_list_size.unzip_LRU_bytes;
+ break;
+
/* innodb_buffer_pool_pages_dirty */
case MONITOR_OVLD_BUF_POOL_PAGES_DIRTY:
buf_get_total_list_len(&LRU_len, &free_len, &flush_list_len);
value = flush_list_len;
break;
+ /* innodb_buffer_pool_bytes_dirty */
+ case MONITOR_OVLD_BUF_POOL_BYTES_DIRTY:
+ buf_get_total_list_size_in_bytes(&buf_pools_list_size);
+ value = buf_pools_list_size.flush_list_bytes;
+ break;
+
/* innodb_buffer_pool_pages_free */
case MONITOR_OVLD_BUF_POOL_PAGES_FREE:
buf_get_total_list_len(&LRU_len, &free_len, &flush_list_len);
@@ -1463,12 +1536,12 @@ srv_mon_process_existing_counter(
/* innodb_data_reads, the total number of data reads */
case MONITOR_OVLD_BYTE_READ:
- value = srv_data_read;
+ value = srv_stats.data_read;
break;
/* innodb_data_writes, the total number of data writes. */
case MONITOR_OVLD_BYTE_WRITTEN:
- value = srv_data_written;
+ value = srv_stats.data_written;
break;
/* innodb_data_reads, the total number of data reads. */
@@ -1488,7 +1561,7 @@ srv_mon_process_existing_counter(
/* innodb_os_log_written */
case MONITOR_OVLD_OS_LOG_WRITTEN:
- value = (mon_type_t) srv_os_log_written;
+ value = (mon_type_t) srv_stats.os_log_written;
break;
/* innodb_os_log_fsyncs */
@@ -1504,33 +1577,33 @@ srv_mon_process_existing_counter(
/* innodb_os_log_pending_writes */
case MONITOR_OVLD_OS_LOG_PENDING_WRITES:
- value = srv_os_log_pending_writes;
+ value = srv_stats.os_log_pending_writes;
update_min = TRUE;
break;
/* innodb_log_waits */
case MONITOR_OVLD_LOG_WAITS:
- value = srv_log_waits;
+ value = srv_stats.log_waits;
break;
/* innodb_log_write_requests */
case MONITOR_OVLD_LOG_WRITE_REQUEST:
- value = srv_log_write_requests;
+ value = srv_stats.log_write_requests;
break;
/* innodb_log_writes */
case MONITOR_OVLD_LOG_WRITES:
- value = srv_log_writes;
+ value = srv_stats.log_writes;
break;
/* innodb_dblwr_writes */
case MONITOR_OVLD_SRV_DBLWR_WRITES:
- value = srv_dblwr_writes;
+ value = srv_stats.dblwr_writes;
break;
/* innodb_dblwr_pages_written */
case MONITOR_OVLD_SRV_DBLWR_PAGES_WRITTEN:
- value = srv_dblwr_pages_written;
+ value = srv_stats.dblwr_pages_written;
break;
/* innodb_page_size */
@@ -1539,27 +1612,27 @@ srv_mon_process_existing_counter(
break;
case MONITOR_OVLD_RWLOCK_S_SPIN_WAITS:
- value = rw_s_spin_wait_count;
+ value = rw_lock_stats.rw_s_spin_wait_count;
break;
case MONITOR_OVLD_RWLOCK_X_SPIN_WAITS:
- value = rw_x_os_wait_count;
+ value = rw_lock_stats.rw_x_os_wait_count;
break;
case MONITOR_OVLD_RWLOCK_S_SPIN_ROUNDS:
- value = rw_s_spin_round_count;
+ value = rw_lock_stats.rw_s_spin_round_count;
break;
case MONITOR_OVLD_RWLOCK_X_SPIN_ROUNDS:
- value = rw_x_spin_round_count;
+ value = rw_lock_stats.rw_x_spin_round_count;
break;
case MONITOR_OVLD_RWLOCK_S_OS_WAITS:
- value = rw_s_os_wait_count;
+ value = rw_lock_stats.rw_s_os_wait_count;
break;
case MONITOR_OVLD_RWLOCK_X_OS_WAITS:
- value = rw_x_os_wait_count;
+ value = rw_lock_stats.rw_x_os_wait_count;
break;
case MONITOR_OVLD_BUFFER_POOL_SIZE:
@@ -1568,44 +1641,44 @@ srv_mon_process_existing_counter(
/* innodb_rows_read */
case MONITOR_OLVD_ROW_READ:
- value = srv_n_rows_read;
+ value = srv_stats.n_rows_read;
break;
/* innodb_rows_inserted */
case MONITOR_OLVD_ROW_INSERTED:
- value = srv_n_rows_inserted;
+ value = srv_stats.n_rows_inserted;
break;
/* innodb_rows_deleted */
case MONITOR_OLVD_ROW_DELETED:
- value = srv_n_rows_deleted;
+ value = srv_stats.n_rows_deleted;
break;
/* innodb_rows_updated */
case MONITOR_OLVD_ROW_UPDTATED:
- value = srv_n_rows_updated;
+ value = srv_stats.n_rows_updated;
break;
/* innodb_row_lock_current_waits */
case MONITOR_OVLD_ROW_LOCK_CURRENT_WAIT:
- value = srv_n_lock_wait_current_count;
+ value = srv_stats.n_lock_wait_current_count;
break;
/* innodb_row_lock_time */
case MONITOR_OVLD_LOCK_WAIT_TIME:
- value = srv_n_lock_wait_time / 1000;
+ value = srv_stats.n_lock_wait_time / 1000;
break;
/* innodb_row_lock_time_max */
case MONITOR_OVLD_LOCK_MAX_WAIT_TIME:
- value = srv_n_lock_max_wait_time / 1000;
+ value = lock_sys->n_lock_max_wait_time / 1000;
break;
/* innodb_row_lock_time_avg */
case MONITOR_OVLD_LOCK_AVG_WAIT_TIME:
- if (srv_n_lock_wait_count > 0) {
- value = srv_n_lock_wait_time / 1000
- / srv_n_lock_wait_count;
+ if (srv_stats.n_lock_wait_count > 0) {
+ value = srv_stats.n_lock_wait_time / 1000
+ / srv_stats.n_lock_wait_count;
} else {
value = 0;
}
@@ -1613,7 +1686,7 @@ srv_mon_process_existing_counter(
/* innodb_row_lock_waits */
case MONITOR_OVLD_ROW_LOCK_WAIT:
- value = srv_n_lock_wait_count;
+ value = srv_stats.n_lock_wait_count;
break;
case MONITOR_RSEG_HISTORY_LEN:
diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc
index 30e0698eab9..4c5753ac40e 100644
--- a/storage/innobase/srv/srv0srv.cc
+++ b/storage/innobase/srv/srv0srv.cc
@@ -39,8 +39,6 @@ Created 10/8/1995 Heikki Tuuri
*******************************************************/
/* Dummy comment */
-#include "m_string.h" /* for my_sys.h */
-#include "my_sys.h" /* DEBUG_SYNC_C */
#include "srv0srv.h"
#include "ut0mem.h"
@@ -61,6 +59,7 @@ Created 10/8/1995 Heikki Tuuri
#include "btr0sea.h"
#include "dict0load.h"
#include "dict0boot.h"
+#include "dict0stats_bg.h" /* dict_stats_event */
#include "srv0start.h"
#include "row0mysql.h"
#include "ha_prototypes.h"
@@ -72,10 +71,6 @@ Created 10/8/1995 Heikki Tuuri
#include "mysql/plugin.h"
#include "mysql/service_thd_wait.h"
-/* The following counter is incremented whenever there is some user activity
-in the server */
-UNIV_INTERN ulint srv_activity_count = 0;
-
/* The following is the maximum allowed duration of a lock wait. */
UNIV_INTERN ulint srv_fatal_semaphore_wait_threshold = 600;
@@ -88,6 +83,8 @@ UNIV_INTERN ibool srv_error_monitor_active = FALSE;
UNIV_INTERN ibool srv_buf_dump_thread_active = FALSE;
+UNIV_INTERN ibool srv_dict_stats_thread_active = FALSE;
+
UNIV_INTERN const char* srv_main_thread_op_info = "";
/** Prefix used by MySQL to indicate pre-5.1 table name encoding */
@@ -106,6 +103,9 @@ UNIV_INTERN char* srv_undo_dir = NULL;
/** The number of tablespaces to use for rollback segments. */
UNIV_INTERN ulong srv_undo_tablespaces = 8;
+/** The number of UNDO tablespaces that are open and ready to use. */
+UNIV_INTERN ulint srv_undo_tablespaces_open = 8;
+
/* The number of rollback segments to use */
UNIV_INTERN ulong srv_undo_logs = 1;
@@ -113,6 +113,10 @@ UNIV_INTERN ulong srv_undo_logs = 1;
UNIV_INTERN char* srv_arch_dir = NULL;
#endif /* UNIV_LOG_ARCHIVE */
+/** Set if InnoDB must operate in read-only mode. We don't do any
+recovery and open all tables in RO mode instead of RW mode. We don't
+sync the max trx id to disk either. */
+UNIV_INTERN my_bool srv_read_only_mode;
/** store to its own file each table created by an user; data
dictionary tables are in the system tablespace 0 */
UNIV_INTERN my_bool srv_file_per_table;
@@ -130,6 +134,10 @@ UNIV_INTERN ulint srv_max_file_format_at_startup = UNIV_FORMAT_MAX;
/** Place locks to records only i.e. do not use next-key locking except
on duplicate key checking and foreign key checking */
UNIV_INTERN ibool srv_locks_unsafe_for_binlog = FALSE;
+/** Sort buffer size in index creation */
+UNIV_INTERN ulong srv_sort_buf_size = 1048576;
+/** Maximum modification log file size for online index creation */
+UNIV_INTERN unsigned long long srv_online_max_size;
/* If this flag is TRUE, then we will use the native aio of the
OS (provided we compiled Innobase with it in), otherwise we will
@@ -172,15 +180,16 @@ the user from forgetting the 'newraw' keyword to my.cnf */
UNIV_INTERN ibool srv_created_new_raw = FALSE;
-UNIV_INTERN char** srv_log_group_home_dirs = NULL;
+UNIV_INTERN char* srv_log_group_home_dir = NULL;
-UNIV_INTERN ulint srv_n_log_groups = ULINT_MAX;
-UNIV_INTERN ulint srv_n_log_files = ULINT_MAX;
+UNIV_INTERN ulong srv_n_log_files = SRV_N_LOG_FILES_MAX;
/* size in database pages */
UNIV_INTERN ib_uint64_t srv_log_file_size = IB_UINT64_MAX;
+UNIV_INTERN ib_uint64_t srv_log_file_size_requested;
/* size in database pages */
UNIV_INTERN ulint srv_log_buffer_size = ULINT_MAX;
UNIV_INTERN ulong srv_flush_log_at_trx_commit = 1;
+UNIV_INTERN uint srv_flush_log_at_timeout = 1;
UNIV_INTERN ulong srv_page_size = UNIV_PAGE_SIZE_DEF;
UNIV_INTERN ulong srv_page_size_shift = UNIV_PAGE_SIZE_SHIFT_DEF;
@@ -213,7 +222,7 @@ UNIV_INTERN ulong srv_n_page_hash_locks = 16;
/** Scan depth for LRU flush batch i.e.: number of blocks scanned*/
UNIV_INTERN ulong srv_LRU_scan_depth = 1024;
/** whether or not to flush neighbors of a block */
-UNIV_INTERN my_bool srv_flush_neighbors = TRUE;
+UNIV_INTERN ulong srv_flush_neighbors = 1;
/* previously requested size */
UNIV_INTERN ulint srv_buf_pool_old_size;
/* current size in kilobytes */
@@ -258,7 +267,8 @@ UNIV_INTERN ulint srv_win_file_flush_method = SRV_WIN_IO_UNBUFFERED;
UNIV_INTERN ulint srv_max_n_open_files = 300;
/* Number of IO operations per second the server can do */
-UNIV_INTERN ulong srv_io_capacity = 400;
+UNIV_INTERN ulong srv_io_capacity = 200;
+UNIV_INTERN ulong srv_max_io_capacity = 400;
/* The InnoDB main thread tries to keep the ratio of modified pages
in the buffer pool to all database pages in the buffer pool smaller than
@@ -266,76 +276,49 @@ the following number. But it is not guaranteed that the value stays below
that during a time of heavy update/insert activity. */
UNIV_INTERN ulong srv_max_buf_pool_modified_pct = 75;
+UNIV_INTERN ulong srv_max_dirty_pages_pct_lwm = 50;
+
+/* This is the percentage of log capacity at which adaptive flushing,
+if enabled, will kick in. */
+UNIV_INTERN ulong srv_adaptive_flushing_lwm = 10;
+
+/* Number of iterations over which adaptive flushing is averaged. */
+UNIV_INTERN ulong srv_flushing_avg_loops = 30;
/* The number of purge threads to use.*/
-UNIV_INTERN ulong srv_n_purge_threads = 1;
+UNIV_INTERN ulong srv_n_purge_threads = 1;
/* the number of pages to purge in one batch */
-UNIV_INTERN ulong srv_purge_batch_size = 20;
-
-/* variable counts amount of data read in total (in bytes) */
-UNIV_INTERN ulint srv_data_read = 0;
+UNIV_INTERN ulong srv_purge_batch_size = 20;
/* Internal setting for "innodb_stats_method". Decides how InnoDB treats
NULL value when collecting statistics. By default, it is set to
SRV_STATS_NULLS_EQUAL(0), ie. all NULL value are treated equal */
-ulong srv_innodb_stats_method = SRV_STATS_NULLS_EQUAL;
-
-/* here we count the amount of data written in total (in bytes) */
-UNIV_INTERN ulint srv_data_written = 0;
-
-/* the number of the log write requests done */
-UNIV_INTERN ulint srv_log_write_requests = 0;
-
-/* the number of physical writes to the log performed */
-UNIV_INTERN ulint srv_log_writes = 0;
-
-/* amount of data written to the log files in bytes */
-UNIV_INTERN lsn_t srv_os_log_written = 0;
+UNIV_INTERN ulong srv_innodb_stats_method = SRV_STATS_NULLS_EQUAL;
-/* amount of writes being done to the log files */
-UNIV_INTERN ulint srv_os_log_pending_writes = 0;
-
-/* we increase this counter, when there we don't have enough space in the
-log buffer and have to flush it */
-UNIV_INTERN ulint srv_log_waits = 0;
-
-/* this variable counts the amount of times, when the doublewrite buffer
-was flushed */
-UNIV_INTERN ulint srv_dblwr_writes = 0;
-
-/* here we store the number of pages that have been flushed to the
-doublewrite buffer */
-UNIV_INTERN ulint srv_dblwr_pages_written = 0;
-
-/* in this variable we store the number of write requests issued */
-UNIV_INTERN ulint srv_buf_pool_write_requests = 0;
-
-/* here we store the number of times when we had to wait for a free page
-in the buffer pool. It happens when the buffer pool is full and we need
-to make a flush, in order to be able to read or create a page. */
-UNIV_INTERN ulint srv_buf_pool_wait_free = 0;
-
-/* variable to count the number of pages that were written from buffer
-pool to the disk */
-UNIV_INTERN ulint srv_buf_pool_flushed = 0;
-
-/** Number of buffer pool reads that led to the
-reading of a disk page */
-UNIV_INTERN ulint srv_buf_pool_reads = 0;
+UNIV_INTERN srv_stats_t srv_stats;
/* structure to pass status variables to MySQL */
-UNIV_INTERN export_struc export_vars;
-
-/* If the following is != 0 we do not allow inserts etc. This protects
-the user from forgetting the innodb_force_recovery keyword to my.cnf */
-
-UNIV_INTERN ulint srv_force_recovery = 0;
+UNIV_INTERN export_var_t export_vars;
+
+/** Normally 0. When nonzero, skip some phases of crash recovery,
+starting from SRV_FORCE_IGNORE_CORRUPT, so that data can be recovered
+by SELECT or mysqldump. When this is nonzero, we do not allow any user
+modifications to the data. */
+UNIV_INTERN ulong srv_force_recovery;
+#ifndef DBUG_OFF
+/** Inject a crash at different steps of the recovery process.
+This is for testing and debugging only. */
+UNIV_INTERN ulong srv_force_recovery_crash;
+#endif /* !DBUG_OFF */
/** Print all user-level transactions deadlocks to mysqld stderr */
UNIV_INTERN my_bool srv_print_all_deadlocks = FALSE;
+/** Enable INFORMATION_SCHEMA.innodb_cmp_per_index */
+UNIV_INTERN my_bool srv_cmp_per_index_enabled = FALSE;
+
/* If the following is set to 1 then we do not run purge and insert buffer
merge to completion before shutdown. If it is set to 2, do not even flush the
buffer pool to data files at the shutdown: we effectively 'crash'
@@ -352,21 +335,23 @@ this many index pages, there are 2 ways to calculate statistics:
* quick transient stats, that are used if persistent stats for the given
table/index are not found in the innodb database */
UNIV_INTERN unsigned long long srv_stats_transient_sample_pages = 8;
+UNIV_INTERN my_bool srv_stats_persistent = TRUE;
UNIV_INTERN unsigned long long srv_stats_persistent_sample_pages = 20;
+UNIV_INTERN my_bool srv_stats_auto_recalc = TRUE;
UNIV_INTERN ibool srv_use_doublewrite_buf = TRUE;
-UNIV_INTERN ibool srv_use_atomic_writes = FALSE;
-#ifdef HAVE_POSIX_FALLOCATE
-UNIV_INTERN ibool srv_use_posix_fallocate = TRUE;
-#endif
-
/** doublewrite buffer is 1MB is size i.e.: it can hold 128 16K pages.
The following parameter is the size of the buffer that is used for
batch flushing i.e.: LRU flushing and flush_list flushing. The rest
of the pages are used for single page flushing. */
UNIV_INTERN ulong srv_doublewrite_batch_size = 120;
+UNIV_INTERN ibool srv_use_atomic_writes = FALSE;
+#ifdef HAVE_POSIX_FALLOCATE
+UNIV_INTERN ibool srv_use_posix_fallocate = TRUE;
+#endif
+
UNIV_INTERN ulong srv_replication_delay = 0;
/*-------------------------------------------*/
@@ -382,11 +367,6 @@ UNIV_INTERN ibool srv_print_log_io = FALSE;
UNIV_INTERN ibool srv_print_latch_waits = FALSE;
#endif /* UNIV_DEBUG */
-UNIV_INTERN ulint srv_n_rows_inserted = 0;
-UNIV_INTERN ulint srv_n_rows_updated = 0;
-UNIV_INTERN ulint srv_n_rows_deleted = 0;
-UNIV_INTERN ulint srv_n_rows_read = 0;
-
static ulint srv_n_rows_inserted_old = 0;
static ulint srv_n_rows_updated_old = 0;
static ulint srv_n_rows_deleted_old = 0;
@@ -411,58 +391,58 @@ UNIV_INTERN const char* srv_io_thread_function[SRV_MAX_N_IO_THREADS];
UNIV_INTERN time_t srv_last_monitor_time;
-UNIV_INTERN mutex_t srv_innodb_monitor_mutex;
+UNIV_INTERN ib_mutex_t srv_innodb_monitor_mutex;
-/* Mutex for locking srv_monitor_file */
-UNIV_INTERN mutex_t srv_monitor_file_mutex;
+/* Mutex for locking srv_monitor_file. Not created if srv_read_only_mode */
+UNIV_INTERN ib_mutex_t srv_monitor_file_mutex;
#ifdef UNIV_PFS_MUTEX
# ifndef HAVE_ATOMIC_BUILTINS
/* Key to register server_mutex with performance schema */
UNIV_INTERN mysql_pfs_key_t server_mutex_key;
# endif /* !HAVE_ATOMIC_BUILTINS */
-/* Key to register srv_innodb_monitor_mutex with performance schema */
+/** Key to register srv_innodb_monitor_mutex with performance schema */
UNIV_INTERN mysql_pfs_key_t srv_innodb_monitor_mutex_key;
-/* Key to register srv_monitor_file_mutex with performance schema */
+/** Key to register srv_monitor_file_mutex with performance schema */
UNIV_INTERN mysql_pfs_key_t srv_monitor_file_mutex_key;
-/* Key to register srv_dict_tmpfile_mutex with performance schema */
+/** Key to register srv_dict_tmpfile_mutex with performance schema */
UNIV_INTERN mysql_pfs_key_t srv_dict_tmpfile_mutex_key;
-/* Key to register the mutex with performance schema */
+/** Key to register the mutex with performance schema */
UNIV_INTERN mysql_pfs_key_t srv_misc_tmpfile_mutex_key;
-/* Key to register srv_sys_t::mutex with performance schema */
+/** Key to register srv_sys_t::mutex with performance schema */
UNIV_INTERN mysql_pfs_key_t srv_sys_mutex_key;
-/* Key to register srv_sys_t::tasks_mutex with performance schema */
+/** Key to register srv_sys_t::tasks_mutex with performance schema */
UNIV_INTERN mysql_pfs_key_t srv_sys_tasks_mutex_key;
#endif /* UNIV_PFS_MUTEX */
-/* Temporary file for innodb monitor output */
+/** Temporary file for innodb monitor output */
UNIV_INTERN FILE* srv_monitor_file;
-/* Mutex for locking srv_dict_tmpfile.
+/** Mutex for locking srv_dict_tmpfile. Not created if srv_read_only_mode.
This mutex has a very high rank; threads reserving it should not
be holding any InnoDB latches. */
-UNIV_INTERN mutex_t srv_dict_tmpfile_mutex;
-/* Temporary file for output from the data dictionary */
+UNIV_INTERN ib_mutex_t srv_dict_tmpfile_mutex;
+/** Temporary file for output from the data dictionary */
UNIV_INTERN FILE* srv_dict_tmpfile;
-/* Mutex for locking srv_misc_tmpfile.
+/** Mutex for locking srv_misc_tmpfile. Not created if srv_read_only_mode.
This mutex has a very low rank; threads reserving it should not
acquire any further latches or sleep before releasing this one. */
-UNIV_INTERN mutex_t srv_misc_tmpfile_mutex;
-/* Temporary file for miscellanous diagnostic output */
+UNIV_INTERN ib_mutex_t srv_misc_tmpfile_mutex;
+/** Temporary file for miscellanous diagnostic output */
UNIV_INTERN FILE* srv_misc_tmpfile;
UNIV_INTERN ulint srv_main_thread_process_no = 0;
UNIV_INTERN ulint srv_main_thread_id = 0;
-/* The following count work done by srv_master_thread. */
+/* The following counts are used by the srv_master_thread. */
-/* Iterations of the loop bounded by 'srv_active' label. */
-static ulint srv_main_active_loops = 0;
-/* Iterations of the loop bounded by the 'srv_idle' label. */
-static ulint srv_main_idle_loops = 0;
-/* Iterations of the loop bounded by the 'srv_shutdown' label. */
-static ulint srv_main_shutdown_loops = 0;
-/* Log writes involving flush. */
-static ulint srv_log_writes_and_flush = 0;
+/** Iterations of the loop bounded by 'srv_active' label. */
+static ulint srv_main_active_loops = 0;
+/** Iterations of the loop bounded by the 'srv_idle' label. */
+static ulint srv_main_idle_loops = 0;
+/** Iterations of the loop bounded by the 'srv_shutdown' label. */
+static ulint srv_main_shutdown_loops = 0;
+/** Log writes involving flush. */
+static ulint srv_log_writes_and_flush = 0;
/* This is only ever touched by the master thread. It records the
time when the last flush of log file has happened. The master
@@ -491,7 +471,8 @@ current_time % 5 != 0. */
} while (0)
/** Test if the system mutex is owned. */
-#define srv_sys_mutex_own() mutex_own(&srv_sys->mutex)
+#define srv_sys_mutex_own() (mutex_own(&srv_sys->mutex) \
+ && !srv_read_only_mode)
/** Release the system mutex. */
#define srv_sys_mutex_exit() do { \
@@ -499,7 +480,7 @@ current_time % 5 != 0. */
} while (0)
#define fetch_lock_wait_timeout(trx) \
- ((trx)->lock.allowed_to_wait \
+ ((trx)->lock.allowed_to_wait \
? thd_lock_wait_timeout((trx)->mysql_thd) \
: 0)
@@ -575,35 +556,32 @@ suspending the master thread and utility threads when they have nothing
to do. The thread table can be seen as an analogue to the process table
in a traditional Unix implementation. */
-/** The server system */
-typedef struct srv_sys_struct srv_sys_t;
-
/** The server system struct */
-struct srv_sys_struct{
- mutex_t tasks_mutex; /*!< variable protecting the
+struct srv_sys_t{
+ ib_mutex_t tasks_mutex; /*!< variable protecting the
tasks queue */
UT_LIST_BASE_NODE_T(que_thr_t)
tasks; /*!< task queue */
- mutex_t mutex; /*!< variable protecting the
-
+ ib_mutex_t mutex; /*!< variable protecting the
fields below. */
ulint n_sys_threads; /*!< size of the sys_threads
array */
- srv_table_t* sys_threads; /*!< server thread table */
+ srv_slot_t* sys_threads; /*!< server thread table */
ulint n_threads_active[SRV_MASTER + 1];
/*!< number of threads active
in a thread class */
- ulint activity_count; /*!< For tracking server
+ srv_stats_t::ulint_ctr_1_t
+ activity_count; /*!< For tracking server
activity */
};
#ifndef HAVE_ATOMIC_BUILTINS
/** Mutex protecting some server global variables. */
-UNIV_INTERN mutex_t server_mutex;
+UNIV_INTERN ib_mutex_t server_mutex;
#endif /* !HAVE_ATOMIC_BUILTINS */
static srv_sys_t* srv_sys = NULL;
@@ -663,6 +641,18 @@ srv_set_io_thread_op_info(
srv_io_thread_op_info[i] = str;
}
+/*********************************************************************//**
+Resets the info describing an i/o thread current state. */
+UNIV_INTERN
+void
+srv_reset_io_thread_op_info()
+/*=========================*/
+{
+ for (ulint i = 0; i < UT_ARR_SIZE(srv_io_thread_op_info); ++i) {
+ srv_io_thread_op_info[i] = "not started yet";
+ }
+}
+
#ifdef UNIV_DEBUG
/*********************************************************************//**
Validates the type of a thread table slot.
@@ -763,6 +753,8 @@ srv_suspend_thread_low(
/*===================*/
srv_slot_t* slot) /*!< in/out: thread slot */
{
+
+ ut_ad(!srv_read_only_mode);
ut_ad(srv_sys_mutex_own());
ut_ad(slot->in_use);
@@ -922,9 +914,8 @@ void
srv_init(void)
/*==========*/
{
- ulint i;
- ulint srv_sys_sz;
- ulint n_sys_threads;
+ ulint n_sys_threads = 0;
+ ulint srv_sys_sz = sizeof(*srv_sys);
#ifndef HAVE_ATOMIC_BUILTINS
mutex_create(server_mutex_key, &server_mutex, SYNC_ANY_LATCH);
@@ -933,38 +924,55 @@ srv_init(void)
mutex_create(srv_innodb_monitor_mutex_key,
&srv_innodb_monitor_mutex, SYNC_NO_ORDER_CHECK);
- /* Number of purge threads + master thread */
- n_sys_threads = srv_n_purge_threads + 1;
+ if (!srv_read_only_mode) {
- srv_sys_sz = sizeof(*srv_sys) + (n_sys_threads * sizeof(srv_slot_t));
+ /* Number of purge threads + master thread */
+ n_sys_threads = srv_n_purge_threads + 1;
+
+ srv_sys_sz += n_sys_threads * sizeof(*srv_sys->sys_threads);
+ }
srv_sys = static_cast<srv_sys_t*>(mem_zalloc(srv_sys_sz));
- mutex_create(srv_sys_mutex_key, &srv_sys->mutex, SYNC_THREADS);
+ srv_sys->n_sys_threads = n_sys_threads;
+
+ if (!srv_read_only_mode) {
- mutex_create(srv_sys_tasks_mutex_key,
- &srv_sys->tasks_mutex, SYNC_ANY_LATCH);
+ mutex_create(srv_sys_mutex_key, &srv_sys->mutex, SYNC_THREADS);
- srv_sys->n_sys_threads = n_sys_threads;
- srv_sys->sys_threads = (srv_slot_t*) &srv_sys[1];
+ mutex_create(srv_sys_tasks_mutex_key,
+ &srv_sys->tasks_mutex, SYNC_ANY_LATCH);
- for (i = 0; i < srv_sys->n_sys_threads; i++) {
- srv_slot_t* slot;
+ srv_sys->sys_threads = (srv_slot_t*) &srv_sys[1];
- slot = srv_sys->sys_threads + i;
+ for (ulint i = 0; i < srv_sys->n_sys_threads; ++i) {
+ srv_slot_t* slot = &srv_sys->sys_threads[i];
- slot->event = os_event_create(NULL);
+ slot->event = os_event_create();
- ut_a(slot->event);
- }
+ ut_a(slot->event);
+ }
+
+ srv_error_event = os_event_create();
- srv_error_event = os_event_create(NULL);
+ srv_monitor_event = os_event_create();
- srv_monitor_event = os_event_create(NULL);
+ srv_buf_dump_event = os_event_create();
- srv_buf_dump_event = os_event_create("buf_dump_event");
+ UT_LIST_INIT(srv_sys->tasks);
+ }
- UT_LIST_INIT(srv_sys->tasks);
+ /* page_zip_stat_per_index_mutex is acquired from:
+ 1. page_zip_compress() (after SYNC_FSP)
+ 2. page_zip_decompress()
+ 3. i_s_cmp_per_index_fill_low() (where SYNC_DICT is acquired)
+ 4. innodb_cmp_per_index_update(), no other latches
+ since we do not acquire any other latches while holding this mutex,
+ it can have very low level. We pick SYNC_ANY_LATCH for it. */
+
+ mutex_create(
+ page_zip_stat_per_index_mutex_key,
+ &page_zip_stat_per_index_mutex, SYNC_ANY_LATCH);
/* Create dummy indexes for infimum and supremum records */
@@ -994,8 +1002,10 @@ srv_free(void)
trx_i_s_cache_free(trx_i_s_cache);
- os_event_free(srv_buf_dump_event);
- srv_buf_dump_event = NULL;
+ if (!srv_read_only_mode) {
+ os_event_free(srv_buf_dump_event);
+ srv_buf_dump_event = NULL;
+ }
}
/*********************************************************************//**
@@ -1017,10 +1027,9 @@ srv_general_init(void)
}
/*********************************************************************//**
-Normalizes init parameter values to use units we use inside InnoDB.
-@return DB_SUCCESS or error code */
+Normalizes init parameter values to use units we use inside InnoDB. */
static
-ulint
+void
srv_normalize_init_values(void)
/*===========================*/
{
@@ -1042,28 +1051,19 @@ srv_normalize_init_values(void)
srv_log_buffer_size = srv_log_buffer_size / UNIV_PAGE_SIZE;
srv_lock_table_size = 5 * (srv_buf_pool_size / UNIV_PAGE_SIZE);
-
- return(DB_SUCCESS);
}
/*********************************************************************//**
-Boots the InnoDB server.
-@return DB_SUCCESS or error code */
+Boots the InnoDB server. */
UNIV_INTERN
-ulint
+void
srv_boot(void)
/*==========*/
{
- ulint err;
-
/* Transform the init parameter values given by MySQL to
use units we use inside InnoDB: */
- err = srv_normalize_init_values();
-
- if (err != DB_SUCCESS) {
- return(err);
- }
+ srv_normalize_init_values();
/* Initialize synchronization primitives, memory management, and thread
local storage */
@@ -1073,8 +1073,7 @@ srv_boot(void)
/* Initialize this module */
srv_init();
-
- return(DB_SUCCESS);
+ srv_mon_create();
}
/******************************************************************//**
@@ -1097,10 +1096,10 @@ srv_refresh_innodb_monitor_stats(void)
buf_refresh_io_stats_all();
- srv_n_rows_inserted_old = srv_n_rows_inserted;
- srv_n_rows_updated_old = srv_n_rows_updated;
- srv_n_rows_deleted_old = srv_n_rows_deleted;
- srv_n_rows_read_old = srv_n_rows_read;
+ srv_n_rows_inserted_old = srv_stats.n_rows_inserted;
+ srv_n_rows_updated_old = srv_stats.n_rows_updated;
+ srv_n_rows_deleted_old = srv_stats.n_rows_deleted;
+ srv_n_rows_read_old = srv_stats.n_rows_read;
mutex_exit(&srv_innodb_monitor_mutex);
}
@@ -1165,7 +1164,7 @@ srv_printf_innodb_monitor(
mutex_enter(&dict_foreign_err_mutex);
- if (ftell(dict_foreign_err_file) != 0L) {
+ if (!srv_read_only_mode && ftell(dict_foreign_err_file) != 0L) {
fputs("------------------------\n"
"LATEST FOREIGN KEY ERROR\n"
"------------------------\n", file);
@@ -1278,26 +1277,26 @@ srv_printf_innodb_monitor(
"Number of rows inserted " ULINTPF
", updated " ULINTPF ", deleted " ULINTPF
", read " ULINTPF "\n",
- srv_n_rows_inserted,
- srv_n_rows_updated,
- srv_n_rows_deleted,
- srv_n_rows_read);
+ (ulint) srv_stats.n_rows_inserted,
+ (ulint) srv_stats.n_rows_updated,
+ (ulint) srv_stats.n_rows_deleted,
+ (ulint) srv_stats.n_rows_read);
fprintf(file,
"%.2f inserts/s, %.2f updates/s,"
" %.2f deletes/s, %.2f reads/s\n",
- (srv_n_rows_inserted - srv_n_rows_inserted_old)
+ ((ulint) srv_stats.n_rows_inserted - srv_n_rows_inserted_old)
/ time_elapsed,
- (srv_n_rows_updated - srv_n_rows_updated_old)
+ ((ulint) srv_stats.n_rows_updated - srv_n_rows_updated_old)
/ time_elapsed,
- (srv_n_rows_deleted - srv_n_rows_deleted_old)
+ ((ulint) srv_stats.n_rows_deleted - srv_n_rows_deleted_old)
/ time_elapsed,
- (srv_n_rows_read - srv_n_rows_read_old)
+ ((ulint) srv_stats.n_rows_read - srv_n_rows_read_old)
/ time_elapsed);
- srv_n_rows_inserted_old = srv_n_rows_inserted;
- srv_n_rows_updated_old = srv_n_rows_updated;
- srv_n_rows_deleted_old = srv_n_rows_deleted;
- srv_n_rows_read_old = srv_n_rows_read;
+ srv_n_rows_inserted_old = srv_stats.n_rows_inserted;
+ srv_n_rows_updated_old = srv_stats.n_rows_updated;
+ srv_n_rows_deleted_old = srv_stats.n_rows_deleted;
+ srv_n_rows_read_old = srv_stats.n_rows_read;
fputs("----------------------------\n"
"END OF INNODB MONITOR OUTPUT\n"
@@ -1327,84 +1326,156 @@ srv_export_innodb_status(void)
mutex_enter(&srv_innodb_monitor_mutex);
- export_vars.innodb_data_pending_reads
- = os_n_pending_reads;
- export_vars.innodb_data_pending_writes
- = os_n_pending_writes;
- export_vars.innodb_data_pending_fsyncs
- = fil_n_pending_log_flushes
+ export_vars.innodb_data_pending_reads =
+ os_n_pending_reads;
+
+ export_vars.innodb_data_pending_writes =
+ os_n_pending_writes;
+
+ export_vars.innodb_data_pending_fsyncs =
+ fil_n_pending_log_flushes
+ fil_n_pending_tablespace_flushes;
+
export_vars.innodb_data_fsyncs = os_n_fsyncs;
- export_vars.innodb_data_read = srv_data_read;
+
+ export_vars.innodb_data_read = srv_stats.data_read;
+
export_vars.innodb_data_reads = os_n_file_reads;
+
export_vars.innodb_data_writes = os_n_file_writes;
- export_vars.innodb_data_written = srv_data_written;
+
+ export_vars.innodb_data_written = srv_stats.data_written;
+
export_vars.innodb_buffer_pool_read_requests = stat.n_page_gets;
- export_vars.innodb_buffer_pool_write_requests
- = srv_buf_pool_write_requests;
- export_vars.innodb_buffer_pool_wait_free = srv_buf_pool_wait_free;
- export_vars.innodb_buffer_pool_pages_flushed = srv_buf_pool_flushed;
- export_vars.innodb_buffer_pool_reads = srv_buf_pool_reads;
- export_vars.innodb_buffer_pool_read_ahead_rnd
- = stat.n_ra_pages_read_rnd;
- export_vars.innodb_buffer_pool_read_ahead
- = stat.n_ra_pages_read;
- export_vars.innodb_buffer_pool_read_ahead_evicted
- = stat.n_ra_pages_evicted;
+
+ export_vars.innodb_buffer_pool_write_requests =
+ srv_stats.buf_pool_write_requests;
+
+ export_vars.innodb_buffer_pool_wait_free =
+ srv_stats.buf_pool_wait_free;
+
+ export_vars.innodb_buffer_pool_pages_flushed =
+ srv_stats.buf_pool_flushed;
+
+ export_vars.innodb_buffer_pool_reads = srv_stats.buf_pool_reads;
+
+ export_vars.innodb_buffer_pool_read_ahead_rnd =
+ stat.n_ra_pages_read_rnd;
+
+ export_vars.innodb_buffer_pool_read_ahead =
+ stat.n_ra_pages_read;
+
+ export_vars.innodb_buffer_pool_read_ahead_evicted =
+ stat.n_ra_pages_evicted;
+
export_vars.innodb_buffer_pool_pages_data = LRU_len;
+
export_vars.innodb_buffer_pool_bytes_data =
buf_pools_list_size.LRU_bytes
+ buf_pools_list_size.unzip_LRU_bytes;
+
export_vars.innodb_buffer_pool_pages_dirty = flush_list_len;
+
export_vars.innodb_buffer_pool_bytes_dirty =
buf_pools_list_size.flush_list_bytes;
+
export_vars.innodb_buffer_pool_pages_free = free_len;
+
#ifdef UNIV_DEBUG
- export_vars.innodb_buffer_pool_pages_latched
- = buf_get_latched_pages_number();
+ export_vars.innodb_buffer_pool_pages_latched =
+ buf_get_latched_pages_number();
#endif /* UNIV_DEBUG */
export_vars.innodb_buffer_pool_pages_total = buf_pool_get_n_pages();
- export_vars.innodb_buffer_pool_pages_misc
- = buf_pool_get_n_pages() - LRU_len - free_len;
+ export_vars.innodb_buffer_pool_pages_misc =
+ buf_pool_get_n_pages() - LRU_len - free_len;
+
#ifdef HAVE_ATOMIC_BUILTINS
export_vars.innodb_have_atomic_builtins = 1;
#else
export_vars.innodb_have_atomic_builtins = 0;
#endif
export_vars.innodb_page_size = UNIV_PAGE_SIZE;
- export_vars.innodb_log_waits = srv_log_waits;
- export_vars.innodb_os_log_written = srv_os_log_written;
+
+ export_vars.innodb_log_waits = srv_stats.log_waits;
+
+ export_vars.innodb_os_log_written = srv_stats.os_log_written;
+
export_vars.innodb_os_log_fsyncs = fil_n_log_flushes;
+
export_vars.innodb_os_log_pending_fsyncs = fil_n_pending_log_flushes;
- export_vars.innodb_os_log_pending_writes = srv_os_log_pending_writes;
- export_vars.innodb_log_write_requests = srv_log_write_requests;
- export_vars.innodb_log_writes = srv_log_writes;
- export_vars.innodb_dblwr_pages_written = srv_dblwr_pages_written;
- export_vars.innodb_dblwr_writes = srv_dblwr_writes;
+
+ export_vars.innodb_os_log_pending_writes =
+ srv_stats.os_log_pending_writes;
+
+ export_vars.innodb_log_write_requests = srv_stats.log_write_requests;
+
+ export_vars.innodb_log_writes = srv_stats.log_writes;
+
+ export_vars.innodb_dblwr_pages_written =
+ srv_stats.dblwr_pages_written;
+
+ export_vars.innodb_dblwr_writes = srv_stats.dblwr_writes;
+
export_vars.innodb_pages_created = stat.n_pages_created;
+
export_vars.innodb_pages_read = stat.n_pages_read;
+
export_vars.innodb_pages_written = stat.n_pages_written;
- export_vars.innodb_row_lock_waits = srv_n_lock_wait_count;
- export_vars.innodb_row_lock_current_waits
- = srv_n_lock_wait_current_count;
- export_vars.innodb_row_lock_time = srv_n_lock_wait_time / 1000;
- if (srv_n_lock_wait_count > 0) {
+
+ export_vars.innodb_row_lock_waits = srv_stats.n_lock_wait_count;
+
+ export_vars.innodb_row_lock_current_waits =
+ srv_stats.n_lock_wait_current_count;
+
+ export_vars.innodb_row_lock_time = srv_stats.n_lock_wait_time / 1000;
+
+ if (srv_stats.n_lock_wait_count > 0) {
+
export_vars.innodb_row_lock_time_avg = (ulint)
- (srv_n_lock_wait_time / 1000 / srv_n_lock_wait_count);
+ (srv_stats.n_lock_wait_time
+ / 1000 / srv_stats.n_lock_wait_count);
+
} else {
export_vars.innodb_row_lock_time_avg = 0;
}
- export_vars.innodb_row_lock_time_max
- = srv_n_lock_max_wait_time / 1000;
- export_vars.innodb_rows_read = srv_n_rows_read;
- export_vars.innodb_rows_inserted = srv_n_rows_inserted;
- export_vars.innodb_rows_updated = srv_n_rows_updated;
- export_vars.innodb_rows_deleted = srv_n_rows_deleted;
+
+ export_vars.innodb_row_lock_time_max =
+ lock_sys->n_lock_max_wait_time / 1000;
+
+ export_vars.innodb_rows_read = srv_stats.n_rows_read;
+
+ export_vars.innodb_rows_inserted = srv_stats.n_rows_inserted;
+
+ export_vars.innodb_rows_updated = srv_stats.n_rows_updated;
+
+ export_vars.innodb_rows_deleted = srv_stats.n_rows_deleted;
+
export_vars.innodb_num_open_files = fil_n_file_opened;
- export_vars.innodb_truncated_status_writes = srv_truncated_status_writes;
+
+ export_vars.innodb_truncated_status_writes =
+ srv_truncated_status_writes;
+
export_vars.innodb_available_undo_logs = srv_available_undo_logs;
+#ifdef UNIV_DEBUG
+ if (purge_sys->done.trx_no == 0
+ || trx_sys->rw_max_trx_id < purge_sys->done.trx_no - 1) {
+ export_vars.innodb_purge_trx_id_age = 0;
+ } else {
+ export_vars.innodb_purge_trx_id_age =
+ trx_sys->rw_max_trx_id - purge_sys->done.trx_no + 1;
+ }
+
+ if (!purge_sys->view
+ || trx_sys->rw_max_trx_id < purge_sys->view->up_limit_id) {
+ export_vars.innodb_purge_view_trx_id_age = 0;
+ } else {
+ export_vars.innodb_purge_view_trx_id_age =
+ trx_sys->rw_max_trx_id - purge_sys->view->up_limit_id;
+ }
+#endif /* UNIV_DEBUG */
+
mutex_exit(&srv_innodb_monitor_mutex);
}
@@ -1428,14 +1499,16 @@ DECLARE_THREAD(srv_monitor_thread)(
ulint mutex_skipped;
ibool last_srv_print_monitor;
+ ut_ad(!srv_read_only_mode);
+
#ifdef UNIV_DEBUG_THREAD_CREATION
fprintf(stderr, "Lock timeout thread starts, id %lu\n",
os_thread_pf(os_thread_get_curr_id()));
-#endif
+#endif /* UNIV_DEBUG_THREAD_CREATION */
#ifdef UNIV_PFS_THREAD
pfs_register_thread(srv_monitor_thread_key);
-#endif
+#endif /* UNIV_PFS_THREAD */
srv_monitor_active = TRUE;
UT_NOT_USED(arg);
@@ -1484,7 +1557,10 @@ loop:
}
- if (srv_innodb_status) {
+ /* We don't create the temp files or associated
+ mutexes in read-only-mode */
+
+ if (!srv_read_only_mode && srv_innodb_status) {
mutex_enter(&srv_monitor_file_mutex);
rewind(srv_monitor_file);
if (!srv_printf_innodb_monitor(srv_monitor_file,
@@ -1601,16 +1677,18 @@ DECLARE_THREAD(srv_error_monitor_thread)(
const void* sema = NULL;
const void* old_sema = NULL;
+ ut_ad(!srv_read_only_mode);
+
old_lsn = srv_start_lsn;
#ifdef UNIV_DEBUG_THREAD_CREATION
fprintf(stderr, "Error monitor thread starts, id %lu\n",
os_thread_pf(os_thread_get_curr_id()));
-#endif
+#endif /* UNIV_DEBUG_THREAD_CREATION */
#ifdef UNIV_PFS_THREAD
pfs_register_thread(srv_error_monitor_thread_key);
-#endif
+#endif /* UNIV_PFS_THREAD */
srv_error_monitor_active = TRUE;
loop:
@@ -1644,9 +1722,6 @@ loop:
eviction policy. */
buf_LRU_stat_update();
- /* Update the statistics collected for flush rate policy. */
- buf_flush_stat_update();
-
/* In case mutex_exit is not a memory barrier, it is
theoretically possible some threads are left waiting though
the semaphore is already released. Wake up those threads: */
@@ -1704,7 +1779,7 @@ void
srv_inc_activity_count(void)
/*========================*/
{
- ++srv_sys->activity_count;
+ srv_sys->activity_count.inc();
}
/**********************************************************************//**
@@ -1717,12 +1792,15 @@ srv_thread_type
srv_get_active_thread_type(void)
/*============================*/
{
- ulint i;
srv_thread_type ret = SRV_NONE;
+ if (srv_read_only_mode) {
+ return(SRV_NONE);
+ }
+
srv_sys_mutex_enter();
- for (i = SRV_WORKER; i <= SRV_MASTER; ++i) {
+ for (ulint i = SRV_WORKER; i <= SRV_MASTER; ++i) {
if (srv_sys->n_threads_active[i] != 0) {
ret = static_cast<srv_thread_type>(i);
break;
@@ -1734,6 +1812,7 @@ srv_get_active_thread_type(void)
/* Check only on shutdown. */
if (ret == SRV_NONE
&& srv_shutdown_state != SRV_SHUTDOWN_NONE
+ && trx_purge_state() != PURGE_STATE_DISABLED
&& trx_purge_state() != PURGE_STATE_EXIT) {
ret = SRV_PURGE;
@@ -1753,20 +1832,25 @@ srv_any_background_threads_are_active(void)
{
const char* thread_active = NULL;
- if (srv_error_monitor_active) {
+ if (srv_read_only_mode) {
+ return(NULL);
+ } else if (srv_error_monitor_active) {
thread_active = "srv_error_monitor_thread";
- } else if (srv_lock_timeout_active) {
+ } else if (lock_sys->timeout_thread_active) {
thread_active = "srv_lock_timeout thread";
} else if (srv_monitor_active) {
thread_active = "srv_monitor_thread";
} else if (srv_buf_dump_thread_active) {
thread_active = "buf_dump_thread";
+ } else if (srv_dict_stats_thread_active) {
+ thread_active = "dict_stats_thread";
}
os_event_set(srv_error_event);
os_event_set(srv_monitor_event);
- os_event_set(srv_timeout_event);
os_event_set(srv_buf_dump_event);
+ os_event_set(lock_sys->timeout_event);
+ os_event_set(dict_stats_event);
return(thread_active);
}
@@ -1782,6 +1866,10 @@ void
srv_active_wake_master_thread(void)
/*===============================*/
{
+ if (srv_read_only_mode) {
+ return;
+ }
+
ut_ad(!srv_sys_mutex_own());
srv_inc_activity_count();
@@ -1883,7 +1971,8 @@ srv_sync_log_buffer_in_background(void)
time_t current_time = time(NULL);
srv_main_thread_op_info = "flushing log";
- if (difftime(current_time, srv_last_log_flush_time) >= 1) {
+ if (difftime(current_time, srv_last_log_flush_time)
+ >= srv_flush_log_at_timeout) {
log_buffer_sync_in_background(TRUE);
srv_last_log_flush_time = current_time;
srv_log_writes_and_flush++;
@@ -2000,7 +2089,7 @@ srv_master_do_active_tasks(void)
/* Do an ibuf merge */
srv_main_thread_op_info = "doing insert buffer merge";
counter_time = ut_time_us(NULL);
- ibuf_contract_in_background(FALSE);
+ ibuf_contract_in_background(0, FALSE);
MONITOR_INC_TIME_IN_MICRO_SECS(
MONITOR_SRV_IBUF_MERGE_MICROSECOND, counter_time);
@@ -2092,7 +2181,7 @@ srv_master_do_idle_tasks(void)
/* Do an ibuf merge */
counter_time = ut_time_us(NULL);
srv_main_thread_op_info = "doing insert buffer merge";
- ibuf_contract_in_background(TRUE);
+ ibuf_contract_in_background(0, TRUE);
MONITOR_INC_TIME_IN_MICRO_SECS(
MONITOR_SRV_IBUF_MERGE_MICROSECOND, counter_time);
@@ -2139,6 +2228,8 @@ srv_master_do_shutdown_tasks(
ulint n_bytes_merged = 0;
ulint n_tables_to_drop = 0;
+ ut_ad(!srv_read_only_mode);
+
++srv_main_shutdown_loops;
ut_a(srv_shutdown_state > 0);
@@ -2166,7 +2257,7 @@ srv_master_do_shutdown_tasks(
/* Do an ibuf merge */
srv_main_thread_op_info = "doing insert buffer merge";
- n_bytes_merged = ibuf_contract_in_background(TRUE);
+ n_bytes_merged = ibuf_contract_in_background(0, TRUE);
/* Flush logs if needed */
srv_sync_log_buffer_in_background();
@@ -2214,14 +2305,16 @@ DECLARE_THREAD(srv_master_thread)(
ulint old_activity_count = srv_get_activity_count();
ib_time_t last_print_time;
+ ut_ad(!srv_read_only_mode);
+
#ifdef UNIV_DEBUG_THREAD_CREATION
fprintf(stderr, "Master thread starts, id %lu\n",
os_thread_pf(os_thread_get_curr_id()));
-#endif
+#endif /* UNIV_DEBUG_THREAD_CREATION */
#ifdef UNIV_PFS_THREAD
pfs_register_thread(srv_master_thread_key);
-#endif
+#endif /* UNIV_PFS_THREAD */
srv_main_thread_process_no = os_proc_get_number();
srv_main_thread_id = os_thread_pf(os_thread_get_curr_id());
@@ -2314,6 +2407,7 @@ srv_task_execute(void)
{
que_thr_t* thr = NULL;
+ ut_ad(!srv_read_only_mode);
ut_a(srv_force_recovery < SRV_FORCE_NO_BACKGROUND);
mutex_enter(&srv_sys->tasks_mutex);
@@ -2352,6 +2446,7 @@ DECLARE_THREAD(srv_worker_thread)(
{
srv_slot_t* slot;
+ ut_ad(!srv_read_only_mode);
ut_a(srv_force_recovery < SRV_FORCE_NO_BACKGROUND);
#ifdef UNIV_DEBUG_THREAD_CREATION
@@ -2432,6 +2527,7 @@ srv_do_purge(
ulint old_activity_count = srv_get_activity_count();
ut_a(n_threads > 0);
+ ut_ad(!srv_read_only_mode);
/* Purge until there are no more records to purge and there is
no change in configuration or server state. If the user has
@@ -2478,7 +2574,7 @@ srv_do_purge(
n_pages_purged = trx_purge(
n_use_threads, srv_purge_batch_size, false);
- if (!(count++ % TRX_SYS_N_RSEGS) || n_pages_purged == 0) {
+ if (!(count++ % TRX_SYS_N_RSEGS)) {
/* Force a truncate of the history list. */
trx_purge(1, srv_purge_batch_size, true);
}
@@ -2501,14 +2597,9 @@ srv_purge_coordinator_suspend(
ulint rseg_history_len) /*!< in: history list length
before last purge */
{
+ ut_ad(!srv_read_only_mode);
ut_a(slot->type == SRV_PURGE);
- rw_lock_x_lock(&purge_sys->latch);
-
- purge_sys->running = false;
-
- rw_lock_x_unlock(&purge_sys->latch);
-
bool stop = false;
/** Maximum wait time on the purge event, in micro-seconds. */
@@ -2518,6 +2609,12 @@ srv_purge_coordinator_suspend(
ulint ret;
ib_int64_t sig_count = srv_suspend_thread(slot);
+ rw_lock_x_lock(&purge_sys->latch);
+
+ purge_sys->running = false;
+
+ rw_lock_x_unlock(&purge_sys->latch);
+
/* We don't wait right away on the the non-timed wait because
we want to signal the thread that wants to suspend purge. */
@@ -2528,8 +2625,8 @@ srv_purge_coordinator_suspend(
ret = os_event_wait_time_low(
slot->event, SRV_PURGE_MAX_TIMEOUT, sig_count);
} else {
- /* We don't want to waste time waiting if the
- history list has increased by the time we get here
+ /* We don't want to waste time waiting, if the
+ history list increased by the time we got here,
unless purge has been stopped. */
ret = 0;
}
@@ -2596,6 +2693,7 @@ DECLARE_THREAD(srv_purge_coordinator_thread)(
srv_slot_t* slot;
ulint n_total_purged = ULINT_UNDEFINED;
+ ut_ad(!srv_read_only_mode);
ut_a(srv_n_purge_threads >= 1);
ut_a(trx_purge_state() == PURGE_STATE_INIT);
ut_a(srv_force_recovery < SRV_FORCE_NO_BACKGROUND);
@@ -2703,6 +2801,7 @@ srv_que_task_enqueue_low(
/*=====================*/
que_thr_t* thr) /*!< in: query thread */
{
+ ut_ad(!srv_read_only_mode);
mutex_enter(&srv_sys->tasks_mutex);
UT_LIST_ADD_LAST(queue, srv_sys->tasks, thr);
@@ -2722,6 +2821,8 @@ srv_get_task_queue_length(void)
{
ulint n_tasks;
+ ut_ad(!srv_read_only_mode);
+
mutex_enter(&srv_sys->tasks_mutex);
n_tasks = UT_LIST_GET_LEN(srv_sys->tasks);
@@ -2738,6 +2839,8 @@ void
srv_purge_wakeup(void)
/*==================*/
{
+ ut_ad(!srv_read_only_mode);
+
if (srv_force_recovery < SRV_FORCE_NO_BACKGROUND) {
srv_release_threads(SRV_PURGE, 1);
diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc
index 9d1600cff23..efe9f094c0d 100644
--- a/storage/innobase/srv/srv0start.cc
+++ b/storage/innobase/srv/srv0start.cc
@@ -71,6 +71,7 @@ Created 2/16/1996 Heikki Tuuri
# include "buf0rea.h"
# include "dict0boot.h"
# include "dict0load.h"
+# include "dict0stats_bg.h"
# include "que0que.h"
# include "usr0sess.h"
# include "lock0lock.h"
@@ -87,9 +88,9 @@ Created 2/16/1996 Heikki Tuuri
# include "row0row.h"
# include "row0mysql.h"
# include "btr0pcur.h"
-# include "os0sync.h" /* for INNODB_RW_LOCKS_USE_ATOMICS */
-# include "zlib.h" /* for ZLIB_VERSION */
-# include "buf0dblwr.h"
+# include "os0sync.h"
+# include "zlib.h"
+# include "ut0crc32.h"
/** Log sequence number immediately after startup */
UNIV_INTERN lsn_t srv_start_lsn;
@@ -188,6 +189,63 @@ srv_parse_megabytes(
}
/*********************************************************************//**
+Check if a file can be opened in read-write mode.
+@return true if it doesn't exist or can be opened in rw mode. */
+static
+bool
+srv_file_check_mode(
+/*================*/
+ const char* name) /*!< in: filename to check */
+{
+ os_file_stat_t stat;
+
+ memset(&stat, 0x0, sizeof(stat));
+
+ dberr_t err = os_file_get_status(name, &stat, true);
+
+ if (err == DB_FAIL) {
+
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "os_file_get_status() failed on '%s'. Can't determine "
+ "file permissions", name);
+
+ return(false);
+
+ } else if (err == DB_SUCCESS) {
+
+ /* Note: stat.rw_perm is only valid of files */
+
+ if (stat.type == OS_FILE_TYPE_FILE) {
+ if (!stat.rw_perm) {
+
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "%s can't be opened in %s mode",
+ srv_read_only_mode
+ ? "read-write" : "read",
+ name);
+
+ return(false);
+ }
+ } else {
+ /* Not a regular file, bail out. */
+
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "'%s' not a regular file.", name);
+
+ return(false);
+ }
+ } else {
+
+ /* This is OK. If the file create fails on RO media, there
+ is nothing we can do. */
+
+ ut_a(err == DB_NOT_FOUND);
+ }
+
+ return(true);
+}
+
+/*********************************************************************//**
Reads the data files and their sizes from a character string given in
the .cnf file.
@return TRUE if ok, FALSE on parse error */
@@ -376,79 +434,6 @@ srv_parse_data_file_paths_and_sizes(
}
/*********************************************************************//**
-Reads log group home directories from a character string given in
-the .cnf file.
-@return TRUE if ok, FALSE on parse error */
-UNIV_INTERN
-ibool
-srv_parse_log_group_home_dirs(
-/*==========================*/
- char* str) /*!< in/out: character string */
-{
- char* input_str;
- char* path;
- ulint i = 0;
-
- srv_log_group_home_dirs = NULL;
-
- input_str = str;
-
- /* First calculate the number of directories and check syntax:
- path;path;... */
-
- while (*str != '\0') {
- path = str;
-
- while (*str != ';' && *str != '\0') {
- str++;
- }
-
- i++;
-
- if (*str == ';') {
- str++;
- } else if (*str != '\0') {
-
- return(FALSE);
- }
- }
-
- if (i != 1) {
- /* If innodb_log_group_home_dir was defined it must
- contain exactly one path definition under current MySQL */
-
- return(FALSE);
- }
-
- srv_log_group_home_dirs = static_cast<char**>(
- malloc(i * sizeof *srv_log_group_home_dirs));
-
- /* Then store the actual values to our array */
-
- str = input_str;
- i = 0;
-
- while (*str != '\0') {
- path = str;
-
- while (*str != ';' && *str != '\0') {
- str++;
- }
-
- if (*str == ';') {
- *str = '\0';
- str++;
- }
-
- srv_log_group_home_dirs[i] = path;
-
- i++;
- }
-
- return(TRUE);
-}
-
-/*********************************************************************//**
Frees the memory allocated by srv_parse_data_file_paths_and_sizes()
and srv_parse_log_group_home_dirs(). */
UNIV_INTERN
@@ -462,8 +447,6 @@ srv_free_paths_and_sizes(void)
srv_data_file_sizes = NULL;
free(srv_data_file_is_raw_partition);
srv_data_file_is_raw_partition = NULL;
- free(srv_log_group_home_dirs);
- srv_log_group_home_dirs = NULL;
}
#ifndef UNIV_HOTBACKUP
@@ -526,175 +509,230 @@ srv_normalize_path_for_win(
#ifndef UNIV_HOTBACKUP
/*********************************************************************//**
-Creates or opens the log files and closes them.
+Creates a log file.
@return DB_SUCCESS or error code */
-static
-ulint
-open_or_create_log_file(
-/*====================*/
- ibool create_new_db, /*!< in: TRUE if we should create a
- new database */
- ibool* log_file_created, /*!< out: TRUE if new log file
- created */
- ibool log_file_has_been_opened,/*!< in: TRUE if a log file has been
- opened before: then it is an error
- to try to create another log file */
- ulint k, /*!< in: log group number */
- ulint i) /*!< in: log file number in group */
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
+create_log_file(
+/*============*/
+ os_file_t* file, /*!< out: file handle */
+ const char* name) /*!< in: log file name */
{
ibool ret;
- os_offset_t size;
- char name[10000];
- ulint dirnamelen;
- UT_NOT_USED(create_new_db);
+ *file = os_file_create(
+ innodb_file_log_key, name,
+ OS_FILE_CREATE, OS_FILE_NORMAL, OS_LOG_FILE, &ret);
- *log_file_created = FALSE;
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Setting log file %s size to %lu MB",
+ name, (ulong) srv_log_file_size
+ >> (20 - UNIV_PAGE_SIZE_SHIFT));
- srv_normalize_path_for_win(srv_log_group_home_dirs[k]);
+ ret = os_file_set_size(name, *file,
+ (os_offset_t) srv_log_file_size
+ << UNIV_PAGE_SIZE_SHIFT);
+ if (!ret) {
+ ib_logf(IB_LOG_LEVEL_ERROR, "Error in creating %s", name);
+ return(DB_ERROR);
+ }
- dirnamelen = strlen(srv_log_group_home_dirs[k]);
- ut_a(dirnamelen < (sizeof name) - 10 - sizeof "ib_logfile");
- memcpy(name, srv_log_group_home_dirs[k], dirnamelen);
+ ret = os_file_close(*file);
+ ut_a(ret);
- /* Add a path separator if needed. */
- if (dirnamelen && name[dirnamelen - 1] != SRV_PATH_SEPARATOR) {
- name[dirnamelen++] = SRV_PATH_SEPARATOR;
+ return(DB_SUCCESS);
+}
+
+/** Initial number of the first redo log file */
+#define INIT_LOG_FILE0 (SRV_N_LOG_FILES_MAX + 1)
+
+#ifdef DBUG_OFF
+# define RECOVERY_CRASH(x) do {} while(0)
+#else
+# define RECOVERY_CRASH(x) do { \
+ if (srv_force_recovery_crash == x) { \
+ fprintf(stderr, "innodb_force_recovery_crash=%lu\n", \
+ srv_force_recovery_crash); \
+ fflush(stderr); \
+ exit(3); \
+ } \
+} while (0)
+#endif
+
+/*********************************************************************//**
+Creates all log files.
+@return DB_SUCCESS or error code */
+static
+dberr_t
+create_log_files(
+/*=============*/
+ char* logfilename, /*!< in/out: buffer for log file name */
+ size_t dirnamelen, /*!< in: length of the directory path */
+ lsn_t lsn, /*!< in: FIL_PAGE_FILE_FLUSH_LSN value */
+ char*& logfile0) /*!< out: name of the first log file */
+{
+ if (srv_read_only_mode) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Cannot create log files in read-only mode");
+ return(DB_READ_ONLY);
}
- sprintf(name + dirnamelen, "%s%lu", "ib_logfile", (ulong) i);
+ /* Remove any old log files. */
+ for (unsigned i = 0; i <= INIT_LOG_FILE0; i++) {
+ sprintf(logfilename + dirnamelen, "ib_logfile%u", i);
- files[i] = os_file_create(innodb_file_log_key, name,
- OS_FILE_CREATE, OS_FILE_NORMAL,
- OS_LOG_FILE, &ret);
- if (ret == FALSE) {
- if (os_file_get_last_error(FALSE) != OS_FILE_ALREADY_EXISTS
-#ifdef UNIV_AIX
- /* AIX 5.1 after security patch ML7 may have errno set
- to 0 here, which causes our function to return 100;
- work around that AIX problem */
- && os_file_get_last_error(FALSE) != 100
+ /* Ignore errors about non-existent files or files
+ that cannot be removed. The create_log_file() will
+ return an error when the file exists. */
+#ifdef __WIN__
+ DeleteFile((LPCTSTR) logfilename);
+#else
+ unlink(logfilename);
#endif
- ) {
- fprintf(stderr,
- "InnoDB: Error in creating"
- " or opening %s\n", name);
+ /* Crashing after deleting the first
+ file should be recoverable. The buffer
+ pool was clean, and we can simply create
+ all log files from the scratch. */
+ RECOVERY_CRASH(6);
+ }
- return(DB_ERROR);
- }
+ ut_ad(!buf_pool_check_no_pending_io());
- files[i] = os_file_create(innodb_file_log_key, name,
- OS_FILE_OPEN, OS_FILE_AIO,
- OS_LOG_FILE, &ret);
- if (!ret) {
- fprintf(stderr,
- "InnoDB: Error in opening %s\n", name);
+ RECOVERY_CRASH(7);
- return(DB_ERROR);
+ for (unsigned i = 0; i < srv_n_log_files; i++) {
+ sprintf(logfilename + dirnamelen,
+ "ib_logfile%u", i ? i : INIT_LOG_FILE0);
+
+ dberr_t err = create_log_file(&files[i], logfilename);
+
+ if (err != DB_SUCCESS) {
+ return(err);
}
+ }
- size = os_file_get_size(files[i]);
- ut_a(size != (os_offset_t) -1);
+ RECOVERY_CRASH(8);
- if (UNIV_UNLIKELY(size != (os_offset_t) srv_log_file_size
- << UNIV_PAGE_SIZE_SHIFT)) {
+ /* We did not create the first log file initially as
+ ib_logfile0, so that crash recovery cannot find it until it
+ has been completed and renamed. */
+ sprintf(logfilename + dirnamelen, "ib_logfile%u", INIT_LOG_FILE0);
- fprintf(stderr,
- "InnoDB: Error: log file %s is"
- " of different size "UINT64PF" bytes\n"
- "InnoDB: than specified in the .cnf"
- " file "UINT64PF" bytes!\n",
- name, size,
- (os_offset_t) srv_log_file_size
- << UNIV_PAGE_SIZE_SHIFT);
+ fil_space_create(
+ logfilename, SRV_LOG_SPACE_FIRST_ID,
+ fsp_flags_set_page_size(0, UNIV_PAGE_SIZE),
+ FIL_LOG);
+ ut_a(fil_validate());
- return(DB_ERROR);
+ logfile0 = fil_node_create(
+ logfilename, (ulint) srv_log_file_size,
+ SRV_LOG_SPACE_FIRST_ID, FALSE);
+ ut_a(logfile0);
+
+ for (unsigned i = 1; i < srv_n_log_files; i++) {
+ sprintf(logfilename + dirnamelen, "ib_logfile%u", i);
+
+ if (!fil_node_create(
+ logfilename,
+ (ulint) srv_log_file_size,
+ SRV_LOG_SPACE_FIRST_ID, FALSE)) {
+ ut_error;
}
- } else {
- *log_file_created = TRUE;
+ }
- ut_print_timestamp(stderr);
+ log_group_init(0, srv_n_log_files,
+ srv_log_file_size * UNIV_PAGE_SIZE,
+ SRV_LOG_SPACE_FIRST_ID,
+ SRV_LOG_SPACE_FIRST_ID + 1);
- fprintf(stderr,
- " InnoDB: Log file %s did not exist:"
- " new to be created\n",
- name);
- if (log_file_has_been_opened) {
+ fil_open_log_and_system_tablespace_files();
- return(DB_ERROR);
- }
+ /* Create a log checkpoint. */
+ mutex_enter(&log_sys->mutex);
+ ut_d(recv_no_log_write = FALSE);
+ recv_reset_logs(lsn);
+ mutex_exit(&log_sys->mutex);
- fprintf(stderr, "InnoDB: Setting log file %s size to %lu MB\n",
- name, (ulong) srv_log_file_size
- >> (20 - UNIV_PAGE_SIZE_SHIFT));
+ return(DB_SUCCESS);
+}
- fprintf(stderr,
- "InnoDB: Database physically writes the file"
- " full: wait...\n");
+/*********************************************************************//**
+Renames the first log file. */
+static
+void
+create_log_files_rename(
+/*====================*/
+ char* logfilename, /*!< in/out: buffer for log file name */
+ size_t dirnamelen, /*!< in: length of the directory path */
+ lsn_t lsn, /*!< in: FIL_PAGE_FILE_FLUSH_LSN value */
+ char* logfile0) /*!< in/out: name of the first log file */
+{
+ /* If innodb_flush_method=O_DSYNC,
+ we need to explicitly flush the log buffers. */
+ fil_flush(SRV_LOG_SPACE_FIRST_ID);
+ /* Close the log files, so that we can rename
+ the first one. */
+ fil_close_log_files(false);
- ret = os_file_set_size(name, files[i],
- (os_offset_t) srv_log_file_size
- << UNIV_PAGE_SIZE_SHIFT);
- if (!ret) {
- fprintf(stderr,
- "InnoDB: Error in creating %s:"
- " probably out of disk space\n",
- name);
+ /* Rename the first log file, now that a log
+ checkpoint has been created. */
+ sprintf(logfilename + dirnamelen, "ib_logfile%u", 0);
- return(DB_ERROR);
- }
- }
+ RECOVERY_CRASH(9);
- ret = os_file_close(files[i]);
- ut_a(ret);
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Renaming log file %s to %s", logfile0, logfilename);
- if (i == 0) {
- /* Create in memory the file space object
- which is for this log group */
+ mutex_enter(&log_sys->mutex);
+ ut_ad(strlen(logfile0) == 2 + strlen(logfilename));
+ ibool success = os_file_rename(
+ innodb_file_log_key, logfile0, logfilename);
+ ut_a(success);
- fil_space_create(name,
- 2 * k + SRV_LOG_SPACE_FIRST_ID,
- fsp_flags_set_page_size(0, UNIV_PAGE_SIZE),
- FIL_LOG);
- }
+ RECOVERY_CRASH(10);
- ut_a(fil_validate());
+ /* Replace the first file with ib_logfile0. */
+ strcpy(logfile0, logfilename);
+ mutex_exit(&log_sys->mutex);
- /* srv_log_file_size is measured in pages; if page size is 16KB,
- then we have a limit of 64TB on 32 bit systems */
- ut_a(srv_log_file_size <= ULINT_MAX);
+ fil_open_log_and_system_tablespace_files();
- fil_node_create(name, (ulint) srv_log_file_size,
- 2 * k + SRV_LOG_SPACE_FIRST_ID, FALSE);
-#ifdef UNIV_LOG_ARCHIVE
- /* If this is the first log group, create the file space object
- for archived logs.
- Under MySQL, no archiving ever done. */
+ ib_logf(IB_LOG_LEVEL_WARN, "New log files created, LSN=" LSN_PF, lsn);
+}
- if (k == 0 && i == 0) {
- arch_space_id = 2 * k + 1 + SRV_LOG_SPACE_FIRST_ID;
+/*********************************************************************//**
+Opens a log file.
+@return DB_SUCCESS or error code */
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
+open_log_file(
+/*==========*/
+ os_file_t* file, /*!< out: file handle */
+ const char* name, /*!< in: log file name */
+ os_offset_t* size) /*!< out: file size */
+{
+ ibool ret;
- fil_space_create("arch_log_space", arch_space_id, 0, FIL_LOG);
- } else {
- arch_space_id = ULINT_UNDEFINED;
- }
-#endif /* UNIV_LOG_ARCHIVE */
- if (i == 0) {
- log_group_init(k, srv_n_log_files,
- srv_log_file_size * UNIV_PAGE_SIZE,
- 2 * k + SRV_LOG_SPACE_FIRST_ID,
- SRV_LOG_SPACE_FIRST_ID + 1); /* dummy arch
- space id */
+ *file = os_file_create(innodb_file_log_key, name,
+ OS_FILE_OPEN, OS_FILE_AIO,
+ OS_LOG_FILE, &ret);
+ if (!ret) {
+ ib_logf(IB_LOG_LEVEL_ERROR, "Unable to open '%s'", name);
+ return(DB_ERROR);
}
+ *size = os_file_get_size(*file);
+
+ ret = os_file_close(*file);
+ ut_a(ret);
return(DB_SUCCESS);
}
/*********************************************************************//**
Creates or opens database data files and closes them.
@return DB_SUCCESS or error code */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
open_or_create_data_files(
/*======================*/
ibool* create_new_db, /*!< out: TRUE if new database should be
@@ -718,13 +756,16 @@ open_or_create_data_files(
ibool one_created = FALSE;
os_offset_t size;
ulint flags;
+ ulint space;
ulint rounded_size_pages;
char name[10000];
if (srv_n_data_files >= 1000) {
- fprintf(stderr, "InnoDB: can only have < 1000 data files\n"
- "InnoDB: you have defined %lu\n",
- (ulong) srv_n_data_files);
+
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Can only have < 1000 data files, you have "
+ "defined %lu", (ulong) srv_n_data_files);
+
return(DB_ERROR);
}
@@ -742,7 +783,9 @@ open_or_create_data_files(
ut_a(dirnamelen + strlen(srv_data_file_names[i])
< (sizeof name) - 1);
+
memcpy(name, srv_data_home, dirnamelen);
+
/* Add a path separator if needed. */
if (dirnamelen && name[dirnamelen - 1] != SRV_PATH_SEPARATOR) {
name[dirnamelen++] = SRV_PATH_SEPARATOR;
@@ -750,46 +793,67 @@ open_or_create_data_files(
strcpy(name + dirnamelen, srv_data_file_names[i]);
- if (srv_data_file_is_raw_partition[i] == 0) {
+ /* Note: It will return true if the file doesn' exist. */
+
+ if (!srv_file_check_mode(name)) {
+
+ return(DB_FAIL);
+
+ } else if (srv_data_file_is_raw_partition[i] == 0) {
/* First we try to create the file: if it already
exists, ret will get value FALSE */
- files[i] = os_file_create(innodb_file_data_key,
- name, OS_FILE_CREATE,
- OS_FILE_NORMAL,
- OS_DATA_FILE, &ret);
+ files[i] = os_file_create(
+ innodb_file_data_key, name, OS_FILE_CREATE,
+ OS_FILE_NORMAL, OS_DATA_FILE, &ret);
+
+ if (srv_read_only_mode) {
+
+ if (ret) {
+ goto size_check;
+ }
- if (ret == FALSE && os_file_get_last_error(FALSE)
- != OS_FILE_ALREADY_EXISTS
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Opening %s failed!", name);
+
+ return(DB_ERROR);
+
+ } else if (!ret
+ && os_file_get_last_error(false)
+ != OS_FILE_ALREADY_EXISTS
#ifdef UNIV_AIX
- /* AIX 5.1 after security patch ML7 may have
- errno set to 0 here, which causes our function
- to return 100; work around that AIX problem */
- && os_file_get_last_error(FALSE) != 100
-#endif
+ /* AIX 5.1 after security patch ML7 may have
+ errno set to 0 here, which causes our
+ function to return 100; work around that
+ AIX problem */
+ && os_file_get_last_error(false) != 100
+#endif /* UNIV_AIX */
) {
- fprintf(stderr,
- "InnoDB: Error in creating"
- " or opening %s\n",
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Creating or opening %s failed!",
name);
return(DB_ERROR);
}
+
} else if (srv_data_file_is_raw_partition[i] == SRV_NEW_RAW) {
+
+ ut_a(!srv_read_only_mode);
+
/* The partition is opened, not created; then it is
written over */
srv_start_raw_disk_in_use = TRUE;
srv_created_new_raw = TRUE;
- files[i] = os_file_create(innodb_file_data_key,
- name, OS_FILE_OPEN_RAW,
- OS_FILE_NORMAL,
- OS_DATA_FILE, &ret);
+ files[i] = os_file_create(
+ innodb_file_data_key, name, OS_FILE_OPEN_RAW,
+ OS_FILE_NORMAL, OS_DATA_FILE, &ret);
+
if (!ret) {
- fprintf(stderr,
- "InnoDB: Error in opening %s\n", name);
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Error in opening %s", name);
return(DB_ERROR);
}
@@ -805,17 +869,15 @@ open_or_create_data_files(
/* We open the data file */
if (one_created) {
- fprintf(stderr,
- "InnoDB: Error: data files can only"
- " be added at the end\n");
- fprintf(stderr,
- "InnoDB: of a tablespace, but"
- " data file %s existed beforehand.\n",
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Data files can only be added at "
+ "the end of a tablespace, but "
+ "data file %s existed beforehand.",
name);
return(DB_ERROR);
}
-
if (srv_data_file_is_raw_partition[i] == SRV_OLD_RAW) {
+ ut_a(!srv_read_only_mode);
files[i] = os_file_create(
innodb_file_data_key,
name, OS_FILE_OPEN_RAW,
@@ -833,9 +895,11 @@ open_or_create_data_files(
}
if (!ret) {
- fprintf(stderr,
- "InnoDB: Error in opening %s\n", name);
- os_file_get_last_error(TRUE);
+
+ os_file_get_last_error(true);
+
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Can't open '%s'", name);
return(DB_ERROR);
}
@@ -845,6 +909,7 @@ open_or_create_data_files(
goto skip_size_check;
}
+size_check:
size = os_file_get_size(files[i]);
ut_a(size != (os_offset_t) -1);
/* Round size downward to megabytes */
@@ -860,16 +925,16 @@ open_or_create_data_files(
&& srv_last_file_size_max
< rounded_size_pages)) {
- fprintf(stderr,
- "InnoDB: Error: auto-extending"
- " data file %s is"
- " of a different size\n"
- "InnoDB: %lu pages (rounded"
- " down to MB) than specified"
- " in the .cnf file:\n"
- "InnoDB: initial %lu pages,"
- " max %lu (relevant if"
- " non-zero) pages!\n",
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "auto-extending "
+ "data file %s is "
+ "of a different size "
+ "%lu pages (rounded "
+ "down to MB) than specified "
+ "in the .cnf file: "
+ "initial %lu pages, "
+ "max %lu (relevant if "
+ "non-zero) pages!",
name,
(ulong) rounded_size_pages,
(ulong) srv_data_file_sizes[i],
@@ -884,13 +949,11 @@ open_or_create_data_files(
if (rounded_size_pages != srv_data_file_sizes[i]) {
- fprintf(stderr,
- "InnoDB: Error: data file %s"
- " is of a different size\n"
- "InnoDB: %lu pages"
- " (rounded down to MB)\n"
- "InnoDB: than specified"
- " in the .cnf file %lu pages!\n",
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Data file %s is of a different "
+ "size %lu pages (rounded down to MB) "
+ "than specified in the .cnf file "
+ "%lu pages!",
name,
(ulong) rounded_size_pages,
(ulong) srv_data_file_sizes[i]);
@@ -899,63 +962,65 @@ open_or_create_data_files(
}
skip_size_check:
fil_read_first_page(
- files[i], one_opened, &flags,
+ files[i], one_opened, &flags, &space,
#ifdef UNIV_LOG_ARCHIVE
min_arch_log_no, max_arch_log_no,
#endif /* UNIV_LOG_ARCHIVE */
min_flushed_lsn, max_flushed_lsn);
+ /* The first file of the system tablespace must
+ have space ID = TRX_SYS_SPACE. The FSP_SPACE_ID
+ field in files greater than ibdata1 are unreliable. */
+ ut_a(one_opened || space == TRX_SYS_SPACE);
+
+ /* Check the flags for the first system tablespace
+ file only. */
if (!one_opened
&& UNIV_PAGE_SIZE
!= fsp_flags_get_page_size(flags)) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Error: data file %s"
- " uses page size %lu,\n",
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Data file \"%s\" uses page size %lu,"
+ "but the start-up parameter "
+ "is --innodb-page-size=%lu",
name,
- fsp_flags_get_page_size(flags));
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: but the start-up parameter"
- " is innodb-page-size=%lu\n",
+ fsp_flags_get_page_size(flags),
UNIV_PAGE_SIZE);
return(DB_ERROR);
}
one_opened = TRUE;
- } else {
+ } else if (!srv_read_only_mode) {
/* We created the data file and now write it full of
zeros */
one_created = TRUE;
if (i > 0) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Data file %s did not"
- " exist: new to be created\n",
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Data file %s did not"
+ " exist: new to be created",
name);
} else {
- fprintf(stderr,
- "InnoDB: The first specified"
- " data file %s did not exist:\n"
- "InnoDB: a new database"
- " to be created!\n", name);
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "The first specified "
+ "data file %s did not exist: "
+ "a new database to be created!",
+ name);
+
*create_new_db = TRUE;
}
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Setting file %s size to %lu MB\n",
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Setting file %s size to %lu MB",
name,
(ulong) (srv_data_file_sizes[i]
>> (20 - UNIV_PAGE_SIZE_SHIFT)));
- fprintf(stderr,
- "InnoDB: Database physically writes the"
- " file full: wait...\n");
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Database physically writes the"
+ " file full: wait...");
ret = os_file_set_size(
name, files[i],
@@ -963,9 +1028,10 @@ skip_size_check:
<< UNIV_PAGE_SIZE_SHIFT);
if (!ret) {
- fprintf(stderr,
- "InnoDB: Error in creating %s:"
- " probably out of disk space\n", name);
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Error in creating %s: "
+ "probably out of disk space",
+ name);
return(DB_ERROR);
}
@@ -983,8 +1049,10 @@ skip_size_check:
ut_a(fil_validate());
- fil_node_create(name, srv_data_file_sizes[i], 0,
- srv_data_file_is_raw_partition[i] != 0);
+ if (!fil_node_create(name, srv_data_file_sizes[i], 0,
+ srv_data_file_is_raw_partition[i] != 0)) {
+ return(DB_ERROR);
+ }
}
return(DB_SUCCESS);
@@ -994,7 +1062,7 @@ skip_size_check:
Create undo tablespace.
@return DB_SUCCESS or error code */
static
-enum db_err
+dberr_t
srv_undo_tablespace_create(
/*=======================*/
const char* name, /*!< in: tablespace name */
@@ -1002,48 +1070,55 @@ srv_undo_tablespace_create(
{
os_file_t fh;
ibool ret;
- enum db_err err = DB_SUCCESS;
+ dberr_t err = DB_SUCCESS;
os_file_create_subdirs_if_needed(name);
fh = os_file_create(
- innodb_file_data_key, name, OS_FILE_CREATE,
+ innodb_file_data_key,
+ name,
+ srv_read_only_mode ? OS_FILE_OPEN : OS_FILE_CREATE,
OS_FILE_NORMAL, OS_DATA_FILE, &ret);
- if (ret == FALSE
- && os_file_get_last_error(FALSE) != OS_FILE_ALREADY_EXISTS
+ if (srv_read_only_mode && ret) {
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "%s opened in read-only mode", name);
+ } else if (ret == FALSE
+ && os_file_get_last_error(false) != OS_FILE_ALREADY_EXISTS
#ifdef UNIV_AIX
- /* AIX 5.1 after security patch ML7 may have
- errno set to 0 here, which causes our function
- to return 100; work around that AIX problem */
- && os_file_get_last_error(FALSE) != 100
-#endif
+ /* AIX 5.1 after security patch ML7 may have
+ errno set to 0 here, which causes our function
+ to return 100; work around that AIX problem */
+ && os_file_get_last_error(false) != 100
+#endif /* UNIV_AIX */
) {
- fprintf(stderr, "InnoDB: Error in creating %s\n", name);
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Can't create UNDO tablespace %s", name);
err = DB_ERROR;
} else {
+ ut_a(!srv_read_only_mode);
+
/* We created the data file and now write it full of zeros */
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Data file %s did not"
- " exist: new to be created\n", name);
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Data file %s did not exist: new to be created",
+ name);
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Setting file %s size to %lu MB\n",
- name, size >> (20 - UNIV_PAGE_SIZE_SHIFT));
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Setting file %s size to %lu MB",
+ name, size >> (20 - UNIV_PAGE_SIZE_SHIFT));
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Database physically writes the"
- " file full: wait...\n");
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Database physically writes the file full: wait...");
ret = os_file_set_size(name, fh, size << UNIV_PAGE_SIZE_SHIFT);
if (!ret) {
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Error in creating %s:"
- " probably out of disk space\n", name);
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Error in creating %s: probably out of "
+ "disk space", name);
err = DB_ERROR;
}
@@ -1058,17 +1133,25 @@ srv_undo_tablespace_create(
Open an undo tablespace.
@return DB_SUCCESS or error code */
static
-enum db_err
+dberr_t
srv_undo_tablespace_open(
/*=====================*/
const char* name, /*!< in: tablespace name */
ulint space) /*!< in: tablespace id */
{
os_file_t fh;
- enum db_err err;
+ dberr_t err = DB_ERROR;
ibool ret;
ulint flags;
+ if (!srv_file_check_mode(name)) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "UNDO tablespaces must be %s!",
+ srv_read_only_mode ? "writable" : "readable");
+
+ return(DB_ERROR);
+ }
+
fh = os_file_create(
innodb_file_data_key, name,
OS_FILE_OPEN_RETRY
@@ -1082,7 +1165,6 @@ srv_undo_tablespace_open(
if (ret) {
os_offset_t size;
- os_offset_t n_pages;
size = os_file_get_size(fh);
ut_a(size != (os_offset_t) -1);
@@ -1105,17 +1187,15 @@ srv_undo_tablespace_open(
ut_a(fil_validate());
- n_pages = size / UNIV_PAGE_SIZE;
+ os_offset_t n_pages = size / UNIV_PAGE_SIZE;
/* On 64 bit Windows ulint can be 32 bit and os_offset_t
is 64 bit. It is OK to cast the n_pages to ulint because
the unit has been scaled to pages and they are always
32 bit. */
- fil_node_create(name, (ulint) n_pages, space, FALSE);
-
- err = DB_SUCCESS;
- } else {
- err = DB_ERROR;
+ if (fil_node_create(name, (ulint) n_pages, space, FALSE)) {
+ err = DB_SUCCESS;
+ }
}
return(err);
@@ -1125,20 +1205,25 @@ srv_undo_tablespace_open(
Opens the configured number of undo tablespaces.
@return DB_SUCCESS or error code */
static
-enum db_err
+dberr_t
srv_undo_tablespaces_init(
/*======================*/
ibool create_new_db, /*!< in: TRUE if new db being
created */
- const ulint n_conf_tablespaces) /*!< in: configured undo
+ const ulint n_conf_tablespaces, /*!< in: configured undo
tablespaces */
+ ulint* n_opened) /*!< out: number of UNDO
+ tablespaces successfully
+ discovered and opened */
{
ulint i;
- enum db_err err = DB_SUCCESS;
+ dberr_t err = DB_SUCCESS;
ulint prev_space_id = 0;
ulint n_undo_tablespaces;
ulint undo_tablespace_ids[TRX_SYS_N_RSEGS + 1];
+ *n_opened = 0;
+
ut_a(n_conf_tablespaces <= TRX_SYS_N_RSEGS);
memset(undo_tablespace_ids, 0x0, sizeof(undo_tablespace_ids));
@@ -1164,10 +1249,10 @@ srv_undo_tablespaces_init(
name, SRV_UNDO_TABLESPACE_SIZE_IN_PAGES);
if (err != DB_SUCCESS) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Could not create "
- "undo tablespace '%s'.\n", name);
+
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Could not create undo tablespace '%s'.",
+ name);
return(err);
}
@@ -1217,15 +1302,16 @@ srv_undo_tablespaces_init(
err = srv_undo_tablespace_open(name, undo_tablespace_ids[i]);
if (err != DB_SUCCESS) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Error opening undo "
- "tablespace %s.\n", name);
+
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Unable to open undo tablespace '%s'.", name);
return(err);
}
prev_space_id = undo_tablespace_ids[i];
+
+ ++*n_opened;
}
/* Open any extra unused undo tablespaces. These must be contiguous.
@@ -1248,6 +1334,8 @@ srv_undo_tablespaces_init(
}
++n_undo_tablespaces;
+
+ ++*n_opened;
}
/* If the user says that there are fewer than what we find we
@@ -1275,13 +1363,17 @@ srv_undo_tablespaces_init(
"value is %lu\n", n_undo_tablespaces);
return(err != DB_SUCCESS ? err : DB_ERROR);
- }
- if (n_undo_tablespaces > 0) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Opened %lu undo tablespaces\n",
- n_conf_tablespaces);
+ } else if (n_undo_tablespaces > 0) {
+
+ ib_logf(IB_LOG_LEVEL_INFO, "Opened %lu undo tablespaces",
+ n_undo_tablespaces);
+
+ if (n_conf_tablespaces == 0) {
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Using the system tablespace for all UNDO "
+ "logging because innodb_undo_tablespaces=0");
+ }
}
if (create_new_db) {
@@ -1303,18 +1395,51 @@ srv_undo_tablespaces_init(
}
/********************************************************************
+Wait for the purge thread(s) to start up. */
+static
+void
+srv_start_wait_for_purge_to_start()
+/*===============================*/
+{
+ /* Wait for the purge coordinator and master thread to startup. */
+
+ purge_state_t state = trx_purge_state();
+
+ ut_a(state != PURGE_STATE_DISABLED);
+
+ while (srv_shutdown_state == SRV_SHUTDOWN_NONE
+ && srv_force_recovery < SRV_FORCE_NO_BACKGROUND
+ && state == PURGE_STATE_INIT) {
+
+ switch (state = trx_purge_state()) {
+ case PURGE_STATE_RUN:
+ case PURGE_STATE_STOP:
+ break;
+
+ case PURGE_STATE_INIT:
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Waiting for purge to start");
+
+ os_thread_sleep(50000);
+ break;
+
+ case PURGE_STATE_EXIT:
+ case PURGE_STATE_DISABLED:
+ ut_error;
+ }
+ }
+}
+
+/********************************************************************
Starts InnoDB and creates a new database if database files
are not found and the user wants.
@return DB_SUCCESS or error code */
UNIV_INTERN
-int
+dberr_t
innobase_start_or_create_for_mysql(void)
/*====================================*/
{
ibool create_new_db;
- ibool log_file_created;
- ibool log_created = FALSE;
- ibool log_opened = FALSE;
lsn_t min_flushed_lsn;
lsn_t max_flushed_lsn;
#ifdef UNIV_LOG_ARCHIVE
@@ -1324,11 +1449,19 @@ innobase_start_or_create_for_mysql(void)
ulint sum_of_new_sizes;
ulint sum_of_data_file_sizes;
ulint tablespace_size_in_header;
- ulint err;
- ulint i;
+ dberr_t err;
+ unsigned i;
+ ulint srv_n_log_files_found = srv_n_log_files;
ulint io_limit;
mtr_t mtr;
ib_bh_t* ib_bh;
+ char logfilename[10000];
+ char* logfile0 = NULL;
+ size_t dirnamelen;
+
+ if (srv_read_only_mode) {
+ ib_logf(IB_LOG_LEVEL_INFO, "Started in read only mode");
+ }
#ifdef HAVE_DARWIN_THREADS
# ifdef F_FULLFSYNC
@@ -1422,31 +1555,34 @@ innobase_start_or_create_for_mysql(void)
" InnoDB: !!!!!!!! UNIV_MEM_DEBUG switched on !!!!!!!!!\n");
#endif
- if (UNIV_LIKELY(srv_use_sys_malloc)) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: The InnoDB memory heap is disabled\n");
+ if (srv_use_sys_malloc) {
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "The InnoDB memory heap is disabled");
}
#if defined(COMPILER_HINTS_ENABLED)
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Compiler hints enabled.\n");
+ ib_logf(IB_LOG_LEVEL_INFO,
+ " InnoDB: Compiler hints enabled.");
#endif /* defined(COMPILER_HINTS_ENABLED) */
- ut_print_timestamp(stderr);
- fputs(" InnoDB: " IB_ATOMICS_STARTUP_MSG "\n", stderr);
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "" IB_ATOMICS_STARTUP_MSG "");
- ut_print_timestamp(stderr);
- fputs(" InnoDB: Compressed tables use zlib " ZLIB_VERSION
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Compressed tables use zlib " ZLIB_VERSION
#ifdef UNIV_ZIP_DEBUG
" with validation"
#endif /* UNIV_ZIP_DEBUG */
- "\n" , stderr);
+ );
#ifdef UNIV_ZIP_COPY
- ut_print_timestamp(stderr);
- fputs(" InnoDB: and extra copying\n", stderr);
+ ib_logf(IB_LOG_LEVEL_INFO, "and extra copying");
#endif /* UNIV_ZIP_COPY */
+
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "CPU %s crc32 instructions",
+ ut_crc32_sse2_enabled ? "supports" : "does not support");
+
/* Since InnoDB does not currently clean up all its internal data
structures in MySQL Embedded Server Library server_end(), we
print an error message if someone tries to start up InnoDB a
@@ -1505,17 +1641,14 @@ innobase_start_or_create_for_mysql(void)
#elif defined(LINUX_NATIVE_AIO)
if (srv_use_native_aio) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Using Linux native AIO\n");
+ ib_logf(IB_LOG_LEVEL_INFO, "Using Linux native AIO");
}
#else
/* Currently native AIO is supported only on windows and linux
and that also when the support is compiled in. In all other
cases, we ignore the setting of innodb_use_native_aio. */
srv_use_native_aio = FALSE;
-
-#endif
+#endif /* __WIN__ */
if (srv_file_flush_method_str == NULL) {
/* These are the default options */
@@ -1533,6 +1666,9 @@ innobase_start_or_create_for_mysql(void)
} else if (0 == ut_strcmp(srv_file_flush_method_str, "O_DIRECT")) {
srv_unix_file_flush_method = SRV_UNIX_O_DIRECT;
+ } else if (0 == ut_strcmp(srv_file_flush_method_str, "O_DIRECT_NO_FSYNC")) {
+ srv_unix_file_flush_method = SRV_UNIX_O_DIRECT_NO_FSYNC;
+
} else if (0 == ut_strcmp(srv_file_flush_method_str, "littlesync")) {
srv_unix_file_flush_method = SRV_UNIX_LITTLESYNC;
@@ -1550,12 +1686,10 @@ innobase_start_or_create_for_mysql(void)
} else if (0 == ut_strcmp(srv_file_flush_method_str,
"async_unbuffered")) {
srv_win_file_flush_method = SRV_WIN_IO_UNBUFFERED;
-#endif
+#endif /* __WIN__ */
} else {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Unrecognized value %s for"
- " innodb_flush_method\n",
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Unrecognized value %s for innodb_flush_method",
srv_file_flush_method_str);
return(DB_ERROR);
}
@@ -1580,74 +1714,93 @@ innobase_start_or_create_for_mysql(void)
srv_max_n_threads = 10000;
} else {
srv_buf_pool_instances = 1;
- srv_max_n_threads = 1000; /* saves several MB of memory,
- especially in 64-bit
- computers */
+
+ /* Saves several MB of memory, especially in
+ 64-bit computers */
+
+ srv_max_n_threads = 1000;
}
- err = srv_boot();
+ srv_boot();
- if (err != DB_SUCCESS) {
+ if (!srv_read_only_mode) {
- return((int) err);
- }
+ mutex_create(srv_monitor_file_mutex_key,
+ &srv_monitor_file_mutex, SYNC_NO_ORDER_CHECK);
- mutex_create(srv_monitor_file_mutex_key,
- &srv_monitor_file_mutex, SYNC_NO_ORDER_CHECK);
+ if (srv_innodb_status) {
- if (srv_innodb_status) {
+ srv_monitor_file_name = static_cast<char*>(
+ mem_alloc(
+ strlen(fil_path_to_mysql_datadir)
+ + 20 + sizeof "/innodb_status."));
- srv_monitor_file_name = static_cast<char*>(
- mem_alloc(
- strlen(fil_path_to_mysql_datadir)
- + 20 + sizeof "/innodb_status."));
+ sprintf(srv_monitor_file_name, "%s/innodb_status.%lu",
+ fil_path_to_mysql_datadir,
+ os_proc_get_number());
- sprintf(srv_monitor_file_name, "%s/innodb_status.%lu",
- fil_path_to_mysql_datadir, os_proc_get_number());
- srv_monitor_file = fopen(srv_monitor_file_name, "w+");
- if (!srv_monitor_file) {
- fprintf(stderr, "InnoDB: unable to create %s: %s\n",
- srv_monitor_file_name, strerror(errno));
- return(DB_ERROR);
+ srv_monitor_file = fopen(srv_monitor_file_name, "w+");
+
+ if (!srv_monitor_file) {
+
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Unable to create %s: %s",
+ srv_monitor_file_name,
+ strerror(errno));
+
+ return(DB_ERROR);
+ }
+ } else {
+ srv_monitor_file_name = NULL;
+ srv_monitor_file = os_file_create_tmpfile();
+
+ if (!srv_monitor_file) {
+ return(DB_ERROR);
+ }
}
- } else {
- srv_monitor_file_name = NULL;
- srv_monitor_file = os_file_create_tmpfile();
- if (!srv_monitor_file) {
+
+ mutex_create(srv_dict_tmpfile_mutex_key,
+ &srv_dict_tmpfile_mutex, SYNC_DICT_OPERATION);
+
+ srv_dict_tmpfile = os_file_create_tmpfile();
+
+ if (!srv_dict_tmpfile) {
return(DB_ERROR);
}
- }
- mutex_create(srv_dict_tmpfile_mutex_key,
- &srv_dict_tmpfile_mutex, SYNC_DICT_OPERATION);
+ mutex_create(srv_misc_tmpfile_mutex_key,
+ &srv_misc_tmpfile_mutex, SYNC_ANY_LATCH);
- srv_dict_tmpfile = os_file_create_tmpfile();
- if (!srv_dict_tmpfile) {
- return(DB_ERROR);
- }
+ srv_misc_tmpfile = os_file_create_tmpfile();
- mutex_create(srv_misc_tmpfile_mutex_key,
- &srv_misc_tmpfile_mutex, SYNC_ANY_LATCH);
-
- srv_misc_tmpfile = os_file_create_tmpfile();
- if (!srv_misc_tmpfile) {
- return(DB_ERROR);
+ if (!srv_misc_tmpfile) {
+ return(DB_ERROR);
+ }
}
/* If user has set the value of innodb_file_io_threads then
we'll emit a message telling the user that this parameter
is now deprecated. */
if (srv_n_file_io_threads != 4) {
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Warning:"
- " innodb_file_io_threads is deprecated."
- " Please use innodb_read_io_threads and"
- " innodb_write_io_threads instead\n");
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "innodb_file_io_threads is deprecated. Please use "
+ "innodb_read_io_threads and innodb_write_io_threads "
+ "instead");
}
/* Now overwrite the value on srv_n_file_io_threads */
- srv_n_file_io_threads = 2 + srv_n_read_io_threads
- + srv_n_write_io_threads;
+ srv_n_file_io_threads = srv_n_read_io_threads;
+
+ if (!srv_read_only_mode) {
+ /* Add the log and ibuf IO threads. */
+ srv_n_file_io_threads += 2;
+ srv_n_file_io_threads += srv_n_write_io_threads;
+ } else {
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Disabling background IO write threads.");
+
+ srv_n_write_io_threads = 0;
+ }
ut_a(srv_n_file_io_threads <= SRV_MAX_N_IO_THREADS);
@@ -1662,56 +1815,59 @@ innobase_start_or_create_for_mysql(void)
}
# endif /* __WIN__ */
- os_aio_init(io_limit,
- srv_n_read_io_threads,
- srv_n_write_io_threads,
- SRV_MAX_N_PENDING_SYNC_IOS);
+ if (!os_aio_init(io_limit,
+ srv_n_read_io_threads,
+ srv_n_write_io_threads,
+ SRV_MAX_N_PENDING_SYNC_IOS)) {
+
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Fatal : Cannot initialize AIO sub-system");
+
+ return(DB_ERROR);
+ }
fil_init(srv_file_per_table ? 50000 : 5000, srv_max_n_open_files);
- /* Print time to initialize the buffer pool */
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Initializing buffer pool, size =");
+ double size;
+ char unit;
if (srv_buf_pool_size >= 1024 * 1024 * 1024) {
- fprintf(stderr,
- " %.1fG\n",
- ((double) srv_buf_pool_size) / (1024 * 1024 * 1024));
+ size = ((double) srv_buf_pool_size) / (1024 * 1024 * 1024);
+ unit = 'G';
} else {
- fprintf(stderr,
- " %.1fM\n",
- ((double) srv_buf_pool_size) / (1024 * 1024));
+ size = ((double) srv_buf_pool_size) / (1024 * 1024);
+ unit = 'M';
}
- err = buf_pool_init(srv_buf_pool_size, srv_buf_pool_instances);
+ /* Print time to initialize the buffer pool */
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Initializing buffer pool, size = %.1f%c", size, unit);
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Completed initialization of buffer pool\n");
+ err = buf_pool_init(srv_buf_pool_size, srv_buf_pool_instances);
if (err != DB_SUCCESS) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Fatal error: cannot allocate memory"
- " for the buffer pool\n");
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Cannot allocate memory for the buffer pool");
return(DB_ERROR);
}
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Completed initialization of buffer pool");
+
#ifdef UNIV_DEBUG
/* We have observed deadlocks with a 5MB buffer pool but
the actual lower limit could very well be a little higher. */
if (srv_buf_pool_size <= 5 * 1024 * 1024) {
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Warning: Small buffer pool size "
- "(%luM), the flst_validate() debug function "
- "can cause a deadlock if the buffer pool fills up.\n",
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Small buffer pool size (%luM), the flst_validate() "
+ "debug function can cause a deadlock if the "
+ "buffer pool fills up.",
srv_buf_pool_size / 1024 / 1024);
}
-#endif
+#endif /* UNIV_DEBUG */
fsp_init();
log_init();
@@ -1720,14 +1876,15 @@ innobase_start_or_create_for_mysql(void)
/* Create i/o-handler threads: */
- for (i = 0; i < srv_n_file_io_threads; i++) {
+ for (ulint i = 0; i < srv_n_file_io_threads; ++i) {
+
n[i] = i;
os_thread_create(io_handler_thread, n + i, thread_ids + i);
}
#ifdef UNIV_LOG_ARCHIVE
- if (0 != ut_strcmp(srv_log_group_home_dirs[0], srv_arch_dir)) {
+ if (0 != ut_strcmp(srv_log_group_home_dir, srv_arch_dir)) {
ut_print_timestamp(stderr);
fprintf(stderr, " InnoDB: Error: you must set the log group home dir in my.cnf\n");
ut_print_timestamp(stderr);
@@ -1738,16 +1895,14 @@ innobase_start_or_create_for_mysql(void)
#endif /* UNIV_LOG_ARCHIVE */
if (srv_n_log_files * srv_log_file_size * UNIV_PAGE_SIZE
- >= 549755813888ULL /* 512G */) {
+ >= 512ULL * 1024ULL * 1024ULL * 1024ULL) {
/* log_block_convert_lsn_to_no() limits the returned block
number to 1G and given that OS_FILE_LOG_BLOCK_SIZE is 512
bytes, then we have a limit of 512 GB. If that limit is to
be raised, then log_block_convert_lsn_to_no() must be
modified. */
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Error: combined size of log files"
- " must be < 512 GB\n");
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Combined size of log files must be < 512 GB");
return(DB_ERROR);
}
@@ -1759,7 +1914,6 @@ innobase_start_or_create_for_mysql(void)
So next_offset must be < ULINT_MAX * UNIV_PAGE_SIZE. This
means that we are limited to ULINT_MAX * UNIV_PAGE_SIZE which
is 64 TB on 32 bit systems. */
- ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Error: combined size of log files"
" must be < %lu GB\n",
@@ -1791,10 +1945,8 @@ innobase_start_or_create_for_mysql(void)
}
if (sum_of_new_sizes < 10485760 / UNIV_PAGE_SIZE) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Error: tablespace size must be"
- " at least 10 MB\n");
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Tablespace size must be at least 10 MB");
return(DB_ERROR);
}
@@ -1805,36 +1957,27 @@ innobase_start_or_create_for_mysql(void)
#endif /* UNIV_LOG_ARCHIVE */
&min_flushed_lsn, &max_flushed_lsn,
&sum_of_new_sizes);
- if (err != DB_SUCCESS) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Could not open or create data files.\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: If you tried to add new data files,"
- " and it failed here,\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: you should now edit innodb_data_file_path"
- " in my.cnf back\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: to what it was, and remove the"
- " new ibdata files InnoDB created\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: in this failed attempt. InnoDB only wrote"
- " those files full of\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: zeros, but did not yet use them in any way."
- " But be careful: do not\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: remove old data files"
- " which contain your precious data!\n");
+ if (err == DB_FAIL) {
+
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "The system tablespace must be writable!");
+
+ return(DB_ERROR);
- return((int) err);
+ } else if (err != DB_SUCCESS) {
+
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Could not open or create the system tablespace. If "
+ "you tried to add new data files to the system "
+ "tablespace, and it failed here, you should now "
+ "edit innodb_data_file_path in my.cnf back to what "
+ "it was, and remove the new ibdata files InnoDB "
+ "created in this failed attempt. InnoDB only wrote "
+ "those files full of zeros, but did not yet use "
+ "them in any way. But be careful: do not remove "
+ "old data files which contain your precious data!");
+
+ return(err);
}
#ifdef UNIV_LOG_ARCHIVE
@@ -1842,125 +1985,199 @@ innobase_start_or_create_for_mysql(void)
srv_arch_dir = srv_add_path_separator_if_needed(srv_arch_dir);
#endif /* UNIV_LOG_ARCHIVE */
- for (i = 0; i < srv_n_log_files; i++) {
- err = open_or_create_log_file(create_new_db, &log_file_created,
- log_opened, 0, i);
- if (err != DB_SUCCESS) {
+ dirnamelen = strlen(srv_log_group_home_dir);
+ ut_a(dirnamelen < (sizeof logfilename) - 10 - sizeof "ib_logfile");
+ memcpy(logfilename, srv_log_group_home_dir, dirnamelen);
- return((int) err);
- }
+ /* Add a path separator if needed. */
+ if (dirnamelen && logfilename[dirnamelen - 1] != SRV_PATH_SEPARATOR) {
+ logfilename[dirnamelen++] = SRV_PATH_SEPARATOR;
+ }
- if (log_file_created) {
- log_created = TRUE;
- } else {
- log_opened = TRUE;
+ srv_log_file_size_requested = srv_log_file_size;
+
+ if (create_new_db) {
+ bool success = buf_flush_list(ULINT_MAX, LSN_MAX, NULL);
+ ut_a(success);
+
+ min_flushed_lsn = max_flushed_lsn = log_get_lsn();
+
+ buf_flush_wait_batch_end(NULL, BUF_FLUSH_LIST);
+
+ err = create_log_files(logfilename, dirnamelen,
+ max_flushed_lsn, logfile0);
+
+ if (err != DB_SUCCESS) {
+ return(err);
}
- if ((log_opened && create_new_db)
- || (log_opened && log_created)) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Error: all log files must be"
- " created at the same time.\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: All log files must be"
- " created also in database creation.\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: If you want bigger or smaller"
- " log files, shut down the\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: database and make sure there"
- " were no errors in shutdown.\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Then delete the existing log files."
- " Edit the .cnf file\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: and start the database again.\n");
+ } else {
+ for (i = 0; i < SRV_N_LOG_FILES_MAX; i++) {
+ os_offset_t size;
+ os_file_stat_t stat_info;
+
+ sprintf(logfilename + dirnamelen,
+ "ib_logfile%u", i);
+
+ err = os_file_get_status(
+ logfilename, &stat_info, false);
+
+ if (err == DB_NOT_FOUND) {
+ if (i == 0) {
+ if (max_flushed_lsn
+ != min_flushed_lsn) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Cannot create"
+ " log files because"
+ " data files are"
+ " corrupt or"
+ " not in sync"
+ " with each other");
+ return(DB_ERROR);
+ }
+
+ if (max_flushed_lsn < (lsn_t) 1000) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Cannot create"
+ " log files because"
+ " data files are"
+ " corrupt or the"
+ " database was not"
+ " shut down cleanly"
+ " after creating"
+ " the data files.");
+ return(DB_ERROR);
+ }
+
+ err = create_log_files(
+ logfilename, dirnamelen,
+ max_flushed_lsn, logfile0);
+
+ if (err != DB_SUCCESS) {
+ return(err);
+ }
+
+ create_log_files_rename(
+ logfilename, dirnamelen,
+ max_flushed_lsn, logfile0);
+
+ /* Suppress the message about
+ crash recovery. */
+ max_flushed_lsn = min_flushed_lsn
+ = log_get_lsn();
+ goto files_checked;
+ } else if (i < 2) {
+ /* must have at least 2 log files */
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Only one log file found.");
+ return(err);
+ }
- return(DB_ERROR);
+ /* opened all files */
+ break;
+ }
+
+ if (!srv_file_check_mode(logfilename)) {
+ return(DB_ERROR);
+ }
+
+ err = open_log_file(&files[i], logfilename, &size);
+
+ if (err != DB_SUCCESS) {
+ return(err);
+ }
+
+ ut_a(size != (os_offset_t) -1);
+
+ if (size & ((1 << UNIV_PAGE_SIZE_SHIFT) - 1)) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Log file %s size "
+ UINT64PF " is not a multiple of"
+ " innodb_page_size",
+ logfilename, size);
+ return(DB_ERROR);
+ }
+
+ size >>= UNIV_PAGE_SIZE_SHIFT;
+
+ if (i == 0) {
+ srv_log_file_size = size;
+ } else if (size != srv_log_file_size) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Log file %s is"
+ " of different size "UINT64PF" bytes"
+ " than other log"
+ " files "UINT64PF" bytes!",
+ logfilename,
+ size << UNIV_PAGE_SIZE_SHIFT,
+ (os_offset_t) srv_log_file_size
+ << UNIV_PAGE_SIZE_SHIFT);
+ return(DB_ERROR);
+ }
}
- }
- /* Open all log files and data files in the system tablespace: we
- keep them open until database shutdown */
+ srv_n_log_files_found = i;
- fil_open_log_and_system_tablespace_files();
+ /* Create the in-memory file space objects. */
- err = srv_undo_tablespaces_init(create_new_db, srv_undo_tablespaces);
+ sprintf(logfilename + dirnamelen, "ib_logfile%u", 0);
- /* If the force recovery is set very high then we carry on regardless
- of all errors. Basically this is fingers crossed mode. */
+ fil_space_create(logfilename,
+ SRV_LOG_SPACE_FIRST_ID,
+ fsp_flags_set_page_size(0, UNIV_PAGE_SIZE),
+ FIL_LOG);
- if (err != DB_SUCCESS
- && srv_force_recovery < SRV_FORCE_NO_UNDO_LOG_SCAN) {
+ ut_a(fil_validate());
- return((int) err);
- }
+ /* srv_log_file_size is measured in pages; if page size is 16KB,
+ then we have a limit of 64TB on 32 bit systems */
+ ut_a(srv_log_file_size <= ULINT_MAX);
+
+ for (unsigned j = 0; j < i; j++) {
+ sprintf(logfilename + dirnamelen, "ib_logfile%u", j);
+
+ if (!fil_node_create(logfilename,
+ (ulint) srv_log_file_size,
+ SRV_LOG_SPACE_FIRST_ID, FALSE)) {
+ return(DB_ERROR);
+ }
+ }
- if (log_created && !create_new_db
-#ifdef UNIV_LOG_ARCHIVE
- && !srv_archive_recovery
-#endif /* UNIV_LOG_ARCHIVE */
- ) {
- if (max_flushed_lsn != min_flushed_lsn
#ifdef UNIV_LOG_ARCHIVE
- || max_arch_log_no != min_arch_log_no
+ /* Create the file space object for archived logs. Under
+ MySQL, no archiving ever done. */
+ fil_space_create("arch_log_space", SRV_LOG_SPACE_FIRST_ID + 1,
+ 0, FIL_LOG);
#endif /* UNIV_LOG_ARCHIVE */
- ) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Cannot initialize created"
- " log files because\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: data files were not in sync"
- " with each other\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: or the data files are corrupt.\n");
+ log_group_init(0, i, srv_log_file_size * UNIV_PAGE_SIZE,
+ SRV_LOG_SPACE_FIRST_ID,
+ SRV_LOG_SPACE_FIRST_ID + 1);
+ }
- return(DB_ERROR);
- }
+files_checked:
+ /* Open all log files and data files in the system
+ tablespace: we keep them open until database
+ shutdown */
- if (max_flushed_lsn < (lsn_t) 1000) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Cannot initialize created"
- " log files because\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: data files are corrupt,"
- " or new data files were\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: created when the database"
- " was started previous\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: time but the database"
- " was not shut down\n");
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: normally after that.\n");
+ fil_open_log_and_system_tablespace_files();
- return(DB_ERROR);
- }
+ err = srv_undo_tablespaces_init(
+ create_new_db,
+ srv_undo_tablespaces,
+ &srv_undo_tablespaces_open);
- mutex_enter(&(log_sys->mutex));
+ /* If the force recovery is set very high then we carry on regardless
+ of all errors. Basically this is fingers crossed mode. */
-#ifdef UNIV_LOG_ARCHIVE
- /* Do not + 1 arch_log_no because we do not use log
- archiving */
- recv_reset_logs(max_flushed_lsn, max_arch_log_no, TRUE);
-#else
- recv_reset_logs(max_flushed_lsn, TRUE);
-#endif /* UNIV_LOG_ARCHIVE */
+ if (err != DB_SUCCESS
+ && srv_force_recovery < SRV_FORCE_NO_UNDO_LOG_SCAN) {
- mutex_exit(&(log_sys->mutex));
+ return(err);
+ }
+
+ /* Initialize objects used by dict stats gathering thread, which
+ can also be used by recovery if it tries to drop some table */
+ if (!srv_read_only_mode) {
+ dict_stats_thread_init();
}
trx_sys_file_format_init();
@@ -1968,6 +2185,9 @@ innobase_start_or_create_for_mysql(void)
trx_sys_create();
if (create_new_db) {
+
+ ut_a(!srv_read_only_mode);
+
mtr_start(&mtr);
fsp_header_init(0, sum_of_new_sizes, &mtr);
@@ -1987,16 +2207,34 @@ innobase_start_or_create_for_mysql(void)
trx_purge_sys_create(srv_n_purge_threads, ib_bh);
- dict_create();
+ err = dict_create();
+
+ if (err != DB_SUCCESS) {
+ return(err);
+ }
srv_startup_is_before_trx_rollback_phase = FALSE;
+ bool success = buf_flush_list(ULINT_MAX, LSN_MAX, NULL);
+ ut_a(success);
+
+ min_flushed_lsn = max_flushed_lsn = log_get_lsn();
+
+ buf_flush_wait_batch_end(NULL, BUF_FLUSH_LIST);
+
+ /* Stamp the LSN to the data files. */
+ fil_write_flushed_lsn_to_data_files(max_flushed_lsn, 0);
+
+ fil_flush_file_spaces(FIL_TABLESPACE);
+
+ create_log_files_rename(logfilename, dirnamelen,
+ max_flushed_lsn, logfile0);
#ifdef UNIV_LOG_ARCHIVE
} else if (srv_archive_recovery) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Starting archive"
- " recovery from a backup...\n");
+
+ ib_logf(IB_LOG_LEVEL_INFO,
+ " Starting archive recovery from a backup...");
+
err = recv_recovery_from_archive_start(
min_flushed_lsn, srv_archive_recovery_limit_lsn,
min_arch_log_no);
@@ -2007,7 +2245,11 @@ innobase_start_or_create_for_mysql(void)
/* Since ibuf init is in dict_boot, and ibuf is needed
in any disk i/o, first call dict_boot */
- dict_boot();
+ err = dict_boot();
+
+ if (err != DB_SUCCESS) {
+ return(err);
+ }
ib_bh = trx_sys_init_at_db_start();
@@ -2051,10 +2293,10 @@ innobase_start_or_create_for_mysql(void)
/* We always try to do a recovery, even if the database had
been shut down normally: this is the normal startup path */
- err = recv_recovery_from_checkpoint_start(LOG_CHECKPOINT,
- IB_ULONGLONG_MAX,
- min_flushed_lsn,
- max_flushed_lsn);
+ err = recv_recovery_from_checkpoint_start(
+ LOG_CHECKPOINT, IB_ULONGLONG_MAX,
+ min_flushed_lsn, max_flushed_lsn);
+
if (err != DB_SUCCESS) {
return(DB_ERROR);
@@ -2066,7 +2308,11 @@ innobase_start_or_create_for_mysql(void)
to access space 0, and the insert buffer at this stage already
works for space 0. */
- dict_boot();
+ err = dict_boot();
+
+ if (err != DB_SUCCESS) {
+ return(err);
+ }
ib_bh = trx_sys_init_at_db_start();
@@ -2079,6 +2325,7 @@ innobase_start_or_create_for_mysql(void)
are initialized in trx_sys_init_at_db_start(). */
recv_recovery_from_checkpoint_finish();
+
if (srv_force_recovery < SRV_FORCE_NO_IBUF_MERGE) {
/* The following call is necessary for the insert
buffer to work with multiple tablespaces. We must
@@ -2100,6 +2347,90 @@ innobase_start_or_create_for_mysql(void)
recv_needed_recovery);
}
+ if (!srv_force_recovery
+ && !recv_sys->found_corrupt_log
+ && (srv_log_file_size_requested != srv_log_file_size
+ || srv_n_log_files_found != srv_n_log_files)) {
+ /* Prepare to replace the redo log files. */
+
+ if (srv_read_only_mode) {
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Cannot resize log files "
+ "in read-only mode.");
+ return(DB_READ_ONLY);
+ }
+
+ /* Clean the buffer pool. */
+ bool success = buf_flush_list(
+ ULINT_MAX, LSN_MAX, NULL);
+ ut_a(success);
+
+ RECOVERY_CRASH(1);
+
+ min_flushed_lsn = max_flushed_lsn = log_get_lsn();
+
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Resizing redo log from %u*%u to %u*%u pages"
+ ", LSN=" LSN_PF,
+ (unsigned) i,
+ (unsigned) srv_log_file_size,
+ (unsigned) srv_n_log_files,
+ (unsigned) srv_log_file_size_requested,
+ max_flushed_lsn);
+
+ buf_flush_wait_batch_end(NULL, BUF_FLUSH_LIST);
+
+ RECOVERY_CRASH(2);
+
+ /* Flush the old log files. */
+ log_buffer_flush_to_disk();
+ /* If innodb_flush_method=O_DSYNC,
+ we need to explicitly flush the log buffers. */
+ fil_flush(SRV_LOG_SPACE_FIRST_ID);
+
+ ut_ad(max_flushed_lsn == log_get_lsn());
+
+ /* Prohibit redo log writes from any other
+ threads until creating a log checkpoint at the
+ end of create_log_files(). */
+ ut_d(recv_no_log_write = TRUE);
+ ut_ad(!buf_pool_check_no_pending_io());
+
+ RECOVERY_CRASH(3);
+
+ /* Stamp the LSN to the data files. */
+ fil_write_flushed_lsn_to_data_files(
+ max_flushed_lsn, 0);
+
+ fil_flush_file_spaces(FIL_TABLESPACE);
+
+ RECOVERY_CRASH(4);
+
+ /* Close and free the redo log files, so that
+ we can replace them. */
+ fil_close_log_files(true);
+
+ RECOVERY_CRASH(5);
+
+ /* Free the old log file space. */
+ log_group_close_all();
+
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Starting to delete and rewrite log files.");
+
+ srv_log_file_size = srv_log_file_size_requested;
+
+ err = create_log_files(logfilename, dirnamelen,
+ max_flushed_lsn, logfile0);
+
+ if (err != DB_SUCCESS) {
+ return(err);
+ }
+
+ create_log_files_rename(logfilename, dirnamelen,
+ max_flushed_lsn, logfile0);
+ }
+
srv_startup_is_before_trx_rollback_phase = FALSE;
recv_recovery_rollback_active();
@@ -2181,31 +2512,39 @@ innobase_start_or_create_for_mysql(void)
if (srv_available_undo_logs == ULINT_UNDEFINED) {
/* Can only happen if force recovery is set. */
- ut_a(srv_force_recovery >= SRV_FORCE_NO_TRX_UNDO);
+ ut_a(srv_force_recovery >= SRV_FORCE_NO_TRX_UNDO
+ || srv_read_only_mode);
srv_undo_logs = ULONG_UNDEFINED;
}
- /* Create the thread which watches the timeouts for lock waits */
- os_thread_create(
- lock_wait_timeout_thread,
- NULL, thread_ids + 2 + SRV_MAX_N_IO_THREADS);
-
- /* Create the thread which warns of long semaphore waits */
- os_thread_create(
- srv_error_monitor_thread,
- NULL, thread_ids + 3 + SRV_MAX_N_IO_THREADS);
+ if (!srv_read_only_mode) {
+ /* Create the thread which watches the timeouts
+ for lock waits */
+ os_thread_create(
+ lock_wait_timeout_thread,
+ NULL, thread_ids + 2 + SRV_MAX_N_IO_THREADS);
- /* Create the thread which prints InnoDB monitor info */
- os_thread_create(
- srv_monitor_thread,
- NULL, thread_ids + 4 + SRV_MAX_N_IO_THREADS);
+ /* Create the thread which warns of long semaphore waits */
+ os_thread_create(
+ srv_error_monitor_thread,
+ NULL, thread_ids + 3 + SRV_MAX_N_IO_THREADS);
- srv_is_being_started = FALSE;
+ /* Create the thread which prints InnoDB monitor info */
+ os_thread_create(
+ srv_monitor_thread,
+ NULL, thread_ids + 4 + SRV_MAX_N_IO_THREADS);
+ }
/* Create the SYS_FOREIGN and SYS_FOREIGN_COLS system tables */
err = dict_create_or_check_foreign_constraint_tables();
if (err != DB_SUCCESS) {
- return((int)DB_ERROR);
+ return(err);
+ }
+
+ /* Create the SYS_TABLESPACES system table */
+ err = dict_create_or_check_sys_tablespace();
+ if (err != DB_SUCCESS) {
+ return(err);
}
srv_is_being_started = FALSE;
@@ -2215,11 +2554,15 @@ innobase_start_or_create_for_mysql(void)
/* Create the master thread which does purge and other utility
operations */
- os_thread_create(
- srv_master_thread,
- NULL, thread_ids + (1 + SRV_MAX_N_IO_THREADS));
+ if (!srv_read_only_mode) {
+
+ os_thread_create(
+ srv_master_thread,
+ NULL, thread_ids + (1 + SRV_MAX_N_IO_THREADS));
+ }
- if (srv_force_recovery < SRV_FORCE_NO_BACKGROUND) {
+ if (!srv_read_only_mode
+ && srv_force_recovery < SRV_FORCE_NO_BACKGROUND) {
os_thread_create(
srv_purge_coordinator_thread,
@@ -2234,35 +2577,15 @@ innobase_start_or_create_for_mysql(void)
srv_worker_thread, NULL,
thread_ids + 5 + i + SRV_MAX_N_IO_THREADS);
}
- }
-
- os_thread_create(buf_flush_page_cleaner_thread, NULL, NULL);
-
- /* Wait for the purge coordinator and master thread to startup. */
-
- purge_state_t state = trx_purge_state();
- while (srv_shutdown_state == SRV_SHUTDOWN_NONE
- && srv_force_recovery < SRV_FORCE_NO_BACKGROUND
- && state == PURGE_STATE_INIT) {
-
- switch (state = trx_purge_state()) {
- case PURGE_STATE_RUN:
- case PURGE_STATE_STOP:
- break;
-
- case PURGE_STATE_INIT:
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: "
- "Waiting for the background threads to "
- "start\n");
+ srv_start_wait_for_purge_to_start();
- os_thread_sleep(50000);
- break;
+ } else {
+ purge_sys->state = PURGE_STATE_DISABLED;
+ }
- case PURGE_STATE_EXIT:
- ut_error;
- }
+ if (!srv_read_only_mode) {
+ os_thread_create(buf_flush_page_cleaner_thread, NULL, NULL);
}
#ifdef UNIV_DEBUG
@@ -2276,7 +2599,8 @@ innobase_start_or_create_for_mysql(void)
tablespace_size_in_header = fsp_header_get_tablespace_size();
- if (!srv_auto_extend_last_data_file
+ if (!srv_read_only_mode
+ && !srv_auto_extend_last_data_file
&& sum_of_data_file_sizes != tablespace_size_in_header) {
ut_print_timestamp(stderr);
@@ -2319,7 +2643,8 @@ innobase_start_or_create_for_mysql(void)
}
}
- if (srv_auto_extend_last_data_file
+ if (!srv_read_only_mode
+ && srv_auto_extend_last_data_file
&& sum_of_data_file_sizes < tablespace_size_in_header) {
ut_print_timestamp(stderr);
@@ -2383,23 +2708,17 @@ innobase_start_or_create_for_mysql(void)
os_fast_mutex_free(&srv_os_test_mutex);
if (srv_print_verbose_log) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: %s started; "
- "log sequence number " LSN_PF "\n",
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "%s started; log sequence number " LSN_PF "",
INNODB_VERSION_STR, srv_start_lsn);
}
if (srv_force_recovery > 0) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: !!! innodb_force_recovery"
- " is set to %lu !!!\n",
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "!!! innodb_force_recovery is set to %lu !!!",
(ulong) srv_force_recovery);
}
- fflush(stderr);
-
if (srv_force_recovery == 0) {
/* In the insert buffer we may have even bigger tablespace
id's, because we may have dropped those tablespaces, but
@@ -2409,16 +2728,20 @@ innobase_start_or_create_for_mysql(void)
ibuf_update_max_tablespace_id();
}
- /* Create the buffer pool dump/load thread */
- os_thread_create(buf_dump_thread, NULL, NULL);
+ if (!srv_read_only_mode) {
+ /* Create the buffer pool dump/load thread */
+ os_thread_create(buf_dump_thread, NULL, NULL);
- srv_was_started = TRUE;
+ /* Create the dict stats gathering thread */
+ os_thread_create(dict_stats_thread, NULL, NULL);
- /* Create the thread that will optimize the FTS sub-system
- in a separate background thread. */
- fts_optimize_init();
+ /* Create the thread that will optimize the FTS sub-system. */
+ fts_optimize_init();
+ }
- return((int) DB_SUCCESS);
+ srv_was_started = TRUE;
+
+ return(DB_SUCCESS);
}
#if 0
@@ -2455,27 +2778,28 @@ srv_fts_close(void)
Shuts down the InnoDB database.
@return DB_SUCCESS or error code */
UNIV_INTERN
-int
+dberr_t
innobase_shutdown_for_mysql(void)
/*=============================*/
{
ulint i;
+
if (!srv_was_started) {
if (srv_is_being_started) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Warning: shutting down"
- " a not properly started\n"
- "InnoDB: or created database!\n");
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Shutting down an improperly started, "
+ "or created database!");
}
return(DB_SUCCESS);
}
- /* Shutdown the FTS optimize sub system. */
- fts_optimize_start_shutdown();
+ if (!srv_read_only_mode) {
+ /* Shutdown the FTS optimize sub system. */
+ fts_optimize_start_shutdown();
- fts_optimize_end();
+ fts_optimize_end();
+ }
/* 1. Flush the buffer pool to disk, write the current lsn to
the tablespace header(s), and copy all log data to archive.
@@ -2485,18 +2809,12 @@ innobase_shutdown_for_mysql(void)
logs_empty_and_mark_files_at_shutdown();
if (srv_conc_get_active_threads() != 0) {
- fprintf(stderr,
- "InnoDB: Warning: query counter shows %ld queries"
- " still\n"
- "InnoDB: inside InnoDB at shutdown\n",
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Query counter shows %ld queries still "
+ "inside InnoDB at shutdown",
srv_conc_get_active_threads());
}
- /* This functionality will be used by WL#5522. */
- ut_a(trx_purge_state() == PURGE_STATE_RUN
- || trx_purge_state() == PURGE_STATE_EXIT
- || srv_force_recovery >= SRV_FORCE_NO_BACKGROUND);
-
/* 2. Make all threads created by InnoDB to exit */
srv_shutdown_state = SRV_SHUTDOWN_EXIT_THREADS;
@@ -2509,22 +2827,28 @@ innobase_shutdown_for_mysql(void)
/* NOTE: IF YOU CREATE THREADS IN INNODB, YOU MUST EXIT THEM
HERE OR EARLIER */
- /* a. Let the lock timeout thread exit */
- os_event_set(srv_timeout_event);
+ if (!srv_read_only_mode) {
+ /* a. Let the lock timeout thread exit */
+ os_event_set(lock_sys->timeout_event);
- /* b. srv error monitor thread exits automatically, no need
- to do anything here */
+ /* b. srv error monitor thread exits automatically,
+ no need to do anything here */
- /* c. We wake the master thread so that it exits */
- srv_wake_master_thread();
+ /* c. We wake the master thread so that it exits */
+ srv_wake_master_thread();
- /* d. Wakeup purge threads. */
- srv_purge_wakeup();
+ /* d. Wakeup purge threads. */
+ srv_purge_wakeup();
+ }
/* e. Exit the i/o threads */
os_aio_wake_all_threads_at_shutdown();
+ /* f. dict_stats_thread is signaled from
+ logs_empty_and_mark_files_at_shutdown() and should have
+ already quit or is quitting right now. */
+
os_mutex_enter(os_sync_mutex);
if (os_thread_count == 0) {
@@ -2549,9 +2873,9 @@ innobase_shutdown_for_mysql(void)
}
if (i == 1000) {
- fprintf(stderr,
- "InnoDB: Warning: %lu threads created by InnoDB"
- " had not exited at shutdown!\n",
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "%lu threads created by InnoDB"
+ " had not exited at shutdown!",
(ulong) os_thread_count);
}
@@ -2563,6 +2887,7 @@ innobase_shutdown_for_mysql(void)
mem_free(srv_monitor_file_name);
}
}
+
if (srv_dict_tmpfile) {
fclose(srv_dict_tmpfile);
srv_dict_tmpfile = 0;
@@ -2573,6 +2898,10 @@ innobase_shutdown_for_mysql(void)
srv_misc_tmpfile = 0;
}
+ if (!srv_read_only_mode) {
+ dict_stats_thread_deinit();
+ }
+
/* This must be disabled before closing the buffer pool
and closing the data dictionary. */
btr_search_disable();
@@ -2583,9 +2912,14 @@ innobase_shutdown_for_mysql(void)
trx_sys_file_format_close();
trx_sys_close();
- mutex_free(&srv_monitor_file_mutex);
- mutex_free(&srv_dict_tmpfile_mutex);
- mutex_free(&srv_misc_tmpfile_mutex);
+ /* We don't create these mutexes in RO mode because we don't create
+ the temp files that the cover. */
+ if (!srv_read_only_mode) {
+ mutex_free(&srv_monitor_file_mutex);
+ mutex_free(&srv_dict_tmpfile_mutex);
+ mutex_free(&srv_misc_tmpfile_mutex);
+ }
+
dict_close();
btr_search_sys_free();
@@ -2594,6 +2928,7 @@ innobase_shutdown_for_mysql(void)
os_aio_free();
que_close();
row_mysql_close();
+ srv_mon_free();
sync_close();
srv_free();
fil_close();
@@ -2618,11 +2953,10 @@ innobase_shutdown_for_mysql(void)
|| os_event_count != 0
|| os_mutex_count != 0
|| os_fast_mutex_count != 0) {
- fprintf(stderr,
- "InnoDB: Warning: some resources were not"
- " cleaned up in shutdown:\n"
- "InnoDB: threads %lu, events %lu,"
- " os_mutexes %lu, os_fast_mutexes %lu\n",
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Some resources were not cleaned up in shutdown: "
+ "threads %lu, events %lu, os_mutexes %lu, "
+ "os_fast_mutexes %lu",
(ulong) os_thread_count, (ulong) os_event_count,
(ulong) os_mutex_count, (ulong) os_fast_mutex_count);
}
@@ -2632,17 +2966,15 @@ innobase_shutdown_for_mysql(void)
}
if (srv_print_verbose_log) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Shutdown completed;"
- " log sequence number " LSN_PF "\n",
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Shutdown completed; log sequence number " LSN_PF "",
srv_shutdown_lsn);
}
srv_was_started = FALSE;
srv_start_has_been_called = FALSE;
- return((int) DB_SUCCESS);
+ return(DB_SUCCESS);
}
#endif /* !UNIV_HOTBACKUP */
@@ -2650,7 +2982,7 @@ innobase_shutdown_for_mysql(void)
/********************************************************************
Signal all per-table background threads to shutdown, and wait for them to do
so. */
-
+UNIV_INTERN
void
srv_shutdown_table_bg_threads(void)
/*===============================*/
@@ -2723,3 +3055,48 @@ srv_shutdown_table_bg_threads(void)
table = next;
}
}
+
+/*****************************************************************//**
+Get the meta-data filename from the table name. */
+UNIV_INTERN
+void
+srv_get_meta_data_filename(
+/*=======================*/
+ dict_table_t* table, /*!< in: table */
+ char* filename, /*!< out: filename */
+ ulint max_len) /*!< in: filename max length */
+{
+ ulint len;
+ char* path;
+ char* suffix;
+ static const ulint suffix_len = strlen(".cfg");
+
+ if (DICT_TF_HAS_DATA_DIR(table->flags)) {
+ dict_get_and_save_data_dir_path(table, false);
+ ut_a(table->data_dir_path);
+
+ path = os_file_make_remote_pathname(
+ table->data_dir_path, table->name, "cfg");
+ } else {
+ path = fil_make_ibd_name(table->name, false);
+ }
+
+ ut_a(path);
+ len = ut_strlen(path);
+ ut_a(max_len >= len);
+
+ suffix = path + (len - suffix_len);
+ if (strncmp(suffix, ".cfg", suffix_len) == 0) {
+ strcpy(filename, path);
+ } else {
+ ut_ad(strncmp(suffix, ".ibd", suffix_len) == 0);
+
+ strncpy(filename, path, len - suffix_len);
+ suffix = filename + (len - suffix_len);
+ strcpy(suffix, ".cfg");
+ }
+
+ mem_free(path);
+
+ srv_normalize_path_for_win(filename);
+}
diff --git a/storage/innobase/sync/sync0arr.cc b/storage/innobase/sync/sync0arr.cc
index b90a5f29589..749258021f7 100644
--- a/storage/innobase/sync/sync0arr.cc
+++ b/storage/innobase/sync/sync0arr.cc
@@ -39,6 +39,7 @@ Created 9/5/1995 Heikki Tuuri
#include "sync0rw.h"
#include "os0sync.h"
#include "os0file.h"
+#include "lock0lock.h"
#include "srv0srv.h"
#include "ha_prototypes.h"
@@ -78,11 +79,11 @@ any waiting threads who have missed the signal. */
/** A cell where an individual thread may wait suspended
until a resource is released. The suspending is implemented
using an operating system event semaphore. */
-struct sync_cell_struct {
+struct sync_cell_t {
void* wait_object; /*!< pointer to the object the
thread is waiting for; if NULL
the cell is free for use */
- mutex_t* old_wait_mutex; /*!< the latest wait mutex in cell */
+ ib_mutex_t* old_wait_mutex; /*!< the latest wait mutex in cell */
rw_lock_t* old_wait_rw_lock;
/*!< the latest wait rw-lock
in cell */
@@ -116,15 +117,15 @@ all changes (set or reset) to the state of the event must be made
while owning the mutex. */
/** Synchronization array */
-struct sync_array_struct {
+struct sync_array_t {
ulint n_reserved; /*!< number of currently reserved
cells in the wait array */
ulint n_cells; /*!< number of cells in the
wait array */
sync_cell_t* array; /*!< pointer to wait array */
- mutex_t mutex; /*!< possible database mutex
+ ib_mutex_t mutex; /*!< possible database mutex
protecting this data structure */
- os_mutex_t os_mutex; /*!< Possible operating system mutex
+ os_ib_mutex_t os_mutex; /*!< Possible operating system mutex
protecting the data structure.
As this data structure is used in
constructing the database mutex,
@@ -293,7 +294,7 @@ sync_cell_get_event(
ulint type = cell->request_type;
if (type == SYNC_MUTEX) {
- return(((mutex_t*) cell->wait_object)->event);
+ return(((ib_mutex_t*) cell->wait_object)->event);
} else if (type == RW_LOCK_WAIT_EX) {
return(((rw_lock_t*) cell->wait_object)->wait_ex_event);
} else { /* RW_LOCK_SHARED and RW_LOCK_EX wait on the same event */
@@ -434,7 +435,7 @@ sync_array_cell_print(
FILE* file, /*!< in: file where to print */
sync_cell_t* cell) /*!< in: sync cell */
{
- mutex_t* mutex;
+ ib_mutex_t* mutex;
rw_lock_t* rwlock;
ulint type;
ulint writer;
@@ -600,7 +601,7 @@ sync_array_detect_deadlock(
sync_cell_t* cell, /*!< in: cell to search */
ulint depth) /*!< in: recursion depth */
{
- mutex_t* mutex;
+ ib_mutex_t* mutex;
rw_lock_t* lock;
os_thread_id_t thread;
ibool ret;
@@ -622,7 +623,7 @@ sync_array_detect_deadlock(
if (cell->request_type == SYNC_MUTEX) {
- mutex = static_cast<mutex_t*>(cell->wait_object);
+ mutex = static_cast<ib_mutex_t*>(cell->wait_object);
if (mutex_get_lock_word(mutex) != 0) {
@@ -736,7 +737,7 @@ sync_arr_cell_can_wake_up(
/*======================*/
sync_cell_t* cell) /*!< in: cell to search */
{
- mutex_t* mutex;
+ ib_mutex_t* mutex;
rw_lock_t* lock;
if (cell->request_type == SYNC_MUTEX) {
@@ -902,6 +903,11 @@ sync_array_print_long_waits_low(
ibool fatal = FALSE;
double longest_diff = 0;
+ /* For huge tables, skip the check during CHECK TABLE etc... */
+ if (fatal_timeout > SRV_SEMAPHORE_WAIT_EXTENSION) {
+ return(FALSE);
+ }
+
#ifdef UNIV_DEBUG_VALGRIND
/* Increase the timeouts if running under valgrind because it executes
extremely slowly. UNIV_DEBUG_VALGRIND does not necessary mean that
@@ -1000,7 +1006,7 @@ sync_array_print_long_waits(
(ulong) os_file_n_pending_pwrites);
srv_print_innodb_monitor = TRUE;
- os_event_set(srv_timeout_event);
+ os_event_set(lock_sys->timeout_event);
os_thread_sleep(30000000);
diff --git a/storage/innobase/sync/sync0rw.cc b/storage/innobase/sync/sync0rw.cc
index dc6c510a3ed..823efecaf6b 100644
--- a/storage/innobase/sync/sync0rw.cc
+++ b/storage/innobase/sync/sync0rw.cc
@@ -57,11 +57,11 @@ lock_word == 0: Write locked
(-lock_word) is the number of readers
that hold the lock.
lock_word <= -X_LOCK_DECR: Recursively write locked. lock_word has been
- decremented by X_LOCK_DECR once for each lock,
- so the number of locks is:
- ((-lock_word) / X_LOCK_DECR) + 1
-When lock_word <= -X_LOCK_DECR, we also know that lock_word % X_LOCK_DECR == 0:
-other values of lock_word are invalid.
+ decremented by X_LOCK_DECR for the first lock
+ and the first recursive lock, then by 1 for
+ each recursive lock thereafter.
+ So the number of locks is:
+ (lock_copy == 0) ? 1 : 2 - (lock_copy + X_LOCK_DECR)
The lock_word is always read and updated atomically and consistently, so that
it always represents the state of the lock, and the state of the lock changes
@@ -124,50 +124,21 @@ wait_ex_event: A thread may only wait on the wait_ex_event after it has
performed the following actions in order:
(1) Decrement lock_word by X_LOCK_DECR.
(2) Record counter value of wait_ex_event (os_event_reset,
- called from sync_array_reserve_cell).
+ called from sync_array_reserve_cell).
(3) Verify that lock_word < 0.
(1) must come first to ensures no other threads become reader
- or next writer, and notifies unlocker that signal must be sent.
- (2) must come before (3) to ensure the signal is not missed.
+ or next writer, and notifies unlocker that signal must be sent.
+ (2) must come before (3) to ensure the signal is not missed.
These restrictions force the above ordering.
Immediately before sending the wake-up signal, we should:
Verify lock_word == 0 (waiting thread holds x_lock)
*/
-
-/** number of spin waits on rw-latches,
-resulted during shared (read) locks */
-UNIV_INTERN ib_int64_t rw_s_spin_wait_count = 0;
-/** number of spin loop rounds on rw-latches,
-resulted during shared (read) locks */
-UNIV_INTERN ib_int64_t rw_s_spin_round_count = 0;
-
-/** number of OS waits on rw-latches,
-resulted during shared (read) locks */
-UNIV_INTERN ib_int64_t rw_s_os_wait_count = 0;
-
-/** number of unlocks (that unlock shared locks),
-set only when UNIV_SYNC_PERF_STAT is defined */
-UNIV_INTERN ib_int64_t rw_s_exit_count = 0;
-
-/** number of spin waits on rw-latches,
-resulted during exclusive (write) locks */
-UNIV_INTERN ib_int64_t rw_x_spin_wait_count = 0;
-/** number of spin loop rounds on rw-latches,
-resulted during exclusive (write) locks */
-UNIV_INTERN ib_int64_t rw_x_spin_round_count = 0;
-
-/** number of OS waits on rw-latches,
-resulted during exclusive (write) locks */
-UNIV_INTERN ib_int64_t rw_x_os_wait_count = 0;
-
-/** number of unlocks (that unlock exclusive locks),
-set only when UNIV_SYNC_PERF_STAT is defined */
-UNIV_INTERN ib_int64_t rw_x_exit_count = 0;
+UNIV_INTERN rw_lock_stats_t rw_lock_stats;
/* The global list of rw-locks */
UNIV_INTERN rw_lock_list_t rw_lock_list;
-UNIV_INTERN mutex_t rw_lock_list_mutex;
+UNIV_INTERN ib_mutex_t rw_lock_list_mutex;
#ifdef UNIV_PFS_MUTEX
UNIV_INTERN mysql_pfs_key_t rw_lock_list_mutex_key;
@@ -179,7 +150,7 @@ UNIV_INTERN mysql_pfs_key_t rw_lock_mutex_key;
To modify the debug info list of an rw-lock, this mutex has to be
acquired in addition to the mutex protecting the lock. */
-UNIV_INTERN mutex_t rw_lock_debug_mutex;
+UNIV_INTERN ib_mutex_t rw_lock_debug_mutex;
# ifdef UNIV_PFS_MUTEX
UNIV_INTERN mysql_pfs_key_t rw_lock_debug_mutex_key;
@@ -258,7 +229,7 @@ rw_lock_create_func(
lock->mutex.cline = cline;
ut_d(lock->mutex.cmutex_name = cmutex_name);
- ut_d(lock->mutex.mutex_type = 1);
+ ut_d(lock->mutex.ib_mutex_type = 1);
#else /* INNODB_RW_LOCKS_USE_ATOMICS */
# ifdef UNIV_DEBUG
UT_NOT_USED(cmutex_name);
@@ -292,8 +263,8 @@ rw_lock_create_func(
lock->last_x_file_name = "not yet reserved";
lock->last_s_line = 0;
lock->last_x_line = 0;
- lock->event = os_event_create(NULL);
- lock->wait_ex_event = os_event_create(NULL);
+ lock->event = os_event_create();
+ lock->wait_ex_event = os_event_create();
mutex_enter(&rw_lock_list_mutex);
@@ -316,7 +287,7 @@ rw_lock_free_func(
rw_lock_t* lock) /*!< in: rw-lock */
{
#ifndef INNODB_RW_LOCKS_USE_ATOMICS
- mutex_t* mutex;
+ ib_mutex_t* mutex;
#endif /* !INNODB_RW_LOCKS_USE_ATOMICS */
ut_ad(rw_lock_validate(lock));
@@ -364,14 +335,15 @@ rw_lock_validate(
ulint waiters;
lint lock_word;
- ut_a(lock);
+ ut_ad(lock);
waiters = rw_lock_get_waiters(lock);
lock_word = lock->lock_word;
ut_ad(lock->magic_n == RW_LOCK_MAGIC_N);
- ut_a(waiters == 0 || waiters == 1);
- ut_a(lock_word > -X_LOCK_DECR ||(-lock_word) % X_LOCK_DECR == 0);
+ ut_ad(waiters == 0 || waiters == 1);
+ ut_ad(lock_word > -(2 * X_LOCK_DECR));
+ ut_ad(lock_word <= X_LOCK_DECR);
return(TRUE);
}
@@ -395,10 +367,16 @@ rw_lock_s_lock_spin(
ulint index; /* index of the reserved wait cell */
ulint i = 0; /* spin round count */
sync_array_t* sync_arr;
+ size_t counter_index;
+
+ /* We reuse the thread id to index into the counter, cache
+ it here for efficiency. */
+
+ counter_index = (size_t) os_thread_get_curr_id();
ut_ad(rw_lock_validate(lock));
- rw_s_spin_wait_count++; /*!< Count calls to this function */
+ rw_lock_stats.rw_s_spin_wait_count.add(counter_index, 1);
lock_loop:
/* Spin waiting for the writer field to become free */
@@ -414,19 +392,9 @@ lock_loop:
os_thread_yield();
}
- if (srv_print_latch_waits) {
- fprintf(stderr,
- "Thread %lu spin wait rw-s-lock at %p"
- " cfile %s cline %lu rnds %lu\n",
- (ulong) os_thread_pf(os_thread_get_curr_id()),
- (void*) lock,
- innobase_basename(lock->cfile_name),
- (ulong) lock->cline, (ulong) i);
- }
-
/* We try once again to obtain the lock */
if (TRUE == rw_lock_s_lock_low(lock, pass, file_name, line)) {
- rw_s_spin_round_count += i;
+ rw_lock_stats.rw_s_spin_round_count.add(counter_index, i);
return; /* Success */
} else {
@@ -435,7 +403,7 @@ lock_loop:
goto lock_loop;
}
- rw_s_spin_round_count += i;
+ rw_lock_stats.rw_s_spin_round_count.add(counter_index, i);
sync_arr = sync_array_get();
@@ -444,7 +412,7 @@ lock_loop:
file_name, line, &index);
/* Set waiters before checking lock_word to ensure wake-up
- signal is sent. This may lead to some unnecessary signals. */
+ signal is sent. This may lead to some unnecessary signals. */
rw_lock_set_waiter_flag(lock);
if (TRUE == rw_lock_s_lock_low(lock, pass, file_name, line)) {
@@ -452,19 +420,9 @@ lock_loop:
return; /* Success */
}
- if (srv_print_latch_waits) {
- fprintf(stderr,
- "Thread %lu OS wait rw-s-lock at %p"
- " cfile %s cline %lu\n",
- os_thread_pf(os_thread_get_curr_id()),
- (void*) lock,
- innobase_basename(lock->cfile_name),
- (ulong) lock->cline);
- }
-
/* these stats may not be accurate */
lock->count_os_wait++;
- rw_s_os_wait_count++;
+ rw_lock_stats.rw_s_os_wait_count.add(counter_index, 1);
sync_array_wait_event(sync_arr, index);
@@ -511,6 +469,12 @@ rw_lock_x_lock_wait(
ulint index;
ulint i = 0;
sync_array_t* sync_arr;
+ size_t counter_index;
+
+ /* We reuse the thread id to index into the counter, cache
+ it here for efficiency. */
+
+ counter_index = (size_t) os_thread_get_curr_id();
ut_ad(lock->lock_word <= 0);
@@ -524,7 +488,7 @@ rw_lock_x_lock_wait(
}
/* If there is still a reader, then go to sleep.*/
- rw_x_spin_round_count += i;
+ rw_lock_stats.rw_x_spin_round_count.add(counter_index, i);
sync_arr = sync_array_get();
@@ -539,11 +503,11 @@ rw_lock_x_lock_wait(
/* these stats may not be accurate */
lock->count_os_wait++;
- rw_x_os_wait_count++;
+ rw_lock_stats.rw_x_os_wait_count.add(counter_index, 1);
- /* Add debug info as it is needed to detect possible
- deadlock. We must add info for WAIT_EX thread for
- deadlock detection to work properly. */
+ /* Add debug info as it is needed to detect possible
+ deadlock. We must add info for WAIT_EX thread for
+ deadlock detection to work properly. */
#ifdef UNIV_SYNC_DEBUG
rw_lock_add_debug_info(lock, pass, RW_LOCK_WAIT_EX,
file_name, line);
@@ -551,16 +515,16 @@ rw_lock_x_lock_wait(
sync_array_wait_event(sync_arr, index);
#ifdef UNIV_SYNC_DEBUG
- rw_lock_remove_debug_info(lock, pass,
- RW_LOCK_WAIT_EX);
+ rw_lock_remove_debug_info(
+ lock, pass, RW_LOCK_WAIT_EX);
#endif
- /* It is possible to wake when lock_word < 0.
- We must pass the while-loop check to proceed.*/
+ /* It is possible to wake when lock_word < 0.
+ We must pass the while-loop check to proceed.*/
} else {
sync_array_free_cell(sync_arr, index);
}
}
- rw_x_spin_round_count += i;
+ rw_lock_stats.rw_x_spin_round_count.add(counter_index, i);
}
/******************************************************************//**
@@ -576,8 +540,6 @@ rw_lock_x_lock_low(
const char* file_name,/*!< in: file name where lock requested */
ulint line) /*!< in: line where requested */
{
- os_thread_id_t curr_thread = os_thread_get_curr_id();
-
if (rw_lock_lock_word_decr(lock, X_LOCK_DECR)) {
/* lock->recursive also tells us if the writer_thread
@@ -587,8 +549,8 @@ rw_lock_x_lock_low(
ut_a(!lock->recursive);
/* Decrement occurred: we are writer or next-writer. */
- rw_lock_set_writer_id_and_recursion_flag(lock,
- pass ? FALSE : TRUE);
+ rw_lock_set_writer_id_and_recursion_flag(
+ lock, pass ? FALSE : TRUE);
rw_lock_x_lock_wait(lock,
#ifdef UNIV_SYNC_DEBUG
@@ -597,19 +559,25 @@ rw_lock_x_lock_low(
file_name, line);
} else {
+ os_thread_id_t thread_id = os_thread_get_curr_id();
+
/* Decrement failed: relock or failed lock */
if (!pass && lock->recursive
- && os_thread_eq(lock->writer_thread, curr_thread)) {
+ && os_thread_eq(lock->writer_thread, thread_id)) {
/* Relock */
- lock->lock_word -= X_LOCK_DECR;
+ if (lock->lock_word == 0) {
+ lock->lock_word -= X_LOCK_DECR;
+ } else {
+ --lock->lock_word;
+ }
+
} else {
/* Another thread locked before us */
return(FALSE);
}
}
#ifdef UNIV_SYNC_DEBUG
- rw_lock_add_debug_info(lock, pass, RW_LOCK_EX,
- file_name, line);
+ rw_lock_add_debug_info(lock, pass, RW_LOCK_EX, file_name, line);
#endif
lock->last_x_file_name = file_name;
lock->last_x_line = (unsigned int) line;
@@ -640,6 +608,12 @@ rw_lock_x_lock_func(
ulint index; /*!< index of the reserved wait cell */
sync_array_t* sync_arr;
ibool spinning = FALSE;
+ size_t counter_index;
+
+ /* We reuse the thread id to index into the counter, cache
+ it here for efficiency. */
+
+ counter_index = (size_t) os_thread_get_curr_id();
ut_ad(rw_lock_validate(lock));
#ifdef UNIV_SYNC_DEBUG
@@ -651,15 +625,17 @@ rw_lock_x_lock_func(
lock_loop:
if (rw_lock_x_lock_low(lock, pass, file_name, line)) {
- rw_x_spin_round_count += i;
+ rw_lock_stats.rw_x_spin_round_count.add(counter_index, i);
return; /* Locking succeeded */
} else {
- if (!spinning) {
- spinning = TRUE;
- rw_x_spin_wait_count++;
+ if (!spinning) {
+ spinning = TRUE;
+
+ rw_lock_stats.rw_x_spin_wait_count.add(
+ counter_index, 1);
}
/* Spin waiting for the lock_word to become free */
@@ -679,16 +655,7 @@ lock_loop:
}
}
- rw_x_spin_round_count += i;
-
- if (srv_print_latch_waits) {
- fprintf(stderr,
- "Thread %lu spin wait rw-x-lock at %p"
- " cfile %s cline %lu rnds %lu\n",
- os_thread_pf(os_thread_get_curr_id()), (void*) lock,
- innobase_basename(lock->cfile_name),
- (ulong) lock->cline, (ulong) i);
- }
+ rw_lock_stats.rw_x_spin_round_count.add(counter_index, i);
sync_arr = sync_array_get();
@@ -704,18 +671,9 @@ lock_loop:
return; /* Locking succeeded */
}
- if (srv_print_latch_waits) {
- fprintf(stderr,
- "Thread %lu OS wait for rw-x-lock at %p"
- " cfile %s cline %lu\n",
- os_thread_pf(os_thread_get_curr_id()), (void*) lock,
- innobase_basename(lock->cfile_name),
- (ulong) lock->cline);
- }
-
/* these stats may not be accurate */
lock->count_os_wait++;
- rw_x_os_wait_count++;
+ rw_lock_stats.rw_x_os_wait_count.add(counter_index, 1);
sync_array_wait_event(sync_arr, index);
diff --git a/storage/innobase/sync/sync0sync.cc b/storage/innobase/sync/sync0sync.cc
index c492edf89b6..d6f7325e2a3 100644
--- a/storage/innobase/sync/sync0sync.cc
+++ b/storage/innobase/sync/sync0sync.cc
@@ -171,25 +171,25 @@ Q.E.D. */
/** The number of iterations in the mutex_spin_wait() spin loop.
Intended for performance monitoring. */
-static ib_int64_t mutex_spin_round_count = 0;
+static ib_counter_t<ib_int64_t, IB_N_SLOTS> mutex_spin_round_count;
/** The number of mutex_spin_wait() calls. Intended for
performance monitoring. */
-static ib_int64_t mutex_spin_wait_count = 0;
+static ib_counter_t<ib_int64_t, IB_N_SLOTS> mutex_spin_wait_count;
/** The number of OS waits in mutex_spin_wait(). Intended for
performance monitoring. */
-static ib_int64_t mutex_os_wait_count = 0;
+static ib_counter_t<ib_int64_t, IB_N_SLOTS> mutex_os_wait_count;
/** The number of mutex_exit() calls. Intended for performance
monitoring. */
-UNIV_INTERN ib_int64_t mutex_exit_count = 0;
+UNIV_INTERN ib_int64_t mutex_exit_count;
/** This variable is set to TRUE when sync_init is called */
UNIV_INTERN ibool sync_initialized = FALSE;
#ifdef UNIV_SYNC_DEBUG
/** An acquired mutex or rw-lock and its level in the latching order */
-typedef struct sync_level_struct sync_level_t;
+struct sync_level_t;
/** Mutexes or rw-locks held by a thread */
-typedef struct sync_thread_struct sync_thread_t;
+struct sync_thread_t;
/** The latch levels currently owned by threads are stored in this data
structure; the size of this array is OS_THREAD_MAX_N */
@@ -197,7 +197,7 @@ structure; the size of this array is OS_THREAD_MAX_N */
UNIV_INTERN sync_thread_t* sync_thread_level_arrays;
/** Mutex protecting sync_thread_level_arrays */
-UNIV_INTERN mutex_t sync_thread_mutex;
+UNIV_INTERN ib_mutex_t sync_thread_mutex;
# ifdef UNIV_PFS_MUTEX
UNIV_INTERN mysql_pfs_key_t sync_thread_mutex_key;
@@ -208,7 +208,7 @@ UNIV_INTERN mysql_pfs_key_t sync_thread_mutex_key;
UNIV_INTERN ut_list_base_node_t mutex_list;
/** Mutex protecting the mutex_list variable */
-UNIV_INTERN mutex_t mutex_list_mutex;
+UNIV_INTERN ib_mutex_t mutex_list_mutex;
#ifdef UNIV_PFS_MUTEX
UNIV_INTERN mysql_pfs_key_t mutex_list_mutex_key;
@@ -221,10 +221,8 @@ UNIV_INTERN ibool sync_order_checks_on = FALSE;
/** Number of slots reserved for each OS thread in the sync level array */
static const ulint SYNC_THREAD_N_LEVELS = 10000;
-typedef struct sync_arr_struct sync_arr_t;
-
/** Array for tracking sync levels per thread. */
-struct sync_arr_struct {
+struct sync_arr_t {
ulint in_use; /*!< Number of active cells */
ulint n_elems; /*!< Number of elements in the array */
ulint max_elems; /*!< Maximum elements */
@@ -234,14 +232,14 @@ struct sync_arr_struct {
};
/** Mutexes or rw-locks held by a thread */
-struct sync_thread_struct{
+struct sync_thread_t{
os_thread_id_t id; /*!< OS thread id */
sync_arr_t* levels; /*!< level array for this thread; if
this is NULL this slot is unused */
};
/** An acquired mutex or rw-lock and its level in the latching order */
-struct sync_level_struct{
+struct sync_level_t{
void* latch; /*!< pointer to a mutex or an
rw-lock; NULL means that
the slot is empty */
@@ -264,7 +262,7 @@ UNIV_INTERN
void
mutex_create_func(
/*==============*/
- mutex_t* mutex, /*!< in: pointer to memory */
+ ib_mutex_t* mutex, /*!< in: pointer to memory */
#ifdef UNIV_DEBUG
const char* cmutex_name, /*!< in: mutex name */
# ifdef UNIV_SYNC_DEBUG
@@ -280,7 +278,7 @@ mutex_create_func(
os_fast_mutex_init(PFS_NOT_INSTRUMENTED, &mutex->os_fast_mutex);
mutex->lock_word = 0;
#endif
- mutex->event = os_event_create(NULL);
+ mutex->event = os_event_create();
mutex_set_waiters(mutex, 0);
#ifdef UNIV_DEBUG
mutex->magic_n = MUTEX_MAGIC_N;
@@ -293,25 +291,15 @@ mutex_create_func(
mutex->cfile_name = cfile_name;
mutex->cline = cline;
mutex->count_os_wait = 0;
-#ifdef UNIV_DEBUG
- mutex->cmutex_name= cmutex_name;
- mutex->count_using= 0;
- mutex->mutex_type= 0;
- mutex->lspent_time= 0;
- mutex->lmax_spent_time= 0;
- mutex->count_spin_loop= 0;
- mutex->count_spin_rounds= 0;
- mutex->count_os_yield= 0;
-#endif /* UNIV_DEBUG */
/* Check that lock_word is aligned; this is important on Intel */
ut_ad(((ulint)(&(mutex->lock_word))) % 4 == 0);
/* NOTE! The very first mutexes are not put to the mutex list */
- if (mutex == &mutex_list_mutex
+ if ((mutex == &mutex_list_mutex)
#ifdef UNIV_SYNC_DEBUG
- || mutex == &sync_thread_mutex
+ || (mutex == &sync_thread_mutex)
#endif /* UNIV_SYNC_DEBUG */
) {
@@ -337,7 +325,7 @@ UNIV_INTERN
void
mutex_free_func(
/*============*/
- mutex_t* mutex) /*!< in: mutex */
+ ib_mutex_t* mutex) /*!< in: mutex */
{
ut_ad(mutex_validate(mutex));
ut_a(mutex_get_lock_word(mutex) == 0);
@@ -397,7 +385,7 @@ UNIV_INTERN
ulint
mutex_enter_nowait_func(
/*====================*/
- mutex_t* mutex, /*!< in: pointer to mutex */
+ ib_mutex_t* mutex, /*!< in: pointer to mutex */
const char* file_name __attribute__((unused)),
/*!< in: file name where mutex
requested */
@@ -406,7 +394,7 @@ mutex_enter_nowait_func(
{
ut_ad(mutex_validate(mutex));
- if (!mutex_test_and_set(mutex)) {
+ if (!ib_mutex_test_and_set(mutex)) {
ut_d(mutex->thread_id = os_thread_get_curr_id());
#ifdef UNIV_SYNC_DEBUG
@@ -427,7 +415,7 @@ UNIV_INTERN
ibool
mutex_validate(
/*===========*/
- const mutex_t* mutex) /*!< in: mutex */
+ const ib_mutex_t* mutex) /*!< in: mutex */
{
ut_a(mutex);
ut_a(mutex->magic_n == MUTEX_MAGIC_N);
@@ -443,7 +431,7 @@ UNIV_INTERN
ibool
mutex_own(
/*======*/
- const mutex_t* mutex) /*!< in: mutex */
+ const ib_mutex_t* mutex) /*!< in: mutex */
{
ut_ad(mutex_validate(mutex));
@@ -458,7 +446,7 @@ UNIV_INTERN
void
mutex_set_waiters(
/*==============*/
- mutex_t* mutex, /*!< in: mutex */
+ ib_mutex_t* mutex, /*!< in: mutex */
ulint n) /*!< in: value to set */
{
volatile ulint* ptr; /* declared volatile to ensure that
@@ -479,7 +467,7 @@ UNIV_INTERN
void
mutex_spin_wait(
/*============*/
- mutex_t* mutex, /*!< in: pointer to mutex */
+ ib_mutex_t* mutex, /*!< in: pointer to mutex */
const char* file_name, /*!< in: file name where mutex
requested */
ulint line) /*!< in: line where requested */
@@ -487,6 +475,9 @@ mutex_spin_wait(
ulint i; /* spin round count */
ulint index; /* index of the reserved wait cell */
sync_array_t* sync_arr;
+ size_t counter_index;
+
+ counter_index = (size_t) os_thread_get_curr_id();
ut_ad(mutex);
@@ -494,7 +485,7 @@ mutex_spin_wait(
isn't exact. Moved out of ifdef that follows because we are willing
to sacrifice the cost of counting this as the data is valuable.
Count the number of calls to mutex_spin_wait. */
- mutex_spin_wait_count++;
+ mutex_spin_wait_count.add(counter_index, 1);
mutex_loop:
@@ -507,7 +498,6 @@ mutex_loop:
a memory word. */
spin_loop:
- ut_d(mutex->count_spin_loop++);
while (mutex_get_lock_word(mutex) != 0 && i < SYNC_SPIN_ROUNDS) {
if (srv_spin_wait_delay) {
@@ -518,26 +508,12 @@ spin_loop:
}
if (i == SYNC_SPIN_ROUNDS) {
-#ifdef UNIV_DEBUG
- mutex->count_os_yield++;
-#endif /* UNIV_DEBUG */
os_thread_yield();
}
-#ifdef UNIV_SRV_PRINT_LATCH_WAITS
- fprintf(stderr,
- "Thread %lu spin wait mutex at %p"
- " cfile %s cline %lu rnds %lu\n",
- (ulong) os_thread_pf(os_thread_get_curr_id()), (void*) mutex,
- innobase_basename(mutex->cfile_name),
- (ulong) mutex->cline, (ulong) i);
-#endif
-
- mutex_spin_round_count += i;
-
- ut_d(mutex->count_spin_rounds += i);
+ mutex_spin_round_count.add(counter_index, i);
- if (mutex_test_and_set(mutex) == 0) {
+ if (ib_mutex_test_and_set(mutex) == 0) {
/* Succeeded! */
ut_d(mutex->thread_id = os_thread_get_curr_id());
@@ -550,7 +526,7 @@ spin_loop:
/* We may end up with a situation where lock_word is 0 but the OS
fast mutex is still reserved. On FreeBSD the OS does not seem to
schedule a thread which is constantly calling pthread_mutex_trylock
- (in mutex_test_and_set implementation). Then we could end up
+ (in ib_mutex_test_and_set implementation). Then we could end up
spinning here indefinitely. The following 'i++' stops this infinite
spin. */
@@ -575,7 +551,7 @@ spin_loop:
/* Try to reserve still a few times */
for (i = 0; i < 4; i++) {
- if (mutex_test_and_set(mutex) == 0) {
+ if (ib_mutex_test_and_set(mutex) == 0) {
/* Succeeded! Free the reserved wait cell */
sync_array_free_cell(sync_arr, index);
@@ -585,13 +561,6 @@ spin_loop:
mutex_set_debug_info(mutex, file_name, line);
#endif
-#ifdef UNIV_SRV_PRINT_LATCH_WAITS
- fprintf(stderr, "Thread %lu spin wait succeeds at 2:"
- " mutex at %p\n",
- (ulong) os_thread_pf(os_thread_get_curr_id()),
- (void*) mutex);
-#endif
-
return;
/* Note that in this case we leave the waiters field
@@ -604,19 +573,12 @@ spin_loop:
after the change in the wait array and the waiters field was made.
Now there is no risk of infinite wait on the event. */
-#ifdef UNIV_SRV_PRINT_LATCH_WAITS
- fprintf(stderr,
- "Thread %lu OS wait mutex at %p cfile %s cline %lu rnds %lu\n",
- (ulong) os_thread_pf(os_thread_get_curr_id()), (void*) mutex,
- innobase_basename(mutex->cfile_name),
- (ulong) mutex->cline, (ulong) i);
-#endif
-
- mutex_os_wait_count++;
+ mutex_os_wait_count.add(counter_index, 1);
mutex->count_os_wait++;
sync_array_wait_event(sync_arr, index);
+
goto mutex_loop;
}
@@ -626,7 +588,7 @@ UNIV_INTERN
void
mutex_signal_object(
/*================*/
- mutex_t* mutex) /*!< in: mutex */
+ ib_mutex_t* mutex) /*!< in: mutex */
{
mutex_set_waiters(mutex, 0);
@@ -643,7 +605,7 @@ UNIV_INTERN
void
mutex_set_debug_info(
/*=================*/
- mutex_t* mutex, /*!< in: mutex */
+ ib_mutex_t* mutex, /*!< in: mutex */
const char* file_name, /*!< in: file where requested */
ulint line) /*!< in: line where requested */
{
@@ -662,7 +624,7 @@ UNIV_INTERN
void
mutex_get_debug_info(
/*=================*/
- mutex_t* mutex, /*!< in: mutex */
+ ib_mutex_t* mutex, /*!< in: mutex */
const char** file_name, /*!< out: file where requested */
ulint* line, /*!< out: line where requested */
os_thread_id_t* thread_id) /*!< out: id of the thread which owns
@@ -683,7 +645,7 @@ mutex_list_print_info(
/*==================*/
FILE* file) /*!< in: file where to print */
{
- mutex_t* mutex;
+ ib_mutex_t* mutex;
const char* file_name;
ulint line;
os_thread_id_t thread_id;
@@ -726,7 +688,7 @@ ulint
mutex_n_reserved(void)
/*==================*/
{
- mutex_t* mutex;
+ ib_mutex_t* mutex;
ulint count = 0;
mutex_enter(&mutex_list_mutex);
@@ -825,9 +787,9 @@ sync_print_warning(
const sync_level_t* slot) /*!< in: slot for which to
print warning */
{
- mutex_t* mutex;
+ ib_mutex_t* mutex;
- mutex = static_cast<mutex_t*>(slot->latch);
+ mutex = static_cast<ib_mutex_t*>(slot->latch);
if (mutex->magic_n == MUTEX_MAGIC_N) {
fprintf(stderr,
@@ -1200,6 +1162,8 @@ sync_thread_add_level(
case SYNC_TRX_I_S_RWLOCK:
case SYNC_TRX_I_S_LAST_READ:
case SYNC_IBUF_MUTEX:
+ case SYNC_INDEX_ONLINE_LOG:
+ case SYNC_STATS_AUTO_RECALC:
if (!sync_thread_levels_g(array, level, TRUE)) {
fprintf(stderr,
"InnoDB: sync_thread_levels_g(array, %lu)"
@@ -1448,7 +1412,7 @@ sync_thread_reset_level(
return(TRUE);
}
- if (((mutex_t*) latch)->magic_n != MUTEX_MAGIC_N) {
+ if (((ib_mutex_t*) latch)->magic_n != MUTEX_MAGIC_N) {
rw_lock_t* rw_lock;
rw_lock = (rw_lock_t*) latch;
@@ -1511,7 +1475,7 @@ sync_init(void)
mutex_create(rw_lock_debug_mutex_key, &rw_lock_debug_mutex,
SYNC_NO_ORDER_CHECK);
- rw_lock_debug_event = os_event_create(NULL);
+ rw_lock_debug_event = os_event_create();
rw_lock_debug_waiters = FALSE;
#endif /* UNIV_SYNC_DEBUG */
}
@@ -1552,7 +1516,7 @@ void
sync_close(void)
/*===========*/
{
- mutex_t* mutex;
+ ib_mutex_t* mutex;
sync_array_close();
@@ -1569,7 +1533,7 @@ sync_close(void)
mutex_free(mutex);
- mutex = UT_LIST_GET_FIRST(mutex_list);
+ mutex = UT_LIST_GET_FIRST(mutex_list);
}
mutex_free(&mutex_list_mutex);
@@ -1593,13 +1557,6 @@ sync_print_wait_info(
/*=================*/
FILE* file) /*!< in: file where to print */
{
-#ifdef UNIV_SYNC_DEBUG
- fprintf(file,
- "Mutex exits "UINT64PF", "
- "rws exits "UINT64PF", rwx exits "UINT64PF"\n",
- mutex_exit_count, rw_s_exit_count, rw_x_exit_count);
-#endif
-
fprintf(file,
"Mutex spin waits "UINT64PF", rounds "UINT64PF", "
"OS waits "UINT64PF"\n"
@@ -1607,25 +1564,27 @@ sync_print_wait_info(
"OS waits "UINT64PF"\n"
"RW-excl spins "UINT64PF", rounds "UINT64PF", "
"OS waits "UINT64PF"\n",
- mutex_spin_wait_count,
- mutex_spin_round_count,
- mutex_os_wait_count,
- rw_s_spin_wait_count,
- rw_s_spin_round_count,
- rw_s_os_wait_count,
- rw_x_spin_wait_count,
- rw_x_spin_round_count,
- rw_x_os_wait_count);
+ (ib_uint64_t) mutex_spin_wait_count,
+ (ib_uint64_t) mutex_spin_round_count,
+ (ib_uint64_t) mutex_os_wait_count,
+ (ib_uint64_t) rw_lock_stats.rw_s_spin_wait_count,
+ (ib_uint64_t) rw_lock_stats.rw_s_spin_round_count,
+ (ib_uint64_t) rw_lock_stats.rw_s_os_wait_count,
+ (ib_uint64_t) rw_lock_stats.rw_x_spin_wait_count,
+ (ib_uint64_t) rw_lock_stats.rw_x_spin_round_count,
+ (ib_uint64_t) rw_lock_stats.rw_x_os_wait_count);
fprintf(file,
"Spin rounds per wait: %.2f mutex, %.2f RW-shared, "
"%.2f RW-excl\n",
(double) mutex_spin_round_count /
(mutex_spin_wait_count ? mutex_spin_wait_count : 1),
- (double) rw_s_spin_round_count /
- (rw_s_spin_wait_count ? rw_s_spin_wait_count : 1),
- (double) rw_x_spin_round_count /
- (rw_x_spin_wait_count ? rw_x_spin_wait_count : 1));
+ (double) rw_lock_stats.rw_s_spin_round_count /
+ (rw_lock_stats.rw_s_spin_wait_count
+ ? rw_lock_stats.rw_s_spin_wait_count : 1),
+ (double) rw_lock_stats.rw_x_spin_round_count /
+ (rw_lock_stats.rw_x_spin_wait_count
+ ? rw_lock_stats.rw_x_spin_wait_count : 1));
}
/*******************************************************************//**
diff --git a/storage/innobase/trx/trx0i_s.cc b/storage/innobase/trx/trx0i_s.cc
index cbf90afae0d..f6360562ae7 100644
--- a/storage/innobase/trx/trx0i_s.cc
+++ b/storage/innobase/trx/trx0i_s.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 2007, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 2007, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -131,25 +131,25 @@ noop because it will be empty. */
/** Memory for each table in the intermediate buffer is allocated in
separate chunks. These chunks are considered to be concatenated to
represent one flat array of rows. */
-typedef struct i_s_mem_chunk_struct {
+struct i_s_mem_chunk_t {
ulint offset; /*!< offset, in number of rows */
ulint rows_allocd; /*!< the size of this chunk, in number
of rows */
void* base; /*!< start of the chunk */
-} i_s_mem_chunk_t;
+};
/** This represents one table's cache. */
-typedef struct i_s_table_cache_struct {
+struct i_s_table_cache_t {
ulint rows_used; /*!< number of used rows */
ulint rows_allocd; /*!< number of allocated rows */
ulint row_size; /*!< size of a single row */
i_s_mem_chunk_t chunks[MEM_CHUNKS_IN_TABLE_CACHE]; /*!< array of
memory chunks that stores the
rows */
-} i_s_table_cache_t;
+};
/** This structure describes the intermediate buffer */
-struct trx_i_s_cache_struct {
+struct trx_i_s_cache_t {
rw_lock_t rw_lock; /*!< read-write lock protecting
the rest of this structure */
ullint last_read; /*!< last time the cache was read;
@@ -501,8 +501,7 @@ fill_trx_row(
goto thd_done;
}
- row->trx_mysql_thread_id = thd_get_thread_id(
- static_cast<const THD*>(trx->mysql_thd));
+ row->trx_mysql_thread_id = thd_get_thread_id(trx->mysql_thd);
stmt = innobase_get_stmt(trx->mysql_thd, &stmt_len);
@@ -1290,7 +1289,10 @@ fetch_data_into_cache_low(
for (trx = UT_LIST_GET_FIRST(*trx_list);
trx != NULL;
- trx = UT_LIST_GET_NEXT(trx_list, trx)) {
+ trx =
+ (trx_list == &trx_sys->mysql_trx_list
+ ? UT_LIST_GET_NEXT(mysql_trx_list, trx)
+ : UT_LIST_GET_NEXT(trx_list, trx))) {
i_s_trx_row_t* trx_row;
i_s_locks_row_t* requested_lock_row;
diff --git a/storage/innobase/trx/trx0purge.cc b/storage/innobase/trx/trx0purge.cc
index d050e7461e7..f6d8dfc6b40 100644
--- a/storage/innobase/trx/trx0purge.cc
+++ b/storage/innobase/trx/trx0purge.cc
@@ -73,20 +73,6 @@ UNIV_INTERN mysql_pfs_key_t purge_sys_bh_mutex_key;
UNIV_INTERN my_bool srv_purge_view_update_only_debug;
#endif /* UNIV_DEBUG */
-/********************************************************************//**
-Fetches the next undo log record from the history list to purge. It must be
-released with the corresponding release function.
-@return copy of an undo log record or pointer to trx_purge_dummy_rec,
-if the whole undo log can skipped in purge; NULL if none left */
-static
-trx_undo_rec_t*
-trx_purge_fetch_next_rec(
-/*=====================*/
- roll_ptr_t* roll_ptr, /*!< out: roll pointer to undo record */
- ulint* n_pages_handled,/*!< in/out: number of UNDO log pages
- handled */
- mem_heap_t* heap); /*!< in: memory heap where copied */
-
/****************************************************************//**
Builds a purge 'query' graph. The actual purge is performed by executing
this query graph.
@@ -133,7 +119,7 @@ trx_purge_sys_create(
purge_sys = static_cast<trx_purge_t*>(mem_zalloc(sizeof(*purge_sys)));
purge_sys->state = PURGE_STATE_INIT;
- purge_sys->event = os_event_create("purge");
+ purge_sys->event = os_event_create();
/* Take ownership of ib_bh, we are responsible for freeing it. */
purge_sys->ib_bh = ib_bh;
@@ -543,7 +529,6 @@ trx_purge_truncate_history(
}
}
-
/***********************************************************************//**
Updates the last not yet purged history log info in rseg when we have purged
a whole undo log. Advances also purge_sys->purge_trx_no past the purged log. */
@@ -707,7 +692,7 @@ trx_purge_get_rseg_with_min_trx_id(
/* We assume in purge of externally stored fields that space id is
in the range of UNDO tablespace space ids */
- ut_a(purge_sys->rseg->space <= srv_undo_tablespaces);
+ ut_a(purge_sys->rseg->space <= srv_undo_tablespaces_open);
zip_size = purge_sys->rseg->zip_size;
@@ -928,7 +913,7 @@ Fetches the next undo log record from the history list to purge. It must be
released with the corresponding release function.
@return copy of an undo log record or pointer to trx_purge_dummy_rec,
if the whole undo log can skipped in purge; NULL if none left */
-static
+static __attribute__((warn_unused_result, nonnull))
trx_undo_rec_t*
trx_purge_fetch_next_rec(
/*=====================*/
@@ -1270,6 +1255,14 @@ run_synchronously:
ut_a(purge_sys->n_submitted == purge_sys->n_completed);
+#ifdef UNIV_DEBUG
+ if (purge_sys->limit.trx_no == 0) {
+ purge_sys->done = purge_sys->iter;
+ } else {
+ purge_sys->done = purge_sys->limit;
+ }
+#endif /* UNIV_DEBUG */
+
if (truncate) {
trx_purge_truncate();
}
@@ -1315,14 +1308,14 @@ trx_purge_stop(void)
ut_a(purge_sys->state != PURGE_STATE_INIT);
ut_a(purge_sys->state != PURGE_STATE_EXIT);
+ ut_a(purge_sys->state != PURGE_STATE_DISABLED);
++purge_sys->n_stop;
state = purge_sys->state;
if (state == PURGE_STATE_RUN) {
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Stopping purge.\n");
+ ib_logf(IB_LOG_LEVEL_INFO, "Stopping purge");
/* We need to wakeup the purge thread in case it is suspended,
so that it can acknowledge the state change. */
@@ -1339,6 +1332,28 @@ trx_purge_stop(void)
/* Wait for purge coordinator to signal that it
is suspended. */
os_event_wait_low(purge_sys->event, sig_count);
+ } else {
+ bool once = true;
+
+ rw_lock_x_lock(&purge_sys->latch);
+
+ /* Wait for purge to signal that it has actually stopped. */
+ while (purge_sys->running) {
+
+ if (once) {
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Waiting for purge to stop");
+ once = false;
+ }
+
+ rw_lock_x_unlock(&purge_sys->latch);
+
+ os_thread_sleep(10000);
+
+ rw_lock_x_lock(&purge_sys->latch);
+ }
+
+ rw_lock_x_unlock(&purge_sys->latch);
}
MONITOR_INC_VALUE(MONITOR_PURGE_STOP_COUNT, 1);
@@ -1353,8 +1368,16 @@ trx_purge_run(void)
{
rw_lock_x_lock(&purge_sys->latch);
- ut_a(purge_sys->state != PURGE_STATE_INIT);
- ut_a(purge_sys->state != PURGE_STATE_EXIT);
+ switch(purge_sys->state) {
+ case PURGE_STATE_INIT:
+ case PURGE_STATE_EXIT:
+ case PURGE_STATE_DISABLED:
+ ut_error;
+
+ case PURGE_STATE_RUN:
+ case PURGE_STATE_STOP:
+ break;
+ }
if (purge_sys->n_stop > 0) {
@@ -1364,8 +1387,7 @@ trx_purge_run(void)
if (purge_sys->n_stop == 0) {
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: Resuming purge.\n");
+ ib_logf(IB_LOG_LEVEL_INFO, "Resuming purge");
purge_sys->state = PURGE_STATE_RUN;
}
diff --git a/storage/innobase/trx/trx0rec.cc b/storage/innobase/trx/trx0rec.cc
index 203139f23fd..a698b37c2a6 100644
--- a/storage/innobase/trx/trx0rec.cc
+++ b/storage/innobase/trx/trx0rec.cc
@@ -287,7 +287,7 @@ trx_undo_rec_get_pars(
TRX_UNDO_INSERT_REC, ... */
ulint* cmpl_info, /*!< out: compiler info, relevant only
for update type records */
- ibool* updated_extern, /*!< out: TRUE if we updated an
+ bool* updated_extern, /*!< out: true if we updated an
externally stored fild */
undo_no_t* undo_no, /*!< out: undo log record number */
table_id_t* table_id) /*!< out: table id */
@@ -300,12 +300,8 @@ trx_undo_rec_get_pars(
type_cmpl = mach_read_from_1(ptr);
ptr++;
- if (type_cmpl & TRX_UNDO_UPD_EXTERN) {
- *updated_extern = TRUE;
- type_cmpl -= TRX_UNDO_UPD_EXTERN;
- } else {
- *updated_extern = FALSE;
- }
+ *updated_extern = !!(type_cmpl & TRX_UNDO_UPD_EXTERN);
+ type_cmpl &= ~TRX_UNDO_UPD_EXTERN;
*type = type_cmpl & (TRX_UNDO_CMPL_INFO_MULT - 1);
*cmpl_info = type_cmpl / TRX_UNDO_CMPL_INFO_MULT;
@@ -588,6 +584,7 @@ trx_undo_page_report_modify(
/* Store first some general parameters to the undo log */
if (!update) {
+ ut_ad(!rec_get_deleted_flag(rec, dict_table_is_comp(table)));
type_cmpl = TRX_UNDO_DEL_MARK_REC;
} else if (rec_get_deleted_flag(rec, dict_table_is_comp(table))) {
type_cmpl = TRX_UNDO_UPD_DEL_REC;
@@ -1040,8 +1037,9 @@ trx_undo_update_rec_get_update(
}
/*******************************************************************//**
-Builds a partial row from an update undo log record. It contains the
-columns which occur as ordering in any index of the table.
+Builds a partial row from an update undo log record, for purge.
+It contains the columns which occur as ordering in any index of the table.
+Any missing columns are indicated by col->mtype == DATA_MISSING.
@return pointer to remaining part of undo record */
UNIV_INTERN
byte*
@@ -1075,7 +1073,12 @@ trx_undo_rec_get_partial_row(
*row = dtuple_create(heap, row_len);
- dict_table_copy_types(*row, index->table);
+ /* Mark all columns in the row uninitialized, so that
+ we can distinguish missing fields from fields that are SQL NULL. */
+ for (ulint i = 0; i < row_len; i++) {
+ dfield_get_type(dtuple_get_nth_field(*row, i))
+ ->mtype = DATA_MISSING;
+ }
end_ptr = ptr + mach_read_from_2(ptr);
ptr += 2;
@@ -1097,7 +1100,9 @@ trx_undo_rec_get_partial_row(
ptr = trx_undo_rec_get_col_val(ptr, &field, &len, &orig_len);
dfield = dtuple_get_nth_field(*row, col_no);
-
+ dict_col_copy_type(
+ dict_table_get_nth_col(index->table, col_no),
+ dfield_get_type(dfield));
dfield_set_data(dfield, field, len);
if (len != UNIV_SQL_NULL
@@ -1177,7 +1182,7 @@ transaction and in consistent reads that must look to the history of this
transaction.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
trx_undo_report_row_operation(
/*==========================*/
ulint flags, /*!< in: if BTR_NO_UNDO_LOG_FLAG bit is
@@ -1196,6 +1201,7 @@ trx_undo_report_row_operation(
const rec_t* rec, /*!< in: in case of an update or delete
marking, the record in the clustered
index, otherwise NULL */
+ const ulint* offsets, /*!< in: rec_get_offsets(rec) */
roll_ptr_t* roll_ptr) /*!< out: rollback pointer to the
inserted undo log record,
0 if BTR_NO_UNDO_LOG
@@ -1207,16 +1213,14 @@ trx_undo_report_row_operation(
buf_block_t* undo_block;
trx_rseg_t* rseg;
mtr_t mtr;
- ulint err = DB_SUCCESS;
- mem_heap_t* heap = NULL;
- ulint offsets_[REC_OFFS_NORMAL_SIZE];
- ulint* offsets = offsets_;
+ dberr_t err = DB_SUCCESS;
#ifdef UNIV_DEBUG
int loop_count = 0;
#endif /* UNIV_DEBUG */
- rec_offs_init(offsets_);
+ ut_ad(!srv_read_only_mode);
ut_a(dict_index_is_clust(index));
+ ut_ad(!rec || rec_offs_validate(rec, index, offsets));
if (flags & BTR_NO_UNDO_LOG_FLAG) {
@@ -1230,6 +1234,17 @@ trx_undo_report_row_operation(
|| (clust_entry && !update && !rec));
trx = thr_get_trx(thr);
+
+ /* This table is visible only to the session that created it. */
+ if (trx->read_only) {
+ ut_ad(!srv_read_only_mode);
+ /* MySQL should block writes to non-temporary tables. */
+ ut_a(DICT_TF2_FLAG_IS_SET(index->table, DICT_TF2_TEMPORARY));
+ if (trx->rseg == 0) {
+ trx_assign_rseg(trx);
+ }
+ }
+
rseg = trx->rseg;
mtr_start(&mtr);
@@ -1272,8 +1287,6 @@ trx_undo_report_row_operation(
}
ut_ad(err == DB_SUCCESS);
- offsets = rec_get_offsets(rec, index, offsets,
- ULINT_UNDEFINED, &heap);
}
page_no = undo->last_page_no;
@@ -1352,8 +1365,7 @@ trx_undo_report_row_operation(
*roll_ptr = trx_undo_build_roll_ptr(
op_type == TRX_UNDO_INSERT_OP,
rseg->id, page_no, offset);
- err = DB_SUCCESS;
- goto func_exit;
+ return(DB_SUCCESS);
}
ut_ad(page_no == undo->last_page_no);
@@ -1380,10 +1392,6 @@ trx_undo_report_row_operation(
err_exit:
mutex_exit(&trx->undo_mutex);
mtr_commit(&mtr);
-func_exit:
- if (UNIV_LIKELY_NULL(heap)) {
- mem_heap_free(heap);
- }
return(err);
}
@@ -1428,39 +1436,34 @@ trx_undo_get_undo_rec_low(
/******************************************************************//**
Copies an undo record to heap.
-NOTE: the caller must have latches on the clustered index page and
-purge_view.
+NOTE: the caller must have latches on the clustered index page.
-@return DB_SUCCESS, or DB_MISSING_HISTORY if the undo log has been
-truncated and we cannot fetch the old version */
-static
-ulint
+@retval true if the undo log has been
+truncated and we cannot fetch the old version
+@retval false if the undo log record is available */
+static __attribute__((nonnull, warn_unused_result))
+bool
trx_undo_get_undo_rec(
/*==================*/
roll_ptr_t roll_ptr, /*!< in: roll pointer to record */
trx_id_t trx_id, /*!< in: id of the trx that generated
the roll pointer: it points to an
undo log of this transaction */
- trx_undo_rec_t** undo_rec, /*!< out, own: copy of the record */
+ trx_undo_rec_t**undo_rec, /*!< out, own: copy of the record */
mem_heap_t* heap) /*!< in: memory heap where copied */
{
- ibool missing_history;
+ bool missing_history;
rw_lock_s_lock(&purge_sys->latch);
missing_history = read_view_sees_trx_id(purge_sys->view, trx_id);
- rw_lock_s_unlock(&purge_sys->latch);
- if (UNIV_UNLIKELY(missing_history)) {
-
- /* It may be that the necessary undo log has already been
- deleted */
-
- return(DB_MISSING_HISTORY);
+ if (!missing_history) {
+ *undo_rec = trx_undo_get_undo_rec_low(roll_ptr, heap);
}
- *undo_rec = trx_undo_get_undo_rec_low(roll_ptr, heap);
+ rw_lock_s_unlock(&purge_sys->latch);
- return(DB_SUCCESS);
+ return(missing_history);
}
#ifdef UNIV_DEBUG
@@ -1471,13 +1474,13 @@ trx_undo_get_undo_rec(
/*******************************************************************//**
Build a previous version of a clustered index record. The caller must
-hold a latch on the index page of the clustered index record, to
-guarantee that the stack of versions is locked all the way down to the
-purge_sys->view.
-@return DB_SUCCESS, or DB_MISSING_HISTORY if the previous version is
-earlier than purge_view, which means that it may have been removed */
+hold a latch on the index page of the clustered index record.
+@retval true if previous version was built, or if it was an insert
+or the table has been rebuilt
+@retval false if the previous version is earlier than purge_view,
+which means that it may have been removed */
UNIV_INTERN
-ulint
+bool
trx_undo_prev_version_build(
/*========================*/
const rec_t* index_rec ATTRIB_USED_ONLY_IN_DEBUG,
@@ -1488,7 +1491,7 @@ trx_undo_prev_version_build(
index_rec page and purge_view */
const rec_t* rec, /*!< in: version of a clustered index record */
dict_index_t* index, /*!< in: clustered index */
- ulint* offsets,/*!< in: rec_get_offsets(rec, index) */
+ ulint* offsets,/*!< in/out: rec_get_offsets(rec, index) */
mem_heap_t* heap, /*!< in: memory heap from which the memory
needed is allocated */
rec_t** old_vers)/*!< out, own: previous version, or NULL if
@@ -1509,9 +1512,8 @@ trx_undo_prev_version_build(
byte* ptr;
ulint info_bits;
ulint cmpl_info;
- ibool dummy_extern;
+ bool dummy_extern;
byte* buf;
- ulint err;
#ifdef UNIV_SYNC_DEBUG
ut_ad(!rw_lock_own(&purge_sys->latch, RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */
@@ -1526,28 +1528,28 @@ trx_undo_prev_version_build(
*old_vers = NULL;
if (trx_undo_roll_ptr_is_insert(roll_ptr)) {
-
/* The record rec is the first inserted version */
-
- return(DB_SUCCESS);
+ return(true);
}
rec_trx_id = row_get_rec_trx_id(rec, index, offsets);
- err = trx_undo_get_undo_rec(roll_ptr, rec_trx_id, &undo_rec, heap);
-
- if (UNIV_UNLIKELY(err != DB_SUCCESS)) {
- /* The undo record may already have been purged.
- This should never happen for user transactions, but
- it can happen in purge. */
- ut_ad(err == DB_MISSING_HISTORY);
-
- return(err);
+ if (trx_undo_get_undo_rec(roll_ptr, rec_trx_id, &undo_rec, heap)) {
+ /* The undo record may already have been purged,
+ during purge or semi-consistent read. */
+ return(false);
}
ptr = trx_undo_rec_get_pars(undo_rec, &type, &cmpl_info,
&dummy_extern, &undo_no, &table_id);
+ if (table_id != index->table->id) {
+ /* The table should have been rebuilt, but purge has
+ not yet removed the undo log records for the
+ now-dropped old table (table_id). */
+ return(true);
+ }
+
ptr = trx_undo_update_rec_get_sys_cols(ptr, &trx_id, &roll_ptr,
&info_bits);
@@ -1578,7 +1580,6 @@ trx_undo_prev_version_build(
ptr = trx_undo_update_rec_get_update(ptr, index, type, trx_id,
roll_ptr, info_bits,
NULL, heap, &update);
- ut_a(table_id == index->table->id);
ut_a(ptr);
# if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
@@ -1600,11 +1601,24 @@ trx_undo_prev_version_build(
delete-marked record by trx_id, no transactions need to access
the BLOB. */
+ /* the row_upd_changes_disowned_external(update) call could be
+ omitted, but the synchronization on purge_sys->latch is likely
+ more expensive. */
+
if ((update->info_bits & REC_INFO_DELETED_FLAG)
- && read_view_sees_trx_id(purge_sys->view, trx_id)) {
- /* treat as a fresh insert, not to
- cause assertion error at the caller. */
- return(DB_SUCCESS);
+ && row_upd_changes_disowned_external(update)) {
+ bool missing_extern;
+
+ rw_lock_s_lock(&purge_sys->latch);
+ missing_extern = read_view_sees_trx_id(purge_sys->view,
+ trx_id);
+ rw_lock_s_unlock(&purge_sys->latch);
+
+ if (missing_extern) {
+ /* treat as a fresh insert, not to
+ cause assertion error at the caller. */
+ return(true);
+ }
}
/* We have to set the appropriate extern storage bits in the
@@ -1613,8 +1627,8 @@ trx_undo_prev_version_build(
those fields that update updates to become externally stored
fields. Store the info: */
- entry = row_rec_to_index_entry(ROW_COPY_DATA, rec, index,
- offsets, &n_ext, heap);
+ entry = row_rec_to_index_entry(
+ rec, index, offsets, &n_ext, heap);
n_ext += btr_push_update_extern_fields(entry, update, heap);
/* The page containing the clustered index record
corresponding to entry is latched in mtr. Thus the
@@ -1637,6 +1651,6 @@ trx_undo_prev_version_build(
row_upd_rec_in_place(*old_vers, index, offsets, update, NULL);
}
- return(DB_SUCCESS);
+ return(true);
}
#endif /* !UNIV_HOTBACKUP */
diff --git a/storage/innobase/trx/trx0roll.cc b/storage/innobase/trx/trx0roll.cc
index 042b5b87da7..d07e40c506d 100644
--- a/storage/innobase/trx/trx0roll.cc
+++ b/storage/innobase/trx/trx0roll.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -133,7 +133,7 @@ trx_rollback_to_savepoint_low(
Rollback a transaction to a given savepoint or do a complete rollback.
@return error code or DB_SUCCESS */
UNIV_INTERN
-int
+dberr_t
trx_rollback_to_savepoint(
/*======================*/
trx_t* trx, /*!< in: transaction handle */
@@ -157,14 +157,14 @@ trx_rollback_to_savepoint(
srv_active_wake_master_thread();
- return((int) trx->error_state);
+ return(trx->error_state);
}
/*******************************************************************//**
Rollback a transaction used in MySQL.
@return error code or DB_SUCCESS */
static
-enum db_err
+dberr_t
trx_rollback_for_mysql_low(
/*=======================*/
trx_t* trx) /*!< in/out: transaction */
@@ -193,7 +193,7 @@ trx_rollback_for_mysql_low(
Rollback a transaction used in MySQL.
@return error code or DB_SUCCESS */
UNIV_INTERN
-int
+dberr_t
trx_rollback_for_mysql(
/*===================*/
trx_t* trx) /*!< in/out: transaction */
@@ -214,7 +214,7 @@ trx_rollback_for_mysql(
return(trx_rollback_for_mysql_low(trx));
case TRX_STATE_PREPARED:
- assert_trx_in_rw_list(trx);
+ ut_ad(!trx_is_autocommit_non_locking(trx));
return(trx_rollback_for_mysql_low(trx));
case TRX_STATE_COMMITTED_IN_MEMORY:
@@ -223,19 +223,19 @@ trx_rollback_for_mysql(
}
ut_error;
- return((int) DB_CORRUPTION);
+ return(DB_CORRUPTION);
}
/*******************************************************************//**
Rollback the latest SQL statement for MySQL.
@return error code or DB_SUCCESS */
UNIV_INTERN
-int
+dberr_t
trx_rollback_last_sql_stat_for_mysql(
/*=================================*/
trx_t* trx) /*!< in/out: transaction */
{
- int err;
+ dberr_t err;
/* We are reading trx->state without holding trx_sys->mutex
here, because the statement rollback should be invoked for a
@@ -344,8 +344,8 @@ the row, these locks are naturally released in the rollback. Savepoints which
were set after this savepoint are deleted.
@return if no savepoint of the name found then DB_NO_SAVEPOINT,
otherwise DB_SUCCESS */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
trx_rollback_to_savepoint_for_mysql_low(
/*====================================*/
trx_t* trx, /*!< in/out: transaction */
@@ -358,7 +358,7 @@ trx_rollback_to_savepoint_for_mysql_low(
binlog entries of the queries
executed after the savepoint */
{
- ulint err;
+ dberr_t err;
ut_ad(trx_state_eq(trx, TRX_STATE_ACTIVE));
ut_ad(trx->in_mysql_trx_list);
@@ -395,7 +395,7 @@ were set after this savepoint are deleted.
@return if no savepoint of the name found then DB_NO_SAVEPOINT,
otherwise DB_SUCCESS */
UNIV_INTERN
-ulint
+dberr_t
trx_rollback_to_savepoint_for_mysql(
/*================================*/
trx_t* trx, /*!< in: transaction handle */
@@ -449,7 +449,7 @@ savepoint and replaces it with a new. Savepoints are deleted in a transaction
commit or rollback.
@return always DB_SUCCESS */
UNIV_INTERN
-ulint
+dberr_t
trx_savepoint_for_mysql(
/*====================*/
trx_t* trx, /*!< in: transaction handle */
@@ -495,7 +495,7 @@ savepoint are left as is.
@return if no savepoint of the name found then DB_NO_SAVEPOINT,
otherwise DB_SUCCESS */
UNIV_INTERN
-ulint
+dberr_t
trx_release_savepoint_for_mysql(
/*============================*/
trx_t* trx, /*!< in: transaction handle */
@@ -623,18 +623,16 @@ trx_rollback_active(
if (trx_get_dict_operation(trx) != TRX_DICT_OP_NONE
&& trx->table_id != 0) {
- /* If the transaction was for a dictionary operation, we
- drop the relevant table, if it still exists */
+ /* If the transaction was for a dictionary operation,
+ we drop the relevant table only if it is not flagged
+ as DISCARDED. If it still exists. */
- fprintf(stderr,
- "InnoDB: Dropping table with id "UINT64PF
- " in recovery if it exists\n",
- (ib_uint64_t) trx->table_id);
+ table = dict_table_open_on_id(
+ trx->table_id, dictionary_locked, FALSE);
- table = dict_table_open_on_id(trx->table_id, dictionary_locked);
+ if (table && !dict_table_is_discarded(table)) {
- if (table) {
- ulint err;
+ dberr_t err;
/* Ensure that the table doesn't get evicted from the
cache, keeps things simple for drop. */
@@ -643,16 +641,17 @@ trx_rollback_active(
dict_table_move_from_lru_to_non_lru(table);
}
- dict_table_close(table, dictionary_locked);
+ dict_table_close(table, dictionary_locked, FALSE);
- fputs("InnoDB: Table found: dropping table ", stderr);
- ut_print_name(stderr, trx, TRUE, table->name);
- fputs(" in recovery\n", stderr);
+ ib_logf(IB_LOG_LEVEL_WARN,
+ "Dropping table '%s', with id " UINT64PF " "
+ "in recovery",
+ table->name, trx->table_id);
err = row_drop_table_for_mysql(table->name, trx, TRUE);
trx_commit_for_mysql(trx);
- ut_a(err == (int) DB_SUCCESS);
+ ut_a(err == DB_SUCCESS);
}
}
@@ -660,9 +659,8 @@ trx_rollback_active(
row_mysql_unlock_data_dictionary(trx);
}
- fprintf(stderr, "\nInnoDB: Rolling back of trx id " TRX_ID_FMT
- " completed\n",
- trx->id);
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Rollback of trx with id " TRX_ID_FMT " completed", trx->id);
mem_heap_free(heap);
@@ -808,6 +806,8 @@ DECLARE_THREAD(trx_rollback_or_clean_all_recovered)(
/*!< in: a dummy parameter required by
os_thread_create */
{
+ ut_ad(!srv_read_only_mode);
+
#ifdef UNIV_PFS_THREAD
pfs_register_thread(trx_rollback_clean_thread_key);
#endif /* UNIV_PFS_THREAD */
diff --git a/storage/innobase/trx/trx0sys.cc b/storage/innobase/trx/trx0sys.cc
index 97fd1f36943..7c2bbc90ad9 100644
--- a/storage/innobase/trx/trx0sys.cc
+++ b/storage/innobase/trx/trx0sys.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -43,20 +43,16 @@ Created 3/26/1996 Heikki Tuuri
#include "log0recv.h"
#include "os0file.h"
#include "read0read.h"
-#include "buf0dblwr.h"
/** The file format tag structure with id and name. */
-struct file_format_struct {
+struct file_format_t {
ulint id; /*!< id of the file format */
const char* name; /*!< text representation of the
file format */
- mutex_t mutex; /*!< covers changes to the above
+ ib_mutex_t mutex; /*!< covers changes to the above
fields */
};
-/** The file format tag */
-typedef struct file_format_struct file_format_t;
-
/** The transaction system */
UNIV_INTERN trx_sys_t* trx_sys = NULL;
@@ -122,12 +118,12 @@ UNIV_INTERN mysql_pfs_key_t file_format_max_mutex_key;
UNIV_INTERN mysql_pfs_key_t trx_sys_mutex_key;
#endif /* UNIV_PFS_RWLOCK */
+#ifndef UNIV_HOTBACKUP
#ifdef UNIV_DEBUG
/* Flag to control TRX_RSEG_N_SLOTS behavior debugging. */
uint trx_rseg_n_slots_debug = 0;
#endif
-#ifndef UNIV_HOTBACKUP
/** This is used to track the maximum file format id known to InnoDB. It's
updated via SET GLOBAL innodb_file_format_max = 'x' or when we open
or create a table. */
@@ -180,13 +176,17 @@ trx_sys_flush_max_trx_id(void)
ut_ad(mutex_own(&trx_sys->mutex));
- mtr_start(&mtr);
+ if (!srv_read_only_mode) {
+ mtr_start(&mtr);
- sys_header = trx_sysf_get(&mtr);
+ sys_header = trx_sysf_get(&mtr);
- mlog_write_ull(sys_header + TRX_SYS_TRX_ID_STORE,
- trx_sys->max_trx_id, &mtr);
- mtr_commit(&mtr);
+ mlog_write_ull(
+ sys_header + TRX_SYS_TRX_ID_STORE,
+ trx_sys->max_trx_id, &mtr);
+
+ mtr_commit(&mtr);
+ }
}
/*****************************************************************//**
@@ -524,6 +524,8 @@ trx_sys_init_at_db_start(void)
+ TRX_SYS_TRX_ID_STORE),
TRX_SYS_TRX_ID_WRITE_MARGIN);
+ ut_d(trx_sys->rw_max_trx_id = trx_sys->max_trx_id);
+
UT_LIST_INIT(trx_sys->mysql_trx_list);
trx_dummy_sess = sess_open();
@@ -701,7 +703,7 @@ Check for the max file format tag stored on disk. Note: If max_format_id
is == UNIV_FORMAT_MAX + 1 then we only print a warning.
@return DB_SUCCESS or error code */
UNIV_INTERN
-ulint
+dberr_t
trx_sys_file_format_max_check(
/*==========================*/
ulint max_format_id) /*!< in: max format id to check */
@@ -718,21 +720,18 @@ trx_sys_file_format_max_check(
format_id = UNIV_FORMAT_MIN;
}
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: highest supported file format is %s.\n",
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "Highest supported file format is %s.",
trx_sys_file_format_id_to_name(UNIV_FORMAT_MAX));
if (format_id > UNIV_FORMAT_MAX) {
ut_a(format_id < FILE_FORMAT_NAME_N);
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: %s: the system tablespace is in a file "
- "format that this version doesn't support - %s\n",
- ((max_format_id <= UNIV_FORMAT_MAX)
- ? "Error" : "Warning"),
+ ib_logf(max_format_id <= UNIV_FORMAT_MAX
+ ? IB_LOG_LEVEL_ERROR : IB_LOG_LEVEL_WARN,
+ "The system tablespace is in a file "
+ "format that this version doesn't support - %s.",
trx_sys_file_format_id_to_name(format_id));
if (max_format_id <= UNIV_FORMAT_MAX) {
@@ -883,7 +882,7 @@ trx_sys_create_rsegs(
ut_a(n_spaces < TRX_SYS_N_RSEGS);
ut_a(n_rsegs <= TRX_SYS_N_RSEGS);
- if (srv_force_recovery >= SRV_FORCE_NO_TRX_UNDO) {
+ if (srv_force_recovery >= SRV_FORCE_NO_TRX_UNDO || srv_read_only_mode) {
return(ULINT_UNDEFINED);
}
@@ -926,9 +925,8 @@ trx_sys_create_rsegs(
}
}
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: %lu rollback segment(s) are active.\n",
- n_used);
+ ib_logf(IB_LOG_LEVEL_INFO,
+ "%lu rollback segment(s) are active.", n_used);
return(n_used);
}
@@ -1000,7 +998,7 @@ trx_sys_read_file_format_id(
);
if (!success) {
/* The following call prints an error message */
- os_file_get_last_error(TRUE);
+ os_file_get_last_error(true);
ut_print_timestamp(stderr);
@@ -1019,7 +1017,7 @@ trx_sys_read_file_format_id(
if (!success) {
/* The following call prints an error message */
- os_file_get_last_error(TRUE);
+ os_file_get_last_error(true);
ut_print_timestamp(stderr);
@@ -1080,7 +1078,7 @@ trx_sys_read_pertable_file_format_id(
);
if (!success) {
/* The following call prints an error message */
- os_file_get_last_error(TRUE);
+ os_file_get_last_error(true);
ut_print_timestamp(stderr);
@@ -1099,7 +1097,7 @@ trx_sys_read_pertable_file_format_id(
if (!success) {
/* The following call prints an error message */
- os_file_get_last_error(TRUE);
+ os_file_get_last_error(true);
ut_print_timestamp(stderr);
@@ -1120,11 +1118,11 @@ trx_sys_read_pertable_file_format_id(
if (flags == 0) {
/* file format is Antelope */
*format_id = 0;
- return (TRUE);
+ return(TRUE);
} else if (flags & 1) {
/* tablespace flags are ok */
*format_id = (flags / 32) % 128;
- return (TRUE);
+ return(TRUE);
} else {
/* bad tablespace flags */
return(FALSE);
@@ -1143,7 +1141,7 @@ trx_sys_file_format_id_to_name(
{
if (!(id < FILE_FORMAT_NAME_N)) {
/* unknown id */
- return ("Unknown");
+ return("Unknown");
}
return(file_format_name_map[id]);
@@ -1252,7 +1250,7 @@ trx_sys_any_active_transactions(void)
mutex_enter(&trx_sys->mutex);
total_trx = UT_LIST_GET_LEN(trx_sys->rw_trx_list)
- + trx_sys->n_mysql_trx;
+ + UT_LIST_GET_LEN(trx_sys->mysql_trx_list);
ut_a(total_trx >= trx_sys->n_prepared_trx);
total_trx -= trx_sys->n_prepared_trx;
diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc
index 80ebe0df2b3..449b970842a 100644
--- a/storage/innobase/trx/trx0trx.cc
+++ b/storage/innobase/trx/trx0trx.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1996, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1996, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -105,6 +105,7 @@ trx_create(void)
trx->state = TRX_STATE_NOT_STARTED;
+ trx->active_commit_ordered = 0;
trx->isolation_level = TRX_ISO_REPEATABLE_READ;
trx->no = IB_ULONGLONG_MAX;
@@ -146,10 +147,6 @@ trx_create(void)
trx->lock.table_locks = ib_vector_create(
heap_alloc, sizeof(void**), 32);
- /* For non-locking selects we avoid calling ut_time() too frequently.
- Set the time here for new transactions. */
- trx->start_time = ut_time();
-
return(trx);
}
@@ -184,8 +181,6 @@ trx_allocate_for_mysql(void)
mutex_enter(&trx_sys->mutex);
- trx_sys->n_mysql_trx++;
-
ut_d(trx->in_mysql_trx_list = TRUE);
UT_LIST_ADD_FIRST(mysql_trx_list, trx_sys->mysql_trx_list, trx);
@@ -205,6 +200,7 @@ trx_free(
ut_a(trx->magic_n == TRX_MAGIC_N);
ut_ad(!trx->in_ro_trx_list);
ut_ad(!trx->in_rw_trx_list);
+ ut_ad(!trx->in_mysql_trx_list);
mutex_free(&trx->undo_mutex);
@@ -233,8 +229,10 @@ trx_free(
/* We allocated a dedicated heap for the vector. */
ib_vector_free(trx->autoinc_locks);
- /* We allocated a dedicated heap for the vector. */
- ib_vector_free(trx->lock.table_locks);
+ if (trx->lock.table_locks != NULL) {
+ /* We allocated a dedicated heap for the vector. */
+ ib_vector_free(trx->lock.table_locks);
+ }
mutex_free(&trx->mutex);
@@ -249,11 +247,12 @@ trx_free_for_background(
/*====================*/
trx_t* trx) /*!< in, own: trx object */
{
- if (UNIV_UNLIKELY(trx->declared_to_be_inside_innodb)) {
- ut_print_timestamp(stderr);
- fputs(" InnoDB: Error: Freeing a trx which is declared"
- " to be processing\n"
- "InnoDB: inside InnoDB.\n", stderr);
+ if (trx->declared_to_be_inside_innodb) {
+
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "Freeing a trx (%p, " TRX_ID_FMT ") which is declared "
+ "to be processing inside InnoDB", trx, trx->id);
+
trx_print(stderr, trx, 600);
putc('\n', stderr);
@@ -262,16 +261,16 @@ trx_free_for_background(
srv_conc_force_exit_innodb(trx);
}
- if (UNIV_UNLIKELY(trx->n_mysql_tables_in_use != 0
- || trx->mysql_n_tables_locked != 0)) {
+ if (trx->n_mysql_tables_in_use != 0
+ || trx->mysql_n_tables_locked != 0) {
- ut_print_timestamp(stderr);
- fprintf(stderr,
- " InnoDB: Error: MySQL is freeing a thd\n"
- "InnoDB: though trx->n_mysql_tables_in_use is %lu\n"
- "InnoDB: and trx->mysql_n_tables_locked is %lu.\n",
+ ib_logf(IB_LOG_LEVEL_ERROR,
+ "MySQL is freeing a thd though "
+ "trx->n_mysql_tables_in_use is %lu and "
+ "trx->mysql_n_tables_locked is %lu.",
(ulong) trx->n_mysql_tables_in_use,
(ulong) trx->mysql_n_tables_locked);
+
trx_print(stderr, trx, 600);
ut_print_buf(stderr, trx, sizeof(trx_t));
putc('\n', stderr);
@@ -326,8 +325,6 @@ trx_free_for_mysql(
ut_ad(trx_sys_validate_trx_list());
- trx_sys->n_mysql_trx--;
-
mutex_exit(&trx_sys->mutex);
trx_free_for_background(trx);
@@ -348,6 +345,9 @@ trx_list_rw_insert_ordered(
ut_ad(!trx->read_only);
+ ut_d(trx->start_file = __FILE__);
+ ut_d(trx->start_line = __LINE__);
+
ut_a(srv_is_being_started);
ut_ad(!trx->in_ro_trx_list);
ut_ad(!trx->in_rw_trx_list);
@@ -372,6 +372,7 @@ trx_list_rw_insert_ordered(
if (trx2 == NULL) {
UT_LIST_ADD_FIRST(trx_list, trx_sys->rw_trx_list, trx);
+ ut_d(trx_sys->rw_max_trx_id = trx->id);
} else {
UT_LIST_INSERT_AFTER(
trx_list, trx_sys->rw_trx_list, trx2, trx);
@@ -423,6 +424,7 @@ trx_resurrect_insert(
trx->state = TRX_STATE_PREPARED;
trx_sys->n_prepared_trx++;
+ trx_sys->n_prepared_recovered_trx++;
} else {
fprintf(stderr,
"InnoDB: Since innodb_force_recovery"
@@ -483,6 +485,7 @@ trx_resurrect_update_in_prepared_state(
if (srv_force_recovery == 0) {
if (trx_state_eq(trx, TRX_STATE_NOT_STARTED)) {
trx_sys->n_prepared_trx++;
+ trx_sys->n_prepared_recovered_trx++;
} else {
ut_ad(trx_state_eq(trx, TRX_STATE_PREPARED));
}
@@ -620,10 +623,10 @@ trx_lists_init_at_db_start(void)
/******************************************************************//**
Assigns a rollback segment to a transaction in a round-robin fashion.
@return assigned rollback segment instance */
-UNIV_INLINE
+static
trx_rseg_t*
-trx_assign_rseg(
-/*============*/
+trx_assign_rseg_low(
+/*================*/
ulong max_undo_logs, /*!< in: maximum number of UNDO logs to use */
ulint n_tablespaces) /*!< in: number of rollback tablespaces */
{
@@ -631,7 +634,7 @@ trx_assign_rseg(
trx_rseg_t* rseg;
static ulint latest_rseg = 0;
- if (srv_force_recovery >= SRV_FORCE_NO_TRX_UNDO) {
+ if (srv_force_recovery >= SRV_FORCE_NO_TRX_UNDO || srv_read_only_mode) {
ut_a(max_undo_logs == ULONG_UNDEFINED);
return(NULL);
}
@@ -668,6 +671,24 @@ trx_assign_rseg(
}
/****************************************************************//**
+Assign a read-only transaction a rollback-segment, if it is attempting
+to write to a TEMPORARY table. */
+UNIV_INTERN
+void
+trx_assign_rseg(
+/*============*/
+ trx_t* trx) /*!< A read-only transaction that
+ needs to be assigned a RBS. */
+{
+ ut_a(trx->rseg == 0);
+ ut_a(trx->read_only);
+ ut_a(!srv_read_only_mode);
+ ut_a(!trx_is_autocommit_non_locking(trx));
+
+ trx->rseg = trx_assign_rseg_low(srv_undo_logs, srv_undo_tablespaces);
+}
+
+/****************************************************************//**
Starts a transaction. */
static
void
@@ -675,10 +696,10 @@ trx_start_low(
/*==========*/
trx_t* trx) /*!< in: transaction */
{
- static ulint n_start_times;
-
ut_ad(trx->rseg == NULL);
+ ut_ad(trx->start_file != 0);
+ ut_ad(trx->start_line != 0);
ut_ad(!trx->is_recovered);
ut_ad(trx_state_eq(trx, TRX_STATE_NOT_STARTED));
ut_ad(UT_LIST_GET_LEN(trx->lock.trx_locks) == 0);
@@ -686,7 +707,9 @@ trx_start_low(
/* Check whether it is an AUTOCOMMIT SELECT */
trx->auto_commit = thd_trx_is_auto_commit(trx->mysql_thd);
- trx->read_only = thd_trx_is_read_only(trx->mysql_thd);
+ trx->read_only =
+ (!trx->ddl && thd_trx_is_read_only(trx->mysql_thd))
+ || srv_read_only_mode;
if (!trx->auto_commit) {
++trx->will_lock;
@@ -695,16 +718,10 @@ trx_start_low(
}
if (!trx->read_only) {
- trx->rseg = trx_assign_rseg(
+ trx->rseg = trx_assign_rseg_low(
srv_undo_logs, srv_undo_tablespaces);
}
- /* Avoid making an unnecessary system call, for non-locking
- auto-commit selects we reuse the start_time for every 32 starts. */
- if (!trx_is_autocommit_non_locking(trx) || !(n_start_times++ % 32)) {
- trx->start_time = ut_time();
- }
-
/* The initial value for trx->no: IB_ULONGLONG_MAX is used in
read_view_open_now: */
@@ -745,12 +762,15 @@ trx_start_low(
ut_ad(!trx_is_autocommit_non_locking(trx));
UT_LIST_ADD_FIRST(trx_list, trx_sys->rw_trx_list, trx);
ut_d(trx->in_rw_trx_list = TRUE);
+ ut_d(trx_sys->rw_max_trx_id = trx->id);
}
ut_ad(trx_sys_validate_trx_list());
mutex_exit(&trx_sys->mutex);
+ trx->start_time = ut_time();
+
MONITOR_INC(MONITOR_TRX_ACTIVE);
}
@@ -971,6 +991,52 @@ trx_finalize_for_fts(
trx->fts_trx = NULL;
}
+/**********************************************************************//**
+If required, flushes the log to disk based on the value of
+innodb_flush_log_at_trx_commit. */
+static
+void
+trx_flush_log_if_needed_low(
+/*========================*/
+ lsn_t lsn) /*!< in: lsn up to which logs are to be
+ flushed. */
+{
+ switch (srv_flush_log_at_trx_commit) {
+ case 0:
+ /* Do nothing */
+ break;
+ case 1:
+ case 3:
+ /* Write the log and optionally flush it to disk */
+ log_write_up_to(lsn, LOG_WAIT_ONE_GROUP,
+ srv_unix_file_flush_method != SRV_UNIX_NOSYNC);
+ break;
+ case 2:
+ /* Write the log but do not flush it to disk */
+ log_write_up_to(lsn, LOG_WAIT_ONE_GROUP, FALSE);
+
+ break;
+ default:
+ ut_error;
+ }
+}
+
+/**********************************************************************//**
+If required, flushes the log to disk based on the value of
+innodb_flush_log_at_trx_commit. */
+static __attribute__((nonnull))
+void
+trx_flush_log_if_needed(
+/*====================*/
+ lsn_t lsn, /*!< in: lsn up to which logs are to be
+ flushed. */
+ trx_t* trx) /*!< in/out: transaction */
+{
+ trx->op_info = "flushing log";
+ trx_flush_log_if_needed_low(lsn);
+ trx->op_info = "";
+}
+
/****************************************************************//**
Commits a transaction. */
UNIV_INTERN
@@ -987,7 +1053,7 @@ trx_commit(
ut_ad(!trx_state_eq(trx, TRX_STATE_COMMITTED_IN_MEMORY));
/* undo_no is non-zero if we're doing the final commit. */
- if (trx->fts_trx && (trx->undo_no != 0)) {
+ if (trx->fts_trx && trx->undo_no != 0) {
ulint error;
ut_a(!trx_is_autocommit_non_locking(trx));
@@ -1043,6 +1109,8 @@ trx_commit(
trx->state = TRX_STATE_NOT_STARTED;
+ read_view_remove(trx->global_read_view, false);
+
MONITOR_INC(MONITOR_TRX_NL_RO_COMMIT);
} else {
lock_trx_release_locks(trx);
@@ -1057,7 +1125,6 @@ trx_commit(
assert_trx_in_list(trx);
if (trx->read_only) {
- ut_ad(trx->rseg == NULL);
UT_LIST_REMOVE(trx_list, trx_sys->ro_trx_list, trx);
ut_d(trx->in_ro_trx_list = FALSE);
MONITOR_INC(MONITOR_TRX_RO_COMMIT);
@@ -1075,13 +1142,16 @@ trx_commit(
trx->state = TRX_STATE_NOT_STARTED;
+ /* We already own the trx_sys_t::mutex, by doing it here we
+ avoid a potential context switch later. */
+ read_view_remove(trx->global_read_view, true);
+
ut_ad(trx_sys_validate_trx_list());
mutex_exit(&trx_sys->mutex);
}
if (trx->global_read_view != NULL) {
- read_view_remove(trx->global_read_view);
mem_heap_empty(trx->global_read_view_heap);
@@ -1129,26 +1199,8 @@ trx_commit(
trx->must_flush_log_later = TRUE;
} else if (srv_flush_log_at_trx_commit == 0) {
/* Do nothing */
- } else if (srv_flush_log_at_trx_commit == 1 ||
- srv_flush_log_at_trx_commit == 3) {
- if (srv_unix_file_flush_method == SRV_UNIX_NOSYNC) {
- /* Write the log but do not flush it to disk */
-
- log_write_up_to(lsn, LOG_WAIT_ONE_GROUP,
- FALSE);
- } else {
- /* Write the log to the log files AND flush
- them to disk */
-
- log_write_up_to(lsn, LOG_WAIT_ONE_GROUP, TRUE);
- }
- } else if (srv_flush_log_at_trx_commit == 2) {
-
- /* Write the log but do not flush it to disk */
-
- log_write_up_to(lsn, LOG_WAIT_ONE_GROUP, FALSE);
} else {
- ut_error;
+ trx_flush_log_if_needed(lsn, trx);
}
trx->commit_lsn = lsn;
@@ -1162,6 +1214,14 @@ trx_commit(
trx->undo_no = 0;
trx->last_sql_stat_start.least_undo_no = 0;
+ trx->ddl = false;
+#ifdef UNIV_DEBUG
+ ut_ad(trx->start_file != 0);
+ ut_ad(trx->start_line != 0);
+ trx->start_file = 0;
+ trx->start_line = 0;
+#endif /* UNIV_DEBUG */
+
trx->will_lock = 0;
trx->read_only = FALSE;
trx->auto_commit = FALSE;
@@ -1175,6 +1235,8 @@ trx_commit(
ut_ad(!trx->in_ro_trx_list);
ut_ad(!trx->in_rw_trx_list);
+ trx->dict_operation = TRX_DICT_OP_NONE;
+
trx->error_state = DB_SUCCESS;
/* trx->in_mysql_trx_list would hold between
@@ -1365,7 +1427,7 @@ trx_commit_step(
Does the transaction commit for MySQL.
@return DB_SUCCESS or error number */
UNIV_INTERN
-ulint
+dberr_t
trx_commit_for_mysql(
/*=================*/
trx_t* trx) /*!< in/out: transaction */
@@ -1389,6 +1451,9 @@ trx_commit_for_mysql(
records, generated by the same transaction do not. */
trx->support_xa = thd_supports_xa(trx->mysql_thd);
+ ut_d(trx->start_file = __FILE__);
+ ut_d(trx->start_line = __LINE__);
+
trx_start_low(trx);
/* fall through */
case TRX_STATE_ACTIVE:
@@ -1407,53 +1472,23 @@ trx_commit_for_mysql(
/**********************************************************************//**
If required, flushes the log to disk if we called trx_commit_for_mysql()
-with trx->flush_log_later == TRUE.
-@return 0 or error number */
+with trx->flush_log_later == TRUE. */
UNIV_INTERN
-ulint
+void
trx_commit_complete_for_mysql(
/*==========================*/
- trx_t* trx) /*!< in: trx handle */
+ trx_t* trx) /*!< in/out: transaction */
{
- lsn_t lsn = trx->commit_lsn;
-
ut_a(trx);
- trx->op_info = "flushing log";
-
- if (!trx->must_flush_log_later) {
- /* Do nothing */
- } else if (srv_flush_log_at_trx_commit == 0) {
- /* Do nothing */
- } else if (srv_flush_log_at_trx_commit == 1 && trx->active_commit_ordered) {
- /* Do nothing - we already flushed the prepare and binlog write
- to disk, so transaction is durable (will be recovered from
- binlog if necessary) */
- } else if (srv_flush_log_at_trx_commit == 1 || srv_flush_log_at_trx_commit == 3) {
- if (srv_unix_file_flush_method == SRV_UNIX_NOSYNC) {
- /* Write the log but do not flush it to disk */
-
- log_write_up_to(lsn, LOG_WAIT_ONE_GROUP, FALSE);
- } else {
- /* Write the log to the log files AND flush them to
- disk */
-
- log_write_up_to(lsn, LOG_WAIT_ONE_GROUP, TRUE);
- }
- } else if (srv_flush_log_at_trx_commit == 2) {
-
- /* Write the log but do not flush it to disk */
-
- log_write_up_to(lsn, LOG_WAIT_ONE_GROUP, FALSE);
- } else {
- ut_error;
+ if (!trx->must_flush_log_later
+ || (srv_flush_log_at_trx_commit == 1 && trx->active_commit_ordered)) {
+ return;
}
- trx->must_flush_log_later = FALSE;
-
- trx->op_info = "";
+ trx_flush_log_if_needed(trx->commit_lsn, trx);
- return(0);
+ trx->must_flush_log_later = FALSE;
}
/**********************************************************************//**
@@ -1500,9 +1535,9 @@ trx_print_low(
ulint max_query_len,
/*!< in: max query length to print,
or 0 to use the default max length */
- ulint n_lock_rec,
+ ulint n_rec_locks,
/*!< in: lock_number_of_rows_locked(&trx->lock) */
- ulint n_lock_struct,
+ ulint n_trx_locks,
/*!< in: length of trx->lock.trx_locks */
ulint heap_size)
/*!< in: mem_heap_get_size(trx->lock.lock_heap) */
@@ -1581,14 +1616,14 @@ state_ok:
fprintf(f, "que state %lu ", (ulong) trx->lock.que_state);
}
- if (n_lock_struct > 0 || heap_size > 400) {
+ if (n_trx_locks > 0 || heap_size > 400) {
newline = TRUE;
fprintf(f, "%lu lock struct(s), heap size %lu,"
" %lu row lock(s)",
- (ulong) n_lock_struct,
+ (ulong) n_trx_locks,
(ulong) heap_size,
- (ulong) n_lock_rec);
+ (ulong) n_rec_locks);
}
if (trx->has_search_latch) {
@@ -1644,19 +1679,19 @@ trx_print(
ulint max_query_len) /*!< in: max query length to print,
or 0 to use the default max length */
{
- ulint n_lock_rec;
- ulint n_lock_struct;
+ ulint n_rec_locks;
+ ulint n_trx_locks;
ulint heap_size;
lock_mutex_enter();
- n_lock_rec = lock_number_of_rows_locked(&trx->lock);
- n_lock_struct = UT_LIST_GET_LEN(trx->lock.trx_locks);
+ n_rec_locks = lock_number_of_rows_locked(&trx->lock);
+ n_trx_locks = UT_LIST_GET_LEN(trx->lock.trx_locks);
heap_size = mem_heap_get_size(trx->lock.lock_heap);
lock_mutex_exit();
mutex_enter(&trx_sys->mutex);
trx_print_low(f, trx, max_query_len,
- n_lock_rec, n_lock_struct, heap_size);
+ n_rec_locks, n_trx_locks, heap_size);
mutex_exit(&trx_sys->mutex);
}
@@ -1684,7 +1719,6 @@ trx_assert_started(
switch (trx->state) {
case TRX_STATE_PREPARED:
- assert_trx_in_rw_list(trx);
return(TRUE);
case TRX_STATE_ACTIVE:
@@ -1826,28 +1860,7 @@ trx_prepare(
TODO: find out if MySQL holds some mutex when calling this.
That would spoil our group prepare algorithm. */
- if (srv_flush_log_at_trx_commit == 0) {
- /* Do nothing */
- } else if (srv_flush_log_at_trx_commit == 1 || srv_flush_log_at_trx_commit == 3) {
- if (srv_unix_file_flush_method == SRV_UNIX_NOSYNC) {
- /* Write the log but do not flush it to disk */
-
- log_write_up_to(lsn, LOG_WAIT_ONE_GROUP,
- FALSE);
- } else {
- /* Write the log to the log files AND flush
- them to disk */
-
- log_write_up_to(lsn, LOG_WAIT_ONE_GROUP, TRUE);
- }
- } else if (srv_flush_log_at_trx_commit == 2) {
-
- /* Write the log but do not flush it to disk */
-
- log_write_up_to(lsn, LOG_WAIT_ONE_GROUP, FALSE);
- } else {
- ut_error;
- }
+ trx_flush_log_if_needed(lsn, trx);
}
}
@@ -1859,7 +1872,7 @@ trx_prepare_for_mysql(
/*==================*/
trx_t* trx) /*!< in/out: trx handle */
{
- trx_start_if_not_started_xa(trx);
+ trx_start_if_not_started_xa_low(trx);
trx->op_info = "preparing";
@@ -1935,12 +1948,12 @@ trx_recover_for_mysql(
if (count > 0){
ut_print_timestamp(stderr);
fprintf(stderr,
- " InnoDB: %lu transactions in prepared state"
+ " InnoDB: %d transactions in prepared state"
" after recovery\n",
- (ulong) count);
+ int (count));
}
- return ((int) count);
+ return(int (count));
}
/*******************************************************************//**
@@ -2023,8 +2036,8 @@ trx_get_trx_by_xid(
Starts the transaction if it is not yet started. */
UNIV_INTERN
void
-trx_start_if_not_started_xa(
-/*========================*/
+trx_start_if_not_started_xa_low(
+/*============================*/
trx_t* trx) /*!< in: transaction */
{
switch (trx->state) {
@@ -2057,8 +2070,8 @@ trx_start_if_not_started_xa(
Starts the transaction if it is not yet started. */
UNIV_INTERN
void
-trx_start_if_not_started(
-/*=====================*/
+trx_start_if_not_started_low(
+/*=========================*/
trx_t* trx) /*!< in: transaction */
{
switch (trx->state) {
@@ -2074,3 +2087,45 @@ trx_start_if_not_started(
ut_error;
}
+
+/*************************************************************//**
+Starts the transaction for a DDL operation. */
+UNIV_INTERN
+void
+trx_start_for_ddl_low(
+/*==================*/
+ trx_t* trx, /*!< in/out: transaction */
+ trx_dict_op_t op) /*!< in: dictionary operation type */
+{
+ switch (trx->state) {
+ case TRX_STATE_NOT_STARTED:
+ /* Flag this transaction as a dictionary operation, so that
+ the data dictionary will be locked in crash recovery. */
+
+ trx_set_dict_operation(trx, op);
+
+ /* Ensure it is not flagged as an auto-commit-non-locking
+ transation. */
+ trx->will_lock = 1;
+
+ trx->ddl = true;
+
+ trx_start_low(trx);
+ return;
+
+ case TRX_STATE_ACTIVE:
+ /* We have this start if not started idiom, therefore we
+ can't add stronger checks here. */
+ trx->ddl = true;
+
+ ut_ad(trx->dict_operation != TRX_DICT_OP_NONE);
+ ut_ad(trx->will_lock > 0);
+ return;
+ case TRX_STATE_PREPARED:
+ case TRX_STATE_COMMITTED_IN_MEMORY:
+ break;
+ }
+
+ ut_error;
+}
+
diff --git a/storage/innobase/trx/trx0undo.cc b/storage/innobase/trx/trx0undo.cc
index 13ad2bb3755..c4480b11366 100644
--- a/storage/innobase/trx/trx0undo.cc
+++ b/storage/innobase/trx/trx0undo.cc
@@ -413,8 +413,8 @@ trx_undo_page_init(
Creates a new undo log segment in file.
@return DB_SUCCESS if page creation OK possible error codes are:
DB_TOO_MANY_CONCURRENT_TRXS DB_OUT_OF_FILE_SPACE */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
trx_undo_seg_create(
/*================*/
trx_rseg_t* rseg __attribute__((unused)),/*!< in: rollback segment */
@@ -435,7 +435,7 @@ trx_undo_seg_create(
trx_usegf_t* seg_hdr;
ulint n_reserved;
ibool success;
- ulint err = DB_SUCCESS;
+ dberr_t err = DB_SUCCESS;
ut_ad(mtr && id && rseg_hdr);
ut_ad(mutex_own(&(rseg->mutex)));
@@ -1468,7 +1468,7 @@ trx_undo_mem_create(
if (undo == NULL) {
- return NULL;
+ return(NULL);
}
undo->id = id;
@@ -1551,8 +1551,8 @@ Creates a new undo log.
@return DB_SUCCESS if successful in creating the new undo lob object,
possible error codes are: DB_TOO_MANY_CONCURRENT_TRXS
DB_OUT_OF_FILE_SPACE DB_OUT_OF_MEMORY */
-static
-ulint
+static __attribute__((nonnull, warn_unused_result))
+dberr_t
trx_undo_create(
/*============*/
trx_t* trx, /*!< in: transaction */
@@ -1571,7 +1571,7 @@ trx_undo_create(
ulint offset;
ulint id;
page_t* undo_page;
- ulint err;
+ dberr_t err;
ut_ad(mutex_own(&(rseg->mutex)));
@@ -1746,7 +1746,7 @@ undo log reused.
are: DB_TOO_MANY_CONCURRENT_TRXS DB_OUT_OF_FILE_SPACE DB_READ_ONLY
DB_OUT_OF_MEMORY */
UNIV_INTERN
-ulint
+dberr_t
trx_undo_assign_undo(
/*=================*/
trx_t* trx, /*!< in: transaction */
@@ -1755,7 +1755,7 @@ trx_undo_assign_undo(
trx_rseg_t* rseg;
trx_undo_t* undo;
mtr_t mtr;
- ulint err = DB_SUCCESS;
+ dberr_t err = DB_SUCCESS;
ut_ad(trx);
@@ -1771,11 +1771,17 @@ trx_undo_assign_undo(
mutex_enter(&rseg->mutex);
+ DBUG_EXECUTE_IF(
+ "ib_create_table_fail_too_many_trx",
+ err = DB_TOO_MANY_CONCURRENT_TRXS;
+ goto func_exit;
+ );
+
undo = trx_undo_reuse_cached(trx, rseg, type, trx->id, &trx->xid,
&mtr);
if (undo == NULL) {
err = trx_undo_create(trx, rseg, type, trx->id, &trx->xid,
- &undo, &mtr);
+ &undo, &mtr);
if (err != DB_SUCCESS) {
goto func_exit;
@@ -1800,7 +1806,7 @@ func_exit:
mutex_exit(&(rseg->mutex));
mtr_commit(&mtr);
- return err;
+ return(err);
}
/******************************************************************//**
diff --git a/storage/innobase/ut/ut0crc32.cc b/storage/innobase/ut/ut0crc32.cc
index 538879dd9e2..695035d6ae8 100644
--- a/storage/innobase/ut/ut0crc32.cc
+++ b/storage/innobase/ut/ut0crc32.cc
@@ -79,11 +79,11 @@ mysys/my_perf.c, contributed by Facebook under the following license.
* factor of two increase in speed on a Power PC G4 (PPC7455) using gcc -O3.
*/
-#include <string.h> /* memcmp() */
-
#include "univ.i"
#include "ut0crc32.h"
+#include <string.h>
+
ib_ut_crc32_t ut_crc32;
/* Precalculated table used to generate the CRC32 if the CPU does not
@@ -92,7 +92,7 @@ static ib_uint32_t ut_crc32_slice8_table[8][256];
static ibool ut_crc32_slice8_table_initialized = FALSE;
/* Flag that tells whether the CPU supports CRC32 or not */
-static ibool ut_crc32_sse2_enabled = FALSE;
+UNIV_INTERN bool ut_crc32_sse2_enabled = false;
/********************************************************************//**
Initializes the table that is used to generate the CRC32 if the CPU does
@@ -315,8 +315,4 @@ ut_crc32_init()
ut_crc32_slice8_table_init();
ut_crc32 = ut_crc32_slice8;
}
-
- ut_print_timestamp(stderr);
- fprintf(stderr, " InnoDB: CPU %s crc32 instructions\n",
- ut_crc32_sse2_enabled ? "supports" : "does not support");
}
diff --git a/storage/innobase/ut/ut0mem.cc b/storage/innobase/ut/ut0mem.cc
index 42ad180d373..2bb5d9ce332 100644
--- a/storage/innobase/ut/ut0mem.cc
+++ b/storage/innobase/ut/ut0mem.cc
@@ -35,9 +35,6 @@ Created 5/11/1994 Heikki Tuuri
#include <stdlib.h>
-/** This struct is placed first in every allocated memory block */
-typedef struct ut_mem_block_struct ut_mem_block_t;
-
/** The total amount of memory currently allocated from the operating
system with os_mem_alloc_large() or malloc(). Does not count malloc()
if srv_use_sys_malloc is set. Protected by ut_list_mutex. */
@@ -52,14 +49,14 @@ UNIV_INTERN mysql_pfs_key_t ut_list_mutex_key;
#endif
/** Dynamically allocated memory block */
-struct ut_mem_block_struct{
+struct ut_mem_block_t{
UT_LIST_NODE_T(ut_mem_block_t) mem_block_list;
/*!< mem block list node */
ulint size; /*!< size of allocated memory */
ulint magic_n;/*!< magic number (UT_MEM_MAGIC_N) */
};
-/** The value of ut_mem_block_struct::magic_n. Used in detecting
+/** The value of ut_mem_block_t::magic_n. Used in detecting
memory corruption. */
#define UT_MEM_MAGIC_N 1601650166
diff --git a/storage/innobase/ut/ut0rbt.cc b/storage/innobase/ut/ut0rbt.cc
index b21543a679d..e93844af600 100644
--- a/storage/innobase/ut/ut0rbt.cc
+++ b/storage/innobase/ut/ut0rbt.cc
@@ -773,7 +773,7 @@ rbt_create_arg_cmp(
size_t sizeof_value, /*!< in: sizeof data item */
ib_rbt_arg_compare
compare, /*!< in: fn to compare items */
- const void* cmp_arg) /*!< in: compare fn arg */
+ void* cmp_arg) /*!< in: compare fn arg */
{
ib_rbt_t* tree;
diff --git a/storage/innobase/ut/ut0ut.cc b/storage/innobase/ut/ut0ut.cc
index 2268cfd2493..3c94d96c3ac 100644
--- a/storage/innobase/ut/ut0ut.cc
+++ b/storage/innobase/ut/ut0ut.cc
@@ -1,6 +1,6 @@
/*****************************************************************************
-Copyright (c) 1994, 2011, Oracle and/or its affiliates. All Rights Reserved.
+Copyright (c) 1994, 2012, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -28,6 +28,7 @@ Created 5/11/1994 Heikki Tuuri
#ifndef UNIV_INNOCHECKSUM
#include "ut0sort.h"
+#include "os0thread.h" /* thread-ID */
#ifdef UNIV_NONINL
#include "ut0ut.ic"
@@ -218,18 +219,25 @@ ut_print_timestamp(
/*===============*/
FILE* file) /*!< in: file where to print */
{
+ ulint thread_id = 0;
+
+#ifndef UNIV_INNOCHECKSUM
+ thread_id = os_thread_pf(os_thread_get_curr_id());
+#endif
+
#ifdef __WIN__
SYSTEMTIME cal_tm;
GetLocalTime(&cal_tm);
- fprintf(file,"%02d%02d%02d %2d:%02d:%02d",
- (int) cal_tm.wYear % 100,
+ fprintf(file, "%d-%02d-%02d %02d:%02d:%02d %lx",
+ (int) cal_tm.wYear,
(int) cal_tm.wMonth,
(int) cal_tm.wDay,
(int) cal_tm.wHour,
(int) cal_tm.wMinute,
- (int) cal_tm.wSecond);
+ (int) cal_tm.wSecond,
+ thread_id);
#else
struct tm* cal_tm_ptr;
time_t tm;
@@ -243,13 +251,14 @@ ut_print_timestamp(
time(&tm);
cal_tm_ptr = localtime(&tm);
#endif
- fprintf(file,"%02d%02d%02d %2d:%02d:%02d",
- cal_tm_ptr->tm_year % 100,
+ fprintf(file, "%d-%02d-%02d %02d:%02d:%02d %lx",
+ cal_tm_ptr->tm_year + 1900,
cal_tm_ptr->tm_mon + 1,
cal_tm_ptr->tm_mday,
cal_tm_ptr->tm_hour,
cal_tm_ptr->tm_min,
- cal_tm_ptr->tm_sec);
+ cal_tm_ptr->tm_sec,
+ thread_id);
#endif
}
@@ -515,7 +524,7 @@ void
ut_print_name(
/*==========*/
FILE* f, /*!< in: output stream */
- trx_t* trx, /*!< in: transaction */
+ const trx_t* trx, /*!< in: transaction */
ibool table_id,/*!< in: TRUE=print a table name,
FALSE=print other identifier */
const char* name) /*!< in: name to print */
@@ -533,7 +542,7 @@ void
ut_print_namel(
/*===========*/
FILE* f, /*!< in: output stream */
- trx_t* trx, /*!< in: transaction (NULL=no quotes) */
+ const trx_t* trx, /*!< in: transaction (NULL=no quotes) */
ibool table_id,/*!< in: TRUE=print a table name,
FALSE=print other identifier */
const char* name, /*!< in: name to print */
@@ -553,6 +562,50 @@ ut_print_namel(
}
/**********************************************************************//**
+Formats a table or index name, quoted as an SQL identifier. If the name
+contains a slash '/', the result will contain two identifiers separated by
+a period (.), as in SQL database_name.identifier.
+@return pointer to 'formatted' */
+UNIV_INTERN
+char*
+ut_format_name(
+/*===========*/
+ const char* name, /*!< in: table or index name, must be
+ '\0'-terminated */
+ ibool is_table, /*!< in: if TRUE then 'name' is a table
+ name */
+ char* formatted, /*!< out: formatted result, will be
+ '\0'-terminated */
+ ulint formatted_size) /*!< out: no more than this number of
+ bytes will be written to 'formatted' */
+{
+ switch (formatted_size) {
+ case 1:
+ formatted[0] = '\0';
+ /* FALL-THROUGH */
+ case 0:
+ return(formatted);
+ }
+
+ char* end;
+
+ end = innobase_convert_name(formatted, formatted_size,
+ name, strlen(name), NULL, is_table);
+
+ /* If the space in 'formatted' was completely used, then sacrifice
+ the last character in order to write '\0' at the end. */
+ if ((ulint) (end - formatted) == formatted_size) {
+ end--;
+ }
+
+ ut_a((ulint) (end - formatted) < formatted_size);
+
+ *end = '\0';
+
+ return(formatted);
+}
+
+/**********************************************************************//**
Catenate files. */
UNIV_INTERN
void
@@ -648,7 +701,7 @@ UNIV_INTERN
const char*
ut_strerr(
/*======*/
- enum db_err num) /*!< in: error number */
+ dberr_t num) /*!< in: error number */
{
switch (num) {
case DB_SUCCESS:
@@ -703,10 +756,12 @@ ut_strerr(
return("Cannot drop constraint");
case DB_NO_SAVEPOINT:
return("No such savepoint");
- case DB_TABLESPACE_ALREADY_EXISTS:
+ case DB_TABLESPACE_EXISTS:
return("Tablespace already exists");
case DB_TABLESPACE_DELETED:
- return("No such tablespace");
+ return("Tablespace deleted or being deleted");
+ case DB_TABLESPACE_NOT_FOUND:
+ return("Tablespace not found");
case DB_LOCK_TABLE_FULL:
return("Lock structs have exhausted the buffer pool");
case DB_FOREIGN_DUPLICATE_KEY:
@@ -717,8 +772,8 @@ ut_strerr(
return("Too many concurrent transactions");
case DB_UNSUPPORTED:
return("Unsupported");
- case DB_PRIMARY_KEY_IS_NULL:
- return("Primary key is NULL");
+ case DB_INVALID_NULL:
+ return("NULL value encountered in NOT NULL column");
case DB_STATS_DO_NOT_EXIST:
return("Persistent statistics do not exist");
case DB_FAIL:
@@ -745,6 +800,21 @@ ut_strerr(
return("Undo record too big");
case DB_END_OF_INDEX:
return("End of index");
+ case DB_IO_ERROR:
+ return("I/O error");
+ case DB_TABLE_IN_FK_CHECK:
+ return("Table is being used in foreign key check");
+ case DB_DATA_MISMATCH:
+ return("data mismatch");
+ case DB_SCHEMA_NOT_LOCKED:
+ return("schema not locked");
+ case DB_NOT_FOUND:
+ return("not found");
+ case DB_ONLINE_LOG_TOO_BIG:
+ return("Log size exceeded during online index creation");
+ case DB_DICT_CHANGED:
+ return("Table dictionary has changed");
+
/* do not add default: in order to produce a warning if new code
is added to the enum but not added here */
}
diff --git a/storage/innobase/ut/ut0vec.cc b/storage/innobase/ut/ut0vec.cc
index 8ac5d9dc5d3..5842d9f1c0e 100644
--- a/storage/innobase/ut/ut0vec.cc
+++ b/storage/innobase/ut/ut0vec.cc
@@ -44,12 +44,14 @@ ib_vector_create(
ut_a(size > 0);
- vec = static_cast<ib_vector_t*>(allocator->mem_malloc(allocator, sizeof(*vec)));
+ vec = static_cast<ib_vector_t*>(
+ allocator->mem_malloc(allocator, sizeof(*vec)));
vec->used = 0;
vec->total = size;
vec->allocator = allocator;
vec->sizeof_value = sizeof_value;
+
vec->data = static_cast<void*>(
allocator->mem_malloc(allocator, vec->sizeof_value * size));
diff --git a/storage/innobase/ut/ut0wqueue.cc b/storage/innobase/ut/ut0wqueue.cc
index 6d410524fe7..d1ba36b3b00 100644
--- a/storage/innobase/ut/ut0wqueue.cc
+++ b/storage/innobase/ut/ut0wqueue.cc
@@ -40,7 +40,7 @@ ib_wqueue_create(void)
mutex_create(PFS_NOT_INSTRUMENTED, &wq->mutex, SYNC_WORK_QUEUE);
wq->items = ib_list_create();
- wq->event = os_event_create(NULL);
+ wq->event = os_event_create();
return(wq);
}
diff --git a/storage/maria/ha_maria.cc b/storage/maria/ha_maria.cc
index ae2756e5155..946051d8a2a 100644
--- a/storage/maria/ha_maria.cc
+++ b/storage/maria/ha_maria.cc
@@ -511,8 +511,8 @@ static int table2maria(TABLE *table_arg, data_file_type row_type,
pos->algorithm;
keydef[i].block_length= pos->block_size;
keydef[i].seg= keyseg;
- keydef[i].keysegs= pos->key_parts;
- for (j= 0; j < pos->key_parts; j++)
+ keydef[i].keysegs= pos->user_defined_key_parts;
+ for (j= 0; j < pos->user_defined_key_parts; j++)
{
Field *field= pos->key_part[j].field;
type= field->key_type();
@@ -564,7 +564,7 @@ static int table2maria(TABLE *table_arg, data_file_type row_type,
keydef[i].seg[j].flag|= HA_BLOB_PART;
/* save number of bytes used to pack length */
keydef[i].seg[j].bit_start= (uint) (field->pack_length() -
- share->blob_ptr_size);
+ portable_sizeof_char_ptr);
}
else if (field->type() == MYSQL_TYPE_BIT)
{
@@ -574,7 +574,7 @@ static int table2maria(TABLE *table_arg, data_file_type row_type,
(uchar*) table_arg->record[0]);
}
}
- keyseg+= pos->key_parts;
+ keyseg+= pos->user_defined_key_parts;
}
if (table_arg->found_next_number_field)
keydef[share->next_number_index].flag|= HA_AUTO_KEY;
@@ -1036,7 +1036,7 @@ ulong ha_maria::index_flags(uint inx, uint part, bool all_parts) const
double ha_maria::scan_time()
{
if (file->s->data_file_type == BLOCK_RECORD)
- return ulonglong2double(stats.data_file_length - file->s->block_size) / max(file->s->block_size / 2, IO_SIZE) + 2;
+ return ulonglong2double(stats.data_file_length - file->s->block_size) / MY_MAX(file->s->block_size / 2, IO_SIZE) + 2;
return handler::scan_time();
}
@@ -1183,7 +1183,7 @@ int ha_maria::open(const char *name, int mode, uint test_if_locked)
{
if (my_errno == HA_ERR_OLD_FILE)
{
- push_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning(current_thd, Sql_condition::WARN_LEVEL_NOTE,
ER_CRASHED_ON_USAGE,
zerofill_error_msg);
}
@@ -1639,8 +1639,8 @@ int ha_maria::repair(THD *thd, HA_CHECK *param, bool do_optimize)
}
if (error && file->create_unique_index_by_sort &&
share->state.dupp_key != MAX_KEY)
- print_keydup_error(share->state.dupp_key,
- ER(ER_DUP_ENTRY_WITH_KEY_NAME), MYF(0));
+ print_keydup_error(table, &table->key_info[share->state.dupp_key],
+ MYF(0));
}
else
{
@@ -2201,8 +2201,8 @@ bool ha_maria::check_and_repair(THD *thd)
STATE_MOVED)
{
/* Remove error about crashed table */
- thd->warning_info->clear_warning_info(thd->query_id);
- push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ thd->get_stmt_da()->clear_warning_info(thd->query_id);
+ push_warning_printf(current_thd, Sql_condition::WARN_LEVEL_NOTE,
ER_CRASHED_ON_USAGE,
"Zerofilling moved table %s", table->s->path.str);
sql_print_information("Zerofilling moved table: '%s'",
@@ -2474,7 +2474,7 @@ int ha_maria::info(uint flag)
ref_length= maria_info.reflength;
share->db_options_in_use= maria_info.options;
stats.block_size= maria_block_size;
- stats.mrr_length_per_rec= maria_info.reflength + 8; // 8 = max(sizeof(void *))
+ stats.mrr_length_per_rec= maria_info.reflength + 8; // 8 = MY_MAX(sizeof(void *))
/* Update share */
share->keys_in_use.set_prefix(share->keys);
@@ -2721,7 +2721,7 @@ int ha_maria::external_lock(THD *thd, int lock_type)
This is a bit excessive, ACID requires this only if there are some
changes to commit (rollback shouldn't be tested).
*/
- DBUG_ASSERT(!thd->stmt_da->is_sent ||
+ DBUG_ASSERT(!thd->get_stmt_da()->is_sent() ||
thd->killed == KILL_CONNECTION);
/* autocommit ? rollback a transaction */
#ifdef MARIA_CANNOT_ROLLBACK
@@ -2947,9 +2947,12 @@ void ha_maria::update_create_info(HA_CREATE_INFO *create_info)
}
create_info->data_file_name= data_file_name;
create_info->index_file_name= index_file_name;
- /* We need to restore the row type as Maria can change it */
+ /*
+ Keep user-specified row_type for ALTER,
+ but show the actually used one in SHOW
+ */
if (create_info->row_type != ROW_TYPE_DEFAULT &&
- !(create_info->used_fields & HA_CREATE_USED_ROW_FORMAT))
+ !(thd_sql_command(ha_thd()) == SQLCOM_ALTER_TABLE))
create_info->row_type= get_row_type();
/*
Show always page checksums, as this can be forced with
@@ -3015,7 +3018,7 @@ int ha_maria::create(const char *name, register TABLE *table_arg,
ha_create_info->row_type != ROW_TYPE_PAGE &&
ha_create_info->row_type != ROW_TYPE_NOT_USED &&
ha_create_info->row_type != ROW_TYPE_DEFAULT)
- push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE,
+ push_warning(thd, Sql_condition::WARN_LEVEL_NOTE,
ER_ILLEGAL_HA_CREATE_OPTION,
"Row format set to PAGE because of TRANSACTIONAL=1 option");
@@ -3200,10 +3203,17 @@ bool ha_maria::check_if_incompatible_data(HA_CREATE_INFO *create_info,
{
DBUG_ENTER("check_if_incompatible_data");
uint options= table->s->db_options_in_use;
+ enum ha_choice page_checksum= table->s->page_checksum;
+
+ if (page_checksum == HA_CHOICE_UNDEF)
+ page_checksum= file->s->options & HA_OPTION_PAGE_CHECKSUM ? HA_CHOICE_YES
+ : HA_CHOICE_NO;
if (create_info->auto_increment_value != stats.auto_increment_value ||
create_info->data_file_name != data_file_name ||
create_info->index_file_name != index_file_name ||
+ create_info->page_checksum != page_checksum ||
+ create_info->transactional != table->s->transactional ||
(maria_row_type(create_info) != data_file_type &&
create_info->row_type != ROW_TYPE_DEFAULT) ||
table_changes == IS_EQUAL_NO ||
@@ -3817,7 +3827,7 @@ Item *ha_maria::idx_cond_push(uint keyno_arg, Item* idx_cond_arg)
*/
const KEY *key= &table_share->key_info[keyno_arg];
- for (uint k= 0; k < key->key_parts; ++k)
+ for (uint k= 0; k < key->user_defined_key_parts; ++k)
{
const KEY_PART_INFO *key_part= &key->key_part[k];
if (key_part->key_part_flag & HA_BLOB_PART)
diff --git a/storage/maria/ma_bitmap.c b/storage/maria/ma_bitmap.c
index d48e8df5cf1..e1ccceb78cd 100644
--- a/storage/maria/ma_bitmap.c
+++ b/storage/maria/ma_bitmap.c
@@ -315,7 +315,11 @@ my_bool _ma_bitmap_init(MARIA_SHARE *share, File file,
my_bool _ma_bitmap_end(MARIA_SHARE *share)
{
my_bool res;
- mysql_mutex_assert_owner(&share->close_lock);
+
+#ifndef DBUG_OFF
+ if (! share->internal_table)
+ mysql_mutex_assert_owner(&share->close_lock);
+#endif
DBUG_ASSERT(share->bitmap.non_flushable == 0);
DBUG_ASSERT(share->bitmap.flush_all_requested == 0);
DBUG_ASSERT(share->bitmap.waiting_for_non_flushable == 0 &&
@@ -1389,7 +1393,7 @@ found:
IMPLEMENTATION
We will return the smallest area >= size. If there is no such
block, we will return the biggest area that satisfies
- area_size >= min(BLOB_SEGMENT_MIN_SIZE*full_page_size, size)
+ area_size >= MY_MIN(BLOB_SEGMENT_MIN_SIZE*full_page_size, size)
To speed up searches, we will only consider areas that has at least 16 free
pages starting on an even boundary. When finding such an area, we will
@@ -1497,7 +1501,7 @@ static ulong allocate_full_pages(MARIA_FILE_BITMAP *bitmap,
DBUG_RETURN(0); /* No room on page */
/*
- Now allocate min(pages_needed, area_size), starting from
+ Now allocate MY_MIN(pages_needed, area_size), starting from
best_start + best_prefix_area_size
*/
if (best_area_size > pages_needed)
diff --git a/storage/maria/ma_blockrec.c b/storage/maria/ma_blockrec.c
index 03fd0200d18..7f8fd699e5d 100644
--- a/storage/maria/ma_blockrec.c
+++ b/storage/maria/ma_blockrec.c
@@ -1230,7 +1230,7 @@ static my_bool extend_directory(MARIA_HA *info, uchar *buff, uint block_size,
}
check_directory(buff, block_size,
- info ? min(info->s->base.min_block_length, length) : 0,
+ info ? MY_MIN(info->s->base.min_block_length, length) : 0,
*empty_space);
DBUG_RETURN(0);
}
@@ -2126,7 +2126,7 @@ static my_bool write_full_pages(MARIA_HA *info,
}
lsn_store(buff, lsn);
buff[PAGE_TYPE_OFFSET]= (uchar) BLOB_PAGE;
- copy_length= min(data_size, length);
+ copy_length= MY_MIN(data_size, length);
memcpy(buff + LSN_SIZE + PAGE_TYPE_SIZE, data, copy_length);
length-= copy_length;
@@ -3504,7 +3504,7 @@ static my_bool allocate_and_write_block_record(MARIA_HA *info,
/* page will be pinned & locked by get_head_or_tail_page */
if (get_head_or_tail_page(info, blocks->block, info->buff,
- max(row->space_on_head_page,
+ MY_MAX(row->space_on_head_page,
info->s->base.min_block_length),
HEAD_PAGE,
PAGECACHE_LOCK_WRITE, &row_pos))
@@ -3952,7 +3952,7 @@ static my_bool _ma_update_at_original_place(MARIA_HA *info,
*/
DBUG_ASSERT(blocks->count > 1 ||
- max(new_row->total_length, share->base.min_block_length) <=
+ MY_MAX(new_row->total_length, share->base.min_block_length) <=
length_on_head_page);
/* Store same amount of data on head page as on original page */
diff --git a/storage/maria/ma_cache.c b/storage/maria/ma_cache.c
index 829189baeed..35926d37e03 100644
--- a/storage/maria/ma_cache.c
+++ b/storage/maria/ma_cache.c
@@ -61,7 +61,7 @@ my_bool _ma_read_cache(MARIA_HA *handler, IO_CACHE *info, uchar *buff,
(my_off_t) (info->read_end - info->request_pos))
{
in_buff_pos=info->request_pos+(uint) offset;
- in_buff_length= min(length,(size_t) (info->read_end-in_buff_pos));
+ in_buff_length= MY_MIN(length,(size_t) (info->read_end-in_buff_pos));
memcpy(buff,info->request_pos+(uint) offset,(size_t) in_buff_length);
if (!(length-=in_buff_length))
DBUG_RETURN(0);
diff --git a/storage/maria/ma_check.c b/storage/maria/ma_check.c
index fdb2bae3d8c..78550145535 100644
--- a/storage/maria/ma_check.c
+++ b/storage/maria/ma_check.c
@@ -2406,7 +2406,7 @@ static int initialize_variables_for_repair(HA_CHECK *param,
else
{
ulong rec_length;
- rec_length= max(share->base.min_pack_length,
+ rec_length= MY_MAX(share->base.min_pack_length,
share->base.min_block_length);
sort_info->max_records= (ha_rows) (sort_info->filelength / rec_length);
}
@@ -3612,7 +3612,7 @@ int maria_filecopy(HA_CHECK *param, File to,File from,my_off_t start,
ulong buff_length;
DBUG_ENTER("maria_filecopy");
- buff_length=(ulong) min(param->write_buffer_length,length);
+ buff_length=(ulong) MY_MIN(param->write_buffer_length,length);
if (!(buff=my_malloc(buff_length,MYF(0))))
{
buff=tmp_buff; buff_length=IO_SIZE;
@@ -5670,7 +5670,7 @@ word_init_ft_buf:
ft_buf->buf=ft_buf->lastkey+a_len;
/*
32 is just a safety margin here
- (at least max(val_len, sizeof(nod_flag)) should be there).
+ (at least MY_MAX(val_len, sizeof(nod_flag)) should be there).
May be better performance could be achieved if we'd put
(sort_info->keyinfo->block_length-32)/XXX
instead.
@@ -6083,7 +6083,7 @@ int maria_recreate_table(HA_CHECK *param, MARIA_HA **org_info, char *filename)
maria_close(*org_info);
bzero((char*) &create_info,sizeof(create_info));
- create_info.max_rows=max(max_records,share.base.records);
+ create_info.max_rows=MY_MAX(max_records,share.base.records);
create_info.reloc_rows=share.base.reloc;
create_info.old_options=(share.options |
(unpack ? HA_OPTION_TEMP_COMPRESS_RECORD : 0));
@@ -6506,7 +6506,8 @@ static my_bool create_new_data_handle(MARIA_SORT_PARAM *param, File new_file)
DBUG_ENTER("create_new_data_handle");
if (!(sort_info->new_info= maria_open(info->s->open_file_name.str, O_RDWR,
- HA_OPEN_COPY | HA_OPEN_FOR_REPAIR)))
+ HA_OPEN_COPY | HA_OPEN_FOR_REPAIR |
+ HA_OPEN_INTERNAL_TABLE)))
DBUG_RETURN(1);
new_info= sort_info->new_info;
@@ -6927,7 +6928,7 @@ static TrID max_trid_in_system(void)
{
TrID id= trnman_get_max_trid(); /* 0 if transac manager not initialized */
/* 'id' may be far bigger, if last shutdown is old */
- return max(id, max_trid_in_control_file);
+ return MY_MAX(id, max_trid_in_control_file);
}
diff --git a/storage/maria/ma_checkpoint.c b/storage/maria/ma_checkpoint.c
index c4fa39a91d7..98d18b2f420 100644
--- a/storage/maria/ma_checkpoint.c
+++ b/storage/maria/ma_checkpoint.c
@@ -563,7 +563,7 @@ pthread_handler_t ma_checkpoint_background(void *arg)
DBUG_ASSERT(interval > 0);
#ifdef HAVE_PSI_THREAD_INTERFACE
- PSI_CALL(set_thread_user_host)(0,0,0,0);
+ PSI_THREAD_CALL(set_thread_user_host)(0,0,0,0);
#endif
/*
@@ -859,11 +859,11 @@ static int collect_tables(LEX_STRING *str, LSN checkpoint_start_log_horizon)
my_malloc(STATE_COPIES * sizeof(struct st_state_copy), MYF(MY_WME));
dfiles= (PAGECACHE_FILE *)my_realloc((uchar *)dfiles,
/* avoid size of 0 for my_realloc */
- max(1, nb) * sizeof(PAGECACHE_FILE),
+ MY_MAX(1, nb) * sizeof(PAGECACHE_FILE),
MYF(MY_WME | MY_ALLOW_ZERO_PTR));
kfiles= (PAGECACHE_FILE *)my_realloc((uchar *)kfiles,
/* avoid size of 0 for my_realloc */
- max(1, nb) * sizeof(PAGECACHE_FILE),
+ MY_MAX(1, nb) * sizeof(PAGECACHE_FILE),
MYF(MY_WME | MY_ALLOW_ZERO_PTR));
if (unlikely((state_copies == NULL) ||
(dfiles == NULL) || (kfiles == NULL)))
@@ -896,7 +896,7 @@ static int collect_tables(LEX_STRING *str, LSN checkpoint_start_log_horizon)
Collect and cache a bunch of states. We do this for many states at a
time, to not lock/unlock the log's lock too often.
*/
- uint j, bound= min(nb, i + STATE_COPIES);
+ uint j, bound= MY_MIN(nb, i + STATE_COPIES);
state_copy= state_copies;
/* part of the state is protected by log's lock */
translog_lock();
diff --git a/storage/maria/ma_close.c b/storage/maria/ma_close.c
index c355f1f1def..dd3a034425a 100644
--- a/storage/maria/ma_close.c
+++ b/storage/maria/ma_close.c
@@ -27,6 +27,7 @@ int maria_close(register MARIA_HA *info)
int error=0,flag;
my_bool share_can_be_freed= FALSE;
MARIA_SHARE *share= info->s;
+ my_bool internal_table= share->internal_table;
DBUG_ENTER("maria_close");
DBUG_PRINT("enter",("name: '%s' base: 0x%lx reopen: %u locks: %u",
share->open_file_name.str,
@@ -49,9 +50,9 @@ int maria_close(register MARIA_HA *info)
error= my_errno;
}
-
/* Ensure no one can open this file while we are closing it */
- mysql_mutex_lock(&THR_LOCK_maria);
+ if (!internal_table)
+ mysql_mutex_lock(&THR_LOCK_maria);
if (info->lock_type == F_EXTRA_LCK)
info->lock_type=F_UNLCK; /* HA_EXTRA_NO_USER_CHANGE */
@@ -60,8 +61,11 @@ int maria_close(register MARIA_HA *info)
if (maria_lock_database(info,F_UNLCK))
error=my_errno;
}
- mysql_mutex_lock(&share->close_lock);
- mysql_mutex_lock(&share->intern_lock);
+ if (!internal_table)
+ {
+ mysql_mutex_lock(&share->close_lock);
+ mysql_mutex_lock(&share->intern_lock);
+ }
if (share->options & HA_OPTION_READ_ONLY_DATA)
{
@@ -75,7 +79,8 @@ int maria_close(register MARIA_HA *info)
info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED);
}
flag= !--share->reopen;
- maria_open_list=list_delete(maria_open_list,&info->open_list);
+ if (!internal_table)
+ maria_open_list=list_delete(maria_open_list,&info->open_list);
my_free(info->rec_buff);
(*share->end)(info);
@@ -159,7 +164,8 @@ int maria_close(register MARIA_HA *info)
error= my_errno;
}
thr_lock_delete(&share->lock);
- (void) mysql_mutex_destroy(&share->key_del_lock);
+ mysql_mutex_destroy(&share->key_del_lock);
+
{
int i,keys;
keys = share->state.header.keys;
@@ -181,9 +187,11 @@ int maria_close(register MARIA_HA *info)
We have to unlock share->intern_lock then lock it after
LOCK_trn_list (trnman_lock()) to avoid dead locks.
*/
- mysql_mutex_unlock(&share->intern_lock);
+ if (!internal_table)
+ mysql_mutex_unlock(&share->intern_lock);
_ma_remove_not_visible_states_with_lock(share, TRUE);
- mysql_mutex_lock(&share->intern_lock);
+ if (!internal_table)
+ mysql_mutex_lock(&share->intern_lock);
if (share->in_checkpoint & MARIA_CHECKPOINT_LOOKS_AT_ME)
{
@@ -220,9 +228,12 @@ int maria_close(register MARIA_HA *info)
share->state_history= 0;
}
}
- mysql_mutex_unlock(&THR_LOCK_maria);
- mysql_mutex_unlock(&share->intern_lock);
- mysql_mutex_unlock(&share->close_lock);
+ if (!internal_table)
+ {
+ mysql_mutex_unlock(&THR_LOCK_maria);
+ mysql_mutex_unlock(&share->intern_lock);
+ mysql_mutex_unlock(&share->close_lock);
+ }
if (share_can_be_freed)
{
(void) mysql_mutex_destroy(&share->intern_lock);
diff --git a/storage/maria/ma_commit.c b/storage/maria/ma_commit.c
index 70bc668a220..46db3ca4ae5 100644
--- a/storage/maria/ma_commit.c
+++ b/storage/maria/ma_commit.c
@@ -39,11 +39,11 @@ int ma_commit(TRN *trn)
/*
- if COMMIT record is written before trnman_commit_trn():
if Checkpoint comes in the middle it will see trn is not committed,
- then if crash, Recovery might roll back trn (if min(rec_lsn) is after
+ then if crash, Recovery might roll back trn (if MY_MIN(rec_lsn) is after
COMMIT record) and this is not an issue as
* transaction's updates were not made visible to other transactions
* "commit ok" was not sent to client
- Alternatively, Recovery might commit trn (if min(rec_lsn) is before COMMIT
+ Alternatively, Recovery might commit trn (if MY_MIN(rec_lsn) is before COMMIT
record), which is ok too. All in all it means that "trn committed" is not
100% equal to "COMMIT record written".
- if COMMIT record is written after trnman_commit_trn():
diff --git a/storage/maria/ma_create.c b/storage/maria/ma_create.c
index 4ed00598c2f..232f4a330eb 100644
--- a/storage/maria/ma_create.c
+++ b/storage/maria/ma_create.c
@@ -51,6 +51,7 @@ int maria_create(const char *name, enum data_file_type datafile_type,
base_pos,long_varchar_count,varchar_length,
unique_key_parts,fulltext_keys,offset, not_block_record_extra_length;
uint max_field_lengths, extra_header_size, column_nr;
+ uint internal_table= flags & HA_CREATE_INTERNAL_TABLE;
ulong reclength, real_reclength,min_pack_length;
char filename[FN_REFLEN], linkname[FN_REFLEN], *linkname_ptr;
ulong pack_reclength;
@@ -713,7 +714,7 @@ int maria_create(const char *name, enum data_file_type datafile_type,
got from MAI file header (see also mariapack.c:save_state)
*/
share.base.key_reflength=
- maria_get_pointer_length(max(ci->key_file_length,tmp),3);
+ maria_get_pointer_length(MY_MAX(ci->key_file_length,tmp),3);
share.base.keys= share.state.header.keys= keys;
share.state.header.uniques= uniques;
share.state.header.fulltext_keys= fulltext_keys;
@@ -780,7 +781,7 @@ int maria_create(const char *name, enum data_file_type datafile_type,
share.base.min_block_length=
(share.base.pack_reclength+3 < MARIA_EXTEND_BLOCK_LENGTH &&
! share.base.blobs) ?
- max(share.base.pack_reclength,MARIA_MIN_BLOCK_LENGTH) :
+ MY_MAX(share.base.pack_reclength,MARIA_MIN_BLOCK_LENGTH) :
MARIA_EXTEND_BLOCK_LENGTH;
}
else if (datafile_type == STATIC_RECORD)
@@ -789,7 +790,8 @@ int maria_create(const char *name, enum data_file_type datafile_type,
if (! (flags & HA_DONT_TOUCH_DATA))
share.state.create_time= time((time_t*) 0);
- mysql_mutex_lock(&THR_LOCK_maria);
+ if (!internal_table)
+ mysql_mutex_lock(&THR_LOCK_maria);
/*
NOTE: For test_if_reopen() we need a real path name. Hence we need
@@ -854,7 +856,7 @@ int maria_create(const char *name, enum data_file_type datafile_type,
NOTE: The filename is compared against unique_file_name of every
open table. Hence we need a real path here.
*/
- if (_ma_test_if_reopen(filename))
+ if (!internal_table && _ma_test_if_reopen(filename))
{
my_printf_error(HA_ERR_TABLE_EXIST, "Aria table '%s' is in use "
"(most likely by a MERGE table). Try FLUSH TABLES.",
@@ -1171,7 +1173,8 @@ int maria_create(const char *name, enum data_file_type datafile_type,
if (mysql_file_close(dfile,MYF(0)))
goto err;
}
- mysql_mutex_unlock(&THR_LOCK_maria);
+ if (!internal_table)
+ mysql_mutex_unlock(&THR_LOCK_maria);
res= 0;
my_free((char*) rec_per_key_part);
errpos=0;
@@ -1180,7 +1183,8 @@ int maria_create(const char *name, enum data_file_type datafile_type,
DBUG_RETURN(res);
err:
- mysql_mutex_unlock(&THR_LOCK_maria);
+ if (!internal_table)
+ mysql_mutex_unlock(&THR_LOCK_maria);
err_no_lock:
save_errno=my_errno;
diff --git a/storage/maria/ma_delete.c b/storage/maria/ma_delete.c
index 22f7341098d..31773ef2dfc 100644
--- a/storage/maria/ma_delete.c
+++ b/storage/maria/ma_delete.c
@@ -988,7 +988,7 @@ static int underflow(MARIA_HA *info, MARIA_KEYDEF *keyinfo,
*/
if (_ma_log_add(anc_page, anc_length, keypos,
anc_key_inserted.move_length +
- max(anc_key_inserted.changed_length -
+ MY_MAX(anc_key_inserted.changed_length -
anc_key_inserted.move_length,
key_deleted.changed_length),
anc_key_inserted.move_length -
@@ -1230,7 +1230,7 @@ static int underflow(MARIA_HA *info, MARIA_KEYDEF *keyinfo,
*/
if (_ma_log_add(anc_page, anc_length, keypos,
anc_key_inserted.move_length +
- max(anc_key_inserted.changed_length -
+ MY_MAX(anc_key_inserted.changed_length -
anc_key_inserted.move_length,
key_deleted.changed_length),
anc_key_inserted.move_length -
@@ -1571,7 +1571,7 @@ my_bool _ma_log_delete(MARIA_PAGE *ma_page, const uchar *key_pos,
current_size != share->max_index_block_size)
{
/* Append data that didn't fit on the page before */
- uint length= (min(ma_page->size, share->max_index_block_size) -
+ uint length= (MY_MIN(ma_page->size, share->max_index_block_size) -
current_size);
uchar *data= ma_page->buff + current_size;
diff --git a/storage/maria/ma_dynrec.c b/storage/maria/ma_dynrec.c
index c1c0a8e9729..4bb51d0dcf3 100644
--- a/storage/maria/ma_dynrec.c
+++ b/storage/maria/ma_dynrec.c
@@ -851,7 +851,7 @@ static my_bool update_dynamic_record(MARIA_HA *info, MARIA_RECORD_POS filepos,
uint tmp=MY_ALIGN(reclength - length + 3 +
test(reclength >= 65520L),MARIA_DYN_ALIGN_SIZE);
/* Don't create a block bigger than MARIA_MAX_BLOCK_LENGTH */
- tmp= min(length+tmp, MARIA_MAX_BLOCK_LENGTH)-length;
+ tmp= MY_MIN(length+tmp, MARIA_MAX_BLOCK_LENGTH)-length;
/* Check if we can extend this block */
if (block_info.filepos + block_info.block_len ==
info->state->data_file_length &&
diff --git a/storage/maria/ma_extra.c b/storage/maria/ma_extra.c
index 0847f3c729c..66e7b4033c7 100644
--- a/storage/maria/ma_extra.c
+++ b/storage/maria/ma_extra.c
@@ -105,7 +105,7 @@ int maria_extra(MARIA_HA *info, enum ha_extra_function function,
cache_size= (extra_arg ? *(ulong*) extra_arg :
my_default_record_cache_size);
if (!(init_io_cache(&info->rec_cache, info->dfile.file,
- (uint) min(share->state.state.data_file_length+1,
+ (uint) MY_MIN(share->state.state.data_file_length+1,
cache_size),
READ_CACHE,0L,(pbool) (info->lock_type != F_UNLCK),
MYF(share->write_flag & MY_WAIT_IF_FULL))))
diff --git a/storage/maria/ma_ft_boolean_search.c b/storage/maria/ma_ft_boolean_search.c
index c98c4b599fc..e69c90c671c 100644
--- a/storage/maria/ma_ft_boolean_search.c
+++ b/storage/maria/ma_ft_boolean_search.c
@@ -46,9 +46,9 @@
three subexpressions (including the top-level one),
every one has its own max_docid, updated by its plus word.
but for the search word6 uses
- max(word1.max_docid, word3.max_docid, word5.max_docid),
+ MY_MAX(word1.max_docid, word3.max_docid, word5.max_docid),
while word4 uses, accordingly,
- max(word1.max_docid, word3.max_docid).
+ MY_MAX(word1.max_docid, word3.max_docid).
*/
#define FT_CORE
@@ -338,7 +338,7 @@ static int _ftb_no_dupes_cmp(void* not_used __attribute__((unused)),
/* returns 1 if the search was finished (must-word wasn't found) */
-static int _ft2_search(FTB *ftb, FTB_WORD *ftbw, my_bool init_search)
+static int _ft2_search_no_lock(FTB *ftb, FTB_WORD *ftbw, my_bool init_search)
{
int r;
int subkeys=1;
@@ -439,7 +439,7 @@ static int _ft2_search(FTB *ftb, FTB_WORD *ftbw, my_bool init_search)
ftbw->key_root=info->s->state.key_root[ftb->keynr];
ftbw->keyinfo=info->s->keyinfo+ftb->keynr;
ftbw->off=0;
- return _ft2_search(ftb, ftbw, 0);
+ return _ft2_search_no_lock(ftb, ftbw, 0);
}
/* matching key found */
@@ -469,6 +469,19 @@ static int _ft2_search(FTB *ftb, FTB_WORD *ftbw, my_bool init_search)
return 0;
}
+static int _ft2_search(FTB *ftb, FTB_WORD *ftbw, my_bool init_search)
+{
+ int r;
+ MARIA_SHARE *share= ftb->info->s;
+ if (share->lock_key_trees)
+ mysql_rwlock_rdlock(&share->keyinfo[ftb->keynr].root_lock);
+ r= _ft2_search_no_lock(ftb, ftbw, init_search);
+ if (share->lock_key_trees)
+ mysql_rwlock_unlock(&share->keyinfo[ftb->keynr].root_lock);
+ return r;
+}
+
+
static void _ftb_init_index_search(FT_INFO *ftb)
{
int i;
diff --git a/storage/maria/ma_ft_nlq_search.c b/storage/maria/ma_ft_nlq_search.c
index c6d9c2411c6..613f13e64a9 100644
--- a/storage/maria/ma_ft_nlq_search.c
+++ b/storage/maria/ma_ft_nlq_search.c
@@ -71,10 +71,11 @@ static int walk_and_match(FT_WORD *word, uint32 count, ALL_IN_ONE *aio)
TREE_ELEMENT *selem;
double gweight=1;
MARIA_HA *info= aio->info;
+ MARIA_SHARE *share= info->s;
uchar *keybuff= aio->keybuff;
- MARIA_KEYDEF *keyinfo= info->s->keyinfo+aio->keynr;
- my_off_t key_root=info->s->state.key_root[aio->keynr];
- uint extra=HA_FT_WLEN+info->s->rec_reflength;
+ MARIA_KEYDEF *keyinfo= share->keyinfo+aio->keynr;
+ my_off_t key_root;
+ uint extra=HA_FT_WLEN+share->rec_reflength;
MARIA_KEY key;
#if HA_FT_WTYPE == HA_KEYTYPE_FLOAT
float tmp_weight;
@@ -92,6 +93,11 @@ static int walk_and_match(FT_WORD *word, uint32 count, ALL_IN_ONE *aio)
key.data_length-= HA_FT_WLEN;
doc_cnt=0;
+ if (share->lock_key_trees)
+ mysql_rwlock_rdlock(&share->keyinfo[aio->keynr].root_lock);
+
+ key_root= share->state.key_root[aio->keynr];
+
/* Skip rows inserted by current inserted */
for (r= _ma_search(info, &key, SEARCH_FIND, key_root) ;
!r &&
@@ -102,12 +108,14 @@ static int walk_and_match(FT_WORD *word, uint32 count, ALL_IN_ONE *aio)
r= _ma_search_next(info, &info->last_key, SEARCH_BIGGER, key_root))
;
+ if (share->lock_key_trees)
+ mysql_rwlock_unlock(&share->keyinfo[aio->keynr].root_lock);
+
info->update|= HA_STATE_AKTIV; /* for _ma_test_if_changed() */
/* The following should be safe, even if we compare doubles */
while (!r && gweight)
{
-
if (key.data_length &&
ha_compare_text(aio->charset,
info->last_key.data+1,
@@ -125,9 +133,11 @@ static int walk_and_match(FT_WORD *word, uint32 count, ALL_IN_ONE *aio)
be skipped (based on subkeys) ?
*/
keybuff+= key.data_length;
- keyinfo= &info->s->ft2_keyinfo;
+ keyinfo= &share->ft2_keyinfo;
key_root= info->cur_row.lastpos;
key.data_length= 0;
+ if (share->lock_key_trees)
+ mysql_rwlock_rdlock(&share->keyinfo[aio->keynr].root_lock);
r= _ma_search_first(info, keyinfo, key_root);
goto do_skip;
}
@@ -163,6 +173,9 @@ static int walk_and_match(FT_WORD *word, uint32 count, ALL_IN_ONE *aio)
if (gweight < 0 || doc_cnt > 2000000)
gweight=0;
+ if (share->lock_key_trees)
+ mysql_rwlock_rdlock(&share->keyinfo[aio->keynr].root_lock);
+
if (_ma_test_if_changed(info) == 0)
r= _ma_search_next(info, &info->last_key, SEARCH_BIGGER, key_root);
else
@@ -174,6 +187,8 @@ do_skip:
!r && info->cur_row.lastpos >= info->state->data_file_length)
r= _ma_search_next(info, &info->last_key, SEARCH_BIGGER, key_root);
+ if (share->lock_key_trees)
+ mysql_rwlock_unlock(&share->keyinfo[aio->keynr].root_lock);
}
word->weight=gweight;
diff --git a/storage/maria/ma_info.c b/storage/maria/ma_info.c
index 341ea147785..912ed0984a3 100644
--- a/storage/maria/ma_info.c
+++ b/storage/maria/ma_info.c
@@ -31,7 +31,7 @@ MARIA_RECORD_POS maria_position(MARIA_HA *info)
uint maria_max_key_length()
{
uint tmp= (_ma_max_key_length() - 8 - HA_MAX_KEY_SEG*3);
- return min(HA_MAX_KEY_LENGTH, tmp);
+ return MY_MIN(HA_MAX_KEY_LENGTH, tmp);
}
/* Get information about the table */
diff --git a/storage/maria/ma_key_recover.c b/storage/maria/ma_key_recover.c
index 502ac2b8809..ae9427981ea 100644
--- a/storage/maria/ma_key_recover.c
+++ b/storage/maria/ma_key_recover.c
@@ -506,7 +506,7 @@ my_bool _ma_log_add(MARIA_PAGE *ma_page,
move_length));
DBUG_ASSERT(info->s->now_transactional);
DBUG_ASSERT(move_length <= (int) changed_length);
- DBUG_ASSERT(ma_page->org_size == min(org_page_length, max_page_size));
+ DBUG_ASSERT(ma_page->org_size == MY_MIN(org_page_length, max_page_size));
DBUG_ASSERT(ma_page->size == org_page_length + move_length);
DBUG_ASSERT(offset <= ma_page->org_size);
@@ -618,7 +618,7 @@ my_bool _ma_log_add(MARIA_PAGE *ma_page,
DBUG_ASSERT(current_size <= max_page_size && current_size <= ma_page->size);
if (current_size != ma_page->size && current_size != max_page_size)
{
- uint length= min(ma_page->size, max_page_size) - current_size;
+ uint length= MY_MIN(ma_page->size, max_page_size) - current_size;
uchar *data= ma_page->buff + current_size;
log_pos[0]= KEY_OP_ADD_SUFFIX;
@@ -641,7 +641,7 @@ my_bool _ma_log_add(MARIA_PAGE *ma_page,
overflow!
*/
ma_page->org_size= current_size;
- DBUG_ASSERT(ma_page->org_size == min(ma_page->size, max_page_size));
+ DBUG_ASSERT(ma_page->org_size == MY_MIN(ma_page->size, max_page_size));
if (translog_write_record(&lsn, LOGREC_REDO_INDEX,
info->trn, info,
@@ -663,7 +663,7 @@ void _ma_log_key_changes(MARIA_PAGE *ma_page, LEX_CUSTRING *log_array,
uint *translog_parts)
{
MARIA_SHARE *share= ma_page->info->s;
- int page_length= min(ma_page->size, share->max_index_block_size);
+ int page_length= MY_MIN(ma_page->size, share->max_index_block_size);
uint org_length;
ha_checksum crc;
@@ -1111,7 +1111,7 @@ uint _ma_apply_redo_index(MARIA_HA *info,
uint2korr(header), uint2korr(header+2)));
DBUG_ASSERT(uint2korr(header) == page_length);
#ifndef DBUG_OFF
- new_page_length= min(uint2korr(header+2), max_page_size);
+ new_page_length= MY_MIN(uint2korr(header+2), max_page_size);
#endif
header+= 4;
break;
@@ -1148,7 +1148,7 @@ uint _ma_apply_redo_index(MARIA_HA *info,
from= uint2korr(header);
header+= 2;
/* "from" is a place in the existing page */
- DBUG_ASSERT(max(from, to) < max_page_size);
+ DBUG_ASSERT(MY_MAX(from, to) < max_page_size);
memcpy(buff + to, buff + from, full_length);
}
break;
diff --git a/storage/maria/ma_loghandler.c b/storage/maria/ma_loghandler.c
index f0ade217341..ae58be55105 100644
--- a/storage/maria/ma_loghandler.c
+++ b/storage/maria/ma_loghandler.c
@@ -4808,7 +4808,7 @@ static my_bool translog_advance_pointer(int pages, uint16 last_page_data)
}
#endif
- min_offset= min(buffer_end_offset, file_end_offset);
+ min_offset= MY_MIN(buffer_end_offset, file_end_offset);
/* TODO: check is it ptr or size enough */
log_descriptor.bc.buffer->size+= min_offset;
log_descriptor.bc.ptr+= min_offset;
@@ -6833,7 +6833,7 @@ translog_variable_length_header(uchar *page, translog_size_t page_offset,
page_rest= (uint16) (TRANSLOG_PAGE_SIZE - (src - page));
base_lsn= buff->lsn;
- body_len= min(page_rest, buff->record_length);
+ body_len= MY_MIN(page_rest, buff->record_length);
}
else
{
@@ -7396,7 +7396,7 @@ translog_size_t translog_read_record(LSN lsn,
data->scanner.fixed_horizon));
if (offset < data->read_header)
{
- uint16 len= min(data->read_header, end) - offset;
+ uint16 len= MY_MIN(data->read_header, end) - offset;
DBUG_PRINT("info",
("enter header offset: %lu length: %lu",
(ulong) offset, (ulong) length));
diff --git a/storage/maria/ma_open.c b/storage/maria/ma_open.c
index d2ae10ccb18..eb0dc5f9def 100644
--- a/storage/maria/ma_open.c
+++ b/storage/maria/ma_open.c
@@ -78,6 +78,7 @@ MARIA_HA *_ma_test_if_reopen(const char *filename)
mode Mode of table (O_RDONLY | O_RDWR)
data_file Filedescriptor of data file to use < 0 if one should open
open it.
+ internal_table <> 0 if this is an internal temporary table
RETURN
# Maria handler
@@ -86,7 +87,8 @@ MARIA_HA *_ma_test_if_reopen(const char *filename)
static MARIA_HA *maria_clone_internal(MARIA_SHARE *share, const char *name,
- int mode, File data_file)
+ int mode, File data_file,
+ uint internal_table)
{
int save_errno;
uint errpos;
@@ -159,7 +161,7 @@ static MARIA_HA *maria_clone_internal(MARIA_SHARE *share, const char *name,
/* The following should be big enough for all pinning purposes */
if (my_init_dynamic_array(&info.pinned_pages,
sizeof(MARIA_PINNED_PAGE),
- max(share->base.blobs*2 + 4,
+ MY_MAX(share->base.blobs*2 + 4,
MARIA_MAX_TREE_LEVELS*3), 16, MYF(0)))
goto err;
@@ -207,9 +209,17 @@ static MARIA_HA *maria_clone_internal(MARIA_SHARE *share, const char *name,
if (share->options & HA_OPTION_TMP_TABLE)
m_info->lock.type= TL_WRITE;
- m_info->open_list.data=(void*) m_info;
- maria_open_list=list_add(maria_open_list,&m_info->open_list);
-
+ if (!internal_table)
+ {
+ m_info->open_list.data=(void*) m_info;
+ maria_open_list=list_add(maria_open_list,&m_info->open_list);
+ }
+ else
+ {
+ /* We don't need to mark internal temporary tables as changed on disk */
+ share->internal_table= 1;
+ share->global_changed= 1;
+ }
DBUG_RETURN(m_info);
err:
@@ -243,7 +253,7 @@ MARIA_HA *maria_clone(MARIA_SHARE *share, int mode)
mysql_mutex_lock(&THR_LOCK_maria);
new_info= maria_clone_internal(share, NullS, mode,
share->data_file_type == BLOCK_RECORD ?
- share->bitmap.file.file : -1);
+ share->bitmap.file.file : -1, 0);
mysql_mutex_unlock(&THR_LOCK_maria);
return new_info;
}
@@ -263,6 +273,7 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
int kfile,open_mode,save_errno;
uint i,j,len,errpos,head_length,base_pos,keys, realpath_err,
key_parts,unique_key_parts,fulltext_keys,uniques;
+ uint internal_table= test(open_flags & HA_OPEN_INTERNAL_TABLE);
size_t info_length;
char name_buff[FN_REFLEN], org_name[FN_REFLEN], index_name[FN_REFLEN],
data_name[FN_REFLEN];
@@ -293,10 +304,11 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
DBUG_RETURN(0);
}
- mysql_mutex_lock(&THR_LOCK_maria);
old_info= 0;
+ if (!internal_table)
+ mysql_mutex_lock(&THR_LOCK_maria);
if ((open_flags & HA_OPEN_COPY) ||
- !(old_info=_ma_test_if_reopen(name_buff)))
+ (internal_table || !(old_info=_ma_test_if_reopen(name_buff))))
{
share= &share_buff;
bzero((uchar*) &share_buff,sizeof(share_buff));
@@ -600,7 +612,7 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
{
/* Packed key, ensure we don't get overflow in underflow() */
keyinfo->underflow_block_length=
- max((int) (share->max_index_block_size - keyinfo->maxlength * 3),
+ MY_MAX((int) (share->max_index_block_size - keyinfo->maxlength * 3),
(int) (share->keypage_header + share->base.key_reflength));
set_if_smaller(keyinfo->underflow_block_length,
keyinfo->block_length/3);
@@ -788,7 +800,7 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
/* Need some extra bytes for decode_bytes */
share->base.extra_rec_buff_size+= 7;
}
- share->base.default_rec_buff_size= max(share->base.pack_reclength +
+ share->base.default_rec_buff_size= MY_MAX(share->base.pack_reclength +
share->base.extra_rec_buff_size,
share->base.max_key_length);
@@ -991,14 +1003,16 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
data_file= share->bitmap.file.file; /* Only opened once */
}
- if (!(m_info= maria_clone_internal(share, name, mode, data_file)))
+ if (!(m_info= maria_clone_internal(share, name, mode, data_file,
+ internal_table)))
goto err;
if (maria_is_crashed(m_info))
DBUG_PRINT("warning", ("table is crashed: changed: %u",
share->state.changed));
- mysql_mutex_unlock(&THR_LOCK_maria);
+ if (!internal_table)
+ mysql_mutex_unlock(&THR_LOCK_maria);
m_info->open_flags= open_flags;
DBUG_PRINT("exit", ("table: %p name: %s",m_info, name));
@@ -1037,7 +1051,8 @@ err:
default:
break;
}
- mysql_mutex_unlock(&THR_LOCK_maria);
+ if (!internal_table)
+ mysql_mutex_unlock(&THR_LOCK_maria);
my_errno= save_errno;
DBUG_RETURN (NULL);
} /* maria_open */
diff --git a/storage/maria/ma_packrec.c b/storage/maria/ma_packrec.c
index c14e69414b5..9b06c0d4f78 100644
--- a/storage/maria/ma_packrec.c
+++ b/storage/maria/ma_packrec.c
@@ -718,7 +718,7 @@ static uint find_longest_bitstream(uint16 *table, uint16 *end)
return OFFSET_TABLE_SIZE;
}
length2= find_longest_bitstream(next, end) + 1;
- length=max(length,length2);
+ length=MY_MAX(length,length2);
}
return length;
}
@@ -1447,7 +1447,7 @@ uint _ma_pack_get_block_info(MARIA_HA *maria, MARIA_BIT_BUFF *bit_buff,
info->filepos=filepos+head_length;
if (file > 0)
{
- info->offset=min(info->rec_len, ref_length - head_length);
+ info->offset=MY_MIN(info->rec_len, ref_length - head_length);
memcpy(*rec_buff_p, header + head_length, info->offset);
}
return 0;
diff --git a/storage/maria/ma_recovery.c b/storage/maria/ma_recovery.c
index bd2bde1c89a..3645bf69960 100644
--- a/storage/maria/ma_recovery.c
+++ b/storage/maria/ma_recovery.c
@@ -3679,7 +3679,7 @@ static void print_redo_phase_progress(TRANSLOG_ADDRESS addr)
cur_offset= LSN_OFFSET(addr);
local_remainder= (cur_logno == end_logno) ? (end_offset - cur_offset) :
(((longlong)log_file_size) - cur_offset +
- max(end_logno - cur_logno - 1, 0) * ((longlong)log_file_size) +
+ MY_MAX(end_logno - cur_logno - 1, 0) * ((longlong)log_file_size) +
end_offset);
if (initial_remainder == (ulonglong)(-1))
initial_remainder= local_remainder;
diff --git a/storage/maria/ma_rt_mbr.c b/storage/maria/ma_rt_mbr.c
index b3e2b0ceab8..496ace2a84f 100644
--- a/storage/maria/ma_rt_mbr.c
+++ b/storage/maria/ma_rt_mbr.c
@@ -329,8 +329,8 @@ int maria_rtree_d_mbr(const HA_KEYSEG *keyseg, const uchar *a,
bmin= korr_func(b); \
amax= korr_func(a+len); \
bmax= korr_func(b+len); \
- amin= min(amin, bmin); \
- amax= max(amax, bmax); \
+ amin= MY_MIN(amin, bmin); \
+ amax= MY_MAX(amax, bmax); \
store_func(c, amin); \
store_func(c+len, amax); \
}
@@ -342,8 +342,8 @@ int maria_rtree_d_mbr(const HA_KEYSEG *keyseg, const uchar *a,
get_func(bmin, b); \
get_func(amax, a+len); \
get_func(bmax, b+len); \
- amin= min(amin, bmin); \
- amax= max(amax, bmax); \
+ amin= MY_MIN(amin, bmin); \
+ amax= MY_MAX(amax, bmax); \
store_func(c, amin); \
store_func(c+len, amax); \
}
@@ -422,8 +422,8 @@ int maria_rtree_combine_rect(const HA_KEYSEG *keyseg, const uchar* a,
bmin= korr_func(b); \
amax= korr_func(a+len); \
bmax= korr_func(b+len); \
- amin= max(amin, bmin); \
- amax= min(amax, bmax); \
+ amin= MY_MAX(amin, bmin); \
+ amax= MY_MIN(amax, bmax); \
if (amin >= amax) \
return 0; \
res *= amax - amin; \
@@ -436,8 +436,8 @@ int maria_rtree_combine_rect(const HA_KEYSEG *keyseg, const uchar* a,
get_func(bmin, b); \
get_func(amax, a+len); \
get_func(bmax, b+len); \
- amin= max(amin, bmin); \
- amax= min(amax, bmax); \
+ amin= MY_MAX(amin, bmin); \
+ amax= MY_MIN(amax, bmax); \
if (amin >= amax) \
return 0; \
res *= amax - amin; \
@@ -513,7 +513,7 @@ double maria_rtree_overlapping_area(HA_KEYSEG *keyseg, uchar* a, uchar* b,
amax= korr_func(a+len); \
bmax= korr_func(b+len); \
a_area *= (((double)amax) - ((double)amin)); \
- loc_ab_area *= ((double)max(amax, bmax) - (double)min(amin, bmin)); \
+ loc_ab_area *= ((double)MY_MAX(amax, bmax) - (double)MY_MIN(amin, bmin)); \
}
#define RT_AREA_INC_GET(type, get_func, len)\
@@ -524,7 +524,7 @@ double maria_rtree_overlapping_area(HA_KEYSEG *keyseg, uchar* a, uchar* b,
get_func(amax, a+len); \
get_func(bmax, b+len); \
a_area *= (((double)amax) - ((double)amin)); \
- loc_ab_area *= ((double)max(amax, bmax) - (double)min(amin, bmin)); \
+ loc_ab_area *= ((double)MY_MAX(amax, bmax) - (double)MY_MIN(amin, bmin)); \
}
/*
@@ -612,7 +612,7 @@ safe_end:
amax= korr_func(a+len); \
bmax= korr_func(b+len); \
a_perim+= (((double)amax) - ((double)amin)); \
- *ab_perim+= ((double)max(amax, bmax) - (double)min(amin, bmin)); \
+ *ab_perim+= ((double)MY_MAX(amax, bmax) - (double)MY_MIN(amin, bmin)); \
}
#define RT_PERIM_INC_GET(type, get_func, len)\
@@ -623,7 +623,7 @@ safe_end:
get_func(amax, a+len); \
get_func(bmax, b+len); \
a_perim+= (((double)amax) - ((double)amin)); \
- *ab_perim+= ((double)max(amax, bmax) - (double)min(amin, bmin)); \
+ *ab_perim+= ((double)MY_MAX(amax, bmax) - (double)MY_MIN(amin, bmin)); \
}
/*
diff --git a/storage/maria/ma_sort.c b/storage/maria/ma_sort.c
index 65035e77c76..433862ea54a 100644
--- a/storage/maria/ma_sort.c
+++ b/storage/maria/ma_sort.c
@@ -136,7 +136,7 @@ int _ma_create_index_by_sort(MARIA_SORT_PARAM *info, my_bool no_messages,
sort_keys= (uchar **) NULL; error= 1;
maxbuffer=1;
- memavl=max(sortbuff_size,MIN_SORT_MEMORY);
+ memavl=MY_MAX(sortbuff_size,MIN_SORT_MEMORY);
records= info->sort_info->max_records;
sort_length= info->key_length;
LINT_INIT(keys);
@@ -157,7 +157,7 @@ int _ma_create_index_by_sort(MARIA_SORT_PARAM *info, my_bool no_messages,
will be allocated when needed.
*/
keys= memavl / (sort_length+sizeof(char*));
- maxbuffer= (uint) min((ulonglong) 1000, (records / keys)+1);
+ maxbuffer= (uint) MY_MIN((ulonglong) 1000, (records / keys)+1);
}
else
{
@@ -189,7 +189,7 @@ int _ma_create_index_by_sort(MARIA_SORT_PARAM *info, my_bool no_messages,
HA_FT_MAXBYTELEN, MYF(0))))
{
if (my_init_dynamic_array(&buffpek, sizeof(BUFFPEK), maxbuffer,
- min(maxbuffer/2, 1000), MYF(0)))
+ MY_MIN(maxbuffer/2, 1000), MYF(0)))
{
my_free(sort_keys);
sort_keys= 0;
@@ -399,7 +399,7 @@ pthread_handler_t _ma_thr_find_all_keys(void *arg)
bzero((char*) &sort_param->unique, sizeof(sort_param->unique));
sortbuff_size= sort_param->sortbuff_size;
- memavl= max(sortbuff_size, MIN_SORT_MEMORY);
+ memavl= MY_MAX(sortbuff_size, MIN_SORT_MEMORY);
idx= (ha_keys) sort_param->sort_info->max_records;
sort_length= sort_param->key_length;
maxbuffer= 1;
@@ -418,7 +418,7 @@ pthread_handler_t _ma_thr_find_all_keys(void *arg)
will be allocated when needed.
*/
keys= memavl / (sort_length+sizeof(char*));
- maxbuffer= (uint) min((ulonglong) 1000, (idx / keys)+1);
+ maxbuffer= (uint) MY_MIN((ulonglong) 1000, (idx / keys)+1);
}
else
{
@@ -445,7 +445,7 @@ pthread_handler_t _ma_thr_find_all_keys(void *arg)
HA_FT_MAXBYTELEN : 0), MYF(0))))
{
if (my_init_dynamic_array(&sort_param->buffpek, sizeof(BUFFPEK),
- maxbuffer, min(maxbuffer/2, 1000), MYF(0)))
+ maxbuffer, MY_MIN(maxbuffer/2, 1000), MYF(0)))
{
my_free(sort_keys);
sort_keys= (uchar **) NULL; /* for err: label */
@@ -929,7 +929,7 @@ static my_off_t read_to_buffer(IO_CACHE *fromfile, BUFFPEK *buffpek,
register ha_keys count;
my_off_t length;
- if ((count= (ha_keys) min((ha_rows) buffpek->max_keys,buffpek->count)))
+ if ((count= (ha_keys) MY_MIN((ha_rows) buffpek->max_keys,buffpek->count)))
{
if (mysql_file_pread(fromfile->file, (uchar*) buffpek->base,
(length= sort_length * count),
@@ -951,7 +951,7 @@ static my_off_t read_to_buffer_varlen(IO_CACHE *fromfile, BUFFPEK *buffpek,
uint idx;
uchar *buffp;
- if ((count= (ha_keys) min((ha_rows) buffpek->max_keys,buffpek->count)))
+ if ((count= (ha_keys) MY_MIN((ha_rows) buffpek->max_keys,buffpek->count)))
{
buffp= buffpek->base;
diff --git a/storage/maria/ma_test1.c b/storage/maria/ma_test1.c
index 5a655d4412a..001717932f9 100644
--- a/storage/maria/ma_test1.c
+++ b/storage/maria/ma_test1.c
@@ -632,7 +632,7 @@ static void create_record(uchar *record,uint rownr)
uint tmp;
uchar *ptr;;
sprintf((char*) blob_record,"... row: %d", rownr);
- strappend((char*) blob_record,max(MAX_REC_LENGTH-rownr,10),' ');
+ strappend((char*) blob_record,MY_MAX(MAX_REC_LENGTH-rownr,10),' ');
tmp=strlen((char*) blob_record);
int4store(pos,tmp);
ptr=blob_record;
diff --git a/storage/maria/ma_test2.c b/storage/maria/ma_test2.c
index a3b7a2a9e98..7b4f29cf972 100644
--- a/storage/maria/ma_test2.c
+++ b/storage/maria/ma_test2.c
@@ -698,7 +698,7 @@ int main(int argc, char *argv[])
goto err2;
}
- for (i=min(2,keys) ; i-- > 0 ;)
+ for (i=MY_MIN(2,keys) ; i-- > 0 ;)
{
if (maria_rsame(file,read_record2,(int) i)) goto err;
if (bcmp(read_record,read_record2,reclength) != 0)
diff --git a/storage/maria/ma_write.c b/storage/maria/ma_write.c
index ecae92cb455..629b774706e 100644
--- a/storage/maria/ma_write.c
+++ b/storage/maria/ma_write.c
@@ -934,7 +934,7 @@ ChangeSet@1.2562, 2008-04-09 07:41:40+02:00, serg@janus.mylan +9 -0
&s_temp));
}
DBUG_RETURN(_ma_split_page(info, key, anc_page,
- min(org_anc_length,
+ MY_MIN(org_anc_length,
info->s->max_index_block_size),
key_pos, s_temp.changed_length, t_length,
key_buff, insert_last));
@@ -2076,7 +2076,7 @@ static my_bool _ma_log_split(MARIA_PAGE *ma_page,
Handle case when split happened directly after the newly inserted key.
*/
max_key_length= new_length - offset;
- extra_length= min(key_length, max_key_length);
+ extra_length= MY_MIN(key_length, max_key_length);
if (offset + move_length > new_length)
{
/* This is true when move_length includes changes for next packed key */
diff --git a/storage/maria/maria_def.h b/storage/maria/maria_def.h
index 0180356056b..b26a39a4d38 100644
--- a/storage/maria/maria_def.h
+++ b/storage/maria/maria_def.h
@@ -466,6 +466,7 @@ typedef struct st_maria_share
my_bool changed, /* If changed since lock */
global_changed, /* If changed since open */
not_flushed;
+ my_bool internal_table; /* Internal tmp table */
my_bool lock_key_trees; /* If we have to lock trees on read */
my_bool non_transactional_concurrent_insert;
my_bool delay_key_write;
diff --git a/storage/maria/maria_pack.c b/storage/maria/maria_pack.c
index 788bc5c2ad3..66085a8981b 100644
--- a/storage/maria/maria_pack.c
+++ b/storage/maria/maria_pack.c
@@ -1243,7 +1243,7 @@ static void check_counts(HUFF_COUNTS *huff_counts, uint trees,
{
if (huff_counts->field_length > 2 &&
huff_counts->empty_fields + (records - huff_counts->empty_fields)*
- (1+max_bit(max(huff_counts->max_pre_space,
+ (1+max_bit(MY_MAX(huff_counts->max_pre_space,
huff_counts->max_end_space))) <
records * max_bit(huff_counts->field_length))
{
@@ -3021,7 +3021,7 @@ static int save_state_mrg(File file,PACK_MRG_INFO *mrg,my_off_t new_length,
if (mrg->src_file_has_indexes_disabled)
{
isam_file->s->state.state.key_file_length=
- max(isam_file->s->state.state.key_file_length, new_length);
+ MY_MAX(isam_file->s->state.state.key_file_length, new_length);
}
state.dellink= HA_OFFSET_ERROR;
state.version=(ulong) time((time_t*) 0);
diff --git a/storage/maria/trnman.c b/storage/maria/trnman.c
index 4979b806c5e..d2d14092cac 100644
--- a/storage/maria/trnman.c
+++ b/storage/maria/trnman.c
@@ -875,7 +875,7 @@ TrID trnman_get_min_safe_trid()
{
TrID trid;
mysql_mutex_lock(&LOCK_trn_list);
- trid= min(active_list_min.next->min_read_from,
+ trid= MY_MIN(active_list_min.next->min_read_from,
global_trid_generator);
mysql_mutex_unlock(&LOCK_trn_list);
return trid;
diff --git a/storage/maria/unittest/ma_test_all-t b/storage/maria/unittest/ma_test_all-t
index e66d269ab93..18b26a7bd45 100755
--- a/storage/maria/unittest/ma_test_all-t
+++ b/storage/maria/unittest/ma_test_all-t
@@ -650,6 +650,8 @@ sub ok
{
exit 1;
}
+ # Unlink all files so that we can continue on error
+ unlink_all_possible_tmp_files();
return 0;
}
@@ -702,7 +704,7 @@ sub unlink_all_possible_tmp_files()
unlink_log_files();
# Unlink tmp files that may have been created when testing the test programs
- unlink <$full_tmpdir/*.TMD $full_tmpdir/aria_read_log_test1.txt $full_tmpdir/test1*.MA? $full_tmpdir/ma_test_recovery.output aria_log_control aria_log.00000001 aria_log.00000002 aria_logtest1.MA? test1.MA? test2.MA? test3.MA?>;
+ unlink <$full_tmpdir/*.TMD $full_tmpdir/aria_read_log_test1.txt $full_tmpdir/test1*.MA? $full_tmpdir/ma_test_recovery.output aria_log_control aria_log.00000001 aria_log.00000002 aria_logtest1.MA? test1.MA? test2.MA? test3.MA? *.TMD>;
}
####
diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc
index 756d52a4890..3c69bb65d53 100644
--- a/storage/myisam/ha_myisam.cc
+++ b/storage/myisam/ha_myisam.cc
@@ -248,8 +248,8 @@ int table2myisam(TABLE *table_arg, MI_KEYDEF **keydef_out,
pos->algorithm;
keydef[i].block_length= pos->block_size;
keydef[i].seg= keyseg;
- keydef[i].keysegs= pos->key_parts;
- for (j= 0; j < pos->key_parts; j++)
+ keydef[i].keysegs= pos->user_defined_key_parts;
+ for (j= 0; j < pos->user_defined_key_parts; j++)
{
Field *field= pos->key_part[j].field;
type= field->key_type();
@@ -301,7 +301,7 @@ int table2myisam(TABLE *table_arg, MI_KEYDEF **keydef_out,
keydef[i].seg[j].flag|= HA_BLOB_PART;
/* save number of bytes used to pack length */
keydef[i].seg[j].bit_start= (uint) (field->pack_length() -
- share->blob_ptr_size);
+ portable_sizeof_char_ptr);
}
else if (field->type() == MYSQL_TYPE_BIT)
{
@@ -311,7 +311,7 @@ int table2myisam(TABLE *table_arg, MI_KEYDEF **keydef_out,
(uchar*) table_arg->record[0]);
}
}
- keyseg+= pos->key_parts;
+ keyseg+= pos->user_defined_key_parts;
}
if (table_arg->found_next_number_field)
keydef[share->next_number_index].flag|= HA_AUTO_KEY;
@@ -1132,8 +1132,8 @@ int ha_myisam::repair(THD *thd, HA_CHECK &param, bool do_optimize)
}
if (error && file->create_unique_index_by_sort &&
share->state.dupp_key != MAX_KEY)
- print_keydup_error(share->state.dupp_key,
- ER(ER_DUP_ENTRY_WITH_KEY_NAME), MYF(0));
+ print_keydup_error(table, &table->key_info[share->state.dupp_key],
+ MYF(0));
}
else
{
@@ -1523,8 +1523,8 @@ void ha_myisam::start_bulk_insert(ha_rows rows, uint flags)
{
DBUG_ENTER("ha_myisam::start_bulk_insert");
THD *thd= current_thd;
- ulong size= min(thd->variables.read_buff_size,
- (ulong) (table->s->avg_row_length*rows));
+ ulong size= MY_MIN(thd->variables.read_buff_size,
+ (ulong) (table->s->avg_row_length*rows));
DBUG_PRINT("info",("start_bulk_insert: rows %lu size %lu",
(ulong) rows, size));
@@ -1535,37 +1535,34 @@ void ha_myisam::start_bulk_insert(ha_rows rows, uint flags)
can_enable_indexes= mi_is_all_keys_active(file->s->state.key_map,
file->s->base.keys);
- if (!(specialflag & SPECIAL_SAFE_MODE))
+ /*
+ Only disable old index if the table was empty and we are inserting
+ a lot of rows.
+ Note that in end_bulk_insert() we may truncate the table if
+ enable_indexes() failed, thus it's essential that indexes are
+ disabled ONLY for an empty table.
+ */
+ if (file->state->records == 0 && can_enable_indexes &&
+ (!rows || rows >= MI_MIN_ROWS_TO_DISABLE_INDEXES))
{
- /*
- Only disable old index if the table was empty and we are inserting
- a lot of rows.
- Note that in end_bulk_insert() we may truncate the table if
- enable_indexes() failed, thus it's essential that indexes are
- disabled ONLY for an empty table.
- */
- if (file->state->records == 0 && can_enable_indexes &&
- (!rows || rows >= MI_MIN_ROWS_TO_DISABLE_INDEXES))
+ if (file->open_flag & HA_OPEN_INTERNAL_TABLE)
{
- if (file->open_flag & HA_OPEN_INTERNAL_TABLE)
- {
- file->update|= HA_STATE_CHANGED;
- mi_clear_all_keys_active(file->s->state.key_map);
- }
- else
- {
- my_bool all_keys= test(flags & HA_CREATE_UNIQUE_INDEX_BY_SORT);
- mi_disable_indexes_for_rebuild(file, rows, all_keys);
- }
+ file->update|= HA_STATE_CHANGED;
+ mi_clear_all_keys_active(file->s->state.key_map);
}
else
+ {
+ my_bool all_keys= test(flags & HA_CREATE_UNIQUE_INDEX_BY_SORT);
+ mi_disable_indexes_for_rebuild(file, rows, all_keys);
+ }
+ }
+ else
if (!file->bulk_insert &&
(!rows || rows >= MI_MIN_ROWS_TO_USE_BULK_INSERT))
{
mi_init_bulk_insert(file, (size_t) thd->variables.bulk_insert_buff_size,
rows);
}
- }
DBUG_VOID_RETURN;
}
@@ -1843,15 +1840,14 @@ int ha_myisam::info(uint flag)
number of records in the buffer results in a different number of buffer
refills and in a different order of records in the result set.
*/
- stats.mrr_length_per_rec= misam_info.reflength + 8; // 8=max(sizeof(void *))
+ stats.mrr_length_per_rec= misam_info.reflength + 8; // 8=MY_MAX(sizeof(void *))
ref_length= misam_info.reflength;
share->db_options_in_use= misam_info.options;
stats.block_size= myisam_block_size; /* record block size */
- /* Update share */
- if (share->tmp_table == NO_TMP_TABLE)
- mysql_mutex_lock(&share->LOCK_ha_data);
+ if (table_share->tmp_table == NO_TMP_TABLE)
+ mysql_mutex_lock(&table_share->LOCK_share);
share->keys_in_use.set_prefix(share->keys);
share->keys_in_use.intersect_extended(misam_info.key_map);
share->keys_for_keyread.intersect(share->keys_in_use);
@@ -1860,8 +1856,8 @@ int ha_myisam::info(uint flag)
memcpy((char*) table->key_info[0].rec_per_key,
(char*) misam_info.rec_per_key,
sizeof(table->key_info[0].rec_per_key[0])*share->key_parts);
- if (share->tmp_table == NO_TMP_TABLE)
- mysql_mutex_unlock(&share->LOCK_ha_data);
+ if (table_share->tmp_table == NO_TMP_TABLE)
+ mysql_mutex_unlock(&table_share->LOCK_share);
/*
Set data_file_name and index_file_name to point at the symlink value
@@ -1893,8 +1889,6 @@ int ha_myisam::info(uint flag)
int ha_myisam::extra(enum ha_extra_function operation)
{
- if ((specialflag & SPECIAL_SAFE_MODE) && operation == HA_EXTRA_KEYREAD)
- return 0;
if (operation == HA_EXTRA_MMAP && !opt_myisam_use_mmap)
return 0;
return mi_extra(file, operation, 0);
@@ -1912,8 +1906,6 @@ int ha_myisam::reset(void)
int ha_myisam::extra_opt(enum ha_extra_function operation, ulong cache_size)
{
- if ((specialflag & SPECIAL_SAFE_MODE) && operation == HA_EXTRA_WRITE_CACHE)
- return 0;
return mi_extra(file, operation, (void*) &cache_size);
}
@@ -2288,7 +2280,7 @@ Item *ha_myisam::idx_cond_push(uint keyno_arg, Item* idx_cond_arg)
*/
const KEY *key= &table_share->key_info[keyno_arg];
- for (uint k= 0; k < key->key_parts; ++k)
+ for (uint k= 0; k < key->user_defined_key_parts; ++k)
{
const KEY_PART_INFO *key_part= &key->key_part[k];
if (key_part->key_part_flag & HA_BLOB_PART)
diff --git a/storage/myisam/mi_cache.c b/storage/myisam/mi_cache.c
index 6e9feaefb2d..3477e67eae5 100644
--- a/storage/myisam/mi_cache.c
+++ b/storage/myisam/mi_cache.c
@@ -62,7 +62,7 @@ int _mi_read_cache(IO_CACHE *info, uchar *buff, my_off_t pos, uint length,
(my_off_t) (info->read_end - info->request_pos))
{
in_buff_pos=info->request_pos+(uint) offset;
- in_buff_length= min(length, (size_t) (info->read_end-in_buff_pos));
+ in_buff_length= MY_MIN(length, (size_t) (info->read_end-in_buff_pos));
memcpy(buff,info->request_pos+(uint) offset,(size_t) in_buff_length);
if (!(length-=in_buff_length))
DBUG_RETURN(0);
diff --git a/storage/myisam/mi_check.c b/storage/myisam/mi_check.c
index 97ea1d17c26..4d23457acc0 100644
--- a/storage/myisam/mi_check.c
+++ b/storage/myisam/mi_check.c
@@ -1946,7 +1946,13 @@ int mi_sort_index(HA_CHECK *param, register MI_INFO *info, char * name)
key++,keyinfo++)
{
if (! mi_is_key_active(info->s->state.key_map, key))
+ {
+ /* Since the key is not active, this should not be read, but we
+ initialize it anyway to silence a Valgrind warn when passing that
+ chunk of memory to pwrite(). */
+ index_pos[key]= HA_OFFSET_ERROR;
continue;
+ }
if (share->state.key_root[key] != HA_OFFSET_ERROR)
{
@@ -2145,7 +2151,7 @@ int filecopy(HA_CHECK *param, File to,File from,my_off_t start,
ulong buff_length;
DBUG_ENTER("filecopy");
- buff_length=(ulong) min(param->write_buffer_length,length);
+ buff_length=(ulong) MY_MIN(param->write_buffer_length,length);
if (!(buff=my_malloc(buff_length,MYF(0))))
{
buff=tmp_buff; buff_length=IO_SIZE;
@@ -2303,7 +2309,7 @@ int mi_repair_by_sort(HA_CHECK *param, register MI_INFO *info,
MYF(param->malloc_flags));
if (share->data_file_type == DYNAMIC_RECORD)
- length=max(share->base.min_pack_length+1,share->base.min_block_length);
+ length=MY_MAX(share->base.min_pack_length+1,share->base.min_block_length);
else if (share->data_file_type == COMPRESSED_RECORD)
length=share->base.min_block_length;
else
@@ -2392,7 +2398,7 @@ int mi_repair_by_sort(HA_CHECK *param, register MI_INFO *info,
(see _create_index_by_sort)
*/
sort_info.max_records= 10 *
- max(param->sort_buffer_length, MIN_SORT_BUFFER) /
+ MY_MAX(param->sort_buffer_length, MIN_SORT_BUFFER) /
sort_param.key_length;
}
@@ -2759,7 +2765,7 @@ int mi_repair_parallel(HA_CHECK *param, register MI_INFO *info,
mysql_file_seek(param->read_cache.file, 0L, MY_SEEK_END, MYF(0));
if (share->data_file_type == DYNAMIC_RECORD)
- rec_length=max(share->base.min_pack_length+1,share->base.min_block_length);
+ rec_length=MY_MAX(share->base.min_pack_length+1,share->base.min_block_length);
else if (share->data_file_type == COMPRESSED_RECORD)
rec_length=share->base.min_block_length;
else
@@ -3984,7 +3990,7 @@ word_init_ft_buf:
ft_buf->buf=ft_buf->lastkey+a_len;
/*
32 is just a safety margin here
- (at least max(val_len, sizeof(nod_flag)) should be there).
+ (at least MY_MAX(val_len, sizeof(nod_flag)) should be there).
May be better performance could be achieved if we'd put
(sort_info->keyinfo->block_length-32)/XXX
instead.
diff --git a/storage/myisam/mi_close.c b/storage/myisam/mi_close.c
index e58c2e0f189..f0a82bcef04 100644
--- a/storage/myisam/mi_close.c
+++ b/storage/myisam/mi_close.c
@@ -31,7 +31,8 @@ int mi_close(register MI_INFO *info)
(long) info, (uint) share->reopen,
(uint) share->tot_locks));
- mysql_mutex_lock(&THR_LOCK_myisam);
+ if (info->open_list.data)
+ mysql_mutex_lock(&THR_LOCK_myisam);
if (info->lock_type == F_EXTRA_LCK)
info->lock_type=F_UNLCK; /* HA_EXTRA_NO_USER_CHANGE */
@@ -54,7 +55,8 @@ int mi_close(register MI_INFO *info)
info->opt_flag&= ~(READ_CACHE_USED | WRITE_CACHE_USED);
}
flag= !--share->reopen;
- myisam_open_list=list_delete(myisam_open_list,&info->open_list);
+ if (info->open_list.data)
+ myisam_open_list= list_delete(myisam_open_list, &info->open_list);
mysql_mutex_unlock(&share->intern_lock);
my_free(mi_get_rec_buff_ptr(info, info->rec_buff));
@@ -111,7 +113,8 @@ int mi_close(register MI_INFO *info)
}
my_free(info->s);
}
- mysql_mutex_unlock(&THR_LOCK_myisam);
+ if (info->open_list.data)
+ mysql_mutex_unlock(&THR_LOCK_myisam);
if (info->ftparser_param)
{
my_free(info->ftparser_param);
diff --git a/storage/myisam/mi_create.c b/storage/myisam/mi_create.c
index 0792d90cfaa..4de218864de 100644
--- a/storage/myisam/mi_create.c
+++ b/storage/myisam/mi_create.c
@@ -44,6 +44,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
base_pos,long_varchar_count,varchar_length,
max_key_block_length,unique_key_parts,fulltext_keys,offset;
uint aligned_key_start, block_length, res;
+ uint internal_table= flags & HA_CREATE_INTERNAL_TABLE;
ulong reclength, real_reclength,min_pack_length;
char filename[FN_REFLEN],linkname[FN_REFLEN], *linkname_ptr;
ulong pack_reclength;
@@ -447,8 +448,8 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
block_length= (keydef->block_length ?
my_round_up_to_next_power(keydef->block_length) :
myisam_block_size);
- block_length= max(block_length, MI_MIN_KEY_BLOCK_LENGTH);
- block_length= min(block_length, MI_MAX_KEY_BLOCK_LENGTH);
+ block_length= MY_MAX(block_length, MI_MIN_KEY_BLOCK_LENGTH);
+ block_length= MY_MIN(block_length, MI_MAX_KEY_BLOCK_LENGTH);
keydef->block_length= (uint16) MI_BLOCK_SIZE(length-real_length_diff,
pointer,MI_MAX_KEYPTR_SIZE,
@@ -537,7 +538,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
got from MYI file header (see also myisampack.c:save_state)
*/
share.base.key_reflength=
- mi_get_pointer_length(max(ci->key_file_length,tmp),3);
+ mi_get_pointer_length(MY_MAX(ci->key_file_length,tmp),3);
share.base.keys= share.state.header.keys= keys;
share.state.header.uniques= uniques;
share.state.header.fulltext_keys= fulltext_keys;
@@ -570,12 +571,13 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
share.base.min_block_length=
(share.base.pack_reclength+3 < MI_EXTEND_BLOCK_LENGTH &&
! share.base.blobs) ?
- max(share.base.pack_reclength,MI_MIN_BLOCK_LENGTH) :
+ MY_MAX(share.base.pack_reclength,MI_MIN_BLOCK_LENGTH) :
MI_EXTEND_BLOCK_LENGTH;
if (! (flags & HA_DONT_TOUCH_DATA))
share.state.create_time= time((time_t*) 0);
- mysql_mutex_lock(&THR_LOCK_myisam);
+ if (!internal_table)
+ mysql_mutex_lock(&THR_LOCK_myisam);
/*
NOTE: For test_if_reopen() we need a real path name. Hence we need
@@ -632,7 +634,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
NOTE: The filename is compared against unique_file_name of every
open table. Hence we need a real path here.
*/
- if (test_if_reopen(filename))
+ if (!internal_table && test_if_reopen(filename))
{
my_printf_error(HA_ERR_TABLE_EXIST, "MyISAM table '%s' is in use "
"(most likely by a MERGE table). Try FLUSH TABLES.",
@@ -821,7 +823,8 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
goto err;
}
errpos=0;
- mysql_mutex_unlock(&THR_LOCK_myisam);
+ if (!internal_table)
+ mysql_mutex_unlock(&THR_LOCK_myisam);
res= 0;
if (mysql_file_close(file, MYF(0)))
res= my_errno;
@@ -829,7 +832,8 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs,
DBUG_RETURN(res);
err:
- mysql_mutex_unlock(&THR_LOCK_myisam);
+ if (!internal_table)
+ mysql_mutex_unlock(&THR_LOCK_myisam);
err_no_lock:
save_errno=my_errno;
diff --git a/storage/myisam/mi_dynrec.c b/storage/myisam/mi_dynrec.c
index 009a2affe0c..021809ed892 100644
--- a/storage/myisam/mi_dynrec.c
+++ b/storage/myisam/mi_dynrec.c
@@ -118,7 +118,8 @@ int mi_munmap_file(MI_INFO *info)
{
int ret;
DBUG_ENTER("mi_unmap_file");
- if ((ret= my_munmap(info->s->file_map, (size_t) info->s->mmaped_length)))
+ if ((ret= my_munmap((void*) info->s->file_map,
+ (size_t) info->s->mmaped_length)))
DBUG_RETURN(ret);
info->s->file_read= mi_nommap_pread;
info->s->file_write= mi_nommap_pwrite;
@@ -865,7 +866,7 @@ static int update_dynamic_record(MI_INFO *info, my_off_t filepos, uchar *record,
uint tmp=MY_ALIGN(reclength - length + 3 +
test(reclength >= 65520L),MI_DYN_ALIGN_SIZE);
/* Don't create a block bigger than MI_MAX_BLOCK_LENGTH */
- tmp= min(length+tmp, MI_MAX_BLOCK_LENGTH)-length;
+ tmp= MY_MIN(length+tmp, MI_MAX_BLOCK_LENGTH)-length;
/* Check if we can extend this block */
if (block_info.filepos + block_info.block_len ==
info->state->data_file_length &&
@@ -1780,15 +1781,21 @@ int _mi_read_rnd_dynamic_record(MI_INFO *info, uchar *buf,
if (b_type & (BLOCK_DELETED | BLOCK_ERROR | BLOCK_SYNC_ERROR |
BLOCK_FATAL_ERROR))
{
- if ((b_type & (BLOCK_DELETED | BLOCK_SYNC_ERROR))
- && skip_deleted_blocks)
- {
- filepos=block_info.filepos+block_info.block_len;
- block_info.second_read=0;
- continue; /* Search after next_record */
- }
- if (b_type & (BLOCK_DELETED | BLOCK_SYNC_ERROR))
+ if ((b_type & (BLOCK_DELETED | BLOCK_SYNC_ERROR)))
{
+ if (skip_deleted_blocks)
+ {
+ filepos=block_info.filepos+block_info.block_len;
+ block_info.second_read=0;
+ continue; /* Search after next_record */
+ }
+ /*
+ If we're not on the first block of a record and
+ the block is marked as deleted or out of sync,
+ something's gone wrong: the record is damaged.
+ */
+ if (block_of_record != 0)
+ goto panic;
my_errno=HA_ERR_RECORD_DELETED;
info->lastpos=block_info.filepos;
info->nextpos=block_info.filepos+block_info.block_len;
diff --git a/storage/myisam/mi_extra.c b/storage/myisam/mi_extra.c
index dab1f66ed6d..f57fba5c2c5 100644
--- a/storage/myisam/mi_extra.c
+++ b/storage/myisam/mi_extra.c
@@ -100,7 +100,7 @@ int mi_extra(MI_INFO *info, enum ha_extra_function function, void *extra_arg)
cache_size= (extra_arg ? *(ulong*) extra_arg :
my_default_record_cache_size);
if (!(init_io_cache(&info->rec_cache,info->dfile,
- (uint) min(info->state->data_file_length+1,
+ (uint) MY_MIN(info->state->data_file_length+1,
cache_size),
READ_CACHE,0L,(pbool) (info->lock_type != F_UNLCK),
MYF(share->write_flag & MY_WAIT_IF_FULL))))
diff --git a/storage/myisam/mi_open.c b/storage/myisam/mi_open.c
index 22225303bae..f8213b1a3a5 100644
--- a/storage/myisam/mi_open.c
+++ b/storage/myisam/mi_open.c
@@ -14,7 +14,18 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
-/* open a isam-database */
+/*
+ open a isam-database
+
+ Internal temporary tables
+ -------------------------
+ Since only single instance of internal temporary table is required by
+ optimizer, such tables are not registered on myisam_open_list. In effect
+ it means (a) THR_LOCK_myisam is not held while such table is being created,
+ opened or closed; (b) no iteration through myisam_open_list while opening a
+ table. This optimization gives nice scalability benefit in concurrent
+ environment. MEMORY internal temporary tables are optimized similarly.
+*/
#include "fulltext.h"
#include "sp_defs.h"
@@ -74,10 +85,11 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
int lock_error,kfile,open_mode,save_errno,have_rtree=0, realpath_err;
uint i,j,len,errpos,head_length,base_pos,offset,info_length,keys,
key_parts,unique_key_parts,base_key_parts,fulltext_keys,uniques;
+ uint internal_table= open_flags & HA_OPEN_INTERNAL_TABLE;
char name_buff[FN_REFLEN], org_name[FN_REFLEN], index_name[FN_REFLEN],
data_name[FN_REFLEN];
uchar *UNINIT_VAR(disk_cache), *disk_pos, *end_pos;
- MI_INFO info,*UNINIT_VAR(m_info),*old_info;
+ MI_INFO info,*UNINIT_VAR(m_info),*old_info= NULL;
MYISAM_SHARE share_buff,*share;
ulong *rec_per_key_part= 0;
my_off_t *key_root, *key_del;
@@ -99,8 +111,13 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
DBUG_RETURN (NULL);
}
- mysql_mutex_lock(&THR_LOCK_myisam);
- if (!(old_info=test_if_reopen(name_buff)))
+ if (!internal_table)
+ {
+ mysql_mutex_lock(&THR_LOCK_myisam);
+ old_info= test_if_reopen(name_buff);
+ }
+
+ if (!old_info)
{
share= &share_buff;
bzero((uchar*) &share_buff,sizeof(share_buff));
@@ -311,7 +328,7 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
strmov(share->index_file_name, index_name);
strmov(share->data_file_name, data_name);
- share->blocksize=min(IO_SIZE,myisam_block_size);
+ share->blocksize=MY_MIN(IO_SIZE,myisam_block_size);
{
HA_KEYSEG *pos=share->keyparts;
uint32 ftkey_nr= 1;
@@ -497,7 +514,7 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
share->base.margin_key_file_length=(share->base.max_key_file_length -
(keys ? MI_INDEX_BLOCK_MARGIN *
share->blocksize * keys : 0));
- share->blocksize=min(IO_SIZE,myisam_block_size);
+ share->blocksize=MY_MIN(IO_SIZE,myisam_block_size);
share->data_file_type=STATIC_RECORD;
if (share->options & HA_OPTION_COMPRESS_RECORD)
{
@@ -644,10 +661,13 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags)
*m_info=info;
thr_lock_data_init(&share->lock,&m_info->lock,(void*) m_info);
- m_info->open_list.data=(void*) m_info;
- myisam_open_list=list_add(myisam_open_list,&m_info->open_list);
- mysql_mutex_unlock(&THR_LOCK_myisam);
+ if (!internal_table)
+ {
+ m_info->open_list.data= (void*) m_info;
+ myisam_open_list= list_add(myisam_open_list, &m_info->open_list);
+ mysql_mutex_unlock(&THR_LOCK_myisam);
+ }
bzero(info.buff, share->base.max_key_block_length * 2);
my_free(rec_per_key_part);
@@ -692,7 +712,8 @@ err:
default:
break;
}
- mysql_mutex_unlock(&THR_LOCK_myisam);
+ if (!internal_table)
+ mysql_mutex_unlock(&THR_LOCK_myisam);
my_errno=save_errno;
DBUG_RETURN (NULL);
} /* mi_open */
@@ -712,10 +733,10 @@ uchar *mi_alloc_rec_buff(MI_INFO *info, ulong length, uchar **buf)
if (length == (ulong) -1)
{
if (info->s->options & HA_OPTION_COMPRESS_RECORD)
- length= max(info->s->base.pack_reclength, info->s->max_pack_length);
+ length= MY_MAX(info->s->base.pack_reclength, info->s->max_pack_length);
else
length= info->s->base.pack_reclength;
- length= max(length, info->s->base.max_key_length);
+ length= MY_MAX(length, info->s->base.max_key_length);
/* Avoid unnecessary realloc */
if (newptr && length == old_length)
return newptr;
diff --git a/storage/myisam/mi_packrec.c b/storage/myisam/mi_packrec.c
index 7e2403b64c9..c95afe57725 100644
--- a/storage/myisam/mi_packrec.c
+++ b/storage/myisam/mi_packrec.c
@@ -685,7 +685,7 @@ static uint find_longest_bitstream(uint16 *table, uint16 *end)
return OFFSET_TABLE_SIZE;
}
length2= find_longest_bitstream(next, end) + 1;
- length=max(length,length2);
+ length=MY_MAX(length,length2);
}
return length;
}
@@ -1399,7 +1399,7 @@ uint _mi_pack_get_block_info(MI_INFO *myisam, MI_BIT_BUFF *bit_buff,
info->filepos=filepos+head_length;
if (file > 0)
{
- info->offset=min(info->rec_len, ref_length - head_length);
+ info->offset=MY_MIN(info->rec_len, ref_length - head_length);
memcpy(*rec_buff_p, header + head_length, info->offset);
}
return 0;
diff --git a/storage/myisam/mi_test1.c b/storage/myisam/mi_test1.c
index 3b2597eb01e..9e4e1c46891 100644
--- a/storage/myisam/mi_test1.c
+++ b/storage/myisam/mi_test1.c
@@ -439,7 +439,7 @@ static void create_record(uchar *record,uint rownr)
uint tmp;
uchar *ptr;;
sprintf((char*) blob_record,"... row: %d", rownr);
- strappend((char*) blob_record,max(MAX_REC_LENGTH-rownr,10),' ');
+ strappend((char*) blob_record,MY_MAX(MAX_REC_LENGTH-rownr,10),' ');
tmp=strlen((char*) blob_record);
int4store(pos,tmp);
ptr=blob_record;
diff --git a/storage/myisam/mi_test2.c b/storage/myisam/mi_test2.c
index 3ec12ef5cca..e53c68874b2 100644
--- a/storage/myisam/mi_test2.c
+++ b/storage/myisam/mi_test2.c
@@ -597,7 +597,7 @@ int main(int argc, char *argv[])
goto err;
bmove(read_record2,read_record,reclength);
- for (i=min(2,keys) ; i-- > 0 ;)
+ for (i=MY_MIN(2,keys) ; i-- > 0 ;)
{
if (mi_rsame(file,read_record2,(int) i)) goto err;
if (memcmp(read_record,read_record2,reclength) != 0)
diff --git a/storage/myisam/myisamchk.c b/storage/myisam/myisamchk.c
index fd9d8652508..7ad35c92e6f 100644
--- a/storage/myisam/myisamchk.c
+++ b/storage/myisam/myisamchk.c
@@ -16,6 +16,7 @@
/* Describe, check and repair of MyISAM tables */
#include "fulltext.h"
+#include "my_default.h"
#include <m_ctype.h>
#include <stdarg.h>
#include <my_getopt.h>
diff --git a/storage/myisam/myisamlog.c b/storage/myisam/myisamlog.c
index 1624213851b..86e1978edaa 100644
--- a/storage/myisam/myisamlog.c
+++ b/storage/myisam/myisamlog.c
@@ -91,7 +91,7 @@ int main(int argc, char **argv)
log_filename=myisam_log_filename;
get_options(&argc,&argv);
/* Number of MyISAM files we can have open at one time */
- max_files= (my_set_max_open_files(min(max_files,8))-6)/2;
+ max_files= (my_set_max_open_files(MY_MIN(max_files,8))-6)/2;
if (update)
printf("Trying to %s MyISAM files according to log '%s'\n",
(recover ? "recover" : "update"),log_filename);
diff --git a/storage/myisam/myisampack.c b/storage/myisam/myisampack.c
index 74be8b99d87..461a3eddcca 100644
--- a/storage/myisam/myisampack.c
+++ b/storage/myisam/myisampack.c
@@ -21,6 +21,7 @@
#endif
#include "myisamdef.h"
+#include "my_default.h"
#include <queues.h>
#include <my_tree.h>
#include "mysys_err.h"
@@ -784,7 +785,7 @@ static int create_dest_frm(char *source_table, char *dest_table)
*/
(void) my_copy(source_name, dest_name, MYF(MY_DONT_OVERWRITE_FILE));
- return 0;
+ DBUG_RETURN(0);
}
@@ -1270,7 +1271,7 @@ static void check_counts(HUFF_COUNTS *huff_counts, uint trees,
{
if (huff_counts->field_length > 2 &&
huff_counts->empty_fields + (records - huff_counts->empty_fields)*
- (1+max_bit(max(huff_counts->max_pre_space,
+ (1+max_bit(MY_MAX(huff_counts->max_pre_space,
huff_counts->max_end_space))) <
records * max_bit(huff_counts->field_length))
{
@@ -3024,7 +3025,7 @@ static int save_state_mrg(File file,PACK_MRG_INFO *mrg,my_off_t new_length,
if (mrg->src_file_has_indexes_disabled)
{
isam_file->s->state.state.key_file_length=
- max(isam_file->s->state.state.key_file_length, new_length);
+ MY_MAX(isam_file->s->state.state.key_file_length, new_length);
}
state.dellink= HA_OFFSET_ERROR;
state.version=(ulong) time((time_t*) 0);
diff --git a/storage/myisam/rt_mbr.c b/storage/myisam/rt_mbr.c
index 06fb1c3b0a7..64e220b2968 100644
--- a/storage/myisam/rt_mbr.c
+++ b/storage/myisam/rt_mbr.c
@@ -325,8 +325,8 @@ int rtree_d_mbr(HA_KEYSEG *keyseg, uchar *a, uint key_length, double *res)
bmin = korr_func(b); \
amax = korr_func(a+len); \
bmax = korr_func(b+len); \
- amin = min(amin, bmin); \
- amax = max(amax, bmax); \
+ amin = MY_MIN(amin, bmin); \
+ amax = MY_MAX(amax, bmax); \
store_func(c, amin); \
store_func(c+len, amax); \
}
@@ -338,8 +338,8 @@ int rtree_d_mbr(HA_KEYSEG *keyseg, uchar *a, uint key_length, double *res)
get_func(bmin, b); \
get_func(amax, a+len); \
get_func(bmax, b+len); \
- amin = min(amin, bmin); \
- amax = max(amax, bmax); \
+ amin = MY_MIN(amin, bmin); \
+ amax = MY_MAX(amax, bmax); \
store_func(c, amin); \
store_func(c+len, amax); \
}
@@ -417,8 +417,8 @@ int rtree_combine_rect(HA_KEYSEG *keyseg, uchar* a, uchar* b, uchar* c,
bmin = korr_func(b); \
amax = korr_func(a+len); \
bmax = korr_func(b+len); \
- amin = max(amin, bmin); \
- amax = min(amax, bmax); \
+ amin = MY_MAX(amin, bmin); \
+ amax = MY_MIN(amax, bmax); \
if (amin >= amax) \
return 0; \
res *= amax - amin; \
@@ -431,8 +431,8 @@ int rtree_combine_rect(HA_KEYSEG *keyseg, uchar* a, uchar* b, uchar* c,
get_func(bmin, b); \
get_func(amax, a+len); \
get_func(bmax, b+len); \
- amin = max(amin, bmin); \
- amax = min(amax, bmax); \
+ amin = MY_MAX(amin, bmin); \
+ amax = MY_MIN(amax, bmax); \
if (amin >= amax) \
return 0; \
res *= amax - amin; \
@@ -508,7 +508,7 @@ double rtree_overlapping_area(HA_KEYSEG *keyseg, uchar* a, uchar* b,
amax = korr_func(a+len); \
bmax = korr_func(b+len); \
a_area *= (((double)amax) - ((double)amin)); \
- loc_ab_area *= ((double)max(amax, bmax) - (double)min(amin, bmin)); \
+ loc_ab_area *= ((double)MY_MAX(amax, bmax) - (double)MY_MIN(amin, bmin)); \
}
#define RT_AREA_INC_GET(type, get_func, len)\
@@ -519,7 +519,7 @@ double rtree_overlapping_area(HA_KEYSEG *keyseg, uchar* a, uchar* b,
get_func(amax, a+len); \
get_func(bmax, b+len); \
a_area *= (((double)amax) - ((double)amin)); \
- loc_ab_area *= ((double)max(amax, bmax) - (double)min(amin, bmin)); \
+ loc_ab_area *= ((double)MY_MAX(amax, bmax) - (double)MY_MIN(amin, bmin)); \
}
/*
@@ -604,7 +604,7 @@ safe_end:
amax = korr_func(a+len); \
bmax = korr_func(b+len); \
a_perim+= (((double)amax) - ((double)amin)); \
- *ab_perim+= ((double)max(amax, bmax) - (double)min(amin, bmin)); \
+ *ab_perim+= ((double)MY_MAX(amax, bmax) - (double)MY_MIN(amin, bmin)); \
}
#define RT_PERIM_INC_GET(type, get_func, len)\
@@ -615,7 +615,7 @@ safe_end:
get_func(amax, a+len); \
get_func(bmax, b+len); \
a_perim+= (((double)amax) - ((double)amin)); \
- *ab_perim+= ((double)max(amax, bmax) - (double)min(amin, bmin)); \
+ *ab_perim+= ((double)MY_MAX(amax, bmax) - (double)MY_MIN(amin, bmin)); \
}
/*
diff --git a/storage/myisam/sort.c b/storage/myisam/sort.c
index 4bff7b14e1a..3d68b486f00 100644
--- a/storage/myisam/sort.c
+++ b/storage/myisam/sort.c
@@ -132,7 +132,7 @@ int _create_index_by_sort(MI_SORT_PARAM *info,my_bool no_messages,
sort_keys= (uchar **) NULL; error= 1;
maxbuffer=1;
- memavl= max(sortbuff_size, MIN_SORT_BUFFER);
+ memavl= MY_MAX(sortbuff_size, MIN_SORT_BUFFER);
records= info->sort_info->max_records;
sort_length= info->key_length;
LINT_INIT(keys);
@@ -153,7 +153,7 @@ int _create_index_by_sort(MI_SORT_PARAM *info,my_bool no_messages,
will be allocated when needed.
*/
keys= memavl / (sort_length+sizeof(char*));
- maxbuffer= (uint) min((ulonglong) 1000, (records / keys)+1);
+ maxbuffer= (uint) MY_MIN((ulonglong) 1000, (records / keys)+1);
}
else
{
@@ -185,7 +185,7 @@ int _create_index_by_sort(MI_SORT_PARAM *info,my_bool no_messages,
HA_FT_MAXBYTELEN, MYF(0))))
{
if (my_init_dynamic_array(&buffpek, sizeof(BUFFPEK), maxbuffer,
- min(maxbuffer/2, 1000), MYF(0)))
+ MY_MIN(maxbuffer/2, 1000), MYF(0)))
{
my_free(sort_keys);
sort_keys= 0;
@@ -380,7 +380,7 @@ pthread_handler_t thr_find_all_keys(void *arg)
sort_keys= (uchar **) NULL;
sortbuff_size= sort_param->sortbuff_size;
- memavl= max(sortbuff_size, MIN_SORT_BUFFER);
+ memavl= MY_MAX(sortbuff_size, MIN_SORT_BUFFER);
idx= (ha_keys) sort_param->sort_info->max_records;
sort_length= sort_param->key_length;
maxbuffer= 1;
@@ -400,7 +400,7 @@ pthread_handler_t thr_find_all_keys(void *arg)
will be allocated when needed.
*/
keys= memavl / (sort_length+sizeof(char*));
- maxbuffer= (uint) min((ulonglong) 1000, (idx / keys)+1);
+ maxbuffer= (uint) MY_MIN((ulonglong) 1000, (idx / keys)+1);
}
else
{
@@ -427,7 +427,7 @@ pthread_handler_t thr_find_all_keys(void *arg)
HA_FT_MAXBYTELEN : 0), MYF(0))))
{
if (my_init_dynamic_array(&sort_param->buffpek, sizeof(BUFFPEK),
- maxbuffer, min(maxbuffer/2, 1000), MYF(0)))
+ maxbuffer, MY_MIN(maxbuffer/2, 1000), MYF(0)))
{
my_free(sort_keys);
sort_keys= (uchar **) NULL; /* for err: label */
@@ -877,7 +877,7 @@ static my_off_t read_to_buffer(IO_CACHE *fromfile, BUFFPEK *buffpek,
register ha_keys count;
my_off_t length;
- if ((count= (ha_keys) min((ha_rows) buffpek->max_keys,buffpek->count)))
+ if ((count= (ha_keys) MY_MIN((ha_rows) buffpek->max_keys,buffpek->count)))
{
if (mysql_file_pread(fromfile->file, (uchar*) buffpek->base,
(length= sort_length * count),
@@ -900,7 +900,7 @@ static my_off_t read_to_buffer_varlen(IO_CACHE *fromfile, BUFFPEK *buffpek,
uint idx;
uchar *buffp;
- if ((count= (ha_keys) min((ha_rows) buffpek->max_keys,buffpek->count)))
+ if ((count= (ha_keys) MY_MIN((ha_rows) buffpek->max_keys,buffpek->count)))
{
buffp = buffpek->base;
diff --git a/storage/myisammrg/ha_myisammrg.cc b/storage/myisammrg/ha_myisammrg.cc
index 69f84a9805a..bb225002dc0 100644
--- a/storage/myisammrg/ha_myisammrg.cc
+++ b/storage/myisammrg/ha_myisammrg.cc
@@ -135,7 +135,7 @@ ha_myisammrg::~ha_myisammrg(void)
static const char *ha_myisammrg_exts[] = {
- ".MRG",
+ MYRG_NAME_EXT,
NullS
};
extern int table2myisam(TABLE *table_arg, MI_KEYDEF **keydef_out,
@@ -493,6 +493,11 @@ int ha_myisammrg::add_children_list(void)
child_l->set_table_ref_id(mrg_child_def->get_child_table_ref_type(),
mrg_child_def->get_child_def_version());
/*
+ Copy parent's prelocking attribute to allow opening of child
+ temporary residing in the prelocking list.
+ */
+ child_l->prelocking_placeholder= parent_l->prelocking_placeholder;
+ /*
For statements which acquire a SNW metadata lock on a parent table and
then later try to upgrade it to an X lock (e.g. ALTER TABLE), SNW
locks should be also taken on the children tables.
@@ -515,7 +520,7 @@ int ha_myisammrg::add_children_list(void)
DDL on implicitly locked underlying tables of a MERGE table.
*/
if (! thd->locked_tables_mode &&
- parent_l->mdl_request.type == MDL_SHARED_NO_WRITE)
+ parent_l->mdl_request.type == MDL_SHARED_UPGRADABLE)
child_l->mdl_request.set_type(MDL_SHARED_NO_WRITE);
/* Link TABLE_LIST object into the children list. */
if (this->children_last_l)
@@ -1312,7 +1317,7 @@ int ha_myisammrg::info(uint flag)
memcpy((char*) table->key_info[0].rec_per_key,
(char*) mrg_info.rec_per_key,
sizeof(table->key_info[0].rec_per_key[0]) *
- min(file->keys, table->s->key_parts));
+ MY_MIN(file->keys, table->s->key_parts));
}
}
if (flag & HA_STATUS_ERRKEY)
@@ -1375,8 +1380,6 @@ int ha_myisammrg::reset(void)
int ha_myisammrg::extra_opt(enum ha_extra_function operation, ulong cache_size)
{
DBUG_ASSERT(this->file->children_attached);
- if ((specialflag & SPECIAL_SAFE_MODE) && operation == HA_EXTRA_WRITE_CACHE)
- return 0;
return myrg_extra(file, operation, (void*) &cache_size);
}
@@ -1509,15 +1512,14 @@ err:
}
-int ha_myisammrg::create(const char *name, register TABLE *form,
- HA_CREATE_INFO *create_info)
+int ha_myisammrg::create_mrg(const char *name, HA_CREATE_INFO *create_info)
{
char buff[FN_REFLEN];
const char **table_names, **pos;
TABLE_LIST *tables= create_info->merge_list.first;
THD *thd= current_thd;
size_t dirlgt= dirname_length(name);
- DBUG_ENTER("ha_myisammrg::create");
+ DBUG_ENTER("ha_myisammrg::create_mrg");
/* Allocate a table_names array in thread mem_root. */
if (!(table_names= (const char**)
@@ -1565,12 +1567,19 @@ int ha_myisammrg::create(const char *name, register TABLE *form,
*pos=0;
/* Create a MERGE meta file from the table_names array. */
- DBUG_RETURN(myrg_create(fn_format(buff,name,"","",
- MY_RESOLVE_SYMLINKS|
- MY_UNPACK_FILENAME|MY_APPEND_EXT),
- table_names,
- create_info->merge_insert_method,
- (my_bool) 0));
+ int res= myrg_create(name, table_names, create_info->merge_insert_method, 0);
+ DBUG_RETURN(res);
+}
+
+
+int ha_myisammrg::create(const char *name, register TABLE *form,
+ HA_CREATE_INFO *create_info)
+{
+ char buff[FN_REFLEN];
+ DBUG_ENTER("ha_myisammrg::create");
+ fn_format(buff, name, "", MYRG_NAME_EXT, MY_UNPACK_FILENAME | MY_APPEND_EXT);
+ int res= create_mrg(buff, create_info);
+ DBUG_RETURN(res);
}
@@ -1621,17 +1630,41 @@ void ha_myisammrg::append_create_info(String *packet)
}
-bool ha_myisammrg::check_if_incompatible_data(HA_CREATE_INFO *info,
- uint table_changes)
+enum_alter_inplace_result
+ha_myisammrg::check_if_supported_inplace_alter(TABLE *altered_table,
+ Alter_inplace_info *ha_alter_info)
{
/*
- For myisammrg, we should always re-generate the mapping file as this
- is trivial to do
+ We always support inplace ALTER in the new API, because old
+ HA_NO_COPY_ON_ALTER table_flags() hack prevents non-inplace ALTER anyway.
*/
- return COMPATIBLE_DATA_NO;
+ return HA_ALTER_INPLACE_EXCLUSIVE_LOCK;
}
+bool ha_myisammrg::inplace_alter_table(TABLE *altered_table,
+ Alter_inplace_info *ha_alter_info)
+{
+ char tmp_path[FN_REFLEN];
+ char *name= table->s->normalized_path.str;
+ DBUG_ENTER("ha_myisammrg::inplace_alter_table");
+ fn_format(tmp_path, name, "", MYRG_NAME_TMPEXT, MY_UNPACK_FILENAME | MY_APPEND_EXT);
+ int res= create_mrg(tmp_path, ha_alter_info->create_info);
+ if (res)
+ mysql_file_delete(rg_key_file_MRG, tmp_path, MYF(0));
+ else
+ {
+ char path[FN_REFLEN];
+ fn_format(path, name, "", MYRG_NAME_EXT, MY_UNPACK_FILENAME | MY_APPEND_EXT);
+ if (mysql_file_rename(rg_key_file_MRG, tmp_path, path, MYF(0)))
+ {
+ res= my_errno;
+ mysql_file_delete(rg_key_file_MRG, tmp_path, MYF(0));
+ }
+ }
+ DBUG_RETURN(res);
+}
+
int ha_myisammrg::check(THD* thd, HA_CHECK_OPT* check_opt)
{
return this->file->children_attached ? HA_ADMIN_OK : HA_ADMIN_CORRUPT;
@@ -1677,7 +1710,7 @@ my_bool ha_myisammrg::register_query_cache_dependant_tables(THD *thd
There are not callback function for for MyISAM, and engine data
*/
if (!cache->insert_table(key_length, key, (*block_table),
- db_length,
+ db_length, 0,
table_cache_type(),
0, 0, TRUE))
DBUG_RETURN(TRUE);
diff --git a/storage/myisammrg/ha_myisammrg.h b/storage/myisammrg/ha_myisammrg.h
index 455819c5526..6b4301616d9 100644
--- a/storage/myisammrg/ha_myisammrg.h
+++ b/storage/myisammrg/ha_myisammrg.h
@@ -137,6 +137,7 @@ public:
int extra_opt(enum ha_extra_function operation, ulong cache_size);
int external_lock(THD *thd, int lock_type);
uint lock_count(void) const;
+ int create_mrg(const char *name, HA_CREATE_INFO *create_info);
int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info);
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
enum thr_lock_type lock_type);
@@ -144,7 +145,10 @@ public:
void append_create_info(String *packet);
MYRG_INFO *myrg_info() { return file; }
TABLE *table_ptr() { return table; }
- bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes);
+ enum_alter_inplace_result check_if_supported_inplace_alter(TABLE *,
+ Alter_inplace_info *);
+ bool inplace_alter_table(TABLE *altered_table,
+ Alter_inplace_info *ha_alter_info);
int check(THD* thd, HA_CHECK_OPT* check_opt);
ha_rows records();
virtual uint count_query_cache_dependant_tables(uint8 *tables_type);
diff --git a/storage/myisammrg/myrg_create.c b/storage/myisammrg/myrg_create.c
index 53c104b4c4a..715a5bcbd32 100644
--- a/storage/myisammrg/myrg_create.c
+++ b/storage/myisammrg/myrg_create.c
@@ -33,9 +33,7 @@ int myrg_create(const char *name, const char **table_names,
DBUG_ENTER("myrg_create");
errpos=0;
- if ((file= mysql_file_create(rg_key_file_MRG,
- fn_format(buff, name, "", MYRG_NAME_EXT,
- MY_UNPACK_FILENAME|MY_APPEND_EXT), 0,
+ if ((file= mysql_file_create(rg_key_file_MRG, name, 0,
O_RDWR | O_EXCL | O_NOFOLLOW, MYF(MY_WME))) < 0)
goto err;
errpos=1;
diff --git a/storage/myisammrg/myrg_def.h b/storage/myisammrg/myrg_def.h
index e6fa869ff9b..cf60390b22e 100644
--- a/storage/myisammrg/myrg_def.h
+++ b/storage/myisammrg/myrg_def.h
@@ -33,11 +33,10 @@ extern "C"
void myrg_print_wrong_table(const char *table_name);
#ifdef HAVE_PSI_INTERFACE
-extern PSI_mutex_key rg_key_mutex_MYRG_INFO_mutex;
-
-extern PSI_file_key rg_key_file_MRG;
C_MODE_START
+extern PSI_mutex_key rg_key_mutex_MYRG_INFO_mutex;
+extern PSI_file_key rg_key_file_MRG;
void init_myisammrg_psi_keys();
C_MODE_END
#endif /* HAVE_PSI_INTERFACE */
diff --git a/storage/oqgraph/ha_oqgraph.cc b/storage/oqgraph/ha_oqgraph.cc
index b4292f33066..c47c520335c 100644
--- a/storage/oqgraph/ha_oqgraph.cc
+++ b/storage/oqgraph/ha_oqgraph.cc
@@ -261,7 +261,7 @@ static int oqgraph_check_table_structure (TABLE *table_arg)
if (!(field[0] == key->key_part[0].field &&
HA_KEY_ALG_HASH == key->algorithm))
DBUG_RETURN(-1);
- if (key->key_parts == 3)
+ if (key->user_defined_key_parts == 3)
{
/* KEY (latch, origid, destid) USING HASH */
/* KEY (latch, destid, origid) USING HASH */
@@ -349,7 +349,7 @@ void ha_oqgraph::update_key_stats()
if (key->algorithm != HA_KEY_ALG_BTREE)
{
if (key->flags & HA_NOSAME)
- key->rec_per_key[key->key_parts-1]= 1;
+ key->rec_per_key[key->user_defined_key_parts-1]= 1;
else
{
unsigned vertices= graph->vertices_count();
@@ -357,7 +357,7 @@ void ha_oqgraph::update_key_stats()
uint no_records= vertices ? 2 * (edges + vertices) / vertices : 2;
if (no_records < 2)
no_records= 2;
- key->rec_per_key[key->key_parts-1]= no_records;
+ key->rec_per_key[key->user_defined_key_parts-1]= no_records;
}
}
}
@@ -874,7 +874,7 @@ ha_rows ha_oqgraph::records_in_range(uint inx, key_range *min_key,
/* Assert that info() did run. We need current statistics here. */
DBUG_ASSERT(key_stat_version == share->key_stat_version);
- ha_rows result= key->rec_per_key[key->key_parts-1];
+ ha_rows result= key->rec_per_key[key->user_defined_key_parts-1];
return result;
}
diff --git a/storage/perfschema/CMakeLists.txt b/storage/perfschema/CMakeLists.txt
index 0c9713d45d4..ef644030317 100644
--- a/storage/perfschema/CMakeLists.txt
+++ b/storage/perfschema/CMakeLists.txt
@@ -118,6 +118,10 @@ table_tiws_by_index_usage.h
table_tiws_by_table.h
table_tlws_by_table.h
table_users.h
+cursor_by_thread_connect_attr.h
+table_session_connect.h
+table_session_connect_attrs.h
+table_session_account_connect_attrs.h
cursor_by_account.cc
cursor_by_host.cc
cursor_by_thread.cc
@@ -126,6 +130,7 @@ ha_perfschema.cc
pfs.cc
pfs_account.cc
pfs_atomic.cc
+pfs_autosize.cc
pfs_check.cc
pfs_column_values.cc
pfs_con_slice.cc
@@ -189,6 +194,10 @@ table_tiws_by_index_usage.cc
table_tiws_by_table.cc
table_tlws_by_table.cc
table_users.cc
+cursor_by_thread_connect_attr.cc
+table_session_connect.cc
+table_session_connect_attrs.cc
+table_session_account_connect_attrs.cc
)
MYSQL_ADD_PLUGIN(perfschema ${PERFSCHEMA_SOURCES} STORAGE_ENGINE DEFAULT STATIC_ONLY)
diff --git a/storage/perfschema/cursor_by_thread_connect_attr.cc b/storage/perfschema/cursor_by_thread_connect_attr.cc
new file mode 100644
index 00000000000..7a0dd04119d
--- /dev/null
+++ b/storage/perfschema/cursor_by_thread_connect_attr.cc
@@ -0,0 +1,71 @@
+/* Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software Foundation,
+ 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */
+
+#include "my_global.h"
+#include "cursor_by_thread_connect_attr.h"
+
+cursor_by_thread_connect_attr::cursor_by_thread_connect_attr(
+ const PFS_engine_table_share *share) :
+ PFS_engine_table(share, &m_pos), m_row_exists(false)
+{}
+
+int cursor_by_thread_connect_attr::rnd_next(void)
+{
+ PFS_thread *thread;
+
+ for (m_pos.set_at(&m_next_pos);
+ m_pos.has_more_thread();
+ m_pos.next_thread())
+ {
+ thread= &thread_array[m_pos.m_index_1];
+
+ if (thread->m_lock.is_populated())
+ {
+ make_row(thread, m_pos.m_index_2);
+ if (m_row_exists)
+ {
+ m_next_pos.set_after(&m_pos);
+ return 0;
+ }
+ }
+ }
+ return HA_ERR_END_OF_FILE;
+}
+
+
+int cursor_by_thread_connect_attr::rnd_pos(const void *pos)
+{
+ PFS_thread *thread;
+
+ set_position(pos);
+ DBUG_ASSERT(m_pos.m_index_1 < thread_max);
+
+ thread= &thread_array[m_pos.m_index_1];
+ if (!thread->m_lock.is_populated())
+ return HA_ERR_RECORD_DELETED;
+
+ make_row(thread, m_pos.m_index_2);
+ if (m_row_exists)
+ return 0;
+
+ return HA_ERR_RECORD_DELETED;
+}
+
+
+void cursor_by_thread_connect_attr::reset_position(void)
+{
+ m_pos.reset();
+ m_next_pos.reset();
+}
diff --git a/storage/perfschema/cursor_by_thread_connect_attr.h b/storage/perfschema/cursor_by_thread_connect_attr.h
new file mode 100644
index 00000000000..fbce56f208d
--- /dev/null
+++ b/storage/perfschema/cursor_by_thread_connect_attr.h
@@ -0,0 +1,81 @@
+/* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software Foundation,
+ 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */
+
+#ifndef CURSOR_BY_THREAD_CONNECT_ATTR_H
+#define CURSOR_BY_THREAD_CONNECT_ATTR_H
+
+#include "pfs_column_types.h"
+#include "pfs_engine_table.h"
+#include "pfs_instr.h"
+
+/**
+ \addtogroup Performance_schema_tables
+ @{
+*/
+
+struct pos_connect_attr_by_thread_by_attr
+: public PFS_double_index
+{
+ pos_connect_attr_by_thread_by_attr()
+ : PFS_double_index(0, 0)
+ {}
+
+ inline bool has_more_thread(void)
+ {
+ return (m_index_1 < thread_max);
+ }
+
+ inline void next_thread(void)
+ {
+ m_index_1++;
+ m_index_2= 0;
+ }
+
+ inline void reset(void)
+ {
+ m_index_1= 0;
+ m_index_2= 0;
+ }
+};
+
+/** Cursor CURSOR_BY_THREAD_CONNECT_ATTR. */
+class cursor_by_thread_connect_attr : public PFS_engine_table
+{
+public:
+ virtual int rnd_next();
+ virtual int rnd_pos(const void *pos);
+ virtual void reset_position(void);
+
+protected:
+ cursor_by_thread_connect_attr(const PFS_engine_table_share *share);
+
+public:
+ ~cursor_by_thread_connect_attr()
+ {}
+
+protected:
+ virtual void make_row(PFS_thread *thread, uint ordinal)= 0;
+ /** True if row exists */
+ bool m_row_exists;
+
+private:
+ /** Current position. */
+ pos_connect_attr_by_thread_by_attr m_pos;
+ /** Next position. */
+ pos_connect_attr_by_thread_by_attr m_next_pos;
+};
+
+/** @} */
+#endif
diff --git a/storage/perfschema/gen_pfs_lex_token.cc b/storage/perfschema/gen_pfs_lex_token.cc
index b7470061de1..7581255b284 100644
--- a/storage/perfschema/gen_pfs_lex_token.cc
+++ b/storage/perfschema/gen_pfs_lex_token.cc
@@ -1,5 +1,5 @@
/*
- Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -243,7 +243,7 @@ void print_tokens()
int main(int argc,char **argv)
{
puts("/*");
- puts(ORACLE_WELCOME_COPYRIGHT_NOTICE("2011, 2012"));
+ puts(ORACLE_WELCOME_COPYRIGHT_NOTICE("2011"));
puts("*/");
printf("/*\n");
diff --git a/storage/perfschema/ha_perfschema.cc b/storage/perfschema/ha_perfschema.cc
index aca5ad8c731..2934afefbe6 100644
--- a/storage/perfschema/ha_perfschema.cc
+++ b/storage/perfschema/ha_perfschema.cc
@@ -164,6 +164,8 @@ static struct st_mysql_show_var pfs_status_vars[]=
(char*) &statement_class_lost, SHOW_LONG},
{"Performance_schema_digest_lost",
(char*) &digest_lost, SHOW_LONG},
+ {"Performance_schema_session_connect_attrs_lost",
+ (char*) &session_connect_attrs_lost, SHOW_LONG},
{NullS, NullS, SHOW_LONG}
};
@@ -245,12 +247,12 @@ int ha_perfschema::write_row(uchar *buf)
int result;
DBUG_ENTER("ha_perfschema::write_row");
+ if (!pfs_initialized)
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
- ha_statistic_increment(&SSV::ha_write_count);
DBUG_ASSERT(m_table_share);
-
+ ha_statistic_increment(&SSV::ha_write_count);
result= m_table_share->write_row(table, buf, table->field);
-
DBUG_RETURN(result);
}
@@ -268,7 +270,9 @@ void ha_perfschema::use_hidden_primary_key(void)
int ha_perfschema::update_row(const uchar *old_data, uchar *new_data)
{
DBUG_ENTER("ha_perfschema::update_row");
-
+ if (!pfs_initialized)
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
+
DBUG_ASSERT(m_table);
ha_statistic_increment(&SSV::ha_update_count);
int result= m_table->update_row(table, old_data, new_data, table->field);
@@ -278,6 +282,8 @@ int ha_perfschema::update_row(const uchar *old_data, uchar *new_data)
int ha_perfschema::delete_row(const uchar *buf)
{
DBUG_ENTER("ha_perfschema::delete_row");
+ if (!pfs_initialized)
+ DBUG_RETURN(HA_ERR_WRONG_COMMAND);
DBUG_ASSERT(m_table);
ha_statistic_increment(&SSV::ha_delete_count);
@@ -318,6 +324,8 @@ int ha_perfschema::rnd_end(void)
int ha_perfschema::rnd_next(uchar *buf)
{
DBUG_ENTER("ha_perfschema::rnd_next");
+ if (!pfs_initialized)
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
DBUG_ASSERT(m_table);
ha_statistic_increment(&SSV::ha_read_rnd_next_count);
@@ -344,6 +352,8 @@ void ha_perfschema::position(const uchar *record)
int ha_perfschema::rnd_pos(uchar *buf, uchar *pos)
{
DBUG_ENTER("ha_perfschema::rnd_pos");
+ if (!pfs_initialized)
+ DBUG_RETURN(HA_ERR_END_OF_FILE);
DBUG_ASSERT(m_table);
ha_statistic_increment(&SSV::ha_read_rnd_count);
@@ -369,6 +379,8 @@ int ha_perfschema::delete_all_rows(void)
int result;
DBUG_ENTER("ha_perfschema::delete_all_rows");
+ if (!pfs_initialized)
+ DBUG_RETURN(0);
DBUG_ASSERT(m_table_share);
if (m_table_share->m_delete_all_rows)
diff --git a/storage/perfschema/ha_perfschema.h b/storage/perfschema/ha_perfschema.h
index e088c79b26f..f8ed3ea52e0 100644
--- a/storage/perfschema/ha_perfschema.h
+++ b/storage/perfschema/ha_perfschema.h
@@ -70,8 +70,7 @@ public:
records.
*/
return (HA_NO_TRANSACTIONS | HA_REC_NOT_IN_SEQ | HA_NO_AUTO_INCREMENT |
- HA_BINLOG_ROW_CAPABLE | HA_BINLOG_STMT_CAPABLE |
- HA_PRIMARY_KEY_REQUIRED_FOR_DELETE | HA_HAS_OWN_BINLOGGING);
+ HA_PRIMARY_KEY_REQUIRED_FOR_DELETE);
}
/**
diff --git a/storage/perfschema/pfs.cc b/storage/perfschema/pfs.cc
index d3de38d025c..33b21ee2817 100644
--- a/storage/perfschema/pfs.cc
+++ b/storage/perfschema/pfs.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -688,6 +688,7 @@ static inline int mysql_mutex_lock(...)
- socket io (MYSQL_SOCKET)
- table io
- table lock
+ - idle
The flow of data between aggregates tables varies for each instrumentation.
@@ -857,24 +858,35 @@ static inline int mysql_mutex_lock(...)
@subsection IMPL_WAIT_SOCKET Socket waits
@verbatim
- socket_locker(T, F)
+ socket_locker(T, S)
|
| [1]
|
- |-> pfs_socket(F) =====>> [A], [B], [C], [D], [E]
+ |-> pfs_socket(S) =====>> [A], [B], [C], [D], [E]
|
| [2]
|
- |-> pfs_socket_class(F.class) =====>> [C], [D]
+ |-> pfs_socket_class(S.class) =====>> [C], [D]
|
- |-> pfs_thread(T).event_name(F) =====>> [A]
+ |-> pfs_thread(T).event_name(S) =====>> [A]
|
- ...
+ | [3]
+ |
+ 3a |-> pfs_account(U, H).event_name(S) =====>> [F], [G], [H]
+ . |
+ . | [4-RESET]
+ . |
+ 3b .....+-> pfs_user(U).event_name(S) =====>> [G]
+ . |
+ 3c .....+-> pfs_host(H).event_name(S) =====>> [H]
@endverbatim
Implemented as:
- [1] @c start_socket_wait_v1(), @c end_socket_wait_v1().
- [2] @c close_socket_v1()
+ - [3] @c aggregate_thread_waits()
+ - [4] @c PFS_account::aggregate_waits()
+ - [5] @c PFS_host::aggregate_waits()
- [A] EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME,
@c table_ews_by_thread_by_event_name::make_row()
- [B] EVENTS_WAITS_SUMMARY_BY_INSTANCE,
@@ -885,37 +897,78 @@ static inline int mysql_mutex_lock(...)
@c table_socket_summary_by_event_name::make_row()
- [E] SOCKET_SUMMARY_BY_INSTANCE,
@c table_socket_summary_by_instance::make_row()
+ - [F] EVENTS_WAITS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME,
+ @c table_ews_by_account_by_event_name::make_row()
+ - [G] EVENTS_WAITS_SUMMARY_BY_USER_BY_EVENT_NAME,
+ @c table_ews_by_user_by_event_name::make_row()
+ - [H] EVENTS_WAITS_SUMMARY_BY_HOST_BY_EVENT_NAME,
+ @c table_ews_by_host_by_event_name::make_row()
@subsection IMPL_WAIT_TABLE Table waits
@verbatim
- table_locker(T, Tb)
+ table_locker(Thread Th, Table Tb, Event = io or lock)
|
| [1]
|
- |-> pfs_table(Tb) =====>> [B], [C], [D]
- |
- | [2]
- |
- |-> pfs_table_share(Tb.share) =====>> [C], [D]
- |
- |-> pfs_thread(T).event_name(Tb) =====>> [A]
- |
- ...
+1a |-> pfs_table(Tb) =====>> [A], [B], [C]
+ | |
+ | | [2]
+ | |
+ | |-> pfs_table_share(Tb.share) =====>> [B], [C]
+ | |
+ | | [3]
+ | |
+ | |-> global_table_io_stat =====>> [C]
+ | |
+ | |-> global_table_lock_stat =====>> [C]
+ |
+1b |-> pfs_thread(Th).event_name(E) =====>> [D], [E], [F], [G]
+ | |
+ | | [ 4-RESET]
+ | |
+ | |-> pfs_account(U, H).event_name(E) =====>> [E], [F], [G]
+ | . |
+ | . | [5-RESET]
+ | . |
+ | .....+-> pfs_user(U).event_name(E) =====>> [F]
+ | . |
+ | .....+-> pfs_host(H).event_name(E) =====>> [G]
+ |
+1c |-> pfs_thread(Th).waits_current(W) =====>> [H]
+ |
+1d |-> pfs_thread(Th).waits_history(W) =====>> [I]
+ |
+1e |-> waits_history_long(W) =====>> [J]
@endverbatim
Implemented as:
- [1] @c start_table_io_wait_v1(), @c end_table_io_wait_v1()
- [2] @c close_table_v1()
- - [A] EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME,
- @c table_ews_by_thread_by_event_name::make_row()
- - [B] EVENTS_WAITS_SUMMARY_BY_INSTANCE,
+ - [3] @c drop_table_share_v1()
+ - [4] @c TRUNCATE TABLE EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME
+ - [5] @c TRUNCATE TABLE EVENTS_WAITS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME
+ - [A] EVENTS_WAITS_SUMMARY_BY_INSTANCE,
@c table_events_waits_summary_by_instance::make_table_row()
+ - [B] OBJECTS_SUMMARY_GLOBAL_BY_TYPE,
+ @c table_os_global_by_type::make_row()
- [C] EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME,
@c table_ews_global_by_event_name::make_table_io_row(),
@c table_ews_global_by_event_name::make_table_lock_row()
- - [D] OBJECTS_SUMMARY_GLOBAL_BY_TYPE,
- @c table_os_global_by_type::make_row()
+ - [D] EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME,
+ @c table_ews_by_thread_by_event_name::make_row()
+ - [E] EVENTS_WAITS_SUMMARY_BY_ACCOUNT_BY_EVENT_NAME,
+ @c table_ews_by_user_by_account_name::make_row()
+ - [F] EVENTS_WAITS_SUMMARY_BY_USER_BY_EVENT_NAME,
+ @c table_ews_by_user_by_event_name::make_row()
+ - [G] EVENTS_WAITS_SUMMARY_BY_HOST_BY_EVENT_NAME,
+ @c table_ews_by_host_by_event_name::make_row()
+ - [H] EVENTS_WAITS_CURRENT,
+ @c table_events_waits_common::make_row()
+ - [I] EVENTS_WAITS_HISTORY,
+ @c table_events_waits_common::make_row()
+ - [J] EVENTS_WAITS_HISTORY_LONG,
+ @c table_events_waits_common::make_row()
@section IMPL_STAGE Implementation for stages aggregates
@@ -1594,7 +1647,6 @@ static void unbind_table_v1(PSI_table *table)
PFS_table *pfs= reinterpret_cast<PFS_table*> (table);
if (likely(pfs != NULL))
{
- pfs->aggregate();
pfs->m_thread_owner= NULL;
}
}
@@ -1615,12 +1667,6 @@ rebind_table_v1(PSI_table_share *share, const void *identity, PSI_table *table)
/* The table handle was already instrumented, reuse it for this thread. */
thread= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
- if (unlikely(thread == NULL))
- {
- destroy_table(pfs);
- return NULL;
- }
-
if (unlikely(! pfs->m_share->m_enabled))
{
destroy_table(pfs);
@@ -1660,8 +1706,6 @@ rebind_table_v1(PSI_table_share *share, const void *identity, PSI_table *table)
return NULL;
PFS_thread *thread= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
- if (unlikely(thread == NULL))
- return NULL;
PFS_table *pfs_table= create_table(pfs_table_share, thread, identity);
return reinterpret_cast<PSI_table *> (pfs_table);
@@ -1681,9 +1725,18 @@ static void close_table_v1(PSI_table *table)
}
static PSI_socket*
-init_socket_v1(PSI_socket_key key, const my_socket *fd)
+init_socket_v1(PSI_socket_key key, const my_socket *fd,
+ const struct sockaddr *addr, socklen_t addr_len)
{
- INIT_BODY_V1(socket, key, fd);
+ PFS_socket_class *klass;
+ PFS_socket *pfs;
+ klass= find_socket_class(key);
+ if (unlikely(klass == NULL))
+ return NULL;
+ if (! klass->m_enabled)
+ return NULL;
+ pfs= create_socket(klass, fd, addr, addr_len);
+ return reinterpret_cast<PSI_socket *> (pfs);
}
static void destroy_socket_v1(PSI_socket *socket)
@@ -1731,7 +1784,7 @@ static void create_file_v1(PSI_file_key key, const char *name, File file)
}
uint len= strlen(name);
- PFS_file *pfs_file= find_or_create_file(pfs_thread, klass, name, len);
+ PFS_file *pfs_file= find_or_create_file(pfs_thread, klass, name, len, true);
file_handle_array[index]= pfs_file;
}
@@ -1835,13 +1888,13 @@ static int spawn_thread_v1(PSI_thread_key key,
@sa PSI_v1::new_thread.
*/
static PSI_thread*
-new_thread_v1(PSI_thread_key key, const void *identity, ulong thread_id)
+new_thread_v1(PSI_thread_key key, const void *identity, ulonglong processlist_id)
{
PFS_thread *pfs;
PFS_thread_class *klass= find_thread_class(key);
if (likely(klass != NULL))
- pfs= create_thread(klass, identity, thread_id);
+ pfs= create_thread(klass, identity, processlist_id);
else
pfs= NULL;
@@ -1852,12 +1905,12 @@ new_thread_v1(PSI_thread_key key, const void *identity, ulong thread_id)
Implementation of the thread instrumentation interface.
@sa PSI_v1::set_thread_id.
*/
-static void set_thread_id_v1(PSI_thread *thread, unsigned long id)
+static void set_thread_id_v1(PSI_thread *thread, ulonglong processlist_id)
{
PFS_thread *pfs= reinterpret_cast<PFS_thread*> (thread);
if (unlikely(pfs == NULL))
return;
- pfs->m_thread_id= id;
+ pfs->m_processlist_id= processlist_id;
}
/**
@@ -2045,10 +2098,10 @@ static void set_thread_state_v1(const char* state)
{
int state_len= state ? strlen(state) : 0;
- pfs->m_lock.allocated_to_dirty();
+ pfs->m_processlist_lock.allocated_to_dirty();
pfs->m_processlist_state_ptr= state;
pfs->m_processlist_state_length= state_len;
- pfs->m_lock.dirty_to_allocated();
+ pfs->m_processlist_lock.dirty_to_allocated();
}
}
@@ -2060,12 +2113,14 @@ static void set_thread_info_v1(const char* info, int info_len)
{
PFS_thread *pfs= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
+ DBUG_ASSERT((info != NULL) || (info_len == 0));
+
if (likely(pfs != NULL))
{
- pfs->m_lock.allocated_to_dirty();
+ pfs->m_processlist_lock.allocated_to_dirty();
pfs->m_processlist_info_ptr= info;
pfs->m_processlist_info_length= info_len;
- pfs->m_lock.dirty_to_allocated();
+ pfs->m_processlist_lock.dirty_to_allocated();
}
}
@@ -2196,7 +2251,7 @@ start_mutex_wait_v1(PSI_mutex_locker_state *state,
Complete shortcut.
*/
/* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (counted) */
- pfs_mutex->m_wait_stat.aggregate_counted();
+ pfs_mutex->m_mutex_stat.m_wait_stat.aggregate_counted();
return NULL;
}
}
@@ -2294,7 +2349,7 @@ start_rwlock_wait_v1(PSI_rwlock_locker_state *state,
Complete shortcut.
*/
/* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (counted) */
- pfs_rwlock->m_wait_stat.aggregate_counted();
+ pfs_rwlock->m_rwlock_stat.m_wait_stat.aggregate_counted();
return NULL;
}
}
@@ -2401,7 +2456,7 @@ start_cond_wait_v1(PSI_cond_locker_state *state,
Complete shortcut.
*/
/* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (counted) */
- pfs_cond->m_wait_stat.aggregate_counted();
+ pfs_cond->m_cond_stat.m_wait_stat.aggregate_counted();
return NULL;
}
}
@@ -2478,8 +2533,6 @@ start_table_io_wait_v1(PSI_table_locker_state *state,
return NULL;
PFS_thread *pfs_thread= pfs_table->m_thread_owner;
- if (unlikely(pfs_thread == NULL))
- return NULL;
DBUG_ASSERT(pfs_thread ==
my_pthread_getspecific_ptr(PFS_thread*, THR_PFS));
@@ -2489,6 +2542,8 @@ start_table_io_wait_v1(PSI_table_locker_state *state,
if (flag_thread_instrumentation)
{
+ if (pfs_thread == NULL)
+ return NULL;
if (! pfs_thread->m_enabled)
return NULL;
state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
@@ -2538,7 +2593,6 @@ start_table_io_wait_v1(PSI_table_locker_state *state,
pfs_thread->m_events_waits_current++;
}
- /* TODO: consider a shortcut here */
}
else
{
@@ -2585,11 +2639,6 @@ start_table_lock_wait_v1(PSI_table_locker_state *state,
return NULL;
PFS_thread *pfs_thread= pfs_table->m_thread_owner;
- if (unlikely(pfs_thread == NULL))
- return NULL;
-
- DBUG_ASSERT(pfs_thread ==
- my_pthread_getspecific_ptr(PFS_thread*, THR_PFS));
PFS_TL_LOCK_TYPE lock_type;
@@ -2619,6 +2668,8 @@ start_table_lock_wait_v1(PSI_table_locker_state *state,
if (flag_thread_instrumentation)
{
+ if (pfs_thread == NULL)
+ return NULL;
if (! pfs_thread->m_enabled)
return NULL;
state->m_thread= reinterpret_cast<PSI_thread *> (pfs_thread);
@@ -2668,7 +2719,6 @@ start_table_lock_wait_v1(PSI_table_locker_state *state,
pfs_thread->m_events_waits_current++;
}
- /* TODO: consider a shortcut here */
}
else
{
@@ -2729,11 +2779,6 @@ get_thread_file_name_locker_v1(PSI_file_locker_state *state,
if (klass->m_timed)
flags|= STATE_FLAG_TIMED;
- uint len= strlen(name);
- PFS_file *pfs_file= find_or_create_file(pfs_thread, klass, name, len);
- if (unlikely(pfs_file == NULL))
- return NULL;
-
if (flag_events_waits_current)
{
if (unlikely(pfs_thread->m_events_waits_current >=
@@ -2755,9 +2800,9 @@ get_thread_file_name_locker_v1(PSI_file_locker_state *state,
wait->m_class= klass;
wait->m_timer_start= 0;
wait->m_timer_end= 0;
- wait->m_object_instance_addr= pfs_file;
- wait->m_weak_file= pfs_file;
- wait->m_weak_version= pfs_file->get_version();
+ wait->m_object_instance_addr= NULL;
+ wait->m_weak_file= NULL;
+ wait->m_weak_version= 0;
wait->m_event_id= pfs_thread->m_event_id++;
wait->m_end_event_id= 0;
wait->m_operation= file_operation_map[static_cast<int> (op)];
@@ -2767,7 +2812,9 @@ get_thread_file_name_locker_v1(PSI_file_locker_state *state,
}
state->m_flags= flags;
- state->m_file= reinterpret_cast<PSI_file*> (pfs_file);
+ state->m_file= NULL;
+ state->m_name= name;
+ state->m_class= klass;
state->m_operation= op;
return reinterpret_cast<PSI_file_locker*> (state);
}
@@ -2788,6 +2835,7 @@ get_thread_file_stream_locker_v1(PSI_file_locker_state *state,
if (unlikely(pfs_file == NULL))
return NULL;
DBUG_ASSERT(pfs_file->m_class != NULL);
+ PFS_file_class *klass= pfs_file->m_class;
if (! pfs_file->m_enabled)
return NULL;
@@ -2825,7 +2873,7 @@ get_thread_file_stream_locker_v1(PSI_file_locker_state *state,
wait->m_nesting_event_type= parent_event->m_event_type;
wait->m_thread= pfs_thread;
- wait->m_class= pfs_file->m_class;
+ wait->m_class= klass;
wait->m_timer_start= 0;
wait->m_timer_end= 0;
wait->m_object_instance_addr= pfs_file;
@@ -2856,6 +2904,8 @@ get_thread_file_stream_locker_v1(PSI_file_locker_state *state,
state->m_flags= flags;
state->m_file= reinterpret_cast<PSI_file*> (pfs_file);
state->m_operation= op;
+ state->m_name= NULL;
+ state->m_class= klass;
return reinterpret_cast<PSI_file_locker*> (state);
}
@@ -2890,10 +2940,12 @@ get_thread_file_descriptor_locker_v1(PSI_file_locker_state *state,
if (op == PSI_FILE_CLOSE)
file_handle_array[index]= NULL;
- DBUG_ASSERT(pfs_file->m_class != NULL);
if (! pfs_file->m_enabled)
return NULL;
+ DBUG_ASSERT(pfs_file->m_class != NULL);
+ PFS_file_class *klass= pfs_file->m_class;
+
register uint flags;
if (flag_thread_instrumentation)
@@ -2927,7 +2979,7 @@ get_thread_file_descriptor_locker_v1(PSI_file_locker_state *state,
wait->m_nesting_event_type= parent_event->m_event_type;
wait->m_thread= pfs_thread;
- wait->m_class= pfs_file->m_class;
+ wait->m_class= klass;
wait->m_timer_start= 0;
wait->m_timer_end= 0;
wait->m_object_instance_addr= pfs_file;
@@ -2958,6 +3010,8 @@ get_thread_file_descriptor_locker_v1(PSI_file_locker_state *state,
state->m_flags= flags;
state->m_file= reinterpret_cast<PSI_file*> (pfs_file);
state->m_operation= op;
+ state->m_name= NULL;
+ state->m_class= klass;
return reinterpret_cast<PSI_file_locker*> (state);
}
@@ -2991,14 +3045,6 @@ start_socket_wait_v1(PSI_socket_locker_state *state,
if (unlikely(pfs_thread == NULL))
return NULL;
-#ifdef LATER
- /*
- Needs refinement, because of KILL.
- */
- DBUG_ASSERT(pfs_thread ==
- my_pthread_getspecific_ptr(PFS_thread*, THR_PFS));
-#endif
-
if (!pfs_thread->m_enabled)
return NULL;
@@ -3112,22 +3158,15 @@ static void unlock_mutex_v1(PSI_mutex *mutex)
PFS_mutex::m_lock_stat is not exposed in user visible tables
currently, so there is no point spending time computing it.
*/
- PFS_thread *pfs_thread= reinterpret_cast<PFS_thread*> (thread);
- DBUG_ASSERT(pfs_thread != NULL);
-
- if (unlikely(! flag_events_waits_current))
- return;
- if (! pfs_mutex->m_class->m_enabled)
+ if (! pfs_mutex->m_enabled)
return;
- if (! pfs_thread->m_enabled)
+
+ if (! pfs_mutex->m_timed)
return;
- if (pfs_mutex->m_class->m_timed)
- {
- ulonglong locked_time;
- locked_time= get_timer_pico_value(wait_timer) - pfs_mutex->m_last_locked;
- aggregate_single_stat_chain(&pfs_mutex->m_lock_stat, locked_time);
- }
+ ulonglong locked_time;
+ locked_time= get_timer_pico_value(wait_timer) - pfs_mutex->m_last_locked;
+ pfs_mutex->m_mutex_stat.m_lock_stat.aggregate_value(locked_time);
#endif
}
@@ -3185,32 +3224,23 @@ static void unlock_rwlock_v1(PSI_rwlock *rwlock)
#ifdef LATER_WL2333
/* See WL#2333: SHOW ENGINE ... LOCK STATUS. */
- PFS_thread *pfs_thread= reinterpret_cast<PFS_thread*> (thread);
- DBUG_ASSERT(pfs_thread != NULL);
- if (unlikely(! flag_events_waits_current))
- return;
- if (! pfs_rwlock->m_class->m_enabled)
+ if (! pfs_rwlock->m_enabled)
return;
- if (! pfs_thread->m_enabled)
+
+ if (! pfs_rwlock->m_timed)
return;
ulonglong locked_time;
if (last_writer)
{
- if (pfs_rwlock->m_class->m_timed)
- {
- locked_time= get_timer_pico_value(wait_timer) - pfs_rwlock->m_last_written;
- aggregate_single_stat_chain(&pfs_rwlock->m_write_lock_stat, locked_time);
- }
+ locked_time= get_timer_pico_value(wait_timer) - pfs_rwlock->m_last_written;
+ pfs_rwlock->m_rwlock_stat.m_write_lock_stat.aggregate_value(locked_time);
}
else if (last_reader)
{
- if (pfs_rwlock->m_class->m_timed)
- {
- locked_time= get_timer_pico_value(wait_timer) - pfs_rwlock->m_last_read;
- aggregate_single_stat_chain(&pfs_rwlock->m_read_lock_stat, locked_time);
- }
+ locked_time= get_timer_pico_value(wait_timer) - pfs_rwlock->m_last_read;
+ pfs_rwlock->m_rwlock_stat.m_read_lock_stat.aggregate_value(locked_time);
}
#else
(void) last_reader;
@@ -3352,17 +3382,16 @@ static void end_idle_wait_v1(PSI_idle_locker* locker)
PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);
PFS_single_stat *event_name_array;
event_name_array= thread->m_instr_class_waits_stats;
- uint index= global_idle_class.m_event_name_index;
if (flags & STATE_FLAG_TIMED)
{
/* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (timed) */
- event_name_array[index].aggregate_value(wait_time);
+ event_name_array[GLOBAL_IDLE_EVENT_INDEX].aggregate_value(wait_time);
}
else
{
/* Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME (counted) */
- event_name_array[index].aggregate_counted();
+ event_name_array[GLOBAL_IDLE_EVENT_INDEX].aggregate_counted();
}
if (flags & STATE_FLAG_EVENT)
@@ -3379,6 +3408,17 @@ static void end_idle_wait_v1(PSI_idle_locker* locker)
thread->m_events_waits_current--;
}
}
+
+ if (flags & STATE_FLAG_TIMED)
+ {
+ /* Aggregate to EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME (timed) */
+ global_idle_stat.aggregate_value(wait_time);
+ }
+ else
+ {
+ /* Aggregate to EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME (counted) */
+ global_idle_stat.aggregate_counted();
+ }
}
/**
@@ -3404,12 +3444,12 @@ static void end_mutex_wait_v1(PSI_mutex_locker* locker, int rc)
timer_end= state->m_timer();
wait_time= timer_end - state->m_timer_start;
/* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (timed) */
- mutex->m_wait_stat.aggregate_value(wait_time);
+ mutex->m_mutex_stat.m_wait_stat.aggregate_value(wait_time);
}
else
{
/* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (counted) */
- mutex->m_wait_stat.aggregate_counted();
+ mutex->m_mutex_stat.m_wait_stat.aggregate_counted();
}
if (likely(rc == 0))
@@ -3471,12 +3511,12 @@ static void end_rwlock_rdwait_v1(PSI_rwlock_locker* locker, int rc)
timer_end= state->m_timer();
wait_time= timer_end - state->m_timer_start;
/* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (timed) */
- rwlock->m_wait_stat.aggregate_value(wait_time);
+ rwlock->m_rwlock_stat.m_wait_stat.aggregate_value(wait_time);
}
else
{
/* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (counted) */
- rwlock->m_wait_stat.aggregate_counted();
+ rwlock->m_rwlock_stat.m_wait_stat.aggregate_counted();
}
if (rc == 0)
@@ -3551,12 +3591,12 @@ static void end_rwlock_wrwait_v1(PSI_rwlock_locker* locker, int rc)
timer_end= state->m_timer();
wait_time= timer_end - state->m_timer_start;
/* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (timed) */
- rwlock->m_wait_stat.aggregate_value(wait_time);
+ rwlock->m_rwlock_stat.m_wait_stat.aggregate_value(wait_time);
}
else
{
/* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (counted) */
- rwlock->m_wait_stat.aggregate_counted();
+ rwlock->m_rwlock_stat.m_wait_stat.aggregate_counted();
}
if (likely(rc == 0))
@@ -3622,12 +3662,12 @@ static void end_cond_wait_v1(PSI_cond_locker* locker, int rc)
timer_end= state->m_timer();
wait_time= timer_end - state->m_timer_start;
/* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (timed) */
- cond->m_wait_stat.aggregate_value(wait_time);
+ cond->m_cond_stat.m_wait_stat.aggregate_value(wait_time);
}
else
{
/* Aggregate to EVENTS_WAITS_SUMMARY_BY_INSTANCE (counted) */
- cond->m_wait_stat.aggregate_counted();
+ cond->m_cond_stat.m_wait_stat.aggregate_counted();
}
if (state->m_flags & STATE_FLAG_THREAD)
@@ -3682,23 +3722,27 @@ static void end_table_io_wait_v1(PSI_table_locker* locker)
DBUG_ASSERT(table != NULL);
PFS_single_stat *stat;
+ PFS_table_io_stat *table_io_stat;
DBUG_ASSERT((state->m_index < table->m_share->m_key_count) ||
- (state->m_index == MAX_KEY));
+ (state->m_index == MAX_INDEXES));
+
+ table_io_stat= & table->m_table_stat.m_index_stat[state->m_index];
+ table_io_stat->m_has_data= true;
switch (state->m_io_operation)
{
case PSI_TABLE_FETCH_ROW:
- stat= & table->m_table_stat.m_index_stat[state->m_index].m_fetch;
+ stat= & table_io_stat->m_fetch;
break;
case PSI_TABLE_WRITE_ROW:
- stat= & table->m_table_stat.m_index_stat[state->m_index].m_insert;
+ stat= & table_io_stat->m_insert;
break;
case PSI_TABLE_UPDATE_ROW:
- stat= & table->m_table_stat.m_index_stat[state->m_index].m_update;
+ stat= & table_io_stat->m_update;
break;
case PSI_TABLE_DELETE_ROW:
- stat= & table->m_table_stat.m_index_stat[state->m_index].m_delete;
+ stat= & table_io_stat->m_delete;
break;
default:
DBUG_ASSERT(false);
@@ -3719,22 +3763,40 @@ static void end_table_io_wait_v1(PSI_table_locker* locker)
stat->aggregate_counted();
}
- if (flags & STATE_FLAG_EVENT)
+ if (flags & STATE_FLAG_THREAD)
{
- DBUG_ASSERT(flags & STATE_FLAG_THREAD);
PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);
DBUG_ASSERT(thread != NULL);
- PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
- DBUG_ASSERT(wait != NULL);
+ PFS_single_stat *event_name_array;
+ event_name_array= thread->m_instr_class_waits_stats;
- wait->m_timer_end= timer_end;
- wait->m_end_event_id= thread->m_event_id;
- if (flag_events_waits_history)
- insert_events_waits_history(thread, wait);
- if (flag_events_waits_history_long)
- insert_events_waits_history_long(wait);
- thread->m_events_waits_current--;
+ /*
+ Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME
+ (for wait/io/table/sql/handler)
+ */
+ if (flags & STATE_FLAG_TIMED)
+ {
+ event_name_array[GLOBAL_TABLE_IO_EVENT_INDEX].aggregate_value(wait_time);
+ }
+ else
+ {
+ event_name_array[GLOBAL_TABLE_IO_EVENT_INDEX].aggregate_counted();
+ }
+
+ if (flags & STATE_FLAG_EVENT)
+ {
+ PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
+ DBUG_ASSERT(wait != NULL);
+
+ wait->m_timer_end= timer_end;
+ wait->m_end_event_id= thread->m_event_id;
+ if (flag_events_waits_history)
+ insert_events_waits_history(thread, wait);
+ if (flag_events_waits_history_long)
+ insert_events_waits_history_long(wait);
+ thread->m_events_waits_current--;
+ }
}
table->m_has_io_stats= true;
@@ -3770,22 +3832,40 @@ static void end_table_lock_wait_v1(PSI_table_locker* locker)
stat->aggregate_counted();
}
- if (flags & STATE_FLAG_EVENT)
+ if (flags & STATE_FLAG_THREAD)
{
- DBUG_ASSERT(flags & STATE_FLAG_THREAD);
PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);
DBUG_ASSERT(thread != NULL);
- PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
- DBUG_ASSERT(wait != NULL);
+ PFS_single_stat *event_name_array;
+ event_name_array= thread->m_instr_class_waits_stats;
- wait->m_timer_end= timer_end;
- wait->m_end_event_id= thread->m_event_id;
- if (flag_events_waits_history)
- insert_events_waits_history(thread, wait);
- if (flag_events_waits_history_long)
- insert_events_waits_history_long(wait);
- thread->m_events_waits_current--;
+ /*
+ Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME
+ (for wait/lock/table/sql/handler)
+ */
+ if (flags & STATE_FLAG_TIMED)
+ {
+ event_name_array[GLOBAL_TABLE_LOCK_EVENT_INDEX].aggregate_value(wait_time);
+ }
+ else
+ {
+ event_name_array[GLOBAL_TABLE_LOCK_EVENT_INDEX].aggregate_counted();
+ }
+
+ if (flags & STATE_FLAG_EVENT)
+ {
+ PFS_events_waits *wait= reinterpret_cast<PFS_events_waits*> (state->m_wait);
+ DBUG_ASSERT(wait != NULL);
+
+ wait->m_timer_end= timer_end;
+ wait->m_end_event_id= thread->m_event_id;
+ if (flag_events_waits_history)
+ insert_events_waits_history(thread, wait);
+ if (flag_events_waits_history_long)
+ insert_events_waits_history_long(wait);
+ thread->m_events_waits_current--;
+ }
}
table->m_has_lock_stats= true;
@@ -3803,25 +3883,50 @@ static void end_file_wait_v1(PSI_file_locker *locker,
Implementation of the file instrumentation interface.
@sa PSI_v1::start_file_open_wait.
*/
-static PSI_file* start_file_open_wait_v1(PSI_file_locker *locker,
- const char *src_file,
- uint src_line)
+static void start_file_open_wait_v1(PSI_file_locker *locker,
+ const char *src_file,
+ uint src_line)
{
- PSI_file_locker_state *state= reinterpret_cast<PSI_file_locker_state*> (locker);
- DBUG_ASSERT(state != NULL);
-
start_file_wait_v1(locker, 0, src_file, src_line);
- return state->m_file;
+ return;
}
/**
Implementation of the file instrumentation interface.
@sa PSI_v1::end_file_open_wait.
*/
-static void end_file_open_wait_v1(PSI_file_locker *locker)
+static PSI_file* end_file_open_wait_v1(PSI_file_locker *locker,
+ void *result)
{
+ PSI_file_locker_state *state= reinterpret_cast<PSI_file_locker_state*> (locker);
+ DBUG_ASSERT(state != NULL);
+
+ switch (state->m_operation)
+ {
+ case PSI_FILE_STAT:
+ break;
+ case PSI_FILE_STREAM_OPEN:
+ case PSI_FILE_CREATE:
+ if (result != NULL)
+ {
+ PFS_file_class *klass= reinterpret_cast<PFS_file_class*> (state->m_class);
+ PFS_thread *thread= reinterpret_cast<PFS_thread*> (state->m_thread);
+ const char *name= state->m_name;
+ uint len= strlen(name);
+ PFS_file *pfs_file= find_or_create_file(thread, klass, name, len, true);
+ state->m_file= reinterpret_cast<PSI_file*> (pfs_file);
+ }
+ break;
+ case PSI_FILE_OPEN:
+ default:
+ DBUG_ASSERT(false);
+ break;
+ }
+
end_file_wait_v1(locker, 0);
+
+ return state->m_file;
}
/**
@@ -3831,25 +3936,33 @@ static void end_file_open_wait_v1(PSI_file_locker *locker)
static void end_file_open_wait_and_bind_to_descriptor_v1
(PSI_file_locker *locker, File file)
{
+ PFS_file *pfs_file= NULL;
int index= (int) file;
PSI_file_locker_state *state= reinterpret_cast<PSI_file_locker_state*> (locker);
DBUG_ASSERT(state != NULL);
- end_file_wait_v1(locker, 0);
+ if (index >= 0)
+ {
+ PFS_file_class *klass= reinterpret_cast<PFS_file_class*> (state->m_class);
+ PFS_thread *thread= reinterpret_cast<PFS_thread*> (state->m_thread);
+ const char *name= state->m_name;
+ uint len= strlen(name);
+ pfs_file= find_or_create_file(thread, klass, name, len, true);
+ state->m_file= reinterpret_cast<PSI_file*> (pfs_file);
+ }
- PFS_file *pfs_file= reinterpret_cast<PFS_file*> (state->m_file);
- DBUG_ASSERT(pfs_file != NULL);
+ end_file_wait_v1(locker, 0);
if (likely(index >= 0))
{
if (likely(index < file_handle_max))
file_handle_array[index]= pfs_file;
else
+ {
+ if (pfs_file != NULL)
+ release_file(pfs_file);
file_handle_lost++;
- }
- else
- {
- release_file(pfs_file);
+ }
}
}
@@ -3896,7 +4009,7 @@ static void end_file_wait_v1(PSI_file_locker *locker,
PSI_file_locker_state *state= reinterpret_cast<PSI_file_locker_state*> (locker);
DBUG_ASSERT(state != NULL);
PFS_file *file= reinterpret_cast<PFS_file *> (state->m_file);
- DBUG_ASSERT(file != NULL);
+ PFS_file_class *klass= reinterpret_cast<PFS_file_class *> (state->m_class);
PFS_thread *thread= reinterpret_cast<PFS_thread *> (state->m_thread);
ulonglong timer_end= 0;
@@ -3905,15 +4018,26 @@ static void end_file_wait_v1(PSI_file_locker *locker,
register uint flags= state->m_flags;
size_t bytes= ((int)byte_count > -1 ? byte_count : 0);
+ PFS_file_stat *file_stat;
+
+ if (file != NULL)
+ {
+ file_stat= & file->m_file_stat;
+ }
+ else
+ {
+ file_stat= & klass->m_file_stat;
+ }
+
switch (state->m_operation)
{
/* Group read operations */
case PSI_FILE_READ:
- byte_stat= &file->m_file_stat.m_io_stat.m_read;
+ byte_stat= &file_stat->m_io_stat.m_read;
break;
/* Group write operations */
case PSI_FILE_WRITE:
- byte_stat= &file->m_file_stat.m_io_stat.m_write;
+ byte_stat= &file_stat->m_io_stat.m_write;
break;
/* Group remaining operations as miscellaneous */
case PSI_FILE_CREATE:
@@ -3931,7 +4055,7 @@ static void end_file_wait_v1(PSI_file_locker *locker,
case PSI_FILE_SYNC:
case PSI_FILE_STAT:
case PSI_FILE_CLOSE:
- byte_stat= &file->m_file_stat.m_io_stat.m_misc;
+ byte_stat= &file_stat->m_io_stat.m_misc;
break;
default:
DBUG_ASSERT(false);
@@ -3959,7 +4083,7 @@ static void end_file_wait_v1(PSI_file_locker *locker,
PFS_single_stat *event_name_array;
event_name_array= thread->m_instr_class_waits_stats;
- uint index= file->m_class->m_event_name_index;
+ uint index= klass->m_event_name_index;
if (flags & STATE_FLAG_TIMED)
{
@@ -3980,6 +4104,9 @@ static void end_file_wait_v1(PSI_file_locker *locker,
wait->m_timer_end= timer_end;
wait->m_number_of_bytes= bytes;
wait->m_end_event_id= thread->m_event_id;
+ wait->m_object_instance_addr= file;
+ wait->m_weak_file= file;
+ wait->m_weak_version= (file ? file->get_version() : 0);
if (flag_events_waits_history)
insert_events_waits_history(thread, wait);
@@ -3988,22 +4115,79 @@ static void end_file_wait_v1(PSI_file_locker *locker,
thread->m_events_waits_current--;
}
}
+}
- /* Release or destroy the file if necessary */
- switch(state->m_operation)
+/**
+ Implementation of the file instrumentation interface.
+ @sa PSI_v1::start_file_close_wait.
+*/
+static void start_file_close_wait_v1(PSI_file_locker *locker,
+ const char *src_file,
+ uint src_line)
+{
+ PFS_thread *thread;
+ const char *name;
+ uint len;
+ PFS_file *pfs_file;
+ PSI_file_locker_state *state= reinterpret_cast<PSI_file_locker_state*> (locker);
+ DBUG_ASSERT(state != NULL);
+
+ switch (state->m_operation)
{
- case PSI_FILE_CLOSE:
- case PSI_FILE_STREAM_CLOSE:
- case PSI_FILE_STAT:
- release_file(file);
- break;
case PSI_FILE_DELETE:
- DBUG_ASSERT(thread != NULL);
- destroy_file(thread, file);
+ thread= reinterpret_cast<PFS_thread*> (state->m_thread);
+ name= state->m_name;
+ len= strlen(name);
+ pfs_file= find_or_create_file(thread, NULL, name, len, false);
+ state->m_file= reinterpret_cast<PSI_file*> (pfs_file);
+ break;
+ case PSI_FILE_STREAM_CLOSE:
+ case PSI_FILE_CLOSE:
break;
default:
+ DBUG_ASSERT(false);
break;
}
+
+ start_file_wait_v1(locker, 0, src_file, src_line);
+
+ return;
+}
+
+/**
+ Implementation of the file instrumentation interface.
+ @sa PSI_v1::end_file_close_wait.
+*/
+static void end_file_close_wait_v1(PSI_file_locker *locker, int rc)
+{
+ PSI_file_locker_state *state= reinterpret_cast<PSI_file_locker_state*> (locker);
+ DBUG_ASSERT(state != NULL);
+
+ end_file_wait_v1(locker, 0);
+
+ if (rc == 0)
+ {
+ PFS_thread *thread= reinterpret_cast<PFS_thread*> (state->m_thread);
+ PFS_file *file= reinterpret_cast<PFS_file*> (state->m_file);
+
+ /* Release or destroy the file if necessary */
+ switch(state->m_operation)
+ {
+ case PSI_FILE_CLOSE:
+ case PSI_FILE_STREAM_CLOSE:
+ if (file != NULL)
+ release_file(file);
+ break;
+ case PSI_FILE_DELETE:
+ if (file != NULL)
+ destroy_file(thread, file);
+ break;
+ default:
+ DBUG_ASSERT(false);
+ break;
+ }
+ }
+ return;
}
static void start_stage_v1(PSI_stage_key key, const char *src_file, int src_line)
@@ -4165,7 +4349,8 @@ static void end_stage_v1()
static PSI_statement_locker*
get_thread_statement_locker_v1(PSI_statement_locker_state *state,
- PSI_statement_key key)
+ PSI_statement_key key,
+ const void *charset)
{
DBUG_ASSERT(state != NULL);
if (! flag_global_instrumentation)
@@ -4262,9 +4447,11 @@ get_thread_statement_locker_v1(PSI_statement_locker_state *state,
if (flag_statements_digest)
{
+ const CHARSET_INFO *cs= static_cast <const CHARSET_INFO*> (charset);
flags|= STATE_FLAG_DIGEST;
state->m_digest_state.m_last_id_index= 0;
digest_reset(& state->m_digest_state.m_digest_storage);
+ state->m_digest_state.m_digest_storage.m_charset_number= cs->number;
}
state->m_discarded= false;
@@ -4288,6 +4475,8 @@ get_thread_statement_locker_v1(PSI_statement_locker_state *state,
state->m_no_index_used= 0;
state->m_no_good_index_used= 0;
+ state->m_schema_name_length= 0;
+
return reinterpret_cast<PSI_statement_locker*> (state);
}
@@ -4352,6 +4541,13 @@ static void start_statement_v1(PSI_statement_locker *locker,
state->m_timer_start= timer_start;
}
+ compile_time_assert(PSI_SCHEMA_NAME_LEN == NAME_LEN);
+ DBUG_ASSERT(db_len <= sizeof(state->m_schema_name));
+
+ if (db_len > 0)
+ memcpy(state->m_schema_name, db, db_len);
+ state->m_schema_name_length= db_len;
+
if (flags & STATE_FLAG_EVENT)
{
PFS_events_statements *pfs= reinterpret_cast<PFS_events_statements*> (state->m_statement);
@@ -4563,11 +4759,10 @@ static void end_statement_v1(PSI_statement_locker *locker, void *stmt_da)
if (flags & STATE_FLAG_DIGEST)
{
digest_storage= &state->m_digest_state.m_digest_storage;
-
- /*
- Populate PFS_statements_digest_stat with computed digest information.
- */
- digest_stat= find_or_create_digest(thread, digest_storage);
+ /* Populate PFS_statements_digest_stat with computed digest information.*/
+ digest_stat= find_or_create_digest(thread, digest_storage,
+ state->m_schema_name,
+ state->m_schema_name_length);
}
if (flags & STATE_FLAG_EVENT)
@@ -4633,11 +4828,10 @@ static void end_statement_v1(PSI_statement_locker *locker, void *stmt_da)
{
/* Set digest stat. */
digest_storage= &state->m_digest_state.m_digest_storage;
-
- /*
- Populate PFS_statements_digest_stat with computed digest information.
- */
- digest_stat= find_or_create_digest(thread, digest_storage);
+ /* Populate statements_digest_stat with computed digest information. */
+ digest_stat= find_or_create_digest(thread, digest_storage,
+ state->m_schema_name,
+ state->m_schema_name_length);
}
}
@@ -4869,6 +5063,42 @@ static void set_socket_thread_owner_v1(PSI_socket *socket)
pfs_socket->m_thread_owner= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
}
+
+/**
+ Implementation of the thread attribute connection interface
+ @sa PSI_v1::set_thread_connect_attr.
+*/
+static int set_thread_connect_attrs_v1(const char *buffer, uint length,
+ const void *from_cs)
+{
+
+ PFS_thread *thd= my_pthread_getspecific_ptr(PFS_thread*, THR_PFS);
+
+ DBUG_ASSERT(buffer != NULL);
+
+ if (likely(thd != NULL) && session_connect_attrs_size_per_thread > 0)
+ {
+ /* copy from the input buffer as much as we can fit */
+ uint copy_size= (uint)(length < session_connect_attrs_size_per_thread ?
+ length : session_connect_attrs_size_per_thread);
+ thd->m_lock.allocated_to_dirty();
+ memcpy(thd->m_session_connect_attrs, buffer, copy_size);
+ thd->m_session_connect_attrs_length= copy_size;
+ thd->m_session_connect_attrs_cs= (const CHARSET_INFO *) from_cs;
+ thd->m_lock.dirty_to_allocated();
+
+ if (copy_size == length)
+ return 0;
+ else
+ {
+ session_connect_attrs_lost++;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+
/**
Implementation of the instrumentation interface.
@sa PSI_v1.
@@ -4939,6 +5169,8 @@ PSI_v1 PFS_v1=
end_file_open_wait_and_bind_to_descriptor_v1,
start_file_wait_v1,
end_file_wait_v1,
+ start_file_close_wait_v1,
+ end_file_close_wait_v1,
start_stage_v1,
end_stage_v1,
get_thread_statement_locker_v1,
@@ -4968,7 +5200,8 @@ PSI_v1 PFS_v1=
set_socket_info_v1,
set_socket_thread_owner_v1,
pfs_digest_start_v1,
- pfs_digest_add_token_v1
+ pfs_digest_add_token_v1,
+ set_thread_connect_attrs_v1,
};
static void* get_interface(int version)
diff --git a/storage/perfschema/pfs_account.cc b/storage/perfschema/pfs_account.cc
index 18716478681..b91039f6cd7 100644
--- a/storage/perfschema/pfs_account.cc
+++ b/storage/perfschema/pfs_account.cc
@@ -45,7 +45,7 @@ static PFS_single_stat *account_instr_class_waits_array= NULL;
static PFS_stage_stat *account_instr_class_stages_array= NULL;
static PFS_statement_stat *account_instr_class_statements_array= NULL;
-static LF_HASH account_hash;
+LF_HASH account_hash;
static bool account_hash_inited= false;
/**
@@ -149,10 +149,11 @@ C_MODE_END
*/
int init_account_hash(void)
{
- if (! account_hash_inited)
+ if ((! account_hash_inited) && (account_max > 0))
{
lf_hash_init(&account_hash, sizeof(PFS_account*), LF_HASH_UNIQUE,
0, 0, account_hash_get_key, &my_charset_bin);
+ /* account_hash.size= account_max; */
account_hash_inited= true;
}
return 0;
diff --git a/storage/perfschema/pfs_account.h b/storage/perfschema/pfs_account.h
index 77a9dfab7ba..1ac379e0fc9 100644
--- a/storage/perfschema/pfs_account.h
+++ b/storage/perfschema/pfs_account.h
@@ -46,7 +46,7 @@ struct PFS_account_key
uint m_key_length;
};
-struct PFS_account : PFS_connection_slice
+struct PFS_ALIGNED PFS_account : PFS_connection_slice
{
public:
inline void init_refcount(void)
@@ -115,6 +115,8 @@ extern ulong account_lost;
extern PFS_account *account_array;
+extern LF_HASH account_hash;
+
/** @} */
#endif
diff --git a/storage/perfschema/pfs_atomic.h b/storage/perfschema/pfs_atomic.h
index ffb4c24ecbf..61b8c2b2804 100644
--- a/storage/perfschema/pfs_atomic.h
+++ b/storage/perfschema/pfs_atomic.h
@@ -43,6 +43,16 @@ public:
}
/** Atomic load. */
+ static inline int64 load_64(volatile int64 *ptr)
+ {
+ int64 result;
+ rdlock(ptr);
+ result= my_atomic_load64(ptr);
+ rdunlock(ptr);
+ return result;
+ }
+
+ /** Atomic load. */
static inline uint32 load_u32(volatile uint32 *ptr)
{
uint32 result;
@@ -52,6 +62,16 @@ public:
return result;
}
+ /** Atomic load. */
+ static inline uint64 load_u64(volatile uint64 *ptr)
+ {
+ uint64 result;
+ rdlock(ptr);
+ result= (uint64) my_atomic_load64((int64*) ptr);
+ rdunlock(ptr);
+ return result;
+ }
+
/** Atomic store. */
static inline void store_32(volatile int32 *ptr, int32 value)
{
@@ -61,6 +81,14 @@ public:
}
/** Atomic store. */
+ static inline void store_64(volatile int64 *ptr, int64 value)
+ {
+ wrlock(ptr);
+ my_atomic_store64(ptr, value);
+ wrunlock(ptr);
+ }
+
+ /** Atomic store. */
static inline void store_u32(volatile uint32 *ptr, uint32 value)
{
wrlock(ptr);
@@ -68,6 +96,14 @@ public:
wrunlock(ptr);
}
+ /** Atomic store. */
+ static inline void store_u64(volatile uint64 *ptr, uint64 value)
+ {
+ wrlock(ptr);
+ my_atomic_store64((int64*) ptr, (int64) value);
+ wrunlock(ptr);
+ }
+
/** Atomic add. */
static inline int32 add_32(volatile int32 *ptr, int32 value)
{
@@ -79,6 +115,16 @@ public:
}
/** Atomic add. */
+ static inline int64 add_64(volatile int64 *ptr, int64 value)
+ {
+ int64 result;
+ wrlock(ptr);
+ result= my_atomic_add64(ptr, value);
+ wrunlock(ptr);
+ return result;
+ }
+
+ /** Atomic add. */
static inline uint32 add_u32(volatile uint32 *ptr, uint32 value)
{
uint32 result;
@@ -88,6 +134,16 @@ public:
return result;
}
+ /** Atomic add. */
+ static inline uint64 add_u64(volatile uint64 *ptr, uint64 value)
+ {
+ uint64 result;
+ wrlock(ptr);
+ result= (uint64) my_atomic_add64((int64*) ptr, (int64) value);
+ wrunlock(ptr);
+ return result;
+ }
+
/** Atomic compare and swap. */
static inline bool cas_32(volatile int32 *ptr, int32 *old_value,
int32 new_value)
@@ -100,6 +156,17 @@ public:
}
/** Atomic compare and swap. */
+ static inline bool cas_64(volatile int64 *ptr, int64 *old_value,
+ int64 new_value)
+ {
+ bool result;
+ wrlock(ptr);
+ result= my_atomic_cas64(ptr, old_value, new_value);
+ wrunlock(ptr);
+ return result;
+ }
+
+ /** Atomic compare and swap. */
static inline bool cas_u32(volatile uint32 *ptr, uint32 *old_value,
uint32 new_value)
{
@@ -111,6 +178,18 @@ public:
return result;
}
+ /** Atomic compare and swap. */
+ static inline bool cas_u64(volatile uint64 *ptr, uint64 *old_value,
+ uint64 new_value)
+ {
+ bool result;
+ wrlock(ptr);
+ result= my_atomic_cas64((int64*) ptr, (int64*) old_value,
+ (uint64) new_value);
+ wrunlock(ptr);
+ return result;
+ }
+
private:
static my_atomic_rwlock_t m_rwlock_array[256];
diff --git a/storage/perfschema/pfs_autosize.cc b/storage/perfschema/pfs_autosize.cc
new file mode 100644
index 00000000000..38bd36d8321
--- /dev/null
+++ b/storage/perfschema/pfs_autosize.cc
@@ -0,0 +1,366 @@
+/* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software Foundation,
+ 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */
+
+/**
+ @file storage/perfschema/pfs_autosize.cc
+ Private interface for the server (implementation).
+*/
+
+#include "my_global.h"
+#include "sql_const.h"
+#include "pfs_server.h"
+
+#include <algorithm>
+using std::min;
+using std::max;
+
+static const ulong fixed_mutex_instances= 500;
+static const ulong fixed_rwlock_instances= 200;
+static const ulong fixed_cond_instances= 50;
+static const ulong fixed_file_instances= 200;
+static const ulong fixed_socket_instances= 10;
+static const ulong fixed_thread_instances= 50;
+
+static const ulong mutex_per_connection= 3;
+static const ulong rwlock_per_connection= 1;
+static const ulong cond_per_connection= 2;
+static const ulong file_per_connection= 0;
+static const ulong socket_per_connection= 1;
+static const ulong thread_per_connection= 1;
+
+static const ulong mutex_per_handle= 0;
+static const ulong rwlock_per_handle= 0;
+static const ulong cond_per_handle= 0;
+static const ulong file_per_handle= 0;
+static const ulong socket_per_handle= 0;
+static const ulong thread_per_handle= 0;
+
+static const ulong mutex_per_share= 5;
+static const ulong rwlock_per_share= 3;
+static const ulong cond_per_share= 1;
+static const ulong file_per_share= 3;
+static const ulong socket_per_share= 0;
+static const ulong thread_per_share= 0;
+
+struct PFS_sizing_data
+{
+ /** Default value for @c PFS_param.m_account_sizing. */
+ ulong m_account_sizing;
+ /** Default value for @c PFS_param.m_user_sizing. */
+ ulong m_user_sizing;
+ /** Default value for @c PFS_param.m_host_sizing. */
+ ulong m_host_sizing;
+
+ /** Default value for @c PFS_param.m_events_waits_history_sizing. */
+ ulong m_events_waits_history_sizing;
+ /** Default value for @c PFS_param.m_events_waits_history_long_sizing. */
+ ulong m_events_waits_history_long_sizing;
+ /** Default value for @c PFS_param.m_events_stages_history_sizing. */
+ ulong m_events_stages_history_sizing;
+ /** Default value for @c PFS_param.m_events_stages_history_long_sizing. */
+ ulong m_events_stages_history_long_sizing;
+ /** Default value for @c PFS_param.m_events_statements_history_sizing. */
+ ulong m_events_statements_history_sizing;
+ /** Default value for @c PFS_param.m_events_statements_history_long_sizing. */
+ ulong m_events_statements_history_long_sizing;
+ /** Default value for @c PFS_param.m_digest_sizing. */
+ ulong m_digest_sizing;
+ /** Default value for @c PFS_param.m_session_connect_attrs_sizing. */
+ ulong m_session_connect_attrs_sizing;
+
+ /**
+ Minimum number of tables to keep statistics for.
+ On small deployments, all the tables can fit into the table definition cache,
+ and this value can be 0.
+ On big deployments, the table definition cache is only a subset of all the tables
+ in the database, which are accounted for here.
+ */
+ ulong m_min_number_of_tables;
+
+ /**
+ Load factor for 'volatile' objects (mutexes, table handles, ...).
+ Instrumented objects that:
+ - use little memory
+ - are created/destroyed very frequently
+ should be stored in a low density (mostly empty) memory buffer,
+ to optimize for speed.
+ */
+ float m_load_factor_volatile;
+ /**
+ Load factor for 'normal' objects (files).
+ Instrumented objects that:
+ - use a medium amount of memory
+ - are created/destroyed
+ should be stored in a medium density memory buffer,
+ as a trade off between space and speed.
+ */
+ float m_load_factor_normal;
+ /**
+ Load factor for 'static' objects (table shares).
+ Instrumented objects that:
+ - use a lot of memory
+ - are created/destroyed very rarely
+ can be stored in a high density (mostly packed) memory buffer,
+ to optimize for space.
+ */
+ float m_load_factor_static;
+};
+
+PFS_sizing_data small_data=
+{
+ /* Account / user / host */
+ 10, 5, 20,
+ /* History sizes */
+ 5, 100, 5, 100, 5, 100,
+ /* Digests */
+ 1000,
+ /* Session connect attrs. */
+ 512,
+ /* Min tables */
+ 200,
+ /* Load factors */
+ 0.90, 0.90, 0.90
+};
+
+PFS_sizing_data medium_data=
+{
+ /* Account / user / host */
+ 100, 100, 100,
+ /* History sizes */
+ 10, 1000, 10, 1000, 10, 1000,
+ /* Digests */
+ 5000,
+ /* Session connect attrs. */
+ 512,
+ /* Min tables */
+ 500,
+ /* Load factors */
+ 0.70, 0.80, 0.90
+};
+
+PFS_sizing_data large_data=
+{
+ /* Account / user / host */
+ 100, 100, 100,
+ /* History sizes */
+ 10, 10000, 10, 10000, 10, 10000,
+ /* Digests */
+ 10000,
+ /* Session connect attrs. */
+ 512,
+ /* Min tables */
+ 10000,
+ /* Load factors */
+ 0.50, 0.65, 0.80
+};
+
+static inline ulong apply_load_factor(ulong raw_value, float factor)
+{
+ float value = ((float) raw_value) / factor;
+ return (ulong) ceil(value);
+}
+
+PFS_sizing_data *estimate_hints(PFS_global_param *param)
+{
+ if ((param->m_hints.m_max_connections <= MAX_CONNECTIONS_DEFAULT) &&
+ (param->m_hints.m_table_definition_cache <= TABLE_DEF_CACHE_DEFAULT) &&
+ (param->m_hints.m_table_open_cache <= TABLE_OPEN_CACHE_DEFAULT))
+ {
+ /* The my.cnf used is either unchanged, or lower than factory defaults. */
+ return & small_data;
+ }
+
+ if ((param->m_hints.m_max_connections <= MAX_CONNECTIONS_DEFAULT * 2) &&
+ (param->m_hints.m_table_definition_cache <= TABLE_DEF_CACHE_DEFAULT * 2) &&
+ (param->m_hints.m_table_open_cache <= TABLE_OPEN_CACHE_DEFAULT * 2))
+ {
+ /* Some defaults have been increased, to "moderate" values. */
+ return & medium_data;
+ }
+
+ /* Looks like a server in production. */
+ return & large_data;
+}
+
+static void apply_heuristic(PFS_global_param *p, PFS_sizing_data *h)
+{
+ ulong count;
+ ulong con = p->m_hints.m_max_connections;
+ ulong handle = p->m_hints.m_table_open_cache;
+ ulong share = p->m_hints.m_table_definition_cache;
+ ulong file = p->m_hints.m_open_files_limit;
+
+ if (p->m_table_sizing < 0)
+ {
+ count= handle;
+
+ p->m_table_sizing= apply_load_factor(count, h->m_load_factor_volatile);
+ }
+
+ if (p->m_table_share_sizing < 0)
+ {
+ count= share;
+
+ count= max<ulong>(count, h->m_min_number_of_tables);
+ p->m_table_share_sizing= apply_load_factor(count, h->m_load_factor_static);
+ }
+
+ if (p->m_account_sizing < 0)
+ {
+ p->m_account_sizing= h->m_account_sizing;
+ }
+
+ if (p->m_user_sizing < 0)
+ {
+ p->m_user_sizing= h->m_user_sizing;
+ }
+
+ if (p->m_host_sizing < 0)
+ {
+ p->m_host_sizing= h->m_host_sizing;
+ }
+
+ if (p->m_events_waits_history_sizing < 0)
+ {
+ p->m_events_waits_history_sizing= h->m_events_waits_history_sizing;
+ }
+
+ if (p->m_events_waits_history_long_sizing < 0)
+ {
+ p->m_events_waits_history_long_sizing= h->m_events_waits_history_long_sizing;
+ }
+
+ if (p->m_events_stages_history_sizing < 0)
+ {
+ p->m_events_stages_history_sizing= h->m_events_stages_history_sizing;
+ }
+
+ if (p->m_events_stages_history_long_sizing < 0)
+ {
+ p->m_events_stages_history_long_sizing= h->m_events_stages_history_long_sizing;
+ }
+
+ if (p->m_events_statements_history_sizing < 0)
+ {
+ p->m_events_statements_history_sizing= h->m_events_statements_history_sizing;
+ }
+
+ if (p->m_events_statements_history_long_sizing < 0)
+ {
+ p->m_events_statements_history_long_sizing= h->m_events_statements_history_long_sizing;
+ }
+
+ if (p->m_digest_sizing < 0)
+ {
+ p->m_digest_sizing= h->m_digest_sizing;
+ }
+
+ if (p->m_session_connect_attrs_sizing < 0)
+ {
+ p->m_session_connect_attrs_sizing= h->m_session_connect_attrs_sizing;
+ }
+
+ if (p->m_mutex_sizing < 0)
+ {
+ count= fixed_mutex_instances
+ + con * mutex_per_connection
+ + handle * mutex_per_handle
+ + share * mutex_per_share;
+
+ p->m_mutex_sizing= apply_load_factor(count, h->m_load_factor_volatile);
+ }
+
+ if (p->m_rwlock_sizing < 0)
+ {
+ count= fixed_rwlock_instances
+ + con * rwlock_per_connection
+ + handle * rwlock_per_handle
+ + share * rwlock_per_share;
+
+ p->m_rwlock_sizing= apply_load_factor(count, h->m_load_factor_volatile);
+ }
+
+ if (p->m_cond_sizing < 0)
+ {
+ ulong count;
+ count= fixed_cond_instances
+ + con * cond_per_connection
+ + handle * cond_per_handle
+ + share * cond_per_share;
+
+ p->m_cond_sizing= apply_load_factor(count, h->m_load_factor_volatile);
+ }
+
+ if (p->m_file_sizing < 0)
+ {
+ count= fixed_file_instances
+ + con * file_per_connection
+ + handle * file_per_handle
+ + share * file_per_share;
+
+ count= max<ulong>(count, file);
+ p->m_file_sizing= apply_load_factor(count, h->m_load_factor_normal);
+ }
+
+ if (p->m_socket_sizing < 0)
+ {
+ count= fixed_socket_instances
+ + con * socket_per_connection
+ + handle * socket_per_handle
+ + share * socket_per_share;
+
+ p->m_socket_sizing= apply_load_factor(count, h->m_load_factor_volatile);
+ }
+
+ if (p->m_thread_sizing < 0)
+ {
+ count= fixed_thread_instances
+ + con * thread_per_connection
+ + handle * thread_per_handle
+ + share * thread_per_share;
+
+ p->m_thread_sizing= apply_load_factor(count, h->m_load_factor_volatile);
+ }
+}
+
+void pfs_automated_sizing(PFS_global_param *param)
+{
+ PFS_sizing_data *heuristic;
+ heuristic= estimate_hints(param);
+ apply_heuristic(param, heuristic);
+
+ DBUG_ASSERT(param->m_account_sizing >= 0);
+ DBUG_ASSERT(param->m_digest_sizing >= 0);
+ DBUG_ASSERT(param->m_host_sizing >= 0);
+ DBUG_ASSERT(param->m_user_sizing >= 0);
+
+ DBUG_ASSERT(param->m_events_waits_history_sizing >= 0);
+ DBUG_ASSERT(param->m_events_waits_history_long_sizing >= 0);
+ DBUG_ASSERT(param->m_events_stages_history_sizing >= 0);
+ DBUG_ASSERT(param->m_events_stages_history_long_sizing >= 0);
+ DBUG_ASSERT(param->m_events_statements_history_sizing >= 0);
+ DBUG_ASSERT(param->m_events_statements_history_long_sizing >= 0);
+ DBUG_ASSERT(param->m_session_connect_attrs_sizing >= 0);
+
+ DBUG_ASSERT(param->m_mutex_sizing >= 0);
+ DBUG_ASSERT(param->m_rwlock_sizing >= 0);
+ DBUG_ASSERT(param->m_cond_sizing >= 0);
+ DBUG_ASSERT(param->m_file_sizing >= 0);
+ DBUG_ASSERT(param->m_socket_sizing >= 0);
+ DBUG_ASSERT(param->m_thread_sizing >= 0);
+ DBUG_ASSERT(param->m_table_sizing >= 0);
+ DBUG_ASSERT(param->m_table_share_sizing >= 0);
+}
+
diff --git a/storage/perfschema/pfs_digest.cc b/storage/perfschema/pfs_digest.cc
index 92c27b2e85f..6edcba4c013 100644
--- a/storage/perfschema/pfs_digest.cc
+++ b/storage/perfschema/pfs_digest.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -30,6 +30,8 @@
#include "table_helper.h"
#include "my_md5.h"
#include "sql_lex.h"
+#include "sql_get_diagnostics.h"
+#include "sql_string.h"
#include <string.h>
/* Generated code */
@@ -58,7 +60,6 @@
ulong digest_max= 0;
ulong digest_lost= 0;
-
/** EVENTS_STATEMENTS_HISTORY_LONG circular buffer. */
PFS_statements_digest_stat *statements_digest_stat_array= NULL;
/** Consumer flag for table EVENTS_STATEMENTS_SUMMARY_BY_DIGEST. */
@@ -69,7 +70,7 @@ bool flag_statements_digest= true;
*/
volatile uint32 digest_index= 1;
-static LF_HASH digest_hash;
+LF_HASH digest_hash;
static bool digest_hash_inited= false;
/**
@@ -123,8 +124,8 @@ static uchar *digest_hash_get_key(const uchar *entry, size_t *length,
DBUG_ASSERT(typed_entry != NULL);
digest= *typed_entry;
DBUG_ASSERT(digest != NULL);
- *length= PFS_MD5_SIZE;
- result= digest->m_digest_hash.m_md5;
+ *length= sizeof (PFS_digest_key);
+ result= & digest->m_digest_key;
return const_cast<uchar*> (reinterpret_cast<const uchar*> (result));
}
C_MODE_END
@@ -136,11 +137,12 @@ C_MODE_END
*/
int init_digest_hash(void)
{
- if (! digest_hash_inited)
+ if ((! digest_hash_inited) && (digest_max > 0))
{
lf_hash_init(&digest_hash, sizeof(PFS_statements_digest_stat*),
LF_HASH_UNIQUE, 0, 0, digest_hash_get_key,
&my_charset_bin);
+ /* digest_hash.size= digest_max; */
digest_hash_inited= true;
}
return 0;
@@ -167,8 +169,10 @@ static LF_PINS* get_digest_hash_pins(PFS_thread *thread)
}
PFS_statement_stat*
-find_or_create_digest(PFS_thread* thread,
- PSI_digest_storage* digest_storage)
+find_or_create_digest(PFS_thread *thread,
+ PSI_digest_storage *digest_storage,
+ const char *schema_name,
+ uint schema_name_length)
{
if (statements_digest_stat_array == NULL)
return NULL;
@@ -180,13 +184,21 @@ find_or_create_digest(PFS_thread* thread,
if (unlikely(pins == NULL))
return NULL;
+ /*
+ Note: the LF_HASH key is a block of memory,
+ make sure to clean unused bytes,
+ so that memcmp() can compare keys.
+ */
+ PFS_digest_key hash_key;
+ memset(& hash_key, 0, sizeof(hash_key));
/* Compute MD5 Hash of the tokens received. */
- PFS_digest_hash md5;
- compute_md5_hash((char *) md5.m_md5,
+ compute_md5_hash((char *) hash_key.m_md5,
(char *) digest_storage->m_token_array,
digest_storage->m_byte_count);
-
- unsigned char* hash_key= md5.m_md5;
+ /* Add the current schema to the key */
+ hash_key.m_schema_name_length= schema_name_length;
+ if (schema_name_length > 0)
+ memcpy(hash_key.m_schema_name, schema_name, schema_name_length);
int res;
ulong safe_index;
@@ -202,7 +214,7 @@ search:
/* Lookup LF_HASH using this new key. */
entry= reinterpret_cast<PFS_statements_digest_stat**>
(lf_hash_search(&digest_hash, pins,
- hash_key, PFS_MD5_SIZE));
+ &hash_key, sizeof(PFS_digest_key)));
if (entry && (entry != MY_ERRPTR))
{
@@ -244,7 +256,7 @@ search:
pfs= &statements_digest_stat_array[safe_index];
/* Copy digest hash/LF Hash search key. */
- memcpy(pfs->m_digest_hash.m_md5, md5.m_md5, PFS_MD5_SIZE);
+ memcpy(& pfs->m_digest_key, &hash_key, sizeof(PFS_digest_key));
/*
Copy digest storage to statement_digest_stat_array so that it could be
@@ -278,7 +290,7 @@ search:
return NULL;
}
-void purge_digest(PFS_thread* thread, unsigned char* hash_key)
+void purge_digest(PFS_thread* thread, PFS_digest_key *hash_key)
{
LF_PINS *pins= get_digest_hash_pins(thread);
if (unlikely(pins == NULL))
@@ -289,12 +301,12 @@ void purge_digest(PFS_thread* thread, unsigned char* hash_key)
/* Lookup LF_HASH using this new key. */
entry= reinterpret_cast<PFS_statements_digest_stat**>
(lf_hash_search(&digest_hash, pins,
- hash_key, PFS_MD5_SIZE));
+ hash_key, sizeof(PFS_digest_key)));
if (entry && (entry != MY_ERRPTR))
- {
+ {
lf_hash_delete(&digest_hash, pins,
- hash_key, PFS_MD5_SIZE);
+ hash_key, sizeof(PFS_digest_key));
}
lf_hash_search_unpin(pins);
return;
@@ -313,7 +325,7 @@ void PFS_statements_digest_stat::reset_index(PFS_thread *thread)
/* Only remove entries that exists in the HASH index. */
if (m_digest_storage.m_byte_count > 0)
{
- purge_digest(thread, m_digest_hash.m_md5);
+ purge_digest(thread, & m_digest_key);
}
}
@@ -347,98 +359,130 @@ void reset_esms_by_digest()
*/
void get_digest_text(char* digest_text, PSI_digest_storage* digest_storage)
{
+ DBUG_ASSERT(digest_storage != NULL);
bool truncated= false;
int byte_count= digest_storage->m_byte_count;
- int need_bytes;
+ int bytes_needed= 0;
uint tok= 0;
- char *id_string;
- int id_length;
int current_byte= 0;
lex_token_string *tok_data;
/* -4 is to make sure extra space for '...' and a '\0' at the end. */
- int available_bytes_to_write= COL_DIGEST_TEXT_SIZE - 4;
+ int bytes_available= COL_DIGEST_TEXT_SIZE - 4;
+
+ /* Convert text to utf8 */
+ const CHARSET_INFO *from_cs= get_charset(digest_storage->m_charset_number, MYF(0));
+ const CHARSET_INFO *to_cs= &my_charset_utf8_bin;
+
+ if (from_cs == NULL)
+ {
+ /*
+ Can happen, as we do dirty reads on digest_storage,
+ which can be written to in another thread.
+ */
+ *digest_text= '\0';
+ return;
+ }
+
+ /*
+ Max converted size is number of characters * max multibyte length of the
+ target charset, which is 4 for UTF8.
+ */
+ const uint max_converted_size= PSI_MAX_DIGEST_STORAGE_SIZE * 4;
+ char id_buffer[max_converted_size];
+ char *id_string;
+ int id_length;
+ bool convert_text= !my_charset_same(from_cs, to_cs);
DBUG_ASSERT(byte_count <= PSI_MAX_DIGEST_STORAGE_SIZE);
while ((current_byte < byte_count) &&
- (available_bytes_to_write > 0) &&
- (! truncated))
+ (bytes_available > 0) &&
+ !truncated)
{
current_byte= read_token(digest_storage, current_byte, &tok);
- tok_data= & lex_token_array[tok];
+ tok_data= &lex_token_array[tok];
switch (tok)
{
/* All identifiers are printed with their name. */
case IDENT:
- current_byte= read_identifier(digest_storage, current_byte,
- & id_string, & id_length);
- need_bytes= id_length + 1; /* <id> space */
- if (need_bytes <= available_bytes_to_write)
+ case IDENT_QUOTED:
{
- if (id_length > 0)
+ char *id_ptr;
+ int id_len;
+ uint err_cs= 0;
+
+ /* Get the next identifier from the storage buffer. */
+ current_byte= read_identifier(digest_storage, current_byte,
+ &id_ptr, &id_len);
+ if (convert_text)
{
- strncpy(digest_text, id_string, id_length);
- digest_text+= id_length;
+ /* Verify that the converted text will fit. */
+ if (to_cs->mbmaxlen*id_len > max_converted_size)
+ {
+ truncated= true;
+ break;
+ }
+ /* Convert identifier string into the storage character set. */
+ id_length= my_convert(id_buffer, max_converted_size, to_cs,
+ id_ptr, id_len, from_cs, &err_cs);
+ id_string= id_buffer;
}
- *digest_text= ' ';
- digest_text++;
- available_bytes_to_write-= need_bytes;
- }
- else
- {
- truncated= true;
- }
- break;
- case IDENT_QUOTED:
- current_byte= read_identifier(digest_storage, current_byte,
- & id_string, & id_length);
- need_bytes= id_length + 3; /* quote <id> quote space */
- if (need_bytes <= available_bytes_to_write)
- {
- *digest_text= '`';
- digest_text++;
- if (id_length > 0)
+ else
{
- strncpy(digest_text, id_string, id_length);
- digest_text+= id_length;
+ id_string= id_ptr;
+ id_length= id_len;
+ }
+
+ if (id_length == 0 || err_cs != 0)
+ {
+ truncated= true;
+ break;
+ }
+ /* Copy the converted identifier into the digest string. */
+ bytes_needed= id_length + (tok == IDENT ? 1 : 3);
+ if (bytes_needed <= bytes_available)
+ {
+ if (tok == IDENT_QUOTED)
+ *digest_text++= '`';
+ if (id_length > 0)
+ {
+ memcpy(digest_text, id_string, id_length);
+ digest_text+= id_length;
+ }
+ if (tok == IDENT_QUOTED)
+ *digest_text++= '`';
+ *digest_text++= ' ';
+ bytes_available-= bytes_needed;
+ }
+ else
+ {
+ truncated= true;
}
- *digest_text= '`';
- digest_text++;
- *digest_text= ' ';
- digest_text++;
- available_bytes_to_write-= need_bytes;
- }
- else
- {
- truncated= true;
}
break;
/* Everything else is printed as is. */
default:
/*
- Make sure not to overflow digest_text buffer while writing
- this token string.
+ Make sure not to overflow digest_text buffer.
+1 is to make sure extra space for ' '.
*/
int tok_length= tok_data->m_token_length;
- need_bytes= tok_length + 1;
+ bytes_needed= tok_length + 1;
- if (need_bytes <= available_bytes_to_write)
+ if (bytes_needed <= bytes_available)
{
- strncpy(digest_text,
- tok_data->m_token_string,
- tok_length);
+ strncpy(digest_text, tok_data->m_token_string, tok_length);
digest_text+= tok_length;
- *digest_text= ' ';
- digest_text++;
- available_bytes_to_write-= need_bytes;
+ *digest_text++= ' ';
+ bytes_available-= bytes_needed;
}
else
{
truncated= true;
}
+ break;
}
}
@@ -524,7 +568,11 @@ PSI_digest_locker* pfs_digest_add_token_v1(PSI_digest_locker *locker,
digest_storage= &state->m_digest_storage;
- if (digest_storage->m_full)
+ /*
+ Stop collecting further tokens if digest storage is full or
+ if END token is received.
+ */
+ if (digest_storage->m_full || token == END_OF_INPUT)
return NULL;
/*
@@ -555,19 +603,23 @@ PSI_digest_locker* pfs_digest_add_token_v1(PSI_digest_locker *locker,
TOK_PFS_GENERIC_VALUE := BIN_NUM | DECIMAL_NUM | ... | ULONGLONG_NUM
*/
token= TOK_PFS_GENERIC_VALUE;
-
+ }
+ /* fall through */
+ case NULL_SYM:
+ {
if ((last_token2 == TOK_PFS_GENERIC_VALUE ||
- last_token2 == TOK_PFS_GENERIC_VALUE_LIST) &&
+ last_token2 == TOK_PFS_GENERIC_VALUE_LIST ||
+ last_token2 == NULL_SYM) &&
(last_token == ','))
{
/*
REDUCE:
TOK_PFS_GENERIC_VALUE_LIST :=
- TOK_PFS_GENERIC_VALUE ',' TOK_PFS_GENERIC_VALUE
+ (TOK_PFS_GENERIC_VALUE|NULL_SYM) ',' (TOK_PFS_GENERIC_VALUE|NULL_SYM)
REDUCE:
TOK_PFS_GENERIC_VALUE_LIST :=
- TOK_PFS_GENERIC_VALUE_LIST ',' TOK_PFS_GENERIC_VALUE
+ TOK_PFS_GENERIC_VALUE_LIST ',' (TOK_PFS_GENERIC_VALUE|NULL_SYM)
*/
digest_storage->m_byte_count-= 2*PFS_SIZE_OF_A_TOKEN;
token= TOK_PFS_GENERIC_VALUE_LIST;
diff --git a/storage/perfschema/pfs_digest.h b/storage/perfschema/pfs_digest.h
index 2646596171c..d2453dc32c6 100644
--- a/storage/perfschema/pfs_digest.h
+++ b/storage/perfschema/pfs_digest.h
@@ -38,32 +38,26 @@ struct PFS_thread;
/**
Structure to store a MD5 hash value (digest) for a statement.
*/
-struct PFS_digest_hash
+struct PFS_digest_key
{
unsigned char m_md5[PFS_MD5_SIZE];
+ char m_schema_name[NAME_LEN];
+ uint m_schema_name_length;
};
/** A statement digest stat record. */
-struct PFS_statements_digest_stat
+struct PFS_ALIGNED PFS_statements_digest_stat
{
- /**
- Digest MD5 Hash.
- */
- PFS_digest_hash m_digest_hash;
+ /** Digest Schema + MD5 Hash. */
+ PFS_digest_key m_digest_key;
- /**
- Digest Storage.
- */
+ /** Digest Storage. */
PSI_digest_storage m_digest_storage;
- /**
- Statement stat.
- */
+ /** Statement stat. */
PFS_statement_stat m_stat;
- /**
- First Seen/last seen.
- */
+ /** First and last seen timestamps. */
ulonglong m_first_seen;
ulonglong m_last_seen;
@@ -78,10 +72,12 @@ void cleanup_digest();
int init_digest_hash(void);
void cleanup_digest_hash(void);
-PFS_statement_stat* find_or_create_digest(PFS_thread*,
- PSI_digest_storage*);
+PFS_statement_stat* find_or_create_digest(PFS_thread *thread,
+ PSI_digest_storage *digest_storage,
+ const char *schema_name,
+ uint schema_name_length);
-void get_digest_text(char* digest_text, PSI_digest_storage*);
+void get_digest_text(char *digest_text, PSI_digest_storage *digest_storage);
void reset_esms_by_digest();
@@ -90,8 +86,8 @@ extern PFS_statements_digest_stat *statements_digest_stat_array;
/* Instrumentation callbacks for pfs.cc */
-struct PSI_digest_locker* pfs_digest_start_v1(PSI_statement_locker *locker);
-PSI_digest_locker* pfs_digest_add_token_v1(PSI_digest_locker *locker,
+struct PSI_digest_locker *pfs_digest_start_v1(PSI_statement_locker *locker);
+PSI_digest_locker *pfs_digest_add_token_v1(PSI_digest_locker *locker,
uint token,
OPAQUE_LEX_YYSTYPE *yylval);
@@ -99,6 +95,7 @@ static inline void digest_reset(PSI_digest_storage *digest)
{
digest->m_full= false;
digest->m_byte_count= 0;
+ digest->m_charset_number= 0;
}
static inline void digest_copy(PSI_digest_storage *to, const PSI_digest_storage *from)
@@ -107,20 +104,21 @@ static inline void digest_copy(PSI_digest_storage *to, const PSI_digest_storage
{
to->m_full= from->m_full;
to->m_byte_count= from->m_byte_count;
+ to->m_charset_number= from->m_charset_number;
DBUG_ASSERT(to->m_byte_count <= PSI_MAX_DIGEST_STORAGE_SIZE);
memcpy(to->m_token_array, from->m_token_array, to->m_byte_count);
}
else
{
- DBUG_ASSERT(! from->m_full);
DBUG_ASSERT(from->m_byte_count == 0);
to->m_full= false;
to->m_byte_count= 0;
+ to->m_charset_number= 0;
}
}
/**
- Function to read a single token from token array.
+ Read a single token from token array.
*/
inline int read_token(PSI_digest_storage *digest_storage,
int index, uint *tok)
@@ -141,7 +139,7 @@ inline int read_token(PSI_digest_storage *digest_storage,
}
/**
- Function to store a single token in token array.
+ Store a single token in token array.
*/
inline void store_token(PSI_digest_storage* digest_storage, uint token)
{
@@ -162,7 +160,7 @@ inline void store_token(PSI_digest_storage* digest_storage, uint token)
}
/**
- Function to read an identifier from token array.
+ Read an identifier from token array.
*/
inline int read_identifier(PSI_digest_storage* digest_storage,
int index, char ** id_string, int *id_length)
@@ -186,7 +184,7 @@ inline int read_identifier(PSI_digest_storage* digest_storage,
}
/**
- Function to store an identifier in token array.
+ Store an identifier in token array.
*/
inline void store_token_identifier(PSI_digest_storage* digest_storage,
uint token,
@@ -207,9 +205,7 @@ inline void store_token_identifier(PSI_digest_storage* digest_storage,
dest[3]= (id_length >> 8) & 0xff;
/* Write the string data */
if (id_length > 0)
- {
- strncpy((char *)(dest + 4), id_name, id_length);
- }
+ memcpy((char *)(dest + 4), id_name, id_length);
digest_storage->m_byte_count+= bytes_needed;
}
else
@@ -218,4 +214,6 @@ inline void store_token_identifier(PSI_digest_storage* digest_storage,
}
}
+extern LF_HASH digest_hash;
+
#endif
diff --git a/storage/perfschema/pfs_engine_table.cc b/storage/perfschema/pfs_engine_table.cc
index c3f29787951..958a2bdd7bd 100644
--- a/storage/perfschema/pfs_engine_table.cc
+++ b/storage/perfschema/pfs_engine_table.cc
@@ -20,6 +20,7 @@
#include "my_global.h"
#include "my_pthread.h"
+#include "hostname.h" /* For Host_entry */
#include "pfs_engine_table.h"
#include "table_events_waits.h"
@@ -69,6 +70,8 @@
#include "table_socket_instances.h"
#include "table_socket_summary_by_instance.h"
#include "table_socket_summary_by_event_name.h"
+#include "table_session_connect_attrs.h"
+#include "table_session_account_connect_attrs.h"
/* For show status */
#include "pfs_column_values.h"
@@ -102,9 +105,7 @@ static PFS_engine_table_share *all_shares[]=
&table_file_instances::m_share,
&table_file_summary_by_event_name::m_share,
&table_file_summary_by_instance::m_share,
-#ifdef QQ_NOT_YET
&table_host_cache::m_share,
-#endif
&table_mutex_instances::m_share,
&table_os_global_by_type::m_share,
&table_performance_timers::m_share,
@@ -145,6 +146,8 @@ static PFS_engine_table_share *all_shares[]=
&table_socket_instances::m_share,
&table_socket_summary_by_instance::m_share,
&table_socket_summary_by_event_name::m_share,
+ &table_session_connect_attrs::m_share,
+ &table_session_account_connect_attrs::m_share,
NULL
};
@@ -159,7 +162,7 @@ void PFS_engine_table_share::check_all_tables(THD *thd)
DBUG_EXECUTE_IF("tampered_perfschema_table1",
{
/* Hack SETUP_INSTRUMENT, incompatible change. */
- all_shares[19]->m_field_def->count++;
+ all_shares[20]->m_field_def->count++;
});
for (current= &all_shares[0]; (*current) != NULL; current++)
@@ -683,20 +686,22 @@ PFS_unknown_acl pfs_unknown_acl;
ACL_internal_access_result
PFS_unknown_acl::check(ulong want_access, ulong *save_priv) const
{
- const ulong always_forbidden= INSERT_ACL | UPDATE_ACL | DELETE_ACL
- | CREATE_ACL | REFERENCES_ACL | INDEX_ACL | ALTER_ACL
- | CREATE_VIEW_ACL | TRIGGER_ACL | LOCK_TABLES_ACL;
+ const ulong always_forbidden= CREATE_ACL
+ | REFERENCES_ACL | INDEX_ACL | ALTER_ACL
+ | CREATE_VIEW_ACL | TRIGGER_ACL;
if (unlikely(want_access & always_forbidden))
return ACL_INTERNAL_ACCESS_DENIED;
/*
- There is no point in hidding (by enforcing ACCESS_DENIED for SELECT_ACL
+ There is no point in hiding (by enforcing ACCESS_DENIED for SELECT_ACL
on performance_schema.*) tables that do not exist anyway.
When SELECT_ACL is granted on performance_schema.* or *.*,
SELECT * from performance_schema.wrong_table
will fail with a more understandable ER_NO_SUCH_TABLE error,
instead of ER_TABLEACCESS_DENIED_ERROR.
+ The same goes for other DML (INSERT_ACL | UPDATE_ACL | DELETE_ACL),
+ for ease of use: error messages will be less surprising.
*/
return ACL_INTERNAL_ACCESS_CHECK_GRANT;
}
@@ -978,363 +983,441 @@ bool pfs_show_status(handlerton *hton, THD *thd,
total_memory+= size;
break;
case 56:
- name= "events_waits_summary_global_by_event_name.row_size";
- size= sizeof(PFS_single_stat);
- break;
- case 57:
- name= "events_waits_summary_global_by_event_name.row_count";
- size= wait_class_max;
- break;
- case 58:
- name= "events_waits_summary_global_by_event_name.memory";
- size= wait_class_max * sizeof(PFS_single_stat);
- total_memory+= size;
- break;
- case 59:
name= "(pfs_account).row_size";
size= sizeof(PFS_account);
break;
- case 60:
+ case 57:
name= "(pfs_account).row_count";
size= account_max;
break;
- case 61:
+ case 58:
name= "(pfs_account).memory";
size= account_max * sizeof(PFS_account);
total_memory+= size;
break;
- case 62:
+ case 59:
name= "events_waits_summary_by_account_by_event_name.row_size";
size= sizeof(PFS_single_stat);
break;
- case 63:
+ case 60:
name= "events_waits_summary_by_account_by_event_name.row_count";
size= account_max * wait_class_max;
break;
- case 64:
+ case 61:
name= "events_waits_summary_by_account_by_event_name.memory";
size= account_max * wait_class_max * sizeof(PFS_single_stat);
total_memory+= size;
break;
- case 65:
+ case 62:
name= "events_waits_summary_by_user_by_event_name.row_size";
size= sizeof(PFS_single_stat);
break;
- case 66:
+ case 63:
name= "events_waits_summary_by_user_by_event_name.row_count";
size= user_max * wait_class_max;
break;
- case 67:
+ case 64:
name= "events_waits_summary_by_user_by_event_name.memory";
size= user_max * wait_class_max * sizeof(PFS_single_stat);
total_memory+= size;
break;
- case 68:
+ case 65:
name= "events_waits_summary_by_host_by_event_name.row_size";
size= sizeof(PFS_single_stat);
break;
- case 69:
+ case 66:
name= "events_waits_summary_by_host_by_event_name.row_count";
size= host_max * wait_class_max;
break;
- case 70:
+ case 67:
name= "events_waits_summary_by_host_by_event_name.memory";
size= host_max * wait_class_max * sizeof(PFS_single_stat);
total_memory+= size;
break;
- case 71:
+ case 68:
name= "(pfs_user).row_size";
size= sizeof(PFS_user);
break;
- case 72:
+ case 69:
name= "(pfs_user).row_count";
size= user_max;
break;
- case 73:
+ case 70:
name= "(pfs_user).memory";
size= user_max * sizeof(PFS_user);
total_memory+= size;
break;
- case 74:
+ case 71:
name= "(pfs_host).row_size";
size= sizeof(PFS_host);
break;
- case 75:
+ case 72:
name= "(pfs_host).row_count";
size= host_max;
break;
- case 76:
+ case 73:
name= "(pfs_host).memory";
size= host_max * sizeof(PFS_host);
total_memory+= size;
break;
- case 77:
+ case 74:
name= "(pfs_stage_class).row_size";
size= sizeof(PFS_stage_class);
break;
- case 78:
+ case 75:
name= "(pfs_stage_class).row_count";
size= stage_class_max;
break;
- case 79:
+ case 76:
name= "(pfs_stage_class).memory";
size= stage_class_max * sizeof(PFS_stage_class);
total_memory+= size;
break;
- case 80:
+ case 77:
name= "events_stages_history.row_size";
size= sizeof(PFS_events_stages);
break;
- case 81:
+ case 78:
name= "events_stages_history.row_count";
size= events_stages_history_per_thread * thread_max;
break;
- case 82:
+ case 79:
name= "events_stages_history.memory";
size= events_stages_history_per_thread * thread_max
* sizeof(PFS_events_stages);
total_memory+= size;
break;
- case 83:
+ case 80:
name= "events_stages_history_long.row_size";
size= sizeof(PFS_events_stages);
break;
- case 84:
+ case 81:
name= "events_stages_history_long.row_count";
size= events_stages_history_long_size;
break;
- case 85:
+ case 82:
name= "events_stages_history_long.memory";
size= events_stages_history_long_size * sizeof(PFS_events_stages);
total_memory+= size;
break;
- case 86:
+ case 83:
name= "events_stages_summary_by_thread_by_event_name.row_size";
size= sizeof(PFS_stage_stat);
break;
- case 87:
+ case 84:
name= "events_stages_summary_by_thread_by_event_name.row_count";
size= thread_max * stage_class_max;
break;
- case 88:
+ case 85:
name= "events_stages_summary_by_thread_by_event_name.memory";
size= thread_max * stage_class_max * sizeof(PFS_stage_stat);
total_memory+= size;
break;
- case 89:
+ case 86:
name= "events_stages_summary_global_by_event_name.row_size";
size= sizeof(PFS_stage_stat);
break;
- case 90:
+ case 87:
name= "events_stages_summary_global_by_event_name.row_count";
size= stage_class_max;
break;
- case 91:
+ case 88:
name= "events_stages_summary_global_by_event_name.memory";
size= stage_class_max * sizeof(PFS_stage_stat);
total_memory+= size;
break;
- case 92:
+ case 89:
name= "events_stages_summary_by_account_by_event_name.row_size";
size= sizeof(PFS_stage_stat);
break;
- case 93:
+ case 90:
name= "events_stages_summary_by_account_by_event_name.row_count";
size= account_max * stage_class_max;
break;
- case 94:
+ case 91:
name= "events_stages_summary_by_account_by_event_name.memory";
size= account_max * stage_class_max * sizeof(PFS_stage_stat);
total_memory+= size;
break;
- case 95:
+ case 92:
name= "events_stages_summary_by_user_by_event_name.row_size";
size= sizeof(PFS_stage_stat);
break;
- case 96:
+ case 93:
name= "events_stages_summary_by_user_by_event_name.row_count";
size= user_max * stage_class_max;
break;
- case 97:
+ case 94:
name= "events_stages_summary_by_user_by_event_name.memory";
size= user_max * stage_class_max * sizeof(PFS_stage_stat);
total_memory+= size;
break;
- case 98:
+ case 95:
name= "events_stages_summary_by_host_by_event_name.row_size";
size= sizeof(PFS_stage_stat);
break;
- case 99:
+ case 96:
name= "events_stages_summary_by_host_by_event_name.row_count";
size= host_max * stage_class_max;
break;
- case 100:
+ case 97:
name= "events_stages_summary_by_host_by_event_name.memory";
size= host_max * stage_class_max * sizeof(PFS_stage_stat);
total_memory+= size;
break;
- case 101:
+ case 98:
name= "(pfs_statement_class).row_size";
size= sizeof(PFS_statement_class);
break;
- case 102:
+ case 99:
name= "(pfs_statement_class).row_count";
size= statement_class_max;
break;
- case 103:
+ case 100:
name= "(pfs_statement_class).memory";
size= statement_class_max * sizeof(PFS_statement_class);
total_memory+= size;
break;
- case 104:
+ case 101:
name= "events_statements_history.row_size";
size= sizeof(PFS_events_statements);
break;
- case 105:
+ case 102:
name= "events_statements_history.row_count";
size= events_statements_history_per_thread * thread_max;
break;
- case 106:
+ case 103:
name= "events_statements_history.memory";
size= events_statements_history_per_thread * thread_max
* sizeof(PFS_events_statements);
total_memory+= size;
break;
- case 107:
+ case 104:
name= "events_statements_history_long.row_size";
size= sizeof(PFS_events_statements);
break;
- case 108:
+ case 105:
name= "events_statements_history_long.row_count";
size= events_statements_history_long_size;
break;
- case 109:
+ case 106:
name= "events_statements_history_long.memory";
size= events_statements_history_long_size * sizeof(PFS_events_statements);
total_memory+= size;
break;
- case 110:
+ case 107:
name= "events_statements_summary_by_thread_by_event_name.row_size";
size= sizeof(PFS_statement_stat);
break;
- case 111:
+ case 108:
name= "events_statements_summary_by_thread_by_event_name.row_count";
size= thread_max * statement_class_max;
break;
- case 112:
+ case 109:
name= "events_statements_summary_by_thread_by_event_name.memory";
size= thread_max * statement_class_max * sizeof(PFS_statement_stat);
total_memory+= size;
break;
- case 113:
+ case 110:
name= "events_statements_summary_global_by_event_name.row_size";
size= sizeof(PFS_statement_stat);
break;
- case 114:
+ case 111:
name= "events_statements_summary_global_by_event_name.row_count";
size= statement_class_max;
break;
- case 115:
+ case 112:
name= "events_statements_summary_global_by_event_name.memory";
size= statement_class_max * sizeof(PFS_statement_stat);
total_memory+= size;
break;
- case 116:
+ case 113:
name= "events_statements_summary_by_account_by_event_name.row_size";
size= sizeof(PFS_statement_stat);
break;
- case 117:
+ case 114:
name= "events_statements_summary_by_account_by_event_name.row_count";
size= account_max * statement_class_max;
break;
- case 118:
+ case 115:
name= "events_statements_summary_by_account_by_event_name.memory";
size= account_max * statement_class_max * sizeof(PFS_statement_stat);
total_memory+= size;
break;
- case 119:
+ case 116:
name= "events_statements_summary_by_user_by_event_name.row_size";
size= sizeof(PFS_statement_stat);
break;
- case 120:
+ case 117:
name= "events_statements_summary_by_user_by_event_name.row_count";
size= user_max * statement_class_max;
break;
- case 121:
+ case 118:
name= "events_statements_summary_by_user_by_event_name.memory";
size= user_max * statement_class_max * sizeof(PFS_statement_stat);
total_memory+= size;
break;
- case 122:
+ case 119:
name= "events_statements_summary_by_host_by_event_name.row_size";
size= sizeof(PFS_statement_stat);
break;
- case 123:
+ case 120:
name= "events_statements_summary_by_host_by_event_name.row_count";
size= host_max * statement_class_max;
break;
- case 124:
+ case 121:
name= "events_statements_summary_by_host_by_event_name.memory";
size= host_max * statement_class_max * sizeof(PFS_statement_stat);
total_memory+= size;
break;
- case 125:
+ case 122:
name= "events_statements_current.row_size";
size= sizeof(PFS_events_statements);
break;
- case 126:
+ case 123:
name= "events_statements_current.row_count";
size= thread_max * statement_stack_max;
break;
- case 127:
+ case 124:
name= "events_statements_current.memory";
size= thread_max * statement_stack_max * sizeof(PFS_events_statements);
total_memory+= size;
break;
- case 128:
+ case 125:
name= "(pfs_socket_class).row_size";
size= sizeof(PFS_socket_class);
break;
- case 129:
+ case 126:
name= "(pfs_socket_class).row_count";
size= socket_class_max;
break;
- case 130:
+ case 127:
name= "(pfs_socket_class).memory";
size= socket_class_max * sizeof(PFS_socket_class);
total_memory+= size;
break;
- case 131:
+ case 128:
name= "socket_instances.row_size";
size= sizeof(PFS_socket);
break;
- case 132:
+ case 129:
name= "socket_instances.row_count";
size= socket_max;
break;
- case 133:
+ case 130:
name= "socket_instances.memory";
size= socket_max * sizeof(PFS_socket);
total_memory+= size;
break;
- case 134:
+ case 131:
name= "events_statements_summary_by_digest.row_size";
size= sizeof(PFS_statements_digest_stat);
break;
- case 135:
+ case 132:
name= "events_statements_summary_by_digest.row_count";
size= digest_max;
break;
- case 136:
+ case 133:
name= "events_statements_summary_by_digest.memory";
size= digest_max * sizeof(PFS_statements_digest_stat);
total_memory+= size;
- break;
+ break;
+ case 134:
+ name= "session_connect_attrs.row_size";
+ size= thread_max;
+ break;
+ case 135:
+ name= "session_connect_attrs.row_count";
+ size= session_connect_attrs_size_per_thread;
+ break;
+ case 136:
+ name= "session_connect_attrs.memory";
+ size= thread_max * session_connect_attrs_size_per_thread;
+ total_memory+= size;
+ break;
+
+ case 137:
+ name= "(account_hash).count";
+ size= account_hash.count;
+ break;
+ case 138:
+ name= "(account_hash).size";
+ size= account_hash.size;
+ break;
+ case 139:
+ name= "(digest_hash).count";
+ size= digest_hash.count;
+ break;
+ case 140:
+ name= "(digest_hash).size";
+ size= digest_hash.size;
+ break;
+ case 141:
+ name= "(filename_hash).count";
+ size= filename_hash.count;
+ break;
+ case 142:
+ name= "(filename_hash).size";
+ size= filename_hash.size;
+ break;
+ case 143:
+ name= "(host_hash).count";
+ size= host_hash.count;
+ break;
+ case 144:
+ name= "(host_hash).size";
+ size= host_hash.size;
+ break;
+ case 145:
+ name= "(setup_actor_hash).count";
+ size= setup_actor_hash.count;
+ break;
+ case 146:
+ name= "(setup_actor_hash).size";
+ size= setup_actor_hash.size;
+ break;
+ case 147:
+ name= "(setup_object_hash).count";
+ size= setup_object_hash.count;
+ break;
+ case 148:
+ name= "(setup_object_hash).size";
+ size= setup_object_hash.size;
+ break;
+ case 149:
+ name= "(table_share_hash).count";
+ size= table_share_hash.count;
+ break;
+ case 150:
+ name= "(table_share_hash).size";
+ size= table_share_hash.size;
+ break;
+ case 151:
+ name= "(user_hash).count";
+ size= user_hash.count;
+ break;
+ case 152:
+ name= "(user_hash).size";
+ size= user_hash.size;
+ break;
+ case 153:
+ /*
+ This is not a performance_schema buffer,
+ the data is maintained in the server,
+ in hostname_cache.
+ Print the size only, there are:
+ - no host_cache.count
+ - no host_cache.memory
+ */
+ name= "host_cache.size";
+ size= sizeof(Host_entry);
+ break;
+
/*
This case must be last,
for aggregation in total_memory.
*/
- case 137:
+ case 154:
name= "performance_schema.memory";
size= total_memory;
/* This will fail if something is not advertised here */
diff --git a/storage/perfschema/pfs_engine_table.h b/storage/perfschema/pfs_engine_table.h
index 40f5404d0b7..981d72ee19e 100644
--- a/storage/perfschema/pfs_engine_table.h
+++ b/storage/perfschema/pfs_engine_table.h
@@ -263,7 +263,7 @@ public:
~PFS_readonly_acl()
{}
- ACL_internal_access_result check(ulong want_access, ulong *save_priv) const;
+ virtual ACL_internal_access_result check(ulong want_access, ulong *save_priv) const;
};
/** Singleton instance of PFS_readonly_acl. */
diff --git a/storage/perfschema/pfs_events.h b/storage/perfschema/pfs_events.h
index c9586df11bd..97fb7e08d63 100644
--- a/storage/perfschema/pfs_events.h
+++ b/storage/perfschema/pfs_events.h
@@ -29,7 +29,7 @@ struct PFS_instr_class;
struct PFS_events
{
/** THREAD_ID. */
- ulong m_thread_internal_id;
+ ulonglong m_thread_internal_id;
/** EVENT_ID. */
ulonglong m_event_id;
/** END_EVENT_ID. */
diff --git a/storage/perfschema/pfs_events_waits.cc b/storage/perfschema/pfs_events_waits.cc
index 2ee9ec292a2..c8a9d20a2f1 100644
--- a/storage/perfschema/pfs_events_waits.cc
+++ b/storage/perfschema/pfs_events_waits.cc
@@ -230,16 +230,6 @@ void reset_events_waits_by_host()
}
}
-/** Reset table EVENTS_WAITS_GLOBAL_BY_EVENT_NAME data. */
-void reset_events_waits_global()
-{
- PFS_single_stat *stat= global_instr_class_waits_array;
- PFS_single_stat *stat_last= global_instr_class_waits_array + wait_class_max;
-
- for ( ; stat < stat_last; stat++)
- stat->reset();
-}
-
void reset_table_waits_by_table()
{
PFS_table_share *pfs= table_share_array;
diff --git a/storage/perfschema/pfs_global.cc b/storage/perfschema/pfs_global.cc
index 2351b829894..546597ef33e 100644
--- a/storage/perfschema/pfs_global.cc
+++ b/storage/perfschema/pfs_global.cc
@@ -18,13 +18,16 @@
Miscellaneous global dependencies (implementation).
*/
-#include "my_global.h"
-#include "my_sys.h"
#include "pfs_global.h"
-#include "my_net.h"
+#include <my_sys.h>
+#include <my_net.h>
+#ifdef HAVE_MALLOC_H
+#include <malloc.h> /* memalign() may be here */
+#endif
-#include <stdlib.h>
-#include <string.h>
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
#ifdef __WIN__
#include <winsock2.h>
@@ -45,18 +48,65 @@ void *pfs_malloc(size_t size, myf flags)
DBUG_ASSERT(! pfs_initialized);
DBUG_ASSERT(size > 0);
- void *ptr= malloc(size);
- if (likely(ptr != NULL))
- pfs_allocated_memory+= size;
- if (likely((ptr != NULL) && (flags & MY_ZEROFILL)))
+ void *ptr;
+
+#ifdef PFS_ALIGNEMENT
+#ifdef HAVE_POSIX_MEMALIGN
+ /* Linux */
+ if (unlikely(posix_memalign(& ptr, PFS_ALIGNEMENT, size)))
+ return NULL;
+#else
+#ifdef HAVE_MEMALIGN
+ /* Solaris */
+ ptr= memalign(PFS_ALIGNEMENT, size);
+ if (unlikely(ptr == NULL))
+ return NULL;
+#else
+#ifdef HAVE_ALIGNED_MALLOC
+ /* Windows */
+ ptr= _aligned_malloc(size, PFS_ALIGNEMENT);
+ if (unlikely(ptr == NULL))
+ return NULL;
+#else
+#error "Missing implementation for PFS_ALIGNEMENT"
+#endif /* HAVE_ALIGNED_MALLOC */
+#endif /* HAVE_MEMALIGN */
+#endif /* HAVE_POSIX_MEMALIGN */
+#else /* PFS_ALIGNEMENT */
+ /* Everything else */
+ ptr= malloc(size);
+ if (unlikely(ptr == NULL))
+ return NULL;
+#endif
+
+ pfs_allocated_memory+= size;
+ if (flags & MY_ZEROFILL)
memset(ptr, 0, size);
return ptr;
}
void pfs_free(void *ptr)
{
- if (ptr != NULL)
- free(ptr);
+ if (ptr == NULL)
+ return;
+
+#ifdef HAVE_POSIX_MEMALIGN
+ /* Allocated with posix_memalign() */
+ free(ptr);
+#else
+#ifdef HAVE_MEMALIGN
+ /* Allocated with memalign() */
+ free(ptr);
+#else
+#ifdef HAVE_ALIGNED_MALLOC
+ /* Allocated with _aligned_malloc() */
+ _aligned_free(ptr);
+#else
+ /* Allocated with malloc() */
+ free(ptr);
+#endif /* HAVE_ALIGNED_MALLOC */
+#endif /* HAVE_MEMALIGN */
+#endif /* HAVE_POSIX_MEMALIGN */
}
void pfs_print_error(const char *format, ...)
diff --git a/storage/perfschema/pfs_global.h b/storage/perfschema/pfs_global.h
index a0e6c97a406..f9687524cd5 100644
--- a/storage/perfschema/pfs_global.h
+++ b/storage/perfschema/pfs_global.h
@@ -16,6 +16,9 @@
#ifndef PFS_GLOBAL_H
#define PFS_GLOBAL_H
+#include "my_global.h"
+#include "my_compiler.h"
+
/**
@file storage/perfschema/pfs_global.h
Miscellaneous global dependencies (declarations).
@@ -27,6 +30,18 @@ extern bool pfs_initialized;
/** Total memory allocated by the performance schema, in bytes. */
extern size_t pfs_allocated_memory;
+#if defined(HAVE_POSIX_MEMALIGN) || defined(HAVE_MEMALIGN) || defined(HAVE_ALIGNED_MALLOC)
+#define PFS_ALIGNEMENT 64
+#define PFS_ALIGNED MY_ALIGNED(PFS_ALIGNEMENT)
+#else
+/*
+ Known platforms that do not provide aligned memory:
+ - MacOSX Darwin (osx10.5)
+ For these platforms, compile without the alignment optimization.
+*/
+#define PFS_ALIGNED
+#endif /* HAVE_POSIX_MEMALIGN || HAVE_MEMALIGN || HAVE_ALIGNED_MALLOC */
+
void *pfs_malloc(size_t size, myf flags);
/**
diff --git a/storage/perfschema/pfs_host.cc b/storage/perfschema/pfs_host.cc
index 82b78e19ce8..ac6308ff991 100644
--- a/storage/perfschema/pfs_host.cc
+++ b/storage/perfschema/pfs_host.cc
@@ -42,7 +42,7 @@ static PFS_single_stat *host_instr_class_waits_array= NULL;
static PFS_stage_stat *host_instr_class_stages_array= NULL;
static PFS_statement_stat *host_instr_class_statements_array= NULL;
-static LF_HASH host_hash;
+LF_HASH host_hash;
static bool host_hash_inited= false;
/**
@@ -146,10 +146,11 @@ C_MODE_END
*/
int init_host_hash(void)
{
- if (! host_hash_inited)
+ if ((! host_hash_inited) && (host_max > 0))
{
lf_hash_init(&host_hash, sizeof(PFS_host*), LF_HASH_UNIQUE,
0, 0, host_hash_get_key, &my_charset_bin);
+ /* host_hash.size= host_max; */
host_hash_inited= true;
}
return 0;
diff --git a/storage/perfschema/pfs_host.h b/storage/perfschema/pfs_host.h
index d04b88e62f3..eb0ff6efc6f 100644
--- a/storage/perfschema/pfs_host.h
+++ b/storage/perfschema/pfs_host.h
@@ -44,7 +44,7 @@ struct PFS_host_key
uint m_key_length;
};
-struct PFS_host : PFS_connection_slice
+struct PFS_ALIGNED PFS_host : PFS_connection_slice
{
public:
inline void init_refcount(void)
@@ -105,6 +105,8 @@ extern ulong host_lost;
extern PFS_host *host_array;
+extern LF_HASH host_hash;
+
/** @} */
#endif
diff --git a/storage/perfschema/pfs_instr.cc b/storage/perfschema/pfs_instr.cc
index 39caabaf030..7b7340cc713 100644
--- a/storage/perfschema/pfs_instr.cc
+++ b/storage/perfschema/pfs_instr.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -82,6 +82,10 @@ uint statement_stack_max;
ulong locker_lost= 0;
/** Number of statement lost. @sa STATEMENT_STACK_SIZE. */
ulong statement_lost= 0;
+/** Size of connection attribute storage per thread */
+ulong session_connect_attrs_size_per_thread;
+/** Number of connection attributes lost */
+ulong session_connect_attrs_lost= 0;
/**
Mutex instrumentation instances array.
@@ -140,11 +144,10 @@ PFS_table *table_array= NULL;
*/
PFS_socket *socket_array= NULL;
-PFS_single_stat *global_instr_class_waits_array= NULL;
PFS_stage_stat *global_instr_class_stages_array= NULL;
PFS_statement_stat *global_instr_class_statements_array= NULL;
-static volatile uint32 thread_internal_id_counter= 0;
+static volatile uint64 thread_internal_id_counter= 0;
static uint thread_instr_class_waits_sizing;
static uint thread_instr_class_stages_sizing;
@@ -157,9 +160,10 @@ static PFS_events_waits *thread_waits_history_array= NULL;
static PFS_events_stages *thread_stages_history_array= NULL;
static PFS_events_statements *thread_statements_history_array= NULL;
static PFS_events_statements *thread_statements_stack_array= NULL;
+static char *thread_session_connect_attrs_array= NULL;
/** Hash table for instrumented files. */
-static LF_HASH filename_hash;
+LF_HASH filename_hash;
/** True if filename_hash is initialized. */
static bool filename_hash_inited= false;
@@ -174,6 +178,7 @@ int init_instruments(const PFS_global_param *param)
uint thread_stages_history_sizing;
uint thread_statements_history_sizing;
uint thread_statements_stack_sizing;
+ uint thread_session_connect_attrs_sizing;
uint index;
DBUG_ENTER("init_instruments");
@@ -221,6 +226,11 @@ int init_instruments(const PFS_global_param *param)
thread_instr_class_statements_sizing= param->m_thread_sizing
* param->m_statement_class_sizing;
+ session_connect_attrs_size_per_thread= param->m_session_connect_attrs_sizing;
+ thread_session_connect_attrs_sizing= param->m_thread_sizing
+ * session_connect_attrs_size_per_thread;
+ session_connect_attrs_lost= 0;
+
mutex_array= NULL;
rwlock_array= NULL;
cond_array= NULL;
@@ -366,6 +376,14 @@ int init_instruments(const PFS_global_param *param)
thread_instr_class_statements_array[index].reset();
}
+ if (thread_session_connect_attrs_sizing > 0)
+ {
+ thread_session_connect_attrs_array=
+ (char *)pfs_malloc(thread_session_connect_attrs_sizing, MYF(MY_ZEROFILL));
+ if (unlikely(thread_session_connect_attrs_array == NULL))
+ return 1;
+ }
+
for (index= 0; index < thread_max; index++)
{
thread_array[index].m_waits_history=
@@ -382,18 +400,8 @@ int init_instruments(const PFS_global_param *param)
&thread_statements_stack_array[index * statement_stack_max];
thread_array[index].m_instr_class_statements_stats=
&thread_instr_class_statements_array[index * statement_class_max];
- }
-
- if (wait_class_max > 0)
- {
- global_instr_class_waits_array=
- PFS_MALLOC_ARRAY(wait_class_max,
- PFS_single_stat, MYF(MY_ZEROFILL));
- if (unlikely(global_instr_class_waits_array == NULL))
- DBUG_RETURN(1);
-
- for (index= 0; index < wait_class_max; index++)
- global_instr_class_waits_array[index].reset();
+ thread_array[index].m_session_connect_attrs=
+ &thread_session_connect_attrs_array[index * session_connect_attrs_size_per_thread];
}
if (stage_class_max > 0)
@@ -461,8 +469,6 @@ void cleanup_instruments(void)
thread_statements_stack_array= NULL;
pfs_free(thread_instr_class_waits_array);
thread_instr_class_waits_array= NULL;
- pfs_free(global_instr_class_waits_array);
- global_instr_class_waits_array= NULL;
pfs_free(global_instr_class_stages_array);
global_instr_class_stages_array= NULL;
pfs_free(global_instr_class_statements_array);
@@ -471,6 +477,9 @@ void cleanup_instruments(void)
thread_instr_class_statements_array= NULL;
pfs_free(thread_instr_class_stages_array);
thread_instr_class_stages_array= NULL;
+ pfs_free(thread_session_connect_attrs_array);
+ thread_session_connect_attrs_array=NULL;
+
DBUG_VOID_RETURN;
}
@@ -502,10 +511,11 @@ int init_file_hash(void)
{
DBUG_ENTER("init_file_hash");
- if (! filename_hash_inited)
+ if ((! filename_hash_inited) && (file_max > 0))
{
lf_hash_init(&filename_hash, sizeof(PFS_file*), LF_HASH_UNIQUE,
0, 0, filename_hash_get_key, &my_charset_bin);
+ /* filename_hash.size= file_max; */
filename_hash_inited= true;
}
DBUG_RETURN(0);
@@ -604,7 +614,7 @@ void PFS_scan::init(uint random, uint max_size)
*/
PFS_mutex* create_mutex(PFS_mutex_class *klass, const void *identity)
{
- static uint mutex_monotonic_index= 0;
+ static uint PFS_ALIGNED mutex_monotonic_index= 0;
uint index;
uint attempts= 0;
PFS_mutex *pfs;
@@ -642,8 +652,7 @@ PFS_mutex* create_mutex(PFS_mutex_class *klass, const void *identity)
pfs->m_class= klass;
pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
pfs->m_timed= klass->m_timed;
- pfs->m_wait_stat.reset();
- pfs->m_lock_stat.reset();
+ pfs->m_mutex_stat.reset();
pfs->m_owner= NULL;
pfs->m_last_locked= 0;
pfs->m_lock.dirty_to_allocated();
@@ -667,10 +676,9 @@ void destroy_mutex(PFS_mutex *pfs)
DBUG_ENTER("destroy_mutex");
DBUG_ASSERT(pfs != NULL);
PFS_mutex_class *klass= pfs->m_class;
- /* Aggregate to EVENTS_WAITS_SUMMARY_BY_EVENT_NAME */
- uint index= klass->m_event_name_index;
- global_instr_class_waits_array[index].aggregate(& pfs->m_wait_stat);
- pfs->m_wait_stat.reset();
+ /* Aggregate to EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME */
+ klass->m_mutex_stat.aggregate(& pfs->m_mutex_stat);
+ pfs->m_mutex_stat.reset();
if (klass->is_singleton())
klass->m_singleton= NULL;
pfs->m_lock.allocated_to_free();
@@ -685,7 +693,7 @@ void destroy_mutex(PFS_mutex *pfs)
*/
PFS_rwlock* create_rwlock(PFS_rwlock_class *klass, const void *identity)
{
- static uint rwlock_monotonic_index= 0;
+ static uint PFS_ALIGNED rwlock_monotonic_index= 0;
uint index;
uint attempts= 0;
PFS_rwlock *pfs;
@@ -705,10 +713,8 @@ PFS_rwlock* create_rwlock(PFS_rwlock_class *klass, const void *identity)
pfs->m_class= klass;
pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
pfs->m_timed= klass->m_timed;
- pfs->m_wait_stat.reset();
+ pfs->m_rwlock_stat.reset();
pfs->m_lock.dirty_to_allocated();
- pfs->m_read_lock_stat.reset();
- pfs->m_write_lock_stat.reset();
pfs->m_writer= NULL;
pfs->m_readers= 0;
pfs->m_last_written= 0;
@@ -733,10 +739,9 @@ void destroy_rwlock(PFS_rwlock *pfs)
DBUG_ENTER("destroy_rwlock");
DBUG_ASSERT(pfs != NULL);
PFS_rwlock_class *klass= pfs->m_class;
- /* Aggregate to EVENTS_WAITS_SUMMARY_BY_EVENT_NAME */
- uint index= klass->m_event_name_index;
- global_instr_class_waits_array[index].aggregate(& pfs->m_wait_stat);
- pfs->m_wait_stat.reset();
+ /* Aggregate to EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME */
+ klass->m_rwlock_stat.aggregate(& pfs->m_rwlock_stat);
+ pfs->m_rwlock_stat.reset();
if (klass->is_singleton())
klass->m_singleton= NULL;
pfs->m_lock.allocated_to_free();
@@ -751,7 +756,7 @@ void destroy_rwlock(PFS_rwlock *pfs)
*/
PFS_cond* create_cond(PFS_cond_class *klass, const void *identity)
{
- static uint cond_monotonic_index= 0;
+ static uint PFS_ALIGNED cond_monotonic_index= 0;
uint index;
uint attempts= 0;
PFS_cond *pfs;
@@ -792,13 +797,12 @@ PFS_cond* create_cond(PFS_cond_class *klass, const void *identity)
*/
void destroy_cond(PFS_cond *pfs)
{
- DBUG_ENTER("destroy_thread");
+ DBUG_ENTER("destroy_cond");
DBUG_ASSERT(pfs != NULL);
PFS_cond_class *klass= pfs->m_class;
- /* Aggregate to EVENTS_WAITS_SUMMARY_BY_EVENT_NAME */
- uint index= klass->m_event_name_index;
- global_instr_class_waits_array[index].aggregate(& pfs->m_wait_stat);
+ /* Aggregate to EVENTS_WAITS_SUMMARY_GLOBAL_BY_EVENT_NAME */
+ klass->m_cond_stat.aggregate(& pfs->m_cond_stat);
pfs->m_wait_stat.reset();
if (klass->is_singleton())
klass->m_singleton= NULL;
@@ -812,19 +816,32 @@ PFS_thread* PFS_thread::get_current_thread()
return pfs;
}
+void PFS_thread::reset_session_connect_attrs()
+{
+ m_session_connect_attrs_length= 0;
+ m_session_connect_attrs_cs= NULL;
+
+ if ((m_session_connect_attrs != NULL) &&
+ (session_connect_attrs_size_per_thread > 0) )
+ {
+ /* Do not keep user data */
+ memset(m_session_connect_attrs, 0, session_connect_attrs_size_per_thread);
+ }
+}
+
/**
Create instrumentation for a thread instance.
@param klass the thread class
@param identity the thread address,
or a value characteristic of this thread
- @param thread_id the PROCESSLIST thread id,
+ @param processlist_id the PROCESSLIST id,
or 0 if unknown
@return a thread instance, or NULL
*/
PFS_thread* create_thread(PFS_thread_class *klass, const void *identity,
- ulong thread_id)
+ ulonglong processlist_id)
{
- static uint thread_monotonic_index= 0;
+ static uint PFS_ALIGNED thread_monotonic_index= 0;
uint index;
uint attempts= 0;
PFS_thread *pfs;
@@ -841,9 +858,9 @@ PFS_thread* create_thread(PFS_thread_class *klass, const void *identity,
if (pfs->m_lock.free_to_dirty())
{
pfs->m_thread_internal_id=
- PFS_atomic::add_u32(&thread_internal_id_counter, 1);
+ PFS_atomic::add_u64(&thread_internal_id_counter, 1);
pfs->m_parent_thread_internal_id= 0;
- pfs->m_thread_id= thread_id;
+ pfs->m_processlist_id= processlist_id;
pfs->m_event_id= 1;
pfs->m_enabled= true;
pfs->m_class= klass;
@@ -856,6 +873,7 @@ PFS_thread* create_thread(PFS_thread_class *klass, const void *identity,
pfs->m_statements_history_index= 0;
pfs->reset_stats();
+ pfs->reset_session_connect_attrs();
pfs->m_filename_hash_pins= NULL;
pfs->m_table_share_hash_pins= NULL;
@@ -871,8 +889,11 @@ PFS_thread* create_thread(PFS_thread_class *klass, const void *identity,
pfs->m_dbname_length= 0;
pfs->m_command= 0;
pfs->m_start_time= 0;
+ pfs->m_processlist_state_ptr= NULL;
pfs->m_processlist_state_length= 0;
+ pfs->m_processlist_info_ptr= NULL;
pfs->m_processlist_info_length= 0;
+ pfs->m_processlist_lock.set_allocated();
pfs->m_host= NULL;
pfs->m_user= NULL;
@@ -999,6 +1020,7 @@ PFS_socket *sanitize_socket(PFS_socket *unsafe)
void destroy_thread(PFS_thread *pfs)
{
DBUG_ASSERT(pfs != NULL);
+ pfs->reset_session_connect_attrs();
if (pfs->m_account != NULL)
{
pfs->m_account->release();
@@ -1084,11 +1106,12 @@ LF_PINS* get_filename_hash_pins(PFS_thread *thread)
@param klass the file class
@param filename the file name
@param len the length in bytes of filename
+ @param create create a file instance if none found
@return a file instance, or NULL
*/
PFS_file*
find_or_create_file(PFS_thread *thread, PFS_file_class *klass,
- const char *filename, uint len)
+ const char *filename, uint len, bool create)
{
PFS_file *pfs;
LF_PINS *pins;
@@ -1096,6 +1119,8 @@ find_or_create_file(PFS_thread *thread, PFS_file_class *klass,
const char *safe_filename;
DBUG_ENTER("find_or_create_file");
+ DBUG_ASSERT(klass != NULL || ! create);
+
pins= get_filename_hash_pins(thread);
if (unlikely(pins == NULL))
{
@@ -1171,7 +1196,7 @@ find_or_create_file(PFS_thread *thread, PFS_file_class *klass,
/* Append the unresolved file name to the resolved path */
char *ptr= buffer + strlen(buffer);
char *buf_end= &buffer[sizeof(buffer)-1];
- if (buf_end > ptr)
+ if ((buf_end > ptr) && (*(ptr-1) != FN_LIBCHAR))
*ptr++= FN_LIBCHAR;
if (buf_end > ptr)
strncpy(ptr, safe_filename + dirlen, buf_end - ptr);
@@ -1183,7 +1208,7 @@ find_or_create_file(PFS_thread *thread, PFS_file_class *klass,
PFS_file **entry;
uint retry_count= 0;
const uint retry_max= 3;
- static uint file_monotonic_index= 0;
+ static uint PFS_ALIGNED file_monotonic_index= 0;
uint index;
uint attempts= 0;
@@ -1202,6 +1227,12 @@ search:
lf_hash_search_unpin(pins);
+ if (! create)
+ {
+ /* No lost counter, just looking for the file existence. */
+ DBUG_RETURN(NULL);
+ }
+
while (++attempts <= file_max)
{
/* See create_mutex() */
@@ -1218,7 +1249,6 @@ search:
strncpy(pfs->m_filename, normalized_filename, normalized_length);
pfs->m_filename[normalized_length]= '\0';
pfs->m_filename_length= normalized_length;
- pfs->m_wait_stat.reset();
pfs->m_file_stat.m_open_count= 1;
pfs->m_file_stat.m_io_stat.reset();
pfs->m_identity= (const void *)pfs;
@@ -1250,7 +1280,7 @@ search:
/* OOM in lf_hash_insert */
file_lost++;
- return NULL;
+ DBUG_RETURN(NULL);
}
}
}
@@ -1285,14 +1315,9 @@ void destroy_file(PFS_thread *thread, PFS_file *pfs)
DBUG_ASSERT(pfs != NULL);
PFS_file_class *klass= pfs->m_class;
- /* Aggregate to EVENTS_WAITS_SUMMARY_BY_EVENT_NAME */
- uint index= klass->m_event_name_index;
- global_instr_class_waits_array[index].aggregate(& pfs->m_wait_stat);
- pfs->m_wait_stat.reset();
-
/* Aggregate to FILE_SUMMARY_BY_EVENT_NAME */
- klass->m_file_stat.m_io_stat.aggregate(& pfs->m_file_stat.m_io_stat);
- pfs->m_file_stat.m_io_stat.reset();
+ klass->m_file_stat.aggregate(& pfs->m_file_stat);
+ pfs->m_file_stat.reset();
if (klass->is_singleton())
klass->m_singleton= NULL;
@@ -1318,7 +1343,7 @@ void destroy_file(PFS_thread *thread, PFS_file *pfs)
PFS_table* create_table(PFS_table_share *share, PFS_thread *opening_thread,
const void *identity)
{
- static uint table_monotonic_index= 0;
+ static uint PFS_ALIGNED table_monotonic_index= 0;
uint index;
uint attempts= 0;
PFS_table *pfs;
@@ -1364,23 +1389,33 @@ void PFS_table::sanitized_aggregate(void)
and not own the table handle.
*/
PFS_table_share *safe_share= sanitize_table_share(m_share);
- PFS_thread *safe_thread= sanitize_thread(m_thread_owner);
- if ((safe_share != NULL && safe_thread != NULL) &&
- (m_has_io_stats || m_has_lock_stats))
+ if (safe_share != NULL)
{
- safe_aggregate(& m_table_stat, safe_share, safe_thread);
- m_has_io_stats= false;
- m_has_lock_stats= false;
+ if (m_has_io_stats && m_has_lock_stats)
+ {
+ safe_aggregate(& m_table_stat, safe_share);
+ m_has_io_stats= false;
+ m_has_lock_stats= false;
+ }
+ else if (m_has_io_stats)
+ {
+ safe_aggregate_io(& m_table_stat, safe_share);
+ m_has_io_stats= false;
+ }
+ else if (m_has_lock_stats)
+ {
+ safe_aggregate_lock(& m_table_stat, safe_share);
+ m_has_lock_stats= false;
+ }
}
}
void PFS_table::sanitized_aggregate_io(void)
{
PFS_table_share *safe_share= sanitize_table_share(m_share);
- PFS_thread *safe_thread= sanitize_thread(m_thread_owner);
- if (safe_share != NULL && safe_thread != NULL && m_has_io_stats)
+ if (safe_share != NULL && m_has_io_stats)
{
- safe_aggregate_io(& m_table_stat, safe_share, safe_thread);
+ safe_aggregate_io(& m_table_stat, safe_share);
m_has_io_stats= false;
}
}
@@ -1388,96 +1423,44 @@ void PFS_table::sanitized_aggregate_io(void)
void PFS_table::sanitized_aggregate_lock(void)
{
PFS_table_share *safe_share= sanitize_table_share(m_share);
- PFS_thread *safe_thread= sanitize_thread(m_thread_owner);
- if (safe_share != NULL && safe_thread != NULL && m_has_lock_stats)
+ if (safe_share != NULL && m_has_lock_stats)
{
- safe_aggregate_lock(& m_table_stat, safe_share, safe_thread);
+ safe_aggregate_lock(& m_table_stat, safe_share);
m_has_lock_stats= false;
}
}
void PFS_table::safe_aggregate(PFS_table_stat *table_stat,
- PFS_table_share *table_share,
- PFS_thread *thread)
+ PFS_table_share *table_share)
{
DBUG_ASSERT(table_stat != NULL);
DBUG_ASSERT(table_share != NULL);
- DBUG_ASSERT(thread != NULL);
-
- if (flag_thread_instrumentation && thread->m_enabled)
- {
- PFS_single_stat *event_name_array;
- uint index;
- event_name_array= thread->m_instr_class_waits_stats;
-
- /*
- Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME
- (for wait/io/table/sql/handler)
- */
- index= global_table_io_class.m_event_name_index;
- table_stat->sum_io(& event_name_array[index]);
- /*
- Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME
- (for wait/lock/table/sql/handler)
- */
- index= global_table_lock_class.m_event_name_index;
- table_stat->sum_lock(& event_name_array[index]);
- }
+ uint key_count= sanitize_index_count(table_share->m_key_count);
/* Aggregate to TABLE_IO_SUMMARY, TABLE_LOCK_SUMMARY */
- table_share->m_table_stat.aggregate(table_stat);
+ table_share->m_table_stat.aggregate(table_stat, key_count);
table_stat->fast_reset();
}
void PFS_table::safe_aggregate_io(PFS_table_stat *table_stat,
- PFS_table_share *table_share,
- PFS_thread *thread)
+ PFS_table_share *table_share)
{
DBUG_ASSERT(table_stat != NULL);
DBUG_ASSERT(table_share != NULL);
- DBUG_ASSERT(thread != NULL);
-
- if (flag_thread_instrumentation && thread->m_enabled)
- {
- PFS_single_stat *event_name_array;
- uint index;
- event_name_array= thread->m_instr_class_waits_stats;
- /*
- Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME
- (for wait/io/table/sql/handler)
- */
- index= global_table_io_class.m_event_name_index;
- table_stat->sum_io(& event_name_array[index]);
- }
+ uint key_count= sanitize_index_count(table_share->m_key_count);
/* Aggregate to TABLE_IO_SUMMARY */
- table_share->m_table_stat.aggregate_io(table_stat);
+ table_share->m_table_stat.aggregate_io(table_stat, key_count);
table_stat->fast_reset_io();
}
void PFS_table::safe_aggregate_lock(PFS_table_stat *table_stat,
- PFS_table_share *table_share,
- PFS_thread *thread)
+ PFS_table_share *table_share)
{
DBUG_ASSERT(table_stat != NULL);
DBUG_ASSERT(table_share != NULL);
- DBUG_ASSERT(thread != NULL);
-
- if (flag_thread_instrumentation && thread->m_enabled)
- {
- PFS_single_stat *event_name_array;
- uint index;
- event_name_array= thread->m_instr_class_waits_stats;
-
- /*
- Aggregate to EVENTS_WAITS_SUMMARY_BY_THREAD_BY_EVENT_NAME
- (for wait/lock/table/sql/handler)
- */
- index= global_table_lock_class.m_event_name_index;
- table_stat->sum_lock(& event_name_array[index]);
- }
/* Aggregate to TABLE_LOCK_SUMMARY */
table_share->m_table_stat.aggregate_lock(table_stat);
@@ -1504,47 +1487,59 @@ void destroy_table(PFS_table *pfs)
@param identity the socket descriptor
@return a socket instance, or NULL
*/
-PFS_socket* create_socket(PFS_socket_class *klass, const void *identity)
+PFS_socket* create_socket(PFS_socket_class *klass, const my_socket *fd,
+ const struct sockaddr *addr, socklen_t addr_len)
{
- PFS_scan scan;
+ static uint PFS_ALIGNED socket_monotonic_index= 0;
+ uint index;
+ uint attempts= 0;
+ PFS_socket *pfs;
DBUG_ENTER("create_socket");
- /**
- Unlike other instrumented objects, there is no socket 'object' to use as a
- unique identifier. Instead, a pointer to the PFS_socket object will be used
- to identify this socket instance. The socket descriptor will be used to
- seed the the random index assignment.
- */
- my_socket fd= likely(identity != NULL) ?
- *(reinterpret_cast<const my_socket*>(identity)) : 0;
- my_ptrdiff_t ptr= fd;
- uint random= randomized_index((const void *)ptr, socket_max);
-
- for (scan.init(random, socket_max);
- scan.has_pass();
- scan.next_pass())
- {
- PFS_socket *pfs= socket_array + scan.first();
- PFS_socket *pfs_last= socket_array + scan.last();
- for ( ; pfs < pfs_last; pfs++)
+ uint fd_used= 0;
+ uint addr_len_used= addr_len;
+
+ if (fd != NULL)
+ fd_used= *fd;
+
+ if (addr_len_used > sizeof(sockaddr_storage))
+ addr_len_used= sizeof(sockaddr_storage);
+
+ while (++attempts <= socket_max)
+ {
+ index= PFS_atomic::add_u32(& socket_monotonic_index, 1) % socket_max;
+ pfs= socket_array + index;
+
+ if (pfs->m_lock.is_free())
{
- if (pfs->m_lock.is_free())
+ if (pfs->m_lock.free_to_dirty())
{
- if (pfs->m_lock.free_to_dirty())
+ pfs->m_fd= fd_used;
+ /* There is no socket object, so we use the instrumentation. */
+ pfs->m_identity= pfs;
+ pfs->m_class= klass;
+ pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
+ pfs->m_timed= klass->m_timed;
+ pfs->m_idle= false;
+ pfs->m_socket_stat.reset();
+ pfs->m_thread_owner= NULL;
+
+ pfs->m_addr_len= addr_len_used;
+ if ((addr != NULL) && (addr_len_used > 0))
{
- pfs->m_fd= fd;
- pfs->m_identity= pfs;
- pfs->m_class= klass;
- pfs->m_enabled= klass->m_enabled && flag_global_instrumentation;
- pfs->m_timed= klass->m_timed;
- pfs->m_idle= false;
- pfs->m_socket_stat.reset();
- pfs->m_lock.dirty_to_allocated();
- pfs->m_thread_owner= NULL;
- if (klass->is_singleton())
- klass->m_singleton= pfs;
- DBUG_RETURN(pfs);
+ pfs->m_addr_len= addr_len_used;
+ memcpy(&pfs->m_sock_addr, addr, addr_len_used);
}
+ else
+ {
+ pfs->m_addr_len= 0;
+ }
+
+ pfs->m_lock.dirty_to_allocated();
+
+ if (klass->is_singleton())
+ klass->m_singleton= pfs;
+ DBUG_RETURN(pfs);
}
}
}
@@ -1598,7 +1593,7 @@ static void reset_mutex_waits_by_instance(void)
DBUG_ENTER("reset_mutex_waits_by_instance");
for ( ; pfs < pfs_last; pfs++)
- pfs->m_wait_stat.reset();
+ pfs->m_mutex_stat.reset();
DBUG_VOID_RETURN;
}
@@ -1609,7 +1604,7 @@ static void reset_rwlock_waits_by_instance(void)
DBUG_ENTER("reset_rwlock_waits_by_instance");
for ( ; pfs < pfs_last; pfs++)
- pfs->m_wait_stat.reset();
+ pfs->m_rwlock_stat.reset();
DBUG_VOID_RETURN;
}
@@ -1620,7 +1615,7 @@ static void reset_cond_waits_by_instance(void)
DBUG_ENTER("reset_cond_waits_by_instance");
for ( ; pfs < pfs_last; pfs++)
- pfs->m_wait_stat.reset();
+ pfs->m_cond_stat.reset();
DBUG_VOID_RETURN;
}
@@ -1678,15 +1673,6 @@ void reset_socket_instance_io(void)
DBUG_VOID_RETURN;
}
-void reset_global_wait_stat()
-{
- PFS_single_stat *stat= global_instr_class_waits_array;
- PFS_single_stat *stat_last= global_instr_class_waits_array + wait_class_max;
-
- for ( ; stat < stat_last; stat++)
- stat->reset();
-}
-
void aggregate_all_event_names(PFS_single_stat *from_array,
PFS_single_stat *to_array)
{
diff --git a/storage/perfschema/pfs_instr.h b/storage/perfschema/pfs_instr.h
index b579c1d7902..2ea44830d2b 100644
--- a/storage/perfschema/pfs_instr.h
+++ b/storage/perfschema/pfs_instr.h
@@ -34,6 +34,8 @@ struct PFS_socket_class;
#else
#include <arpa/inet.h>
#endif
+#include "my_global.h"
+#include "my_compiler.h"
#include "pfs_lock.h"
#include "pfs_stat.h"
#include "pfs_instr_class.h"
@@ -63,24 +65,17 @@ struct PFS_instr
bool m_enabled;
/** Timed flag. */
bool m_timed;
- /** Instrument wait statistics. */
- PFS_single_stat m_wait_stat;
};
/** Instrumented mutex implementation. @see PSI_mutex. */
-struct PFS_mutex : public PFS_instr
+struct PFS_ALIGNED PFS_mutex : public PFS_instr
{
/** Mutex identity, typically a pthread_mutex_t. */
const void *m_identity;
/** Mutex class. */
PFS_mutex_class *m_class;
- /** Instrument wait statistics. */
- PFS_single_stat m_wait_stat;
- /**
- Mutex lock usage statistics.
- This statistic is not exposed in user visible tables yet.
- */
- PFS_single_stat m_lock_stat;
+ /** Instrument statistics. */
+ PFS_mutex_stat m_mutex_stat;
/** Current owner. */
PFS_thread *m_owner;
/**
@@ -91,24 +86,14 @@ struct PFS_mutex : public PFS_instr
};
/** Instrumented rwlock implementation. @see PSI_rwlock. */
-struct PFS_rwlock : public PFS_instr
+struct PFS_ALIGNED PFS_rwlock : public PFS_instr
{
/** RWLock identity, typically a pthread_rwlock_t. */
const void *m_identity;
/** RWLock class. */
PFS_rwlock_class *m_class;
- /** Instrument wait statistics. */
- PFS_single_stat m_wait_stat;
- /**
- RWLock read lock usage statistics.
- This statistic is not exposed in user visible tables yet.
- */
- PFS_single_stat m_read_lock_stat;
- /**
- RWLock write lock usage statistics.
- This statistic is not exposed in user visible tables yet.
- */
- PFS_single_stat m_write_lock_stat;
+ /** Instrument statistics. */
+ PFS_rwlock_stat m_rwlock_stat;
/** Current writer thread. */
PFS_thread *m_writer;
/** Current count of readers. */
@@ -126,7 +111,7 @@ struct PFS_rwlock : public PFS_instr
};
/** Instrumented cond implementation. @see PSI_cond. */
-struct PFS_cond : public PFS_instr
+struct PFS_ALIGNED PFS_cond : public PFS_instr
{
/** Condition identity, typically a pthread_cond_t. */
const void *m_identity;
@@ -139,7 +124,7 @@ struct PFS_cond : public PFS_instr
};
/** Instrumented File and FILE implementation. @see PSI_file. */
-struct PFS_file : public PFS_instr
+struct PFS_ALIGNED PFS_file : public PFS_instr
{
uint32 get_version()
{ return m_lock.get_version(); }
@@ -152,14 +137,12 @@ struct PFS_file : public PFS_instr
uint m_filename_length;
/** File class. */
PFS_file_class *m_class;
- /** Instrument wait statistics. */
- PFS_single_stat m_wait_stat;
/** File usage statistics. */
PFS_file_stat m_file_stat;
};
/** Instrumented table implementation. @see PSI_table. */
-struct PFS_table
+struct PFS_ALIGNED PFS_table
{
/**
True if table io instrumentation is enabled.
@@ -196,12 +179,22 @@ public:
*/
void aggregate(void)
{
- if (likely((m_thread_owner != NULL) && (m_has_io_stats || m_has_lock_stats)))
+ if (m_has_io_stats && m_has_lock_stats)
{
- safe_aggregate(& m_table_stat, m_share, m_thread_owner);
+ safe_aggregate(& m_table_stat, m_share);
m_has_io_stats= false;
m_has_lock_stats= false;
}
+ else if (m_has_io_stats)
+ {
+ safe_aggregate_io(& m_table_stat, m_share);
+ m_has_io_stats= false;
+ }
+ else if (m_has_lock_stats)
+ {
+ safe_aggregate_lock(& m_table_stat, m_share);
+ m_has_lock_stats= false;
+ }
}
/**
@@ -238,18 +231,15 @@ public:
private:
static void safe_aggregate(PFS_table_stat *stat,
- PFS_table_share *safe_share,
- PFS_thread *safe_thread);
+ PFS_table_share *safe_share);
static void safe_aggregate_io(PFS_table_stat *stat,
- PFS_table_share *safe_share,
- PFS_thread *safe_thread);
+ PFS_table_share *safe_share);
static void safe_aggregate_lock(PFS_table_stat *stat,
- PFS_table_share *safe_share,
- PFS_thread *safe_thread);
+ PFS_table_share *safe_share);
};
/** Instrumented socket implementation. @see PSI_socket. */
-struct PFS_socket : public PFS_instr
+struct PFS_ALIGNED PFS_socket : public PFS_instr
{
uint32 get_version()
{ return m_lock.get_version(); }
@@ -371,7 +361,7 @@ private:
/** Instrumented thread implementation. @see PSI_thread. */
-struct PFS_thread : PFS_connection_slice
+struct PFS_ALIGNED PFS_thread : PFS_connection_slice
{
static PFS_thread* get_current_thread(void);
@@ -400,11 +390,11 @@ struct PFS_thread : PFS_connection_slice
/** Pins for digest_hash. */
LF_PINS *m_digest_hash_pins;
/** Internal thread identifier, unique. */
- ulong m_thread_internal_id;
+ ulonglong m_thread_internal_id;
/** Parent internal thread identifier. */
- ulong m_parent_thread_internal_id;
+ ulonglong m_parent_thread_internal_id;
/** External (SHOW PROCESSLIST) thread identifier, not unique. */
- ulong m_thread_id;
+ ulong m_processlist_id;
/** Thread class. */
PFS_thread_class *m_class;
/**
@@ -486,6 +476,8 @@ struct PFS_thread : PFS_connection_slice
int m_command;
/** Start time. */
time_t m_start_time;
+ /** Lock for Processlist state, Processlist info. */
+ pfs_lock m_processlist_lock;
/** Processlist state. */
const char *m_processlist_state_ptr;
/** Length of @c m_processlist_state_ptr. */
@@ -504,9 +496,18 @@ struct PFS_thread : PFS_connection_slice
PFS_host *m_host;
PFS_user *m_user;
PFS_account *m_account;
+
+ /** Reset session connect attributes */
+ void reset_session_connect_attrs();
+
+ /** a buffer for the connection attributes */
+ char *m_session_connect_attrs;
+ /** length used by @c m_connect_attrs */
+ uint m_session_connect_attrs_length;
+ /** character set in which @c m_connect_attrs are encoded */
+ const CHARSET_INFO *m_session_connect_attrs_cs;
};
-extern PFS_single_stat *global_instr_class_waits_array;
extern PFS_stage_stat *global_instr_class_stages_array;
extern PFS_statement_stat *global_instr_class_statements_array;
@@ -529,12 +530,12 @@ PFS_cond* create_cond(PFS_cond_class *klass, const void *identity);
void destroy_cond(PFS_cond *pfs);
PFS_thread* create_thread(PFS_thread_class *klass, const void *identity,
- ulong thread_id);
+ ulonglong processlist_id);
void destroy_thread(PFS_thread *pfs);
PFS_file* find_or_create_file(PFS_thread *thread, PFS_file_class *klass,
- const char *filename, uint len);
+ const char *filename, uint len, bool create);
void release_file(PFS_file *pfs);
void destroy_file(PFS_thread *thread, PFS_file *pfs);
@@ -542,7 +543,10 @@ PFS_table* create_table(PFS_table_share *share, PFS_thread *opening_thread,
const void *identity);
void destroy_table(PFS_table *pfs);
-PFS_socket* create_socket(PFS_socket_class *socket_class, const void *identity);
+PFS_socket* create_socket(PFS_socket_class *socket_class,
+ const my_socket *fd,
+ const struct sockaddr *addr,
+ socklen_t addr_len);
void destroy_socket(PFS_socket *pfs);
/* For iterators and show status. */
@@ -568,6 +572,8 @@ extern ulong events_stages_history_per_thread;
extern ulong events_statements_history_per_thread;
extern ulong locker_lost;
extern ulong statement_lost;
+extern ulong session_connect_attrs_lost;
+extern ulong session_connect_attrs_size_per_thread;
/* Exposing the data directly, for iterators. */
@@ -624,6 +630,8 @@ void update_socket_derived_flags();
/** Update derived flags for all instruments. */
void update_instruments_derived_flags();
+extern LF_HASH filename_hash;
+
/** @} */
#endif
diff --git a/storage/perfschema/pfs_instr_class.cc b/storage/perfschema/pfs_instr_class.cc
index 0a4b47404a4..24a06cf494c 100644
--- a/storage/perfschema/pfs_instr_class.cc
+++ b/storage/perfschema/pfs_instr_class.cc
@@ -135,9 +135,12 @@ static PFS_thread_class *thread_class_array= NULL;
*/
PFS_table_share *table_share_array= NULL;
-PFS_instr_class global_table_io_class;
-PFS_instr_class global_table_lock_class;
-PFS_instr_class global_idle_class;
+PFS_ALIGNED PFS_single_stat global_idle_stat;
+PFS_ALIGNED PFS_table_io_stat global_table_io_stat;
+PFS_ALIGNED PFS_table_lock_stat global_table_lock_stat;
+PFS_ALIGNED PFS_instr_class global_table_io_class;
+PFS_ALIGNED PFS_instr_class global_table_lock_class;
+PFS_ALIGNED PFS_instr_class global_idle_class;
/** Class-timer map */
enum_timer_name *class_timers[] =
@@ -165,7 +168,7 @@ enum_timer_name *class_timers[] =
@sa table_share_hash_get_key
@sa get_table_share_hash_pins
*/
-static LF_HASH table_share_hash;
+LF_HASH table_share_hash;
/** True if table_share_hash is initialized. */
static bool table_share_hash_inited= false;
@@ -193,19 +196,17 @@ uint mutex_class_start= 0;
uint rwlock_class_start= 0;
uint cond_class_start= 0;
uint file_class_start= 0;
-uint table_class_start= 0;
uint wait_class_max= 0;
uint socket_class_start= 0;
void init_event_name_sizing(const PFS_global_param *param)
{
- mutex_class_start= 0;
+ mutex_class_start= 3; /* global table io, table lock, idle */
rwlock_class_start= mutex_class_start + param->m_mutex_class_sizing;
cond_class_start= rwlock_class_start + param->m_rwlock_class_sizing;
file_class_start= cond_class_start + param->m_cond_class_sizing;
socket_class_start= file_class_start + param->m_file_class_sizing;
- table_class_start= socket_class_start + param->m_socket_class_sizing;
- wait_class_max= table_class_start + 3; /* global table io, lock, idle */
+ wait_class_max= socket_class_start + param->m_socket_class_sizing;
}
void register_global_classes()
@@ -213,19 +214,19 @@ void register_global_classes()
/* Table IO class */
init_instr_class(&global_table_io_class, "wait/io/table/sql/handler", 25,
0, PFS_CLASS_TABLE_IO);
- global_table_io_class.m_event_name_index= table_class_start;
+ global_table_io_class.m_event_name_index= GLOBAL_TABLE_IO_EVENT_INDEX;
configure_instr_class(&global_table_io_class);
/* Table lock class */
init_instr_class(&global_table_lock_class, "wait/lock/table/sql/handler", 27,
0, PFS_CLASS_TABLE_LOCK);
- global_table_lock_class.m_event_name_index= table_class_start + 1;
+ global_table_lock_class.m_event_name_index= GLOBAL_TABLE_LOCK_EVENT_INDEX;
configure_instr_class(&global_table_lock_class);
/* Idle class */
init_instr_class(&global_idle_class, "idle", 4,
0, PFS_CLASS_IDLE);
- global_idle_class.m_event_name_index= table_class_start + 2;
+ global_idle_class.m_event_name_index= GLOBAL_IDLE_EVENT_INDEX;
configure_instr_class(&global_idle_class);
}
@@ -384,6 +385,7 @@ int init_table_share_hash(void)
{
lf_hash_init(&table_share_hash, sizeof(PFS_table_share*), LF_HASH_UNIQUE,
0, 0, table_share_hash_get_key, &my_charset_bin);
+ /* table_share_hash.size= table_share_max; */
table_share_hash_inited= true;
}
return 0;
@@ -715,7 +717,7 @@ PFS_sync_key register_mutex_class(const char *name, uint name_length,
*/
entry= &mutex_class_array[index];
init_instr_class(entry, name, name_length, flags, PFS_CLASS_MUTEX);
- entry->m_lock_stat.reset();
+ entry->m_mutex_stat.reset();
entry->m_event_name_index= mutex_class_start + index;
entry->m_singleton= NULL;
entry->m_enabled= false; /* disabled by default */
@@ -781,8 +783,7 @@ PFS_sync_key register_rwlock_class(const char *name, uint name_length,
{
entry= &rwlock_class_array[index];
init_instr_class(entry, name, name_length, flags, PFS_CLASS_RWLOCK);
- entry->m_read_lock_stat.reset();
- entry->m_write_lock_stat.reset();
+ entry->m_rwlock_stat.reset();
entry->m_event_name_index= rwlock_class_start + index;
entry->m_singleton= NULL;
entry->m_enabled= false; /* disabled by default */
@@ -1193,7 +1194,7 @@ static void set_keys(PFS_table_share *pfs, const TABLE_SHARE *share)
pfs_key->m_name_length= len;
}
- pfs_key_last= pfs->m_keys + MAX_KEY;
+ pfs_key_last= pfs->m_keys + MAX_INDEXES;
for ( ; pfs_key < pfs_key_last; pfs_key++)
pfs_key->m_name_length= 0;
}
@@ -1256,7 +1257,7 @@ PFS_table_share* find_or_create_table_share(PFS_thread *thread,
const uint retry_max= 3;
bool enabled= true;
bool timed= true;
- static uint table_share_monotonic_index= 0;
+ static uint PFS_ALIGNED table_share_monotonic_index= 0;
uint index;
uint attempts= 0;
PFS_table_share *pfs;
@@ -1299,8 +1300,7 @@ search:
while (++attempts <= table_share_max)
{
/* See create_mutex() */
- PFS_atomic::add_u32(& table_share_monotonic_index, 1);
- index= table_share_monotonic_index % table_share_max;
+ index= PFS_atomic::add_u32(& table_share_monotonic_index, 1) % table_share_max;
pfs= table_share_array + index;
if (pfs->m_lock.is_free())
@@ -1353,17 +1353,28 @@ search:
void PFS_table_share::aggregate_io(void)
{
- uint index= global_table_io_class.m_event_name_index;
- PFS_single_stat *table_io_total= & global_instr_class_waits_array[index];
- m_table_stat.sum_io(table_io_total);
+ uint safe_key_count= sanitize_index_count(m_key_count);
+ PFS_table_io_stat *from_stat;
+ PFS_table_io_stat *from_stat_last;
+ PFS_table_io_stat sum_io;
+
+ /* Aggregate stats for each index, if any */
+ from_stat= & m_table_stat.m_index_stat[0];
+ from_stat_last= from_stat + safe_key_count;
+ for ( ; from_stat < from_stat_last ; from_stat++)
+ sum_io.aggregate(from_stat);
+
+ /* Aggregate stats for the table */
+ sum_io.aggregate(& m_table_stat.m_index_stat[MAX_INDEXES]);
+
+ /* Add this table stats to the global sink. */
+ global_table_io_stat.aggregate(& sum_io);
m_table_stat.fast_reset_io();
}
void PFS_table_share::aggregate_lock(void)
{
- uint index= global_table_lock_class.m_event_name_index;
- PFS_single_stat *table_lock_total= & global_instr_class_waits_array[index];
- m_table_stat.sum_lock(table_lock_total);
+ global_table_lock_stat.aggregate(& m_table_stat.m_lock_stat);
m_table_stat.fast_reset_lock();
}
@@ -1418,6 +1429,16 @@ PFS_table_share *sanitize_table_share(PFS_table_share *unsafe)
SANITIZE_ARRAY_BODY(PFS_table_share, table_share_array, table_share_max, unsafe);
}
+/** Reset the wait statistics per instrument class. */
+void reset_events_waits_by_class()
+{
+ reset_file_class_io();
+ reset_socket_class_io();
+ global_idle_stat.reset();
+ global_table_io_stat.reset();
+ global_table_lock_stat.reset();
+}
+
/** Reset the io statistics per file class. */
void reset_file_class_io(void)
{
diff --git a/storage/perfschema/pfs_instr_class.h b/storage/perfschema/pfs_instr_class.h
index bef25e76467..d0b90734b66 100644
--- a/storage/perfschema/pfs_instr_class.h
+++ b/storage/perfschema/pfs_instr_class.h
@@ -16,7 +16,10 @@
#ifndef PFS_INSTR_CLASS_H
#define PFS_INSTR_CLASS_H
+#include "my_global.h"
#include "mysql_com.h" /* NAME_LEN */
+#include "lf.h"
+#include "pfs_global.h"
/**
@file storage/perfschema/pfs_instr_class.h
@@ -112,7 +115,6 @@ extern uint mutex_class_start;
extern uint rwlock_class_start;
extern uint cond_class_start;
extern uint file_class_start;
-extern uint table_class_start;
extern uint socket_class_start;
extern uint wait_class_max;
@@ -166,13 +168,10 @@ struct PFS_instr_class
struct PFS_mutex;
/** Instrumentation metadata for a MUTEX. */
-struct PFS_mutex_class : public PFS_instr_class
+struct PFS_ALIGNED PFS_mutex_class : public PFS_instr_class
{
- /**
- Lock statistics.
- This statistic is not exposed in user visible tables yet.
- */
- PFS_single_stat m_lock_stat;
+ /** Mutex usage statistics. */
+ PFS_mutex_stat m_mutex_stat;
/** Singleton instance. */
PFS_mutex *m_singleton;
};
@@ -180,18 +179,10 @@ struct PFS_mutex_class : public PFS_instr_class
struct PFS_rwlock;
/** Instrumentation metadata for a RWLOCK. */
-struct PFS_rwlock_class : public PFS_instr_class
+struct PFS_ALIGNED PFS_rwlock_class : public PFS_instr_class
{
- /**
- Read lock statistics.
- This statistic is not exposed in user visible tables yet.
- */
- PFS_single_stat m_read_lock_stat;
- /**
- Write lock statistics.
- This statistic is not exposed in user visible tables yet.
- */
- PFS_single_stat m_write_lock_stat;
+ /** Rwlock usage statistics. */
+ PFS_rwlock_stat m_rwlock_stat;
/** Singleton instance. */
PFS_rwlock *m_singleton;
};
@@ -199,7 +190,7 @@ struct PFS_rwlock_class : public PFS_instr_class
struct PFS_cond;
/** Instrumentation metadata for a COND. */
-struct PFS_cond_class : public PFS_instr_class
+struct PFS_ALIGNED PFS_cond_class : public PFS_instr_class
{
/**
Condition usage statistics.
@@ -211,7 +202,7 @@ struct PFS_cond_class : public PFS_instr_class
};
/** Instrumentation metadata of a thread. */
-struct PFS_thread_class
+struct PFS_ALIGNED PFS_thread_class
{
/** True if this thread instrument is enabled. */
bool m_enabled;
@@ -249,7 +240,7 @@ struct PFS_table_key
};
/** Instrumentation metadata for a table share. */
-struct PFS_table_share
+struct PFS_ALIGNED PFS_table_share
{
public:
uint32 get_version()
@@ -318,13 +309,31 @@ public:
/** Table statistics. */
PFS_table_stat m_table_stat;
/** Index names. */
- PFS_table_key m_keys[MAX_KEY];
+ PFS_table_key m_keys[MAX_INDEXES];
private:
/** Number of opened table handles. */
int m_refcount;
};
+/** Statistics for the IDLE instrument. */
+extern PFS_single_stat global_idle_stat;
+/** Statistics for dropped table io. */
+extern PFS_table_io_stat global_table_io_stat;
+/** Statistics for dropped table lock. */
+extern PFS_table_lock_stat global_table_lock_stat;
+
+inline uint sanitize_index_count(uint count)
+{
+ if (likely(count <= MAX_INDEXES))
+ return count;
+ return 0;
+}
+
+#define GLOBAL_TABLE_IO_EVENT_INDEX 0
+#define GLOBAL_TABLE_LOCK_EVENT_INDEX 1
+#define GLOBAL_IDLE_EVENT_INDEX 2
+
/**
Instrument controlling all table io.
This instrument is used with table SETUP_OBJECTS.
@@ -345,7 +354,7 @@ extern PFS_instr_class global_idle_class;
struct PFS_file;
/** Instrumentation metadata for a file. */
-struct PFS_file_class : public PFS_instr_class
+struct PFS_ALIGNED PFS_file_class : public PFS_instr_class
{
/** File usage statistics. */
PFS_file_stat m_file_stat;
@@ -354,21 +363,21 @@ struct PFS_file_class : public PFS_instr_class
};
/** Instrumentation metadata for a stage. */
-struct PFS_stage_class : public PFS_instr_class
+struct PFS_ALIGNED PFS_stage_class : public PFS_instr_class
{
/** Stage usage statistics. */
PFS_stage_stat m_stage_stat;
};
/** Instrumentation metadata for a statement. */
-struct PFS_statement_class : public PFS_instr_class
+struct PFS_ALIGNED PFS_statement_class : public PFS_instr_class
{
};
struct PFS_socket;
/** Instrumentation metadata for a socket. */
-struct PFS_socket_class : public PFS_instr_class
+struct PFS_ALIGNED PFS_socket_class : public PFS_instr_class
{
/** Socket usage statistics. */
PFS_socket_stat m_socket_stat;
@@ -483,12 +492,15 @@ extern PFS_cond_class *cond_class_array;
extern PFS_file_class *file_class_array;
extern PFS_table_share *table_share_array;
+void reset_events_waits_by_class();
void reset_file_class_io();
void reset_socket_class_io();
/** Update derived flags for all table shares. */
void update_table_share_derived_flags(PFS_thread *thread);
+extern LF_HASH table_share_hash;
+
/** @} */
#endif
diff --git a/storage/perfschema/pfs_lock.h b/storage/perfschema/pfs_lock.h
index 65937e94ece..09efecd1c5f 100644
--- a/storage/perfschema/pfs_lock.h
+++ b/storage/perfschema/pfs_lock.h
@@ -33,7 +33,7 @@
Values of a free record should not be read by a reader.
Writers can concurrently attempt to allocate a free record.
*/
-#define PFS_LOCK_FREE 0
+#define PFS_LOCK_FREE 0x00
/**
State of a dirty record.
Values of a dirty record should not be read by a reader,
@@ -41,14 +41,18 @@
Only one writer, the writer which owns the record, should
modify the record content.
*/
-#define PFS_LOCK_DIRTY 1
+#define PFS_LOCK_DIRTY 0x01
/**
State of an allocated record.
Values of an allocated record are safe to read by a reader.
A writer may modify some but not all properties of the record:
only modifying values that can never cause the reader to crash is allowed.
*/
-#define PFS_LOCK_ALLOCATED 2
+#define PFS_LOCK_ALLOCATED 0x02
+
+#define VERSION_MASK 0xFFFFFFFC
+#define STATE_MASK 0x00000003
+#define VERSION_INC 4
/**
A 'lock' protecting performance schema internal buffers.
@@ -60,15 +64,11 @@
struct pfs_lock
{
/**
- The record internal state.
+ The record internal version and state
@sa PFS_LOCK_FREE
@sa PFS_LOCK_DIRTY
@sa PFS_LOCK_ALLOCATED
- */
- volatile int32 m_state;
- /**
- The record internal version number.
- This version number is to transform the 'ABA' problem
+ The version number is to transform the 'ABA' problem
(see http://en.wikipedia.org/wiki/ABA_problem)
into an 'A(n)BA(n + 1)' problem, where 'n' is the m_version number.
When the performance schema instrumentation deletes a record,
@@ -76,21 +76,23 @@ struct pfs_lock
the version number is incremented, so that a reader can detect that
the record was changed. Note that the version number is never
reset to zero when a new record is created.
+ The version number is stored in the high 30 bits.
+ The state is stored in the low 2 bits.
*/
- volatile uint32 m_version;
+ volatile uint32 m_version_state;
/** Returns true if the record is free. */
bool is_free(void)
{
- /* This is a dirty read */
- return (m_state == PFS_LOCK_FREE);
+ uint32 copy= m_version_state; /* non volatile copy, and dirty read */
+ return ((copy & STATE_MASK) == PFS_LOCK_FREE);
}
/** Returns true if the record contains values that can be read. */
bool is_populated(void)
{
- int32 copy= m_state; /* non volatile copy, and dirty read */
- return (copy == PFS_LOCK_ALLOCATED);
+ uint32 copy= m_version_state; /* non volatile copy, and dirty read */
+ return ((copy & STATE_MASK) == PFS_LOCK_ALLOCATED);
}
/**
@@ -101,10 +103,11 @@ struct pfs_lock
*/
bool free_to_dirty(void)
{
- int32 old_state= PFS_LOCK_FREE;
- int32 new_state= PFS_LOCK_DIRTY;
+ uint32 copy= m_version_state; /* non volatile copy, and dirty read */
+ uint32 old_val= (copy & VERSION_MASK) + PFS_LOCK_FREE;
+ uint32 new_val= (copy & VERSION_MASK) + PFS_LOCK_DIRTY;
- return (PFS_atomic::cas_32(&m_state, &old_state, new_state));
+ return (PFS_atomic::cas_u32(&m_version_state, &old_val, new_val));
}
/**
@@ -114,8 +117,13 @@ struct pfs_lock
*/
void allocated_to_dirty(void)
{
- DBUG_ASSERT(m_state == PFS_LOCK_ALLOCATED);
- PFS_atomic::store_32(&m_state, PFS_LOCK_DIRTY);
+ uint32 copy= PFS_atomic::load_u32(&m_version_state);
+ /* Make sure the record was ALLOCATED. */
+ DBUG_ASSERT((copy & STATE_MASK) == PFS_LOCK_ALLOCATED);
+ /* Keep the same version, set the DIRTY state */
+ uint32 new_val= (copy & VERSION_MASK) + PFS_LOCK_DIRTY;
+ /* We own the record, no need to use compare and swap. */
+ PFS_atomic::store_u32(&m_version_state, new_val);
}
/**
@@ -125,9 +133,26 @@ struct pfs_lock
*/
void dirty_to_allocated(void)
{
- DBUG_ASSERT(m_state == PFS_LOCK_DIRTY);
- PFS_atomic::add_u32(&m_version, 1);
- PFS_atomic::store_32(&m_state, PFS_LOCK_ALLOCATED);
+ uint32 copy= PFS_atomic::load_u32(&m_version_state);
+ /* Make sure the record was DIRTY. */
+ DBUG_ASSERT((copy & STATE_MASK) == PFS_LOCK_DIRTY);
+ /* Increment the version, set the ALLOCATED state */
+ uint32 new_val= (copy & VERSION_MASK) + VERSION_INC + PFS_LOCK_ALLOCATED;
+ PFS_atomic::store_u32(&m_version_state, new_val);
+ }
+
+ /**
+ Initialize a lock to allocated.
+ This transition should be executed by the writer that owns the record and the lock,
+ after the record is in a state ready to be read.
+ */
+ void set_allocated(void)
+ {
+ /* Do not set the version to 0, read the previous value. */
+ uint32 copy= PFS_atomic::load_u32(&m_version_state);
+ /* Increment the version, set the ALLOCATED state */
+ uint32 new_val= (copy & VERSION_MASK) + VERSION_INC + PFS_LOCK_ALLOCATED;
+ PFS_atomic::store_u32(&m_version_state, new_val);
}
/**
@@ -136,8 +161,12 @@ struct pfs_lock
*/
void dirty_to_free(void)
{
- DBUG_ASSERT(m_state == PFS_LOCK_DIRTY);
- PFS_atomic::store_32(&m_state, PFS_LOCK_FREE);
+ uint32 copy= PFS_atomic::load_u32(&m_version_state);
+ /* Make sure the record was DIRTY. */
+ DBUG_ASSERT((copy & STATE_MASK) == PFS_LOCK_DIRTY);
+ /* Keep the same version, set the FREE state */
+ uint32 new_val= (copy & VERSION_MASK) + PFS_LOCK_FREE;
+ PFS_atomic::store_u32(&m_version_state, new_val);
}
/**
@@ -153,8 +182,12 @@ struct pfs_lock
The correct assert to use here to guarantee data integrity is simply:
DBUG_ASSERT(m_state == PFS_LOCK_ALLOCATED);
*/
- DBUG_ASSERT(m_state == PFS_LOCK_ALLOCATED);
- PFS_atomic::store_32(&m_state, PFS_LOCK_FREE);
+ uint32 copy= PFS_atomic::load_u32(&m_version_state);
+ /* Make sure the record was ALLOCATED. */
+ DBUG_ASSERT(((copy & STATE_MASK) == PFS_LOCK_ALLOCATED));
+ /* Keep the same version, set the FREE state */
+ uint32 new_val= (copy & VERSION_MASK) + PFS_LOCK_FREE;
+ PFS_atomic::store_u32(&m_version_state, new_val);
}
/**
@@ -163,8 +196,7 @@ struct pfs_lock
*/
void begin_optimistic_lock(struct pfs_lock *copy)
{
- copy->m_version= PFS_atomic::load_u32(&m_version);
- copy->m_state= PFS_atomic::load_32(&m_state);
+ copy->m_version_state= PFS_atomic::load_u32(&m_version_state);
}
/**
@@ -174,19 +206,20 @@ struct pfs_lock
*/
bool end_optimistic_lock(struct pfs_lock *copy)
{
- /*
- return true if:
- - the version + state has not changed
- - and there was valid data to look at
- */
- return ((copy->m_version == PFS_atomic::load_u32(&m_version)) &&
- (copy->m_state == PFS_atomic::load_32(&m_state)) &&
- (copy->m_state == PFS_LOCK_ALLOCATED));
+ /* Check there was valid data to look at. */
+ if ((copy->m_version_state & STATE_MASK) != PFS_LOCK_ALLOCATED)
+ return false;
+
+ /* Check the version + state has not changed. */
+ if (copy->m_version_state != PFS_atomic::load_u32(&m_version_state))
+ return false;
+
+ return true;
}
uint32 get_version()
{
- return PFS_atomic::load_u32(&m_version);
+ return (PFS_atomic::load_u32(&m_version_state) & VERSION_MASK);
}
};
diff --git a/storage/perfschema/pfs_server.cc b/storage/perfschema/pfs_server.cc
index 3df0f27f652..383a46785fb 100644
--- a/storage/perfschema/pfs_server.cc
+++ b/storage/perfschema/pfs_server.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -50,11 +50,16 @@ static void cleanup_performance_schema(void);
void cleanup_instrument_config(void);
struct PSI_bootstrap*
-initialize_performance_schema(const PFS_global_param *param)
+initialize_performance_schema(PFS_global_param *param)
{
pfs_initialized= false;
PFS_table_stat::g_reset_template.reset();
+ global_idle_stat.reset();
+ global_table_io_stat.reset();
+ global_table_lock_stat.reset();
+
+ pfs_automated_sizing(param);
if (! param->m_enabled)
{
diff --git a/storage/perfschema/pfs_server.h b/storage/perfschema/pfs_server.h
index f65febdeb6d..e0c782fde58 100644
--- a/storage/perfschema/pfs_server.h
+++ b/storage/perfschema/pfs_server.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -24,96 +24,50 @@
#ifndef PFS_MAX_MUTEX_CLASS
#define PFS_MAX_MUTEX_CLASS 200
#endif
-#ifndef PFS_MAX_MUTEX
- #define PFS_MAX_MUTEX 1000000
-#endif
#ifndef PFS_MAX_RWLOCK_CLASS
#define PFS_MAX_RWLOCK_CLASS 30
#endif
-#ifndef PFS_MAX_RWLOCK
- #define PFS_MAX_RWLOCK 1000000
-#endif
#ifndef PFS_MAX_COND_CLASS
#define PFS_MAX_COND_CLASS 80
#endif
-#ifndef PFS_MAX_COND
- #define PFS_MAX_COND 1000
-#endif
#ifndef PFS_MAX_THREAD_CLASS
#define PFS_MAX_THREAD_CLASS 50
#endif
-#ifndef PFS_MAX_THREAD
- #define PFS_MAX_THREAD 1000
-#endif
#ifndef PFS_MAX_FILE_CLASS
#define PFS_MAX_FILE_CLASS 50
#endif
-#ifndef PFS_MAX_FILE
- #define PFS_MAX_FILE 10000
-#endif
#ifndef PFS_MAX_FILE_HANDLE
#define PFS_MAX_FILE_HANDLE 32768
#endif
-#ifndef PFS_MAX_SOCKETS
- #define PFS_MAX_SOCKETS 1000
-#endif
#ifndef PFS_MAX_SOCKET_CLASS
#define PFS_MAX_SOCKET_CLASS 10
#endif
-#ifndef PFS_MAX_TABLE_SHARE
- #define PFS_MAX_TABLE_SHARE 1000
-#endif
-#ifndef PFS_MAX_TABLE
- #define PFS_MAX_TABLE 10000
-#endif
-#ifndef PFS_WAITS_HISTORY_SIZE
- #define PFS_WAITS_HISTORY_SIZE 10
-#endif
-#ifndef PFS_WAITS_HISTORY_LONG_SIZE
- #define PFS_WAITS_HISTORY_LONG_SIZE 10000
-#endif
#ifndef PFS_MAX_SETUP_ACTOR
#define PFS_MAX_SETUP_ACTOR 100
#endif
#ifndef PFS_MAX_SETUP_OBJECT
#define PFS_MAX_SETUP_OBJECT 100
#endif
-#ifndef PFS_MAX_HOST
- #define PFS_MAX_HOST 100
-#endif
-#ifndef PFS_MAX_USER
- #define PFS_MAX_USER 100
-#endif
-#ifndef PFS_MAX_ACCOUNT
- #define PFS_MAX_ACCOUNT 100
-#endif
#ifndef PFS_MAX_STAGE_CLASS
#define PFS_MAX_STAGE_CLASS 150
#endif
-#ifndef PFS_STAGES_HISTORY_SIZE
- #define PFS_STAGES_HISTORY_SIZE 10
-#endif
-#ifndef PFS_STAGES_HISTORY_LONG_SIZE
- #define PFS_STAGES_HISTORY_LONG_SIZE 10000
-#endif
-#ifndef PFS_STATEMENTS_HISTORY_SIZE
- #define PFS_STATEMENTS_HISTORY_SIZE 10
-#endif
-#ifndef PFS_STATEMENTS_HISTORY_LONG_SIZE
- #define PFS_STATEMENTS_HISTORY_LONG_SIZE 10000
-#endif
#ifndef PFS_STATEMENTS_STACK_SIZE
#define PFS_STATEMENTS_STACK_SIZE 10
#endif
-#ifndef PFS_DIGEST_SIZE
- #define PFS_DIGEST_SIZE 200
-#endif
+
+struct PFS_sizing_hints
+{
+ long m_table_definition_cache;
+ long m_table_open_cache;
+ long m_max_connections;
+ long m_open_files_limit;
+};
/** Performance schema global sizing parameters. */
struct PFS_global_param
{
/** True if the performance schema is enabled. */
- bool m_enabled;
+ bool m_enabled;
/** Default values for SETUP_CONSUMERS. */
bool m_consumer_events_stages_current_enabled;
bool m_consumer_events_stages_history_enabled;
@@ -155,7 +109,7 @@ struct PFS_global_param
Maximum number of instrumented table share.
@sa table_share_lost.
*/
- ulong m_table_share_sizing;
+ long m_table_share_sizing;
/**
Maximum number of instrumented file classes.
@sa file_class_lost.
@@ -165,81 +119,86 @@ struct PFS_global_param
Maximum number of instrumented mutex instances.
@sa mutex_lost.
*/
- ulong m_mutex_sizing;
+ long m_mutex_sizing;
/**
Maximum number of instrumented rwlock instances.
@sa rwlock_lost.
*/
- ulong m_rwlock_sizing;
+ long m_rwlock_sizing;
/**
Maximum number of instrumented cond instances.
@sa cond_lost.
*/
- ulong m_cond_sizing;
+ long m_cond_sizing;
/**
Maximum number of instrumented thread instances.
@sa thread_lost.
*/
- ulong m_thread_sizing;
+ long m_thread_sizing;
/**
Maximum number of instrumented table handles.
@sa table_lost.
*/
- ulong m_table_sizing;
+ long m_table_sizing;
/**
Maximum number of instrumented file instances.
@sa file_lost.
*/
- ulong m_file_sizing;
+ long m_file_sizing;
/**
Maximum number of instrumented file handles.
@sa file_handle_lost.
*/
- ulong m_file_handle_sizing;
+ long m_file_handle_sizing;
/**
    Maximum number of instrumented socket instances
@sa socket_lost
*/
- ulong m_socket_sizing;
+ long m_socket_sizing;
/**
Maximum number of instrumented socket classes.
@sa socket_class_lost.
*/
ulong m_socket_class_sizing;
/** Maximum number of rows per thread in table EVENTS_WAITS_HISTORY. */
- ulong m_events_waits_history_sizing;
+ long m_events_waits_history_sizing;
/** Maximum number of rows in table EVENTS_WAITS_HISTORY_LONG. */
- ulong m_events_waits_history_long_sizing;
+ long m_events_waits_history_long_sizing;
/** Maximum number of rows in table SETUP_ACTORS. */
ulong m_setup_actor_sizing;
/** Maximum number of rows in table SETUP_OBJECTS. */
ulong m_setup_object_sizing;
/** Maximum number of rows in table HOSTS. */
- ulong m_host_sizing;
+ long m_host_sizing;
/** Maximum number of rows in table USERS. */
- ulong m_user_sizing;
+ long m_user_sizing;
/** Maximum number of rows in table ACCOUNTS. */
- ulong m_account_sizing;
+ long m_account_sizing;
/**
Maximum number of instrumented stage classes.
@sa stage_class_lost.
*/
ulong m_stage_class_sizing;
/** Maximum number of rows per thread in table EVENTS_STAGES_HISTORY. */
- ulong m_events_stages_history_sizing;
+ long m_events_stages_history_sizing;
/** Maximum number of rows in table EVENTS_STAGES_HISTORY_LONG. */
- ulong m_events_stages_history_long_sizing;
+ long m_events_stages_history_long_sizing;
/**
Maximum number of instrumented statement classes.
@sa statement_class_lost.
*/
ulong m_statement_class_sizing;
/** Maximum number of rows per thread in table EVENTS_STATEMENT_HISTORY. */
- ulong m_events_statements_history_sizing;
+ long m_events_statements_history_sizing;
/** Maximum number of rows in table EVENTS_STATEMENTS_HISTORY_LONG. */
- ulong m_events_statements_history_long_sizing;
+ long m_events_statements_history_long_sizing;
/** Maximum number of digests to be captured */
- ulong m_digest_sizing;
+ long m_digest_sizing;
+ /** Maximum number of session attribute strings per thread */
+ long m_session_connect_attrs_sizing;
+
+ /** Sizing hints, for auto tuning. */
+ PFS_sizing_hints m_hints;
};
/**
@@ -254,7 +213,9 @@ extern PFS_global_param pfs_param;
  @return A bootstrap handle, or NULL.
*/
struct PSI_bootstrap*
-initialize_performance_schema(const PFS_global_param *param);
+initialize_performance_schema(PFS_global_param *param);
+
+void pfs_automated_sizing(PFS_global_param *param);
/**
Initialize the performance schema ACL.
diff --git a/storage/perfschema/pfs_setup_actor.cc b/storage/perfschema/pfs_setup_actor.cc
index a587d3643d2..ff45e4a0971 100644
--- a/storage/perfschema/pfs_setup_actor.cc
+++ b/storage/perfschema/pfs_setup_actor.cc
@@ -43,7 +43,7 @@ ulong setup_actor_max;
PFS_setup_actor *setup_actor_array= NULL;
/** Hash table for setup_actor records. */
-static LF_HASH setup_actor_hash;
+LF_HASH setup_actor_hash;
/** True if @c setup_actor_hash is initialized. */
static bool setup_actor_hash_inited= false;
@@ -100,10 +100,11 @@ C_MODE_END
*/
int init_setup_actor_hash(void)
{
- if (! setup_actor_hash_inited)
+ if ((! setup_actor_hash_inited) && (setup_actor_max > 0))
{
lf_hash_init(&setup_actor_hash, sizeof(PFS_setup_actor*), LF_HASH_UNIQUE,
0, 0, setup_actor_hash_get_key, &my_charset_bin);
+ /* setup_actor_hash.size= setup_actor_max; */
setup_actor_hash_inited= true;
}
return 0;
@@ -167,7 +168,7 @@ int insert_setup_actor(const String *user, const String *host, const String *rol
if (unlikely(pins == NULL))
return HA_ERR_OUT_OF_MEM;
- static uint setup_actor_monotonic_index= 0;
+ static uint PFS_ALIGNED setup_actor_monotonic_index= 0;
uint index;
uint attempts= 0;
PFS_setup_actor *pfs;
@@ -175,8 +176,7 @@ int insert_setup_actor(const String *user, const String *host, const String *rol
while (++attempts <= setup_actor_max)
{
/* See create_mutex() */
- PFS_atomic::add_u32(& setup_actor_monotonic_index, 1);
- index= setup_actor_monotonic_index % setup_actor_max;
+ index= PFS_atomic::add_u32(& setup_actor_monotonic_index, 1) % setup_actor_max;
pfs= setup_actor_array + index;
if (pfs->m_lock.is_free())
diff --git a/storage/perfschema/pfs_setup_actor.h b/storage/perfschema/pfs_setup_actor.h
index 8b0ee8a485c..baebd27f0ad 100644
--- a/storage/perfschema/pfs_setup_actor.h
+++ b/storage/perfschema/pfs_setup_actor.h
@@ -49,7 +49,7 @@ struct PFS_setup_actor_key
};
/** A setup_actor record. */
-struct PFS_setup_actor
+struct PFS_ALIGNED PFS_setup_actor
{
/** Internal lock. */
pfs_lock m_lock;
@@ -92,6 +92,8 @@ extern ulong setup_actor_max;
extern PFS_setup_actor *setup_actor_array;
+extern LF_HASH setup_actor_hash;
+
/** @} */
#endif
diff --git a/storage/perfschema/pfs_setup_object.cc b/storage/perfschema/pfs_setup_object.cc
index a9e9bb7881b..535dd53bfc7 100644
--- a/storage/perfschema/pfs_setup_object.cc
+++ b/storage/perfschema/pfs_setup_object.cc
@@ -39,7 +39,7 @@ ulong setup_object_max;
PFS_setup_object *setup_object_array= NULL;
-static LF_HASH setup_object_hash;
+LF_HASH setup_object_hash;
static bool setup_object_hash_inited= false;
/**
@@ -95,10 +95,11 @@ C_MODE_END
*/
int init_setup_object_hash(void)
{
- if (! setup_object_hash_inited)
+ if ((! setup_object_hash_inited) && (setup_object_max > 0))
{
lf_hash_init(&setup_object_hash, sizeof(PFS_setup_object*), LF_HASH_UNIQUE,
0, 0, setup_object_hash_get_key, &my_charset_bin);
+ /* setup_object_hash.size= setup_object_max; */
setup_object_hash_inited= true;
}
return 0;
@@ -109,8 +110,8 @@ void cleanup_setup_object_hash(void)
{
if (setup_object_hash_inited)
{
- lf_hash_destroy(&setup_object_hash);
setup_object_hash_inited= false;
+ lf_hash_destroy(&setup_object_hash);
}
}
@@ -161,7 +162,7 @@ int insert_setup_object(enum_object_type object_type, const String *schema,
if (unlikely(pins == NULL))
return HA_ERR_OUT_OF_MEM;
- static uint setup_object_monotonic_index= 0;
+ static uint PFS_ALIGNED setup_object_monotonic_index= 0;
uint index;
uint attempts= 0;
PFS_setup_object *pfs;
@@ -169,8 +170,7 @@ int insert_setup_object(enum_object_type object_type, const String *schema,
while (++attempts <= setup_object_max)
{
/* See create_mutex() */
- PFS_atomic::add_u32(& setup_object_monotonic_index, 1);
- index= setup_object_monotonic_index % setup_object_max;
+ index= PFS_atomic::add_u32(& setup_object_monotonic_index, 1) % setup_object_max;
pfs= setup_object_array + index;
if (pfs->m_lock.is_free())
diff --git a/storage/perfschema/pfs_setup_object.h b/storage/perfschema/pfs_setup_object.h
index 44d2b76c627..2615802fe01 100644
--- a/storage/perfschema/pfs_setup_object.h
+++ b/storage/perfschema/pfs_setup_object.h
@@ -45,7 +45,7 @@ struct PFS_setup_object_key
};
/** A setup_object record. */
-struct PFS_setup_object
+struct PFS_ALIGNED PFS_setup_object
{
enum_object_type get_object_type()
{
@@ -96,6 +96,8 @@ extern ulong setup_object_max;
extern PFS_setup_object *setup_object_array;
+extern LF_HASH setup_object_hash;
+
/** @} */
#endif
diff --git a/storage/perfschema/pfs_stat.h b/storage/perfschema/pfs_stat.h
index 32c462b8ba2..2a255a9e5b2 100644
--- a/storage/perfschema/pfs_stat.h
+++ b/storage/perfschema/pfs_stat.h
@@ -140,13 +140,90 @@ struct PFS_byte_stat : public PFS_single_stat
}
};
+/** Statistics for mutex usage. */
+struct PFS_mutex_stat
+{
+ /** Wait statistics. */
+ PFS_single_stat m_wait_stat;
+ /**
+ Lock statistics.
+ This statistic is not exposed in user visible tables yet.
+ */
+ PFS_single_stat m_lock_stat;
+
+ inline void aggregate(const PFS_mutex_stat *stat)
+ {
+ m_wait_stat.aggregate(&stat->m_wait_stat);
+ m_lock_stat.aggregate(&stat->m_lock_stat);
+ }
+
+ inline void reset(void)
+ {
+ m_wait_stat.reset();
+ m_lock_stat.reset();
+ }
+};
+
+/** Statistics for rwlock usage. */
+struct PFS_rwlock_stat
+{
+ /** Wait statistics. */
+ PFS_single_stat m_wait_stat;
+ /**
+ RWLock read lock usage statistics.
+ This statistic is not exposed in user visible tables yet.
+ */
+ PFS_single_stat m_read_lock_stat;
+ /**
+ RWLock write lock usage statistics.
+ This statistic is not exposed in user visible tables yet.
+ */
+ PFS_single_stat m_write_lock_stat;
+
+ inline void aggregate(const PFS_rwlock_stat *stat)
+ {
+ m_wait_stat.aggregate(&stat->m_wait_stat);
+ m_read_lock_stat.aggregate(&stat->m_read_lock_stat);
+ m_write_lock_stat.aggregate(&stat->m_write_lock_stat);
+ }
+
+ inline void reset(void)
+ {
+ m_wait_stat.reset();
+ m_read_lock_stat.reset();
+ m_write_lock_stat.reset();
+ }
+};
+
/** Statistics for COND usage. */
struct PFS_cond_stat
{
- /** Number of times a condition was signalled. */
+ /** Wait statistics. */
+ PFS_single_stat m_wait_stat;
+ /**
+ Number of times a condition was signalled.
+ This statistic is not exposed in user visible tables yet.
+ */
ulonglong m_signal_count;
- /** Number of times a condition was broadcasted. */
+ /**
+ Number of times a condition was broadcast.
+ This statistic is not exposed in user visible tables yet.
+ */
ulonglong m_broadcast_count;
+
+ inline void aggregate(const PFS_cond_stat *stat)
+ {
+ m_wait_stat.aggregate(&stat->m_wait_stat);
+ m_signal_count+= stat->m_signal_count;
+ m_broadcast_count+= stat->m_broadcast_count;
+ }
+
+ inline void reset(void)
+ {
+ m_wait_stat.reset();
+ m_signal_count= 0;
+ m_broadcast_count= 0;
+ }
};
/** Statistics for FILE IO. Used for both waits and byte counts. */
@@ -198,6 +275,11 @@ struct PFS_file_stat
/** File IO statistics. */
PFS_file_io_stat m_io_stat;
+ inline void aggregate(const PFS_file_stat *stat)
+ {
+ m_io_stat.aggregate(&stat->m_io_stat);
+ }
+
/** Reset file statistics. */
inline void reset(void)
{
@@ -329,6 +411,7 @@ struct PFS_statement_stat
/** Single table io statistic. */
struct PFS_table_io_stat
{
+ bool m_has_data;
/** FETCH statistics */
PFS_single_stat m_fetch;
/** INSERT statistics */
@@ -338,8 +421,14 @@ struct PFS_table_io_stat
/** DELETE statistics */
PFS_single_stat m_delete;
+ PFS_table_io_stat()
+ {
+ m_has_data= false;
+ }
+
inline void reset(void)
{
+ m_has_data= false;
m_fetch.reset();
m_insert.reset();
m_update.reset();
@@ -348,18 +437,25 @@ struct PFS_table_io_stat
inline void aggregate(const PFS_table_io_stat *stat)
{
- m_fetch.aggregate(&stat->m_fetch);
- m_insert.aggregate(&stat->m_insert);
- m_update.aggregate(&stat->m_update);
- m_delete.aggregate(&stat->m_delete);
+ if (stat->m_has_data)
+ {
+ m_has_data= true;
+ m_fetch.aggregate(&stat->m_fetch);
+ m_insert.aggregate(&stat->m_insert);
+ m_update.aggregate(&stat->m_update);
+ m_delete.aggregate(&stat->m_delete);
+ }
}
inline void sum(PFS_single_stat *result)
{
- result->aggregate(& m_fetch);
- result->aggregate(& m_insert);
- result->aggregate(& m_update);
- result->aggregate(& m_delete);
+ if (m_has_data)
+ {
+ result->aggregate(& m_fetch);
+ result->aggregate(& m_insert);
+ result->aggregate(& m_update);
+ result->aggregate(& m_delete);
+ }
}
};
@@ -419,10 +515,10 @@ struct PFS_table_stat
{
/**
Statistics, per index.
- Each index stat is in [0, MAX_KEY-1],
- stats when using no index are in [MAX_KEY].
+ Each index stat is in [0, MAX_INDEXES-1],
+ stats when using no index are in [MAX_INDEXES].
*/
- PFS_table_io_stat m_index_stat[MAX_KEY + 1];
+ PFS_table_io_stat m_index_stat[MAX_INDEXES + 1];
/**
Statistics, per lock type.
@@ -433,7 +529,7 @@ struct PFS_table_stat
inline void reset_io(void)
{
PFS_table_io_stat *stat= & m_index_stat[0];
- PFS_table_io_stat *stat_last= & m_index_stat[MAX_KEY + 1];
+ PFS_table_io_stat *stat_last= & m_index_stat[MAX_INDEXES + 1];
for ( ; stat < stat_last ; stat++)
stat->reset();
}
@@ -466,13 +562,25 @@ struct PFS_table_stat
memcpy(this, & g_reset_template, sizeof(*this));
}
- inline void aggregate_io(const PFS_table_stat *stat)
+ inline void aggregate_io(const PFS_table_stat *stat, uint key_count)
{
- PFS_table_io_stat *to_stat= & m_index_stat[0];
- PFS_table_io_stat *to_stat_last= & m_index_stat[MAX_KEY + 1];
- const PFS_table_io_stat *from_stat= & stat->m_index_stat[0];
+ PFS_table_io_stat *to_stat;
+ PFS_table_io_stat *to_stat_last;
+ const PFS_table_io_stat *from_stat;
+
+ DBUG_ASSERT(key_count <= MAX_INDEXES);
+
+ /* Aggregate stats for each index, if any */
+ to_stat= & m_index_stat[0];
+ to_stat_last= to_stat + key_count;
+ from_stat= & stat->m_index_stat[0];
for ( ; to_stat < to_stat_last ; from_stat++, to_stat++)
to_stat->aggregate(from_stat);
+
+ /* Aggregate stats for the table */
+ to_stat= & m_index_stat[MAX_INDEXES];
+ from_stat= & stat->m_index_stat[MAX_INDEXES];
+ to_stat->aggregate(from_stat);
}
inline void aggregate_lock(const PFS_table_stat *stat)
@@ -480,18 +588,27 @@ struct PFS_table_stat
m_lock_stat.aggregate(& stat->m_lock_stat);
}
- inline void aggregate(const PFS_table_stat *stat)
+ inline void aggregate(const PFS_table_stat *stat, uint key_count)
{
- aggregate_io(stat);
+ aggregate_io(stat, key_count);
aggregate_lock(stat);
}
- inline void sum_io(PFS_single_stat *result)
+ inline void sum_io(PFS_single_stat *result, uint key_count)
{
- PFS_table_io_stat *stat= & m_index_stat[0];
- PFS_table_io_stat *stat_last= & m_index_stat[MAX_KEY + 1];
+ PFS_table_io_stat *stat;
+ PFS_table_io_stat *stat_last;
+
+ DBUG_ASSERT(key_count <= MAX_INDEXES);
+
+ /* Sum stats for each index, if any */
+ stat= & m_index_stat[0];
+ stat_last= stat + key_count;
for ( ; stat < stat_last ; stat++)
stat->sum(result);
+
+ /* Sum stats for the table */
+ m_index_stat[MAX_INDEXES].sum(result);
}
inline void sum_lock(PFS_single_stat *result)
@@ -499,9 +616,9 @@ struct PFS_table_stat
m_lock_stat.sum(result);
}
- inline void sum(PFS_single_stat *result)
+ inline void sum(PFS_single_stat *result, uint key_count)
{
- sum_io(result);
+ sum_io(result, key_count);
sum_lock(result);
}
diff --git a/storage/perfschema/pfs_timer.cc b/storage/perfschema/pfs_timer.cc
index 3d8d2e07ce5..8c3553db2b2 100644
--- a/storage/perfschema/pfs_timer.cc
+++ b/storage/perfschema/pfs_timer.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -117,6 +117,75 @@ void init_timers(void)
to_pico_data[TIMER_NAME_TICK].m_v0= tick_v0;
to_pico_data[TIMER_NAME_TICK].m_factor= tick_to_pico;
+
+ /*
+ Depending on the platform and build options,
+ some timers may not be available.
+ Pick best replacements.
+ */
+
+ /*
+ For STAGE and STATEMENT, a timer with a fixed frequency is better.
+    The preferred timer is nanosecond, or lower resolutions.
+ */
+
+ if (nanosec_to_pico != 0)
+ {
+ /* Normal case. */
+ stage_timer= TIMER_NAME_NANOSEC;
+ statement_timer= TIMER_NAME_NANOSEC;
+ }
+ else if (microsec_to_pico != 0)
+ {
+ /* Windows. */
+ stage_timer= TIMER_NAME_MICROSEC;
+ statement_timer= TIMER_NAME_MICROSEC;
+ }
+ else if (millisec_to_pico != 0)
+ {
+ /* Robustness, no known cases. */
+ stage_timer= TIMER_NAME_MILLISEC;
+ statement_timer= TIMER_NAME_MILLISEC;
+ }
+ else if (tick_to_pico != 0)
+ {
+ /* Robustness, no known cases. */
+ stage_timer= TIMER_NAME_TICK;
+ statement_timer= TIMER_NAME_TICK;
+ }
+ else
+ {
+ /* Robustness, no known cases. */
+ stage_timer= TIMER_NAME_CYCLE;
+ statement_timer= TIMER_NAME_CYCLE;
+ }
+
+ /*
+ For IDLE, a timer with a fixed frequency is critical,
+ as the CPU clock may slow down a lot if the server is completely idle.
+    The preferred timer is microsecond, or lower resolutions.
+ */
+
+ if (microsec_to_pico != 0)
+ {
+ /* Normal case. */
+ idle_timer= TIMER_NAME_MICROSEC;
+ }
+ else if (millisec_to_pico != 0)
+ {
+ /* Robustness, no known cases. */
+ idle_timer= TIMER_NAME_MILLISEC;
+ }
+ else if (tick_to_pico != 0)
+ {
+ /* Robustness, no known cases. */
+ idle_timer= TIMER_NAME_TICK;
+ }
+ else
+ {
+ /* Robustness, no known cases. */
+ idle_timer= TIMER_NAME_CYCLE;
+ }
}
ulonglong get_timer_raw_value(enum_timer_name timer_name)
diff --git a/storage/perfschema/pfs_user.cc b/storage/perfschema/pfs_user.cc
index d7794a131a1..60d6a2e99f6 100644
--- a/storage/perfschema/pfs_user.cc
+++ b/storage/perfschema/pfs_user.cc
@@ -42,7 +42,7 @@ static PFS_single_stat *user_instr_class_waits_array= NULL;
static PFS_stage_stat *user_instr_class_stages_array= NULL;
static PFS_statement_stat *user_instr_class_statements_array= NULL;
-static LF_HASH user_hash;
+LF_HASH user_hash;
static bool user_hash_inited= false;
/**
@@ -146,10 +146,11 @@ C_MODE_END
*/
int init_user_hash(void)
{
- if (! user_hash_inited)
+ if ((! user_hash_inited) && (user_max > 0))
{
lf_hash_init(&user_hash, sizeof(PFS_user*), LF_HASH_UNIQUE,
0, 0, user_hash_get_key, &my_charset_bin);
+ /* user_hash.size= user_max; */
user_hash_inited= true;
}
return 0;
diff --git a/storage/perfschema/pfs_user.h b/storage/perfschema/pfs_user.h
index 0f937c6c927..dda7e221ca8 100644
--- a/storage/perfschema/pfs_user.h
+++ b/storage/perfschema/pfs_user.h
@@ -44,7 +44,7 @@ struct PFS_user_key
uint m_key_length;
};
-struct PFS_user : public PFS_connection_slice
+struct PFS_ALIGNED PFS_user : public PFS_connection_slice
{
public:
inline void init_refcount(void)
@@ -108,6 +108,8 @@ extern ulong user_lost;
extern PFS_user *user_array;
+extern LF_HASH user_hash;
+
/** @} */
#endif
diff --git a/storage/perfschema/pfs_visitor.cc b/storage/perfschema/pfs_visitor.cc
index fe2b16a2f76..616bc27900a 100644
--- a/storage/perfschema/pfs_visitor.cc
+++ b/storage/perfschema/pfs_visitor.cc
@@ -666,7 +666,7 @@ void PFS_connection_wait_visitor::visit_global()
it is more efficient.
*/
DBUG_ASSERT(m_index == global_idle_class.m_event_name_index);
- m_stat.aggregate(& global_instr_class_waits_array[m_index]);
+ m_stat.aggregate(& global_idle_stat);
}
void PFS_connection_wait_visitor::visit_host(PFS_host *pfs)
@@ -883,54 +883,44 @@ PFS_instance_wait_visitor::PFS_instance_wait_visitor()
PFS_instance_wait_visitor::~PFS_instance_wait_visitor()
{}
-void PFS_instance_wait_visitor::visit_mutex_class(PFS_mutex_class *pfs)
+void PFS_instance_wait_visitor::visit_mutex_class(PFS_mutex_class *pfs)
{
- uint index= pfs->m_event_name_index;
- m_stat.aggregate(& global_instr_class_waits_array[index]);
+ m_stat.aggregate(&pfs->m_mutex_stat.m_wait_stat);
}
-void PFS_instance_wait_visitor::visit_rwlock_class(PFS_rwlock_class *pfs)
+void PFS_instance_wait_visitor::visit_rwlock_class(PFS_rwlock_class *pfs)
{
- uint index= pfs->m_event_name_index;
- m_stat.aggregate(& global_instr_class_waits_array[index]);
+ m_stat.aggregate(&pfs->m_rwlock_stat.m_wait_stat);
}
-void PFS_instance_wait_visitor::visit_cond_class(PFS_cond_class *pfs)
+void PFS_instance_wait_visitor::visit_cond_class(PFS_cond_class *pfs)
{
- uint index= pfs->m_event_name_index;
- m_stat.aggregate(& global_instr_class_waits_array[index]);
+ m_stat.aggregate(&pfs->m_cond_stat.m_wait_stat);
}
-void PFS_instance_wait_visitor::visit_file_class(PFS_file_class *pfs)
+void PFS_instance_wait_visitor::visit_file_class(PFS_file_class *pfs)
{
- uint index= pfs->m_event_name_index;
- m_stat.aggregate(& global_instr_class_waits_array[index]);
+ pfs->m_file_stat.m_io_stat.sum_waits(&m_stat);
}
-void PFS_instance_wait_visitor::visit_socket_class(PFS_socket_class *pfs)
+void PFS_instance_wait_visitor::visit_socket_class(PFS_socket_class *pfs)
{
- /* Collect global wait stats */
- uint index= pfs->m_event_name_index;
- m_stat.aggregate(&global_instr_class_waits_array[index]);
-
- /* If deferred, then pull wait stats directly from the socket class. */
- if (pfs->is_deferred())
- pfs->m_socket_stat.m_io_stat.sum_waits(&m_stat);
+ pfs->m_socket_stat.m_io_stat.sum_waits(&m_stat);
}
-void PFS_instance_wait_visitor::visit_mutex(PFS_mutex *pfs)
+void PFS_instance_wait_visitor::visit_mutex(PFS_mutex *pfs)
{
- m_stat.aggregate(& pfs->m_wait_stat);
+ m_stat.aggregate(& pfs->m_mutex_stat.m_wait_stat);
}
-void PFS_instance_wait_visitor::visit_rwlock(PFS_rwlock *pfs)
+void PFS_instance_wait_visitor::visit_rwlock(PFS_rwlock *pfs)
{
- m_stat.aggregate(& pfs->m_wait_stat);
+ m_stat.aggregate(& pfs->m_rwlock_stat.m_wait_stat);
}
-void PFS_instance_wait_visitor::visit_cond(PFS_cond *pfs)
+void PFS_instance_wait_visitor::visit_cond(PFS_cond *pfs)
{
- m_stat.aggregate(& pfs->m_wait_stat);
+ m_stat.aggregate(& pfs->m_cond_stat.m_wait_stat);
}
void PFS_instance_wait_visitor::visit_file(PFS_file *pfs)
@@ -959,23 +949,24 @@ PFS_object_wait_visitor::~PFS_object_wait_visitor()
void PFS_object_wait_visitor::visit_global()
{
- uint index;
-
- index= global_table_io_class.m_event_name_index;
- m_stat.aggregate(& global_instr_class_waits_array[index]);
-
- index= global_table_lock_class.m_event_name_index;
- m_stat.aggregate(& global_instr_class_waits_array[index]);
+ global_table_io_stat.sum(& m_stat);
+ global_table_lock_stat.sum(& m_stat);
}
void PFS_object_wait_visitor::visit_table_share(PFS_table_share *pfs)
{
- pfs->m_table_stat.sum(& m_stat);
+ uint safe_key_count= sanitize_index_count(pfs->m_key_count);
+ pfs->m_table_stat.sum(& m_stat, safe_key_count);
}
void PFS_object_wait_visitor::visit_table(PFS_table *pfs)
{
- pfs->m_table_stat.sum(& m_stat);
+ PFS_table_share *table_share= sanitize_table_share(pfs->m_share);
+ if (table_share != NULL)
+ {
+ uint safe_key_count= sanitize_index_count(table_share->m_key_count);
+ pfs->m_table_stat.sum(& m_stat, safe_key_count);
+ }
}
PFS_table_io_wait_visitor::PFS_table_io_wait_visitor()
@@ -986,21 +977,21 @@ PFS_table_io_wait_visitor::~PFS_table_io_wait_visitor()
void PFS_table_io_wait_visitor::visit_global()
{
- uint index= global_table_io_class.m_event_name_index;
- m_stat.aggregate(& global_instr_class_waits_array[index]);
+ global_table_io_stat.sum(& m_stat);
}
void PFS_table_io_wait_visitor::visit_table_share(PFS_table_share *pfs)
{
PFS_table_io_stat io_stat;
+ uint safe_key_count= sanitize_index_count(pfs->m_key_count);
uint index;
/* Aggregate index stats */
- for (index= 0; index < pfs->m_key_count; index++)
+ for (index= 0; index < safe_key_count; index++)
io_stat.aggregate(& pfs->m_table_stat.m_index_stat[index]);
/* Aggregate global stats */
- io_stat.aggregate(& pfs->m_table_stat.m_index_stat[MAX_KEY]);
+ io_stat.aggregate(& pfs->m_table_stat.m_index_stat[MAX_INDEXES]);
io_stat.sum(& m_stat);
}
@@ -1012,14 +1003,15 @@ void PFS_table_io_wait_visitor::visit_table(PFS_table *pfs)
if (likely(safe_share != NULL))
{
PFS_table_io_stat io_stat;
+ uint safe_key_count= sanitize_index_count(safe_share->m_key_count);
uint index;
/* Aggregate index stats */
- for (index= 0; index < safe_share->m_key_count; index++)
+ for (index= 0; index < safe_key_count; index++)
io_stat.aggregate(& pfs->m_table_stat.m_index_stat[index]);
/* Aggregate global stats */
- io_stat.aggregate(& pfs->m_table_stat.m_index_stat[MAX_KEY]);
+ io_stat.aggregate(& pfs->m_table_stat.m_index_stat[MAX_INDEXES]);
io_stat.sum(& m_stat);
}
@@ -1035,14 +1027,15 @@ PFS_table_io_stat_visitor::~PFS_table_io_stat_visitor()
void PFS_table_io_stat_visitor::visit_table_share(PFS_table_share *pfs)
{
+ uint safe_key_count= sanitize_index_count(pfs->m_key_count);
uint index;
/* Aggregate index stats */
- for (index= 0; index < pfs->m_key_count; index++)
+ for (index= 0; index < safe_key_count; index++)
m_stat.aggregate(& pfs->m_table_stat.m_index_stat[index]);
/* Aggregate global stats */
- m_stat.aggregate(& pfs->m_table_stat.m_index_stat[MAX_KEY]);
+ m_stat.aggregate(& pfs->m_table_stat.m_index_stat[MAX_INDEXES]);
}
void PFS_table_io_stat_visitor::visit_table(PFS_table *pfs)
@@ -1051,14 +1044,15 @@ void PFS_table_io_stat_visitor::visit_table(PFS_table *pfs)
if (likely(safe_share != NULL))
{
+ uint safe_key_count= sanitize_index_count(safe_share->m_key_count);
uint index;
/* Aggregate index stats */
- for (index= 0; index < safe_share->m_key_count; index++)
+ for (index= 0; index < safe_key_count; index++)
m_stat.aggregate(& pfs->m_table_stat.m_index_stat[index]);
/* Aggregate global stats */
- m_stat.aggregate(& pfs->m_table_stat.m_index_stat[MAX_KEY]);
+ m_stat.aggregate(& pfs->m_table_stat.m_index_stat[MAX_INDEXES]);
}
}
@@ -1090,8 +1084,7 @@ PFS_table_lock_wait_visitor::~PFS_table_lock_wait_visitor()
void PFS_table_lock_wait_visitor::visit_global()
{
- uint index= global_table_lock_class.m_event_name_index;
- m_stat.aggregate(& global_instr_class_waits_array[index]);
+ global_table_lock_stat.sum(& m_stat);
}
void PFS_table_lock_wait_visitor::visit_table_share(PFS_table_share *pfs)
diff --git a/storage/perfschema/table_esgs_by_thread_by_event_name.cc b/storage/perfschema/table_esgs_by_thread_by_event_name.cc
index 2a69ec24277..eeef6c3fbb2 100644
--- a/storage/perfschema/table_esgs_by_thread_by_event_name.cc
+++ b/storage/perfschema/table_esgs_by_thread_by_event_name.cc
@@ -33,7 +33,7 @@ static const TABLE_FIELD_TYPE field_types[]=
{
{
{ C_STRING_WITH_LEN("THREAD_ID") },
- { C_STRING_WITH_LEN("int(11)") },
+ { C_STRING_WITH_LEN("bigint(20)") },
{ NULL, 0}
},
{
@@ -212,7 +212,7 @@ int table_esgs_by_thread_by_event_name
switch(f->field_index)
{
case 0: /* THREAD_ID */
- set_field_ulong(f, m_row.m_thread_internal_id);
+ set_field_ulonglong(f, m_row.m_thread_internal_id);
break;
case 1: /* NAME */
m_row.m_event_name.set_field(f);
diff --git a/storage/perfschema/table_esgs_by_thread_by_event_name.h b/storage/perfschema/table_esgs_by_thread_by_event_name.h
index 049c8997396..5295a9eacdf 100644
--- a/storage/perfschema/table_esgs_by_thread_by_event_name.h
+++ b/storage/perfschema/table_esgs_by_thread_by_event_name.h
@@ -39,7 +39,7 @@
struct row_esgs_by_thread_by_event_name
{
/** Column THREAD_ID. */
- ulong m_thread_internal_id;
+ ulonglong m_thread_internal_id;
/** Column EVENT_NAME. */
PFS_event_name_row m_event_name;
/** Columns COUNT_STAR, SUM/MIN/AVG/MAX TIMER_WAIT. */
diff --git a/storage/perfschema/table_esgs_global_by_event_name.cc b/storage/perfschema/table_esgs_global_by_event_name.cc
index 2ac22fb1551..276ac8d7704 100644
--- a/storage/perfschema/table_esgs_global_by_event_name.cc
+++ b/storage/perfschema/table_esgs_global_by_event_name.cc
@@ -95,6 +95,9 @@ int
table_esgs_global_by_event_name::delete_all_rows(void)
{
reset_events_stages_by_thread();
+ reset_events_stages_by_account();
+ reset_events_stages_by_user();
+ reset_events_stages_by_host();
reset_events_stages_global();
return 0;
}
diff --git a/storage/perfschema/table_esms_by_digest.cc b/storage/perfschema/table_esms_by_digest.cc
index dac8d3b01dc..d0250c14e5d 100644
--- a/storage/perfschema/table_esms_by_digest.cc
+++ b/storage/perfschema/table_esms_by_digest.cc
@@ -36,6 +36,11 @@ THR_LOCK table_esms_by_digest::m_table_lock;
static const TABLE_FIELD_TYPE field_types[]=
{
{
+ { C_STRING_WITH_LEN("SCHEMA_NAME") },
+ { C_STRING_WITH_LEN("varchar(64)") },
+ { NULL, 0}
+ },
+ {
{ C_STRING_WITH_LEN("DIGEST") },
{ C_STRING_WITH_LEN("varchar(32)") },
{ NULL, 0}
@@ -45,7 +50,7 @@ static const TABLE_FIELD_TYPE field_types[]=
{ C_STRING_WITH_LEN("longtext") },
{ NULL, 0}
},
- {
+ {
{ C_STRING_WITH_LEN("COUNT_STAR") },
{ C_STRING_WITH_LEN("bigint(20)") },
{ NULL, 0}
@@ -170,7 +175,7 @@ static const TABLE_FIELD_TYPE field_types[]=
{ C_STRING_WITH_LEN("timestamp") },
{ NULL, 0}
},
- {
+ {
{ C_STRING_WITH_LEN("LAST_SEEN") },
{ C_STRING_WITH_LEN("timestamp") },
{ NULL, 0}
@@ -179,7 +184,7 @@ static const TABLE_FIELD_TYPE field_types[]=
TABLE_FIELD_DEF
table_esms_by_digest::m_field_def=
-{ 28, field_types };
+{ 29, field_types };
PFS_engine_table_share
table_esms_by_digest::m_share=
@@ -303,18 +308,19 @@ int table_esms_by_digest
{
switch(f->field_index)
{
- case 0: /* DIGEST */
- case 1: /* DIGEST_TEXT */
+ case 0: /* SCHEMA_NAME */
+ case 1: /* DIGEST */
+ case 2: /* DIGEST_TEXT */
m_row.m_digest.set_field(f->field_index, f);
break;
- case 26: /* FIRST_SEEN */
+ case 27: /* FIRST_SEEN */
set_field_timestamp(f, m_row.m_first_seen);
break;
- case 27: /* LAST_SEEN */
+ case 28: /* LAST_SEEN */
set_field_timestamp(f, m_row.m_last_seen);
break;
- default: /* 1, ... COUNT/SUM/MIN/AVG/MAX */
- m_row.m_stat.set_field(f->field_index - 2, f);
+ default: /* 3, ... COUNT/SUM/MIN/AVG/MAX */
+ m_row.m_stat.set_field(f->field_index - 3, f);
break;
}
}
diff --git a/storage/perfschema/table_esms_by_thread_by_event_name.cc b/storage/perfschema/table_esms_by_thread_by_event_name.cc
index 5a7faca1b79..fccdf5dea60 100644
--- a/storage/perfschema/table_esms_by_thread_by_event_name.cc
+++ b/storage/perfschema/table_esms_by_thread_by_event_name.cc
@@ -33,7 +33,7 @@ static const TABLE_FIELD_TYPE field_types[]=
{
{
{ C_STRING_WITH_LEN("THREAD_ID") },
- { C_STRING_WITH_LEN("int(11)") },
+ { C_STRING_WITH_LEN("bigint(20)") },
{ NULL, 0}
},
{
@@ -308,7 +308,7 @@ int table_esms_by_thread_by_event_name
switch(f->field_index)
{
case 0: /* THREAD_ID */
- set_field_ulong(f, m_row.m_thread_internal_id);
+ set_field_ulonglong(f, m_row.m_thread_internal_id);
break;
case 1: /* EVENT_NAME */
m_row.m_event_name.set_field(f);
diff --git a/storage/perfschema/table_esms_by_thread_by_event_name.h b/storage/perfschema/table_esms_by_thread_by_event_name.h
index 2f36606a5e1..9fb9f7c58dc 100644
--- a/storage/perfschema/table_esms_by_thread_by_event_name.h
+++ b/storage/perfschema/table_esms_by_thread_by_event_name.h
@@ -39,7 +39,7 @@
struct row_esms_by_thread_by_event_name
{
/** Column THREAD_ID. */
- ulong m_thread_internal_id;
+ ulonglong m_thread_internal_id;
/** Column EVENT_NAME. */
PFS_event_name_row m_event_name;
/** Columns COUNT_STAR, SUM/MIN/AVG/MAX TIMER_WAIT. */
diff --git a/storage/perfschema/table_esms_global_by_event_name.cc b/storage/perfschema/table_esms_global_by_event_name.cc
index 22c87f09137..efcb5b6fa7c 100644
--- a/storage/perfschema/table_esms_global_by_event_name.cc
+++ b/storage/perfschema/table_esms_global_by_event_name.cc
@@ -190,6 +190,9 @@ int
table_esms_global_by_event_name::delete_all_rows(void)
{
reset_events_statements_by_thread();
+ reset_events_statements_by_account();
+ reset_events_statements_by_user();
+ reset_events_statements_by_host();
reset_events_statements_global();
return 0;
}
diff --git a/storage/perfschema/table_events_stages.cc b/storage/perfschema/table_events_stages.cc
index e438249fbd3..854e1be15cd 100644
--- a/storage/perfschema/table_events_stages.cc
+++ b/storage/perfschema/table_events_stages.cc
@@ -32,7 +32,7 @@ static const TABLE_FIELD_TYPE field_types[]=
{
{
{ C_STRING_WITH_LEN("THREAD_ID") },
- { C_STRING_WITH_LEN("int(11)") },
+ { C_STRING_WITH_LEN("bigint(20)") },
{ NULL, 0}
},
{
@@ -207,7 +207,7 @@ int table_events_stages_common::read_row_values(TABLE *table,
switch(f->field_index)
{
case 0: /* THREAD_ID */
- set_field_ulong(f, m_row.m_thread_internal_id);
+ set_field_ulonglong(f, m_row.m_thread_internal_id);
break;
case 1: /* EVENT_ID */
set_field_ulonglong(f, m_row.m_event_id);
diff --git a/storage/perfschema/table_events_stages.h b/storage/perfschema/table_events_stages.h
index 6bc712c15a5..09c555c80fd 100644
--- a/storage/perfschema/table_events_stages.h
+++ b/storage/perfschema/table_events_stages.h
@@ -36,7 +36,7 @@ struct PFS_thread;
struct row_events_stages
{
/** Column THREAD_ID. */
- ulong m_thread_internal_id;
+ ulonglong m_thread_internal_id;
/** Column EVENT_ID. */
ulonglong m_event_id;
/** Column END_EVENT_ID. */
diff --git a/storage/perfschema/table_events_statements.cc b/storage/perfschema/table_events_statements.cc
index d453b14470f..fb2b4b242d4 100644
--- a/storage/perfschema/table_events_statements.cc
+++ b/storage/perfschema/table_events_statements.cc
@@ -35,7 +35,7 @@ static const TABLE_FIELD_TYPE field_types[]=
{
{
{ C_STRING_WITH_LEN("THREAD_ID") },
- { C_STRING_WITH_LEN("int(11)") },
+ { C_STRING_WITH_LEN("bigint(20)") },
{ NULL, 0}
},
{
@@ -372,7 +372,7 @@ void table_events_statements_common::make_row(PFS_events_statements *statement)
PSI_digest_storage *digest= & statement->m_digest_storage;
if (digest->m_byte_count > 0)
{
- PFS_digest_hash md5;
+ PFS_digest_key md5;
compute_md5_hash((char *) md5.m_md5,
(char *) digest->m_token_array,
digest->m_byte_count);
@@ -420,7 +420,7 @@ int table_events_statements_common::read_row_values(TABLE *table,
switch(f->field_index)
{
case 0: /* THREAD_ID */
- set_field_ulong(f, m_row.m_thread_internal_id);
+ set_field_ulonglong(f, m_row.m_thread_internal_id);
break;
case 1: /* EVENT_ID */
set_field_ulonglong(f, m_row.m_event_id);
diff --git a/storage/perfschema/table_events_statements.h b/storage/perfschema/table_events_statements.h
index acd82de4fcf..dcc6611f555 100644
--- a/storage/perfschema/table_events_statements.h
+++ b/storage/perfschema/table_events_statements.h
@@ -37,7 +37,7 @@ struct PFS_thread;
struct row_events_statements
{
/** Column THREAD_ID. */
- ulong m_thread_internal_id;
+ ulonglong m_thread_internal_id;
/** Column EVENT_ID. */
ulonglong m_event_id;
/** Column END_EVENT_ID. */
diff --git a/storage/perfschema/table_events_waits.cc b/storage/perfschema/table_events_waits.cc
index 245abc34695..8fb7ca91c44 100644
--- a/storage/perfschema/table_events_waits.cc
+++ b/storage/perfschema/table_events_waits.cc
@@ -34,7 +34,7 @@ static const TABLE_FIELD_TYPE field_types[]=
{
{
{ C_STRING_WITH_LEN("THREAD_ID") },
- { C_STRING_WITH_LEN("int(11)") },
+ { C_STRING_WITH_LEN("bigint(20)") },
{ NULL, 0}
},
{
@@ -239,7 +239,8 @@ int table_events_waits_common::make_table_object_columns(volatile PFS_events_wai
/* INDEX NAME */
safe_index= wait->m_index;
- if (safe_index < MAX_KEY && safe_index < safe_table_share->m_key_count)
+ uint safe_key_count= sanitize_index_count(safe_table_share->m_key_count);
+ if (safe_index < safe_key_count)
{
PFS_table_key *key= & safe_table_share->m_keys[safe_index];
m_row.m_index_name_length= key->m_name_length;
@@ -602,7 +603,7 @@ int table_events_waits_common::read_row_values(TABLE *table,
switch(f->field_index)
{
case 0: /* THREAD_ID */
- set_field_ulong(f, m_row.m_thread_internal_id);
+ set_field_ulonglong(f, m_row.m_thread_internal_id);
break;
case 1: /* EVENT_ID */
set_field_ulonglong(f, m_row.m_event_id);
diff --git a/storage/perfschema/table_events_waits.h b/storage/perfschema/table_events_waits.h
index 72065c765ca..065bf95e5a6 100644
--- a/storage/perfschema/table_events_waits.h
+++ b/storage/perfschema/table_events_waits.h
@@ -36,7 +36,7 @@ struct PFS_thread;
struct row_events_waits
{
/** Column THREAD_ID. */
- ulong m_thread_internal_id;
+ ulonglong m_thread_internal_id;
/** Column EVENT_ID. */
ulonglong m_event_id;
/** Column END_EVENT_ID. */
diff --git a/storage/perfschema/table_events_waits_summary.cc b/storage/perfschema/table_events_waits_summary.cc
index 2a144a07344..f437e83f3ff 100644
--- a/storage/perfschema/table_events_waits_summary.cc
+++ b/storage/perfschema/table_events_waits_summary.cc
@@ -139,7 +139,7 @@ void table_events_waits_summary_by_instance::make_mutex_row(PFS_mutex *pfs)
if (unlikely(safe_class == NULL))
return;
- make_instr_row(pfs, safe_class, pfs->m_identity, &pfs->m_wait_stat);
+ make_instr_row(pfs, safe_class, pfs->m_identity, &pfs->m_mutex_stat.m_wait_stat);
}
/**
@@ -153,7 +153,7 @@ void table_events_waits_summary_by_instance::make_rwlock_row(PFS_rwlock *pfs)
if (unlikely(safe_class == NULL))
return;
- make_instr_row(pfs, safe_class, pfs->m_identity, &pfs->m_wait_stat);
+ make_instr_row(pfs, safe_class, pfs->m_identity, &pfs->m_rwlock_stat.m_wait_stat);
}
/**
@@ -167,7 +167,7 @@ void table_events_waits_summary_by_instance::make_cond_row(PFS_cond *pfs)
if (unlikely(safe_class == NULL))
return;
- make_instr_row(pfs, safe_class, pfs->m_identity, &pfs->m_wait_stat);
+ make_instr_row(pfs, safe_class, pfs->m_identity, &pfs->m_cond_stat.m_wait_stat);
}
/**
@@ -181,11 +181,13 @@ void table_events_waits_summary_by_instance::make_file_row(PFS_file *pfs)
if (unlikely(safe_class == NULL))
return;
+ PFS_single_stat sum;
+ pfs->m_file_stat.m_io_stat.sum_waits(& sum);
/*
Files don't have a in memory structure associated to it,
so we use the address of the PFS_file buffer as object_instance_begin
*/
- make_instr_row(pfs, safe_class, pfs, &pfs->m_wait_stat);
+ make_instr_row(pfs, safe_class, pfs, & sum);
}
/**
diff --git a/storage/perfschema/table_ews_by_thread_by_event_name.cc b/storage/perfschema/table_ews_by_thread_by_event_name.cc
index 25e3cf395c4..4db97b1c98c 100644
--- a/storage/perfschema/table_ews_by_thread_by_event_name.cc
+++ b/storage/perfschema/table_ews_by_thread_by_event_name.cc
@@ -33,7 +33,7 @@ static const TABLE_FIELD_TYPE field_types[]=
{
{
{ C_STRING_WITH_LEN("THREAD_ID") },
- { C_STRING_WITH_LEN("int(11)") },
+ { C_STRING_WITH_LEN("bigint(20)") },
{ NULL, 0}
},
{
@@ -282,7 +282,7 @@ int table_ews_by_thread_by_event_name
switch(f->field_index)
{
case 0: /* THREAD_ID */
- set_field_ulong(f, m_row.m_thread_internal_id);
+ set_field_ulonglong(f, m_row.m_thread_internal_id);
break;
case 1: /* EVENT_NAME */
m_row.m_event_name.set_field(f);
diff --git a/storage/perfschema/table_ews_by_thread_by_event_name.h b/storage/perfschema/table_ews_by_thread_by_event_name.h
index b0710bb8a57..989356be646 100644
--- a/storage/perfschema/table_ews_by_thread_by_event_name.h
+++ b/storage/perfschema/table_ews_by_thread_by_event_name.h
@@ -39,7 +39,7 @@
struct row_ews_by_thread_by_event_name
{
/** Column THREAD_ID. */
- ulong m_thread_internal_id;
+ ulonglong m_thread_internal_id;
/** Column EVENT_NAME. */
PFS_event_name_row m_event_name;
/** Columns COUNT_STAR, SUM/MIN/AVG/MAX TIMER_WAIT. */
diff --git a/storage/perfschema/table_ews_global_by_event_name.cc b/storage/perfschema/table_ews_global_by_event_name.cc
index 3f8997b8c77..bc5c3780ecf 100644
--- a/storage/perfschema/table_ews_global_by_event_name.cc
+++ b/storage/perfschema/table_ews_global_by_event_name.cc
@@ -97,7 +97,7 @@ table_ews_global_by_event_name::delete_all_rows(void)
reset_events_waits_by_instance();
reset_table_waits_by_table_handle();
reset_table_waits_by_table();
- reset_events_waits_global();
+ reset_events_waits_by_class();
return 0;
}
@@ -121,9 +121,6 @@ int table_ews_global_by_event_name::rnd_next(void)
PFS_socket_class *socket_class;
PFS_instr_class *instr_class;
- if (global_instr_class_waits_array == NULL)
- return HA_ERR_END_OF_FILE;
-
for (m_pos.set_at(&m_next_pos);
m_pos.has_more_view();
m_pos.next_view())
@@ -218,9 +215,6 @@ table_ews_global_by_event_name::rnd_pos(const void *pos)
set_position(pos);
- if (global_instr_class_waits_array == NULL)
- return HA_ERR_END_OF_FILE;
-
switch (m_pos.m_index_1)
{
case pos_ews_global_by_event_name::VIEW_MUTEX:
diff --git a/storage/perfschema/table_helper.cc b/storage/perfschema/table_helper.cc
index d3954179539..9f803434ab6 100644
--- a/storage/perfschema/table_helper.cc
+++ b/storage/perfschema/table_helper.cc
@@ -110,26 +110,30 @@ int PFS_digest_row::make_row(PFS_statements_digest_stat* pfs)
*/
if (pfs->m_digest_storage.m_byte_count != 0)
{
+ m_schema_name_length= pfs->m_digest_key.m_schema_name_length;
+ if (m_schema_name_length > 0)
+ memcpy(m_schema_name, pfs->m_digest_key.m_schema_name, m_schema_name_length);
/*
Calculate digest from MD5 HASH collected to be shown as
DIGEST in this row.
*/
- MD5_HASH_TO_STRING(pfs->m_digest_hash.m_md5, m_digest);
+ MD5_HASH_TO_STRING(pfs->m_digest_key.m_md5, m_digest);
m_digest_length= MD5_HASH_TO_STRING_LENGTH;
- /*
- Caclulate digest_text information from the token array collected
+ /*
+ Calculate digest_text information from the token array collected
to be shown as DIGEST_TEXT column.
- */
+ */
get_digest_text(m_digest_text, &pfs->m_digest_storage);
m_digest_text_length= strlen(m_digest_text);
}
else
{
+ m_schema_name_length= 0;
m_digest_length= 0;
m_digest_text_length= 0;
}
-
+
return 0;
}
@@ -137,14 +141,21 @@ void PFS_digest_row::set_field(uint index, Field *f)
{
switch (index)
{
- case 0: /* DIGEST */
+ case 0: /* SCHEMA_NAME */
+ if (m_schema_name_length > 0)
+ PFS_engine_table::set_field_varchar_utf8(f, m_schema_name,
+ m_schema_name_length);
+ else
+ f->set_null();
+ break;
+ case 1: /* DIGEST */
if (m_digest_length > 0)
PFS_engine_table::set_field_varchar_utf8(f, m_digest,
m_digest_length);
else
f->set_null();
break;
- case 1: /* DIGEST_TEXT */
+ case 2: /* DIGEST_TEXT */
if (m_digest_text_length > 0)
PFS_engine_table::set_field_longtext_utf8(f, m_digest_text,
m_digest_text_length);
@@ -199,7 +210,7 @@ int PFS_index_row::make_row(PFS_table_share *pfs, uint table_index)
if (m_object_row.make_row(pfs))
return 1;
- if (table_index < MAX_KEY)
+ if (table_index < MAX_INDEXES)
{
PFS_table_key *key= &pfs->m_keys[table_index];
m_index_name_length= key->m_name_length;
diff --git a/storage/perfschema/table_helper.h b/storage/perfschema/table_helper.h
index 798ff16f4e5..769122570eb 100644
--- a/storage/perfschema/table_helper.h
+++ b/storage/perfschema/table_helper.h
@@ -127,6 +127,10 @@ struct PFS_account_row
/** Row fragment for columns DIGEST, DIGEST_TEXT. */
struct PFS_digest_row
{
+ /** Column SCHEMA_NAME. */
+ char m_schema_name[NAME_LEN];
+ /** Length in bytes of @c m_schema_name. */
+ uint m_schema_name_length;
/** Column DIGEST. */
char m_digest[COL_DIGEST_SIZE];
/** Length in bytes of @c m_digest. */
diff --git a/storage/perfschema/table_host_cache.cc b/storage/perfschema/table_host_cache.cc
index d243204ddcd..02c7f72140a 100644
--- a/storage/perfschema/table_host_cache.cc
+++ b/storage/perfschema/table_host_cache.cc
@@ -23,8 +23,6 @@
#include "table_host_cache.h"
#include "hostname.h"
-#ifdef NOT_YET_PORTED
-
THR_LOCK table_host_cache::m_table_lock;
static const TABLE_FIELD_TYPE field_types[]=
@@ -266,9 +264,6 @@ void table_host_cache::materialize(THD *thd)
index++;
row++;
current= current->next();
- /* Host cache is a circular linked list. */
- if (current == first)
- break;
}
m_all_rows= rows;
@@ -481,4 +476,3 @@ int table_host_cache::read_row_values(TABLE *table,
return 0;
}
-#endif /* NOT_YET_PORTED */
diff --git a/storage/perfschema/table_os_global_by_type.cc b/storage/perfschema/table_os_global_by_type.cc
index 82d176cd5b2..70d9d6819ac 100644
--- a/storage/perfschema/table_os_global_by_type.cc
+++ b/storage/perfschema/table_os_global_by_type.cc
@@ -174,6 +174,7 @@ void table_os_global_by_type::make_row(PFS_table_share *share)
{
pfs_lock lock;
PFS_single_stat cumulated_stat;
+ uint safe_key_count;
m_row_exists= false;
@@ -184,7 +185,11 @@ void table_os_global_by_type::make_row(PFS_table_share *share)
m_row.m_schema_name_length= share->m_schema_name_length;
memcpy(m_row.m_object_name, share->m_table_name, share->m_table_name_length);
m_row.m_object_name_length= share->m_table_name_length;
- share->m_table_stat.sum(& cumulated_stat);
+
+ /* This is a dirty read, some thread can write data while we are reading it */
+ safe_key_count= sanitize_index_count(share->m_key_count);
+
+ share->m_table_stat.sum(& cumulated_stat, safe_key_count);
if (! share->m_lock.end_optimistic_lock(&lock))
return;
@@ -204,7 +209,7 @@ void table_os_global_by_type::make_row(PFS_table_share *share)
If the opened table handle is for this table share,
aggregate the table handle statistics.
*/
- table->m_table_stat.sum(& cumulated_stat);
+ table->m_table_stat.sum(& cumulated_stat, safe_key_count);
}
}
}
diff --git a/storage/perfschema/table_session_account_connect_attrs.cc b/storage/perfschema/table_session_account_connect_attrs.cc
new file mode 100644
index 00000000000..4a3fcc22341
--- /dev/null
+++ b/storage/perfschema/table_session_account_connect_attrs.cc
@@ -0,0 +1,70 @@
+/* Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software Foundation,
+ 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */
+
+#include "table_session_account_connect_attrs.h"
+
+THR_LOCK table_session_account_connect_attrs::m_table_lock;
+
+PFS_engine_table_share
+table_session_account_connect_attrs::m_share=
+{
+ { C_STRING_WITH_LEN("session_account_connect_attrs") },
+ &pfs_readonly_acl,
+ &table_session_account_connect_attrs::create,
+ NULL, /* write_row */
+ NULL, /* delete_all_rows */
+ NULL, /* get_row_count */
+ 1000, /* records */
+ sizeof(pos_connect_attr_by_thread_by_attr), /* ref length */
+ &m_table_lock,
+ &m_field_def,
+ false /* checked */
+};
+
+PFS_engine_table* table_session_account_connect_attrs::create()
+{
+ return new table_session_account_connect_attrs();
+}
+
+table_session_account_connect_attrs::table_session_account_connect_attrs()
+ : table_session_connect(&m_share)
+{}
+
+bool
+table_session_account_connect_attrs::thread_fits(PFS_thread *thread)
+{
+ PFS_thread *current_thread= PFS_thread::get_current_thread();
+ /* The current thread may not have instrumentation attached. */
+ if (current_thread == NULL)
+ return false;
+
+ /* The thread we compare to, by definition, has some instrumentation. */
+ DBUG_ASSERT(thread != NULL);
+
+ uint username_length= current_thread->m_username_length;
+ uint hostname_length= current_thread->m_hostname_length;
+
+ if ( (thread->m_username_length != username_length)
+ || (thread->m_hostname_length != hostname_length))
+ return false;
+
+ if (memcmp(thread->m_username, current_thread->m_username, username_length) != 0)
+ return false;
+
+ if (memcmp(thread->m_hostname, current_thread->m_hostname, hostname_length) != 0)
+ return false;
+
+ return true;
+}
diff --git a/storage/perfschema/table_session_account_connect_attrs.h b/storage/perfschema/table_session_account_connect_attrs.h
new file mode 100644
index 00000000000..ba8893e7cad
--- /dev/null
+++ b/storage/perfschema/table_session_account_connect_attrs.h
@@ -0,0 +1,50 @@
+/* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software Foundation,
+ 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */
+
+#ifndef TABLE_SESSION_ACCOUNT_CONNECT_ATTRS_H
+#define TABLE_SESSION_ACCOUNT_CONNECT_ATTRS_H
+
+#include "table_session_connect.h"
+/**
+ \addtogroup Performance_schema_tables
+ @{
+*/
+
+/** Table PERFORMANCE_SCHEMA.SESSION_ACCOUNT_CONNECT_ATTRS. */
+class table_session_account_connect_attrs : public table_session_connect
+{
+public:
+ /** Table share */
+ static PFS_engine_table_share m_share;
+ /** Table builder */
+ static PFS_engine_table* create();
+
+protected:
+ table_session_account_connect_attrs();
+
+public:
+ ~table_session_account_connect_attrs()
+ {}
+
+protected:
+ virtual bool thread_fits(PFS_thread *thread);
+
+private:
+ /** Table share lock. */
+ static THR_LOCK m_table_lock;
+};
+
+/** @} */
+#endif
diff --git a/storage/perfschema/table_session_connect.cc b/storage/perfschema/table_session_connect.cc
new file mode 100644
index 00000000000..bd905b5756c
--- /dev/null
+++ b/storage/perfschema/table_session_connect.cc
@@ -0,0 +1,268 @@
+/* Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software Foundation,
+ 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */
+
+#include "table_session_connect.h"
+
+static const TABLE_FIELD_TYPE field_types[]=
+{
+ {
+ { C_STRING_WITH_LEN("PROCESSLIST_ID") },
+ { C_STRING_WITH_LEN("int(11)") },
+ { NULL, 0}
+ },
+ {
+ { C_STRING_WITH_LEN("ATTR_NAME") },
+ { C_STRING_WITH_LEN("varchar(32)") },
+ { NULL, 0}
+ },
+ {
+ { C_STRING_WITH_LEN("ATTR_VALUE") },
+ { C_STRING_WITH_LEN("varchar(1024)") },
+ { NULL, 0}
+ },
+ {
+ { C_STRING_WITH_LEN("ORDINAL_POSITION") },
+ { C_STRING_WITH_LEN("int(11)") },
+ { NULL, 0}
+ }
+};
+
+TABLE_FIELD_DEF table_session_connect::m_field_def=
+{ 4, field_types };
+
+table_session_connect::table_session_connect(const PFS_engine_table_share *share) :
+ cursor_by_thread_connect_attr(share)
+{}
+
+/**
+ Take a length encoded string
+
+ @arg ptr inout the input string array
+ @arg dest where to store the result
+ @arg dest_size max size of @c dest
+ @arg copied_len the actual length of the data copied
+ @arg start_ptr pointer to the start of input
+ @arg input_length the length of the incoming data
+ @arg copy_data copy the data or just skip the input
+ @arg from_cs character set in which @c ptr is encoded
+ @arg nchars_max maximum number of characters to read
+ @return status
+ @retval true parsing failed
+ @retval false parsing succeeded
+*/
+bool parse_length_encoded_string(const char **ptr,
+ char *dest, uint dest_size,
+ uint *copied_len,
+ const char *start_ptr, uint input_length,
+ bool copy_data,
+ const CHARSET_INFO *from_cs,
+ uint nchars_max)
+{
+ ulong copy_length, data_length;
+ const char *well_formed_error_pos= NULL, *cannot_convert_error_pos= NULL,
+ *from_end_pos= NULL;
+
+ copy_length= data_length= net_field_length((uchar **) ptr);
+
+ /* we don't tolerate NULL as a length */
+ if (data_length == NULL_LENGTH)
+ return true;
+
+ if (*ptr - start_ptr + data_length > input_length)
+ return true;
+
+ copy_length= well_formed_copy_nchars(&my_charset_utf8_bin, dest, dest_size,
+ from_cs, *ptr, data_length, nchars_max,
+ &well_formed_error_pos,
+ &cannot_convert_error_pos,
+ &from_end_pos);
+ *copied_len= copy_length;
+ (*ptr)+= data_length;
+
+ return false;
+}
+
+/**
+ Take the nth attribute name/value pair
+
+ Parse the attributes blob form the beginning, skipping the attributes
+ whose number is lower than the one we seek.
+ When we reach the attribute at an index we're looking for the values
+ are copied to the output parameters.
+ If parsing fails or no more attributes are found the function stops
+ and returns an error code.
+
+ @arg connect_attrs pointer to the connect attributes blob
+ @arg connect_attrs_length length of @c connect_attrs
+ @arg connect_attrs_cs character set used to encode @c connect_attrs
+ @arg ordinal index of the attribute we need
+ @arg attr_name [out] buffer to receive the attribute name
+ @arg max_attr_name max size of @c attr_name in bytes
+ @arg attr_name_length [out] number of bytes written in @attr_name
+ @arg attr_value [out] buffer to receive the attribute name
+ @arg max_attr_value max size of @c attr_value in bytes
+ @arg attr_value_length [out] number of bytes written in @attr_value
+ @return status
+ @retval true requested attribute pair is found and copied
+ @retval false error. Either because of parsing or too few attributes.
+*/
+bool read_nth_attr(const char *connect_attrs,
+ uint connect_attrs_length,
+ const CHARSET_INFO *connect_attrs_cs,
+ uint ordinal,
+ char *attr_name, uint max_attr_name,
+ uint *attr_name_length,
+ char *attr_value, uint max_attr_value,
+ uint *attr_value_length)
+{
+ uint idx;
+ const char *ptr;
+
+ for (ptr= connect_attrs, idx= 0;
+ (uint)(ptr - connect_attrs) < connect_attrs_length && idx <= ordinal;
+ idx++)
+ {
+ uint copy_length;
+ /* do the copying only if we absolutely have to */
+ bool fill_in_attr_name= idx == ordinal;
+ bool fill_in_attr_value= idx == ordinal;
+
+ /* read the key */
+ if (parse_length_encoded_string(&ptr,
+ attr_name, max_attr_name, &copy_length,
+ connect_attrs,
+ connect_attrs_length,
+ fill_in_attr_name,
+ connect_attrs_cs, 32) ||
+ !copy_length
+ )
+ return false;
+
+ if (idx == ordinal)
+ *attr_name_length= copy_length;
+
+ /* read the value */
+ if (parse_length_encoded_string(&ptr,
+ attr_value, max_attr_value, &copy_length,
+ connect_attrs,
+ connect_attrs_length,
+ fill_in_attr_value,
+ connect_attrs_cs, 1024))
+ return false;
+
+ if (idx == ordinal)
+ *attr_value_length= copy_length;
+
+ if (idx == ordinal)
+ return true;
+ }
+
+ return false;
+}
+
+void table_session_connect::make_row(PFS_thread *pfs, uint ordinal)
+{
+ pfs_lock lock;
+ PFS_thread_class *safe_class;
+
+ m_row_exists= false;
+
+ /* Protect this reader against thread termination */
+ pfs->m_lock.begin_optimistic_lock(&lock);
+ safe_class= sanitize_thread_class(pfs->m_class);
+ if (unlikely(safe_class == NULL))
+ return;
+
+ /* Filtering threads must be done under the protection of the optimistic lock. */
+ if (! thread_fits(pfs))
+ return;
+
+ /* populate the row */
+ if (read_nth_attr(pfs->m_session_connect_attrs,
+ pfs->m_session_connect_attrs_length,
+ pfs->m_session_connect_attrs_cs,
+ ordinal,
+ m_row.m_attr_name, (uint) sizeof(m_row.m_attr_name),
+ &m_row.m_attr_name_length,
+ m_row.m_attr_value, (uint) sizeof(m_row.m_attr_value),
+ &m_row.m_attr_value_length))
+ {
+ /* we don't expect internal threads to have connection attributes */
+ DBUG_ASSERT(pfs->m_processlist_id != 0);
+
+ m_row.m_ordinal_position= ordinal;
+ m_row.m_process_id= pfs->m_processlist_id;
+ }
+ else
+ return;
+
+ if (pfs->m_lock.end_optimistic_lock(& lock))
+ m_row_exists= true;
+}
+
+int table_session_connect::read_row_values(TABLE *table,
+ unsigned char *buf,
+ Field **fields,
+ bool read_all)
+{
+ Field *f;
+
+ if (unlikely(!m_row_exists))
+ return HA_ERR_RECORD_DELETED;
+
+ /* Set the null bits */
+ DBUG_ASSERT(table->s->null_bytes == 1);
+ buf[0]= 0;
+
+ for (; (f= *fields) ; fields++)
+ {
+ if (read_all || bitmap_is_set(table->read_set, f->field_index))
+ {
+ switch(f->field_index)
+ {
+ case FO_PROCESS_ID:
+ if (m_row.m_process_id != 0)
+ set_field_ulong(f, m_row.m_process_id);
+ else
+ f->set_null();
+ break;
+ case FO_ATTR_NAME:
+ set_field_varchar_utf8(f, m_row.m_attr_name,
+ m_row.m_attr_name_length);
+ break;
+ case FO_ATTR_VALUE:
+ if (m_row.m_attr_value_length)
+ set_field_varchar_utf8(f, m_row.m_attr_value,
+ m_row.m_attr_value_length);
+ else
+ f->set_null();
+ break;
+ case FO_ORDINAL_POSITION:
+ set_field_ulong(f, m_row.m_ordinal_position);
+ break;
+ default:
+ DBUG_ASSERT(false);
+ }
+ }
+ }
+ return 0;
+}
+
+bool
+table_session_connect::thread_fits(PFS_thread *thread)
+{
+ return true;
+}
+
diff --git a/storage/perfschema/table_session_connect.h b/storage/perfschema/table_session_connect.h
new file mode 100644
index 00000000000..097623d2c80
--- /dev/null
+++ b/storage/perfschema/table_session_connect.h
@@ -0,0 +1,77 @@
+/* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software Foundation,
+ 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */
+
+#ifndef TABLE_SESSION_CONNECT_H
+#define TABLE_SESSION_CONNECT_H
+
+#include "pfs_column_types.h"
+#include "cursor_by_thread_connect_attr.h"
+#include "table_helper.h"
+
+#define MAX_ATTR_NAME_CHARS 32
+#define MAX_ATTR_VALUE_CHARS 1024
+#define MAX_UTF8_BYTES 6
+
+/** symbolic names for field offsets, keep in sync with field_types */
+enum field_offsets {
+ FO_PROCESS_ID,
+ FO_ATTR_NAME,
+ FO_ATTR_VALUE,
+ FO_ORDINAL_POSITION
+};
+
+/**
+ A row of PERFORMANCE_SCHEMA.SESSION_CONNECT_ATTRS and
+ PERFORMANCE_SCHEMA.SESSION_ACCOUNT_CONNECT_ATTRS.
+*/
+struct row_session_connect_attrs
+{
+ /** Column PROCESS_ID. */
+ ulong m_process_id;
+ /** Column ATTR_NAME. In UTF-8 */
+ char m_attr_name[MAX_ATTR_NAME_CHARS * MAX_UTF8_BYTES];
+ /** Length in bytes of @c m_attr_name. */
+ uint m_attr_name_length;
+ /** Column ATTR_VALUE. In UTF-8 */
+ char m_attr_value[MAX_ATTR_VALUE_CHARS * MAX_UTF8_BYTES];
+ /** Length in bytes of @c m_attr_name. */
+ uint m_attr_value_length;
+ /** Column ORDINAL_POSITION. */
+ ulong m_ordinal_position;
+};
+
+class table_session_connect : public cursor_by_thread_connect_attr
+{
+protected:
+ table_session_connect(const PFS_engine_table_share *share);
+
+public:
+ ~table_session_connect()
+ {}
+
+protected:
+ virtual void make_row(PFS_thread *pfs, uint ordinal);
+ virtual bool thread_fits(PFS_thread *thread);
+ virtual int read_row_values(TABLE *table, unsigned char *buf,
+ Field **fields, bool read_all);
+protected:
+ /** Fields definition. */
+ static TABLE_FIELD_DEF m_field_def;
+ /** Current row. */
+ row_session_connect_attrs m_row;
+};
+
+/** @} */
+#endif
diff --git a/storage/perfschema/table_session_connect_attrs.cc b/storage/perfschema/table_session_connect_attrs.cc
new file mode 100644
index 00000000000..9e1804b7294
--- /dev/null
+++ b/storage/perfschema/table_session_connect_attrs.cc
@@ -0,0 +1,43 @@
+/* Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software Foundation,
+ 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */
+
+#include "table_session_connect_attrs.h"
+
+THR_LOCK table_session_connect_attrs::m_table_lock;
+
+PFS_engine_table_share
+table_session_connect_attrs::m_share=
+{
+ { C_STRING_WITH_LEN("session_connect_attrs") },
+ &pfs_readonly_acl,
+ &table_session_connect_attrs::create,
+ NULL, /* write_row */
+ NULL, /* delete_all_rows */
+ NULL, /* get_row_count */
+ 1000, /* records */
+ sizeof(pos_connect_attr_by_thread_by_attr), /* ref length */
+ &m_table_lock,
+ &m_field_def,
+ false /* checked */
+};
+
+PFS_engine_table* table_session_connect_attrs::create()
+{
+ return new table_session_connect_attrs();
+}
+
+table_session_connect_attrs::table_session_connect_attrs()
+ : table_session_connect(&m_share)
+{}
diff --git a/storage/perfschema/table_session_connect_attrs.h b/storage/perfschema/table_session_connect_attrs.h
new file mode 100644
index 00000000000..b10b106ba0d
--- /dev/null
+++ b/storage/perfschema/table_session_connect_attrs.h
@@ -0,0 +1,47 @@
+/* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software Foundation,
+ 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */
+
+#ifndef TABLE_SESSION_CONNECT_ATTRS_H
+#define TABLE_SESSION_CONNECT_ATTRS_H
+
+#include "table_session_connect.h"
+/**
+ \addtogroup Performance_schema_tables
+ @{
+*/
+
+/** Table PERFORMANCE_SCHEMA.SESSION_CONNECT_ATTRS. */
+class table_session_connect_attrs : public table_session_connect
+{
+public:
+ /** Table share */
+ static PFS_engine_table_share m_share;
+ /** Table builder */
+ static PFS_engine_table* create();
+
+protected:
+ table_session_connect_attrs();
+
+public:
+ ~table_session_connect_attrs()
+ {}
+
+private:
+ /** Table share lock. */
+ static THR_LOCK m_table_lock;
+};
+
+/** @} */
+#endif
diff --git a/storage/perfschema/table_setup_actors.cc b/storage/perfschema/table_setup_actors.cc
index 15d3d9d22a8..91dbb942ead 100644
--- a/storage/perfschema/table_setup_actors.cc
+++ b/storage/perfschema/table_setup_actors.cc
@@ -105,6 +105,9 @@ int table_setup_actors::write_row(TABLE *table, unsigned char *buf,
}
}
+ if (user->length() == 0 || host->length() == 0 || role->length() == 0)
+ return HA_ERR_WRONG_COMMAND;
+
return insert_setup_actor(user, host, role);
}
@@ -264,39 +267,13 @@ int table_setup_actors::delete_row_values(TABLE *table,
const unsigned char *buf,
Field **fields)
{
- Field *f;
- String user_data("", 0, &my_charset_utf8_bin);
- String host_data("", 0, &my_charset_utf8_bin);
- String role_data("", 0, &my_charset_utf8_bin);
- String *user= NULL;
- String *host= NULL;
- String *role= NULL;
-
- for (; (f= *fields) ; fields++)
- {
- if (bitmap_is_set(table->read_set, f->field_index))
- {
- switch(f->field_index)
- {
- case 0: /* HOST */
- host= get_field_char_utf8(f, &host_data);
- break;
- case 1: /* USER */
- user= get_field_char_utf8(f, &user_data);
- break;
- case 2: /* ROLE */
- role= get_field_char_utf8(f, &role_data);
- break;
- default:
- DBUG_ASSERT(false);
- }
- }
- }
+ DBUG_ASSERT(m_row_exists);
- DBUG_ASSERT(user != NULL);
- DBUG_ASSERT(host != NULL);
- DBUG_ASSERT(role != NULL);
+ CHARSET_INFO *cs= &my_charset_utf8_bin;
+ String user(m_row.m_username, m_row.m_username_length, cs);
+ String role(m_row.m_rolename, m_row.m_rolename_length, cs);
+ String host(m_row.m_hostname, m_row.m_hostname_length, cs);
- return delete_setup_actor(user, host, role);
+ return delete_setup_actor(&user, &host, &role);
}
diff --git a/storage/perfschema/table_setup_objects.cc b/storage/perfschema/table_setup_objects.cc
index 33e360e989b..11fab913ac4 100644
--- a/storage/perfschema/table_setup_objects.cc
+++ b/storage/perfschema/table_setup_objects.cc
@@ -339,42 +339,15 @@ int table_setup_objects::delete_row_values(TABLE *table,
const unsigned char *buf,
Field **fields)
{
- int result;
- Field *f;
- enum_object_type object_type= OBJECT_TYPE_TABLE;
- String object_schema_data("", 0, &my_charset_utf8_bin);
- String object_name_data("", 0, &my_charset_utf8_bin);
- String *object_schema= NULL;
- String *object_name= NULL;
+ DBUG_ASSERT(m_row_exists);
- for (; (f= *fields) ; fields++)
- {
- if (bitmap_is_set(table->read_set, f->field_index))
- {
- switch(f->field_index)
- {
- case 0: /* OBJECT_TYPE */
- object_type= (enum_object_type) get_field_enum(f);
- break;
- case 1: /* OBJECT_SCHEMA */
- object_schema= get_field_varchar_utf8(f, &object_schema_data);
- break;
- case 2: /* OBJECT_NAME */
- object_name= get_field_varchar_utf8(f, &object_name_data);
- break;
- case 3: /* ENABLED */
- case 4: /* TIMED */
- break;
- default:
- DBUG_ASSERT(false);
- }
- }
- }
+ CHARSET_INFO *cs= &my_charset_utf8_bin;
+ enum_object_type object_type= OBJECT_TYPE_TABLE;
+ String object_schema(m_row.m_schema_name, m_row.m_schema_name_length, cs);
+ String object_name(m_row.m_object_name, m_row.m_object_name_length, cs);
- DBUG_ASSERT(object_schema != NULL);
- DBUG_ASSERT(object_name != NULL);
+ int result= delete_setup_object(object_type, &object_schema, &object_name);
- result= delete_setup_object(object_type, object_schema, object_name);
if (result == 0)
result= update_derived_flags();
return result;
diff --git a/storage/perfschema/table_socket_instances.cc b/storage/perfschema/table_socket_instances.cc
index f913c8fcc65..0fa1d2b1a3a 100644
--- a/storage/perfschema/table_socket_instances.cc
+++ b/storage/perfschema/table_socket_instances.cc
@@ -42,7 +42,7 @@ static const TABLE_FIELD_TYPE field_types[]=
},
{
{ C_STRING_WITH_LEN("THREAD_ID") },
- { C_STRING_WITH_LEN("int(11)") },
+ { C_STRING_WITH_LEN("bigint(20)") },
{ NULL, 0}
},
{
@@ -205,7 +205,7 @@ int table_socket_instances::read_row_values(TABLE *table,
break;
case 2: /* THREAD_ID */
if (m_row.m_thread_id_set)
- set_field_ulong(f, m_row.m_thread_id);
+ set_field_ulonglong(f, m_row.m_thread_id);
else
f->set_null();
break;
diff --git a/storage/perfschema/table_socket_instances.h b/storage/perfschema/table_socket_instances.h
index 2a80aeaa76a..080f11c1ba8 100644
--- a/storage/perfschema/table_socket_instances.h
+++ b/storage/perfschema/table_socket_instances.h
@@ -39,7 +39,7 @@ struct row_socket_instances
/** Column OBJECT_INSTANCE_BEGIN */
const void *m_identity;
/** Column THREAD_ID */
- uint m_thread_id;
+ ulonglong m_thread_id;
/** True if thread_is is set */
bool m_thread_id_set;
/** Column SOCKET_ID */
diff --git a/storage/perfschema/table_sync_instances.cc b/storage/perfschema/table_sync_instances.cc
index 06889e735ba..9b53eb3ce57 100644
--- a/storage/perfschema/table_sync_instances.cc
+++ b/storage/perfschema/table_sync_instances.cc
@@ -43,7 +43,7 @@ static const TABLE_FIELD_TYPE mutex_field_types[]=
},
{
{ C_STRING_WITH_LEN("LOCKED_BY_THREAD_ID") },
- { C_STRING_WITH_LEN("int(11)") },
+ { C_STRING_WITH_LEN("bigint(20)") },
{ NULL, 0}
}
};
@@ -178,7 +178,7 @@ int table_mutex_instances::read_row_values(TABLE *table,
break;
case 2: /* LOCKED_BY_THREAD_ID */
if (m_row.m_locked)
- set_field_ulong(f, m_row.m_locked_by_thread_id);
+ set_field_ulonglong(f, m_row.m_locked_by_thread_id);
else
f->set_null();
break;
@@ -207,7 +207,7 @@ static const TABLE_FIELD_TYPE rwlock_field_types[]=
},
{
{ C_STRING_WITH_LEN("WRITE_LOCKED_BY_THREAD_ID") },
- { C_STRING_WITH_LEN("int(11)") },
+ { C_STRING_WITH_LEN("bigint(20)") },
{ NULL, 0}
},
{
@@ -351,7 +351,7 @@ int table_rwlock_instances::read_row_values(TABLE *table,
break;
case 2: /* WRITE_LOCKED_BY_THREAD_ID */
if (m_row.m_write_locked)
- set_field_ulong(f, m_row.m_write_locked_by_thread_id);
+ set_field_ulonglong(f, m_row.m_write_locked_by_thread_id);
else
f->set_null();
break;
diff --git a/storage/perfschema/table_sync_instances.h b/storage/perfschema/table_sync_instances.h
index b6fc78e1cd5..ff7b2765a11 100644
--- a/storage/perfschema/table_sync_instances.h
+++ b/storage/perfschema/table_sync_instances.h
@@ -45,7 +45,7 @@ struct row_mutex_instances
/** True if column LOCKED_BY_THREAD_ID is not null. */
bool m_locked;
/** Column LOCKED_BY_THREAD_ID. */
- ulong m_locked_by_thread_id;
+ ulonglong m_locked_by_thread_id;
};
/** Table PERFORMANCE_SCHEMA.MUTEX_INSTANCES. */
@@ -102,7 +102,7 @@ struct row_rwlock_instances
/** True if column WRITE_LOCKED_BY_THREAD_ID is not null. */
bool m_write_locked;
/** Column WRITE_LOCKED_BY_THREAD_ID. */
- ulong m_write_locked_by_thread_id;
+ ulonglong m_write_locked_by_thread_id;
/** Column READ_LOCKED_BY_COUNT. */
ulong m_readers;
};
diff --git a/storage/perfschema/table_threads.cc b/storage/perfschema/table_threads.cc
index 2104c24b65c..ef6c272c0a2 100644
--- a/storage/perfschema/table_threads.cc
+++ b/storage/perfschema/table_threads.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
+/* Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -26,7 +26,7 @@ static const TABLE_FIELD_TYPE field_types[]=
{
{
{ C_STRING_WITH_LEN("THREAD_ID") },
- { C_STRING_WITH_LEN("int(11)") },
+ { C_STRING_WITH_LEN("bigint(20)") },
{ NULL, 0}
},
{
@@ -41,7 +41,7 @@ static const TABLE_FIELD_TYPE field_types[]=
},
{
{ C_STRING_WITH_LEN("PROCESSLIST_ID") },
- { C_STRING_WITH_LEN("int(11)") },
+ { C_STRING_WITH_LEN("bigint(20)") },
{ NULL, 0}
},
{
@@ -81,7 +81,7 @@ static const TABLE_FIELD_TYPE field_types[]=
},
{
{ C_STRING_WITH_LEN("PARENT_THREAD_ID") },
- { C_STRING_WITH_LEN("int(11)") },
+ { C_STRING_WITH_LEN("bigint(20)") },
{ NULL, 0}
},
{
@@ -129,6 +129,7 @@ table_threads::table_threads()
void table_threads::make_row(PFS_thread *pfs)
{
pfs_lock lock;
+ pfs_lock processlist_lock;
PFS_thread_class *safe_class;
m_row_exists= false;
@@ -142,7 +143,7 @@ void table_threads::make_row(PFS_thread *pfs)
m_row.m_thread_internal_id= pfs->m_thread_internal_id;
m_row.m_parent_thread_internal_id= pfs->m_parent_thread_internal_id;
- m_row.m_thread_id= pfs->m_thread_id;
+ m_row.m_processlist_id= pfs->m_processlist_id;
m_row.m_name= safe_class->m_name;
m_row.m_name_length= safe_class->m_name_length;
@@ -166,12 +167,30 @@ void table_threads::make_row(PFS_thread *pfs)
m_row.m_command= pfs->m_command;
m_row.m_start_time= pfs->m_start_time;
+
+ /* Protect this reader against attribute changes. */
+ pfs->m_processlist_lock.begin_optimistic_lock(&processlist_lock);
+
/* FIXME: need to copy it ? */
m_row.m_processlist_state_ptr= pfs->m_processlist_state_ptr;
m_row.m_processlist_state_length= pfs->m_processlist_state_length;
/* FIXME: need to copy it ? */
m_row.m_processlist_info_ptr= pfs->m_processlist_info_ptr;
m_row.m_processlist_info_length= pfs->m_processlist_info_length;
+
+ if (! pfs->m_processlist_lock.end_optimistic_lock(& processlist_lock))
+ {
+ /*
+ Columns PROCESSLIST_STATE or PROCESSLIST_INFO are being
+ updated while we read them, and are unsafe to use.
+ Do not discard the entire row.
+ Do not loop waiting for a stable value.
+ Just return NULL values for these columns.
+ */
+ m_row.m_processlist_state_length= 0;
+ m_row.m_processlist_info_length= 0;
+ }
+
m_row.m_enabled_ptr= &pfs->m_enabled;
if (pfs->m_lock.end_optimistic_lock(& lock))
@@ -200,20 +219,20 @@ int table_threads::read_row_values(TABLE *table,
switch(f->field_index)
{
case 0: /* THREAD_ID */
- set_field_ulong(f, m_row.m_thread_internal_id);
+ set_field_ulonglong(f, m_row.m_thread_internal_id);
break;
case 1: /* NAME */
set_field_varchar_utf8(f, m_row.m_name, m_row.m_name_length);
break;
case 2: /* TYPE */
- if (m_row.m_thread_id != 0)
+ if (m_row.m_processlist_id != 0)
set_field_varchar_utf8(f, "FOREGROUND", 10);
else
set_field_varchar_utf8(f, "BACKGROUND", 10);
break;
case 3: /* PROCESSLIST_ID */
- if (m_row.m_thread_id != 0)
- set_field_ulong(f, m_row.m_thread_id);
+ if (m_row.m_processlist_id != 0)
+ set_field_ulonglong(f, m_row.m_processlist_id);
else
f->set_null();
break;
@@ -239,7 +258,7 @@ int table_threads::read_row_values(TABLE *table,
f->set_null();
break;
case 7: /* PROCESSLIST_COMMAND */
- if (m_row.m_thread_id != 0)
+ if (m_row.m_processlist_id != 0)
set_field_varchar_utf8(f, command_name[m_row.m_command].str,
command_name[m_row.m_command].length);
else
@@ -271,7 +290,7 @@ int table_threads::read_row_values(TABLE *table,
break;
case 11: /* PARENT_THREAD_ID */
if (m_row.m_parent_thread_internal_id != 0)
- set_field_ulong(f, m_row.m_parent_thread_internal_id);
+ set_field_ulonglong(f, m_row.m_parent_thread_internal_id);
else
f->set_null();
break;
diff --git a/storage/perfschema/table_threads.h b/storage/perfschema/table_threads.h
index 9819822f8c8..bce45c0cbce 100644
--- a/storage/perfschema/table_threads.h
+++ b/storage/perfschema/table_threads.h
@@ -32,9 +32,9 @@ struct PFS_thread;
struct row_threads
{
/** Column THREAD_ID. */
- ulong m_thread_internal_id;
+ ulonglong m_thread_internal_id;
/** Column PROCESSLIST_ID. */
- ulong m_thread_id;
+ ulonglong m_processlist_id;
/** Column NAME. */
const char* m_name;
/** Length in bytes of @c m_name. */
@@ -66,7 +66,7 @@ struct row_threads
/** Column INSTRUMENTED. */
bool *m_enabled_ptr;
/** Column PARENT_THREAD_ID. */
- ulong m_parent_thread_internal_id;
+ ulonglong m_parent_thread_internal_id;
};
/** Table PERFORMANCE_SCHEMA.THREADS. */
diff --git a/storage/perfschema/table_tiws_by_index_usage.cc b/storage/perfschema/table_tiws_by_index_usage.cc
index d354c40d3ed..71455793516 100644
--- a/storage/perfschema/table_tiws_by_index_usage.cc
+++ b/storage/perfschema/table_tiws_by_index_usage.cc
@@ -290,15 +290,16 @@ int table_tiws_by_index_usage::rnd_next(void)
table_share= &table_share_array[m_pos.m_index_1];
if (table_share->m_lock.is_populated())
{
- if (m_pos.m_index_2 < table_share->m_key_count)
+ uint safe_key_count= sanitize_index_count(table_share->m_key_count);
+ if (m_pos.m_index_2 < safe_key_count)
{
make_row(table_share, m_pos.m_index_2);
m_next_pos.set_after(&m_pos);
return 0;
}
- if (m_pos.m_index_2 <= MAX_KEY)
+ if (m_pos.m_index_2 <= MAX_INDEXES)
{
- m_pos.m_index_2= MAX_KEY;
+ m_pos.m_index_2= MAX_INDEXES;
make_row(table_share, m_pos.m_index_2);
m_next_pos.set_after(&m_pos);
return 0;
@@ -319,12 +320,13 @@ table_tiws_by_index_usage::rnd_pos(const void *pos)
table_share= &table_share_array[m_pos.m_index_1];
if (table_share->m_lock.is_populated())
{
- if (m_pos.m_index_2 < table_share->m_key_count)
+ uint safe_key_count= sanitize_index_count(table_share->m_key_count);
+ if (m_pos.m_index_2 < safe_key_count)
{
make_row(table_share, m_pos.m_index_2);
return 0;
}
- if (m_pos.m_index_2 == MAX_KEY)
+ if (m_pos.m_index_2 == MAX_INDEXES)
{
make_row(table_share, m_pos.m_index_2);
return 0;
diff --git a/storage/perfschema/unittest/CMakeLists.txt b/storage/perfschema/unittest/CMakeLists.txt
index 6be5b0f9a50..d98b13622d7 100644
--- a/storage/perfschema/unittest/CMakeLists.txt
+++ b/storage/perfschema/unittest/CMakeLists.txt
@@ -1,5 +1,4 @@
-# Copyright (c) 2009, 2010 Sun Microsystems, Inc.
-# Use is subject to license terms.
+# Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -11,18 +10,18 @@
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software Foundation,
-# 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02111-1307 USA
INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/include
${CMAKE_SOURCE_DIR}/include/mysql
${CMAKE_SOURCE_DIR}/regex
${CMAKE_SOURCE_DIR}/sql
- ${CMAKE_SOURCE_DIR}/extra/yassl/include
+ ${SSL_INCLUDE_DIRS}
${CMAKE_SOURCE_DIR}/unittest/mytap
${CMAKE_SOURCE_DIR}/storage/perfschema)
-ADD_DEFINITIONS(-DMYSQL_SERVER)
+ADD_DEFINITIONS(-DMYSQL_SERVER ${SSL_DEFINES})
MY_ADD_TESTS(pfs_instr_class pfs_instr_class-oom pfs_instr pfs_instr-oom pfs_account-oom pfs_host-oom pfs_timer pfs_user-oom pfs
EXT "cc" LINK_LIBRARIES perfschema mysys)
diff --git a/storage/perfschema/unittest/pfs-t.cc b/storage/perfschema/unittest/pfs-t.cc
index c673582ba53..9211443bcca 100644
--- a/storage/perfschema/unittest/pfs-t.cc
+++ b/storage/perfschema/unittest/pfs-t.cc
@@ -111,6 +111,7 @@ void test_bootstrap()
param.m_events_statements_history_sizing= 0;
param.m_events_statements_history_long_sizing= 0;
param.m_digest_sizing= 0;
+ param.m_session_connect_attrs_sizing= 0;
boot= initialize_performance_schema(& param);
ok(boot != NULL, "boot");
@@ -168,6 +169,7 @@ PSI * load_perfschema()
param.m_events_statements_history_sizing= 0;
param.m_events_statements_history_long_sizing= 0;
param.m_digest_sizing= 0;
+ param.m_session_connect_attrs_sizing= 0;
/* test_bootstrap() covered this, assuming it just works */
boot= initialize_performance_schema(& param);
@@ -759,21 +761,21 @@ void test_init_disabled()
/* disabled S-A + disabled T-1: no instrumentation */
socket_class_A->m_enabled= false;
- socket_A1= psi->init_socket(socket_key_A, NULL);
+ socket_A1= psi->init_socket(socket_key_A, NULL, NULL, 0);
ok(socket_A1 == NULL, "socket_A1 not instrumented");
/* enabled S-A + disabled T-1: instrumentation (for later) */
socket_class_A->m_enabled= true;
- socket_A1= psi->init_socket(socket_key_A, NULL);
+ socket_A1= psi->init_socket(socket_key_A, NULL, NULL, 0);
ok(socket_A1 != NULL, "socket_A1 instrumented");
/* broken key + disabled T-1: no instrumentation */
socket_class_A->m_enabled= true;
- socket_A1= psi->init_socket(0, NULL);
+ socket_A1= psi->init_socket(0, NULL, NULL, 0);
ok(socket_A1 == NULL, "socket key 0 not instrumented");
- socket_A1= psi->init_socket(99, NULL);
+ socket_A1= psi->init_socket(99, NULL, NULL, 0);
ok(socket_A1 == NULL, "broken socket key not instrumented");
/* Pretend thread T-1 is enabled */
@@ -892,16 +894,16 @@ void test_init_disabled()
/* enabled S-A + enabled T-1: instrumentation */
socket_class_A->m_enabled= true;
- socket_A1= psi->init_socket(socket_key_A, NULL);
+ socket_A1= psi->init_socket(socket_key_A, NULL, NULL, 0);
ok(socket_A1 != NULL, "instrumented");
psi->destroy_socket(socket_A1);
/* broken key + enabled T-1: no instrumentation */
socket_class_A->m_enabled= true;
- socket_A1= psi->init_socket(0, NULL);
+ socket_A1= psi->init_socket(0, NULL, NULL, 0);
ok(socket_A1 == NULL, "not instrumented");
- socket_A1= psi->init_socket(99, NULL);
+ socket_A1= psi->init_socket(99, NULL, NULL, 0);
ok(socket_A1 == NULL, "not instrumented");
/* Pretend the running thread is not instrumented */
@@ -996,21 +998,21 @@ void test_init_disabled()
/* disabled S-A + unknown thread: no instrumentation */
socket_class_A->m_enabled= false;
- socket_A1= psi->init_socket(socket_key_A, NULL);
+ socket_A1= psi->init_socket(socket_key_A, NULL, NULL, 0);
ok(socket_A1 == NULL, "socket_A1 not instrumented");
/* enabled S-A + unknown thread: instrumentation (for later) */
socket_class_A->m_enabled= true;
- socket_A1= psi->init_socket(socket_key_A, NULL);
+ socket_A1= psi->init_socket(socket_key_A, NULL, NULL, 0);
ok(socket_A1 != NULL, "socket_A1 instrumented");
/* broken key + unknown thread: no instrumentation */
socket_class_A->m_enabled= true;
- socket_A1= psi->init_socket(0, NULL);
+ socket_A1= psi->init_socket(0, NULL, NULL, 0);
ok(socket_A1 == NULL, "socket key 0 not instrumented");
- socket_A1= psi->init_socket(99, NULL);
+ socket_A1= psi->init_socket(99, NULL, NULL, 0);
ok(socket_A1 == NULL, "broken socket key not instrumented");
shutdown_performance_schema();
@@ -1126,7 +1128,7 @@ void test_locker_disabled()
ok(file_A1 != NULL, "instrumented");
socket_class_A->m_enabled= true;
- socket_A1= psi->init_socket(socket_key_A, NULL);
+ socket_A1= psi->init_socket(socket_key_A, NULL, NULL, 0);
ok(socket_A1 != NULL, "instrumented");
/* Socket lockers require a thread owner */
@@ -1294,10 +1296,10 @@ void test_locker_disabled()
cond_locker= psi->start_cond_wait(&cond_state, cond_A1, mutex_A1, PSI_COND_WAIT, __FILE__, __LINE__);
ok(cond_locker != NULL, "locker");
psi->end_cond_wait(cond_locker, 0);
- file_locker= psi->get_thread_file_name_locker(&file_state, file_key_A, PSI_FILE_OPEN, "xxx", NULL);
+ file_locker= psi->get_thread_file_name_locker(&file_state, file_key_A, PSI_FILE_STREAM_OPEN, "xxx", NULL);
ok(file_locker != NULL, "locker");
psi->start_file_open_wait(file_locker, __FILE__, __LINE__);
- psi->end_file_open_wait(file_locker);
+ psi->end_file_open_wait(file_locker, NULL);
file_locker= psi->get_thread_file_stream_locker(&file_state, file_A1, PSI_FILE_READ);
ok(file_locker != NULL, "locker");
psi->start_file_wait(file_locker, 10, __FILE__, __LINE__);
@@ -1314,7 +1316,7 @@ void test_locker_disabled()
/* ---------------------------------------------- */
socket_class_A->m_enabled= true;
- socket_A1= psi->init_socket(socket_key_A, NULL);
+ socket_A1= psi->init_socket(socket_key_A, NULL, NULL, 0);
ok(socket_A1 != NULL, "instrumented");
/* Socket thread owner has not been set */
socket_locker= psi->start_socket_wait(&socket_state, socket_A1, PSI_SOCKET_SEND, 12, "foo.cc", 12);
@@ -1485,6 +1487,8 @@ void test_event_name_index()
memset(& param, 0xFF, sizeof(param));
param.m_enabled= true;
+ /* NOTE: Need to add 3 to each index: table io, table lock, idle */
+
/* Per mutex info waits should be at [0..9] */
param.m_mutex_class_sizing= 10;
/* Per rwlock info waits should be at [10..29] */
@@ -1509,6 +1513,7 @@ void test_event_name_index()
param.m_events_statements_history_sizing= 0;
param.m_events_statements_history_long_sizing= 0;
param.m_digest_sizing= 0;
+ param.m_session_connect_attrs_sizing= 0;
param.m_mutex_sizing= 0;
param.m_rwlock_sizing= 0;
@@ -1540,10 +1545,10 @@ void test_event_name_index()
psi->register_mutex("X", dummy_mutexes, 2);
mutex_class= find_mutex_class(dummy_mutex_key_1);
ok(mutex_class != NULL, "mutex class 1");
- ok(mutex_class->m_event_name_index == 0, "index 0");
+ ok(mutex_class->m_event_name_index == 3, "index 3");
mutex_class= find_mutex_class(dummy_mutex_key_2);
ok(mutex_class != NULL, "mutex class 2");
- ok(mutex_class->m_event_name_index == 1, "index 1");
+ ok(mutex_class->m_event_name_index == 4, "index 4");
PFS_rwlock_class *rwlock_class;
PSI_rwlock_key dummy_rwlock_key_1;
@@ -1557,10 +1562,10 @@ void test_event_name_index()
psi->register_rwlock("X", dummy_rwlocks, 2);
rwlock_class= find_rwlock_class(dummy_rwlock_key_1);
ok(rwlock_class != NULL, "rwlock class 1");
- ok(rwlock_class->m_event_name_index == 10, "index 10");
+ ok(rwlock_class->m_event_name_index == 13, "index 13");
rwlock_class= find_rwlock_class(dummy_rwlock_key_2);
ok(rwlock_class != NULL, "rwlock class 2");
- ok(rwlock_class->m_event_name_index == 11, "index 11");
+ ok(rwlock_class->m_event_name_index == 14, "index 14");
PFS_cond_class *cond_class;
PSI_cond_key dummy_cond_key_1;
@@ -1574,10 +1579,10 @@ void test_event_name_index()
psi->register_cond("X", dummy_conds, 2);
cond_class= find_cond_class(dummy_cond_key_1);
ok(cond_class != NULL, "cond class 1");
- ok(cond_class->m_event_name_index == 30, "index 30");
+ ok(cond_class->m_event_name_index == 33, "index 33");
cond_class= find_cond_class(dummy_cond_key_2);
ok(cond_class != NULL, "cond class 2");
- ok(cond_class->m_event_name_index == 31, "index 31");
+ ok(cond_class->m_event_name_index == 34, "index 34");
PFS_file_class *file_class;
PSI_file_key dummy_file_key_1;
@@ -1591,10 +1596,10 @@ void test_event_name_index()
psi->register_file("X", dummy_files, 2);
file_class= find_file_class(dummy_file_key_1);
ok(file_class != NULL, "file class 1");
- ok(file_class->m_event_name_index == 70, "index 70");
+ ok(file_class->m_event_name_index == 73, "index 73");
file_class= find_file_class(dummy_file_key_2);
ok(file_class != NULL, "file class 2");
- ok(file_class->m_event_name_index == 71, "index 71");
+ ok(file_class->m_event_name_index == 74, "index 74");
PFS_socket_class *socket_class;
PSI_socket_key dummy_socket_key_1;
@@ -1608,13 +1613,13 @@ void test_event_name_index()
psi->register_socket("X", dummy_sockets, 2);
socket_class= find_socket_class(dummy_socket_key_1);
ok(socket_class != NULL, "socket class 1");
- ok(socket_class->m_event_name_index == 150, "index 150");
+ ok(socket_class->m_event_name_index == 153, "index 153");
socket_class= find_socket_class(dummy_socket_key_2);
ok(socket_class != NULL, "socket class 2");
- ok(socket_class->m_event_name_index == 151, "index 151");
+ ok(socket_class->m_event_name_index == 154, "index 154");
- ok(global_table_io_class.m_event_name_index == 310, "index 310");
- ok(global_table_lock_class.m_event_name_index == 311, "index 311");
+ ok(global_table_io_class.m_event_name_index == 0, "index 0");
+ ok(global_table_lock_class.m_event_name_index == 1, "index 1");
ok(wait_class_max= 313, "313 event names"); // 3 global classes
}
diff --git a/storage/perfschema/unittest/pfs_account-oom-t.cc b/storage/perfschema/unittest/pfs_account-oom-t.cc
index 214ac1c5995..8fa6f340cbf 100644
--- a/storage/perfschema/unittest/pfs_account-oom-t.cc
+++ b/storage/perfschema/unittest/pfs_account-oom-t.cc
@@ -59,6 +59,7 @@ void test_oom()
param.m_statement_class_sizing= 50;
param.m_events_statements_history_sizing= 0;
param.m_events_statements_history_long_sizing= 0;
+ param.m_session_connect_attrs_sizing= 0;
/* Setup */
diff --git a/storage/perfschema/unittest/pfs_connect_attr-t.cc b/storage/perfschema/unittest/pfs_connect_attr-t.cc
new file mode 100644
index 00000000000..7bee1d063a1
--- /dev/null
+++ b/storage/perfschema/unittest/pfs_connect_attr-t.cc
@@ -0,0 +1,345 @@
+/* Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; version 2 of the License.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software Foundation,
+ 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */
+
+#include <my_global.h>
+#include <my_pthread.h>
+#include <pfs_server.h>
+#include <pfs_instr_class.h>
+#include <pfs_instr.h>
+#include <pfs_global.h>
+#include <tap.h>
+
+
+#include <string.h>
+#include <memory.h>
+
+/* test helpers, to inspect data */
+bool read_nth_attr(const char *connect_attrs, uint connect_attrs_length,
+ const CHARSET_INFO *connect_attrs_cs,
+ uint ordinal,
+ char *attr_name, uint max_attr_name,
+ uint *attr_name_length,
+ char *attr_value, uint max_attr_value,
+ uint *attr_value_length);
+
+void test_blob_parser()
+{
+ char name[100], value[4096];
+ unsigned char packet[10000], *ptr;
+ uint name_len, value_len, idx, packet_length;
+ bool result;
+ const CHARSET_INFO *cs= &my_charset_utf8_bin;
+
+ diag("test_blob_parser");
+
+ result= read_nth_attr("", 0, cs, 0,
+ name, 32, &name_len, value, 1024, &value_len);
+ ok(result == false, "zero length blob");
+
+
+ result= read_nth_attr("\x1", 1, cs, 0,
+ name, 32, &name_len, value, 1024, &value_len);
+ ok(result == false, "invalid key length");
+
+
+ result= read_nth_attr("\x2k1\x1", 4, cs, 0,
+ name, 32, &name_len, value, 1024, &value_len);
+ ok(result == false, "invalid value length");
+
+
+ result= read_nth_attr("\x2k1\x2v1", 6, cs, 0,
+ name, 32, &name_len, value, 1024, &value_len);
+ ok(result == true, "one pair return");
+ ok(name_len == 2, "one pair attr name length");
+ ok(!strncmp(name, "k1", name_len), "one pair attr name");
+ ok(value_len == 2, "one pair value length");
+ ok(!strncmp(value, "v1", value_len), "one pair value");
+
+ result= read_nth_attr("\x2k1\x2v1", 6, cs, 1,
+ name, 32, &name_len, value, 1024, &value_len);
+ ok(result == false, "no second arg");
+
+ result= read_nth_attr("\x2k1\x2v1\x2k2\x2v2", 12, cs, 1,
+ name, 32, &name_len, value, 1024, &value_len);
+ ok(result == true, "two pairs return");
+ ok(name_len == 2, "two pairs attr name length");
+ ok(!strncmp(name, "k2", name_len), "two pairs attr name");
+ ok(value_len == 2, "two pairs value length");
+ ok(!strncmp(value, "v2", value_len), "two pairs value");
+
+ result= read_nth_attr("\x2k1\xff\x2k2\x2v2", 12, cs, 1,
+ name, 32, &name_len, value, 1024, &value_len);
+ ok(result == false, "two pairs first value bad return");
+
+ result= read_nth_attr("\x2k1\x2v1\x2k2\x2v2", 10, cs, 1,
+ name, 32, &name_len, value, 1024, &value_len);
+ ok(result == false, "two pairs wrong global length");
+
+ result= read_nth_attr("\x21z123456789z123456789z123456789z12\x2v1", 37, cs, 0,
+ name, 32, &name_len, value, 1024, &value_len);
+ ok(result == true, "attr name overflow");
+ ok(name_len == 32, "attr name overflow length");
+ ok(!strncmp(name, "z123456789z123456789z123456789z1", name_len),
+ "attr name overflow name");
+ ok(value_len == 2, "attr name overflow value length");
+ ok(!strncmp(value, "v1", value_len), "attr name overflow value");
+
+ packet[0]= 2;
+ packet[1]= 'k';
+ packet[2]= '1';
+ ptr= net_store_length(packet + 3, 1025);
+ for (idx= 0; idx < 1025; idx++)
+ *ptr++= '0' + (idx % 10);
+ packet_length= (uint) (ptr - packet);
+ result= read_nth_attr((char *) packet, packet_length, cs, 0,
+ name, 32, &name_len, value, 1024, &value_len);
+ ok(result == true, "attr value overflow");
+ ok(name_len == 2, "attr value overflow length");
+ ok(!strncmp(name, "k1", name_len), "attr value overflow name");
+ ok(value_len == 1024, "attr value overflow value length");
+ for (idx= 0; idx < 1024; idx++)
+ {
+ if (value[idx] != (char) ('0' + (idx % 10)))
+ break;
+ }
+ ok (idx == 1024, "attr value overflow value");
+
+ result= read_nth_attr("\x21z123456789z123456789z123456789z12\x2v1\x2k2\x2v2",
+ 43, cs, 1,
+ name, 32, &name_len, value, 1024, &value_len);
+ ok(result == true, "prev attr name overflow");
+ ok(name_len == 2, "prev attr name overflow length");
+ ok(!strncmp(name, "k2", name_len),
+ "prev attr name overflow name");
+ ok(value_len == 2, "prev attr name overflow value length");
+ ok(!strncmp(value, "v2", value_len), "prev attr name overflow value");
+
+
+ packet[1]= 'k';
+ packet[2]= '1';
+ packet[3]= 2;
+ packet[4]= 'v';
+ packet[5]= '1';
+
+ for(idx= 251; idx < 256; idx++)
+ {
+ packet[0]= idx;
+ result= read_nth_attr((char *) packet, 6, cs, 0,
+ name, 32, &name_len, value, 1024, &value_len);
+ ok(result == false, "invalid string length %d", idx);
+ }
+
+ memset(packet, 0, sizeof(packet));
+ for (idx=0; idx < 1660 /* *6 = 9960 */; idx++)
+ memcpy(packet + idx * 6, "\x2k1\x2v1", 6);
+ result= read_nth_attr((char *) packet, 8192, cs, 1364,
+ name, 32, &name_len, value, 1024, &value_len);
+ ok(result == true, "last valid attribute %d", 1364);
+ result= read_nth_attr((char *) packet, 8192, cs, 1365,
+ name, 32, &name_len, value, 1024, &value_len);
+ ok(result == false, "first attribute that's cut %d", 1365);
+}
+
+void test_multibyte_lengths()
+{
+ char name[100], value[4096];
+ uint name_len, value_len;
+ bool result;
+ const CHARSET_INFO *cs= &my_charset_utf8_bin;
+
+ unsigned char var_len_packet[] = {
+ 252, 2, 0, 'k', '1',
+ 253, 2, 0, 0, 'v', '1',
+ 254, 2, 0, 0, 0, 0, 0, 0, 0, 'k', '2',
+ 254, 2, 0, 0, 0, 0, 0, 0, 0, 'v', '2'
+ };
+
+ result= read_nth_attr((char *) var_len_packet, sizeof(var_len_packet), cs, 0,
+ name, 32, &name_len, value, 1024, &value_len);
+ ok(result == true, "multibyte lengths return");
+ ok(name_len == 2, "multibyte lengths name length");
+ ok(!strncmp(name, "k1", name_len), "multibyte lengths attr name");
+ ok(value_len == 2, "multibyte lengths value length");
+ ok(!strncmp(value, "v1", value_len), "multibyte lengths value");
+
+ result= read_nth_attr((char *) var_len_packet, sizeof(var_len_packet), cs, 1,
+ name, 32, &name_len, value, 1024, &value_len);
+ ok(result == true, "multibyte lengths second attr return");
+ ok(name_len == 2, "multibyte lengths second attr name length");
+ ok(!strncmp(name, "k2", name_len), "multibyte lengths second attr attr name");
+ ok(value_len == 2, "multibyte lengths value length");
+ ok(!strncmp(value, "v2", value_len), "multibyte lengths second attr value");
+}
+
+
+void test_utf8_parser()
+{
+ /* utf8 max byte length per character is 6 */
+ char name[33 * 6], value[1024 * 6], packet[1500 * 6], *ptr;
+ uint name_len, value_len;
+ bool result;
+ const CHARSET_INFO *cs= &my_charset_utf8_bin;
+
+ /* note : this is encoded in utf-8 */
+ const char *attr1= "Георги";
+ const char *val1= "Кодинов";
+ const char *attr2= "Пловдив";
+ const char *val2= "БългариÑ";
+
+ ptr= packet;
+ *ptr++= strlen(attr1);
+ memcpy(ptr, attr1, strlen(attr1));
+ ptr+= strlen(attr1);
+ *ptr++= strlen(val1);
+ memcpy(ptr, val1, strlen(val1));
+ ptr+= strlen(val1);
+
+ *ptr++= strlen(attr2);
+ memcpy(ptr, attr2, strlen(attr2));
+ ptr+= strlen(attr2);
+ *ptr++= strlen(val2);
+ memcpy(ptr, val2, strlen(val2));
+ ptr+= strlen(val2);
+
+ diag("test_utf8_parser attr pair #1");
+
+ result= read_nth_attr((char *) packet, ptr - packet, cs, 0,
+ name, sizeof(name), &name_len,
+ value, sizeof(value), &value_len);
+ ok(result == true, "return");
+ ok(name_len == strlen(attr1), "name length");
+ ok(!strncmp(name, attr1, name_len), "attr name");
+ ok(value_len == strlen(val1), "value length");
+ ok(!strncmp(value, val1, value_len), "value");
+
+ diag("test_utf8_parser attr pair #2");
+ result= read_nth_attr((char *) packet, ptr - packet, cs, 1,
+ name, sizeof(name), &name_len,
+ value, sizeof(value), &value_len);
+ ok(result == true, "return");
+ ok(name_len == strlen(attr2), "name length");
+ ok(!strncmp(name, attr2, name_len), "attr name");
+ ok(value_len == strlen(val2), "value length");
+ ok(!strncmp(value, val2, value_len), "value");
+}
+
+
+void test_utf8_parser_bad_encoding()
+{
+ /* utf8 max byte length per character is 3*/
+ char name[33 * 3], value[1024 * 3], packet[1500 * 3], *ptr;
+ uint name_len, value_len;
+ bool result;
+ const CHARSET_INFO *cs= &my_charset_utf8_bin;
+
+ /* note : this is encoded in utf-8 */
+ const char *attr= "Георги";
+ const char *val= "Кодинов";
+
+ ptr= packet;
+ *ptr++= strlen(attr);
+ memcpy(ptr, attr, strlen(attr));
+ ptr[0]= 0xFA; // invalid UTF-8 char
+ ptr+= strlen(attr);
+ *ptr++= strlen(val);
+ memcpy(ptr, val, strlen(val));
+ ptr+= strlen(val);
+
+ diag("test_utf8_parser_bad_encoding");
+
+ result= read_nth_attr((char *) packet, ptr - packet, cs, 0,
+ name, sizeof(name), &name_len,
+ value, sizeof(value), &value_len);
+ ok(result == false, "return");
+}
+
+const CHARSET_INFO *cs_cp1251;
+
+void test_cp1251_parser()
+{
+ /* utf8 max byte length per character is 3*/
+ char name[33 * 3], value[1024 * 3], packet[1500 * 3], *ptr;
+ uint name_len, value_len;
+ bool result;
+
+ /* note : this is Георги in windows-1251 */
+ const char *attr1= "\xc3\xe5\xee\xf0\xe3\xe8";
+ /* note : this is Кодинов in windows-1251 */
+ const char *val1= "\xca\xee\xe4\xe8\xed\xee\xe2";
+ /* note : this is Пловдив in windows-1251 */
+ const char *attr2= "\xcf\xeb\xee\xe2\xe4\xe8\xe2";
+ /* note : this is Ð‘ÑŠÐ»Ð³Ð°Ñ€Ð¸Ñ in windows-1251 */
+ const char *val2= "\xc1\xfa\xeb\xe3\xe0\xf0\xe8\xff";
+
+ ptr= packet;
+ *ptr++= strlen(attr1);
+ memcpy(ptr, attr1, strlen(attr1));
+ ptr+= strlen(attr1);
+ *ptr++= strlen(val1);
+ memcpy(ptr, val1, strlen(val1));
+ ptr+= strlen(val1);
+
+ *ptr++= strlen(attr2);
+ memcpy(ptr, attr2, strlen(attr2));
+ ptr+= strlen(attr2);
+ *ptr++= strlen(val2);
+ memcpy(ptr, val2, strlen(val2));
+ ptr+= strlen(val2);
+
+ diag("test_cp1251_parser attr pair #1");
+
+ result= read_nth_attr((char *) packet, ptr - packet, cs_cp1251, 0,
+ name, sizeof(name), &name_len,
+ value, sizeof(value), &value_len);
+ ok(result == true, "return");
+ /* need to compare to the UTF-8 equivalents */
+ ok(name_len == strlen("Георги"), "name length");
+ ok(!strncmp(name, "Георги", name_len), "attr name");
+ ok(value_len == strlen("Кодинов"), "value length");
+ ok(!strncmp(value, "Кодинов", value_len), "value");
+
+ diag("test_cp1251_parser attr pair #2");
+ result= read_nth_attr((char *) packet, ptr - packet, cs_cp1251, 1,
+ name, sizeof(name), &name_len,
+ value, sizeof(value), &value_len);
+ ok(result == true, "return");
+ /* need to compare to the UTF-8 equivalents */
+ ok(name_len == strlen("Пловдив"), "name length");
+ ok(!strncmp(name, "Пловдив", name_len), "attr name");
+ ok(value_len == strlen("БългариÑ"), "value length");
+ ok(!strncmp(value, "БългариÑ", value_len), "value");
+}
+
+
+void do_all_tests()
+{
+ test_blob_parser();
+ test_multibyte_lengths();
+ test_utf8_parser();
+ test_utf8_parser_bad_encoding();
+ test_cp1251_parser();
+}
+
+int main(int, char **)
+{
+ MY_INIT("pfs_connect_attr-t");
+
+ cs_cp1251= get_charset_by_csname("cp1251", MY_CS_PRIMARY, MYF(0));
+ if (!cs_cp1251)
+ diag("skipping the cp1251 tests : missing character set");
+ plan(59 + (cs_cp1251 ? 10 : 0));
+ do_all_tests();
+ return 0;
+}
diff --git a/storage/perfschema/unittest/pfs_host-oom-t.cc b/storage/perfschema/unittest/pfs_host-oom-t.cc
index 455de9bf9ca..3d3dfd6d05e 100644
--- a/storage/perfschema/unittest/pfs_host-oom-t.cc
+++ b/storage/perfschema/unittest/pfs_host-oom-t.cc
@@ -59,6 +59,7 @@ void test_oom()
param.m_statement_class_sizing= 50;
param.m_events_statements_history_sizing= 0;
param.m_events_statements_history_long_sizing= 0;
+ param.m_session_connect_attrs_sizing= 0;
/* Setup */
diff --git a/storage/perfschema/unittest/pfs_instr-oom-t.cc b/storage/perfschema/unittest/pfs_instr-oom-t.cc
index 0bc329ccddc..18c0029776d 100644
--- a/storage/perfschema/unittest/pfs_instr-oom-t.cc
+++ b/storage/perfschema/unittest/pfs_instr-oom-t.cc
@@ -63,6 +63,7 @@ void test_oom()
param.m_statement_class_sizing= 0;
param.m_events_statements_history_sizing= 0;
param.m_events_statements_history_long_sizing= 0;
+ param.m_session_connect_attrs_sizing= 0;
init_event_name_sizing(& param);
rc= init_instruments(& param);
@@ -98,6 +99,7 @@ void test_oom()
param.m_statement_class_sizing= 0;
param.m_events_statements_history_sizing= 0;
param.m_events_statements_history_long_sizing= 0;
+ param.m_session_connect_attrs_sizing= 0;
init_event_name_sizing(& param);
rc= init_instruments(& param);
@@ -133,6 +135,7 @@ void test_oom()
param.m_statement_class_sizing= 0;
param.m_events_statements_history_sizing= 0;
param.m_events_statements_history_long_sizing= 0;
+ param.m_session_connect_attrs_sizing= 0;
init_event_name_sizing(& param);
rc= init_instruments(& param);
@@ -168,6 +171,7 @@ void test_oom()
param.m_statement_class_sizing= 0;
param.m_events_statements_history_sizing= 0;
param.m_events_statements_history_long_sizing= 0;
+ param.m_session_connect_attrs_sizing= 0;
init_event_name_sizing(& param);
rc= init_instruments(& param);
@@ -201,6 +205,7 @@ void test_oom()
param.m_statement_class_sizing= 0;
param.m_events_statements_history_sizing= 0;
param.m_events_statements_history_long_sizing= 0;
+ param.m_session_connect_attrs_sizing= 0;
init_event_name_sizing(& param);
rc= init_instruments(& param);
@@ -236,6 +241,7 @@ void test_oom()
param.m_statement_class_sizing= 0;
param.m_events_statements_history_sizing= 0;
param.m_events_statements_history_long_sizing= 0;
+ param.m_session_connect_attrs_sizing= 0;
init_event_name_sizing(& param);
rc= init_instruments(& param);
@@ -271,6 +277,7 @@ void test_oom()
param.m_statement_class_sizing= 0;
param.m_events_statements_history_sizing= 0;
param.m_events_statements_history_long_sizing= 0;
+ param.m_session_connect_attrs_sizing= 0;
init_event_name_sizing(& param);
rc= init_instruments(& param);
@@ -308,6 +315,7 @@ void test_oom()
param.m_statement_class_sizing= 0;
param.m_events_statements_history_sizing= 0;
param.m_events_statements_history_long_sizing= 0;
+ param.m_session_connect_attrs_sizing= 0;
stub_alloc_fails_after_count= 2;
init_event_name_sizing(& param);
@@ -341,6 +349,7 @@ void test_oom()
param.m_statement_class_sizing= 0;
param.m_events_statements_history_sizing= 0;
param.m_events_statements_history_long_sizing= 0;
+ param.m_session_connect_attrs_sizing= 0;
stub_alloc_fails_after_count= 2;
init_event_name_sizing(& param);
@@ -383,8 +392,9 @@ void test_oom()
param.m_statement_class_sizing= 0;
param.m_events_statements_history_sizing= 0;
param.m_events_statements_history_long_sizing= 0;
+ param.m_session_connect_attrs_sizing= 0;
- stub_alloc_fails_after_count= 2;
+ stub_alloc_fails_after_count= 1;
init_event_name_sizing(& param);
rc= init_instruments(& param);
ok(rc == 1, "oom (per thread waits)");
@@ -417,6 +427,7 @@ void test_oom()
param.m_statement_class_sizing= 0;
param.m_events_statements_history_sizing= 0;
param.m_events_statements_history_long_sizing= 0;
+ param.m_session_connect_attrs_sizing= 0;
stub_alloc_fails_after_count= 3;
init_event_name_sizing(& param);
@@ -451,6 +462,7 @@ void test_oom()
param.m_statement_class_sizing= 0;
param.m_events_statements_history_sizing= 0;
param.m_events_statements_history_long_sizing= 0;
+ param.m_session_connect_attrs_sizing= 0;
stub_alloc_fails_after_count= 2;
init_event_name_sizing(& param);
@@ -485,6 +497,7 @@ void test_oom()
param.m_statement_class_sizing= 0;
param.m_events_statements_history_sizing= 10;
param.m_events_statements_history_long_sizing= 0;
+ param.m_session_connect_attrs_sizing= 0;
stub_alloc_fails_after_count= 2;
init_event_name_sizing(& param);
@@ -519,6 +532,7 @@ void test_oom()
param.m_statement_class_sizing= 50;
param.m_events_statements_history_sizing= 0;
param.m_events_statements_history_long_sizing= 0;
+ param.m_session_connect_attrs_sizing= 0;
stub_alloc_fails_after_count= 2;
init_event_name_sizing(& param);
@@ -553,6 +567,7 @@ void test_oom()
param.m_statement_class_sizing= 0;
param.m_events_statements_history_sizing= 0;
param.m_events_statements_history_long_sizing= 0;
+ param.m_session_connect_attrs_sizing= 0;
stub_alloc_fails_after_count= 1;
init_event_name_sizing(& param);
@@ -587,6 +602,7 @@ void test_oom()
param.m_statement_class_sizing= 0;
param.m_events_statements_history_sizing= 0;
param.m_events_statements_history_long_sizing= 0;
+ param.m_session_connect_attrs_sizing= 0;
stub_alloc_fails_after_count= 3;
init_event_name_sizing(& param);
@@ -624,6 +640,7 @@ void test_oom()
param.m_statement_class_sizing= 20;
param.m_events_statements_history_sizing= 0;
param.m_events_statements_history_long_sizing= 0;
+ param.m_session_connect_attrs_sizing= 0;
stub_alloc_fails_after_count= 3;
init_event_name_sizing(& param);
diff --git a/storage/perfschema/unittest/pfs_instr-t.cc b/storage/perfschema/unittest/pfs_instr-t.cc
index fd71a722a50..fab22b203d3 100644
--- a/storage/perfschema/unittest/pfs_instr-t.cc
+++ b/storage/perfschema/unittest/pfs_instr-t.cc
@@ -60,6 +60,8 @@ void test_no_instruments()
param.m_statement_class_sizing= 0;
param.m_events_statements_history_sizing= 0;
param.m_events_statements_history_long_sizing= 0;
+ param.m_digest_sizing= 0;
+ param.m_session_connect_attrs_sizing= 0;
init_event_name_sizing(& param);
rc= init_instruments(& param);
@@ -117,6 +119,8 @@ void test_no_instances()
param.m_statement_class_sizing= 0;
param.m_events_statements_history_sizing= 0;
param.m_events_statements_history_long_sizing= 0;
+ param.m_digest_sizing= 0;
+ param.m_session_connect_attrs_sizing= 0;
init_event_name_sizing(& param);
rc= init_instruments(& param);
@@ -153,19 +157,19 @@ void test_no_instances()
PFS_thread fake_thread;
fake_thread.m_filename_hash_pins= NULL;
- file= find_or_create_file(& fake_thread, & dummy_file_class, "dummy", 5);
+ file= find_or_create_file(& fake_thread, & dummy_file_class, "dummy", 5, true);
ok(file == NULL, "no file");
ok(file_lost == 1, "lost 1");
- file= find_or_create_file(& fake_thread, & dummy_file_class, "dummy", 5);
+ file= find_or_create_file(& fake_thread, & dummy_file_class, "dummy", 5, true);
ok(file == NULL, "no file");
ok(file_lost == 2, "lost 2");
init_file_hash();
- file= find_or_create_file(& fake_thread, & dummy_file_class, "dummy", 5);
+ file= find_or_create_file(& fake_thread, & dummy_file_class, "dummy", 5, true);
ok(file == NULL, "no file");
ok(file_lost == 3, "lost 3");
- file= find_or_create_file(& fake_thread, & dummy_file_class, "dummy", 5);
+ file= find_or_create_file(& fake_thread, & dummy_file_class, "dummy", 5, true);
ok(file == NULL, "no file");
ok(file_lost == 4, "lost 4");
@@ -173,7 +177,7 @@ void test_no_instances()
int size= sizeof(long_file_name);
memset(long_file_name, 'X', size);
- file= find_or_create_file(& fake_thread, & dummy_file_class, long_file_name, size);
+ file= find_or_create_file(& fake_thread, & dummy_file_class, long_file_name, size, true);
ok(file == NULL, "no file");
ok(file_lost == 5, "lost 5");
@@ -184,10 +188,10 @@ void test_no_instances()
ok(table == NULL, "no table");
ok(table_lost == 2, "lost 2");
- socket= create_socket(& dummy_socket_class, NULL);
+ socket= create_socket(& dummy_socket_class, NULL, NULL, 0);
ok(socket == NULL, "no socket");
ok(socket_lost == 1, "lost 1");
- socket= create_socket(& dummy_socket_class, NULL);
+ socket= create_socket(& dummy_socket_class, NULL, NULL, 0);
ok(socket == NULL, "no socket");
ok(socket_lost == 2, "lost 2");
@@ -255,6 +259,8 @@ void test_with_instances()
param.m_statement_class_sizing= 0;
param.m_events_statements_history_sizing= 0;
param.m_events_statements_history_long_sizing= 0;
+ param.m_digest_sizing= 0;
+ param.m_session_connect_attrs_sizing= 0;
init_event_name_sizing(& param);
rc= init_instruments(& param);
@@ -325,50 +331,50 @@ void test_with_instances()
PFS_thread fake_thread;
fake_thread.m_filename_hash_pins= NULL;
- file_1= find_or_create_file(& fake_thread, & dummy_file_class, "dummy", 5);
+ file_1= find_or_create_file(& fake_thread, & dummy_file_class, "dummy", 5, true);
ok(file_1 == NULL, "no file");
ok(file_lost == 1, "lost 1");
- file_1= find_or_create_file(& fake_thread, & dummy_file_class, "dummy", 5);
+ file_1= find_or_create_file(& fake_thread, & dummy_file_class, "dummy", 5, true);
ok(file_1 == NULL, "no file");
ok(file_lost == 2, "lost 2");
init_file_hash();
file_lost= 0;
- file_1= find_or_create_file(& fake_thread, & dummy_file_class, "dummy_A", 7);
+ file_1= find_or_create_file(& fake_thread, & dummy_file_class, "dummy_A", 7, true);
ok(file_1 != NULL, "file");
ok(file_1->m_file_stat.m_open_count == 1, "open count 1");
ok(file_lost == 0, "not lost");
- file_2= find_or_create_file(& fake_thread, & dummy_file_class, "dummy_A", 7);
+ file_2= find_or_create_file(& fake_thread, & dummy_file_class, "dummy_A", 7, true);
ok(file_1 == file_2, "same file");
ok(file_1->m_file_stat.m_open_count == 2, "open count 2");
ok(file_lost == 0, "not lost");
release_file(file_2);
ok(file_1->m_file_stat.m_open_count == 1, "open count 1");
- file_2= find_or_create_file(& fake_thread, & dummy_file_class, "dummy_B", 7);
+ file_2= find_or_create_file(& fake_thread, & dummy_file_class, "dummy_B", 7, true);
ok(file_2 != NULL, "file");
ok(file_lost == 0, "not lost");
- file_2= find_or_create_file(& fake_thread, & dummy_file_class, "dummy_C", 7);
+ file_2= find_or_create_file(& fake_thread, & dummy_file_class, "dummy_C", 7, true);
ok(file_2 == NULL, "no file");
ok(file_lost == 1, "lost");
release_file(file_1);
/* the file still exists, not destroyed */
ok(file_1->m_file_stat.m_open_count == 0, "open count 0");
- file_2= find_or_create_file(& fake_thread, & dummy_file_class, "dummy_D", 7);
+ file_2= find_or_create_file(& fake_thread, & dummy_file_class, "dummy_D", 7, true);
ok(file_2 == NULL, "no file");
ok(file_lost == 2, "lost");
- socket_1= create_socket(& dummy_socket_class, NULL);
+ socket_1= create_socket(& dummy_socket_class, NULL, NULL, 0);
ok(socket_1 != NULL, "socket");
ok(socket_lost == 0, "not lost");
- socket_2= create_socket(& dummy_socket_class, NULL);
+ socket_2= create_socket(& dummy_socket_class, NULL, NULL, 0);
ok(socket_2 != NULL, "socket");
ok(socket_lost == 0, "not lost");
- socket_2= create_socket(& dummy_socket_class, NULL);
+ socket_2= create_socket(& dummy_socket_class, NULL, NULL, 0);
ok(socket_2 == NULL, "no socket");
ok(socket_lost == 1, "lost 1");
destroy_socket(socket_1);
- socket_2= create_socket(& dummy_socket_class, NULL);
+ socket_2= create_socket(& dummy_socket_class, NULL, NULL, 0);
ok(socket_2 != NULL, "socket");
ok(socket_lost == 1, "no new loss");
diff --git a/storage/perfschema/unittest/pfs_instr_class-t.cc b/storage/perfschema/unittest/pfs_instr_class-t.cc
index 0ec224cbae3..706c5724a80 100644
--- a/storage/perfschema/unittest/pfs_instr_class-t.cc
+++ b/storage/perfschema/unittest/pfs_instr_class-t.cc
@@ -475,6 +475,7 @@ void test_table_registration()
#endif
}
+#ifdef LATER
void set_wait_stat(PFS_instr_class *klass)
{
PFS_single_stat *stat;
@@ -501,6 +502,7 @@ bool is_empty_stat(PFS_instr_class *klass)
return false;
return true;
}
+#endif
void test_instruments_reset()
{
diff --git a/storage/sphinx/ha_sphinx.cc b/storage/sphinx/ha_sphinx.cc
index 9f5097b59ce..866bd9e4d31 100644
--- a/storage/sphinx/ha_sphinx.cc
+++ b/storage/sphinx/ha_sphinx.cc
@@ -3370,7 +3370,7 @@ int ha_sphinx::create ( const char * name, TABLE * table, HA_CREATE_INFO * )
// check index
if (
table->s->keys!=1 ||
- table->key_info[0].key_parts!=1 ||
+ table->key_info[0].user_defined_key_parts != 1 ||
strcasecmp ( table->key_info[0].key_part[0].field->field_name, table->field[2]->field_name ) )
{
my_snprintf ( sError, sizeof(sError), "%s: there must be an index on '%s' column",
@@ -3404,7 +3404,7 @@ int ha_sphinx::create ( const char * name, TABLE * table, HA_CREATE_INFO * )
// check index
if (
table->s->keys!=1 ||
- table->key_info[0].key_parts!=1 ||
+ table->key_info[0].user_defined_key_parts!=1 ||
strcasecmp ( table->key_info[0].key_part[0].field->field_name, "id" ) )
{
my_snprintf ( sError, sizeof(sError), "%s: 'id' column must be indexed", name );
diff --git a/strings/ctype-big5.c b/strings/ctype-big5.c
index 7c7d8c7b2f5..f77e705525c 100644
--- a/strings/ctype-big5.c
+++ b/strings/ctype-big5.c
@@ -870,7 +870,7 @@ static int my_strnncoll_big5(CHARSET_INFO *cs __attribute__((unused)),
const uchar *b, size_t b_length,
my_bool b_is_prefix)
{
- size_t length= min(a_length, b_length);
+ size_t length= MY_MIN(a_length, b_length);
int res= my_strnncoll_big5_internal(&a, &b, length);
return res ? res : (int)((b_is_prefix ? length : a_length) - b_length);
}
@@ -883,7 +883,7 @@ static int my_strnncollsp_big5(CHARSET_INFO * cs __attribute__((unused)),
const uchar *b, size_t b_length,
my_bool diff_if_only_endspace_difference)
{
- size_t length= min(a_length, b_length);
+ size_t length= MY_MIN(a_length, b_length);
int res= my_strnncoll_big5_internal(&a, &b, length);
#ifndef VARCHAR_WITH_DIFF_ENDSPACE_ARE_DIFFERENT_FOR_UNIQUE
diff --git a/strings/ctype-bin.c b/strings/ctype-bin.c
index 07cf9d45f07..2363a235550 100644
--- a/strings/ctype-bin.c
+++ b/strings/ctype-bin.c
@@ -80,7 +80,7 @@ static int my_strnncoll_binary(CHARSET_INFO * cs __attribute__((unused)),
const uchar *t, size_t tlen,
my_bool t_is_prefix)
{
- size_t len=min(slen,tlen);
+ size_t len=MY_MIN(slen,tlen);
int cmp= memcmp(s,t,len);
return cmp ? cmp : (int)((t_is_prefix ? len : slen) - tlen);
}
@@ -131,7 +131,7 @@ static int my_strnncoll_8bit_bin(CHARSET_INFO * cs __attribute__((unused)),
const uchar *t, size_t tlen,
my_bool t_is_prefix)
{
- size_t len=min(slen,tlen);
+ size_t len=MY_MIN(slen,tlen);
int cmp= memcmp(s,t,len);
return cmp ? cmp : (int)((t_is_prefix ? len : slen) - tlen);
}
@@ -175,7 +175,7 @@ static int my_strnncollsp_8bit_bin(CHARSET_INFO * cs __attribute__((unused)),
diff_if_only_endspace_difference= 0;
#endif
- end= a + (length= min(a_length, b_length));
+ end= a + (length= MY_MIN(a_length, b_length));
while (a < end)
{
if (*a++ != *b++)
@@ -414,7 +414,7 @@ static size_t my_strnxfrm_bin(CHARSET_INFO *cs __attribute__((unused)),
const uchar *src, size_t srclen)
{
if (dest != src)
- memcpy(dest, src, min(dstlen,srclen));
+ memcpy(dest, src, MY_MIN(dstlen,srclen));
if (dstlen > srclen)
bfill(dest + srclen, dstlen - srclen, 0);
return dstlen;
@@ -427,7 +427,7 @@ size_t my_strnxfrm_8bit_bin(CHARSET_INFO *cs __attribute__((unused)),
const uchar *src, size_t srclen)
{
if (dest != src)
- memcpy(dest, src, min(dstlen,srclen));
+ memcpy(dest, src, MY_MIN(dstlen,srclen));
if (dstlen > srclen)
bfill(dest + srclen, dstlen - srclen, ' ');
return dstlen;
diff --git a/strings/ctype-gbk.c b/strings/ctype-gbk.c
index 8b37de4a5e7..e21c406d2a9 100644
--- a/strings/ctype-gbk.c
+++ b/strings/ctype-gbk.c
@@ -3470,7 +3470,7 @@ int my_strnncoll_gbk(CHARSET_INFO *cs __attribute__((unused)),
const uchar *b, size_t b_length,
my_bool b_is_prefix)
{
- size_t length= min(a_length, b_length);
+ size_t length= MY_MIN(a_length, b_length);
int res= my_strnncoll_gbk_internal(&a, &b, length);
return res ? res : (int) ((b_is_prefix ? length : a_length) - b_length);
}
@@ -3481,7 +3481,7 @@ static int my_strnncollsp_gbk(CHARSET_INFO * cs __attribute__((unused)),
const uchar *b, size_t b_length,
my_bool diff_if_only_endspace_difference)
{
- size_t length= min(a_length, b_length);
+ size_t length= MY_MIN(a_length, b_length);
int res= my_strnncoll_gbk_internal(&a, &b, length);
#ifndef VARCHAR_WITH_DIFF_ENDSPACE_ARE_DIFFERENT_FOR_UNIQUE
diff --git a/strings/ctype-mb.c b/strings/ctype-mb.c
index 0c0332ea3da..8c7de1d16c7 100644
--- a/strings/ctype-mb.c
+++ b/strings/ctype-mb.c
@@ -486,7 +486,7 @@ my_strnncoll_mb_bin(CHARSET_INFO * cs __attribute__((unused)),
const uchar *t, size_t tlen,
my_bool t_is_prefix)
{
- size_t len=min(slen,tlen);
+ size_t len=MY_MIN(slen,tlen);
int cmp= memcmp(s,t,len);
return cmp ? cmp : (int) ((t_is_prefix ? len : slen) - tlen);
}
@@ -531,7 +531,7 @@ my_strnncollsp_mb_bin(CHARSET_INFO * cs __attribute__((unused)),
diff_if_only_endspace_difference= 0;
#endif
- end= a + (length= min(a_length, b_length));
+ end= a + (length= MY_MIN(a_length, b_length));
while (a < end)
{
if (*a++ != *b++)
@@ -570,7 +570,7 @@ static size_t my_strnxfrm_mb_bin(CHARSET_INFO *cs __attribute__((unused)),
const uchar *src, size_t srclen)
{
if (dest != src)
- memcpy(dest, src, min(dstlen, srclen));
+ memcpy(dest, src, MY_MIN(dstlen, srclen));
if (dstlen > srclen)
bfill(dest + srclen, dstlen - srclen, ' ');
return dstlen;
diff --git a/strings/ctype-simple.c b/strings/ctype-simple.c
index 3cd6805158e..f084ff9949a 100644
--- a/strings/ctype-simple.c
+++ b/strings/ctype-simple.c
@@ -160,7 +160,7 @@ int my_strnncollsp_simple(CHARSET_INFO * cs, const uchar *a, size_t a_length,
diff_if_only_endspace_difference= 0;
#endif
- end= a + (length= min(a_length, b_length));
+ end= a + (length= MY_MIN(a_length, b_length));
while (a < end)
{
if (map[*a++] != map[*b++])
@@ -770,7 +770,7 @@ size_t my_long10_to_str_8bit(CHARSET_INFO *cs __attribute__((unused)),
val= new_val;
}
- len= min(len, (size_t) (e-p));
+ len= MY_MIN(len, (size_t) (e-p));
memcpy(dst, p, len);
return len+sign;
}
@@ -824,7 +824,7 @@ size_t my_longlong10_to_str_8bit(CHARSET_INFO *cs __attribute__((unused)),
long_val= quo;
}
- len= min(len, (size_t) (e-p));
+ len= MY_MIN(len, (size_t) (e-p));
cnv:
memcpy(dst, p, len);
return len+sign;
@@ -1069,7 +1069,7 @@ size_t my_well_formed_len_8bit(CHARSET_INFO *cs __attribute__((unused)),
{
size_t nbytes= (size_t) (end-start);
*error= 0;
- return min(nbytes, nchars);
+ return MY_MIN(nbytes, nchars);
}
diff --git a/strings/ctype-tis620.c b/strings/ctype-tis620.c
index d97f8e5ff08..d84d43a67bd 100644
--- a/strings/ctype-tis620.c
+++ b/strings/ctype-tis620.c
@@ -566,7 +566,7 @@ int my_strnncollsp_tis620(CHARSET_INFO * cs __attribute__((unused)),
a_length= thai2sortable(a, a_length);
b_length= thai2sortable(b, b_length);
- end= a + (length= min(a_length, b_length));
+ end= a + (length= MY_MIN(a_length, b_length));
while (a < end)
{
if (*a++ != *b++)
@@ -623,7 +623,7 @@ size_t my_strnxfrm_tis620(CHARSET_INFO *cs __attribute__((unused)),
const uchar *src, size_t srclen)
{
size_t dstlen= len;
- len= (size_t) (strmake((char*) dest, (char*) src, min(len, srclen)) -
+ len= (size_t) (strmake((char*) dest, (char*) src, MY_MIN(len, srclen)) -
(char*) dest);
len= thai2sortable(dest, len);
if (dstlen > len)
diff --git a/strings/ctype-uca.c b/strings/ctype-uca.c
index 020cfdfcbbe..8bfb6ac5e5a 100644
--- a/strings/ctype-uca.c
+++ b/strings/ctype-uca.c
@@ -7738,7 +7738,7 @@ static void my_coll_lexem_print_error(MY_COLL_LEXEM *lexem,
{
char tail[30];
size_t len= lexem->end - lexem->prev;
- strmake (tail, lexem->prev, (size_t) min(len, sizeof(tail)-1));
+ strmake (tail, lexem->prev, (size_t) MY_MIN(len, sizeof(tail)-1));
errstr[errsize-1]= '\0';
my_snprintf(errstr,errsize-1,"%s at '%s'", txt, tail);
}
diff --git a/strings/ctype-ucs2.c b/strings/ctype-ucs2.c
index edb42862e50..b23b88a165d 100644
--- a/strings/ctype-ucs2.c
+++ b/strings/ctype-ucs2.c
@@ -58,7 +58,7 @@ my_bincmp(const uchar *s, const uchar *se,
const uchar *t, const uchar *te)
{
int slen= (int) (se - s), tlen= (int) (te - t);
- int len= min(slen, tlen);
+ int len= MY_MIN(slen, tlen);
int cmp= memcmp(s, t, len);
return cmp ? cmp : slen - tlen;
}
@@ -2658,7 +2658,7 @@ my_strnncollsp_utf32_bin(CHARSET_INFO *cs __attribute__((unused)),
se= s + slen;
te= t + tlen;
- for (minlen= min(slen, tlen); minlen; minlen-= 4)
+ for (minlen= MY_MIN(slen, tlen); minlen; minlen-= 4)
{
my_wc_t s_wc= my_utf32_get(s);
my_wc_t t_wc= my_utf32_get(t);
@@ -3121,7 +3121,7 @@ static int my_strnncollsp_ucs2(CHARSET_INFO *cs __attribute__((unused)),
se= s + slen;
te= t + tlen;
- for (minlen= min(slen, tlen); minlen; minlen-= 2)
+ for (minlen= MY_MIN(slen, tlen); minlen; minlen-= 2)
{
int s_wc = uni_plane[s[0]] ? (int) uni_plane[s[0]][s[1]].sort :
(((int) s[0]) << 8) + (int) s[1];
@@ -3198,7 +3198,7 @@ size_t my_well_formed_len_ucs2(CHARSET_INFO *cs __attribute__((unused)),
size_t nbytes= ((size_t) (e-b)) & ~(size_t) 1;
*error= 0;
nchars*= 2;
- return min(nbytes, nchars);
+ return MY_MIN(nbytes, nchars);
}
@@ -3273,7 +3273,7 @@ static int my_strnncollsp_ucs2_bin(CHARSET_INFO *cs __attribute__((unused)),
se= s + slen;
te= t + tlen;
- for (minlen= min(slen, tlen); minlen; minlen-= 2)
+ for (minlen= MY_MIN(slen, tlen); minlen; minlen-= 2)
{
int s_wc= s[0] * 256 + s[1];
int t_wc= t[0] * 256 + t[1];
diff --git a/strings/ctype-utf8.c b/strings/ctype-utf8.c
index b17840bd54c..4e9724aed3c 100644
--- a/strings/ctype-utf8.c
+++ b/strings/ctype-utf8.c
@@ -2258,7 +2258,7 @@ static inline int bincmp(const uchar *s, const uchar *se,
const uchar *t, const uchar *te)
{
int slen= (int) (se-s), tlen= (int) (te-t);
- int len=min(slen,tlen);
+ int len=MY_MIN(slen,tlen);
int cmp= memcmp(s,t,len);
return cmp ? cmp : slen-tlen;
}
@@ -4672,7 +4672,7 @@ bincmp_utf8mb4(const uchar *s, const uchar *se,
const uchar *t, const uchar *te)
{
int slen= (int) (se - s), tlen= (int) (te - t);
- int len= min(slen, tlen);
+ int len= MY_MIN(slen, tlen);
int cmp= memcmp(s, t, len);
return cmp ? cmp : slen - tlen;
}
diff --git a/strings/ctype.c b/strings/ctype.c
index 23f18b6617b..b71d7dee4c4 100644
--- a/strings/ctype.c
+++ b/strings/ctype.c
@@ -430,3 +430,144 @@ my_charset_is_ascii_compatible(CHARSET_INFO *cs)
}
return 1;
}
+
+
+/*
+ Convert a string between two character sets.
+ 'to' must be large enough to store (form_length * to_cs->mbmaxlen) bytes.
+
+ @param to[OUT] Store result here
+ @param to_length Size of "to" buffer
+ @param to_cs Character set of result string
+ @param from Copy from here
+ @param from_length Length of the "from" string
+ @param from_cs Character set of the "from" string
+ @param errors[OUT] Number of conversion errors
+
+ @return Number of bytes copied to 'to' string
+*/
+
+static uint32
+my_convert_internal(char *to, uint32 to_length,
+ CHARSET_INFO *to_cs,
+ const char *from, uint32 from_length,
+ CHARSET_INFO *from_cs, uint *errors)
+{
+ int cnvres;
+ my_wc_t wc;
+ const uchar *from_end= (const uchar*) from + from_length;
+ char *to_start= to;
+ uchar *to_end= (uchar*) to + to_length;
+ my_charset_conv_mb_wc mb_wc= from_cs->cset->mb_wc;
+ my_charset_conv_wc_mb wc_mb= to_cs->cset->wc_mb;
+ uint error_count= 0;
+
+ while (1)
+ {
+ if ((cnvres= (*mb_wc)(from_cs, &wc, (uchar*) from, from_end)) > 0)
+ from+= cnvres;
+ else if (cnvres == MY_CS_ILSEQ)
+ {
+ error_count++;
+ from++;
+ wc= '?';
+ }
+ else if (cnvres > MY_CS_TOOSMALL)
+ {
+ /*
+ A correct multibyte sequence detected
+ But it doesn't have Unicode mapping.
+ */
+ error_count++;
+ from+= (-cnvres);
+ wc= '?';
+ }
+ else
+ break; // Not enough characters
+
+outp:
+ if ((cnvres= (*wc_mb)(to_cs, wc, (uchar*) to, to_end)) > 0)
+ to+= cnvres;
+ else if (cnvres == MY_CS_ILUNI && wc != '?')
+ {
+ error_count++;
+ wc= '?';
+ goto outp;
+ }
+ else
+ break;
+ }
+ *errors= error_count;
+ return (uint32) (to - to_start);
+}
+
+
+/*
+ Convert a string between two character sets.
+ Optimized for quick copying of ASCII characters in the range 0x00..0x7F.
+ 'to' must be large enough to store (form_length * to_cs->mbmaxlen) bytes.
+
+ @param to[OUT] Store result here
+ @param to_length Size of "to" buffer
+ @param to_cs Character set of result string
+ @param from Copy from here
+ @param from_length Length of the "from" string
+ @param from_cs Character set of the "from" string
+ @param errors[OUT] Number of conversion errors
+
+ @return Number of bytes copied to 'to' string
+*/
+
+uint32
+my_convert(char *to, uint32 to_length, CHARSET_INFO *to_cs,
+ const char *from, uint32 from_length,
+ CHARSET_INFO *from_cs, uint *errors)
+{
+ uint32 length, length2;
+ /*
+ If any of the character sets is not ASCII compatible,
+ immediately switch to slow mb_wc->wc_mb method.
+ */
+ if ((to_cs->state | from_cs->state) & MY_CS_NONASCII)
+ return my_convert_internal(to, to_length, to_cs,
+ from, from_length, from_cs, errors);
+
+ length= length2= MY_MIN(to_length, from_length);
+
+#if defined(__i386__) || defined(__x86_64__)
+ /*
+ Special loop for i386, it allows to refer to a
+ non-aligned memory block as UINT32, which makes
+ it possible to copy four bytes at once. This
+ gives about 10% performance improvement comparing
+ to byte-by-byte loop.
+ */
+ for ( ; length >= 4; length-= 4, from+= 4, to+= 4)
+ {
+ if ((*(uint32*)from) & 0x80808080)
+ break;
+ *((uint32*) to)= *((const uint32*) from);
+ }
+#endif /* __i386__ */
+
+ for (; ; *to++= *from++, length--)
+ {
+ if (!length)
+ {
+ *errors= 0;
+ return length2;
+ }
+ if (*((unsigned char*) from) > 0x7F) /* A non-ASCII character */
+ {
+ uint32 copied_length= length2 - length;
+ to_length-= copied_length;
+ from_length-= copied_length;
+ return copied_length + my_convert_internal(to, to_length, to_cs,
+ from, from_length, from_cs,
+ errors);
+ }
+ }
+
+ DBUG_ASSERT(FALSE); // Should never get to here
+ return 0; // Make compiler happy
+}
diff --git a/strings/decimal.c b/strings/decimal.c
index 30cc3c30428..6d39d9aec78 100644
--- a/strings/decimal.c
+++ b/strings/decimal.c
@@ -396,7 +396,7 @@ int decimal2string(const decimal_t *from, char *to, int *to_len,
for (; frac>0; frac-=DIG_PER_DEC1)
{
dec1 x=*buf++;
- for (i=min(frac, DIG_PER_DEC1); i; i--)
+ for (i=MY_MIN(frac, DIG_PER_DEC1); i; i--)
{
dec1 y=x/DIG_MASK;
*s1++='0'+(uchar)y;
@@ -419,7 +419,7 @@ int decimal2string(const decimal_t *from, char *to, int *to_len,
for (buf=buf0+ROUND_UP(intg); intg>0; intg-=DIG_PER_DEC1)
{
dec1 x=*--buf;
- for (i=min(intg, DIG_PER_DEC1); i; i--)
+ for (i=MY_MIN(intg, DIG_PER_DEC1); i; i--)
{
dec1 y=x/10;
*--s='0'+(uchar)(x-y*10);
@@ -1513,8 +1513,8 @@ decimal_round(const decimal_t *from, decimal_t *to, int scale,
if (to != from)
{
- dec1 *p0= buf0+intg0+max(frac1, frac0);
- dec1 *p1= buf1+intg0+max(frac1, frac0);
+ dec1 *p0= buf0+intg0+MY_MAX(frac1, frac0);
+ dec1 *p1= buf1+intg0+MY_MAX(frac1, frac0);
DBUG_ASSERT(p0 - buf0 <= len);
DBUG_ASSERT(p1 - buf1 <= len);
@@ -1525,7 +1525,7 @@ decimal_round(const decimal_t *from, decimal_t *to, int scale,
buf0=to->buf;
buf1=to->buf;
to->sign=from->sign;
- to->intg=min(intg0, len)*DIG_PER_DEC1;
+ to->intg=MY_MIN(intg0, len)*DIG_PER_DEC1;
}
if (frac0 > frac1)
@@ -1627,7 +1627,7 @@ decimal_round(const decimal_t *from, decimal_t *to, int scale,
scale=frac0*DIG_PER_DEC1;
error=E_DEC_TRUNCATED; /* XXX */
}
- for (buf1=to->buf+intg0+max(frac0,0); buf1 > to->buf; buf1--)
+ for (buf1=to->buf+intg0+MY_MAX(frac0,0); buf1 > to->buf; buf1--)
{
buf1[0]=buf1[-1];
}
@@ -1646,7 +1646,7 @@ decimal_round(const decimal_t *from, decimal_t *to, int scale,
/* making 'zero' with the proper scale */
dec1 *p0= to->buf + frac0 + 1;
to->intg=1;
- to->frac= max(scale, 0);
+ to->frac= MY_MAX(scale, 0);
to->sign= 0;
for (buf1= to->buf; buf1<p0; buf1++)
*buf1= 0;
@@ -1695,11 +1695,11 @@ int decimal_result_size(decimal_t *from1, decimal_t *from2, char op, int param)
{
switch (op) {
case '-':
- return ROUND_UP(max(from1->intg, from2->intg)) +
- ROUND_UP(max(from1->frac, from2->frac));
+ return ROUND_UP(MY_MAX(from1->intg, from2->intg)) +
+ ROUND_UP(MY_MAX(from1->frac, from2->frac));
case '+':
- return ROUND_UP(max(from1->intg, from2->intg)+1) +
- ROUND_UP(max(from1->frac, from2->frac));
+ return ROUND_UP(MY_MAX(from1->intg, from2->intg)+1) +
+ ROUND_UP(MY_MAX(from1->frac, from2->frac));
case '*':
return ROUND_UP(from1->intg+from2->intg)+
ROUND_UP(from1->frac)+ROUND_UP(from2->frac);
@@ -1714,7 +1714,7 @@ static int do_add(const decimal_t *from1, const decimal_t *from2, decimal_t *to)
{
int intg1=ROUND_UP(from1->intg), intg2=ROUND_UP(from2->intg),
frac1=ROUND_UP(from1->frac), frac2=ROUND_UP(from2->frac),
- frac0=max(frac1, frac2), intg0=max(intg1, intg2), error;
+ frac0=MY_MAX(frac1, frac2), intg0=MY_MAX(intg1, intg2), error;
dec1 *buf1, *buf2, *buf0, *stop, *stop2, x, carry;
sanity(to);
@@ -1739,7 +1739,7 @@ static int do_add(const decimal_t *from1, const decimal_t *from2, decimal_t *to)
buf0=to->buf+intg0+frac0;
to->sign=from1->sign;
- to->frac=max(from1->frac, from2->frac);
+ to->frac=MY_MAX(from1->frac, from2->frac);
to->intg=intg0*DIG_PER_DEC1;
if (unlikely(error))
{
@@ -1750,7 +1750,7 @@ static int do_add(const decimal_t *from1, const decimal_t *from2, decimal_t *to)
set_if_smaller(intg2, intg0);
}
- /* part 1 - max(frac) ... min (frac) */
+ /* part 1 - MY_MAX(frac) ... min (frac) */
if (frac1 > frac2)
{
buf1=from1->buf+intg1+frac1;
@@ -1768,14 +1768,14 @@ static int do_add(const decimal_t *from1, const decimal_t *from2, decimal_t *to)
while (buf1 > stop)
*--buf0=*--buf1;
- /* part 2 - min(frac) ... min(intg) */
+ /* part 2 - MY_MIN(frac) ... MY_MIN(intg) */
carry=0;
while (buf1 > stop2)
{
ADD(*--buf0, *--buf1, *--buf2, carry);
}
- /* part 3 - min(intg) ... max(intg) */
+ /* part 3 - MY_MIN(intg) ... MY_MAX(intg) */
buf1= intg1 > intg2 ? ((stop=from1->buf)+intg1-intg2) :
((stop=from2->buf)+intg2-intg1) ;
while (buf1 > stop)
@@ -1796,7 +1796,7 @@ static int do_sub(const decimal_t *from1, const decimal_t *from2, decimal_t *to)
{
int intg1=ROUND_UP(from1->intg), intg2=ROUND_UP(from2->intg),
frac1=ROUND_UP(from1->frac), frac2=ROUND_UP(from2->frac);
- int frac0=max(frac1, frac2), error;
+ int frac0=MY_MAX(frac1, frac2), error;
dec1 *buf1, *buf2, *buf0, *stop1, *stop2, *start1, *start2;
my_bool carry=0;
@@ -1872,7 +1872,7 @@ static int do_sub(const decimal_t *from1, const decimal_t *from2, decimal_t *to)
FIX_INTG_FRAC_ERROR(to->len, intg1, frac0, error);
buf0=to->buf+intg1+frac0;
- to->frac=max(from1->frac, from2->frac);
+ to->frac=MY_MAX(from1->frac, from2->frac);
to->intg=intg1*DIG_PER_DEC1;
if (unlikely(error))
{
@@ -1883,7 +1883,7 @@ static int do_sub(const decimal_t *from1, const decimal_t *from2, decimal_t *to)
}
carry=0;
- /* part 1 - max(frac) ... min (frac) */
+ /* part 1 - MY_MAX(frac) ... min (frac) */
if (frac1 > frac2)
{
buf1=start1+intg1+frac1;
@@ -1907,7 +1907,7 @@ static int do_sub(const decimal_t *from1, const decimal_t *from2, decimal_t *to)
}
}
- /* part 2 - min(frac) ... intg2 */
+ /* part 2 - MY_MIN(frac) ... intg2 */
while (buf2 > start2)
{
SUB(*--buf0, *--buf1, *--buf2, carry);
@@ -2170,11 +2170,11 @@ static int do_div_mod(const decimal_t *from1, const decimal_t *from2,
{
/* we're calculating N1 % N2.
The result will have
- frac=max(frac1, frac2), as for subtraction
+ frac=MY_MAX(frac1, frac2), as for subtraction
intg=intg2
*/
to->sign=from1->sign;
- to->frac=max(from1->frac, from2->frac);
+ to->frac=MY_MAX(from1->frac, from2->frac);
frac0=0;
}
else
@@ -2307,7 +2307,7 @@ static int do_div_mod(const decimal_t *from1, const decimal_t *from2,
/*
now the result is in tmp1, it has
intg=prec1-frac1
- frac=max(frac1, frac2)=to->frac
+ frac=MY_MAX(frac1, frac2)=to->frac
*/
if (dcarry)
*--start1=dcarry;
@@ -2345,7 +2345,7 @@ static int do_div_mod(const decimal_t *from1, const decimal_t *from2,
}
DBUG_ASSERT(intg0 <= ROUND_UP(from2->intg));
stop1=start1+frac0+intg0;
- to->intg=min(intg0*DIG_PER_DEC1, from2->intg);
+ to->intg=MY_MIN(intg0*DIG_PER_DEC1, from2->intg);
}
if (unlikely(intg0+frac0 > to->len))
{
diff --git a/strings/dtoa.c b/strings/dtoa.c
index 6b216056f66..f3498a7bb1e 100644
--- a/strings/dtoa.c
+++ b/strings/dtoa.c
@@ -132,7 +132,7 @@ size_t my_fcvt(double x, int precision, char *to, my_bool *error)
if (len <= decpt)
*dst++= '.';
- for (i= precision - max(0, (len - decpt)); i > 0; i--)
+ for (i= precision - MY_MAX(0, (len - decpt)); i > 0; i--)
*dst++= '0';
}
@@ -221,7 +221,7 @@ size_t my_gcvt(double x, my_gcvt_arg_type type, int width, char *to,
if (x < 0.)
width--;
- res= dtoa(x, 4, type == MY_GCVT_ARG_DOUBLE ? width : min(width, FLT_DIG),
+ res= dtoa(x, 4, type == MY_GCVT_ARG_DOUBLE ? width : MY_MIN(width, FLT_DIG),
&decpt, &sign, &end, buf, sizeof(buf));
if (decpt == DTOA_OVERFLOW)
{
@@ -2182,7 +2182,7 @@ static char *dtoa(double dd, int mode, int ndigits, int *decpt, int *sign,
1 ==> like 0, but with Steele & White stopping rule;
e.g. with IEEE P754 arithmetic , mode 0 gives
1e23 whereas mode 1 gives 9.999999999999999e22.
- 2 ==> max(1,ndigits) significant digits. This gives a
+ 2 ==> MY_MAX(1,ndigits) significant digits. This gives a
return value similar to that of ecvt, except
that trailing zeros are suppressed.
3 ==> through ndigits past the decimal point. This
diff --git a/strings/my_vsnprintf.c b/strings/my_vsnprintf.c
index 2073d5a93d9..1584a9e2cef 100644
--- a/strings/my_vsnprintf.c
+++ b/strings/my_vsnprintf.c
@@ -96,7 +96,7 @@ static const char *get_length_arg(const char *fmt, ARGS_INFO *args_arr,
uint *arg_count, size_t *length, uint *flags)
{
fmt= get_length(fmt+1, length, flags);
- *arg_count= max(*arg_count, (uint) *length);
+ *arg_count= MY_MAX(*arg_count, (uint) *length);
(*length)--;
DBUG_ASSERT(*fmt == '$' && *length < MAX_ARGS);
args_arr[*length].arg_type= 'd';
@@ -243,7 +243,7 @@ static char *process_dbl_arg(char *to, char *end, size_t width,
width= FLT_DIG; /* width not set, use default */
else if (width >= NOT_FIXED_DEC)
width= NOT_FIXED_DEC - 1; /* max.precision for my_fcvt() */
- width= min(width, (size_t)(end-to) - 1);
+ width= MY_MIN(width, (size_t)(end-to) - 1);
if (arg_type == 'f')
to+= my_fcvt(par, (int)width , to, NULL);
@@ -292,7 +292,7 @@ static char *process_int_arg(char *to, const char *end, size_t length,
/* If %#d syntax was used, we have to pre-zero/pre-space the string */
if (store_start == buff)
{
- length= min(length, to_length);
+ length= MY_MIN(length, to_length);
if (res_length < length)
{
size_t diff= (length- res_length);
@@ -512,7 +512,7 @@ start:
break;
/* Copy data after the % format expression until next % */
- length= min(end - to , print_arr[i].end - print_arr[i].begin);
+ length= MY_MIN(end - to , print_arr[i].end - print_arr[i].begin);
if (to + length < end)
length++;
to= strnmov(to, print_arr[i].begin, length);
@@ -533,7 +533,7 @@ start:
fmt= get_length(fmt, &arg_index, &unused_flags);
DBUG_ASSERT(*fmt == '$');
fmt++;
- arg_count= max(arg_count, arg_index);
+ arg_count= MY_MAX(arg_count, arg_index);
goto start;
}
@@ -735,7 +735,7 @@ int my_vfprintf(FILE *stream, const char* format, va_list args)
char cvtbuf[1024];
int alloc= 0;
char *p= cvtbuf;
- size_t cur_len= sizeof(cvtbuf);
+ size_t cur_len= sizeof(cvtbuf), actual;
int ret;
/*
@@ -746,7 +746,7 @@ int my_vfprintf(FILE *stream, const char* format, va_list args)
for (;;)
{
size_t new_len;
- size_t actual= my_vsnprintf(p, cur_len, format, args);
+ actual= my_vsnprintf(p, cur_len, format, args);
if (actual < cur_len - 1)
break;
/*
@@ -766,7 +766,9 @@ int my_vfprintf(FILE *stream, const char* format, va_list args)
if (!p)
return 0;
}
- ret= fprintf(stream, "%s", p);
+ ret= (int) actual;
+ if (fputs(p, stream) < 0)
+ ret= -1;
if (alloc)
(*my_str_free)(p);
return ret;
diff --git a/strings/str2int.c b/strings/str2int.c
index 64d4e169891..ec89503af5e 100644
--- a/strings/str2int.c
+++ b/strings/str2int.c
@@ -94,7 +94,7 @@ char *str2int(register const char *src, register int radix, long int lower,
machines all, if +|n| is representable, so is -|n|, but on
twos complement machines the converse is not true. So the
"maximum" representable number has a negative representative.
- Limit is set to min(-|lower|,-|upper|); this is the "largest"
+ Limit is set to MY_MIN(-|lower|,-|upper|); this is the "largest"
number we are concerned with. */
/* Calculate Limit using Scale as a scratch variable */
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 0acad6bf30b..a43caf5b4f3 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -34,4 +34,6 @@ FIND_LIBRARY(EVENT_LIBRARY event)
IF(HAVE_EVENT_H AND EVENT_LIBRARY)
ADD_EXECUTABLE(async_queries async_queries.c)
TARGET_LINK_LIBRARIES(async_queries mysqlclient ${EVENT_LIBRARY})
+ SET_TARGET_PROPERTIES(async_queries PROPERTIES LINKER_LANGUAGE CXX)
+
ENDIF()
diff --git a/tests/mysql_client_fw.c b/tests/mysql_client_fw.c
index 90a1ea77a50..eb88be5c0e8 100644
--- a/tests/mysql_client_fw.c
+++ b/tests/mysql_client_fw.c
@@ -583,7 +583,7 @@ static int my_process_stmt_result(MYSQL_STMT *stmt)
return row_count;
}
- field_count= min(mysql_num_fields(result), MAX_RES_FIELDS);
+ field_count= MY_MIN(mysql_num_fields(result), MAX_RES_FIELDS);
bzero((char*) buffer, sizeof(buffer));
bzero((char*) length, sizeof(length));
diff --git a/tests/mysql_client_test.c b/tests/mysql_client_test.c
index e4454fb8e41..1a653d1072d 100644
--- a/tests/mysql_client_test.c
+++ b/tests/mysql_client_test.c
@@ -17342,11 +17342,10 @@ static void test_wl4166_3()
rc= mysql_stmt_execute(stmt);
check_execute(stmt, rc);
/*
- Sic: only one warning, instead of two. The warning
- about data truncation when assigning a parameter is lost.
+ The warning about data truncation when assigning a parameter is lost.
This is a bug.
*/
- my_process_warnings(mysql, 1);
+ my_process_warnings(mysql, 0);
verify_col_data("t1", "year", "0000-00-00 00:00:00");